From debfc367526cd54e6cbe6ab9a463360ba079c487 Mon Sep 17 00:00:00 2001
From: Marco Castelluccio
Date: Fri, 15 Nov 2019 00:10:17 +0000
Subject: [PATCH] Bug 1575008 - WebGPU implementation basis r=baku,bzbarsky
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

This change vendors the `wgpu` library in-tree and hooks up the
initialization bits. It implements adapter and device initialization and
adds a simple test.

Complementary ecosystem tracker - https://github.com/gfx-rs/wgpu/issues/374

Current status:
- [x] Architecture
  - [x] figure out the IPC story
  - [ ] move wgpu crates into a dedicated folder (postponed as
        https://bugzilla.mozilla.org/show_bug.cgi?id=1594182)
- [x] Linux
  - [x] avoid depending on spirv_cross
- [x] macOS
  - [x] due to cross-compiling shaders
    - [x] need the dependency update
    - [x] stop using gcc
  - [x] unexpected SSL header collision - https://phabricator.services.mozilla.com/D51148
  - [x] undefined Metal symbols
  - [x] missing webrtc headers for IPDL magic - https://phabricator.services.mozilla.com/D51558
  - [x] spirv-cross linking failure in ASAN - https://phabricator.services.mozilla.com/D52688
- [x] Windows
  - [x] due to "ipc-channel" not supporting Windows yet
  - [x] due to some exceptional stuff
    - [x] undefined symbol: `D3D12CreateDevice`
    - [x] d3d12.dll is not found, dxgi1_4 isn't present
    - [x] d3d11.dll and dxgi.dll need to be explicitly loaded on win32 mingw
    - [x] libbacktrace fails to link on win32 mingw
    - [x] cc mislinking the C++ standard library
- [x] Android
  - [x] spirv-cross fails to build due to exceptions

Update-1: We decided to go with the IPDL mechanism instead of the
Rust-based ipc-channel (or any alternatives), which unblocks the Windows
build.

Update-2: It appears that WebGPUThreading isn't needed any more, as the
child thread (and its event loop) is now managed by the IPDL
infrastructure. This PR removes it 🎉.

Update-3: InstanceProvider is also removed.

Update-4: All set, the try run is green, waiting for dependent changes to
go in.
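For orientation, the flow the updates above describe — a content-side proxy
that only ever exchanges messages and opaque IDs with a GPU-process peer
owning the real `wgpu` objects — can be sketched in plain Rust. Everything
below is an illustrative stand-in under assumed names (`WebGpuMsg`,
`IdentityHub`, an `mpsc` channel playing the role of the `PWebGPU` IPDL
actor pair); it is not the actual `WebGPUChild`/`WebGPUParent` or
`wgpu-remote` API:

```rust
// Hedged sketch of message-brokered adapter/device initialization.
// A std::sync::mpsc channel stands in for the PWebGPU IPDL transport;
// every type here is a made-up stand-in, not the in-tree interface.
use std::sync::mpsc;
use std::thread;

type AdapterId = u32;
type DeviceId = u32;

// Requests the content side (WebGPUChild analogue) can send.
enum WebGpuMsg {
    RequestAdapter { reply: mpsc::Sender<Option<AdapterId>> },
    RequestDevice { adapter: AdapterId, reply: mpsc::Sender<DeviceId> },
    Shutdown,
}

// GPU-process side (WebGPUParent analogue): owns all real GPU state and
// hands out opaque IDs, loosely mirroring how wgpu-native's hub tracks
// objects without ever exposing raw handles across the boundary.
struct IdentityHub {
    next_id: u32,
}

impl IdentityHub {
    fn alloc(&mut self) -> u32 {
        let id = self.next_id;
        self.next_id += 1;
        id
    }
}

fn serve(rx: mpsc::Receiver<WebGpuMsg>) {
    let mut hub = IdentityHub { next_id: 1 };
    for msg in rx {
        match msg {
            WebGpuMsg::RequestAdapter { reply } => {
                // Real code would enumerate backends (Vulkan/Metal/D3D12)
                // here; the sketch always "finds" one adapter.
                let _ = reply.send(Some(hub.alloc()));
            }
            WebGpuMsg::RequestDevice { adapter: _, reply } => {
                let _ = reply.send(hub.alloc());
            }
            WebGpuMsg::Shutdown => break,
        }
    }
}

fn main() {
    let (tx, rx) = mpsc::channel();
    // The "parent" event loop; in the patch this lives in the GPU process
    // and is driven by IPDL, which is why WebGPUThreading became moot.
    let server = thread::spawn(move || serve(rx));

    // Content-side (child) view: two round trips, IDs only, no GPU handles.
    let (reply_tx, reply_rx) = mpsc::channel();
    tx.send(WebGpuMsg::RequestAdapter { reply: reply_tx }).unwrap();
    let adapter = reply_rx.recv().unwrap().expect("no adapter");

    let (reply_tx, reply_rx) = mpsc::channel();
    tx.send(WebGpuMsg::RequestDevice { adapter, reply: reply_tx }).unwrap();
    let device = reply_rx.recv().unwrap();
    println!("adapter id = {}, device id = {}", adapter, device);

    tx.send(WebGpuMsg::Shutdown).unwrap();
    server.join().unwrap();
}
```

The point of Update-2 falls out of this shape: once the transport (here the
channel, in the patch the IPDL actor infrastructure) owns the receiving
event loop, a dedicated WebGPUThreading wrapper has nothing left to manage.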
Differential Revision: https://phabricator.services.mozilla.com/D49458
UltraBlame original commit: 4d16c3d62cfc0503075206bccfbd3b32ad396e64
---
 .cargo/config.in | 10 +
 .clang-format-ignore | 1 +
 Cargo.lock | 471 +-
 Cargo.toml | 2 +
 dom/base/nsGlobalWindowInner.cpp | 5 -
 dom/base/nsGlobalWindowInner.h | 4 +-
 dom/webgpu/Adapter.cpp | 28 +-
 dom/webgpu/Adapter.h | 13 +-
 dom/webgpu/Device.cpp | 18 +-
 dom/webgpu/Device.h | 8 +-
 dom/webgpu/Instance.cpp | 56 +-
 dom/webgpu/Instance.h | 16 +-
 dom/webgpu/InstanceProvider.cpp | 38 -
 dom/webgpu/InstanceProvider.h | 55 -
 dom/webgpu/ObjectModel.h | 4 +-
 dom/webgpu/ffi/moz.build | 32 +
 dom/webgpu/ffi/wgpu.h | 31 +
 dom/webgpu/ipc/PWebGPU.ipdl | 38 +
 dom/webgpu/ipc/WebGPUChild.cpp | 84 +
 dom/webgpu/ipc/WebGPUChild.h | 68 +
 dom/webgpu/ipc/WebGPUParent.cpp | 57 +
 dom/webgpu/ipc/WebGPUParent.h | 43 +
 dom/webgpu/ipc/WebGPUSerialize.h | 34 +
 dom/webgpu/ipc/WebGPUTypes.h | 18 +
 dom/webgpu/mochitest/mochitest.ini | 2 +-
 .../mochitest/test_device_creation.html | 25 +
 dom/webgpu/moz.build | 21 +-
 dom/webgpu/thread/WebGPUThreading.cpp | 61 -
 dom/webgpu/thread/WebGPUThreading.h | 42 -
 dom/webgpu/thread/moz.build | 18 -
 dom/webgpu/wgpu-native/Cargo.toml | 53 +
 dom/webgpu/wgpu-native/cbindgen.toml | 37 +
 dom/webgpu/wgpu-native/src/binding_model.rs | 132 +
 .../wgpu-native/src/command/allocator.rs | 165 +
 dom/webgpu/wgpu-native/src/command/bind.rs | 222 +
 dom/webgpu/wgpu-native/src/command/compute.rs | 316 +
 dom/webgpu/wgpu-native/src/command/mod.rs | 769 +
 dom/webgpu/wgpu-native/src/command/render.rs | 848 +
 .../wgpu-native/src/command/transfer.rs | 421 +
 dom/webgpu/wgpu-native/src/conv.rs | 661 +
 dom/webgpu/wgpu-native/src/device.rs | 2261 +
 dom/webgpu/wgpu-native/src/hub.rs | 556 +
 dom/webgpu/wgpu-native/src/id.rs | 142 +
 dom/webgpu/wgpu-native/src/instance.rs | 563 +
 dom/webgpu/wgpu-native/src/lib.rs | 234 +
 dom/webgpu/wgpu-native/src/pipeline.rs | 354 +
 dom/webgpu/wgpu-native/src/resource.rs | 373 +
 dom/webgpu/wgpu-native/src/swap_chain.rs | 254 +
 dom/webgpu/wgpu-native/src/track/buffer.rs | 124 +
 dom/webgpu/wgpu-native/src/track/mod.rs | 472 +
 dom/webgpu/wgpu-native/src/track/range.rs | 411 +
 dom/webgpu/wgpu-native/src/track/texture.rs | 301 +
 dom/webgpu/wgpu-remote/Cargo.toml | 20 +
 dom/webgpu/wgpu-remote/cbindgen.toml | 46 +
 dom/webgpu/wgpu-remote/src/lib.rs | 150 +
 dom/webgpu/wgpu-remote/src/server.rs | 53 +
 dom/webidl/WebGPU.webidl | 10 +-
 gfx/ipc/GPUParent.cpp | 9 -
 gfx/layers/ipc/CompositorBridgeChild.cpp | 30 +
 gfx/layers/ipc/CompositorBridgeChild.h | 12 +
 gfx/layers/ipc/CompositorBridgeParent.cpp | 17 +
 gfx/layers/ipc/CompositorBridgeParent.h | 14 +
 .../ipc/ContentCompositorBridgeParent.cpp | 14 +
 .../ipc/ContentCompositorBridgeParent.h | 7 +
 gfx/layers/ipc/PCompositorBridge.ipdl | 6 +
 gfx/thebes/gfxPlatform.cpp | 2 +
 servo/components/style/Cargo.toml | 4 +-
 .../rust/arrayvec/.cargo-checksum.json | 2 +-
 third_party/rust/arrayvec/Cargo.toml | 17 +-
 third_party/rust/arrayvec/README.rst | 40 +
 third_party/rust/arrayvec/benches/extend.rs | 49 +-
 third_party/rust/arrayvec/build.rs | 90 -
 third_party/rust/arrayvec/src/array.rs | 57 +-
 third_party/rust/arrayvec/src/array_string.rs | 173 +-
 third_party/rust/arrayvec/src/char.rs | 74 +-
 third_party/rust/arrayvec/src/lib.rs | 236 +-
 third_party/rust/arrayvec/src/maybe_uninit.rs | 30 +-
 .../rust/arrayvec/src/maybe_uninit_nodrop.rs | 41 -
 .../rust/arrayvec/src/maybe_uninit_stable.rs | 40 -
 third_party/rust/arrayvec/src/range.rs | 42 -
 third_party/rust/arrayvec/tests/serde.rs | 2 +-
 third_party/rust/arrayvec/tests/tests.rs | 180 +-
 third_party/rust/ash/.cargo-checksum.json | 1 +
 third_party/rust/ash/Cargo.toml | 29 +
 third_party/rust/ash/output | 0
 third_party/rust/ash/src/allocator.rs | 120 +
 third_party/rust/ash/src/device.rs | 1997 +
 third_party/rust/ash/src/entry.rs | 281 +
 .../ash/src/extensions/experimental/amd.rs | 701 +
 .../ash/src/extensions/experimental/mod.rs | 1 +
 .../ash/src/extensions/ext/debug_marker.rs | 67 +
 .../ash/src/extensions/ext/debug_report.rs | 61 +
 .../ash/src/extensions/ext/debug_utils.rs | 157 +
 .../rust/ash/src/extensions/ext/mod.rs | 7 +
 .../ash/src/extensions/khr/android_surface.rs | 48 +
 .../src/extensions/khr/display_swapchain.rs | 50 +
 .../rust/ash/src/extensions/khr/mod.rs | 17 +
 .../rust/ash/src/extensions/khr/surface.rs | 137 +
 .../rust/ash/src/extensions/khr/swapchain.rs | 129 +
 .../ash/src/extensions/khr/wayland_surface.rs | 48 +
 .../ash/src/extensions/khr/win32_surface.rs | 48 +
 .../ash/src/extensions/khr/xcb_surface.rs | 48 +
 .../ash/src/extensions/khr/xlib_surface.rs | 48 +
 third_party/rust/ash/src/extensions/mod.rs | 5 +
 .../ash/src/extensions/mvk/ios_surface.rs | 48 +
 .../ash/src/extensions/mvk/macos_surface.rs | 48 +
 .../rust/ash/src/extensions/mvk/mod.rs | 5 +
 .../rust/ash/src/extensions/nv/mesh_shader.rs | 70 +
 third_party/rust/ash/src/extensions/nv/mod.rs | 5 +
 .../rust/ash/src/extensions/nv/ray_tracing.rs | 274 +
 third_party/rust/ash/src/instance.rs | 427 +
 third_party/rust/ash/src/lib.rs | 80 +
 third_party/rust/ash/src/prelude.rs | 2 +
 third_party/rust/ash/src/util.rs | 138 +
 third_party/rust/ash/src/version.rs | 3 +
 third_party/rust/ash/src/vk.rs | 63147 ++++++++++++++++
 .../rust/ash/tests/constant_size_arrays.rs | 41 +
 third_party/rust/ash/tests/display.rs | 18 +
 third_party/rust/atom/.cargo-checksum.json | 1 +
 third_party/rust/atom/Cargo.toml | 19 +
 third_party/rust/atom/LICENSE | 175 +
 third_party/rust/atom/examples/fifo.rs | 83 +
 third_party/rust/atom/examples/simple.rs | 33 +
 third_party/rust/atom/readme.md | 101 +
 third_party/rust/atom/src/lib.rs | 340 +
 third_party/rust/atom/tests/atom.rs | 189 +
 .../rust/backtrace/.cargo-checksum.json | 2 +-
 third_party/rust/backtrace/Cargo.lock | 392 +
 third_party/rust/backtrace/Cargo.toml | 101 +-
 third_party/rust/backtrace/README.md | 24 +-
 third_party/rust/backtrace/appveyor.yml | 20 -
 .../rust/backtrace/benches/benchmarks.rs | 94 +
 third_party/rust/backtrace/ci/android-sdk.sh | 74 +
 .../docker/arm-linux-androideabi/Dockerfile | 23 +-
 .../powerpc-unknown-linux-gnu/Dockerfile | 9 -
 third_party/rust/backtrace/ci/run-docker.sh | 2 +-
 .../rust/backtrace/ci/runtest-android.rs | 50 +
 third_party/rust/backtrace/examples/raw.rs | 22 +-
 .../rust/backtrace/src/backtrace/dbghelp.rs | 230 +-
 .../rust/backtrace/src/backtrace/libunwind.rs | 94 +-
 .../rust/backtrace/src/backtrace/mod.rs | 74 +-
 .../rust/backtrace/src/backtrace/noop.rs | 6 +-
 .../backtrace/src/backtrace/unix_backtrace.rs | 41 +-
 third_party/rust/backtrace/src/capture.rs | 358 +-
 third_party/rust/backtrace/src/dbghelp.rs | 370 +
 third_party/rust/backtrace/src/dylib.rs | 70 -
 third_party/rust/backtrace/src/lib.rs | 108 +-
 third_party/rust/backtrace/src/print.rs | 268 +
 .../rust/backtrace/src/print/fuchsia.rs | 432 +
 .../src/symbolize/coresymbolication.rs | 259 +-
 .../rust/backtrace/src/symbolize/dbghelp.rs | 270 +-
 .../rust/backtrace/src/symbolize/dladdr.rs | 127 +-
 .../backtrace/src/symbolize/dladdr_resolve.rs | 50 +
 .../rust/backtrace/src/symbolize/gimli.rs | 753 +-
 .../backtrace/src/symbolize/libbacktrace.rs | 451 +-
 .../rust/backtrace/src/symbolize/mod.rs | 422 +-
 .../rust/backtrace/src/symbolize/noop.rs | 26 +-
 third_party/rust/backtrace/src/types.rs | 83 +
 third_party/rust/backtrace/src/windows.rs | 623 +
 .../backtrace/tests/accuracy/auxiliary.rs | 16 +
 .../rust/backtrace/tests/accuracy/main.rs | 92 +
 .../rust/backtrace/tests/concurrent-panics.rs | 72 +
 .../rust/backtrace/tests/long_fn_name.rs | 17 +-
 .../rust/backtrace/tests/skip_inner_frames.rs | 49 +
 third_party/rust/backtrace/tests/smoke.rs | 193 +-
 .../rust/blake2b_simd/.cargo-checksum.json | 2 +-
 third_party/rust/blake2b_simd/Cargo.toml | 4 +-
 third_party/rust/block/.cargo-checksum.json | 1 +
 third_party/rust/block/Cargo.toml | 23 +
 third_party/rust/block/README.md | 42 +
 third_party/rust/block/src/lib.rs | 396 +
 third_party/rust/block/src/test_utils.rs | 31 +
 third_party/rust/cc/.cargo-checksum.json | 2 +-
 third_party/rust/cc/Cargo.lock | 154 +
 third_party/rust/cc/Cargo.toml | 15 +-
 third_party/rust/cc/README.md | 8 -
 third_party/rust/cc/azure-pipelines.yml | 87 -
 third_party/rust/cc/ci/azure-install-rust.yml | 23 -
 third_party/rust/cc/ci/azure-steps.yml | 17 -
 third_party/rust/cc/src/bin/gcc-shim.rs | 39 +-
 third_party/rust/cc/src/com.rs | 14 +-
 third_party/rust/cc/src/lib.rs | 569 +-
 third_party/rust/cc/src/setup_config.rs | 40 +-
 third_party/rust/cc/src/winapi.rs | 4 +-
 third_party/rust/cc/src/windows_registry.rs | 184 +-
 third_party/rust/cc/tests/cc_env.rs | 13 +-
 third_party/rust/cc/tests/cflags.rs | 15 +
 third_party/rust/cc/tests/cxxflags.rs | 15 +
 third_party/rust/cc/tests/support/mod.rs | 54 +-
 third_party/rust/cc/tests/test.rs | 82 +-
 third_party/rust/cocoa/.cargo-checksum.json | 1 +
 third_party/rust/cocoa/COPYRIGHT | 5 +
 third_party/rust/cocoa/Cargo.lock | 95 +
 third_party/rust/cocoa/Cargo.toml | 44 +
 third_party/rust/cocoa/LICENSE-APACHE | 201 +
 third_party/rust/cocoa/LICENSE-MIT | 25 +
 third_party/rust/cocoa/README.md | 6 +
 third_party/rust/cocoa/examples/color.rs | 120 +
 third_party/rust/cocoa/examples/fullscreen.rs | 97 +
 .../rust/cocoa/examples/hello_world.rs | 53 +
 third_party/rust/cocoa/examples/tab_view.rs | 87 +
 third_party/rust/cocoa/src/appkit.rs | 4155 +
 third_party/rust/cocoa/src/base.rs | 28 +
 third_party/rust/cocoa/src/foundation.rs | 1318 +
 third_party/rust/cocoa/src/lib.rs | 32 +
 third_party/rust/cocoa/src/macros.rs | 79 +
 third_party/rust/cocoa/src/quartzcore.rs | 1859 +
 third_party/rust/cocoa/tests/foundation.rs | 189 +
 .../rust/colorful/.cargo-checksum.json | 1 +
 third_party/rust/colorful/Cargo.toml | 26 +
 third_party/rust/colorful/CodeOfConduct.md | 74 +
 third_party/rust/colorful/README.md | 196 +
 third_party/rust/colorful/license | 9 +
 third_party/rust/colorful/rustfmt.toml | 5 +
 .../rust/colorful/src/core/color_string.rs | 134 +
 third_party/rust/colorful/src/core/colors.rs | 847 +
 third_party/rust/colorful/src/core/hsl.rs | 105 +
 third_party/rust/colorful/src/core/mod.rs | 36 +
 third_party/rust/colorful/src/core/rgb.rs | 90 +
 third_party/rust/colorful/src/core/style.rs | 24 +
 third_party/rust/colorful/src/core/symbols.rs | 55 +
 third_party/rust/colorful/src/lib.rs | 316 +
 .../rust/colorful/tests/test_all_color.rs | 280 +
 .../rust/colorful/tests/test_animation.rs | 31 +
 third_party/rust/colorful/tests/test_basic.rs | 53 +
 third_party/rust/colorful/tests/test_extra.rs | 10 +
 .../rust/colorful/tests/test_gradient.rs | 16 +
 third_party/rust/colorful/tests/test_hsl.rs | 10 +
 .../rust/copyless/.cargo-checksum.json | 1 +
 third_party/rust/copyless/CHANGELOG.md | 17 +
 third_party/rust/copyless/Cargo.toml | 26 +
 third_party/rust/copyless/LICENSE | 373 +
 third_party/rust/copyless/README.md | 35 +
 third_party/rust/copyless/bors.toml | 5 +
 third_party/rust/copyless/rustfmt.toml | 2 +
 third_party/rust/copyless/src/boxed.rs | 62 +
 third_party/rust/copyless/src/lib.rs | 11 +
 third_party/rust/copyless/src/vec.rs | 75 +
 .../rust/core-graphics/.cargo-checksum.json | 2 +-
 third_party/rust/core-graphics/Cargo.toml | 2 +-
 third_party/rust/core-graphics/src/color.rs | 2 +
 third_party/rust/core-graphics/src/lib.rs | 2 +
 third_party/rust/core-graphics/src/window.rs | 149 +
 .../rust/crossbeam-deque/.cargo-checksum.json | 2 +-
 third_party/rust/crossbeam-deque/CHANGELOG.md | 5 +
 third_party/rust/crossbeam-deque/Cargo.toml | 8 +-
 third_party/rust/crossbeam-deque/LICENSE-MIT | 4 +
 third_party/rust/crossbeam-deque/README.md | 5 +
 third_party/rust/crossbeam-deque/src/lib.rs | 66 +-
 .../rust/crossbeam-deque/tests/fifo.rs | 15 +-
 .../rust/crossbeam-deque/tests/injector.rs | 20 +-
 .../rust/crossbeam-deque/tests/lifo.rs | 15 +-
 .../rust/crossbeam-epoch/.cargo-checksum.json | 2 +-
 third_party/rust/crossbeam-epoch/CHANGELOG.md | 7 +
 third_party/rust/crossbeam-epoch/Cargo.lock | 98 +-
 third_party/rust/crossbeam-epoch/Cargo.toml | 12 +-
 third_party/rust/crossbeam-epoch/README.md | 8 +-
 third_party/rust/crossbeam-epoch/build.rs | 8 +
 .../rust/crossbeam-epoch/src/atomic.rs | 4 +-
 .../rust/crossbeam-epoch/src/deferred.rs | 6 +
 third_party/rust/crossbeam-epoch/src/guard.rs | 2 +
 .../rust/crossbeam-epoch/src/internal.rs | 59 +-
 third_party/rust/crossbeam-epoch/src/lib.rs | 7 +-
 .../rust/crossbeam-epoch/src/sync/queue.rs | 3 +
 .../.cargo-checksum.json | 1 +
 .../rust/crossbeam-utils-0.6.5/CHANGELOG.md | 89 +
 .../rust/crossbeam-utils-0.6.5/Cargo.toml | 37 +
 .../rust/crossbeam-utils-0.6.5/LICENSE-APACHE | 201 +
 .../rust/crossbeam-utils-0.6.5/LICENSE-MIT | 23 +
 .../rust/crossbeam-utils-0.6.5/README.md | 72 +
 .../benches/atomic_cell.rs | 159 +
 .../src/atomic/atomic_cell.rs | 924 +
 .../src/atomic/consume.rs | 82 +
 .../crossbeam-utils-0.6.5/src/atomic/mod.rs | 7 +
 .../rust/crossbeam-utils-0.6.5/src/backoff.rs | 294 +
 .../crossbeam-utils-0.6.5/src/cache_padded.rs | 116 +
 .../rust/crossbeam-utils-0.6.5/src/lib.rs | 67 +
 .../crossbeam-utils-0.6.5/src/sync/mod.rs | 17 +
 .../crossbeam-utils-0.6.5/src/sync/parker.rs | 311 +
 .../src/sync/sharded_lock.rs | 600 +
 .../src/sync/wait_group.rs | 139 +
 .../rust/crossbeam-utils-0.6.5/src/thread.rs | 529 +
 .../tests/atomic_cell.rs | 208 +
 .../tests/cache_padded.rs | 112 +
 .../crossbeam-utils-0.6.5/tests/parker.rs | 42 +
 .../tests/sharded_lock.rs | 245 +
 .../crossbeam-utils-0.6.5/tests/thread.rs | 175 +
 .../crossbeam-utils-0.6.5/tests/wait_group.rs | 66 +
 .../rust/crossbeam-utils/.cargo-checksum.json | 2 +-
 third_party/rust/crossbeam-utils/CHANGELOG.md | 17 +-
 third_party/rust/crossbeam-utils/Cargo.toml | 9 +-
 third_party/rust/crossbeam-utils/LICENSE-MIT | 4 +
 third_party/rust/crossbeam-utils/README.md | 19 +-
 .../crossbeam-utils/benches/atomic_cell.rs | 58 +-
 third_party/rust/crossbeam-utils/build.rs | 8 +
 .../crossbeam-utils/src/atomic/atomic_cell.rs | 280 +-
 .../rust/crossbeam-utils/src/atomic/mod.rs | 18 +
 .../crossbeam-utils/src/atomic/seq_lock.rs | 88 +
 .../src/atomic/seq_lock_wide.rs | 132 +
 .../rust/crossbeam-utils/src/backoff.rs | 4 +-
 .../rust/crossbeam-utils/src/cache_padded.rs | 17 +-
 third_party/rust/crossbeam-utils/src/lib.rs | 11 +-
 .../rust/crossbeam-utils/src/thread.rs | 102 +-
 .../rust/crossbeam-utils/tests/atomic_cell.rs | 26 +
 .../rust/crossbeam-utils/tests/parker.rs | 3 +-
 .../crossbeam-utils/tests/sharded_lock.rs | 30 +-
 .../rust/crossbeam-utils/tests/thread.rs | 22 +-
 third_party/rust/d3d12/.cargo-checksum.json | 1 +
 third_party/rust/d3d12/Cargo.toml | 37 +
 third_party/rust/d3d12/README.md | 2 +
 third_party/rust/d3d12/appveyor.yml | 29 +
 third_party/rust/d3d12/bors.toml | 5 +
 third_party/rust/d3d12/src/com.rs | 102 +
 .../rust/d3d12/src/command_allocator.rs | 14 +
 third_party/rust/d3d12/src/command_list.rs | 328 +
 third_party/rust/d3d12/src/debug.rs | 46 +
 third_party/rust/d3d12/src/descriptor.rs | 297 +
 third_party/rust/d3d12/src/device.rs | 346 +
 third_party/rust/d3d12/src/dxgi.rs | 219 +
 third_party/rust/d3d12/src/heap.rs | 86 +
 third_party/rust/d3d12/src/lib.rs | 101 +
 third_party/rust/d3d12/src/pso.rs | 164 +
 third_party/rust/d3d12/src/query.rs | 15 +
 third_party/rust/d3d12/src/queue.rs | 34 +
 third_party/rust/d3d12/src/resource.rs | 55 +
 third_party/rust/d3d12/src/sync.rs | 41 +
 .../rust/gfx-auxil/.cargo-checksum.json | 1 +
 third_party/rust/gfx-auxil/Cargo.toml | 35 +
 third_party/rust/gfx-auxil/src/lib.rs | 50 +
 .../gfx-backend-dx11/.cargo-checksum.json | 1 +
 third_party/rust/gfx-backend-dx11/Cargo.toml | 69 +
 third_party/rust/gfx-backend-dx11/README.md | 13 +
 .../rust/gfx-backend-dx11/shaders/blit.hlsl | 63 +
 .../rust/gfx-backend-dx11/shaders/clear.hlsl | 22 +
 .../rust/gfx-backend-dx11/shaders/copy.hlsl | 517 +
 third_party/rust/gfx-backend-dx11/src/conv.rs | 824 +
 .../rust/gfx-backend-dx11/src/debug.rs | 92 +
 .../rust/gfx-backend-dx11/src/device.rs | 2628 +
 third_party/rust/gfx-backend-dx11/src/dxgi.rs | 210 +
 .../rust/gfx-backend-dx11/src/internal.rs | 1308 +
 third_party/rust/gfx-backend-dx11/src/lib.rs | 3239 +
 .../rust/gfx-backend-dx11/src/shader.rs | 315 +
 .../gfx-backend-dx12/.cargo-checksum.json | 1 +
 third_party/rust/gfx-backend-dx12/Cargo.toml | 64 +
 third_party/rust/gfx-backend-dx12/README.md | 13 +
 .../rust/gfx-backend-dx12/shaders/blit.hlsl | 29 +
 .../rust/gfx-backend-dx12/src/command.rs | 2618 +
 third_party/rust/gfx-backend-dx12/src/conv.rs | 633 +
 .../gfx-backend-dx12/src/descriptors_cpu.rs | 158 +
 .../rust/gfx-backend-dx12/src/device.rs | 3507 +
 .../rust/gfx-backend-dx12/src/internal.rs | 239 +
 third_party/rust/gfx-backend-dx12/src/lib.rs | 1343 +
 third_party/rust/gfx-backend-dx12/src/pool.rs | 120 +
 .../rust/gfx-backend-dx12/src/resource.rs | 759 +
 .../gfx-backend-dx12/src/root_constants.rs | 300 +
 .../rust/gfx-backend-dx12/src/window.rs | 233 +
 .../gfx-backend-empty/.cargo-checksum.json | 1 +
 third_party/rust/gfx-backend-empty/Cargo.toml | 28 +
 third_party/rust/gfx-backend-empty/src/lib.rs | 1021 +
 .../gfx-backend-metal/.cargo-checksum.json | 1 +
 third_party/rust/gfx-backend-metal/Cargo.toml | 98 +
 third_party/rust/gfx-backend-metal/README.md | 13 +
 .../rust/gfx-backend-metal/shaders/blit.metal | 110 +
 .../gfx-backend-metal/shaders/clear.metal | 79 +
 .../rust/gfx-backend-metal/shaders/fill.metal | 33 +
 .../shaders/gfx_shaders.metallib | Bin 0 -> 56133 bytes
 .../rust/gfx-backend-metal/shaders/macros.h | 5 +
 .../rust/gfx-backend-metal/src/command.rs | 4739 ++
 .../rust/gfx-backend-metal/src/conversions.rs | 1231 +
 .../rust/gfx-backend-metal/src/device.rs | 3042 +
 .../rust/gfx-backend-metal/src/internal.rs | 569 +
 third_party/rust/gfx-backend-metal/src/lib.rs | 1033 +
 .../rust/gfx-backend-metal/src/native.rs | 1085 +
 .../rust/gfx-backend-metal/src/soft.rs | 519 +
 .../rust/gfx-backend-metal/src/window.rs | 732 +
 .../gfx-backend-vulkan/.cargo-checksum.json | 1 +
 .../rust/gfx-backend-vulkan/Cargo.toml | 75 +
 third_party/rust/gfx-backend-vulkan/README.md | 13 +
 .../rust/gfx-backend-vulkan/src/command.rs | 971 +
 .../rust/gfx-backend-vulkan/src/conv.rs | 587 +
 .../rust/gfx-backend-vulkan/src/device.rs | 2307 +
 .../rust/gfx-backend-vulkan/src/info.rs | 5 +
 .../rust/gfx-backend-vulkan/src/lib.rs | 1434 +
 .../rust/gfx-backend-vulkan/src/native.rs | 180 +
 .../rust/gfx-backend-vulkan/src/pool.rs | 60 +
 .../rust/gfx-backend-vulkan/src/window.rs | 597 +
 third_party/rust/gfx-hal/.cargo-checksum.json | 1 +
 third_party/rust/gfx-hal/Cargo.toml | 49 +
 third_party/rust/gfx-hal/src/adapter.rs | 153 +
 third_party/rust/gfx-hal/src/buffer.rs | 138 +
 third_party/rust/gfx-hal/src/command/clear.rs | 70 +
 third_party/rust/gfx-hal/src/command/mod.rs | 564 +
 .../rust/gfx-hal/src/command/structs.rs | 86 +
 third_party/rust/gfx-hal/src/device.rs | 793 +
 third_party/rust/gfx-hal/src/format.rs | 623 +
 third_party/rust/gfx-hal/src/image.rs | 639 +
 third_party/rust/gfx-hal/src/lib.rs | 459 +
 third_party/rust/gfx-hal/src/memory.rs | 101 +
 third_party/rust/gfx-hal/src/pass.rs | 185 +
 third_party/rust/gfx-hal/src/pool.rs | 43 +
 third_party/rust/gfx-hal/src/pso/compute.rs | 31 +
 .../rust/gfx-hal/src/pso/descriptor.rs | 253 +
 third_party/rust/gfx-hal/src/pso/graphics.rs | 289 +
 .../rust/gfx-hal/src/pso/input_assembler.rs | 146 +
 third_party/rust/gfx-hal/src/pso/mod.rs | 290 +
 .../rust/gfx-hal/src/pso/output_merger.rs | 362 +
 .../rust/gfx-hal/src/pso/specialization.rs | 132 +
 third_party/rust/gfx-hal/src/query.rs | 106 +
 third_party/rust/gfx-hal/src/queue/family.rs | 52 +
 third_party/rust/gfx-hal/src/queue/mod.rs | 149 +
 third_party/rust/gfx-hal/src/range.rs | 59 +
 third_party/rust/gfx-hal/src/window.rs | 526 +
 .../rust/hibitset/.cargo-checksum.json | 1 +
 third_party/rust/hibitset/Cargo.toml | 34 +
 third_party/rust/hibitset/LICENSE-APACHE | 201 +
 third_party/rust/hibitset/LICENSE-MIT | 25 +
 third_party/rust/hibitset/README.md | 22 +
 third_party/rust/hibitset/benches/benches.rs | 93 +
 third_party/rust/hibitset/benches/iter.rs | 248 +
 third_party/rust/hibitset/bors.toml | 1 +
 third_party/rust/hibitset/src/atomic.rs | 419 +
 third_party/rust/hibitset/src/iter/drain.rs | 45 +
 third_party/rust/hibitset/src/iter/mod.rs | 150 +
 .../rust/hibitset/src/iter/parallel.rs | 242 +
 third_party/rust/hibitset/src/lib.rs | 723 +
 third_party/rust/hibitset/src/ops.rs | 720 +
 third_party/rust/hibitset/src/util.rs | 377 +
 .../rust/malloc_buf/.cargo-checksum.json | 1 +
 third_party/rust/malloc_buf/Cargo.toml | 14 +
 third_party/rust/malloc_buf/src/lib.rs | 95 +
 third_party/rust/metal/.cargo-checksum.json | 1 +
 third_party/rust/metal/Cargo.lock | 560 +
 third_party/rust/metal/Cargo.toml | 86 +
 third_party/rust/metal/LICENSE-APACHE | 201 +
 third_party/rust/metal/LICENSE-MIT | 25 +
 third_party/rust/metal/README.md | 20 +
 third_party/rust/metal/bors.toml | 8 +
 .../metal/examples/argument-buffer/main.rs | 43 +
 third_party/rust/metal/examples/bind/main.rs | 42 +
 third_party/rust/metal/examples/caps/main.rs | 32 +
 .../compute/compute-argument-buffer.metal | 14 +
 .../compute/compute-argument-buffer.rs | 101 +
 .../metal/examples/compute/default.metallib | Bin 0 -> 3237 bytes
 .../metal/examples/compute/embedded-lib.rs | 31 +
 .../rust/metal/examples/compute/main.rs | 88 +
 .../rust/metal/examples/compute/shaders.metal | 10 +
 .../rust/metal/examples/library/main.rs | 17 +
 .../rust/metal/examples/reflection/main.rs | 83 +
 .../metal/examples/window/default.metallib | Bin 0 -> 5844 bytes
 .../rust/metal/examples/window/main.rs | 185 +
 .../rust/metal/examples/window/shaders.metal | 31 +
 third_party/rust/metal/src/argument.rs | 322 +
 third_party/rust/metal/src/buffer.rs | 62 +
 third_party/rust/metal/src/capturemanager.rs | 103 +
 third_party/rust/metal/src/commandbuffer.rs | 122 +
 third_party/rust/metal/src/commandqueue.rs | 44 +
 third_party/rust/metal/src/constants.rs | 137 +
 third_party/rust/metal/src/depthstencil.rs | 164 +
 third_party/rust/metal/src/device.rs | 1741 +
 third_party/rust/metal/src/drawable.rs | 20 +
 third_party/rust/metal/src/encoder.rs | 1069 +
 third_party/rust/metal/src/heap.rs | 110 +
 third_party/rust/metal/src/lib.rs | 401 +
 third_party/rust/metal/src/library.rs | 254 +
 .../rust/metal/src/pipeline/compute.rs | 410 +
 third_party/rust/metal/src/pipeline/mod.rs | 14 +
 third_party/rust/metal/src/pipeline/render.rs | 409 +
 third_party/rust/metal/src/renderpass.rs | 310 +
 third_party/rust/metal/src/resource.rs | 105 +
 third_party/rust/metal/src/sampler.rs | 132 +
 third_party/rust/metal/src/texture.rs | 323 +
 third_party/rust/metal/src/types.rs | 38 +
 .../rust/metal/src/vertexdescriptor.rs | 239 +
 .../rust/neqo-common/.cargo-checksum.json | 2 +-
 third_party/rust/neqo-common/src/log.rs | 5 -
 third_party/rust/neqo-common/tests/log.rs | 10 +-
 .../rust/neqo-crypto/.cargo-checksum.json | 2 +-
 third_party/rust/neqo-crypto/build.rs | 4 +-
 third_party/rust/neqo-crypto/src/agent.rs | 16 +-
 third_party/rust/neqo-crypto/src/agentio.rs | 8 +-
 .../rust/neqo-http3/.cargo-checksum.json | 2 +-
 third_party/rust/neqo-http3/src/connection.rs | 99 +-
 .../rust/neqo-http3/src/connection_client.rs | 29 +-
 .../rust/neqo-http3/src/connection_server.rs | 23 +-
 .../neqo-http3/src/control_stream_local.rs | 4 +-
 .../neqo-http3/src/control_stream_remote.rs | 6 +-
 third_party/rust/neqo-http3/src/hframe.rs | 32 +-
 .../rust/neqo-http3/src/stream_type_reader.rs | 7 +-
 .../rust/neqo-http3/src/transaction_client.rs | 64 +-
 .../rust/neqo-http3/src/transaction_server.rs | 45 +-
 .../rust/neqo-qpack/.cargo-checksum.json | 2 +-
 third_party/rust/neqo-qpack/src/decoder.rs | 38 +-
 third_party/rust/neqo-qpack/src/encoder.rs | 36 +-
 .../rust/neqo-transport/.cargo-checksum.json | 2 +-
 .../rust/neqo-transport/src/connection.rs | 247 +-
 third_party/rust/neqo-transport/src/crypto.rs | 20 +-
 third_party/rust/neqo-transport/src/dump.rs | 2 +-
 .../rust/neqo-transport/src/flow_mgr.rs | 15 +-
 third_party/rust/neqo-transport/src/frame.rs | 8 +-
 .../rust/neqo-transport/src/recovery.rs | 44 +-
 .../rust/neqo-transport/src/recv_stream.rs | 16 +-
 .../rust/neqo-transport/src/send_stream.rs | 53 +-
 third_party/rust/neqo-transport/src/server.rs | 30 +-
 .../rust/neqo-transport/src/tracking.rs | 37 +-
 third_party/rust/objc/.cargo-checksum.json | 1 +
 third_party/rust/objc/CHANGELOG.md | 109 +
 third_party/rust/objc/Cargo.toml | 33 +
 third_party/rust/objc/LICENSE.txt | 21 +
 third_party/rust/objc/README.md | 99 +
 third_party/rust/objc/examples/example.rs | 45 +
 third_party/rust/objc/src/declare.rs | 340 +
 third_party/rust/objc/src/encode.rs | 279 +
 third_party/rust/objc/src/exception.rs | 11 +
 third_party/rust/objc/src/lib.rs | 90 +
 third_party/rust/objc/src/macros.rs | 140 +
 .../rust/objc/src/message/apple/arm.rs | 40 +
 .../rust/objc/src/message/apple/arm64.rs | 18 +
 .../rust/objc/src/message/apple/mod.rs | 40 +
 .../rust/objc/src/message/apple/x86.rs | 40 +
 .../rust/objc/src/message/apple/x86_64.rs | 32 +
 third_party/rust/objc/src/message/gnustep.rs | 35 +
 third_party/rust/objc/src/message/mod.rs | 296 +
 third_party/rust/objc/src/message/verify.rs | 49 +
 third_party/rust/objc/src/rc/autorelease.rs | 30 +
 third_party/rust/objc/src/rc/mod.rs | 123 +
 third_party/rust/objc/src/rc/strong.rs | 73 +
 third_party/rust/objc/src/rc/weak.rs | 50 +
 third_party/rust/objc/src/runtime.rs | 632 +
 third_party/rust/objc/src/test_utils.rs | 187 +
 .../rust/objc_exception/.cargo-checksum.json | 1 +
 third_party/rust/objc_exception/Cargo.toml | 17 +
 third_party/rust/objc_exception/build.rs | 7 +
 .../rust/objc_exception/extern/exception.m | 21 +
 third_party/rust/objc_exception/src/lib.rs | 100 +
 .../rust/range-alloc/.cargo-checksum.json | 1 +
 third_party/rust/range-alloc/Cargo.toml | 26 +
 third_party/rust/range-alloc/src/lib.rs | 267 +
 .../raw-window-handle/.cargo-checksum.json | 1 +
 .../rust/raw-window-handle/CHANGELOG.md | 24 +
 third_party/rust/raw-window-handle/Cargo.toml | 36 +
 third_party/rust/raw-window-handle/LICENSE | 21 +
 third_party/rust/raw-window-handle/README.md | 7 +
 .../rust/raw-window-handle/appveyor.yml | 22 +
 .../rust/raw-window-handle/rustfmt.toml | 1 +
 .../rust/raw-window-handle/src/android.rs | 31 +
 third_party/rust/raw-window-handle/src/ios.rs | 34 +
 third_party/rust/raw-window-handle/src/lib.rs | 197 +
 .../rust/raw-window-handle/src/macos.rs | 33 +
 .../rust/raw-window-handle/src/unix.rs | 98 +
 third_party/rust/raw-window-handle/src/web.rs | 33 +
 .../rust/raw-window-handle/src/windows.rs | 34 +
 .../rust/relevant/.cargo-checksum.json | 1 +
 third_party/rust/relevant/Cargo.toml | 42 +
 third_party/rust/relevant/LICENSE-APACHE | 201 +
 third_party/rust/relevant/LICENSE-MIT | 25 +
 third_party/rust/relevant/README.md | 54 +
 third_party/rust/relevant/src/lib.rs | 89 +
 .../rendy-descriptor/.cargo-checksum.json | 1 +
 third_party/rust/rendy-descriptor/Cargo.toml | 35 +
 .../rust/rendy-descriptor/src/allocator.rs | 398 +
 third_party/rust/rendy-descriptor/src/lib.rs | 4 +
 .../rust/rendy-descriptor/src/ranges.rs | 187 +
 .../rust/rendy-memory/.cargo-checksum.json | 1 +
 third_party/rust/rendy-memory/Cargo.toml | 55 +
 .../rendy-memory/src/allocator/dedicated.rs | 188 +
 .../rendy-memory/src/allocator/dynamic.rs | 674 +
 .../rust/rendy-memory/src/allocator/linear.rs | 325 +
 .../rust/rendy-memory/src/allocator/mod.rs | 50 +
 third_party/rust/rendy-memory/src/block.rs | 36 +
 .../rust/rendy-memory/src/heaps/heap.rs | 49 +
 .../rendy-memory/src/heaps/memory_type.rs | 157 +
 .../rust/rendy-memory/src/heaps/mod.rs | 324 +
 third_party/rust/rendy-memory/src/lib.rs | 31 +
 .../rust/rendy-memory/src/mapping/mod.rs | 288 +
 .../rust/rendy-memory/src/mapping/range.rs | 101 +
 .../rust/rendy-memory/src/mapping/write.rs | 73 +
 third_party/rust/rendy-memory/src/memory.rs | 82 +
 third_party/rust/rendy-memory/src/usage.rs | 210 +
 third_party/rust/rendy-memory/src/util.rs | 125 +
 .../rust/rendy-memory/src/utilization.rs | 137 +
 .../rust/shared_library/.cargo-checksum.json | 1 +
 third_party/rust/shared_library/Cargo.toml | 24 +
 .../rust/shared_library/LICENSE-APACHE | 201 +
 third_party/rust/shared_library/LICENSE-MIT | 25 +
 .../shared_library/src/dynamic_library.rs | 410 +
 third_party/rust/shared_library/src/lib.rs | 175 +
 .../rust/spirv_cross/.cargo-checksum.json | 1 +
 third_party/rust/spirv_cross/Cargo.toml | 24 +
 third_party/rust/spirv_cross/build.rs | 61 +
 .../rust/spirv_cross/src/bindings_native.rs | 2228 +
 .../rust/spirv_cross/src/bindings_wasm.rs | 1862 +
 .../src/bindings_wasm_functions.rs | 669 +
 third_party/rust/spirv_cross/src/compiler.rs | 636 +
 .../rust/spirv_cross/src/emscripten.rs | 162 +
 third_party/rust/spirv_cross/src/glsl.rs | 190 +
 third_party/rust/spirv_cross/src/hlsl.rs | 159 +
 third_party/rust/spirv_cross/src/lib.rs | 87 +
 third_party/rust/spirv_cross/src/msl.rs | 463 +
 third_party/rust/spirv_cross/src/ptr_util.rs | 58 +
 third_party/rust/spirv_cross/src/spirv.rs | 604 +
 .../src/vendor/SPIRV-Cross/.clang-format | 167 +
 .../src/vendor/SPIRV-Cross/.gitignore | 20 +
 .../src/vendor/SPIRV-Cross/CMakeLists.txt | 584 +
 .../src/vendor/SPIRV-Cross/GLSL.std.450.h | 131 +
 .../src/vendor/SPIRV-Cross/LICENSE | 202 +
 .../src/vendor/SPIRV-Cross/Makefile | 41 +
 .../vendor/SPIRV-Cross/cmake/gitversion.in.h | 6 +
 .../src/vendor/SPIRV-Cross/format_all.sh | 8 +
 .../src/vendor/SPIRV-Cross/gn/BUILD.gn | 63 +
 .../include/spirv_cross/barrier.hpp | 79 +
 .../include/spirv_cross/external_interface.h | 126 +
 .../SPIRV-Cross/include/spirv_cross/image.hpp | 62 +
 .../spirv_cross/internal_interface.hpp | 603 +
 .../include/spirv_cross/sampler.hpp | 105 +
 .../include/spirv_cross/thread_group.hpp | 113 +
 .../src/vendor/SPIRV-Cross/main.cpp | 1259 +
 .../pkg-config/spirv-cross-c-shared.pc.in | 13 +
 .../src/vendor/SPIRV-Cross/spirv.h | 1972 +
 .../src/vendor/SPIRV-Cross/spirv.hpp | 1981 +
 .../src/vendor/SPIRV-Cross/spirv_cfg.cpp | 397 +
 .../src/vendor/SPIRV-Cross/spirv_cfg.hpp | 156 +
 .../src/vendor/SPIRV-Cross/spirv_common.hpp | 1712 +
 .../src/vendor/SPIRV-Cross/spirv_cpp.cpp | 549 +
 .../src/vendor/SPIRV-Cross/spirv_cpp.hpp | 86 +
 .../src/vendor/SPIRV-Cross/spirv_cross.cpp | 4639 ++
 .../src/vendor/SPIRV-Cross/spirv_cross.hpp | 1055 +
 .../src/vendor/SPIRV-Cross/spirv_cross_c.cpp | 2176 +
 .../src/vendor/SPIRV-Cross/spirv_cross_c.h | 832 +
 .../SPIRV-Cross/spirv_cross_containers.hpp | 721 +
 .../spirv_cross_error_handling.hpp | 85 +
 .../SPIRV-Cross/spirv_cross_parsed_ir.cpp | 806 +
 .../SPIRV-Cross/spirv_cross_parsed_ir.hpp | 229 +
 .../vendor/SPIRV-Cross/spirv_cross_util.cpp | 70 +
 .../vendor/SPIRV-Cross/spirv_cross_util.hpp | 30 +
 .../src/vendor/SPIRV-Cross/spirv_glsl.cpp | 13044 ++++
 .../src/vendor/SPIRV-Cross/spirv_glsl.hpp | 712 +
 .../src/vendor/SPIRV-Cross/spirv_hlsl.cpp | 4989 ++
 .../src/vendor/SPIRV-Cross/spirv_hlsl.hpp | 233 +
 .../src/vendor/SPIRV-Cross/spirv_msl.cpp | 12444 +++
 .../src/vendor/SPIRV-Cross/spirv_msl.hpp | 934 +
 .../src/vendor/SPIRV-Cross/spirv_parser.cpp | 1185 +
 .../src/vendor/SPIRV-Cross/spirv_parser.hpp | 94 +
 .../src/vendor/SPIRV-Cross/spirv_reflect.cpp | 635 +
 .../src/vendor/SPIRV-Cross/spirv_reflect.hpp | 83 +
 third_party/rust/spirv_cross/src/wrapper.cpp | 505 +
 third_party/rust/spirv_cross/src/wrapper.hpp | 189 +
 .../rust/spirv_cross/tests/common/mod.rs | 9 +
 .../rust/spirv_cross/tests/glsl_tests.rs | 216 +
 .../rust/spirv_cross/tests/hlsl_tests.rs | 106 +
 .../rust/spirv_cross/tests/msl_tests.rs | 309 +
 .../rust/spirv_cross/tests/shaders/array.vert | 20 +
 .../spirv_cross/tests/shaders/array.vert.spv | Bin 0 -> 1508 bytes
 .../tests/shaders/rasterize_disabled.vert | 22 +
 .../tests/shaders/rasterize_disabled.vert.spv | Bin 0 -> 1552 bytes
 .../spirv_cross/tests/shaders/sampler.frag | 12 +
 .../tests/shaders/sampler.frag.spv | Bin 0 -> 720 bytes
 .../spirv_cross/tests/shaders/simple.vert | 17 +
 .../spirv_cross/tests/shaders/simple.vert.spv | Bin 0 -> 1172 bytes
 .../tests/shaders/specialization.comp | 5 +
 .../tests/shaders/specialization.comp.spv | Bin 0 -> 260 bytes
 .../spirv_cross/tests/shaders/struct.frag | 18 +
 .../spirv_cross/tests/shaders/struct.frag.spv | Bin 0 -> 952 bytes
 .../spirv_cross/tests/shaders/struct.vert | 21 +
 .../spirv_cross/tests/shaders/struct.vert.spv | Bin 0 -> 672 bytes
 .../spirv_cross/tests/shaders/workgroup.comp | 9 +
 .../tests/shaders/workgroup.comp.spv | Bin 0 -> 360 bytes
 .../rust/spirv_cross/tests/spirv_tests.rs | 267 +
 .../rust/storage-map/.cargo-checksum.json | 1 +
 third_party/rust/storage-map/Cargo.toml | 27 +
 third_party/rust/storage-map/LICENSE | 201 +
 third_party/rust/storage-map/README.md | 2 +
 third_party/rust/storage-map/src/lib.rs | 172 +
 third_party/rust/storage-map/tests/ten.rs | 42 +
 third_party/rust/uluru/.cargo-checksum.json | 2 +-
 third_party/rust/uluru/Cargo.toml | 6 +-
 third_party/rust/wio/.cargo-checksum.json | 1 +
 third_party/rust/wio/Cargo.toml | 28 +
 third_party/rust/wio/LICENSE-APACHE | 201 +
 third_party/rust/wio/LICENSE-MIT | 19 +
 third_party/rust/wio/README.md | 4 +
 third_party/rust/wio/src/apc.rs | 39 +
 third_party/rust/wio/src/com.rs | 79 +
 third_party/rust/wio/src/console.rs | 270 +
 third_party/rust/wio/src/error.rs | 18 +
 third_party/rust/wio/src/handle.rs | 71 +
 third_party/rust/wio/src/lib.rs | 20 +
 third_party/rust/wio/src/perf.rs | 17 +
 third_party/rust/wio/src/pipe.rs | 16 +
 third_party/rust/wio/src/sleep.rs | 23 +
 third_party/rust/wio/src/thread.rs | 51 +
 third_party/rust/wio/src/ums.rs | 3 +
 third_party/rust/wio/src/wide.rs | 59 +
 third_party/rust/x11/.cargo-checksum.json | 1 +
 third_party/rust/x11/Cargo.toml | 45 +
 third_party/rust/x11/build.rs | 38 +
 third_party/rust/x11/examples/hello-world.rs | 89 +
 third_party/rust/x11/examples/input.rs | 384 +
 third_party/rust/x11/examples/xrecord.rs | 127 +
 third_party/rust/x11/src/dpms.rs | 44 +
 third_party/rust/x11/src/glx.rs | 249 +
 third_party/rust/x11/src/internal.rs | 41 +
 third_party/rust/x11/src/keysym.rs | 1332 +
 third_party/rust/x11/src/lib.rs | 37 +
 third_party/rust/x11/src/link.rs | 22 +
 third_party/rust/x11/src/xcursor.rs | 211 +
 third_party/rust/x11/src/xf86vmode.rs | 146 +
 third_party/rust/x11/src/xfixes.rs | 13 +
 third_party/rust/x11/src/xft.rs | 219 +
 third_party/rust/x11/src/xinerama.rs | 66 +
 third_party/rust/x11/src/xinput.rs | 165 +
 third_party/rust/x11/src/xinput2.rs | 758 +
 third_party/rust/x11/src/xlib.rs | 3404 +
 third_party/rust/x11/src/xlib_xcb.rs | 10 +
 third_party/rust/x11/src/xmd.rs | 12 +
 third_party/rust/x11/src/xmu.rs | 199 +
 third_party/rust/x11/src/xrandr.rs | 558 +
 third_party/rust/x11/src/xrecord.rs | 137 +
 third_party/rust/x11/src/xrender.rs | 463 +
 third_party/rust/x11/src/xss.rs | 98 +
 third_party/rust/x11/src/xt.rs | 398 +
 third_party/rust/x11/src/xtest.rs | 42 +
 toolkit/library/gtest/rust/Cargo.toml | 1 +
 toolkit/library/moz.build | 14 +
 toolkit/library/rust/Cargo.toml | 1 +
 toolkit/library/rust/gkrust-features.mozbuild | 2 +-
 toolkit/library/rust/shared/Cargo.toml | 4 +-
 toolkit/library/rust/shared/lib.rs | 10 +-
 tools/vcs/mach_commands.py | 6 +
 728 files changed, 248351 insertions(+), 3036 deletions(-)
 delete mode 100644 dom/webgpu/InstanceProvider.cpp
 delete mode 100644 dom/webgpu/InstanceProvider.h
 create mode 100644 dom/webgpu/ffi/moz.build
 create mode 100644 dom/webgpu/ffi/wgpu.h
 create mode 100644 dom/webgpu/ipc/PWebGPU.ipdl
 create mode 100644 dom/webgpu/ipc/WebGPUChild.cpp
 create mode 100644 dom/webgpu/ipc/WebGPUChild.h
 create mode 100644 dom/webgpu/ipc/WebGPUParent.cpp
 create mode 100644 dom/webgpu/ipc/WebGPUParent.h
 create mode 100644 dom/webgpu/ipc/WebGPUSerialize.h
 create mode 100644 dom/webgpu/ipc/WebGPUTypes.h
 create mode 100644 dom/webgpu/mochitest/test_device_creation.html
 delete mode 100644 dom/webgpu/thread/WebGPUThreading.cpp
 delete mode 100644 dom/webgpu/thread/WebGPUThreading.h
 delete mode 100644 dom/webgpu/thread/moz.build
 create mode 100644 dom/webgpu/wgpu-native/Cargo.toml
 create mode 100644 dom/webgpu/wgpu-native/cbindgen.toml
 create mode 100644 dom/webgpu/wgpu-native/src/binding_model.rs
 create mode 100644 dom/webgpu/wgpu-native/src/command/allocator.rs
 create mode 100644 dom/webgpu/wgpu-native/src/command/bind.rs
 create mode 100644 dom/webgpu/wgpu-native/src/command/compute.rs
 create mode 100644 dom/webgpu/wgpu-native/src/command/mod.rs
 create mode 100644 dom/webgpu/wgpu-native/src/command/render.rs
 create mode 100644 dom/webgpu/wgpu-native/src/command/transfer.rs
 create mode 100644 dom/webgpu/wgpu-native/src/conv.rs
 create mode 100644 dom/webgpu/wgpu-native/src/device.rs
 create mode 100644 dom/webgpu/wgpu-native/src/hub.rs
 create mode 100644 dom/webgpu/wgpu-native/src/id.rs
 create mode 100644 dom/webgpu/wgpu-native/src/instance.rs
 create mode 100644 dom/webgpu/wgpu-native/src/lib.rs
 create mode 100644 dom/webgpu/wgpu-native/src/pipeline.rs
 create mode 100644 dom/webgpu/wgpu-native/src/resource.rs
 create mode 100644 dom/webgpu/wgpu-native/src/swap_chain.rs
 create mode 100644 dom/webgpu/wgpu-native/src/track/buffer.rs
 create mode 100644 dom/webgpu/wgpu-native/src/track/mod.rs
 create mode 100644 dom/webgpu/wgpu-native/src/track/range.rs
 create mode 100644 dom/webgpu/wgpu-native/src/track/texture.rs
 create mode 100644 dom/webgpu/wgpu-remote/Cargo.toml
 create mode 100644 dom/webgpu/wgpu-remote/cbindgen.toml
 create mode 100644 dom/webgpu/wgpu-remote/src/lib.rs
 create mode 100644 dom/webgpu/wgpu-remote/src/server.rs
 delete mode 100644 third_party/rust/arrayvec/build.rs
 delete mode 100644 third_party/rust/arrayvec/src/maybe_uninit_nodrop.rs
 delete mode 100644 third_party/rust/arrayvec/src/maybe_uninit_stable.rs
 delete mode 100644 third_party/rust/arrayvec/src/range.rs
 create mode 100644 third_party/rust/ash/.cargo-checksum.json
 create mode 100644 third_party/rust/ash/Cargo.toml
 create mode 100644 third_party/rust/ash/output
 create mode 100644 third_party/rust/ash/src/allocator.rs
 create mode 100644 third_party/rust/ash/src/device.rs
 create mode 100644 third_party/rust/ash/src/entry.rs
 create mode 100644 third_party/rust/ash/src/extensions/experimental/amd.rs
 create mode 100644 third_party/rust/ash/src/extensions/experimental/mod.rs
 create mode 100644 third_party/rust/ash/src/extensions/ext/debug_marker.rs
 create mode 100644 third_party/rust/ash/src/extensions/ext/debug_report.rs
 create mode 100644 third_party/rust/ash/src/extensions/ext/debug_utils.rs
 create mode 100644 third_party/rust/ash/src/extensions/ext/mod.rs
 create mode 100644 third_party/rust/ash/src/extensions/khr/android_surface.rs
 create mode 100644 third_party/rust/ash/src/extensions/khr/display_swapchain.rs
 create mode 100644 third_party/rust/ash/src/extensions/khr/mod.rs
 create mode 100644 third_party/rust/ash/src/extensions/khr/surface.rs
 create mode 100644 third_party/rust/ash/src/extensions/khr/swapchain.rs
 create mode 100644 third_party/rust/ash/src/extensions/khr/wayland_surface.rs
 create mode 100644 third_party/rust/ash/src/extensions/khr/win32_surface.rs
 create mode 100644 third_party/rust/ash/src/extensions/khr/xcb_surface.rs
 create mode 100644 third_party/rust/ash/src/extensions/khr/xlib_surface.rs
 create mode 100644 third_party/rust/ash/src/extensions/mod.rs
 create mode 100644 third_party/rust/ash/src/extensions/mvk/ios_surface.rs
 create mode 100644 third_party/rust/ash/src/extensions/mvk/macos_surface.rs
 create mode 100644 third_party/rust/ash/src/extensions/mvk/mod.rs
 create mode 100644 third_party/rust/ash/src/extensions/nv/mesh_shader.rs
 create mode 100644 third_party/rust/ash/src/extensions/nv/mod.rs
 create mode 100644 third_party/rust/ash/src/extensions/nv/ray_tracing.rs
 create mode 100644 third_party/rust/ash/src/instance.rs
 create mode 100644 third_party/rust/ash/src/lib.rs
 create mode 100644 third_party/rust/ash/src/prelude.rs
 create mode 100644 third_party/rust/ash/src/util.rs
 create mode 100644 third_party/rust/ash/src/version.rs
 create mode 100644 third_party/rust/ash/src/vk.rs
 create mode 100644 third_party/rust/ash/tests/constant_size_arrays.rs
 create mode 100644 third_party/rust/ash/tests/display.rs
 create mode 100644 third_party/rust/atom/.cargo-checksum.json
 create mode 100644 third_party/rust/atom/Cargo.toml
 create mode 100644 third_party/rust/atom/LICENSE
 create mode 100644 third_party/rust/atom/examples/fifo.rs
 create mode 100644 third_party/rust/atom/examples/simple.rs
 create mode 100644 third_party/rust/atom/readme.md
 create mode 100644 third_party/rust/atom/src/lib.rs
 create mode 100644 third_party/rust/atom/tests/atom.rs
 create mode 100644 third_party/rust/backtrace/Cargo.lock
 delete mode 100644 third_party/rust/backtrace/appveyor.yml
 create mode 100644 third_party/rust/backtrace/benches/benchmarks.rs
 create mode 100644 third_party/rust/backtrace/ci/android-sdk.sh
 delete mode 100644 third_party/rust/backtrace/ci/docker/powerpc-unknown-linux-gnu/Dockerfile
 create mode 100644 third_party/rust/backtrace/ci/runtest-android.rs
 create mode 100644 third_party/rust/backtrace/src/dbghelp.rs
 delete mode 100644 third_party/rust/backtrace/src/dylib.rs
 create mode 100644 third_party/rust/backtrace/src/print.rs
 create mode 100644 third_party/rust/backtrace/src/print/fuchsia.rs
 create mode 100644 third_party/rust/backtrace/src/symbolize/dladdr_resolve.rs
 create mode 100644 third_party/rust/backtrace/src/types.rs
 create mode 100644 third_party/rust/backtrace/src/windows.rs
 create mode 100644 third_party/rust/backtrace/tests/accuracy/auxiliary.rs
 create mode 100644 third_party/rust/backtrace/tests/accuracy/main.rs
 create mode 100644 third_party/rust/backtrace/tests/concurrent-panics.rs
 create mode 100644 third_party/rust/backtrace/tests/skip_inner_frames.rs
 create mode 100644 third_party/rust/block/.cargo-checksum.json
 create mode 100644 third_party/rust/block/Cargo.toml
 create mode 100644 third_party/rust/block/README.md
 create mode 100644 third_party/rust/block/src/lib.rs
 create mode 100644 third_party/rust/block/src/test_utils.rs
 create mode 100644 third_party/rust/cc/Cargo.lock
 delete mode 100644 third_party/rust/cc/azure-pipelines.yml
 delete mode 100644 third_party/rust/cc/ci/azure-install-rust.yml
 delete mode 100644 third_party/rust/cc/ci/azure-steps.yml
 create mode 100644 third_party/rust/cc/tests/cflags.rs
 create mode 100644 third_party/rust/cc/tests/cxxflags.rs
 create mode 100644 third_party/rust/cocoa/.cargo-checksum.json
 create mode 100644 third_party/rust/cocoa/COPYRIGHT
 create mode 100644 third_party/rust/cocoa/Cargo.lock
 create mode 100644 third_party/rust/cocoa/Cargo.toml
 create mode 100644 third_party/rust/cocoa/LICENSE-APACHE
 create mode 100644 third_party/rust/cocoa/LICENSE-MIT
 create mode 100644 third_party/rust/cocoa/README.md
 create mode 100644 third_party/rust/cocoa/examples/color.rs
 create mode 100644 third_party/rust/cocoa/examples/fullscreen.rs
 create mode 100644 third_party/rust/cocoa/examples/hello_world.rs
 create mode 100644 third_party/rust/cocoa/examples/tab_view.rs
 create mode 100644 third_party/rust/cocoa/src/appkit.rs
 create mode 100644 third_party/rust/cocoa/src/base.rs
 create mode 100644 third_party/rust/cocoa/src/foundation.rs
 create mode 100644 third_party/rust/cocoa/src/lib.rs
 create mode 100644 third_party/rust/cocoa/src/macros.rs
 create mode 100644 third_party/rust/cocoa/src/quartzcore.rs
 create mode 100644 third_party/rust/cocoa/tests/foundation.rs
 create mode 100644 third_party/rust/colorful/.cargo-checksum.json
 create mode 100644 third_party/rust/colorful/Cargo.toml
 create mode 100644 third_party/rust/colorful/CodeOfConduct.md
 create mode 100644 third_party/rust/colorful/README.md
 create mode 100644 third_party/rust/colorful/license
 create mode 100644 third_party/rust/colorful/rustfmt.toml
 create mode 100644 third_party/rust/colorful/src/core/color_string.rs
 create mode 100644 third_party/rust/colorful/src/core/colors.rs
 create mode 100644 third_party/rust/colorful/src/core/hsl.rs
 create mode 100644 third_party/rust/colorful/src/core/mod.rs
 create mode 100644 third_party/rust/colorful/src/core/rgb.rs
 create mode 100644 third_party/rust/colorful/src/core/style.rs
 create mode 100644 third_party/rust/colorful/src/core/symbols.rs
 create mode 100644 third_party/rust/colorful/src/lib.rs
 create mode 100644 third_party/rust/colorful/tests/test_all_color.rs
 create mode 100644 third_party/rust/colorful/tests/test_animation.rs
 create mode 100644 third_party/rust/colorful/tests/test_basic.rs
 create mode 100644 third_party/rust/colorful/tests/test_extra.rs
 create mode 100644 third_party/rust/colorful/tests/test_gradient.rs
 create mode 100644 third_party/rust/colorful/tests/test_hsl.rs
 create mode 100644 third_party/rust/copyless/.cargo-checksum.json
 create mode 100644 third_party/rust/copyless/CHANGELOG.md
 create mode 100644 third_party/rust/copyless/Cargo.toml
 create mode 100644 third_party/rust/copyless/LICENSE
 create mode 100644 third_party/rust/copyless/README.md
 create mode 100644 third_party/rust/copyless/bors.toml
 create mode 100644 third_party/rust/copyless/rustfmt.toml
 create mode 100644 third_party/rust/copyless/src/boxed.rs
 create mode 100644 third_party/rust/copyless/src/lib.rs
 create mode 100644 third_party/rust/copyless/src/vec.rs
 create mode 100644 third_party/rust/core-graphics/src/window.rs
 create mode 100644 third_party/rust/crossbeam-epoch/build.rs
 create mode 100644 third_party/rust/crossbeam-utils-0.6.5/.cargo-checksum.json
 create mode 100644 third_party/rust/crossbeam-utils-0.6.5/CHANGELOG.md
 create mode 100644 third_party/rust/crossbeam-utils-0.6.5/Cargo.toml
 create mode 100644 third_party/rust/crossbeam-utils-0.6.5/LICENSE-APACHE
 create mode 100644 third_party/rust/crossbeam-utils-0.6.5/LICENSE-MIT
 create mode 100644 third_party/rust/crossbeam-utils-0.6.5/README.md
 create mode 100644 third_party/rust/crossbeam-utils-0.6.5/benches/atomic_cell.rs
 create mode 100644 third_party/rust/crossbeam-utils-0.6.5/src/atomic/atomic_cell.rs
 create mode 100644 third_party/rust/crossbeam-utils-0.6.5/src/atomic/consume.rs
 create mode 100644 third_party/rust/crossbeam-utils-0.6.5/src/atomic/mod.rs
 create mode 100644 third_party/rust/crossbeam-utils-0.6.5/src/backoff.rs
 create mode 100644 third_party/rust/crossbeam-utils-0.6.5/src/cache_padded.rs
 create mode 100644 third_party/rust/crossbeam-utils-0.6.5/src/lib.rs
 create mode 100644 third_party/rust/crossbeam-utils-0.6.5/src/sync/mod.rs
 create mode 100644 third_party/rust/crossbeam-utils-0.6.5/src/sync/parker.rs
 create mode 100644 third_party/rust/crossbeam-utils-0.6.5/src/sync/sharded_lock.rs
 create mode 100644 third_party/rust/crossbeam-utils-0.6.5/src/sync/wait_group.rs
 create mode 100644 third_party/rust/crossbeam-utils-0.6.5/src/thread.rs
 create mode 100644 third_party/rust/crossbeam-utils-0.6.5/tests/atomic_cell.rs
 create mode 100644 third_party/rust/crossbeam-utils-0.6.5/tests/cache_padded.rs
 create mode 100644 third_party/rust/crossbeam-utils-0.6.5/tests/parker.rs
 create mode 100644 third_party/rust/crossbeam-utils-0.6.5/tests/sharded_lock.rs
 create mode 100644 third_party/rust/crossbeam-utils-0.6.5/tests/thread.rs
 create mode 100644 third_party/rust/crossbeam-utils-0.6.5/tests/wait_group.rs
 create mode 100644 third_party/rust/crossbeam-utils/build.rs
 create mode 100644 third_party/rust/crossbeam-utils/src/atomic/seq_lock.rs
 create mode 100644 third_party/rust/crossbeam-utils/src/atomic/seq_lock_wide.rs
 create mode 100644 third_party/rust/d3d12/.cargo-checksum.json
 create mode 100644 third_party/rust/d3d12/Cargo.toml
 create mode 100644 third_party/rust/d3d12/README.md
 create mode 100644 third_party/rust/d3d12/appveyor.yml
 create mode 100644 third_party/rust/d3d12/bors.toml
 create mode 100644 third_party/rust/d3d12/src/com.rs
 create mode 100644 third_party/rust/d3d12/src/command_allocator.rs
 create mode 100644 third_party/rust/d3d12/src/command_list.rs
 create mode 100644 third_party/rust/d3d12/src/debug.rs
 create mode 100644 third_party/rust/d3d12/src/descriptor.rs
 create mode 100644 third_party/rust/d3d12/src/device.rs
 create mode 100644 third_party/rust/d3d12/src/dxgi.rs
 create mode 100644 third_party/rust/d3d12/src/heap.rs
 create mode 100644 third_party/rust/d3d12/src/lib.rs
 create mode 100644 third_party/rust/d3d12/src/pso.rs
 create mode 100644 third_party/rust/d3d12/src/query.rs
 create mode 100644 third_party/rust/d3d12/src/queue.rs
 create mode 100644 third_party/rust/d3d12/src/resource.rs
 create mode 100644 third_party/rust/d3d12/src/sync.rs
 create mode 100644 third_party/rust/gfx-auxil/.cargo-checksum.json
 create mode 100644 third_party/rust/gfx-auxil/Cargo.toml
 create mode 100644 third_party/rust/gfx-auxil/src/lib.rs
 create mode 100644 third_party/rust/gfx-backend-dx11/.cargo-checksum.json
 create mode 100644 third_party/rust/gfx-backend-dx11/Cargo.toml
 create mode 100644 third_party/rust/gfx-backend-dx11/README.md
 create mode 100644 third_party/rust/gfx-backend-dx11/shaders/blit.hlsl
 create mode 100644 third_party/rust/gfx-backend-dx11/shaders/clear.hlsl
 create mode 100644 third_party/rust/gfx-backend-dx11/shaders/copy.hlsl
 create mode 100644 third_party/rust/gfx-backend-dx11/src/conv.rs
 create mode 100644 third_party/rust/gfx-backend-dx11/src/debug.rs
 create mode 100644 third_party/rust/gfx-backend-dx11/src/device.rs
 create mode 100644 third_party/rust/gfx-backend-dx11/src/dxgi.rs
 create mode 100644 third_party/rust/gfx-backend-dx11/src/internal.rs
 create mode 100644 third_party/rust/gfx-backend-dx11/src/lib.rs
 create mode 100644 third_party/rust/gfx-backend-dx11/src/shader.rs
 create mode 100644 third_party/rust/gfx-backend-dx12/.cargo-checksum.json
 create mode 100644 third_party/rust/gfx-backend-dx12/Cargo.toml
 create mode 100644 third_party/rust/gfx-backend-dx12/README.md
 create mode 100644 third_party/rust/gfx-backend-dx12/shaders/blit.hlsl
 create mode 100644 third_party/rust/gfx-backend-dx12/src/command.rs
 create mode 100644 third_party/rust/gfx-backend-dx12/src/conv.rs
 create mode 100644 third_party/rust/gfx-backend-dx12/src/descriptors_cpu.rs
 create mode 100644 third_party/rust/gfx-backend-dx12/src/device.rs
 create mode 100644 third_party/rust/gfx-backend-dx12/src/internal.rs
 create mode 100644 third_party/rust/gfx-backend-dx12/src/lib.rs
 create mode 100644 third_party/rust/gfx-backend-dx12/src/pool.rs
 create mode 100644 third_party/rust/gfx-backend-dx12/src/resource.rs
 create mode 100644 third_party/rust/gfx-backend-dx12/src/root_constants.rs
 create mode 100644 third_party/rust/gfx-backend-dx12/src/window.rs
 create mode 100644 third_party/rust/gfx-backend-empty/.cargo-checksum.json
 create mode 100644 third_party/rust/gfx-backend-empty/Cargo.toml
 create mode 100644 third_party/rust/gfx-backend-empty/src/lib.rs
 create mode 100644 third_party/rust/gfx-backend-metal/.cargo-checksum.json
 create mode 100644 third_party/rust/gfx-backend-metal/Cargo.toml
 create mode 100644 third_party/rust/gfx-backend-metal/README.md
 create mode 100644 third_party/rust/gfx-backend-metal/shaders/blit.metal
 create mode 100644 third_party/rust/gfx-backend-metal/shaders/clear.metal
 create mode 100644 third_party/rust/gfx-backend-metal/shaders/fill.metal
 create mode 100644 third_party/rust/gfx-backend-metal/shaders/gfx_shaders.metallib
 create mode 100644 third_party/rust/gfx-backend-metal/shaders/macros.h
 create mode 100644 third_party/rust/gfx-backend-metal/src/command.rs
 create mode 100644 third_party/rust/gfx-backend-metal/src/conversions.rs
 create mode 100644 third_party/rust/gfx-backend-metal/src/device.rs
 create mode 100644 third_party/rust/gfx-backend-metal/src/internal.rs
 create mode 100644 third_party/rust/gfx-backend-metal/src/lib.rs
 create mode 100644 third_party/rust/gfx-backend-metal/src/native.rs
 create mode 100644 third_party/rust/gfx-backend-metal/src/soft.rs
 create mode 100644 third_party/rust/gfx-backend-metal/src/window.rs
 create mode 100644 third_party/rust/gfx-backend-vulkan/.cargo-checksum.json
 create mode 100644 third_party/rust/gfx-backend-vulkan/Cargo.toml
 create mode 100644 third_party/rust/gfx-backend-vulkan/README.md
 create mode 100644 third_party/rust/gfx-backend-vulkan/src/command.rs
 create mode 100644 third_party/rust/gfx-backend-vulkan/src/conv.rs
 create mode 100644 third_party/rust/gfx-backend-vulkan/src/device.rs
 create mode 100644 third_party/rust/gfx-backend-vulkan/src/info.rs
 create mode 100644 third_party/rust/gfx-backend-vulkan/src/lib.rs
 create mode 100644 third_party/rust/gfx-backend-vulkan/src/native.rs
 create mode 100644 third_party/rust/gfx-backend-vulkan/src/pool.rs
 create mode 100644 third_party/rust/gfx-backend-vulkan/src/window.rs
 create mode 100644 third_party/rust/gfx-hal/.cargo-checksum.json
 create mode 100644 third_party/rust/gfx-hal/Cargo.toml
 create mode 100644 third_party/rust/gfx-hal/src/adapter.rs
 create mode 100644 third_party/rust/gfx-hal/src/buffer.rs
 create mode 100644 third_party/rust/gfx-hal/src/command/clear.rs
 create mode 100644 third_party/rust/gfx-hal/src/command/mod.rs
 create mode 100644 third_party/rust/gfx-hal/src/command/structs.rs
 create mode 100644 third_party/rust/gfx-hal/src/device.rs
 create mode 100644 third_party/rust/gfx-hal/src/format.rs
 create mode 100644 third_party/rust/gfx-hal/src/image.rs
 create mode 100644 third_party/rust/gfx-hal/src/lib.rs
 create mode 100644 third_party/rust/gfx-hal/src/memory.rs
 create mode 100644 third_party/rust/gfx-hal/src/pass.rs
 create mode 100644 third_party/rust/gfx-hal/src/pool.rs
 create mode 100644 third_party/rust/gfx-hal/src/pso/compute.rs
 create mode 100644 third_party/rust/gfx-hal/src/pso/descriptor.rs
 create mode 100644 third_party/rust/gfx-hal/src/pso/graphics.rs
 create mode 100644 third_party/rust/gfx-hal/src/pso/input_assembler.rs
 create mode 100644 third_party/rust/gfx-hal/src/pso/mod.rs
 create mode 100644 third_party/rust/gfx-hal/src/pso/output_merger.rs
 create mode 100644 third_party/rust/gfx-hal/src/pso/specialization.rs
 create mode 100644 third_party/rust/gfx-hal/src/query.rs
 create mode 100644 third_party/rust/gfx-hal/src/queue/family.rs
 create mode 100644 third_party/rust/gfx-hal/src/queue/mod.rs
 create mode 100644 third_party/rust/gfx-hal/src/range.rs
 create mode 100644 third_party/rust/gfx-hal/src/window.rs
 create mode 100644 third_party/rust/hibitset/.cargo-checksum.json
 create mode 100644 third_party/rust/hibitset/Cargo.toml
 create mode 100644 third_party/rust/hibitset/LICENSE-APACHE
 create mode 100644 third_party/rust/hibitset/LICENSE-MIT
 create mode 100644 third_party/rust/hibitset/README.md
 create mode 100644 third_party/rust/hibitset/benches/benches.rs
 create mode 100644 third_party/rust/hibitset/benches/iter.rs
 create mode 100644 third_party/rust/hibitset/bors.toml
 create mode 100644 third_party/rust/hibitset/src/atomic.rs
 create mode 100644 third_party/rust/hibitset/src/iter/drain.rs
 create mode 100644 third_party/rust/hibitset/src/iter/mod.rs
 create mode 100644 third_party/rust/hibitset/src/iter/parallel.rs
 create mode 100644 third_party/rust/hibitset/src/lib.rs
 create mode 100644 third_party/rust/hibitset/src/ops.rs
 create mode 100644 third_party/rust/hibitset/src/util.rs
 create mode 100644 third_party/rust/malloc_buf/.cargo-checksum.json
 create mode 100644 third_party/rust/malloc_buf/Cargo.toml
 create mode 100644 third_party/rust/malloc_buf/src/lib.rs
 create mode 100644 third_party/rust/metal/.cargo-checksum.json
 create mode 100644 third_party/rust/metal/Cargo.lock
 create mode 100644 third_party/rust/metal/Cargo.toml
 create mode 100644 third_party/rust/metal/LICENSE-APACHE
 create mode 100644 third_party/rust/metal/LICENSE-MIT
 create mode 100644 third_party/rust/metal/README.md
 create mode 100644 third_party/rust/metal/bors.toml
 create mode 100644 third_party/rust/metal/examples/argument-buffer/main.rs
 create mode 100644 third_party/rust/metal/examples/bind/main.rs
 create mode 100644 third_party/rust/metal/examples/caps/main.rs
 create mode 100644 third_party/rust/metal/examples/compute/compute-argument-buffer.metal
 create mode 100644 third_party/rust/metal/examples/compute/compute-argument-buffer.rs
 create mode 100644 third_party/rust/metal/examples/compute/default.metallib
 create mode 100644 third_party/rust/metal/examples/compute/embedded-lib.rs
 create mode 100644 third_party/rust/metal/examples/compute/main.rs
 create mode 100644 third_party/rust/metal/examples/compute/shaders.metal
 create mode 100644 third_party/rust/metal/examples/library/main.rs
 create mode 100644 third_party/rust/metal/examples/reflection/main.rs
 create mode 100644 third_party/rust/metal/examples/window/default.metallib
 create mode 100644 third_party/rust/metal/examples/window/main.rs
 create mode 100644 third_party/rust/metal/examples/window/shaders.metal
 create mode 100644 third_party/rust/metal/src/argument.rs
 create mode 100644 third_party/rust/metal/src/buffer.rs
 create mode 100644 third_party/rust/metal/src/capturemanager.rs
 create mode 100644 third_party/rust/metal/src/commandbuffer.rs
 create mode 100644 third_party/rust/metal/src/commandqueue.rs
 create mode 100644 third_party/rust/metal/src/constants.rs
 create mode 100644 third_party/rust/metal/src/depthstencil.rs
 create mode 100644 third_party/rust/metal/src/device.rs
 create mode 100644 third_party/rust/metal/src/drawable.rs
 create mode 100644 third_party/rust/metal/src/encoder.rs
 create mode 100644 third_party/rust/metal/src/heap.rs
 create mode 100644 third_party/rust/metal/src/lib.rs
 create mode 100644 third_party/rust/metal/src/library.rs
 create mode 100644 third_party/rust/metal/src/pipeline/compute.rs
 create mode 100644 third_party/rust/metal/src/pipeline/mod.rs
 create mode 100644 third_party/rust/metal/src/pipeline/render.rs
 create mode 100644 third_party/rust/metal/src/renderpass.rs
 create mode 100644 third_party/rust/metal/src/resource.rs
 create mode 100644 third_party/rust/metal/src/sampler.rs
 create mode 100644 third_party/rust/metal/src/texture.rs
 create mode 100644 third_party/rust/metal/src/types.rs
 create mode 100644 third_party/rust/metal/src/vertexdescriptor.rs
 create mode 100644 third_party/rust/objc/.cargo-checksum.json
 create mode 100644 third_party/rust/objc/CHANGELOG.md
 create mode 100644 third_party/rust/objc/Cargo.toml
 create mode 100644 third_party/rust/objc/LICENSE.txt
 create mode 100644 third_party/rust/objc/README.md
 create mode 100644 third_party/rust/objc/examples/example.rs
 create mode 100644 third_party/rust/objc/src/declare.rs
 create mode 100644 third_party/rust/objc/src/encode.rs
 create mode 100644 third_party/rust/objc/src/exception.rs
 create mode 100644 third_party/rust/objc/src/lib.rs
 create mode 100644 third_party/rust/objc/src/macros.rs
 create mode 100644 third_party/rust/objc/src/message/apple/arm.rs
 create mode 100644 third_party/rust/objc/src/message/apple/arm64.rs
 create mode 100644 third_party/rust/objc/src/message/apple/mod.rs
 create mode 100644 third_party/rust/objc/src/message/apple/x86.rs
 create mode 100644 third_party/rust/objc/src/message/apple/x86_64.rs
 create mode 100644 third_party/rust/objc/src/message/gnustep.rs
 create mode 100644 third_party/rust/objc/src/message/mod.rs
 create mode 100644 third_party/rust/objc/src/message/verify.rs
 create mode 100644 third_party/rust/objc/src/rc/autorelease.rs
 create mode 100644 third_party/rust/objc/src/rc/mod.rs
 create mode 100644 third_party/rust/objc/src/rc/strong.rs
 create mode 100644 third_party/rust/objc/src/rc/weak.rs
 create mode 100644 third_party/rust/objc/src/runtime.rs
 create mode 100644 third_party/rust/objc/src/test_utils.rs
 create mode 100644 third_party/rust/objc_exception/.cargo-checksum.json
 create mode 100644 third_party/rust/objc_exception/Cargo.toml
 create mode 100644 third_party/rust/objc_exception/build.rs
 create mode 100644 third_party/rust/objc_exception/extern/exception.m
 create mode 100644 third_party/rust/objc_exception/src/lib.rs
 create mode 100644 third_party/rust/range-alloc/.cargo-checksum.json
 create mode 100644 third_party/rust/range-alloc/Cargo.toml
 create mode 100644 third_party/rust/range-alloc/src/lib.rs
 create mode 100644 third_party/rust/raw-window-handle/.cargo-checksum.json
 create mode 100644 third_party/rust/raw-window-handle/CHANGELOG.md
 create mode 100644 third_party/rust/raw-window-handle/Cargo.toml
 create mode 100644 third_party/rust/raw-window-handle/LICENSE
 create mode 100644 third_party/rust/raw-window-handle/README.md
 create mode 100644 third_party/rust/raw-window-handle/appveyor.yml
 create mode 100644 third_party/rust/raw-window-handle/rustfmt.toml
 create mode 100644 third_party/rust/raw-window-handle/src/android.rs
 create mode 100644 third_party/rust/raw-window-handle/src/ios.rs
 create mode 100644 third_party/rust/raw-window-handle/src/lib.rs
 create mode 100644 third_party/rust/raw-window-handle/src/macos.rs
 create mode 100644 third_party/rust/raw-window-handle/src/unix.rs
 create mode 100644 third_party/rust/raw-window-handle/src/web.rs
 create mode 100644 third_party/rust/raw-window-handle/src/windows.rs
 create mode 100644 third_party/rust/relevant/.cargo-checksum.json
 create mode 100644 third_party/rust/relevant/Cargo.toml
 create mode 100644 third_party/rust/relevant/LICENSE-APACHE
 create mode 100644 third_party/rust/relevant/LICENSE-MIT
 create mode 100644 third_party/rust/relevant/README.md
 create mode 100644 third_party/rust/relevant/src/lib.rs
 create mode 100644 third_party/rust/rendy-descriptor/.cargo-checksum.json
 create mode 100644 third_party/rust/rendy-descriptor/Cargo.toml
 create mode 100644 third_party/rust/rendy-descriptor/src/allocator.rs
 create mode 100644 third_party/rust/rendy-descriptor/src/lib.rs
 create mode 100644 third_party/rust/rendy-descriptor/src/ranges.rs
 create mode 100644 third_party/rust/rendy-memory/.cargo-checksum.json
 create mode 100644 third_party/rust/rendy-memory/Cargo.toml
 create mode 100644 third_party/rust/rendy-memory/src/allocator/dedicated.rs
 create mode 100644 third_party/rust/rendy-memory/src/allocator/dynamic.rs
 create mode 100644 third_party/rust/rendy-memory/src/allocator/linear.rs
 create mode 100644 third_party/rust/rendy-memory/src/allocator/mod.rs
 create mode 100644 third_party/rust/rendy-memory/src/block.rs
 create mode 100644 third_party/rust/rendy-memory/src/heaps/heap.rs
 create mode 100644 third_party/rust/rendy-memory/src/heaps/memory_type.rs
 create mode 100644 third_party/rust/rendy-memory/src/heaps/mod.rs
 create mode 100644 third_party/rust/rendy-memory/src/lib.rs
 create mode 100644 third_party/rust/rendy-memory/src/mapping/mod.rs
 create mode 100644 third_party/rust/rendy-memory/src/mapping/range.rs
 create mode 100644 third_party/rust/rendy-memory/src/mapping/write.rs
 create mode 100644 third_party/rust/rendy-memory/src/memory.rs
 create mode 100644 third_party/rust/rendy-memory/src/usage.rs
 create mode 100644 third_party/rust/rendy-memory/src/util.rs
 create mode 100644 third_party/rust/rendy-memory/src/utilization.rs
 create mode 100644 third_party/rust/shared_library/.cargo-checksum.json
 create mode 100644 third_party/rust/shared_library/Cargo.toml
 create mode 100644 third_party/rust/shared_library/LICENSE-APACHE
 create mode 100644 third_party/rust/shared_library/LICENSE-MIT
 create mode 100644 third_party/rust/shared_library/src/dynamic_library.rs
 create mode 100644 third_party/rust/shared_library/src/lib.rs
 create mode 100644 third_party/rust/spirv_cross/.cargo-checksum.json
 create mode 100644 third_party/rust/spirv_cross/Cargo.toml
 create mode 100644 third_party/rust/spirv_cross/build.rs
 create mode 100644 third_party/rust/spirv_cross/src/bindings_native.rs
 create mode 100644 third_party/rust/spirv_cross/src/bindings_wasm.rs
 create mode 100644 third_party/rust/spirv_cross/src/bindings_wasm_functions.rs
 create mode 100644 third_party/rust/spirv_cross/src/compiler.rs
 create mode 100644 third_party/rust/spirv_cross/src/emscripten.rs
 create mode 100644 third_party/rust/spirv_cross/src/glsl.rs
 create mode 100644 third_party/rust/spirv_cross/src/hlsl.rs
 create mode 100644 third_party/rust/spirv_cross/src/lib.rs
 create mode 100644 third_party/rust/spirv_cross/src/msl.rs
 create mode 100644 third_party/rust/spirv_cross/src/ptr_util.rs
 create mode 100644 third_party/rust/spirv_cross/src/spirv.rs
 create mode 100644 third_party/rust/spirv_cross/src/vendor/SPIRV-Cross/.clang-format
 create mode 100644 third_party/rust/spirv_cross/src/vendor/SPIRV-Cross/.gitignore
 create mode 100644
third_party/rust/spirv_cross/src/vendor/SPIRV-Cross/CMakeLists.txt create mode 100644 third_party/rust/spirv_cross/src/vendor/SPIRV-Cross/GLSL.std.450.h create mode 100644 third_party/rust/spirv_cross/src/vendor/SPIRV-Cross/LICENSE create mode 100644 third_party/rust/spirv_cross/src/vendor/SPIRV-Cross/Makefile create mode 100644 third_party/rust/spirv_cross/src/vendor/SPIRV-Cross/cmake/gitversion.in.h create mode 100644 third_party/rust/spirv_cross/src/vendor/SPIRV-Cross/format_all.sh create mode 100644 third_party/rust/spirv_cross/src/vendor/SPIRV-Cross/gn/BUILD.gn create mode 100644 third_party/rust/spirv_cross/src/vendor/SPIRV-Cross/include/spirv_cross/barrier.hpp create mode 100644 third_party/rust/spirv_cross/src/vendor/SPIRV-Cross/include/spirv_cross/external_interface.h create mode 100644 third_party/rust/spirv_cross/src/vendor/SPIRV-Cross/include/spirv_cross/image.hpp create mode 100644 third_party/rust/spirv_cross/src/vendor/SPIRV-Cross/include/spirv_cross/internal_interface.hpp create mode 100644 third_party/rust/spirv_cross/src/vendor/SPIRV-Cross/include/spirv_cross/sampler.hpp create mode 100644 third_party/rust/spirv_cross/src/vendor/SPIRV-Cross/include/spirv_cross/thread_group.hpp create mode 100644 third_party/rust/spirv_cross/src/vendor/SPIRV-Cross/main.cpp create mode 100644 third_party/rust/spirv_cross/src/vendor/SPIRV-Cross/pkg-config/spirv-cross-c-shared.pc.in create mode 100644 third_party/rust/spirv_cross/src/vendor/SPIRV-Cross/spirv.h create mode 100644 third_party/rust/spirv_cross/src/vendor/SPIRV-Cross/spirv.hpp create mode 100644 third_party/rust/spirv_cross/src/vendor/SPIRV-Cross/spirv_cfg.cpp create mode 100644 third_party/rust/spirv_cross/src/vendor/SPIRV-Cross/spirv_cfg.hpp create mode 100644 third_party/rust/spirv_cross/src/vendor/SPIRV-Cross/spirv_common.hpp create mode 100644 third_party/rust/spirv_cross/src/vendor/SPIRV-Cross/spirv_cpp.cpp create mode 100644 third_party/rust/spirv_cross/src/vendor/SPIRV-Cross/spirv_cpp.hpp create mode 100644 third_party/rust/spirv_cross/src/vendor/SPIRV-Cross/spirv_cross.cpp create mode 100644 third_party/rust/spirv_cross/src/vendor/SPIRV-Cross/spirv_cross.hpp create mode 100644 third_party/rust/spirv_cross/src/vendor/SPIRV-Cross/spirv_cross_c.cpp create mode 100644 third_party/rust/spirv_cross/src/vendor/SPIRV-Cross/spirv_cross_c.h create mode 100644 third_party/rust/spirv_cross/src/vendor/SPIRV-Cross/spirv_cross_containers.hpp create mode 100644 third_party/rust/spirv_cross/src/vendor/SPIRV-Cross/spirv_cross_error_handling.hpp create mode 100644 third_party/rust/spirv_cross/src/vendor/SPIRV-Cross/spirv_cross_parsed_ir.cpp create mode 100644 third_party/rust/spirv_cross/src/vendor/SPIRV-Cross/spirv_cross_parsed_ir.hpp create mode 100644 third_party/rust/spirv_cross/src/vendor/SPIRV-Cross/spirv_cross_util.cpp create mode 100644 third_party/rust/spirv_cross/src/vendor/SPIRV-Cross/spirv_cross_util.hpp create mode 100644 third_party/rust/spirv_cross/src/vendor/SPIRV-Cross/spirv_glsl.cpp create mode 100644 third_party/rust/spirv_cross/src/vendor/SPIRV-Cross/spirv_glsl.hpp create mode 100644 third_party/rust/spirv_cross/src/vendor/SPIRV-Cross/spirv_hlsl.cpp create mode 100644 third_party/rust/spirv_cross/src/vendor/SPIRV-Cross/spirv_hlsl.hpp create mode 100644 third_party/rust/spirv_cross/src/vendor/SPIRV-Cross/spirv_msl.cpp create mode 100644 third_party/rust/spirv_cross/src/vendor/SPIRV-Cross/spirv_msl.hpp create mode 100644 third_party/rust/spirv_cross/src/vendor/SPIRV-Cross/spirv_parser.cpp create mode 100644 
third_party/rust/spirv_cross/src/vendor/SPIRV-Cross/spirv_parser.hpp create mode 100644 third_party/rust/spirv_cross/src/vendor/SPIRV-Cross/spirv_reflect.cpp create mode 100644 third_party/rust/spirv_cross/src/vendor/SPIRV-Cross/spirv_reflect.hpp create mode 100644 third_party/rust/spirv_cross/src/wrapper.cpp create mode 100644 third_party/rust/spirv_cross/src/wrapper.hpp create mode 100644 third_party/rust/spirv_cross/tests/common/mod.rs create mode 100644 third_party/rust/spirv_cross/tests/glsl_tests.rs create mode 100644 third_party/rust/spirv_cross/tests/hlsl_tests.rs create mode 100644 third_party/rust/spirv_cross/tests/msl_tests.rs create mode 100644 third_party/rust/spirv_cross/tests/shaders/array.vert create mode 100644 third_party/rust/spirv_cross/tests/shaders/array.vert.spv create mode 100644 third_party/rust/spirv_cross/tests/shaders/rasterize_disabled.vert create mode 100644 third_party/rust/spirv_cross/tests/shaders/rasterize_disabled.vert.spv create mode 100644 third_party/rust/spirv_cross/tests/shaders/sampler.frag create mode 100644 third_party/rust/spirv_cross/tests/shaders/sampler.frag.spv create mode 100644 third_party/rust/spirv_cross/tests/shaders/simple.vert create mode 100644 third_party/rust/spirv_cross/tests/shaders/simple.vert.spv create mode 100644 third_party/rust/spirv_cross/tests/shaders/specialization.comp create mode 100644 third_party/rust/spirv_cross/tests/shaders/specialization.comp.spv create mode 100644 third_party/rust/spirv_cross/tests/shaders/struct.frag create mode 100644 third_party/rust/spirv_cross/tests/shaders/struct.frag.spv create mode 100644 third_party/rust/spirv_cross/tests/shaders/struct.vert create mode 100644 third_party/rust/spirv_cross/tests/shaders/struct.vert.spv create mode 100644 third_party/rust/spirv_cross/tests/shaders/workgroup.comp create mode 100644 third_party/rust/spirv_cross/tests/shaders/workgroup.comp.spv create mode 100644 third_party/rust/spirv_cross/tests/spirv_tests.rs create mode 100644 third_party/rust/storage-map/.cargo-checksum.json create mode 100644 third_party/rust/storage-map/Cargo.toml create mode 100644 third_party/rust/storage-map/LICENSE create mode 100644 third_party/rust/storage-map/README.md create mode 100644 third_party/rust/storage-map/src/lib.rs create mode 100644 third_party/rust/storage-map/tests/ten.rs create mode 100644 third_party/rust/wio/.cargo-checksum.json create mode 100644 third_party/rust/wio/Cargo.toml create mode 100644 third_party/rust/wio/LICENSE-APACHE create mode 100644 third_party/rust/wio/LICENSE-MIT create mode 100644 third_party/rust/wio/README.md create mode 100644 third_party/rust/wio/src/apc.rs create mode 100644 third_party/rust/wio/src/com.rs create mode 100644 third_party/rust/wio/src/console.rs create mode 100644 third_party/rust/wio/src/error.rs create mode 100644 third_party/rust/wio/src/handle.rs create mode 100644 third_party/rust/wio/src/lib.rs create mode 100644 third_party/rust/wio/src/perf.rs create mode 100644 third_party/rust/wio/src/pipe.rs create mode 100644 third_party/rust/wio/src/sleep.rs create mode 100644 third_party/rust/wio/src/thread.rs create mode 100644 third_party/rust/wio/src/ums.rs create mode 100644 third_party/rust/wio/src/wide.rs create mode 100644 third_party/rust/x11/.cargo-checksum.json create mode 100644 third_party/rust/x11/Cargo.toml create mode 100644 third_party/rust/x11/build.rs create mode 100644 third_party/rust/x11/examples/hello-world.rs create mode 100644 third_party/rust/x11/examples/input.rs create mode 100644 
 create mode 100644 third_party/rust/x11/examples/xrecord.rs
 create mode 100644 third_party/rust/x11/src/dpms.rs
 create mode 100644 third_party/rust/x11/src/glx.rs
 create mode 100644 third_party/rust/x11/src/internal.rs
 create mode 100644 third_party/rust/x11/src/keysym.rs
 create mode 100644 third_party/rust/x11/src/lib.rs
 create mode 100644 third_party/rust/x11/src/link.rs
 create mode 100644 third_party/rust/x11/src/xcursor.rs
 create mode 100644 third_party/rust/x11/src/xf86vmode.rs
 create mode 100644 third_party/rust/x11/src/xfixes.rs
 create mode 100644 third_party/rust/x11/src/xft.rs
 create mode 100644 third_party/rust/x11/src/xinerama.rs
 create mode 100644 third_party/rust/x11/src/xinput.rs
 create mode 100644 third_party/rust/x11/src/xinput2.rs
 create mode 100644 third_party/rust/x11/src/xlib.rs
 create mode 100644 third_party/rust/x11/src/xlib_xcb.rs
 create mode 100644 third_party/rust/x11/src/xmd.rs
 create mode 100644 third_party/rust/x11/src/xmu.rs
 create mode 100644 third_party/rust/x11/src/xrandr.rs
 create mode 100644 third_party/rust/x11/src/xrecord.rs
 create mode 100644 third_party/rust/x11/src/xrender.rs
 create mode 100644 third_party/rust/x11/src/xss.rs
 create mode 100644 third_party/rust/x11/src/xt.rs
 create mode 100644 third_party/rust/x11/src/xtest.rs

diff --git a/.cargo/config.in b/.cargo/config.in
index 093290266c15..361a082cb6b0 100644
--- a/.cargo/config.in
+++ b/.cargo/config.in
@@ -12,6 +12,16 @@ branch = "master"
 git = "https://github.com/mozilla/neqo"
 replace-with = "vendored-sources"
 
+[source."https://github.com/kvark/spirv_cross"]
+branch = "wgpu"
+git = "https://github.com/kvark/spirv_cross"
+replace-with = "vendored-sources"
+
+[source."https://github.com/kvark/rust-objc-exception"]
+branch = "cc"
+git = "https://github.com/kvark/rust-objc-exception"
+replace-with = "vendored-sources"
+
 [source."https://github.com/jfkthame/mapped_hyph.git"]
 git = "https://github.com/jfkthame/mapped_hyph.git"
 replace-with = "vendored-sources"
diff --git a/.clang-format-ignore b/.clang-format-ignore
index 94a879f45005..91b1c44d230c 100644
--- a/.clang-format-ignore
+++ b/.clang-format-ignore
@@ -38,6 +38,7 @@ layout/style/nsStyleStructList.h
 # Autogenerated file
 gfx/gl/GLConsts.h
 gfx/webrender_bindings/webrender_ffi_generated.h
+dom/webgpu/ffi/wgpu_ffi_generated.h
 intl/unicharutil/util/nsSpecialCasingData.cpp
 intl/unicharutil/util/nsUnicodePropertyData.cpp
 intl/unicharutil/util/nsUnicodeScriptCodes.h
diff --git a/Cargo.lock b/Cargo.lock
index 6df6b54d95c4..a12412f16f6b 100644
--- a/Cargo.lock
+++ b/Cargo.lock
@@ -46,12 +46,22 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
 
 [[package]]
 name = "arrayvec"
-version = "0.4.11"
+version = "0.5.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+
+[[package]]
+name = "ash"
+version = "0.29.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 dependencies = [
- "nodrop 0.1.12 (registry+https://github.com/rust-lang/crates.io-index)",
+ "shared_library 0.1.9 (registry+https://github.com/rust-lang/crates.io-index)",
 ]
 
+[[package]]
+name = "atom"
+version = "0.3.5"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+
 [[package]]
 name = "atomic"
 version = "0.4.5"
@@ -92,7 +102,7 @@ dependencies = [
 "audio_thread_priority 0.20.2 (registry+https://github.com/rust-lang/crates.io-index)",
 "bincode 1.2.0 (registry+https://github.com/rust-lang/crates.io-index)",
 "bytes 0.4.9 (registry+https://github.com/rust-lang/crates.io-index)",
- "cc 1.0.34 (registry+https://github.com/rust-lang/crates.io-index)",
+ "cc 1.0.47 (registry+https://github.com/rust-lang/crates.io-index)",
 "cubeb 0.6.2 (registry+https://github.com/rust-lang/crates.io-index)",
 "error-chain 0.11.0 (registry+https://github.com/rust-lang/crates.io-index)",
 "futures 0.1.23 (registry+https://github.com/rust-lang/crates.io-index)",
@@ -165,14 +175,13 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
 
 [[package]]
 name = "backtrace"
-version = "0.3.9"
+version = "0.3.38"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 dependencies = [
 "backtrace-sys 0.1.24 (registry+https://github.com/rust-lang/crates.io-index)",
 "cfg-if 0.1.6 (registry+https://github.com/rust-lang/crates.io-index)",
 "libc 0.2.59 (registry+https://github.com/rust-lang/crates.io-index)",
 "rustc-demangle 0.1.8 (registry+https://github.com/rust-lang/crates.io-index)",
- "winapi 0.3.7 (registry+https://github.com/rust-lang/crates.io-index)",
 ]
 
 [[package]]
@@ -180,7 +189,7 @@ name = "backtrace-sys"
 version = "0.1.24"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 dependencies = [
- "cc 1.0.34 (registry+https://github.com/rust-lang/crates.io-index)",
+ "cc 1.0.47 (registry+https://github.com/rust-lang/crates.io-index)",
 "libc 0.2.59 (registry+https://github.com/rust-lang/crates.io-index)",
 ]
 
@@ -335,14 +344,19 @@ dependencies = [
 
 [[package]]
 name = "blake2b_simd"
-version = "0.5.8"
+version = "0.5.9"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 dependencies = [
 "arrayref 0.3.5 (registry+https://github.com/rust-lang/crates.io-index)",
- "arrayvec 0.4.11 (registry+https://github.com/rust-lang/crates.io-index)",
+ "arrayvec 0.5.1 (registry+https://github.com/rust-lang/crates.io-index)",
 "constant_time_eq 0.1.3 (registry+https://github.com/rust-lang/crates.io-index)",
 ]
 
+[[package]]
+name = "block"
+version = "0.1.6"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+
 [[package]]
 name = "block-buffer"
 version = "0.7.3"
@@ -411,7 +425,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
 
 [[package]]
 name = "cc"
-version = "1.0.34"
+version = "1.0.47"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 
 [[package]]
@@ -497,9 +511,28 @@ name = "cmake"
 version = "0.1.29"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 dependencies = [
- "cc 1.0.34 (registry+https://github.com/rust-lang/crates.io-index)",
+ "cc 1.0.47 (registry+https://github.com/rust-lang/crates.io-index)",
 ]
 
+[[package]]
+name = "cocoa"
+version = "0.19.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+dependencies = [
+ "bitflags 1.2.0 (registry+https://github.com/rust-lang/crates.io-index)",
+ "block 0.1.6 (registry+https://github.com/rust-lang/crates.io-index)",
+ "core-foundation 0.6.3 (registry+https://github.com/rust-lang/crates.io-index)",
+ "core-graphics 0.17.3 (registry+https://github.com/rust-lang/crates.io-index)",
+ "foreign-types 0.3.0 (registry+https://github.com/rust-lang/crates.io-index)",
+ "libc 0.2.59 (registry+https://github.com/rust-lang/crates.io-index)",
+ "objc 0.2.6 (registry+https://github.com/rust-lang/crates.io-index)",
+]
+
+[[package]]
+name = "colorful"
+version = "0.2.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+
 [[package]]
 name = "comedy"
 version = "0.1.0"
@@ -523,6 +556,11 @@ dependencies = [
 "time 0.1.40 (registry+https://github.com/rust-lang/crates.io-index)",
 ]
 
+[[package]]
+name = "copyless"
+version = "0.1.4"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+
 [[package]]
 name = "core-foundation"
 version = "0.6.3"
@@ -539,7 +577,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
 
 [[package]]
 name = "core-graphics"
-version = "0.17.1"
+version = "0.17.3"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 dependencies = [
 "bitflags 1.2.0 (registry+https://github.com/rust-lang/crates.io-index)",
@@ -554,7 +592,7 @@ version = "13.0.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 dependencies = [
 "core-foundation 0.6.3 (registry+https://github.com/rust-lang/crates.io-index)",
- "core-graphics 0.17.1 (registry+https://github.com/rust-lang/crates.io-index)",
+ "core-graphics 0.17.3 (registry+https://github.com/rust-lang/crates.io-index)",
 "foreign-types 0.3.0 (registry+https://github.com/rust-lang/crates.io-index)",
 "libc 0.2.59 (registry+https://github.com/rust-lang/crates.io-index)",
 ]
@@ -674,21 +712,21 @@ dependencies = [
 
 [[package]]
 name = "crossbeam-deque"
-version = "0.7.1"
+version = "0.7.2"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 dependencies = [
- "crossbeam-epoch 0.7.2 (registry+https://github.com/rust-lang/crates.io-index)",
- "crossbeam-utils 0.6.5 (registry+https://github.com/rust-lang/crates.io-index)",
+ "crossbeam-epoch 0.8.0 (registry+https://github.com/rust-lang/crates.io-index)",
+ "crossbeam-utils 0.7.0 (registry+https://github.com/rust-lang/crates.io-index)",
 ]
 
 [[package]]
 name = "crossbeam-epoch"
-version = "0.7.2"
+version = "0.8.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 dependencies = [
- "arrayvec 0.4.11 (registry+https://github.com/rust-lang/crates.io-index)",
+ "autocfg 0.1.6 (registry+https://github.com/rust-lang/crates.io-index)",
 "cfg-if 0.1.6 (registry+https://github.com/rust-lang/crates.io-index)",
- "crossbeam-utils 0.6.5 (registry+https://github.com/rust-lang/crates.io-index)",
+ "crossbeam-utils 0.7.0 (registry+https://github.com/rust-lang/crates.io-index)",
 "lazy_static 1.4.0 (registry+https://github.com/rust-lang/crates.io-index)",
 "memoffset 0.5.1 (registry+https://github.com/rust-lang/crates.io-index)",
 "scopeguard 1.0.0 (registry+https://github.com/rust-lang/crates.io-index)",
@@ -711,6 +749,16 @@ dependencies = [
 "lazy_static 1.4.0 (registry+https://github.com/rust-lang/crates.io-index)",
 ]
 
+[[package]]
+name = "crossbeam-utils"
+version = "0.7.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+dependencies = [
+ "autocfg 0.1.6 (registry+https://github.com/rust-lang/crates.io-index)",
+ "cfg-if 0.1.6 (registry+https://github.com/rust-lang/crates.io-index)",
+ "lazy_static 1.4.0 (registry+https://github.com/rust-lang/crates.io-index)",
+]
+
 [[package]]
 name = "cssparser"
 version = "0.25.9"
@@ -817,6 +865,16 @@ dependencies = [
 "pkg-config 0.3.9 (registry+https://github.com/rust-lang/crates.io-index)",
 ]
 
+[[package]]
+name = "d3d12"
+version = "0.3.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+dependencies = [
+ "bitflags 1.2.0 (registry+https://github.com/rust-lang/crates.io-index)",
+ "libloading 0.5.0 (registry+https://github.com/rust-lang/crates.io-index)",
+ "winapi 0.3.7 (registry+https://github.com/rust-lang/crates.io-index)",
+]
+
 [[package]]
 name = "darling"
 version = "0.10.1"
@@ -1035,7 +1093,7 @@ name = "failure"
 version = "0.1.3"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 dependencies = [
- "backtrace 0.3.9 (registry+https://github.com/rust-lang/crates.io-index)",
+ "backtrace 0.3.38 (registry+https://github.com/rust-lang/crates.io-index)",
"failure_derive 0.1.3 (registry+https://github.com/rust-lang/crates.io-index)", ] @@ -1217,6 +1275,116 @@ dependencies = [ "winapi 0.3.7 (registry+https://github.com/rust-lang/crates.io-index)", ] +[[package]] +name = "gfx-auxil" +version = "0.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +dependencies = [ + "fxhash 0.2.1 (registry+https://github.com/rust-lang/crates.io-index)", + "gfx-hal 0.4.0 (registry+https://github.com/rust-lang/crates.io-index)", + "spirv_cross 0.16.0 (git+https://github.com/kvark/spirv_cross?branch=wgpu)", +] + +[[package]] +name = "gfx-backend-dx11" +version = "0.4.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +dependencies = [ + "bitflags 1.2.0 (registry+https://github.com/rust-lang/crates.io-index)", + "gfx-auxil 0.1.0 (registry+https://github.com/rust-lang/crates.io-index)", + "gfx-hal 0.4.0 (registry+https://github.com/rust-lang/crates.io-index)", + "libloading 0.5.0 (registry+https://github.com/rust-lang/crates.io-index)", + "log 0.4.6 (registry+https://github.com/rust-lang/crates.io-index)", + "parking_lot 0.9.0 (registry+https://github.com/rust-lang/crates.io-index)", + "range-alloc 0.1.0 (registry+https://github.com/rust-lang/crates.io-index)", + "raw-window-handle 0.3.0 (registry+https://github.com/rust-lang/crates.io-index)", + "smallvec 0.6.10 (registry+https://github.com/rust-lang/crates.io-index)", + "spirv_cross 0.16.0 (git+https://github.com/kvark/spirv_cross?branch=wgpu)", + "winapi 0.3.7 (registry+https://github.com/rust-lang/crates.io-index)", + "wio 0.2.2 (registry+https://github.com/rust-lang/crates.io-index)", +] + +[[package]] +name = "gfx-backend-dx12" +version = "0.4.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +dependencies = [ + "bitflags 1.2.0 (registry+https://github.com/rust-lang/crates.io-index)", + "d3d12 0.3.0 (registry+https://github.com/rust-lang/crates.io-index)", + "gfx-auxil 0.1.0 (registry+https://github.com/rust-lang/crates.io-index)", + "gfx-hal 0.4.0 (registry+https://github.com/rust-lang/crates.io-index)", + "log 0.4.6 (registry+https://github.com/rust-lang/crates.io-index)", + "range-alloc 0.1.0 (registry+https://github.com/rust-lang/crates.io-index)", + "raw-window-handle 0.3.0 (registry+https://github.com/rust-lang/crates.io-index)", + "smallvec 0.6.10 (registry+https://github.com/rust-lang/crates.io-index)", + "spirv_cross 0.16.0 (git+https://github.com/kvark/spirv_cross?branch=wgpu)", + "winapi 0.3.7 (registry+https://github.com/rust-lang/crates.io-index)", +] + +[[package]] +name = "gfx-backend-empty" +version = "0.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +dependencies = [ + "gfx-hal 0.4.0 (registry+https://github.com/rust-lang/crates.io-index)", + "raw-window-handle 0.3.0 (registry+https://github.com/rust-lang/crates.io-index)", +] + +[[package]] +name = "gfx-backend-metal" +version = "0.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +dependencies = [ + "arrayvec 0.5.1 (registry+https://github.com/rust-lang/crates.io-index)", + "bitflags 1.2.0 (registry+https://github.com/rust-lang/crates.io-index)", + "block 0.1.6 (registry+https://github.com/rust-lang/crates.io-index)", + "cocoa 0.19.0 (registry+https://github.com/rust-lang/crates.io-index)", + "copyless 0.1.4 (registry+https://github.com/rust-lang/crates.io-index)", + "core-graphics 0.17.3 (registry+https://github.com/rust-lang/crates.io-index)", + "foreign-types 0.3.0 (registry+https://github.com/rust-lang/crates.io-index)", + "gfx-auxil 
0.1.0 (registry+https://github.com/rust-lang/crates.io-index)", + "gfx-hal 0.4.0 (registry+https://github.com/rust-lang/crates.io-index)", + "lazy_static 1.4.0 (registry+https://github.com/rust-lang/crates.io-index)", + "log 0.4.6 (registry+https://github.com/rust-lang/crates.io-index)", + "metal 0.17.0 (registry+https://github.com/rust-lang/crates.io-index)", + "objc 0.2.6 (registry+https://github.com/rust-lang/crates.io-index)", + "parking_lot 0.9.0 (registry+https://github.com/rust-lang/crates.io-index)", + "range-alloc 0.1.0 (registry+https://github.com/rust-lang/crates.io-index)", + "raw-window-handle 0.3.0 (registry+https://github.com/rust-lang/crates.io-index)", + "smallvec 0.6.10 (registry+https://github.com/rust-lang/crates.io-index)", + "spirv_cross 0.16.0 (git+https://github.com/kvark/spirv_cross?branch=wgpu)", + "storage-map 0.2.0 (registry+https://github.com/rust-lang/crates.io-index)", +] + +[[package]] +name = "gfx-backend-vulkan" +version = "0.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +dependencies = [ + "arrayvec 0.5.1 (registry+https://github.com/rust-lang/crates.io-index)", + "ash 0.29.0 (registry+https://github.com/rust-lang/crates.io-index)", + "byteorder 1.3.1 (registry+https://github.com/rust-lang/crates.io-index)", + "core-graphics 0.17.3 (registry+https://github.com/rust-lang/crates.io-index)", + "gfx-hal 0.4.0 (registry+https://github.com/rust-lang/crates.io-index)", + "lazy_static 1.4.0 (registry+https://github.com/rust-lang/crates.io-index)", + "log 0.4.6 (registry+https://github.com/rust-lang/crates.io-index)", + "objc 0.2.6 (registry+https://github.com/rust-lang/crates.io-index)", + "raw-window-handle 0.3.0 (registry+https://github.com/rust-lang/crates.io-index)", + "smallvec 0.6.10 (registry+https://github.com/rust-lang/crates.io-index)", + "winapi 0.3.7 (registry+https://github.com/rust-lang/crates.io-index)", + "x11 2.18.1 (registry+https://github.com/rust-lang/crates.io-index)", +] + +[[package]] +name = "gfx-hal" +version = "0.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +dependencies = [ + "bitflags 1.2.0 (registry+https://github.com/rust-lang/crates.io-index)", + "raw-window-handle 0.3.0 (registry+https://github.com/rust-lang/crates.io-index)", + "smallvec 0.6.10 (registry+https://github.com/rust-lang/crates.io-index)", +] + [[package]] name = "gkrust" version = "0.1.0" @@ -1242,7 +1410,7 @@ dependencies = [ name = "gkrust-shared" version = "0.1.0" dependencies = [ - "arrayvec 0.4.11 (registry+https://github.com/rust-lang/crates.io-index)", + "arrayvec 0.5.1 (registry+https://github.com/rust-lang/crates.io-index)", "audio_thread_priority 0.20.2 (registry+https://github.com/rust-lang/crates.io-index)", "audioipc-client 0.4.0", "audioipc-server 0.2.3", @@ -1279,6 +1447,7 @@ dependencies = [ "static_prefs 0.1.0", "storage 0.1.0", "webrender_bindings 0.1.0", + "wgpu-remote 0.1.0", "xpcom 0.1.0", "xulstore 0.1.0", ] @@ -1392,6 +1561,14 @@ dependencies = [ "syn 1.0.5 (registry+https://github.com/rust-lang/crates.io-index)", ] +[[package]] +name = "hibitset" +version = "0.6.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +dependencies = [ + "atom 0.3.5 (registry+https://github.com/rust-lang/crates.io-index)", +] + [[package]] name = "http" version = "0.1.17" @@ -1609,7 +1786,7 @@ name = "libloading" version = "0.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ - "cc 1.0.34 (registry+https://github.com/rust-lang/crates.io-index)", + "cc 1.0.47 
(registry+https://github.com/rust-lang/crates.io-index)", "winapi 0.3.7 (registry+https://github.com/rust-lang/crates.io-index)", ] @@ -1635,7 +1812,7 @@ name = "libz-sys" version = "1.0.25" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ - "cc 1.0.34 (registry+https://github.com/rust-lang/crates.io-index)", + "cc 1.0.47 (registry+https://github.com/rust-lang/crates.io-index)", "libc 0.2.59 (registry+https://github.com/rust-lang/crates.io-index)", "pkg-config 0.3.9 (registry+https://github.com/rust-lang/crates.io-index)", "vcpkg 0.2.2 (registry+https://github.com/rust-lang/crates.io-index)", @@ -1671,7 +1848,7 @@ version = "0.9.5" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ "bindgen 0.51.1 (registry+https://github.com/rust-lang/crates.io-index)", - "cc 1.0.34 (registry+https://github.com/rust-lang/crates.io-index)", + "cc 1.0.47 (registry+https://github.com/rust-lang/crates.io-index)", "libc 0.2.59 (registry+https://github.com/rust-lang/crates.io-index)", "pkg-config 0.3.9 (registry+https://github.com/rust-lang/crates.io-index)", ] @@ -1713,7 +1890,7 @@ name = "lucet-runtime" version = "0.1.1" source = "git+https://github.com/PLSysSec/lucet_sandbox_compiler#5c22392b5b1aaa60e915c75e92b57391e1e61e6d" dependencies = [ - "cc 1.0.34 (registry+https://github.com/rust-lang/crates.io-index)", + "cc 1.0.47 (registry+https://github.com/rust-lang/crates.io-index)", "libc 0.2.59 (registry+https://github.com/rust-lang/crates.io-index)", "lucet-module 0.1.1 (git+https://github.com/PLSysSec/lucet_sandbox_compiler)", "lucet-runtime-internals 0.1.1 (git+https://github.com/PLSysSec/lucet_sandbox_compiler)", @@ -1729,7 +1906,7 @@ dependencies = [ "bincode 1.2.0 (registry+https://github.com/rust-lang/crates.io-index)", "bitflags 1.2.0 (registry+https://github.com/rust-lang/crates.io-index)", "byteorder 1.3.1 (registry+https://github.com/rust-lang/crates.io-index)", - "cc 1.0.34 (registry+https://github.com/rust-lang/crates.io-index)", + "cc 1.0.47 (registry+https://github.com/rust-lang/crates.io-index)", "failure 0.1.3 (registry+https://github.com/rust-lang/crates.io-index)", "getrandom 0.1.3 (registry+https://github.com/rust-lang/crates.io-index)", "lazy_static 1.4.0 (registry+https://github.com/rust-lang/crates.io-index)", @@ -1771,6 +1948,14 @@ dependencies = [ "libc 0.2.59 (registry+https://github.com/rust-lang/crates.io-index)", ] +[[package]] +name = "malloc_buf" +version = "0.0.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +dependencies = [ + "libc 0.2.59 (registry+https://github.com/rust-lang/crates.io-index)", +] + [[package]] name = "malloc_size_of" version = "0.0.1" @@ -1853,6 +2038,20 @@ dependencies = [ "rustc_version 0.2.3 (registry+https://github.com/rust-lang/crates.io-index)", ] +[[package]] +name = "metal" +version = "0.17.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +dependencies = [ + "bitflags 1.2.0 (registry+https://github.com/rust-lang/crates.io-index)", + "block 0.1.6 (registry+https://github.com/rust-lang/crates.io-index)", + "cocoa 0.19.0 (registry+https://github.com/rust-lang/crates.io-index)", + "core-graphics 0.17.3 (registry+https://github.com/rust-lang/crates.io-index)", + "foreign-types 0.3.0 (registry+https://github.com/rust-lang/crates.io-index)", + "log 0.4.6 (registry+https://github.com/rust-lang/crates.io-index)", + "objc 0.2.6 (registry+https://github.com/rust-lang/crates.io-index)", +] + [[package]] name = "mime" version = "0.3.13" @@ -1875,7 +2074,7 @@ 
name = "miniz-sys" version = "0.1.12" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ - "cc 1.0.34 (registry+https://github.com/rust-lang/crates.io-index)", + "cc 1.0.47 (registry+https://github.com/rust-lang/crates.io-index)", "libc 0.2.59 (registry+https://github.com/rust-lang/crates.io-index)", ] @@ -2091,7 +2290,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" [[package]] name = "neqo-common" version = "0.1.4" -source = "git+https://github.com/mozilla/neqo#a17c1e83bb44ed923eb16a4c675ffe569b3a08f3" +source = "git+https://github.com/mozilla/neqo#cf61e302a90ea844a61381a9e0bc464946187868" dependencies = [ "env_logger 0.6.2 (registry+https://github.com/rust-lang/crates.io-index)", "lazy_static 1.4.0 (registry+https://github.com/rust-lang/crates.io-index)", @@ -2102,7 +2301,7 @@ dependencies = [ [[package]] name = "neqo-crypto" version = "0.1.4" -source = "git+https://github.com/mozilla/neqo#a17c1e83bb44ed923eb16a4c675ffe569b3a08f3" +source = "git+https://github.com/mozilla/neqo#cf61e302a90ea844a61381a9e0bc464946187868" dependencies = [ "bindgen 0.51.1 (registry+https://github.com/rust-lang/crates.io-index)", "log 0.4.6 (registry+https://github.com/rust-lang/crates.io-index)", @@ -2115,7 +2314,7 @@ dependencies = [ [[package]] name = "neqo-http3" version = "0.1.4" -source = "git+https://github.com/mozilla/neqo#a17c1e83bb44ed923eb16a4c675ffe569b3a08f3" +source = "git+https://github.com/mozilla/neqo#cf61e302a90ea844a61381a9e0bc464946187868" dependencies = [ "log 0.4.6 (registry+https://github.com/rust-lang/crates.io-index)", "neqo-common 0.1.4 (git+https://github.com/mozilla/neqo)", @@ -2129,7 +2328,7 @@ dependencies = [ [[package]] name = "neqo-qpack" version = "0.1.4" -source = "git+https://github.com/mozilla/neqo#a17c1e83bb44ed923eb16a4c675ffe569b3a08f3" +source = "git+https://github.com/mozilla/neqo#cf61e302a90ea844a61381a9e0bc464946187868" dependencies = [ "log 0.4.6 (registry+https://github.com/rust-lang/crates.io-index)", "neqo-common 0.1.4 (git+https://github.com/mozilla/neqo)", @@ -2140,7 +2339,7 @@ dependencies = [ [[package]] name = "neqo-transport" version = "0.1.4" -source = "git+https://github.com/mozilla/neqo#a17c1e83bb44ed923eb16a4c675ffe569b3a08f3" +source = "git+https://github.com/mozilla/neqo#cf61e302a90ea844a61381a9e0bc464946187868" dependencies = [ "derive_more 0.13.0 (registry+https://github.com/rust-lang/crates.io-index)", "lazy_static 1.4.0 (registry+https://github.com/rust-lang/crates.io-index)", @@ -2198,7 +2397,7 @@ version = "0.13.1" source = "git+https://github.com/shravanrn/nix/?branch=r0.13.1#4af6c367603869a30fddb5ffb0aba2b9477ba92e" dependencies = [ "bitflags 1.2.0 (registry+https://github.com/rust-lang/crates.io-index)", - "cc 1.0.34 (registry+https://github.com/rust-lang/crates.io-index)", + "cc 1.0.47 (registry+https://github.com/rust-lang/crates.io-index)", "cfg-if 0.1.6 (registry+https://github.com/rust-lang/crates.io-index)", "libc 0.2.59 (registry+https://github.com/rust-lang/crates.io-index)", "void 1.0.2 (registry+https://github.com/rust-lang/crates.io-index)", @@ -2309,6 +2508,23 @@ dependencies = [ "libc 0.2.59 (registry+https://github.com/rust-lang/crates.io-index)", ] +[[package]] +name = "objc" +version = "0.2.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +dependencies = [ + "malloc_buf 0.0.6 (registry+https://github.com/rust-lang/crates.io-index)", + "objc_exception 0.1.2 (git+https://github.com/kvark/rust-objc-exception?branch=cc)", +] + +[[package]] +name = 
"objc_exception" +version = "0.1.2" +source = "git+https://github.com/kvark/rust-objc-exception?branch=cc#c86ad3a52984461fc5c63980d12e8ceed847854c" +dependencies = [ + "cc 1.0.47 (registry+https://github.com/rust-lang/crates.io-index)", +] + [[package]] name = "object" version = "0.14.0" @@ -2707,12 +2923,25 @@ dependencies = [ "rand_core 0.3.1 (registry+https://github.com/rust-lang/crates.io-index)", ] +[[package]] +name = "range-alloc" +version = "0.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" + +[[package]] +name = "raw-window-handle" +version = "0.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +dependencies = [ + "libc 0.2.59 (registry+https://github.com/rust-lang/crates.io-index)", +] + [[package]] name = "rayon" version = "1.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ - "crossbeam-deque 0.7.1 (registry+https://github.com/rust-lang/crates.io-index)", + "crossbeam-deque 0.7.2 (registry+https://github.com/rust-lang/crates.io-index)", "either 1.1.0 (registry+https://github.com/rust-lang/crates.io-index)", "rayon-core 1.6.0 (registry+https://github.com/rust-lang/crates.io-index)", ] @@ -2722,7 +2951,7 @@ name = "rayon-core" version = "1.6.0" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ - "crossbeam-deque 0.7.1 (registry+https://github.com/rust-lang/crates.io-index)", + "crossbeam-deque 0.7.2 (registry+https://github.com/rust-lang/crates.io-index)", "crossbeam-queue 0.1.2 (registry+https://github.com/rust-lang/crates.io-index)", "crossbeam-utils 0.6.5 (registry+https://github.com/rust-lang/crates.io-index)", "lazy_static 1.4.0 (registry+https://github.com/rust-lang/crates.io-index)", @@ -2778,6 +3007,15 @@ name = "regex-syntax" version = "0.6.12" source = "registry+https://github.com/rust-lang/crates.io-index" +[[package]] +name = "relevant" +version = "0.4.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +dependencies = [ + "cfg-if 0.1.6 (registry+https://github.com/rust-lang/crates.io-index)", + "log 0.4.6 (registry+https://github.com/rust-lang/crates.io-index)", +] + [[package]] name = "remove_dir_all" version = "0.5.2" @@ -2786,6 +3024,31 @@ dependencies = [ "winapi 0.3.7 (registry+https://github.com/rust-lang/crates.io-index)", ] +[[package]] +name = "rendy-descriptor" +version = "0.5.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +dependencies = [ + "gfx-hal 0.4.0 (registry+https://github.com/rust-lang/crates.io-index)", + "log 0.4.6 (registry+https://github.com/rust-lang/crates.io-index)", + "relevant 0.4.2 (registry+https://github.com/rust-lang/crates.io-index)", + "smallvec 0.6.10 (registry+https://github.com/rust-lang/crates.io-index)", +] + +[[package]] +name = "rendy-memory" +version = "0.5.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +dependencies = [ + "colorful 0.2.1 (registry+https://github.com/rust-lang/crates.io-index)", + "gfx-hal 0.4.0 (registry+https://github.com/rust-lang/crates.io-index)", + "hibitset 0.6.2 (registry+https://github.com/rust-lang/crates.io-index)", + "log 0.4.6 (registry+https://github.com/rust-lang/crates.io-index)", + "relevant 0.4.2 (registry+https://github.com/rust-lang/crates.io-index)", + "slab 0.4.1 (registry+https://github.com/rust-lang/crates.io-index)", + "smallvec 0.6.10 (registry+https://github.com/rust-lang/crates.io-index)", +] + [[package]] name = "ringbuf" version = "0.1.4" @@ -2855,7 +3118,7 @@ version = "0.5.1" source = 
"registry+https://github.com/rust-lang/crates.io-index" dependencies = [ "base64 0.10.1 (registry+https://github.com/rust-lang/crates.io-index)", - "blake2b_simd 0.5.8 (registry+https://github.com/rust-lang/crates.io-index)", + "blake2b_simd 0.5.9 (registry+https://github.com/rust-lang/crates.io-index)", "crossbeam-utils 0.6.5 (registry+https://github.com/rust-lang/crates.io-index)", ] @@ -3097,6 +3360,15 @@ dependencies = [ "opaque-debug 0.2.1 (registry+https://github.com/rust-lang/crates.io-index)", ] +[[package]] +name = "shared_library" +version = "0.1.9" +source = "registry+https://github.com/rust-lang/crates.io-index" +dependencies = [ + "lazy_static 1.4.0 (registry+https://github.com/rust-lang/crates.io-index)", + "libc 0.2.59 (registry+https://github.com/rust-lang/crates.io-index)", +] + [[package]] name = "shift_or_euc" version = "0.1.0" @@ -3168,6 +3440,14 @@ dependencies = [ "winapi 0.3.7 (registry+https://github.com/rust-lang/crates.io-index)", ] +[[package]] +name = "spirv_cross" +version = "0.16.0" +source = "git+https://github.com/kvark/spirv_cross?branch=wgpu#636677bad724797789239c16e6d332e9b4d97b86" +dependencies = [ + "cc 1.0.47 (registry+https://github.com/rust-lang/crates.io-index)", +] + [[package]] name = "stable_deref_trait" version = "1.0.0" @@ -3188,6 +3468,14 @@ dependencies = [ "xpcom 0.1.0", ] +[[package]] +name = "storage-map" +version = "0.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +dependencies = [ + "lock_api 0.3.1 (registry+https://github.com/rust-lang/crates.io-index)", +] + [[package]] name = "storage_variant" version = "0.1.0" @@ -3213,7 +3501,7 @@ name = "style" version = "0.0.1" dependencies = [ "app_units 0.7.0 (registry+https://github.com/rust-lang/crates.io-index)", - "arrayvec 0.4.11 (registry+https://github.com/rust-lang/crates.io-index)", + "arrayvec 0.5.1 (registry+https://github.com/rust-lang/crates.io-index)", "atomic_refcell 0.1.0 (registry+https://github.com/rust-lang/crates.io-index)", "bindgen 0.51.1 (registry+https://github.com/rust-lang/crates.io-index)", "bitflags 1.2.0 (registry+https://github.com/rust-lang/crates.io-index)", @@ -3257,7 +3545,7 @@ dependencies = [ "to_shmem 0.0.1", "to_shmem_derive 0.0.1", "toml 0.4.5 (registry+https://github.com/rust-lang/crates.io-index)", - "uluru 0.3.0 (registry+https://github.com/rust-lang/crates.io-index)", + "uluru 0.4.0 (registry+https://github.com/rust-lang/crates.io-index)", "unicode-bidi 0.3.4 (registry+https://github.com/rust-lang/crates.io-index)", "unicode-segmentation 1.2.1 (registry+https://github.com/rust-lang/crates.io-index)", "void 1.0.2 (registry+https://github.com/rust-lang/crates.io-index)", @@ -3598,7 +3886,7 @@ name = "tokio-threadpool" version = "0.1.14" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ - "crossbeam-deque 0.7.1 (registry+https://github.com/rust-lang/crates.io-index)", + "crossbeam-deque 0.7.2 (registry+https://github.com/rust-lang/crates.io-index)", "crossbeam-queue 0.1.2 (registry+https://github.com/rust-lang/crates.io-index)", "crossbeam-utils 0.6.5 (registry+https://github.com/rust-lang/crates.io-index)", "futures 0.1.23 (registry+https://github.com/rust-lang/crates.io-index)", @@ -3671,10 +3959,10 @@ source = "registry+https://github.com/rust-lang/crates.io-index" [[package]] name = "uluru" -version = "0.3.0" +version = "0.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ - "arrayvec 0.4.11 (registry+https://github.com/rust-lang/crates.io-index)", + "arrayvec 
+ "arrayvec 0.5.1 (registry+https://github.com/rust-lang/crates.io-index)",
 ]
 
 [[package]]
@@ -3849,7 +4137,7 @@ dependencies = [
 "byteorder 1.3.1 (registry+https://github.com/rust-lang/crates.io-index)",
 "cfg-if 0.1.6 (registry+https://github.com/rust-lang/crates.io-index)",
 "core-foundation 0.6.3 (registry+https://github.com/rust-lang/crates.io-index)",
- "core-graphics 0.17.1 (registry+https://github.com/rust-lang/crates.io-index)",
+ "core-graphics 0.17.3 (registry+https://github.com/rust-lang/crates.io-index)",
 "core-text 13.0.0 (registry+https://github.com/rust-lang/crates.io-index)",
 "cstr 0.1.3 (registry+https://github.com/rust-lang/crates.io-index)",
 "dwrote 0.9.0 (registry+https://github.com/rust-lang/crates.io-index)",
@@ -3887,7 +4175,7 @@ dependencies = [
 "bitflags 1.2.0 (registry+https://github.com/rust-lang/crates.io-index)",
 "byteorder 1.3.1 (registry+https://github.com/rust-lang/crates.io-index)",
 "core-foundation 0.6.3 (registry+https://github.com/rust-lang/crates.io-index)",
- "core-graphics 0.17.1 (registry+https://github.com/rust-lang/crates.io-index)",
+ "core-graphics 0.17.3 (registry+https://github.com/rust-lang/crates.io-index)",
 "derive_more 0.13.0 (registry+https://github.com/rust-lang/crates.io-index)",
 "euclid 0.20.0 (registry+https://github.com/rust-lang/crates.io-index)",
 "malloc_size_of_derive 0.1.0 (registry+https://github.com/rust-lang/crates.io-index)",
@@ -3906,7 +4194,7 @@ dependencies = [
 "app_units 0.7.0 (registry+https://github.com/rust-lang/crates.io-index)",
 "bincode 1.2.0 (registry+https://github.com/rust-lang/crates.io-index)",
 "core-foundation 0.6.3 (registry+https://github.com/rust-lang/crates.io-index)",
- "core-graphics 0.17.1 (registry+https://github.com/rust-lang/crates.io-index)",
+ "core-graphics 0.17.3 (registry+https://github.com/rust-lang/crates.io-index)",
 "dwrote 0.9.0 (registry+https://github.com/rust-lang/crates.io-index)",
 "euclid 0.20.0 (registry+https://github.com/rust-lang/crates.io-index)",
 "foreign-types 0.3.0 (registry+https://github.com/rust-lang/crates.io-index)",
@@ -3946,6 +4234,40 @@ dependencies = [
 "nom 4.1.1 (registry+https://github.com/rust-lang/crates.io-index)",
 ]
 
+[[package]]
+name = "wgpu-native"
+version = "0.4.0"
+dependencies = [
+ "arrayvec 0.5.1 (registry+https://github.com/rust-lang/crates.io-index)",
+ "bitflags 1.2.0 (registry+https://github.com/rust-lang/crates.io-index)",
+ "copyless 0.1.4 (registry+https://github.com/rust-lang/crates.io-index)",
+ "fxhash 0.2.1 (registry+https://github.com/rust-lang/crates.io-index)",
+ "gfx-backend-dx11 0.4.2 (registry+https://github.com/rust-lang/crates.io-index)",
+ "gfx-backend-dx12 0.4.1 (registry+https://github.com/rust-lang/crates.io-index)",
+ "gfx-backend-empty 0.4.0 (registry+https://github.com/rust-lang/crates.io-index)",
+ "gfx-backend-metal 0.4.0 (registry+https://github.com/rust-lang/crates.io-index)",
+ "gfx-backend-vulkan 0.4.0 (registry+https://github.com/rust-lang/crates.io-index)",
+ "gfx-hal 0.4.0 (registry+https://github.com/rust-lang/crates.io-index)",
+ "lazy_static 1.4.0 (registry+https://github.com/rust-lang/crates.io-index)",
+ "log 0.4.6 (registry+https://github.com/rust-lang/crates.io-index)",
+ "parking_lot 0.9.0 (registry+https://github.com/rust-lang/crates.io-index)",
+ "raw-window-handle 0.3.0 (registry+https://github.com/rust-lang/crates.io-index)",
+ "rendy-descriptor 0.5.1 (registry+https://github.com/rust-lang/crates.io-index)",
+ "rendy-memory 0.5.1 (registry+https://github.com/rust-lang/crates.io-index)",
+ "serde 1.0.88 (registry+https://github.com/rust-lang/crates.io-index)",
+ "smallvec 0.6.10 (registry+https://github.com/rust-lang/crates.io-index)",
+ "vec_map 0.8.0 (registry+https://github.com/rust-lang/crates.io-index)",
+]
+
+[[package]]
+name = "wgpu-remote"
+version = "0.1.0"
+dependencies = [
+ "log 0.4.6 (registry+https://github.com/rust-lang/crates.io-index)",
+ "parking_lot 0.9.0 (registry+https://github.com/rust-lang/crates.io-index)",
+ "wgpu-native 0.4.0",
+]
+
 [[package]]
 name = "winapi"
 version = "0.2.8"
@@ -4000,6 +4322,14 @@ dependencies = [
 "winapi 0.3.7 (registry+https://github.com/rust-lang/crates.io-index)",
 ]
 
+[[package]]
+name = "wio"
+version = "0.2.2"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+dependencies = [
+ "winapi 0.3.7 (registry+https://github.com/rust-lang/crates.io-index)",
+]
+
 [[package]]
 name = "wr_malloc_size_of"
 version = "0.0.1"
@@ -4034,6 +4364,15 @@ dependencies = [
 "winapi-build 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)",
 ]
 
+[[package]]
+name = "x11"
+version = "2.18.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+dependencies = [
+ "libc 0.2.59 (registry+https://github.com/rust-lang/crates.io-index)",
+ "pkg-config 0.3.9 (registry+https://github.com/rust-lang/crates.io-index)",
+]
+
 [[package]]
 name = "xfailure"
 version = "0.1.0"
@@ -4121,14 +4460,16 @@ dependencies = [
 "checksum ansi_term 0.11.0 (registry+https://github.com/rust-lang/crates.io-index)" = "ee49baf6cb617b853aa8d93bf420db2383fab46d314482ca2803b40d5fde979b"
 "checksum app_units 0.7.0 (registry+https://github.com/rust-lang/crates.io-index)" = "9dadc668390b373e73e4abbfc1f07238b09a25858f2f39c06cebc6d8e141d774"
 "checksum arrayref 0.3.5 (registry+https://github.com/rust-lang/crates.io-index)" = "0d382e583f07208808f6b1249e60848879ba3543f57c32277bf52d69c2f0f0ee"
-"checksum arrayvec 0.4.11 (registry+https://github.com/rust-lang/crates.io-index)" = "b8d73f9beda665eaa98ab9e4f7442bd4e7de6652587de55b2525e52e29c1b0ba"
+"checksum arrayvec 0.5.1 (registry+https://github.com/rust-lang/crates.io-index)" = "cff77d8686867eceff3105329d4698d96c2391c176d5d03adc90c7389162b5b8"
+"checksum ash 0.29.0 (registry+https://github.com/rust-lang/crates.io-index)" = "003d1fb2eb12eb06d4a03dbe02eea67a9fac910fa97932ab9e3a75b96a1ea5e5"
+"checksum atom 0.3.5 (registry+https://github.com/rust-lang/crates.io-index)" = "3c86699c3f02778ec07158376991c8f783dd1f2f95c579ffaf0738dc984b2fe2"
 "checksum atomic 0.4.5 (registry+https://github.com/rust-lang/crates.io-index)" = "c210c1f4db048cda477b652d170572d84c9640695835f17663595d3bd543fc28"
 "checksum atomic_refcell 0.1.0 (registry+https://github.com/rust-lang/crates.io-index)" = "fb2dcb6e6d35f20276943cc04bb98e538b348d525a04ac79c10021561d202f21"
 "checksum atty 0.2.11 (registry+https://github.com/rust-lang/crates.io-index)" = "9a7d5b8723950951411ee34d271d99dddcc2035a16ab25310ea2c8cfd4369652"
 "checksum audio_thread_priority 0.20.2 (registry+https://github.com/rust-lang/crates.io-index)" = "197b2d259505d11c92d266e1784f01cc935eb764d2f54e16aedf4e5085197871"
 "checksum authenticator 0.2.6 (registry+https://github.com/rust-lang/crates.io-index)" = "ec149e5d5d4caa2c9ead53a8ce1ea9c4204c388c65bf3b96c2d1dc0fcf4aeb66"
 "checksum autocfg 0.1.6 (registry+https://github.com/rust-lang/crates.io-index)" = "b671c8fb71b457dd4ae18c4ba1e59aa81793daacc361d82fcd410cef0d491875"
-"checksum backtrace 0.3.9 (registry+https://github.com/rust-lang/crates.io-index)" = "89a47830402e9981c5c41223151efcced65a0510c13097c769cede7efb34782a"
+"checksum backtrace 0.3.38 (registry+https://github.com/rust-lang/crates.io-index)" = "690a62be8920ccf773ee00ef0968649b0e724cda8bd5b12286302b4ae955fdf5"
 "checksum backtrace-sys 0.1.24 (registry+https://github.com/rust-lang/crates.io-index)" = "c66d56ac8dabd07f6aacdaf633f4b8262f5b3601a810a0dcddffd5c22c69daa0"
 "checksum base64 0.10.1 (registry+https://github.com/rust-lang/crates.io-index)" = "0b25d992356d2eb0ed82172f5248873db5560c4721f564b13cb5193bda5e668e"
 "checksum binary-space-partition 0.1.2 (registry+https://github.com/rust-lang/crates.io-index)" = "88ceb0d16c4fd0e42876e298d7d3ce3780dd9ebdcbe4199816a32c77e08597ff"
@@ -4139,7 +4480,8 @@ dependencies = [
 "checksum bit_reverse 0.1.7 (registry+https://github.com/rust-lang/crates.io-index)" = "5e97e02db5a2899c0377f3d6031d5da8296ca2b47abef6ed699de51b9e40a28c"
 "checksum bitflags 1.2.0 (registry+https://github.com/rust-lang/crates.io-index)" = "8a606a02debe2813760609f57a64a2ffd27d9fdf5b2f133eaca0b248dd92cdd2"
 "checksum bitreader 0.3.0 (registry+https://github.com/rust-lang/crates.io-index)" = "80b13e2ab064ff3aa0bdbf1eff533f9822dc37899821f5f98c67f263eab51707"
-"checksum blake2b_simd 0.5.8 (registry+https://github.com/rust-lang/crates.io-index)" = "5850aeee1552f495dd0250014cf64b82b7c8879a89d83b33bbdace2cc4f63182"
+"checksum blake2b_simd 0.5.9 (registry+https://github.com/rust-lang/crates.io-index)" = "b83b7baab1e671718d78204225800d6b170e648188ac7dc992e9d6bddf87d0c0"
+"checksum block 0.1.6 (registry+https://github.com/rust-lang/crates.io-index)" = "0d8c1fef690941d3e7788d328517591fecc684c084084702d6ff1641e993699a"
 "checksum block-buffer 0.7.3 (registry+https://github.com/rust-lang/crates.io-index)" = "c0940dc441f31689269e10ac70eb1002a3a1d3ad1390e030043662eb7fe4688b"
 "checksum block-padding 0.1.2 (registry+https://github.com/rust-lang/crates.io-index)" = "4fc4358306e344bf9775d0197fd00d2603e5afb0771bb353538630f022068ea3"
 "checksum boxfnonce 0.0.3 (registry+https://github.com/rust-lang/crates.io-index)" = "8380105befe91099e6f69206164072c05bc92427ff6aa8a5171388317346dd75"
@@ -4147,7 +4489,7 @@ dependencies = [
 "checksum byteorder 1.3.1 (registry+https://github.com/rust-lang/crates.io-index)" = "a019b10a2a7cdeb292db131fc8113e57ea2a908f6e7894b0c3c671893b65dbeb"
 "checksum bytes 0.4.9 (registry+https://github.com/rust-lang/crates.io-index)" = "e178b8e0e239e844b083d5a0d4a156b2654e67f9f80144d48398fcd736a24fb8"
 "checksum cast 0.2.2 (registry+https://github.com/rust-lang/crates.io-index)" = "926013f2860c46252efceabb19f4a6b308197505082c609025aa6706c011d427"
-"checksum cc 1.0.34 (registry+https://github.com/rust-lang/crates.io-index)" = "30f813bf45048a18eda9190fd3c6b78644146056740c43172a5a3699118588fd"
+"checksum cc 1.0.47 (registry+https://github.com/rust-lang/crates.io-index)" = "aa87058dce70a3ff5621797f1506cb837edd02ac4c0ae642b4542dce802908b8"
 "checksum cexpr 0.3.3 (registry+https://github.com/rust-lang/crates.io-index)" = "8fc0086be9ca82f7fc89fc873435531cb898b86e850005850de1f820e2db6e9b"
 "checksum cfg-if 0.1.6 (registry+https://github.com/rust-lang/crates.io-index)" = "082bb9b28e00d3c9d39cc03e64ce4cea0f1bb9b3fde493f0cbc008472d22bdf4"
 "checksum chrono 0.4.6 (registry+https://github.com/rust-lang/crates.io-index)" = "45912881121cb26fad7c38c17ba7daa18764771836b34fab7d3fbd93ed633878"
@@ -4155,12 +4497,15 @@ dependencies = [
 "checksum clap 2.31.2 (registry+https://github.com/rust-lang/crates.io-index)" = "f0f16b89cbb9ee36d87483dc939fe9f1e13c05898d56d7b230a0d4dff033a536"
 "checksum cloudabi 0.0.3 (registry+https://github.com/rust-lang/crates.io-index)" = "ddfc5b9aa5d4507acaf872de71051dfd0e309860e88966e1051e462a077aac4f"
"ddfc5b9aa5d4507acaf872de71051dfd0e309860e88966e1051e462a077aac4f" "checksum cmake 0.1.29 (registry+https://github.com/rust-lang/crates.io-index)" = "56d741ea7a69e577f6d06b36b7dff4738f680593dc27a701ffa8506b73ce28bb" +"checksum cocoa 0.19.0 (registry+https://github.com/rust-lang/crates.io-index)" = "8cd20045e880893b4a8286d5639e9ade85fb1f6a14c291f882cf8cf2149d37d9" +"checksum colorful 0.2.1 (registry+https://github.com/rust-lang/crates.io-index)" = "0bca1619ff57dd7a56b58a8e25ef4199f123e78e503fe1653410350a1b98ae65" "checksum comedy 0.1.0 (registry+https://github.com/rust-lang/crates.io-index)" = "6d4f03fbb05a4df3523a44cda10340e6ae6bea03ee9d01240a1a2c1ef6c73e95" "checksum constant_time_eq 0.1.3 (registry+https://github.com/rust-lang/crates.io-index)" = "8ff012e225ce166d4422e0e78419d901719760f62ae2b7969ca6b564d1b54a9e" "checksum cookie 0.12.0 (registry+https://github.com/rust-lang/crates.io-index)" = "888604f00b3db336d2af898ec3c1d5d0ddf5e6d462220f2ededc33a87ac4bbd5" +"checksum copyless 0.1.4 (registry+https://github.com/rust-lang/crates.io-index)" = "6ff9c56c9fb2a49c05ef0e431485a22400af20d33226dc0764d891d09e724127" "checksum core-foundation 0.6.3 (registry+https://github.com/rust-lang/crates.io-index)" = "4e2640d6d0bf22e82bed1b73c6aef8d5dd31e5abe6666c57e6d45e2649f4f887" "checksum core-foundation-sys 0.6.2 (registry+https://github.com/rust-lang/crates.io-index)" = "e7ca8a5221364ef15ce201e8ed2f609fc312682a8f4e0e3d4aa5879764e0fa3b" -"checksum core-graphics 0.17.1 (registry+https://github.com/rust-lang/crates.io-index)" = "62ceafe1622ffc9a332199096841d0ff9912ec8cf8f9cde01e254a7d5217cd10" +"checksum core-graphics 0.17.3 (registry+https://github.com/rust-lang/crates.io-index)" = "56790968ab1c8a1202a102e6de05fc6e1ec87da99e4e93e9a7d13efbfc1e95a9" "checksum core-text 13.0.0 (registry+https://github.com/rust-lang/crates.io-index)" = "f3f46450d6f2397261af420b4ccce23807add2e45fa206410a03d66fb7f050ae" "checksum coreaudio-sys 0.2.3 (registry+https://github.com/rust-lang/crates.io-index)" = "7e8f5954c1c7ccb55340443e8b29fca24013545a5e7d72c1ca7db4fc02b982ce" "checksum cose 0.1.4 (registry+https://github.com/rust-lang/crates.io-index)" = "72fa26cb151d3ae4b70f63d67d0fed57ce04220feafafbae7f503bef7aae590d" @@ -4174,10 +4519,11 @@ dependencies = [ "checksum cranelift-frontend 0.46.1 (git+https://github.com/CraneStation/Cranelift?rev=da179e4fd83d49b7ad6c9f286b1ea04d4f64907e)" = "" "checksum cranelift-wasm 0.46.1 (git+https://github.com/CraneStation/Cranelift?rev=da179e4fd83d49b7ad6c9f286b1ea04d4f64907e)" = "" "checksum crc32fast 1.2.0 (registry+https://github.com/rust-lang/crates.io-index)" = "ba125de2af0df55319f41944744ad91c71113bf74a4646efff39afe1f6842db1" -"checksum crossbeam-deque 0.7.1 (registry+https://github.com/rust-lang/crates.io-index)" = "b18cd2e169ad86297e6bc0ad9aa679aee9daa4f19e8163860faf7c164e4f5a71" -"checksum crossbeam-epoch 0.7.2 (registry+https://github.com/rust-lang/crates.io-index)" = "fedcd6772e37f3da2a9af9bf12ebe046c0dfe657992377b4df982a2b54cd37a9" +"checksum crossbeam-deque 0.7.2 (registry+https://github.com/rust-lang/crates.io-index)" = "c3aa945d63861bfe624b55d153a39684da1e8c0bc8fba932f7ee3a3c16cea3ca" +"checksum crossbeam-epoch 0.8.0 (registry+https://github.com/rust-lang/crates.io-index)" = "5064ebdbf05ce3cb95e45c8b086f72263f4166b29b97f6baff7ef7fe047b55ac" "checksum crossbeam-queue 0.1.2 (registry+https://github.com/rust-lang/crates.io-index)" = "7c979cd6cfe72335896575c6b5688da489e420d36a27a0b9eb0c73db574b4a4b" "checksum crossbeam-utils 0.6.5 
(registry+https://github.com/rust-lang/crates.io-index)" = "f8306fcef4a7b563b76b7dd949ca48f52bc1141aa067d2ea09565f3e2652aa5c" +"checksum crossbeam-utils 0.7.0 (registry+https://github.com/rust-lang/crates.io-index)" = "ce446db02cdc3165b94ae73111e570793400d0794e46125cc4056c81cbb039f4" "checksum cssparser 0.25.9 (registry+https://github.com/rust-lang/crates.io-index)" = "fbe18ca4efb9ba3716c6da66cc3d7e673bf59fa576353011f48c4cfddbdd740e" "checksum cssparser-macros 0.3.6 (registry+https://github.com/rust-lang/crates.io-index)" = "5bb1c84e87c717666564ec056105052331431803d606bd45529b28547b611eef" "checksum cstr 0.1.3 (registry+https://github.com/rust-lang/crates.io-index)" = "b6557bdb1dc9647eae1cf7f5601b14cd45fc3c7ccf2df618387416fe542da6ea" @@ -4186,6 +4532,7 @@ dependencies = [ "checksum cubeb-backend 0.6.2 (registry+https://github.com/rust-lang/crates.io-index)" = "5a1e7add4e7642a8aebb24172922318482bed52389a12cb339f728bbd4c4ed9c" "checksum cubeb-core 0.6.2 (registry+https://github.com/rust-lang/crates.io-index)" = "bfd9b2ea1cb6afed9419b0d18fc4093df552ccb2300eb57793629f8cd370b4c8" "checksum cubeb-sys 0.6.2 (registry+https://github.com/rust-lang/crates.io-index)" = "309c5839c5fa03c08363bd308566cbe4654b25a9984342d7546a33d55b80a3d6" +"checksum d3d12 0.3.0 (registry+https://github.com/rust-lang/crates.io-index)" = "bc7ed48e89905e5e146bcc1951cc3facb9e44aea9adf5dc01078cda1bd24b662" "checksum darling 0.10.1 (registry+https://github.com/rust-lang/crates.io-index)" = "3fe629a532efad5526454efb0700f86d5ad7ff001acb37e431c8bf017a432a8e" "checksum darling_core 0.10.1 (registry+https://github.com/rust-lang/crates.io-index)" = "ee54512bec54b41cf2337a22ddfadb53c7d4c738494dc2a186d7b037ad683b85" "checksum darling_macro 0.10.1 (registry+https://github.com/rust-lang/crates.io-index)" = "0cd3e432e52c0810b72898296a69d66b1d78d1517dff6cde7a130557a55a62c1" @@ -4223,6 +4570,13 @@ dependencies = [ "checksum fxhash 0.2.1 (registry+https://github.com/rust-lang/crates.io-index)" = "c31b6d751ae2c7f11320402d34e41349dd1016f8d5d45e48c4312bc8625af50c" "checksum generic-array 0.12.0 (registry+https://github.com/rust-lang/crates.io-index)" = "3c0f28c2f5bfb5960175af447a2da7c18900693738343dc896ffbcabd9839592" "checksum getrandom 0.1.3 (registry+https://github.com/rust-lang/crates.io-index)" = "8d1dffef07351aafe6ef177e4dd2b8dcf503e6bc765dea3b0de9ed149a3db1ec" +"checksum gfx-auxil 0.1.0 (registry+https://github.com/rust-lang/crates.io-index)" = "572eee952a9a23c99cfe3e4fd95d277784058a89ac3c77ff6fa3d80a4e321919" +"checksum gfx-backend-dx11 0.4.2 (registry+https://github.com/rust-lang/crates.io-index)" = "c66c77836ff26cf9916e5c8745715a22eae1fc61d994ffa0bea8a7dbd708ece2" +"checksum gfx-backend-dx12 0.4.1 (registry+https://github.com/rust-lang/crates.io-index)" = "b6e913cc800fb12eaba2c420091a02aca9aafbefd672600dfc5b52654343d341" +"checksum gfx-backend-empty 0.4.0 (registry+https://github.com/rust-lang/crates.io-index)" = "3d383e6bc48867cb37d298a20139fd1eec298f8f6d594690cd1c50ef25470cc7" +"checksum gfx-backend-metal 0.4.0 (registry+https://github.com/rust-lang/crates.io-index)" = "8de5c71f18ba805c95b84d6c78c472ef44485a6fc46e3b49fe1e6739c8d7b0c0" +"checksum gfx-backend-vulkan 0.4.0 (registry+https://github.com/rust-lang/crates.io-index)" = "62538fedd66a78968a162e8e1a29d085ffbc97f8782634684b2f7da7aea59207" +"checksum gfx-hal 0.4.0 (registry+https://github.com/rust-lang/crates.io-index)" = "977716fea7800ab5bc9a1e048dd2f72b23af166d8c2f48c6fb6d1ce37d77ca7e" "checksum gl_generator 0.11.0 (registry+https://github.com/rust-lang/crates.io-index)" = 
"39a23d5e872a275135d66895d954269cf5e8661d234eb1c2480f4ce0d586acbd" "checksum gleam 0.6.17 (registry+https://github.com/rust-lang/crates.io-index)" = "7f46fd8874e043ffac0d638ed1567a2584f7814f6d72b4db37ab1689004a26c4" "checksum glob 0.3.0 (registry+https://github.com/rust-lang/crates.io-index)" = "9b919933a397b79c37e33b77bb2aa3dc8eb6e165ad809e58ff75bc7db2e34574" @@ -4232,6 +4586,7 @@ dependencies = [ "checksum headers 0.2.1 (registry+https://github.com/rust-lang/crates.io-index)" = "dc6e2e51d356081258ef05ff4c648138b5d3fe64b7300aaad3b820554a2b7fb6" "checksum headers-core 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)" = "967131279aaa9f7c20c7205b45a391638a83ab118e6509b2d0ccbe08de044237" "checksum headers-derive 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)" = "f33cf300c485e3cbcba0235013fcc768723451c9b84d1b31aa7fec0491ac9a11" +"checksum hibitset 0.6.2 (registry+https://github.com/rust-lang/crates.io-index)" = "47e7292fd9f7fe89fa35c98048f2d0a69b79ed243604234d18f6f8a1aa6f408d" "checksum http 0.1.17 (registry+https://github.com/rust-lang/crates.io-index)" = "eed324f0f0daf6ec10c474f150505af2c143f251722bf9dbd1261bd1f2ee2c1a" "checksum httparse 1.3.3 (registry+https://github.com/rust-lang/crates.io-index)" = "e8734b0cfd3bc3e101ec59100e101c2eecd19282202e87808b3037b442777a83" "checksum humantime 1.1.1 (registry+https://github.com/rust-lang/crates.io-index)" = "0484fda3e7007f2a4a0d9c3a703ca38c71c54c55602ce4660c419fd32e188c9e" @@ -4266,12 +4621,14 @@ dependencies = [ "checksum lucet-wasi 0.1.1 (git+https://github.com/PLSysSec/lucet_sandbox_compiler)" = "" "checksum lzw 0.10.0 (registry+https://github.com/rust-lang/crates.io-index)" = "7d947cbb889ed21c2a84be6ffbaebf5b4e0f4340638cba0444907e38b56be084" "checksum mach 0.3.2 (registry+https://github.com/rust-lang/crates.io-index)" = "b823e83b2affd8f40a9ee8c29dbc56404c1e34cd2710921f2801e2cf29527afa" +"checksum malloc_buf 0.0.6 (registry+https://github.com/rust-lang/crates.io-index)" = "62bb907fe88d54d8d9ce32a3cceab4218ed2f6b7d35617cafe9adf84e43919cb" "checksum malloc_size_of_derive 0.1.0 (registry+https://github.com/rust-lang/crates.io-index)" = "35adee9ed962cf7d07d62cb58bc45029f3227f5b5b86246caa8632f06c187bc3" "checksum mapped_hyph 0.3.0 (git+https://github.com/jfkthame/mapped_hyph.git?tag=v0.3.0)" = "" "checksum matches 0.1.6 (registry+https://github.com/rust-lang/crates.io-index)" = "100aabe6b8ff4e4a7e32c1c13523379802df0772b82466207ac25b013f193376" "checksum memchr 2.2.0 (registry+https://github.com/rust-lang/crates.io-index)" = "2efc7bc57c883d4a4d6e3246905283d8dae951bb3bd32f49d6ef297f546e1c39" "checksum memmap 0.7.0 (registry+https://github.com/rust-lang/crates.io-index)" = "6585fd95e7bb50d6cc31e20d4cf9afb4e2ba16c5846fc76793f11218da9c475b" "checksum memoffset 0.5.1 (registry+https://github.com/rust-lang/crates.io-index)" = "ce6075db033bbbb7ee5a0bbd3a3186bbae616f57fb001c485c7ff77955f8177f" +"checksum metal 0.17.0 (registry+https://github.com/rust-lang/crates.io-index)" = "ddf8052f20601c7af6293d3f7bf7b9159aee5974804fe65d871d437f933ec1eb" "checksum mime 0.3.13 (registry+https://github.com/rust-lang/crates.io-index)" = "3e27ca21f40a310bd06d9031785f4801710d566c184a6e15bad4f1d9b65f9425" "checksum mime_guess 2.0.1 (registry+https://github.com/rust-lang/crates.io-index)" = "1a0ed03949aef72dbdf3116a383d7b38b4768e6f960528cd6a6044aa9ed68599" "checksum miniz-sys 0.1.12 (registry+https://github.com/rust-lang/crates.io-index)" = "1e9e3ae51cea1576ceba0dde3d484d30e6e5b86dee0b2d412fe3a16a15c98202" @@ -4304,6 +4661,8 @@ dependencies = [ 
"checksum num-rational 0.2.1 (registry+https://github.com/rust-lang/crates.io-index)" = "4e96f040177bb3da242b5b1ecf3f54b5d5af3efbbfb18608977a5d2767b22f10" "checksum num-traits 0.2.6 (registry+https://github.com/rust-lang/crates.io-index)" = "0b3a5d7cc97d6d30d8b9bc8fa19bf45349ffe46241e8816f50f62f6d6aaabee1" "checksum num_cpus 1.7.0 (registry+https://github.com/rust-lang/crates.io-index)" = "514f0d73e64be53ff320680ca671b64fe3fb91da01e1ae2ddc99eb51d453b20d" +"checksum objc 0.2.6 (registry+https://github.com/rust-lang/crates.io-index)" = "31d20fd2b37e07cf5125be68357b588672e8cefe9a96f8c17a9d46053b3e590d" +"checksum objc_exception 0.1.2 (git+https://github.com/kvark/rust-objc-exception?branch=cc)" = "" "checksum object 0.14.0 (registry+https://github.com/rust-lang/crates.io-index)" = "81afbc5773e99efe9533d8a539dfac37e531dcd0f4eeb41584bae03ccf76d4c2" "checksum once_cell 1.2.0 (registry+https://github.com/rust-lang/crates.io-index)" = "891f486f630e5c5a4916c7e16c4b24a53e78c860b646e9f8e005e4f16847bfed" "checksum opaque-debug 0.2.1 (registry+https://github.com/rust-lang/crates.io-index)" = "51ecbcb821e1bd256d456fe858aaa7f380b63863eab2eb86eee1bd9f33dd6682" @@ -4344,6 +4703,8 @@ dependencies = [ "checksum rand_os 0.1.3 (registry+https://github.com/rust-lang/crates.io-index)" = "7b75f676a1e053fc562eafbb47838d67c84801e38fc1ba459e8f180deabd5071" "checksum rand_pcg 0.1.2 (registry+https://github.com/rust-lang/crates.io-index)" = "abf9b09b01790cfe0364f52bf32995ea3c39f4d2dd011eac241d2914146d0b44" "checksum rand_xorshift 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)" = "cbf7e9e623549b0e21f6e97cf8ecf247c1a8fd2e8a992ae265314300b2455d5c" +"checksum range-alloc 0.1.0 (registry+https://github.com/rust-lang/crates.io-index)" = "dd5927936723a9e8b715d37d7e4b390455087c4bdf25b9f702309460577b14f9" +"checksum raw-window-handle 0.3.0 (registry+https://github.com/rust-lang/crates.io-index)" = "2e815b85b31e4d397ca9dd8eb1d692e9cb458b9f6ae8ac2232c995dca8236f87" "checksum rayon 1.2.0 (registry+https://github.com/rust-lang/crates.io-index)" = "83a27732a533a1be0a0035a111fe76db89ad312f6f0347004c220c57f209a123" "checksum rayon-core 1.6.0 (registry+https://github.com/rust-lang/crates.io-index)" = "98dcf634205083b17d0861252431eb2acbfb698ab7478a2d20de07954f47ec7b" "checksum rdrand 0.4.0 (registry+https://github.com/rust-lang/crates.io-index)" = "678054eb77286b51581ba43620cc911abf02758c91f93f479767aed0f90458b2" @@ -4352,7 +4713,10 @@ dependencies = [ "checksum redox_users 0.3.1 (registry+https://github.com/rust-lang/crates.io-index)" = "4ecedbca3bf205f8d8f5c2b44d83cd0690e39ee84b951ed649e9f1841132b66d" "checksum regex 1.1.9 (registry+https://github.com/rust-lang/crates.io-index)" = "d9d8297cc20bbb6184f8b45ff61c8ee6a9ac56c156cec8e38c3e5084773c44ad" "checksum regex-syntax 0.6.12 (registry+https://github.com/rust-lang/crates.io-index)" = "11a7e20d1cce64ef2fed88b66d347f88bd9babb82845b2b858f3edbf59a4f716" +"checksum relevant 0.4.2 (registry+https://github.com/rust-lang/crates.io-index)" = "bbc232e13d37f4547f5b9b42a5efc380cabe5dbc1807f8b893580640b2ab0308" "checksum remove_dir_all 0.5.2 (registry+https://github.com/rust-lang/crates.io-index)" = "4a83fa3702a688b9359eccba92d153ac33fd2e8462f9e0e3fdf155239ea7792e" +"checksum rendy-descriptor 0.5.1 (registry+https://github.com/rust-lang/crates.io-index)" = "f475bcc0505946e998590f1f0545c52ef4b559174a1b353a7ce6638def8b621e" +"checksum rendy-memory 0.5.1 (registry+https://github.com/rust-lang/crates.io-index)" = "08f99de535d9e48d9cfab780b521702cc0d7183d354872d223967b75abae1199" 
"checksum ringbuf 0.1.4 (registry+https://github.com/rust-lang/crates.io-index)" = "588456c74d5ff0a5806bc084818e043e767533f743c11ee6f3ccf298599c6847" "checksum rkv 0.10.2 (registry+https://github.com/rust-lang/crates.io-index)" = "9aab7c645d32e977e186448b0a5c2c3139a91a7f630cfd8a8c314d1d145e78bf" "checksum rlbox_lucet_sandbox 0.1.0 (git+https://github.com/PLSysSec/rlbox_lucet_sandbox/?rev=997c648eb0eaeaaa7a00a9eee20431f750b4e190)" = "" @@ -4385,6 +4749,7 @@ dependencies = [ "checksum serde_yaml 0.8.9 (registry+https://github.com/rust-lang/crates.io-index)" = "38b08a9a90e5260fe01c6480ec7c811606df6d3a660415808c3c3fa8ed95b582" "checksum sha-1 0.8.1 (registry+https://github.com/rust-lang/crates.io-index)" = "23962131a91661d643c98940b20fcaffe62d776a823247be80a48fcb8b6fce68" "checksum sha2 0.8.0 (registry+https://github.com/rust-lang/crates.io-index)" = "7b4d8bfd0e469f417657573d8451fb33d16cfe0989359b93baf3a1ffc639543d" +"checksum shared_library 0.1.9 (registry+https://github.com/rust-lang/crates.io-index)" = "5a9e7e0f2bfae24d8a5b5a66c5b257a83c7412304311512a0c054cd5e619da11" "checksum shift_or_euc 0.1.0 (registry+https://github.com/rust-lang/crates.io-index)" = "f930dea4685b9803954b9d74cdc175c6d946a22f2eafe5aa2e9a58cdcae7da8c" "checksum shift_or_euc_c 0.1.0 (registry+https://github.com/rust-lang/crates.io-index)" = "c81ec08c8a68c45c48d8ef58b80ce038cc9945891c4a4996761e2ec5cba05abc" "checksum shlex 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)" = "7fdf1b9db47230893d76faad238fd6097fd6d6a9245cd7a4d90dbd639536bbd2" @@ -4394,7 +4759,9 @@ dependencies = [ "checksum smallbitvec 2.3.0 (registry+https://github.com/rust-lang/crates.io-index)" = "1764fe2b30ee783bfe3b9b37b2649d8d590b3148bb12e0079715d4d5c673562e" "checksum smallvec 0.6.10 (registry+https://github.com/rust-lang/crates.io-index)" = "ab606a9c5e214920bb66c458cd7be8ef094f813f20fe77a54cc7dbfff220d4b7" "checksum socket2 0.3.10 (registry+https://github.com/rust-lang/crates.io-index)" = "df028e0e632c2a1823d920ad74895e7f9128e6438cbc4bc6fd1f180e644767b9" +"checksum spirv_cross 0.16.0 (git+https://github.com/kvark/spirv_cross?branch=wgpu)" = "" "checksum stable_deref_trait 1.0.0 (registry+https://github.com/rust-lang/crates.io-index)" = "15132e0e364248108c5e2c02e3ab539be8d6f5d52a01ca9bbf27ed657316f02b" +"checksum storage-map 0.2.0 (registry+https://github.com/rust-lang/crates.io-index)" = "fd0a4829a5c591dc24a944a736d6b1e4053e51339a79fd5d4702c4c999a9c45e" "checksum string 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)" = "00caf261d6f90f588f8450b8e1230fa0d5be49ee6140fdfbcb55335aff350970" "checksum strsim 0.7.0 (registry+https://github.com/rust-lang/crates.io-index)" = "bb4f380125926a99e52bc279241539c018323fab05ad6368b56f93d9369ff550" "checksum svg_fmt 0.4.0 (registry+https://github.com/rust-lang/crates.io-index)" = "c666f0fed8e1e20e057af770af9077d72f3d5a33157b8537c1475dd8ffd6d32b" @@ -4430,7 +4797,7 @@ dependencies = [ "checksum toml 0.4.5 (registry+https://github.com/rust-lang/crates.io-index)" = "a7540f4ffc193e0d3c94121edb19b055670d369f77d5804db11ae053a45b6e7e" "checksum try-lock 0.2.2 (registry+https://github.com/rust-lang/crates.io-index)" = "e604eb7b43c06650e854be16a2a03155743d3752dd1c943f6829e26b7a36e382" "checksum typenum 1.10.0 (registry+https://github.com/rust-lang/crates.io-index)" = "612d636f949607bdf9b123b4a6f6d966dedf3ff669f7f045890d3a4a73948169" -"checksum uluru 0.3.0 (registry+https://github.com/rust-lang/crates.io-index)" = "d2606e9192f308ddc4f0b3c5d1bf3400e28a70fff956e9d9f46d23b094746d9f" +"checksum uluru 0.4.0 
(registry+https://github.com/rust-lang/crates.io-index)" = "6d7b39d0c32eba57d52d334e4bdd150df6e755264eefaa1ae2e7cd125f35e1ca" "checksum unicase 2.4.0 (registry+https://github.com/rust-lang/crates.io-index)" = "a84e5511b2a947f3ae965dcb29b13b7b1691b6e7332cf5dbc1744138d5acb7f6" "checksum unicode-bidi 0.3.4 (registry+https://github.com/rust-lang/crates.io-index)" = "49f2bd0c6468a8230e1db229cff8029217cf623c767ea5d60bfbd42729ea54d5" "checksum unicode-normalization 0.1.5 (registry+https://github.com/rust-lang/crates.io-index)" = "51ccda9ef9efa3f7ef5d91e8f9b83bbe6955f9bf86aec89d5cce2c874625920f" @@ -4461,8 +4828,10 @@ dependencies = [ "checksum winapi-x86_64-pc-windows-gnu 0.4.0 (registry+https://github.com/rust-lang/crates.io-index)" = "712e227841d057c1ee1cd2fb22fa7e5a5461ae8e48fa2ca79ec42cfc1931183f" "checksum wincolor 1.0.2 (registry+https://github.com/rust-lang/crates.io-index)" = "96f5016b18804d24db43cebf3c77269e7569b8954a8464501c216cc5e070eaa9" "checksum winreg 0.5.1 (registry+https://github.com/rust-lang/crates.io-index)" = "a27a759395c1195c4cc5cda607ef6f8f6498f64e78f7900f5de0a127a424704a" +"checksum wio 0.2.2 (registry+https://github.com/rust-lang/crates.io-index)" = "5d129932f4644ac2396cb456385cbf9e63b5b30c6e8dc4820bdca4eb082037a5" "checksum ws 0.9.0 (registry+https://github.com/rust-lang/crates.io-index)" = "8a6f5bb86663ff4d1639408410f50bf6050367a8525d644d49a6894cd618a631" "checksum ws2_32-sys 0.2.1 (registry+https://github.com/rust-lang/crates.io-index)" = "d59cefebd0c892fa2dd6de581e937301d8552cb44489cdff035c6187cb63fa5e" +"checksum x11 2.18.1 (registry+https://github.com/rust-lang/crates.io-index)" = "39697e3123f715483d311b5826e254b6f3cfebdd83cf7ef3358f579c3d68e235" "checksum xfailure 0.1.0 (registry+https://github.com/rust-lang/crates.io-index)" = "da90eac47bf1d7871b75004b9b631d107df15f37669383b23f0b5297bc7516b6" "checksum xml-rs 0.8.0 (registry+https://github.com/rust-lang/crates.io-index)" = "541b12c998c5b56aa2b4e6f18f03664eef9a4fd0a246a55594efae6cc2d964b5" "checksum yaml-rust 0.4.2 (registry+https://github.com/rust-lang/crates.io-index)" = "95acf0db5515d07da9965ec0e0ba6cc2d825e2caeb7303b66ca441729801254e" diff --git a/Cargo.toml b/Cargo.toml index 48cc07df45e0..c16e8fc3731e 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -63,6 +63,8 @@ libudev-sys = { path = "dom/webauthn/libudev-sys" } packed_simd = { git = "https://github.com/hsivonen/packed_simd", branch = "rust_1_32" } rlbox_lucet_sandbox = { git = "https://github.com/PLSysSec/rlbox_lucet_sandbox/", rev="997c648eb0eaeaaa7a00a9eee20431f750b4e190" } nix = { git = "https://github.com/shravanrn/nix/", branch = "r0.13.1", rev="4af6c367603869a30fddb5ffb0aba2b9477ba92e" } +spirv_cross = { git = "https://github.com/kvark/spirv_cross", branch = "wgpu" } +objc_exception = { git = "https://github.com/kvark/rust-objc-exception", branch = "cc" } [patch.crates-io.cranelift-codegen] git = "https://github.com/CraneStation/Cranelift" diff --git a/dom/base/nsGlobalWindowInner.cpp b/dom/base/nsGlobalWindowInner.cpp index 20b3474540b8..e16bbb015a83 100644 --- a/dom/base/nsGlobalWindowInner.cpp +++ b/dom/base/nsGlobalWindowInner.cpp @@ -840,7 +840,6 @@ class PromiseDocumentFlushedResolver final { nsGlobalWindowInner::nsGlobalWindowInner(nsGlobalWindowOuter* aOuterWindow, WindowGlobalChild* aActor) : nsPIDOMWindowInner(aOuterWindow, aActor), - mozilla::webgpu::InstanceProvider(this), mWasOffline(false), mHasHadSlowScript(false), mIsChrome(false), @@ -1397,8 +1396,6 @@ NS_IMPL_CYCLE_COLLECTION_TRAVERSE_BEGIN_INTERNAL(nsGlobalWindowInner) 
NS_IMPL_CYCLE_COLLECTION_TRAVERSE(mDocumentFlushedResolvers[i]->mCallback); } - static_cast<mozilla::webgpu::InstanceProvider*>(tmp)->CcTraverse(cb); - NS_IMPL_CYCLE_COLLECTION_TRAVERSE_END NS_IMPL_CYCLE_COLLECTION_UNLINK_BEGIN(nsGlobalWindowInner) @@ -1513,8 +1510,6 @@ NS_IMPL_CYCLE_COLLECTION_UNLINK_BEGIN(nsGlobalWindowInner) } tmp->mDocumentFlushedResolvers.Clear(); - static_cast<mozilla::webgpu::InstanceProvider*>(tmp)->CcUnlink(); - NS_IMPL_CYCLE_COLLECTION_UNLINK_PRESERVED_WRAPPER NS_IMPL_CYCLE_COLLECTION_UNLINK_END
diff --git a/dom/base/nsGlobalWindowInner.h b/dom/base/nsGlobalWindowInner.h index 4295ed642adb..a603ee8f769c 100644 --- a/dom/base/nsGlobalWindowInner.h +++ b/dom/base/nsGlobalWindowInner.h @@ -47,7 +47,6 @@ #include "mozilla/LinkedList.h" #include "mozilla/OwningNonNull.h" #include "mozilla/TimeStamp.h" -#include "mozilla/webgpu/InstanceProvider.h" #include "nsWrapperCacheInlines.h" #include "mozilla/dom/Document.h" #include "mozilla/dom/EventTarget.h" @@ -181,8 +180,7 @@ class nsGlobalWindowInner final : public mozilla::dom::EventTarget, public nsSupportsWeakReference, public nsIInterfaceRequestor, public PRCListStr, - public nsAPostRefreshObserver, - public mozilla::webgpu::InstanceProvider { + public nsAPostRefreshObserver { public: typedef mozilla::dom::BrowsingContext RemoteProxy;
diff --git a/dom/webgpu/Adapter.cpp b/dom/webgpu/Adapter.cpp index caff2d283315..92df2ca2c042 100644 --- a/dom/webgpu/Adapter.cpp +++ b/dom/webgpu/Adapter.cpp @@ -6,15 +6,41 @@ #include "mozilla/dom/WebGPUBinding.h" #include "Adapter.h" +#include "Device.h" #include "Instance.h" +#include "ipc/WebGPUChild.h" +#include "mozilla/dom/Promise.h" namespace mozilla { namespace webgpu { -GPU_IMPL_CYCLE_COLLECTION(Adapter, mParent) +GPU_IMPL_CYCLE_COLLECTION(Adapter, mBridge, mParent) GPU_IMPL_JS_WRAP(Adapter) +Adapter::Adapter(Instance* const aParent, RawId aId) + : ChildOf(aParent), mBridge(aParent->GetBridge()), mId(aId) {} Adapter::~Adapter() = default; +WebGPUChild* Adapter::GetBridge() const { return mBridge; } + +already_AddRefed<dom::Promise> Adapter::RequestDevice( + const dom::GPUDeviceDescriptor& aDesc, ErrorResult& aRv) { + RefPtr<dom::Promise> promise = dom::Promise::Create(GetParentObject(), aRv); + if (NS_WARN_IF(aRv.Failed())) { + return nullptr; + } + + Maybe<RawId> id = mBridge->AdapterRequestDevice(mId, aDesc); + if (id.isSome()) { + RefPtr<Device> device = new Device(this, id.value()); + promise->MaybeResolve(device); + } else { + promise->MaybeRejectWithDOMException(NS_ERROR_DOM_NOT_SUPPORTED_ERR, + "Unable to instantiate a Device"); + } + + return promise.forget(); +} + } }
diff --git a/dom/webgpu/Adapter.h b/dom/webgpu/Adapter.h index 4ed46c67c355..a010ceb93eb9 100644 --- a/dom/webgpu/Adapter.h +++ b/dom/webgpu/Adapter.h @@ -7,6 +7,7 @@ #define GPU_Adapter_H_ #include "mozilla/AlreadyAddRefed.h" +#include "mozilla/webgpu/WebGPUTypes.h" #include "nsString.h" #include "ObjectModel.h" @@ -21,20 +22,28 @@ struct GPUFeatures; namespace webgpu { class Device; class Instance; +class WebGPUChild; class Adapter final : public ObjectBase, public ChildOf<Instance> { public: GPU_DECL_CYCLE_COLLECTION(Adapter) GPU_DECL_JS_WRAP(Adapter) - const nsString mName; - private: Adapter() = delete; virtual ~Adapter(); + const RefPtr<WebGPUChild> mBridge; + const RawId mId; + const nsString mName; + public: + explicit Adapter(Instance* const aParent, RawId aId); void GetName(nsString& out) const { out = mName; } + WebGPUChild* GetBridge() const; + + already_AddRefed<dom::Promise> RequestDevice( + const dom::GPUDeviceDescriptor& aDesc, ErrorResult& aRv); }; }
diff --git a/dom/webgpu/Device.cpp b/dom/webgpu/Device.cpp index
72db539eb17e..04f4c7cdb367 100644 --- a/dom/webgpu/Device.cpp +++ b/dom/webgpu/Device.cpp @@ -7,17 +7,27 @@ #include "Device.h" #include "Adapter.h" +#include "ipc/WebGPUChild.h" namespace mozilla { namespace webgpu { -NS_IMPL_CYCLE_COLLECTION_INHERITED(Device, DOMEventTargetHelper) +NS_IMPL_CYCLE_COLLECTION_INHERITED(Device, DOMEventTargetHelper, mBridge) NS_IMPL_ISUPPORTS_CYCLE_COLLECTION_INHERITED_0(Device, DOMEventTargetHelper) GPU_IMPL_JS_WRAP(Device) -Device::Device(nsIGlobalObject* aGlobal) : DOMEventTargetHelper(aGlobal) {} - -Device::~Device() = default; +Device::Device(Adapter* const aParent, RawId aId) + : DOMEventTargetHelper(aParent->GetParentObject()), + mBridge(aParent->GetBridge()), + mId(aId) { + Unused << mId; +} + +Device::~Device() { + if (mBridge->IsOpen()) { + mBridge->SendDeviceDestroy(mId); + } +} void Device::GetLabel(nsAString& aValue) const { aValue = mLabel; } void Device::SetLabel(const nsAString& aLabel) { mLabel = aLabel; }
diff --git a/dom/webgpu/Device.h b/dom/webgpu/Device.h index d5939fbd869f..c0f1137a66fc 100644 --- a/dom/webgpu/Device.h +++ b/dom/webgpu/Device.h @@ -7,7 +7,7 @@ #define GPU_DEVICE_H_ #include "mozilla/RefPtr.h" - +#include "mozilla/webgpu/WebGPUTypes.h" #include "mozilla/DOMEventTargetHelper.h" namespace mozilla { @@ -55,6 +55,7 @@ class RenderPipeline; class Sampler; class ShaderModule; class Texture; +class WebGPUChild; class Device final : public DOMEventTargetHelper { public: @@ -62,15 +63,16 @@ class Device final : public DOMEventTargetHelper { NS_DECL_CYCLE_COLLECTION_CLASS_INHERITED(Device, DOMEventTargetHelper) GPU_DECL_JS_WRAP(Device) - explicit Device(nsIGlobalObject* aGlobal); - private: Device() = delete; virtual ~Device(); + const RefPtr<WebGPUChild> mBridge; + const RawId mId; nsString mLabel; public: + explicit Device(Adapter* const aParent, RawId aId); void GetLabel(nsAString& aValue) const; void SetLabel(const nsAString& aLabel);
diff --git a/dom/webgpu/Instance.cpp b/dom/webgpu/Instance.cpp index 23bc757890e8..ff610f82ce1c 100644 --- a/dom/webgpu/Instance.cpp +++ b/dom/webgpu/Instance.cpp @@ -6,20 +6,35 @@ #include "Instance.h" #include "Adapter.h" -#include "InstanceProvider.h" +#include "gfxConfig.h" #include "nsIGlobalObject.h" +#include "ipc/WebGPUChild.h" +#include "ipc/WebGPUTypes.h" +#include "mozilla/layers/CompositorBridgeChild.h" namespace mozilla { namespace webgpu { -GPU_IMPL_CYCLE_COLLECTION(Instance, mParent) +GPU_IMPL_CYCLE_COLLECTION(Instance, mBridge, mOwner) -RefPtr<Instance> Instance::Create(nsIGlobalObject* parent) { - return new Instance(parent); +already_AddRefed<Instance> Instance::Create(nsIGlobalObject* aOwner) { + if (!gfx::gfxConfig::IsEnabled(gfx::Feature::WEBGPU)) { + return nullptr; + } + + RefPtr<WebGPUChild> bridge = + layers::CompositorBridgeChild::Get()->GetWebGPUChild(); + if (NS_WARN_IF(!bridge)) { + MOZ_CRASH("Failed to create an IPDL bridge for WebGPU!"); + } + + RefPtr<Instance> result = new Instance(aOwner, bridge); + return result.forget(); } -Instance::Instance(nsIGlobalObject* parent) : mParent(parent) {} +Instance::Instance(nsIGlobalObject* aOwner, WebGPUChild* aBridge) + : mOwner(aOwner), mBridge(aBridge) {} Instance::~Instance() = default; @@ -28,5 +43,36 @@ JSObject* Instance::WrapObject(JSContext* cx, return dom::GPU_Binding::Wrap(cx, this, givenProto); } +WebGPUChild* Instance::GetBridge() const { return mBridge; } + +already_AddRefed<dom::Promise> Instance::RequestAdapter( + const dom::GPURequestAdapterOptions& aOptions, ErrorResult& aRv) { + RefPtr<dom::Promise> promise = dom::Promise::Create(mOwner, aRv); + if (NS_WARN_IF(aRv.Failed())) { + return
nullptr; + } + + RefPtr instance = this; + + mBridge->InstanceRequestAdapter(aOptions)->Then( + GetMainThreadSerialEventTarget(), __func__, + [promise, instance](RawId id) { + MOZ_ASSERT(id != 0); + RefPtr adapter = new Adapter(instance, id); + promise->MaybeResolve(adapter); + }, + [promise](const Maybe& aRv) { + if (aRv.isSome()) { + promise->MaybeRejectWithDOMException(NS_ERROR_DOM_ABORT_ERR, + "Internal communication error!"); + } else { + promise->MaybeRejectWithDOMException(NS_ERROR_DOM_INVALID_STATE_ERR, + "No matching adapter found!"); + } + }); + + return promise.forget(); +} + } } diff --git a/dom/webgpu/Instance.h b/dom/webgpu/Instance.h index a8c46cb67424..61ac1bf6bd1d 100644 --- a/dom/webgpu/Instance.h +++ b/dom/webgpu/Instance.h @@ -19,23 +19,29 @@ struct GPURequestAdapterOptions; namespace webgpu { class Adapter; -class InstanceProvider; +class GPUAdapter; +class WebGPUChild; class Instance final : public nsWrapperCache { public: GPU_DECL_CYCLE_COLLECTION(Instance) GPU_DECL_JS_WRAP(Instance) - nsCOMPtr mParent; + static already_AddRefed Create(nsIGlobalObject* aOwner); - static RefPtr Create(nsIGlobalObject* parent); + already_AddRefed RequestAdapter( + const dom::GPURequestAdapterOptions& aOptions, ErrorResult& aRv); private: - explicit Instance(nsIGlobalObject* parent); + explicit Instance(nsIGlobalObject* aOwner, WebGPUChild* aBridge); virtual ~Instance(); + nsCOMPtr mOwner; + const RefPtr mBridge; + public: - nsIGlobalObject* GetParentObject() const { return mParent.get(); } + nsIGlobalObject* GetParentObject() const { return mOwner; } + WebGPUChild* GetBridge() const; }; } diff --git a/dom/webgpu/InstanceProvider.cpp b/dom/webgpu/InstanceProvider.cpp deleted file mode 100644 index 8545ba43a8be..000000000000 --- a/dom/webgpu/InstanceProvider.cpp +++ /dev/null @@ -1,38 +0,0 @@ - - - - - -#include "InstanceProvider.h" - -#include "Instance.h" - -namespace mozilla { -namespace webgpu { - -InstanceProvider::InstanceProvider(nsIGlobalObject* const global) - : mGlobal(global) {} - -InstanceProvider::~InstanceProvider() = default; - -already_AddRefed InstanceProvider::Webgpu() const { - if (!mInstance) { - const auto inst = Instance::Create(mGlobal); - mInstance = Some(inst); - } - auto ret = mInstance.value(); - return ret.forget(); -} - -void InstanceProvider::CcTraverse( - nsCycleCollectionTraversalCallback& callback) const { - if (mInstance) { - CycleCollectionNoteChild(callback, mInstance.ref().get(), - "webgpu::InstanceProvider::mInstance", 0); - } -} - -void InstanceProvider::CcUnlink() { mInstance = Some(nullptr); } - -} -} diff --git a/dom/webgpu/InstanceProvider.h b/dom/webgpu/InstanceProvider.h deleted file mode 100644 index 097c977d4c39..000000000000 --- a/dom/webgpu/InstanceProvider.h +++ /dev/null @@ -1,55 +0,0 @@ - - - - - -#ifndef GPU_INSTANCE_PROVIDER_H_ -#define GPU_INSTANCE_PROVIDER_H_ - -#include "mozilla/AlreadyAddRefed.h" -#include "mozilla/Maybe.h" -#include "mozilla/RefPtr.h" - -class nsCycleCollectionTraversalCallback; -class nsIGlobalObject; - -namespace mozilla { -namespace webgpu { -class Instance; - -class InstanceProvider { - private: - nsIGlobalObject* const mGlobal; - mutable Maybe> mInstance; - - protected: - explicit InstanceProvider(nsIGlobalObject* global); - virtual ~InstanceProvider(); - - public: - already_AddRefed Webgpu() const; - - nsIGlobalObject* GetParentObject() const { return mGlobal; } - - void CcTraverse(nsCycleCollectionTraversalCallback&) const; - void CcUnlink(); -}; - -template -void 
ImplCycleCollectionTraverse(nsCycleCollectionTraversalCallback& callback, - const Maybe<T>& field, const char* name, - uint32_t flags) { - if (field) { - CycleCollectionNoteChild(callback, field.value(), name, flags); - } -} - -template <typename T> -void ImplCycleCollectionUnlink(Maybe<T>& field) { - field = Nothing(); -} - -} -} - -#endif
diff --git a/dom/webgpu/ObjectModel.h b/dom/webgpu/ObjectModel.h index d84a43fb4636..1c3a0e30d9ca 100644 --- a/dom/webgpu/ObjectModel.h +++ b/dom/webgpu/ObjectModel.h @@ -19,8 +19,8 @@ class ChildOf { public: const RefPtr<T> mParent; - explicit ChildOf( - T* parent = nullptr); + explicit ChildOf(T* const parent); + protected: virtual ~ChildOf();
diff --git a/dom/webgpu/ffi/moz.build b/dom/webgpu/ffi/moz.build new file mode 100644 index 000000000000..be631048e524 --- /dev/null +++ b/dom/webgpu/ffi/moz.build @@ -0,0 +1,32 @@ +# -*- Mode: python; indent-tabs-mode: nil; tab-width: 40 -*- +# vim: set filetype=python: +# This Source Code Form is subject to the terms of the Mozilla Public +# License, v. 2.0. If a copy of the MPL was not distributed with this +# file, You can obtain one at http://mozilla.org/MPL/2.0/. + +EXPORTS.mozilla.webgpu.ffi += [ + 'wgpu.h', +] + +UNIFIED_SOURCES += [ +] + +if CONFIG['COMPILE_ENVIRONMENT']: + GENERATED_FILES += [ + 'wgpu_ffi_generated.h', + ] + + EXPORTS.mozilla.webgpu.ffi += [ + '!wgpu_ffi_generated.h', + ] + + ffi_generated = GENERATED_FILES['wgpu_ffi_generated.h'] + ffi_generated.script = '/layout/style/RunCbindgen.py:generate' + ffi_generated.inputs = [ + '/dom/webgpu/wgpu-remote', + ] + + +include('/ipc/chromium/chromium-config.mozbuild') + +FINAL_LIBRARY = 'xul'
diff --git a/dom/webgpu/ffi/wgpu.h b/dom/webgpu/ffi/wgpu.h new file mode 100644 index 000000000000..abb5752cfda2 --- /dev/null +++ b/dom/webgpu/ffi/wgpu.h @@ -0,0 +1,31 @@ + + + + + + +#ifndef WGPU_h +#define WGPU_h + + +namespace mozilla { +namespace webgpu { +namespace ffi { + +#define WGPU_INLINE +#define WGPU_FUNC +#define WGPU_DESTRUCTOR_SAFE_FUNC + +extern "C" { +#include "wgpu_ffi_generated.h" +} + +#undef WGPU_INLINE +#undef WGPU_FUNC +#undef WGPU_DESTRUCTOR_SAFE_FUNC + +} +} +} + +#endif
diff --git a/dom/webgpu/ipc/PWebGPU.ipdl b/dom/webgpu/ipc/PWebGPU.ipdl new file mode 100644 index 000000000000..145ac1fff965 --- /dev/null +++ b/dom/webgpu/ipc/PWebGPU.ipdl @@ -0,0 +1,38 @@ +/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- + * vim: sw=2 ts=8 et : + */ +/* This Source Code Form is subject to the terms of the Mozilla Public + * License, v. 2.0. If a copy of the MPL was not distributed with this + * file, You can obtain one at http://mozilla.org/MPL/2.0/. */ + +using RawId from "mozilla/webgpu/WebGPUTypes.h"; +using dom::GPURequestAdapterOptions from "mozilla/dom/WebGPUBinding.h"; +using dom::GPUDeviceDescriptor from "mozilla/dom/WebGPUBinding.h"; + +include "mozilla/webgpu/WebGPUSerialize.h"; +include protocol PCompositorBridge; + +namespace mozilla { +namespace webgpu { + +/** + * Represents the connection between a WebGPUChild actor that issues WebGPU + * commands from the content process, and a WebGPUParent in the compositor + * process that runs the commands.
+ */ +async protocol PWebGPU +{ + manager PCompositorBridge; + +parent: + async InstanceRequestAdapter(GPURequestAdapterOptions options, RawId[] ids) returns (RawId adapterId); + async AdapterRequestDevice(RawId selfId, GPUDeviceDescriptor desc, RawId newId); + async DeviceDestroy(RawId selfId); + async Shutdown(); + +child: + async __delete__(); +}; + +} // webgpu +} // mozilla
diff --git a/dom/webgpu/ipc/WebGPUChild.cpp b/dom/webgpu/ipc/WebGPUChild.cpp new file mode 100644 index 000000000000..fed42ebe39fa --- /dev/null +++ b/dom/webgpu/ipc/WebGPUChild.cpp @@ -0,0 +1,84 @@ + + + + + +#include "WebGPUChild.h" +#include "mozilla/dom/WebGPUBinding.h" +#include "mozilla/webgpu/ffi/wgpu.h" + +namespace mozilla { +namespace webgpu { + +NS_IMPL_CYCLE_COLLECTION(WebGPUChild) +NS_IMPL_CYCLE_COLLECTION_ROOT_NATIVE(WebGPUChild, AddRef) +NS_IMPL_CYCLE_COLLECTION_UNROOT_NATIVE(WebGPUChild, Release) + +static ffi::WGPUClient* initialize() { + ffi::WGPUInfrastructure infra = ffi::wgpu_client_new(); + return infra.client; +} + +WebGPUChild::WebGPUChild() : mClient(initialize()), mIPCOpen(false) {} + +WebGPUChild::~WebGPUChild() { + if (mClient) { + ffi::wgpu_client_delete(mClient); + } +} + +RefPtr<RawIdPromise> WebGPUChild::InstanceRequestAdapter( + const dom::GPURequestAdapterOptions& aOptions) { + const int max_ids = 10; + RawId ids[max_ids] = {0}; + unsigned long count = + ffi::wgpu_client_make_adapter_ids(mClient, ids, max_ids); + + auto client = mClient; + nsTArray<RawId> sharedIds; + for (unsigned long i = 0; i != count; ++i) { + sharedIds.AppendElement(ids[i]); + } + + return SendInstanceRequestAdapter(aOptions, sharedIds) + ->Then( + GetCurrentThreadSerialEventTarget(), __func__, + [client, ids, count](const RawId& aId) { + if (aId == 0) { + ffi::wgpu_client_kill_adapter_ids(client, ids, count); + return RawIdPromise::CreateAndReject(Nothing(), __func__); + } else { + + unsigned int i = 0; + while (ids[i] != aId) { + i++; + } + if (i > 0) { + ffi::wgpu_client_kill_adapter_ids(client, ids, i); + } + if (i + 1 < count) { + ffi::wgpu_client_kill_adapter_ids(client, ids + i + 1, + count - i - 1); + } + return RawIdPromise::CreateAndResolve(aId, __func__); + } + }, + [client, ids, count](const ipc::ResponseRejectReason& aReason) { + ffi::wgpu_client_kill_adapter_ids(client, ids, count); + return RawIdPromise::CreateAndReject(Some(aReason), __func__); + }); +} + +Maybe<RawId> WebGPUChild::AdapterRequestDevice( + RawId aSelfId, const dom::GPUDeviceDescriptor& aDesc) { + RawId id = ffi::wgpu_client_make_device_id(mClient, aSelfId); + if (SendAdapterRequestDevice(aSelfId, aDesc, id)) { + return Some(id); + } else { + ffi::wgpu_client_kill_device_id(mClient, id); + return Nothing(); + } +} + +} }
diff --git a/dom/webgpu/ipc/WebGPUChild.h b/dom/webgpu/ipc/WebGPUChild.h new file mode 100644 index 000000000000..057377fdbfe2 --- /dev/null +++ b/dom/webgpu/ipc/WebGPUChild.h @@ -0,0 +1,68 @@ + + + + + +#ifndef WEBGPU_CHILD_H_ +#define WEBGPU_CHILD_H_ + +#include "mozilla/webgpu/PWebGPUChild.h" +#include "mozilla/MozPromise.h" + +namespace mozilla { +namespace dom { +struct GPURequestAdapterOptions; +} +namespace layers { +class CompositorBridgeChild; +} +namespace webgpu { +namespace ffi { +struct WGPUClient; +} + +typedef MozPromise<RawId, Maybe<ipc::ResponseRejectReason>, true> RawIdPromise; + +class WebGPUChild final : public PWebGPUChild { + public: + friend class layers::CompositorBridgeChild; + + NS_DECL_CYCLE_COLLECTION_NATIVE_CLASS(WebGPUChild) + NS_INLINE_DECL_CYCLE_COLLECTING_NATIVE_REFCOUNTING(WebGPUChild) + + public: + explicit WebGPUChild(); + + bool IsOpen()
const { return mIPCOpen; } + RefPtr<RawIdPromise> InstanceRequestAdapter( + const dom::GPURequestAdapterOptions& aOptions); + Maybe<RawId> AdapterRequestDevice(RawId aSelfId, + const dom::GPUDeviceDescriptor& aDesc); + + private: + virtual ~WebGPUChild(); + + + + + + + void AddIPDLReference() { + MOZ_ASSERT(!mIPCOpen); + mIPCOpen = true; + AddRef(); + } + void ReleaseIPDLReference() { + MOZ_ASSERT(mIPCOpen); + mIPCOpen = false; + Release(); + } + + ffi::WGPUClient* const mClient; + bool mIPCOpen; +}; + +} +} + +#endif
diff --git a/dom/webgpu/ipc/WebGPUParent.cpp b/dom/webgpu/ipc/WebGPUParent.cpp new file mode 100644 index 000000000000..ba0a5b771f6f --- /dev/null +++ b/dom/webgpu/ipc/WebGPUParent.cpp @@ -0,0 +1,57 @@ + + + + + +#include "WebGPUParent.h" +#include "mozilla/webgpu/ffi/wgpu.h" + +namespace mozilla { +namespace webgpu { + +WebGPUParent::WebGPUParent() : mContext(ffi::wgpu_server_new()) {} + +WebGPUParent::~WebGPUParent() = default; + +ipc::IPCResult WebGPUParent::RecvInstanceRequestAdapter( + const dom::GPURequestAdapterOptions& aOptions, + const nsTArray<RawId>& aTargetIds, + InstanceRequestAdapterResolver&& resolver) { + ffi::WGPURequestAdapterOptions options = {}; + if (aOptions.mPowerPreference.WasPassed()) { + options.power_preference = static_cast<ffi::WGPUPowerPreference>( + aOptions.mPowerPreference.Value()); + } + + + int8_t index = ffi::wgpu_server_instance_request_adapter( + mContext, &options, aTargetIds.Elements(), aTargetIds.Length()); + if (index >= 0) { + resolver(aTargetIds[index]); + } else { + resolver(0); + } + return IPC_OK(); +} + +ipc::IPCResult WebGPUParent::RecvAdapterRequestDevice( + RawId aSelfId, const dom::GPUDeviceDescriptor& aOptions, RawId aNewId) { + ffi::WGPUDeviceDescriptor desc = {}; + + + ffi::wgpu_server_adapter_request_device(mContext, aSelfId, &desc, aNewId); + return IPC_OK(); +} + +ipc::IPCResult WebGPUParent::RecvDeviceDestroy(RawId aSelfId) { + ffi::wgpu_server_device_destroy(mContext, aSelfId); + return IPC_OK(); +} + +ipc::IPCResult WebGPUParent::RecvShutdown() { + ffi::wgpu_server_delete(const_cast<ffi::WGPUGlobal*>(mContext)); + return IPC_OK(); +} + +} }
diff --git a/dom/webgpu/ipc/WebGPUParent.h b/dom/webgpu/ipc/WebGPUParent.h new file mode 100644 index 000000000000..817a8167a899 --- /dev/null +++ b/dom/webgpu/ipc/WebGPUParent.h @@ -0,0 +1,43 @@ + + + + + +#ifndef WEBGPU_PARENT_H_ +#define WEBGPU_PARENT_H_ + +#include "mozilla/webgpu/PWebGPUParent.h" +#include "WebGPUTypes.h" + +namespace mozilla { +namespace webgpu { +namespace ffi { +struct WGPUGlobal; +} + +class WebGPUParent final : public PWebGPUParent { + NS_INLINE_DECL_THREADSAFE_REFCOUNTING(WebGPUParent) + + public: + explicit WebGPUParent(); + + ipc::IPCResult RecvInstanceRequestAdapter( + const dom::GPURequestAdapterOptions& aOptions, + const nsTArray<RawId>& aTargetIds, + InstanceRequestAdapterResolver&& resolver); + ipc::IPCResult RecvAdapterRequestDevice(RawId aSelfId, + const dom::GPUDeviceDescriptor& aDesc, + RawId aNewId); + ipc::IPCResult RecvDeviceDestroy(RawId aSelfId); + ipc::IPCResult RecvShutdown(); + + private: + virtual ~WebGPUParent(); + + const ffi::WGPUGlobal* const mContext; +}; + +} +} + +#endif
diff --git a/dom/webgpu/ipc/WebGPUSerialize.h b/dom/webgpu/ipc/WebGPUSerialize.h new file mode 100644 index 000000000000..52717ad33cf4 --- /dev/null +++ b/dom/webgpu/ipc/WebGPUSerialize.h @@ -0,0 +1,34 @@ + + + + + +#ifndef WEBGPU_SERIALIZE_H_ +#define WEBGPU_SERIALIZE_H_ + +#include "WebGPUTypes.h" +#include "ipc/IPCMessageUtils.h" +#include "mozilla/dom/WebGPUBinding.h" + +namespace IPC { + +#define
DEFINE_IPC_SERIALIZER_ENUM(something) \ + template <> \ + struct ParamTraits \ + : public ContiguousEnumSerializer {} + +DEFINE_IPC_SERIALIZER_ENUM(mozilla::dom::GPUPowerPreference); + +DEFINE_IPC_SERIALIZER_WITH_FIELDS(mozilla::dom::GPURequestAdapterOptions, + mPowerPreference); +DEFINE_IPC_SERIALIZER_WITH_FIELDS(mozilla::dom::GPUExtensions, + mAnisotropicFiltering); +DEFINE_IPC_SERIALIZER_WITH_FIELDS(mozilla::dom::GPULimits, mMaxBindGroups); +DEFINE_IPC_SERIALIZER_WITH_FIELDS(mozilla::dom::GPUDeviceDescriptor, + mExtensions, mLimits); + +#undef DEFINE_IPC_SERIALIZER_ENUM + +} +#endif diff --git a/dom/webgpu/ipc/WebGPUTypes.h b/dom/webgpu/ipc/WebGPUTypes.h new file mode 100644 index 000000000000..b8ca734b47d5 --- /dev/null +++ b/dom/webgpu/ipc/WebGPUTypes.h @@ -0,0 +1,18 @@ + + + + + +#ifndef WEBGPU_TYPES_H_ +#define WEBGPU_TYPES_H_ + +namespace mozilla { +namespace webgpu { + +typedef uint64_t RawId; + +} + +} + +#endif diff --git a/dom/webgpu/mochitest/mochitest.ini b/dom/webgpu/mochitest/mochitest.ini index 0002429da42a..8d15bdb904b1 100644 --- a/dom/webgpu/mochitest/mochitest.ini +++ b/dom/webgpu/mochitest/mochitest.ini @@ -3,4 +3,4 @@ subsuite = webgl1-core prefs = dom.webgpu.enable=true [test_enabled.html] - +[test_device_creation.html] diff --git a/dom/webgpu/mochitest/test_device_creation.html b/dom/webgpu/mochitest/test_device_creation.html new file mode 100644 index 000000000000..3f4d03e006aa --- /dev/null +++ b/dom/webgpu/mochitest/test_device_creation.html @@ -0,0 +1,25 @@ + + + + + + + + + + + + diff --git a/dom/webgpu/moz.build b/dom/webgpu/moz.build index ee60c4ecb2c0..16631f288595 100644 --- a/dom/webgpu/moz.build +++ b/dom/webgpu/moz.build @@ -13,7 +13,7 @@ MOCHITEST_MANIFESTS += [ ] DIRS += [ - 'thread', + 'ffi', ] h_and_cpp = [ @@ -30,7 +30,6 @@ h_and_cpp = [ 'DeviceLostInfo', 'Fence', 'Instance', - 'InstanceProvider', 'ObjectModel', 'OutOfMemoryError', 'PipelineLayout', @@ -51,4 +50,22 @@ h_and_cpp = [ EXPORTS.mozilla.webgpu += [x + '.h' for x in h_and_cpp] UNIFIED_SOURCES += [x + '.cpp' for x in h_and_cpp] +IPDL_SOURCES += [ + 'ipc/PWebGPU.ipdl', +] + +EXPORTS.mozilla.webgpu += [ + 'ipc/WebGPUChild.h', + 'ipc/WebGPUParent.h', + 'ipc/WebGPUSerialize.h', + 'ipc/WebGPUTypes.h', +] + +UNIFIED_SOURCES += [ + 'ipc/WebGPUChild.cpp', + 'ipc/WebGPUParent.cpp', +] + +include('/ipc/chromium/chromium-config.mozbuild') + FINAL_LIBRARY = 'xul' diff --git a/dom/webgpu/thread/WebGPUThreading.cpp b/dom/webgpu/thread/WebGPUThreading.cpp deleted file mode 100644 index cd296cdbd363..000000000000 --- a/dom/webgpu/thread/WebGPUThreading.cpp +++ /dev/null @@ -1,61 +0,0 @@ - - - - - - -#include "WebGPUThreading.h" -#include "mtransport/runnable_utils.h" - -namespace mozilla { -namespace webgpu { - -static StaticRefPtr sWebGPUThread; - -WebGPUThreading::WebGPUThreading(base::Thread* aThread) : mThread(aThread) {} - -WebGPUThreading::~WebGPUThreading() { delete mThread; } - - -void WebGPUThreading::Start() { - MOZ_ASSERT(NS_IsMainThread()); - MOZ_ASSERT(!sWebGPUThread); - - base::Thread* thread = new base::Thread("WebGPU"); - - base::Thread::Options options; - if (!thread->StartWithOptions(options)) { - delete thread; - return; - } - - sWebGPUThread = new WebGPUThreading(thread); - const auto fnInit = []() {}; - - RefPtr runnable = - NS_NewRunnableFunction("WebGPUThreading fnInit", fnInit); - sWebGPUThread->GetLoop()->PostTask(runnable.forget()); -} - - -void WebGPUThreading::ShutDown() { - MOZ_ASSERT(NS_IsMainThread()); - MOZ_ASSERT(sWebGPUThread); - - const auto fnExit = []() {}; - - RefPtr 
runnable = - NS_NewRunnableFunction("WebGPUThreading fnExit", fnExit); - sWebGPUThread->GetLoop()->PostTask(runnable.forget()); - - sWebGPUThread = nullptr; -} - - -MessageLoop* WebGPUThreading::GetLoop() { - MOZ_ASSERT(NS_IsMainThread()); - return sWebGPUThread ? sWebGPUThread->mThread->message_loop() : nullptr; -} - -} -} diff --git a/dom/webgpu/thread/WebGPUThreading.h b/dom/webgpu/thread/WebGPUThreading.h deleted file mode 100644 index b36b9ae9fcca..000000000000 --- a/dom/webgpu/thread/WebGPUThreading.h +++ /dev/null @@ -1,42 +0,0 @@ - - - - - - -#ifndef MOZILLA_WEBGPU_THREADING_H -#define MOZILLA_WEBGPU_THREADING_H - -#include "ThreadSafeRefcountingWithMainThreadDestruction.h" -#include "base/thread.h" -#include "mozilla/layers/SynchronousTask.h" - -namespace mozilla { -namespace webgpu { - -class WebGPUThreading final { - NS_INLINE_DECL_THREADSAFE_REFCOUNTING_WITH_MAIN_THREAD_DESTRUCTION( - WebGPUThreading) - - public: - - static void Start(); - - - static void ShutDown(); - - - - static MessageLoop* GetLoop(); - - private: - explicit WebGPUThreading(base::Thread* aThread); - ~WebGPUThreading(); - - base::Thread* const mThread; -}; - -} -} - -#endif diff --git a/dom/webgpu/thread/moz.build b/dom/webgpu/thread/moz.build deleted file mode 100644 index 5a450f10d1dc..000000000000 --- a/dom/webgpu/thread/moz.build +++ /dev/null @@ -1,18 +0,0 @@ -# -*- Mode: python; indent-tabs-mode: nil; tab-width: 40 -*- -# vim: set filetype=python: -# This Source Code Form is subject to the terms of the Mozilla Public -# License, v. 2.0. If a copy of the MPL was not distributed with this -# file, You can obtain one at http://mozilla.org/MPL/2.0/. - -h_and_cpp = [ - 'WebGPUThreading', -] -EXPORTS.mozilla.webgpu += [x + '.h' for x in h_and_cpp] -UNIFIED_SOURCES += [x + '.cpp' for x in h_and_cpp] - - -LOCAL_INCLUDES += [ - '/ipc/chromium/src', -] - -FINAL_LIBRARY = 'xul' diff --git a/dom/webgpu/wgpu-native/Cargo.toml b/dom/webgpu/wgpu-native/Cargo.toml new file mode 100644 index 000000000000..4ed7ba1ae741 --- /dev/null +++ b/dom/webgpu/wgpu-native/Cargo.toml @@ -0,0 +1,53 @@ +[package] +name = "wgpu-native" +version = "0.4.0" +authors = [ + "Dzmitry Malyshau ", + "Joshua Groves ", +] +edition = "2018" +description = "WebGPU native implementation on gfx-hal" +homepage = "https://github.com/gfx-rs/wgpu" +repository = "https://github.com/gfx-rs/wgpu" +keywords = ["graphics"] +license = "MPL-2.0" + +[lib] +#crate-type = ["lib", "cdylib", "staticlib"] +crate-type = ["lib"] + +[features] +default = [] +local = ["lazy_static", "raw-window-handle"] +metal-auto-capture = ["gfx-backend-metal/auto-capture"] +#NOTE: glutin feature is not stable, use at your own risk +#glutin = ["gfx-backend-gl/glutin"] + +[dependencies] +arrayvec = "0.5" +bitflags = "1.0" +copyless = "0.1" +fxhash = "0.2" +lazy_static = { version = "1.1.0", optional = true } +log = "0.4" +hal = { package = "gfx-hal", version = "0.4" } +gfx-backend-empty = { version = "0.4" } +parking_lot = "0.9" +raw-window-handle = { version = "0.3", optional = true } +rendy-memory = "0.5" +rendy-descriptor = "0.5" +serde = { version = "1.0", features = ["serde_derive"], optional = true } +smallvec = "0.6" +vec_map = "0.8" + +[target.'cfg(any(target_os = "ios", target_os = "macos"))'.dependencies] +gfx-backend-metal = { version = "0.4" } +gfx-backend-vulkan = { version = "0.4", optional = true } + +[target.'cfg(all(unix, not(target_os = "ios"), not(target_os = "macos")))'.dependencies] +gfx-backend-vulkan = { version = "0.4", features = ["x11"] } + 
+[target.'cfg(windows)'.dependencies] +gfx-backend-dx12 = { version = "0.4.1" } +gfx-backend-dx11 = { version = "0.4" } +gfx-backend-vulkan = { version = "0.4" } diff --git a/dom/webgpu/wgpu-native/cbindgen.toml b/dom/webgpu/wgpu-native/cbindgen.toml new file mode 100644 index 000000000000..f111779e1e98 --- /dev/null +++ b/dom/webgpu/wgpu-native/cbindgen.toml @@ -0,0 +1,37 @@ +header = """ +#define WGPU_LOCAL +""" +include_version = true +braces = "SameLine" +line_length = 100 +tab_width = 2 +language = "C" + +[export] +prefix = "WGPU" +#TODO: figure out why cbindgen even tries to export a private type... +exclude = ["BufferMapResult"] + +[parse] +parse_deps = false + +[parse.expand] +features = ["local"] + +[fn] + +[struct] +derive_eq = true + +[enum] +prefix_with_name = true +derive_helper_methods = true + +[macro_expansion] +bitflags = true + +[defines] +"feature = local" = "WGPU_LOCAL" +"feature = gfx-backend-gl" = "WGPU_BACKEND_GL" +"feature = winit" = "WGPU_WINIT" +"feature = glutin" = "WGPU_GLUTIN" diff --git a/dom/webgpu/wgpu-native/src/binding_model.rs b/dom/webgpu/wgpu-native/src/binding_model.rs new file mode 100644 index 000000000000..e1c5fffd5873 --- /dev/null +++ b/dom/webgpu/wgpu-native/src/binding_model.rs @@ -0,0 +1,132 @@ + + + + +use crate::{ + resource::TextureViewDimension, + track::TrackerSet, + BindGroupLayoutId, + BufferAddress, + BufferId, + DeviceId, + LifeGuard, + RefCount, + SamplerId, + Stored, + TextureViewId, +}; + +use arrayvec::ArrayVec; +use bitflags::bitflags; +use rendy_descriptor::{DescriptorRanges, DescriptorSet}; + +use std::borrow::Borrow; + +pub const MAX_BIND_GROUPS: usize = 4; + +bitflags! { + #[repr(transparent)] + pub struct ShaderStage: u32 { + const NONE = 0; + const VERTEX = 1; + const FRAGMENT = 2; + const COMPUTE = 4; + } +} + +#[repr(C)] +#[derive(Copy, Clone, Debug, Hash, Eq, PartialEq)] +pub enum BindingType { + UniformBuffer = 0, + StorageBuffer = 1, + ReadonlyStorageBuffer = 2, + Sampler = 3, + SampledTexture = 4, + StorageTexture = 5, +} + +#[repr(C)] +#[derive(Clone, Debug, Hash)] +pub struct BindGroupLayoutBinding { + pub binding: u32, + pub visibility: ShaderStage, + pub ty: BindingType, + pub texture_dimension: TextureViewDimension, + pub multisampled: bool, + pub dynamic: bool, +} + +#[repr(C)] +#[derive(Debug)] +pub struct BindGroupLayoutDescriptor { + pub bindings: *const BindGroupLayoutBinding, + pub bindings_length: usize, +} + +#[derive(Debug)] +pub struct BindGroupLayout { + pub(crate) raw: B::DescriptorSetLayout, + pub(crate) bindings: Vec, + pub(crate) desc_ranges: DescriptorRanges, + pub(crate) dynamic_count: usize, +} + +#[repr(C)] +#[derive(Debug)] +pub struct PipelineLayoutDescriptor { + pub bind_group_layouts: *const BindGroupLayoutId, + pub bind_group_layouts_length: usize, +} + +#[derive(Debug)] +pub struct PipelineLayout { + pub(crate) raw: B::PipelineLayout, + pub(crate) bind_group_layout_ids: ArrayVec<[BindGroupLayoutId; MAX_BIND_GROUPS]>, +} + +#[repr(C)] +#[derive(Debug)] +pub struct BufferBinding { + pub buffer: BufferId, + pub offset: BufferAddress, + pub size: BufferAddress, +} + +#[repr(C)] +#[derive(Debug)] +pub enum BindingResource { + Buffer(BufferBinding), + Sampler(SamplerId), + TextureView(TextureViewId), +} + +#[repr(C)] +#[derive(Debug)] +pub struct BindGroupBinding { + pub binding: u32, + pub resource: BindingResource, +} + +#[repr(C)] +#[derive(Debug)] +pub struct BindGroupDescriptor { + pub layout: BindGroupLayoutId, + pub bindings: *const BindGroupBinding, + pub bindings_length: usize, +} + 
+#[derive(Debug)] +pub struct BindGroup { + pub(crate) raw: DescriptorSet, + pub(crate) device_id: Stored, + pub(crate) layout_id: BindGroupLayoutId, + pub(crate) life_guard: LifeGuard, + pub(crate) used: TrackerSet, + pub(crate) dynamic_count: usize, +} + +impl Borrow for BindGroup { + fn borrow(&self) -> &RefCount { + &self.life_guard.ref_count + } +} diff --git a/dom/webgpu/wgpu-native/src/command/allocator.rs b/dom/webgpu/wgpu-native/src/command/allocator.rs new file mode 100644 index 000000000000..82257d6cbcad --- /dev/null +++ b/dom/webgpu/wgpu-native/src/command/allocator.rs @@ -0,0 +1,165 @@ + + + + +use super::CommandBuffer; +use crate::{ + hub::GfxBackend, + track::TrackerSet, + DeviceId, + Features, + LifeGuard, + Stored, + SubmissionIndex, +}; + +use hal::{command::CommandBuffer as _, device::Device as _, pool::CommandPool as _}; +use parking_lot::Mutex; + +use std::{collections::HashMap, sync::atomic::Ordering, thread}; + +#[derive(Debug)] +struct CommandPool { + raw: B::CommandPool, + available: Vec, +} + +impl CommandPool { + fn allocate(&mut self) -> B::CommandBuffer { + if self.available.is_empty() { + let extra = unsafe { + self.raw.allocate_vec(20, hal::command::Level::Primary) + }; + self.available.extend(extra); + } + + self.available.pop().unwrap() + } +} + +#[derive(Debug)] +struct Inner { + pools: HashMap>, + pending: Vec>, +} + +impl Inner { + fn recycle(&mut self, cmd_buf: CommandBuffer) { + let pool = self.pools.get_mut(&cmd_buf.recorded_thread_id).unwrap(); + for mut raw in cmd_buf.raw { + unsafe { + raw.reset(false); + } + pool.available.push(raw); + } + } +} + +#[derive(Debug)] +pub struct CommandAllocator { + queue_family: hal::queue::QueueFamilyId, + inner: Mutex>, +} + +impl CommandAllocator { + pub(crate) fn allocate( + &self, + device_id: Stored, + device: &B::Device, + features: Features, + ) -> CommandBuffer { + + let thread_id = thread::current().id(); + let mut inner = self.inner.lock(); + + let pool = inner.pools.entry(thread_id).or_insert_with(|| CommandPool { + raw: unsafe { + device.create_command_pool( + self.queue_family, + hal::pool::CommandPoolCreateFlags::RESET_INDIVIDUAL, + ) + } + .unwrap(), + available: Vec::new(), + }); + let init = pool.allocate(); + + CommandBuffer { + raw: vec![init], + is_recording: true, + recorded_thread_id: thread_id, + device_id, + life_guard: LifeGuard::new(), + trackers: TrackerSet::new(B::VARIANT), + used_swap_chain: None, + features, + } + } +} + +impl CommandAllocator { + pub fn new(queue_family: hal::queue::QueueFamilyId) -> Self { + CommandAllocator { + queue_family, + inner: Mutex::new(Inner { + pools: HashMap::new(), + pending: Vec::new(), + }), + } + } + + pub fn extend(&self, cmd_buf: &CommandBuffer) -> B::CommandBuffer { + let mut inner = self.inner.lock(); + let pool = inner.pools.get_mut(&cmd_buf.recorded_thread_id).unwrap(); + + if pool.available.is_empty() { + let extra = unsafe { + pool.raw.allocate_vec(20, hal::command::Level::Primary) + }; + pool.available.extend(extra); + } + + pool.available.pop().unwrap() + } + + pub fn after_submit(&self, mut cmd_buf: CommandBuffer, submit_index: SubmissionIndex) { + cmd_buf.trackers.clear(); + cmd_buf + .life_guard + .submission_index + .store(submit_index, Ordering::Release); + self.inner.lock().pending.push(cmd_buf); + } + + pub fn maintain(&self, last_done: SubmissionIndex) { + let mut inner = self.inner.lock(); + for i in (0 .. 
inner.pending.len()).rev() { + let index = inner.pending[i] + .life_guard + .submission_index + .load(Ordering::Acquire); + if index <= last_done { + let cmd_buf = inner.pending.swap_remove(i); + log::trace!( + "recycling comb submitted in {} when {} is done", + index, + last_done + ); + inner.recycle(cmd_buf); + } + } + } + + pub fn destroy(self, device: &B::Device) { + let mut inner = self.inner.lock(); + while let Some(cmd_buf) = inner.pending.pop() { + inner.recycle(cmd_buf); + } + for (_, mut pool) in inner.pools.drain() { + unsafe { + pool.raw.free(pool.available); + device.destroy_command_pool(pool.raw); + } + } + } +} diff --git a/dom/webgpu/wgpu-native/src/command/bind.rs b/dom/webgpu/wgpu-native/src/command/bind.rs new file mode 100644 index 000000000000..3d312ed074b1 --- /dev/null +++ b/dom/webgpu/wgpu-native/src/command/bind.rs @@ -0,0 +1,222 @@ + + + + +use crate::{ + hub::GfxBackend, + BindGroup, + BindGroupId, + BindGroupLayoutId, + BufferAddress, + PipelineLayoutId, + Stored, +}; + +use smallvec::{smallvec, SmallVec}; + +use std::convert::identity; + +pub const DEFAULT_BIND_GROUPS: usize = 4; +type BindGroupMask = u8; + +#[derive(Clone, Debug)] +pub struct BindGroupPair { + layout_id: BindGroupLayoutId, + group_id: Stored, +} + +#[derive(Debug)] +pub enum LayoutChange<'a> { + Unchanged, + Match(BindGroupId, &'a [BufferAddress]), + Mismatch, +} + +#[derive(Debug)] +pub enum Provision { + Unchanged, + Changed { was_compatible: bool }, +} + +struct TakeSome { + iter: I, +} +impl Iterator for TakeSome +where + I: Iterator>, +{ + type Item = T; + fn next(&mut self) -> Option { + self.iter.next().and_then(identity) + } +} + +#[derive(Clone, Default, Debug)] +pub struct BindGroupEntry { + expected_layout_id: Option, + provided: Option, + dynamic_offsets: Vec, +} + +impl BindGroupEntry { + fn provide( + &mut self, + bind_group_id: BindGroupId, + bind_group: &BindGroup, + offsets: &[BufferAddress], + ) -> Provision { + debug_assert_eq!(B::VARIANT, bind_group_id.backend()); + + let was_compatible = match self.provided { + Some(BindGroupPair { + layout_id, + ref group_id, + }) => { + if group_id.value == bind_group_id && offsets == self.dynamic_offsets.as_slice() { + assert_eq!(layout_id, bind_group.layout_id); + return Provision::Unchanged; + } + self.expected_layout_id == Some(layout_id) + } + None => true, + }; + + self.provided = Some(BindGroupPair { + layout_id: bind_group.layout_id, + group_id: Stored { + value: bind_group_id, + ref_count: bind_group.life_guard.ref_count.clone(), + }, + }); + + self.dynamic_offsets.clear(); + self.dynamic_offsets.extend_from_slice(offsets); + + Provision::Changed { was_compatible } + } + + pub fn expect_layout(&mut self, bind_group_layout_id: BindGroupLayoutId) -> LayoutChange { + let some = Some(bind_group_layout_id); + if self.expected_layout_id != some { + self.expected_layout_id = some; + match self.provided { + Some(BindGroupPair { + layout_id, + ref group_id, + }) if layout_id == bind_group_layout_id => { + LayoutChange::Match(group_id.value, &self.dynamic_offsets) + } + Some(_) | None => LayoutChange::Mismatch, + } + } else { + LayoutChange::Unchanged + } + } + + fn is_valid(&self) -> bool { + match (self.expected_layout_id, self.provided.as_ref()) { + (None, _) => true, + (Some(_), None) => false, + (Some(layout), Some(pair)) => layout == pair.layout_id, + } + } + + fn actual_value(&self) -> Option { + self.expected_layout_id.and_then(|layout_id| { + self.provided.as_ref().and_then(|pair| { + if pair.layout_id == layout_id { + 
Some(pair.group_id.value) + } else { + None + } + }) + }) + } +} + +#[derive(Debug)] +pub struct Binder { + pub(crate) pipeline_layout_id: Option, + pub(crate) entries: SmallVec<[BindGroupEntry; DEFAULT_BIND_GROUPS]>, +} + +impl Binder { + pub(crate) fn new(max_bind_groups: u32) -> Self { + Self { + pipeline_layout_id: None, + entries: smallvec![Default::default(); max_bind_groups as usize], + } + } + + pub(crate) fn reset_expectations(&mut self, length: usize) { + for entry in self.entries[length ..].iter_mut() { + entry.expected_layout_id = None; + } + } + + + + + + + pub(crate) fn provide_entry<'a, B: GfxBackend>( + &'a mut self, + index: usize, + bind_group_id: BindGroupId, + bind_group: &BindGroup, + offsets: &[BufferAddress], + ) -> Option<( + PipelineLayoutId, + impl 'a + Iterator, + impl 'a + Iterator, + )> { + log::trace!("\tBinding [{}] = group {:?}", index, bind_group_id); + debug_assert_eq!(B::VARIANT, bind_group_id.backend()); + + match self.entries[index].provide(bind_group_id, bind_group, offsets) { + Provision::Unchanged => None, + Provision::Changed { was_compatible, .. } => { + let compatible_count = self.compatible_count(); + if index < compatible_count { + let end = compatible_count.min(if was_compatible { + index + 1 + } else { + self.entries.len() + }); + log::trace!("\t\tbinding up to {}", end); + Some(( + self.pipeline_layout_id?, + TakeSome { + iter: self.entries[index + 1 .. end] + .iter() + .map(|entry| entry.actual_value()), + }, + self.entries[index + 1 .. end] + .iter() + .flat_map(|entry| entry.dynamic_offsets.as_slice()), + )) + } else { + log::trace!("\t\tskipping above compatible {}", compatible_count); + None + } + } + } + } + + pub(crate) fn invalid_mask(&self) -> BindGroupMask { + self.entries.iter().enumerate().fold(0, |mask, (i, entry)| { + if entry.is_valid() { + mask + } else { + mask | 1u8 << i + } + }) + } + + fn compatible_count(&self) -> usize { + self.entries + .iter() + .position(|entry| !entry.is_valid()) + .unwrap_or(self.entries.len()) + } +} diff --git a/dom/webgpu/wgpu-native/src/command/compute.rs b/dom/webgpu/wgpu-native/src/command/compute.rs new file mode 100644 index 000000000000..949de752b1d8 --- /dev/null +++ b/dom/webgpu/wgpu-native/src/command/compute.rs @@ -0,0 +1,316 @@ + + + + +use crate::{ + command::bind::{Binder, LayoutChange}, + device::all_buffer_stages, + hub::{GfxBackend, Global, Token}, + track::{Stitch, TrackerSet}, + BindGroupId, + BufferAddress, + BufferId, + BufferUsage, + CommandBuffer, + CommandBufferId, + ComputePassId, + ComputePipelineId, + RawString, + Stored, + BIND_BUFFER_ALIGNMENT, +}; +#[cfg(feature = "local")] +use crate::{gfx_select, hub::GLOBAL}; + +use hal::{self, command::CommandBuffer as _}; + +use std::iter; +#[cfg(feature = "local")] +use std::slice; + +#[derive(Debug)] +pub struct ComputePass { + raw: B::CommandBuffer, + cmb_id: Stored, + binder: Binder, + trackers: TrackerSet, +} + +impl ComputePass { + pub(crate) fn new( + raw: B::CommandBuffer, + cmb_id: Stored, + trackers: TrackerSet, + max_bind_groups: u32, + ) -> Self { + ComputePass { + raw, + cmb_id, + binder: Binder::new(max_bind_groups), + trackers, + } + } +} + + + +pub fn compute_pass_end_pass(global: &Global, pass_id: ComputePassId) { + let mut token = Token::root(); + let hub = B::hub(global); + let (mut cmb_guard, mut token) = hub.command_buffers.write(&mut token); + let (pass, _) = hub.compute_passes.unregister(pass_id, &mut token); + let cmb = &mut cmb_guard[pass.cmb_id.value]; + + + + cmb.trackers = pass.trackers; + 
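+    // The pass took a snapshot of the tracker state when it was created;
+    // moving that state back into the command buffer here lets whatever is
+    // encoded next continue from the resource states this pass left behind,
+    // while the recorded raw command buffer below rejoins the submission list.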
cmb.raw.push(pass.raw); +} + +#[cfg(feature = "local")] +#[no_mangle] +pub extern "C" fn wgpu_compute_pass_end_pass(pass_id: ComputePassId) { + gfx_select!(pass_id => compute_pass_end_pass(&*GLOBAL, pass_id)) +} + +pub fn compute_pass_set_bind_group( + global: &Global, + pass_id: ComputePassId, + index: u32, + bind_group_id: BindGroupId, + offsets: &[BufferAddress], +) { + let hub = B::hub(global); + let mut token = Token::root(); + + let (pipeline_layout_guard, mut token) = hub.pipeline_layouts.read(&mut token); + let (bind_group_guard, mut token) = hub.bind_groups.read(&mut token); + let (mut pass_guard, mut token) = hub.compute_passes.write(&mut token); + let pass = &mut pass_guard[pass_id]; + + let bind_group = pass + .trackers + .bind_groups + .use_extend(&*bind_group_guard, bind_group_id, (), ()) + .unwrap(); + + assert_eq!(bind_group.dynamic_count, offsets.len()); + + if cfg!(debug_assertions) { + for off in offsets { + assert_eq!( + *off % BIND_BUFFER_ALIGNMENT, + 0, + "Misaligned dynamic buffer offset: {} does not align with {}", + off, + BIND_BUFFER_ALIGNMENT + ); + } + } + + + + let (buffer_guard, mut token) = hub.buffers.read(&mut token); + let (texture_guard, _) = hub.textures.read(&mut token); + + log::trace!( + "Encoding barriers on binding of {:?} in pass {:?}", + bind_group_id, + pass_id + ); + CommandBuffer::insert_barriers( + &mut pass.raw, + &mut pass.trackers, + &bind_group.used, + Stitch::Last, + &*buffer_guard, + &*texture_guard, + ); + + if let Some((pipeline_layout_id, follow_up_sets, follow_up_offsets)) = pass + .binder + .provide_entry(index as usize, bind_group_id, bind_group, offsets) + { + let bind_groups = iter::once(bind_group.raw.raw()) + .chain(follow_up_sets.map(|bg_id| bind_group_guard[bg_id].raw.raw())); + unsafe { + pass.raw.bind_compute_descriptor_sets( + &pipeline_layout_guard[pipeline_layout_id].raw, + index as usize, + bind_groups, + offsets + .iter() + .chain(follow_up_offsets) + .map(|&off| off as hal::command::DescriptorSetOffset), + ); + } + }; +} + +#[cfg(feature = "local")] +#[no_mangle] +pub extern "C" fn wgpu_compute_pass_set_bind_group( + pass_id: ComputePassId, + index: u32, + bind_group_id: BindGroupId, + offsets: *const BufferAddress, + offsets_length: usize, +) { + let offsets = if offsets_length != 0 { + unsafe { slice::from_raw_parts(offsets, offsets_length) } + } else { + &[] + }; + gfx_select!(pass_id => compute_pass_set_bind_group(&*GLOBAL, pass_id, index, bind_group_id, offsets)) +} + +#[no_mangle] +pub extern "C" fn wgpu_compute_pass_push_debug_group(_pass_id: ComputePassId, _label: RawString) { + //TODO +} + +#[no_mangle] +pub extern "C" fn wgpu_compute_pass_pop_debug_group(_pass_id: ComputePassId) { + //TODO +} + +#[no_mangle] +pub extern "C" fn wgpu_compute_pass_insert_debug_marker( + _pass_id: ComputePassId, + _label: RawString, +) { + //TODO +} + +// Compute-specific routines + +pub fn compute_pass_dispatch( + global: &Global, + pass_id: ComputePassId, + x: u32, + y: u32, + z: u32, +) { + let hub = B::hub(global); + let mut token = Token::root(); + let (mut pass_guard, _) = hub.compute_passes.write(&mut token); + unsafe { + pass_guard[pass_id].raw.dispatch([x, y, z]); + } +} + +#[cfg(feature = "local")] +#[no_mangle] +pub extern "C" fn wgpu_compute_pass_dispatch(pass_id: ComputePassId, x: u32, y: u32, z: u32) { + gfx_select!(pass_id => compute_pass_dispatch(&*GLOBAL, pass_id, x, y, z)) +} + +pub fn compute_pass_dispatch_indirect( + global: &Global, + pass_id: ComputePassId, + indirect_buffer_id: BufferId, + indirect_offset: 
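// ---------------------------------------------------------------------------
// A minimal sketch (not part of the vendored code) of the dynamic-offset
// validation done by `compute_pass_set_bind_group` above: the offset count
// must match the bind group's dynamic binding count, and every offset must
// be a multiple of BIND_BUFFER_ALIGNMENT (256 here; the real constant lives
// in device.rs).
const BIND_BUFFER_ALIGNMENT: u64 = 256;

fn validate_offsets(offsets: &[u64], dynamic_count: usize) -> Result<(), String> {
    if offsets.len() != dynamic_count {
        return Err(format!("expected {} offsets, got {}", dynamic_count, offsets.len()));
    }
    for &off in offsets {
        if off % BIND_BUFFER_ALIGNMENT != 0 {
            return Err(format!("misaligned dynamic buffer offset: {}", off));
        }
    }
    Ok(())
}

fn main() {
    assert!(validate_offsets(&[0, 256, 512], 3).is_ok());
    assert!(validate_offsets(&[100], 1).is_err()); // 100 is not 256-aligned
}
// ---------------------------------------------------------------------------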
BufferAddress, +) { + let hub = B::hub(global); + let mut token = Token::root(); + let (buffer_guard, _) = hub.buffers.read(&mut token); + let (mut pass_guard, _) = hub.compute_passes.write(&mut token); + let pass = &mut pass_guard[pass_id]; + + let (src_buffer, src_pending) = pass.trackers.buffers.use_replace( + &*buffer_guard, + indirect_buffer_id, + (), + BufferUsage::INDIRECT, + ); + assert!(src_buffer.usage.contains(BufferUsage::INDIRECT)); + + let barriers = src_pending.map(|pending| hal::memory::Barrier::Buffer { + states: pending.to_states(), + target: &src_buffer.raw, + families: None, + range: None .. None, + }); + + unsafe { + pass.raw.pipeline_barrier( + all_buffer_stages() .. all_buffer_stages(), + hal::memory::Dependencies::empty(), + barriers, + ); + pass.raw.dispatch_indirect(&src_buffer.raw, indirect_offset); + } +} + +#[cfg(feature = "local")] +#[no_mangle] +pub extern "C" fn wgpu_compute_pass_dispatch_indirect( + pass_id: ComputePassId, + indirect_buffer_id: BufferId, + indirect_offset: BufferAddress, +) { + gfx_select!(pass_id => compute_pass_dispatch_indirect(&*GLOBAL, pass_id, indirect_buffer_id, indirect_offset)) +} + +pub fn compute_pass_set_pipeline( + global: &Global, + pass_id: ComputePassId, + pipeline_id: ComputePipelineId, +) { + let hub = B::hub(global); + let mut token = Token::root(); + let (pipeline_layout_guard, mut token) = hub.pipeline_layouts.read(&mut token); + let (bind_group_guard, mut token) = hub.bind_groups.read(&mut token); + let (mut pass_guard, mut token) = hub.compute_passes.write(&mut token); + let pass = &mut pass_guard[pass_id]; + let (pipeline_guard, _) = hub.compute_pipelines.read(&mut token); + let pipeline = &pipeline_guard[pipeline_id]; + + unsafe { + pass.raw.bind_compute_pipeline(&pipeline.raw); + } + + // Rebind resources + if pass.binder.pipeline_layout_id != Some(pipeline.layout_id.clone()) { + let pipeline_layout = &pipeline_layout_guard[pipeline.layout_id]; + pass.binder.pipeline_layout_id = Some(pipeline.layout_id.clone()); + pass.binder + .reset_expectations(pipeline_layout.bind_group_layout_ids.len()); + let mut is_compatible = true; + + for (index, (entry, &bgl_id)) in pass + .binder + .entries + .iter_mut() + .zip(&pipeline_layout.bind_group_layout_ids) + .enumerate() + { + match entry.expect_layout(bgl_id) { + LayoutChange::Match(bg_id, offsets) if is_compatible => { + let desc_set = bind_group_guard[bg_id].raw.raw(); + unsafe { + pass.raw.bind_compute_descriptor_sets( + &pipeline_layout.raw, + index, + iter::once(desc_set), + offsets.iter().map(|offset| *offset as u32), + ); + } + } + LayoutChange::Match(..) 
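// ---------------------------------------------------------------------------
// A minimal sketch (not part of the vendored code) of the argument layout an
// application is assumed to write at `indirect_offset` for dispatch_indirect:
// three u32 workgroup counts, as in Vulkan's VkDispatchIndirectCommand. The
// struct name here is illustrative only.
use std::convert::TryInto;

#[repr(C)]
#[derive(Clone, Copy)]
struct DispatchIndirectArgs {
    x: u32,
    y: u32,
    z: u32,
}

fn main() {
    let args = DispatchIndirectArgs { x: 64, y: 1, z: 1 };
    // This is the byte pattern an INDIRECT-usage buffer would hold:
    let bytes: [u8; 12] = unsafe { std::mem::transmute(args) };
    assert_eq!(u32::from_ne_bytes(bytes[0..4].try_into().unwrap()), 64);
}
// ---------------------------------------------------------------------------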
| LayoutChange::Unchanged => {} + LayoutChange::Mismatch => { + is_compatible = false; + } + } + } + } +} + +#[cfg(feature = "local")] +#[no_mangle] +pub extern "C" fn wgpu_compute_pass_set_pipeline( + pass_id: ComputePassId, + pipeline_id: ComputePipelineId, +) { + gfx_select!(pass_id => compute_pass_set_pipeline(&*GLOBAL, pass_id, pipeline_id)) +} diff --git a/dom/webgpu/wgpu-native/src/command/mod.rs b/dom/webgpu/wgpu-native/src/command/mod.rs new file mode 100644 index 000000000000..a901839136f7 --- /dev/null +++ b/dom/webgpu/wgpu-native/src/command/mod.rs @@ -0,0 +1,769 @@ + + + + +mod allocator; +mod bind; +mod compute; +mod render; +mod transfer; + +pub(crate) use self::allocator::CommandAllocator; +pub use self::compute::*; +pub use self::render::*; +pub use self::transfer::*; + +use crate::{ + conv, + device::{ + all_buffer_stages, + all_image_stages, + FramebufferKey, + RenderPassContext, + RenderPassKey, + }, + hub::{GfxBackend, Global, Storage, Token}, + id::{Input, Output}, + resource::TextureViewInner, + track::{Stitch, TrackerSet}, + Buffer, + BufferId, + Color, + CommandBufferId, + CommandEncoderId, + ComputePassId, + DeviceId, + Features, + LifeGuard, + RenderPassId, + Stored, + Texture, + TextureId, + TextureUsage, + TextureViewId, +}; +#[cfg(feature = "local")] +use crate::{gfx_select, hub::GLOBAL}; + +use arrayvec::ArrayVec; +use hal::{adapter::PhysicalDevice as _, command::CommandBuffer as _, device::Device as _}; + +#[cfg(feature = "local")] +use std::marker::PhantomData; +use std::{borrow::Borrow, collections::hash_map::Entry, iter, mem, ptr, slice, thread::ThreadId}; + + +pub struct RenderBundle { + _raw: B::CommandBuffer, +} + +#[repr(C)] +#[derive(Copy, Clone, Debug, Hash, Eq, PartialEq)] +pub enum LoadOp { + Clear = 0, + Load = 1, +} + +#[repr(C)] +#[derive(Copy, Clone, Debug, Hash, Eq, PartialEq)] +pub enum StoreOp { + Clear = 0, + Store = 1, +} + +#[repr(C)] +#[derive(Debug)] +pub struct RenderPassColorAttachmentDescriptor { + pub attachment: TextureViewId, + pub resolve_target: *const TextureViewId, + pub load_op: LoadOp, + pub store_op: StoreOp, + pub clear_color: Color, +} + +#[repr(C)] +#[derive(Debug)] +pub struct RenderPassDepthStencilAttachmentDescriptor { + pub attachment: T, + pub depth_load_op: LoadOp, + pub depth_store_op: StoreOp, + pub clear_depth: f32, + pub stencil_load_op: LoadOp, + pub stencil_store_op: StoreOp, + pub clear_stencil: u32, +} + +#[repr(C)] +#[derive(Debug)] +pub struct RenderPassDescriptor { + pub color_attachments: *const RenderPassColorAttachmentDescriptor, + pub color_attachments_length: usize, + pub depth_stencil_attachment: *const RenderPassDepthStencilAttachmentDescriptor, +} + +#[repr(C)] +#[derive(Clone, Debug, Default)] +pub struct ComputePassDescriptor { + pub todo: u32, +} + +#[derive(Debug)] +pub struct CommandBuffer { + pub(crate) raw: Vec, + is_recording: bool, + recorded_thread_id: ThreadId, + pub(crate) device_id: Stored, + pub(crate) life_guard: LifeGuard, + pub(crate) trackers: TrackerSet, + pub(crate) used_swap_chain: Option<(Stored, B::Framebuffer)>, + pub(crate) features: Features, +} + +impl CommandBuffer { + pub(crate) fn insert_barriers( + raw: &mut B::CommandBuffer, + base: &mut TrackerSet, + head: &TrackerSet, + stitch: Stitch, + buffer_guard: &Storage, BufferId>, + texture_guard: &Storage, TextureId>, + ) { + log::trace!("\tstitch {:?}", stitch); + debug_assert_eq!(B::VARIANT, base.backend()); + debug_assert_eq!(B::VARIANT, head.backend()); + + let buffer_barriers = base + .buffers + 
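// ---------------------------------------------------------------------------
// A minimal sketch (not part of the vendored code) of why LoadOp/StoreOp in
// mod.rs above carry explicit discriminants and #[repr(C)]: cbindgen
// (configured by the cbindgen.toml files in this patch) emits such enums as
// plain C enums, so the numeric values are part of the ABI shared with the
// C++ side.
#[repr(C)]
#[derive(Clone, Copy, Debug, PartialEq)]
enum LoadOp {
    Clear = 0,
    Load = 1,
}

fn main() {
    // The discriminant, not any Rust-side layout, is what crosses the FFI:
    assert_eq!(LoadOp::Load as u32, 1);
}
// ---------------------------------------------------------------------------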
.merge_replace(&head.buffers, stitch) + .map(|pending| { + log::trace!("\tbuffer -> {:?}", pending); + hal::memory::Barrier::Buffer { + states: pending.to_states(), + target: &buffer_guard[pending.id].raw, + range: None .. None, + families: None, + } + }); + let texture_barriers = base + .textures + .merge_replace(&head.textures, stitch) + .map(|pending| { + log::trace!("\ttexture -> {:?}", pending); + hal::memory::Barrier::Image { + states: pending.to_states(), + target: &texture_guard[pending.id].raw, + range: pending.selector, + families: None, + } + }); + base.views.merge_extend(&head.views).unwrap(); + base.bind_groups.merge_extend(&head.bind_groups).unwrap(); + base.samplers.merge_extend(&head.samplers).unwrap(); + + let stages = all_buffer_stages() | all_image_stages(); + unsafe { + raw.pipeline_barrier( + stages .. stages, + hal::memory::Dependencies::empty(), + buffer_barriers.chain(texture_barriers), + ); + } + } +} + +#[repr(C)] +#[derive(Clone, Debug, Default)] +pub struct CommandEncoderDescriptor { + // MSVC doesn't allow zero-sized structs + // We can remove this when we actually have a field + pub todo: u32, +} + +#[repr(C)] +#[derive(Clone, Debug, Default)] +pub struct CommandBufferDescriptor { + pub todo: u32, +} + +pub fn command_encoder_finish( + global: &Global, + encoder_id: CommandEncoderId, + _desc: &CommandBufferDescriptor, +) -> CommandBufferId { + let hub = B::hub(global); + let mut token = Token::root(); + //TODO: actually close the last recorded command buffer + let (mut comb_guard, _) = hub.command_buffers.write(&mut token); + let comb = &mut comb_guard[encoder_id]; + assert!(comb.is_recording); + comb.is_recording = false; + // stop tracking the swapchain image, if used + if let Some((ref view_id, _)) = comb.used_swap_chain { + comb.trackers.views.remove(view_id.value); + } + encoder_id +} + +#[cfg(feature = "local")] +#[no_mangle] +pub extern "C" fn wgpu_command_encoder_finish( + encoder_id: CommandEncoderId, + desc: Option<&CommandBufferDescriptor>, +) -> CommandBufferId { + let desc = &desc.cloned().unwrap_or_default(); + gfx_select!(encoder_id => command_encoder_finish(&*GLOBAL, encoder_id, desc)) +} + +pub fn command_encoder_begin_render_pass( + global: &Global, + encoder_id: CommandEncoderId, + desc: &RenderPassDescriptor, + id_in: Input, +) -> Output { + let hub = B::hub(global); + let mut token = Token::root(); + + let (adapter_guard, mut token) = hub.adapters.read(&mut token); + let (device_guard, mut token) = hub.devices.read(&mut token); + let (mut cmb_guard, mut token) = hub.command_buffers.write(&mut token); + let cmb = &mut cmb_guard[encoder_id]; + let device = &device_guard[cmb.device_id.value]; + + let limits = adapter_guard[device.adapter_id] + .raw + .physical_device + .limits(); + let samples_count_limit = limits.framebuffer_color_sample_counts; + + let mut current_comb = device.com_allocator.extend(cmb); + unsafe { + current_comb.begin( + hal::command::CommandBufferFlags::ONE_TIME_SUBMIT, + hal::command::CommandBufferInheritanceInfo::default(), + ); + } + + let pass = { + let (_, mut token) = hub.buffers.read(&mut token); //skip token + let (texture_guard, mut token) = hub.textures.read(&mut token); + let (view_guard, _) = hub.texture_views.read(&mut token); + + let mut extent = None; + let mut barriers = Vec::new(); + let mut used_swap_chain_image = None::>; + + let color_attachments = + unsafe { slice::from_raw_parts(desc.color_attachments, desc.color_attachments_length) }; + let depth_stencil_attachment = unsafe { 
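// ---------------------------------------------------------------------------
// A minimal sketch (not part of the vendored code) of the idea behind
// `insert_barriers` above: diff the usage a resource was last known to have
// against the usage a pass recorded, and emit a transition only when they
// differ. A plain enum stands in for the real usage bitflags and trackers.
#[derive(Clone, Copy, Debug, PartialEq)]
enum Usage {
    CopyDst,
    Storage,
}

// Returns the (old, new) states for a barrier, or None when no transition
// is needed; the tracked state is updated to the new value either way.
fn merge_replace(base: &mut Usage, head: Usage) -> Option<(Usage, Usage)> {
    if *base == head {
        None
    } else {
        let old = std::mem::replace(base, head);
        Some((old, head))
    }
}

fn main() {
    let mut known = Usage::CopyDst;
    assert_eq!(merge_replace(&mut known, Usage::Storage),
               Some((Usage::CopyDst, Usage::Storage)));
    assert_eq!(merge_replace(&mut known, Usage::Storage), None); // already there
}
// ---------------------------------------------------------------------------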
desc.depth_stencil_attachment.as_ref() }; + + let sample_count = color_attachments + .get(0) + .map(|at| view_guard[at.attachment].samples) + .unwrap_or(1); + assert!( + sample_count & samples_count_limit != 0, + "Attachment sample_count must be supported by physical device limits" + ); + + log::trace!( + "Encoding render pass begin in command buffer {:?}", + encoder_id + ); + let rp_key = { + let trackers = &mut cmb.trackers; + + let depth_stencil = depth_stencil_attachment.map(|at| { + let view = trackers + .views + .use_extend(&*view_guard, at.attachment, (), ()) + .unwrap(); + if let Some(ex) = extent { + assert_eq!(ex, view.extent); + } else { + extent = Some(view.extent); + } + let texture_id = match view.inner { + TextureViewInner::Native { ref source_id, .. } => source_id.value, + TextureViewInner::SwapChain { .. } => { + panic!("Unexpected depth/stencil use of swapchain image!") + } + }; + + let texture = &texture_guard[texture_id]; + assert!(texture.usage.contains(TextureUsage::OUTPUT_ATTACHMENT)); + + let old_layout = match trackers.textures.query(texture_id, view.range.clone()) { + Some(usage) => { + conv::map_texture_state( + usage, + hal::format::Aspects::DEPTH | hal::format::Aspects::STENCIL, + ) + .1 + } + None => { + // Required sub-resources have inconsistent states, we need to + // issue individual barriers instead of relying on the render pass. + let pending = trackers.textures.change_replace( + texture_id, + &texture.life_guard.ref_count, + view.range.clone(), + TextureUsage::OUTPUT_ATTACHMENT, + ); + + barriers.extend(pending.map(|pending| { + log::trace!("\tdepth-stencil {:?}", pending); + hal::memory::Barrier::Image { + states: pending.to_states(), + target: &texture.raw, + families: None, + range: pending.selector, + } + })); + hal::image::Layout::DepthStencilAttachmentOptimal + } + }; + hal::pass::Attachment { + format: Some(conv::map_texture_format(view.format, device.features)), + samples: view.samples, + ops: conv::map_load_store_ops(at.depth_load_op, at.depth_store_op), + stencil_ops: conv::map_load_store_ops(at.stencil_load_op, at.stencil_store_op), + layouts: old_layout .. hal::image::Layout::DepthStencilAttachmentOptimal, + } + }); + + let mut colors = ArrayVec::new(); + let mut resolves = ArrayVec::new(); + + for at in color_attachments { + let view = &view_guard[at.attachment]; + if let Some(ex) = extent { + assert_eq!(ex, view.extent); + } else { + extent = Some(view.extent); + } + assert_eq!( + view.samples, sample_count, + "All attachments must have the same sample_count" + ); + let first_use = + trackers + .views + .init(at.attachment, &view.life_guard.ref_count, (), ()); + + let layouts = match view.inner { + TextureViewInner::Native { ref source_id, .. } => { + let texture = &texture_guard[source_id.value]; + assert!(texture.usage.contains(TextureUsage::OUTPUT_ATTACHMENT)); + + let old_layout = + match trackers.textures.query(source_id.value, view.range.clone()) { + Some(usage) => { + conv::map_texture_state(usage, hal::format::Aspects::COLOR).1 + } + None => { + // Required sub-resources have inconsistent states, we need to + // issue individual barriers instead of relying on the render pass. 
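// ---------------------------------------------------------------------------
// A minimal sketch (not part of the vendored code) of why the sample-count
// assertion above is a bitwise AND: gfx-hal reports supported MSAA levels as
// a bitmask in which each power-of-two count occupies its own bit, so
// "supported" means the count's bit intersects the limit mask.
fn sample_count_supported(count: u32, limit_mask: u32) -> bool {
    count & limit_mask != 0
}

fn main() {
    let limits = 0b1111; // a device supporting 1x, 2x, 4x and 8x
    assert!(sample_count_supported(4, limits));
    assert!(!sample_count_supported(16, limits));
}
// ---------------------------------------------------------------------------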
+ let pending = trackers.textures.change_replace( + source_id.value, + &texture.life_guard.ref_count, + view.range.clone(), + TextureUsage::OUTPUT_ATTACHMENT, + ); + barriers.extend(pending.map(|pending| { + log::trace!("\tcolor {:?}", pending); + hal::memory::Barrier::Image { + states: pending.to_states(), + target: &texture.raw, + families: None, + range: pending.selector, + } + })); + hal::image::Layout::ColorAttachmentOptimal + } + }; + old_layout .. hal::image::Layout::ColorAttachmentOptimal + } + TextureViewInner::SwapChain { .. } => { + if let Some((ref view_id, _)) = cmb.used_swap_chain { + assert_eq!(view_id.value, at.attachment); + } else { + assert!(used_swap_chain_image.is_none()); + used_swap_chain_image = Some(Stored { + value: at.attachment, + ref_count: view.life_guard.ref_count.clone(), + }); + } + + let end = hal::image::Layout::Present; + let start = if first_use { + hal::image::Layout::Undefined + } else { + end + }; + start .. end + } + }; + + colors.push(hal::pass::Attachment { + format: Some(conv::map_texture_format(view.format, device.features)), + samples: view.samples, + ops: conv::map_load_store_ops(at.load_op, at.store_op), + stencil_ops: hal::pass::AttachmentOps::DONT_CARE, + layouts, + }); + } + + for &resolve_target in color_attachments + .iter() + .flat_map(|at| unsafe { at.resolve_target.as_ref() }) + { + let view = &view_guard[resolve_target]; + assert_eq!(extent, Some(view.extent)); + assert_eq!( + view.samples, 1, + "All resolve_targets must have a sample_count of 1" + ); + let first_use = + trackers + .views + .init(resolve_target, &view.life_guard.ref_count, (), ()); + + let layouts = match view.inner { + TextureViewInner::Native { ref source_id, .. } => { + let texture = &texture_guard[source_id.value]; + assert!(texture.usage.contains(TextureUsage::OUTPUT_ATTACHMENT)); + + let old_layout = + match trackers.textures.query(source_id.value, view.range.clone()) { + Some(usage) => { + conv::map_texture_state(usage, hal::format::Aspects::COLOR).1 + } + None => { + // Required sub-resources have inconsistent states, we need to + // issue individual barriers instead of relying on the render pass. + let pending = trackers.textures.change_replace( + source_id.value, + &texture.life_guard.ref_count, + view.range.clone(), + TextureUsage::OUTPUT_ATTACHMENT, + ); + barriers.extend(pending.map(|pending| { + log::trace!("\tresolve {:?}", pending); + hal::memory::Barrier::Image { + states: pending.to_states(), + target: &texture.raw, + families: None, + range: pending.selector, + } + })); + hal::image::Layout::ColorAttachmentOptimal + } + }; + old_layout .. hal::image::Layout::ColorAttachmentOptimal + } + TextureViewInner::SwapChain { .. } => { + if let Some((ref view_id, _)) = cmb.used_swap_chain { + assert_eq!(view_id.value, resolve_target); + } else { + assert!(used_swap_chain_image.is_none()); + used_swap_chain_image = Some(Stored { + value: resolve_target, + ref_count: view.life_guard.ref_count.clone(), + }); + } + + let end = hal::image::Layout::Present; + let start = if first_use { + hal::image::Layout::Undefined + } else { + end + }; + start .. 
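// ---------------------------------------------------------------------------
// A minimal sketch (not part of the vendored code) of the layout choice made
// for swapchain attachments above: on first use within a frame the previous
// contents are irrelevant, so the pass may start from Undefined; otherwise
// it must start from the layout the image was left in (Present).
#[derive(Clone, Copy, Debug, PartialEq)]
enum Layout {
    Undefined,
    Present,
}

fn swap_chain_layouts(first_use: bool) -> (Layout, Layout) {
    let end = Layout::Present;
    let start = if first_use { Layout::Undefined } else { end };
    (start, end)
}

fn main() {
    assert_eq!(swap_chain_layouts(true), (Layout::Undefined, Layout::Present));
    assert_eq!(swap_chain_layouts(false), (Layout::Present, Layout::Present));
}
// ---------------------------------------------------------------------------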
end + } + }; + + resolves.push(hal::pass::Attachment { + format: Some(conv::map_texture_format(view.format, device.features)), + samples: view.samples, + ops: hal::pass::AttachmentOps::new( + hal::pass::AttachmentLoadOp::DontCare, + hal::pass::AttachmentStoreOp::Store, + ), + stencil_ops: hal::pass::AttachmentOps::DONT_CARE, + layouts, + }); + } + + RenderPassKey { + colors, + resolves, + depth_stencil, + } + }; + + if !barriers.is_empty() { + unsafe { + current_comb.pipeline_barrier( + all_image_stages() .. all_image_stages(), + hal::memory::Dependencies::empty(), + barriers, + ); + } + } + + let mut render_pass_cache = device.render_passes.lock(); + let render_pass = match render_pass_cache.entry(rp_key.clone()) { + Entry::Occupied(e) => e.into_mut(), + Entry::Vacant(e) => { + let color_ids = [ + (0, hal::image::Layout::ColorAttachmentOptimal), + (1, hal::image::Layout::ColorAttachmentOptimal), + (2, hal::image::Layout::ColorAttachmentOptimal), + (3, hal::image::Layout::ColorAttachmentOptimal), + ]; + + let mut resolve_ids = ArrayVec::<[_; crate::device::MAX_COLOR_TARGETS]>::new(); + let mut attachment_index = color_attachments.len(); + if color_attachments + .iter() + .any(|at| at.resolve_target != ptr::null()) + { + for (i, at) in color_attachments.iter().enumerate() { + if at.resolve_target == ptr::null() { + resolve_ids.push(( + hal::pass::ATTACHMENT_UNUSED, + hal::image::Layout::ColorAttachmentOptimal, + )); + } else { + let sample_count_check = + view_guard[color_attachments[i].attachment].samples; + assert!(sample_count_check > 1, "RenderPassColorAttachmentDescriptor with a resolve_target must have an attachment with sample_count > 1"); + resolve_ids.push(( + attachment_index, + hal::image::Layout::ColorAttachmentOptimal, + )); + attachment_index += 1; + } + } + } + + let depth_id = ( + attachment_index, + hal::image::Layout::DepthStencilAttachmentOptimal, + ); + + let subpass = hal::pass::SubpassDesc { + colors: &color_ids[.. color_attachments.len()], + resolves: &resolve_ids, + depth_stencil: depth_stencil_attachment.map(|_| &depth_id), + inputs: &[], + preserves: &[], + }; + + let pass = unsafe { + device + .raw + .create_render_pass(e.key().all(), &[subpass], &[]) + } + .unwrap(); + e.insert(pass) + } + }; + + let mut framebuffer_cache; + let fb_key = FramebufferKey { + colors: color_attachments.iter().map(|at| at.attachment).collect(), + resolves: color_attachments + .iter() + .filter_map(|at| unsafe { at.resolve_target.as_ref() }.cloned()) + .collect(), + depth_stencil: depth_stencil_attachment.map(|at| at.attachment), + }; + + let framebuffer = match used_swap_chain_image.take() { + Some(view_id) => { + assert!(cmb.used_swap_chain.is_none()); + // Always create a new framebuffer and delete it after presentation. + let attachments = fb_key.all().map(|&id| match view_guard[id].inner { + TextureViewInner::Native { ref raw, .. } => raw, + TextureViewInner::SwapChain { ref image, .. } => Borrow::borrow(image), + }); + let framebuffer = unsafe { + device + .raw + .create_framebuffer(&render_pass, attachments, extent.unwrap()) + .unwrap() + }; + cmb.used_swap_chain = Some((view_id, framebuffer)); + &mut cmb.used_swap_chain.as_mut().unwrap().1 + } + None => { + // Cache framebuffers by the device. + framebuffer_cache = device.framebuffers.lock(); + match framebuffer_cache.entry(fb_key) { + Entry::Occupied(e) => e.into_mut(), + Entry::Vacant(e) => { + let fb = { + let attachments = e.key().all().map(|&id| match view_guard[id].inner { + TextureViewInner::Native { ref raw, .. 
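// ---------------------------------------------------------------------------
// A minimal sketch (not part of the vendored code) of the caching pattern
// used for render passes and framebuffers above: a HashMap keyed by the
// attachment description, filled lazily through the Entry API so each unique
// key triggers exactly one expensive creation call.
use std::collections::hash_map::{Entry, HashMap};

#[derive(Clone, Hash, PartialEq, Eq)]
struct PassKey {
    color_formats: Vec<u32>, // stand-in for the real attachment descriptions
    has_depth: bool,
}

fn get_or_create(cache: &mut HashMap<PassKey, String>, key: PassKey) -> &mut String {
    match cache.entry(key) {
        Entry::Occupied(e) => e.into_mut(),
        // In the real code this arm calls `device.create_render_pass(...)`.
        Entry::Vacant(e) => e.insert(String::from("new render pass")),
    }
}

fn main() {
    let mut cache = HashMap::new();
    let key = PassKey { color_formats: vec![1], has_depth: false };
    get_or_create(&mut cache, key.clone());
    get_or_create(&mut cache, key);
    assert_eq!(cache.len(), 1); // the second call hit the cache
}
// ---------------------------------------------------------------------------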
} => raw, + TextureViewInner::SwapChain { ref image, .. } => { + Borrow::borrow(image) + } + }); + unsafe { + device.raw.create_framebuffer( + &render_pass, + attachments, + extent.unwrap(), + ) + } + .unwrap() + }; + e.insert(fb) + } + } + } + }; + + let rect = { + let ex = extent.unwrap(); + hal::pso::Rect { + x: 0, + y: 0, + w: ex.width as _, + h: ex.height as _, + } + }; + + let clear_values = color_attachments + .iter() + .zip(&rp_key.colors) + .flat_map(|(at, key)| { + match at.load_op { + LoadOp::Load => None, + LoadOp::Clear => { + use hal::format::ChannelType; + //TODO: validate sign/unsign and normalized ranges of the color values + let value = match key.format.unwrap().base_format().1 { + ChannelType::Unorm + | ChannelType::Snorm + | ChannelType::Ufloat + | ChannelType::Sfloat + | ChannelType::Uscaled + | ChannelType::Sscaled + | ChannelType::Srgb => hal::command::ClearColor { + float32: conv::map_color_f32(&at.clear_color), + }, + ChannelType::Sint => hal::command::ClearColor { + sint32: conv::map_color_i32(&at.clear_color), + }, + ChannelType::Uint => hal::command::ClearColor { + uint32: conv::map_color_u32(&at.clear_color), + }, + }; + Some(hal::command::ClearValue { color: value }) + } + } + }) + .chain(depth_stencil_attachment.and_then(|at| { + match (at.depth_load_op, at.stencil_load_op) { + (LoadOp::Load, LoadOp::Load) => None, + (LoadOp::Clear, _) | (_, LoadOp::Clear) => { + let value = hal::command::ClearDepthStencil { + depth: at.clear_depth, + stencil: at.clear_stencil, + }; + Some(hal::command::ClearValue { + depth_stencil: value, + }) + } + } + })); + + unsafe { + current_comb.begin_render_pass( + render_pass, + framebuffer, + rect, + clear_values, + hal::command::SubpassContents::Inline, + ); + current_comb.set_scissors(0, iter::once(&rect)); + current_comb.set_viewports( + 0, + iter::once(hal::pso::Viewport { + rect, + depth: 0.0 .. 
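// ---------------------------------------------------------------------------
// A minimal sketch (not part of the vendored code) of why `clear_values`
// above branches on the channel type: the hal clear color is a union, so the
// same RGBA value has to be written through the variant that matches the
// attachment's format (float, signed int, or unsigned int).
enum ClearColor {
    Float([f32; 4]),
    Sint([i32; 4]),
    Uint([u32; 4]),
}

enum ChannelType { Float, Sint, Uint }

fn map_clear(color: [f64; 4], ty: ChannelType) -> ClearColor {
    match ty {
        ChannelType::Float => ClearColor::Float(color.map(|c| c as f32)),
        ChannelType::Sint => ClearColor::Sint(color.map(|c| c as i32)),
        ChannelType::Uint => ClearColor::Uint(color.map(|c| c as u32)),
    }
}

fn main() {
    match map_clear([0.0, 0.5, 1.0, 1.0], ChannelType::Float) {
        ClearColor::Float(v) => assert_eq!(v[2], 1.0),
        _ => unreachable!(),
    }
}
// ---------------------------------------------------------------------------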
1.0, + }), + ); + } + + let context = RenderPassContext { + colors: color_attachments + .iter() + .map(|at| view_guard[at.attachment].format) + .collect(), + resolves: color_attachments + .iter() + .filter_map(|at| unsafe { at.resolve_target.as_ref() }) + .map(|resolve| view_guard[*resolve].format) + .collect(), + depth_stencil: depth_stencil_attachment.map(|at| view_guard[at.attachment].format), + }; + + RenderPass::new( + current_comb, + Stored { + value: encoder_id, + ref_count: cmb.life_guard.ref_count.clone(), + }, + context, + sample_count, + cmb.features.max_bind_groups, + ) + }; + hub.render_passes.register_identity(id_in, pass, &mut token) +} + +#[cfg(feature = "local")] +#[no_mangle] +pub extern "C" fn wgpu_command_encoder_begin_render_pass( + encoder_id: CommandEncoderId, + desc: &RenderPassDescriptor, +) -> RenderPassId { + gfx_select!(encoder_id => command_encoder_begin_render_pass(&*GLOBAL, encoder_id, desc, PhantomData)) +} + +pub fn command_encoder_begin_compute_pass( + global: &Global, + encoder_id: CommandEncoderId, + _desc: &ComputePassDescriptor, + id_in: Input, +) -> Output { + let hub = B::hub(global); + let mut token = Token::root(); + + let (mut cmb_guard, mut token) = hub.command_buffers.write(&mut token); + let cmb = &mut cmb_guard[encoder_id]; + + let raw = cmb.raw.pop().unwrap(); + let trackers = mem::replace(&mut cmb.trackers, TrackerSet::new(encoder_id.backend())); + let stored = Stored { + value: encoder_id, + ref_count: cmb.life_guard.ref_count.clone(), + }; + + let pass = ComputePass::new(raw, stored, trackers, cmb.features.max_bind_groups); + hub.compute_passes + .register_identity(id_in, pass, &mut token) +} + +#[cfg(feature = "local")] +#[no_mangle] +pub extern "C" fn wgpu_command_encoder_begin_compute_pass( + encoder_id: CommandEncoderId, + desc: Option<&ComputePassDescriptor>, +) -> ComputePassId { + let desc = &desc.cloned().unwrap_or_default(); + gfx_select!(encoder_id => command_encoder_begin_compute_pass(&*GLOBAL, encoder_id, desc, PhantomData)) +} diff --git a/dom/webgpu/wgpu-native/src/command/render.rs b/dom/webgpu/wgpu-native/src/command/render.rs new file mode 100644 index 000000000000..c99054446eca --- /dev/null +++ b/dom/webgpu/wgpu-native/src/command/render.rs @@ -0,0 +1,848 @@ + + + + +use crate::{ + command::bind::{Binder, LayoutChange}, + conv, + device::{RenderPassContext, BIND_BUFFER_ALIGNMENT, MAX_VERTEX_BUFFERS}, + hub::{GfxBackend, Global, Token}, + pipeline::{IndexFormat, InputStepMode, PipelineFlags}, + resource::BufferUsage, + track::{Stitch, TrackerSet}, + BindGroupId, + BufferAddress, + BufferId, + Color, + CommandBuffer, + CommandBufferId, + RenderPassId, + RenderPipelineId, + Stored, +}; +#[cfg(feature = "local")] +use crate::{gfx_select, hub::GLOBAL, RawString, RenderBundleId}; + +use hal::command::CommandBuffer as _; + +#[cfg(feature = "local")] +use std::slice; +use std::{iter, ops::Range}; + +#[derive(Debug, PartialEq)] +enum OptionalState { + Unused, + Required, + Set, +} + +impl OptionalState { + fn require(&mut self, require: bool) { + if require && *self == OptionalState::Unused { + *self = OptionalState::Required; + } + } +} + +#[derive(Debug, PartialEq)] +enum DrawError { + MissingBlendColor, + MissingStencilReference, + IncompatibleBindGroup { + index: u32, + + + }, +} + +#[derive(Debug)] +pub struct IndexState { + bound_buffer_view: Option<(BufferId, Range)>, + format: IndexFormat, + limit: u32, +} + +impl IndexState { + fn update_limit(&mut self) { + self.limit = match self.bound_buffer_view { + Some((_, ref 
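// ---------------------------------------------------------------------------
// A minimal sketch (not part of the vendored code) showing how the
// `OptionalState` machine defined in render.rs below is meant to be driven:
// binding a pipeline that uses blend color upgrades Unused to Required, the
// set call records Set, and a draw while still Required is rejected.
#[derive(Clone, Copy, Debug, PartialEq)]
enum OptionalState {
    Unused,
    Required,
    Set,
}

impl OptionalState {
    fn require(&mut self, require: bool) {
        if require && *self == OptionalState::Unused {
            *self = OptionalState::Required;
        }
    }
}

fn main() {
    let mut blend = OptionalState::Unused;
    blend.require(true); // pipeline uses BLEND_COLOR
    assert_eq!(blend, OptionalState::Required); // drawing now would error
    blend = OptionalState::Set; // wgpu_render_pass_set_blend_color
    blend.require(true);
    assert_eq!(blend, OptionalState::Set); // stays satisfied
}
// ---------------------------------------------------------------------------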
range)) => { + let shift = match self.format { + IndexFormat::Uint16 => 1, + IndexFormat::Uint32 => 2, + }; + ((range.end - range.start) >> shift) as u32 + } + None => 0, + } + } +} + +#[derive(Clone, Copy, Debug)] +pub struct VertexBufferState { + total_size: BufferAddress, + stride: BufferAddress, + rate: InputStepMode, +} + +impl VertexBufferState { + const EMPTY: Self = VertexBufferState { + total_size: 0, + stride: 0, + rate: InputStepMode::Vertex, + }; +} + +#[derive(Debug)] +pub struct VertexState { + inputs: [VertexBufferState; MAX_VERTEX_BUFFERS], + vertex_limit: u32, + instance_limit: u32, +} + +impl VertexState { + fn update_limits(&mut self) { + self.vertex_limit = !0; + self.instance_limit = !0; + for vbs in &self.inputs { + if vbs.stride == 0 { + continue; + } + let limit = (vbs.total_size / vbs.stride) as u32; + match vbs.rate { + InputStepMode::Vertex => self.vertex_limit = self.vertex_limit.min(limit), + InputStepMode::Instance => self.instance_limit = self.instance_limit.min(limit), + } + } + } +} + +#[derive(Debug)] +pub struct RenderPass { + raw: B::CommandBuffer, + cmb_id: Stored, + context: RenderPassContext, + binder: Binder, + trackers: TrackerSet, + blend_color_status: OptionalState, + stencil_reference_status: OptionalState, + index_state: IndexState, + vertex_state: VertexState, + sample_count: u8, +} + +impl RenderPass { + pub(crate) fn new( + raw: B::CommandBuffer, + cmb_id: Stored, + context: RenderPassContext, + sample_count: u8, + max_bind_groups: u32, + ) -> Self { + RenderPass { + raw, + cmb_id, + context, + binder: Binder::new(max_bind_groups), + trackers: TrackerSet::new(B::VARIANT), + blend_color_status: OptionalState::Unused, + stencil_reference_status: OptionalState::Unused, + index_state: IndexState { + bound_buffer_view: None, + format: IndexFormat::Uint16, + limit: 0, + }, + vertex_state: VertexState { + inputs: [VertexBufferState::EMPTY; MAX_VERTEX_BUFFERS], + vertex_limit: 0, + instance_limit: 0, + }, + sample_count, + } + } + + fn is_ready(&self) -> Result<(), DrawError> { + + let bind_mask = self.binder.invalid_mask(); + if bind_mask != 0 { + + return Err(DrawError::IncompatibleBindGroup { + index: bind_mask.trailing_zeros() as u32, + }); + } + if self.blend_color_status == OptionalState::Required { + return Err(DrawError::MissingBlendColor); + } + if self.stencil_reference_status == OptionalState::Required { + return Err(DrawError::MissingStencilReference); + } + Ok(()) + } +} + + + +pub fn render_pass_end_pass(global: &Global, pass_id: RenderPassId) { + let hub = B::hub(global); + let mut token = Token::root(); + let (mut cmb_guard, mut token) = hub.command_buffers.write(&mut token); + let (mut pass, mut token) = hub.render_passes.unregister(pass_id, &mut token); + unsafe { + pass.raw.end_render_pass(); + } + pass.trackers.optimize(); + let cmb = &mut cmb_guard[pass.cmb_id.value]; + let (buffer_guard, mut token) = hub.buffers.read(&mut token); + let (texture_guard, _) = hub.textures.read(&mut token); + + match cmb.raw.last_mut() { + Some(last) => { + log::trace!("Encoding barriers before pass {:?}", pass_id); + CommandBuffer::insert_barriers( + last, + &mut cmb.trackers, + &pass.trackers, + Stitch::Last, + &*buffer_guard, + &*texture_guard, + ); + unsafe { last.finish() }; + } + None => { + cmb.trackers.merge_extend(&pass.trackers); + } + } + + cmb.raw.push(pass.raw); +} + +#[cfg(feature = "local")] +#[no_mangle] +pub extern "C" fn wgpu_render_pass_end_pass(pass_id: RenderPassId) { + gfx_select!(pass_id => render_pass_end_pass(&*GLOBAL, 
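// ---------------------------------------------------------------------------
// A minimal sketch (not part of the vendored code) of the draw-limit
// bookkeeping in `IndexState::update_limit` and `VertexState::update_limits`
// above: the index limit is the bound byte range divided by the index size
// (a shift by 1 for u16, by 2 for u32), and the vertex limit is how many
// whole strides fit in the bound buffer.
fn index_limit(range_bytes: u64, index_is_u32: bool) -> u32 {
    let shift = if index_is_u32 { 2 } else { 1 };
    (range_bytes >> shift) as u32
}

fn vertex_limit(total_size: u64, stride: u64) -> u32 {
    if stride == 0 { !0 } else { (total_size / stride) as u32 }
}

fn main() {
    assert_eq!(index_limit(120, false), 60); // 120 bytes of u16 indices
    assert_eq!(index_limit(120, true), 30);  // 120 bytes of u32 indices
    assert_eq!(vertex_limit(1000, 24), 41);  // 41 complete 24-byte vertices
    // render_pass_draw later asserts first + count <= the computed limit.
}
// ---------------------------------------------------------------------------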
pass_id)) +} + +pub fn render_pass_set_bind_group( + global: &Global, + pass_id: RenderPassId, + index: u32, + bind_group_id: BindGroupId, + offsets: &[BufferAddress], +) { + let hub = B::hub(global); + let mut token = Token::root(); + let (pipeline_layout_guard, mut token) = hub.pipeline_layouts.read(&mut token); + let (bind_group_guard, mut token) = hub.bind_groups.read(&mut token); + + let (mut pass_guard, _) = hub.render_passes.write(&mut token); + let pass = &mut pass_guard[pass_id]; + + let bind_group = pass + .trackers + .bind_groups + .use_extend(&*bind_group_guard, bind_group_id, (), ()) + .unwrap(); + + assert_eq!(bind_group.dynamic_count, offsets.len()); + + if cfg!(debug_assertions) { + for off in offsets { + assert_eq!( + *off % BIND_BUFFER_ALIGNMENT, + 0, + "Misaligned dynamic buffer offset: {} does not align with {}", + off, + BIND_BUFFER_ALIGNMENT + ); + } + } + + pass.trackers.merge_extend(&bind_group.used); + + if let Some((pipeline_layout_id, follow_up_sets, follow_up_offsets)) = pass + .binder + .provide_entry(index as usize, bind_group_id, bind_group, offsets) + { + let bind_groups = iter::once(bind_group.raw.raw()) + .chain(follow_up_sets.map(|bg_id| bind_group_guard[bg_id].raw.raw())); + unsafe { + pass.raw.bind_graphics_descriptor_sets( + &&pipeline_layout_guard[pipeline_layout_id].raw, + index as usize, + bind_groups, + offsets + .iter() + .chain(follow_up_offsets) + .map(|&off| off as hal::command::DescriptorSetOffset), + ); + } + }; +} + +#[cfg(feature = "local")] +#[no_mangle] +pub extern "C" fn wgpu_render_pass_set_bind_group( + pass_id: RenderPassId, + index: u32, + bind_group_id: BindGroupId, + offsets: *const BufferAddress, + offsets_length: usize, +) { + let offsets = if offsets_length != 0 { + unsafe { slice::from_raw_parts(offsets, offsets_length) } + } else { + &[] + }; + gfx_select!(pass_id => render_pass_set_bind_group(&*GLOBAL, pass_id, index, bind_group_id, offsets)) +} + +#[cfg(feature = "local")] +#[no_mangle] +pub extern "C" fn wgpu_render_pass_push_debug_group(_pass_id: RenderPassId, _label: RawString) { + //TODO +} + +#[cfg(feature = "local")] +#[no_mangle] +pub extern "C" fn wgpu_render_pass_pop_debug_group(_pass_id: RenderPassId) { + //TODO +} + +#[cfg(feature = "local")] +#[no_mangle] +pub extern "C" fn wgpu_render_pass_insert_debug_marker(_pass_id: RenderPassId, _label: RawString) { + //TODO +} + +// Render-specific routines + +pub fn render_pass_set_index_buffer( + global: &Global, + pass_id: RenderPassId, + buffer_id: BufferId, + offset: BufferAddress, +) { + let hub = B::hub(global); + let mut token = Token::root(); + let (mut pass_guard, mut token) = hub.render_passes.write(&mut token); + let (buffer_guard, _) = hub.buffers.read(&mut token); + + let pass = &mut pass_guard[pass_id]; + let buffer = pass + .trackers + .buffers + .use_extend(&*buffer_guard, buffer_id, (), BufferUsage::INDEX) + .unwrap(); + assert!(buffer.usage.contains(BufferUsage::INDEX)); + + let range = offset .. 
buffer.size; + pass.index_state.bound_buffer_view = Some((buffer_id, range)); + pass.index_state.update_limit(); + + let view = hal::buffer::IndexBufferView { + buffer: &buffer.raw, + offset, + index_type: conv::map_index_format(pass.index_state.format), + }; + + unsafe { + pass.raw.bind_index_buffer(view); + } +} + +#[cfg(feature = "local")] +#[no_mangle] +pub extern "C" fn wgpu_render_pass_set_index_buffer( + pass_id: RenderPassId, + buffer_id: BufferId, + offset: BufferAddress, +) { + gfx_select!(pass_id => render_pass_set_index_buffer(&*GLOBAL, pass_id, buffer_id, offset)) +} + +pub fn render_pass_set_vertex_buffers( + global: &Global, + pass_id: RenderPassId, + start_slot: u32, + buffers: &[BufferId], + offsets: &[BufferAddress], +) { + let hub = B::hub(global); + let mut token = Token::root(); + assert_eq!(buffers.len(), offsets.len()); + + let (mut pass_guard, mut token) = hub.render_passes.write(&mut token); + let (buffer_guard, _) = hub.buffers.read(&mut token); + + let pass = &mut pass_guard[pass_id]; + for (vbs, (&id, &offset)) in pass.vertex_state.inputs[start_slot as usize ..] + .iter_mut() + .zip(buffers.iter().zip(offsets)) + { + let buffer = pass + .trackers + .buffers + .use_extend(&*buffer_guard, id, (), BufferUsage::VERTEX) + .unwrap(); + assert!(buffer.usage.contains(BufferUsage::VERTEX)); + + vbs.total_size = buffer.size - offset; + } + + pass.vertex_state.update_limits(); + + let buffers = buffers + .iter() + .map(|&id| &buffer_guard[id].raw) + .zip(offsets.iter().cloned()); + + unsafe { + pass.raw.bind_vertex_buffers(start_slot, buffers); + } +} + +#[cfg(feature = "local")] +#[no_mangle] +pub extern "C" fn wgpu_render_pass_set_vertex_buffers( + pass_id: RenderPassId, + start_slot: u32, + buffers: *const BufferId, + offsets: *const BufferAddress, + length: usize, +) { + let buffers = unsafe { slice::from_raw_parts(buffers, length) }; + let offsets = unsafe { slice::from_raw_parts(offsets, length) }; + gfx_select!(pass_id => render_pass_set_vertex_buffers(&*GLOBAL, pass_id, start_slot, buffers, offsets)) +} + +pub fn render_pass_draw( + global: &Global, + pass_id: RenderPassId, + vertex_count: u32, + instance_count: u32, + first_vertex: u32, + first_instance: u32, +) { + let hub = B::hub(global); + let mut token = Token::root(); + let (mut pass_guard, _) = hub.render_passes.write(&mut token); + let pass = &mut pass_guard[pass_id]; + pass.is_ready().unwrap(); + + assert!( + first_vertex + vertex_count <= pass.vertex_state.vertex_limit, + "Vertex out of range!" + ); + assert!( + first_instance + instance_count <= pass.vertex_state.instance_limit, + "Instance out of range!" + ); + + unsafe { + pass.raw.draw( + first_vertex .. first_vertex + vertex_count, + first_instance .. 
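// ---------------------------------------------------------------------------
// A minimal sketch (not part of the vendored code) of the range validation in
// `render_pass_draw` above, which keeps `first + count` within the limit
// derived from the bound buffers so the GPU never reads past the end. The
// checked_add here additionally guards against u32 overflow; the patch
// asserts the same condition with plain addition.
fn check_draw(first_vertex: u32, vertex_count: u32, vertex_limit: u32) -> Result<(), &'static str> {
    match first_vertex.checked_add(vertex_count) {
        Some(end) if end <= vertex_limit => Ok(()),
        _ => Err("Vertex out of range!"),
    }
}

fn main() {
    assert!(check_draw(0, 36, 36).is_ok());
    assert!(check_draw(1, 36, 36).is_err()); // would read vertex index 36
}
// ---------------------------------------------------------------------------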
first_instance + instance_count, + ); + } +} + +#[cfg(feature = "local")] +#[no_mangle] +pub extern "C" fn wgpu_render_pass_draw( + pass_id: RenderPassId, + vertex_count: u32, + instance_count: u32, + first_vertex: u32, + first_instance: u32, +) { + gfx_select!(pass_id => render_pass_draw(&*GLOBAL, pass_id, vertex_count, instance_count, first_vertex, first_instance)) +} + +pub fn render_pass_draw_indirect( + global: &Global, + pass_id: RenderPassId, + indirect_buffer_id: BufferId, + indirect_offset: BufferAddress, +) { + let hub = B::hub(global); + let mut token = Token::root(); + let (mut pass_guard, _) = hub.render_passes.write(&mut token); + let (buffer_guard, _) = hub.buffers.read(&mut token); + let pass = &mut pass_guard[pass_id]; + pass.is_ready().unwrap(); + + let buffer = pass + .trackers + .buffers + .use_extend( + &*buffer_guard, + indirect_buffer_id, + (), + BufferUsage::INDIRECT, + ) + .unwrap(); + assert!(buffer.usage.contains(BufferUsage::INDIRECT)); + + unsafe { + pass.raw.draw_indirect(&buffer.raw, indirect_offset, 1, 0); + } +} + +#[cfg(feature = "local")] +#[no_mangle] +pub extern "C" fn wgpu_render_pass_draw_indirect( + pass_id: RenderPassId, + indirect_buffer_id: BufferId, + indirect_offset: BufferAddress, +) { + gfx_select!(pass_id => render_pass_draw_indirect(&*GLOBAL, pass_id, indirect_buffer_id, indirect_offset)) +} + +pub fn render_pass_draw_indexed( + global: &Global, + pass_id: RenderPassId, + index_count: u32, + instance_count: u32, + first_index: u32, + base_vertex: i32, + first_instance: u32, +) { + let hub = B::hub(global); + let mut token = Token::root(); + let (mut pass_guard, _) = hub.render_passes.write(&mut token); + let pass = &mut pass_guard[pass_id]; + pass.is_ready().unwrap(); + + //TODO: validate that base_vertex + max_index() is within the provided range + assert!( + first_index + index_count <= pass.index_state.limit, + "Index out of range!" + ); + assert!( + first_instance + instance_count <= pass.vertex_state.instance_limit, + "Instance out of range!" + ); + + unsafe { + pass.raw.draw_indexed( + first_index .. first_index + index_count, + base_vertex, + first_instance .. 
first_instance + instance_count, + ); + } +} + +#[cfg(feature = "local")] +#[no_mangle] +pub extern "C" fn wgpu_render_pass_draw_indexed( + pass_id: RenderPassId, + index_count: u32, + instance_count: u32, + first_index: u32, + base_vertex: i32, + first_instance: u32, +) { + gfx_select!(pass_id => render_pass_draw_indexed(&*GLOBAL, pass_id, index_count, instance_count, first_index, base_vertex, first_instance)) +} + +pub fn render_pass_draw_indexed_indirect( + global: &Global, + pass_id: RenderPassId, + indirect_buffer_id: BufferId, + indirect_offset: BufferAddress, +) { + let hub = B::hub(global); + let mut token = Token::root(); + let (mut pass_guard, _) = hub.render_passes.write(&mut token); + let (buffer_guard, _) = hub.buffers.read(&mut token); + let pass = &mut pass_guard[pass_id]; + pass.is_ready().unwrap(); + + let buffer = pass + .trackers + .buffers + .use_extend( + &*buffer_guard, + indirect_buffer_id, + (), + BufferUsage::INDIRECT, + ) + .unwrap(); + assert!(buffer.usage.contains(BufferUsage::INDIRECT)); + + unsafe { + pass.raw + .draw_indexed_indirect(&buffer.raw, indirect_offset, 1, 0); + } +} + +#[cfg(feature = "local")] +#[no_mangle] +pub extern "C" fn wgpu_render_pass_draw_indexed_indirect( + pass_id: RenderPassId, + indirect_buffer_id: BufferId, + indirect_offset: BufferAddress, +) { + gfx_select!(pass_id => render_pass_draw_indexed_indirect(&*GLOBAL, pass_id, indirect_buffer_id, indirect_offset)) +} + +pub fn render_pass_set_pipeline( + global: &Global, + pass_id: RenderPassId, + pipeline_id: RenderPipelineId, +) { + let hub = B::hub(global); + let mut token = Token::root(); + let (pipeline_layout_guard, mut token) = hub.pipeline_layouts.read(&mut token); + let (bind_group_guard, mut token) = hub.bind_groups.read(&mut token); + let (mut pass_guard, mut token) = hub.render_passes.write(&mut token); + let pass = &mut pass_guard[pass_id]; + let (pipeline_guard, mut token) = hub.render_pipelines.read(&mut token); + let pipeline = &pipeline_guard[pipeline_id]; + + assert!( + pass.context.compatible(&pipeline.pass_context), + "The render pipeline is not compatible with the pass!" + ); + assert_eq!( + pipeline.sample_count, pass.sample_count, + "The render pipeline and renderpass have mismatching sample_count" + ); + + pass.blend_color_status + .require(pipeline.flags.contains(PipelineFlags::BLEND_COLOR)); + pass.stencil_reference_status + .require(pipeline.flags.contains(PipelineFlags::STENCIL_REFERENCE)); + + unsafe { + pass.raw.bind_graphics_pipeline(&pipeline.raw); + } + + // Rebind resource + if pass.binder.pipeline_layout_id != Some(pipeline.layout_id.clone()) { + let pipeline_layout = &pipeline_layout_guard[pipeline.layout_id]; + pass.binder.pipeline_layout_id = Some(pipeline.layout_id.clone()); + pass.binder + .reset_expectations(pipeline_layout.bind_group_layout_ids.len()); + let mut is_compatible = true; + + for (index, (entry, &bgl_id)) in pass + .binder + .entries + .iter_mut() + .zip(&pipeline_layout.bind_group_layout_ids) + .enumerate() + { + match entry.expect_layout(bgl_id) { + LayoutChange::Match(bg_id, offsets) if is_compatible => { + let desc_set = bind_group_guard[bg_id].raw.raw(); + unsafe { + pass.raw.bind_graphics_descriptor_sets( + &pipeline_layout.raw, + index, + iter::once(desc_set), + offsets.iter().map(|offset| *offset as u32), + ); + } + } + LayoutChange::Match(..) 
| LayoutChange::Unchanged => {} + LayoutChange::Mismatch => { + is_compatible = false; + } + } + } + } + + // Rebind index buffer if the index format has changed with the pipeline switch + if pass.index_state.format != pipeline.index_format { + pass.index_state.format = pipeline.index_format; + pass.index_state.update_limit(); + + if let Some((buffer_id, ref range)) = pass.index_state.bound_buffer_view { + let (buffer_guard, _) = hub.buffers.read(&mut token); + let buffer = pass + .trackers + .buffers + .use_extend(&*buffer_guard, buffer_id, (), BufferUsage::INDEX) + .unwrap(); + + let view = hal::buffer::IndexBufferView { + buffer: &buffer.raw, + offset: range.start, + index_type: conv::map_index_format(pass.index_state.format), + }; + + unsafe { + pass.raw.bind_index_buffer(view); + } + } + } + // Update vertex buffer limits + for (vbs, &(stride, rate)) in pass + .vertex_state + .inputs + .iter_mut() + .zip(&pipeline.vertex_strides) + { + vbs.stride = stride; + vbs.rate = rate; + } + for vbs in pass.vertex_state.inputs[pipeline.vertex_strides.len() ..].iter_mut() { + vbs.stride = 0; + vbs.rate = InputStepMode::Vertex; + } + pass.vertex_state.update_limits(); +} + +#[cfg(feature = "local")] +#[no_mangle] +pub extern "C" fn wgpu_render_pass_set_pipeline( + pass_id: RenderPassId, + pipeline_id: RenderPipelineId, +) { + gfx_select!(pass_id => render_pass_set_pipeline(&*GLOBAL, pass_id, pipeline_id)) +} + +pub fn render_pass_set_blend_color( + global: &Global, + pass_id: RenderPassId, + color: &Color, +) { + let hub = B::hub(global); + let mut token = Token::root(); + let (mut pass_guard, _) = hub.render_passes.write(&mut token); + let pass = &mut pass_guard[pass_id]; + + pass.blend_color_status = OptionalState::Set; + + unsafe { + pass.raw.set_blend_constants(conv::map_color_f32(color)); + } +} + +#[cfg(feature = "local")] +#[no_mangle] +pub extern "C" fn wgpu_render_pass_set_blend_color(pass_id: RenderPassId, color: &Color) { + gfx_select!(pass_id => render_pass_set_blend_color(&*GLOBAL, pass_id, color)) +} + +pub fn render_pass_set_stencil_reference( + global: &Global, + pass_id: RenderPassId, + value: u32, +) { + let hub = B::hub(global); + let mut token = Token::root(); + let (mut pass_guard, _) = hub.render_passes.write(&mut token); + let pass = &mut pass_guard[pass_id]; + + pass.stencil_reference_status = OptionalState::Set; + + unsafe { + pass.raw.set_stencil_reference(hal::pso::Face::all(), value); + } +} + +#[cfg(feature = "local")] +#[no_mangle] +pub extern "C" fn wgpu_render_pass_set_stencil_reference(pass_id: RenderPassId, value: u32) { + gfx_select!(pass_id => render_pass_set_stencil_reference(&*GLOBAL, pass_id, value)) +} + +pub fn render_pass_set_viewport( + global: &Global, + pass_id: RenderPassId, + x: f32, + y: f32, + w: f32, + h: f32, + min_depth: f32, + max_depth: f32, +) { + let hub = B::hub(global); + let mut token = Token::root(); + let (mut pass_guard, _) = hub.render_passes.write(&mut token); + let pass = &mut pass_guard[pass_id]; + + unsafe { + use std::convert::TryFrom; + use std::i16; + + pass.raw.set_viewports( + 0, + &[hal::pso::Viewport { + rect: hal::pso::Rect { + x: i16::try_from(x.round() as i64).unwrap_or(0), + y: i16::try_from(y.round() as i64).unwrap_or(0), + w: i16::try_from(w.round() as i64).unwrap_or(i16::MAX), + h: i16::try_from(h.round() as i64).unwrap_or(i16::MAX), + }, + depth: min_depth .. 
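// ---------------------------------------------------------------------------
// A minimal sketch (not part of the vendored code) of the saturating
// conversion used by `render_pass_set_viewport` above: hal rectangles are
// i16, so out-of-range f32 inputs are clamped rather than panicking --
// origins fall back to 0 and extents to i16::MAX.
use std::convert::TryFrom;

fn clamp_origin(v: f32) -> i16 {
    i16::try_from(v.round() as i64).unwrap_or(0)
}

fn clamp_extent(v: f32) -> i16 {
    i16::try_from(v.round() as i64).unwrap_or(i16::MAX)
}

fn main() {
    assert_eq!(clamp_origin(10.4), 10);
    assert_eq!(clamp_origin(1.0e9), 0);        // too large for i16 -> 0
    assert_eq!(clamp_extent(1.0e9), i16::MAX); // extents saturate instead
}
// ---------------------------------------------------------------------------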
max_depth, + }], + ); + } +} + +#[cfg(feature = "local")] +#[no_mangle] +pub extern "C" fn wgpu_render_pass_set_viewport( + pass_id: RenderPassId, + x: f32, + y: f32, + w: f32, + h: f32, + min_depth: f32, + max_depth: f32, +) { + gfx_select!(pass_id => render_pass_set_viewport(&*GLOBAL, pass_id, x, y, w, h, min_depth, max_depth)) +} + +pub fn render_pass_set_scissor_rect( + global: &Global, + pass_id: RenderPassId, + x: u32, + y: u32, + w: u32, + h: u32, +) { + let hub = B::hub(global); + let mut token = Token::root(); + let (mut pass_guard, _) = hub.render_passes.write(&mut token); + let pass = &mut pass_guard[pass_id]; + + unsafe { + use std::convert::TryFrom; + use std::i16; + + pass.raw.set_scissors( + 0, + &[hal::pso::Rect { + x: i16::try_from(x).unwrap_or(0), + y: i16::try_from(y).unwrap_or(0), + w: i16::try_from(w).unwrap_or(i16::MAX), + h: i16::try_from(h).unwrap_or(i16::MAX), + }], + ); + } +} + +#[cfg(feature = "local")] +#[no_mangle] +pub extern "C" fn wgpu_render_pass_set_scissor_rect( + pass_id: RenderPassId, + x: u32, + y: u32, + w: u32, + h: u32, +) { + gfx_select!(pass_id => render_pass_set_scissor_rect(&*GLOBAL, pass_id, x, y, w, h)) +} + +#[cfg(feature = "local")] +#[no_mangle] +pub extern "C" fn wgpu_render_pass_execute_bundles( + _pass_id: RenderPassId, + _bundles: *const RenderBundleId, + _bundles_length: usize, +) { + unimplemented!() +} diff --git a/dom/webgpu/wgpu-native/src/command/transfer.rs b/dom/webgpu/wgpu-native/src/command/transfer.rs new file mode 100644 index 000000000000..a5e6930a0b08 --- /dev/null +++ b/dom/webgpu/wgpu-native/src/command/transfer.rs @@ -0,0 +1,421 @@ + + + + +use crate::{ + conv, + device::{all_buffer_stages, all_image_stages}, + hub::{GfxBackend, Global, Token}, + BufferAddress, + BufferId, + BufferUsage, + CommandEncoderId, + Extent3d, + Origin3d, + TextureId, + TextureUsage, +}; +#[cfg(feature = "local")] +use crate::{gfx_select, hub::GLOBAL}; + +use hal::command::CommandBuffer as _; + +use std::iter; + +const BITS_PER_BYTE: u32 = 8; + +#[repr(C)] +#[derive(Debug)] +pub struct BufferCopyView { + pub buffer: BufferId, + pub offset: BufferAddress, + pub row_pitch: u32, + pub image_height: u32, +} + +#[repr(C)] +#[derive(Debug)] +pub struct TextureCopyView { + pub texture: TextureId, + pub mip_level: u32, + pub array_layer: u32, + pub origin: Origin3d, +} + +impl TextureCopyView { + + + fn to_selector(&self, aspects: hal::format::Aspects) -> hal::image::SubresourceRange { + let level = self.mip_level as hal::image::Level; + let layer = self.array_layer as hal::image::Layer; + hal::image::SubresourceRange { + aspects, + levels: level .. level + 1, + layers: layer .. layer + 1, + } + } + + fn to_sub_layers(&self, aspects: hal::format::Aspects) -> hal::image::SubresourceLayers { + let layer = self.array_layer as hal::image::Layer; + hal::image::SubresourceLayers { + aspects, + level: self.mip_level as hal::image::Level, + layers: layer .. 
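// ---------------------------------------------------------------------------
// A minimal sketch (not part of the vendored code) of the single-element
// subresource ranges built by `TextureCopyView::to_selector` and
// `to_sub_layers` above: a copy touches exactly one mip level and one array
// layer, so both become half-open ranges of length one.
use std::ops::Range;

#[derive(Debug, PartialEq)]
struct Selector {
    levels: Range<u8>,
    layers: Range<u16>,
}

fn to_selector(mip_level: u32, array_layer: u32) -> Selector {
    let level = mip_level as u8;
    let layer = array_layer as u16;
    Selector {
        levels: level .. level + 1,
        layers: layer .. layer + 1,
    }
}

fn main() {
    let sel = to_selector(2, 5);
    assert_eq!(sel.levels, 2 .. 3);
    assert_eq!(sel.layers, 5 .. 6);
}
// ---------------------------------------------------------------------------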
layer + 1, + } + } +} + +pub fn command_encoder_copy_buffer_to_buffer( + global: &Global, + command_encoder_id: CommandEncoderId, + source: BufferId, + source_offset: BufferAddress, + destination: BufferId, + destination_offset: BufferAddress, + size: BufferAddress, +) { + let hub = B::hub(global); + let mut token = Token::root(); + + let (mut cmb_guard, mut token) = hub.command_buffers.write(&mut token); + let cmb = &mut cmb_guard[command_encoder_id]; + let (buffer_guard, _) = hub.buffers.read(&mut token); + + + let mut barriers = Vec::new(); + + let (src_buffer, src_pending) = + cmb.trackers + .buffers + .use_replace(&*buffer_guard, source, (), BufferUsage::COPY_SRC); + assert!(src_buffer.usage.contains(BufferUsage::COPY_SRC)); + + barriers.extend(src_pending.map(|pending| hal::memory::Barrier::Buffer { + states: pending.to_states(), + target: &src_buffer.raw, + families: None, + range: None .. None, + })); + + let (dst_buffer, dst_pending) = + cmb.trackers + .buffers + .use_replace(&*buffer_guard, destination, (), BufferUsage::COPY_DST); + assert!(dst_buffer.usage.contains(BufferUsage::COPY_DST)); + + barriers.extend(dst_pending.map(|pending| hal::memory::Barrier::Buffer { + states: pending.to_states(), + target: &dst_buffer.raw, + families: None, + range: None .. None, + })); + + let region = hal::command::BufferCopy { + src: source_offset, + dst: destination_offset, + size, + }; + let cmb_raw = cmb.raw.last_mut().unwrap(); + unsafe { + cmb_raw.pipeline_barrier( + all_buffer_stages() .. all_buffer_stages(), + hal::memory::Dependencies::empty(), + barriers, + ); + cmb_raw.copy_buffer(&src_buffer.raw, &dst_buffer.raw, iter::once(region)); + } +} + +#[cfg(feature = "local")] +#[no_mangle] +pub extern "C" fn wgpu_command_encoder_copy_buffer_to_buffer( + command_encoder_id: CommandEncoderId, + source: BufferId, + source_offset: BufferAddress, + destination: BufferId, + destination_offset: BufferAddress, + size: BufferAddress, +) { + gfx_select!(command_encoder_id => command_encoder_copy_buffer_to_buffer( + &*GLOBAL, + command_encoder_id, + source, source_offset, + destination, + destination_offset, + size)) +} + +pub fn command_encoder_copy_buffer_to_texture( + global: &Global, + command_encoder_id: CommandEncoderId, + source: &BufferCopyView, + destination: &TextureCopyView, + copy_size: Extent3d, +) { + let hub = B::hub(global); + let mut token = Token::root(); + let (mut cmb_guard, mut token) = hub.command_buffers.write(&mut token); + let cmb = &mut cmb_guard[command_encoder_id]; + let (buffer_guard, mut token) = hub.buffers.read(&mut token); + let (texture_guard, _) = hub.textures.read(&mut token); + let aspects = texture_guard[destination.texture].full_range.aspects; + + let (src_buffer, src_pending) = + cmb.trackers + .buffers + .use_replace(&*buffer_guard, source.buffer, (), BufferUsage::COPY_SRC); + assert!(src_buffer.usage.contains(BufferUsage::COPY_SRC)); + + let src_barriers = src_pending.map(|pending| hal::memory::Barrier::Buffer { + states: pending.to_states(), + target: &src_buffer.raw, + families: None, + range: None .. 
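// ---------------------------------------------------------------------------
// A minimal sketch (not part of the vendored code) of the usage checks that
// guard every copy above: buffer usages are bitflags fixed at creation, and
// a copy is legal only if the source carries COPY_SRC and the destination
// COPY_DST. The flag values here are stand-ins.
const COPY_SRC: u32 = 1 << 0;
const COPY_DST: u32 = 1 << 1;
const VERTEX: u32 = 1 << 2;

fn contains(usage: u32, required: u32) -> bool {
    usage & required == required
}

fn main() {
    let src = COPY_SRC | VERTEX;
    let dst = COPY_DST;
    assert!(contains(src, COPY_SRC));
    assert!(contains(dst, COPY_DST));
    assert!(!contains(dst, COPY_SRC)); // copying *from* this buffer would assert
}
// ---------------------------------------------------------------------------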
None, + }); + + let (dst_texture, dst_pending) = cmb.trackers.textures.use_replace( + &*texture_guard, + destination.texture, + destination.to_selector(aspects), + TextureUsage::COPY_DST, + ); + assert!(dst_texture.usage.contains(TextureUsage::COPY_DST)); + + let dst_barriers = dst_pending.map(|pending| hal::memory::Barrier::Image { + states: pending.to_states(), + target: &dst_texture.raw, + families: None, + range: pending.selector, + }); + + let aspects = dst_texture.full_range.aspects; + let bytes_per_texel = conv::map_texture_format(dst_texture.format, cmb.features) + .surface_desc() + .bits as u32 + / BITS_PER_BYTE; + let buffer_width = source.row_pitch / bytes_per_texel; + assert_eq!(source.row_pitch % bytes_per_texel, 0); + let region = hal::command::BufferImageCopy { + buffer_offset: source.offset, + buffer_width, + buffer_height: source.image_height, + image_layers: destination.to_sub_layers(aspects), + image_offset: conv::map_origin(destination.origin), + image_extent: conv::map_extent(copy_size), + }; + let cmb_raw = cmb.raw.last_mut().unwrap(); + let stages = all_buffer_stages() | all_image_stages(); + unsafe { + cmb_raw.pipeline_barrier( + stages .. stages, + hal::memory::Dependencies::empty(), + src_barriers.chain(dst_barriers), + ); + cmb_raw.copy_buffer_to_image( + &src_buffer.raw, + &dst_texture.raw, + hal::image::Layout::TransferDstOptimal, + iter::once(region), + ); + } +} + +#[cfg(feature = "local")] +#[no_mangle] +pub extern "C" fn wgpu_command_encoder_copy_buffer_to_texture( + command_encoder_id: CommandEncoderId, + source: &BufferCopyView, + destination: &TextureCopyView, + copy_size: Extent3d, +) { + gfx_select!(command_encoder_id => command_encoder_copy_buffer_to_texture( + &*GLOBAL, + command_encoder_id, + source, + destination, + copy_size)) +} + +pub fn command_encoder_copy_texture_to_buffer( + global: &Global, + command_encoder_id: CommandEncoderId, + source: &TextureCopyView, + destination: &BufferCopyView, + copy_size: Extent3d, +) { + let hub = B::hub(global); + let mut token = Token::root(); + let (mut cmb_guard, mut token) = hub.command_buffers.write(&mut token); + let cmb = &mut cmb_guard[command_encoder_id]; + let (buffer_guard, mut token) = hub.buffers.read(&mut token); + let (texture_guard, _) = hub.textures.read(&mut token); + let aspects = texture_guard[source.texture].full_range.aspects; + + let (src_texture, src_pending) = cmb.trackers.textures.use_replace( + &*texture_guard, + source.texture, + source.to_selector(aspects), + TextureUsage::COPY_SRC, + ); + assert!(src_texture.usage.contains(TextureUsage::COPY_SRC)); + + let src_barriers = src_pending.map(|pending| hal::memory::Barrier::Image { + states: pending.to_states(), + target: &src_texture.raw, + families: None, + range: pending.selector, + }); + + let (dst_buffer, dst_barriers) = cmb.trackers.buffers.use_replace( + &*buffer_guard, + destination.buffer, + (), + BufferUsage::COPY_DST, + ); + assert!(dst_buffer.usage.contains(BufferUsage::COPY_DST)); + + let dst_barrier = dst_barriers.map(|pending| hal::memory::Barrier::Buffer { + states: pending.to_states(), + target: &dst_buffer.raw, + families: None, + range: None .. 
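// ---------------------------------------------------------------------------
// A minimal sketch (not part of the vendored code) of the row-pitch
// arithmetic in `command_encoder_copy_buffer_to_texture` above: hal takes
// the buffer width in texels while WebGPU supplies a pitch in bytes, so the
// pitch must divide evenly by the texel size of the format.
const BITS_PER_BYTE: u32 = 8;

fn buffer_width_texels(row_pitch_bytes: u32, format_bits: u32) -> Result<u32, &'static str> {
    let bytes_per_texel = format_bits / BITS_PER_BYTE;
    if row_pitch_bytes % bytes_per_texel != 0 {
        return Err("row_pitch is not a whole number of texels");
    }
    Ok(row_pitch_bytes / bytes_per_texel)
}

fn main() {
    // 256-byte rows of a 32-bit format (e.g. RGBA8) -> 64 texels per row.
    assert_eq!(buffer_width_texels(256, 32), Ok(64));
    assert!(buffer_width_texels(250, 32).is_err());
}
// ---------------------------------------------------------------------------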
None, + }); + + let aspects = src_texture.full_range.aspects; + let bytes_per_texel = conv::map_texture_format(src_texture.format, cmb.features) + .surface_desc() + .bits as u32 + / BITS_PER_BYTE; + let buffer_width = destination.row_pitch / bytes_per_texel; + assert_eq!(destination.row_pitch % bytes_per_texel, 0); + let region = hal::command::BufferImageCopy { + buffer_offset: destination.offset, + buffer_width, + buffer_height: destination.image_height, + image_layers: source.to_sub_layers(aspects), + image_offset: conv::map_origin(source.origin), + image_extent: conv::map_extent(copy_size), + }; + let cmb_raw = cmb.raw.last_mut().unwrap(); + let stages = all_buffer_stages() | all_image_stages(); + unsafe { + cmb_raw.pipeline_barrier( + stages .. stages, + hal::memory::Dependencies::empty(), + src_barriers.chain(dst_barrier), + ); + cmb_raw.copy_image_to_buffer( + &src_texture.raw, + hal::image::Layout::TransferSrcOptimal, + &dst_buffer.raw, + iter::once(region), + ); + } +} + +#[cfg(feature = "local")] +#[no_mangle] +pub extern "C" fn wgpu_command_encoder_copy_texture_to_buffer( + command_encoder_id: CommandEncoderId, + source: &TextureCopyView, + destination: &BufferCopyView, + copy_size: Extent3d, +) { + gfx_select!(command_encoder_id => command_encoder_copy_texture_to_buffer( + &*GLOBAL, + command_encoder_id, + source, + destination, + copy_size)) +} + +pub fn command_encoder_copy_texture_to_texture( + global: &Global, + command_encoder_id: CommandEncoderId, + source: &TextureCopyView, + destination: &TextureCopyView, + copy_size: Extent3d, +) { + let hub = B::hub(global); + let mut token = Token::root(); + + let (mut cmb_guard, mut token) = hub.command_buffers.write(&mut token); + let cmb = &mut cmb_guard[command_encoder_id]; + let (_, mut token) = hub.buffers.read(&mut token); + let (texture_guard, _) = hub.textures.read(&mut token); + + + let mut barriers = Vec::new(); + let aspects = texture_guard[source.texture].full_range.aspects + & texture_guard[destination.texture].full_range.aspects; + + let (src_texture, src_pending) = cmb.trackers.textures.use_replace( + &*texture_guard, + source.texture, + source.to_selector(aspects), + TextureUsage::COPY_SRC, + ); + assert!(src_texture.usage.contains(TextureUsage::COPY_SRC)); + + barriers.extend(src_pending.map(|pending| hal::memory::Barrier::Image { + states: pending.to_states(), + target: &src_texture.raw, + families: None, + range: pending.selector, + })); + + let (dst_texture, dst_pending) = cmb.trackers.textures.use_replace( + &*texture_guard, + destination.texture, + destination.to_selector(aspects), + TextureUsage::COPY_DST, + ); + assert!(dst_texture.usage.contains(TextureUsage::COPY_DST)); + + barriers.extend(dst_pending.map(|pending| hal::memory::Barrier::Image { + states: pending.to_states(), + target: &dst_texture.raw, + families: None, + range: pending.selector, + })); + + let aspects = src_texture.full_range.aspects & dst_texture.full_range.aspects; + let region = hal::command::ImageCopy { + src_subresource: source.to_sub_layers(aspects), + src_offset: conv::map_origin(source.origin), + dst_subresource: destination.to_sub_layers(aspects), + dst_offset: conv::map_origin(destination.origin), + extent: conv::map_extent(copy_size), + }; + let cmb_raw = cmb.raw.last_mut().unwrap(); + unsafe { + cmb_raw.pipeline_barrier( + all_image_stages() .. 
all_image_stages(), + hal::memory::Dependencies::empty(), + barriers, + ); + cmb_raw.copy_image( + &src_texture.raw, + hal::image::Layout::TransferSrcOptimal, + &dst_texture.raw, + hal::image::Layout::TransferDstOptimal, + iter::once(region), + ); + } +} + +#[cfg(feature = "local")] +#[no_mangle] +pub extern "C" fn wgpu_command_encoder_copy_texture_to_texture( + command_encoder_id: CommandEncoderId, + source: &TextureCopyView, + destination: &TextureCopyView, + copy_size: Extent3d, +) { + gfx_select!(command_encoder_id => command_encoder_copy_texture_to_texture( + &*GLOBAL, + command_encoder_id, + source, + destination, + copy_size)) +} diff --git a/dom/webgpu/wgpu-native/src/conv.rs b/dom/webgpu/wgpu-native/src/conv.rs new file mode 100644 index 000000000000..83cd245874a5 --- /dev/null +++ b/dom/webgpu/wgpu-native/src/conv.rs @@ -0,0 +1,661 @@ + + + + +use crate::{binding_model, command, pipeline, resource, Color, Extent3d, Features, Origin3d}; + +pub fn map_buffer_usage( + usage: resource::BufferUsage, +) -> (hal::buffer::Usage, hal::memory::Properties) { + use crate::resource::BufferUsage as W; + use hal::buffer::Usage as U; + use hal::memory::Properties as P; + + let mut hal_memory = P::empty(); + if usage.contains(W::MAP_READ) { + hal_memory |= P::CPU_VISIBLE | P::CPU_CACHED; + } + if usage.contains(W::MAP_WRITE) { + hal_memory |= P::CPU_VISIBLE; + } + + let mut hal_usage = U::empty(); + if usage.contains(W::COPY_SRC) { + hal_usage |= U::TRANSFER_SRC; + } + if usage.contains(W::COPY_DST) { + hal_usage |= U::TRANSFER_DST; + } + if usage.contains(W::INDEX) { + hal_usage |= U::INDEX; + } + if usage.contains(W::VERTEX) { + hal_usage |= U::VERTEX; + } + if usage.contains(W::UNIFORM) { + hal_usage |= U::UNIFORM; + } + if usage.contains(W::STORAGE) { + hal_usage |= U::STORAGE; + } + if usage.contains(W::INDIRECT) { + hal_usage |= U::INDIRECT; + } + + (hal_usage, hal_memory) +} + +pub fn map_texture_usage( + usage: resource::TextureUsage, + aspects: hal::format::Aspects, +) -> hal::image::Usage { + use crate::resource::TextureUsage as W; + use hal::image::Usage as U; + + let mut value = U::empty(); + if usage.contains(W::COPY_SRC) { + value |= U::TRANSFER_SRC; + } + if usage.contains(W::COPY_DST) { + value |= U::TRANSFER_DST; + } + if usage.contains(W::SAMPLED) { + value |= U::SAMPLED; + } + if usage.contains(W::STORAGE) { + value |= U::STORAGE; + } + if usage.contains(W::OUTPUT_ATTACHMENT) { + if aspects.intersects(hal::format::Aspects::DEPTH | hal::format::Aspects::STENCIL) { + value |= U::DEPTH_STENCIL_ATTACHMENT; + } else { + value |= U::COLOR_ATTACHMENT; + } + } + + + value +} + +pub fn map_binding_type( + binding: &binding_model::BindGroupLayoutBinding, +) -> hal::pso::DescriptorType { + use crate::binding_model::BindingType as Bt; + use hal::pso::DescriptorType as H; + match binding.ty { + Bt::UniformBuffer => { + if binding.dynamic { + H::UniformBufferDynamic + } else { + H::UniformBuffer + } + } + Bt::StorageBuffer | Bt::ReadonlyStorageBuffer => { + if binding.dynamic { + H::StorageBufferDynamic + } else { + H::StorageBuffer + } + } + Bt::Sampler => H::Sampler, + Bt::SampledTexture => H::SampledImage, + Bt::StorageTexture => H::StorageImage, + } +} + +pub fn map_shader_stage_flags( + shader_stage_flags: binding_model::ShaderStage, +) -> hal::pso::ShaderStageFlags { + use crate::binding_model::ShaderStage as Ss; + use hal::pso::ShaderStageFlags as H; + + let mut value = H::empty(); + if shader_stage_flags.contains(Ss::VERTEX) { + value |= H::VERTEX; + } + if 
shader_stage_flags.contains(Ss::FRAGMENT) { + value |= H::FRAGMENT; + } + if shader_stage_flags.contains(Ss::COMPUTE) { + value |= H::COMPUTE; + } + value +} + +pub fn map_origin(origin: Origin3d) -> hal::image::Offset { + hal::image::Offset { + x: origin.x as i32, + y: origin.y as i32, + z: origin.z as i32, + } +} + +pub fn map_extent(extent: Extent3d) -> hal::image::Extent { + hal::image::Extent { + width: extent.width, + height: extent.height, + depth: extent.depth, + } +} + +pub fn map_primitive_topology(primitive_topology: pipeline::PrimitiveTopology) -> hal::pso::Primitive { + use crate::pipeline::PrimitiveTopology as Pt; + use hal::pso::Primitive as H; + match primitive_topology { + Pt::PointList => H::PointList, + Pt::LineList => H::LineList, + Pt::LineStrip => H::LineStrip, + Pt::TriangleList => H::TriangleList, + Pt::TriangleStrip => H::TriangleStrip, + } +} + +pub fn map_color_state_descriptor( + desc: &pipeline::ColorStateDescriptor, +) -> hal::pso::ColorBlendDesc { + let color_mask = desc.write_mask; + let blend_state = if desc.color_blend != pipeline::BlendDescriptor::REPLACE + || desc.alpha_blend != pipeline::BlendDescriptor::REPLACE + { + Some(hal::pso::BlendState { + color: map_blend_descriptor(&desc.color_blend), + alpha: map_blend_descriptor(&desc.alpha_blend), + }) + } else { + None + }; + hal::pso::ColorBlendDesc { + mask: map_color_write_flags(color_mask), + blend: blend_state, + } +} + +fn map_color_write_flags(flags: pipeline::ColorWrite) -> hal::pso::ColorMask { + use crate::pipeline::ColorWrite as Cw; + use hal::pso::ColorMask as H; + + let mut value = H::empty(); + if flags.contains(Cw::RED) { + value |= H::RED; + } + if flags.contains(Cw::GREEN) { + value |= H::GREEN; + } + if flags.contains(Cw::BLUE) { + value |= H::BLUE; + } + if flags.contains(Cw::ALPHA) { + value |= H::ALPHA; + } + value +} + +fn map_blend_descriptor(blend_desc: &pipeline::BlendDescriptor) -> hal::pso::BlendOp { + use crate::pipeline::BlendOperation as Bo; + use hal::pso::BlendOp as H; + match blend_desc.operation { + Bo::Add => H::Add { + src: map_blend_factor(blend_desc.src_factor), + dst: map_blend_factor(blend_desc.dst_factor), + }, + Bo::Subtract => H::Sub { + src: map_blend_factor(blend_desc.src_factor), + dst: map_blend_factor(blend_desc.dst_factor), + }, + Bo::ReverseSubtract => H::RevSub { + src: map_blend_factor(blend_desc.src_factor), + dst: map_blend_factor(blend_desc.dst_factor), + }, + Bo::Min => H::Min, + Bo::Max => H::Max, + } +} + +fn map_blend_factor(blend_factor: pipeline::BlendFactor) -> hal::pso::Factor { + use crate::pipeline::BlendFactor as Bf; + use hal::pso::Factor as H; + match blend_factor { + Bf::Zero => H::Zero, + Bf::One => H::One, + Bf::SrcColor => H::SrcColor, + Bf::OneMinusSrcColor => H::OneMinusSrcColor, + Bf::SrcAlpha => H::SrcAlpha, + Bf::OneMinusSrcAlpha => H::OneMinusSrcAlpha, + Bf::DstColor => H::DstColor, + Bf::OneMinusDstColor => H::OneMinusDstColor, + Bf::DstAlpha => H::DstAlpha, + Bf::OneMinusDstAlpha => H::OneMinusDstAlpha, + Bf::SrcAlphaSaturated => H::SrcAlphaSaturate, + Bf::BlendColor => H::ConstColor, + Bf::OneMinusBlendColor => H::OneMinusConstColor, + } +} + +pub fn map_depth_stencil_state_descriptor( + desc: &pipeline::DepthStencilStateDescriptor, +) -> hal::pso::DepthStencilDesc { + hal::pso::DepthStencilDesc { + depth: if desc.depth_write_enabled + || desc.depth_compare != resource::CompareFunction::Always + { + Some(hal::pso::DepthTest { + fun: map_compare_function(desc.depth_compare), + write: desc.depth_write_enabled, + }) + } else { + 
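
All of these conv routines follow one pattern: probe each WebGPU bitflag and OR in the gfx-hal equivalent, so an empty input simply falls out as an empty mask. A reduced sketch of the pattern using plain integer masks (the constants are invented stand-ins for the bitflags types in resource.rs and hal::memory):

// Stand-ins for resource::BufferUsage and hal::memory::Properties bits.
const MAP_READ: u32 = 1 << 0;
const MAP_WRITE: u32 = 1 << 1;
const CPU_VISIBLE: u32 = 1 << 0;
const CPU_CACHED: u32 = 1 << 1;

fn map_memory_properties(usage: u32) -> u32 {
    let mut props = 0;
    if usage & MAP_READ != 0 {
        // Readback wants cached, host-visible memory.
        props |= CPU_VISIBLE | CPU_CACHED;
    }
    if usage & MAP_WRITE != 0 {
        props |= CPU_VISIBLE;
    }
    props
}

fn main() {
    assert_eq!(map_memory_properties(MAP_READ | MAP_WRITE), CPU_VISIBLE | CPU_CACHED);
}
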
None + }, + depth_bounds: false, + stencil: if desc.stencil_read_mask != !0 + || desc.stencil_write_mask != !0 + || desc.stencil_front != pipeline::StencilStateFaceDescriptor::IGNORE + || desc.stencil_back != pipeline::StencilStateFaceDescriptor::IGNORE + { + Some(hal::pso::StencilTest { + faces: hal::pso::Sided { + front: map_stencil_face(&desc.stencil_front), + back: map_stencil_face(&desc.stencil_back), + }, + read_masks: hal::pso::State::Static(hal::pso::Sided::new(desc.stencil_read_mask)), + write_masks: hal::pso::State::Static(hal::pso::Sided::new(desc.stencil_write_mask)), + reference_values: if desc.needs_stencil_reference() { + hal::pso::State::Dynamic + } else { + hal::pso::State::Static(hal::pso::Sided::new(0)) + }, + }) + } else { + None + }, + } +} + +fn map_stencil_face( + stencil_state_face_desc: &pipeline::StencilStateFaceDescriptor, +) -> hal::pso::StencilFace { + hal::pso::StencilFace { + fun: map_compare_function(stencil_state_face_desc.compare), + op_fail: map_stencil_operation(stencil_state_face_desc.fail_op), + op_depth_fail: map_stencil_operation(stencil_state_face_desc.depth_fail_op), + op_pass: map_stencil_operation(stencil_state_face_desc.pass_op), + } +} + +pub fn map_compare_function(compare_function: resource::CompareFunction) -> hal::pso::Comparison { + use crate::resource::CompareFunction as Cf; + use hal::pso::Comparison as H; + match compare_function { + Cf::Never => H::Never, + Cf::Less => H::Less, + Cf::Equal => H::Equal, + Cf::LessEqual => H::LessEqual, + Cf::Greater => H::Greater, + Cf::NotEqual => H::NotEqual, + Cf::GreaterEqual => H::GreaterEqual, + Cf::Always => H::Always, + } +} + +fn map_stencil_operation(stencil_operation: pipeline::StencilOperation) -> hal::pso::StencilOp { + use crate::pipeline::StencilOperation as So; + use hal::pso::StencilOp as H; + match stencil_operation { + So::Keep => H::Keep, + So::Zero => H::Zero, + So::Replace => H::Replace, + So::Invert => H::Invert, + So::IncrementClamp => H::IncrementClamp, + So::DecrementClamp => H::DecrementClamp, + So::IncrementWrap => H::IncrementWrap, + So::DecrementWrap => H::DecrementWrap, + } +} + +pub(crate) fn map_texture_format( + texture_format: resource::TextureFormat, + features: Features, +) -> hal::format::Format { + use crate::resource::TextureFormat as Tf; + use hal::format::Format as H; + match texture_format { + + Tf::R8Unorm => H::R8Unorm, + Tf::R8Snorm => H::R8Snorm, + Tf::R8Uint => H::R8Uint, + Tf::R8Sint => H::R8Sint, + + + Tf::R16Unorm => H::R16Unorm, + Tf::R16Snorm => H::R16Snorm, + Tf::R16Uint => H::R16Uint, + Tf::R16Sint => H::R16Sint, + Tf::R16Float => H::R16Sfloat, + + Tf::Rg8Unorm => H::Rg8Unorm, + Tf::Rg8Snorm => H::Rg8Snorm, + Tf::Rg8Uint => H::Rg8Uint, + Tf::Rg8Sint => H::Rg8Sint, + + + Tf::R32Uint => H::R32Uint, + Tf::R32Sint => H::R32Sint, + Tf::R32Float => H::R32Sfloat, + Tf::Rg16Unorm => H::Rg16Unorm, + Tf::Rg16Snorm => H::Rg16Snorm, + Tf::Rg16Uint => H::Rg16Uint, + Tf::Rg16Sint => H::Rg16Sint, + Tf::Rg16Float => H::Rg16Sfloat, + Tf::Rgba8Unorm => H::Rgba8Unorm, + Tf::Rgba8UnormSrgb => H::Rgba8Srgb, + Tf::Rgba8Snorm => H::Rgba8Snorm, + Tf::Rgba8Uint => H::Rgba8Uint, + Tf::Rgba8Sint => H::Rgba8Sint, + Tf::Bgra8Unorm => H::Bgra8Unorm, + Tf::Bgra8UnormSrgb => H::Bgra8Srgb, + + + Tf::Rgb10a2Unorm => H::A2r10g10b10Unorm, + Tf::Rg11b10Float => H::B10g11r11Ufloat, + + + Tf::Rg32Uint => H::Rg32Uint, + Tf::Rg32Sint => H::Rg32Sint, + Tf::Rg32Float => H::Rg32Sfloat, + Tf::Rgba16Unorm => H::Rgba16Unorm, + Tf::Rgba16Snorm => H::Rgba16Snorm, + Tf::Rgba16Uint => H::Rgba16Uint, 
+ Tf::Rgba16Sint => H::Rgba16Sint, + Tf::Rgba16Float => H::Rgba16Sfloat, + + + Tf::Rgba32Uint => H::Rgba32Uint, + Tf::Rgba32Sint => H::Rgba32Sint, + Tf::Rgba32Float => H::Rgba32Sfloat, + + + Tf::Depth32Float => H::D32Sfloat, + Tf::Depth24Plus => { + if features.supports_texture_d24_s8 { + H::D24UnormS8Uint + } else { + H::D32Sfloat + } + } + Tf::Depth24PlusStencil8 => { + if features.supports_texture_d24_s8 { + H::D24UnormS8Uint + } else { + H::D32SfloatS8Uint + } + } + } +} + +pub fn map_vertex_format(vertex_format: pipeline::VertexFormat) -> hal::format::Format { + use crate::pipeline::VertexFormat as Vf; + use hal::format::Format as H; + match vertex_format { + Vf::Uchar2 => H::Rg8Uint, + Vf::Uchar4 => H::Rgba8Uint, + Vf::Char2 => H::Rg8Sint, + Vf::Char4 => H::Rgba8Sint, + Vf::Uchar2Norm => H::Rg8Unorm, + Vf::Uchar4Norm => H::Rgba8Unorm, + Vf::Char2Norm => H::Rg8Snorm, + Vf::Char4Norm => H::Rgba8Snorm, + Vf::Ushort2 => H::Rg16Uint, + Vf::Ushort4 => H::Rgba16Uint, + Vf::Short2 => H::Rg16Sint, + Vf::Short4 => H::Rgba16Sint, + Vf::Ushort2Norm => H::Rg16Unorm, + Vf::Ushort4Norm => H::Rgba16Unorm, + Vf::Short2Norm => H::Rg16Snorm, + Vf::Short4Norm => H::Rgba16Snorm, + Vf::Half2 => H::Rg16Sfloat, + Vf::Half4 => H::Rgba16Sfloat, + Vf::Float => H::R32Sfloat, + Vf::Float2 => H::Rg32Sfloat, + Vf::Float3 => H::Rgb32Sfloat, + Vf::Float4 => H::Rgba32Sfloat, + Vf::Uint => H::R32Uint, + Vf::Uint2 => H::Rg32Uint, + Vf::Uint3 => H::Rgb32Uint, + Vf::Uint4 => H::Rgba32Uint, + Vf::Int => H::R32Sint, + Vf::Int2 => H::Rg32Sint, + Vf::Int3 => H::Rgb32Sint, + Vf::Int4 => H::Rgba32Sint, + } +} + +fn checked_u32_as_u16(value: u32) -> u16 { + assert!(value <= ::std::u16::MAX as u32); + value as u16 +} + +pub fn map_texture_dimension_size( + dimension: resource::TextureDimension, + Extent3d { + width, + height, + depth, + }: Extent3d, + array_size: u32, + sample_size: u32, +) -> hal::image::Kind { + use crate::resource::TextureDimension::*; + use hal::image::Kind as H; + match dimension { + D1 => { + assert_eq!(height, 1); + assert_eq!(depth, 1); + assert_eq!(sample_size, 1); + H::D1(width, checked_u32_as_u16(array_size)) + } + D2 => { + assert_eq!(depth, 1); + assert!( + sample_size == 1 + || sample_size == 2 + || sample_size == 4 + || sample_size == 8 + || sample_size == 16 + || sample_size == 32, + "Invalid sample_count of {}", + sample_size + ); + H::D2( + width, + height, + checked_u32_as_u16(array_size), + sample_size as u8, + ) + } + D3 => { + assert_eq!(array_size, 1); + assert_eq!(sample_size, 1); + H::D3(width, height, depth) + } + } +} + +pub fn map_texture_view_dimension( + dimension: resource::TextureViewDimension, +) -> hal::image::ViewKind { + use crate::resource::TextureViewDimension::*; + use hal::image::ViewKind as H; + match dimension { + D1 => H::D1, + D2 => H::D2, + D2Array => H::D2Array, + Cube => H::Cube, + CubeArray => H::CubeArray, + D3 => H::D3, + } +} + +pub fn map_buffer_state(usage: resource::BufferUsage) -> hal::buffer::State { + use crate::resource::BufferUsage as W; + use hal::buffer::Access as A; + + let mut access = A::empty(); + if usage.contains(W::COPY_SRC) { + access |= A::TRANSFER_READ; + } + if usage.contains(W::COPY_DST) { + access |= A::TRANSFER_WRITE; + } + if usage.contains(W::INDEX) { + access |= A::INDEX_BUFFER_READ; + } + if usage.contains(W::VERTEX) { + access |= A::VERTEX_BUFFER_READ; + } + if usage.contains(W::UNIFORM) { + access |= A::UNIFORM_READ | A::SHADER_READ; + } + if usage.contains(W::STORAGE) { + access |= A::SHADER_WRITE; + } + + access +} + +pub fn 
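
The Depth24Plus formats are deliberately loose: when the adapter cannot do D24S8 (Features::supports_texture_d24_s8 is false), the mapping silently widens to a 32-bit float depth format. A standalone sketch of that selection (the enum here is a stand-in for hal::format::Format):

#[derive(Debug, PartialEq)]
enum HalFormat {
    D24UnormS8Uint,
    D32Sfloat,
}

fn map_depth24_plus(supports_d24_s8: bool) -> HalFormat {
    if supports_d24_s8 {
        HalFormat::D24UnormS8Uint
    } else {
        // Fall back to a wider format; the "24 plus" contract permits this.
        HalFormat::D32Sfloat
    }
}

fn main() {
    assert_eq!(map_depth24_plus(false), HalFormat::D32Sfloat);
}
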
map_texture_state( + usage: resource::TextureUsage, + aspects: hal::format::Aspects, +) -> hal::image::State { + use crate::resource::TextureUsage as W; + use hal::image::{Access as A, Layout as L}; + + let is_color = aspects.contains(hal::format::Aspects::COLOR); + let layout = match usage { + W::UNINITIALIZED => return (A::empty(), L::Undefined), + W::COPY_SRC => L::TransferSrcOptimal, + W::COPY_DST => L::TransferDstOptimal, + W::SAMPLED => L::ShaderReadOnlyOptimal, + W::OUTPUT_ATTACHMENT if is_color => L::ColorAttachmentOptimal, + W::OUTPUT_ATTACHMENT => L::DepthStencilAttachmentOptimal, + _ => L::General, + }; + + let mut access = A::empty(); + if usage.contains(W::COPY_SRC) { + access |= A::TRANSFER_READ; + } + if usage.contains(W::COPY_DST) { + access |= A::TRANSFER_WRITE; + } + if usage.contains(W::SAMPLED) { + access |= A::SHADER_READ; + } + if usage.contains(W::STORAGE) { + access |= A::SHADER_WRITE; + } + if usage.contains(W::OUTPUT_ATTACHMENT) { + + access |= if is_color { + A::COLOR_ATTACHMENT_WRITE + } else { + A::DEPTH_STENCIL_ATTACHMENT_WRITE + }; + } + + (access, layout) +} + +pub fn map_load_store_ops( + load: command::LoadOp, + store: command::StoreOp, +) -> hal::pass::AttachmentOps { + hal::pass::AttachmentOps { + load: match load { + command::LoadOp::Clear => hal::pass::AttachmentLoadOp::Clear, + command::LoadOp::Load => hal::pass::AttachmentLoadOp::Load, + }, + store: match store { + command::StoreOp::Clear => hal::pass::AttachmentStoreOp::DontCare, + command::StoreOp::Store => hal::pass::AttachmentStoreOp::Store, + }, + } +} + +pub fn map_color_f32(color: &Color) -> hal::pso::ColorValue { + [ + color.r as f32, + color.g as f32, + color.b as f32, + color.a as f32, + ] +} +pub fn map_color_i32(color: &Color) -> [i32; 4] { + [ + color.r as i32, + color.g as i32, + color.b as i32, + color.a as i32, + ] +} +pub fn map_color_u32(color: &Color) -> [u32; 4] { + [ + color.r as u32, + color.g as u32, + color.b as u32, + color.a as u32, + ] +} + +pub fn map_filter(filter: resource::FilterMode) -> hal::image::Filter { + match filter { + resource::FilterMode::Nearest => hal::image::Filter::Nearest, + resource::FilterMode::Linear => hal::image::Filter::Linear, + } +} + +pub fn map_wrap(address: resource::AddressMode) -> hal::image::WrapMode { + use crate::resource::AddressMode as Am; + use hal::image::WrapMode as W; + match address { + Am::ClampToEdge => W::Clamp, + Am::Repeat => W::Tile, + Am::MirrorRepeat => W::Mirror, + } +} + +pub fn map_rasterization_state_descriptor( + desc: &pipeline::RasterizationStateDescriptor, +) -> hal::pso::Rasterizer { + hal::pso::Rasterizer { + depth_clamping: false, + polygon_mode: hal::pso::PolygonMode::Fill, + cull_face: match desc.cull_mode { + pipeline::CullMode::None => hal::pso::Face::empty(), + pipeline::CullMode::Front => hal::pso::Face::FRONT, + pipeline::CullMode::Back => hal::pso::Face::BACK, + }, + front_face: match desc.front_face { + pipeline::FrontFace::Ccw => hal::pso::FrontFace::CounterClockwise, + pipeline::FrontFace::Cw => hal::pso::FrontFace::Clockwise, + }, + depth_bias: if desc.depth_bias != 0 + || desc.depth_bias_slope_scale != 0.0 + || desc.depth_bias_clamp != 0.0 + { + Some(hal::pso::State::Static(hal::pso::DepthBias { + const_factor: desc.depth_bias as f32, + slope_factor: desc.depth_bias_slope_scale, + clamp: desc.depth_bias_clamp, + })) + } else { + None + }, + conservative: false, + } +} + +pub fn map_index_format(index_format: pipeline::IndexFormat) -> hal::IndexType { + match index_format { + 
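
map_texture_state matches on the whole usage value rather than individual bits: only an exact single-usage state earns an optimal image layout, and any combination of usages degrades to General. A toy version of that decision with stand-in types:

#[derive(Debug, PartialEq)]
enum Layout { TransferSrc, TransferDst, ShaderReadOnly, General }

const COPY_SRC: u32 = 1 << 0;
const COPY_DST: u32 = 1 << 1;
const SAMPLED: u32 = 1 << 2;

fn layout_for(usage: u32) -> Layout {
    // Exact single-usage values pick an optimal layout; mixtures cannot.
    if usage == COPY_SRC {
        Layout::TransferSrc
    } else if usage == COPY_DST {
        Layout::TransferDst
    } else if usage == SAMPLED {
        Layout::ShaderReadOnly
    } else {
        Layout::General
    }
}

fn main() {
    assert_eq!(layout_for(COPY_SRC | SAMPLED), Layout::General);
}
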
pipeline::IndexFormat::Uint16 => hal::IndexType::U16, + pipeline::IndexFormat::Uint32 => hal::IndexType::U32, + } +} diff --git a/dom/webgpu/wgpu-native/src/device.rs b/dom/webgpu/wgpu-native/src/device.rs new file mode 100644 index 000000000000..5162127ba250 --- /dev/null +++ b/dom/webgpu/wgpu-native/src/device.rs @@ -0,0 +1,2261 @@ + + + + +#[cfg(feature = "local")] +use crate::instance::Limits; +use crate::{ + binding_model, + command, + conv, + hub::{GfxBackend, Global, Token}, + id::{Input, Output}, + pipeline, + resource, + swap_chain, + track::{Stitch, TrackerSet}, + AdapterId, + BindGroupId, + BindGroupLayoutId, + BufferAddress, + BufferId, + BufferMapAsyncStatus, + BufferMapOperation, + CommandBufferId, + CommandEncoderId, + ComputePipelineId, + DeviceId, + FastHashMap, + Features, + LifeGuard, + PipelineLayoutId, + QueueId, + RefCount, + RenderPipelineId, + SamplerId, + ShaderModuleId, + Stored, + SubmissionIndex, + SurfaceId, + SwapChainId, + TextureDimension, + TextureFormat, + TextureId, + TextureUsage, + TextureViewId, +}; +#[cfg(feature = "local")] +use crate::{gfx_select, hub::GLOBAL}; + +use arrayvec::ArrayVec; +use copyless::VecHelper as _; +use hal::{ + self, + command::CommandBuffer as _, + device::Device as _, + queue::CommandQueue as _, + window::{PresentationSurface as _, Surface as _}, +}; +use parking_lot::Mutex; +use rendy_descriptor::{DescriptorAllocator, DescriptorRanges, DescriptorSet}; +use rendy_memory::{Block, Heaps, MemoryBlock}; + +#[cfg(feature = "local")] +use std::marker::PhantomData; +use std::{ + collections::hash_map::Entry, + ffi, + iter, + ops::Range, + ptr, + slice, + sync::atomic::Ordering, +}; + + +const CLEANUP_WAIT_MS: u64 = 5000; +pub const MAX_COLOR_TARGETS: usize = 4; +pub const MAX_MIP_LEVELS: usize = 16; +pub const MAX_VERTEX_BUFFERS: usize = 8; + + +pub const BIND_BUFFER_ALIGNMENT: hal::buffer::Offset = 256; + +pub fn all_buffer_stages() -> hal::pso::PipelineStage { + use hal::pso::PipelineStage as Ps; + Ps::DRAW_INDIRECT + | Ps::VERTEX_INPUT + | Ps::VERTEX_SHADER + | Ps::FRAGMENT_SHADER + | Ps::COMPUTE_SHADER + | Ps::TRANSFER + | Ps::HOST +} +pub fn all_image_stages() -> hal::pso::PipelineStage { + use hal::pso::PipelineStage as Ps; + Ps::EARLY_FRAGMENT_TESTS + | Ps::LATE_FRAGMENT_TESTS + | Ps::COLOR_ATTACHMENT_OUTPUT + | Ps::VERTEX_SHADER + | Ps::FRAGMENT_SHADER + | Ps::COMPUTE_SHADER + | Ps::TRANSFER +} + +#[derive(Clone, Copy, Debug, PartialEq)] +enum HostMap { + Read, + Write, +} + +#[derive(Clone, Debug, Hash, PartialEq)] +pub(crate) struct AttachmentData { + pub colors: ArrayVec<[T; MAX_COLOR_TARGETS]>, + pub resolves: ArrayVec<[T; MAX_COLOR_TARGETS]>, + pub depth_stencil: Option, +} +impl Eq for AttachmentData {} +impl AttachmentData { + pub(crate) fn all(&self) -> impl Iterator { + self.colors + .iter() + .chain(&self.resolves) + .chain(&self.depth_stencil) + } +} + +impl RenderPassContext { + + pub(crate) fn compatible(&self, other: &RenderPassContext) -> bool { + self.colors == other.colors && self.depth_stencil == other.depth_stencil + } +} + +pub(crate) type RenderPassKey = AttachmentData; +pub(crate) type FramebufferKey = AttachmentData; +pub(crate) type RenderPassContext = AttachmentData; + +#[derive(Debug, PartialEq)] +enum ResourceId { + Buffer(BufferId), + Texture(TextureId), + TextureView(TextureViewId), + BindGroup(BindGroupId), + Sampler(SamplerId), +} + +#[derive(Debug)] +enum NativeResource { + Buffer(B::Buffer, MemoryBlock), + Image(B::Image, MemoryBlock), + ImageView(B::ImageView), + Framebuffer(B::Framebuffer), 
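
Uniform and storage bindings are later checked against BIND_BUFFER_ALIGNMENT, so allocators layered on top of this code typically round offsets up to that 256-byte granularity. A small helper one might write against the constant (align_up itself is not part of the patch):

const BIND_BUFFER_ALIGNMENT: u64 = 256;

/// Round an offset up to the next valid buffer-binding boundary.
/// Valid because BIND_BUFFER_ALIGNMENT is a power of two.
fn align_up(offset: u64) -> u64 {
    (offset + BIND_BUFFER_ALIGNMENT - 1) & !(BIND_BUFFER_ALIGNMENT - 1)
}

fn main() {
    assert_eq!(align_up(0), 0);
    assert_eq!(align_up(1), 256);
    assert_eq!(align_up(512), 512);
}
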
+ DescriptorSet(DescriptorSet), + Sampler(B::Sampler), +} + +#[derive(Debug)] +struct ActiveSubmission { + index: SubmissionIndex, + fence: B::Fence, + + + resources: Vec<(Option, NativeResource)>, + mapped: Vec, +} + + + + + + + + + + +#[derive(Debug)] +struct PendingResources { + + mapped: Vec>, + + + referenced: Vec<(ResourceId, RefCount)>, + + + + active: Vec>, + + + free: Vec>, + ready_to_map: Vec, +} + +impl PendingResources { + fn destroy(&mut self, resource_id: ResourceId, ref_count: RefCount) { + debug_assert!(!self.referenced.iter().any(|r| r.0 == resource_id)); + self.referenced.push((resource_id, ref_count)); + } + + fn map(&mut self, buffer: BufferId, ref_count: RefCount) { + self.mapped.push(Stored { + value: buffer, + ref_count, + }); + } + + + fn cleanup( + &mut self, + device: &B::Device, + heaps_mutex: &Mutex>, + descriptor_allocator_mutex: &Mutex>, + force_wait: bool, + ) -> SubmissionIndex { + if force_wait && !self.active.is_empty() { + let status = unsafe { + device.wait_for_fences( + self.active.iter().map(|a| &a.fence), + hal::device::WaitFor::All, + CLEANUP_WAIT_MS * 1_000_000, + ) + }; + assert_eq!(status, Ok(true), "GPU got stuck :("); + } + + + + let done_count = self + .active + .iter() + .position(|a| unsafe { !device.get_fence_status(&a.fence).unwrap() }) + .unwrap_or(self.active.len()); + let last_done = if done_count != 0 { + self.active[done_count - 1].index + } else { + return 0; + }; + + for a in self.active.drain(.. done_count) { + log::trace!("Active submission {} is done", a.index); + self.free.extend(a.resources.into_iter().map(|(_, r)| r)); + self.ready_to_map.extend(a.mapped); + unsafe { + device.destroy_fence(a.fence); + } + } + + let mut heaps = heaps_mutex.lock(); + let mut descriptor_allocator = descriptor_allocator_mutex.lock(); + for resource in self.free.drain(..) { + match resource { + NativeResource::Buffer(raw, memory) => unsafe { + device.destroy_buffer(raw); + heaps.free(device, memory); + }, + NativeResource::Image(raw, memory) => unsafe { + device.destroy_image(raw); + heaps.free(device, memory); + }, + NativeResource::ImageView(raw) => unsafe { + device.destroy_image_view(raw); + }, + NativeResource::Framebuffer(raw) => unsafe { + device.destroy_framebuffer(raw); + }, + NativeResource::DescriptorSet(raw) => unsafe { + descriptor_allocator.free(iter::once(raw)); + }, + NativeResource::Sampler(raw) => unsafe { + device.destroy_sampler(raw); + }, + } + } + + last_done + } + + fn triage_referenced( + &mut self, + global: &Global, + trackers: &mut TrackerSet, + mut token: &mut Token>, + ) { + // Before destruction, a resource is expected to have the following strong refs: + // - in resource itself + // - in the device tracker + // - in this list + const MIN_REFS: usize = 4; + + if self.referenced.iter().all(|r| r.1.load() >= MIN_REFS) { + return; + } + + let hub = B::hub(global); + //TODO: lock less, if possible + let (mut bind_group_guard, mut token) = hub.bind_groups.write(&mut token); + let (mut buffer_guard, mut token) = hub.buffers.write(&mut token); + let (mut texture_guard, mut token) = hub.textures.write(&mut token); + let (mut teview_view_guard, mut token) = hub.texture_views.write(&mut token); + let (mut sampler_guard, _) = hub.samplers.write(&mut token); + + for i in (0 .. 
self.referenced.len()).rev() { + let num_refs = self.referenced[i].1.load(); + if num_refs <= 3 { + let resource_id = self.referenced.swap_remove(i).0; + assert_eq!( + num_refs, 3, + "Resource {:?} misses some references", + resource_id + ); + let (life_guard, resource) = match resource_id { + ResourceId::Buffer(id) => { + if buffer_guard[id].pending_map_operation.is_some() { + continue; + } + trackers.buffers.remove(id); + let buf = buffer_guard.remove(id).unwrap(); + #[cfg(feature = "local")] + hub.buffers.identity.lock().free(id); + (buf.life_guard, NativeResource::Buffer(buf.raw, buf.memory)) + } + ResourceId::Texture(id) => { + trackers.textures.remove(id); + let tex = texture_guard.remove(id).unwrap(); + #[cfg(feature = "local")] + hub.textures.identity.lock().free(id); + (tex.life_guard, NativeResource::Image(tex.raw, tex.memory)) + } + ResourceId::TextureView(id) => { + trackers.views.remove(id); + let view = teview_view_guard.remove(id).unwrap(); + let raw = match view.inner { + resource::TextureViewInner::Native { raw, .. } => raw, + resource::TextureViewInner::SwapChain { .. } => unreachable!(), + }; + #[cfg(feature = "local")] + hub.texture_views.identity.lock().free(id); + (view.life_guard, NativeResource::ImageView(raw)) + } + ResourceId::BindGroup(id) => { + trackers.bind_groups.remove(id); + let bind_group = bind_group_guard.remove(id).unwrap(); + #[cfg(feature = "local")] + hub.bind_groups.identity.lock().free(id); + ( + bind_group.life_guard, + NativeResource::DescriptorSet(bind_group.raw), + ) + } + ResourceId::Sampler(id) => { + trackers.samplers.remove(id); + let sampler = sampler_guard.remove(id).unwrap(); + #[cfg(feature = "local")] + hub.samplers.identity.lock().free(id); + (sampler.life_guard, NativeResource::Sampler(sampler.raw)) + } + }; + + let submit_index = life_guard.submission_index.load(Ordering::Acquire); + match self.active.iter_mut().find(|a| a.index == submit_index) { + Some(a) => { + a.resources.alloc().init((Some(resource_id), resource)); + } + None => self.free.push(resource), + } + } + } + } + + fn triage_mapped(&mut self, global: &Global, token: &mut Token>) { + if self.mapped.is_empty() { + return; + } + let (buffer_guard, _) = B::hub(global).buffers.read(token); + + for stored in self.mapped.drain(..) { + let resource_id = stored.value; + let buf = &buffer_guard[resource_id]; + + let submit_index = buf.life_guard.submission_index.load(Ordering::Acquire); + log::trace!( + "Mapping of {:?} at submission {:?} gets assigned to active {:?}", + resource_id, + submit_index, + self.active.iter().position(|a| a.index == submit_index) + ); + + self.active + .iter_mut() + .find(|a| a.index == submit_index) + .map_or(&mut self.ready_to_map, |a| &mut a.mapped) + .push(resource_id); + } + } + + fn triage_framebuffers( + &mut self, + global: &Global, + framebuffers: &mut FastHashMap, + token: &mut Token>, + ) { + let (texture_view_guard, _) = B::hub(global).texture_views.read(token); + let remove_list = framebuffers + .keys() + .filter_map(|key| { + let mut last_submit: SubmissionIndex = 0; + for &at in key.all() { + if texture_view_guard.contains(at) { + return None; + } + // This attachment is no longer registered. + // Let's see if it's used by any of the active submissions. 
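
triage_referenced leans on a counting invariant: a resource that is still externally alive holds at least MIN_REFS = 4 strong references (the resource itself, the device tracker, this pending list, plus the user handle), so a count of exactly 3 means only internals remain and the resource can be scheduled for destruction. The same invariant modeled with std::sync::Arc:

use std::sync::Arc;

fn main() {
    // One clone each for the "resource", "tracker", "pending list", and user.
    let resource = Arc::new(());
    let tracker = Arc::clone(&resource);
    let pending = Arc::clone(&resource);
    let user = Arc::clone(&resource);

    assert_eq!(Arc::strong_count(&pending), 4); // still externally alive
    drop(user);
    assert_eq!(Arc::strong_count(&pending), 3); // only internal refs: triage it
    drop((resource, tracker));
}
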
+ let res_id = &Some(ResourceId::TextureView(at)); + for a in &self.active { + if a.resources.iter().any(|&(ref id, _)| id == res_id) { + last_submit = last_submit.max(a.index); + } + } + } + Some((key.clone(), last_submit)) + }) + .collect::>(); + + for (ref key, submit_index) in remove_list { + let resource = NativeResource::Framebuffer(framebuffers.remove(key).unwrap()); + match self.active.iter_mut().find(|a| a.index == submit_index) { + Some(a) => { + a.resources.alloc().init((None, resource)); + } + None => self.free.push(resource), + } + } + } + + fn handle_mapping( + &mut self, + global: &Global, + raw: &B::Device, + token: &mut Token>, + ) -> Vec { + if self.ready_to_map.is_empty() { + return Vec::new(); + } + let (mut buffer_guard, _) = B::hub(global).buffers.write(token); + self.ready_to_map + .drain(..) + .map(|buffer_id| { + let buffer = &mut buffer_guard[buffer_id]; + let operation = buffer.pending_map_operation.take().unwrap(); + let result = match operation { + BufferMapOperation::Read(ref range, ..) => { + map_buffer(raw, buffer, range.clone(), HostMap::Read) + } + BufferMapOperation::Write(ref range, ..) => { + map_buffer(raw, buffer, range.clone(), HostMap::Write) + } + }; + (operation, result) + }) + .collect() + } +} + +type BufferMapResult = Result<*mut u8, hal::device::MapError>; +type BufferMapPendingCallback = (BufferMapOperation, BufferMapResult); + +fn map_buffer( + raw: &B::Device, + buffer: &mut resource::Buffer, + buffer_range: Range, + kind: HostMap, +) -> BufferMapResult { + let is_coherent = buffer + .memory + .properties() + .contains(hal::memory::Properties::COHERENT); + let (ptr, mapped_range) = { + let mapped = buffer.memory.map(raw, buffer_range)?; + (mapped.ptr(), mapped.range()) + }; + + if !is_coherent { + match kind { + HostMap::Read => unsafe { + raw.invalidate_mapped_memory_ranges(iter::once(( + buffer.memory.memory(), + mapped_range, + ))) + .unwrap(); + }, + HostMap::Write => { + buffer.mapped_write_ranges.push(mapped_range); + } + } + } + + Ok(ptr.as_ptr()) +} + +#[derive(Debug)] +pub struct Device { + pub(crate) raw: B::Device, + pub(crate) adapter_id: AdapterId, + pub(crate) queue_group: hal::queue::QueueGroup, + pub(crate) com_allocator: command::CommandAllocator, + mem_allocator: Mutex>, + desc_allocator: Mutex>, + life_guard: LifeGuard, + pub(crate) trackers: Mutex, + pub(crate) render_passes: Mutex>, + pub(crate) framebuffers: Mutex>, + pending: Mutex>, + pub(crate) features: Features, +} + +impl Device { + pub(crate) fn new( + raw: B::Device, + adapter_id: AdapterId, + queue_group: hal::queue::QueueGroup, + mem_props: hal::adapter::MemoryProperties, + supports_texture_d24_s8: bool, + max_bind_groups: u32, + ) -> Self { + // don't start submission index at zero + let life_guard = LifeGuard::new(); + life_guard.submission_index.fetch_add(1, Ordering::Relaxed); + + let heaps = { + let types = mem_props.memory_types.iter().map(|mt| { + use rendy_memory::{DynamicConfig, HeapsConfig, LinearConfig}; + let config = HeapsConfig { + linear: if mt.properties.contains(hal::memory::Properties::CPU_VISIBLE) { + Some(LinearConfig { + linear_size: 0x10_00_00, + }) + } else { + None + }, + dynamic: Some(DynamicConfig { + block_size_granularity: 0x1_00, + max_chunk_size: 0x1_00_00_00, + min_device_allocation: 0x1_00_00, + }), + }; + (mt.properties.into(), mt.heap_index as u32, config) + }); + unsafe { Heaps::new(types, mem_props.memory_heaps.iter().cloned()) } + }; + + Device { + raw, + adapter_id, + com_allocator: 
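
map_buffer only touches the cache-maintenance paths when the memory type is not COHERENT: a read mapping must invalidate the mapped range first, while write mappings are remembered so they can be flushed later. A schematic of that branching with the device calls stubbed out (the types here are illustrative, not the patch's own):

use std::ops::Range;

enum HostMap { Read, Write }

struct Mapping {
    coherent: bool,
    pending_flush: Vec<Range<u64>>,
}

impl Mapping {
    fn map(&mut self, range: Range<u64>, kind: HostMap) {
        if self.coherent {
            return; // CPU and GPU views already agree
        }
        match kind {
            // Make GPU writes visible before the CPU reads.
            HostMap::Read => invalidate(&range),
            // Defer: the flush happens when the buffer is unmapped.
            HostMap::Write => self.pending_flush.push(range),
        }
    }
}

fn invalidate(_range: &Range<u64>) { /* a device call in the real code */ }

fn main() {
    let mut m = Mapping { coherent: false, pending_flush: Vec::new() };
    m.map(0 .. 64, HostMap::Write);
    m.map(64 .. 128, HostMap::Read);
    assert_eq!(m.pending_flush.len(), 1);
}
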
command::CommandAllocator::new(queue_group.family), + mem_allocator: Mutex::new(heaps), + desc_allocator: Mutex::new(DescriptorAllocator::new()), + queue_group, + life_guard, + trackers: Mutex::new(TrackerSet::new(B::VARIANT)), + render_passes: Mutex::new(FastHashMap::default()), + framebuffers: Mutex::new(FastHashMap::default()), + pending: Mutex::new(PendingResources { + mapped: Vec::new(), + referenced: Vec::new(), + active: Vec::new(), + free: Vec::new(), + ready_to_map: Vec::new(), + }), + features: Features { + max_bind_groups, + supports_texture_d24_s8, + }, + } + } + + fn maintain( + &self, + global: &Global, + force_wait: bool, + token: &mut Token, + ) -> Vec { + let mut pending = self.pending.lock(); + let mut trackers = self.trackers.lock(); + + pending.triage_referenced(global, &mut *trackers, token); + pending.triage_mapped(global, token); + pending.triage_framebuffers(global, &mut *self.framebuffers.lock(), token); + let last_done = pending.cleanup( + &self.raw, + &self.mem_allocator, + &self.desc_allocator, + force_wait, + ); + let callbacks = pending.handle_mapping(global, &self.raw, token); + + unsafe { + self.desc_allocator.lock().cleanup(&self.raw); + } + + if last_done != 0 { + self.com_allocator.maintain(last_done); + } + + callbacks + } + + //Note: this logic is specifically moved out of `handle_mapping()` in order to + // have nothing locked by the time we execute users callback code. + fn fire_map_callbacks>(callbacks: I) { + for (operation, result) in callbacks { + let (status, ptr) = match result { + Ok(ptr) => (BufferMapAsyncStatus::Success, ptr), + Err(e) => { + log::error!("failed to map buffer: {:?}", e); + (BufferMapAsyncStatus::Error, ptr::null_mut()) + } + }; + match operation { + BufferMapOperation::Read(_, on_read, userdata) => on_read(status, ptr, userdata), + BufferMapOperation::Write(_, on_write, userdata) => on_write(status, ptr, userdata), + } + } + } + + fn create_buffer( + &self, + self_id: DeviceId, + desc: &resource::BufferDescriptor, + ) -> resource::Buffer { + debug_assert_eq!(self_id.backend(), B::VARIANT); + let (usage, _memory_properties) = conv::map_buffer_usage(desc.usage); + + let rendy_usage = { + use rendy_memory::MemoryUsageValue as Muv; + use resource::BufferUsage as Bu; + + if !desc.usage.intersects(Bu::MAP_READ | Bu::MAP_WRITE) { + Muv::Data + } else if (Bu::MAP_WRITE | Bu::COPY_SRC).contains(desc.usage) { + Muv::Upload + } else if (Bu::MAP_READ | Bu::COPY_DST).contains(desc.usage) { + Muv::Download + } else { + Muv::Dynamic + } + }; + + let mut buffer = unsafe { self.raw.create_buffer(desc.size, usage).unwrap() }; + let requirements = unsafe { self.raw.get_buffer_requirements(&buffer) }; + let memory = self + .mem_allocator + .lock() + .allocate( + &self.raw, + requirements.type_mask as u32, + rendy_usage, + requirements.size, + requirements.alignment, + ) + .unwrap(); + + unsafe { + self.raw + .bind_buffer_memory(memory.memory(), memory.range().start, &mut buffer) + .unwrap() + }; + + resource::Buffer { + raw: buffer, + device_id: Stored { + value: self_id, + ref_count: self.life_guard.ref_count.clone(), + }, + usage: desc.usage, + memory, + size: desc.size, + mapped_write_ranges: Vec::new(), + pending_map_operation: None, + life_guard: LifeGuard::new(), + } + } + + fn create_texture( + &self, + self_id: DeviceId, + desc: &resource::TextureDescriptor, + ) -> resource::Texture { + debug_assert_eq!(self_id.backend(), B::VARIANT); + + // Ensure `D24Plus` textures cannot be copied + match desc.format { + TextureFormat::Depth24Plus | 
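
create_buffer picks a rendy-memory usage class from the WebGPU flags: GPU-only Data when no mapping is requested, Upload or Download when the mapping direction plus the matching copy direction covers everything asked for, and Dynamic otherwise. The same decision tree in miniature (flag values invented for the sketch):

const MAP_READ: u32 = 1 << 0;
const MAP_WRITE: u32 = 1 << 1;
const COPY_SRC: u32 = 1 << 2;
const COPY_DST: u32 = 1 << 3;

#[derive(Debug, PartialEq)]
enum Class { Data, Upload, Download, Dynamic }

fn classify(usage: u32) -> Class {
    // "usage is a subset of mask", like bitflags' mask.contains(usage).
    let within = |mask: u32| (usage & !mask) == 0;
    if usage & (MAP_READ | MAP_WRITE) == 0 {
        Class::Data
    } else if within(MAP_WRITE | COPY_SRC) {
        Class::Upload
    } else if within(MAP_READ | COPY_DST) {
        Class::Download
    } else {
        Class::Dynamic
    }
}

fn main() {
    assert_eq!(classify(MAP_WRITE | COPY_SRC), Class::Upload);
    assert_eq!(classify(MAP_READ | MAP_WRITE), Class::Dynamic);
}
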
TextureFormat::Depth24PlusStencil8 => { + assert!(!desc + .usage + .intersects(TextureUsage::COPY_SRC | TextureUsage::COPY_DST)); + } + _ => {} + } + + let kind = conv::map_texture_dimension_size( + desc.dimension, + desc.size, + desc.array_layer_count, + desc.sample_count, + ); + let format = conv::map_texture_format(desc.format, self.features); + let aspects = format.surface_desc().aspects; + let usage = conv::map_texture_usage(desc.usage, aspects); + + assert!((desc.mip_level_count as usize) < MAX_MIP_LEVELS); + let mut view_capabilities = hal::image::ViewCapabilities::empty(); + + // 2D textures with array layer counts that are multiples of 6 could be cubemaps + // Following gpuweb/gpuweb#68 always add the hint in that case + if desc.dimension == TextureDimension::D2 && desc.array_layer_count % 6 == 0 { + view_capabilities |= hal::image::ViewCapabilities::KIND_CUBE; + }; + + // TODO: 2D arrays, cubemap arrays + + let mut image = unsafe { + self.raw.create_image( + kind, + desc.mip_level_count as hal::image::Level, + format, + hal::image::Tiling::Optimal, + usage, + view_capabilities, + ) + } + .unwrap(); + let requirements = unsafe { self.raw.get_image_requirements(&image) }; + + let memory = self + .mem_allocator + .lock() + .allocate( + &self.raw, + requirements.type_mask as u32, + rendy_memory::Data, + requirements.size, + requirements.alignment, + ) + .unwrap(); + + unsafe { + self.raw + .bind_image_memory(memory.memory(), memory.range().start, &mut image) + .unwrap() + }; + + resource::Texture { + raw: image, + device_id: Stored { + value: self_id, + ref_count: self.life_guard.ref_count.clone(), + }, + usage: desc.usage, + kind, + format: desc.format, + full_range: hal::image::SubresourceRange { + aspects, + levels: 0 .. desc.mip_level_count as hal::image::Level, + layers: 0 .. 
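
Following gpuweb/gpuweb#68, any 2D texture whose array layer count is a multiple of 6 might later be viewed as a cubemap, so create_texture adds the cube-compatibility hint up front rather than at view time. The rule in isolation (a sketch, not the vendored helper):

fn needs_cube_hint(is_2d: bool, array_layers: u32) -> bool {
    is_2d && array_layers % 6 == 0
}

fn main() {
    assert!(needs_cube_hint(true, 12)); // two cubemaps' worth of layers
    assert!(!needs_cube_hint(true, 5));
}
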
desc.array_layer_count as hal::image::Layer, + }, + memory, + life_guard: LifeGuard::new(), + } + } +} + +impl Device { + pub(crate) fn destroy_bind_group(&self, bind_group: binding_model::BindGroup) { + unsafe { + self.desc_allocator.lock().free(iter::once(bind_group.raw)); + } + } + + pub(crate) fn dispose(self) { + self.com_allocator.destroy(&self.raw); + let desc_alloc = self.desc_allocator.into_inner(); + unsafe { + desc_alloc.dispose(&self.raw); + } + } +} + +#[cfg(feature = "local")] +#[no_mangle] +pub extern "C" fn wgpu_device_get_limits(_device_id: DeviceId, limits: &mut Limits) { + *limits = Limits::default(); // TODO +} + +#[derive(Debug)] +pub struct ShaderModule { + pub(crate) raw: B::ShaderModule, +} + +pub fn device_create_buffer( + global: &Global, + device_id: DeviceId, + desc: &resource::BufferDescriptor, + id_in: Input, +) -> Output { + let hub = B::hub(global); + let mut token = Token::root(); + + let (device_guard, _) = hub.devices.read(&mut token); + let device = &device_guard[device_id]; + let buffer = device.create_buffer(device_id, desc); + + let (id, id_out) = hub.buffers.new_identity(id_in); + let ok = device.trackers.lock().buffers.init( + id, + &buffer.life_guard.ref_count, + (), + resource::BufferUsage::empty(), + ); + assert!(ok); + + hub.buffers.register(id, buffer, &mut token); + id_out +} + +#[cfg(feature = "local")] +#[no_mangle] +pub extern "C" fn wgpu_device_create_buffer( + device_id: DeviceId, + desc: &resource::BufferDescriptor, +) -> BufferId { + gfx_select!(device_id => device_create_buffer(&*GLOBAL, device_id, desc, PhantomData)) +} + +pub fn device_create_buffer_mapped( + global: &Global, + device_id: DeviceId, + desc: &resource::BufferDescriptor, + mapped_ptr_out: *mut *mut u8, + id_in: Input, +) -> Output { + let hub = B::hub(global); + let mut token = Token::root(); + let mut desc = desc.clone(); + desc.usage |= resource::BufferUsage::MAP_WRITE; + + let (device_guard, _) = hub.devices.read(&mut token); + let device = &device_guard[device_id]; + let mut buffer = device.create_buffer(device_id, &desc); + + match map_buffer(&device.raw, &mut buffer, 0 .. 
desc.size, HostMap::Write) { + Ok(ptr) => unsafe { + *mapped_ptr_out = ptr; + }, + Err(e) => { + log::error!("failed to create buffer in a mapped state: {:?}", e); + unsafe { + *mapped_ptr_out = ptr::null_mut(); + } + } + } + + let (id, id_out) = hub.buffers.new_identity(id_in); + let ok = device.trackers.lock().buffers.init( + id, + &buffer.life_guard.ref_count, + (), + resource::BufferUsage::MAP_WRITE, + ); + assert!(ok); + + hub.buffers.register(id, buffer, &mut token); + id_out +} + +#[cfg(feature = "local")] +#[no_mangle] +pub extern "C" fn wgpu_device_create_buffer_mapped( + device_id: DeviceId, + desc: &resource::BufferDescriptor, + mapped_ptr_out: *mut *mut u8, +) -> BufferId { + gfx_select!(device_id => device_create_buffer_mapped(&*GLOBAL, device_id, desc, mapped_ptr_out, PhantomData)) +} + +pub fn buffer_destroy(global: &Global, buffer_id: BufferId) { + let hub = B::hub(global); + let mut token = Token::root(); + let (device_guard, mut token) = hub.devices.read(&mut token); + let (buffer_guard, _) = hub.buffers.read(&mut token); + let buffer = &buffer_guard[buffer_id]; + device_guard[buffer.device_id.value].pending.lock().destroy( + ResourceId::Buffer(buffer_id), + buffer.life_guard.ref_count.clone(), + ); +} + +#[cfg(feature = "local")] +#[no_mangle] +pub extern "C" fn wgpu_buffer_destroy(buffer_id: BufferId) { + gfx_select!(buffer_id => buffer_destroy(&*GLOBAL, buffer_id)) +} + +pub fn device_create_texture( + global: &Global, + device_id: DeviceId, + desc: &resource::TextureDescriptor, + id_in: Input, +) -> Output { + let hub = B::hub(global); + let mut token = Token::root(); + + let (device_guard, _) = hub.devices.read(&mut token); + let device = &device_guard[device_id]; + let texture = device.create_texture(device_id, desc); + + let (id, id_out) = hub.textures.new_identity(id_in); + let ok = device.trackers.lock().textures.init( + id, + &texture.life_guard.ref_count, + texture.full_range.clone(), + resource::TextureUsage::UNINITIALIZED, + ); + assert!(ok); + + hub.textures.register(id, texture, &mut token); + id_out +} + +#[cfg(feature = "local")] +#[no_mangle] +pub extern "C" fn wgpu_device_create_texture( + device_id: DeviceId, + desc: &resource::TextureDescriptor, +) -> TextureId { + gfx_select!(device_id => device_create_texture(&*GLOBAL, device_id, desc, PhantomData)) +} + +pub fn texture_create_view( + global: &Global, + texture_id: TextureId, + desc: Option<&resource::TextureViewDescriptor>, + id_in: Input, +) -> Output { + let hub = B::hub(global); + let mut token = Token::root(); + + let (device_guard, mut token) = hub.devices.read(&mut token); + let (texture_guard, mut token) = hub.textures.read(&mut token); + let texture = &texture_guard[texture_id]; + let device = &device_guard[texture.device_id.value]; + + let (format, view_kind, range) = match desc { + Some(desc) => { + let kind = conv::map_texture_view_dimension(desc.dimension); + let end_level = if desc.level_count == 0 { + texture.full_range.levels.end + } else { + (desc.base_mip_level + desc.level_count) as u8 + }; + let end_layer = if desc.array_layer_count == 0 { + texture.full_range.layers.end + } else { + (desc.base_array_layer + desc.array_layer_count) as u16 + }; + let range = hal::image::SubresourceRange { + aspects: match desc.aspect { + resource::TextureAspect::All => texture.full_range.aspects, + resource::TextureAspect::DepthOnly => hal::format::Aspects::DEPTH, + resource::TextureAspect::StencilOnly => hal::format::Aspects::STENCIL, + }, + levels: desc.base_mip_level as u8 .. 
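
texture_create_view treats a level_count or array_layer_count of 0 as "through the end of the resource", which is exactly what the end_level / end_layer computation above implements. Reduced to one function:

/// In TextureViewDescriptor, a count of 0 means "the rest of the range".
fn view_end(base: u32, count: u32, full_end: u32) -> u32 {
    if count == 0 { full_end } else { base + count }
}

fn main() {
    assert_eq!(view_end(2, 0, 10), 10); // rest of the mip chain
    assert_eq!(view_end(2, 3, 10), 5);
}
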
end_level, + layers: desc.base_array_layer as u16 .. end_layer, + }; + (desc.format, kind, range) + } + None => { + let kind = match texture.kind { + hal::image::Kind::D1(_, 1) => hal::image::ViewKind::D1, + hal::image::Kind::D1(..) => hal::image::ViewKind::D1Array, + hal::image::Kind::D2(_, _, 1, _) => hal::image::ViewKind::D2, + hal::image::Kind::D2(..) => hal::image::ViewKind::D2Array, + hal::image::Kind::D3(..) => hal::image::ViewKind::D3, + }; + (texture.format, kind, texture.full_range.clone()) + } + }; + + let raw = unsafe { + device + .raw + .create_image_view( + &texture.raw, + view_kind, + conv::map_texture_format(format, device.features), + hal::format::Swizzle::NO, + range.clone(), + ) + .unwrap() + }; + + let view = resource::TextureView { + inner: resource::TextureViewInner::Native { + raw, + source_id: Stored { + value: texture_id, + ref_count: texture.life_guard.ref_count.clone(), + }, + }, + format: texture.format, + extent: texture.kind.extent().at_level(range.levels.start), + samples: texture.kind.num_samples(), + range, + life_guard: LifeGuard::new(), + }; + + let (id, id_out) = hub.texture_views.new_identity(id_in); + let ok = device + .trackers + .lock() + .views + .init(id, &view.life_guard.ref_count, (), ()); + assert!(ok); + + hub.texture_views.register(id, view, &mut token); + id_out +} + +#[cfg(feature = "local")] +#[no_mangle] +pub extern "C" fn wgpu_texture_create_view( + texture_id: TextureId, + desc: Option<&resource::TextureViewDescriptor>, +) -> TextureViewId { + gfx_select!(texture_id => texture_create_view(&*GLOBAL, texture_id, desc, PhantomData)) +} + +pub fn texture_destroy(global: &Global, texture_id: TextureId) { + let hub = B::hub(global); + let mut token = Token::root(); + + let (device_guard, mut token) = hub.devices.read(&mut token); + let (texture_guard, _) = hub.textures.read(&mut token); + let texture = &texture_guard[texture_id]; + device_guard[texture.device_id.value] + .pending + .lock() + .destroy( + ResourceId::Texture(texture_id), + texture.life_guard.ref_count.clone(), + ); +} + +#[cfg(feature = "local")] +#[no_mangle] +pub extern "C" fn wgpu_texture_destroy(texture_id: TextureId) { + gfx_select!(texture_id => texture_destroy(&*GLOBAL, texture_id)) +} + +pub fn texture_view_destroy(global: &Global, texture_view_id: TextureViewId) { + let hub = B::hub(global); + let mut token = Token::root(); + let (device_guard, mut token) = hub.devices.read(&mut token); + let (texture_guard, mut token) = hub.textures.read(&mut token); + let (texture_view_guard, _) = hub.texture_views.read(&mut token); + let view = &texture_view_guard[texture_view_id]; + let device_id = match view.inner { + resource::TextureViewInner::Native { ref source_id, .. } => { + texture_guard[source_id.value].device_id.value + } + resource::TextureViewInner::SwapChain { .. 
} => panic!("Can't destroy a swap chain image"), + }; + device_guard[device_id].pending.lock().destroy( + ResourceId::TextureView(texture_view_id), + view.life_guard.ref_count.clone(), + ); +} + +#[cfg(feature = "local")] +#[no_mangle] +pub extern "C" fn wgpu_texture_view_destroy(texture_view_id: TextureViewId) { + gfx_select!(texture_view_id => texture_view_destroy(&*GLOBAL, texture_view_id)) +} + +pub fn device_create_sampler( + global: &Global, + device_id: DeviceId, + desc: &resource::SamplerDescriptor, + id_in: Input, +) -> Output { + let hub = B::hub(global); + let mut token = Token::root(); + let (device_guard, mut token) = hub.devices.read(&mut token); + let device = &device_guard[device_id]; + + let info = hal::image::SamplerDesc { + min_filter: conv::map_filter(desc.min_filter), + mag_filter: conv::map_filter(desc.mag_filter), + mip_filter: conv::map_filter(desc.mipmap_filter), + wrap_mode: ( + conv::map_wrap(desc.address_mode_u), + conv::map_wrap(desc.address_mode_v), + conv::map_wrap(desc.address_mode_w), + ), + lod_bias: hal::image::Lod(0.0), + lod_range: hal::image::Lod(desc.lod_min_clamp) .. hal::image::Lod(desc.lod_max_clamp), + comparison: if desc.compare_function == resource::CompareFunction::Always { + None + } else { + Some(conv::map_compare_function(desc.compare_function)) + }, + border: hal::image::PackedColor(0), + normalized: true, + anisotropic: hal::image::Anisotropic::Off, //TODO + }; + + let sampler = resource::Sampler { + raw: unsafe { device.raw.create_sampler(&info).unwrap() }, + device_id: Stored { + value: device_id, + ref_count: device.life_guard.ref_count.clone(), + }, + life_guard: LifeGuard::new(), + }; + hub.samplers.register_identity(id_in, sampler, &mut token) +} + +#[cfg(feature = "local")] +#[no_mangle] +pub extern "C" fn wgpu_device_create_sampler( + device_id: DeviceId, + desc: &resource::SamplerDescriptor, +) -> SamplerId { + gfx_select!(device_id => device_create_sampler(&*GLOBAL, device_id, desc, PhantomData)) +} + +pub fn sampler_destroy(global: &Global, sampler_id: SamplerId) { + let hub = B::hub(global); + let mut token = Token::root(); + let (device_guard, mut token) = hub.devices.read(&mut token); + let (sampler_guard, _) = hub.samplers.read(&mut token); + let sampler = &sampler_guard[sampler_id]; + device_guard[sampler.device_id.value] + .pending + .lock() + .destroy( + ResourceId::Sampler(sampler_id), + sampler.life_guard.ref_count.clone(), + ); +} + +#[cfg(feature = "local")] +#[no_mangle] +pub extern "C" fn wgpu_sampler_destroy(sampler_id: SamplerId) { + gfx_select!(sampler_id => sampler_destroy(&*GLOBAL, sampler_id)) +} + +pub fn device_create_bind_group_layout( + global: &Global, + device_id: DeviceId, + desc: &binding_model::BindGroupLayoutDescriptor, + id_in: Input, +) -> Output { + let mut token = Token::root(); + let hub = B::hub(global); + let bindings = unsafe { slice::from_raw_parts(desc.bindings, desc.bindings_length) }; + + let raw_bindings = bindings + .iter() + .map(|binding| hal::pso::DescriptorSetLayoutBinding { + binding: binding.binding, + ty: conv::map_binding_type(binding), + count: 1, //TODO: consolidate + stage_flags: conv::map_shader_stage_flags(binding.visibility), + immutable_samplers: false, // TODO + }) + .collect::>(); //TODO: avoid heap allocation + + let raw = unsafe { + let (device_guard, _) = hub.devices.read(&mut token); + device_guard[device_id] + .raw + .create_descriptor_set_layout(&raw_bindings, &[]) + .unwrap() + }; + + let layout = binding_model::BindGroupLayout { + raw, + bindings: 
bindings.to_vec(), + desc_ranges: DescriptorRanges::from_bindings(&raw_bindings), + dynamic_count: bindings.iter().filter(|b| b.dynamic).count(), + }; + + hub.bind_group_layouts + .register_identity(id_in, layout, &mut token) +} + +#[cfg(feature = "local")] +#[no_mangle] +pub extern "C" fn wgpu_device_create_bind_group_layout( + device_id: DeviceId, + desc: &binding_model::BindGroupLayoutDescriptor, +) -> BindGroupLayoutId { + gfx_select!(device_id => device_create_bind_group_layout(&*GLOBAL, device_id, desc, PhantomData)) +} + +pub fn device_create_pipeline_layout( + global: &Global, + device_id: DeviceId, + desc: &binding_model::PipelineLayoutDescriptor, + id_in: Input, +) -> Output { + let hub = B::hub(global); + let mut token = Token::root(); + + let (device_guard, mut token) = hub.devices.read(&mut token); + let device = &device_guard[device_id]; + let bind_group_layout_ids = + unsafe { slice::from_raw_parts(desc.bind_group_layouts, desc.bind_group_layouts_length) }; + + assert!(desc.bind_group_layouts_length <= (device.features.max_bind_groups as usize), + "Cannot set a bind group which is beyond the `max_bind_groups` limit requested on device creation"); + + // TODO: push constants + let pipeline_layout = { + let (bind_group_layout_guard, _) = hub.bind_group_layouts.read(&mut token); + let descriptor_set_layouts = bind_group_layout_ids + .iter() + .map(|&id| &bind_group_layout_guard[id].raw); + unsafe { + device.raw.create_pipeline_layout(descriptor_set_layouts, &[]) + } + .unwrap() + }; + + let layout = binding_model::PipelineLayout { + raw: pipeline_layout, + bind_group_layout_ids: bind_group_layout_ids.iter().cloned().collect(), + }; + hub.pipeline_layouts + .register_identity(id_in, layout, &mut token) +} + +#[cfg(feature = "local")] +#[no_mangle] +pub extern "C" fn wgpu_device_create_pipeline_layout( + device_id: DeviceId, + desc: &binding_model::PipelineLayoutDescriptor, +) -> PipelineLayoutId { + gfx_select!(device_id => device_create_pipeline_layout(&*GLOBAL, device_id, desc, PhantomData)) +} + +pub fn device_create_bind_group( + global: &Global, + device_id: DeviceId, + desc: &binding_model::BindGroupDescriptor, + id_in: Input, +) -> Output { + let hub = B::hub(global); + let mut token = Token::root(); + + let (device_guard, mut token) = hub.devices.read(&mut token); + let device = &device_guard[device_id]; + let (bind_group_layout_guard, _) = hub.bind_group_layouts.read(&mut token); + let bind_group_layout = &bind_group_layout_guard[desc.layout]; + let bindings = unsafe { slice::from_raw_parts(desc.bindings, desc.bindings_length as usize) }; + assert_eq!(bindings.len(), bind_group_layout.bindings.len()); + + let desc_set = unsafe { + let mut desc_sets = ArrayVec::<[_; 1]>::new(); + device + .desc_allocator + .lock() + .allocate( + &device.raw, + &bind_group_layout.raw, + bind_group_layout.desc_ranges, + 1, + &mut desc_sets, + ) + .unwrap(); + desc_sets.pop().unwrap() + }; + + // fill out the descriptors + let mut used = TrackerSet::new(B::VARIANT); + { + let (buffer_guard, mut token) = hub.buffers.read(&mut token); + let (texture_guard, mut token) = hub.textures.read(&mut token); //skip token + let (texture_view_guard, mut token) = hub.texture_views.read(&mut token); + let (sampler_guard, _) = hub.samplers.read(&mut token); + + //TODO: group writes into contiguous sections + let mut writes = Vec::new(); + for (b, decl) in bindings.iter().zip(&bind_group_layout.bindings) { + let descriptor = match b.resource { + binding_model::BindingResource::Buffer(ref bb) => { + let 
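
device_create_pipeline_layout validates the layout count against the max_bind_groups limit negotiated at device creation; exceeding it is a hard assert rather than a recoverable error at this stage. The check in isolation (the function name is illustrative):

fn check_bind_group_count(requested: usize, max_bind_groups: u32) {
    assert!(
        requested <= max_bind_groups as usize,
        "Cannot set a bind group which is beyond the `max_bind_groups` limit"
    );
}

fn main() {
    check_bind_group_count(4, 4); // fine exactly at the limit
}
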
(alignment, usage) = match decl.ty { + binding_model::BindingType::UniformBuffer => { + (BIND_BUFFER_ALIGNMENT, resource::BufferUsage::UNIFORM) + } + binding_model::BindingType::StorageBuffer => { + (BIND_BUFFER_ALIGNMENT, resource::BufferUsage::STORAGE) + } + binding_model::BindingType::ReadonlyStorageBuffer => { + (BIND_BUFFER_ALIGNMENT, resource::BufferUsage::STORAGE_READ) + } + binding_model::BindingType::Sampler + | binding_model::BindingType::SampledTexture + | binding_model::BindingType::StorageTexture => { + panic!("Mismatched buffer binding for {:?}", decl) + } + }; + assert_eq!( + bb.offset as hal::buffer::Offset % alignment, + 0, + "Misaligned buffer offset {}", + bb.offset + ); + let buffer = used + .buffers + .use_extend(&*buffer_guard, bb.buffer, (), usage) + .unwrap(); + assert!( + buffer.usage.contains(usage), + "Expected buffer usage {:?}", + usage + ); + + let end = if bb.size == 0 { + None + } else { + let end = bb.offset + bb.size; + assert!( + end <= buffer.size, + "Bound buffer range {:?} does not fit in buffer size {}", + bb.offset .. end, + buffer.size + ); + Some(end) + }; + + let range = Some(bb.offset) .. end; + hal::pso::Descriptor::Buffer(&buffer.raw, range) + } + binding_model::BindingResource::Sampler(id) => { + assert_eq!(decl.ty, binding_model::BindingType::Sampler); + let sampler = used + .samplers + .use_extend(&*sampler_guard, id, (), ()) + .unwrap(); + hal::pso::Descriptor::Sampler(&sampler.raw) + } + binding_model::BindingResource::TextureView(id) => { + let (usage, image_layout) = match decl.ty { + binding_model::BindingType::SampledTexture => ( + resource::TextureUsage::SAMPLED, + hal::image::Layout::ShaderReadOnlyOptimal, + ), + binding_model::BindingType::StorageTexture => { + (resource::TextureUsage::STORAGE, hal::image::Layout::General) + } + _ => panic!("Mismatched texture binding for {:?}", decl), + }; + let view = used + .views + .use_extend(&*texture_view_guard, id, (), ()) + .unwrap(); + match view.inner { + resource::TextureViewInner::Native { + ref raw, + ref source_id, + } => { + let texture = used + .textures + .use_extend( + &*texture_guard, + source_id.value, + view.range.clone(), + usage, + ) + .unwrap(); + assert!(texture.usage.contains(usage)); + + hal::pso::Descriptor::Image(raw, image_layout) + } + resource::TextureViewInner::SwapChain { .. 
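
The buffer-binding arm above makes two checks before building a descriptor: the offset must sit on a BIND_BUFFER_ALIGNMENT boundary, and a declared size of 0 binds the remainder of the buffer (the real code carries the range as Option bounds for hal; this sketch uses a plain Range):

use std::ops::Range;

const ALIGN: u64 = 256; // BIND_BUFFER_ALIGNMENT

fn binding_range(offset: u64, size: u64, buffer_size: u64) -> Range<u64> {
    assert_eq!(offset % ALIGN, 0, "Misaligned buffer offset {}", offset);
    // size == 0 means "to the end of the buffer".
    let end = if size == 0 { buffer_size } else { offset + size };
    assert!(end <= buffer_size, "Bound buffer range does not fit in buffer size");
    offset .. end
}

fn main() {
    assert_eq!(binding_range(256, 0, 1024), 256 .. 1024);
}
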
} => { + panic!("Unable to create a bind group with a swap chain image") + } + } + } + }; + writes.alloc().init(hal::pso::DescriptorSetWrite { + set: desc_set.raw(), + binding: b.binding, + array_offset: 0, //TODO + descriptors: iter::once(descriptor), + }); + } + + unsafe { + device.raw.write_descriptor_sets(writes); + } + } + + let bind_group = binding_model::BindGroup { + raw: desc_set, + device_id: Stored { + value: device_id, + ref_count: device.life_guard.ref_count.clone(), + }, + layout_id: desc.layout, + life_guard: LifeGuard::new(), + used, + dynamic_count: bind_group_layout.dynamic_count, + }; + let (id, id_out) = hub.bind_groups.new_identity(id_in); + let ok = device + .trackers + .lock() + .bind_groups + .init(id, &bind_group.life_guard.ref_count, (), ()); + assert!(ok); + + hub.bind_groups.register(id, bind_group, &mut token); + id_out +} + +#[cfg(feature = "local")] +#[no_mangle] +pub extern "C" fn wgpu_device_create_bind_group( + device_id: DeviceId, + desc: &binding_model::BindGroupDescriptor, +) -> BindGroupId { + gfx_select!(device_id => device_create_bind_group(&*GLOBAL, device_id, desc, PhantomData)) +} + +pub fn bind_group_destroy(global: &Global, bind_group_id: BindGroupId) { + let hub = B::hub(global); + let mut token = Token::root(); + let (device_guard, mut token) = hub.devices.read(&mut token); + let (bind_group_guard, _) = hub.bind_groups.read(&mut token); + let bind_group = &bind_group_guard[bind_group_id]; + device_guard[bind_group.device_id.value] + .pending + .lock() + .destroy( + ResourceId::BindGroup(bind_group_id), + bind_group.life_guard.ref_count.clone(), + ); +} + +#[cfg(feature = "local")] +#[no_mangle] +pub extern "C" fn wgpu_bind_group_destroy(bind_group_id: BindGroupId) { + gfx_select!(bind_group_id => bind_group_destroy(&*GLOBAL, bind_group_id)) +} + +pub fn device_create_shader_module( + global: &Global, + device_id: DeviceId, + desc: &pipeline::ShaderModuleDescriptor, + id_in: Input, +) -> Output { + let hub = B::hub(global); + let mut token = Token::root(); + + let spv = unsafe { slice::from_raw_parts(desc.code.bytes, desc.code.length) }; + let shader = { + let (device_guard, _) = hub.devices.read(&mut token); + ShaderModule { + raw: unsafe { + device_guard[device_id] + .raw + .create_shader_module(spv) + .unwrap() + }, + } + }; + hub.shader_modules + .register_identity(id_in, shader, &mut token) +} + +#[cfg(feature = "local")] +#[no_mangle] +pub extern "C" fn wgpu_device_create_shader_module( + device_id: DeviceId, + desc: &pipeline::ShaderModuleDescriptor, +) -> ShaderModuleId { + gfx_select!(device_id => device_create_shader_module(&*GLOBAL, device_id, desc, PhantomData)) +} + +pub fn device_create_command_encoder( + global: &Global, + device_id: DeviceId, + _desc: &command::CommandEncoderDescriptor, + id_in: Input, +) -> Output { + let hub = B::hub(global); + let mut token = Token::root(); + + let (device_guard, mut token) = hub.devices.read(&mut token); + let device = &device_guard[device_id]; + + let dev_stored = Stored { + value: device_id, + ref_count: device.life_guard.ref_count.clone(), + }; + let mut comb = device + .com_allocator + .allocate(dev_stored, &device.raw, device.features); + unsafe { + comb.raw.last_mut().unwrap().begin( + hal::command::CommandBufferFlags::ONE_TIME_SUBMIT, + hal::command::CommandBufferInheritanceInfo::default(), + ); + } + + hub.command_buffers + .register_identity(id_in, comb, &mut token) +} + +#[cfg(feature = "local")] +#[no_mangle] +pub extern "C" fn wgpu_device_create_command_encoder( + device_id: 
DeviceId, + desc: Option<&command::CommandEncoderDescriptor>, +) -> CommandEncoderId { + let desc = &desc.cloned().unwrap_or_default(); + gfx_select!(device_id => device_create_command_encoder(&*GLOBAL, device_id, desc, PhantomData)) +} + +#[cfg(feature = "local")] +#[no_mangle] +pub extern "C" fn wgpu_device_get_queue(device_id: DeviceId) -> QueueId { + device_id +} + +pub fn queue_submit( + global: &Global, + queue_id: QueueId, + command_buffer_ids: &[CommandBufferId], +) { + let hub = B::hub(global); + + let (submit_index, fence) = { + let mut token = Token::root(); + let (mut device_guard, mut token) = hub.devices.write(&mut token); + let (swap_chain_guard, mut token) = hub.swap_chains.read(&mut token); + let device = &mut device_guard[queue_id]; + + let mut trackers = device.trackers.lock(); + let mut signal_semaphores = Vec::new(); + + let submit_index = 1 + device + .life_guard + .submission_index + .fetch_add(1, Ordering::Relaxed); + + let (mut command_buffer_guard, mut token) = hub.command_buffers.write(&mut token); + let (bind_group_guard, mut token) = hub.bind_groups.read(&mut token); + let (buffer_guard, mut token) = hub.buffers.read(&mut token); + let (texture_guard, mut token) = hub.textures.read(&mut token); + let (mut texture_view_guard, mut token) = hub.texture_views.write(&mut token); + let (sampler_guard, _) = hub.samplers.read(&mut token); + + //TODO: if multiple command buffers are submitted, we can re-use the last + // native command buffer of the previous chain instead of always creating + // a temporary one, since the chains are not finished. + + // finish all the command buffers first + for &cmb_id in command_buffer_ids { + let comb = &mut command_buffer_guard[cmb_id]; + + if let Some((view_id, fbo)) = comb.used_swap_chain.take() { + match texture_view_guard[view_id.value].inner { + resource::TextureViewInner::Native { .. } => unreachable!(), + resource::TextureViewInner::SwapChain { + ref source_id, + ref mut framebuffers, + .. 
+ } => { + if framebuffers.is_empty() { + let sem = &swap_chain_guard[source_id.value].semaphore; + signal_semaphores.push(sem); + } + framebuffers.push(fbo); + } + }; + } + + // optimize the tracked states + comb.trackers.optimize(); + + // update submission IDs + for id in comb.trackers.buffers.used() { + let buffer = &buffer_guard[id]; + assert!(buffer.pending_map_operation.is_none()); + buffer + .life_guard + .submission_index + .store(submit_index, Ordering::Release); + } + for id in comb.trackers.textures.used() { + texture_guard[id] + .life_guard + .submission_index + .store(submit_index, Ordering::Release); + } + for id in comb.trackers.views.used() { + texture_view_guard[id] + .life_guard + .submission_index + .store(submit_index, Ordering::Release); + } + for id in comb.trackers.bind_groups.used() { + bind_group_guard[id] + .life_guard + .submission_index + .store(submit_index, Ordering::Release); + } + for id in comb.trackers.samplers.used() { + sampler_guard[id] + .life_guard + .submission_index + .store(submit_index, Ordering::Release); + } + + // execute resource transitions + let mut transit = device.com_allocator.extend(comb); + unsafe { + transit.begin( + hal::command::CommandBufferFlags::ONE_TIME_SUBMIT, + hal::command::CommandBufferInheritanceInfo::default(), + ); + } + log::trace!("Stitching command buffer {:?} before submission", cmb_id); + command::CommandBuffer::insert_barriers( + &mut transit, + &mut *trackers, + &comb.trackers, + Stitch::Init, + &*buffer_guard, + &*texture_guard, + ); + unsafe { + transit.finish(); + } + comb.raw.insert(0, transit); + unsafe { + comb.raw.last_mut().unwrap().finish(); + } + } + + // now prepare the GPU submission + let fence = device.raw.create_fence(false).unwrap(); + let submission = hal::queue::Submission::<_, _, Vec<&B::Semaphore>> { + command_buffers: command_buffer_ids + .iter() + .flat_map(|&cmb_id| &command_buffer_guard[cmb_id].raw), + wait_semaphores: Vec::new(), + signal_semaphores, + }; + + unsafe { + device.queue_group.queues[0].submit(submission, Some(&fence)); + } + + (submit_index, fence) + }; + + // No need for write access to the device from here on out + let callbacks = { + let mut token = Token::root(); + let (device_guard, mut token) = hub.devices.read(&mut token); + let device = &device_guard[queue_id]; + + let callbacks = device.maintain(global, false, &mut token); + device.pending.lock().active.alloc().init(ActiveSubmission { + index: submit_index, + fence, + resources: Vec::new(), + mapped: Vec::new(), + }); + + // finally, return the command buffers to the allocator + for &cmb_id in command_buffer_ids { + let (cmd_buf, _) = hub.command_buffers.unregister(cmb_id, &mut token); + device.com_allocator.after_submit(cmd_buf, submit_index); + } + + callbacks + }; + + Device::::fire_map_callbacks(callbacks); +} + +#[cfg(feature = "local")] +#[no_mangle] +pub extern "C" fn wgpu_queue_submit( + queue_id: QueueId, + command_buffers: *const CommandBufferId, + command_buffers_length: usize, +) { + let command_buffer_ids = + unsafe { slice::from_raw_parts(command_buffers, command_buffers_length) }; + gfx_select!(queue_id => queue_submit(&*GLOBAL, queue_id, command_buffer_ids)) +} + +pub fn device_create_render_pipeline( + global: &Global, + device_id: DeviceId, + desc: &pipeline::RenderPipelineDescriptor, + id_in: Input, +) -> Output { + let hub = B::hub(global); + let mut token = Token::root(); + + let sc = desc.sample_count; + assert!( + sc == 1 || sc == 2 || sc == 4 || sc == 8 || sc == 16 || sc == 32, + "Invalid 
sample_count of {}", + sc + ); + let sc = sc as u8; + + let color_states = + unsafe { slice::from_raw_parts(desc.color_states, desc.color_states_length) }; + let depth_stencil_state = unsafe { desc.depth_stencil_state.as_ref() }; + + let rasterizer = conv::map_rasterization_state_descriptor( + &unsafe { desc.rasterization_state.as_ref() } + .cloned() + .unwrap_or_default(), + ); + + let desc_vbs = unsafe { + slice::from_raw_parts( + desc.vertex_input.vertex_buffers, + desc.vertex_input.vertex_buffers_length, + ) + }; + let mut vertex_strides = Vec::with_capacity(desc_vbs.len()); + let mut vertex_buffers = Vec::with_capacity(desc_vbs.len()); + let mut attributes = Vec::new(); + for (i, vb_state) in desc_vbs.iter().enumerate() { + vertex_strides + .alloc() + .init((vb_state.stride, vb_state.step_mode)); + if vb_state.attributes_length == 0 { + continue; + } + vertex_buffers.alloc().init(hal::pso::VertexBufferDesc { + binding: i as u32, + stride: vb_state.stride as u32, + rate: match vb_state.step_mode { + pipeline::InputStepMode::Vertex => hal::pso::VertexInputRate::Vertex, + pipeline::InputStepMode::Instance => hal::pso::VertexInputRate::Instance(1), + }, + }); + let desc_atts = + unsafe { slice::from_raw_parts(vb_state.attributes, vb_state.attributes_length) }; + for attribute in desc_atts { + assert_eq!(0, attribute.offset >> 32); + attributes.alloc().init(hal::pso::AttributeDesc { + location: attribute.shader_location, + binding: i as u32, + element: hal::pso::Element { + format: conv::map_vertex_format(attribute.format), + offset: attribute.offset as u32, + }, + }); + } + } + + let input_assembler = hal::pso::InputAssemblerDesc { + primitive: conv::map_primitive_topology(desc.primitive_topology), + with_adjacency: false, + restart_index: None, //TODO + }; + + let blender = hal::pso::BlendDesc { + logic_op: None, // TODO + targets: color_states + .iter() + .map(conv::map_color_state_descriptor) + .collect(), + }; + let depth_stencil = depth_stencil_state + .map(conv::map_depth_stencil_state_descriptor) + .unwrap_or_default(); + + let multisampling: Option = if sc == 1 { + None + } else { + Some(hal::pso::Multisampling { + rasterization_samples: sc, + sample_shading: None, + sample_mask: desc.sample_mask as u64, + alpha_coverage: desc.alpha_to_coverage_enabled, + alpha_to_one: false, + }) + }; + + // TODO + let baked_states = hal::pso::BakedStates { + viewport: None, + scissor: None, + blend_color: None, + depth_bounds: None, + }; + + let raw_pipeline = { + let (device_guard, mut token) = hub.devices.read(&mut token); + let device = &device_guard[device_id]; + let (pipeline_layout_guard, mut token) = hub.pipeline_layouts.read(&mut token); + let layout = &pipeline_layout_guard[desc.layout].raw; + let (shader_module_guard, _) = hub.shader_modules.read(&mut token); + + let rp_key = RenderPassKey { + colors: color_states + .iter() + .map(|at| hal::pass::Attachment { + format: Some(conv::map_texture_format(at.format, device.features)), + samples: sc, + ops: hal::pass::AttachmentOps::PRESERVE, + stencil_ops: hal::pass::AttachmentOps::DONT_CARE, + layouts: hal::image::Layout::General .. hal::image::Layout::General, + }) + .collect(), + // We can ignore the resolves as the vulkan specs says: + // As an additional special case, if two render passes have a single subpass, + // they are compatible even if they have different resolve attachment references + // or depth/stencil resolve modes but satisfy the other compatibility conditions. 
+ resolves: ArrayVec::new(), + depth_stencil: depth_stencil_state.map(|at| hal::pass::Attachment { + format: Some(conv::map_texture_format(at.format, device.features)), + samples: sc, + ops: hal::pass::AttachmentOps::PRESERVE, + stencil_ops: hal::pass::AttachmentOps::PRESERVE, + layouts: hal::image::Layout::General .. hal::image::Layout::General, + }), + }; + + let mut render_pass_cache = device.render_passes.lock(); + let main_pass = match render_pass_cache.entry(rp_key) { + Entry::Occupied(e) => e.into_mut(), + Entry::Vacant(e) => { + let color_ids = [ + (0, hal::image::Layout::ColorAttachmentOptimal), + (1, hal::image::Layout::ColorAttachmentOptimal), + (2, hal::image::Layout::ColorAttachmentOptimal), + (3, hal::image::Layout::ColorAttachmentOptimal), + ]; + + let depth_id = ( + desc.color_states_length, + hal::image::Layout::DepthStencilAttachmentOptimal, + ); + + let subpass = hal::pass::SubpassDesc { + colors: &color_ids[.. desc.color_states_length], + depth_stencil: depth_stencil_state.map(|_| &depth_id), + inputs: &[], + resolves: &[], + preserves: &[], + }; + + let pass = unsafe { + device + .raw + .create_render_pass(e.key().all(), &[subpass], &[]) + } + .unwrap(); + e.insert(pass) + } + }; + + let vertex = hal::pso::EntryPoint:: { + entry: unsafe { ffi::CStr::from_ptr(desc.vertex_stage.entry_point) } + .to_str() + .to_owned() + .unwrap(), // TODO + module: &shader_module_guard[desc.vertex_stage.module].raw, + specialization: hal::pso::Specialization::EMPTY, + }; + let fragment = + unsafe { desc.fragment_stage.as_ref() }.map(|stage| hal::pso::EntryPoint:: { + entry: unsafe { ffi::CStr::from_ptr(stage.entry_point) } + .to_str() + .to_owned() + .unwrap(), // TODO + module: &shader_module_guard[stage.module].raw, + specialization: hal::pso::Specialization::EMPTY, + }); + + let shaders = hal::pso::GraphicsShaderSet { + vertex, + hull: None, + domain: None, + geometry: None, + fragment, + }; + + let subpass = hal::pass::Subpass { + index: 0, + main_pass, + }; + + // TODO + let flags = hal::pso::PipelineCreationFlags::empty(); + // TODO + let parent = hal::pso::BasePipeline::None; + + let pipeline_desc = hal::pso::GraphicsPipelineDesc { + shaders, + rasterizer, + vertex_buffers, + attributes, + input_assembler, + blender, + depth_stencil, + multisampling, + baked_states, + layout, + subpass, + flags, + parent, + }; + + // TODO: cache + unsafe { + device + .raw + .create_graphics_pipeline(&pipeline_desc, None) + .unwrap() + } + }; + + let pass_context = RenderPassContext { + colors: color_states.iter().map(|state| state.format).collect(), + resolves: ArrayVec::new(), + depth_stencil: depth_stencil_state.map(|state| state.format), + }; + + let mut flags = pipeline::PipelineFlags::empty(); + for state in color_states { + if state.color_blend.uses_color() | state.alpha_blend.uses_color() { + flags |= pipeline::PipelineFlags::BLEND_COLOR; + } + } + if let Some(ds) = depth_stencil_state { + if ds.needs_stencil_reference() { + flags |= pipeline::PipelineFlags::STENCIL_REFERENCE; + } + } + + let pipeline = pipeline::RenderPipeline { + raw: raw_pipeline, + layout_id: desc.layout, + pass_context, + flags, + index_format: desc.vertex_input.index_format, + vertex_strides, + sample_count: sc, + }; + + hub.render_pipelines + .register_identity(id_in, pipeline, &mut token) +} + +#[cfg(feature = "local")] +#[no_mangle] +pub extern "C" fn wgpu_device_create_render_pipeline( + device_id: DeviceId, + desc: &pipeline::RenderPipelineDescriptor, +) -> RenderPipelineId { + gfx_select!(device_id => 
device_create_render_pipeline(&*GLOBAL, device_id, desc, PhantomData)) +} + +pub fn device_create_compute_pipeline( + global: &Global, + device_id: DeviceId, + desc: &pipeline::ComputePipelineDescriptor, + id_in: Input, +) -> Output { + let hub = B::hub(global); + let mut token = Token::root(); + + let raw_pipeline = { + let (device_guard, mut token) = hub.devices.read(&mut token); + let device = &device_guard[device_id].raw; + let (pipeline_layout_guard, mut token) = hub.pipeline_layouts.read(&mut token); + let layout = &pipeline_layout_guard[desc.layout].raw; + let pipeline_stage = &desc.compute_stage; + let (shader_module_guard, _) = hub.shader_modules.read(&mut token); + + let shader = hal::pso::EntryPoint:: { + entry: unsafe { ffi::CStr::from_ptr(pipeline_stage.entry_point) } + .to_str() + .to_owned() + .unwrap(), // TODO + module: &shader_module_guard[pipeline_stage.module].raw, + specialization: hal::pso::Specialization::EMPTY, + }; + + // TODO + let flags = hal::pso::PipelineCreationFlags::empty(); + // TODO + let parent = hal::pso::BasePipeline::None; + + let pipeline_desc = hal::pso::ComputePipelineDesc { + shader, + layout, + flags, + parent, + }; + + unsafe { + device + .create_compute_pipeline(&pipeline_desc, None) + .unwrap() + } + }; + + let pipeline = pipeline::ComputePipeline { + raw: raw_pipeline, + layout_id: desc.layout, + }; + hub.compute_pipelines + .register_identity(id_in, pipeline, &mut token) +} + +#[cfg(feature = "local")] +#[no_mangle] +pub extern "C" fn wgpu_device_create_compute_pipeline( + device_id: DeviceId, + desc: &pipeline::ComputePipelineDescriptor, +) -> ComputePipelineId { + gfx_select!(device_id => device_create_compute_pipeline(&*GLOBAL, device_id, desc, PhantomData)) +} + +pub fn device_create_swap_chain( + global: &Global, + device_id: DeviceId, + surface_id: SurfaceId, + desc: &swap_chain::SwapChainDescriptor, +) -> SwapChainId { + log::info!("creating swap chain {:?}", desc); + let hub = B::hub(global); + let mut token = Token::root(); + + let (mut surface_guard, mut token) = global.surfaces.write(&mut token); + let (adapter_guard, mut token) = hub.adapters.read(&mut token); + let (device_guard, mut token) = hub.devices.read(&mut token); + let (mut swap_chain_guard, _) = hub.swap_chains.write(&mut token); + let device = &device_guard[device_id]; + let surface = &mut surface_guard[surface_id]; + + let (caps, formats) = { + let suf = B::get_surface_mut(surface); + let adapter = &adapter_guard[device.adapter_id]; + assert!(suf.supports_queue_family(&adapter.raw.queue_families[0])); + let formats = suf.supported_formats(&adapter.raw.physical_device); + let caps = suf.capabilities(&adapter.raw.physical_device); + (caps, formats) + }; + let num_frames = swap_chain::DESIRED_NUM_FRAMES + .max(*caps.image_count.start()) + .min(*caps.image_count.end()); + let config = desc.to_hal(num_frames, &device.features); + + if let Some(formats) = formats { + assert!( + formats.contains(&config.format), + "Requested format {:?} is not in supported list: {:?}", + config.format, + formats + ); + } + if desc.width < caps.extents.start().width + || desc.width > caps.extents.end().width + || desc.height < caps.extents.start().height + || desc.height > caps.extents.end().height + { + log::warn!( + "Requested size {}x{} is outside of the supported range: {:?}", + desc.width, + desc.height, + caps.extents + ); + } + + unsafe { + B::get_surface_mut(surface) + .configure_swapchain(&device.raw, config) + .unwrap(); + } + + let sc_id = surface_id.to_swap_chain_id(B::VARIANT); + 
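+    // (Editorial note, not part of the original patch.) A swap chain id is
+    // never allocated on its own: `to_swap_chain_id` simply re-stamps the
+    // surface id with the backend variant, so reconfiguring the same surface
+    // always yields the same id. Any swap chain previously registered under
+    // that id is therefore torn down first -- its semaphore destroyed --
+    // before the replacement is inserted below.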
if let Some(sc) = swap_chain_guard.remove(sc_id) { + unsafe { + device.raw.destroy_semaphore(sc.semaphore); + } + } + let swap_chain = swap_chain::SwapChain { + life_guard: LifeGuard::new(), + device_id: Stored { + value: device_id, + ref_count: device.life_guard.ref_count.clone(), + }, + desc: desc.clone(), + num_frames, + semaphore: device.raw.create_semaphore().unwrap(), + acquired_view_id: None, + }; + swap_chain_guard.insert(sc_id, swap_chain); + sc_id +} + +#[cfg(feature = "local")] +#[no_mangle] +pub extern "C" fn wgpu_device_create_swap_chain( + device_id: DeviceId, + surface_id: SurfaceId, + desc: &swap_chain::SwapChainDescriptor, +) -> SwapChainId { + gfx_select!(device_id => device_create_swap_chain(&*GLOBAL, device_id, surface_id, desc)) +} + +pub fn device_poll(global: &Global, device_id: DeviceId, force_wait: bool) { + let hub = B::hub(global); + let callbacks = { + let (device_guard, mut token) = hub.devices.read(&mut Token::root()); + device_guard[device_id].maintain(global, force_wait, &mut token) + }; + Device::::fire_map_callbacks(callbacks); +} + +#[cfg(feature = "local")] +#[no_mangle] +pub extern "C" fn wgpu_device_poll(device_id: DeviceId, force_wait: bool) { + gfx_select!(device_id => device_poll(&*GLOBAL, device_id, force_wait)) +} + +pub fn device_destroy(global: &Global, device_id: DeviceId) { + let hub = B::hub(global); + let (device, mut token) = hub.devices.unregister(device_id, &mut Token::root()); + device.maintain(global, true, &mut token); + device.com_allocator.destroy(&device.raw); +} + +#[cfg(feature = "local")] +#[no_mangle] +pub extern "C" fn wgpu_device_destroy(device_id: DeviceId) { + gfx_select!(device_id => device_destroy(&*GLOBAL, device_id)) +} + +pub type BufferMapReadCallback = + extern "C" fn(status: BufferMapAsyncStatus, data: *const u8, userdata: *mut u8); +pub type BufferMapWriteCallback = + extern "C" fn(status: BufferMapAsyncStatus, data: *mut u8, userdata: *mut u8); + +pub fn buffer_map_async( + global: &Global, + buffer_id: BufferId, + usage: resource::BufferUsage, + operation: BufferMapOperation, +) { + let hub = B::hub(global); + let mut token = Token::root(); + let (device_guard, mut token) = hub.devices.read(&mut token); + + let (device_id, ref_count) = { + let (mut buffer_guard, _) = hub.buffers.write(&mut token); + let buffer = &mut buffer_guard[buffer_id]; + + if usage.contains(resource::BufferUsage::MAP_READ) { + assert!(buffer.usage.contains(resource::BufferUsage::MAP_READ)); + } + + if usage.contains(resource::BufferUsage::MAP_WRITE) { + assert!(buffer.usage.contains(resource::BufferUsage::MAP_WRITE)); + } + + if buffer.pending_map_operation.is_some() { + operation.call_error(); + return; + } + + buffer.pending_map_operation = Some(operation); + (buffer.device_id.value, buffer.life_guard.ref_count.clone()) + }; + + let device = &device_guard[device_id]; + + device + .trackers + .lock() + .buffers + .change_replace(buffer_id, &ref_count, (), usage); + + device.pending.lock().map(buffer_id, ref_count); +} + +#[cfg(feature = "local")] +#[no_mangle] +pub extern "C" fn wgpu_buffer_map_read_async( + buffer_id: BufferId, + start: BufferAddress, + size: BufferAddress, + callback: BufferMapReadCallback, + userdata: *mut u8, +) { + let operation = BufferMapOperation::Read(start .. 
start + size, callback, userdata); + gfx_select!(buffer_id => buffer_map_async(&*GLOBAL, buffer_id, resource::BufferUsage::MAP_READ, operation)) +} + +#[cfg(feature = "local")] +#[no_mangle] +pub extern "C" fn wgpu_buffer_map_write_async( + buffer_id: BufferId, + start: BufferAddress, + size: BufferAddress, + callback: BufferMapWriteCallback, + userdata: *mut u8, +) { + let operation = BufferMapOperation::Write(start .. start + size, callback, userdata); + gfx_select!(buffer_id => buffer_map_async(&*GLOBAL, buffer_id, resource::BufferUsage::MAP_WRITE, operation)) +} + +pub fn buffer_unmap(global: &Global, buffer_id: BufferId) { + let hub = B::hub(global); + let mut token = Token::root(); + + let (device_guard, mut token) = hub.devices.read(&mut token); + let (mut buffer_guard, _) = hub.buffers.write(&mut token); + + let buffer = &mut buffer_guard[buffer_id]; + let device_raw = &device_guard[buffer.device_id.value].raw; + + if !buffer.mapped_write_ranges.is_empty() { + unsafe { + device_raw + .flush_mapped_memory_ranges( + buffer + .mapped_write_ranges + .iter() + .map(|r| (buffer.memory.memory(), r.clone())), + ) + .unwrap() + }; + buffer.mapped_write_ranges.clear(); + } + + buffer.memory.unmap(device_raw); +} + +#[cfg(feature = "local")] +#[no_mangle] +pub extern "C" fn wgpu_buffer_unmap(buffer_id: BufferId) { + gfx_select!(buffer_id => buffer_unmap(&*GLOBAL, buffer_id)) +} diff --git a/dom/webgpu/wgpu-native/src/hub.rs b/dom/webgpu/wgpu-native/src/hub.rs new file mode 100644 index 000000000000..8bd50c937e64 --- /dev/null +++ b/dom/webgpu/wgpu-native/src/hub.rs @@ -0,0 +1,556 @@ + + + + +use crate::{ + backend, + id::{Input, Output}, + Adapter, + AdapterId, + Backend, + BindGroup, + BindGroupId, + BindGroupLayout, + BindGroupLayoutId, + Buffer, + BufferId, + CommandBuffer, + CommandBufferId, + ComputePass, + ComputePassId, + ComputePipeline, + ComputePipelineId, + Device, + DeviceId, + Epoch, + Index, + Instance, + PipelineLayout, + PipelineLayoutId, + RenderPass, + RenderPassId, + RenderPipeline, + RenderPipelineId, + Sampler, + SamplerId, + ShaderModule, + ShaderModuleId, + Surface, + SurfaceId, + SwapChain, + SwapChainId, + Texture, + TextureId, + TextureView, + TextureViewId, + TypedId, +}; + +#[cfg(feature = "local")] +use parking_lot::Mutex; +use parking_lot::{RwLock, RwLockReadGuard, RwLockWriteGuard}; +use vec_map::VecMap; + +#[cfg(debug_assertions)] +use std::cell::Cell; +#[cfg(feature = "local")] +use std::sync::Arc; +use std::{marker::PhantomData, ops}; + + + +#[derive(Debug)] +pub struct IdentityManager { + free: Vec, + epochs: Vec, + backend: Backend, + phantom: PhantomData, +} + +impl IdentityManager { + pub fn new(backend: Backend) -> Self { + IdentityManager { + free: Default::default(), + epochs: Default::default(), + backend, + phantom: PhantomData, + } + } +} + +impl IdentityManager { + pub fn alloc(&mut self) -> I { + match self.free.pop() { + Some(index) => I::zip(index, self.epochs[index as usize], self.backend), + None => { + let epoch = 1; + let id = I::zip(self.epochs.len() as Index, epoch, self.backend); + self.epochs.push(epoch); + id + } + } + } + + pub fn free(&mut self, id: I) { + let (index, epoch, backend) = id.unzip(); + debug_assert_eq!(backend, self.backend); + + if cfg!(debug_assertions) { + assert!(!self.free.contains(&index)); + } + let pe = &mut self.epochs[index as usize]; + assert_eq!(*pe, epoch); + *pe += 1; + self.free.push(index); + } +} + +#[derive(Debug)] +pub struct Storage { + + map: VecMap<(T, Epoch)>, + _phantom: PhantomData, +} + +impl 
ops::Index for Storage { + type Output = T; + fn index(&self, id: I) -> &T { + let (index, epoch, _) = id.unzip(); + let (ref value, storage_epoch) = self.map[index as usize]; + assert_eq!(epoch, storage_epoch); + value + } +} + +impl ops::IndexMut for Storage { + fn index_mut(&mut self, id: I) -> &mut T { + let (index, epoch, _) = id.unzip(); + let (ref mut value, storage_epoch) = self.map[index as usize]; + assert_eq!(epoch, storage_epoch); + value + } +} + +impl Storage { + pub fn contains(&self, id: I) -> bool { + let (index, epoch, _) = id.unzip(); + match self.map.get(index as usize) { + Some(&(_, storage_epoch)) => epoch == storage_epoch, + None => false, + } + } + + pub fn insert(&mut self, id: I, value: T) -> Option { + let (index, epoch, _) = id.unzip(); + let old = self.map.insert(index as usize, (value, epoch)); + old.map(|(v, _storage_epoch)| v) + } + + pub fn remove(&mut self, id: I) -> Option { + let (index, epoch, _) = id.unzip(); + self.map + .remove(index as usize) + .map(|(value, storage_epoch)| { + assert_eq!(epoch, storage_epoch); + value + }) + } +} + + + + + + + + + + + +pub trait Access {} + +pub enum Root {} + +impl Access for Root {} +impl Access for Root {} +impl Access for Instance {} +impl Access> for Root {} +impl Access> for Surface {} +impl Access> for Root {} +impl Access> for Surface {} +impl Access> for Adapter {} +impl Access> for Device {} +impl Access> for Root {} +impl Access> for Device {} +impl Access> for Root {} +impl Access> for Device {} +impl Access> for Root {} +impl Access> for Device {} +impl Access> for PipelineLayout {} +impl Access> for CommandBuffer {} +impl Access> for Root {} +impl Access> for Device {} +impl Access> for SwapChain {} +impl Access> for Root {} +impl Access> for BindGroup {} +impl Access> for CommandBuffer {} +impl Access> for Root {} +impl Access> for BindGroup {} +impl Access> for CommandBuffer {} +impl Access> for Root {} +impl Access> for ComputePass {} +impl Access> for Root {} +impl Access> for RenderPass {} +impl Access> for Root {} +impl Access> for PipelineLayout {} +impl Access> for Root {} +impl Access> for Device {} +impl Access> for BindGroupLayout {} +impl Access> for BindGroup {} +impl Access> for CommandBuffer {} +impl Access> for ComputePass {} +impl Access> for ComputePipeline {} +impl Access> for RenderPass {} +impl Access> for RenderPipeline {} +impl Access> for Root {} +impl Access> for Device {} +impl Access> for Buffer {} +impl Access> for Root {} +impl Access> for SwapChain {} +impl Access> for Device {} +impl Access> for Texture {} +impl Access> for Root {} +impl Access> for Device {} +impl Access> for TextureView {} + +#[cfg(debug_assertions)] +thread_local! 
{ + static ACTIVE_TOKEN: Cell = Cell::new(0); +} + + + + + + +pub struct Token<'a, T: 'a> { + level: PhantomData<&'a T>, +} + +impl<'a, T> Token<'a, T> { + fn new() -> Self { + #[cfg(debug_assertions)] + ACTIVE_TOKEN.with(|active| { + let old = active.get(); + assert_ne!(old, 0, "Root token was dropped"); + active.set(old + 1); + }); + Token { level: PhantomData } + } +} + +impl Token<'static, Root> { + pub fn root() -> Self { + #[cfg(debug_assertions)] + ACTIVE_TOKEN.with(|active| { + assert_eq!(0, active.replace(1), "Root token is already active"); + }); + + Token { level: PhantomData } + } +} + +impl<'a, T> Drop for Token<'a, T> { + fn drop(&mut self) { + #[cfg(debug_assertions)] + ACTIVE_TOKEN.with(|active| { + let old = active.get(); + active.set(old - 1); + }); + } +} + + +#[derive(Debug)] +pub struct Registry { + #[cfg(feature = "local")] + pub identity: Mutex>, + data: RwLock>, + backend: Backend, +} + +impl Registry { + fn new(backend: Backend) -> Self { + Registry { + #[cfg(feature = "local")] + identity: Mutex::new(IdentityManager::new(backend)), + data: RwLock::new(Storage { + map: VecMap::new(), + _phantom: PhantomData, + }), + backend, + } + } +} + +impl Registry { + pub fn register>(&self, id: I, value: T, _token: &mut Token) { + debug_assert_eq!(id.unzip().2, self.backend); + let old = self.data.write().insert(id, value); + assert!(old.is_none()); + } + + #[cfg(feature = "local")] + pub fn new_identity(&self, _id_in: Input) -> (I, Output) { + let id = self.identity.lock().alloc(); + (id, id) + } + + #[cfg(not(feature = "local"))] + pub fn new_identity(&self, id_in: Input) -> (I, Output) { + + (id_in, PhantomData) + } + + pub fn register_identity>( + &self, + id_in: Input, + value: T, + token: &mut Token, + ) -> Output { + let (id, output) = self.new_identity(id_in); + self.register(id, value, token); + output + } + + pub fn unregister>(&self, id: I, _token: &mut Token) -> (T, Token) { + let value = self.data.write().remove(id).unwrap(); + + #[cfg(feature = "local")] + self.identity.lock().free(id); + (value, Token::new()) + } + + pub fn read>( + &self, + _token: &mut Token, + ) -> (RwLockReadGuard>, Token) { + (self.data.read(), Token::new()) + } + + pub fn write>( + &self, + _token: &mut Token, + ) -> (RwLockWriteGuard>, Token) { + (self.data.write(), Token::new()) + } +} + +#[derive(Debug)] +pub struct Hub { + pub adapters: Registry, AdapterId>, + pub devices: Registry, DeviceId>, + pub swap_chains: Registry, SwapChainId>, + pub pipeline_layouts: Registry, PipelineLayoutId>, + pub shader_modules: Registry, ShaderModuleId>, + pub bind_group_layouts: Registry, BindGroupLayoutId>, + pub bind_groups: Registry, BindGroupId>, + pub command_buffers: Registry, CommandBufferId>, + pub render_passes: Registry, RenderPassId>, + pub render_pipelines: Registry, RenderPipelineId>, + pub compute_passes: Registry, ComputePassId>, + pub compute_pipelines: Registry, ComputePipelineId>, + pub buffers: Registry, BufferId>, + pub textures: Registry, TextureId>, + pub texture_views: Registry, TextureViewId>, + pub samplers: Registry, SamplerId>, +} + +impl Default for Hub { + fn default() -> Self { + Hub { + adapters: Registry::new(B::VARIANT), + devices: Registry::new(B::VARIANT), + swap_chains: Registry::new(B::VARIANT), + pipeline_layouts: Registry::new(B::VARIANT), + shader_modules: Registry::new(B::VARIANT), + bind_group_layouts: Registry::new(B::VARIANT), + bind_groups: Registry::new(B::VARIANT), + command_buffers: Registry::new(B::VARIANT), + render_passes: Registry::new(B::VARIANT), + 
render_pipelines: Registry::new(B::VARIANT), + compute_passes: Registry::new(B::VARIANT), + compute_pipelines: Registry::new(B::VARIANT), + buffers: Registry::new(B::VARIANT), + textures: Registry::new(B::VARIANT), + texture_views: Registry::new(B::VARIANT), + samplers: Registry::new(B::VARIANT), + } + } +} + +impl Drop for Hub { + fn drop(&mut self) { + use crate::resource::TextureViewInner; + use hal::device::Device as _; + + let mut devices = self.devices.data.write(); + + for (_, (sampler, _)) in self.samplers.data.write().map.drain() { + unsafe { + devices[sampler.device_id.value].raw.destroy_sampler(sampler.raw); + } + } + { + let textures = self.textures.data.read(); + for (_, (texture_view, _)) in self.texture_views.data.write().map.drain() { + match texture_view.inner { + TextureViewInner::Native { raw, source_id } => { + let device = &devices[textures[source_id.value].device_id.value]; + unsafe { + device.raw.destroy_image_view(raw); + } + } + TextureViewInner::SwapChain { .. } => {} + } + } + } + for (_, (texture, _)) in self.textures.data.write().map.drain() { + unsafe { + devices[texture.device_id.value].raw.destroy_image(texture.raw); + } + } + for (_, (buffer, _)) in self.buffers.data.write().map.drain() { + unsafe { + devices[buffer.device_id.value].raw.destroy_buffer(buffer.raw); + } + } + for (_, (command_buffer, _)) in self.command_buffers.data.write().map.drain() { + devices[command_buffer.device_id.value].com_allocator.after_submit(command_buffer, 0); + } + for (_, (bind_group, _)) in self.bind_groups.data.write().map.drain() { + let device = &devices[bind_group.device_id.value]; + device.destroy_bind_group(bind_group); + } + + + + + + + + + + + + + for (_, (device, _)) in devices.map.drain() { + device.dispose(); + } + } +} + +#[derive(Debug, Default)] +pub struct Hubs { + #[cfg(any( + not(any(target_os = "ios", target_os = "macos")), + feature = "gfx-backend-vulkan" + ))] + vulkan: Hub, + #[cfg(any(target_os = "ios", target_os = "macos"))] + metal: Hub, + #[cfg(windows)] + dx12: Hub, + #[cfg(windows)] + dx11: Hub, +} + +#[derive(Debug)] +pub struct Global { + pub instance: Instance, + pub surfaces: Registry, + hubs: Hubs, +} + +impl Global { + fn new_impl(name: &str) -> Self { + Global { + instance: Instance::new(name, 1), + surfaces: Registry::new(Backend::Empty), + hubs: Hubs::default(), + } + } + + #[cfg(not(feature = "local"))] + pub fn new(name: &str) -> Self { + Self::new_impl(name) + } + + #[cfg(not(feature = "local"))] + pub fn delete(self) { + let Global { mut instance, surfaces, hubs } = self; + drop(hubs); + + for (_, (surface, _)) in surfaces.data.write().map.drain() { + instance.destroy_surface(surface); + } + } +} + +#[cfg(feature = "local")] +lazy_static::lazy_static! 
{
+    pub static ref GLOBAL: Arc<Global> = Arc::new(Global::new_impl("wgpu"));
+}
+
+pub trait GfxBackend: hal::Backend {
+    const VARIANT: Backend;
+    fn hub(global: &Global) -> &Hub<Self>;
+    fn get_surface_mut(surface: &mut Surface) -> &mut Self::Surface;
+}
+
+#[cfg(any(
+    not(any(target_os = "ios", target_os = "macos")),
+    feature = "gfx-backend-vulkan"
+))]
+impl GfxBackend for backend::Vulkan {
+    const VARIANT: Backend = Backend::Vulkan;
+    fn hub(global: &Global) -> &Hub<Self> {
+        &global.hubs.vulkan
+    }
+    fn get_surface_mut(surface: &mut Surface) -> &mut Self::Surface {
+        surface.vulkan.as_mut().unwrap()
+    }
+}
+
+#[cfg(any(target_os = "ios", target_os = "macos"))]
+impl GfxBackend for backend::Metal {
+    const VARIANT: Backend = Backend::Metal;
+    fn hub(global: &Global) -> &Hub<Self> {
+        &global.hubs.metal
+    }
+    fn get_surface_mut(surface: &mut Surface) -> &mut Self::Surface {
+        &mut surface.metal
+    }
+}
+
+#[cfg(windows)]
+impl GfxBackend for backend::Dx12 {
+    const VARIANT: Backend = Backend::Dx12;
+    fn hub(global: &Global) -> &Hub<Self> {
+        &global.hubs.dx12
+    }
+    fn get_surface_mut(surface: &mut Surface) -> &mut Self::Surface {
+        surface.dx12.as_mut().unwrap()
+    }
+}
+
+#[cfg(windows)]
+impl GfxBackend for backend::Dx11 {
+    const VARIANT: Backend = Backend::Dx11;
+    fn hub(global: &Global) -> &Hub<Self> {
+        &global.hubs.dx11
+    }
+    fn get_surface_mut(surface: &mut Surface) -> &mut Self::Surface {
+        &mut surface.dx11
+    }
+}
diff --git a/dom/webgpu/wgpu-native/src/id.rs b/dom/webgpu/wgpu-native/src/id.rs
new file mode 100644
index 000000000000..ade40e64e8c8
--- /dev/null
+++ b/dom/webgpu/wgpu-native/src/id.rs
@@ -0,0 +1,142 @@
+
+
+
+
+use crate::{Backend, Epoch, Index};
+#[cfg(feature = "serde")]
+use serde::{Deserialize, Serialize};
+use std::{fmt, marker::PhantomData};
+
+const BACKEND_BITS: usize = 3;
+const EPOCH_MASK: u32 = (1 << (32 - BACKEND_BITS)) - 1;
+type Dummy = crate::backend::Empty;
+
+#[repr(transparent)]
+#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))]
+pub struct Id<T>(u64, PhantomData<T>);
+
+impl<T> Id<T> {
+    pub fn backend(&self) -> Backend {
+        match self.0 >> (64 - BACKEND_BITS) as u8 {
+            0 => Backend::Empty,
+            1 => Backend::Vulkan,
+            2 => Backend::Metal,
+            3 => Backend::Dx12,
+            4 => Backend::Dx11,
+            5 => Backend::Gl,
+            _ => unreachable!(),
+        }
+    }
+}
+
+impl<T> Copy for Id<T> {}
+
+impl<T> Clone for Id<T> {
+    fn clone(&self) -> Self {
+        Self(self.0, PhantomData)
+    }
+}
+
+impl<T> fmt::Debug for Id<T> {
+    fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result {
+        self.unzip().fmt(formatter)
+    }
+}
+
+impl<T> std::hash::Hash for Id<T> {
+    fn hash<H: std::hash::Hasher>(&self, state: &mut H) {
+        self.0.hash(state);
+    }
+}
+
+impl<T> PartialEq for Id<T> {
+    fn eq(&self, other: &Self) -> bool {
+        self.0 == other.0
+    }
+}
+
+pub trait TypedId {
+    fn zip(index: Index, epoch: Epoch, backend: Backend) -> Self;
+    fn unzip(self) -> (Index, Epoch, Backend);
+}
+
+impl<T> TypedId for Id<T> {
+    fn zip(index: Index, epoch: Epoch, backend: Backend) -> Self {
+        assert_eq!(0, epoch >> (32 - BACKEND_BITS));
+        let v = index as u64 | ((epoch as u64) << 32) | ((backend as u64) << (64 - BACKEND_BITS));
+        Id(v, PhantomData)
+    }
+
+    fn unzip(self) -> (Index, Epoch, Backend) {
+        (
+            self.0 as u32,
+            (self.0 >> 32) as u32 & EPOCH_MASK,
+            self.backend(),
+        )
+    }
+}
+
+#[cfg(not(feature = "local"))]
+pub type Input<T> = T;
+#[cfg(feature = "local")]
+pub type Input<T> = PhantomData<T>;
+#[cfg(feature = "local")]
+pub type Output<T> = T;
+#[cfg(not(feature = "local"))]
+pub type Output<T> = PhantomData<T>;
+
+
+pub type AdapterId = Id<crate::Adapter<Dummy>>;
+pub type DeviceId = Id<crate::Device<Dummy>>;
+pub type QueueId = DeviceId;
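+// (Editorial sketch, not part of the original patch.) Round-trip of the
+// packed 64-bit id layout defined by `TypedId::zip`/`unzip` above: the index
+// occupies the low 32 bits, the epoch the next 32 - BACKEND_BITS bits, and
+// the backend variant the top BACKEND_BITS bits.
+#[test]
+fn test_id_zip_unzip() {
+    let id: Id<()> = Id::zip(42, 7, Backend::Metal);
+    assert_eq!(id.unzip(), (42, 7, Backend::Metal));
+    assert_eq!(id.backend(), Backend::Metal);
+}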
+pub type BufferId = Id>; +pub type TextureViewId = Id>; +pub type TextureId = Id>; +pub type SamplerId = Id>; + +pub type BindGroupLayoutId = Id>; +pub type PipelineLayoutId = Id>; +pub type BindGroupId = Id>; + +pub type InputStateId = Id; +pub type ShaderModuleId = Id>; +pub type RenderPipelineId = Id>; +pub type ComputePipelineId = Id>; + +pub type CommandBufferId = Id>; +pub type CommandEncoderId = CommandBufferId; +pub type RenderBundleId = Id>; +pub type RenderPassId = Id>; +pub type ComputePassId = Id>; + +pub type SurfaceId = Id; +pub type SwapChainId = Id>; + +impl SurfaceId { + pub(crate) fn to_swap_chain_id(&self, backend: Backend) -> SwapChainId { + let (index, epoch, _) = self.unzip(); + Id::zip(index, epoch, backend) + } +} +impl SwapChainId { + pub(crate) fn to_surface_id(&self) -> SurfaceId { + let (index, epoch, _) = self.unzip(); + Id::zip(index, epoch, Backend::Empty) + } +} + +#[test] +fn test_id_backend() { + for &b in &[ + Backend::Empty, + Backend::Vulkan, + Backend::Metal, + Backend::Dx12, + Backend::Dx11, + Backend::Gl, + ] { + let id: Id<()> = Id::zip(0, 0, b); + assert_eq!(id.backend(), b); + } +} diff --git a/dom/webgpu/wgpu-native/src/instance.rs b/dom/webgpu/wgpu-native/src/instance.rs new file mode 100644 index 000000000000..e51e28b740ba --- /dev/null +++ b/dom/webgpu/wgpu-native/src/instance.rs @@ -0,0 +1,563 @@ + + + + +use crate::{ + backend, + binding_model::MAX_BIND_GROUPS, + device::BIND_BUFFER_ALIGNMENT, + hub::{GfxBackend, Global, Token}, + id::{Input, Output}, + AdapterId, + AdapterInfo, + Backend, + Device, + DeviceId, +}; +#[cfg(feature = "local")] +use crate::{gfx_select, hub::GLOBAL, SurfaceId}; + +#[cfg(feature = "local")] +use bitflags::bitflags; +#[cfg(feature = "serde")] +use serde::{Deserialize, Serialize}; + +use hal::{self, adapter::PhysicalDevice as _, queue::QueueFamily as _, Instance as _}; +#[cfg(feature = "local")] +use std::marker::PhantomData; + + +#[derive(Debug)] +pub struct Instance { + #[cfg(any( + not(any(target_os = "ios", target_os = "macos")), + feature = "gfx-backend-vulkan" + ))] + vulkan: Option, + #[cfg(any(target_os = "ios", target_os = "macos"))] + metal: gfx_backend_metal::Instance, + #[cfg(windows)] + dx12: Option, + #[cfg(windows)] + dx11: gfx_backend_dx11::Instance, +} + +impl Instance { + pub fn new(name: &str, version: u32) -> Self { + Instance { + #[cfg(any( + not(any(target_os = "ios", target_os = "macos")), + feature = "gfx-backend-vulkan" + ))] + vulkan: gfx_backend_vulkan::Instance::create(name, version).ok(), + #[cfg(any(target_os = "ios", target_os = "macos"))] + metal: gfx_backend_metal::Instance::create(name, version).unwrap(), + #[cfg(windows)] + dx12: gfx_backend_dx12::Instance::create(name, version).ok(), + #[cfg(windows)] + dx11: gfx_backend_dx11::Instance::create(name, version).unwrap(), + } + } + + #[cfg(not(feature = "local"))] + pub(crate) fn destroy_surface(&mut self, surface: Surface) { + + #[cfg(any( + not(any(target_os = "ios", target_os = "macos")), + feature = "gfx-backend-vulkan" + ))] + { + if let Some(_suf) = surface.vulkan { + + } + } + #[cfg(any(target_os = "ios", target_os = "macos"))] + { + let _ = surface; + + } + #[cfg(windows)] + { + if let Some(_suf) = surface.dx12 { + + } + + } + } +} + +type GfxSurface = ::Surface; + +#[derive(Debug)] +pub struct Surface { + #[cfg(any( + not(any(target_os = "ios", target_os = "macos")), + feature = "gfx-backend-vulkan" + ))] + pub(crate) vulkan: Option>, + #[cfg(any(target_os = "ios", target_os = "macos"))] + pub(crate) metal: GfxSurface, + 
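+    // (Editorial note, not part of the original patch.) The Vulkan and DX12
+    // surfaces are Option because their instances may fail to load at
+    // runtime (`Instance::create(..).ok()` in Instance::new above); Metal
+    // and DX11 are treated as always available on their respective
+    // platforms, matching the unwrap()s there.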
#[cfg(windows)] + pub(crate) dx12: Option>, + #[cfg(windows)] + pub(crate) dx11: GfxSurface, +} + +#[derive(Debug)] +pub struct Adapter { + pub(crate) raw: hal::adapter::Adapter, +} + +#[repr(C)] +#[derive(Copy, Clone, Debug, Hash, Eq, PartialEq)] +#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))] +pub enum PowerPreference { + Default = 0, + LowPower = 1, + HighPerformance = 2, +} + +#[cfg(feature = "local")] +bitflags! { + #[repr(transparent)] + #[cfg_attr(feature = "serde", derive(Serialize, Deserialize))] + pub struct BackendBit: u32 { + const VULKAN = 1 << Backend::Vulkan as u32; + const GL = 1 << Backend::Gl as u32; + const METAL = 1 << Backend::Metal as u32; + const DX12 = 1 << Backend::Dx12 as u32; + const DX11 = 1 << Backend::Dx11 as u32; + /// Vulkan + METAL + DX12 + const PRIMARY = Self::VULKAN.bits | Self::METAL.bits | Self::DX12.bits; + /// OpenGL + DX11 + const SECONDARY = Self::GL.bits | Self::DX11.bits; + } +} + +#[cfg(feature = "local")] +impl From for BackendBit { + fn from(backend: Backend) -> Self { + BackendBit::from_bits(1 << backend as u32).unwrap() + } +} + +#[repr(C)] +#[derive(Clone, Debug)] +#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))] +pub struct RequestAdapterOptions { + pub power_preference: PowerPreference, + #[cfg(feature = "local")] + pub backends: BackendBit, +} + +impl Default for RequestAdapterOptions { + fn default() -> Self { + RequestAdapterOptions { + power_preference: PowerPreference::Default, + #[cfg(feature = "local")] + backends: BackendBit::PRIMARY, + } + } +} + +#[repr(C)] +#[derive(Clone, Debug, Default)] +#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))] +pub struct Extensions { + pub anisotropic_filtering: bool, +} + +#[repr(C)] +#[derive(Clone, Debug)] +#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))] +pub struct Limits { + pub max_bind_groups: u32, +} + +impl Default for Limits { + fn default() -> Self { + Limits { + max_bind_groups: MAX_BIND_GROUPS as u32, + } + } +} + +#[repr(C)] +#[derive(Clone, Debug, Default)] +#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))] +pub struct DeviceDescriptor { + pub extensions: Extensions, + pub limits: Limits, +} + +#[cfg(feature = "local")] +pub fn wgpu_create_surface(raw_handle: raw_window_handle::RawWindowHandle) -> SurfaceId { + use raw_window_handle::RawWindowHandle as Rwh; + + let instance = &GLOBAL.instance; + let surface = match raw_handle { + #[cfg(target_os = "ios")] + Rwh::IOS(h) => Surface { + #[cfg(feature = "gfx-backend-vulkan")] + vulkan: None, + metal: instance + .metal + .create_surface_from_uiview(h.ui_view, cfg!(debug_assertions)), + }, + #[cfg(target_os = "macos")] + Rwh::MacOS(h) => Surface { + #[cfg(feature = "gfx-backend-vulkan")] + vulkan: instance + .vulkan + .as_ref() + .map(|inst| inst.create_surface_from_ns_view(h.ns_view)), + metal: instance + .metal + .create_surface_from_nsview(h.ns_view, cfg!(debug_assertions)), + }, + #[cfg(all(unix, not(target_os = "ios"), not(target_os = "macos")))] + Rwh::Xlib(h) => Surface { + vulkan: instance + .vulkan + .as_ref() + .map(|inst| inst.create_surface_from_xlib(h.display as _, h.window as _)), + }, + #[cfg(all(unix, not(target_os = "ios"), not(target_os = "macos")))] + Rwh::Wayland(h) => Surface { + vulkan: instance + .vulkan + .as_ref() + .map(|inst| inst.create_surface_from_wayland(h.display, h.surface)), + }, + #[cfg(windows)] + Rwh::Windows(h) => Surface { + vulkan: instance + .vulkan + .as_ref() + .map(|inst| inst.create_surface_from_hwnd(std::ptr::null_mut(), 
h.hwnd)), + dx12: instance + .dx12 + .as_ref() + .map(|inst| inst.create_surface_from_hwnd(h.hwnd)), + dx11: instance.dx11.create_surface_from_hwnd(h.hwnd), + }, + _ => panic!("Unsupported window handle"), + }; + + let mut token = Token::root(); + GLOBAL + .surfaces + .register_identity(PhantomData, surface, &mut token) +} + +#[cfg(all( + feature = "local", + unix, + not(target_os = "ios"), + not(target_os = "macos") +))] +#[no_mangle] +pub extern "C" fn wgpu_create_surface_from_xlib( + display: *mut *const std::ffi::c_void, + window: u64, +) -> SurfaceId { + use raw_window_handle::unix::XlibHandle; + wgpu_create_surface(raw_window_handle::RawWindowHandle::Xlib(XlibHandle { + window, + display: display as *mut _, + ..XlibHandle::empty() + })) +} + +#[cfg(all(feature = "local", any(target_os = "ios", target_os = "macos")))] +#[no_mangle] +pub extern "C" fn wgpu_create_surface_from_metal_layer(layer: *mut std::ffi::c_void) -> SurfaceId { + let surface = Surface { + #[cfg(feature = "gfx-backend-vulkan")] + vulkan: None, + metal: GLOBAL + .instance + .metal + .create_surface_from_layer(layer as *mut _, cfg!(debug_assertions)), + }; + + GLOBAL + .surfaces + .register_identity(PhantomData, surface, &mut Token::root()) +} + +#[cfg(all(feature = "local", windows))] +#[no_mangle] +pub extern "C" fn wgpu_create_surface_from_windows_hwnd( + _hinstance: *mut std::ffi::c_void, + hwnd: *mut std::ffi::c_void, +) -> SurfaceId { + use raw_window_handle::windows::WindowsHandle; + wgpu_create_surface(raw_window_handle::RawWindowHandle::Windows( + raw_window_handle::windows::WindowsHandle { + hwnd, + ..WindowsHandle::empty() + }, + )) +} + +pub fn request_adapter( + global: &Global, + desc: &RequestAdapterOptions, + input_ids: &[Input], +) -> Option { + let instance = &global.instance; + let mut device_types = Vec::new(); + + #[cfg(not(feature = "local"))] + let find_input = |b: Backend| input_ids.iter().find(|id| id.backend() == b).cloned(); + #[cfg(feature = "local")] + let find_input = |b: Backend| { + let _ = input_ids; + if desc.backends.contains(b.into()) { + Some(PhantomData) + } else { + None + } + }; + #[cfg(not(feature = "local"))] + let pick = |_output, input_maybe| input_maybe; + #[cfg(feature = "local")] + let pick = |output, _input_maybe| Some(output); + + let id_vulkan = find_input(Backend::Vulkan); + let id_metal = find_input(Backend::Metal); + let id_dx12 = find_input(Backend::Dx12); + let id_dx11 = find_input(Backend::Dx11); + + #[cfg(any( + not(any(target_os = "ios", target_os = "macos")), + feature = "gfx-backend-vulkan" + ))] + let mut adapters_vk = match instance.vulkan { + Some(ref inst) if id_vulkan.is_some() => { + let adapters = inst.enumerate_adapters(); + device_types.extend(adapters.iter().map(|ad| ad.info.device_type.clone())); + adapters + } + _ => Vec::new(), + }; + #[cfg(any(target_os = "ios", target_os = "macos"))] + let mut adapters_mtl = if id_metal.is_some() { + let adapters = instance.metal.enumerate_adapters(); + device_types.extend(adapters.iter().map(|ad| ad.info.device_type.clone())); + adapters + } else { + Vec::new() + }; + #[cfg(windows)] + let mut adapters_dx12 = match instance.dx12 { + Some(ref inst) if id_dx12.is_some() => { + let adapters = inst.enumerate_adapters(); + device_types.extend(adapters.iter().map(|ad| ad.info.device_type.clone())); + adapters + } + _ => Vec::new(), + }; + #[cfg(windows)] + let mut adapters_dx11 = if id_dx11.is_some() { + let adapters = instance.dx11.enumerate_adapters(); + device_types.extend(adapters.iter().map(|ad| 
ad.info.device_type.clone())); + adapters + } else { + Vec::new() + }; + + if device_types.is_empty() { + log::warn!("No adapters are available!"); + return None; + } + + let (mut integrated, mut discrete, mut virt, mut other) = (None, None, None, None); + + for (i, ty) in device_types.into_iter().enumerate() { + match ty { + hal::adapter::DeviceType::IntegratedGpu => { + integrated = integrated.or(Some(i)); + } + hal::adapter::DeviceType::DiscreteGpu => { + discrete = discrete.or(Some(i)); + } + hal::adapter::DeviceType::VirtualGpu => { + virt = virt.or(Some(i)); + } + _ => { + other = other.or(Some(i)); + } + } + } + + let preferred_gpu = match desc.power_preference { + PowerPreference::Default => integrated.or(discrete).or(other).or(virt), + PowerPreference::LowPower => integrated.or(other).or(discrete).or(virt), + PowerPreference::HighPerformance => discrete.or(other).or(integrated).or(virt), + }; + let mut token = Token::root(); + + let mut selected = preferred_gpu.unwrap_or(0); + #[cfg(any( + not(any(target_os = "ios", target_os = "macos")), + feature = "gfx-backend-vulkan" + ))] + { + if selected < adapters_vk.len() { + let adapter = Adapter { + raw: adapters_vk.swap_remove(selected), + }; + log::info!("Adapter Vulkan {:?}", adapter.raw.info); + let id_out = backend::Vulkan::hub(global).adapters.register_identity( + id_vulkan.unwrap(), + adapter, + &mut token, + ); + return pick(id_out, id_vulkan); + } + selected -= adapters_vk.len(); + } + #[cfg(any(target_os = "ios", target_os = "macos"))] + { + if selected < adapters_mtl.len() { + let adapter = Adapter { + raw: adapters_mtl.swap_remove(selected), + }; + log::info!("Adapter Metal {:?}", adapter.raw.info); + let id_out = backend::Metal::hub(global).adapters.register_identity( + id_metal.unwrap(), + adapter, + &mut token, + ); + return pick(id_out, id_metal); + } + selected -= adapters_mtl.len(); + } + #[cfg(windows)] + { + if selected < adapters_dx12.len() { + let adapter = Adapter { + raw: adapters_dx12.swap_remove(selected), + }; + log::info!("Adapter Dx12 {:?}", adapter.raw.info); + let id_out = backend::Dx12::hub(global).adapters.register_identity( + id_dx12.unwrap(), + adapter, + &mut token, + ); + return pick(id_out, id_dx12); + } + selected -= adapters_dx12.len(); + if selected < adapters_dx11.len() { + let adapter = Adapter { + raw: adapters_dx11.swap_remove(selected), + }; + log::info!("Adapter Dx11 {:?}", adapter.raw.info); + let id_out = backend::Dx11::hub(global).adapters.register_identity( + id_dx11.unwrap(), + adapter, + &mut token, + ); + return pick(id_out, id_dx11); + } + selected -= adapters_dx11.len(); + } + let _ = (selected, id_vulkan, id_metal, id_dx12, id_dx11); + unreachable!() +} + +#[cfg(feature = "local")] +#[no_mangle] +pub extern "C" fn wgpu_request_adapter(desc: Option<&RequestAdapterOptions>) -> AdapterId { + request_adapter(&*GLOBAL, &desc.cloned().unwrap_or_default(), &[]).unwrap() +} + +pub fn adapter_request_device( + global: &Global, + adapter_id: AdapterId, + desc: &DeviceDescriptor, + id_in: Input, +) -> Output { + let hub = B::hub(global); + let mut token = Token::root(); + let device = { + let (adapter_guard, _) = hub.adapters.read(&mut token); + let adapter = &adapter_guard[adapter_id].raw; + + let family = adapter + .queue_families + .iter() + .find(|family| family.queue_type().supports_graphics()) + .unwrap(); + let mut gpu = unsafe { + adapter + .physical_device + .open(&[(family, &[1.0])], hal::Features::empty()) + .unwrap() + }; + + let limits = adapter.physical_device.limits(); + 
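+        // (Editorial note, not part of the original patch.) BIND_BUFFER_ALIGNMENT
+        // is wgpu's fixed buffer-binding alignment (256 bytes, defined in
+        // device.rs). The asserts below require the adapter's minimum offset
+        // alignments to divide it evenly: e.g. an adapter reporting
+        // min_uniform_buffer_offset_alignment == 64 passes, since 256 % 64 == 0.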
assert_eq!( + 0, + BIND_BUFFER_ALIGNMENT % limits.min_storage_buffer_offset_alignment, + "Adapter storage buffer offset alignment not compatible with WGPU" + ); + assert_eq!( + 0, + BIND_BUFFER_ALIGNMENT % limits.min_uniform_buffer_offset_alignment, + "Adapter uniform buffer offset alignment not compatible with WGPU" + ); + if desc.limits.max_bind_groups == 0 { + log::warn!("max_bind_groups limit is missing"); + } else { + assert!( + u32::from(limits.max_bound_descriptor_sets) >= desc.limits.max_bind_groups, + "Adapter does not support the requested max_bind_groups" + ); + } + + let mem_props = adapter.physical_device.memory_properties(); + + let supports_texture_d24_s8 = adapter + .physical_device + .format_properties(Some(hal::format::Format::D24UnormS8Uint)) + .optimal_tiling + .contains(hal::format::ImageFeature::DEPTH_STENCIL_ATTACHMENT); + + Device::new( + gpu.device, + adapter_id, + gpu.queue_groups.swap_remove(0), + mem_props, + supports_texture_d24_s8, + desc.limits.max_bind_groups, + ) + }; + + hub.devices.register_identity(id_in, device, &mut token) +} + +#[cfg(feature = "local")] +#[no_mangle] +pub extern "C" fn wgpu_adapter_request_device( + adapter_id: AdapterId, + desc: Option<&DeviceDescriptor>, +) -> DeviceId { + let desc = &desc.cloned().unwrap_or_default(); + gfx_select!(adapter_id => adapter_request_device(&*GLOBAL, adapter_id, desc, PhantomData)) +} + +pub fn adapter_get_info(global: &Global, adapter_id: AdapterId) -> AdapterInfo { + let hub = B::hub(global); + let mut token = Token::root(); + let (adapter_guard, _) = hub.adapters.read(&mut token); + let adapter = &adapter_guard[adapter_id]; + adapter.raw.info.clone() +} + +#[cfg(feature = "local")] +pub fn wgpu_adapter_get_info(adapter_id: AdapterId) -> AdapterInfo { + gfx_select!(adapter_id => adapter_get_info(&*GLOBAL, adapter_id)) +} diff --git a/dom/webgpu/wgpu-native/src/lib.rs b/dom/webgpu/wgpu-native/src/lib.rs new file mode 100644 index 000000000000..ad146587aae5 --- /dev/null +++ b/dom/webgpu/wgpu-native/src/lib.rs @@ -0,0 +1,234 @@ + + + + +pub mod backend { + #[cfg(windows)] + pub use gfx_backend_dx11::Backend as Dx11; + #[cfg(windows)] + pub use gfx_backend_dx12::Backend as Dx12; + pub use gfx_backend_empty::Backend as Empty; + #[cfg(any(target_os = "ios", target_os = "macos"))] + pub use gfx_backend_metal::Backend as Metal; + #[cfg(any( + not(any(target_os = "ios", target_os = "macos")), + feature = "gfx-backend-vulkan" + ))] + pub use gfx_backend_vulkan::Backend as Vulkan; +} + +mod binding_model; +mod command; +mod conv; +mod device; +mod hub; +mod id; +mod instance; +mod pipeline; +mod resource; +mod swap_chain; +mod track; + +pub use self::binding_model::*; +pub use self::command::*; +pub use self::device::*; +#[cfg(not(feature = "local"))] +pub use self::hub::{Access, Global, IdentityManager, Registry, Token}; +pub use self::id::*; +pub use self::instance::*; +pub use self::pipeline::*; +pub use self::resource::*; +pub use self::swap_chain::*; +pub use hal::adapter::AdapterInfo; +pub use hal::pso::read_spirv; + +use std::{ + os::raw::c_char, + ptr, + sync::atomic::{AtomicUsize, Ordering}, +}; + +type SubmissionIndex = usize; +type Index = u32; +type Epoch = u32; + +#[repr(u8)] +#[derive(Clone, Copy, Debug, PartialEq)] +pub enum Backend { + Empty = 0, + Vulkan = 1, + Metal = 2, + Dx12 = 3, + Dx11 = 4, + Gl = 5, +} + +pub type BufferAddress = u64; +pub type RawString = *const c_char; + + +#[derive(Debug)] +pub struct RefCount(ptr::NonNull); + +unsafe impl Send for RefCount {} +unsafe impl Sync for 
RefCount {} + +impl RefCount { + const MAX: usize = 1 << 24; + + fn load(&self) -> usize { + unsafe { self.0.as_ref() }.load(Ordering::Acquire) + } +} + +impl Clone for RefCount { + fn clone(&self) -> Self { + let old_size = unsafe { self.0.as_ref() }.fetch_add(1, Ordering::Relaxed); + assert!(old_size < Self::MAX); + RefCount(self.0) + } +} + +impl Drop for RefCount { + fn drop(&mut self) { + if unsafe { self.0.as_ref() }.fetch_sub(1, Ordering::Relaxed) == 1 { + let _ = unsafe { Box::from_raw(self.0.as_ptr()) }; + } + } +} + +#[derive(Debug)] +struct LifeGuard { + ref_count: RefCount, + submission_index: AtomicUsize, +} + +impl LifeGuard { + fn new() -> Self { + let bx = Box::new(AtomicUsize::new(1)); + LifeGuard { + ref_count: RefCount(ptr::NonNull::new(Box::into_raw(bx)).unwrap()), + submission_index: AtomicUsize::new(0), + } + } +} + +#[derive(Clone, Debug)] +struct Stored { + value: T, + ref_count: RefCount, +} + +#[repr(C)] +#[derive(Clone, Copy, Debug)] +pub struct Color { + pub r: f64, + pub g: f64, + pub b: f64, + pub a: f64, +} + +impl Color { + pub const TRANSPARENT: Self = Color { + r: 0.0, + g: 0.0, + b: 0.0, + a: 0.0, + }; + pub const BLACK: Self = Color { + r: 0.0, + g: 0.0, + b: 0.0, + a: 1.0, + }; + pub const WHITE: Self = Color { + r: 1.0, + g: 1.0, + b: 1.0, + a: 1.0, + }; + pub const RED: Self = Color { + r: 1.0, + g: 0.0, + b: 0.0, + a: 1.0, + }; + pub const GREEN: Self = Color { + r: 0.0, + g: 1.0, + b: 0.0, + a: 1.0, + }; + pub const BLUE: Self = Color { + r: 0.0, + g: 0.0, + b: 1.0, + a: 1.0, + }; +} + +#[repr(C)] +#[derive(Clone, Copy, Debug)] +pub struct Origin3d { + pub x: f32, + pub y: f32, + pub z: f32, +} + +impl Origin3d { + pub const ZERO: Self = Origin3d { + x: 0.0, + y: 0.0, + z: 0.0, + }; +} + +impl Default for Origin3d { + fn default() -> Self { + Origin3d::ZERO + } +} + +#[repr(C)] +#[derive(Clone, Copy, Debug)] +pub struct Extent3d { + pub width: u32, + pub height: u32, + pub depth: u32, +} + +#[repr(C)] +#[derive(Debug)] +pub struct U32Array { + pub bytes: *const u32, + pub length: usize, +} + +#[derive(Debug)] +pub enum InputState {} + +#[macro_export] +macro_rules! 
gfx_select { + ($id:expr => $function:ident( $($param:expr),+ )) => { + match $id.backend() { + #[cfg(any(not(any(target_os = "ios", target_os = "macos")), feature = "gfx-backend-vulkan"))] + $crate::Backend::Vulkan => $function::<$crate::backend::Vulkan>( $($param),+ ), + #[cfg(any(target_os = "ios", target_os = "macos"))] + $crate::Backend::Metal => $function::<$crate::backend::Metal>( $($param),+ ), + #[cfg(windows)] + $crate::Backend::Dx12 => $function::<$crate::backend::Dx12>( $($param),+ ), + #[cfg(windows)] + $crate::Backend::Dx11 => $function::<$crate::backend::Dx11>( $($param),+ ), + _ => unreachable!() + } + }; +} + +#[derive(Clone, Copy, Debug)] +pub(crate) struct Features { + pub max_bind_groups: u32, + pub supports_texture_d24_s8: bool, +} + + +type FastHashMap = std::collections::HashMap>; diff --git a/dom/webgpu/wgpu-native/src/pipeline.rs b/dom/webgpu/wgpu-native/src/pipeline.rs new file mode 100644 index 000000000000..71a99299b798 --- /dev/null +++ b/dom/webgpu/wgpu-native/src/pipeline.rs @@ -0,0 +1,354 @@ + + + + +use crate::{ + device::RenderPassContext, + resource, + BufferAddress, + PipelineLayoutId, + RawString, + ShaderModuleId, + U32Array, +}; + +use bitflags::bitflags; + +pub type ShaderLocation = u32; + +#[repr(C)] +#[derive(Copy, Clone, Debug, Hash, Eq, PartialEq)] +pub enum BlendFactor { + Zero = 0, + One = 1, + SrcColor = 2, + OneMinusSrcColor = 3, + SrcAlpha = 4, + OneMinusSrcAlpha = 5, + DstColor = 6, + OneMinusDstColor = 7, + DstAlpha = 8, + OneMinusDstAlpha = 9, + SrcAlphaSaturated = 10, + BlendColor = 11, + OneMinusBlendColor = 12, +} + +#[repr(C)] +#[derive(Copy, Clone, Debug, Hash, Eq, PartialEq)] +pub enum BlendOperation { + Add = 0, + Subtract = 1, + ReverseSubtract = 2, + Min = 3, + Max = 4, +} + +impl Default for BlendOperation { + fn default() -> Self { + BlendOperation::Add + } +} + +bitflags! 
{ + #[repr(transparent)] + pub struct ColorWrite: u32 { + const RED = 1; + const GREEN = 2; + const BLUE = 4; + const ALPHA = 8; + const COLOR = 7; + const ALL = 15; + } +} + +impl Default for ColorWrite { + fn default() -> Self { + ColorWrite::ALL + } +} + +#[repr(C)] +#[derive(Clone, Debug, PartialEq)] +pub struct BlendDescriptor { + pub src_factor: BlendFactor, + pub dst_factor: BlendFactor, + pub operation: BlendOperation, +} + +impl BlendDescriptor { + pub const REPLACE: Self = BlendDescriptor { + src_factor: BlendFactor::One, + dst_factor: BlendFactor::Zero, + operation: BlendOperation::Add, + }; + + pub fn uses_color(&self) -> bool { + match (self.src_factor, self.dst_factor) { + (BlendFactor::BlendColor, _) + | (BlendFactor::OneMinusBlendColor, _) + | (_, BlendFactor::BlendColor) + | (_, BlendFactor::OneMinusBlendColor) => true, + (_, _) => false, + } + } +} + +impl Default for BlendDescriptor { + fn default() -> Self { + BlendDescriptor::REPLACE + } +} + +#[repr(C)] +#[derive(Clone, Debug)] +pub struct ColorStateDescriptor { + pub format: resource::TextureFormat, + pub alpha_blend: BlendDescriptor, + pub color_blend: BlendDescriptor, + pub write_mask: ColorWrite, +} + +#[repr(C)] +#[derive(Copy, Clone, Debug, Hash, Eq, PartialEq)] +pub enum StencilOperation { + Keep = 0, + Zero = 1, + Replace = 2, + Invert = 3, + IncrementClamp = 4, + DecrementClamp = 5, + IncrementWrap = 6, + DecrementWrap = 7, +} + +impl Default for StencilOperation { + fn default() -> Self { + StencilOperation::Keep + } +} + +#[repr(C)] +#[derive(Clone, Debug, PartialEq)] +pub struct StencilStateFaceDescriptor { + pub compare: resource::CompareFunction, + pub fail_op: StencilOperation, + pub depth_fail_op: StencilOperation, + pub pass_op: StencilOperation, +} + +impl StencilStateFaceDescriptor { + pub const IGNORE: Self = StencilStateFaceDescriptor { + compare: resource::CompareFunction::Always, + fail_op: StencilOperation::Keep, + depth_fail_op: StencilOperation::Keep, + pass_op: StencilOperation::Keep, + }; +} + +impl Default for StencilStateFaceDescriptor { + fn default() -> Self { + StencilStateFaceDescriptor::IGNORE + } +} + +#[repr(C)] +#[derive(Clone, Debug)] +pub struct DepthStencilStateDescriptor { + pub format: resource::TextureFormat, + pub depth_write_enabled: bool, + pub depth_compare: resource::CompareFunction, + pub stencil_front: StencilStateFaceDescriptor, + pub stencil_back: StencilStateFaceDescriptor, + pub stencil_read_mask: u32, + pub stencil_write_mask: u32, +} + +impl DepthStencilStateDescriptor { + pub fn needs_stencil_reference(&self) -> bool { + !self.stencil_front.compare.is_trivial() || !self.stencil_back.compare.is_trivial() + } +} + +#[repr(C)] +#[derive(Copy, Clone, Debug, Hash, Eq, PartialEq)] +pub enum IndexFormat { + Uint16 = 0, + Uint32 = 1, +} + +#[repr(C)] +#[derive(Copy, Clone, Debug, Hash, Eq, PartialEq)] +pub enum VertexFormat { + Uchar2 = 1, + Uchar4 = 3, + Char2 = 5, + Char4 = 7, + Uchar2Norm = 9, + Uchar4Norm = 11, + Char2Norm = 14, + Char4Norm = 16, + Ushort2 = 18, + Ushort4 = 20, + Short2 = 22, + Short4 = 24, + Ushort2Norm = 26, + Ushort4Norm = 28, + Short2Norm = 30, + Short4Norm = 32, + Half2 = 34, + Half4 = 36, + Float = 37, + Float2 = 38, + Float3 = 39, + Float4 = 40, + Uint = 41, + Uint2 = 42, + Uint3 = 43, + Uint4 = 44, + Int = 45, + Int2 = 46, + Int3 = 47, + Int4 = 48, +} + +#[repr(C)] +#[derive(Copy, Clone, Debug, Hash, Eq, PartialEq)] +pub enum InputStepMode { + Vertex = 0, + Instance = 1, +} + +#[repr(C)] +#[derive(Clone, Debug)] +pub struct 
VertexAttributeDescriptor {
+    pub offset: BufferAddress,
+    pub format: VertexFormat,
+    pub shader_location: ShaderLocation,
+}
+
+#[repr(C)]
+#[derive(Debug)]
+pub struct VertexBufferDescriptor {
+    pub stride: BufferAddress,
+    pub step_mode: InputStepMode,
+    pub attributes: *const VertexAttributeDescriptor,
+    pub attributes_length: usize,
+}
+
+#[repr(C)]
+#[derive(Debug)]
+pub struct VertexInputDescriptor {
+    pub index_format: IndexFormat,
+    pub vertex_buffers: *const VertexBufferDescriptor,
+    pub vertex_buffers_length: usize,
+}
+
+#[repr(C)]
+#[derive(Debug)]
+pub struct ShaderModuleDescriptor {
+    pub code: U32Array,
+}
+
+#[repr(C)]
+#[derive(Debug)]
+pub struct ProgrammableStageDescriptor {
+    pub module: ShaderModuleId,
+    pub entry_point: RawString,
+}
+
+#[repr(C)]
+#[derive(Debug)]
+pub struct ComputePipelineDescriptor {
+    pub layout: PipelineLayoutId,
+    pub compute_stage: ProgrammableStageDescriptor,
+}
+
+#[derive(Debug)]
+pub struct ComputePipeline<B: hal::Backend> {
+    pub(crate) raw: B::ComputePipeline,
+    pub(crate) layout_id: PipelineLayoutId,
+}
+
+#[repr(C)]
+#[derive(Copy, Clone, Debug, Hash, Eq, PartialEq)]
+pub enum PrimitiveTopology {
+    PointList = 0,
+    LineList = 1,
+    LineStrip = 2,
+    TriangleList = 3,
+    TriangleStrip = 4,
+}
+
+#[repr(C)]
+#[derive(Copy, Clone, Debug, Hash, Eq, PartialEq)]
+pub enum FrontFace {
+    Ccw = 0,
+    Cw = 1,
+}
+
+impl Default for FrontFace {
+    fn default() -> Self {
+        FrontFace::Ccw
+    }
+}
+
+#[repr(C)]
+#[derive(Copy, Clone, Debug, Hash, Eq, PartialEq)]
+pub enum CullMode {
+    None = 0,
+    Front = 1,
+    Back = 2,
+}
+
+impl Default for CullMode {
+    fn default() -> Self {
+        CullMode::None
+    }
+}
+
+#[repr(C)]
+#[derive(Clone, Debug, Default)]
+pub struct RasterizationStateDescriptor {
+    pub front_face: FrontFace,
+    pub cull_mode: CullMode,
+    pub depth_bias: i32,
+    pub depth_bias_slope_scale: f32,
+    pub depth_bias_clamp: f32,
+}
+
+#[repr(C)]
+#[derive(Debug)]
+pub struct RenderPipelineDescriptor {
+    pub layout: PipelineLayoutId,
+    pub vertex_stage: ProgrammableStageDescriptor,
+    pub fragment_stage: *const ProgrammableStageDescriptor,
+    pub primitive_topology: PrimitiveTopology,
+    pub rasterization_state: *const RasterizationStateDescriptor,
+    pub color_states: *const ColorStateDescriptor,
+    pub color_states_length: usize,
+    pub depth_stencil_state: *const DepthStencilStateDescriptor,
+    pub vertex_input: VertexInputDescriptor,
+    pub sample_count: u32,
+    pub sample_mask: u32,
+    pub alpha_to_coverage_enabled: bool,
+}
+
+bitflags! {
+    #[repr(transparent)]
+    pub struct PipelineFlags: u32 {
+        const BLEND_COLOR = 1;
+        const STENCIL_REFERENCE = 2;
+    }
+}
+
+#[derive(Debug)]
+pub struct RenderPipeline<B: hal::Backend> {
+    pub(crate) raw: B::GraphicsPipeline,
+    pub(crate) layout_id: PipelineLayoutId,
+    pub(crate) pass_context: RenderPassContext,
+    pub(crate) flags: PipelineFlags,
+    pub(crate) index_format: IndexFormat,
+    pub(crate) sample_count: u8,
+    pub(crate) vertex_strides: Vec<(BufferAddress, InputStepMode)>,
+}
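Note: these `#[repr(C)]` descriptors cross the FFI boundary, so variable-length arrays are passed as a raw pointer plus an explicit `*_length` field (`attributes`/`attributes_length`, `color_states`/`color_states_length`, and so on). A hypothetical mirror of the convention, showing how the implementation side would rebuild a slice (all names are invented for the sketch):

```rust
use std::slice;

#[repr(C)]
struct ItemDesc {
    value: u32,
}

#[repr(C)]
struct ListDesc {
    items: *const ItemDesc,
    items_length: usize,
}

// A pointer+length pair is turned back into a slice on the Rust side;
// a null pointer stands for "no items".
fn consume(desc: &ListDesc) -> u32 {
    let items: &[ItemDesc] = if desc.items.is_null() {
        &[]
    } else {
        unsafe { slice::from_raw_parts(desc.items, desc.items_length) }
    };
    items.iter().map(|i| i.value).sum()
}

fn main() {
    let items = [ItemDesc { value: 1 }, ItemDesc { value: 2 }];
    let desc = ListDesc {
        items: items.as_ptr(),
        items_length: items.len(),
    };
    assert_eq!(consume(&desc), 3);
}
```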
diff --git a/dom/webgpu/wgpu-native/src/resource.rs b/dom/webgpu/wgpu-native/src/resource.rs
new file mode 100644
index 000000000000..028e981358b5
--- /dev/null
+++ b/dom/webgpu/wgpu-native/src/resource.rs
@@ -0,0 +1,373 @@
+
+
+
+
+use crate::{
+    BufferAddress,
+    BufferMapReadCallback,
+    BufferMapWriteCallback,
+    DeviceId,
+    Extent3d,
+    LifeGuard,
+    RefCount,
+    Stored,
+    SwapChainId,
+    TextureId,
+};
+
+use bitflags::bitflags;
+use hal;
+use rendy_memory::MemoryBlock;
+use smallvec::SmallVec;
+
+use std::borrow::Borrow;
+
+bitflags! {
+    #[repr(transparent)]
+    pub struct BufferUsage: u32 {
+        const MAP_READ = 1;
+        const MAP_WRITE = 2;
+        const COPY_SRC = 4;
+        const COPY_DST = 8;
+        const INDEX = 16;
+        const VERTEX = 32;
+        const UNIFORM = 64;
+        const STORAGE = 128;
+        const STORAGE_READ = 256;
+        const INDIRECT = 512;
+        const NONE = 0;
+        /// The combination of all read-only usages.
+        const READ_ALL = Self::MAP_READ.bits | Self::COPY_SRC.bits |
+            Self::INDEX.bits | Self::VERTEX.bits | Self::UNIFORM.bits |
+            Self::STORAGE_READ.bits | Self::INDIRECT.bits;
+        /// The combination of all write-only and read-write usages.
+        const WRITE_ALL = Self::MAP_WRITE.bits | Self::COPY_DST.bits | Self::STORAGE.bits;
+        /// The combination of all usages that are guaranteed to be ordered by the hardware.
+        /// If a usage is not ordered, then even if it doesn't change between draw calls, there
+        /// still need to be pipeline barriers inserted for synchronization.
+        const ORDERED = Self::READ_ALL.bits;
+    }
+}
+
+#[repr(C)]
+#[derive(Clone, Debug)]
+pub struct BufferDescriptor {
+    pub size: BufferAddress,
+    pub usage: BufferUsage,
+}
+
+#[repr(C)]
+#[derive(Debug)]
+pub enum BufferMapAsyncStatus {
+    Success,
+    Error,
+    Unknown,
+    ContextLost,
+}
+
+#[derive(Clone, Debug)]
+pub enum BufferMapOperation {
+    Read(std::ops::Range<BufferAddress>, BufferMapReadCallback, *mut u8),
+    Write(std::ops::Range<BufferAddress>, BufferMapWriteCallback, *mut u8),
+}
+
+unsafe impl Send for BufferMapOperation {}
+unsafe impl Sync for BufferMapOperation {}
+
+impl BufferMapOperation {
+    pub(crate) fn call_error(self) {
+        match self {
+            BufferMapOperation::Read(_, callback, userdata) => {
+                log::error!("wgpu_buffer_map_read_async failed: buffer mapping is pending");
+                callback(BufferMapAsyncStatus::Error, std::ptr::null_mut(), userdata);
+            }
+            BufferMapOperation::Write(_, callback, userdata) => {
+                log::error!("wgpu_buffer_map_write_async failed: buffer mapping is pending");
+                callback(BufferMapAsyncStatus::Error, std::ptr::null_mut(), userdata);
+            }
+        }
+    }
+}
+
+#[derive(Debug)]
+pub struct Buffer<B: hal::Backend> {
+    pub(crate) raw: B::Buffer,
+    pub(crate) device_id: Stored<DeviceId>,
+    pub(crate) usage: BufferUsage,
+    pub(crate) memory: MemoryBlock<B>,
+    pub(crate) size: BufferAddress,
+    pub(crate) mapped_write_ranges: Vec<std::ops::Range<BufferAddress>>,
+    pub(crate) pending_map_operation: Option<BufferMapOperation>,
+    pub(crate) life_guard: LifeGuard,
+}
+
+impl<B: hal::Backend> Borrow<RefCount> for Buffer<B> {
+    fn borrow(&self) -> &RefCount {
+        &self.life_guard.ref_count
+    }
+}
+
+#[repr(C)]
+#[derive(Copy, Clone, Debug, Hash, Eq, PartialEq)]
+pub enum TextureDimension {
+    D1,
+    D2,
+    D3,
+}
+
+#[repr(C)]
+#[derive(Copy, Clone, Debug, Hash, Eq, PartialEq)]
+pub enum TextureFormat {
+
+    R8Unorm = 0,
+    R8Snorm = 1,
+    R8Uint = 2,
+    R8Sint = 3,
+
+
+    R16Unorm = 4,
+    R16Snorm = 5,
+    R16Uint = 6,
+    R16Sint = 7,
+    R16Float = 8,
+
+    Rg8Unorm = 9,
+    Rg8Snorm = 10,
+    Rg8Uint = 11,
+    Rg8Sint = 12,
+
+
+    R32Uint = 13,
+    R32Sint = 14,
+    R32Float = 15,
+    Rg16Unorm = 16,
+    Rg16Snorm = 17,
+    Rg16Uint = 18,
+    Rg16Sint = 19,
+    Rg16Float = 20,
+    Rgba8Unorm = 21,
+    Rgba8UnormSrgb = 22,
+    Rgba8Snorm = 23,
+    Rgba8Uint = 24,
+    Rgba8Sint = 25,
+    Bgra8Unorm = 26,
+    Bgra8UnormSrgb = 27,
+
+
+    Rgb10a2Unorm = 28,
+    Rg11b10Float = 29,
+
+
+    Rg32Uint = 30,
+    Rg32Sint = 31,
+    Rg32Float = 32,
+    Rgba16Unorm = 33,
+    Rgba16Snorm = 34,
+    Rgba16Uint = 35,
+    Rgba16Sint = 36,
+    Rgba16Float = 37,
+
+
+    Rgba32Uint = 38,
+    Rgba32Sint = 39,
+    Rgba32Float = 40,
+
+
+    Depth32Float = 41,
+    Depth24Plus = 42,
+    Depth24PlusStencil8 = 43,
+}
+
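Note: the `READ_ALL`/`WRITE_ALL`/`ORDERED` groupings above (and their texture counterparts just below) drive the usage trackers later in this patch (`track/buffer.rs`, `track/texture.rs`): combined read-only usages can be merged silently, while any write usage forces a pending transition, i.e. a pipeline barrier. An illustrative reduction of that rule, with plain `u32` bits standing in for the `bitflags` types:

```rust
// Invented constants for the sketch, matching the spirit of BufferUsage.
const MAP_READ: u32 = 1;
const COPY_SRC: u32 = 4;
const COPY_DST: u32 = 8;
const STORAGE: u32 = 128;

// MAP_WRITE | COPY_DST | STORAGE: everything that can modify the buffer.
const WRITE_ALL: u32 = 2 | COPY_DST | STORAGE;

// Two usages may coexist without a transition only when no write usage is
// involved on either side; this mirrors the check in BufferState::change.
fn needs_barrier(old: u32, new: u32) -> bool {
    old != 0 && old != new && WRITE_ALL & (old | new) != 0
}

fn main() {
    assert!(!needs_barrier(MAP_READ, COPY_SRC)); // read + read: mergeable
    assert!(needs_barrier(COPY_SRC, COPY_DST));  // read + write: barrier
    assert!(!needs_barrier(STORAGE, STORAGE));   // unchanged usage: kept
}
```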
+bitflags! {
+    #[repr(transparent)]
+    pub struct TextureUsage: u32 {
+        const COPY_SRC = 1;
+        const COPY_DST = 2;
+        const SAMPLED = 4;
+        const STORAGE = 8;
+        const OUTPUT_ATTACHMENT = 16;
+        const NONE = 0;
+        /// The combination of all read-only usages.
+        const READ_ALL = Self::COPY_SRC.bits | Self::SAMPLED.bits;
+        /// The combination of all write-only and read-write usages.
+        const WRITE_ALL = Self::COPY_DST.bits | Self::STORAGE.bits | Self::OUTPUT_ATTACHMENT.bits;
+        /// The combination of all usages that are guaranteed to be ordered by the hardware.
+        /// If a usage is not ordered, then even if it doesn't change between draw calls, there
+        /// still need to be pipeline barriers inserted for synchronization.
+        const ORDERED = Self::READ_ALL.bits | Self::OUTPUT_ATTACHMENT.bits;
+        const UNINITIALIZED = 0xFFFF;
+    }
+}
+
+#[repr(C)]
+#[derive(Debug)]
+pub struct TextureDescriptor {
+    pub size: Extent3d,
+    pub array_layer_count: u32,
+    pub mip_level_count: u32,
+    pub sample_count: u32,
+    pub dimension: TextureDimension,
+    pub format: TextureFormat,
+    pub usage: TextureUsage,
+}
+
+#[derive(Debug)]
+pub struct Texture<B: hal::Backend> {
+    pub(crate) raw: B::Image,
+    pub(crate) device_id: Stored<DeviceId>,
+    pub(crate) usage: TextureUsage,
+    pub(crate) kind: hal::image::Kind,
+    pub(crate) format: TextureFormat,
+    pub(crate) full_range: hal::image::SubresourceRange,
+    pub(crate) memory: MemoryBlock<B>,
+    pub(crate) life_guard: LifeGuard,
+}
+
+impl<B: hal::Backend> Borrow<RefCount> for Texture<B> {
+    fn borrow(&self) -> &RefCount {
+        &self.life_guard.ref_count
+    }
+}
+
+#[repr(C)]
+#[derive(Copy, Clone, Debug, Hash, Eq, PartialEq)]
+pub enum TextureAspect {
+    All,
+    StencilOnly,
+    DepthOnly,
+}
+
+impl Default for TextureAspect {
+    fn default() -> Self {
+        TextureAspect::All
+    }
+}
+
+#[repr(C)]
+#[derive(Copy, Clone, Debug, Hash, Eq, PartialEq)]
+pub enum TextureViewDimension {
+    D1,
+    D2,
+    D2Array,
+    Cube,
+    CubeArray,
+    D3,
+}
+
+#[repr(C)]
+#[derive(Debug)]
+pub struct TextureViewDescriptor {
+    pub format: TextureFormat,
+    pub dimension: TextureViewDimension,
+    pub aspect: TextureAspect,
+    pub base_mip_level: u32,
+    pub level_count: u32,
+    pub base_array_layer: u32,
+    pub array_layer_count: u32,
+}
+
+#[derive(Debug)]
+pub(crate) enum TextureViewInner<B: hal::Backend> {
+    Native {
+        raw: B::ImageView,
+        source_id: Stored<TextureId>,
+    },
+    SwapChain {
+        image: <B::Surface as hal::window::PresentationSurface<B>>::SwapchainImage,
+        source_id: Stored<SwapChainId>,
+        framebuffers: SmallVec<[B::Framebuffer; 1]>,
+    },
+}
+
+#[derive(Debug)]
+pub struct TextureView<B: hal::Backend> {
+    pub(crate) inner: TextureViewInner<B>,
+
+    pub(crate) format: TextureFormat,
+    pub(crate) extent: hal::image::Extent,
+    pub(crate) samples: hal::image::NumSamples,
+    pub(crate) range: hal::image::SubresourceRange,
+    pub(crate) life_guard: LifeGuard,
+}
+
+impl<B: hal::Backend> Borrow<RefCount> for TextureView<B> {
+    fn borrow(&self) -> &RefCount {
+        &self.life_guard.ref_count
+    }
+}
+
+#[repr(C)]
+#[derive(Copy, Clone, Debug, Hash, Eq, PartialEq)]
+pub enum AddressMode {
+    ClampToEdge = 0,
+    Repeat = 1,
+    MirrorRepeat = 2,
+}
+
+impl Default for AddressMode {
+    fn default() -> Self {
+        AddressMode::ClampToEdge
+    }
+}
+
+#[repr(C)]
+#[derive(Copy, Clone, Debug, Hash, Eq, PartialEq)]
+pub enum FilterMode {
+    Nearest = 0,
+    Linear = 1,
+}
+
+impl Default for FilterMode {
+    fn default() -> Self {
+        FilterMode::Nearest
+    }
+}
+
+#[repr(C)]
+#[derive(Copy, Clone, Debug, Hash, Eq, PartialEq)]
+pub enum CompareFunction {
+    Never = 0,
+    Less = 1,
+    Equal = 2,
+    LessEqual = 3,
+    Greater = 4,
+    NotEqual = 5,
+    GreaterEqual = 6,
+    Always = 7,
+}
+
+impl CompareFunction {
+    pub fn is_trivial(&self) -> bool {
+        match *self {
+            CompareFunction::Never | CompareFunction::Always => true,
+            _ => false,
+        }
+    }
+}
+
+#[repr(C)]
+#[derive(Debug)]
+pub struct SamplerDescriptor {
+    pub address_mode_u: AddressMode,
+    pub address_mode_v: AddressMode,
+    pub address_mode_w: AddressMode,
+    pub mag_filter: FilterMode,
+    pub min_filter: FilterMode,
+    pub mipmap_filter: FilterMode,
+    pub lod_min_clamp: f32,
+    pub lod_max_clamp: f32,
+    pub compare_function: CompareFunction,
+}
+
+#[derive(Debug)]
+pub struct Sampler<B: hal::Backend> {
+    pub(crate) raw: B::Sampler,
+    pub(crate) device_id: Stored<DeviceId>,
+    pub(crate) life_guard: LifeGuard,
+}
+
+impl<B: hal::Backend> Borrow<RefCount> for Sampler<B> {
+    fn borrow(&self) -> &RefCount {
+        &self.life_guard.ref_count
+    }
+}
diff --git a/dom/webgpu/wgpu-native/src/swap_chain.rs b/dom/webgpu/wgpu-native/src/swap_chain.rs
new file mode 100644
index 000000000000..bb2890ec9ebc
--- /dev/null
+++ b/dom/webgpu/wgpu-native/src/swap_chain.rs
@@ -0,0 +1,254 @@
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+use crate::{
+    conv,
+    hub::{GfxBackend, Global, Token},
+    resource,
+    DeviceId,
+    Extent3d,
+    Features,
+    Input,
+    LifeGuard,
+    Stored,
+    SwapChainId,
+    TextureViewId,
+};
+#[cfg(feature = "local")]
+use crate::{gfx_select, hub::GLOBAL};
+
+use hal::{self, device::Device as _, queue::CommandQueue as _, window::PresentationSurface as _};
+
+use smallvec::SmallVec;
+
+#[cfg(feature = "local")]
+use std::marker::PhantomData;
+
+
+const FRAME_TIMEOUT_MS: u64 = 1000;
+pub const DESIRED_NUM_FRAMES: u32 = 3;
+
+#[derive(Debug)]
+pub struct SwapChain<B: hal::Backend> {
+    pub(crate) life_guard: LifeGuard,
+    pub(crate) device_id: Stored<DeviceId>,
+    pub(crate) desc: SwapChainDescriptor,
+    pub(crate) num_frames: hal::window::SwapImageIndex,
+    pub(crate) semaphore: B::Semaphore,
+    pub(crate) acquired_view_id: Option<Stored<TextureViewId>>,
+}
+
+#[repr(C)]
+#[derive(Copy, Clone, Debug)]
+pub enum PresentMode {
+    NoVsync = 0,
+    Vsync = 1,
+}
+
+#[repr(C)]
+#[derive(Clone, Debug)]
+pub struct SwapChainDescriptor {
+    pub usage: resource::TextureUsage,
+    pub format: resource::TextureFormat,
+    pub width: u32,
+    pub height: u32,
+    pub present_mode: PresentMode,
+}
+
+impl SwapChainDescriptor {
+    pub(crate) fn to_hal(
+        &self,
+        num_frames: u32,
+        features: &Features,
+    ) -> hal::window::SwapchainConfig {
+        let mut config = hal::window::SwapchainConfig::new(
+            self.width,
+            self.height,
+            conv::map_texture_format(self.format, *features),
+            num_frames,
+        );
+
+        config.image_usage = conv::map_texture_usage(self.usage, hal::format::Aspects::COLOR);
+        config.composite_alpha_mode = hal::window::CompositeAlphaMode::OPAQUE;
+        config.present_mode = match self.present_mode {
+            PresentMode::NoVsync => hal::window::PresentMode::IMMEDIATE,
+            PresentMode::Vsync => hal::window::PresentMode::FIFO,
+        };
+        config
+    }
+
+    pub fn to_texture_desc(&self) -> resource::TextureDescriptor {
+        resource::TextureDescriptor {
+            size: Extent3d {
+                width: self.width,
+                height: self.height,
+                depth: 1,
+            },
+            mip_level_count: 1,
+            array_layer_count: 1,
+            sample_count: 1,
+            dimension: resource::TextureDimension::D2,
+            format: self.format,
+            usage: self.usage,
+        }
+    }
+}
+
+#[repr(C)]
+#[derive(Debug)]
+pub struct SwapChainOutput {
+    pub view_id: TextureViewId,
+}
+
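Note: the acquire path that follows retries once after reconfiguring the swapchain whenever `acquire_image` fails for a reason other than a timeout (for example, an out-of-date swapchain after a window resize). A simplified, self-contained model of that fallback (all types here are invented stand-ins for the `gfx-hal` surface API):

```rust
#[derive(Debug)]
#[allow(dead_code)] // Timeout is matched but never constructed in this sketch
enum AcquireError {
    Timeout,
    OutOfDate,
}

struct Surface {
    configured: bool,
}

impl Surface {
    fn acquire_image(&mut self) -> Result<u32, AcquireError> {
        if self.configured {
            Ok(0)
        } else {
            Err(AcquireError::OutOfDate)
        }
    }
    fn configure(&mut self) {
        self.configured = true;
    }
}

fn acquire_frame(surface: &mut Surface) -> u32 {
    match surface.acquire_image() {
        Ok(image) => image,
        // A timeout means the GPU is badly behind; there is no sane recovery.
        Err(AcquireError::Timeout) => panic!("GPU took too long to present"),
        Err(e) => {
            // Any other error is handled by reconfiguring and retrying once.
            eprintln!("acquire_image() failed ({:?}), reconfiguring", e);
            surface.configure();
            surface.acquire_image().unwrap()
        }
    }
}

fn main() {
    let mut surface = Surface { configured: false };
    assert_eq!(acquire_frame(&mut surface), 0);
}
```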
+pub fn swap_chain_get_next_texture<B: GfxBackend>(
+    global: &Global,
+    swap_chain_id: SwapChainId,
+    view_id_in: Input<TextureViewId>,
+) -> SwapChainOutput {
+    let hub = B::hub(global);
+    let mut token = Token::root();
+
+    let (mut surface_guard, mut token) = global.surfaces.write(&mut token);
+    let surface = &mut surface_guard[swap_chain_id.to_surface_id()];
+    let (device_guard, mut token) = hub.devices.read(&mut token);
+    let (mut swap_chain_guard, mut token) = hub.swap_chains.write(&mut token);
+    let sc = &mut swap_chain_guard[swap_chain_id];
+    let device = &device_guard[sc.device_id.value];
+
+    let (image, _) = {
+        let suf = B::get_surface_mut(surface);
+        match unsafe { suf.acquire_image(FRAME_TIMEOUT_MS * 1_000_000) } {
+            Ok(surface_image) => surface_image,
+            Err(hal::window::AcquireError::Timeout) => {
+                panic!("GPU took too much time processing last frames :(");
+            }
+            Err(e) => {
+                log::warn!("acquire_image() failed ({:?}), reconfiguring swapchain", e);
+                let desc = sc.desc.to_hal(sc.num_frames, &device.features);
+                unsafe {
+                    suf.configure_swapchain(&device.raw, desc).unwrap();
+                    suf.acquire_image(FRAME_TIMEOUT_MS * 1_000_000).unwrap()
+                }
+            }
+        }
+    };
+
+    let view = resource::TextureView {
+        inner: resource::TextureViewInner::SwapChain {
+            image,
+            source_id: Stored {
+                value: swap_chain_id,
+                ref_count: sc.life_guard.ref_count.clone(),
+            },
+            framebuffers: SmallVec::new(),
+        },
+        format: sc.desc.format,
+        extent: hal::image::Extent {
+            width: sc.desc.width,
+            height: sc.desc.height,
+            depth: 1,
+        },
+        samples: 1,
+        range: hal::image::SubresourceRange {
+            aspects: hal::format::Aspects::COLOR,
+            layers: 0 .. 1,
+            levels: 0 .. 1,
+        },
+        life_guard: LifeGuard::new(),
+    };
+    let ref_count = view.life_guard.ref_count.clone();
+    let (view_id, _) = hub.texture_views.new_identity(view_id_in);
+    hub.texture_views.register(view_id, view, &mut token);
+
+    assert!(
+        sc.acquired_view_id.is_none(),
+        "Swap chain image is already acquired"
+    );
+    sc.acquired_view_id = Some(Stored {
+        value: view_id,
+        ref_count,
+    });
+
+    SwapChainOutput { view_id }
+}
+
+#[cfg(feature = "local")]
+#[no_mangle]
+pub extern "C" fn wgpu_swap_chain_get_next_texture(swap_chain_id: SwapChainId) -> SwapChainOutput {
+    gfx_select!(swap_chain_id => swap_chain_get_next_texture(&*GLOBAL, swap_chain_id, PhantomData))
+}
+
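Note: `swap_chain_present` below relies on `acquired_view_id` being an `Option` that is filled by acquire and emptied by present, so unbalanced acquire/present pairs fail loudly. The invariant, reduced to the `Option::take` dance (illustrative only, not the vendored types):

```rust
struct SwapChain {
    acquired_view: Option<u32>,
}

impl SwapChain {
    fn acquire(&mut self, view: u32) {
        // Acquiring twice without presenting is a logic error.
        assert!(
            self.acquired_view.is_none(),
            "Swap chain image is already acquired"
        );
        self.acquired_view = Some(view);
    }

    fn present(&mut self) -> u32 {
        // `take` clears the slot, so presenting twice without a new acquire
        // panics, mirroring the `expect` in `swap_chain_present`.
        self.acquired_view
            .take()
            .expect("Swap chain image is not acquired")
    }
}

fn main() {
    let mut sc = SwapChain { acquired_view: None };
    sc.acquire(7);
    assert_eq!(sc.present(), 7);
    // sc.present(); // would panic: nothing is acquired
}
```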
+pub fn swap_chain_present<B: GfxBackend>(global: &Global, swap_chain_id: SwapChainId) {
+    let hub = B::hub(global);
+    let mut token = Token::root();
+
+    let (mut surface_guard, mut token) = global.surfaces.write(&mut token);
+    let surface = &mut surface_guard[swap_chain_id.to_surface_id()];
+    let (mut device_guard, mut token) = hub.devices.write(&mut token);
+    let (mut swap_chain_guard, mut token) = hub.swap_chains.write(&mut token);
+    let sc = &mut swap_chain_guard[swap_chain_id];
+    let device = &mut device_guard[sc.device_id.value];
+
+    let view_id = sc
+        .acquired_view_id
+        .take()
+        .expect("Swap chain image is not acquired");
+    let (view, _) = hub.texture_views.unregister(view_id.value, &mut token);
+    let (image, framebuffers) = match view.inner {
+        resource::TextureViewInner::Native { .. } => unreachable!(),
+        resource::TextureViewInner::SwapChain {
+            image, framebuffers, ..
+        } => (image, framebuffers),
+    };
+
+    let err = unsafe {
+        let queue = &mut device.queue_group.queues[0];
+        queue.present_surface(B::get_surface_mut(surface), image, Some(&sc.semaphore))
+    };
+    if let Err(e) = err {
+        log::warn!("present failed: {:?}", e);
+    }
+
+    for fbo in framebuffers {
+        unsafe {
+            device.raw.destroy_framebuffer(fbo);
+        }
+    }
+}
+
+#[cfg(feature = "local")]
+#[no_mangle]
+pub extern "C" fn wgpu_swap_chain_present(swap_chain_id: SwapChainId) {
+    gfx_select!(swap_chain_id => swap_chain_present(&*GLOBAL, swap_chain_id))
+}
diff --git a/dom/webgpu/wgpu-native/src/track/buffer.rs b/dom/webgpu/wgpu-native/src/track/buffer.rs
new file mode 100644
index 000000000000..deda7edb2985
--- /dev/null
+++ b/dom/webgpu/wgpu-native/src/track/buffer.rs
@@ -0,0 +1,124 @@
+
+
+
+
+use super::{PendingTransition, ResourceState, Stitch, Unit};
+use crate::{conv, resource::BufferUsage, BufferId};
+use std::ops::Range;
+
+
+pub type BufferState = Unit<BufferUsage>;
+
+impl PendingTransition<BufferState> {
+
+    pub fn to_states(&self) -> Range<hal::buffer::State> {
+        conv::map_buffer_state(self.usage.start) .. conv::map_buffer_state(self.usage.end)
+    }
+}
+
+impl Default for BufferState {
+    fn default() -> Self {
+        BufferState {
+            init: BufferUsage::empty(),
+            last: BufferUsage::empty(),
+        }
+    }
+}
+
+impl ResourceState for BufferState {
+    type Id = BufferId;
+    type Selector = ();
+    type Usage = BufferUsage;
+
+    fn query(&self, _selector: Self::Selector) -> Option<Self::Usage> {
+        Some(self.last)
+    }
+
+    fn change(
+        &mut self,
+        id: Self::Id,
+        _selector: Self::Selector,
+        usage: Self::Usage,
+        output: Option<&mut Vec<PendingTransition<Self>>>,
+    ) -> Result<(), PendingTransition<Self>> {
+        let old = self.last;
+        if usage != old || !BufferUsage::ORDERED.contains(usage) {
+            let pending = PendingTransition {
+                id,
+                selector: (),
+                usage: old .. usage,
+            };
+            self.last = match output {
+                Some(transitions) => {
+                    transitions.push(pending);
+                    usage
+                }
+                None => {
+                    if !old.is_empty()
+                        && old != usage
+                        && BufferUsage::WRITE_ALL.intersects(old | usage)
+                    {
+                        return Err(pending);
+                    }
+                    old | usage
+                }
+            };
+        }
+        Ok(())
+    }
+
+    fn merge(
+        &mut self,
+        id: Self::Id,
+        other: &Self,
+        stitch: Stitch,
+        output: Option<&mut Vec<PendingTransition<Self>>>,
+    ) -> Result<(), PendingTransition<Self>> {
+        let old = self.last;
+        let new = other.select(stitch);
+        self.last = if old == new && BufferUsage::ORDERED.contains(new) {
+            other.last
+        } else {
+            let pending = PendingTransition {
+                id,
+                selector: (),
+                usage: old .. new,
+            };
+            match output {
+                Some(transitions) => {
+                    transitions.push(pending);
+                    other.last
+                }
+                None => {
+                    if !old.is_empty() && BufferUsage::WRITE_ALL.intersects(old | new) {
+                        return Err(pending);
+                    }
+                    old | new
+                }
+            }
+        };
+        Ok(())
+    }
+
+    fn optimize(&mut self) {}
+}
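Note: a `Unit` keeps both the first (`init`) and the latest (`last`) usage of a resource so that independently recorded trackers can later be joined; the `Stitch` mode defined in `track/mod.rs` below selects which end to join against. A conceptual sketch of why both ends are kept (plain `u32` usages and an invented `stitch` helper, not the vendored logic):

```rust
#[derive(Clone, Copy, Debug, PartialEq)]
struct Unit {
    init: u32, // usage expected on entry
    last: u32, // usage left behind on exit
}

// Joining tracker A to tracker B needs a transition from A's final usage to
// B's initial one; the stitched record keeps A's entry and B's exit.
fn stitch(a: Unit, b: Unit) -> (Option<(u32, u32)>, Unit) {
    let transition = if a.last != b.init {
        Some((a.last, b.init))
    } else {
        None
    };
    (transition, Unit { init: a.init, last: b.last })
}

fn main() {
    const COPY_DST: u32 = 8;
    const VERTEX: u32 = 32;
    let (t, joined) = stitch(
        Unit { init: COPY_DST, last: COPY_DST },
        Unit { init: VERTEX, last: VERTEX },
    );
    assert_eq!(t, Some((COPY_DST, VERTEX)));
    assert_eq!(joined, Unit { init: COPY_DST, last: VERTEX });
}
```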
+
+#[cfg(test)]
+mod test {
+    use super::*;
+    use crate::{Backend, TypedId};
+
+    #[test]
+    fn change() {
+        let mut bs = Unit {
+            init: BufferUsage::INDEX,
+            last: BufferUsage::STORAGE,
+        };
+        let id = TypedId::zip(0, 0, Backend::Empty);
+        assert!(bs.change(id, (), BufferUsage::VERTEX, None).is_err());
+        bs.change(id, (), BufferUsage::VERTEX, Some(&mut Vec::new()))
+            .unwrap();
+        bs.change(id, (), BufferUsage::INDEX, None).unwrap();
+        assert_eq!(bs.last, BufferUsage::VERTEX | BufferUsage::INDEX);
+    }
+}
diff --git a/dom/webgpu/wgpu-native/src/track/mod.rs b/dom/webgpu/wgpu-native/src/track/mod.rs
new file mode 100644
index 000000000000..beaff381b90b
--- /dev/null
+++ b/dom/webgpu/wgpu-native/src/track/mod.rs
@@ -0,0 +1,472 @@
+
+
+
+
+mod buffer;
+mod range;
+mod texture;
+
+use crate::{
+    hub::Storage,
+    Backend,
+    BindGroupId,
+    Epoch,
+    FastHashMap,
+    Index,
+    RefCount,
+    SamplerId,
+    TextureViewId,
+    TypedId,
+};
+
+use std::{
+    borrow::Borrow,
+    collections::hash_map::Entry,
+    fmt::Debug,
+    marker::PhantomData,
+    ops::Range,
+    vec::Drain,
+};
+
+use buffer::BufferState;
+use texture::TextureState;
+
+
+
+
+#[derive(Clone, Copy, Debug, PartialEq)]
+pub struct Unit<U> {
+    init: U,
+    last: U,
+}
+
+impl<U: Copy> Unit<U> {
+
+    fn new(usage: U) -> Self {
+        Unit {
+            init: usage,
+            last: usage,
+        }
+    }
+
+    fn select(&self, stitch: Stitch) -> U {
+        match stitch {
+            Stitch::Init => self.init,
+            Stitch::Last => self.last,
+        }
+    }
+}
+
+
+#[derive(Clone, Copy, Debug, PartialEq)]
+pub enum Stitch {
+
+    Init,
+
+    Last,
+}
+
+
+
+pub trait ResourceState: Clone + Default {
+
+    type Id: Copy + Debug + TypedId;
+
+    type Selector: Debug;
+
+    type Usage: Debug;
+
+    fn query(&self, selector: Self::Selector) -> Option<Self::Usage>;
+
+    fn change(
+        &mut self,
+        id: Self::Id,
+        selector: Self::Selector,
+        usage: Self::Usage,
+        output: Option<&mut Vec<PendingTransition<Self>>>,
+    ) -> Result<(), PendingTransition<Self>>;
+
+    fn merge(
+        &mut self,
+        id: Self::Id,
+        other: &Self,
+        stitch: Stitch,
+        output: Option<&mut Vec<PendingTransition<Self>>>,
+    ) -> Result<(), PendingTransition<Self>>;
+
+    fn optimize(&mut self);
+}
+
+
+
+#[derive(Clone, Debug)]
+struct Resource<S> {
+    ref_count: RefCount,
+    state: S,
+    epoch: Epoch,
+}
+
+
+
+
+#[derive(Debug)]
+pub struct PendingTransition<S: ResourceState> {
+    pub id: S::Id,
+    pub selector: S::Selector,
+    pub usage: Range<S::Usage>,
+}
+
+
+#[derive(Debug)]
+pub struct ResourceTracker<S: ResourceState> {
+
+    map: FastHashMap<Index, Resource<S>>,
+
+    temp: Vec<PendingTransition<S>>,
+
+    backend: Backend,
+}
+
+impl<S: ResourceState> ResourceTracker<S> {
+
+    pub fn new(backend: Backend) -> Self {
+        ResourceTracker {
+            map: FastHashMap::default(),
+            temp: Vec::new(),
+            backend,
+        }
+    }
+
+    pub fn remove(&mut self, id: S::Id) -> bool {
+        let (index, epoch, backend) = id.unzip();
+        debug_assert_eq!(backend, self.backend);
+        match self.map.remove(&index) {
+            Some(resource) => {
+                assert_eq!(resource.epoch, epoch);
+                true
+            }
+            None => false,
+        }
+    }
+
+    pub fn optimize(&mut self) {
+        for resource in self.map.values_mut() {
+            resource.state.optimize();
+        }
+    }
+
+    pub fn used<'a>(&'a self) -> impl 'a + Iterator<Item = S::Id> {
+        let backend = self.backend;
+        self.map
+            .iter()
+            .map(move |(&index, resource)| S::Id::zip(index, resource.epoch, backend))
+    }
+
+    fn clear(&mut self) {
+        self.map.clear();
+    }
+
+
+
+
+    pub fn init(
+ &mut self, + id: S::Id, + ref_count: &RefCount, + selector: S::Selector, + default: S::Usage, + ) -> bool { + let mut state = S::default(); + match state.change(id, selector, default, None) { + Ok(()) => (), + Err(_) => unreachable!(), + } + + let (index, epoch, backend) = id.unzip(); + debug_assert_eq!(backend, self.backend); + self.map + .insert( + index, + Resource { + ref_count: ref_count.clone(), + state, + epoch, + }, + ) + .is_none() + } + + + + + + pub fn query(&mut self, id: S::Id, selector: S::Selector) -> Option { + let (index, epoch, backend) = id.unzip(); + debug_assert_eq!(backend, self.backend); + let res = self.map.get(&index)?; + assert_eq!(res.epoch, epoch); + res.state.query(selector) + } + + + + fn get_or_insert<'a>( + self_backend: Backend, + map: &'a mut FastHashMap>, + id: S::Id, + ref_count: &RefCount, + ) -> &'a mut Resource { + let (index, epoch, backend) = id.unzip(); + debug_assert_eq!(self_backend, backend); + match map.entry(index) { + Entry::Vacant(e) => e.insert(Resource { + ref_count: ref_count.clone(), + state: S::default(), + epoch, + }), + Entry::Occupied(e) => { + assert_eq!(e.get().epoch, epoch); + e.into_mut() + } + } + } + + + + + pub fn change_extend( + &mut self, + id: S::Id, + ref_count: &RefCount, + selector: S::Selector, + usage: S::Usage, + ) -> Result<(), PendingTransition> { + Self::get_or_insert(self.backend, &mut self.map, id, ref_count) + .state + .change(id, selector, usage, None) + } + + + pub fn change_replace( + &mut self, + id: S::Id, + ref_count: &RefCount, + selector: S::Selector, + usage: S::Usage, + ) -> Drain> { + let res = Self::get_or_insert(self.backend, &mut self.map, id, ref_count); + res.state + .change(id, selector, usage, Some(&mut self.temp)) + .ok(); + self.temp.drain(..) + } + + + + pub fn merge_extend(&mut self, other: &Self) -> Result<(), PendingTransition> { + debug_assert_eq!(self.backend, other.backend); + for (&index, new) in other.map.iter() { + match self.map.entry(index) { + Entry::Vacant(e) => { + e.insert(new.clone()); + } + Entry::Occupied(e) => { + assert_eq!(e.get().epoch, new.epoch); + let id = S::Id::zip(index, new.epoch, self.backend); + e.into_mut() + .state + .merge(id, &new.state, Stitch::Last, None)?; + } + } + } + Ok(()) + } + + + + pub fn merge_replace<'a>( + &'a mut self, + other: &'a Self, + stitch: Stitch, + ) -> Drain> { + for (&index, new) in other.map.iter() { + match self.map.entry(index) { + Entry::Vacant(e) => { + e.insert(new.clone()); + } + Entry::Occupied(e) => { + assert_eq!(e.get().epoch, new.epoch); + let id = S::Id::zip(index, new.epoch, self.backend); + e.into_mut() + .state + .merge(id, &new.state, stitch, Some(&mut self.temp)) + .ok(); + } + } + } + self.temp.drain(..) 
+ } + + + + + + + pub fn use_extend<'a, T: 'a + Borrow>( + &mut self, + storage: &'a Storage, + id: S::Id, + selector: S::Selector, + usage: S::Usage, + ) -> Result<&'a T, S::Usage> { + let item = &storage[id]; + self.change_extend(id, item.borrow(), selector, usage) + .map(|()| item) + .map_err(|pending| pending.usage.start) + } + + + + + + pub fn use_replace<'a, T: 'a + Borrow>( + &mut self, + storage: &'a Storage, + id: S::Id, + selector: S::Selector, + usage: S::Usage, + ) -> (&'a T, Drain>) { + let item = &storage[id]; + let drain = self.change_replace(id, item.borrow(), selector, usage); + (item, drain) + } +} + + +impl ResourceState for PhantomData { + type Id = I; + type Selector = (); + type Usage = (); + + fn query(&self, _selector: Self::Selector) -> Option { + Some(()) + } + + fn change( + &mut self, + _id: Self::Id, + _selector: Self::Selector, + _usage: Self::Usage, + _output: Option<&mut Vec>>, + ) -> Result<(), PendingTransition> { + Ok(()) + } + + fn merge( + &mut self, + _id: Self::Id, + _other: &Self, + _stitch: Stitch, + _output: Option<&mut Vec>>, + ) -> Result<(), PendingTransition> { + Ok(()) + } + + fn optimize(&mut self) {} +} + + + +#[derive(Debug)] +pub struct TrackerSet { + pub buffers: ResourceTracker, + pub textures: ResourceTracker, + pub views: ResourceTracker>, + pub bind_groups: ResourceTracker>, + pub samplers: ResourceTracker>, +} + +impl TrackerSet { + + pub fn new(backend: Backend) -> Self { + TrackerSet { + buffers: ResourceTracker::new(backend), + textures: ResourceTracker::new(backend), + views: ResourceTracker::new(backend), + bind_groups: ResourceTracker::new(backend), + samplers: ResourceTracker::new(backend), + } + } + + + pub fn clear(&mut self) { + self.buffers.clear(); + self.textures.clear(); + self.views.clear(); + self.bind_groups.clear(); + self.samplers.clear(); + } + + + pub fn optimize(&mut self) { + self.buffers.optimize(); + self.textures.optimize(); + self.views.optimize(); + self.bind_groups.optimize(); + self.samplers.optimize(); + } + + + + pub fn merge_extend(&mut self, other: &Self) { + self.buffers.merge_extend(&other.buffers).unwrap(); + self.textures.merge_extend(&other.textures).unwrap(); + self.views.merge_extend(&other.views).unwrap(); + self.bind_groups.merge_extend(&other.bind_groups).unwrap(); + self.samplers.merge_extend(&other.samplers).unwrap(); + } + + pub fn backend(&self) -> Backend { + self.buffers.backend + } +} diff --git a/dom/webgpu/wgpu-native/src/track/range.rs b/dom/webgpu/wgpu-native/src/track/range.rs new file mode 100644 index 000000000000..3f4cd61efd00 --- /dev/null +++ b/dom/webgpu/wgpu-native/src/track/range.rs @@ -0,0 +1,411 @@ + + + + +use std::{cmp::Ordering, fmt::Debug, iter::Peekable, ops::Range, slice::Iter}; + + + + +#[derive(Clone, Debug)] +pub struct RangedStates { + + + ranges: Vec<(Range, T)>, +} + +impl Default for RangedStates { + fn default() -> Self { + RangedStates { ranges: Vec::new() } + } +} + +impl RangedStates { + + #[cfg(test)] + pub fn new(values: &[(Range, T)]) -> Self { + RangedStates { + ranges: values.to_vec(), + } + } + + + pub fn clear(&mut self) { + self.ranges.clear(); + } + + + + + + pub fn append(&mut self, index: Range, value: T) { + if let Some(last) = self.ranges.last() { + debug_assert!(last.0.end <= index.start); + } + self.ranges.push((index, value)); + } + + + + #[cfg(test)] + fn check_sanity(&self) { + for a in self.ranges.iter() { + assert!(a.0.start < a.0.end); + } + for (a, b) in self.ranges.iter().zip(self.ranges[1 ..].iter()) { + assert!(a.0.end <= 
b.0.start); + } + } + + + pub fn coalesce(&mut self) { + let mut num_removed = 0; + let mut iter = self.ranges.iter_mut(); + let mut cur = match iter.next() { + Some(elem) => elem, + None => return, + }; + while let Some(next) = iter.next() { + if cur.0.end == next.0.start && cur.1 == next.1 { + num_removed += 1; + cur.0.end = next.0.end; + next.0.end = next.0.start; + } else { + cur = next; + } + } + if num_removed != 0 { + self.ranges.retain(|pair| pair.0.start != pair.0.end); + } + } + + + + + + pub fn query( + &self, + index: &Range, + fun: impl Fn(&T) -> U, + ) -> Option> { + let mut result = None; + for &(ref range, ref value) in self.ranges.iter() { + if range.end > index.start && range.start < index.end { + let old = result.replace(fun(value)); + if old.is_some() && old != result { + return Some(Err(())); + } + } + } + result.map(Ok) + } + + + + + + pub fn isolate(&mut self, index: &Range, default: T) -> &mut [(Range, T)] { + + + + + let mut start_pos = match self.ranges.iter().position(|pair| pair.0.end > index.start) { + Some(pos) => pos, + None => { + let pos = self.ranges.len(); + self.ranges.push((index.clone(), default)); + return &mut self.ranges[pos ..]; + } + }; + + { + let (range, value) = self.ranges[start_pos].clone(); + if range.start < index.start { + self.ranges[start_pos].0.start = index.start; + self.ranges + .insert(start_pos, (range.start .. index.start, value)); + start_pos += 1; + } + } + let mut pos = start_pos; + let mut range_pos = index.start; + loop { + let (range, value) = self.ranges[pos].clone(); + if range.start >= index.end { + self.ranges.insert(pos, (range_pos .. index.end, default)); + pos += 1; + break; + } + if range.start > range_pos { + self.ranges.insert(pos, (range_pos .. range.start, default)); + pos += 1; + range_pos = range.start; + } + if range.end >= index.end { + if range.end != index.end { + self.ranges[pos].0.start = index.end; + self.ranges.insert(pos, (range_pos .. index.end, value)); + } + pos += 1; + break; + } + pos += 1; + range_pos = range.end; + if pos == self.ranges.len() { + self.ranges.push((range_pos .. index.end, default)); + pos += 1; + break; + } + } + + &mut self.ranges[start_pos .. pos] + } + + + + #[cfg(test)] + pub fn sanely_isolated(&self, index: Range, default: T) -> Vec<(Range, T)> { + let mut clone = self.clone(); + let result = clone.isolate(&index, default).to_vec(); + clone.check_sanity(); + result + } + + + + + + pub fn merge<'a>(&'a self, other: &'a Self, base: I) -> Merge<'a, I, T> { + Merge { + base, + sa: self.ranges.iter().peekable(), + sb: other.ranges.iter().peekable(), + } + } +} + + + +#[derive(Debug)] +pub struct Merge<'a, I, T> { + base: I, + sa: Peekable, T)>>, + sb: Peekable, T)>>, +} + +impl<'a, I: Copy + Debug + Ord, T: Copy + Debug> Iterator for Merge<'a, I, T> { + type Item = (Range, Range>); + fn next(&mut self) -> Option { + match (self.sa.peek(), self.sb.peek()) { + + (Some(&(ref ra, va)), Some(&(ref rb, vb))) => { + let (range, usage) = if ra.start < self.base { + + if self.base == rb.start { + + debug_assert!(self.base < ra.end); + (self.base .. ra.end.min(rb.end), Some(*va) .. Some(*vb)) + } else { + + debug_assert!(self.base < rb.start); + (self.base .. rb.start, Some(*va) .. None) + } + } else if rb.start < self.base { + + if self.base == ra.start { + + debug_assert!(self.base < rb.end); + (self.base .. ra.end.min(rb.end), Some(*va) .. Some(*vb)) + } else { + + debug_assert!(self.base < ra.start); + (self.base .. ra.start, None .. 
Some(*vb)) + } + } else { + + match ra.start.cmp(&rb.start) { + + Ordering::Equal => (ra.start .. ra.end.min(rb.end), Some(*va) .. Some(*vb)), + + Ordering::Less => (ra.start .. rb.start.min(ra.end), Some(*va) .. None), + + Ordering::Greater => (rb.start .. ra.start.min(rb.end), None .. Some(*vb)), + } + }; + self.base = range.end; + if ra.end == range.end { + let _ = self.sa.next(); + } + if rb.end == range.end { + let _ = self.sb.next(); + } + Some((range, usage)) + } + + (None, Some(&(ref rb, vb))) => { + let range = self.base.max(rb.start) .. rb.end; + self.base = rb.end; + let _ = self.sb.next(); + Some((range, None .. Some(*vb))) + } + + (Some(&(ref ra, va)), None) => { + let range = self.base.max(ra.start) .. ra.end; + self.base = ra.end; + let _ = self.sa.next(); + Some((range, Some(*va) .. None)) + } + + (None, None) => None, + } + } +} + +#[cfg(test)] +mod test { + + use super::RangedStates; + use std::{fmt::Debug, ops::Range}; + + fn easy_merge( + ra: Vec<(Range, T)>, + rb: Vec<(Range, T)>, + ) -> Vec<(Range, Range>)> { + RangedStates { ranges: ra } + .merge(&RangedStates { ranges: rb }, 0) + .collect() + } + + #[test] + fn sane_good() { + let rs = RangedStates { + ranges: vec![(1 .. 4, 9u8), (4 .. 5, 9)], + }; + rs.check_sanity(); + } + + #[test] + #[should_panic] + fn sane_empty() { + let rs = RangedStates { + ranges: vec![(1 .. 4, 9u8), (5 .. 5, 9)], + }; + rs.check_sanity(); + } + + #[test] + #[should_panic] + fn sane_intersect() { + let rs = RangedStates { + ranges: vec![(1 .. 4, 9u8), (3 .. 5, 9)], + }; + rs.check_sanity(); + } + + #[test] + fn coalesce() { + let mut rs = RangedStates { + ranges: vec![(1 .. 4, 9u8), (4 .. 5, 9), (5 .. 7, 1), (8 .. 9, 1)], + }; + rs.coalesce(); + rs.check_sanity(); + assert_eq!(rs.ranges, vec![(1 .. 5, 9), (5 .. 7, 1), (8 .. 9, 1),]); + } + + #[test] + fn query() { + let rs = RangedStates { + ranges: vec![(1 .. 4, 1u8), (5 .. 7, 2)], + }; + assert_eq!(rs.query(&(0 .. 1), |v| *v), None); + assert_eq!(rs.query(&(1 .. 3), |v| *v), Some(Ok(1))); + assert_eq!(rs.query(&(1 .. 6), |v| *v), Some(Err(()))); + } + + #[test] + fn isolate() { + let rs = RangedStates { + ranges: vec![(1 .. 4, 9u8), (4 .. 5, 9), (5 .. 7, 1), (8 .. 9, 1)], + }; + assert_eq!(&rs.sanely_isolated(4 .. 5, 0), &[(4 .. 5, 9u8),]); + assert_eq!( + &rs.sanely_isolated(0 .. 6, 0), + &[(0 .. 1, 0), (1 .. 4, 9u8), (4 .. 5, 9), (5 .. 6, 1),] + ); + assert_eq!( + &rs.sanely_isolated(8 .. 10, 1), + &[(8 .. 9, 1), (9 .. 10, 1),] + ); + assert_eq!( + &rs.sanely_isolated(6 .. 9, 0), + &[(6 .. 7, 1), (7 .. 8, 0), (8 .. 9, 1),] + ); + } + + #[test] + fn merge_same() { + assert_eq!( + easy_merge(vec![(1 .. 4, 0u8),], vec![(1 .. 4, 2u8),],), + vec![(1 .. 4, Some(0) .. Some(2)),] + ); + } + + #[test] + fn merge_empty() { + assert_eq!( + easy_merge(vec![(1 .. 2, 0u8),], vec![],), + vec![(1 .. 2, Some(0) .. None),] + ); + assert_eq!( + easy_merge(vec![], vec![(3 .. 4, 1u8),],), + vec![(3 .. 4, None .. Some(1)),] + ); + } + + #[test] + fn merge_separate() { + assert_eq!( + easy_merge(vec![(1 .. 2, 0u8), (5 .. 6, 1u8),], vec![(2 .. 4, 2u8),],), + vec![ + (1 .. 2, Some(0) .. None), + (2 .. 4, None .. Some(2)), + (5 .. 6, Some(1) .. None), + ] + ); + } + + #[test] + fn merge_subset() { + assert_eq!( + easy_merge(vec![(1 .. 6, 0u8),], vec![(2 .. 4, 2u8),],), + vec![ + (1 .. 2, Some(0) .. None), + (2 .. 4, Some(0) .. Some(2)), + (4 .. 6, Some(0) .. None), + ] + ); + assert_eq!( + easy_merge(vec![(2 .. 4, 0u8),], vec![(1 .. 4, 2u8),],), + vec![(1 .. 2, None .. Some(2)), (2 .. 4, Some(0) .. 
Some(2)),] + ); + } + + #[test] + fn merge_all() { + assert_eq!( + easy_merge( + vec![(1 .. 4, 0u8), (5 .. 8, 1u8),], + vec![(2 .. 6, 2u8), (7 .. 9, 3u8),], + ), + vec![ + (1 .. 2, Some(0) .. None), + (2 .. 4, Some(0) .. Some(2)), + (4 .. 5, None .. Some(2)), + (5 .. 6, Some(1) .. Some(2)), + (6 .. 7, Some(1) .. None), + (7 .. 8, Some(1) .. Some(3)), + (8 .. 9, None .. Some(3)), + ] + ); + } +} diff --git a/dom/webgpu/wgpu-native/src/track/texture.rs b/dom/webgpu/wgpu-native/src/track/texture.rs new file mode 100644 index 000000000000..071f9bc03210 --- /dev/null +++ b/dom/webgpu/wgpu-native/src/track/texture.rs @@ -0,0 +1,301 @@ + + + + +use super::{range::RangedStates, PendingTransition, ResourceState, Stitch, Unit}; +use crate::{conv, device::MAX_MIP_LEVELS, resource::TextureUsage, TextureId}; + +use arrayvec::ArrayVec; + +use std::ops::Range; + + +type PlaneStates = RangedStates>; + + + +#[derive(Clone, Debug, Default)] +struct MipState { + color: PlaneStates, + depth: PlaneStates, + stencil: PlaneStates, +} + +#[derive(Clone, Debug, Default)] +pub struct TextureState { + mips: ArrayVec<[MipState; MAX_MIP_LEVELS]>, +} + +impl PendingTransition { + + pub fn to_states(&self) -> Range { + conv::map_texture_state(self.usage.start, self.selector.aspects) + .. conv::map_texture_state(self.usage.end, self.selector.aspects) + } + + + + + + + + + fn record( + self, + output: Option<&mut &mut Vec>, + replace: TextureUsage, + ) -> Result { + let u = self.usage.clone(); + match output { + Some(out) => { + out.push(self); + Ok(replace) + } + None => { + if !u.start.is_empty() + && u.start != u.end + && TextureUsage::WRITE_ALL.intersects(u.start | u.end) + { + Err(self) + } else { + Ok(u.start | u.end) + } + } + } + } +} + +impl ResourceState for TextureState { + type Id = TextureId; + type Selector = hal::image::SubresourceRange; + type Usage = TextureUsage; + + fn query(&self, selector: Self::Selector) -> Option { + let mut result = None; + let num_levels = self.mips.len(); + let mip_start = num_levels.min(selector.levels.start as usize); + let mip_end = num_levels.min(selector.levels.end as usize); + for mip in self.mips[mip_start .. mip_end].iter() { + for &(aspect, plane_states) in &[ + (hal::format::Aspects::COLOR, &mip.color), + (hal::format::Aspects::DEPTH, &mip.depth), + (hal::format::Aspects::STENCIL, &mip.stencil), + ] { + if !selector.aspects.contains(aspect) { + continue; + } + match plane_states.query(&selector.layers, |unit| unit.last) { + None => {} + Some(Ok(usage)) if result == Some(usage) => {} + Some(Ok(usage)) if result.is_none() => { + result = Some(usage); + } + Some(Ok(_)) | Some(Err(())) => return None, + } + } + } + result + } + + fn change( + &mut self, + id: Self::Id, + selector: Self::Selector, + usage: Self::Usage, + mut output: Option<&mut Vec>>, + ) -> Result<(), PendingTransition> { + while self.mips.len() < selector.levels.end as usize { + self.mips.push(MipState::default()); + } + for (mip_id, mip) in self.mips + [selector.levels.start as usize .. 
selector.levels.end as usize] + .iter_mut() + .enumerate() + { + let level = selector.levels.start + mip_id as hal::image::Level; + for &mut (aspect, ref mut plane_states) in &mut [ + (hal::format::Aspects::COLOR, &mut mip.color), + (hal::format::Aspects::DEPTH, &mut mip.depth), + (hal::format::Aspects::STENCIL, &mut mip.stencil), + ] { + if !selector.aspects.contains(aspect) { + continue; + } + let layers = plane_states.isolate(&selector.layers, Unit::new(usage)); + for &mut (ref range, ref mut unit) in layers { + if unit.last == usage && TextureUsage::ORDERED.contains(usage) { + continue; + } + let pending = PendingTransition { + id, + selector: hal::image::SubresourceRange { + aspects: hal::format::Aspects::COLOR, + levels: level .. level + 1, + layers: range.clone(), + }, + usage: unit.last .. usage, + }; + unit.last = pending.record(output.as_mut(), usage)?; + } + } + } + Ok(()) + } + + fn merge( + &mut self, + id: Self::Id, + other: &Self, + stitch: Stitch, + mut output: Option<&mut Vec>>, + ) -> Result<(), PendingTransition> { + assert!(output.is_some() || stitch == Stitch::Last); + + let mut temp = Vec::new(); + while self.mips.len() < other.mips.len() as usize { + self.mips.push(MipState::default()); + } + + for (mip_id, (mip_self, mip_other)) in self.mips.iter_mut().zip(&other.mips).enumerate() { + let level = mip_id as hal::image::Level; + for &mut (aspects, ref mut planes_self, planes_other) in &mut [ + ( + hal::format::Aspects::COLOR, + &mut mip_self.color, + &mip_other.color, + ), + ( + hal::format::Aspects::DEPTH, + &mut mip_self.depth, + &mip_other.depth, + ), + ( + hal::format::Aspects::STENCIL, + &mut mip_self.stencil, + &mip_other.stencil, + ), + ] { + temp.extend(planes_self.merge(planes_other, 0)); + planes_self.clear(); + + for (layers, states) in temp.drain(..) { + let unit = match states { + Range { + start: None, + end: None, + } => unreachable!(), + Range { + start: Some(start), + end: None, + } => start, + Range { + start: None, + end: Some(end), + } => end, + Range { + start: Some(start), + end: Some(end), + } => { + let mut final_usage = end.select(stitch); + if start.last != final_usage + || !TextureUsage::ORDERED.contains(final_usage) + { + let pending = PendingTransition { + id, + selector: hal::image::SubresourceRange { + aspects, + levels: level .. level + 1, + layers: layers.clone(), + }, + usage: start.last .. final_usage, + }; + final_usage = pending.record(output.as_mut(), end.last)?; + } + Unit { + init: start.init, + last: final_usage, + } + } + }; + planes_self.append(layers, unit); + } + } + } + + Ok(()) + } + + fn optimize(&mut self) { + for mip in self.mips.iter_mut() { + mip.color.coalesce(); + mip.depth.coalesce(); + mip.stencil.coalesce(); + } + } +} + + +#[cfg(test)] +mod test { + + + use super::*; + use hal::{format::Aspects, image::SubresourceRange}; + + #[test] + fn query() { + let mut ts = TextureState::default(); + ts.mips.push(MipState::default()); + ts.mips.push(MipState::default()); + ts.mips[1].color = PlaneStates::new(&[ + (1 .. 3, Unit::new(TextureUsage::SAMPLED)), + (3 .. 5, Unit::new(TextureUsage::SAMPLED)), + (5 .. 6, Unit::new(TextureUsage::STORAGE)), + ]); + assert_eq!( + ts.query(SubresourceRange { + aspects: Aspects::COLOR, + levels: 1 .. 2, + layers: 2 .. 5, + }), + // level 1 matches + Some(TextureUsage::SAMPLED), + ); + assert_eq!( + ts.query(SubresourceRange { + aspects: Aspects::DEPTH, + levels: 1 .. 2, + layers: 2 .. 
5, + }), + // no depth found + None, + ); + assert_eq!( + ts.query(SubresourceRange { + aspects: Aspects::COLOR, + levels: 0 .. 2, + layers: 2 .. 5, + }), + // level 0 is empty, level 1 matches + Some(TextureUsage::SAMPLED), + ); + assert_eq!( + ts.query(SubresourceRange { + aspects: Aspects::COLOR, + levels: 1 .. 2, + layers: 1 .. 5, + }), + // level 1 matches with gaps + Some(TextureUsage::SAMPLED), + ); + assert_eq!( + ts.query(SubresourceRange { + aspects: Aspects::COLOR, + levels: 1 .. 2, + layers: 4 .. 6, + }), + // level 1 doesn't match + None, + ); + } +} diff --git a/dom/webgpu/wgpu-remote/Cargo.toml b/dom/webgpu/wgpu-remote/Cargo.toml new file mode 100644 index 000000000000..1c35a47fde83 --- /dev/null +++ b/dom/webgpu/wgpu-remote/Cargo.toml @@ -0,0 +1,20 @@ +[package] +name = "wgpu-remote" +version = "0.1.0" +authors = [ + "Dzmitry Malyshau ", + "Joshua Groves ", +] +edition = "2018" + +[lib] +#crate-type = ["lib", "cdylib", "staticlib"] +crate-type = ["lib"] + +[features] +default = [] + +[dependencies] +wgn = { path = "../wgpu-native", package = "wgpu-native", version = "0.4" } +log = "0.4" +parking_lot = { version = "0.9" } diff --git a/dom/webgpu/wgpu-remote/cbindgen.toml b/dom/webgpu/wgpu-remote/cbindgen.toml new file mode 100644 index 000000000000..2d552f481912 --- /dev/null +++ b/dom/webgpu/wgpu-remote/cbindgen.toml @@ -0,0 +1,46 @@ +header = """/* This Source Code Form is subject to the terms of the Mozilla Public + * License, v. 2.0. If a copy of the MPL was not distributed with this + * file, You can obtain one at http://mozilla.org/MPL/2.0/. */""" +autogen_warning = """/* DO NOT MODIFY THIS MANUALLY! This file was generated using cbindgen. + * To generate this file: + * 1. Get the latest cbindgen using `cargo install --force cbindgen` + * a. Alternatively, you can clone `https://github.com/eqrion/cbindgen` and use a tagged release + * 2. 
Run `rustup run nightly cbindgen toolkit/library/rust/ --lockfile Cargo.lock --crate wgpu-remote -o dom/webgpu/ffi/wgpu_ffi_generated.h` + */ + +typedef void WGPUEmpty; +""" +include_version = true +braces = "SameLine" +line_length = 100 +tab_width = 2 +language = "C" + +[export] +prefix = "WGPU" +exclude = ["BufferMapResult"] + +[parse] +parse_deps = true +include = ["wgpu-native"] + +[fn] +prefix = "WGPU_INLINE" +postfix = "WGPU_FUNC" +args = "Vertical" +rename_args = "GeckoCase" + +[struct] +derive_eq = true + +[enum] +prefix_with_name = true +derive_helper_methods = true + +[macro_expansion] +bitflags = true + +[defines] +"target_os = windows" = "XP_WIN" +"target_os = macos" = "XP_MACOSX" +"target_os = android" = "ANDROID" diff --git a/dom/webgpu/wgpu-remote/src/lib.rs b/dom/webgpu/wgpu-remote/src/lib.rs new file mode 100644 index 000000000000..3153e28aa21e --- /dev/null +++ b/dom/webgpu/wgpu-remote/src/lib.rs @@ -0,0 +1,150 @@ + + + + +use wgn::{AdapterId, Backend, DeviceId, IdentityManager, SurfaceId}; + +use parking_lot::Mutex; + +use std::{ptr, slice}; + +pub mod server; + +#[derive(Debug)] +struct IdentityHub { + adapters: IdentityManager, + devices: IdentityManager, +} + +impl IdentityHub { + fn new(backend: Backend) -> Self { + IdentityHub { + adapters: IdentityManager::new(backend), + devices: IdentityManager::new(backend), + } + } +} + +#[derive(Debug)] +struct Identities { + surfaces: IdentityManager, + vulkan: IdentityHub, + #[cfg(any(target_os = "ios", target_os = "macos"))] + metal: IdentityHub, + #[cfg(windows)] + dx12: IdentityHub, +} + +impl Identities { + fn new() -> Self { + Identities { + surfaces: IdentityManager::new(Backend::Empty), + vulkan: IdentityHub::new(Backend::Vulkan), + #[cfg(any(target_os = "ios", target_os = "macos"))] + metal: IdentityHub::new(Backend::Metal), + #[cfg(windows)] + dx12: IdentityHub::new(Backend::Dx12), + } + } + + fn select(&mut self, backend: Backend) -> &mut IdentityHub { + match backend { + Backend::Vulkan => &mut self.vulkan, + #[cfg(any(target_os = "ios", target_os = "macos"))] + Backend::Metal => &mut self.metal, + #[cfg(windows)] + Backend::Dx12 => &mut self.dx12, + _ => panic!("Unexpected backend: {:?}", backend), + } + } +} + +#[derive(Debug)] +pub struct Client { + identities: Mutex, +} + +#[repr(C)] +#[derive(Debug)] +pub struct Infrastructure { + pub client: *mut Client, + pub error: *const u8, +} + +#[no_mangle] +pub extern "C" fn wgpu_client_new() -> Infrastructure { + log::info!("Initializing WGPU client"); + let client = Box::new(Client { + identities: Mutex::new(Identities::new()), + }); + Infrastructure { + client: Box::into_raw(client), + error: ptr::null(), + } +} + +#[no_mangle] +pub extern "C" fn wgpu_client_delete(client: *mut Client) { + log::info!("Terminating WGPU client"); + let _client = unsafe { Box::from_raw(client) }; +} + +#[no_mangle] +pub extern "C" fn wgpu_client_make_adapter_ids( + client: &Client, + ids: *mut wgn::AdapterId, + id_length: usize, +) -> usize { + let mut identities = client.identities.lock(); + assert_ne!(id_length, 0); + let mut ids = unsafe { slice::from_raw_parts_mut(ids, id_length) }.iter_mut(); + + *ids.next().unwrap() = identities.vulkan.adapters.alloc(); + + #[cfg(any(target_os = "ios", target_os = "macos"))] + { + *ids.next().unwrap() = identities.metal.adapters.alloc(); + } + #[cfg(windows)] + { + *ids.next().unwrap() = identities.dx12.adapters.alloc(); + } + + id_length - ids.len() +} + +#[no_mangle] +pub extern "C" fn wgpu_client_kill_adapter_ids( + client: &Client, + ids: 
*const wgn::AdapterId,
+    id_length: usize,
+) {
+    let mut identity = client.identities.lock();
+    let ids = unsafe { slice::from_raw_parts(ids, id_length) };
+    for &id in ids {
+        identity.select(id.backend()).adapters.free(id)
+    }
+}
+
+#[no_mangle]
+pub extern "C" fn wgpu_client_make_device_id(
+    client: &Client,
+    adapter_id: wgn::AdapterId,
+) -> wgn::DeviceId {
+    client
+        .identities
+        .lock()
+        .select(adapter_id.backend())
+        .devices
+        .alloc()
+}
+
+#[no_mangle]
+pub extern "C" fn wgpu_client_kill_device_id(client: &Client, id: wgn::DeviceId) {
+    client
+        .identities
+        .lock()
+        .select(id.backend())
+        .devices
+        .free(id)
+}
diff --git a/dom/webgpu/wgpu-remote/src/server.rs b/dom/webgpu/wgpu-remote/src/server.rs
new file mode 100644
index 000000000000..2bc2ad295acc
--- /dev/null
+++ b/dom/webgpu/wgpu-remote/src/server.rs
@@ -0,0 +1,53 @@
+
+
+
+
+use std::slice;
+
+#[no_mangle]
+pub extern "C" fn wgpu_server_new() -> *mut wgn::Global {
+    log::info!("Initializing WGPU server");
+    Box::into_raw(Box::new(wgn::Global::new("wgpu")))
+}
+
+#[no_mangle]
+pub extern "C" fn wgpu_server_delete(global: *mut wgn::Global) {
+    log::info!("Terminating WGPU server");
+    unsafe { Box::from_raw(global) }.delete();
+    log::info!("\t...done");
+}
+
+/// Request an adapter according to the specified options.
+/// Provide the list of IDs to pick from.
+///
+/// Returns the index in this list, or -1 if unable to pick.
+#[no_mangle]
+pub extern "C" fn wgpu_server_instance_request_adapter(
+    global: &wgn::Global,
+    desc: &wgn::RequestAdapterOptions,
+    ids: *const wgn::AdapterId,
+    id_length: usize,
+) -> i8 {
+    let ids = unsafe { slice::from_raw_parts(ids, id_length) };
+    match wgn::request_adapter(global, desc, ids) {
+        Some(id) => ids.iter().position(|&i| i == id).unwrap() as i8,
+        None => -1,
+    }
+}
+
+#[no_mangle]
+pub extern "C" fn wgpu_server_adapter_request_device(
+    global: &wgn::Global,
+    self_id: wgn::AdapterId,
+    desc: &wgn::DeviceDescriptor,
+    new_id: wgn::DeviceId,
+) {
+    use wgn::adapter_request_device as func;
+    wgn::gfx_select!(self_id => func(global, self_id, desc, new_id));
+}
+
+#[no_mangle]
+pub extern "C" fn wgpu_server_device_destroy(global: &wgn::Global, self_id: wgn::DeviceId) {
+    use wgn::device_destroy as func;
+    wgn::gfx_select!(self_id => func(global, self_id))
+}
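Note the adapter-selection handshake: the client pre-allocates one candidate id per compiled-in backend (`wgpu_client_make_adapter_ids`), and the server picks at most one and answers with its index in that list, with `-1` meaning no suitable adapter. A toy mirror of the index convention used by `wgpu_server_instance_request_adapter` (plain `u32` ids for the sketch):

```rust
// The server reports which of the caller-provided ids was instantiated,
// so the client can free the unused candidates afterwards.
fn pick(ids: &[u32], chosen: Option<u32>) -> i8 {
    match chosen {
        Some(id) => ids.iter().position(|&i| i == id).unwrap() as i8,
        None => -1,
    }
}

fn main() {
    let ids = [10, 11, 12]; // e.g. Vulkan, Metal, Dx12 candidates
    assert_eq!(pick(&ids, Some(11)), 1); // adapter with id 11 was selected
    assert_eq!(pick(&ids, None), -1);    // no suitable adapter
}
```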
diff --git a/dom/webidl/WebGPU.webidl b/dom/webidl/WebGPU.webidl
index 3f8444eef150..4f0c700fb8ea 100644
--- a/dom/webidl/WebGPU.webidl
+++ b/dom/webidl/WebGPU.webidl
@@ -57,8 +57,9 @@ dictionary GPUObjectDescriptorBase {
  Exposed=Window,
 ]
 interface GPU {
-    //[Exposed=Window]
-    //Promise<GPUAdapter> requestAdapter(optional GPURequestAdapterOptions options = {});
+    // May reject with DOMException
+    [NewObject]
+    Promise<GPUAdapter> requestAdapter(optional GPURequestAdapterOptions options = {});
 };
 
 // Add a "webgpu" member to Navigator/Worker that contains the global instance of a "WebGPU"
@@ -82,8 +83,9 @@ interface GPUAdapter {
     //GPUExtensions getExtensions();
     //readonly attribute GPULimits limits; Don't expose higher limits for now.
 
-    // May reject with DOMException // TODO: DOMException("OperationError")?
-    //Promise<GPUDevice> requestDevice(optional GPUDeviceDescriptor descriptor = {});
+    // May reject with DOMException
+    [NewObject]
+    Promise<GPUDevice> requestDevice(optional GPUDeviceDescriptor descriptor = {});
 };
 
 GPUAdapter includes GPUObjectBase;
diff --git a/gfx/ipc/GPUParent.cpp b/gfx/ipc/GPUParent.cpp
index a30b9707d2b5..1ecbd8ba36bd 100644
--- a/gfx/ipc/GPUParent.cpp
+++ b/gfx/ipc/GPUParent.cpp
@@ -22,7 +22,6 @@
 #include "mozilla/RemoteDecoderManagerChild.h"
 #include "mozilla/RemoteDecoderManagerParent.h"
 #include "mozilla/dom/MemoryReportRequest.h"
-#include "mozilla/webgpu/WebGPUThreading.h"
 #include "mozilla/gfx/2D.h"
 #include "mozilla/gfx/gfxVars.h"
 #include "mozilla/image/ImageMemoryReporter.h"
@@ -272,10 +271,6 @@ mozilla::ipc::IPCResult GPUParent::RecvInit(
   }
 #endif
 
-  if (gfxConfig::IsEnabled(Feature::WEBGPU)) {
-    webgpu::WebGPUThreading::Start();
-  }
-
   VRManager::ManagerInit();
 
   GPUDeviceData data;
@@ -559,10 +554,6 @@ void GPUParent::ActorDestroy(ActorDestroyReason aWhy) {
 
   image::ImageMemoryReporter::ShutdownForWebRender();
 
-  if (gfxConfig::IsEnabled(Feature::WEBGPU)) {
-    webgpu::WebGPUThreading::ShutDown();
-  }
-
   gl::GLContextProvider::Shutdown();
diff --git a/gfx/layers/ipc/CompositorBridgeChild.cpp b/gfx/layers/ipc/CompositorBridgeChild.cpp
index 664e75ca5aec..eb08a06dd3c0 100644
--- a/gfx/layers/ipc/CompositorBridgeChild.cpp
+++ b/gfx/layers/ipc/CompositorBridgeChild.cpp
@@ -30,8 +30,10 @@
 #include "mozilla/gfx/gfxVars.h"
 #include "mozilla/gfx/GPUProcessManager.h"
 #include "mozilla/gfx/Logging.h"
+#include "mozilla/webgpu/WebGPUChild.h"
 #include "mozilla/mozalloc.h"
 #include "mozilla/Telemetry.h"
+#include "gfxConfig.h"
 #include "nsAutoPtr.h"
 #include "nsDebug.h"
 #include "nsIObserver.h"
@@ -185,6 +187,12 @@ void CompositorBridgeChild::Destroy() {
     Unused << child->SendDestroy();
   }
 
+  AutoTArray<PWebGPUChild*, 16> webGPUChildren;
+  ManagedPWebGPUChild(webGPUChildren);
+  for (PWebGPUChild* child : webGPUChildren) {
+    Unused << child->SendShutdown();
+  }
+
   const ManagedContainer<PTextureChild>& textures = ManagedPTextureChild();
   for (auto iter = textures.ConstIter(); !iter.Done(); iter.Next()) {
     RefPtr<TextureClient> texture =
@@ -958,6 +966,16 @@ void CompositorBridgeChild::EndCanvasTransaction() {
   }
 }
 
+RefPtr<webgpu::WebGPUChild> CompositorBridgeChild::GetWebGPUChild() {
+  MOZ_ASSERT(gfx::gfxConfig::IsEnabled(gfx::Feature::WEBGPU));
+  if (!mWebGPUChild) {
+    webgpu::PWebGPUChild* bridge = SendPWebGPUConstructor();
+    mWebGPUChild = static_cast<webgpu::WebGPUChild*>(bridge);
+  }
+
+  return mWebGPUChild;
+}
+
 bool CompositorBridgeChild::AllocUnsafeShmem(
     size_t aSize, ipc::SharedMemory::SharedMemoryType aType, ipc::Shmem* aShmem) {
@@ -1048,6 +1066,18 @@ bool CompositorBridgeChild::DeallocPWebRenderBridgeChild(
   return true;
 }
 
+webgpu::PWebGPUChild* CompositorBridgeChild::AllocPWebGPUChild() {
+  webgpu::WebGPUChild* child = new webgpu::WebGPUChild();
+  child->AddIPDLReference();
+  return child;
+}
+
+bool CompositorBridgeChild::DeallocPWebGPUChild(webgpu::PWebGPUChild* aActor) {
+  webgpu::WebGPUChild* child = static_cast<webgpu::WebGPUChild*>(aActor);
+  child->ReleaseIPDLReference();
+  return true;
+}
+
 void CompositorBridgeChild::ClearSharedFrameMetricsData(LayersId aLayersId) {
   for (auto iter = mFrameMetricsTable.Iter(); !iter.Done(); iter.Next()) {
     nsAutoPtr<SharedFrameMetricsData>& data = iter.Data();
diff --git a/gfx/layers/ipc/CompositorBridgeChild.h b/gfx/layers/ipc/CompositorBridgeChild.h
index 0995a5361014..b592822ebe30 100644
--- a/gfx/layers/ipc/CompositorBridgeChild.h
+++ b/gfx/layers/ipc/CompositorBridgeChild.h
@@ -31,6 +31,11 @@ namespace dom {
 class BrowserChild;
 }
 
+namespace webgpu {
+class PWebGPUChild;
+class
WebGPUChild; +} + namespace widget { class CompositorWidget; } @@ -127,6 +132,8 @@ class CompositorBridgeChild final : public PCompositorBridgeChild, void EndCanvasTransaction(); + RefPtr GetWebGPUChild(); + @@ -221,6 +228,9 @@ class CompositorBridgeChild final : public PCompositorBridgeChild, const wr::PipelineId& aPipelineId, const LayoutDeviceIntSize&); bool DeallocPWebRenderBridgeChild(PWebRenderBridgeChild* aActor); + webgpu::PWebGPUChild* AllocPWebGPUChild(); + bool DeallocPWebGPUChild(webgpu::PWebGPUChild* aActor); + wr::MaybeExternalImageId GetNextExternalImageId() override; wr::PipelineId GetNextPipelineId(); @@ -399,6 +409,8 @@ class CompositorBridgeChild final : public PCompositorBridgeChild, uintptr_t mTotalFlushCount; RefPtr mCanvasChild; + + RefPtr mWebGPUChild; }; } diff --git a/gfx/layers/ipc/CompositorBridgeParent.cpp b/gfx/layers/ipc/CompositorBridgeParent.cpp index b7fa292d4222..f6c2c863b5b1 100644 --- a/gfx/layers/ipc/CompositorBridgeParent.cpp +++ b/gfx/layers/ipc/CompositorBridgeParent.cpp @@ -65,6 +65,7 @@ #include "mozilla/layers/WebRenderBridgeParent.h" #include "mozilla/layers/AsyncImagePipelineManager.h" #include "mozilla/webrender/WebRenderAPI.h" +#include "mozilla/webgpu/WebGPUParent.h" #include "mozilla/media/MediaSystemResourceService.h" #include "mozilla/mozalloc.h" #include "mozilla/PerfStats.h" @@ -1923,6 +1924,22 @@ bool CompositorBridgeParent::DeallocPWebRenderBridgeParent( return true; } +webgpu::PWebGPUParent* CompositorBridgeParent::AllocPWebGPUParent() { + MOZ_ASSERT(!mWebGPUBridge); + mWebGPUBridge = new webgpu::WebGPUParent(); + mWebGPUBridge.get()->AddRef(); + return mWebGPUBridge; +} + +bool CompositorBridgeParent::DeallocPWebGPUParent( + webgpu::PWebGPUParent* aActor) { + webgpu::WebGPUParent* parent = static_cast(aActor); + MOZ_ASSERT(mWebGPUBridge == parent); + parent->Release(); + mWebGPUBridge = nullptr; + return true; +} + void CompositorBridgeParent::NotifyMemoryPressure() { if (mWrBridge) { RefPtr api = diff --git a/gfx/layers/ipc/CompositorBridgeParent.h b/gfx/layers/ipc/CompositorBridgeParent.h index 12a370ac6363..ab06e0e0a419 100644 --- a/gfx/layers/ipc/CompositorBridgeParent.h +++ b/gfx/layers/ipc/CompositorBridgeParent.h @@ -53,6 +53,11 @@ namespace mozilla { class CancelableRunnable; +namespace webgpu { +class PWebGPUParent; +class WebGPUParent; +} + namespace gfx { class DrawTarget; class GPUProcessManager; @@ -232,6 +237,9 @@ class CompositorBridgeParentBase : public PCompositorBridgeParent, virtual bool DeallocPWebRenderBridgeParent( PWebRenderBridgeParent* aActor) = 0; + virtual webgpu::PWebGPUParent* AllocPWebGPUParent() = 0; + virtual bool DeallocPWebGPUParent(webgpu::PWebGPUParent* aActor) = 0; + virtual PCompositorWidgetParent* AllocPCompositorWidgetParent( const CompositorWidgetInitData& aInitData) = 0; virtual bool DeallocPCompositorWidgetParent( @@ -646,6 +654,9 @@ class CompositorBridgeParent final : public CompositorBridgeParentBase, RefPtr GetWebRenderBridgeParent() const; Maybe GetTestingTimeStamp() const; + webgpu::PWebGPUParent* AllocPWebGPUParent() override; + bool DeallocPWebGPUParent(webgpu::PWebGPUParent* aActor) override; + static CompositorBridgeParent* GetCompositorBridgeParentFromLayersId( const LayersId& aLayersId); static RefPtr GetCompositorBridgeParentFromWindowId( @@ -665,6 +676,8 @@ class CompositorBridgeParent final : public CompositorBridgeParentBase, WebRenderBridgeParent* GetWrBridge() { return mWrBridge; } + webgpu::WebGPUParent* GetWebGPUBridge() { return mWebGPUBridge; } + private: void 
Initialize(); @@ -766,6 +779,7 @@ class CompositorBridgeParent final : public CompositorBridgeParentBase, RefPtr mCompositionManager; RefPtr mAsyncImageManager; RefPtr mWrBridge; + RefPtr mWebGPUBridge; widget::CompositorWidget* mWidget; Maybe mTestTime; CSSToLayoutDeviceScale mScale; diff --git a/gfx/layers/ipc/ContentCompositorBridgeParent.cpp b/gfx/layers/ipc/ContentCompositorBridgeParent.cpp index aaf9a2a868a8..248707e4ff82 100644 --- a/gfx/layers/ipc/ContentCompositorBridgeParent.cpp +++ b/gfx/layers/ipc/ContentCompositorBridgeParent.cpp @@ -31,6 +31,7 @@ #include "mozilla/layers/RemoteContentController.h" #include "mozilla/layers/WebRenderBridgeParent.h" #include "mozilla/layers/AsyncImagePipelineManager.h" +#include "mozilla/webgpu/WebGPUParent.h" #include "mozilla/mozalloc.h" #include "nsDebug.h" #include "nsTArray.h" @@ -271,6 +272,19 @@ bool ContentCompositorBridgeParent::DeallocPWebRenderBridgeParent( return true; } +webgpu::PWebGPUParent* ContentCompositorBridgeParent::AllocPWebGPUParent() { + webgpu::WebGPUParent* parent = new webgpu::WebGPUParent(); + parent->AddRef(); + return parent; +} + +bool ContentCompositorBridgeParent::DeallocPWebGPUParent( + webgpu::PWebGPUParent* aActor) { + webgpu::WebGPUParent* parent = static_cast(aActor); + parent->Release(); + return true; +} + mozilla::ipc::IPCResult ContentCompositorBridgeParent::RecvNotifyChildCreated( const LayersId& child, CompositorOptions* aOptions) { MonitorAutoLock lock(*sIndirectLayerTreesLock); diff --git a/gfx/layers/ipc/ContentCompositorBridgeParent.h b/gfx/layers/ipc/ContentCompositorBridgeParent.h index 0effca4ceb00..f74d339380f3 100644 --- a/gfx/layers/ipc/ContentCompositorBridgeParent.h +++ b/gfx/layers/ipc/ContentCompositorBridgeParent.h @@ -12,6 +12,10 @@ #include "mozilla/UniquePtr.h" namespace mozilla { +namespace webgpu { +class PWebGPUParent; +} + namespace layers { class CanvasParent; @@ -206,6 +210,9 @@ class ContentCompositorBridgeParent final : public CompositorBridgeParentBase { const LayoutDeviceIntSize& aSize) override; bool DeallocPWebRenderBridgeParent(PWebRenderBridgeParent* aActor) override; + webgpu::PWebGPUParent* AllocPWebGPUParent() override; + bool DeallocPWebGPUParent(webgpu::PWebGPUParent* aActor) override; + void ObserveLayersUpdate(LayersId aLayersId, LayersObserverEpoch aEpoch, bool aActive) override; diff --git a/gfx/layers/ipc/PCompositorBridge.ipdl b/gfx/layers/ipc/PCompositorBridge.ipdl index 39649a64b4e6..137bd824201b 100644 --- a/gfx/layers/ipc/PCompositorBridge.ipdl +++ b/gfx/layers/ipc/PCompositorBridge.ipdl @@ -18,6 +18,7 @@ include protocol PCompositorWidget; include protocol PLayerTransaction; include protocol PTexture; include protocol PWebRenderBridge; +include protocol PWebGPU; include "mozilla/GfxMessageUtils.h"; include "mozilla/layers/LayersMessageUtils.h"; include "mozilla/layers/WebRenderMessageUtils.h"; @@ -101,6 +102,7 @@ sync refcounted protocol PCompositorBridge manages PTexture; manages PCompositorWidget; manages PWebRenderBridge; + manages PWebGPU; child: // The child should invalidate retained layers. This is used for local @@ -166,6 +168,10 @@ parent: async PAPZ(LayersId layersId); async PAPZCTreeManager(LayersId layersId); + // Constructor for WebGPU IPDL + // Must be called before Initialize(). + async PWebGPU(); + /** * Confirmation callback for UpdatePluginConfigurations and HideAllPlugins. 
*/ diff --git a/gfx/thebes/gfxPlatform.cpp b/gfx/thebes/gfxPlatform.cpp index 3c9bccae2f34..48ce867c4cfe 100644 --- a/gfx/thebes/gfxPlatform.cpp +++ b/gfx/thebes/gfxPlatform.cpp @@ -986,6 +986,8 @@ void gfxPlatform::Init() { gPlatform->InitAcceleration(); gPlatform->InitWebRenderConfig(); + gPlatform->InitWebGPUConfig(); + diff --git a/servo/components/style/Cargo.toml b/servo/components/style/Cargo.toml index 626c9ee43315..764beee78802 100644 --- a/servo/components/style/Cargo.toml +++ b/servo/components/style/Cargo.toml @@ -28,7 +28,7 @@ gecko_profiler = [] [dependencies] app_units = "0.7" -arrayvec = "0.4.6" +arrayvec = "0.5" atomic_refcell = "0.1" bitflags = "1.0" byteorder = "1.0" @@ -76,7 +76,7 @@ thin-slice = "0.1.0" to_shmem = {path = "../to_shmem"} to_shmem_derive = {path = "../to_shmem_derive"} time = "0.1" -uluru = "0.3" +uluru = "0.4" unicode-bidi = "0.3" unicode-segmentation = "1.0" void = "1.0.2" diff --git a/third_party/rust/arrayvec/.cargo-checksum.json b/third_party/rust/arrayvec/.cargo-checksum.json index acec798be47e..f8e8231da7c6 100644 --- a/third_party/rust/arrayvec/.cargo-checksum.json +++ b/third_party/rust/arrayvec/.cargo-checksum.json @@ -1 +1 @@ -{"files":{"Cargo.toml":"650501cb36febb1a25ce0218a5ccaeb01343f89389a97a43a50e4fd6e5a97a47","LICENSE-APACHE":"a60eea817514531668d7e00765731449fe14d059d3249e0bc93b36de45f759f2","LICENSE-MIT":"0245ee104228a100ce5fceecf43e25faae450494d9173f43fd94c27d69fdac13","README.rst":"d8cac600ed199c2b77606f76c77cc201d93dfaf750e16b2dd76b2dcde1107bd0","benches/arraystring.rs":"f12b890977117ebde4ca42bcd6b91f2a6a087f2b235aaca6d15e30d125ae9f67","benches/extend.rs":"8c8f78df7e90b62c7e160cf5ea6c61b90bc4035a9704b6a179a1e01d8fafe2e9","build.rs":"fc29930f06cb4dde58f43d2f30b28c366ca3bafcd7e44b41a1c250d60fa900fb","custom.css":"e6f2cd299392337b4e2959c52f422e5b7be11920ea98d10db44d10ddef5ed47c","src/array.rs":"67fb063ee515bfd4968ede219dff81091a5935ef93529ebd1bb2a716ea3ed3d3","src/array_string.rs":"8a1a4cfc1699e2373815e57dc676a87a30629f91a9e861c866ccc6cb1381eadf","src/char.rs":"64a08f6a743b67bf2c96483f91c2fdaea79f6e91df5cd752f770b16a6b1d5b1e","src/errors.rs":"dde99bffaddfd45396aab7e07642cc018ef5435fe60c4f26a2c05a36555be18c","src/lib.rs":"566db78e5352be102d910e5826bb66cf3a4c4a5e9c68223d4e834c2793edcfc1","src/maybe_uninit.rs":"7cca39ffe0f122716baaa174b433ff5fe9c93560f8e54fc077a0083500eaa1dd","src/maybe_uninit_nodrop.rs":"7fb2e24bf815dd6e1d104056fa9be4a11de7e0f0e5474742af186c580a6b47cc","src/maybe_uninit_stable.rs":"3f7daba622cf5df86992b451b46636a491c9611292f59969eb6890a10a00476d","src/range.rs":"65744ab7def208a1ab155ea2448fe9ea7fc14f33211361b1041f540125b32efd","tests/serde.rs":"ef3986a82656b09f3fbb14358e767051ffabe09592c61e69ea695cb88760e8ba","tests/tests.rs":"8066a4aca7b40356525ed87f7658773e610ef4fce3522b0cc0f301384d880f00"},"package":"b8d73f9beda665eaa98ab9e4f7442bd4e7de6652587de55b2525e52e29c1b0ba"} \ No newline at end of file 
+{"files":{"Cargo.toml":"e7405a91fea075bb4fedb0e76e2039af27d6c380beaa31150f37655d79a7a3ab","LICENSE-APACHE":"a60eea817514531668d7e00765731449fe14d059d3249e0bc93b36de45f759f2","LICENSE-MIT":"0245ee104228a100ce5fceecf43e25faae450494d9173f43fd94c27d69fdac13","README.rst":"8fab86c3c759d153a1a8a48e5f7f48546c898f0ec91433001c57fe0002af6455","benches/arraystring.rs":"f12b890977117ebde4ca42bcd6b91f2a6a087f2b235aaca6d15e30d125ae9f67","benches/extend.rs":"c3d69cc488ec5341b019cfed545ebbfea252f98718037b413f6a349da9489d1b","custom.css":"e6f2cd299392337b4e2959c52f422e5b7be11920ea98d10db44d10ddef5ed47c","src/array.rs":"8a42b3ff7a5a0713e8ee22462f303b0ce15bdc49a9fd5eb64f58e56855bdf944","src/array_string.rs":"fdcc24f0fd07e781b378f5d0190279e6d9c89b422f67e546ae443c602f967896","src/char.rs":"40af597d93895f206abcd33953b5d3d5a512d3b16ff5f96e492e659d9cca4209","src/errors.rs":"dde99bffaddfd45396aab7e07642cc018ef5435fe60c4f26a2c05a36555be18c","src/lib.rs":"4c00e50b532aec68b52fde4a737b7b5980b0cfb28f5c09ab8408d04896895a87","src/maybe_uninit.rs":"00659a86e8f84852d4355077a16beceaad0440ac0e81851fbac712fdb1850622","tests/serde.rs":"18c165cf6024f04a25b19aa139657d7c59f72d1541c9b24b44f9eaea01f507db","tests/tests.rs":"9633b92fe6c650b9b816cecac23b9c9e6a0365b1f67d4f0bfaad9e645e2bdc49"},"package":"cff77d8686867eceff3105329d4698d96c2391c176d5d03adc90c7389162b5b8"} \ No newline at end of file diff --git a/third_party/rust/arrayvec/Cargo.toml b/third_party/rust/arrayvec/Cargo.toml index b6c938a0ec79..2a42a3f89b5a 100644 --- a/third_party/rust/arrayvec/Cargo.toml +++ b/third_party/rust/arrayvec/Cargo.toml @@ -11,8 +11,9 @@ # will likely look very different (and much more reasonable) [package] +edition = "2018" name = "arrayvec" -version = "0.4.11" +version = "0.5.1" authors = ["bluss"] description = "A vector with fixed capacity, backed by an array (it can be stored on the stack too). Implements fixed capacity ArrayVec and ArrayString." documentation = "https://docs.rs/arrayvec/" @@ -21,10 +22,16 @@ categories = ["data-structures", "no-std"] license = "MIT/Apache-2.0" repository = "https://github.com/bluss/arrayvec" [package.metadata.docs.rs] -features = ["serde-1"] +features = ["serde"] [package.metadata.release] no-dev-version = true +tag-name = "{{version}}" +[profile.bench] +debug = true + +[profile.release] +debug = true [[bench]] name = "extend" @@ -33,10 +40,6 @@ harness = false [[bench]] name = "arraystring" harness = false -[dependencies.nodrop] -version = "0.1.12" -default-features = false - [dependencies.serde] version = "1.0" optional = true @@ -56,6 +59,4 @@ version = "1.0" array-sizes-129-255 = [] array-sizes-33-128 = [] default = ["std"] -serde-1 = ["serde"] std = [] -use_union = [] diff --git a/third_party/rust/arrayvec/README.rst b/third_party/rust/arrayvec/README.rst index 6f6476fede74..a8bb1db00a92 100644 --- a/third_party/rust/arrayvec/README.rst +++ b/third_party/rust/arrayvec/README.rst @@ -22,6 +22,46 @@ __ https://docs.rs/arrayvec Recent Changes (arrayvec) ------------------------- +- 0.5.1 + + - Add ``as_ptr``, ``as_mut_ptr`` accessors directly on the ``ArrayVec`` by @tbu- + (matches the same addition to ``Vec`` which happened in Rust 1.37). + - Add method ``ArrayString::len`` (now available directly, not just through deref to str). + - Use raw pointers instead of ``&mut [u8]`` for encoding chars into ``ArrayString`` + (uninit best practice fix). 
+ - Use raw pointers instead of ``get_unchecked_mut`` where the target may be + uninitialized a everywhere relevant in the ArrayVec implementation + (uninit best practice fix). + - Changed inline hints on many methods, mainly removing inline hints + - ``ArrayVec::dispose`` is now deprecated (it has no purpose anymore) + +- 0.4.12 + + - Use raw pointers instead of ``get_unchecked_mut`` where the target may be + uninitialized a everywhere relevant in the ArrayVec implementation. + +- 0.5.0 + + - Use ``MaybeUninit`` (now unconditionally) in the implementation of + ``ArrayVec`` + - Use ``MaybeUninit`` (now unconditionally) in the implementation of + ``ArrayString`` + - The crate feature for serde serialization is now named ``serde``. + - Updated the ``Array`` trait interface, and it is now easier to use for + users outside the crate. + - Add ``FromStr`` impl for ``ArrayString`` by @despawnerer + - Add method ``try_extend_from_slice`` to ``ArrayVec``, which is always + effecient by @Thomasdezeeuw. + - Add method ``remaining_capacity`` by @Thomasdezeeuw + - Improve performance of the ``extend`` method. + - The index type of zero capacity vectors is now itself zero size, by + @clarfon + - Use ``drop_in_place`` for truncate and clear methods. This affects drop order + and resume from panic during drop. + - Use Rust 2018 edition for the implementation + - Require Rust 1.36 or later, for the unconditional ``MaybeUninit`` + improvements. + - 0.4.11 - In Rust 1.36 or later, use newly stable MaybeUninit. This extends the diff --git a/third_party/rust/arrayvec/benches/extend.rs b/third_party/rust/arrayvec/benches/extend.rs index d380a7ed247c..0579717696c2 100644 --- a/third_party/rust/arrayvec/benches/extend.rs +++ b/third_party/rust/arrayvec/benches/extend.rs @@ -2,17 +2,21 @@ extern crate arrayvec; #[macro_use] extern crate bencher; +use std::io::Write; + use arrayvec::ArrayVec; use bencher::Bencher; +use bencher::black_box; fn extend_with_constant(b: &mut Bencher) { let mut v = ArrayVec::<[u8; 512]>::new(); let cap = v.capacity(); b.iter(|| { v.clear(); - v.extend((0..cap).map(|_| 1)); - v[0] + let constant = black_box(1); + v.extend((0..cap).map(move |_| constant)); + v[511] }); b.bytes = v.capacity() as u64; } @@ -22,8 +26,9 @@ fn extend_with_range(b: &mut Bencher) { let cap = v.capacity(); b.iter(|| { v.clear(); - v.extend((0..cap).map(|x| x as _)); - v[0] + let range = 0..cap; + v.extend(range.map(|x| black_box(x as _))); + v[511] }); b.bytes = v.capacity() as u64; } @@ -33,11 +38,41 @@ fn extend_with_slice(b: &mut Bencher) { let data = [1; 512]; b.iter(|| { v.clear(); - v.extend(data.iter().cloned()); - v[0] + let iter = data.iter().map(|&x| x); + v.extend(iter); + v[511] + }); + b.bytes = v.capacity() as u64; +} + +fn extend_with_write(b: &mut Bencher) { + let mut v = ArrayVec::<[u8; 512]>::new(); + let data = [1; 512]; + b.iter(|| { + v.clear(); + v.write(&data[..]).ok(); + v[511] }); b.bytes = v.capacity() as u64; } -benchmark_group!(benches, extend_with_constant, extend_with_range, extend_with_slice); +fn extend_from_slice(b: &mut Bencher) { + let mut v = ArrayVec::<[u8; 512]>::new(); + let data = [1; 512]; + b.iter(|| { + v.clear(); + v.try_extend_from_slice(&data).ok(); + v[511] + }); + b.bytes = v.capacity() as u64; +} + +benchmark_group!(benches, + extend_with_constant, + extend_with_range, + extend_with_slice, + extend_with_write, + extend_from_slice +); + benchmark_main!(benches); diff --git a/third_party/rust/arrayvec/build.rs b/third_party/rust/arrayvec/build.rs deleted file mode 
100644 index 0fd104ec249b..000000000000 --- a/third_party/rust/arrayvec/build.rs +++ /dev/null @@ -1,90 +0,0 @@ - -use std::env; -use std::io::Write; -use std::process::{Command, Stdio}; - -fn main() { - - println!("cargo:rerun-if-changed=build.rs"); - - detect_maybe_uninit(); -} - -fn detect_maybe_uninit() { - let has_stable_maybe_uninit = probe(&stable_maybe_uninit()); - if has_stable_maybe_uninit { - println!("cargo:rustc-cfg=has_stable_maybe_uninit"); - return; - } - let has_unstable_union_with_md = probe(&maybe_uninit_code(true)); - if has_unstable_union_with_md { - println!("cargo:rustc-cfg=has_manually_drop_in_union"); - println!("cargo:rustc-cfg=has_union_feature"); - } -} - - - -fn stable_maybe_uninit() -> String { - let code = " - #![allow(warnings)] - use std::mem::MaybeUninit; - - fn main() { } - "; - code.to_string() -} - - - -fn maybe_uninit_code(use_feature: bool) -> String { - let feature = if use_feature { "#![feature(untagged_unions)]" } else { "" }; - - let code = " - #![allow(warnings)] - use std::mem::ManuallyDrop; - - #[derive(Copy)] - pub union MaybeUninit { - empty: (), - value: ManuallyDrop, - } - - impl Clone for MaybeUninit where T: Copy - { - fn clone(&self) -> Self { *self } - } - - fn main() { - let value1 = MaybeUninit::<[i32; 3]> { empty: () }; - let value2 = MaybeUninit { value: ManuallyDrop::new([1, 2, 3]) }; - } - "; - - - [feature, code].concat() -} - - -fn probe(code: &str) -> bool { - let rustc = env::var_os("RUSTC").unwrap_or_else(|| "rustc".into()); - let out_dir = env::var_os("OUT_DIR").expect("environment variable OUT_DIR"); - - let mut child = Command::new(rustc) - .arg("--out-dir") - .arg(out_dir) - .arg("--emit=obj") - .arg("-") - .stdin(Stdio::piped()) - .spawn() - .expect("rustc probe"); - - child - .stdin - .as_mut() - .expect("rustc stdin") - .write_all(code.as_bytes()) - .expect("write rustc stdin"); - - child.wait().expect("rustc probe").success() -} diff --git a/third_party/rust/arrayvec/src/array.rs b/third_party/rust/arrayvec/src/array.rs index 4e56870aab04..b660ba0f61ba 100644 --- a/third_party/rust/arrayvec/src/array.rs +++ b/third_party/rust/arrayvec/src/array.rs @@ -9,40 +9,48 @@ + + + + + + + + + pub unsafe trait Array { type Item; - #[doc(hidden)] - type Index: Index; - #[doc(hidden)] - fn as_ptr(&self) -> *const Self::Item; #[doc(hidden)] - fn as_mut_ptr(&mut self) -> *mut Self::Item; - #[doc(hidden)] - fn capacity() -> usize; + type Index: Index; + + const CAPACITY: usize; + fn as_slice(&self) -> &[Self::Item]; + fn as_mut_slice(&mut self) -> &mut [Self::Item]; } pub trait Index : PartialEq + Copy { fn to_usize(self) -> usize; - fn from(usize) -> Self; + fn from(_: usize) -> Self; } -use std::slice::{from_raw_parts}; - -pub trait ArrayExt : Array { +impl Index for () { #[inline(always)] - fn as_slice(&self) -> &[Self::Item] { - unsafe { - from_raw_parts(self.as_ptr(), Self::capacity()) - } - } + fn to_usize(self) -> usize { 0 } + #[inline(always)] + fn from(_ix: usize) -> Self { () } } -impl ArrayExt for A where A: Array { } +impl Index for bool { + #[inline(always)] + fn to_usize(self) -> usize { self as usize } + #[inline(always)] + fn from(ix: usize) -> Self { ix != 0 } +} impl Index for u8 { #[inline(always)] @@ -77,15 +85,11 @@ macro_rules! 
fix_array_impl { unsafe impl Array for [T; $len] { type Item = T; type Index = $index_type; + const CAPACITY: usize = $len; #[doc(hidden)] - #[inline(always)] - fn as_ptr(&self) -> *const T { self as *const _ as *const _ } - #[doc(hidden)] - #[inline(always)] - fn as_mut_ptr(&mut self) -> *mut T { self as *mut _ as *mut _} + fn as_slice(&self) -> &[Self::Item] { self } #[doc(hidden)] - #[inline(always)] - fn capacity() -> usize { $len } + fn as_mut_slice(&mut self) -> &mut [Self::Item] { self } } ) } @@ -97,7 +101,10 @@ macro_rules! fix_array_impl_recursive { ); } -fix_array_impl_recursive!(u8, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, + +fix_array_impl_recursive!((), 0,); +fix_array_impl_recursive!(bool, 1,); +fix_array_impl_recursive!(u8, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, ); diff --git a/third_party/rust/arrayvec/src/array_string.rs b/third_party/rust/arrayvec/src/array_string.rs index 750e6f947f29..6adaeb664528 100644 --- a/third_party/rust/arrayvec/src/array_string.rs +++ b/third_party/rust/arrayvec/src/array_string.rs @@ -2,21 +2,23 @@ use std::borrow::Borrow; use std::cmp; use std::fmt; use std::hash::{Hash, Hasher}; -use std::mem; use std::ptr; use std::ops::{Deref, DerefMut}; use std::str; +use std::str::FromStr; use std::str::Utf8Error; use std::slice; -use array::{Array, ArrayExt}; -use array::Index; -use CapacityError; -use char::encode_utf8; +use crate::array::Array; +use crate::array::Index; +use crate::CapacityError; +use crate::char::encode_utf8; -#[cfg(feature="serde-1")] +#[cfg(feature="serde")] use serde::{Serialize, Deserialize, Serializer, Deserializer}; +use super::MaybeUninit as MaybeUninitCopy; + @@ -25,20 +27,25 @@ use serde::{Serialize, Deserialize, Serializer, Deserializer}; #[derive(Copy)] -pub struct ArrayString> { - - xs: A, +pub struct ArrayString + where A: Array + Copy +{ + xs: MaybeUninitCopy, len: A::Index, } -impl> Default for ArrayString { +impl Default for ArrayString + where A: Array + Copy +{ fn default() -> ArrayString { ArrayString::new() } } -impl> ArrayString { +impl ArrayString + where A: Array + Copy +{ @@ -54,14 +61,17 @@ impl> ArrayString { pub fn new() -> ArrayString { unsafe { ArrayString { - - xs: mem::zeroed(), + xs: MaybeUninitCopy::uninitialized(), len: Index::from(0), } } } + #[inline] + pub fn len(&self) -> usize { self.len.to_usize() } + + @@ -91,11 +101,12 @@ impl> ArrayString { pub fn from_byte_string(b: &A) -> Result { - let mut arraystr = Self::new(); - let s = try!(str::from_utf8(b.as_slice())); - let _result = arraystr.try_push_str(s); - debug_assert!(_result.is_ok()); - Ok(arraystr) + let len = str::from_utf8(b.as_slice())?.len(); + debug_assert_eq!(len, A::CAPACITY); + Ok(ArrayString { + xs: MaybeUninitCopy::from(*b), + len: Index::from(A::CAPACITY), + }) } @@ -106,8 +117,8 @@ impl> ArrayString { - #[inline] - pub fn capacity(&self) -> usize { A::capacity() } + #[inline(always)] + pub fn capacity(&self) -> usize { A::CAPACITY } @@ -160,7 +171,9 @@ impl> ArrayString { pub fn try_push(&mut self, c: char) -> Result<(), CapacityError> { let len = self.len(); unsafe { - match encode_utf8(c, &mut self.raw_mut_bytes()[len..]) { + let ptr = self.xs.ptr_mut().add(len); + let remaining_cap = self.capacity() - len; + match encode_utf8(c, ptr, remaining_cap) { Ok(n) => { self.set_len(len + n); Ok(()) @@ -213,7 +226,7 @@ impl> ArrayString { return Err(CapacityError::new(s)); } unsafe { - let dst = self.xs.as_mut_ptr().offset(self.len() as isize); + let 
dst = self.xs.ptr_mut().offset(self.len() as isize); let src = s.as_ptr(); ptr::copy_nonoverlapping(src, dst, s.len()); let newl = self.len() + s.len(); @@ -237,7 +250,6 @@ impl> ArrayString { - #[inline] pub fn pop(&mut self) -> Option { let ch = match self.chars().rev().next() { Some(ch) => ch, @@ -266,7 +278,6 @@ impl> ArrayString { - #[inline] pub fn truncate(&mut self, new_len: usize) { if new_len <= self.len() { assert!(self.is_char_boundary(new_len)); @@ -297,7 +308,6 @@ impl> ArrayString { - #[inline] pub fn remove(&mut self, idx: usize) -> char { let ch = match self[idx..].chars().next() { Some(ch) => ch, @@ -307,8 +317,8 @@ impl> ArrayString { let next = idx + ch.len_utf8(); let len = self.len(); unsafe { - ptr::copy(self.xs.as_ptr().offset(next as isize), - self.xs.as_mut_ptr().offset(idx as isize), + ptr::copy(self.xs.ptr().offset(next as isize), + self.xs.ptr_mut().offset(idx as isize), len - next); self.set_len(len - (next - idx)); } @@ -329,7 +339,6 @@ impl> ArrayString { - #[inline] pub unsafe fn set_len(&mut self, length: usize) { debug_assert!(length <= self.capacity()); self.len = Index::from(length); @@ -339,79 +348,97 @@ impl> ArrayString { pub fn as_str(&self) -> &str { self } - - - unsafe fn raw_mut_bytes(&mut self) -> &mut [u8] { - slice::from_raw_parts_mut(self.xs.as_mut_ptr(), self.capacity()) - } } -impl> Deref for ArrayString { +impl Deref for ArrayString + where A: Array + Copy +{ type Target = str; #[inline] fn deref(&self) -> &str { unsafe { - let sl = slice::from_raw_parts(self.xs.as_ptr(), self.len.to_usize()); + let sl = slice::from_raw_parts(self.xs.ptr(), self.len.to_usize()); str::from_utf8_unchecked(sl) } } } -impl> DerefMut for ArrayString { +impl DerefMut for ArrayString + where A: Array + Copy +{ #[inline] fn deref_mut(&mut self) -> &mut str { unsafe { - let sl = slice::from_raw_parts_mut(self.xs.as_mut_ptr(), self.len.to_usize()); - - mem::transmute(sl) + let sl = slice::from_raw_parts_mut(self.xs.ptr_mut(), self.len.to_usize()); + str::from_utf8_unchecked_mut(sl) } } } -impl> PartialEq for ArrayString { +impl PartialEq for ArrayString + where A: Array + Copy +{ fn eq(&self, rhs: &Self) -> bool { **self == **rhs } } -impl> PartialEq for ArrayString { +impl PartialEq for ArrayString + where A: Array + Copy +{ fn eq(&self, rhs: &str) -> bool { &**self == rhs } } -impl> PartialEq> for str { +impl PartialEq> for str + where A: Array + Copy +{ fn eq(&self, rhs: &ArrayString) -> bool { self == &**rhs } } -impl> Eq for ArrayString { } +impl Eq for ArrayString + where A: Array + Copy +{ } -impl> Hash for ArrayString { +impl Hash for ArrayString + where A: Array + Copy +{ fn hash(&self, h: &mut H) { (**self).hash(h) } } -impl> Borrow for ArrayString { +impl Borrow for ArrayString + where A: Array + Copy +{ fn borrow(&self) -> &str { self } } -impl> AsRef for ArrayString { +impl AsRef for ArrayString + where A: Array + Copy +{ fn as_ref(&self) -> &str { self } } -impl> fmt::Debug for ArrayString { +impl fmt::Debug for ArrayString + where A: Array + Copy +{ fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { (**self).fmt(f) } } -impl> fmt::Display for ArrayString { +impl fmt::Display for ArrayString + where A: Array + Copy +{ fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { (**self).fmt(f) } } -impl> fmt::Write for ArrayString { +impl fmt::Write for ArrayString + where A: Array + Copy +{ fn write_char(&mut self, c: char) -> fmt::Result { self.try_push(c).map_err(|_| fmt::Error) } @@ -421,7 +448,9 @@ impl> fmt::Write for ArrayString { } } -impl + 
Copy> Clone for ArrayString { +impl Clone for ArrayString + where A: Array + Copy +{ fn clone(&self) -> ArrayString { *self } @@ -432,7 +461,9 @@ impl + Copy> Clone for ArrayString { } } -impl> PartialOrd for ArrayString { +impl PartialOrd for ArrayString + where A: Array + Copy +{ fn partial_cmp(&self, rhs: &Self) -> Option { (**self).partial_cmp(&**rhs) } @@ -442,7 +473,9 @@ impl> PartialOrd for ArrayString { fn ge(&self, rhs: &Self) -> bool { **self >= **rhs } } -impl> PartialOrd for ArrayString { +impl PartialOrd for ArrayString + where A: Array + Copy +{ fn partial_cmp(&self, rhs: &str) -> Option { (**self).partial_cmp(rhs) } @@ -452,7 +485,9 @@ impl> PartialOrd for ArrayString { fn ge(&self, rhs: &str) -> bool { &**self >= rhs } } -impl> PartialOrd> for str { +impl PartialOrd> for str + where A: Array + Copy +{ fn partial_cmp(&self, rhs: &ArrayString) -> Option { self.partial_cmp(&**rhs) } @@ -462,15 +497,29 @@ impl> PartialOrd> for str { fn ge(&self, rhs: &ArrayString) -> bool { self >= &**rhs } } -impl> Ord for ArrayString { +impl Ord for ArrayString + where A: Array + Copy +{ fn cmp(&self, rhs: &Self) -> cmp::Ordering { (**self).cmp(&**rhs) } } -#[cfg(feature="serde-1")] +impl FromStr for ArrayString + where A: Array + Copy +{ + type Err = CapacityError; + + fn from_str(s: &str) -> Result { + Self::from(s).map_err(CapacityError::simplify) + } +} + +#[cfg(feature="serde")] -impl> Serialize for ArrayString { +impl Serialize for ArrayString + where A: Array + Copy +{ fn serialize(&self, serializer: S) -> Result where S: Serializer { @@ -478,9 +527,11 @@ impl> Serialize for ArrayString { } } -#[cfg(feature="serde-1")] +#[cfg(feature="serde")] -impl<'de, A: Array> Deserialize<'de> for ArrayString { +impl<'de, A> Deserialize<'de> for ArrayString + where A: Array + Copy +{ fn deserialize(deserializer: D) -> Result where D: Deserializer<'de> { @@ -489,11 +540,11 @@ impl<'de, A: Array> Deserialize<'de> for ArrayString { struct ArrayStringVisitor>(PhantomData); - impl<'de, A: Array> Visitor<'de> for ArrayStringVisitor { + impl<'de, A: Copy + Array> Visitor<'de> for ArrayStringVisitor { type Value = ArrayString; fn expecting(&self, formatter: &mut fmt::Formatter) -> fmt::Result { - write!(formatter, "a string no more than {} bytes long", A::capacity()) + write!(formatter, "a string no more than {} bytes long", A::CAPACITY) } fn visit_str(self, v: &str) -> Result @@ -505,7 +556,7 @@ impl<'de, A: Array> Deserialize<'de> for ArrayString { fn visit_bytes(self, v: &[u8]) -> Result where E: de::Error, { - let s = try!(str::from_utf8(v).map_err(|_| E::invalid_value(de::Unexpected::Bytes(v), &self))); + let s = str::from_utf8(v).map_err(|_| E::invalid_value(de::Unexpected::Bytes(v), &self))?; ArrayString::from(s).map_err(|_| E::invalid_length(s.len(), &self)) } diff --git a/third_party/rust/arrayvec/src/char.rs b/third_party/rust/arrayvec/src/char.rs index 12a6c1b114fd..65b0fda0e25b 100644 --- a/third_party/rust/arrayvec/src/char.rs +++ b/third_party/rust/arrayvec/src/char.rs @@ -10,6 +10,8 @@ +use std::ptr; + const TAG_CONT: u8 = 0b1000_0000; const TAG_TWO_B: u8 = 0b1100_0000; @@ -22,33 +24,75 @@ const MAX_THREE_B: u32 = 0x10000; pub struct EncodeUtf8Error; +#[inline] +unsafe fn write(ptr: *mut u8, index: usize, byte: u8) { + ptr::write(ptr.add(index), byte) +} + + + #[inline] -pub fn encode_utf8(ch: char, buf: &mut [u8]) -> Result +pub unsafe fn encode_utf8(ch: char, ptr: *mut u8, len: usize) -> Result { let code = ch as u32; - if code < MAX_ONE_B && buf.len() >= 1 { - buf[0] = code as u8; + if 
code < MAX_ONE_B && len >= 1 { + write(ptr, 0, code as u8); return Ok(1); - } else if code < MAX_TWO_B && buf.len() >= 2 { - buf[0] = (code >> 6 & 0x1F) as u8 | TAG_TWO_B; - buf[1] = (code & 0x3F) as u8 | TAG_CONT; + } else if code < MAX_TWO_B && len >= 2 { + write(ptr, 0, (code >> 6 & 0x1F) as u8 | TAG_TWO_B); + write(ptr, 1, (code & 0x3F) as u8 | TAG_CONT); return Ok(2); - } else if code < MAX_THREE_B && buf.len() >= 3 { - buf[0] = (code >> 12 & 0x0F) as u8 | TAG_THREE_B; - buf[1] = (code >> 6 & 0x3F) as u8 | TAG_CONT; - buf[2] = (code & 0x3F) as u8 | TAG_CONT; + } else if code < MAX_THREE_B && len >= 3 { + write(ptr, 0, (code >> 12 & 0x0F) as u8 | TAG_THREE_B); + write(ptr, 1, (code >> 6 & 0x3F) as u8 | TAG_CONT); + write(ptr, 2, (code & 0x3F) as u8 | TAG_CONT); return Ok(3); - } else if buf.len() >= 4 { - buf[0] = (code >> 18 & 0x07) as u8 | TAG_FOUR_B; - buf[1] = (code >> 12 & 0x3F) as u8 | TAG_CONT; - buf[2] = (code >> 6 & 0x3F) as u8 | TAG_CONT; - buf[3] = (code & 0x3F) as u8 | TAG_CONT; + } else if len >= 4 { + write(ptr, 0, (code >> 18 & 0x07) as u8 | TAG_FOUR_B); + write(ptr, 1, (code >> 12 & 0x3F) as u8 | TAG_CONT); + write(ptr, 2, (code >> 6 & 0x3F) as u8 | TAG_CONT); + write(ptr, 3, (code & 0x3F) as u8 | TAG_CONT); return Ok(4); }; Err(EncodeUtf8Error) } + +#[test] +fn test_encode_utf8() { + + let mut data = [0u8; 16]; + for codepoint in 0..=(std::char::MAX as u32) { + if let Some(ch) = std::char::from_u32(codepoint) { + for elt in &mut data { *elt = 0; } + let ptr = data.as_mut_ptr(); + let len = data.len(); + unsafe { + let res = encode_utf8(ch, ptr, len).ok().unwrap(); + assert_eq!(res, ch.len_utf8()); + } + let string = std::str::from_utf8(&data).unwrap(); + assert_eq!(string.chars().next(), Some(ch)); + } + } +} + +#[test] +fn test_encode_utf8_oob() { + + let mut data = [0u8; 16]; + let chars = ['a', 'α', '€', '𐍈']; + for (len, &ch) in (1..=4).zip(&chars) { + assert_eq!(len, ch.len_utf8(), "Len of ch={}", ch); + let ptr = data.as_mut_ptr(); + unsafe { + assert!(matches::matches!(encode_utf8(ch, ptr, len - 1), Err(_))); + assert!(matches::matches!(encode_utf8(ch, ptr, len), Ok(_))); + } + } +} + diff --git a/third_party/rust/arrayvec/src/lib.rs b/third_party/rust/arrayvec/src/lib.rs index 904727e028c6..baf5d5f51e78 100644 --- a/third_party/rust/arrayvec/src/lib.rs +++ b/third_party/rust/arrayvec/src/lib.rs @@ -20,25 +20,18 @@ #![doc(html_root_url="https://docs.rs/arrayvec/0.4/")] #![cfg_attr(not(feature="std"), no_std)] -#![cfg_attr(has_union_feature, feature(untagged_unions))] -#[cfg(feature="serde-1")] +#[cfg(feature="serde")] extern crate serde; #[cfg(not(feature="std"))] extern crate core as std; -#[cfg(not(has_manually_drop_in_union))] -extern crate nodrop; - use std::cmp; use std::iter; use std::mem; +use std::ops::{Bound, Deref, DerefMut, RangeBounds}; use std::ptr; -use std::ops::{ - Deref, - DerefMut, -}; use std::slice; @@ -50,31 +43,21 @@ use std::fmt; use std::io; -#[cfg(has_stable_maybe_uninit)] -#[path="maybe_uninit_stable.rs"] -mod maybe_uninit; -#[cfg(all(not(has_stable_maybe_uninit), has_manually_drop_in_union))] -mod maybe_uninit; -#[cfg(all(not(has_stable_maybe_uninit), not(has_manually_drop_in_union)))] -#[path="maybe_uninit_nodrop.rs"] mod maybe_uninit; +use crate::maybe_uninit::MaybeUninit; -use maybe_uninit::MaybeUninit; - -#[cfg(feature="serde-1")] +#[cfg(feature="serde")] use serde::{Serialize, Deserialize, Serializer, Deserializer}; mod array; mod array_string; mod char; -mod range; mod errors; -pub use array::Array; -pub use range::RangeArgument; 
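One consequence of the 2018-edition rewrite in this hunk: the crate-local `RangeArgument` trait (its `src/range.rs` is deleted further down in this patch) gives way to `std::ops::RangeBounds`, so `drain` now accepts inclusive ranges as well. A short usage sketch, matching the `test_drain_range_inclusive` test added near the end of this patch:

    use arrayvec::ArrayVec;

    fn main() {
        let mut v = ArrayVec::from([0, 1, 2, 3, 4, 5, 6, 7]);
        // Bound::Included(&4) becomes end = 4 + 1 (saturating) inside drain().
        let drained: Vec<i32> = v.drain(1..=4).collect();
        assert_eq!(&drained[..], &[1, 2, 3, 4]);
        assert_eq!(&v[..], &[0, 5, 6, 7]);
    }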
-use array::Index; -pub use array_string::ArrayString; -pub use errors::CapacityError; +pub use crate::array::Array; +use crate::array::Index; +pub use crate::array_string::ArrayString; +pub use crate::errors::CapacityError; @@ -151,8 +134,8 @@ impl ArrayVec { - #[inline] - pub fn capacity(&self) -> usize { A::capacity() } + #[inline(always)] + pub fn capacity(&self) -> usize { A::CAPACITY } @@ -175,6 +158,19 @@ impl ArrayVec { + pub fn remaining_capacity(&self) -> usize { + self.capacity() - self.len() + } + + + + + + + + + + @@ -207,7 +203,7 @@ impl ArrayVec { pub fn try_push(&mut self, element: A::Item) -> Result<(), CapacityError> { - if self.len() < A::capacity() { + if self.len() < A::CAPACITY { unsafe { self.push_unchecked(element); } @@ -239,13 +235,17 @@ impl ArrayVec { - #[inline] pub unsafe fn push_unchecked(&mut self, element: A::Item) { let len = self.len(); - debug_assert!(len < A::capacity()); - ptr::write(self.get_unchecked_mut(len), element); + debug_assert!(len < A::CAPACITY); + ptr::write(self.get_unchecked_ptr(len), element); self.set_len(len + 1); } + + + unsafe fn get_unchecked_ptr(&mut self, index: usize) -> *mut A::Item { + self.xs.ptr_mut().add(index) + } @@ -304,7 +304,7 @@ impl ArrayVec { unsafe { { - let p: *mut _ = self.get_unchecked_mut(index); + let p: *mut _ = self.get_unchecked_ptr(index); ptr::copy(p, p.offset(1), len - index); @@ -333,12 +333,12 @@ impl ArrayVec { pub fn pop(&mut self) -> Option { if self.len() == 0 { - return None + return None; } unsafe { let new_len = self.len() - 1; self.set_len(new_len); - Some(ptr::read(self.get_unchecked_mut(new_len))) + Some(ptr::read(self.get_unchecked_ptr(new_len))) } } @@ -455,13 +455,19 @@ impl ArrayVec { - pub fn truncate(&mut self, len: usize) { - while self.len() > len { self.pop(); } + pub fn truncate(&mut self, new_len: usize) { + unsafe { + if new_len < self.len() { + let tail: *mut [_] = &mut self[new_len..]; + self.len = Index::from(new_len); + ptr::drop_in_place(tail); + } + } } pub fn clear(&mut self) { - while let Some(_) = self.pop() { } + self.truncate(0) } @@ -505,7 +511,6 @@ impl ArrayVec { - #[inline] pub unsafe fn set_len(&mut self, length: usize) { debug_assert!(length <= self.capacity()); self.len = Index::from(length); @@ -529,7 +534,45 @@ impl ArrayVec { - pub fn drain(&mut self, range: R) -> Drain { + pub fn try_extend_from_slice(&mut self, other: &[A::Item]) -> Result<(), CapacityError> + where A::Item: Copy, + { + if self.remaining_capacity() < other.len() { + return Err(CapacityError::new(())); + } + + let self_len = self.len(); + let other_len = other.len(); + + unsafe { + let dst = self.xs.ptr_mut().offset(self_len as isize); + ptr::copy_nonoverlapping(other.as_ptr(), dst, other_len); + self.set_len(self_len + other_len); + } + Ok(()) + } + + + + + + + + + + + + + + + + + + + + pub fn drain(&mut self, range: R) -> Drain + where R: RangeBounds + { @@ -541,8 +584,22 @@ impl ArrayVec { let len = self.len(); - let start = range.start().unwrap_or(0); - let end = range.end().unwrap_or(len); + let start = match range.start_bound() { + Bound::Unbounded => 0, + Bound::Included(&i) => i, + Bound::Excluded(&i) => i.saturating_add(1), + }; + let end = match range.end_bound() { + Bound::Excluded(&j) => j, + Bound::Included(&j) => j.saturating_add(1), + Bound::Unbounded => len, + }; + self.drain_range(start, end) + } + + fn drain_range(&mut self, start: usize, end: usize) -> Drain + { + let len = self.len(); let range_slice: *const _ = &self[start..end]; @@ -562,9 +619,6 @@ impl ArrayVec { - - - 
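The `truncate` rewrite earlier in this hunk replaces the old pop-one-at-a-time loop with a single `ptr::drop_in_place` over the tail slice, and `clear` becomes `truncate(0)`; as the 0.5.0 changelog notes, this changes drop order and how a panic during an element's drop unwinds. A test-style sketch of the observable behaviour, written in the spirit of the `test_drop_panics` test added to tests/tests.rs later in this patch:

    #[test]
    fn truncate_drops_tail() {
        // Sketch only: counts drops to show that truncate(1) drops exactly
        // the two tail elements (in place, front to back) and keeps v[0].
        use std::cell::Cell;
        use arrayvec::ArrayVec;

        struct D<'a>(&'a Cell<u32>);
        impl<'a> Drop for D<'a> {
            fn drop(&mut self) { self.0.set(self.0.get() + 1); }
        }

        let drops = Cell::new(0);
        let mut v = ArrayVec::<[D; 4]>::new();
        v.push(D(&drops));
        v.push(D(&drops));
        v.push(D(&drops));
        v.truncate(1);
        assert_eq!(drops.get(), 2);
        assert_eq!(v.len(), 1);
    }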
pub fn into_inner(self) -> Result { if self.len() < self.capacity() { Err(self) @@ -578,6 +632,7 @@ impl ArrayVec { } + #[deprecated="Use std::mem::drop instead, if at all needed."] pub fn dispose(mut self) { self.clear(); mem::forget(self); @@ -592,6 +647,16 @@ impl ArrayVec { pub fn as_mut_slice(&mut self) -> &mut [A::Item] { self } + + + pub fn as_ptr(&self) -> *const A::Item { + self.xs.ptr() + } + + + pub fn as_mut_ptr(&mut self) -> *mut A::Item { + self.xs.ptr_mut() + } } impl Deref for ArrayVec { @@ -625,7 +690,7 @@ impl DerefMut for ArrayVec { impl From for ArrayVec { fn from(array: A) -> Self { - ArrayVec { xs: MaybeUninit::from(array), len: Index::from(A::capacity()) } + ArrayVec { xs: MaybeUninit::from(array), len: Index::from(A::CAPACITY) } } } @@ -693,7 +758,6 @@ pub struct IntoIter { impl Iterator for IntoIter { type Item = A::Item; - #[inline] fn next(&mut self) -> Option { if self.index == self.v.len { None @@ -701,7 +765,7 @@ impl Iterator for IntoIter { unsafe { let index = self.index.to_usize(); self.index = Index::from(index + 1); - Some(ptr::read(self.v.get_unchecked_mut(index))) + Some(ptr::read(self.v.get_unchecked_ptr(index))) } } } @@ -713,7 +777,6 @@ impl Iterator for IntoIter { } impl DoubleEndedIterator for IntoIter { - #[inline] fn next_back(&mut self) -> Option { if self.index == self.v.len { None @@ -721,7 +784,7 @@ impl DoubleEndedIterator for IntoIter { unsafe { let new_len = self.v.len() - 1; self.v.set_len(new_len); - Some(ptr::read(self.v.get_unchecked_mut(new_len))) + Some(ptr::read(self.v.get_unchecked_ptr(new_len))) } } } @@ -737,7 +800,7 @@ impl Drop for IntoIter { unsafe { self.v.set_len(0); let elements = slice::from_raw_parts_mut( - self.v.get_unchecked_mut(index), + self.v.get_unchecked_ptr(index), len - index); ptr::drop_in_place(elements); } @@ -790,7 +853,6 @@ impl<'a, A: Array> Iterator for Drain<'a, A> { type Item = A::Item; - #[inline] fn next(&mut self) -> Option { self.iter.next().map(|elt| unsafe { @@ -799,7 +861,6 @@ impl<'a, A: Array> Iterator for Drain<'a, A> ) } - #[inline] fn size_hint(&self) -> (usize, Option) { self.iter.size_hint() } @@ -808,7 +869,6 @@ impl<'a, A: Array> Iterator for Drain<'a, A> impl<'a, A: Array> DoubleEndedIterator for Drain<'a, A> where A::Item: 'a, { - #[inline] fn next_back(&mut self) -> Option { self.iter.next_back().map(|elt| unsafe { @@ -871,28 +931,53 @@ impl Extend for ArrayVec { let take = self.capacity() - self.len(); unsafe { let len = self.len(); - let mut ptr = self.as_mut_ptr().offset(len as isize); + let mut ptr = raw_ptr_add(self.as_mut_ptr(), len); + let end_ptr = raw_ptr_add(ptr, take); let mut guard = ScopeExitGuard { - value: self, + value: &mut self.len, data: len, - f: |&len, self_| { - self_.set_len(len) + f: move |&len, self_len| { + **self_len = Index::from(len); } }; - for elt in iter.into_iter().take(take) { - ptr::write(ptr, elt); - ptr = ptr.offset(1); - guard.data += 1; + let mut iter = iter.into_iter(); + loop { + if ptr == end_ptr { break; } + if let Some(elt) = iter.next() { + raw_ptr_write(ptr, elt); + ptr = raw_ptr_add(ptr, 1); + guard.data += 1; + } else { + break; + } } } } } +unsafe fn raw_ptr_add(ptr: *mut T, offset: usize) -> *mut T { + if mem::size_of::() == 0 { + + (ptr as usize).wrapping_add(offset) as _ + } else { + ptr.offset(offset as isize) + } +} + +unsafe fn raw_ptr_write(ptr: *mut T, value: T) { + if mem::size_of::() == 0 { + + } else { + ptr::write(ptr, value) + } +} + + @@ -982,27 +1067,22 @@ impl Default for ArrayVec { } impl PartialOrd for ArrayVec where 
A::Item: PartialOrd { - #[inline] fn partial_cmp(&self, other: &ArrayVec) -> Option { (**self).partial_cmp(other) } - #[inline] fn lt(&self, other: &Self) -> bool { (**self).lt(other) } - #[inline] fn le(&self, other: &Self) -> bool { (**self).le(other) } - #[inline] fn ge(&self, other: &Self) -> bool { (**self).ge(other) } - #[inline] fn gt(&self, other: &Self) -> bool { (**self).gt(other) } @@ -1020,21 +1100,15 @@ impl Ord for ArrayVec where A::Item: Ord { impl> io::Write for ArrayVec { fn write(&mut self, data: &[u8]) -> io::Result { - unsafe { - let len = self.len(); - let mut tail = slice::from_raw_parts_mut(self.get_unchecked_mut(len), - A::capacity() - len); - let result = tail.write(data); - if let Ok(written) = result { - self.set_len(len + written); - } - result - } + let len = cmp::min(self.remaining_capacity(), data.len()); + let _result = self.try_extend_from_slice(&data[..len]); + debug_assert!(_result.is_ok()); + Ok(len) } fn flush(&mut self) -> io::Result<()> { Ok(()) } } -#[cfg(feature="serde-1")] +#[cfg(feature="serde")] impl> Serialize for ArrayVec { fn serialize(&self, serializer: S) -> Result @@ -1044,7 +1118,7 @@ impl> Serialize for ArrayVec { } } -#[cfg(feature="serde-1")] +#[cfg(feature="serde")] impl<'de, T: Deserialize<'de>, A: Array> Deserialize<'de> for ArrayVec { fn deserialize(deserializer: D) -> Result @@ -1059,7 +1133,7 @@ impl<'de, T: Deserialize<'de>, A: Array> Deserialize<'de> for ArrayVec; fn expecting(&self, formatter: &mut fmt::Formatter) -> fmt::Result { - write!(formatter, "an array with no more than {} items", A::capacity()) + write!(formatter, "an array with no more than {} items", A::CAPACITY) } fn visit_seq(self, mut seq: SA) -> Result @@ -1067,9 +1141,9 @@ impl<'de, T: Deserialize<'de>, A: Array> Deserialize<'de> for ArrayVec::new(); - while let Some(value) = try!(seq.next_element()) { + while let Some(value) = seq.next_element()? 
{ if let Err(_) = values.try_push(value) { - return Err(SA::Error::invalid_length(A::capacity() + 1, &self)); + return Err(SA::Error::invalid_length(A::CAPACITY + 1, &self)); } } diff --git a/third_party/rust/arrayvec/src/maybe_uninit.rs b/third_party/rust/arrayvec/src/maybe_uninit.rs index 694a47775962..33b94e0cd0a6 100644 --- a/third_party/rust/arrayvec/src/maybe_uninit.rs +++ b/third_party/rust/arrayvec/src/maybe_uninit.rs @@ -1,27 +1,28 @@ -use array::Array; -use std::mem::ManuallyDrop; +use crate::array::Array; +use std::mem::MaybeUninit as StdMaybeUninit; - - - -#[repr(C)] -pub union MaybeUninit { - empty: (), - value: ManuallyDrop, +#[derive(Copy)] +pub struct MaybeUninit { + inner: StdMaybeUninit, } +impl Clone for MaybeUninit + where T: Copy +{ + fn clone(&self) -> Self { *self } +} impl MaybeUninit { pub unsafe fn uninitialized() -> Self { - MaybeUninit { empty: () } + MaybeUninit { inner: StdMaybeUninit::uninit() } } pub fn from(v: T) -> Self { - MaybeUninit { value: ManuallyDrop::new(v) } + MaybeUninit { inner: StdMaybeUninit::new(v) } } @@ -31,16 +32,13 @@ impl MaybeUninit { pub fn ptr(&self) -> *const T::Item where T: Array { - - - - self as *const _ as *const T::Item + self.inner.as_ptr() as *const T::Item } pub fn ptr_mut(&mut self) -> *mut T::Item where T: Array { - self as *mut _ as *mut T::Item + self.inner.as_mut_ptr() as *mut T::Item } } diff --git a/third_party/rust/arrayvec/src/maybe_uninit_nodrop.rs b/third_party/rust/arrayvec/src/maybe_uninit_nodrop.rs deleted file mode 100644 index dc6fb928455a..000000000000 --- a/third_party/rust/arrayvec/src/maybe_uninit_nodrop.rs +++ /dev/null @@ -1,41 +0,0 @@ - -use array::Array; -use nodrop::NoDrop; -use std::mem::uninitialized; - - - - - - - -pub struct MaybeUninit(NoDrop); - - - -impl MaybeUninit { - - pub unsafe fn uninitialized() -> Self { - Self::from(uninitialized()) - } - - - pub fn from(v: T) -> Self { - MaybeUninit(NoDrop::new(v)) - } - - - pub fn ptr(&self) -> *const T::Item - where T: Array - { - &*self.0 as *const T as *const _ - } - - - pub fn ptr_mut(&mut self) -> *mut T::Item - where T: Array - { - &mut *self.0 as *mut T as *mut _ - } -} - diff --git a/third_party/rust/arrayvec/src/maybe_uninit_stable.rs b/third_party/rust/arrayvec/src/maybe_uninit_stable.rs deleted file mode 100644 index e5549a514599..000000000000 --- a/third_party/rust/arrayvec/src/maybe_uninit_stable.rs +++ /dev/null @@ -1,40 +0,0 @@ - - -use array::Array; -use std::mem::MaybeUninit as StdMaybeUninit; - -pub struct MaybeUninit { - inner: StdMaybeUninit, -} - -impl MaybeUninit { - - pub unsafe fn uninitialized() -> Self { - MaybeUninit { inner: StdMaybeUninit::uninit() } - } - - - pub fn from(v: T) -> Self { - MaybeUninit { inner: StdMaybeUninit::new(v) } - } - - - - - - pub fn ptr(&self) -> *const T::Item - where T: Array - { - - - - self.inner.as_ptr() as *const T::Item - } - - - pub fn ptr_mut(&mut self) -> *mut T::Item - where T: Array - { - self.inner.as_mut_ptr() as *mut T::Item - } -} diff --git a/third_party/rust/arrayvec/src/range.rs b/third_party/rust/arrayvec/src/range.rs deleted file mode 100644 index 83c4aa3fe70a..000000000000 --- a/third_party/rust/arrayvec/src/range.rs +++ /dev/null @@ -1,42 +0,0 @@ - -use std::ops::{ - RangeFull, - RangeFrom, - RangeTo, - Range, -}; - - - - - - -pub trait RangeArgument { - #[inline] - - fn start(&self) -> Option { None } - #[inline] - - fn end(&self) -> Option { None } -} - - -impl RangeArgument for RangeFull {} - -impl RangeArgument for RangeFrom { - #[inline] - fn start(&self) -> Option { 
Some(self.start) } -} - -impl RangeArgument for RangeTo { - #[inline] - fn end(&self) -> Option { Some(self.end) } -} - -impl RangeArgument for Range { - #[inline] - fn start(&self) -> Option { Some(self.start) } - #[inline] - fn end(&self) -> Option { Some(self.end) } -} - diff --git a/third_party/rust/arrayvec/tests/serde.rs b/third_party/rust/arrayvec/tests/serde.rs index 62acf25ba676..3876d2a7cd1f 100644 --- a/third_party/rust/arrayvec/tests/serde.rs +++ b/third_party/rust/arrayvec/tests/serde.rs @@ -1,4 +1,4 @@ -#![cfg(feature = "serde-1")] +#![cfg(feature = "serde")] extern crate arrayvec; extern crate serde_test; diff --git a/third_party/rust/arrayvec/tests/tests.rs b/third_party/rust/arrayvec/tests/tests.rs index 964af9c28b87..82f27bd052a9 100644 --- a/third_party/rust/arrayvec/tests/tests.rs +++ b/third_party/rust/arrayvec/tests/tests.rs @@ -27,6 +27,44 @@ fn test_simple() { assert_eq!(sum_len, 8); } +#[test] +fn test_capacity_left() { + let mut vec: ArrayVec<[usize; 4]> = ArrayVec::new(); + assert_eq!(vec.remaining_capacity(), 4); + vec.push(1); + assert_eq!(vec.remaining_capacity(), 3); + vec.push(2); + assert_eq!(vec.remaining_capacity(), 2); + vec.push(3); + assert_eq!(vec.remaining_capacity(), 1); + vec.push(4); + assert_eq!(vec.remaining_capacity(), 0); +} + +#[test] +fn test_extend_from_slice() { + let mut vec: ArrayVec<[usize; 10]> = ArrayVec::new(); + + vec.try_extend_from_slice(&[1, 2, 3]).unwrap(); + assert_eq!(vec.len(), 3); + assert_eq!(&vec[..], &[1, 2, 3]); + assert_eq!(vec.pop(), Some(3)); + assert_eq!(&vec[..], &[1, 2]); +} + +#[test] +fn test_extend_from_slice_error() { + let mut vec: ArrayVec<[usize; 10]> = ArrayVec::new(); + + vec.try_extend_from_slice(&[1, 2, 3]).unwrap(); + let res = vec.try_extend_from_slice(&[0; 8]); + assert_matches!(res, Err(_)); + + let mut vec: ArrayVec<[usize; 0]> = ArrayVec::new(); + let res = vec.try_extend_from_slice(&[0; 1]); + assert_matches!(res, Err(_)); +} + #[test] fn test_u16_index() { const N: usize = 4096; @@ -126,6 +164,80 @@ fn test_drop() { } } +#[test] +fn test_drop_panics() { + use std::cell::Cell; + use std::panic::catch_unwind; + use std::panic::AssertUnwindSafe; + + let flag = &Cell::new(0); + + struct Bump<'a>(&'a Cell); + + + impl<'a> Drop for Bump<'a> { + fn drop(&mut self) { + let n = self.0.get(); + self.0.set(n + 1); + if n == 0 { + panic!("Panic in Bump's drop"); + } + } + } + + flag.set(0); + { + let array = vec![Bump(flag), Bump(flag)]; + let res = catch_unwind(AssertUnwindSafe(|| { + drop(array); + })); + assert!(res.is_err()); + } + + if flag.get() != 2 { + println!("test_drop_panics: skip, this version of Rust doesn't continue in drop_in_place"); + return; + } + + flag.set(0); + { + let mut array = ArrayVec::<[Bump; 128]>::new(); + array.push(Bump(flag)); + array.push(Bump(flag)); + array.push(Bump(flag)); + + let res = catch_unwind(AssertUnwindSafe(|| { + drop(array); + })); + assert!(res.is_err()); + } + + assert_eq!(flag.get(), 3); + + + flag.set(0); + { + let mut array = ArrayVec::<[Bump; 16]>::new(); + array.push(Bump(flag)); + array.push(Bump(flag)); + array.push(Bump(flag)); + array.push(Bump(flag)); + array.push(Bump(flag)); + + let i = 2; + let tail_len = array.len() - i; + + let res = catch_unwind(AssertUnwindSafe(|| { + array.truncate(i); + })); + assert!(res.is_err()); + + assert_eq!(flag.get(), tail_len as i32); + } + + +} + #[test] fn test_extend() { let mut range = 0..10; @@ -146,8 +258,8 @@ fn test_extend() { #[test] fn test_is_send_sync() { let data = ArrayVec::<[Vec; 5]>::new(); - &data 
as &Send; - &data as &Sync; + &data as &dyn Send; + &data as &dyn Sync; } #[test] @@ -159,6 +271,11 @@ fn test_compact_size() { assert!(mem::size_of::() <= 8); + type EmptyArray = ArrayVec<[u8; 0]>; + println!("{}", mem::size_of::()); + assert!(mem::size_of::() <= 2); + + type QuadArray = ArrayVec<[u32; 3]>; println!("{}", mem::size_of::()); assert!(mem::size_of::() <= 24); @@ -189,6 +306,29 @@ fn test_drain() { assert_eq!(&v[..], &[]); } +#[test] +fn test_drain_range_inclusive() { + let mut v = ArrayVec::from([0; 8]); + v.drain(0..=7); + assert_eq!(&v[..], &[]); + + v.extend(0..); + v.drain(1..=4); + assert_eq!(&v[..], &[0, 5, 6, 7]); + let u: ArrayVec<[_; 3]> = v.drain(1..=2).rev().collect(); + assert_eq!(&u[..], &[6, 5]); + assert_eq!(&v[..], &[0, 7]); + v.drain(..); + assert_eq!(&v[..], &[]); +} + +#[test] +#[should_panic] +fn test_drain_range_inclusive_oob() { + let mut v = ArrayVec::from([0; 0]); + v.drain(0..=0); +} + #[test] fn test_retain() { let mut v = ArrayVec::from([0; 8]); @@ -294,6 +434,7 @@ fn test_into_inner_3_() { assert_eq!(v.into_inner().unwrap(), [1, 2, 3, 4]); } +#[cfg(feature="std")] #[test] fn test_write() { use std::io::Write; @@ -328,6 +469,7 @@ fn array_clone_from() { assert_eq!(&t, &reference[..]); } +#[cfg(feature="std")] #[test] fn test_string() { use std::error::Error; @@ -353,9 +495,9 @@ fn test_string() { assert_eq!(tmut, "ab"); - let t = || -> Result<(), Box> { + let t = || -> Result<(), Box> { let mut t = ArrayString::<[_; 2]>::new(); - try!(t.try_push_str(text)); + t.try_push_str(text)?; Ok(()) }(); assert!(t.is_err()); @@ -370,6 +512,14 @@ fn test_string_from() { assert_eq!(u.len(), text.len()); } +#[test] +fn test_string_parse_from_str() { + let text = "hello world"; + let u: ArrayString<[_; 11]> = text.parse().unwrap(); + assert_eq!(&u, text); + assert_eq!(u.len(), text.len()); +} + #[test] fn test_string_from_bytes() { let text = "hello world"; @@ -508,10 +658,22 @@ fn test_sizes_129_255() { ArrayVec::from([0u8; 255]); } - #[test] -fn test_newish_stable_uses_maybe_uninit() { - if option_env!("ARRAYVECTEST_ENSURE_MAYBEUNINIT").map(|s| !s.is_empty()).unwrap_or(false) { - assert!(cfg!(has_stable_maybe_uninit)); - } +fn test_extend_zst() { + let mut range = 0..10; + #[derive(Copy, Clone, PartialEq, Debug)] + struct Z; + + let mut array: ArrayVec<[_; 5]> = range.by_ref().map(|_| Z).collect(); + assert_eq!(&array[..], &[Z; 5]); + assert_eq!(range.next(), Some(5)); + + array.extend(range.by_ref().map(|_| Z)); + assert_eq!(range.next(), Some(6)); + + let mut array: ArrayVec<[_; 10]> = (0..3).map(|_| Z).collect(); + assert_eq!(&array[..], &[Z; 3]); + array.extend((3..5).map(|_| Z)); + assert_eq!(&array[..], &[Z; 5]); + assert_eq!(array.len(), 5); } diff --git a/third_party/rust/ash/.cargo-checksum.json b/third_party/rust/ash/.cargo-checksum.json new file mode 100644 index 000000000000..71ffb13a4e4d --- /dev/null +++ b/third_party/rust/ash/.cargo-checksum.json @@ -0,0 +1 @@ 
+{"files":{"Cargo.toml":"c1c67981635d76cb25b8e2d93bbf5ed521f2a436c8a3eb5c11673a1d6373cbbb","output":"e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855","src/allocator.rs":"8defd2b41207b2049c2fdae62564148c969d92d5a724487bbc189e748b27fd5c","src/device.rs":"7349761540893b33a41eff1e922f846db494b8b9e6bf0dcfd48ce0bec00e5870","src/entry.rs":"2491a6f350f1c04bcb39e4f585d8549e2cf4bbd3e8a792145a6b0218f52b6e9b","src/extensions/experimental/amd.rs":"70652e2216811f0fcf2f0e748d0cf5c52c6eabfd613e3bd6da28cb1100cfd620","src/extensions/experimental/mod.rs":"41a5366e1c8bd0e1fa47e9cf6fddc8111ed0a6946813be4eefca81da969d1ee9","src/extensions/ext/debug_marker.rs":"2221980d611c8e9bdc9ca5186bf363ccbda22e6b5ea52572f3b7ed4ec5a752a3","src/extensions/ext/debug_report.rs":"affff85cefb68313a6d91489fba4c58e4adffca00d571e24a6818e72f94f4983","src/extensions/ext/debug_utils.rs":"8592be4c7dfbf13d4d224d79d16e4fc6ab746d1fa761178a781dc03caa63a53a","src/extensions/ext/mod.rs":"ccd7b9471c4bb356fc2fa309d58a847f9aff393b77fc08752123e19c801cbc65","src/extensions/khr/android_surface.rs":"5f9ff04add0661637258b32eea95c1eefcf86ab8686088e28a7369ab77df9456","src/extensions/khr/display_swapchain.rs":"cfd551cc2bb29d8e998938880de49d0142c1af6561360282820f6e32c1f9bc42","src/extensions/khr/mod.rs":"12a32c91a4b13972660dc997b59b38383b8059e6ff457d594731e828db6f2e1d","src/extensions/khr/surface.rs":"2e5a08e6a3f8903f40e643476eb48b0626054de606dfeed1e1e6ee3b8098c743","src/extensions/khr/swapchain.rs":"4dd73298a5d3e55c83d649a81aaf42384625ef53b7f13b24ad326a08627cf794","src/extensions/khr/wayland_surface.rs":"63233a95aa5f4c693f7322b6cf70789a9ac304a90bc3157a0855ce71872cf6e9","src/extensions/khr/win32_surface.rs":"4e27aaf236eba179eb0d2ad3a29a54ace21d7c4b5210ac36bc328e3d57cc8616","src/extensions/khr/xcb_surface.rs":"328e57312e261f55f13ed78a7c3bd8dcaab7d94d481910a6483b962d0f4da40d","src/extensions/khr/xlib_surface.rs":"44ee06032f0d3fe7f330c6542cbe81636523123355f8c10844abf7893bcb2503","src/extensions/mod.rs":"4a394c468a0fc824671b36c1390f6c34173d073ed0918a528a84f48667756d65","src/extensions/mvk/ios_surface.rs":"3c58810506841142a781df7ab76fe95a2eac5d7dc95ae6345ae93220d2647b7b","src/extensions/mvk/macos_surface.rs":"fcf3a34c164f0251293a50222f944e74fff4eeb797ad8521678031e69a26956c","src/extensions/mvk/mod.rs":"d03ac1a0144d1aca9ed1d0ce0c14b099f1fedb03b8108028b780a34f64de604c","src/extensions/nv/mesh_shader.rs":"c0450955eb36344b7e49acc58a021d04926dd918685b9fc6a655cd29a39afc72","src/extensions/nv/mod.rs":"175512de8528c3a90000cf9509c683761e9536dcb448877b7c7772b695aad258","src/extensions/nv/ray_tracing.rs":"a241936debf78f219de647b8392dc18c0542a82393eace4d25aaa49afef36b82","src/instance.rs":"fab133b311506eb38d8a3faa7f3e60a9e13b84760e08ad830e616262a6b46228","src/lib.rs":"801481c0cd8415f7f90ba1022128b440cc951cbd572a82f30cc1a142d34af405","src/prelude.rs":"ed6ee8e74131c232af2e3a780abe13f0c65acba1e6de61e3d1eec7f7aec7467a","src/util.rs":"bb50e11bc75058fb474bda5e34aa8978cb585ce3532ae2921c93692a13a25187","src/version.rs":"6f2d52ac2edd6f54c899763825954ac8b4c944aa9168d00885cf3955b5e4e454","src/vk.rs":"f946223870190a0060cf7b3c5baacae9ef1e4bcd12bc2d860344dc5c1567cf3d","tests/constant_size_arrays.rs":"6577f5c8d9810f9aea1d47862243e4d41a297d43e744be04fdb34d08021bac48","tests/display.rs":"13f341053efcfc104e6dae48c19e6092ffc2acf6ff3cbc4ed37dd1a03875cb17"},"package":"003d1fb2eb12eb06d4a03dbe02eea67a9fac910fa97932ab9e3a75b96a1ea5e5"} \ No newline at end of file diff --git a/third_party/rust/ash/Cargo.toml b/third_party/rust/ash/Cargo.toml new file mode 100644 index 
000000000000..a745e43b96b8
--- /dev/null
+++ b/third_party/rust/ash/Cargo.toml
@@ -0,0 +1,29 @@
+# THIS FILE IS AUTOMATICALLY GENERATED BY CARGO
+#
+# When uploading crates to the registry Cargo will automatically
+# "normalize" Cargo.toml files for maximal compatibility
+# with all versions of Cargo and also rewrite `path` dependencies
+# to registry (e.g., crates.io) dependencies
+#
+# If you believe there's an error in this file please file an
+# issue against the rust-lang/cargo repository. If you're
+# editing this file be aware that the upstream Cargo.toml
+# will likely look very different (and much more reasonable)
+
+[package]
+name = "ash"
+version = "0.29.0"
+authors = ["maik klein "]
+description = "Vulkan bindings for Rust"
+documentation = "https://docs.rs/ash"
+readme = "../README.md"
+keywords = ["vulkan", "graphic"]
+license = "MIT"
+repository = "https://github.com/MaikKlein/ash"
+[package.metadata.release]
+no-dev-version = true
+[dependencies.shared_library]
+version = "0.1.9"
+
+[features]
+default = []
diff --git a/third_party/rust/ash/output b/third_party/rust/ash/output
new file mode 100644
index 000000000000..e69de29bb2d1
diff --git a/third_party/rust/ash/src/allocator.rs b/third_party/rust/ash/src/allocator.rs
new file mode 100644
index 000000000000..1e0f4b045854
--- /dev/null
+++ b/third_party/rust/ash/src/allocator.rs
@@ -0,0 +1,120 @@
+#![allow(dead_code)]
+use vk;
+use std::os::raw::c_void;
+use std::ptr;
+pub trait VkAllocation {
+    unsafe extern "system" fn allocation(
+        *mut (),
+        usize,
+        usize,
+        vk::SystemAllocationScope,
+    ) -> *mut ();
+    unsafe extern "system" fn reallocation(
+        *mut c_void,
+        *mut c_void,
+        usize,
+        usize,
+        vk::SystemAllocationScope,
+    ) -> *mut c_void;
+    unsafe extern "system" fn free(*mut c_void, *mut c_void);
+    unsafe extern "system" fn internal_allocation(
+        *mut c_void,
+        usize,
+        vk::InternalAllocationType,
+        vk::SystemAllocationScope,
+    );
+    unsafe extern "system" fn internal_free(
+        *mut c_void,
+        usize,
+        vk::InternalAllocationType,
+        vk::SystemAllocationScope,
+    );
+    fn create_allocation_callback() -> Option<vk::AllocationCallbacks> {
+        let alloc = vk::AllocationCallbacks {
+            p_user_data: ptr::null_mut(),
+            pfn_allocation: Self::allocation,
+            pfn_reallocation: Self::reallocation,
+            pfn_free: Self::free,
+            pfn_internal_allocation: Self::internal_allocation,
+            pfn_internal_free: Self::internal_free,
+        };
+        Some(alloc)
+    }
+}
+
+pub struct DefaultAllocatorCallback;
+pub struct TestAlloc;
+
+impl VkAllocation for TestAlloc {
+    unsafe extern "system" fn allocation(
+        _: *mut (),
+        _: usize,
+        _: usize,
+        _: vk::SystemAllocationScope,
+    ) -> *mut () {
+        ptr::null_mut()
+    }
+
+    unsafe extern "system" fn reallocation(
+        _: *mut c_void,
+        _: *mut c_void,
+        _: usize,
+        _: usize,
+        _: vk::SystemAllocationScope,
+    ) -> *mut c_void {
+        ptr::null_mut()
+    }
+    unsafe extern "system" fn free(_: *mut c_void, _: *mut c_void) {}
+    unsafe extern "system" fn internal_allocation(
+        _: *mut c_void,
+        _: usize,
+        _: vk::InternalAllocationType,
+        _: vk::SystemAllocationScope,
+    ) {
+    }
+    unsafe extern "system" fn internal_free(
+        _: *mut c_void,
+        _: usize,
+        _: vk::InternalAllocationType,
+        _: vk::SystemAllocationScope,
+    ) {
+    }
+}
+impl VkAllocation for DefaultAllocatorCallback {
+    unsafe extern "system" fn allocation(
+        _: *mut (),
+        _: usize,
+        _: usize,
+        _: vk::SystemAllocationScope,
+    ) -> *mut () {
+        ptr::null_mut()
+    }
+
+    unsafe extern "system" fn reallocation(
+        _: *mut c_void,
+        _: *mut c_void,
+        _: usize,
+        _: usize,
+        _: vk::SystemAllocationScope,
+    ) -> *mut 
c_void { + ptr::null_mut() + } + unsafe extern "system" fn free(_: *mut c_void, _: *mut c_void) {} + unsafe extern "system" fn internal_allocation( + _: *mut c_void, + _: usize, + _: vk::InternalAllocationType, + _: vk::SystemAllocationScope, + ) { + } + unsafe extern "system" fn internal_free( + _: *mut c_void, + _: usize, + _: vk::InternalAllocationType, + _: vk::SystemAllocationScope, + ) { + } + fn create_allocation_callback() -> Option { + None + } +} \ No newline at end of file diff --git a/third_party/rust/ash/src/device.rs b/third_party/rust/ash/src/device.rs new file mode 100644 index 000000000000..3bae6316c26c --- /dev/null +++ b/third_party/rust/ash/src/device.rs @@ -0,0 +1,1997 @@ +#![allow(dead_code)] +use prelude::*; +use std::mem; +use std::os::raw::c_void; +use std::ptr; +use vk; +use RawPtr; + +#[allow(non_camel_case_types)] +pub trait DeviceV1_1: DeviceV1_0 { + fn fp_v1_1(&self) -> &vk::DeviceFnV1_1; + + #[doc = ""] + unsafe fn bind_buffer_memory2(&self, bind_infos: &[vk::BindBufferMemoryInfo]) -> VkResult<()> { + let err_code = self.fp_v1_1().bind_buffer_memory2( + self.handle(), + bind_infos.len() as _, + bind_infos.as_ptr(), + ); + match err_code { + vk::Result::SUCCESS => Ok(()), + _ => Err(err_code), + } + } + + #[doc = ""] + unsafe fn bind_image_memory2(&self, bind_infos: &[vk::BindImageMemoryInfo]) -> VkResult<()> { + let err_code = self.fp_v1_1().bind_image_memory2( + self.handle(), + bind_infos.len() as _, + bind_infos.as_ptr(), + ); + match err_code { + vk::Result::SUCCESS => Ok(()), + _ => Err(err_code), + } + } + + #[doc = ""] + unsafe fn get_device_group_peer_memory_features( + &self, + heap_index: u32, + local_device_index: u32, + remote_device_index: u32, + ) -> vk::PeerMemoryFeatureFlags { + let mut peer_memory_features = mem::uninitialized(); + self.fp_v1_1().get_device_group_peer_memory_features( + self.handle(), + heap_index, + local_device_index, + remote_device_index, + &mut peer_memory_features, + ); + peer_memory_features + } + + #[doc = ""] + unsafe fn cmd_set_device_mask(&self, command_buffer: vk::CommandBuffer, device_mask: u32) { + self.fp_v1_1() + .cmd_set_device_mask(command_buffer, device_mask); + } + + #[doc = ""] + unsafe fn cmd_dispatch_base( + &self, + command_buffer: vk::CommandBuffer, + base_group_x: u32, + base_group_y: u32, + base_group_z: u32, + group_count_x: u32, + group_count_y: u32, + group_count_z: u32, + ) { + self.fp_v1_1().cmd_dispatch_base( + command_buffer, + base_group_x, + base_group_y, + base_group_z, + group_count_x, + group_count_y, + group_count_z, + ); + } + + #[doc = ""] + unsafe fn get_image_memory_requirements2( + &self, + info: &vk::ImageMemoryRequirementsInfo2, + out: &mut vk::MemoryRequirements2, + ) { + self.fp_v1_1() + .get_image_memory_requirements2(self.handle(), info, out); + } + + #[doc = ""] + unsafe fn get_buffer_memory_requirements2( + &self, + info: &vk::BufferMemoryRequirementsInfo2, + out: &mut vk::MemoryRequirements2, + ) { + self.fp_v1_1() + .get_buffer_memory_requirements2(self.handle(), info, out); + } + + unsafe fn get_image_sparse_memory_requirements2_len( + &self, + info: &vk::ImageSparseMemoryRequirementsInfo2, + ) -> usize { + let mut count = mem::uninitialized(); + self.fp_v1_1().get_image_sparse_memory_requirements2( + self.handle(), + info, + &mut count, + ptr::null_mut(), + ); + count as usize + } + + #[doc = ""] + unsafe fn get_image_sparse_memory_requirements2( + &self, + info: &vk::ImageSparseMemoryRequirementsInfo2, + out: &mut [vk::SparseImageMemoryRequirements2], + ) { + let mut 
count = out.len() as u32; + self.fp_v1_1().get_image_sparse_memory_requirements2( + self.handle(), + info, + &mut count, + out.as_mut_ptr(), + ); + } + + #[doc = ""] + unsafe fn trim_command_pool( + &self, + command_pool: vk::CommandPool, + flags: vk::CommandPoolTrimFlags, + ) { + self.fp_v1_1() + .trim_command_pool(self.handle(), command_pool, flags); + } + + #[doc = ""] + unsafe fn create_sampler_ycbcr_conversion( + &self, + create_info: &vk::SamplerYcbcrConversionCreateInfo, + allocation_callbacks: Option<&vk::AllocationCallbacks>, + ) -> VkResult { + let mut ycbcr_conversion = mem::uninitialized(); + let err_code = self.fp_v1_1().create_sampler_ycbcr_conversion( + self.handle(), + create_info, + allocation_callbacks.as_raw_ptr(), + &mut ycbcr_conversion, + ); + match err_code { + vk::Result::SUCCESS => Ok(ycbcr_conversion), + _ => Err(err_code), + } + } + + #[doc = ""] + unsafe fn destroy_sampler_ycbcr_conversion( + &self, + ycbcr_conversion: vk::SamplerYcbcrConversion, + allocation_callbacks: Option<&vk::AllocationCallbacks>, + ) { + self.fp_v1_1().destroy_sampler_ycbcr_conversion( + self.handle(), + ycbcr_conversion, + allocation_callbacks.as_raw_ptr(), + ); + } + + #[doc = ""] + unsafe fn create_descriptor_update_template( + &self, + create_info: &vk::DescriptorUpdateTemplateCreateInfo, + allocation_callbacks: Option<&vk::AllocationCallbacks>, + ) -> VkResult { + let mut descriptor_update_template = mem::uninitialized(); + let err_code = self.fp_v1_1().create_descriptor_update_template( + self.handle(), + create_info, + allocation_callbacks.as_raw_ptr(), + &mut descriptor_update_template, + ); + match err_code { + vk::Result::SUCCESS => Ok(descriptor_update_template), + _ => Err(err_code), + } + } + + #[doc = ""] + unsafe fn destroy_descriptor_update_template( + &self, + descriptor_update_template: vk::DescriptorUpdateTemplate, + allocation_callbacks: Option<&vk::AllocationCallbacks>, + ) { + self.fp_v1_1().destroy_descriptor_update_template( + self.handle(), + descriptor_update_template, + allocation_callbacks.as_raw_ptr(), + ); + } + + #[doc = ""] + unsafe fn update_descriptor_set_with_template( + &self, + descriptor_set: vk::DescriptorSet, + descriptor_update_template: vk::DescriptorUpdateTemplate, + data: *const c_void, + ) { + self.fp_v1_1().update_descriptor_set_with_template( + self.handle(), + descriptor_set, + descriptor_update_template, + data, + ); + } + + #[doc = ""] + unsafe fn get_descriptor_set_layout_support( + &self, + create_info: &vk::DescriptorSetLayoutCreateInfo, + out: &mut vk::DescriptorSetLayoutSupport, + ) { + self.fp_v1_1() + .get_descriptor_set_layout_support(self.handle(), create_info, out); + } +} + +#[allow(non_camel_case_types)] +pub trait DeviceV1_0 { + fn handle(&self) -> vk::Device; + fn fp_v1_0(&self) -> &vk::DeviceFnV1_0; + #[doc = ""] + unsafe fn destroy_device(&self, allocation_callbacks: Option<&vk::AllocationCallbacks>) { + self.fp_v1_0() + .destroy_device(self.handle(), allocation_callbacks.as_raw_ptr()); + } + + #[doc = ""] + unsafe fn destroy_sampler( + &self, + sampler: vk::Sampler, + allocation_callbacks: Option<&vk::AllocationCallbacks>, + ) { + self.fp_v1_0() + .destroy_sampler(self.handle(), sampler, allocation_callbacks.as_raw_ptr()); + } + + #[doc = ""] + unsafe fn free_memory( + &self, + memory: vk::DeviceMemory, + allocation_callbacks: Option<&vk::AllocationCallbacks>, + ) { + self.fp_v1_0() + .free_memory(self.handle(), memory, allocation_callbacks.as_raw_ptr()); + } + + #[doc = ""] + unsafe fn free_command_buffers( + &self, + 
command_pool: vk::CommandPool, + command_buffers: &[vk::CommandBuffer], + ) { + self.fp_v1_0().free_command_buffers( + self.handle(), + command_pool, + command_buffers.len() as u32, + command_buffers.as_ptr(), + ); + } + + #[doc = ""] + unsafe fn create_event( + &self, + create_info: &vk::EventCreateInfo, + allocation_callbacks: Option<&vk::AllocationCallbacks>, + ) -> VkResult { + let mut event = mem::uninitialized(); + let err_code = self.fp_v1_0().create_event( + self.handle(), + create_info, + allocation_callbacks.as_raw_ptr(), + &mut event, + ); + match err_code { + vk::Result::SUCCESS => Ok(event), + _ => Err(err_code), + } + } + + + + #[doc = ""] + unsafe fn get_event_status(&self, event: vk::Event) -> VkResult { + let err_code = self.fp_v1_0().get_event_status(self.handle(), event); + match err_code { + vk::Result::EVENT_SET => Ok(true), + vk::Result::EVENT_RESET => Ok(false), + _ => Err(err_code), + } + } + + #[doc = ""] + unsafe fn set_event(&self, event: vk::Event) -> VkResult<()> { + let err_code = self.fp_v1_0().set_event(self.handle(), event); + match err_code { + vk::Result::SUCCESS => Ok(()), + _ => Err(err_code), + } + } + + #[doc = ""] + unsafe fn reset_event(&self, event: vk::Event) -> VkResult<()> { + let err_code = self.fp_v1_0().reset_event(self.handle(), event); + match err_code { + vk::Result::SUCCESS => Ok(()), + _ => Err(err_code), + } + } + #[doc = ""] + unsafe fn cmd_set_event( + &self, + command_buffer: vk::CommandBuffer, + event: vk::Event, + stage_mask: vk::PipelineStageFlags, + ) { + self.fp_v1_0() + .cmd_set_event(command_buffer, event, stage_mask); + } + #[doc = ""] + unsafe fn cmd_reset_event( + &self, + command_buffer: vk::CommandBuffer, + event: vk::Event, + stage_mask: vk::PipelineStageFlags, + ) { + self.fp_v1_0() + .cmd_reset_event(command_buffer, event, stage_mask); + } + + #[doc = ""] + unsafe fn cmd_wait_events( + &self, + command_buffer: vk::CommandBuffer, + events: &[vk::Event], + src_stage_mask: vk::PipelineStageFlags, + dst_stage_mask: vk::PipelineStageFlags, + memory_barriers: &[vk::MemoryBarrier], + buffer_memory_barriers: &[vk::BufferMemoryBarrier], + image_memory_barriers: &[vk::ImageMemoryBarrier], + ) { + self.fp_v1_0().cmd_wait_events( + command_buffer, + events.len() as _, + events.as_ptr(), + src_stage_mask, + dst_stage_mask, + memory_barriers.len() as _, + memory_barriers.as_ptr(), + buffer_memory_barriers.len() as _, + buffer_memory_barriers.as_ptr(), + image_memory_barriers.len() as _, + image_memory_barriers.as_ptr(), + ); + } + + #[doc = ""] + unsafe fn destroy_fence( + &self, + fence: vk::Fence, + allocation_callbacks: Option<&vk::AllocationCallbacks>, + ) { + self.fp_v1_0() + .destroy_fence(self.handle(), fence, allocation_callbacks.as_raw_ptr()); + } + + #[doc = ""] + unsafe fn destroy_event( + &self, + event: vk::Event, + allocation_callbacks: Option<&vk::AllocationCallbacks>, + ) { + self.fp_v1_0() + .destroy_event(self.handle(), event, allocation_callbacks.as_raw_ptr()); + } + + #[doc = ""] + unsafe fn destroy_image( + &self, + image: vk::Image, + allocation_callbacks: Option<&vk::AllocationCallbacks>, + ) { + self.fp_v1_0() + .destroy_image(self.handle(), image, allocation_callbacks.as_raw_ptr()); + } + + #[doc = ""] + unsafe fn destroy_command_pool( + &self, + pool: vk::CommandPool, + allocation_callbacks: Option<&vk::AllocationCallbacks>, + ) { + self.fp_v1_0() + .destroy_command_pool(self.handle(), pool, allocation_callbacks.as_raw_ptr()); + } + + #[doc = ""] + unsafe fn destroy_image_view( + &self, + image_view: 
vk::ImageView, + allocation_callbacks: Option<&vk::AllocationCallbacks>, + ) { + self.fp_v1_0().destroy_image_view( + self.handle(), + image_view, + allocation_callbacks.as_raw_ptr(), + ); + } + + #[doc = ""] + unsafe fn destroy_render_pass( + &self, + renderpass: vk::RenderPass, + allocation_callbacks: Option<&vk::AllocationCallbacks>, + ) { + self.fp_v1_0().destroy_render_pass( + self.handle(), + renderpass, + allocation_callbacks.as_raw_ptr(), + ); + } + + #[doc = ""] + unsafe fn destroy_framebuffer( + &self, + framebuffer: vk::Framebuffer, + allocation_callbacks: Option<&vk::AllocationCallbacks>, + ) { + self.fp_v1_0().destroy_framebuffer( + self.handle(), + framebuffer, + allocation_callbacks.as_raw_ptr(), + ); + } + + #[doc = ""] + unsafe fn destroy_pipeline_layout( + &self, + pipeline_layout: vk::PipelineLayout, + allocation_callbacks: Option<&vk::AllocationCallbacks>, + ) { + self.fp_v1_0().destroy_pipeline_layout( + self.handle(), + pipeline_layout, + allocation_callbacks.as_raw_ptr(), + ); + } + + #[doc = ""] + unsafe fn destroy_pipeline_cache( + &self, + pipeline_cache: vk::PipelineCache, + allocation_callbacks: Option<&vk::AllocationCallbacks>, + ) { + self.fp_v1_0().destroy_pipeline_cache( + self.handle(), + pipeline_cache, + allocation_callbacks.as_raw_ptr(), + ); + } + + #[doc = ""] + unsafe fn destroy_buffer( + &self, + buffer: vk::Buffer, + allocation_callbacks: Option<&vk::AllocationCallbacks>, + ) { + self.fp_v1_0() + .destroy_buffer(self.handle(), buffer, allocation_callbacks.as_raw_ptr()); + } + + #[doc = ""] + unsafe fn destroy_shader_module( + &self, + shader: vk::ShaderModule, + allocation_callbacks: Option<&vk::AllocationCallbacks>, + ) { + self.fp_v1_0().destroy_shader_module( + self.handle(), + shader, + allocation_callbacks.as_raw_ptr(), + ); + } + + #[doc = ""] + unsafe fn destroy_pipeline( + &self, + pipeline: vk::Pipeline, + allocation_callbacks: Option<&vk::AllocationCallbacks>, + ) { + self.fp_v1_0() + .destroy_pipeline(self.handle(), pipeline, allocation_callbacks.as_raw_ptr()); + } + + #[doc = ""] + unsafe fn destroy_semaphore( + &self, + semaphore: vk::Semaphore, + allocation_callbacks: Option<&vk::AllocationCallbacks>, + ) { + self.fp_v1_0().destroy_semaphore( + self.handle(), + semaphore, + allocation_callbacks.as_raw_ptr(), + ); + } + + #[doc = ""] + unsafe fn destroy_descriptor_pool( + &self, + pool: vk::DescriptorPool, + allocation_callbacks: Option<&vk::AllocationCallbacks>, + ) { + self.fp_v1_0().destroy_descriptor_pool( + self.handle(), + pool, + allocation_callbacks.as_raw_ptr(), + ); + } + + #[doc = ""] + unsafe fn destroy_query_pool( + &self, + pool: vk::QueryPool, + allocation_callbacks: Option<&vk::AllocationCallbacks>, + ) { + self.fp_v1_0() + .destroy_query_pool(self.handle(), pool, allocation_callbacks.as_raw_ptr()); + } + + #[doc = ""] + unsafe fn destroy_descriptor_set_layout( + &self, + layout: vk::DescriptorSetLayout, + allocation_callbacks: Option<&vk::AllocationCallbacks>, + ) { + self.fp_v1_0().destroy_descriptor_set_layout( + self.handle(), + layout, + allocation_callbacks.as_raw_ptr(), + ); + } + + #[doc = ""] + unsafe fn free_descriptor_sets( + &self, + pool: vk::DescriptorPool, + descriptor_sets: &[vk::DescriptorSet], + ) { + self.fp_v1_0().free_descriptor_sets( + self.handle(), + pool, + descriptor_sets.len() as u32, + descriptor_sets.as_ptr(), + ); + } + + #[doc = ""] + unsafe fn update_descriptor_sets( + &self, + descriptor_writes: &[vk::WriteDescriptorSet], + descriptor_copies: &[vk::CopyDescriptorSet], + ) { + 
self.fp_v1_0().update_descriptor_sets( + self.handle(), + descriptor_writes.len() as u32, + descriptor_writes.as_ptr(), + descriptor_copies.len() as u32, + descriptor_copies.as_ptr(), + ); + } + + #[doc = ""] + unsafe fn create_sampler( + &self, + create_info: &vk::SamplerCreateInfo, + allocation_callbacks: Option<&vk::AllocationCallbacks>, + ) -> VkResult { + let mut sampler = mem::uninitialized(); + let err_code = self.fp_v1_0().create_sampler( + self.handle(), + create_info, + allocation_callbacks.as_raw_ptr(), + &mut sampler, + ); + match err_code { + vk::Result::SUCCESS => Ok(sampler), + _ => Err(err_code), + } + } + + #[doc = ""] + unsafe fn cmd_blit_image( + &self, + command_buffer: vk::CommandBuffer, + src_image: vk::Image, + src_image_layout: vk::ImageLayout, + dst_image: vk::Image, + dst_image_layout: vk::ImageLayout, + regions: &[vk::ImageBlit], + filter: vk::Filter, + ) { + self.fp_v1_0().cmd_blit_image( + command_buffer, + src_image, + src_image_layout, + dst_image, + dst_image_layout, + regions.len() as _, + regions.as_ptr(), + filter, + ); + } + + #[doc = ""] + unsafe fn cmd_resolve_image( + &self, + command_buffer: vk::CommandBuffer, + src_image: vk::Image, + src_image_layout: vk::ImageLayout, + dst_image: vk::Image, + dst_image_layout: vk::ImageLayout, + regions: &[vk::ImageResolve], + ) { + self.fp_v1_0().cmd_resolve_image( + command_buffer, + src_image, + src_image_layout, + dst_image, + dst_image_layout, + regions.len() as u32, + regions.as_ptr(), + ); + } + + #[doc = ""] + unsafe fn cmd_fill_buffer( + &self, + command_buffer: vk::CommandBuffer, + buffer: vk::Buffer, + offset: vk::DeviceSize, + size: vk::DeviceSize, + data: u32, + ) { + self.fp_v1_0() + .cmd_fill_buffer(command_buffer, buffer, offset, size, data); + } + + #[doc = ""] + unsafe fn cmd_update_buffer( + &self, + command_buffer: vk::CommandBuffer, + buffer: vk::Buffer, + offset: vk::DeviceSize, + data: &[u8], + ) { + self.fp_v1_0().cmd_update_buffer( + command_buffer, + buffer, + offset, + data.len() as u64, + data.as_ptr() as _, + ); + } + + #[doc = ""] + unsafe fn cmd_copy_buffer( + &self, + command_buffer: vk::CommandBuffer, + src_buffer: vk::Buffer, + dst_buffer: vk::Buffer, + regions: &[vk::BufferCopy], + ) { + self.fp_v1_0().cmd_copy_buffer( + command_buffer, + src_buffer, + dst_buffer, + regions.len() as u32, + regions.as_ptr(), + ); + } + + #[doc = ""] + unsafe fn cmd_copy_image_to_buffer( + &self, + command_buffer: vk::CommandBuffer, + src_image: vk::Image, + src_image_layout: vk::ImageLayout, + dst_buffer: vk::Buffer, + regions: &[vk::BufferImageCopy], + ) { + self.fp_v1_0().cmd_copy_image_to_buffer( + command_buffer, + src_image, + src_image_layout, + dst_buffer, + regions.len() as u32, + regions.as_ptr(), + ); + } + + #[doc = ""] + unsafe fn cmd_copy_buffer_to_image( + &self, + command_buffer: vk::CommandBuffer, + src_buffer: vk::Buffer, + dst_image: vk::Image, + dst_image_layout: vk::ImageLayout, + regions: &[vk::BufferImageCopy], + ) { + self.fp_v1_0().cmd_copy_buffer_to_image( + command_buffer, + src_buffer, + dst_image, + dst_image_layout, + regions.len() as u32, + regions.as_ptr(), + ); + } + + #[doc = ""] + unsafe fn cmd_copy_image( + &self, + command_buffer: vk::CommandBuffer, + src_image: vk::Image, + src_image_layout: vk::ImageLayout, + dst_image: vk::Image, + dst_image_layout: vk::ImageLayout, + regions: &[vk::ImageCopy], + ) { + self.fp_v1_0().cmd_copy_image( + command_buffer, + src_image, + src_image_layout, + dst_image, + dst_image_layout, + regions.len() as u32, + regions.as_ptr(), 
+ ); + } + + #[doc = ""] + unsafe fn allocate_descriptor_sets( + &self, + create_info: &vk::DescriptorSetAllocateInfo, + ) -> VkResult> { + let mut desc_set = Vec::with_capacity(create_info.descriptor_set_count as usize); + let err_code = self.fp_v1_0().allocate_descriptor_sets( + self.handle(), + create_info, + desc_set.as_mut_ptr(), + ); + + desc_set.set_len(create_info.descriptor_set_count as usize); + match err_code { + vk::Result::SUCCESS => Ok(desc_set), + _ => Err(err_code), + } + } + + #[doc = ""] + unsafe fn create_descriptor_set_layout( + &self, + create_info: &vk::DescriptorSetLayoutCreateInfo, + allocation_callbacks: Option<&vk::AllocationCallbacks>, + ) -> VkResult { + let mut layout = mem::uninitialized(); + let err_code = self.fp_v1_0().create_descriptor_set_layout( + self.handle(), + create_info, + allocation_callbacks.as_raw_ptr(), + &mut layout, + ); + match err_code { + vk::Result::SUCCESS => Ok(layout), + _ => Err(err_code), + } + } + + #[doc = ""] + unsafe fn device_wait_idle(&self) -> VkResult<()> { + let err_code = self.fp_v1_0().device_wait_idle(self.handle()); + match err_code { + vk::Result::SUCCESS => Ok(()), + _ => Err(err_code), + } + } + + #[doc = ""] + unsafe fn create_descriptor_pool( + &self, + create_info: &vk::DescriptorPoolCreateInfo, + allocation_callbacks: Option<&vk::AllocationCallbacks>, + ) -> VkResult { + let mut pool = mem::uninitialized(); + let err_code = self.fp_v1_0().create_descriptor_pool( + self.handle(), + create_info, + allocation_callbacks.as_raw_ptr(), + &mut pool, + ); + match err_code { + vk::Result::SUCCESS => Ok(pool), + _ => Err(err_code), + } + } + + #[doc = ""] + unsafe fn reset_descriptor_pool( + &self, + pool: vk::DescriptorPool, + flags: vk::DescriptorPoolResetFlags, + ) -> VkResult<()> { + let err_code = self + .fp_v1_0() + .reset_descriptor_pool(self.handle(), pool, flags); + match err_code { + vk::Result::SUCCESS => Ok(()), + _ => Err(err_code), + } + } + + #[doc = ""] + unsafe fn reset_command_pool( + &self, + command_pool: vk::CommandPool, + flags: vk::CommandPoolResetFlags, + ) -> VkResult<()> { + let err_code = self + .fp_v1_0() + .reset_command_pool(self.handle(), command_pool, flags); + match err_code { + vk::Result::SUCCESS => Ok(()), + _ => Err(err_code), + } + } + + #[doc = ""] + unsafe fn reset_command_buffer( + &self, + command_buffer: vk::CommandBuffer, + flags: vk::CommandBufferResetFlags, + ) -> VkResult<()> { + let err_code = self.fp_v1_0().reset_command_buffer(command_buffer, flags); + match err_code { + vk::Result::SUCCESS => Ok(()), + _ => Err(err_code), + } + } + + #[doc = ""] + unsafe fn reset_fences(&self, fences: &[vk::Fence]) -> VkResult<()> { + let err_code = + self.fp_v1_0() + .reset_fences(self.handle(), fences.len() as u32, fences.as_ptr()); + match err_code { + vk::Result::SUCCESS => Ok(()), + _ => Err(err_code), + } + } + + #[doc = ""] + unsafe fn cmd_bind_index_buffer( + &self, + command_buffer: vk::CommandBuffer, + buffer: vk::Buffer, + offset: vk::DeviceSize, + index_type: vk::IndexType, + ) { + self.fp_v1_0() + .cmd_bind_index_buffer(command_buffer, buffer, offset, index_type); + } + + #[doc = ""] + unsafe fn cmd_clear_color_image( + &self, + command_buffer: vk::CommandBuffer, + image: vk::Image, + image_layout: vk::ImageLayout, + clear_color_value: &vk::ClearColorValue, + ranges: &[vk::ImageSubresourceRange], + ) { + self.fp_v1_0().cmd_clear_color_image( + command_buffer, + image, + image_layout, + clear_color_value, + ranges.len() as u32, + ranges.as_ptr(), + ); + } + + #[doc = ""] + 
unsafe fn cmd_clear_depth_stencil_image( + &self, + command_buffer: vk::CommandBuffer, + image: vk::Image, + image_layout: vk::ImageLayout, + clear_depth_stencil_value: &vk::ClearDepthStencilValue, + ranges: &[vk::ImageSubresourceRange], + ) { + self.fp_v1_0().cmd_clear_depth_stencil_image( + command_buffer, + image, + image_layout, + clear_depth_stencil_value, + ranges.len() as u32, + ranges.as_ptr(), + ); + } + + #[doc = ""] + unsafe fn cmd_clear_attachments( + &self, + command_buffer: vk::CommandBuffer, + attachments: &[vk::ClearAttachment], + rects: &[vk::ClearRect], + ) { + self.fp_v1_0().cmd_clear_attachments( + command_buffer, + attachments.len() as u32, + attachments.as_ptr(), + rects.len() as u32, + rects.as_ptr(), + ); + } + + #[doc = ""] + unsafe fn cmd_draw_indexed( + &self, + command_buffer: vk::CommandBuffer, + index_count: u32, + instance_count: u32, + first_index: u32, + vertex_offset: i32, + first_instance: u32, + ) { + self.fp_v1_0().cmd_draw_indexed( + command_buffer, + index_count, + instance_count, + first_index, + vertex_offset, + first_instance, + ); + } + + #[doc = ""] + unsafe fn cmd_draw_indexed_indirect( + &self, + command_buffer: vk::CommandBuffer, + buffer: vk::Buffer, + offset: vk::DeviceSize, + draw_count: u32, + stride: u32, + ) { + self.fp_v1_0().cmd_draw_indexed_indirect( + command_buffer, + buffer, + offset, + draw_count, + stride, + ); + } + + #[doc = ""] + unsafe fn cmd_execute_commands( + &self, + primary_command_buffer: vk::CommandBuffer, + secondary_command_buffers: &[vk::CommandBuffer], + ) { + self.fp_v1_0().cmd_execute_commands( + primary_command_buffer, + secondary_command_buffers.len() as u32, + secondary_command_buffers.as_ptr(), + ); + } + + #[doc = ""] + unsafe fn cmd_bind_descriptor_sets( + &self, + command_buffer: vk::CommandBuffer, + pipeline_bind_point: vk::PipelineBindPoint, + layout: vk::PipelineLayout, + first_set: u32, + descriptor_sets: &[vk::DescriptorSet], + dynamic_offsets: &[u32], + ) { + self.fp_v1_0().cmd_bind_descriptor_sets( + command_buffer, + pipeline_bind_point, + layout, + first_set, + descriptor_sets.len() as u32, + descriptor_sets.as_ptr(), + dynamic_offsets.len() as u32, + dynamic_offsets.as_ptr(), + ); + } + + #[doc = ""] + unsafe fn cmd_copy_query_pool_results( + &self, + command_buffer: vk::CommandBuffer, + query_pool: vk::QueryPool, + first_query: u32, + query_count: u32, + dst_buffer: vk::Buffer, + dst_offset: vk::DeviceSize, + stride: vk::DeviceSize, + flags: vk::QueryResultFlags, + ) { + self.fp_v1_0().cmd_copy_query_pool_results( + command_buffer, + query_pool, + first_query, + query_count, + dst_buffer, + dst_offset, + stride, + flags, + ); + } + + #[doc = ""] + unsafe fn cmd_push_constants( + &self, + command_buffer: vk::CommandBuffer, + layout: vk::PipelineLayout, + stage_flags: vk::ShaderStageFlags, + offset: u32, + constants: &[u8], + ) { + self.fp_v1_0().cmd_push_constants( + command_buffer, + layout, + stage_flags, + offset, + constants.len() as _, + constants.as_ptr() as _, + ); + } + + #[doc = ""] + unsafe fn cmd_begin_render_pass( + &self, + command_buffer: vk::CommandBuffer, + create_info: &vk::RenderPassBeginInfo, + contents: vk::SubpassContents, + ) { + self.fp_v1_0() + .cmd_begin_render_pass(command_buffer, create_info, contents); + } + + #[doc = ""] + unsafe fn cmd_next_subpass( + &self, + command_buffer: vk::CommandBuffer, + contents: vk::SubpassContents, + ) { + self.fp_v1_0().cmd_next_subpass(command_buffer, contents); + } + + #[doc = ""] + unsafe fn cmd_bind_pipeline( + &self, + 
command_buffer: vk::CommandBuffer, + pipeline_bind_point: vk::PipelineBindPoint, + pipeline: vk::Pipeline, + ) { + self.fp_v1_0() + .cmd_bind_pipeline(command_buffer, pipeline_bind_point, pipeline); + } + + #[doc = ""] + unsafe fn cmd_set_scissor( + &self, + command_buffer: vk::CommandBuffer, + first_scissor: u32, + scissors: &[vk::Rect2D], + ) { + self.fp_v1_0().cmd_set_scissor( + command_buffer, + first_scissor, + scissors.len() as u32, + scissors.as_ptr(), + ); + } + + #[doc = ""] + unsafe fn cmd_set_line_width(&self, command_buffer: vk::CommandBuffer, line_width: f32) { + self.fp_v1_0() + .cmd_set_line_width(command_buffer, line_width); + } + + #[doc = ""] + unsafe fn cmd_bind_vertex_buffers( + &self, + command_buffer: vk::CommandBuffer, + first_binding: u32, + buffers: &[vk::Buffer], + offsets: &[vk::DeviceSize], + ) { + debug_assert_eq!(buffers.len(), offsets.len()); + self.fp_v1_0().cmd_bind_vertex_buffers( + command_buffer, + first_binding, + buffers.len() as u32, + buffers.as_ptr(), + offsets.as_ptr(), + ); + } + + #[doc = ""] + unsafe fn cmd_end_render_pass(&self, command_buffer: vk::CommandBuffer) { + self.fp_v1_0().cmd_end_render_pass(command_buffer); + } + + #[doc = ""] + unsafe fn cmd_draw( + &self, + command_buffer: vk::CommandBuffer, + vertex_count: u32, + instance_count: u32, + first_vertex: u32, + first_instance: u32, + ) { + self.fp_v1_0().cmd_draw( + command_buffer, + vertex_count, + instance_count, + first_vertex, + first_instance, + ); + } + + #[doc = ""] + unsafe fn cmd_draw_indirect( + &self, + command_buffer: vk::CommandBuffer, + buffer: vk::Buffer, + offset: vk::DeviceSize, + draw_count: u32, + stride: u32, + ) { + self.fp_v1_0() + .cmd_draw_indirect(command_buffer, buffer, offset, draw_count, stride); + } + + #[doc = ""] + unsafe fn cmd_dispatch( + &self, + command_buffer: vk::CommandBuffer, + group_count_x: u32, + group_count_y: u32, + group_count_z: u32, + ) { + self.fp_v1_0() + .cmd_dispatch(command_buffer, group_count_x, group_count_y, group_count_z); + } + + #[doc = ""] + unsafe fn cmd_dispatch_indirect( + &self, + command_buffer: vk::CommandBuffer, + buffer: vk::Buffer, + offset: vk::DeviceSize, + ) { + self.fp_v1_0() + .cmd_dispatch_indirect(command_buffer, buffer, offset); + } + + #[doc = ""] + unsafe fn cmd_set_viewport( + &self, + command_buffer: vk::CommandBuffer, + first_viewport: u32, + viewports: &[vk::Viewport], + ) { + self.fp_v1_0().cmd_set_viewport( + command_buffer, + first_viewport, + viewports.len() as u32, + viewports.as_ptr(), + ); + } + + #[doc = ""] + unsafe fn cmd_set_depth_bias( + &self, + command_buffer: vk::CommandBuffer, + constant_factor: f32, + clamp: f32, + slope_factor: f32, + ) { + self.fp_v1_0() + .cmd_set_depth_bias(command_buffer, constant_factor, clamp, slope_factor); + } + + #[doc = ""] + unsafe fn cmd_set_blend_constants( + &self, + command_buffer: vk::CommandBuffer, + blend_constants: &[f32; 4], + ) { + self.fp_v1_0() + .cmd_set_blend_constants(command_buffer, blend_constants); + } + + #[doc = ""] + unsafe fn cmd_set_depth_bounds( + &self, + command_buffer: vk::CommandBuffer, + min_depth_bounds: f32, + max_depth_bounds: f32, + ) { + self.fp_v1_0() + .cmd_set_depth_bounds(command_buffer, min_depth_bounds, max_depth_bounds); + } + + #[doc = ""] + unsafe fn cmd_set_stencil_compare_mask( + &self, + command_buffer: vk::CommandBuffer, + face_mask: vk::StencilFaceFlags, + compare_mask: u32, + ) { + self.fp_v1_0() + .cmd_set_stencil_compare_mask(command_buffer, face_mask, compare_mask); + } + + #[doc = ""] + unsafe fn 
cmd_set_stencil_write_mask( + &self, + command_buffer: vk::CommandBuffer, + face_mask: vk::StencilFaceFlags, + write_mask: u32, + ) { + self.fp_v1_0() + .cmd_set_stencil_write_mask(command_buffer, face_mask, write_mask); + } + + #[doc = ""] + unsafe fn cmd_set_stencil_reference( + &self, + command_buffer: vk::CommandBuffer, + face_mask: vk::StencilFaceFlags, + reference: u32, + ) { + self.fp_v1_0() + .cmd_set_stencil_reference(command_buffer, face_mask, reference); + } + + #[doc = ""] + unsafe fn get_query_pool_results( + &self, + query_pool: vk::QueryPool, + first_query: u32, + query_count: u32, + data: &mut [T], + flags: vk::QueryResultFlags, + ) -> VkResult<()> { + let data_length = query_count as usize; + assert!( + mem::size_of::() <= mem::size_of::(), + "T can not be bigger than an u64" + ); + assert!( + data_length <= data.len(), + "query_count was higher than the length of the slice" + ); + let data_size = mem::size_of::() * data_length; + let err_code = self.fp_v1_0().get_query_pool_results( + self.handle(), + query_pool, + first_query, + query_count, + data_size, + data.as_mut_ptr() as *mut _, + mem::size_of::() as _, + flags, + ); + + match err_code { + vk::Result::SUCCESS => Ok(()), + _ => Err(err_code), + } + } + + #[doc = ""] + unsafe fn cmd_begin_query( + &self, + command_buffer: vk::CommandBuffer, + query_pool: vk::QueryPool, + query: u32, + flags: vk::QueryControlFlags, + ) { + self.fp_v1_0() + .cmd_begin_query(command_buffer, query_pool, query, flags); + } + + #[doc = ""] + unsafe fn cmd_end_query( + &self, + command_buffer: vk::CommandBuffer, + query_pool: vk::QueryPool, + query: u32, + ) { + self.fp_v1_0() + .cmd_end_query(command_buffer, query_pool, query); + } + + #[doc = ""] + unsafe fn cmd_reset_query_pool( + &self, + command_buffer: vk::CommandBuffer, + pool: vk::QueryPool, + first_query: u32, + query_count: u32, + ) { + self.fp_v1_0() + .cmd_reset_query_pool(command_buffer, pool, first_query, query_count); + } + + #[doc = ""] + unsafe fn cmd_write_timestamp( + &self, + command_buffer: vk::CommandBuffer, + pipeline_stage: vk::PipelineStageFlags, + query_pool: vk::QueryPool, + query: u32, + ) { + self.fp_v1_0() + .cmd_write_timestamp(command_buffer, pipeline_stage, query_pool, query); + } + + #[doc = ""] + unsafe fn create_semaphore( + &self, + create_info: &vk::SemaphoreCreateInfo, + allocation_callbacks: Option<&vk::AllocationCallbacks>, + ) -> VkResult { + let mut semaphore = mem::uninitialized(); + let err_code = self.fp_v1_0().create_semaphore( + self.handle(), + create_info, + allocation_callbacks.as_raw_ptr(), + &mut semaphore, + ); + match err_code { + vk::Result::SUCCESS => Ok(semaphore), + _ => Err(err_code), + } + } + + #[doc = ""] + unsafe fn create_graphics_pipelines( + &self, + pipeline_cache: vk::PipelineCache, + create_infos: &[vk::GraphicsPipelineCreateInfo], + allocation_callbacks: Option<&vk::AllocationCallbacks>, + ) -> Result, (Vec, vk::Result)> { + let mut pipelines = Vec::with_capacity(create_infos.len()); + let err_code = self.fp_v1_0().create_graphics_pipelines( + self.handle(), + pipeline_cache, + create_infos.len() as u32, + create_infos.as_ptr(), + allocation_callbacks.as_raw_ptr(), + pipelines.as_mut_ptr(), + ); + pipelines.set_len(create_infos.len()); + match err_code { + vk::Result::SUCCESS => Ok(pipelines), + _ => Err((pipelines, err_code)), + } + } + + #[doc = ""] + unsafe fn create_compute_pipelines( + &self, + pipeline_cache: vk::PipelineCache, + create_infos: &[vk::ComputePipelineCreateInfo], + allocation_callbacks: 
Option<&vk::AllocationCallbacks>, + ) -> Result, (Vec, vk::Result)> { + let mut pipelines = Vec::with_capacity(create_infos.len()); + let err_code = self.fp_v1_0().create_compute_pipelines( + self.handle(), + pipeline_cache, + create_infos.len() as u32, + create_infos.as_ptr(), + allocation_callbacks.as_raw_ptr(), + pipelines.as_mut_ptr(), + ); + pipelines.set_len(create_infos.len()); + match err_code { + vk::Result::SUCCESS => Ok(pipelines), + _ => Err((pipelines, err_code)), + } + } + + #[doc = ""] + unsafe fn create_buffer( + &self, + create_info: &vk::BufferCreateInfo, + allocation_callbacks: Option<&vk::AllocationCallbacks>, + ) -> VkResult { + let mut buffer = mem::uninitialized(); + let err_code = self.fp_v1_0().create_buffer( + self.handle(), + create_info, + allocation_callbacks.as_raw_ptr(), + &mut buffer, + ); + match err_code { + vk::Result::SUCCESS => Ok(buffer), + _ => Err(err_code), + } + } + + #[doc = ""] + unsafe fn create_pipeline_layout( + &self, + create_info: &vk::PipelineLayoutCreateInfo, + allocation_callbacks: Option<&vk::AllocationCallbacks>, + ) -> VkResult { + let mut pipeline_layout = mem::uninitialized(); + let err_code = self.fp_v1_0().create_pipeline_layout( + self.handle(), + create_info, + allocation_callbacks.as_raw_ptr(), + &mut pipeline_layout, + ); + match err_code { + vk::Result::SUCCESS => Ok(pipeline_layout), + _ => Err(err_code), + } + } + + #[doc = ""] + unsafe fn create_pipeline_cache( + &self, + create_info: &vk::PipelineCacheCreateInfo, + allocation_callbacks: Option<&vk::AllocationCallbacks>, + ) -> VkResult { + let mut pipeline_cache = mem::uninitialized(); + let err_code = self.fp_v1_0().create_pipeline_cache( + self.handle(), + create_info, + allocation_callbacks.as_raw_ptr(), + &mut pipeline_cache, + ); + + match err_code { + vk::Result::SUCCESS => Ok(pipeline_cache), + _ => Err(err_code), + } + } + + #[doc = ""] + unsafe fn get_pipeline_cache_data( + &self, + pipeline_cache: vk::PipelineCache, + ) -> VkResult> { + let mut data_size: usize = 0; + let err_code = self.fp_v1_0().get_pipeline_cache_data( + self.handle(), + pipeline_cache, + &mut data_size, + ptr::null_mut(), + ); + if err_code != vk::Result::SUCCESS { + return Err(err_code); + }; + let mut data: Vec = Vec::with_capacity(data_size); + let err_code = self.fp_v1_0().get_pipeline_cache_data( + self.handle(), + pipeline_cache, + &mut data_size, + data.as_mut_ptr() as _, + ); + data.set_len(data_size); + match err_code { + vk::Result::SUCCESS => Ok(data), + _ => Err(err_code), + } + } + + #[doc = ""] + unsafe fn map_memory( + &self, + memory: vk::DeviceMemory, + offset: vk::DeviceSize, + size: vk::DeviceSize, + flags: vk::MemoryMapFlags, + ) -> VkResult<*mut c_void> { + let mut data: *mut c_void = mem::uninitialized(); + let err_code = + self.fp_v1_0() + .map_memory(self.handle(), memory, offset, size, flags, &mut data); + match err_code { + vk::Result::SUCCESS => Ok(data), + _ => Err(err_code), + } + } + + #[doc = ""] + unsafe fn unmap_memory(&self, memory: vk::DeviceMemory) { + self.fp_v1_0().unmap_memory(self.handle(), memory); + } + + #[doc = ""] + unsafe fn invalidate_mapped_memory_ranges( + &self, + ranges: &[vk::MappedMemoryRange], + ) -> VkResult<()> { + let err_code = self.fp_v1_0().invalidate_mapped_memory_ranges( + self.handle(), + ranges.len() as u32, + ranges.as_ptr(), + ); + match err_code { + vk::Result::SUCCESS => Ok(()), + _ => Err(err_code), + } + } + + #[doc = ""] + unsafe fn flush_mapped_memory_ranges(&self, ranges: &[vk::MappedMemoryRange]) -> VkResult<()> { + 
let err_code = self.fp_v1_0().flush_mapped_memory_ranges( + self.handle(), + ranges.len() as u32, + ranges.as_ptr(), + ); + match err_code { + vk::Result::SUCCESS => Ok(()), + _ => Err(err_code), + } + } + + #[doc = ""] + unsafe fn create_framebuffer( + &self, + create_info: &vk::FramebufferCreateInfo, + allocation_callbacks: Option<&vk::AllocationCallbacks>, + ) -> VkResult { + let mut framebuffer = mem::uninitialized(); + let err_code = self.fp_v1_0().create_framebuffer( + self.handle(), + create_info, + allocation_callbacks.as_raw_ptr(), + &mut framebuffer, + ); + match err_code { + vk::Result::SUCCESS => Ok(framebuffer), + _ => Err(err_code), + } + } + + #[doc = ""] + unsafe fn get_device_queue(&self, queue_family_index: u32, queue_index: u32) -> vk::Queue { + let mut queue = mem::uninitialized(); + self.fp_v1_0() + .get_device_queue(self.handle(), queue_family_index, queue_index, &mut queue); + queue + } + + #[doc = ""] + unsafe fn cmd_pipeline_barrier( + &self, + command_buffer: vk::CommandBuffer, + src_stage_mask: vk::PipelineStageFlags, + dst_stage_mask: vk::PipelineStageFlags, + dependency_flags: vk::DependencyFlags, + memory_barriers: &[vk::MemoryBarrier], + buffer_memory_barriers: &[vk::BufferMemoryBarrier], + image_memory_barriers: &[vk::ImageMemoryBarrier], + ) { + self.fp_v1_0().cmd_pipeline_barrier( + command_buffer, + src_stage_mask, + dst_stage_mask, + dependency_flags, + memory_barriers.len() as u32, + memory_barriers.as_ptr(), + buffer_memory_barriers.len() as u32, + buffer_memory_barriers.as_ptr(), + image_memory_barriers.len() as u32, + image_memory_barriers.as_ptr(), + ); + } + + #[doc = ""] + unsafe fn create_render_pass( + &self, + create_info: &vk::RenderPassCreateInfo, + allocation_callbacks: Option<&vk::AllocationCallbacks>, + ) -> VkResult { + let mut renderpass = mem::uninitialized(); + let err_code = self.fp_v1_0().create_render_pass( + self.handle(), + create_info, + allocation_callbacks.as_raw_ptr(), + &mut renderpass, + ); + match err_code { + vk::Result::SUCCESS => Ok(renderpass), + _ => Err(err_code), + } + } + + #[doc = ""] + unsafe fn begin_command_buffer( + &self, + command_buffer: vk::CommandBuffer, + begin_info: &vk::CommandBufferBeginInfo, + ) -> VkResult<()> { + let err_code = self + .fp_v1_0() + .begin_command_buffer(command_buffer, begin_info); + match err_code { + vk::Result::SUCCESS => Ok(()), + _ => Err(err_code), + } + } + + #[doc = ""] + unsafe fn end_command_buffer(&self, command_buffer: vk::CommandBuffer) -> VkResult<()> { + let err_code = self.fp_v1_0().end_command_buffer(command_buffer); + match err_code { + vk::Result::SUCCESS => Ok(()), + _ => Err(err_code), + } + } + + #[doc = ""] + unsafe fn wait_for_fences( + &self, + fences: &[vk::Fence], + wait_all: bool, + timeout: u64, + ) -> VkResult<()> { + let err_code = self.fp_v1_0().wait_for_fences( + self.handle(), + fences.len() as u32, + fences.as_ptr(), + wait_all as u32, + timeout, + ); + match err_code { + vk::Result::SUCCESS => Ok(()), + _ => Err(err_code), + } + } + + #[doc = ""] + unsafe fn get_fence_status(&self, fence: vk::Fence) -> VkResult<()> { + let err_code = self.fp_v1_0().get_fence_status(self.handle(), fence); + match err_code { + vk::Result::SUCCESS => Ok(()), + _ => Err(err_code), + } + } + + #[doc = ""] + unsafe fn queue_wait_idle(&self, queue: vk::Queue) -> VkResult<()> { + let err_code = self.fp_v1_0().queue_wait_idle(queue); + match err_code { + vk::Result::SUCCESS => Ok(()), + _ => Err(err_code), + } + } + + #[doc = ""] + unsafe fn queue_submit( + &self, + queue: 
vk::Queue, + submits: &[vk::SubmitInfo], + fence: vk::Fence, + ) -> VkResult<()> { + let err_code = + self.fp_v1_0() + .queue_submit(queue, submits.len() as u32, submits.as_ptr(), fence); + match err_code { + vk::Result::SUCCESS => Ok(()), + _ => Err(err_code), + } + } + + #[doc = ""] + unsafe fn create_buffer_view( + &self, + create_info: &vk::BufferViewCreateInfo, + allocation_callbacks: Option<&vk::AllocationCallbacks>, + ) -> VkResult { + let mut buffer_view = mem::uninitialized(); + let err_code = self.fp_v1_0().create_buffer_view( + self.handle(), + create_info, + allocation_callbacks.as_raw_ptr(), + &mut buffer_view, + ); + match err_code { + vk::Result::SUCCESS => Ok(buffer_view), + _ => Err(err_code), + } + } + + #[doc = ""] + unsafe fn destroy_buffer_view( + &self, + buffer_view: vk::BufferView, + allocation_callbacks: Option<&vk::AllocationCallbacks>, + ) { + self.fp_v1_0().destroy_buffer_view( + self.handle(), + buffer_view, + allocation_callbacks.as_raw_ptr(), + ); + } + + #[doc = ""] + unsafe fn create_image_view( + &self, + create_info: &vk::ImageViewCreateInfo, + allocation_callbacks: Option<&vk::AllocationCallbacks>, + ) -> VkResult { + let mut image_view = mem::uninitialized(); + let err_code = self.fp_v1_0().create_image_view( + self.handle(), + create_info, + allocation_callbacks.as_raw_ptr(), + &mut image_view, + ); + match err_code { + vk::Result::SUCCESS => Ok(image_view), + _ => Err(err_code), + } + } + + #[doc = ""] + unsafe fn allocate_command_buffers( + &self, + create_info: &vk::CommandBufferAllocateInfo, + ) -> VkResult> { + let mut buffers = Vec::with_capacity(create_info.command_buffer_count as usize); + let err_code = self.fp_v1_0().allocate_command_buffers( + self.handle(), + create_info, + buffers.as_mut_ptr(), + ); + buffers.set_len(create_info.command_buffer_count as usize); + match err_code { + vk::Result::SUCCESS => Ok(buffers), + _ => Err(err_code), + } + } + + #[doc = ""] + unsafe fn create_command_pool( + &self, + create_info: &vk::CommandPoolCreateInfo, + allocation_callbacks: Option<&vk::AllocationCallbacks>, + ) -> VkResult { + let mut pool = mem::uninitialized(); + let err_code = self.fp_v1_0().create_command_pool( + self.handle(), + create_info, + allocation_callbacks.as_raw_ptr(), + &mut pool, + ); + match err_code { + vk::Result::SUCCESS => Ok(pool), + _ => Err(err_code), + } + } + + #[doc = ""] + unsafe fn create_query_pool( + &self, + create_info: &vk::QueryPoolCreateInfo, + allocation_callbacks: Option<&vk::AllocationCallbacks>, + ) -> VkResult { + let mut pool = mem::uninitialized(); + let err_code = self.fp_v1_0().create_query_pool( + self.handle(), + create_info, + allocation_callbacks.as_raw_ptr(), + &mut pool, + ); + match err_code { + vk::Result::SUCCESS => Ok(pool), + _ => Err(err_code), + } + } + + #[doc = ""] + unsafe fn create_image( + &self, + create_info: &vk::ImageCreateInfo, + allocation_callbacks: Option<&vk::AllocationCallbacks>, + ) -> VkResult { + let mut image = mem::uninitialized(); + let err_code = self.fp_v1_0().create_image( + self.handle(), + create_info, + allocation_callbacks.as_raw_ptr(), + &mut image, + ); + match err_code { + vk::Result::SUCCESS => Ok(image), + _ => Err(err_code), + } + } + + #[doc = ""] + unsafe fn get_image_subresource_layout( + &self, + image: vk::Image, + subresource: vk::ImageSubresource, + ) -> vk::SubresourceLayout { + let mut layout = mem::uninitialized(); + self.fp_v1_0().get_image_subresource_layout( + self.handle(), + image, + &subresource, + &mut layout, + ); + layout + } + + #[doc 
= ""] + unsafe fn get_image_memory_requirements(&self, image: vk::Image) -> vk::MemoryRequirements { + let mut mem_req = mem::uninitialized(); + self.fp_v1_0() + .get_image_memory_requirements(self.handle(), image, &mut mem_req); + mem_req + } + + #[doc = ""] + unsafe fn get_buffer_memory_requirements(&self, buffer: vk::Buffer) -> vk::MemoryRequirements { + let mut mem_req = mem::uninitialized(); + self.fp_v1_0() + .get_buffer_memory_requirements(self.handle(), buffer, &mut mem_req); + mem_req + } + + #[doc = ""] + unsafe fn allocate_memory( + &self, + create_info: &vk::MemoryAllocateInfo, + allocation_callbacks: Option<&vk::AllocationCallbacks>, + ) -> VkResult { + let mut memory = mem::uninitialized(); + let err_code = self.fp_v1_0().allocate_memory( + self.handle(), + create_info, + allocation_callbacks.as_raw_ptr(), + &mut memory, + ); + match err_code { + vk::Result::SUCCESS => Ok(memory), + _ => Err(err_code), + } + } + + #[doc = ""] + unsafe fn create_shader_module( + &self, + create_info: &vk::ShaderModuleCreateInfo, + allocation_callbacks: Option<&vk::AllocationCallbacks>, + ) -> VkResult { + let mut shader = mem::uninitialized(); + let err_code = self.fp_v1_0().create_shader_module( + self.handle(), + create_info, + allocation_callbacks.as_raw_ptr(), + &mut shader, + ); + match err_code { + vk::Result::SUCCESS => Ok(shader), + _ => Err(err_code), + } + } + + #[doc = ""] + unsafe fn create_fence( + &self, + create_info: &vk::FenceCreateInfo, + allocation_callbacks: Option<&vk::AllocationCallbacks>, + ) -> VkResult { + let mut fence = mem::uninitialized(); + let err_code = self.fp_v1_0().create_fence( + self.handle(), + create_info, + allocation_callbacks.as_raw_ptr(), + &mut fence, + ); + match err_code { + vk::Result::SUCCESS => Ok(fence), + _ => Err(err_code), + } + } + + #[doc = ""] + unsafe fn bind_buffer_memory( + &self, + buffer: vk::Buffer, + device_memory: vk::DeviceMemory, + offset: vk::DeviceSize, + ) -> VkResult<()> { + let err_code = + self.fp_v1_0() + .bind_buffer_memory(self.handle(), buffer, device_memory, offset); + match err_code { + vk::Result::SUCCESS => Ok(()), + _ => Err(err_code), + } + } + + #[doc = ""] + unsafe fn bind_image_memory( + &self, + image: vk::Image, + device_memory: vk::DeviceMemory, + offset: vk::DeviceSize, + ) -> VkResult<()> { + let err_code = + self.fp_v1_0() + .bind_image_memory(self.handle(), image, device_memory, offset); + match err_code { + vk::Result::SUCCESS => Ok(()), + _ => Err(err_code), + } + } +} + +#[doc = ""] +#[derive(Clone)] +pub struct Device { + handle: vk::Device, + device_fn_1_0: vk::DeviceFnV1_0, + device_fn_1_1: vk::DeviceFnV1_1, +} +impl Device { + pub unsafe fn load(instance_fn: &vk::InstanceFnV1_0, device: vk::Device) -> Self { + let device_fn_1_0 = vk::DeviceFnV1_0::load(|name| { + mem::transmute(instance_fn.get_device_proc_addr(device, name.as_ptr())) + }); + let device_fn_1_1 = vk::DeviceFnV1_1::load(|name| { + mem::transmute(instance_fn.get_device_proc_addr(device, name.as_ptr())) + }); + Device { + handle: device, + device_fn_1_0, + device_fn_1_1, + } + } +} + +impl DeviceV1_0 for Device { + fn handle(&self) -> vk::Device { + self.handle + } + + fn fp_v1_0(&self) -> &vk::DeviceFnV1_0 { + &self.device_fn_1_0 + } +} + +impl DeviceV1_1 for Device { + fn fp_v1_1(&self) -> &vk::DeviceFnV1_1 { + &self.device_fn_1_1 + } +} + +impl Device { + pub fn handle(&self) -> vk::Device { + self.handle + } +} diff --git a/third_party/rust/ash/src/entry.rs b/third_party/rust/ash/src/entry.rs new file mode 100644 index 
000000000000..3f7979f6a8b1 --- /dev/null +++ b/third_party/rust/ash/src/entry.rs @@ -0,0 +1,281 @@ +use instance::Instance; +use prelude::*; +use shared_library::dynamic_library::DynamicLibrary; +use std::error::Error; +use std::fmt; +use std::mem; +use std::os::raw::c_char; +use std::os::raw::c_void; +use std::path::Path; +use std::ptr; +use std::sync::Arc; +use vk; +use RawPtr; + +#[cfg(windows)] +const LIB_PATH: &'static str = "vulkan-1.dll"; + +#[cfg(all( + unix, + not(any(target_os = "macos", target_os = "ios", target_os = "android")) +))] +const LIB_PATH: &'static str = "libvulkan.so.1"; + +#[cfg(target_os = "android")] +const LIB_PATH: &'static str = "libvulkan.so"; + +#[cfg(any(target_os = "macos", target_os = "ios"))] +const LIB_PATH: &'static str = "libvulkan.dylib"; + + +pub type Entry = EntryCustom>; + + +#[derive(Clone)] +pub struct EntryCustom { + static_fn: vk::StaticFn, + entry_fn_1_0: vk::EntryFnV1_0, + entry_fn_1_1: vk::EntryFnV1_1, + lib: L, +} + +#[derive(Debug)] +pub enum LoadingError { + LibraryLoadError(String), +} + +impl fmt::Display for LoadingError { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + match self { + LoadingError::LibraryLoadError(e) => write!(f, "{}", e), + } + } +} + +impl Error for LoadingError {} + +#[derive(Debug)] +pub enum InstanceError { + LoadError(Vec<&'static str>), + VkError(vk::Result), +} + +impl fmt::Display for InstanceError { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + match self { + InstanceError::LoadError(e) => write!(f, "{}", e.join("; ")), + InstanceError::VkError(e) => write!(f, "{}", e), + } + } +} + +impl Error for InstanceError {} + +#[allow(non_camel_case_types)] +pub trait EntryV1_0 { + type Instance; + fn fp_v1_0(&self) -> &vk::EntryFnV1_0; + fn static_fn(&self) -> &vk::StaticFn; + #[doc = ""] + unsafe fn create_instance( + &self, + create_info: &vk::InstanceCreateInfo, + allocation_callbacks: Option<&vk::AllocationCallbacks>, + ) -> Result; + + #[doc = ""] + fn enumerate_instance_layer_properties(&self) -> VkResult> { + unsafe { + let mut num = 0; + self.fp_v1_0() + .enumerate_instance_layer_properties(&mut num, ptr::null_mut()); + + let mut v = Vec::with_capacity(num as usize); + let err_code = self + .fp_v1_0() + .enumerate_instance_layer_properties(&mut num, v.as_mut_ptr()); + v.set_len(num as usize); + match err_code { + vk::Result::SUCCESS => Ok(v), + _ => Err(err_code), + } + } + } + + #[doc = ""] + fn enumerate_instance_extension_properties(&self) -> VkResult> { + unsafe { + let mut num = 0; + self.fp_v1_0().enumerate_instance_extension_properties( + ptr::null(), + &mut num, + ptr::null_mut(), + ); + let mut data = Vec::with_capacity(num as usize); + let err_code = self.fp_v1_0().enumerate_instance_extension_properties( + ptr::null(), + &mut num, + data.as_mut_ptr(), + ); + data.set_len(num as usize); + match err_code { + vk::Result::SUCCESS => Ok(data), + _ => Err(err_code), + } + } + } + + #[doc = ""] + fn get_instance_proc_addr( + &self, + instance: vk::Instance, + p_name: *const c_char, + ) -> vk::PFN_vkVoidFunction { + unsafe { self.static_fn().get_instance_proc_addr(instance, p_name) } + } +} + +impl EntryV1_0 for EntryCustom { + type Instance = Instance; + #[doc = ""] + unsafe fn create_instance( + &self, + create_info: &vk::InstanceCreateInfo, + allocation_callbacks: Option<&vk::AllocationCallbacks>, + ) -> Result { + let mut instance: vk::Instance = mem::uninitialized(); + let err_code = self.fp_v1_0().create_instance( + create_info, + allocation_callbacks.as_raw_ptr(), + &mut 
instance, + ); + if err_code != vk::Result::SUCCESS { + return Err(InstanceError::VkError(err_code)); + } + Ok(Instance::load(&self.static_fn, instance)) + } + fn fp_v1_0(&self) -> &vk::EntryFnV1_0 { + &self.entry_fn_1_0 + } + fn static_fn(&self) -> &vk::StaticFn { + &self.static_fn + } +} + +#[allow(non_camel_case_types)] +pub trait EntryV1_1: EntryV1_0 { + fn fp_v1_1(&self) -> &vk::EntryFnV1_1; + + #[doc = ""] + fn enumerate_instance_version(&self) -> VkResult { + unsafe { + let mut api_version = 0; + let err_code = self.fp_v1_1().enumerate_instance_version(&mut api_version); + match err_code { + vk::Result::SUCCESS => Ok(api_version), + _ => Err(err_code), + } + } + } +} + +impl EntryCustom> { + + + + + + + + + + + + + + + + + + pub fn new() -> Result { + Self::new_custom( + || { + DynamicLibrary::open(Some(&Path::new(LIB_PATH))) + .map_err(|err| LoadingError::LibraryLoadError(err.clone())) + .map(|dl| Arc::new(dl)) + }, + |vk_lib, name| unsafe { + vk_lib + .symbol(&*name.to_string_lossy()) + .unwrap_or(ptr::null_mut()) + }, + ) + } +} + +impl EntryCustom { + pub fn new_custom(open: Open, mut load: Load) -> Result + where + Open: FnOnce() -> Result, + Load: FnMut(&mut L, &::std::ffi::CStr) -> *const c_void, + { + let mut lib = open()?; + let static_fn = vk::StaticFn::load(|name| load(&mut lib, name)); + + let entry_fn_1_0 = vk::EntryFnV1_0::load(|name| unsafe { + mem::transmute(static_fn.get_instance_proc_addr(vk::Instance::null(), name.as_ptr())) + }); + + let entry_fn_1_1 = vk::EntryFnV1_1::load(|name| unsafe { + mem::transmute(static_fn.get_instance_proc_addr(vk::Instance::null(), name.as_ptr())) + }); + + Ok(EntryCustom { + static_fn, + entry_fn_1_0, + entry_fn_1_1, + lib, + }) + } + + #[doc = ""] + + + + + + + + + + + + + + + + + + + pub fn try_enumerate_instance_version(&self) -> VkResult> { + unsafe { + let mut api_version = 0; + let enumerate_instance_version: Option = { + let name = b"vkEnumerateInstanceVersion\0".as_ptr() as *const _; + mem::transmute( + self.static_fn() + .get_instance_proc_addr(vk::Instance::null(), name), + ) + }; + if let Some(enumerate_instance_version) = enumerate_instance_version { + let err_code = (enumerate_instance_version)(&mut api_version); + match err_code { + vk::Result::SUCCESS => Ok(Some(api_version)), + _ => Err(err_code), + } + } else { + Ok(None) + } + } + } +} diff --git a/third_party/rust/ash/src/extensions/experimental/amd.rs b/third_party/rust/ash/src/extensions/experimental/amd.rs new file mode 100644 index 000000000000..825441b981ef --- /dev/null +++ b/third_party/rust/ash/src/extensions/experimental/amd.rs @@ -0,0 +1,701 @@ + + + + + + + + + + + + + + + + + + + + + + + + + +use std::fmt; +use std::os::raw::*; +use vk::*; + + + +#[repr(transparent)] +#[derive(Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash)] +pub struct GpaSqShaderStageFlags(pub(crate) Flags); +vk_bitflags_wrapped!( + GpaSqShaderStageFlags, + 0b1111111111111111111111111111111, + Flags +); +impl fmt::Debug for GpaSqShaderStageFlags { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + const KNOWN: &[(Flags, &str)] = &[ + (GpaSqShaderStageFlags::PS.0, "PS"), + (GpaSqShaderStageFlags::VS.0, "VS"), + (GpaSqShaderStageFlags::GS.0, "GS"), + (GpaSqShaderStageFlags::ES.0, "ES"), + (GpaSqShaderStageFlags::HS.0, "HS"), + (GpaSqShaderStageFlags::LS.0, "LS"), + (GpaSqShaderStageFlags::CS.0, "CS"), + ]; + debug_flags(f, KNOWN, self.0) + } +} +impl GpaSqShaderStageFlags { + pub const PS: Self = GpaSqShaderStageFlags(0b1); + pub const VS: Self = GpaSqShaderStageFlags(0b10); 
+ pub const GS: Self = GpaSqShaderStageFlags(0b100); + pub const ES: Self = GpaSqShaderStageFlags(0b1000); + pub const HS: Self = GpaSqShaderStageFlags(0b10000); + pub const LS: Self = GpaSqShaderStageFlags(0b100000); + pub const CS: Self = GpaSqShaderStageFlags(0b1000000); +} + +impl StructureType { + pub const PHYSICAL_DEVICE_GPA_FEATURES_AMD: Self = StructureType(1000133000); + pub const PHYSICAL_DEVICE_GPA_PROPERTIES_AMD: Self = StructureType(1000133001); + pub const GPA_SAMPLE_BEGIN_INFO_AMD: Self = StructureType(1000133002); + pub const GPA_SESSION_CREATE_INFO_AMD: Self = StructureType(1000133003); + pub const GPA_DEVICE_CLOCK_MODE_INFO_AMD: Self = StructureType(1000133004); +} + +#[derive(Debug, Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash, Default)] +#[repr(transparent)] +pub struct GpaDeviceClockModeAmd(pub(crate) i32); +impl GpaDeviceClockModeAmd { + pub fn from_raw(x: i32) -> Self { + GpaDeviceClockModeAmd(x) + } + pub fn as_raw(self) -> i32 { + self.0 + } +} +impl GpaDeviceClockModeAmd { + pub const DEFAULT: Self = GpaDeviceClockModeAmd(0); + pub const QUERY: Self = GpaDeviceClockModeAmd(1); + pub const PROFILING: Self = GpaDeviceClockModeAmd(2); + pub const MIN_MEMORY: Self = GpaDeviceClockModeAmd(3); + pub const MIN_ENGINE: Self = GpaDeviceClockModeAmd(4); + pub const PEAK: Self = GpaDeviceClockModeAmd(5); +} + +#[derive(Debug, Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash, Default)] +#[repr(transparent)] +pub struct GpaPerfBlockAmd(pub(crate) i32); +impl GpaPerfBlockAmd { + pub fn from_raw(x: i32) -> Self { + GpaPerfBlockAmd(x) + } + pub fn as_raw(self) -> i32 { + self.0 + } +} +impl GpaPerfBlockAmd { + pub const CPF: Self = GpaPerfBlockAmd(0); + pub const IA: Self = GpaPerfBlockAmd(1); + pub const VGT: Self = GpaPerfBlockAmd(2); + pub const PA: Self = GpaPerfBlockAmd(3); + pub const SC: Self = GpaPerfBlockAmd(4); + pub const SPI: Self = GpaPerfBlockAmd(5); + pub const SQ: Self = GpaPerfBlockAmd(6); + pub const SX: Self = GpaPerfBlockAmd(7); + pub const TA: Self = GpaPerfBlockAmd(8); + pub const TD: Self = GpaPerfBlockAmd(9); + pub const TCP: Self = GpaPerfBlockAmd(10); + pub const TCC: Self = GpaPerfBlockAmd(11); + pub const TCA: Self = GpaPerfBlockAmd(12); + pub const DB: Self = GpaPerfBlockAmd(13); + pub const CB: Self = GpaPerfBlockAmd(14); + pub const GDS: Self = GpaPerfBlockAmd(15); + pub const SRBM: Self = GpaPerfBlockAmd(16); + pub const GRBM: Self = GpaPerfBlockAmd(17); + pub const GRBM_SE: Self = GpaPerfBlockAmd(18); + pub const RLC: Self = GpaPerfBlockAmd(19); + pub const DMA: Self = GpaPerfBlockAmd(20); + pub const MC: Self = GpaPerfBlockAmd(21); + pub const CPG: Self = GpaPerfBlockAmd(22); + pub const CPC: Self = GpaPerfBlockAmd(23); + pub const WD: Self = GpaPerfBlockAmd(24); + pub const TCS: Self = GpaPerfBlockAmd(25); + pub const ATC: Self = GpaPerfBlockAmd(26); + pub const ATC_L2: Self = GpaPerfBlockAmd(27); + pub const MC_VM_L2: Self = GpaPerfBlockAmd(28); + pub const EA: Self = GpaPerfBlockAmd(29); + pub const RPB: Self = GpaPerfBlockAmd(30); + pub const RMI: Self = GpaPerfBlockAmd(31); +} + +#[derive(Debug, Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash, Default)] +#[repr(transparent)] +pub struct GpaSampleTypeAmd(pub(crate) i32); +impl GpaSampleTypeAmd { + pub fn from_raw(x: i32) -> Self { + GpaSampleTypeAmd(x) + } + pub fn as_raw(self) -> i32 { + self.0 + } +} +impl GpaSampleTypeAmd { + pub const CUMULATIVE: Self = GpaSampleTypeAmd(0); + pub const TRACE: Self = GpaSampleTypeAmd(1); + pub const TIMING: Self = GpaSampleTypeAmd(2); +} + 
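// A minimal usage sketch for the open-enum newtypes defined above; this is an
// illustrative aside, not part of the vendored amd.rs. Each type is a
// #[repr(transparent)] wrapper over i32 with named associated constants, so
// raw driver values the binding does not name still round-trip through
// from_raw/as_raw. The `describe` helper and the
// `ash::extensions::experimental::amd` import path are assumptions made for
// the sake of a self-contained example.
use ash::extensions::experimental::amd::GpaDeviceClockModeAmd;

fn describe(mode: GpaDeviceClockModeAmd) -> &'static str {
    // Associated consts cannot appear in match patterns, so equality chains
    // are the usual idiom for these open enums.
    if mode == GpaDeviceClockModeAmd::PROFILING {
        "profiling (stable clocks for reproducible counters)"
    } else if mode == GpaDeviceClockModeAmd::PEAK {
        "peak clocks"
    } else {
        "default or vendor-specific"
    }
}

fn main() {
    let mode = GpaDeviceClockModeAmd::from_raw(2); // PROFILING is defined as 2 above
    assert_eq!(mode, GpaDeviceClockModeAmd::PROFILING);
    assert_eq!(mode.as_raw(), 2);
    println!("{}", describe(mode));
}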
+handle_nondispatchable!(GpaSessionAmd, UNKNOWN); + +#[repr(C)] +#[derive(Copy, Clone, Debug)] +pub struct GpaSessionCreateInfoAmd { + pub s_type: StructureType, + pub p_next: *const c_void, + pub secondary_copy_source: GpaSessionAmd, +} + +#[repr(C)] +#[derive(Copy, Clone, Debug)] +pub struct GpaPerfBlockPropertiesAmd { + pub block_type: GpaPerfBlockAmd, + pub flags: Flags, + pub instance_count: u32, + pub max_event_id: u32, + pub max_global_only_counters: u32, + pub max_global_shared_counters: u32, + pub max_streaming_counters: u32, +} + +#[repr(C)] +#[derive(Copy, Clone, Debug)] +pub struct PhysicalDeviceGpaFeaturesAmd { + pub s_type: StructureType, + pub p_next: *const c_void, + pub perf_counters: Bool32, + pub streaming_perf_counters: Bool32, + pub sq_thread_tracing: Bool32, + pub clock_modes: Bool32, +} + +#[repr(C)] +#[derive(Copy, Clone, Debug)] +pub struct PhysicalDeviceGpaPropertiesAmd { + pub s_type: StructureType, + pub p_next: *const c_void, + pub flags: Flags, + pub max_sqtt_se_buffer_size: DeviceSize, + pub shader_engine_count: u32, + pub perf_block_count: u32, + pub p_perf_block_properties: *mut GpaPerfBlockPropertiesAmd, +} + +impl ::std::default::Default for PhysicalDeviceGpaPropertiesAmd { + fn default() -> PhysicalDeviceGpaPropertiesAmd { + PhysicalDeviceGpaPropertiesAmd { + s_type: StructureType::PHYSICAL_DEVICE_GPA_PROPERTIES_AMD, + p_next: ::std::ptr::null_mut(), + flags: Flags::default(), + max_sqtt_se_buffer_size: DeviceSize::default(), + shader_engine_count: u32::default(), + perf_block_count: u32::default(), + p_perf_block_properties: ::std::ptr::null_mut(), + } + } +} +impl PhysicalDeviceGpaPropertiesAmd { + pub fn builder<'a>() -> PhysicalDeviceGpaPropertiesAmdBuilder<'a> { + PhysicalDeviceGpaPropertiesAmdBuilder { + inner: PhysicalDeviceGpaPropertiesAmd::default(), + marker: ::std::marker::PhantomData, + } + } +} +pub struct PhysicalDeviceGpaPropertiesAmdBuilder<'a> { + inner: PhysicalDeviceGpaPropertiesAmd, + marker: ::std::marker::PhantomData<&'a ()>, +} +pub unsafe trait ExtendsPhysicalDeviceGpaPropertiesAmd {} +unsafe impl ExtendsPhysicalDeviceProperties2 for PhysicalDeviceGpaPropertiesAmd {} +impl<'a> ::std::ops::Deref for PhysicalDeviceGpaPropertiesAmdBuilder<'a> { + type Target = PhysicalDeviceGpaPropertiesAmd; + fn deref(&self) -> &Self::Target { + &self.inner + } +} +impl<'a> PhysicalDeviceGpaPropertiesAmdBuilder<'a> { + pub fn next(mut self, next: &'a mut T) -> PhysicalDeviceGpaPropertiesAmdBuilder<'a> + where + T: ExtendsPhysicalDeviceGpaPropertiesAmd, + { + self.inner.p_next = next as *mut T as *mut c_void; + self + } + pub fn build(self) -> PhysicalDeviceGpaPropertiesAmd { + self.inner + } +} + +#[repr(C)] +#[derive(Copy, Clone, Debug)] +pub struct GpaPerfCounterAmd { + pub block_type: GpaPerfBlockAmd, + pub block_instance: u32, + pub event_id: u32, +} + +#[repr(C)] +#[derive(Copy, Clone, Debug)] +pub struct GpaSampleBeginInfoAmd { + pub s_type: StructureType, + pub p_next: *const c_void, + pub sample_type: GpaSampleTypeAmd, + pub sample_internal_operations: Bool32, + pub cache_flush_on_counter_collection: Bool32, + pub sq_shader_mask_enable: Bool32, + pub sq_shader_mask: GpaSqShaderStageFlags, + pub perf_counter_count: u32, + pub p_perf_counters: *const GpaPerfCounterAmd, + pub streaming_perf_trace_sample_interval: u32, + pub perf_counter_device_memory_limit: DeviceSize, + pub sq_thread_trace_enable: Bool32, + pub sq_thread_trace_suppress_instruction_tokens: Bool32, + pub sq_thread_trace_device_memory_limit: DeviceSize, + pub timing_pre_sample: 
PipelineStageFlags, + pub timing_post_sample: PipelineStageFlags, +} + +#[repr(C)] +#[derive(Copy, Clone, Debug)] +pub struct GpaDeviceClockModeInfoAmd { + pub s_type: StructureType, + pub p_next: *const c_void, + pub clock_mode: GpaDeviceClockModeAmd, + pub memory_clock_ratio_to_peak: f32, + pub engine_clock_ratio_to_peak: f32, +} + +#[allow(non_camel_case_types)] +pub type PFN_vkCreateGpaSessionAMD = extern "system" fn( + device: Device, + p_create_info: *const GpaSessionCreateInfoAmd, + p_allocator: *const AllocationCallbacks, + p_gpa_session: *mut GpaSessionAmd, +) -> Result; + +#[allow(non_camel_case_types)] +pub type PFN_vkDestroyGpaSessionAMD = extern "system" fn( + device: Device, + gpa_session: GpaSessionAmd, + p_allocator: *const AllocationCallbacks, +) -> c_void; + +#[allow(non_camel_case_types)] +pub type PFN_vkSetGpaDeviceClockModeAMD = + extern "system" fn(device: Device, p_info: *mut GpaDeviceClockModeInfoAmd) -> Result; + +#[allow(non_camel_case_types)] +pub type PFN_vkCmdBeginGpaSessionAMD = + extern "system" fn(commandBuffer: CommandBuffer, gpa_session: GpaSessionAmd) -> Result; + +#[allow(non_camel_case_types)] +pub type PFN_vkCmdEndGpaSessionAMD = + extern "system" fn(commandBuffer: CommandBuffer, gpa_session: GpaSessionAmd) -> Result; + +#[allow(non_camel_case_types)] +pub type PFN_vkCmdBeginGpaSampleAMD = extern "system" fn( + commandBuffer: CommandBuffer, + gpa_session: GpaSessionAmd, + p_gpa_sample_begin_info: *const GpaSampleBeginInfoAmd, + p_sample_id: *mut u32, +) -> Result; + +#[allow(non_camel_case_types)] +pub type PFN_vkCmdEndGpaSampleAMD = extern "system" fn( + commandBuffer: CommandBuffer, + gpa_session: GpaSessionAmd, + sample_id: u32, +) -> c_void; + +#[allow(non_camel_case_types)] +pub type PFN_vkGetGpaSessionStatusAMD = + extern "system" fn(device: Device, gpaSession: GpaSessionAmd) -> Result; + +#[allow(non_camel_case_types)] +pub type PFN_vkGetGpaSessionResultsAMD = extern "system" fn( + device: Device, + gpaSession: GpaSessionAmd, + sample_id: u32, + p_size_in_bytes: *mut usize, + p_data: *mut c_void, +) -> Result; + +#[allow(non_camel_case_types)] +pub type PFN_vkResetGpaSessionAMD = + extern "system" fn(device: Device, gpaSession: GpaSessionAmd) -> Result; + +#[allow(non_camel_case_types)] +pub type PFN_vkCmdCopyGpaSessionResultsAMD = + extern "system" fn(commandBuffer: CommandBuffer, gpaSession: GpaSessionAmd) -> c_void; + +pub struct AmdGpaInterfaceFn { + pub create_gpa_session: PFN_vkCreateGpaSessionAMD, + pub destroy_gpa_session: PFN_vkDestroyGpaSessionAMD, + pub set_gpa_device_clock_mode: PFN_vkSetGpaDeviceClockModeAMD, + pub cmd_begin_gpa_session: PFN_vkCmdBeginGpaSessionAMD, + pub cmd_end_gpa_session: PFN_vkCmdEndGpaSessionAMD, + pub cmd_begin_gpa_sample: PFN_vkCmdBeginGpaSampleAMD, + pub cmd_end_gpa_sample: PFN_vkCmdEndGpaSampleAMD, + pub get_gpa_session_status: PFN_vkGetGpaSessionStatusAMD, + pub get_gpa_session_results: PFN_vkGetGpaSessionResultsAMD, + pub reset_gpa_session: PFN_vkResetGpaSessionAMD, + pub cmd_copy_gpa_session_results: PFN_vkCmdCopyGpaSessionResultsAMD, +} +unsafe impl Send for AmdGpaInterfaceFn {} +unsafe impl Sync for AmdGpaInterfaceFn {} + +impl ::std::clone::Clone for AmdGpaInterfaceFn { + fn clone(&self) -> Self { + AmdGpaInterfaceFn { + create_gpa_session: self.create_gpa_session, + destroy_gpa_session: self.destroy_gpa_session, + set_gpa_device_clock_mode: self.set_gpa_device_clock_mode, + cmd_begin_gpa_session: self.cmd_begin_gpa_session, + cmd_end_gpa_session: self.cmd_end_gpa_session, + cmd_begin_gpa_sample: 
self.cmd_begin_gpa_sample, + cmd_end_gpa_sample: self.cmd_end_gpa_sample, + get_gpa_session_status: self.get_gpa_session_status, + get_gpa_session_results: self.get_gpa_session_results, + reset_gpa_session: self.reset_gpa_session, + cmd_copy_gpa_session_results: self.cmd_copy_gpa_session_results, + } + } +} + +impl AmdGpaInterfaceFn { + pub fn load(mut _f: F) -> Self + where + F: FnMut(&::std::ffi::CStr) -> *const c_void, + { + AmdGpaInterfaceFn { + create_gpa_session: unsafe { + extern "system" fn create_gpa_session_amd( + _device: Device, + _p_create_info: *const GpaSessionCreateInfoAmd, + _p_allocator: *const AllocationCallbacks, + _p_gpa_session: *mut GpaSessionAmd, + ) -> Result { + panic!(concat!( + "Unable to load ", + stringify!(create_gpa_session_amd) + )) + } + let raw_name = stringify!(vkCreateGpaSessionAMD); + let cname = ::std::ffi::CString::new(raw_name).unwrap(); + let val = _f(&cname); + if val.is_null() { + create_gpa_session_amd + } else { + ::std::mem::transmute(val) + } + }, + destroy_gpa_session: unsafe { + extern "system" fn destroy_gpa_session_amd( + _device: Device, + _gpa_session: GpaSessionAmd, + _p_allocator: *const AllocationCallbacks, + ) -> c_void { + panic!(concat!( + "Unable to load ", + stringify!(destroy_gpa_session_amd) + )) + } + let raw_name = stringify!(vkDestroyGpaSessionAMD); + let cname = ::std::ffi::CString::new(raw_name).unwrap(); + let val = _f(&cname); + if val.is_null() { + destroy_gpa_session_amd + } else { + ::std::mem::transmute(val) + } + }, + set_gpa_device_clock_mode: unsafe { + extern "system" fn set_gpa_device_clock_mode_amd( + _device: Device, + _p_info: *mut GpaDeviceClockModeInfoAmd, + ) -> Result { + panic!(concat!( + "Unable to load ", + stringify!(set_gpa_device_clock_mode_amd) + )) + } + let raw_name = stringify!(vkSetGpaDeviceClockModeAMD); + let cname = ::std::ffi::CString::new(raw_name).unwrap(); + let val = _f(&cname); + if val.is_null() { + set_gpa_device_clock_mode_amd + } else { + ::std::mem::transmute(val) + } + }, + cmd_begin_gpa_session: unsafe { + extern "system" fn cmd_begin_gpa_session_amd( + _command_buffer: CommandBuffer, + _gpa_session: GpaSessionAmd, + ) -> Result { + panic!(concat!( + "Unable to load ", + stringify!(cmd_begin_gpa_session_amd) + )) + } + let raw_name = stringify!(vkCmdBeginGpaSessionAMD); + let cname = ::std::ffi::CString::new(raw_name).unwrap(); + let val = _f(&cname); + if val.is_null() { + cmd_begin_gpa_session_amd + } else { + ::std::mem::transmute(val) + } + }, + cmd_end_gpa_session: unsafe { + extern "system" fn cmd_end_gpa_session_amd( + _command_buffer: CommandBuffer, + _gpa_session: GpaSessionAmd, + ) -> Result { + panic!(concat!( + "Unable to load ", + stringify!(cmd_end_gpa_session_amd) + )) + } + let raw_name = stringify!(vkCmdEndGpaSessionAMD); + let cname = ::std::ffi::CString::new(raw_name).unwrap(); + let val = _f(&cname); + if val.is_null() { + cmd_end_gpa_session_amd + } else { + ::std::mem::transmute(val) + } + }, + cmd_begin_gpa_sample: unsafe { + extern "system" fn cmd_begin_gpa_sample_amd( + _command_buffer: CommandBuffer, + _gpa_session: GpaSessionAmd, + _p_gpa_sample_begin_info: *const GpaSampleBeginInfoAmd, + _p_sample_id: *mut u32, + ) -> Result { + panic!(concat!( + "Unable to load ", + stringify!(cmd_begin_gpa_sample_amd) + )) + } + let raw_name = stringify!(vkCmdBeginGpaSampleAMD); + let cname = ::std::ffi::CString::new(raw_name).unwrap(); + let val = _f(&cname); + if val.is_null() { + cmd_begin_gpa_sample_amd + } else { + ::std::mem::transmute(val) + } + }, + 
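+            // The remaining entries follow the same shape as those above:
+            // declare a panicking stub with the correct `extern "system"`
+            // signature, ask the loader closure `_f` for the entry point by
+            // name, and transmute the raw pointer when it is non-null,
+            // otherwise keep the stub as a fallback.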
+            cmd_end_gpa_sample: unsafe {
+                extern "system" fn cmd_end_gpa_sample_amd(
+                    _command_buffer: CommandBuffer,
+                    _gpa_session: GpaSessionAmd,
+                    _sample_id: u32,
+                ) -> c_void {
+                    panic!(concat!(
+                        "Unable to load ",
+                        stringify!(cmd_end_gpa_sample_amd)
+                    ))
+                }
+                let raw_name = stringify!(vkCmdEndGpaSampleAMD);
+                let cname = ::std::ffi::CString::new(raw_name).unwrap();
+                let val = _f(&cname);
+                if val.is_null() {
+                    cmd_end_gpa_sample_amd
+                } else {
+                    ::std::mem::transmute(val)
+                }
+            },
+            get_gpa_session_status: unsafe {
+                extern "system" fn get_gpa_session_status_amd(
+                    _device: Device,
+                    _gpa_session: GpaSessionAmd,
+                ) -> Result {
+                    panic!(concat!(
+                        "Unable to load ",
+                        stringify!(get_gpa_session_status_amd)
+                    ))
+                }
+                let raw_name = stringify!(vkGetGpaSessionStatusAMD);
+                let cname = ::std::ffi::CString::new(raw_name).unwrap();
+                let val = _f(&cname);
+                if val.is_null() {
+                    get_gpa_session_status_amd
+                } else {
+                    ::std::mem::transmute(val)
+                }
+            },
+            get_gpa_session_results: unsafe {
+                extern "system" fn get_gpa_session_results_amd(
+                    _device: Device,
+                    _gpa_session: GpaSessionAmd,
+                    _sample_id: u32,
+                    _p_size_in_bytes: *mut usize,
+                    _p_data: *mut c_void,
+                ) -> Result {
+                    panic!(concat!(
+                        "Unable to load ",
+                        stringify!(get_gpa_session_results_amd)
+                    ))
+                }
+                let raw_name = stringify!(vkGetGpaSessionResultsAMD);
+                let cname = ::std::ffi::CString::new(raw_name).unwrap();
+                let val = _f(&cname);
+                if val.is_null() {
+                    get_gpa_session_results_amd
+                } else {
+                    ::std::mem::transmute(val)
+                }
+            },
+            reset_gpa_session: unsafe {
+                extern "system" fn reset_gpa_session_amd(
+                    _device: Device,
+                    _gpa_session: GpaSessionAmd,
+                ) -> Result {
+                    panic!(concat!(
+                        "Unable to load ",
+                        stringify!(reset_gpa_session_amd)
+                    ))
+                }
+                let raw_name = stringify!(vkResetGpaSessionAMD);
+                let cname = ::std::ffi::CString::new(raw_name).unwrap();
+                let val = _f(&cname);
+                if val.is_null() {
+                    reset_gpa_session_amd
+                } else {
+                    ::std::mem::transmute(val)
+                }
+            },
+            cmd_copy_gpa_session_results: unsafe {
+                extern "system" fn cmd_copy_gpa_session_results_amd(
+                    _command_buffer: CommandBuffer,
+                    _gpa_session: GpaSessionAmd,
+                ) -> c_void {
+                    panic!(concat!(
+                        "Unable to load ",
+                        stringify!(cmd_copy_gpa_session_results_amd)
+                    ))
+                }
+                let raw_name = stringify!(vkCmdCopyGpaSessionResultsAMD);
+                let cname = ::std::ffi::CString::new(raw_name).unwrap();
+                let val = _f(&cname);
+                if val.is_null() {
+                    cmd_copy_gpa_session_results_amd
+                } else {
+                    ::std::mem::transmute(val)
+                }
+            },
+        }
+    }
+    pub unsafe fn create_gpa_session(
+        &self,
+        device: Device,
+        create_info: *const GpaSessionCreateInfoAmd,
+        allocator: *const AllocationCallbacks,
+        gpa_session: *mut GpaSessionAmd,
+    ) -> Result {
+        (self.create_gpa_session)(device, create_info, allocator, gpa_session)
+    }
+    pub unsafe fn destroy_gpa_session(
+        &self,
+        device: Device,
+        gpa_session: GpaSessionAmd,
+        allocator: *const AllocationCallbacks,
+    ) -> c_void {
+        (self.destroy_gpa_session)(device, gpa_session, allocator)
+    }
+}
+
+
+
+impl StructureType {
+    pub const WAVE_LIMIT_AMD: Self = StructureType(1000045000);
+    pub const PHYSICAL_DEVICE_WAVE_LIMIT_PROPERTIES_AMD: Self = StructureType(1000045001);
+}
+
+#[repr(C)]
+#[derive(Copy, Clone, Debug)]
+pub struct PhysicalDeviceWaveLimitPropertiesAmd {
+    pub s_type: StructureType,
+    pub p_next: *const c_void,
+    pub cu_count: u32,
+    pub max_waves_per_cu: u32,
+}
+
+impl ::std::default::Default for PhysicalDeviceWaveLimitPropertiesAmd {
+    fn default() -> PhysicalDeviceWaveLimitPropertiesAmd {
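+        // Default pre-fills s_type with the matching structure type and
+        // leaves p_next null, so a default value can be passed straight
+        // into a p_next chain without further setup.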
PhysicalDeviceWaveLimitPropertiesAmd { + s_type: StructureType::PHYSICAL_DEVICE_WAVE_LIMIT_PROPERTIES_AMD, + p_next: ::std::ptr::null_mut(), + cu_count: u32::default(), + max_waves_per_cu: u32::default(), + } + } +} +impl PhysicalDeviceWaveLimitPropertiesAmd { + pub fn builder<'a>() -> PhysicalDeviceWaveLimitPropertiesAmdBuilder<'a> { + PhysicalDeviceWaveLimitPropertiesAmdBuilder { + inner: PhysicalDeviceWaveLimitPropertiesAmd::default(), + marker: ::std::marker::PhantomData, + } + } +} +pub struct PhysicalDeviceWaveLimitPropertiesAmdBuilder<'a> { + inner: PhysicalDeviceWaveLimitPropertiesAmd, + marker: ::std::marker::PhantomData<&'a ()>, +} +pub unsafe trait ExtendsPhysicalDeviceWaveLimitPropertiesAmd {} +unsafe impl ExtendsPhysicalDeviceProperties2 for PhysicalDeviceWaveLimitPropertiesAmd {} +impl<'a> ::std::ops::Deref for PhysicalDeviceWaveLimitPropertiesAmdBuilder<'a> { + type Target = PhysicalDeviceWaveLimitPropertiesAmd; + fn deref(&self) -> &Self::Target { + &self.inner + } +} +impl<'a> PhysicalDeviceWaveLimitPropertiesAmdBuilder<'a> { + pub fn push_next( + mut self, + next: &'a mut T, + ) -> PhysicalDeviceWaveLimitPropertiesAmdBuilder<'a> + where + T: ExtendsPhysicalDeviceWaveLimitPropertiesAmd, + { + unsafe { + let next_ptr = next as *mut T as *mut BaseOutStructure; + let last_next = ptr_chain_iter(next).last().unwrap(); + (*last_next).p_next = self.inner.p_next as _; + self.inner.p_next = next_ptr as _; + } + self + } + pub fn build(self) -> PhysicalDeviceWaveLimitPropertiesAmd { + self.inner + } +} + +#[repr(C)] +#[derive(Copy, Clone, Debug)] +pub struct PipelineShaderStageCreateInfoWaveLimitAmd { + pub s_type: StructureType, + pub p_next: *const c_void, + pub waves_per_cu: f32, + pub cu_enable_mask: *mut u32, +} diff --git a/third_party/rust/ash/src/extensions/experimental/mod.rs b/third_party/rust/ash/src/extensions/experimental/mod.rs new file mode 100644 index 000000000000..49e51f80c3b8 --- /dev/null +++ b/third_party/rust/ash/src/extensions/experimental/mod.rs @@ -0,0 +1 @@ +pub mod amd; diff --git a/third_party/rust/ash/src/extensions/ext/debug_marker.rs b/third_party/rust/ash/src/extensions/ext/debug_marker.rs new file mode 100644 index 000000000000..11f6ce6aee2c --- /dev/null +++ b/third_party/rust/ash/src/extensions/ext/debug_marker.rs @@ -0,0 +1,67 @@ +#![allow(dead_code)] +use prelude::*; +use std::ffi::CStr; +use std::mem; +use version::{DeviceV1_0, InstanceV1_0}; +use vk; + +#[derive(Clone)] +pub struct DebugMarker { + debug_marker_fn: vk::ExtDebugMarkerFn, +} + +impl DebugMarker { + pub fn new(instance: &I, device: &D) -> DebugMarker { + let debug_marker_fn = vk::ExtDebugMarkerFn::load(|name| unsafe { + mem::transmute(instance.get_device_proc_addr(device.handle(), name.as_ptr())) + }); + DebugMarker { + debug_marker_fn: debug_marker_fn, + } + } + + pub fn name() -> &'static CStr { + vk::ExtDebugMarkerFn::name() + } + + #[doc = ""] + pub unsafe fn debug_marker_set_object_name( + &self, + device: vk::Device, + name_info: &vk::DebugMarkerObjectNameInfoEXT, + ) -> VkResult<()> { + let err_code = self + .debug_marker_fn + .debug_marker_set_object_name_ext(device, name_info); + match err_code { + vk::Result::SUCCESS => Ok(()), + _ => Err(err_code), + } + } + + #[doc = ""] + pub unsafe fn cmd_debug_marker_begin( + &self, + command_buffer: vk::CommandBuffer, + marker_info: &vk::DebugMarkerMarkerInfoEXT, + ) { + self.debug_marker_fn + .cmd_debug_marker_begin_ext(command_buffer, marker_info); + } + + #[doc = ""] + pub unsafe fn cmd_debug_marker_end(&self, command_buffer: 
vk::CommandBuffer) { + self.debug_marker_fn + .cmd_debug_marker_end_ext(command_buffer); + } + + #[doc = ""] + pub unsafe fn cmd_debug_marker_insert( + &self, + command_buffer: vk::CommandBuffer, + marker_info: &vk::DebugMarkerMarkerInfoEXT, + ) { + self.debug_marker_fn + .cmd_debug_marker_insert_ext(command_buffer, marker_info); + } +} diff --git a/third_party/rust/ash/src/extensions/ext/debug_report.rs b/third_party/rust/ash/src/extensions/ext/debug_report.rs new file mode 100644 index 000000000000..1d5011750d28 --- /dev/null +++ b/third_party/rust/ash/src/extensions/ext/debug_report.rs @@ -0,0 +1,61 @@ +#![allow(dead_code)] +use prelude::*; +use std::ffi::CStr; +use std::mem; +use version::{EntryV1_0, InstanceV1_0}; +use vk; +use RawPtr; + +#[derive(Clone)] +pub struct DebugReport { + handle: vk::Instance, + debug_report_fn: vk::ExtDebugReportFn, +} + +impl DebugReport { + pub fn new(entry: &E, instance: &I) -> DebugReport { + let debug_report_fn = vk::ExtDebugReportFn::load(|name| unsafe { + mem::transmute(entry.get_instance_proc_addr(instance.handle(), name.as_ptr())) + }); + DebugReport { + handle: instance.handle(), + debug_report_fn, + } + } + + pub fn name() -> &'static CStr { + vk::ExtDebugReportFn::name() + } + + #[doc = ""] + pub unsafe fn destroy_debug_report_callback( + &self, + debug: vk::DebugReportCallbackEXT, + allocation_callbacks: Option<&vk::AllocationCallbacks>, + ) { + self.debug_report_fn.destroy_debug_report_callback_ext( + self.handle, + debug, + allocation_callbacks.as_raw_ptr(), + ); + } + + #[doc = ""] + pub unsafe fn create_debug_report_callback( + &self, + create_info: &vk::DebugReportCallbackCreateInfoEXT, + allocation_callbacks: Option<&vk::AllocationCallbacks>, + ) -> VkResult { + let mut debug_cb = mem::uninitialized(); + let err_code = self.debug_report_fn.create_debug_report_callback_ext( + self.handle, + create_info, + allocation_callbacks.as_raw_ptr(), + &mut debug_cb, + ); + match err_code { + vk::Result::SUCCESS => Ok(debug_cb), + _ => Err(err_code), + } + } +} diff --git a/third_party/rust/ash/src/extensions/ext/debug_utils.rs b/third_party/rust/ash/src/extensions/ext/debug_utils.rs new file mode 100644 index 000000000000..a8acffdf6139 --- /dev/null +++ b/third_party/rust/ash/src/extensions/ext/debug_utils.rs @@ -0,0 +1,157 @@ +#![allow(dead_code)] +use prelude::*; +use std::ffi::CStr; +use std::mem; +use version::{EntryV1_0, InstanceV1_0}; +use {vk, RawPtr}; + +#[derive(Clone)] +pub struct DebugUtils { + handle: vk::Instance, + debug_utils_fn: vk::ExtDebugUtilsFn, +} + +impl DebugUtils { + pub fn new(entry: &E, instance: &I) -> DebugUtils { + let debug_utils_fn = vk::ExtDebugUtilsFn::load(|name| unsafe { + mem::transmute(entry.get_instance_proc_addr(instance.handle(), name.as_ptr())) + }); + DebugUtils { + handle: instance.handle(), + debug_utils_fn, + } + } + + pub fn name() -> &'static CStr { + vk::ExtDebugUtilsFn::name() + } + + #[doc = ""] + pub unsafe fn debug_utils_set_object_name( + &self, + device: vk::Device, + name_info: &vk::DebugUtilsObjectNameInfoEXT, + ) -> VkResult<()> { + let err_code = self + .debug_utils_fn + .set_debug_utils_object_name_ext(device, name_info); + match err_code { + vk::Result::SUCCESS => Ok(()), + _ => Err(err_code), + } + } + + #[doc = ""] + pub unsafe fn debug_utils_set_object_tag( + &self, + device: vk::Device, + tag_info: &vk::DebugUtilsObjectTagInfoEXT, + ) -> VkResult<()> { + let err_code = self + .debug_utils_fn + .set_debug_utils_object_tag_ext(device, tag_info); + match err_code { + vk::Result::SUCCESS 
=> Ok(()), + _ => Err(err_code), + } + } + + #[doc = ""] + pub unsafe fn cmd_begin_debug_utils_label( + &self, + command_buffer: vk::CommandBuffer, + label: &vk::DebugUtilsLabelEXT, + ) { + self.debug_utils_fn + .cmd_begin_debug_utils_label_ext(command_buffer, label); + } + + #[doc = ""] + pub unsafe fn cmd_end_debug_utils_label(&self, command_buffer: vk::CommandBuffer) { + self.debug_utils_fn + .cmd_end_debug_utils_label_ext(command_buffer); + } + + #[doc = ""] + pub unsafe fn cmd_insert_debug_utils_label( + &self, + command_buffer: vk::CommandBuffer, + label: &vk::DebugUtilsLabelEXT, + ) { + self.debug_utils_fn + .cmd_insert_debug_utils_label_ext(command_buffer, label); + } + + #[doc = ""] + pub unsafe fn queue_begin_debug_utils_label( + &self, + queue: vk::Queue, + label: &vk::DebugUtilsLabelEXT, + ) { + self.debug_utils_fn + .queue_begin_debug_utils_label_ext(queue, label); + } + + #[doc = ""] + pub unsafe fn queue_end_debug_utils_label(&self, queue: vk::Queue) { + self.debug_utils_fn.queue_end_debug_utils_label_ext(queue); + } + + #[doc = ""] + pub unsafe fn queue_insert_debug_utils_label( + &self, + queue: vk::Queue, + label: &vk::DebugUtilsLabelEXT, + ) { + self.debug_utils_fn + .queue_insert_debug_utils_label_ext(queue, label); + } + + #[doc = ""] + pub unsafe fn create_debug_utils_messenger( + &self, + create_info: &vk::DebugUtilsMessengerCreateInfoEXT, + allocator: Option<&vk::AllocationCallbacks>, + ) -> VkResult { + let mut messenger = mem::uninitialized(); + let err_code = self.debug_utils_fn.create_debug_utils_messenger_ext( + self.handle, + create_info, + allocator.as_raw_ptr(), + &mut messenger, + ); + match err_code { + vk::Result::SUCCESS => Ok(messenger), + _ => Err(err_code), + } + } + + #[doc = ""] + pub unsafe fn destroy_debug_utils_messenger( + &self, + messenger: vk::DebugUtilsMessengerEXT, + allocator: Option<&vk::AllocationCallbacks>, + ) { + self.debug_utils_fn.destroy_debug_utils_messenger_ext( + self.handle, + messenger, + allocator.as_raw_ptr(), + ); + } + + #[doc = ""] + pub unsafe fn submit_debug_utils_message( + &self, + instance: vk::Instance, + message_severity: vk::DebugUtilsMessageSeverityFlagsEXT, + message_types: vk::DebugUtilsMessageTypeFlagsEXT, + callback_data: &vk::DebugUtilsMessengerCallbackDataEXT, + ) { + self.debug_utils_fn.submit_debug_utils_message_ext( + instance, + message_severity, + message_types, + callback_data, + ); + } +} diff --git a/third_party/rust/ash/src/extensions/ext/mod.rs b/third_party/rust/ash/src/extensions/ext/mod.rs new file mode 100644 index 000000000000..eba690f69cbb --- /dev/null +++ b/third_party/rust/ash/src/extensions/ext/mod.rs @@ -0,0 +1,7 @@ +pub use self::debug_marker::DebugMarker; +pub use self::debug_report::DebugReport; +pub use self::debug_utils::DebugUtils; + +mod debug_marker; +mod debug_report; +mod debug_utils; diff --git a/third_party/rust/ash/src/extensions/khr/android_surface.rs b/third_party/rust/ash/src/extensions/khr/android_surface.rs new file mode 100644 index 000000000000..d9d343d67bc7 --- /dev/null +++ b/third_party/rust/ash/src/extensions/khr/android_surface.rs @@ -0,0 +1,48 @@ +#![allow(dead_code)] +use prelude::*; +use std::ffi::CStr; +use std::mem; +use version::{EntryV1_0, InstanceV1_0}; +use vk; +use RawPtr; + +#[derive(Clone)] +pub struct AndroidSurface { + handle: vk::Instance, + android_surface_fn: vk::KhrAndroidSurfaceFn, +} + +impl AndroidSurface { + pub fn new(entry: &E, instance: &I) -> AndroidSurface { + let surface_fn = vk::KhrAndroidSurfaceFn::load(|name| unsafe { + 
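+            // Each extension symbol is resolved by name through
+            // vkGetInstanceProcAddr; mem::transmute casts the returned raw
+            // pointer into the typed function pointer the generated loader
+            // expects.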
mem::transmute(entry.get_instance_proc_addr(instance.handle(), name.as_ptr())) + }); + AndroidSurface { + handle: instance.handle(), + android_surface_fn: surface_fn, + } + } + + pub fn name() -> &'static CStr { + vk::KhrAndroidSurfaceFn::name() + } + + #[doc = ""] + pub unsafe fn create_android_surface( + &self, + create_info: &vk::AndroidSurfaceCreateInfoKHR, + allocation_callbacks: Option<&vk::AllocationCallbacks>, + ) -> VkResult { + let mut surface = mem::uninitialized(); + let err_code = self.android_surface_fn.create_android_surface_khr( + self.handle, + create_info, + allocation_callbacks.as_raw_ptr(), + &mut surface, + ); + match err_code { + vk::Result::SUCCESS => Ok(surface), + _ => Err(err_code), + } + } +} diff --git a/third_party/rust/ash/src/extensions/khr/display_swapchain.rs b/third_party/rust/ash/src/extensions/khr/display_swapchain.rs new file mode 100644 index 000000000000..29c3b5dfb348 --- /dev/null +++ b/third_party/rust/ash/src/extensions/khr/display_swapchain.rs @@ -0,0 +1,50 @@ +#![allow(dead_code)] +use prelude::*; +use std::ffi::CStr; +use std::mem; +use version::{DeviceV1_0, InstanceV1_0}; +use vk; +use RawPtr; + +#[derive(Clone)] +pub struct DisplaySwapchain { + handle: vk::Device, + swapchain_fn: vk::KhrDisplaySwapchainFn, +} + +impl DisplaySwapchain { + pub fn new(instance: &I, device: &D) -> DisplaySwapchain { + let swapchain_fn = vk::KhrDisplaySwapchainFn::load(|name| unsafe { + mem::transmute(instance.get_device_proc_addr(device.handle(), name.as_ptr())) + }); + DisplaySwapchain { + handle: device.handle(), + swapchain_fn: swapchain_fn, + } + } + + pub fn name() -> &'static CStr { + vk::KhrDisplaySwapchainFn::name() + } + + #[doc = ""] + pub unsafe fn create_shared_swapchains( + &self, + create_infos: &[vk::SwapchainCreateInfoKHR], + allocation_callbacks: Option<&vk::AllocationCallbacks>, + ) -> VkResult> { + let mut swapchains = Vec::with_capacity(create_infos.len()); + let err_code = self.swapchain_fn.create_shared_swapchains_khr( + self.handle, + create_infos.len() as u32, + create_infos.as_ptr(), + allocation_callbacks.as_raw_ptr(), + swapchains.as_mut_ptr(), + ); + swapchains.set_len(create_infos.len()); + match err_code { + vk::Result::SUCCESS => Ok(swapchains), + _ => Err(err_code), + } + } +} diff --git a/third_party/rust/ash/src/extensions/khr/mod.rs b/third_party/rust/ash/src/extensions/khr/mod.rs new file mode 100644 index 000000000000..88126740cff1 --- /dev/null +++ b/third_party/rust/ash/src/extensions/khr/mod.rs @@ -0,0 +1,17 @@ +pub use self::android_surface::AndroidSurface; +pub use self::display_swapchain::DisplaySwapchain; +pub use self::surface::Surface; +pub use self::swapchain::Swapchain; +pub use self::wayland_surface::WaylandSurface; +pub use self::win32_surface::Win32Surface; +pub use self::xcb_surface::XcbSurface; +pub use self::xlib_surface::XlibSurface; + +mod android_surface; +mod display_swapchain; +mod surface; +mod swapchain; +mod wayland_surface; +mod win32_surface; +mod xcb_surface; +mod xlib_surface; diff --git a/third_party/rust/ash/src/extensions/khr/surface.rs b/third_party/rust/ash/src/extensions/khr/surface.rs new file mode 100644 index 000000000000..a0ca5ad1694c --- /dev/null +++ b/third_party/rust/ash/src/extensions/khr/surface.rs @@ -0,0 +1,137 @@ +#![allow(dead_code)] +use prelude::*; +use std::ffi::CStr; +use std::mem; +use std::ptr; +use version::{EntryV1_0, InstanceV1_0}; +use vk; +use RawPtr; + +#[derive(Clone)] +pub struct Surface { + handle: vk::Instance, + surface_fn: vk::KhrSurfaceFn, +} + +impl Surface { 
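+    // Thin wrapper over VK_KHR_surface: `handle` is the owning instance and
+    // `surface_fn` the loaded function table; the unsafe methods below are
+    // direct calls into those entry points.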
+ pub fn new(entry: &E, instance: &I) -> Surface { + let surface_fn = vk::KhrSurfaceFn::load(|name| unsafe { + mem::transmute(entry.get_instance_proc_addr(instance.handle(), name.as_ptr())) + }); + Surface { + handle: instance.handle(), + surface_fn: surface_fn, + } + } + + pub fn name() -> &'static CStr { + vk::KhrSurfaceFn::name() + } + + #[doc = ""] + pub unsafe fn get_physical_device_surface_support( + &self, + physical_device: vk::PhysicalDevice, + queue_index: u32, + surface: vk::SurfaceKHR, + ) -> bool { + let mut b = mem::uninitialized(); + self.surface_fn.get_physical_device_surface_support_khr( + physical_device, + queue_index, + surface, + &mut b, + ); + b > 0 + } + + #[doc = ""] + pub unsafe fn get_physical_device_surface_present_modes( + &self, + physical_device: vk::PhysicalDevice, + surface: vk::SurfaceKHR, + ) -> VkResult> { + let mut count = 0; + self.surface_fn + .get_physical_device_surface_present_modes_khr( + physical_device, + surface, + &mut count, + ptr::null_mut(), + ); + let mut v = Vec::with_capacity(count as usize); + let err_code = self + .surface_fn + .get_physical_device_surface_present_modes_khr( + physical_device, + surface, + &mut count, + v.as_mut_ptr(), + ); + v.set_len(count as usize); + match err_code { + vk::Result::SUCCESS => Ok(v), + _ => Err(err_code), + } + } + + #[doc = ""] + pub unsafe fn get_physical_device_surface_capabilities( + &self, + physical_device: vk::PhysicalDevice, + surface: vk::SurfaceKHR, + ) -> VkResult { + let mut surface_capabilities = mem::uninitialized(); + let err_code = self + .surface_fn + .get_physical_device_surface_capabilities_khr( + physical_device, + surface, + &mut surface_capabilities, + ); + match err_code { + vk::Result::SUCCESS => Ok(surface_capabilities), + _ => Err(err_code), + } + } + + #[doc = ""] + pub unsafe fn get_physical_device_surface_formats( + &self, + physical_device: vk::PhysicalDevice, + surface: vk::SurfaceKHR, + ) -> VkResult> { + let mut count = 0; + self.surface_fn.get_physical_device_surface_formats_khr( + physical_device, + surface, + &mut count, + ptr::null_mut(), + ); + let mut v = Vec::with_capacity(count as usize); + let err_code = self.surface_fn.get_physical_device_surface_formats_khr( + physical_device, + surface, + &mut count, + v.as_mut_ptr(), + ); + v.set_len(count as usize); + match err_code { + vk::Result::SUCCESS => Ok(v), + _ => Err(err_code), + } + } + + #[doc = ""] + pub unsafe fn destroy_surface( + &self, + surface: vk::SurfaceKHR, + allocation_callbacks: Option<&vk::AllocationCallbacks>, + ) { + self.surface_fn.destroy_surface_khr( + self.handle, + surface, + allocation_callbacks.as_raw_ptr(), + ); + } +} diff --git a/third_party/rust/ash/src/extensions/khr/swapchain.rs b/third_party/rust/ash/src/extensions/khr/swapchain.rs new file mode 100644 index 000000000000..a08c160f252b --- /dev/null +++ b/third_party/rust/ash/src/extensions/khr/swapchain.rs @@ -0,0 +1,129 @@ +#![allow(dead_code)] +use prelude::*; +use std::ffi::CStr; +use std::mem; +use std::ptr; +use version::{DeviceV1_0, InstanceV1_0}; +use vk; +use RawPtr; + +#[derive(Clone)] +pub struct Swapchain { + handle: vk::Device, + swapchain_fn: vk::KhrSwapchainFn, +} + +impl Swapchain { + pub fn new(instance: &I, device: &D) -> Swapchain { + let swapchain_fn = vk::KhrSwapchainFn::load(|name| unsafe { + mem::transmute(instance.get_device_proc_addr(device.handle(), name.as_ptr())) + }); + Swapchain { + handle: device.handle(), + swapchain_fn: swapchain_fn, + } + } + + pub fn name() -> &'static CStr { + 
vk::KhrSwapchainFn::name() + } + + #[doc = ""] + pub unsafe fn destroy_swapchain( + &self, + swapchain: vk::SwapchainKHR, + allocation_callbacks: Option<&vk::AllocationCallbacks>, + ) { + self.swapchain_fn.destroy_swapchain_khr( + self.handle, + swapchain, + allocation_callbacks.as_raw_ptr(), + ); + } + + + #[doc = ""] + pub unsafe fn acquire_next_image( + &self, + swapchain: vk::SwapchainKHR, + timeout: u64, + semaphore: vk::Semaphore, + fence: vk::Fence, + ) -> VkResult<(u32, bool)> { + let mut index = mem::uninitialized(); + let err_code = self.swapchain_fn.acquire_next_image_khr( + self.handle, + swapchain, + timeout, + semaphore, + fence, + &mut index, + ); + match err_code { + vk::Result::SUCCESS => Ok((index, false)), + vk::Result::SUBOPTIMAL_KHR => Ok((index, true)), + _ => Err(err_code), + } + } + + #[doc = ""] + pub unsafe fn create_swapchain( + &self, + create_info: &vk::SwapchainCreateInfoKHR, + allocation_callbacks: Option<&vk::AllocationCallbacks>, + ) -> VkResult { + let mut swapchain = mem::uninitialized(); + let err_code = self.swapchain_fn.create_swapchain_khr( + self.handle, + create_info, + allocation_callbacks.as_raw_ptr(), + &mut swapchain, + ); + match err_code { + vk::Result::SUCCESS => Ok(swapchain), + _ => Err(err_code), + } + } + + + #[doc = ""] + pub unsafe fn queue_present( + &self, + queue: vk::Queue, + create_info: &vk::PresentInfoKHR, + ) -> VkResult { + let err_code = self.swapchain_fn.queue_present_khr(queue, create_info); + match err_code { + vk::Result::SUCCESS => Ok(false), + vk::Result::SUBOPTIMAL_KHR => Ok(true), + _ => Err(err_code), + } + } + + #[doc = ""] + pub unsafe fn get_swapchain_images( + &self, + swapchain: vk::SwapchainKHR, + ) -> VkResult> { + let mut count = 0; + self.swapchain_fn.get_swapchain_images_khr( + self.handle, + swapchain, + &mut count, + ptr::null_mut(), + ); + + let mut v = Vec::with_capacity(count as usize); + let err_code = self.swapchain_fn.get_swapchain_images_khr( + self.handle, + swapchain, + &mut count, + v.as_mut_ptr(), + ); + v.set_len(count as usize); + match err_code { + vk::Result::SUCCESS => Ok(v), + _ => Err(err_code), + } + } +} diff --git a/third_party/rust/ash/src/extensions/khr/wayland_surface.rs b/third_party/rust/ash/src/extensions/khr/wayland_surface.rs new file mode 100644 index 000000000000..ec5480d96a1f --- /dev/null +++ b/third_party/rust/ash/src/extensions/khr/wayland_surface.rs @@ -0,0 +1,48 @@ +#![allow(dead_code)] +use prelude::*; +use std::ffi::CStr; +use std::mem; +use version::{EntryV1_0, InstanceV1_0}; +use vk; +use RawPtr; + +#[derive(Clone)] +pub struct WaylandSurface { + handle: vk::Instance, + wayland_surface_fn: vk::KhrWaylandSurfaceFn, +} + +impl WaylandSurface { + pub fn new(entry: &E, instance: &I) -> WaylandSurface { + let surface_fn = vk::KhrWaylandSurfaceFn::load(|name| unsafe { + mem::transmute(entry.get_instance_proc_addr(instance.handle(), name.as_ptr())) + }); + WaylandSurface { + handle: instance.handle(), + wayland_surface_fn: surface_fn, + } + } + + pub fn name() -> &'static CStr { + vk::KhrWaylandSurfaceFn::name() + } + + #[doc = ""] + pub unsafe fn create_wayland_surface( + &self, + create_info: &vk::WaylandSurfaceCreateInfoKHR, + allocation_callbacks: Option<&vk::AllocationCallbacks>, + ) -> VkResult { + let mut surface = mem::uninitialized(); + let err_code = self.wayland_surface_fn.create_wayland_surface_khr( + self.handle, + create_info, + allocation_callbacks.as_raw_ptr(), + &mut surface, + ); + match err_code { + vk::Result::SUCCESS => Ok(surface), + _ => 
Err(err_code), + } + } +} diff --git a/third_party/rust/ash/src/extensions/khr/win32_surface.rs b/third_party/rust/ash/src/extensions/khr/win32_surface.rs new file mode 100644 index 000000000000..72ae7efce0b7 --- /dev/null +++ b/third_party/rust/ash/src/extensions/khr/win32_surface.rs @@ -0,0 +1,48 @@ +#![allow(dead_code)] +use prelude::*; +use std::ffi::CStr; +use std::mem; +use version::{EntryV1_0, InstanceV1_0}; +use vk; +use RawPtr; + +#[derive(Clone)] +pub struct Win32Surface { + handle: vk::Instance, + win32_surface_fn: vk::KhrWin32SurfaceFn, +} + +impl Win32Surface { + pub fn new(entry: &E, instance: &I) -> Win32Surface { + let surface_fn = vk::KhrWin32SurfaceFn::load(|name| unsafe { + mem::transmute(entry.get_instance_proc_addr(instance.handle(), name.as_ptr())) + }); + Win32Surface { + handle: instance.handle(), + win32_surface_fn: surface_fn, + } + } + + pub fn name() -> &'static CStr { + vk::KhrWin32SurfaceFn::name() + } + + #[doc = ""] + pub unsafe fn create_win32_surface( + &self, + create_info: &vk::Win32SurfaceCreateInfoKHR, + allocation_callbacks: Option<&vk::AllocationCallbacks>, + ) -> VkResult { + let mut surface = mem::uninitialized(); + let err_code = self.win32_surface_fn.create_win32_surface_khr( + self.handle, + create_info, + allocation_callbacks.as_raw_ptr(), + &mut surface, + ); + match err_code { + vk::Result::SUCCESS => Ok(surface), + _ => Err(err_code), + } + } +} diff --git a/third_party/rust/ash/src/extensions/khr/xcb_surface.rs b/third_party/rust/ash/src/extensions/khr/xcb_surface.rs new file mode 100644 index 000000000000..dbd05e8a7f54 --- /dev/null +++ b/third_party/rust/ash/src/extensions/khr/xcb_surface.rs @@ -0,0 +1,48 @@ +#![allow(dead_code)] +use prelude::*; +use std::ffi::CStr; +use std::mem; +use version::{EntryV1_0, InstanceV1_0}; +use vk; +use RawPtr; + +#[derive(Clone)] +pub struct XcbSurface { + handle: vk::Instance, + xcb_surface_fn: vk::KhrXcbSurfaceFn, +} + +impl XcbSurface { + pub fn new(entry: &E, instance: &I) -> XcbSurface { + let surface_fn = vk::KhrXcbSurfaceFn::load(|name| unsafe { + mem::transmute(entry.get_instance_proc_addr(instance.handle(), name.as_ptr())) + }); + XcbSurface { + handle: instance.handle(), + xcb_surface_fn: surface_fn, + } + } + + pub fn name() -> &'static CStr { + vk::KhrXcbSurfaceFn::name() + } + + #[doc = ""] + pub unsafe fn create_xcb_surface( + &self, + create_info: &vk::XcbSurfaceCreateInfoKHR, + allocation_callbacks: Option<&vk::AllocationCallbacks>, + ) -> VkResult { + let mut surface = mem::uninitialized(); + let err_code = self.xcb_surface_fn.create_xcb_surface_khr( + self.handle, + create_info, + allocation_callbacks.as_raw_ptr(), + &mut surface, + ); + match err_code { + vk::Result::SUCCESS => Ok(surface), + _ => Err(err_code), + } + } +} diff --git a/third_party/rust/ash/src/extensions/khr/xlib_surface.rs b/third_party/rust/ash/src/extensions/khr/xlib_surface.rs new file mode 100644 index 000000000000..aed6ca4efd01 --- /dev/null +++ b/third_party/rust/ash/src/extensions/khr/xlib_surface.rs @@ -0,0 +1,48 @@ +#![allow(dead_code)] +use prelude::*; +use std::ffi::CStr; +use std::mem; +use version::{EntryV1_0, InstanceV1_0}; +use vk; +use RawPtr; + +#[derive(Clone)] +pub struct XlibSurface { + handle: vk::Instance, + xlib_surface_fn: vk::KhrXlibSurfaceFn, +} + +impl XlibSurface { + pub fn new(entry: &E, instance: &I) -> XlibSurface { + let surface_fn = vk::KhrXlibSurfaceFn::load(|name| unsafe { + mem::transmute(entry.get_instance_proc_addr(instance.handle(), name.as_ptr())) + }); + XlibSurface { + handle: 
instance.handle(), + xlib_surface_fn: surface_fn, + } + } + + pub fn name() -> &'static CStr { + vk::KhrXlibSurfaceFn::name() + } + + #[doc = ""] + pub unsafe fn create_xlib_surface( + &self, + create_info: &vk::XlibSurfaceCreateInfoKHR, + allocation_callbacks: Option<&vk::AllocationCallbacks>, + ) -> VkResult { + let mut surface = mem::uninitialized(); + let err_code = self.xlib_surface_fn.create_xlib_surface_khr( + self.handle, + create_info, + allocation_callbacks.as_raw_ptr(), + &mut surface, + ); + match err_code { + vk::Result::SUCCESS => Ok(surface), + _ => Err(err_code), + } + } +} diff --git a/third_party/rust/ash/src/extensions/mod.rs b/third_party/rust/ash/src/extensions/mod.rs new file mode 100644 index 000000000000..4de4222d6189 --- /dev/null +++ b/third_party/rust/ash/src/extensions/mod.rs @@ -0,0 +1,5 @@ +pub mod experimental; +pub mod ext; +pub mod khr; +pub mod mvk; +pub mod nv; diff --git a/third_party/rust/ash/src/extensions/mvk/ios_surface.rs b/third_party/rust/ash/src/extensions/mvk/ios_surface.rs new file mode 100644 index 000000000000..4aac1c50478f --- /dev/null +++ b/third_party/rust/ash/src/extensions/mvk/ios_surface.rs @@ -0,0 +1,48 @@ +#![allow(dead_code)] +use prelude::*; +use std::ffi::CStr; +use std::mem; +use version::{EntryV1_0, InstanceV1_0}; +use vk; +use RawPtr; + +#[derive(Clone)] +pub struct IOSSurface { + handle: vk::Instance, + ios_surface_fn: vk::MvkIosSurfaceFn, +} + +impl IOSSurface { + pub fn new(entry: &E, instance: &I) -> IOSSurface { + let surface_fn = vk::MvkIosSurfaceFn::load(|name| unsafe { + mem::transmute(entry.get_instance_proc_addr(instance.handle(), name.as_ptr())) + }); + IOSSurface { + handle: instance.handle(), + ios_surface_fn: surface_fn, + } + } + + pub fn name() -> &'static CStr { + vk::MvkIosSurfaceFn::name() + } + + #[doc = ""] + pub unsafe fn create_ios_surface_mvk( + &self, + create_info: &vk::IOSSurfaceCreateInfoMVK, + allocation_callbacks: Option<&vk::AllocationCallbacks>, + ) -> VkResult { + let mut surface = mem::uninitialized(); + let err_code = self.ios_surface_fn.create_ios_surface_mvk( + self.handle, + create_info, + allocation_callbacks.as_raw_ptr(), + &mut surface, + ); + match err_code { + vk::Result::SUCCESS => Ok(surface), + _ => Err(err_code), + } + } +} diff --git a/third_party/rust/ash/src/extensions/mvk/macos_surface.rs b/third_party/rust/ash/src/extensions/mvk/macos_surface.rs new file mode 100644 index 000000000000..9dbf44b60da1 --- /dev/null +++ b/third_party/rust/ash/src/extensions/mvk/macos_surface.rs @@ -0,0 +1,48 @@ +#![allow(dead_code)] +use prelude::*; +use std::ffi::CStr; +use std::mem; +use version::{EntryV1_0, InstanceV1_0}; +use vk; +use RawPtr; + +#[derive(Clone)] +pub struct MacOSSurface { + handle: vk::Instance, + macos_surface_fn: vk::MvkMacosSurfaceFn, +} + +impl MacOSSurface { + pub fn new(entry: &E, instance: &I) -> MacOSSurface { + let surface_fn = vk::MvkMacosSurfaceFn::load(|name| unsafe { + mem::transmute(entry.get_instance_proc_addr(instance.handle(), name.as_ptr())) + }); + MacOSSurface { + handle: instance.handle(), + macos_surface_fn: surface_fn, + } + } + + pub fn name() -> &'static CStr { + vk::MvkMacosSurfaceFn::name() + } + + #[doc = ""] + pub unsafe fn create_mac_os_surface_mvk( + &self, + create_info: &vk::MacOSSurfaceCreateInfoMVK, + allocation_callbacks: Option<&vk::AllocationCallbacks>, + ) -> VkResult { + let mut surface = mem::uninitialized(); + let err_code = self.macos_surface_fn.create_mac_os_surface_mvk( + self.handle, + create_info, + 
allocation_callbacks.as_raw_ptr(), + &mut surface, + ); + match err_code { + vk::Result::SUCCESS => Ok(surface), + _ => Err(err_code), + } + } +} diff --git a/third_party/rust/ash/src/extensions/mvk/mod.rs b/third_party/rust/ash/src/extensions/mvk/mod.rs new file mode 100644 index 000000000000..e475d535633e --- /dev/null +++ b/third_party/rust/ash/src/extensions/mvk/mod.rs @@ -0,0 +1,5 @@ +pub use self::ios_surface::IOSSurface; +pub use self::macos_surface::MacOSSurface; + +mod ios_surface; +mod macos_surface; diff --git a/third_party/rust/ash/src/extensions/nv/mesh_shader.rs b/third_party/rust/ash/src/extensions/nv/mesh_shader.rs new file mode 100644 index 000000000000..c9829514ebdf --- /dev/null +++ b/third_party/rust/ash/src/extensions/nv/mesh_shader.rs @@ -0,0 +1,70 @@ +#![allow(dead_code)] +use std::ffi::CStr; +use std::mem; +use version::{DeviceV1_0, InstanceV1_0}; +use vk; + +#[derive(Clone)] +pub struct MeshShader { + mesh_shader_fn: vk::NvMeshShaderFn, +} + +impl MeshShader { + pub fn new(instance: &I, device: &D) -> MeshShader { + let mesh_shader_fn = vk::NvMeshShaderFn::load(|name| unsafe { + mem::transmute(instance.get_device_proc_addr(device.handle(), name.as_ptr())) + }); + MeshShader { mesh_shader_fn } + } + #[doc = ""] + pub unsafe fn cmd_draw_mesh_tasks( + &self, + command_buffer: vk::CommandBuffer, + task_count: u32, + first_task: u32, + ) { + self.mesh_shader_fn + .cmd_draw_mesh_tasks_nv(command_buffer, task_count, first_task); + } + #[doc = ""] + pub unsafe fn cmd_draw_mesh_tasks_indirect( + &self, + command_buffer: vk::CommandBuffer, + buffer: vk::Buffer, + offset: vk::DeviceSize, + draw_count: u32, + stride: u32, + ) { + self.mesh_shader_fn.cmd_draw_mesh_tasks_indirect_nv( + command_buffer, + buffer, + offset, + draw_count, + stride, + ); + } + #[doc = ""] + pub unsafe fn cmd_draw_mesh_tasks_indirect_count( + &self, + command_buffer: vk::CommandBuffer, + buffer: vk::Buffer, + offset: vk::DeviceSize, + count_buffer: vk::Buffer, + count_buffer_offset: vk::DeviceSize, + max_draw_count: u32, + stride: u32, + ) { + self.mesh_shader_fn.cmd_draw_mesh_tasks_indirect_count_nv( + command_buffer, + buffer, + offset, + count_buffer, + count_buffer_offset, + max_draw_count, + stride, + ); + } + pub fn name() -> &'static CStr { + vk::NvMeshShaderFn::name() + } +} diff --git a/third_party/rust/ash/src/extensions/nv/mod.rs b/third_party/rust/ash/src/extensions/nv/mod.rs new file mode 100644 index 000000000000..ffc0c0197077 --- /dev/null +++ b/third_party/rust/ash/src/extensions/nv/mod.rs @@ -0,0 +1,5 @@ +pub use self::mesh_shader::MeshShader; +pub use self::ray_tracing::RayTracing; + +mod mesh_shader; +mod ray_tracing; diff --git a/third_party/rust/ash/src/extensions/nv/ray_tracing.rs b/third_party/rust/ash/src/extensions/nv/ray_tracing.rs new file mode 100644 index 000000000000..02aa71f7ad72 --- /dev/null +++ b/third_party/rust/ash/src/extensions/nv/ray_tracing.rs @@ -0,0 +1,274 @@ +#![allow(dead_code)] +use prelude::*; +use std::ffi::CStr; +use std::mem; +use version::{DeviceV1_0, InstanceV1_0, InstanceV1_1}; +use vk; +use RawPtr; + +#[derive(Clone)] +pub struct RayTracing { + handle: vk::Device, + ray_tracing_fn: vk::NvRayTracingFn, +} + +impl RayTracing { + pub fn new(instance: &I, device: &D) -> RayTracing { + let ray_tracing_fn = vk::NvRayTracingFn::load(|name| unsafe { + mem::transmute(instance.get_device_proc_addr(device.handle(), name.as_ptr())) + }); + RayTracing { + handle: device.handle(), + ray_tracing_fn, + } + } + + pub unsafe fn get_properties( + instance: &I, + 
pdevice: vk::PhysicalDevice, + ) -> vk::PhysicalDeviceRayTracingPropertiesNV { + let mut props_rt = vk::PhysicalDeviceRayTracingPropertiesNV::default(); + { + let mut props = vk::PhysicalDeviceProperties2::builder().push_next(&mut props_rt); + instance.get_physical_device_properties2(pdevice, &mut props); + } + props_rt + } + + #[doc = ""] + pub unsafe fn create_acceleration_structure( + &self, + create_info: &vk::AccelerationStructureCreateInfoNV, + allocation_callbacks: Option<&vk::AllocationCallbacks>, + ) -> VkResult { + let mut accel_struct = mem::uninitialized(); + let err_code = self.ray_tracing_fn.create_acceleration_structure_nv( + self.handle, + create_info, + allocation_callbacks.as_raw_ptr(), + &mut accel_struct, + ); + match err_code { + vk::Result::SUCCESS => Ok(accel_struct), + _ => Err(err_code), + } + } + + #[doc = ""] + pub unsafe fn destroy_acceleration_structure( + &self, + accel_struct: vk::AccelerationStructureNV, + allocation_callbacks: Option<&vk::AllocationCallbacks>, + ) { + self.ray_tracing_fn.destroy_acceleration_structure_nv( + self.handle, + accel_struct, + allocation_callbacks.as_raw_ptr(), + ); + } + + #[doc = ""] + pub unsafe fn get_acceleration_structure_memory_requirements( + &self, + info: &vk::AccelerationStructureMemoryRequirementsInfoNV, + ) -> vk::MemoryRequirements2KHR { + let mut requirements = mem::uninitialized(); + self.ray_tracing_fn + .get_acceleration_structure_memory_requirements_nv( + self.handle, + info, + &mut requirements, + ); + requirements + } + + #[doc = ""] + pub unsafe fn bind_acceleration_structure_memory( + &self, + bind_info: &[vk::BindAccelerationStructureMemoryInfoNV], + ) -> VkResult<()> { + let err_code = self.ray_tracing_fn.bind_acceleration_structure_memory_nv( + self.handle, + bind_info.len() as u32, + bind_info.as_ptr(), + ); + match err_code { + vk::Result::SUCCESS => Ok(()), + _ => Err(err_code), + } + } + + #[doc = ""] + pub unsafe fn cmd_build_acceleration_structure( + &self, + command_buffer: vk::CommandBuffer, + info: &vk::AccelerationStructureInfoNV, + instance_data: vk::Buffer, + instance_offset: vk::DeviceSize, + update: bool, + dst: vk::AccelerationStructureNV, + src: vk::AccelerationStructureNV, + scratch: vk::Buffer, + scratch_offset: vk::DeviceSize, + ) { + self.ray_tracing_fn.cmd_build_acceleration_structure_nv( + command_buffer, + info, + instance_data, + instance_offset, + if update { vk::TRUE } else { vk::FALSE }, + dst, + src, + scratch, + scratch_offset, + ); + } + + #[doc = ""] + pub unsafe fn cmd_copy_acceleration_structure( + &self, + command_buffer: vk::CommandBuffer, + dst: vk::AccelerationStructureNV, + src: vk::AccelerationStructureNV, + mode: vk::CopyAccelerationStructureModeNV, + ) { + self.ray_tracing_fn + .cmd_copy_acceleration_structure_nv(command_buffer, dst, src, mode); + } + + #[doc = ""] + pub unsafe fn cmd_trace_rays( + &self, + command_buffer: vk::CommandBuffer, + raygen_shader_binding_table_buffer: vk::Buffer, + raygen_shader_binding_offset: vk::DeviceSize, + miss_shader_binding_table_buffer: vk::Buffer, + miss_shader_binding_offset: vk::DeviceSize, + miss_shader_binding_stride: vk::DeviceSize, + hit_shader_binding_table_buffer: vk::Buffer, + hit_shader_binding_offset: vk::DeviceSize, + hit_shader_binding_stride: vk::DeviceSize, + callable_shader_binding_table_buffer: vk::Buffer, + callable_shader_binding_offset: vk::DeviceSize, + callable_shader_binding_stride: vk::DeviceSize, + width: u32, + height: u32, + depth: u32, + ) { + self.ray_tracing_fn.cmd_trace_rays_nv( + command_buffer, 
+ raygen_shader_binding_table_buffer, + raygen_shader_binding_offset, + miss_shader_binding_table_buffer, + miss_shader_binding_offset, + miss_shader_binding_stride, + hit_shader_binding_table_buffer, + hit_shader_binding_offset, + hit_shader_binding_stride, + callable_shader_binding_table_buffer, + callable_shader_binding_offset, + callable_shader_binding_stride, + width, + height, + depth, + ); + } + + #[doc = ""] + pub unsafe fn create_ray_tracing_pipelines( + &self, + pipeline_cache: vk::PipelineCache, + create_info: &[vk::RayTracingPipelineCreateInfoNV], + allocation_callbacks: Option<&vk::AllocationCallbacks>, + ) -> VkResult> { + let mut pipelines = vec![mem::uninitialized(); create_info.len()]; + let err_code = self.ray_tracing_fn.create_ray_tracing_pipelines_nv( + self.handle, + pipeline_cache, + create_info.len() as u32, + create_info.as_ptr(), + allocation_callbacks.as_raw_ptr(), + pipelines.as_mut_ptr(), + ); + match err_code { + vk::Result::SUCCESS => Ok(pipelines), + _ => Err(err_code), + } + } + + #[doc = ""] + pub unsafe fn get_ray_tracing_shader_group_handles( + &self, + pipeline: vk::Pipeline, + first_group: u32, + group_count: u32, + data: &mut [u8], + ) -> VkResult<()> { + let err_code = self.ray_tracing_fn.get_ray_tracing_shader_group_handles_nv( + self.handle, + pipeline, + first_group, + group_count, + data.len(), + data.as_mut_ptr() as *mut std::ffi::c_void, + ); + match err_code { + vk::Result::SUCCESS => Ok(()), + _ => Err(err_code), + } + } + + #[doc = ""] + pub unsafe fn get_acceleration_structure_handle( + &self, + accel_struct: vk::AccelerationStructureNV, + ) -> VkResult { + let mut handle: u64 = 0; + let handle_ptr: *mut u64 = &mut handle; + let err_code = self.ray_tracing_fn.get_acceleration_structure_handle_nv( + self.handle, + accel_struct, + std::mem::size_of::(), + handle_ptr as *mut std::ffi::c_void, + ); + match err_code { + vk::Result::SUCCESS => Ok(handle), + _ => Err(err_code), + } + } + + #[doc = ""] + pub unsafe fn cmd_write_acceleration_structures_properties( + &self, + command_buffer: vk::CommandBuffer, + structures: &[vk::AccelerationStructureNV], + query_type: vk::QueryType, + query_pool: vk::QueryPool, + first_query: u32, + ) { + self.ray_tracing_fn + .cmd_write_acceleration_structures_properties_nv( + command_buffer, + structures.len() as u32, + structures.as_ptr(), + query_type, + query_pool, + first_query, + ); + } + + #[doc = ""] + pub unsafe fn compile_deferred(&self, pipeline: vk::Pipeline, shader: u32) -> VkResult<()> { + let err_code = self + .ray_tracing_fn + .compile_deferred_nv(self.handle, pipeline, shader); + match err_code { + vk::Result::SUCCESS => Ok(()), + _ => Err(err_code), + } + } + + pub fn name() -> &'static CStr { + vk::NvRayTracingFn::name() + } +} diff --git a/third_party/rust/ash/src/instance.rs b/third_party/rust/ash/src/instance.rs new file mode 100644 index 000000000000..4db5ceb0dc34 --- /dev/null +++ b/third_party/rust/ash/src/instance.rs @@ -0,0 +1,427 @@ +#![allow(dead_code)] +use device::Device; +use prelude::*; +use std::mem; +use std::os::raw::c_char; +use std::ptr; +use vk; +use RawPtr; + +#[doc = ""] +#[derive(Clone)] +pub struct Instance { + handle: vk::Instance, + instance_fn_1_0: vk::InstanceFnV1_0, + instance_fn_1_1: vk::InstanceFnV1_1, +} +impl Instance { + pub unsafe fn load(static_fn: &vk::StaticFn, instance: vk::Instance) -> Self { + let instance_fn_1_0 = vk::InstanceFnV1_0::load(|name| { + mem::transmute(static_fn.get_instance_proc_addr(instance, name.as_ptr())) + }); + let instance_fn_1_1 = 
vk::InstanceFnV1_1::load(|name| { + mem::transmute(static_fn.get_instance_proc_addr(instance, name.as_ptr())) + }); + + Instance { + handle: instance, + instance_fn_1_0, + instance_fn_1_1, + } + } +} + +impl InstanceV1_0 for Instance { + type Device = Device; + #[doc = ""] + unsafe fn create_device( + &self, + physical_device: vk::PhysicalDevice, + create_info: &vk::DeviceCreateInfo, + allocation_callbacks: Option<&vk::AllocationCallbacks>, + ) -> Result { + let mut device: vk::Device = mem::uninitialized(); + let err_code = self.fp_v1_0().create_device( + physical_device, + create_info, + allocation_callbacks.as_raw_ptr(), + &mut device, + ); + if err_code != vk::Result::SUCCESS { + return Err(err_code); + } + Ok(Device::load(&self.instance_fn_1_0, device)) + } + fn handle(&self) -> vk::Instance { + self.handle + } + + fn fp_v1_0(&self) -> &vk::InstanceFnV1_0 { + &self.instance_fn_1_0 + } +} + +impl InstanceV1_1 for Instance { + fn fp_v1_1(&self) -> &vk::InstanceFnV1_1 { + &self.instance_fn_1_1 + } +} + +#[allow(non_camel_case_types)] +pub trait InstanceV1_1: InstanceV1_0 { + fn fp_v1_1(&self) -> &vk::InstanceFnV1_1; + + unsafe fn enumerate_physical_device_groups_len(&self) -> usize { + let mut group_count = mem::uninitialized(); + self.fp_v1_1().enumerate_physical_device_groups( + self.handle(), + &mut group_count, + ptr::null_mut(), + ); + group_count as usize + } + + #[doc = ""] + fn enumerate_physical_device_groups( + &self, + out: &mut [vk::PhysicalDeviceGroupProperties], + ) -> VkResult<()> { + unsafe { + let mut group_count = out.len() as u32; + let err_code = self.fp_v1_1().enumerate_physical_device_groups( + self.handle(), + &mut group_count, + out.as_mut_ptr(), + ); + if err_code == vk::Result::SUCCESS { + Ok(()) + } else { + Err(err_code) + } + } + } + + #[doc = ""] + unsafe fn get_physical_device_properties2( + &self, + physical_device: vk::PhysicalDevice, + prop: &mut vk::PhysicalDeviceProperties2, + ) { + self.fp_v1_1() + .get_physical_device_properties2(physical_device, prop); + } + + #[doc = ""] + unsafe fn get_physical_device_format_properties2( + &self, + physical_device: vk::PhysicalDevice, + format: vk::Format, + out: &mut vk::FormatProperties2, + ) { + self.fp_v1_1() + .get_physical_device_format_properties2(physical_device, format, out); + } + + #[doc = ""] + unsafe fn get_physical_device_image_format_properties2( + &self, + physical_device: vk::PhysicalDevice, + format_info: &vk::PhysicalDeviceImageFormatInfo2, + image_format_prop: &mut vk::ImageFormatProperties2, + ) -> VkResult<()> { + let err_code = self.fp_v1_1().get_physical_device_image_format_properties2( + physical_device, + format_info, + image_format_prop, + ); + if err_code == vk::Result::SUCCESS { + Ok(()) + } else { + Err(err_code) + } + } + + unsafe fn get_physical_device_queue_family_properties2_len( + &self, + physical_device: vk::PhysicalDevice, + ) -> usize { + let mut queue_count = 0; + self.fp_v1_1().get_physical_device_queue_family_properties2( + physical_device, + &mut queue_count, + ptr::null_mut(), + ); + queue_count as usize + } + + #[doc = ""] + unsafe fn get_physical_device_queue_family_properties2( + &self, + physical_device: vk::PhysicalDevice, + queue_family_props: &mut [vk::QueueFamilyProperties2], + ) { + let mut queue_count = queue_family_props.len() as u32; + self.fp_v1_1().get_physical_device_queue_family_properties2( + physical_device, + &mut queue_count, + queue_family_props.as_mut_ptr(), + ); + } + + #[doc = ""] + unsafe fn get_physical_device_memory_properties2( + &self, + 
physical_device: vk::PhysicalDevice, + out: &mut vk::PhysicalDeviceMemoryProperties2, + ) { + self.fp_v1_1() + .get_physical_device_memory_properties2(physical_device, out); + } + + unsafe fn get_physical_device_sparse_image_format_properties2_len( + &self, + physical_device: vk::PhysicalDevice, + format_info: &vk::PhysicalDeviceSparseImageFormatInfo2, + ) -> usize { + let mut format_count = 0; + self.fp_v1_1() + .get_physical_device_sparse_image_format_properties2( + physical_device, + format_info, + &mut format_count, + ptr::null_mut(), + ); + format_count as usize + } + + #[doc = ""] + unsafe fn get_physical_device_sparse_image_format_properties2( + &self, + physical_device: vk::PhysicalDevice, + format_info: &vk::PhysicalDeviceSparseImageFormatInfo2, + out: &mut [vk::SparseImageFormatProperties2], + ) { + let mut format_count = out.len() as u32; + self.fp_v1_1() + .get_physical_device_sparse_image_format_properties2( + physical_device, + format_info, + &mut format_count, + out.as_mut_ptr(), + ); + } + + #[doc = ""] + unsafe fn get_physical_device_external_buffer_properties( + &self, + physical_device: vk::PhysicalDevice, + external_buffer_info: &vk::PhysicalDeviceExternalBufferInfo, + out: &mut vk::ExternalBufferProperties, + ) { + self.fp_v1_1() + .get_physical_device_external_buffer_properties( + physical_device, + external_buffer_info, + out, + ); + } + + #[doc = ""] + unsafe fn get_physical_device_external_fence_properties( + &self, + physical_device: vk::PhysicalDevice, + external_fence_info: &vk::PhysicalDeviceExternalFenceInfo, + out: &mut vk::ExternalFenceProperties, + ) { + self.fp_v1_1() + .get_physical_device_external_fence_properties( + physical_device, + external_fence_info, + out, + ); + } + + #[doc = ""] + unsafe fn get_physical_device_external_semaphore_properties( + &self, + physical_device: vk::PhysicalDevice, + external_semaphore_info: &vk::PhysicalDeviceExternalSemaphoreInfo, + out: &mut vk::ExternalSemaphoreProperties, + ) { + self.fp_v1_1() + .get_physical_device_external_semaphore_properties( + physical_device, + external_semaphore_info, + out, + ); + } +} + +#[allow(non_camel_case_types)] +pub trait InstanceV1_0 { + type Device; + fn handle(&self) -> vk::Instance; + fn fp_v1_0(&self) -> &vk::InstanceFnV1_0; + #[doc = ""] + unsafe fn create_device( + &self, + physical_device: vk::PhysicalDevice, + create_info: &vk::DeviceCreateInfo, + allocation_callbacks: Option<&vk::AllocationCallbacks>, + ) -> Result; + + #[doc = ""] + unsafe fn get_device_proc_addr( + &self, + device: vk::Device, + p_name: *const c_char, + ) -> vk::PFN_vkVoidFunction { + self.fp_v1_0().get_device_proc_addr(device, p_name) + } + + #[doc = ""] + unsafe fn destroy_instance(&self, allocation_callbacks: Option<&vk::AllocationCallbacks>) { + self.fp_v1_0() + .destroy_instance(self.handle(), allocation_callbacks.as_raw_ptr()); + } + + #[doc = ""] + unsafe fn get_physical_device_format_properties( + &self, + physical_device: vk::PhysicalDevice, + format: vk::Format, + ) -> vk::FormatProperties { + let mut format_prop = mem::uninitialized(); + self.fp_v1_0().get_physical_device_format_properties( + physical_device, + format, + &mut format_prop, + ); + format_prop + } + + #[doc = ""] + unsafe fn get_physical_device_image_format_properties( + &self, + physical_device: vk::PhysicalDevice, + format: vk::Format, + typ: vk::ImageType, + tiling: vk::ImageTiling, + usage: vk::ImageUsageFlags, + flags: vk::ImageCreateFlags, + ) -> VkResult { + let mut image_format_prop = mem::uninitialized(); + let err_code = 
+ +#[allow(non_camel_case_types)] +pub trait InstanceV1_0 { + type Device; + fn handle(&self) -> vk::Instance; + fn fp_v1_0(&self) -> &vk::InstanceFnV1_0; + #[doc = ""] + unsafe fn create_device( + &self, + physical_device: vk::PhysicalDevice, + create_info: &vk::DeviceCreateInfo, + allocation_callbacks: Option<&vk::AllocationCallbacks>, + ) -> Result<Self::Device, vk::Result>; + + #[doc = ""] + unsafe fn get_device_proc_addr( + &self, + device: vk::Device, + p_name: *const c_char, + ) -> vk::PFN_vkVoidFunction { + self.fp_v1_0().get_device_proc_addr(device, p_name) + } + + #[doc = ""] + unsafe fn destroy_instance(&self, allocation_callbacks: Option<&vk::AllocationCallbacks>) { + self.fp_v1_0() + .destroy_instance(self.handle(), allocation_callbacks.as_raw_ptr()); + } + + #[doc = ""] + unsafe fn get_physical_device_format_properties( + &self, + physical_device: vk::PhysicalDevice, + format: vk::Format, + ) -> vk::FormatProperties { + let mut format_prop = mem::uninitialized(); + self.fp_v1_0().get_physical_device_format_properties( + physical_device, + format, + &mut format_prop, + ); + format_prop + } + + #[doc = ""] + unsafe fn get_physical_device_image_format_properties( + &self, + physical_device: vk::PhysicalDevice, + format: vk::Format, + typ: vk::ImageType, + tiling: vk::ImageTiling, + usage: vk::ImageUsageFlags, + flags: vk::ImageCreateFlags, + ) -> VkResult<vk::ImageFormatProperties> { + let mut image_format_prop = mem::uninitialized(); + let err_code = self.fp_v1_0().get_physical_device_image_format_properties( + physical_device, + format, + typ, + tiling, + usage, + flags, + &mut image_format_prop, + ); + if err_code == vk::Result::SUCCESS { + Ok(image_format_prop) + } else { + Err(err_code) + } + } + + #[doc = ""] + unsafe fn get_physical_device_memory_properties( + &self, + physical_device: vk::PhysicalDevice, + ) -> vk::PhysicalDeviceMemoryProperties { + let mut memory_prop = mem::uninitialized(); + self.fp_v1_0() + .get_physical_device_memory_properties(physical_device, &mut memory_prop); + memory_prop + } + + #[doc = ""] + unsafe fn get_physical_device_properties( + &self, + physical_device: vk::PhysicalDevice, + ) -> vk::PhysicalDeviceProperties { + let mut prop = mem::uninitialized(); + self.fp_v1_0() + .get_physical_device_properties(physical_device, &mut prop); + prop + } + + #[doc = ""] + unsafe fn get_physical_device_queue_family_properties( + &self, + physical_device: vk::PhysicalDevice, + ) -> Vec<vk::QueueFamilyProperties> { + let mut queue_count = 0; + self.fp_v1_0().get_physical_device_queue_family_properties( + physical_device, + &mut queue_count, + ptr::null_mut(), + ); + let mut queue_families_vec = Vec::with_capacity(queue_count as usize); + self.fp_v1_0().get_physical_device_queue_family_properties( + physical_device, + &mut queue_count, + queue_families_vec.as_mut_ptr(), + ); + queue_families_vec.set_len(queue_count as usize); + queue_families_vec + } + + #[doc = ""] + unsafe fn get_physical_device_features( + &self, + physical_device: vk::PhysicalDevice, + ) -> vk::PhysicalDeviceFeatures { + let mut prop = mem::uninitialized(); + self.fp_v1_0() + .get_physical_device_features(physical_device, &mut prop); + prop + } + + #[doc = ""] + unsafe fn enumerate_physical_devices(&self) -> VkResult<Vec<vk::PhysicalDevice>> { + let mut num = mem::uninitialized(); + self.fp_v1_0() + .enumerate_physical_devices(self.handle(), &mut num, ptr::null_mut()); + let mut physical_devices = Vec::<vk::PhysicalDevice>::with_capacity(num as usize); + let err_code = self.fp_v1_0().enumerate_physical_devices( + self.handle(), + &mut num, + physical_devices.as_mut_ptr(), + ); + physical_devices.set_len(num as usize); + match err_code { + vk::Result::SUCCESS => Ok(physical_devices), + _ => Err(err_code), + } + } + + #[doc = ""] + unsafe fn enumerate_device_extension_properties( + &self, + device: vk::PhysicalDevice, + ) -> Result<Vec<vk::ExtensionProperties>, vk::Result> { + let mut num = 0; + self.fp_v1_0().enumerate_device_extension_properties( + device, + ptr::null(), + &mut num, + ptr::null_mut(), + ); + let mut data = Vec::with_capacity(num as usize); + let err_code = self.fp_v1_0().enumerate_device_extension_properties( + device, + ptr::null(), + &mut num, + data.as_mut_ptr(), + ); + data.set_len(num as usize); + match err_code { + vk::Result::SUCCESS => Ok(data), + _ => Err(err_code), + } + } +}
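`enumerate_physical_devices` and `enumerate_device_extension_properties` above both hand-roll the same count-then-fill dance with `Vec::with_capacity` and `set_len`. Factored out, the shape looks like the sketch below; `count_then_fill` is a hypothetical helper for illustration, not part of ash:

```rust
use ash::vk;
use std::ptr;

/// Hypothetical helper: call once with a null output pointer to learn the
/// count, then again with a buffer of exactly that capacity, and only then
/// mark the elements as initialized with `set_len`.
unsafe fn count_then_fill<T, F>(mut fill: F) -> Result<Vec<T>, vk::Result>
where
    F: FnMut(*mut u32, *mut T) -> vk::Result,
{
    let mut count: u32 = 0;
    // Pass 1: the out-pointer is null, so only `count` is written.
    fill(&mut count, ptr::null_mut());
    let mut data = Vec::with_capacity(count as usize);
    // Pass 2: the driver writes `count` elements into the buffer.
    let err = fill(&mut count, data.as_mut_ptr());
    if err == vk::Result::SUCCESS {
        data.set_len(count as usize);
        Ok(data)
    } else {
        Err(err)
    }
}
```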
diff --git a/third_party/rust/ash/src/lib.rs b/third_party/rust/ash/src/lib.rs new file mode 100644 index 000000000000..7ef454c4e455 --- /dev/null +++ b/third_party/rust/ash/src/lib.rs @@ -0,0 +1,80 @@ + + + + + + + + + + + + + + + + + + + + + + + + + +extern crate shared_library; + +pub use device::Device; +pub use entry::{Entry, EntryCustom, InstanceError, LoadingError}; +pub use instance::Instance; + +mod device; +mod entry; +mod instance; +pub mod prelude; +pub mod util; +pub mod version; +#[macro_use] +pub mod vk; + + +pub mod extensions; + +pub trait RawPtr<T> { + fn as_raw_ptr(&self) -> *const T; +} + +impl<'r, T> RawPtr<T> for Option<&'r T> { + fn as_raw_ptr(&self) -> *const T { + match self { + &Some(inner) => inner as *const T, + + _ => ::std::ptr::null(), + } + } +} + +#[cfg(test)] +mod tests { + use super::vk; + #[test] + fn test_ptr_chains() { + let mut variable_pointers = vk::PhysicalDeviceVariablePointerFeatures::builder(); + let mut corner = vk::PhysicalDeviceCornerSampledImageFeaturesNV::builder(); + let chain = vec![ + &variable_pointers as *const _ as usize, + &corner as *const _ as usize, + ]; + let mut device_create_info = vk::DeviceCreateInfo::builder() + .push_next(&mut corner) + .push_next(&mut variable_pointers); + let chain2: Vec<usize> = unsafe { + vk::ptr_chain_iter(&mut device_create_info) + .skip(1) + .map(|ptr| ptr as usize) + .collect() + }; + assert_eq!(chain, chain2); + } +} diff --git a/third_party/rust/ash/src/prelude.rs b/third_party/rust/ash/src/prelude.rs new file mode 100644 index 000000000000..a01406c9bc3c --- /dev/null +++ b/third_party/rust/ash/src/prelude.rs @@ -0,0 +1,2 @@ +use vk; +pub type VkResult<T> = Result<T, vk::Result>; diff --git a/third_party/rust/ash/src/util.rs b/third_party/rust/ash/src/util.rs new file mode 100644 index 000000000000..231514db7aeb --- /dev/null +++ b/third_party/rust/ash/src/util.rs @@ -0,0 +1,138 @@ +use std::iter::Iterator; +use std::marker::PhantomData; +use std::mem::size_of; +use std::os::raw::c_void; +use std::{io, slice}; +use vk; + + + + + + + + +#[derive(Debug, Clone)] +pub struct Align<T> { + ptr: *mut c_void, + elem_size: vk::DeviceSize, + size: vk::DeviceSize, + _m: PhantomData<T>, +} + +#[derive(Debug)] +pub struct AlignIter<'a, T: 'a> { + align: &'a mut Align<T>, + current: vk::DeviceSize, +} + +impl<T: Copy> Align<T> { + pub fn copy_from_slice(&mut self, slice: &[T]) { + use std::slice::from_raw_parts_mut; + if self.elem_size == size_of::<T>() as u64 { + unsafe { + let mapped_slice = from_raw_parts_mut(self.ptr as *mut T, slice.len()); + mapped_slice.copy_from_slice(slice); + } + } else { + for (i, val) in self.iter_mut().enumerate().take(slice.len()) { + *val = slice[i]; + } + } + } +} + +fn calc_padding(adr: vk::DeviceSize, align: vk::DeviceSize) -> vk::DeviceSize { + (align - adr % align) % align +} + +impl<T> Align<T> { + pub unsafe fn new(ptr: *mut c_void, alignment: vk::DeviceSize, size: vk::DeviceSize) -> Self { + let padding = calc_padding(size_of::<T>() as vk::DeviceSize, alignment); + let elem_size = size_of::<T>() as vk::DeviceSize + padding; + assert!(calc_padding(size, alignment) == 0, "size must be aligned"); + Align { + ptr, + elem_size, + size, + _m: PhantomData, + } + } + + pub fn iter_mut(&mut self) -> AlignIter<T> { + AlignIter { + current: 0, + align: self, + } + } +} + +impl<'a, T: Copy + 'a> Iterator for AlignIter<'a, T> { + type Item = &'a mut T; + fn next(&mut self) -> Option<Self::Item> { + if self.current == self.align.size { + return None; + } + unsafe { + + let ptr = (self.align.ptr as *mut u8).offset(self.current as isize) as *mut T; + self.current += self.align.elem_size; + Some(&mut *ptr) + } + } +} + + + + + + + + + + + + + + + + + + + + +pub fn read_spv<R: io::Read + io::Seek>(x: &mut R) -> io::Result<Vec<u32>> { + let size = x.seek(io::SeekFrom::End(0))?; + if size % 4 != 0 { + return Err(io::Error::new( + io::ErrorKind::InvalidData, + "input length not divisible by 4", + )); + } + if size > usize::max_value() as u64 { + return Err(io::Error::new(io::ErrorKind::InvalidData, "input too long")); + } + let words = (size / 4) as usize; + let mut result = Vec::<u32>::with_capacity(words); + x.seek(io::SeekFrom::Start(0))?; + unsafe { + x.read_exact(slice::from_raw_parts_mut( + result.as_mut_ptr() as *mut u8, + words * 4, + ))?; + result.set_len(words); + } + const MAGIC_NUMBER: u32 = 0x07230203; + if result.len() > 0 && result[0] == MAGIC_NUMBER.swap_bytes() { + for word in &mut result { + *word = word.swap_bytes(); + } + } + if result.len() == 0 || result[0] != MAGIC_NUMBER { + return Err(io::Error::new( + io::ErrorKind::InvalidData, + "input missing SPIR-V magic number", + )); + } + Ok(result) +}
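`read_spv` above does three things worth noting: it rejects inputs whose length is not a multiple of four, reads the module as raw 32-bit words, and byte-swaps the whole buffer when the SPIR-V magic number is stored in the opposite endianness. A small usage sketch; the shader path is illustrative:

```rust
use ash::util::read_spv;
use std::fs::File;
use std::io;

fn load_shader_words(path: &str) -> io::Result<Vec<u32>> {
    // `read_spv` only needs Read + Seek, so a File works directly,
    // as would an in-memory io::Cursor over embedded bytes.
    let mut file = File::open(path)?;
    let words = read_spv(&mut file)?;
    // Endianness has been normalized: word 0 is the SPIR-V magic number.
    debug_assert_eq!(words[0], 0x0723_0203);
    Ok(words)
}
```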
diff --git a/third_party/rust/ash/src/version.rs b/third_party/rust/ash/src/version.rs new file mode 100644 index 000000000000..922f6bff0d00 --- /dev/null +++ b/third_party/rust/ash/src/version.rs @@ -0,0 +1,3 @@ +pub use device::{DeviceV1_0, DeviceV1_1}; +pub use entry::{EntryV1_0, EntryV1_1}; +pub use instance::{InstanceV1_0, InstanceV1_1}; diff --git a/third_party/rust/ash/src/vk.rs b/third_party/rust/ash/src/vk.rs new file mode 100644 index 000000000000..bcd41b8e0f59 --- /dev/null +++ b/third_party/rust/ash/src/vk.rs @@ -0,0 +1,63147 @@ +use std::fmt; +use std::os::raw::*; +#[doc = r" Iterates through the pointer chain. Includes the item that is passed into the function."] +#[doc = r" Stops at the last `BaseOutStructure` that has a null `p_next` field."] +pub(crate) unsafe fn ptr_chain_iter<T>(ptr: &mut T) -> impl Iterator<Item = *mut BaseOutStructure> { + use std::ptr::null_mut; + let ptr: *mut BaseOutStructure = ptr as *mut T as _; + (0..).scan(ptr, |p_ptr, _| { + if *p_ptr == null_mut() { + return None; + } + let n_ptr = (**p_ptr).p_next as *mut BaseOutStructure; + let old = *p_ptr; + *p_ptr = n_ptr; + Some(old) + }) +} +pub trait Handle { + const TYPE: ObjectType; + fn as_raw(self) -> u64; + fn from_raw(u64) -> Self; +} +#[doc = ""] +#[macro_export] +macro_rules! vk_make_version { + ( $ major : expr , $ minor : expr , $ patch : expr ) => { + (($major as u32) << 22) | (($minor as u32) << 12) | $patch as u32 + }; +} +#[doc = ""] +#[macro_export] +macro_rules! vk_version_major { + ( $ major : expr ) => { + ($major as u32) >> 22 + }; +} +#[doc = ""] +#[macro_export] +macro_rules! vk_version_minor { + ( $ minor : expr ) => { + (($minor as u32) >> 12) & 0x3ff + }; +} +#[doc = ""] +#[macro_export] +macro_rules! vk_version_patch { + ( $ minor : expr ) => { + ($minor as u32) & 0xfff + }; +} +pub type RROutput = c_ulong; +pub type VisualID = c_uint; +pub type Display = *const c_void; +pub type Window = c_ulong; +#[allow(non_camel_case_types)] +pub type xcb_connection_t = *const c_void; +#[allow(non_camel_case_types)] +pub type xcb_window_t = u32; +#[allow(non_camel_case_types)] +pub type xcb_visualid_t = *const c_void; +pub type MirConnection = *const c_void; +pub type MirSurface = *const c_void; +pub type HINSTANCE = *const c_void; +pub type HWND = *const c_void; +#[allow(non_camel_case_types)] +pub type wl_display = c_void; +#[allow(non_camel_case_types)] +pub type wl_surface = c_void; +pub type HANDLE = *mut c_void; +pub type DWORD = c_ulong; +pub type LPCWSTR = *const u16; +#[allow(non_camel_case_types)] +pub type zx_handle_t = u32; +#[allow(non_camel_case_types)] +pub type SECURITY_ATTRIBUTES = (); +pub type ANativeWindow = c_void; +pub type AHardwareBuffer = c_void; +#[macro_export] +macro_rules!
vk_bitflags_wrapped { + ( $ name : ident , $ all : expr , $ flag_type : ty ) => { + impl Default for $name { + fn default() -> $name { + $name(0) + } + } + impl $name { + #[inline] + pub fn empty() -> $name { + $name(0) + } + #[inline] + pub fn all() -> $name { + $name($all) + } + #[inline] + pub fn from_raw(x: $flag_type) -> Self { + $name(x) + } + #[inline] + pub fn as_raw(self) -> $flag_type { + self.0 + } + #[inline] + pub fn is_empty(self) -> bool { + self == $name::empty() + } + #[inline] + pub fn is_all(self) -> bool { + self & $name::all() == $name::all() + } + #[inline] + pub fn intersects(self, other: $name) -> bool { + self & other != $name::empty() + } + #[doc = r" Returns whether `other` is a subset of `self`"] + #[inline] + pub fn contains(self, other: $name) -> bool { + self & other == other + } + } + impl ::std::ops::BitOr for $name { + type Output = $name; + #[inline] + fn bitor(self, rhs: $name) -> $name { + $name(self.0 | rhs.0) + } + } + impl ::std::ops::BitOrAssign for $name { + #[inline] + fn bitor_assign(&mut self, rhs: $name) { + *self = *self | rhs + } + } + impl ::std::ops::BitAnd for $name { + type Output = $name; + #[inline] + fn bitand(self, rhs: $name) -> $name { + $name(self.0 & rhs.0) + } + } + impl ::std::ops::BitAndAssign for $name { + #[inline] + fn bitand_assign(&mut self, rhs: $name) { + *self = *self & rhs + } + } + impl ::std::ops::BitXor for $name { + type Output = $name; + #[inline] + fn bitxor(self, rhs: $name) -> $name { + $name(self.0 ^ rhs.0) + } + } + impl ::std::ops::BitXorAssign for $name { + #[inline] + fn bitxor_assign(&mut self, rhs: $name) { + *self = *self ^ rhs + } + } + impl ::std::ops::Sub for $name { + type Output = $name; + #[inline] + fn sub(self, rhs: $name) -> $name { + self & !rhs + } + } + impl ::std::ops::SubAssign for $name { + #[inline] + fn sub_assign(&mut self, rhs: $name) { + *self = *self - rhs + } + } + impl ::std::ops::Not for $name { + type Output = $name; + #[inline] + fn not(self) -> $name { + self ^ $name::all() + } + } + }; +} +#[macro_export] +macro_rules! handle_nondispatchable { + ( $ name : ident , $ ty : ident ) => { + handle_nondispatchable!($name, $ty, doc = ""); + }; + ( $ name : ident , $ ty : ident , $ doc_link : meta ) => { + #[repr(transparent)] + #[derive(Eq, PartialEq, Ord, PartialOrd, Clone, Copy, Hash, Default)] + #[$doc_link] + pub struct $name(u64); + impl Handle for $name { + const TYPE: ObjectType = ObjectType::$ty; + fn as_raw(self) -> u64 { + self.0 as u64 + } + fn from_raw(x: u64) -> Self { + $name(x as _) + } + } + impl $name { + pub fn null() -> $name { + $name(0) + } + } + impl fmt::Pointer for $name { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + write!(f, "0x{:x}", self.0) + } + } + impl fmt::Debug for $name { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + write!(f, "0x{:x}", self.0) + } + } + }; +} +#[macro_export] +macro_rules! 
define_handle { + ( $ name : ident , $ ty : ident ) => { + define_handle!($name, $ty, doc = ""); + }; + ( $ name : ident , $ ty : ident , $ doc_link : meta ) => { + #[repr(transparent)] + #[derive(Eq, PartialEq, Ord, PartialOrd, Clone, Copy, Hash)] + #[$doc_link] + pub struct $name(*mut u8); + impl Default for $name { + fn default() -> $name { + $name::null() + } + } + impl Handle for $name { + const TYPE: ObjectType = ObjectType::$ty; + fn as_raw(self) -> u64 { + self.0 as u64 + } + fn from_raw(x: u64) -> Self { + $name(x as _) + } + } + unsafe impl Send for $name {} + unsafe impl Sync for $name {} + impl $name { + pub fn null() -> Self { + $name(::std::ptr::null_mut()) + } + } + impl fmt::Pointer for $name { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + fmt::Pointer::fmt(&self.0, f) + } + } + impl fmt::Debug for $name { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + fmt::Debug::fmt(&self.0, f) + } + } + }; +} +#[allow(non_camel_case_types)] +pub type PFN_vkGetInstanceProcAddr = + extern "system" fn(instance: Instance, p_name: *const c_char) -> PFN_vkVoidFunction; +pub struct StaticFn { + pub get_instance_proc_addr: + extern "system" fn(instance: Instance, p_name: *const c_char) -> PFN_vkVoidFunction, +} +unsafe impl Send for StaticFn {} +unsafe impl Sync for StaticFn {} +impl ::std::clone::Clone for StaticFn { + fn clone(&self) -> Self { + StaticFn { + get_instance_proc_addr: self.get_instance_proc_addr, + } + } +} +impl StaticFn { + pub fn load(mut _f: F) -> Self + where + F: FnMut(&::std::ffi::CStr) -> *const c_void, + { + StaticFn { + get_instance_proc_addr: unsafe { + extern "system" fn get_instance_proc_addr( + _instance: Instance, + _p_name: *const c_char, + ) -> PFN_vkVoidFunction { + panic!(concat!( + "Unable to load ", + stringify!(get_instance_proc_addr) + )) + } + let raw_name = stringify!(vkGetInstanceProcAddr); + let cname = ::std::ffi::CString::new(raw_name).unwrap(); + let val = _f(&cname); + if val.is_null() { + get_instance_proc_addr + } else { + ::std::mem::transmute(val) + } + }, + } + } + #[doc = ""] + pub unsafe fn get_instance_proc_addr( + &self, + instance: Instance, + p_name: *const c_char, + ) -> PFN_vkVoidFunction { + (self.get_instance_proc_addr)(instance, p_name) + } +} +#[allow(non_camel_case_types)] +pub type PFN_vkCreateInstance = extern "system" fn( + p_create_info: *const InstanceCreateInfo, + p_allocator: *const AllocationCallbacks, + p_instance: *mut Instance, +) -> Result; +#[allow(non_camel_case_types)] +pub type PFN_vkEnumerateInstanceExtensionProperties = extern "system" fn( + p_layer_name: *const c_char, + p_property_count: *mut u32, + p_properties: *mut ExtensionProperties, +) -> Result; +#[allow(non_camel_case_types)] +pub type PFN_vkEnumerateInstanceLayerProperties = + extern "system" fn(p_property_count: *mut u32, p_properties: *mut LayerProperties) -> Result; +pub struct EntryFnV1_0 { + pub create_instance: extern "system" fn( + p_create_info: *const InstanceCreateInfo, + p_allocator: *const AllocationCallbacks, + p_instance: *mut Instance, + ) -> Result, + pub enumerate_instance_extension_properties: extern "system" fn( + p_layer_name: *const c_char, + p_property_count: *mut u32, + p_properties: *mut ExtensionProperties, + ) -> Result, + pub enumerate_instance_layer_properties: extern "system" fn( + p_property_count: *mut u32, + p_properties: *mut LayerProperties, + ) -> Result, +} +unsafe impl Send for EntryFnV1_0 {} +unsafe impl Sync for EntryFnV1_0 {} +impl ::std::clone::Clone for EntryFnV1_0 { + fn clone(&self) -> 
Self { + EntryFnV1_0 { + create_instance: self.create_instance, + enumerate_instance_extension_properties: self.enumerate_instance_extension_properties, + enumerate_instance_layer_properties: self.enumerate_instance_layer_properties, + } + } +} +impl EntryFnV1_0 { + pub fn load(mut _f: F) -> Self + where + F: FnMut(&::std::ffi::CStr) -> *const c_void, + { + EntryFnV1_0 { + create_instance: unsafe { + extern "system" fn create_instance( + _p_create_info: *const InstanceCreateInfo, + _p_allocator: *const AllocationCallbacks, + _p_instance: *mut Instance, + ) -> Result { + panic!(concat!("Unable to load ", stringify!(create_instance))) + } + let raw_name = stringify!(vkCreateInstance); + let cname = ::std::ffi::CString::new(raw_name).unwrap(); + let val = _f(&cname); + if val.is_null() { + create_instance + } else { + ::std::mem::transmute(val) + } + }, + enumerate_instance_extension_properties: unsafe { + extern "system" fn enumerate_instance_extension_properties( + _p_layer_name: *const c_char, + _p_property_count: *mut u32, + _p_properties: *mut ExtensionProperties, + ) -> Result { + panic!(concat!( + "Unable to load ", + stringify!(enumerate_instance_extension_properties) + )) + } + let raw_name = stringify!(vkEnumerateInstanceExtensionProperties); + let cname = ::std::ffi::CString::new(raw_name).unwrap(); + let val = _f(&cname); + if val.is_null() { + enumerate_instance_extension_properties + } else { + ::std::mem::transmute(val) + } + }, + enumerate_instance_layer_properties: unsafe { + extern "system" fn enumerate_instance_layer_properties( + _p_property_count: *mut u32, + _p_properties: *mut LayerProperties, + ) -> Result { + panic!(concat!( + "Unable to load ", + stringify!(enumerate_instance_layer_properties) + )) + } + let raw_name = stringify!(vkEnumerateInstanceLayerProperties); + let cname = ::std::ffi::CString::new(raw_name).unwrap(); + let val = _f(&cname); + if val.is_null() { + enumerate_instance_layer_properties + } else { + ::std::mem::transmute(val) + } + }, + } + } + #[doc = ""] + pub unsafe fn create_instance( + &self, + p_create_info: *const InstanceCreateInfo, + p_allocator: *const AllocationCallbacks, + p_instance: *mut Instance, + ) -> Result { + (self.create_instance)(p_create_info, p_allocator, p_instance) + } + #[doc = ""] + pub unsafe fn enumerate_instance_extension_properties( + &self, + p_layer_name: *const c_char, + p_property_count: *mut u32, + p_properties: *mut ExtensionProperties, + ) -> Result { + (self.enumerate_instance_extension_properties)(p_layer_name, p_property_count, p_properties) + } + #[doc = ""] + pub unsafe fn enumerate_instance_layer_properties( + &self, + p_property_count: *mut u32, + p_properties: *mut LayerProperties, + ) -> Result { + (self.enumerate_instance_layer_properties)(p_property_count, p_properties) + } +} +#[allow(non_camel_case_types)] +pub type PFN_vkDestroyInstance = + extern "system" fn(instance: Instance, p_allocator: *const AllocationCallbacks) -> c_void; +#[allow(non_camel_case_types)] +pub type PFN_vkEnumeratePhysicalDevices = extern "system" fn( + instance: Instance, + p_physical_device_count: *mut u32, + p_physical_devices: *mut PhysicalDevice, +) -> Result; +#[allow(non_camel_case_types)] +pub type PFN_vkGetPhysicalDeviceFeatures = extern "system" fn( + physical_device: PhysicalDevice, + p_features: *mut PhysicalDeviceFeatures, +) -> c_void; +#[allow(non_camel_case_types)] +pub type PFN_vkGetPhysicalDeviceFormatProperties = extern "system" fn( + physical_device: PhysicalDevice, + format: Format, + 
p_format_properties: *mut FormatProperties, +) -> c_void; +#[allow(non_camel_case_types)] +pub type PFN_vkGetPhysicalDeviceImageFormatProperties = extern "system" fn( + physical_device: PhysicalDevice, + format: Format, + ty: ImageType, + tiling: ImageTiling, + usage: ImageUsageFlags, + flags: ImageCreateFlags, + p_image_format_properties: *mut ImageFormatProperties, +) -> Result; +#[allow(non_camel_case_types)] +pub type PFN_vkGetPhysicalDeviceProperties = extern "system" fn( + physical_device: PhysicalDevice, + p_properties: *mut PhysicalDeviceProperties, +) -> c_void; +#[allow(non_camel_case_types)] +pub type PFN_vkGetPhysicalDeviceQueueFamilyProperties = extern "system" fn( + physical_device: PhysicalDevice, + p_queue_family_property_count: *mut u32, + p_queue_family_properties: *mut QueueFamilyProperties, +) -> c_void; +#[allow(non_camel_case_types)] +pub type PFN_vkGetPhysicalDeviceMemoryProperties = extern "system" fn( + physical_device: PhysicalDevice, + p_memory_properties: *mut PhysicalDeviceMemoryProperties, +) -> c_void; +#[allow(non_camel_case_types)] +pub type PFN_vkGetDeviceProcAddr = + extern "system" fn(device: Device, p_name: *const c_char) -> PFN_vkVoidFunction; +#[allow(non_camel_case_types)] +pub type PFN_vkCreateDevice = extern "system" fn( + physical_device: PhysicalDevice, + p_create_info: *const DeviceCreateInfo, + p_allocator: *const AllocationCallbacks, + p_device: *mut Device, +) -> Result; +#[allow(non_camel_case_types)] +pub type PFN_vkEnumerateDeviceExtensionProperties = extern "system" fn( + physical_device: PhysicalDevice, + p_layer_name: *const c_char, + p_property_count: *mut u32, + p_properties: *mut ExtensionProperties, +) -> Result; +#[allow(non_camel_case_types)] +pub type PFN_vkEnumerateDeviceLayerProperties = extern "system" fn( + physical_device: PhysicalDevice, + p_property_count: *mut u32, + p_properties: *mut LayerProperties, +) -> Result; +#[allow(non_camel_case_types)] +pub type PFN_vkGetPhysicalDeviceSparseImageFormatProperties = extern "system" fn( + physical_device: PhysicalDevice, + format: Format, + ty: ImageType, + samples: SampleCountFlags, + usage: ImageUsageFlags, + tiling: ImageTiling, + p_property_count: *mut u32, + p_properties: *mut SparseImageFormatProperties, +) -> c_void; +pub struct InstanceFnV1_0 { + pub destroy_instance: + extern "system" fn(instance: Instance, p_allocator: *const AllocationCallbacks) -> c_void, + pub enumerate_physical_devices: extern "system" fn( + instance: Instance, + p_physical_device_count: *mut u32, + p_physical_devices: *mut PhysicalDevice, + ) -> Result, + pub get_physical_device_features: extern "system" fn( + physical_device: PhysicalDevice, + p_features: *mut PhysicalDeviceFeatures, + ) -> c_void, + pub get_physical_device_format_properties: extern "system" fn( + physical_device: PhysicalDevice, + format: Format, + p_format_properties: *mut FormatProperties, + ) -> c_void, + pub get_physical_device_image_format_properties: extern "system" fn( + physical_device: PhysicalDevice, + format: Format, + ty: ImageType, + tiling: ImageTiling, + usage: ImageUsageFlags, + flags: ImageCreateFlags, + p_image_format_properties: *mut ImageFormatProperties, + ) -> Result, + pub get_physical_device_properties: extern "system" fn( + physical_device: PhysicalDevice, + p_properties: *mut PhysicalDeviceProperties, + ) -> c_void, + pub get_physical_device_queue_family_properties: extern "system" fn( + physical_device: PhysicalDevice, + p_queue_family_property_count: *mut u32, + p_queue_family_properties: *mut 
QueueFamilyProperties, + ) -> c_void, + pub get_physical_device_memory_properties: extern "system" fn( + physical_device: PhysicalDevice, + p_memory_properties: *mut PhysicalDeviceMemoryProperties, + ) -> c_void, + pub get_device_proc_addr: + extern "system" fn(device: Device, p_name: *const c_char) -> PFN_vkVoidFunction, + pub create_device: extern "system" fn( + physical_device: PhysicalDevice, + p_create_info: *const DeviceCreateInfo, + p_allocator: *const AllocationCallbacks, + p_device: *mut Device, + ) -> Result, + pub enumerate_device_extension_properties: extern "system" fn( + physical_device: PhysicalDevice, + p_layer_name: *const c_char, + p_property_count: *mut u32, + p_properties: *mut ExtensionProperties, + ) -> Result, + pub enumerate_device_layer_properties: extern "system" fn( + physical_device: PhysicalDevice, + p_property_count: *mut u32, + p_properties: *mut LayerProperties, + ) -> Result, + pub get_physical_device_sparse_image_format_properties: extern "system" fn( + physical_device: PhysicalDevice, + format: Format, + ty: ImageType, + samples: SampleCountFlags, + usage: ImageUsageFlags, + tiling: ImageTiling, + p_property_count: *mut u32, + p_properties: *mut SparseImageFormatProperties, + ) -> c_void, +} +unsafe impl Send for InstanceFnV1_0 {} +unsafe impl Sync for InstanceFnV1_0 {} +impl ::std::clone::Clone for InstanceFnV1_0 { + fn clone(&self) -> Self { + InstanceFnV1_0 { + destroy_instance: self.destroy_instance, + enumerate_physical_devices: self.enumerate_physical_devices, + get_physical_device_features: self.get_physical_device_features, + get_physical_device_format_properties: self.get_physical_device_format_properties, + get_physical_device_image_format_properties: self + .get_physical_device_image_format_properties, + get_physical_device_properties: self.get_physical_device_properties, + get_physical_device_queue_family_properties: self + .get_physical_device_queue_family_properties, + get_physical_device_memory_properties: self.get_physical_device_memory_properties, + get_device_proc_addr: self.get_device_proc_addr, + create_device: self.create_device, + enumerate_device_extension_properties: self.enumerate_device_extension_properties, + enumerate_device_layer_properties: self.enumerate_device_layer_properties, + get_physical_device_sparse_image_format_properties: self + .get_physical_device_sparse_image_format_properties, + } + } +} +impl InstanceFnV1_0 { + pub fn load(mut _f: F) -> Self + where + F: FnMut(&::std::ffi::CStr) -> *const c_void, + { + InstanceFnV1_0 { + destroy_instance: unsafe { + extern "system" fn destroy_instance( + _instance: Instance, + _p_allocator: *const AllocationCallbacks, + ) -> c_void { + panic!(concat!("Unable to load ", stringify!(destroy_instance))) + } + let raw_name = stringify!(vkDestroyInstance); + let cname = ::std::ffi::CString::new(raw_name).unwrap(); + let val = _f(&cname); + if val.is_null() { + destroy_instance + } else { + ::std::mem::transmute(val) + } + }, + enumerate_physical_devices: unsafe { + extern "system" fn enumerate_physical_devices( + _instance: Instance, + _p_physical_device_count: *mut u32, + _p_physical_devices: *mut PhysicalDevice, + ) -> Result { + panic!(concat!( + "Unable to load ", + stringify!(enumerate_physical_devices) + )) + } + let raw_name = stringify!(vkEnumeratePhysicalDevices); + let cname = ::std::ffi::CString::new(raw_name).unwrap(); + let val = _f(&cname); + if val.is_null() { + enumerate_physical_devices + } else { + ::std::mem::transmute(val) + } + }, + get_physical_device_features: 
unsafe { + extern "system" fn get_physical_device_features( + _physical_device: PhysicalDevice, + _p_features: *mut PhysicalDeviceFeatures, + ) -> c_void { + panic!(concat!( + "Unable to load ", + stringify!(get_physical_device_features) + )) + } + let raw_name = stringify!(vkGetPhysicalDeviceFeatures); + let cname = ::std::ffi::CString::new(raw_name).unwrap(); + let val = _f(&cname); + if val.is_null() { + get_physical_device_features + } else { + ::std::mem::transmute(val) + } + }, + get_physical_device_format_properties: unsafe { + extern "system" fn get_physical_device_format_properties( + _physical_device: PhysicalDevice, + _format: Format, + _p_format_properties: *mut FormatProperties, + ) -> c_void { + panic!(concat!( + "Unable to load ", + stringify!(get_physical_device_format_properties) + )) + } + let raw_name = stringify!(vkGetPhysicalDeviceFormatProperties); + let cname = ::std::ffi::CString::new(raw_name).unwrap(); + let val = _f(&cname); + if val.is_null() { + get_physical_device_format_properties + } else { + ::std::mem::transmute(val) + } + }, + get_physical_device_image_format_properties: unsafe { + extern "system" fn get_physical_device_image_format_properties( + _physical_device: PhysicalDevice, + _format: Format, + _ty: ImageType, + _tiling: ImageTiling, + _usage: ImageUsageFlags, + _flags: ImageCreateFlags, + _p_image_format_properties: *mut ImageFormatProperties, + ) -> Result { + panic!(concat!( + "Unable to load ", + stringify!(get_physical_device_image_format_properties) + )) + } + let raw_name = stringify!(vkGetPhysicalDeviceImageFormatProperties); + let cname = ::std::ffi::CString::new(raw_name).unwrap(); + let val = _f(&cname); + if val.is_null() { + get_physical_device_image_format_properties + } else { + ::std::mem::transmute(val) + } + }, + get_physical_device_properties: unsafe { + extern "system" fn get_physical_device_properties( + _physical_device: PhysicalDevice, + _p_properties: *mut PhysicalDeviceProperties, + ) -> c_void { + panic!(concat!( + "Unable to load ", + stringify!(get_physical_device_properties) + )) + } + let raw_name = stringify!(vkGetPhysicalDeviceProperties); + let cname = ::std::ffi::CString::new(raw_name).unwrap(); + let val = _f(&cname); + if val.is_null() { + get_physical_device_properties + } else { + ::std::mem::transmute(val) + } + }, + get_physical_device_queue_family_properties: unsafe { + extern "system" fn get_physical_device_queue_family_properties( + _physical_device: PhysicalDevice, + _p_queue_family_property_count: *mut u32, + _p_queue_family_properties: *mut QueueFamilyProperties, + ) -> c_void { + panic!(concat!( + "Unable to load ", + stringify!(get_physical_device_queue_family_properties) + )) + } + let raw_name = stringify!(vkGetPhysicalDeviceQueueFamilyProperties); + let cname = ::std::ffi::CString::new(raw_name).unwrap(); + let val = _f(&cname); + if val.is_null() { + get_physical_device_queue_family_properties + } else { + ::std::mem::transmute(val) + } + }, + get_physical_device_memory_properties: unsafe { + extern "system" fn get_physical_device_memory_properties( + _physical_device: PhysicalDevice, + _p_memory_properties: *mut PhysicalDeviceMemoryProperties, + ) -> c_void { + panic!(concat!( + "Unable to load ", + stringify!(get_physical_device_memory_properties) + )) + } + let raw_name = stringify!(vkGetPhysicalDeviceMemoryProperties); + let cname = ::std::ffi::CString::new(raw_name).unwrap(); + let val = _f(&cname); + if val.is_null() { + get_physical_device_memory_properties + } else { + 
::std::mem::transmute(val) + } + }, + get_device_proc_addr: unsafe { + extern "system" fn get_device_proc_addr( + _device: Device, + _p_name: *const c_char, + ) -> PFN_vkVoidFunction { + panic!(concat!("Unable to load ", stringify!(get_device_proc_addr))) + } + let raw_name = stringify!(vkGetDeviceProcAddr); + let cname = ::std::ffi::CString::new(raw_name).unwrap(); + let val = _f(&cname); + if val.is_null() { + get_device_proc_addr + } else { + ::std::mem::transmute(val) + } + }, + create_device: unsafe { + extern "system" fn create_device( + _physical_device: PhysicalDevice, + _p_create_info: *const DeviceCreateInfo, + _p_allocator: *const AllocationCallbacks, + _p_device: *mut Device, + ) -> Result { + panic!(concat!("Unable to load ", stringify!(create_device))) + } + let raw_name = stringify!(vkCreateDevice); + let cname = ::std::ffi::CString::new(raw_name).unwrap(); + let val = _f(&cname); + if val.is_null() { + create_device + } else { + ::std::mem::transmute(val) + } + }, + enumerate_device_extension_properties: unsafe { + extern "system" fn enumerate_device_extension_properties( + _physical_device: PhysicalDevice, + _p_layer_name: *const c_char, + _p_property_count: *mut u32, + _p_properties: *mut ExtensionProperties, + ) -> Result { + panic!(concat!( + "Unable to load ", + stringify!(enumerate_device_extension_properties) + )) + } + let raw_name = stringify!(vkEnumerateDeviceExtensionProperties); + let cname = ::std::ffi::CString::new(raw_name).unwrap(); + let val = _f(&cname); + if val.is_null() { + enumerate_device_extension_properties + } else { + ::std::mem::transmute(val) + } + }, + enumerate_device_layer_properties: unsafe { + extern "system" fn enumerate_device_layer_properties( + _physical_device: PhysicalDevice, + _p_property_count: *mut u32, + _p_properties: *mut LayerProperties, + ) -> Result { + panic!(concat!( + "Unable to load ", + stringify!(enumerate_device_layer_properties) + )) + } + let raw_name = stringify!(vkEnumerateDeviceLayerProperties); + let cname = ::std::ffi::CString::new(raw_name).unwrap(); + let val = _f(&cname); + if val.is_null() { + enumerate_device_layer_properties + } else { + ::std::mem::transmute(val) + } + }, + get_physical_device_sparse_image_format_properties: unsafe { + extern "system" fn get_physical_device_sparse_image_format_properties( + _physical_device: PhysicalDevice, + _format: Format, + _ty: ImageType, + _samples: SampleCountFlags, + _usage: ImageUsageFlags, + _tiling: ImageTiling, + _p_property_count: *mut u32, + _p_properties: *mut SparseImageFormatProperties, + ) -> c_void { + panic!(concat!( + "Unable to load ", + stringify!(get_physical_device_sparse_image_format_properties) + )) + } + let raw_name = stringify!(vkGetPhysicalDeviceSparseImageFormatProperties); + let cname = ::std::ffi::CString::new(raw_name).unwrap(); + let val = _f(&cname); + if val.is_null() { + get_physical_device_sparse_image_format_properties + } else { + ::std::mem::transmute(val) + } + }, + } + } + #[doc = ""] + pub unsafe fn destroy_instance( + &self, + instance: Instance, + p_allocator: *const AllocationCallbacks, + ) -> c_void { + (self.destroy_instance)(instance, p_allocator) + } + #[doc = ""] + pub unsafe fn enumerate_physical_devices( + &self, + instance: Instance, + p_physical_device_count: *mut u32, + p_physical_devices: *mut PhysicalDevice, + ) -> Result { + (self.enumerate_physical_devices)(instance, p_physical_device_count, p_physical_devices) + } + #[doc = ""] + pub unsafe fn get_physical_device_features( + &self, + physical_device: 
PhysicalDevice, + p_features: *mut PhysicalDeviceFeatures, + ) -> c_void { + (self.get_physical_device_features)(physical_device, p_features) + } + #[doc = ""] + pub unsafe fn get_physical_device_format_properties( + &self, + physical_device: PhysicalDevice, + format: Format, + p_format_properties: *mut FormatProperties, + ) -> c_void { + (self.get_physical_device_format_properties)(physical_device, format, p_format_properties) + } + #[doc = ""] + pub unsafe fn get_physical_device_image_format_properties( + &self, + physical_device: PhysicalDevice, + format: Format, + ty: ImageType, + tiling: ImageTiling, + usage: ImageUsageFlags, + flags: ImageCreateFlags, + p_image_format_properties: *mut ImageFormatProperties, + ) -> Result { + (self.get_physical_device_image_format_properties)( + physical_device, + format, + ty, + tiling, + usage, + flags, + p_image_format_properties, + ) + } + #[doc = ""] + pub unsafe fn get_physical_device_properties( + &self, + physical_device: PhysicalDevice, + p_properties: *mut PhysicalDeviceProperties, + ) -> c_void { + (self.get_physical_device_properties)(physical_device, p_properties) + } + #[doc = ""] + pub unsafe fn get_physical_device_queue_family_properties( + &self, + physical_device: PhysicalDevice, + p_queue_family_property_count: *mut u32, + p_queue_family_properties: *mut QueueFamilyProperties, + ) -> c_void { + (self.get_physical_device_queue_family_properties)( + physical_device, + p_queue_family_property_count, + p_queue_family_properties, + ) + } + #[doc = ""] + pub unsafe fn get_physical_device_memory_properties( + &self, + physical_device: PhysicalDevice, + p_memory_properties: *mut PhysicalDeviceMemoryProperties, + ) -> c_void { + (self.get_physical_device_memory_properties)(physical_device, p_memory_properties) + } + #[doc = ""] + pub unsafe fn get_device_proc_addr( + &self, + device: Device, + p_name: *const c_char, + ) -> PFN_vkVoidFunction { + (self.get_device_proc_addr)(device, p_name) + } + #[doc = ""] + pub unsafe fn create_device( + &self, + physical_device: PhysicalDevice, + p_create_info: *const DeviceCreateInfo, + p_allocator: *const AllocationCallbacks, + p_device: *mut Device, + ) -> Result { + (self.create_device)(physical_device, p_create_info, p_allocator, p_device) + } + #[doc = ""] + pub unsafe fn enumerate_device_extension_properties( + &self, + physical_device: PhysicalDevice, + p_layer_name: *const c_char, + p_property_count: *mut u32, + p_properties: *mut ExtensionProperties, + ) -> Result { + (self.enumerate_device_extension_properties)( + physical_device, + p_layer_name, + p_property_count, + p_properties, + ) + } + #[doc = ""] + pub unsafe fn enumerate_device_layer_properties( + &self, + physical_device: PhysicalDevice, + p_property_count: *mut u32, + p_properties: *mut LayerProperties, + ) -> Result { + (self.enumerate_device_layer_properties)(physical_device, p_property_count, p_properties) + } + #[doc = ""] + pub unsafe fn get_physical_device_sparse_image_format_properties( + &self, + physical_device: PhysicalDevice, + format: Format, + ty: ImageType, + samples: SampleCountFlags, + usage: ImageUsageFlags, + tiling: ImageTiling, + p_property_count: *mut u32, + p_properties: *mut SparseImageFormatProperties, + ) -> c_void { + (self.get_physical_device_sparse_image_format_properties)( + physical_device, + format, + ty, + samples, + usage, + tiling, + p_property_count, + p_properties, + ) + } +} +#[allow(non_camel_case_types)] +pub type PFN_vkDestroyDevice = + extern "system" fn(device: Device, p_allocator: *const 
AllocationCallbacks) -> c_void; +#[allow(non_camel_case_types)] +pub type PFN_vkGetDeviceQueue = extern "system" fn( + device: Device, + queue_family_index: u32, + queue_index: u32, + p_queue: *mut Queue, +) -> c_void; +#[allow(non_camel_case_types)] +pub type PFN_vkQueueSubmit = extern "system" fn( + queue: Queue, + submit_count: u32, + p_submits: *const SubmitInfo, + fence: Fence, +) -> Result; +#[allow(non_camel_case_types)] +pub type PFN_vkQueueWaitIdle = extern "system" fn(queue: Queue) -> Result; +#[allow(non_camel_case_types)] +pub type PFN_vkDeviceWaitIdle = extern "system" fn(device: Device) -> Result; +#[allow(non_camel_case_types)] +pub type PFN_vkAllocateMemory = extern "system" fn( + device: Device, + p_allocate_info: *const MemoryAllocateInfo, + p_allocator: *const AllocationCallbacks, + p_memory: *mut DeviceMemory, +) -> Result; +#[allow(non_camel_case_types)] +pub type PFN_vkFreeMemory = extern "system" fn( + device: Device, + memory: DeviceMemory, + p_allocator: *const AllocationCallbacks, +) -> c_void; +#[allow(non_camel_case_types)] +pub type PFN_vkMapMemory = extern "system" fn( + device: Device, + memory: DeviceMemory, + offset: DeviceSize, + size: DeviceSize, + flags: MemoryMapFlags, + pp_data: *mut *mut c_void, +) -> Result; +#[allow(non_camel_case_types)] +pub type PFN_vkUnmapMemory = extern "system" fn(device: Device, memory: DeviceMemory) -> c_void; +#[allow(non_camel_case_types)] +pub type PFN_vkFlushMappedMemoryRanges = extern "system" fn( + device: Device, + memory_range_count: u32, + p_memory_ranges: *const MappedMemoryRange, +) -> Result; +#[allow(non_camel_case_types)] +pub type PFN_vkInvalidateMappedMemoryRanges = extern "system" fn( + device: Device, + memory_range_count: u32, + p_memory_ranges: *const MappedMemoryRange, +) -> Result; +#[allow(non_camel_case_types)] +pub type PFN_vkGetDeviceMemoryCommitment = extern "system" fn( + device: Device, + memory: DeviceMemory, + p_committed_memory_in_bytes: *mut DeviceSize, +) -> c_void; +#[allow(non_camel_case_types)] +pub type PFN_vkBindBufferMemory = extern "system" fn( + device: Device, + buffer: Buffer, + memory: DeviceMemory, + memory_offset: DeviceSize, +) -> Result; +#[allow(non_camel_case_types)] +pub type PFN_vkBindImageMemory = extern "system" fn( + device: Device, + image: Image, + memory: DeviceMemory, + memory_offset: DeviceSize, +) -> Result; +#[allow(non_camel_case_types)] +pub type PFN_vkGetBufferMemoryRequirements = extern "system" fn( + device: Device, + buffer: Buffer, + p_memory_requirements: *mut MemoryRequirements, +) -> c_void; +#[allow(non_camel_case_types)] +pub type PFN_vkGetImageMemoryRequirements = extern "system" fn( + device: Device, + image: Image, + p_memory_requirements: *mut MemoryRequirements, +) -> c_void; +#[allow(non_camel_case_types)] +pub type PFN_vkGetImageSparseMemoryRequirements = extern "system" fn( + device: Device, + image: Image, + p_sparse_memory_requirement_count: *mut u32, + p_sparse_memory_requirements: *mut SparseImageMemoryRequirements, +) -> c_void; +#[allow(non_camel_case_types)] +pub type PFN_vkQueueBindSparse = extern "system" fn( + queue: Queue, + bind_info_count: u32, + p_bind_info: *const BindSparseInfo, + fence: Fence, +) -> Result; +#[allow(non_camel_case_types)] +pub type PFN_vkCreateFence = extern "system" fn( + device: Device, + p_create_info: *const FenceCreateInfo, + p_allocator: *const AllocationCallbacks, + p_fence: *mut Fence, +) -> Result; +#[allow(non_camel_case_types)] +pub type PFN_vkDestroyFence = extern "system" fn( + device: Device, + 
fence: Fence, + p_allocator: *const AllocationCallbacks, +) -> c_void; +#[allow(non_camel_case_types)] +pub type PFN_vkResetFences = + extern "system" fn(device: Device, fence_count: u32, p_fences: *const Fence) -> Result; +#[allow(non_camel_case_types)] +pub type PFN_vkGetFenceStatus = extern "system" fn(device: Device, fence: Fence) -> Result; +#[allow(non_camel_case_types)] +pub type PFN_vkWaitForFences = extern "system" fn( + device: Device, + fence_count: u32, + p_fences: *const Fence, + wait_all: Bool32, + timeout: u64, +) -> Result; +#[allow(non_camel_case_types)] +pub type PFN_vkCreateSemaphore = extern "system" fn( + device: Device, + p_create_info: *const SemaphoreCreateInfo, + p_allocator: *const AllocationCallbacks, + p_semaphore: *mut Semaphore, +) -> Result; +#[allow(non_camel_case_types)] +pub type PFN_vkDestroySemaphore = extern "system" fn( + device: Device, + semaphore: Semaphore, + p_allocator: *const AllocationCallbacks, +) -> c_void; +#[allow(non_camel_case_types)] +pub type PFN_vkCreateEvent = extern "system" fn( + device: Device, + p_create_info: *const EventCreateInfo, + p_allocator: *const AllocationCallbacks, + p_event: *mut Event, +) -> Result; +#[allow(non_camel_case_types)] +pub type PFN_vkDestroyEvent = extern "system" fn( + device: Device, + event: Event, + p_allocator: *const AllocationCallbacks, +) -> c_void; +#[allow(non_camel_case_types)] +pub type PFN_vkGetEventStatus = extern "system" fn(device: Device, event: Event) -> Result; +#[allow(non_camel_case_types)] +pub type PFN_vkSetEvent = extern "system" fn(device: Device, event: Event) -> Result; +#[allow(non_camel_case_types)] +pub type PFN_vkResetEvent = extern "system" fn(device: Device, event: Event) -> Result; +#[allow(non_camel_case_types)] +pub type PFN_vkCreateQueryPool = extern "system" fn( + device: Device, + p_create_info: *const QueryPoolCreateInfo, + p_allocator: *const AllocationCallbacks, + p_query_pool: *mut QueryPool, +) -> Result; +#[allow(non_camel_case_types)] +pub type PFN_vkDestroyQueryPool = extern "system" fn( + device: Device, + query_pool: QueryPool, + p_allocator: *const AllocationCallbacks, +) -> c_void; +#[allow(non_camel_case_types)] +pub type PFN_vkGetQueryPoolResults = extern "system" fn( + device: Device, + query_pool: QueryPool, + first_query: u32, + query_count: u32, + data_size: usize, + p_data: *mut c_void, + stride: DeviceSize, + flags: QueryResultFlags, +) -> Result; +#[allow(non_camel_case_types)] +pub type PFN_vkCreateBuffer = extern "system" fn( + device: Device, + p_create_info: *const BufferCreateInfo, + p_allocator: *const AllocationCallbacks, + p_buffer: *mut Buffer, +) -> Result; +#[allow(non_camel_case_types)] +pub type PFN_vkDestroyBuffer = extern "system" fn( + device: Device, + buffer: Buffer, + p_allocator: *const AllocationCallbacks, +) -> c_void; +#[allow(non_camel_case_types)] +pub type PFN_vkCreateBufferView = extern "system" fn( + device: Device, + p_create_info: *const BufferViewCreateInfo, + p_allocator: *const AllocationCallbacks, + p_view: *mut BufferView, +) -> Result; +#[allow(non_camel_case_types)] +pub type PFN_vkDestroyBufferView = extern "system" fn( + device: Device, + buffer_view: BufferView, + p_allocator: *const AllocationCallbacks, +) -> c_void; +#[allow(non_camel_case_types)] +pub type PFN_vkCreateImage = extern "system" fn( + device: Device, + p_create_info: *const ImageCreateInfo, + p_allocator: *const AllocationCallbacks, + p_image: *mut Image, +) -> Result; +#[allow(non_camel_case_types)] +pub type PFN_vkDestroyImage = extern 
"system" fn( + device: Device, + image: Image, + p_allocator: *const AllocationCallbacks, +) -> c_void; +#[allow(non_camel_case_types)] +pub type PFN_vkGetImageSubresourceLayout = extern "system" fn( + device: Device, + image: Image, + p_subresource: *const ImageSubresource, + p_layout: *mut SubresourceLayout, +) -> c_void; +#[allow(non_camel_case_types)] +pub type PFN_vkCreateImageView = extern "system" fn( + device: Device, + p_create_info: *const ImageViewCreateInfo, + p_allocator: *const AllocationCallbacks, + p_view: *mut ImageView, +) -> Result; +#[allow(non_camel_case_types)] +pub type PFN_vkDestroyImageView = extern "system" fn( + device: Device, + image_view: ImageView, + p_allocator: *const AllocationCallbacks, +) -> c_void; +#[allow(non_camel_case_types)] +pub type PFN_vkCreateShaderModule = extern "system" fn( + device: Device, + p_create_info: *const ShaderModuleCreateInfo, + p_allocator: *const AllocationCallbacks, + p_shader_module: *mut ShaderModule, +) -> Result; +#[allow(non_camel_case_types)] +pub type PFN_vkDestroyShaderModule = extern "system" fn( + device: Device, + shader_module: ShaderModule, + p_allocator: *const AllocationCallbacks, +) -> c_void; +#[allow(non_camel_case_types)] +pub type PFN_vkCreatePipelineCache = extern "system" fn( + device: Device, + p_create_info: *const PipelineCacheCreateInfo, + p_allocator: *const AllocationCallbacks, + p_pipeline_cache: *mut PipelineCache, +) -> Result; +#[allow(non_camel_case_types)] +pub type PFN_vkDestroyPipelineCache = extern "system" fn( + device: Device, + pipeline_cache: PipelineCache, + p_allocator: *const AllocationCallbacks, +) -> c_void; +#[allow(non_camel_case_types)] +pub type PFN_vkGetPipelineCacheData = extern "system" fn( + device: Device, + pipeline_cache: PipelineCache, + p_data_size: *mut usize, + p_data: *mut c_void, +) -> Result; +#[allow(non_camel_case_types)] +pub type PFN_vkMergePipelineCaches = extern "system" fn( + device: Device, + dst_cache: PipelineCache, + src_cache_count: u32, + p_src_caches: *const PipelineCache, +) -> Result; +#[allow(non_camel_case_types)] +pub type PFN_vkCreateGraphicsPipelines = extern "system" fn( + device: Device, + pipeline_cache: PipelineCache, + create_info_count: u32, + p_create_infos: *const GraphicsPipelineCreateInfo, + p_allocator: *const AllocationCallbacks, + p_pipelines: *mut Pipeline, +) -> Result; +#[allow(non_camel_case_types)] +pub type PFN_vkCreateComputePipelines = extern "system" fn( + device: Device, + pipeline_cache: PipelineCache, + create_info_count: u32, + p_create_infos: *const ComputePipelineCreateInfo, + p_allocator: *const AllocationCallbacks, + p_pipelines: *mut Pipeline, +) -> Result; +#[allow(non_camel_case_types)] +pub type PFN_vkDestroyPipeline = extern "system" fn( + device: Device, + pipeline: Pipeline, + p_allocator: *const AllocationCallbacks, +) -> c_void; +#[allow(non_camel_case_types)] +pub type PFN_vkCreatePipelineLayout = extern "system" fn( + device: Device, + p_create_info: *const PipelineLayoutCreateInfo, + p_allocator: *const AllocationCallbacks, + p_pipeline_layout: *mut PipelineLayout, +) -> Result; +#[allow(non_camel_case_types)] +pub type PFN_vkDestroyPipelineLayout = extern "system" fn( + device: Device, + pipeline_layout: PipelineLayout, + p_allocator: *const AllocationCallbacks, +) -> c_void; +#[allow(non_camel_case_types)] +pub type PFN_vkCreateSampler = extern "system" fn( + device: Device, + p_create_info: *const SamplerCreateInfo, + p_allocator: *const AllocationCallbacks, + p_sampler: *mut Sampler, +) -> Result; 
+#[allow(non_camel_case_types)] +pub type PFN_vkDestroySampler = extern "system" fn( + device: Device, + sampler: Sampler, + p_allocator: *const AllocationCallbacks, +) -> c_void; +#[allow(non_camel_case_types)] +pub type PFN_vkCreateDescriptorSetLayout = extern "system" fn( + device: Device, + p_create_info: *const DescriptorSetLayoutCreateInfo, + p_allocator: *const AllocationCallbacks, + p_set_layout: *mut DescriptorSetLayout, +) -> Result; +#[allow(non_camel_case_types)] +pub type PFN_vkDestroyDescriptorSetLayout = extern "system" fn( + device: Device, + descriptor_set_layout: DescriptorSetLayout, + p_allocator: *const AllocationCallbacks, +) -> c_void; +#[allow(non_camel_case_types)] +pub type PFN_vkCreateDescriptorPool = extern "system" fn( + device: Device, + p_create_info: *const DescriptorPoolCreateInfo, + p_allocator: *const AllocationCallbacks, + p_descriptor_pool: *mut DescriptorPool, +) -> Result; +#[allow(non_camel_case_types)] +pub type PFN_vkDestroyDescriptorPool = extern "system" fn( + device: Device, + descriptor_pool: DescriptorPool, + p_allocator: *const AllocationCallbacks, +) -> c_void; +#[allow(non_camel_case_types)] +pub type PFN_vkResetDescriptorPool = extern "system" fn( + device: Device, + descriptor_pool: DescriptorPool, + flags: DescriptorPoolResetFlags, +) -> Result; +#[allow(non_camel_case_types)] +pub type PFN_vkAllocateDescriptorSets = extern "system" fn( + device: Device, + p_allocate_info: *const DescriptorSetAllocateInfo, + p_descriptor_sets: *mut DescriptorSet, +) -> Result; +#[allow(non_camel_case_types)] +pub type PFN_vkFreeDescriptorSets = extern "system" fn( + device: Device, + descriptor_pool: DescriptorPool, + descriptor_set_count: u32, + p_descriptor_sets: *const DescriptorSet, +) -> Result; +#[allow(non_camel_case_types)] +pub type PFN_vkUpdateDescriptorSets = extern "system" fn( + device: Device, + descriptor_write_count: u32, + p_descriptor_writes: *const WriteDescriptorSet, + descriptor_copy_count: u32, + p_descriptor_copies: *const CopyDescriptorSet, +) -> c_void; +#[allow(non_camel_case_types)] +pub type PFN_vkCreateFramebuffer = extern "system" fn( + device: Device, + p_create_info: *const FramebufferCreateInfo, + p_allocator: *const AllocationCallbacks, + p_framebuffer: *mut Framebuffer, +) -> Result; +#[allow(non_camel_case_types)] +pub type PFN_vkDestroyFramebuffer = extern "system" fn( + device: Device, + framebuffer: Framebuffer, + p_allocator: *const AllocationCallbacks, +) -> c_void; +#[allow(non_camel_case_types)] +pub type PFN_vkCreateRenderPass = extern "system" fn( + device: Device, + p_create_info: *const RenderPassCreateInfo, + p_allocator: *const AllocationCallbacks, + p_render_pass: *mut RenderPass, +) -> Result; +#[allow(non_camel_case_types)] +pub type PFN_vkDestroyRenderPass = extern "system" fn( + device: Device, + render_pass: RenderPass, + p_allocator: *const AllocationCallbacks, +) -> c_void; +#[allow(non_camel_case_types)] +pub type PFN_vkGetRenderAreaGranularity = extern "system" fn( + device: Device, + render_pass: RenderPass, + p_granularity: *mut Extent2D, +) -> c_void; +#[allow(non_camel_case_types)] +pub type PFN_vkCreateCommandPool = extern "system" fn( + device: Device, + p_create_info: *const CommandPoolCreateInfo, + p_allocator: *const AllocationCallbacks, + p_command_pool: *mut CommandPool, +) -> Result; +#[allow(non_camel_case_types)] +pub type PFN_vkDestroyCommandPool = extern "system" fn( + device: Device, + command_pool: CommandPool, + p_allocator: *const AllocationCallbacks, +) -> c_void; 
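The create/destroy pairs above all accept a nullable `*const AllocationCallbacks`; safe wrappers typically bridge that from `Option<&vk::AllocationCallbacks>` via the `RawPtr` trait defined in lib.rs earlier in this patch. A sketch of one such wrapper around `vkCreateCommandPool`, assuming an already-loaded `device_fns: vk::DeviceFnV1_0` (whose generated methods follow the same shape as the `InstanceFnV1_0` ones shown above) and a live `device` handle:

```rust
use ash::{vk, RawPtr};

unsafe fn make_command_pool(
    device_fns: &vk::DeviceFnV1_0,
    device: vk::Device,
    queue_family_index: u32,
) -> Result<vk::CommandPool, vk::Result> {
    let info = vk::CommandPoolCreateInfo {
        queue_family_index,
        ..Default::default()
    };
    // None becomes a null pointer; Some(&callbacks) would become a real address.
    let callbacks: Option<&vk::AllocationCallbacks> = None;
    let mut pool = vk::CommandPool::null();
    let err = device_fns.create_command_pool(device, &info, callbacks.as_raw_ptr(), &mut pool);
    if err == vk::Result::SUCCESS {
        Ok(pool)
    } else {
        Err(err)
    }
}
```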
+#[allow(non_camel_case_types)] +pub type PFN_vkResetCommandPool = extern "system" fn( + device: Device, + command_pool: CommandPool, + flags: CommandPoolResetFlags, +) -> Result; +#[allow(non_camel_case_types)] +pub type PFN_vkAllocateCommandBuffers = extern "system" fn( + device: Device, + p_allocate_info: *const CommandBufferAllocateInfo, + p_command_buffers: *mut CommandBuffer, +) -> Result; +#[allow(non_camel_case_types)] +pub type PFN_vkFreeCommandBuffers = extern "system" fn( + device: Device, + command_pool: CommandPool, + command_buffer_count: u32, + p_command_buffers: *const CommandBuffer, +) -> c_void; +#[allow(non_camel_case_types)] +pub type PFN_vkBeginCommandBuffer = extern "system" fn( + command_buffer: CommandBuffer, + p_begin_info: *const CommandBufferBeginInfo, +) -> Result; +#[allow(non_camel_case_types)] +pub type PFN_vkEndCommandBuffer = extern "system" fn(command_buffer: CommandBuffer) -> Result; +#[allow(non_camel_case_types)] +pub type PFN_vkResetCommandBuffer = + extern "system" fn(command_buffer: CommandBuffer, flags: CommandBufferResetFlags) -> Result; +#[allow(non_camel_case_types)] +pub type PFN_vkCmdBindPipeline = extern "system" fn( + command_buffer: CommandBuffer, + pipeline_bind_point: PipelineBindPoint, + pipeline: Pipeline, +) -> c_void; +#[allow(non_camel_case_types)] +pub type PFN_vkCmdSetViewport = extern "system" fn( + command_buffer: CommandBuffer, + first_viewport: u32, + viewport_count: u32, + p_viewports: *const Viewport, +) -> c_void; +#[allow(non_camel_case_types)] +pub type PFN_vkCmdSetScissor = extern "system" fn( + command_buffer: CommandBuffer, + first_scissor: u32, + scissor_count: u32, + p_scissors: *const Rect2D, +) -> c_void; +#[allow(non_camel_case_types)] +pub type PFN_vkCmdSetLineWidth = + extern "system" fn(command_buffer: CommandBuffer, line_width: f32) -> c_void; +#[allow(non_camel_case_types)] +pub type PFN_vkCmdSetDepthBias = extern "system" fn( + command_buffer: CommandBuffer, + depth_bias_constant_factor: f32, + depth_bias_clamp: f32, + depth_bias_slope_factor: f32, +) -> c_void; +#[allow(non_camel_case_types)] +pub type PFN_vkCmdSetBlendConstants = + extern "system" fn(command_buffer: CommandBuffer, blend_constants: &[f32; 4]) -> c_void; +#[allow(non_camel_case_types)] +pub type PFN_vkCmdSetDepthBounds = extern "system" fn( + command_buffer: CommandBuffer, + min_depth_bounds: f32, + max_depth_bounds: f32, +) -> c_void; +#[allow(non_camel_case_types)] +pub type PFN_vkCmdSetStencilCompareMask = extern "system" fn( + command_buffer: CommandBuffer, + face_mask: StencilFaceFlags, + compare_mask: u32, +) -> c_void; +#[allow(non_camel_case_types)] +pub type PFN_vkCmdSetStencilWriteMask = extern "system" fn( + command_buffer: CommandBuffer, + face_mask: StencilFaceFlags, + write_mask: u32, +) -> c_void; +#[allow(non_camel_case_types)] +pub type PFN_vkCmdSetStencilReference = extern "system" fn( + command_buffer: CommandBuffer, + face_mask: StencilFaceFlags, + reference: u32, +) -> c_void; +#[allow(non_camel_case_types)] +pub type PFN_vkCmdBindDescriptorSets = extern "system" fn( + command_buffer: CommandBuffer, + pipeline_bind_point: PipelineBindPoint, + layout: PipelineLayout, + first_set: u32, + descriptor_set_count: u32, + p_descriptor_sets: *const DescriptorSet, + dynamic_offset_count: u32, + p_dynamic_offsets: *const u32, +) -> c_void; +#[allow(non_camel_case_types)] +pub type PFN_vkCmdBindIndexBuffer = extern "system" fn( + command_buffer: CommandBuffer, + buffer: Buffer, + offset: DeviceSize, + index_type: IndexType, +) -> 
c_void; +#[allow(non_camel_case_types)] +pub type PFN_vkCmdBindVertexBuffers = extern "system" fn( + command_buffer: CommandBuffer, + first_binding: u32, + binding_count: u32, + p_buffers: *const Buffer, + p_offsets: *const DeviceSize, +) -> c_void; +#[allow(non_camel_case_types)] +pub type PFN_vkCmdDraw = extern "system" fn( + command_buffer: CommandBuffer, + vertex_count: u32, + instance_count: u32, + first_vertex: u32, + first_instance: u32, +) -> c_void; +#[allow(non_camel_case_types)] +pub type PFN_vkCmdDrawIndexed = extern "system" fn( + command_buffer: CommandBuffer, + index_count: u32, + instance_count: u32, + first_index: u32, + vertex_offset: i32, + first_instance: u32, +) -> c_void; +#[allow(non_camel_case_types)] +pub type PFN_vkCmdDrawIndirect = extern "system" fn( + command_buffer: CommandBuffer, + buffer: Buffer, + offset: DeviceSize, + draw_count: u32, + stride: u32, +) -> c_void; +#[allow(non_camel_case_types)] +pub type PFN_vkCmdDrawIndexedIndirect = extern "system" fn( + command_buffer: CommandBuffer, + buffer: Buffer, + offset: DeviceSize, + draw_count: u32, + stride: u32, +) -> c_void; +#[allow(non_camel_case_types)] +pub type PFN_vkCmdDispatch = extern "system" fn( + command_buffer: CommandBuffer, + group_count_x: u32, + group_count_y: u32, + group_count_z: u32, +) -> c_void; +#[allow(non_camel_case_types)] +pub type PFN_vkCmdDispatchIndirect = + extern "system" fn(command_buffer: CommandBuffer, buffer: Buffer, offset: DeviceSize) -> c_void; +#[allow(non_camel_case_types)] +pub type PFN_vkCmdCopyBuffer = extern "system" fn( + command_buffer: CommandBuffer, + src_buffer: Buffer, + dst_buffer: Buffer, + region_count: u32, + p_regions: *const BufferCopy, +) -> c_void; +#[allow(non_camel_case_types)] +pub type PFN_vkCmdCopyImage = extern "system" fn( + command_buffer: CommandBuffer, + src_image: Image, + src_image_layout: ImageLayout, + dst_image: Image, + dst_image_layout: ImageLayout, + region_count: u32, + p_regions: *const ImageCopy, +) -> c_void; +#[allow(non_camel_case_types)] +pub type PFN_vkCmdBlitImage = extern "system" fn( + command_buffer: CommandBuffer, + src_image: Image, + src_image_layout: ImageLayout, + dst_image: Image, + dst_image_layout: ImageLayout, + region_count: u32, + p_regions: *const ImageBlit, + filter: Filter, +) -> c_void; +#[allow(non_camel_case_types)] +pub type PFN_vkCmdCopyBufferToImage = extern "system" fn( + command_buffer: CommandBuffer, + src_buffer: Buffer, + dst_image: Image, + dst_image_layout: ImageLayout, + region_count: u32, + p_regions: *const BufferImageCopy, +) -> c_void; +#[allow(non_camel_case_types)] +pub type PFN_vkCmdCopyImageToBuffer = extern "system" fn( + command_buffer: CommandBuffer, + src_image: Image, + src_image_layout: ImageLayout, + dst_buffer: Buffer, + region_count: u32, + p_regions: *const BufferImageCopy, +) -> c_void; +#[allow(non_camel_case_types)] +pub type PFN_vkCmdUpdateBuffer = extern "system" fn( + command_buffer: CommandBuffer, + dst_buffer: Buffer, + dst_offset: DeviceSize, + data_size: DeviceSize, + p_data: *const c_void, +) -> c_void; +#[allow(non_camel_case_types)] +pub type PFN_vkCmdFillBuffer = extern "system" fn( + command_buffer: CommandBuffer, + dst_buffer: Buffer, + dst_offset: DeviceSize, + size: DeviceSize, + data: u32, +) -> c_void; +#[allow(non_camel_case_types)] +pub type PFN_vkCmdClearColorImage = extern "system" fn( + command_buffer: CommandBuffer, + image: Image, + image_layout: ImageLayout, + p_color: *const ClearColorValue, + range_count: u32, + p_ranges: *const 
ImageSubresourceRange, +) -> c_void; +#[allow(non_camel_case_types)] +pub type PFN_vkCmdClearDepthStencilImage = extern "system" fn( + command_buffer: CommandBuffer, + image: Image, + image_layout: ImageLayout, + p_depth_stencil: *const ClearDepthStencilValue, + range_count: u32, + p_ranges: *const ImageSubresourceRange, +) -> c_void; +#[allow(non_camel_case_types)] +pub type PFN_vkCmdClearAttachments = extern "system" fn( + command_buffer: CommandBuffer, + attachment_count: u32, + p_attachments: *const ClearAttachment, + rect_count: u32, + p_rects: *const ClearRect, +) -> c_void; +#[allow(non_camel_case_types)] +pub type PFN_vkCmdResolveImage = extern "system" fn( + command_buffer: CommandBuffer, + src_image: Image, + src_image_layout: ImageLayout, + dst_image: Image, + dst_image_layout: ImageLayout, + region_count: u32, + p_regions: *const ImageResolve, +) -> c_void; +#[allow(non_camel_case_types)] +pub type PFN_vkCmdSetEvent = extern "system" fn( + command_buffer: CommandBuffer, + event: Event, + stage_mask: PipelineStageFlags, +) -> c_void; +#[allow(non_camel_case_types)] +pub type PFN_vkCmdResetEvent = extern "system" fn( + command_buffer: CommandBuffer, + event: Event, + stage_mask: PipelineStageFlags, +) -> c_void; +#[allow(non_camel_case_types)] +pub type PFN_vkCmdWaitEvents = extern "system" fn( + command_buffer: CommandBuffer, + event_count: u32, + p_events: *const Event, + src_stage_mask: PipelineStageFlags, + dst_stage_mask: PipelineStageFlags, + memory_barrier_count: u32, + p_memory_barriers: *const MemoryBarrier, + buffer_memory_barrier_count: u32, + p_buffer_memory_barriers: *const BufferMemoryBarrier, + image_memory_barrier_count: u32, + p_image_memory_barriers: *const ImageMemoryBarrier, +) -> c_void; +#[allow(non_camel_case_types)] +pub type PFN_vkCmdPipelineBarrier = extern "system" fn( + command_buffer: CommandBuffer, + src_stage_mask: PipelineStageFlags, + dst_stage_mask: PipelineStageFlags, + dependency_flags: DependencyFlags, + memory_barrier_count: u32, + p_memory_barriers: *const MemoryBarrier, + buffer_memory_barrier_count: u32, + p_buffer_memory_barriers: *const BufferMemoryBarrier, + image_memory_barrier_count: u32, + p_image_memory_barriers: *const ImageMemoryBarrier, +) -> c_void; +#[allow(non_camel_case_types)] +pub type PFN_vkCmdBeginQuery = extern "system" fn( + command_buffer: CommandBuffer, + query_pool: QueryPool, + query: u32, + flags: QueryControlFlags, +) -> c_void; +#[allow(non_camel_case_types)] +pub type PFN_vkCmdEndQuery = + extern "system" fn(command_buffer: CommandBuffer, query_pool: QueryPool, query: u32) -> c_void; +#[allow(non_camel_case_types)] +pub type PFN_vkCmdResetQueryPool = extern "system" fn( + command_buffer: CommandBuffer, + query_pool: QueryPool, + first_query: u32, + query_count: u32, +) -> c_void; +#[allow(non_camel_case_types)] +pub type PFN_vkCmdWriteTimestamp = extern "system" fn( + command_buffer: CommandBuffer, + pipeline_stage: PipelineStageFlags, + query_pool: QueryPool, + query: u32, +) -> c_void; +#[allow(non_camel_case_types)] +pub type PFN_vkCmdCopyQueryPoolResults = extern "system" fn( + command_buffer: CommandBuffer, + query_pool: QueryPool, + first_query: u32, + query_count: u32, + dst_buffer: Buffer, + dst_offset: DeviceSize, + stride: DeviceSize, + flags: QueryResultFlags, +) -> c_void; +#[allow(non_camel_case_types)] +pub type PFN_vkCmdPushConstants = extern "system" fn( + command_buffer: CommandBuffer, + layout: PipelineLayout, + stage_flags: ShaderStageFlags, + offset: u32, + size: u32, + p_values: *const 
c_void, +) -> c_void; +#[allow(non_camel_case_types)] +pub type PFN_vkCmdBeginRenderPass = extern "system" fn( + command_buffer: CommandBuffer, + p_render_pass_begin: *const RenderPassBeginInfo, + contents: SubpassContents, +) -> c_void; +#[allow(non_camel_case_types)] +pub type PFN_vkCmdNextSubpass = + extern "system" fn(command_buffer: CommandBuffer, contents: SubpassContents) -> c_void; +#[allow(non_camel_case_types)] +pub type PFN_vkCmdEndRenderPass = extern "system" fn(command_buffer: CommandBuffer) -> c_void; +#[allow(non_camel_case_types)] +pub type PFN_vkCmdExecuteCommands = extern "system" fn( + command_buffer: CommandBuffer, + command_buffer_count: u32, + p_command_buffers: *const CommandBuffer, +) -> c_void; +pub struct DeviceFnV1_0 { + pub destroy_device: + extern "system" fn(device: Device, p_allocator: *const AllocationCallbacks) -> c_void, + pub get_device_queue: extern "system" fn( + device: Device, + queue_family_index: u32, + queue_index: u32, + p_queue: *mut Queue, + ) -> c_void, + pub queue_submit: extern "system" fn( + queue: Queue, + submit_count: u32, + p_submits: *const SubmitInfo, + fence: Fence, + ) -> Result, + pub queue_wait_idle: extern "system" fn(queue: Queue) -> Result, + pub device_wait_idle: extern "system" fn(device: Device) -> Result, + pub allocate_memory: extern "system" fn( + device: Device, + p_allocate_info: *const MemoryAllocateInfo, + p_allocator: *const AllocationCallbacks, + p_memory: *mut DeviceMemory, + ) -> Result, + pub free_memory: extern "system" fn( + device: Device, + memory: DeviceMemory, + p_allocator: *const AllocationCallbacks, + ) -> c_void, + pub map_memory: extern "system" fn( + device: Device, + memory: DeviceMemory, + offset: DeviceSize, + size: DeviceSize, + flags: MemoryMapFlags, + pp_data: *mut *mut c_void, + ) -> Result, + pub unmap_memory: extern "system" fn(device: Device, memory: DeviceMemory) -> c_void, + pub flush_mapped_memory_ranges: extern "system" fn( + device: Device, + memory_range_count: u32, + p_memory_ranges: *const MappedMemoryRange, + ) -> Result, + pub invalidate_mapped_memory_ranges: extern "system" fn( + device: Device, + memory_range_count: u32, + p_memory_ranges: *const MappedMemoryRange, + ) -> Result, + pub get_device_memory_commitment: extern "system" fn( + device: Device, + memory: DeviceMemory, + p_committed_memory_in_bytes: *mut DeviceSize, + ) -> c_void, + pub bind_buffer_memory: extern "system" fn( + device: Device, + buffer: Buffer, + memory: DeviceMemory, + memory_offset: DeviceSize, + ) -> Result, + pub bind_image_memory: extern "system" fn( + device: Device, + image: Image, + memory: DeviceMemory, + memory_offset: DeviceSize, + ) -> Result, + pub get_buffer_memory_requirements: extern "system" fn( + device: Device, + buffer: Buffer, + p_memory_requirements: *mut MemoryRequirements, + ) -> c_void, + pub get_image_memory_requirements: extern "system" fn( + device: Device, + image: Image, + p_memory_requirements: *mut MemoryRequirements, + ) -> c_void, + pub get_image_sparse_memory_requirements: extern "system" fn( + device: Device, + image: Image, + p_sparse_memory_requirement_count: *mut u32, + p_sparse_memory_requirements: *mut SparseImageMemoryRequirements, + ) -> c_void, + pub queue_bind_sparse: extern "system" fn( + queue: Queue, + bind_info_count: u32, + p_bind_info: *const BindSparseInfo, + fence: Fence, + ) -> Result, + pub create_fence: extern "system" fn( + device: Device, + p_create_info: *const FenceCreateInfo, + p_allocator: *const AllocationCallbacks, + p_fence: *mut Fence, + ) -> 
Result, + pub destroy_fence: extern "system" fn( + device: Device, + fence: Fence, + p_allocator: *const AllocationCallbacks, + ) -> c_void, + pub reset_fences: + extern "system" fn(device: Device, fence_count: u32, p_fences: *const Fence) -> Result, + pub get_fence_status: extern "system" fn(device: Device, fence: Fence) -> Result, + pub wait_for_fences: extern "system" fn( + device: Device, + fence_count: u32, + p_fences: *const Fence, + wait_all: Bool32, + timeout: u64, + ) -> Result, + pub create_semaphore: extern "system" fn( + device: Device, + p_create_info: *const SemaphoreCreateInfo, + p_allocator: *const AllocationCallbacks, + p_semaphore: *mut Semaphore, + ) -> Result, + pub destroy_semaphore: extern "system" fn( + device: Device, + semaphore: Semaphore, + p_allocator: *const AllocationCallbacks, + ) -> c_void, + pub create_event: extern "system" fn( + device: Device, + p_create_info: *const EventCreateInfo, + p_allocator: *const AllocationCallbacks, + p_event: *mut Event, + ) -> Result, + pub destroy_event: extern "system" fn( + device: Device, + event: Event, + p_allocator: *const AllocationCallbacks, + ) -> c_void, + pub get_event_status: extern "system" fn(device: Device, event: Event) -> Result, + pub set_event: extern "system" fn(device: Device, event: Event) -> Result, + pub reset_event: extern "system" fn(device: Device, event: Event) -> Result, + pub create_query_pool: extern "system" fn( + device: Device, + p_create_info: *const QueryPoolCreateInfo, + p_allocator: *const AllocationCallbacks, + p_query_pool: *mut QueryPool, + ) -> Result, + pub destroy_query_pool: extern "system" fn( + device: Device, + query_pool: QueryPool, + p_allocator: *const AllocationCallbacks, + ) -> c_void, + pub get_query_pool_results: extern "system" fn( + device: Device, + query_pool: QueryPool, + first_query: u32, + query_count: u32, + data_size: usize, + p_data: *mut c_void, + stride: DeviceSize, + flags: QueryResultFlags, + ) -> Result, + pub create_buffer: extern "system" fn( + device: Device, + p_create_info: *const BufferCreateInfo, + p_allocator: *const AllocationCallbacks, + p_buffer: *mut Buffer, + ) -> Result, + pub destroy_buffer: extern "system" fn( + device: Device, + buffer: Buffer, + p_allocator: *const AllocationCallbacks, + ) -> c_void, + pub create_buffer_view: extern "system" fn( + device: Device, + p_create_info: *const BufferViewCreateInfo, + p_allocator: *const AllocationCallbacks, + p_view: *mut BufferView, + ) -> Result, + pub destroy_buffer_view: extern "system" fn( + device: Device, + buffer_view: BufferView, + p_allocator: *const AllocationCallbacks, + ) -> c_void, + pub create_image: extern "system" fn( + device: Device, + p_create_info: *const ImageCreateInfo, + p_allocator: *const AllocationCallbacks, + p_image: *mut Image, + ) -> Result, + pub destroy_image: extern "system" fn( + device: Device, + image: Image, + p_allocator: *const AllocationCallbacks, + ) -> c_void, + pub get_image_subresource_layout: extern "system" fn( + device: Device, + image: Image, + p_subresource: *const ImageSubresource, + p_layout: *mut SubresourceLayout, + ) -> c_void, + pub create_image_view: extern "system" fn( + device: Device, + p_create_info: *const ImageViewCreateInfo, + p_allocator: *const AllocationCallbacks, + p_view: *mut ImageView, + ) -> Result, + pub destroy_image_view: extern "system" fn( + device: Device, + image_view: ImageView, + p_allocator: *const AllocationCallbacks, + ) -> c_void, + pub create_shader_module: extern "system" fn( + device: Device, + p_create_info: 
*const ShaderModuleCreateInfo, + p_allocator: *const AllocationCallbacks, + p_shader_module: *mut ShaderModule, + ) -> Result, + pub destroy_shader_module: extern "system" fn( + device: Device, + shader_module: ShaderModule, + p_allocator: *const AllocationCallbacks, + ) -> c_void, + pub create_pipeline_cache: extern "system" fn( + device: Device, + p_create_info: *const PipelineCacheCreateInfo, + p_allocator: *const AllocationCallbacks, + p_pipeline_cache: *mut PipelineCache, + ) -> Result, + pub destroy_pipeline_cache: extern "system" fn( + device: Device, + pipeline_cache: PipelineCache, + p_allocator: *const AllocationCallbacks, + ) -> c_void, + pub get_pipeline_cache_data: extern "system" fn( + device: Device, + pipeline_cache: PipelineCache, + p_data_size: *mut usize, + p_data: *mut c_void, + ) -> Result, + pub merge_pipeline_caches: extern "system" fn( + device: Device, + dst_cache: PipelineCache, + src_cache_count: u32, + p_src_caches: *const PipelineCache, + ) -> Result, + pub create_graphics_pipelines: extern "system" fn( + device: Device, + pipeline_cache: PipelineCache, + create_info_count: u32, + p_create_infos: *const GraphicsPipelineCreateInfo, + p_allocator: *const AllocationCallbacks, + p_pipelines: *mut Pipeline, + ) -> Result, + pub create_compute_pipelines: extern "system" fn( + device: Device, + pipeline_cache: PipelineCache, + create_info_count: u32, + p_create_infos: *const ComputePipelineCreateInfo, + p_allocator: *const AllocationCallbacks, + p_pipelines: *mut Pipeline, + ) -> Result, + pub destroy_pipeline: extern "system" fn( + device: Device, + pipeline: Pipeline, + p_allocator: *const AllocationCallbacks, + ) -> c_void, + pub create_pipeline_layout: extern "system" fn( + device: Device, + p_create_info: *const PipelineLayoutCreateInfo, + p_allocator: *const AllocationCallbacks, + p_pipeline_layout: *mut PipelineLayout, + ) -> Result, + pub destroy_pipeline_layout: extern "system" fn( + device: Device, + pipeline_layout: PipelineLayout, + p_allocator: *const AllocationCallbacks, + ) -> c_void, + pub create_sampler: extern "system" fn( + device: Device, + p_create_info: *const SamplerCreateInfo, + p_allocator: *const AllocationCallbacks, + p_sampler: *mut Sampler, + ) -> Result, + pub destroy_sampler: extern "system" fn( + device: Device, + sampler: Sampler, + p_allocator: *const AllocationCallbacks, + ) -> c_void, + pub create_descriptor_set_layout: extern "system" fn( + device: Device, + p_create_info: *const DescriptorSetLayoutCreateInfo, + p_allocator: *const AllocationCallbacks, + p_set_layout: *mut DescriptorSetLayout, + ) -> Result, + pub destroy_descriptor_set_layout: extern "system" fn( + device: Device, + descriptor_set_layout: DescriptorSetLayout, + p_allocator: *const AllocationCallbacks, + ) -> c_void, + pub create_descriptor_pool: extern "system" fn( + device: Device, + p_create_info: *const DescriptorPoolCreateInfo, + p_allocator: *const AllocationCallbacks, + p_descriptor_pool: *mut DescriptorPool, + ) -> Result, + pub destroy_descriptor_pool: extern "system" fn( + device: Device, + descriptor_pool: DescriptorPool, + p_allocator: *const AllocationCallbacks, + ) -> c_void, + pub reset_descriptor_pool: extern "system" fn( + device: Device, + descriptor_pool: DescriptorPool, + flags: DescriptorPoolResetFlags, + ) -> Result, + pub allocate_descriptor_sets: extern "system" fn( + device: Device, + p_allocate_info: *const DescriptorSetAllocateInfo, + p_descriptor_sets: *mut DescriptorSet, + ) -> Result, + pub free_descriptor_sets: extern "system" fn( + 
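
DeviceFnV1_0 (whose fields continue below) is the generated dispatch table for the Vulkan 1.0 device-level API: one plain function-pointer field per entry point, resolved once when the device is created and then called with no per-call name lookup. A toy table of the same shape, using stand-in types rather than the real Vulkan handles:

    // Minimal sketch of the dispatch-table shape; u64 stands in for a
    // dispatchable handle like CommandBuffer, i32 for Result.
    struct ToyDeviceFn {
        begin_command_buffer: extern "system" fn(cb: u64) -> i32,
        end_command_buffer: extern "system" fn(cb: u64) -> i32,
    }

    extern "system" fn toy_begin(_cb: u64) -> i32 { 0 }
    extern "system" fn toy_end(_cb: u64) -> i32 { 0 }

    fn main() {
        let table = ToyDeviceFn {
            begin_command_buffer: toy_begin,
            end_command_buffer: toy_end,
        };
        // Call sites go straight through the stored pointer.
        assert_eq!((table.begin_command_buffer)(1), 0);
        assert_eq!((table.end_command_buffer)(1), 0);
    }
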
device: Device, + descriptor_pool: DescriptorPool, + descriptor_set_count: u32, + p_descriptor_sets: *const DescriptorSet, + ) -> Result, + pub update_descriptor_sets: extern "system" fn( + device: Device, + descriptor_write_count: u32, + p_descriptor_writes: *const WriteDescriptorSet, + descriptor_copy_count: u32, + p_descriptor_copies: *const CopyDescriptorSet, + ) -> c_void, + pub create_framebuffer: extern "system" fn( + device: Device, + p_create_info: *const FramebufferCreateInfo, + p_allocator: *const AllocationCallbacks, + p_framebuffer: *mut Framebuffer, + ) -> Result, + pub destroy_framebuffer: extern "system" fn( + device: Device, + framebuffer: Framebuffer, + p_allocator: *const AllocationCallbacks, + ) -> c_void, + pub create_render_pass: extern "system" fn( + device: Device, + p_create_info: *const RenderPassCreateInfo, + p_allocator: *const AllocationCallbacks, + p_render_pass: *mut RenderPass, + ) -> Result, + pub destroy_render_pass: extern "system" fn( + device: Device, + render_pass: RenderPass, + p_allocator: *const AllocationCallbacks, + ) -> c_void, + pub get_render_area_granularity: extern "system" fn( + device: Device, + render_pass: RenderPass, + p_granularity: *mut Extent2D, + ) -> c_void, + pub create_command_pool: extern "system" fn( + device: Device, + p_create_info: *const CommandPoolCreateInfo, + p_allocator: *const AllocationCallbacks, + p_command_pool: *mut CommandPool, + ) -> Result, + pub destroy_command_pool: extern "system" fn( + device: Device, + command_pool: CommandPool, + p_allocator: *const AllocationCallbacks, + ) -> c_void, + pub reset_command_pool: extern "system" fn( + device: Device, + command_pool: CommandPool, + flags: CommandPoolResetFlags, + ) -> Result, + pub allocate_command_buffers: extern "system" fn( + device: Device, + p_allocate_info: *const CommandBufferAllocateInfo, + p_command_buffers: *mut CommandBuffer, + ) -> Result, + pub free_command_buffers: extern "system" fn( + device: Device, + command_pool: CommandPool, + command_buffer_count: u32, + p_command_buffers: *const CommandBuffer, + ) -> c_void, + pub begin_command_buffer: extern "system" fn( + command_buffer: CommandBuffer, + p_begin_info: *const CommandBufferBeginInfo, + ) -> Result, + pub end_command_buffer: extern "system" fn(command_buffer: CommandBuffer) -> Result, + pub reset_command_buffer: + extern "system" fn(command_buffer: CommandBuffer, flags: CommandBufferResetFlags) -> Result, + pub cmd_bind_pipeline: extern "system" fn( + command_buffer: CommandBuffer, + pipeline_bind_point: PipelineBindPoint, + pipeline: Pipeline, + ) -> c_void, + pub cmd_set_viewport: extern "system" fn( + command_buffer: CommandBuffer, + first_viewport: u32, + viewport_count: u32, + p_viewports: *const Viewport, + ) -> c_void, + pub cmd_set_scissor: extern "system" fn( + command_buffer: CommandBuffer, + first_scissor: u32, + scissor_count: u32, + p_scissors: *const Rect2D, + ) -> c_void, + pub cmd_set_line_width: + extern "system" fn(command_buffer: CommandBuffer, line_width: f32) -> c_void, + pub cmd_set_depth_bias: extern "system" fn( + command_buffer: CommandBuffer, + depth_bias_constant_factor: f32, + depth_bias_clamp: f32, + depth_bias_slope_factor: f32, + ) -> c_void, + pub cmd_set_blend_constants: + extern "system" fn(command_buffer: CommandBuffer, blend_constants: &[f32; 4]) -> c_void, + pub cmd_set_depth_bounds: extern "system" fn( + command_buffer: CommandBuffer, + min_depth_bounds: f32, + max_depth_bounds: f32, + ) -> c_void, + pub cmd_set_stencil_compare_mask: extern "system" fn( 
+ command_buffer: CommandBuffer, + face_mask: StencilFaceFlags, + compare_mask: u32, + ) -> c_void, + pub cmd_set_stencil_write_mask: extern "system" fn( + command_buffer: CommandBuffer, + face_mask: StencilFaceFlags, + write_mask: u32, + ) -> c_void, + pub cmd_set_stencil_reference: extern "system" fn( + command_buffer: CommandBuffer, + face_mask: StencilFaceFlags, + reference: u32, + ) -> c_void, + pub cmd_bind_descriptor_sets: extern "system" fn( + command_buffer: CommandBuffer, + pipeline_bind_point: PipelineBindPoint, + layout: PipelineLayout, + first_set: u32, + descriptor_set_count: u32, + p_descriptor_sets: *const DescriptorSet, + dynamic_offset_count: u32, + p_dynamic_offsets: *const u32, + ) -> c_void, + pub cmd_bind_index_buffer: extern "system" fn( + command_buffer: CommandBuffer, + buffer: Buffer, + offset: DeviceSize, + index_type: IndexType, + ) -> c_void, + pub cmd_bind_vertex_buffers: extern "system" fn( + command_buffer: CommandBuffer, + first_binding: u32, + binding_count: u32, + p_buffers: *const Buffer, + p_offsets: *const DeviceSize, + ) -> c_void, + pub cmd_draw: extern "system" fn( + command_buffer: CommandBuffer, + vertex_count: u32, + instance_count: u32, + first_vertex: u32, + first_instance: u32, + ) -> c_void, + pub cmd_draw_indexed: extern "system" fn( + command_buffer: CommandBuffer, + index_count: u32, + instance_count: u32, + first_index: u32, + vertex_offset: i32, + first_instance: u32, + ) -> c_void, + pub cmd_draw_indirect: extern "system" fn( + command_buffer: CommandBuffer, + buffer: Buffer, + offset: DeviceSize, + draw_count: u32, + stride: u32, + ) -> c_void, + pub cmd_draw_indexed_indirect: extern "system" fn( + command_buffer: CommandBuffer, + buffer: Buffer, + offset: DeviceSize, + draw_count: u32, + stride: u32, + ) -> c_void, + pub cmd_dispatch: extern "system" fn( + command_buffer: CommandBuffer, + group_count_x: u32, + group_count_y: u32, + group_count_z: u32, + ) -> c_void, + pub cmd_dispatch_indirect: extern "system" fn( + command_buffer: CommandBuffer, + buffer: Buffer, + offset: DeviceSize, + ) -> c_void, + pub cmd_copy_buffer: extern "system" fn( + command_buffer: CommandBuffer, + src_buffer: Buffer, + dst_buffer: Buffer, + region_count: u32, + p_regions: *const BufferCopy, + ) -> c_void, + pub cmd_copy_image: extern "system" fn( + command_buffer: CommandBuffer, + src_image: Image, + src_image_layout: ImageLayout, + dst_image: Image, + dst_image_layout: ImageLayout, + region_count: u32, + p_regions: *const ImageCopy, + ) -> c_void, + pub cmd_blit_image: extern "system" fn( + command_buffer: CommandBuffer, + src_image: Image, + src_image_layout: ImageLayout, + dst_image: Image, + dst_image_layout: ImageLayout, + region_count: u32, + p_regions: *const ImageBlit, + filter: Filter, + ) -> c_void, + pub cmd_copy_buffer_to_image: extern "system" fn( + command_buffer: CommandBuffer, + src_buffer: Buffer, + dst_image: Image, + dst_image_layout: ImageLayout, + region_count: u32, + p_regions: *const BufferImageCopy, + ) -> c_void, + pub cmd_copy_image_to_buffer: extern "system" fn( + command_buffer: CommandBuffer, + src_image: Image, + src_image_layout: ImageLayout, + dst_buffer: Buffer, + region_count: u32, + p_regions: *const BufferImageCopy, + ) -> c_void, + pub cmd_update_buffer: extern "system" fn( + command_buffer: CommandBuffer, + dst_buffer: Buffer, + dst_offset: DeviceSize, + data_size: DeviceSize, + p_data: *const c_void, + ) -> c_void, + pub cmd_fill_buffer: extern "system" fn( + command_buffer: CommandBuffer, + dst_buffer: Buffer, + 
dst_offset: DeviceSize, + size: DeviceSize, + data: u32, + ) -> c_void, + pub cmd_clear_color_image: extern "system" fn( + command_buffer: CommandBuffer, + image: Image, + image_layout: ImageLayout, + p_color: *const ClearColorValue, + range_count: u32, + p_ranges: *const ImageSubresourceRange, + ) -> c_void, + pub cmd_clear_depth_stencil_image: extern "system" fn( + command_buffer: CommandBuffer, + image: Image, + image_layout: ImageLayout, + p_depth_stencil: *const ClearDepthStencilValue, + range_count: u32, + p_ranges: *const ImageSubresourceRange, + ) -> c_void, + pub cmd_clear_attachments: extern "system" fn( + command_buffer: CommandBuffer, + attachment_count: u32, + p_attachments: *const ClearAttachment, + rect_count: u32, + p_rects: *const ClearRect, + ) -> c_void, + pub cmd_resolve_image: extern "system" fn( + command_buffer: CommandBuffer, + src_image: Image, + src_image_layout: ImageLayout, + dst_image: Image, + dst_image_layout: ImageLayout, + region_count: u32, + p_regions: *const ImageResolve, + ) -> c_void, + pub cmd_set_event: extern "system" fn( + command_buffer: CommandBuffer, + event: Event, + stage_mask: PipelineStageFlags, + ) -> c_void, + pub cmd_reset_event: extern "system" fn( + command_buffer: CommandBuffer, + event: Event, + stage_mask: PipelineStageFlags, + ) -> c_void, + pub cmd_wait_events: extern "system" fn( + command_buffer: CommandBuffer, + event_count: u32, + p_events: *const Event, + src_stage_mask: PipelineStageFlags, + dst_stage_mask: PipelineStageFlags, + memory_barrier_count: u32, + p_memory_barriers: *const MemoryBarrier, + buffer_memory_barrier_count: u32, + p_buffer_memory_barriers: *const BufferMemoryBarrier, + image_memory_barrier_count: u32, + p_image_memory_barriers: *const ImageMemoryBarrier, + ) -> c_void, + pub cmd_pipeline_barrier: extern "system" fn( + command_buffer: CommandBuffer, + src_stage_mask: PipelineStageFlags, + dst_stage_mask: PipelineStageFlags, + dependency_flags: DependencyFlags, + memory_barrier_count: u32, + p_memory_barriers: *const MemoryBarrier, + buffer_memory_barrier_count: u32, + p_buffer_memory_barriers: *const BufferMemoryBarrier, + image_memory_barrier_count: u32, + p_image_memory_barriers: *const ImageMemoryBarrier, + ) -> c_void, + pub cmd_begin_query: extern "system" fn( + command_buffer: CommandBuffer, + query_pool: QueryPool, + query: u32, + flags: QueryControlFlags, + ) -> c_void, + pub cmd_end_query: extern "system" fn( + command_buffer: CommandBuffer, + query_pool: QueryPool, + query: u32, + ) -> c_void, + pub cmd_reset_query_pool: extern "system" fn( + command_buffer: CommandBuffer, + query_pool: QueryPool, + first_query: u32, + query_count: u32, + ) -> c_void, + pub cmd_write_timestamp: extern "system" fn( + command_buffer: CommandBuffer, + pipeline_stage: PipelineStageFlags, + query_pool: QueryPool, + query: u32, + ) -> c_void, + pub cmd_copy_query_pool_results: extern "system" fn( + command_buffer: CommandBuffer, + query_pool: QueryPool, + first_query: u32, + query_count: u32, + dst_buffer: Buffer, + dst_offset: DeviceSize, + stride: DeviceSize, + flags: QueryResultFlags, + ) -> c_void, + pub cmd_push_constants: extern "system" fn( + command_buffer: CommandBuffer, + layout: PipelineLayout, + stage_flags: ShaderStageFlags, + offset: u32, + size: u32, + p_values: *const c_void, + ) -> c_void, + pub cmd_begin_render_pass: extern "system" fn( + command_buffer: CommandBuffer, + p_render_pass_begin: *const RenderPassBeginInfo, + contents: SubpassContents, + ) -> c_void, + pub cmd_next_subpass: + extern 
"system" fn(command_buffer: CommandBuffer, contents: SubpassContents) -> c_void, + pub cmd_end_render_pass: extern "system" fn(command_buffer: CommandBuffer) -> c_void, + pub cmd_execute_commands: extern "system" fn( + command_buffer: CommandBuffer, + command_buffer_count: u32, + p_command_buffers: *const CommandBuffer, + ) -> c_void, +} +unsafe impl Send for DeviceFnV1_0 {} +unsafe impl Sync for DeviceFnV1_0 {} +impl ::std::clone::Clone for DeviceFnV1_0 { + fn clone(&self) -> Self { + DeviceFnV1_0 { + destroy_device: self.destroy_device, + get_device_queue: self.get_device_queue, + queue_submit: self.queue_submit, + queue_wait_idle: self.queue_wait_idle, + device_wait_idle: self.device_wait_idle, + allocate_memory: self.allocate_memory, + free_memory: self.free_memory, + map_memory: self.map_memory, + unmap_memory: self.unmap_memory, + flush_mapped_memory_ranges: self.flush_mapped_memory_ranges, + invalidate_mapped_memory_ranges: self.invalidate_mapped_memory_ranges, + get_device_memory_commitment: self.get_device_memory_commitment, + bind_buffer_memory: self.bind_buffer_memory, + bind_image_memory: self.bind_image_memory, + get_buffer_memory_requirements: self.get_buffer_memory_requirements, + get_image_memory_requirements: self.get_image_memory_requirements, + get_image_sparse_memory_requirements: self.get_image_sparse_memory_requirements, + queue_bind_sparse: self.queue_bind_sparse, + create_fence: self.create_fence, + destroy_fence: self.destroy_fence, + reset_fences: self.reset_fences, + get_fence_status: self.get_fence_status, + wait_for_fences: self.wait_for_fences, + create_semaphore: self.create_semaphore, + destroy_semaphore: self.destroy_semaphore, + create_event: self.create_event, + destroy_event: self.destroy_event, + get_event_status: self.get_event_status, + set_event: self.set_event, + reset_event: self.reset_event, + create_query_pool: self.create_query_pool, + destroy_query_pool: self.destroy_query_pool, + get_query_pool_results: self.get_query_pool_results, + create_buffer: self.create_buffer, + destroy_buffer: self.destroy_buffer, + create_buffer_view: self.create_buffer_view, + destroy_buffer_view: self.destroy_buffer_view, + create_image: self.create_image, + destroy_image: self.destroy_image, + get_image_subresource_layout: self.get_image_subresource_layout, + create_image_view: self.create_image_view, + destroy_image_view: self.destroy_image_view, + create_shader_module: self.create_shader_module, + destroy_shader_module: self.destroy_shader_module, + create_pipeline_cache: self.create_pipeline_cache, + destroy_pipeline_cache: self.destroy_pipeline_cache, + get_pipeline_cache_data: self.get_pipeline_cache_data, + merge_pipeline_caches: self.merge_pipeline_caches, + create_graphics_pipelines: self.create_graphics_pipelines, + create_compute_pipelines: self.create_compute_pipelines, + destroy_pipeline: self.destroy_pipeline, + create_pipeline_layout: self.create_pipeline_layout, + destroy_pipeline_layout: self.destroy_pipeline_layout, + create_sampler: self.create_sampler, + destroy_sampler: self.destroy_sampler, + create_descriptor_set_layout: self.create_descriptor_set_layout, + destroy_descriptor_set_layout: self.destroy_descriptor_set_layout, + create_descriptor_pool: self.create_descriptor_pool, + destroy_descriptor_pool: self.destroy_descriptor_pool, + reset_descriptor_pool: self.reset_descriptor_pool, + allocate_descriptor_sets: self.allocate_descriptor_sets, + free_descriptor_sets: self.free_descriptor_sets, + update_descriptor_sets: 
self.update_descriptor_sets, + create_framebuffer: self.create_framebuffer, + destroy_framebuffer: self.destroy_framebuffer, + create_render_pass: self.create_render_pass, + destroy_render_pass: self.destroy_render_pass, + get_render_area_granularity: self.get_render_area_granularity, + create_command_pool: self.create_command_pool, + destroy_command_pool: self.destroy_command_pool, + reset_command_pool: self.reset_command_pool, + allocate_command_buffers: self.allocate_command_buffers, + free_command_buffers: self.free_command_buffers, + begin_command_buffer: self.begin_command_buffer, + end_command_buffer: self.end_command_buffer, + reset_command_buffer: self.reset_command_buffer, + cmd_bind_pipeline: self.cmd_bind_pipeline, + cmd_set_viewport: self.cmd_set_viewport, + cmd_set_scissor: self.cmd_set_scissor, + cmd_set_line_width: self.cmd_set_line_width, + cmd_set_depth_bias: self.cmd_set_depth_bias, + cmd_set_blend_constants: self.cmd_set_blend_constants, + cmd_set_depth_bounds: self.cmd_set_depth_bounds, + cmd_set_stencil_compare_mask: self.cmd_set_stencil_compare_mask, + cmd_set_stencil_write_mask: self.cmd_set_stencil_write_mask, + cmd_set_stencil_reference: self.cmd_set_stencil_reference, + cmd_bind_descriptor_sets: self.cmd_bind_descriptor_sets, + cmd_bind_index_buffer: self.cmd_bind_index_buffer, + cmd_bind_vertex_buffers: self.cmd_bind_vertex_buffers, + cmd_draw: self.cmd_draw, + cmd_draw_indexed: self.cmd_draw_indexed, + cmd_draw_indirect: self.cmd_draw_indirect, + cmd_draw_indexed_indirect: self.cmd_draw_indexed_indirect, + cmd_dispatch: self.cmd_dispatch, + cmd_dispatch_indirect: self.cmd_dispatch_indirect, + cmd_copy_buffer: self.cmd_copy_buffer, + cmd_copy_image: self.cmd_copy_image, + cmd_blit_image: self.cmd_blit_image, + cmd_copy_buffer_to_image: self.cmd_copy_buffer_to_image, + cmd_copy_image_to_buffer: self.cmd_copy_image_to_buffer, + cmd_update_buffer: self.cmd_update_buffer, + cmd_fill_buffer: self.cmd_fill_buffer, + cmd_clear_color_image: self.cmd_clear_color_image, + cmd_clear_depth_stencil_image: self.cmd_clear_depth_stencil_image, + cmd_clear_attachments: self.cmd_clear_attachments, + cmd_resolve_image: self.cmd_resolve_image, + cmd_set_event: self.cmd_set_event, + cmd_reset_event: self.cmd_reset_event, + cmd_wait_events: self.cmd_wait_events, + cmd_pipeline_barrier: self.cmd_pipeline_barrier, + cmd_begin_query: self.cmd_begin_query, + cmd_end_query: self.cmd_end_query, + cmd_reset_query_pool: self.cmd_reset_query_pool, + cmd_write_timestamp: self.cmd_write_timestamp, + cmd_copy_query_pool_results: self.cmd_copy_query_pool_results, + cmd_push_constants: self.cmd_push_constants, + cmd_begin_render_pass: self.cmd_begin_render_pass, + cmd_next_subpass: self.cmd_next_subpass, + cmd_end_render_pass: self.cmd_end_render_pass, + cmd_execute_commands: self.cmd_execute_commands, + } + } +} +impl DeviceFnV1_0 { + pub fn load(mut _f: F) -> Self + where + F: FnMut(&::std::ffi::CStr) -> *const c_void, + { + DeviceFnV1_0 { + destroy_device: unsafe { + extern "system" fn destroy_device( + _device: Device, + _p_allocator: *const AllocationCallbacks, + ) -> c_void { + panic!(concat!("Unable to load ", stringify!(destroy_device))) + } + let raw_name = stringify!(vkDestroyDevice); + let cname = ::std::ffi::CString::new(raw_name).unwrap(); + let val = _f(&cname); + if val.is_null() { + destroy_device + } else { + ::std::mem::transmute(val) + } + }, + get_device_queue: unsafe { + extern "system" fn get_device_queue( + _device: Device, + _queue_family_index: u32, + _queue_index: u32, + 
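
The unsafe impl Send/Sync and the hand-written Clone above deserve a note: plain function pointers are already Copy, Send and Sync in Rust, so the field-by-field clone only copies pointers, and the unsafe impls appear to be the generator spelling the guarantees out rather than being strictly necessary. A quick, runnable check of that property:

    // Function pointers satisfy Copy + Send + Sync, which is what makes
    // a table of them cheap to clone and safe to share across threads.
    fn assert_copy_send_sync<T: Copy + Send + Sync>(_: T) {}

    extern "system" fn noop(x: u32) -> u32 { x }

    fn main() {
        let f: extern "system" fn(u32) -> u32 = noop;
        assert_copy_send_sync(f); // compiles; no unsafe needed here
    }
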
_p_queue: *mut Queue, + ) -> c_void { + panic!(concat!("Unable to load ", stringify!(get_device_queue))) + } + let raw_name = stringify!(vkGetDeviceQueue); + let cname = ::std::ffi::CString::new(raw_name).unwrap(); + let val = _f(&cname); + if val.is_null() { + get_device_queue + } else { + ::std::mem::transmute(val) + } + }, + queue_submit: unsafe { + extern "system" fn queue_submit( + _queue: Queue, + _submit_count: u32, + _p_submits: *const SubmitInfo, + _fence: Fence, + ) -> Result { + panic!(concat!("Unable to load ", stringify!(queue_submit))) + } + let raw_name = stringify!(vkQueueSubmit); + let cname = ::std::ffi::CString::new(raw_name).unwrap(); + let val = _f(&cname); + if val.is_null() { + queue_submit + } else { + ::std::mem::transmute(val) + } + }, + queue_wait_idle: unsafe { + extern "system" fn queue_wait_idle(_queue: Queue) -> Result { + panic!(concat!("Unable to load ", stringify!(queue_wait_idle))) + } + let raw_name = stringify!(vkQueueWaitIdle); + let cname = ::std::ffi::CString::new(raw_name).unwrap(); + let val = _f(&cname); + if val.is_null() { + queue_wait_idle + } else { + ::std::mem::transmute(val) + } + }, + device_wait_idle: unsafe { + extern "system" fn device_wait_idle(_device: Device) -> Result { + panic!(concat!("Unable to load ", stringify!(device_wait_idle))) + } + let raw_name = stringify!(vkDeviceWaitIdle); + let cname = ::std::ffi::CString::new(raw_name).unwrap(); + let val = _f(&cname); + if val.is_null() { + device_wait_idle + } else { + ::std::mem::transmute(val) + } + }, + allocate_memory: unsafe { + extern "system" fn allocate_memory( + _device: Device, + _p_allocate_info: *const MemoryAllocateInfo, + _p_allocator: *const AllocationCallbacks, + _p_memory: *mut DeviceMemory, + ) -> Result { + panic!(concat!("Unable to load ", stringify!(allocate_memory))) + } + let raw_name = stringify!(vkAllocateMemory); + let cname = ::std::ffi::CString::new(raw_name).unwrap(); + let val = _f(&cname); + if val.is_null() { + allocate_memory + } else { + ::std::mem::transmute(val) + } + }, + free_memory: unsafe { + extern "system" fn free_memory( + _device: Device, + _memory: DeviceMemory, + _p_allocator: *const AllocationCallbacks, + ) -> c_void { + panic!(concat!("Unable to load ", stringify!(free_memory))) + } + let raw_name = stringify!(vkFreeMemory); + let cname = ::std::ffi::CString::new(raw_name).unwrap(); + let val = _f(&cname); + if val.is_null() { + free_memory + } else { + ::std::mem::transmute(val) + } + }, + map_memory: unsafe { + extern "system" fn map_memory( + _device: Device, + _memory: DeviceMemory, + _offset: DeviceSize, + _size: DeviceSize, + _flags: MemoryMapFlags, + _pp_data: *mut *mut c_void, + ) -> Result { + panic!(concat!("Unable to load ", stringify!(map_memory))) + } + let raw_name = stringify!(vkMapMemory); + let cname = ::std::ffi::CString::new(raw_name).unwrap(); + let val = _f(&cname); + if val.is_null() { + map_memory + } else { + ::std::mem::transmute(val) + } + }, + unmap_memory: unsafe { + extern "system" fn unmap_memory(_device: Device, _memory: DeviceMemory) -> c_void { + panic!(concat!("Unable to load ", stringify!(unmap_memory))) + } + let raw_name = stringify!(vkUnmapMemory); + let cname = ::std::ffi::CString::new(raw_name).unwrap(); + let val = _f(&cname); + if val.is_null() { + unmap_memory + } else { + ::std::mem::transmute(val) + } + }, + flush_mapped_memory_ranges: unsafe { + extern "system" fn flush_mapped_memory_ranges( + _device: Device, + _memory_range_count: u32, + _p_memory_ranges: *const MappedMemoryRange, + ) -> 
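
Each arm of DeviceFnV1_0::load follows the same pattern: build the C symbol name, ask the loader closure for a pointer, and on failure install a stub that panics with the function's name instead of leaving a null pointer to crash through later. Distilled into a self-contained sketch (toy names; the real stubs return this module's c_void/Result types):

    use std::ffi::{c_void, CStr, CString};

    type PfnToy = extern "system" fn(x: u32) -> u32;

    // Fallback used when the driver does not expose the symbol; calling
    // it fails loudly with the missing function's name.
    extern "system" fn toy_stub(_x: u32) -> u32 {
        panic!(concat!("Unable to load ", stringify!(vkToy)))
    }

    fn load_toy<F>(mut f: F) -> PfnToy
    where
        F: FnMut(&CStr) -> *const c_void,
    {
        let cname = CString::new(stringify!(vkToy)).unwrap();
        let val = f(&cname);
        if val.is_null() {
            toy_stub
        } else {
            // Reinterpret the untyped pointer as the typed entry point.
            unsafe { std::mem::transmute(val) }
        }
    }

    fn main() {
        // A loader that resolves nothing still yields a callable table
        // entry (the named panic stub) rather than a null pointer.
        let entry = load_toy(|_name| std::ptr::null());
        let _ = entry;
    }

In ash itself the closure handed to load typically wraps vkGetDeviceProcAddr for the freshly created device, so every field ends up as either a real driver entry point or its named stub.
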
Result { + panic!(concat!( + "Unable to load ", + stringify!(flush_mapped_memory_ranges) + )) + } + let raw_name = stringify!(vkFlushMappedMemoryRanges); + let cname = ::std::ffi::CString::new(raw_name).unwrap(); + let val = _f(&cname); + if val.is_null() { + flush_mapped_memory_ranges + } else { + ::std::mem::transmute(val) + } + }, + invalidate_mapped_memory_ranges: unsafe { + extern "system" fn invalidate_mapped_memory_ranges( + _device: Device, + _memory_range_count: u32, + _p_memory_ranges: *const MappedMemoryRange, + ) -> Result { + panic!(concat!( + "Unable to load ", + stringify!(invalidate_mapped_memory_ranges) + )) + } + let raw_name = stringify!(vkInvalidateMappedMemoryRanges); + let cname = ::std::ffi::CString::new(raw_name).unwrap(); + let val = _f(&cname); + if val.is_null() { + invalidate_mapped_memory_ranges + } else { + ::std::mem::transmute(val) + } + }, + get_device_memory_commitment: unsafe { + extern "system" fn get_device_memory_commitment( + _device: Device, + _memory: DeviceMemory, + _p_committed_memory_in_bytes: *mut DeviceSize, + ) -> c_void { + panic!(concat!( + "Unable to load ", + stringify!(get_device_memory_commitment) + )) + } + let raw_name = stringify!(vkGetDeviceMemoryCommitment); + let cname = ::std::ffi::CString::new(raw_name).unwrap(); + let val = _f(&cname); + if val.is_null() { + get_device_memory_commitment + } else { + ::std::mem::transmute(val) + } + }, + bind_buffer_memory: unsafe { + extern "system" fn bind_buffer_memory( + _device: Device, + _buffer: Buffer, + _memory: DeviceMemory, + _memory_offset: DeviceSize, + ) -> Result { + panic!(concat!("Unable to load ", stringify!(bind_buffer_memory))) + } + let raw_name = stringify!(vkBindBufferMemory); + let cname = ::std::ffi::CString::new(raw_name).unwrap(); + let val = _f(&cname); + if val.is_null() { + bind_buffer_memory + } else { + ::std::mem::transmute(val) + } + }, + bind_image_memory: unsafe { + extern "system" fn bind_image_memory( + _device: Device, + _image: Image, + _memory: DeviceMemory, + _memory_offset: DeviceSize, + ) -> Result { + panic!(concat!("Unable to load ", stringify!(bind_image_memory))) + } + let raw_name = stringify!(vkBindImageMemory); + let cname = ::std::ffi::CString::new(raw_name).unwrap(); + let val = _f(&cname); + if val.is_null() { + bind_image_memory + } else { + ::std::mem::transmute(val) + } + }, + get_buffer_memory_requirements: unsafe { + extern "system" fn get_buffer_memory_requirements( + _device: Device, + _buffer: Buffer, + _p_memory_requirements: *mut MemoryRequirements, + ) -> c_void { + panic!(concat!( + "Unable to load ", + stringify!(get_buffer_memory_requirements) + )) + } + let raw_name = stringify!(vkGetBufferMemoryRequirements); + let cname = ::std::ffi::CString::new(raw_name).unwrap(); + let val = _f(&cname); + if val.is_null() { + get_buffer_memory_requirements + } else { + ::std::mem::transmute(val) + } + }, + get_image_memory_requirements: unsafe { + extern "system" fn get_image_memory_requirements( + _device: Device, + _image: Image, + _p_memory_requirements: *mut MemoryRequirements, + ) -> c_void { + panic!(concat!( + "Unable to load ", + stringify!(get_image_memory_requirements) + )) + } + let raw_name = stringify!(vkGetImageMemoryRequirements); + let cname = ::std::ffi::CString::new(raw_name).unwrap(); + let val = _f(&cname); + if val.is_null() { + get_image_memory_requirements + } else { + ::std::mem::transmute(val) + } + }, + get_image_sparse_memory_requirements: unsafe { + extern "system" fn get_image_sparse_memory_requirements( + 
_device: Device, + _image: Image, + _p_sparse_memory_requirement_count: *mut u32, + _p_sparse_memory_requirements: *mut SparseImageMemoryRequirements, + ) -> c_void { + panic!(concat!( + "Unable to load ", + stringify!(get_image_sparse_memory_requirements) + )) + } + let raw_name = stringify!(vkGetImageSparseMemoryRequirements); + let cname = ::std::ffi::CString::new(raw_name).unwrap(); + let val = _f(&cname); + if val.is_null() { + get_image_sparse_memory_requirements + } else { + ::std::mem::transmute(val) + } + }, + queue_bind_sparse: unsafe { + extern "system" fn queue_bind_sparse( + _queue: Queue, + _bind_info_count: u32, + _p_bind_info: *const BindSparseInfo, + _fence: Fence, + ) -> Result { + panic!(concat!("Unable to load ", stringify!(queue_bind_sparse))) + } + let raw_name = stringify!(vkQueueBindSparse); + let cname = ::std::ffi::CString::new(raw_name).unwrap(); + let val = _f(&cname); + if val.is_null() { + queue_bind_sparse + } else { + ::std::mem::transmute(val) + } + }, + create_fence: unsafe { + extern "system" fn create_fence( + _device: Device, + _p_create_info: *const FenceCreateInfo, + _p_allocator: *const AllocationCallbacks, + _p_fence: *mut Fence, + ) -> Result { + panic!(concat!("Unable to load ", stringify!(create_fence))) + } + let raw_name = stringify!(vkCreateFence); + let cname = ::std::ffi::CString::new(raw_name).unwrap(); + let val = _f(&cname); + if val.is_null() { + create_fence + } else { + ::std::mem::transmute(val) + } + }, + destroy_fence: unsafe { + extern "system" fn destroy_fence( + _device: Device, + _fence: Fence, + _p_allocator: *const AllocationCallbacks, + ) -> c_void { + panic!(concat!("Unable to load ", stringify!(destroy_fence))) + } + let raw_name = stringify!(vkDestroyFence); + let cname = ::std::ffi::CString::new(raw_name).unwrap(); + let val = _f(&cname); + if val.is_null() { + destroy_fence + } else { + ::std::mem::transmute(val) + } + }, + reset_fences: unsafe { + extern "system" fn reset_fences( + _device: Device, + _fence_count: u32, + _p_fences: *const Fence, + ) -> Result { + panic!(concat!("Unable to load ", stringify!(reset_fences))) + } + let raw_name = stringify!(vkResetFences); + let cname = ::std::ffi::CString::new(raw_name).unwrap(); + let val = _f(&cname); + if val.is_null() { + reset_fences + } else { + ::std::mem::transmute(val) + } + }, + get_fence_status: unsafe { + extern "system" fn get_fence_status(_device: Device, _fence: Fence) -> Result { + panic!(concat!("Unable to load ", stringify!(get_fence_status))) + } + let raw_name = stringify!(vkGetFenceStatus); + let cname = ::std::ffi::CString::new(raw_name).unwrap(); + let val = _f(&cname); + if val.is_null() { + get_fence_status + } else { + ::std::mem::transmute(val) + } + }, + wait_for_fences: unsafe { + extern "system" fn wait_for_fences( + _device: Device, + _fence_count: u32, + _p_fences: *const Fence, + _wait_all: Bool32, + _timeout: u64, + ) -> Result { + panic!(concat!("Unable to load ", stringify!(wait_for_fences))) + } + let raw_name = stringify!(vkWaitForFences); + let cname = ::std::ffi::CString::new(raw_name).unwrap(); + let val = _f(&cname); + if val.is_null() { + wait_for_fences + } else { + ::std::mem::transmute(val) + } + }, + create_semaphore: unsafe { + extern "system" fn create_semaphore( + _device: Device, + _p_create_info: *const SemaphoreCreateInfo, + _p_allocator: *const AllocationCallbacks, + _p_semaphore: *mut Semaphore, + ) -> Result { + panic!(concat!("Unable to load ", stringify!(create_semaphore))) + } + let raw_name = 
stringify!(vkCreateSemaphore); + let cname = ::std::ffi::CString::new(raw_name).unwrap(); + let val = _f(&cname); + if val.is_null() { + create_semaphore + } else { + ::std::mem::transmute(val) + } + }, + destroy_semaphore: unsafe { + extern "system" fn destroy_semaphore( + _device: Device, + _semaphore: Semaphore, + _p_allocator: *const AllocationCallbacks, + ) -> c_void { + panic!(concat!("Unable to load ", stringify!(destroy_semaphore))) + } + let raw_name = stringify!(vkDestroySemaphore); + let cname = ::std::ffi::CString::new(raw_name).unwrap(); + let val = _f(&cname); + if val.is_null() { + destroy_semaphore + } else { + ::std::mem::transmute(val) + } + }, + create_event: unsafe { + extern "system" fn create_event( + _device: Device, + _p_create_info: *const EventCreateInfo, + _p_allocator: *const AllocationCallbacks, + _p_event: *mut Event, + ) -> Result { + panic!(concat!("Unable to load ", stringify!(create_event))) + } + let raw_name = stringify!(vkCreateEvent); + let cname = ::std::ffi::CString::new(raw_name).unwrap(); + let val = _f(&cname); + if val.is_null() { + create_event + } else { + ::std::mem::transmute(val) + } + }, + destroy_event: unsafe { + extern "system" fn destroy_event( + _device: Device, + _event: Event, + _p_allocator: *const AllocationCallbacks, + ) -> c_void { + panic!(concat!("Unable to load ", stringify!(destroy_event))) + } + let raw_name = stringify!(vkDestroyEvent); + let cname = ::std::ffi::CString::new(raw_name).unwrap(); + let val = _f(&cname); + if val.is_null() { + destroy_event + } else { + ::std::mem::transmute(val) + } + }, + get_event_status: unsafe { + extern "system" fn get_event_status(_device: Device, _event: Event) -> Result { + panic!(concat!("Unable to load ", stringify!(get_event_status))) + } + let raw_name = stringify!(vkGetEventStatus); + let cname = ::std::ffi::CString::new(raw_name).unwrap(); + let val = _f(&cname); + if val.is_null() { + get_event_status + } else { + ::std::mem::transmute(val) + } + }, + set_event: unsafe { + extern "system" fn set_event(_device: Device, _event: Event) -> Result { + panic!(concat!("Unable to load ", stringify!(set_event))) + } + let raw_name = stringify!(vkSetEvent); + let cname = ::std::ffi::CString::new(raw_name).unwrap(); + let val = _f(&cname); + if val.is_null() { + set_event + } else { + ::std::mem::transmute(val) + } + }, + reset_event: unsafe { + extern "system" fn reset_event(_device: Device, _event: Event) -> Result { + panic!(concat!("Unable to load ", stringify!(reset_event))) + } + let raw_name = stringify!(vkResetEvent); + let cname = ::std::ffi::CString::new(raw_name).unwrap(); + let val = _f(&cname); + if val.is_null() { + reset_event + } else { + ::std::mem::transmute(val) + } + }, + create_query_pool: unsafe { + extern "system" fn create_query_pool( + _device: Device, + _p_create_info: *const QueryPoolCreateInfo, + _p_allocator: *const AllocationCallbacks, + _p_query_pool: *mut QueryPool, + ) -> Result { + panic!(concat!("Unable to load ", stringify!(create_query_pool))) + } + let raw_name = stringify!(vkCreateQueryPool); + let cname = ::std::ffi::CString::new(raw_name).unwrap(); + let val = _f(&cname); + if val.is_null() { + create_query_pool + } else { + ::std::mem::transmute(val) + } + }, + destroy_query_pool: unsafe { + extern "system" fn destroy_query_pool( + _device: Device, + _query_pool: QueryPool, + _p_allocator: *const AllocationCallbacks, + ) -> c_void { + panic!(concat!("Unable to load ", stringify!(destroy_query_pool))) + } + let raw_name = 
stringify!(vkDestroyQueryPool); + let cname = ::std::ffi::CString::new(raw_name).unwrap(); + let val = _f(&cname); + if val.is_null() { + destroy_query_pool + } else { + ::std::mem::transmute(val) + } + }, + get_query_pool_results: unsafe { + extern "system" fn get_query_pool_results( + _device: Device, + _query_pool: QueryPool, + _first_query: u32, + _query_count: u32, + _data_size: usize, + _p_data: *mut c_void, + _stride: DeviceSize, + _flags: QueryResultFlags, + ) -> Result { + panic!(concat!( + "Unable to load ", + stringify!(get_query_pool_results) + )) + } + let raw_name = stringify!(vkGetQueryPoolResults); + let cname = ::std::ffi::CString::new(raw_name).unwrap(); + let val = _f(&cname); + if val.is_null() { + get_query_pool_results + } else { + ::std::mem::transmute(val) + } + }, + create_buffer: unsafe { + extern "system" fn create_buffer( + _device: Device, + _p_create_info: *const BufferCreateInfo, + _p_allocator: *const AllocationCallbacks, + _p_buffer: *mut Buffer, + ) -> Result { + panic!(concat!("Unable to load ", stringify!(create_buffer))) + } + let raw_name = stringify!(vkCreateBuffer); + let cname = ::std::ffi::CString::new(raw_name).unwrap(); + let val = _f(&cname); + if val.is_null() { + create_buffer + } else { + ::std::mem::transmute(val) + } + }, + destroy_buffer: unsafe { + extern "system" fn destroy_buffer( + _device: Device, + _buffer: Buffer, + _p_allocator: *const AllocationCallbacks, + ) -> c_void { + panic!(concat!("Unable to load ", stringify!(destroy_buffer))) + } + let raw_name = stringify!(vkDestroyBuffer); + let cname = ::std::ffi::CString::new(raw_name).unwrap(); + let val = _f(&cname); + if val.is_null() { + destroy_buffer + } else { + ::std::mem::transmute(val) + } + }, + create_buffer_view: unsafe { + extern "system" fn create_buffer_view( + _device: Device, + _p_create_info: *const BufferViewCreateInfo, + _p_allocator: *const AllocationCallbacks, + _p_view: *mut BufferView, + ) -> Result { + panic!(concat!("Unable to load ", stringify!(create_buffer_view))) + } + let raw_name = stringify!(vkCreateBufferView); + let cname = ::std::ffi::CString::new(raw_name).unwrap(); + let val = _f(&cname); + if val.is_null() { + create_buffer_view + } else { + ::std::mem::transmute(val) + } + }, + destroy_buffer_view: unsafe { + extern "system" fn destroy_buffer_view( + _device: Device, + _buffer_view: BufferView, + _p_allocator: *const AllocationCallbacks, + ) -> c_void { + panic!(concat!("Unable to load ", stringify!(destroy_buffer_view))) + } + let raw_name = stringify!(vkDestroyBufferView); + let cname = ::std::ffi::CString::new(raw_name).unwrap(); + let val = _f(&cname); + if val.is_null() { + destroy_buffer_view + } else { + ::std::mem::transmute(val) + } + }, + create_image: unsafe { + extern "system" fn create_image( + _device: Device, + _p_create_info: *const ImageCreateInfo, + _p_allocator: *const AllocationCallbacks, + _p_image: *mut Image, + ) -> Result { + panic!(concat!("Unable to load ", stringify!(create_image))) + } + let raw_name = stringify!(vkCreateImage); + let cname = ::std::ffi::CString::new(raw_name).unwrap(); + let val = _f(&cname); + if val.is_null() { + create_image + } else { + ::std::mem::transmute(val) + } + }, + destroy_image: unsafe { + extern "system" fn destroy_image( + _device: Device, + _image: Image, + _p_allocator: *const AllocationCallbacks, + ) -> c_void { + panic!(concat!("Unable to load ", stringify!(destroy_image))) + } + let raw_name = stringify!(vkDestroyImage); + let cname = ::std::ffi::CString::new(raw_name).unwrap(); 
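
One small detail of these arms: the lookup string is derived at compile time with stringify!, so the Rust identifier and the C symbol string cannot drift apart through a typo'd duplicate, and CString::new only adds the NUL terminator the C side needs. Runnable on its own:

    use std::ffi::CString;

    fn main() {
        // stringify! turns the identifier into the exact symbol string
        // at compile time; CString NUL-terminates it for the lookup.
        let raw_name = stringify!(vkCreateSemaphore);
        assert_eq!(raw_name, "vkCreateSemaphore");
        let cname = CString::new(raw_name).unwrap();
        assert_eq!(cname.as_bytes_with_nul().last(), Some(&0u8));
    }
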
+ let val = _f(&cname); + if val.is_null() { + destroy_image + } else { + ::std::mem::transmute(val) + } + }, + get_image_subresource_layout: unsafe { + extern "system" fn get_image_subresource_layout( + _device: Device, + _image: Image, + _p_subresource: *const ImageSubresource, + _p_layout: *mut SubresourceLayout, + ) -> c_void { + panic!(concat!( + "Unable to load ", + stringify!(get_image_subresource_layout) + )) + } + let raw_name = stringify!(vkGetImageSubresourceLayout); + let cname = ::std::ffi::CString::new(raw_name).unwrap(); + let val = _f(&cname); + if val.is_null() { + get_image_subresource_layout + } else { + ::std::mem::transmute(val) + } + }, + create_image_view: unsafe { + extern "system" fn create_image_view( + _device: Device, + _p_create_info: *const ImageViewCreateInfo, + _p_allocator: *const AllocationCallbacks, + _p_view: *mut ImageView, + ) -> Result { + panic!(concat!("Unable to load ", stringify!(create_image_view))) + } + let raw_name = stringify!(vkCreateImageView); + let cname = ::std::ffi::CString::new(raw_name).unwrap(); + let val = _f(&cname); + if val.is_null() { + create_image_view + } else { + ::std::mem::transmute(val) + } + }, + destroy_image_view: unsafe { + extern "system" fn destroy_image_view( + _device: Device, + _image_view: ImageView, + _p_allocator: *const AllocationCallbacks, + ) -> c_void { + panic!(concat!("Unable to load ", stringify!(destroy_image_view))) + } + let raw_name = stringify!(vkDestroyImageView); + let cname = ::std::ffi::CString::new(raw_name).unwrap(); + let val = _f(&cname); + if val.is_null() { + destroy_image_view + } else { + ::std::mem::transmute(val) + } + }, + create_shader_module: unsafe { + extern "system" fn create_shader_module( + _device: Device, + _p_create_info: *const ShaderModuleCreateInfo, + _p_allocator: *const AllocationCallbacks, + _p_shader_module: *mut ShaderModule, + ) -> Result { + panic!(concat!("Unable to load ", stringify!(create_shader_module))) + } + let raw_name = stringify!(vkCreateShaderModule); + let cname = ::std::ffi::CString::new(raw_name).unwrap(); + let val = _f(&cname); + if val.is_null() { + create_shader_module + } else { + ::std::mem::transmute(val) + } + }, + destroy_shader_module: unsafe { + extern "system" fn destroy_shader_module( + _device: Device, + _shader_module: ShaderModule, + _p_allocator: *const AllocationCallbacks, + ) -> c_void { + panic!(concat!( + "Unable to load ", + stringify!(destroy_shader_module) + )) + } + let raw_name = stringify!(vkDestroyShaderModule); + let cname = ::std::ffi::CString::new(raw_name).unwrap(); + let val = _f(&cname); + if val.is_null() { + destroy_shader_module + } else { + ::std::mem::transmute(val) + } + }, + create_pipeline_cache: unsafe { + extern "system" fn create_pipeline_cache( + _device: Device, + _p_create_info: *const PipelineCacheCreateInfo, + _p_allocator: *const AllocationCallbacks, + _p_pipeline_cache: *mut PipelineCache, + ) -> Result { + panic!(concat!( + "Unable to load ", + stringify!(create_pipeline_cache) + )) + } + let raw_name = stringify!(vkCreatePipelineCache); + let cname = ::std::ffi::CString::new(raw_name).unwrap(); + let val = _f(&cname); + if val.is_null() { + create_pipeline_cache + } else { + ::std::mem::transmute(val) + } + }, + destroy_pipeline_cache: unsafe { + extern "system" fn destroy_pipeline_cache( + _device: Device, + _pipeline_cache: PipelineCache, + _p_allocator: *const AllocationCallbacks, + ) -> c_void { + panic!(concat!( + "Unable to load ", + stringify!(destroy_pipeline_cache) + )) + } + let 
raw_name = stringify!(vkDestroyPipelineCache); + let cname = ::std::ffi::CString::new(raw_name).unwrap(); + let val = _f(&cname); + if val.is_null() { + destroy_pipeline_cache + } else { + ::std::mem::transmute(val) + } + }, + get_pipeline_cache_data: unsafe { + extern "system" fn get_pipeline_cache_data( + _device: Device, + _pipeline_cache: PipelineCache, + _p_data_size: *mut usize, + _p_data: *mut c_void, + ) -> Result { + panic!(concat!( + "Unable to load ", + stringify!(get_pipeline_cache_data) + )) + } + let raw_name = stringify!(vkGetPipelineCacheData); + let cname = ::std::ffi::CString::new(raw_name).unwrap(); + let val = _f(&cname); + if val.is_null() { + get_pipeline_cache_data + } else { + ::std::mem::transmute(val) + } + }, + merge_pipeline_caches: unsafe { + extern "system" fn merge_pipeline_caches( + _device: Device, + _dst_cache: PipelineCache, + _src_cache_count: u32, + _p_src_caches: *const PipelineCache, + ) -> Result { + panic!(concat!( + "Unable to load ", + stringify!(merge_pipeline_caches) + )) + } + let raw_name = stringify!(vkMergePipelineCaches); + let cname = ::std::ffi::CString::new(raw_name).unwrap(); + let val = _f(&cname); + if val.is_null() { + merge_pipeline_caches + } else { + ::std::mem::transmute(val) + } + }, + create_graphics_pipelines: unsafe { + extern "system" fn create_graphics_pipelines( + _device: Device, + _pipeline_cache: PipelineCache, + _create_info_count: u32, + _p_create_infos: *const GraphicsPipelineCreateInfo, + _p_allocator: *const AllocationCallbacks, + _p_pipelines: *mut Pipeline, + ) -> Result { + panic!(concat!( + "Unable to load ", + stringify!(create_graphics_pipelines) + )) + } + let raw_name = stringify!(vkCreateGraphicsPipelines); + let cname = ::std::ffi::CString::new(raw_name).unwrap(); + let val = _f(&cname); + if val.is_null() { + create_graphics_pipelines + } else { + ::std::mem::transmute(val) + } + }, + create_compute_pipelines: unsafe { + extern "system" fn create_compute_pipelines( + _device: Device, + _pipeline_cache: PipelineCache, + _create_info_count: u32, + _p_create_infos: *const ComputePipelineCreateInfo, + _p_allocator: *const AllocationCallbacks, + _p_pipelines: *mut Pipeline, + ) -> Result { + panic!(concat!( + "Unable to load ", + stringify!(create_compute_pipelines) + )) + } + let raw_name = stringify!(vkCreateComputePipelines); + let cname = ::std::ffi::CString::new(raw_name).unwrap(); + let val = _f(&cname); + if val.is_null() { + create_compute_pipelines + } else { + ::std::mem::transmute(val) + } + }, + destroy_pipeline: unsafe { + extern "system" fn destroy_pipeline( + _device: Device, + _pipeline: Pipeline, + _p_allocator: *const AllocationCallbacks, + ) -> c_void { + panic!(concat!("Unable to load ", stringify!(destroy_pipeline))) + } + let raw_name = stringify!(vkDestroyPipeline); + let cname = ::std::ffi::CString::new(raw_name).unwrap(); + let val = _f(&cname); + if val.is_null() { + destroy_pipeline + } else { + ::std::mem::transmute(val) + } + }, + create_pipeline_layout: unsafe { + extern "system" fn create_pipeline_layout( + _device: Device, + _p_create_info: *const PipelineLayoutCreateInfo, + _p_allocator: *const AllocationCallbacks, + _p_pipeline_layout: *mut PipelineLayout, + ) -> Result { + panic!(concat!( + "Unable to load ", + stringify!(create_pipeline_layout) + )) + } + let raw_name = stringify!(vkCreatePipelineLayout); + let cname = ::std::ffi::CString::new(raw_name).unwrap(); + let val = _f(&cname); + if val.is_null() { + create_pipeline_layout + } else { + ::std::mem::transmute(val) + 
} + }, + destroy_pipeline_layout: unsafe { + extern "system" fn destroy_pipeline_layout( + _device: Device, + _pipeline_layout: PipelineLayout, + _p_allocator: *const AllocationCallbacks, + ) -> c_void { + panic!(concat!( + "Unable to load ", + stringify!(destroy_pipeline_layout) + )) + } + let raw_name = stringify!(vkDestroyPipelineLayout); + let cname = ::std::ffi::CString::new(raw_name).unwrap(); + let val = _f(&cname); + if val.is_null() { + destroy_pipeline_layout + } else { + ::std::mem::transmute(val) + } + }, + create_sampler: unsafe { + extern "system" fn create_sampler( + _device: Device, + _p_create_info: *const SamplerCreateInfo, + _p_allocator: *const AllocationCallbacks, + _p_sampler: *mut Sampler, + ) -> Result { + panic!(concat!("Unable to load ", stringify!(create_sampler))) + } + let raw_name = stringify!(vkCreateSampler); + let cname = ::std::ffi::CString::new(raw_name).unwrap(); + let val = _f(&cname); + if val.is_null() { + create_sampler + } else { + ::std::mem::transmute(val) + } + }, + destroy_sampler: unsafe { + extern "system" fn destroy_sampler( + _device: Device, + _sampler: Sampler, + _p_allocator: *const AllocationCallbacks, + ) -> c_void { + panic!(concat!("Unable to load ", stringify!(destroy_sampler))) + } + let raw_name = stringify!(vkDestroySampler); + let cname = ::std::ffi::CString::new(raw_name).unwrap(); + let val = _f(&cname); + if val.is_null() { + destroy_sampler + } else { + ::std::mem::transmute(val) + } + }, + create_descriptor_set_layout: unsafe { + extern "system" fn create_descriptor_set_layout( + _device: Device, + _p_create_info: *const DescriptorSetLayoutCreateInfo, + _p_allocator: *const AllocationCallbacks, + _p_set_layout: *mut DescriptorSetLayout, + ) -> Result { + panic!(concat!( + "Unable to load ", + stringify!(create_descriptor_set_layout) + )) + } + let raw_name = stringify!(vkCreateDescriptorSetLayout); + let cname = ::std::ffi::CString::new(raw_name).unwrap(); + let val = _f(&cname); + if val.is_null() { + create_descriptor_set_layout + } else { + ::std::mem::transmute(val) + } + }, + destroy_descriptor_set_layout: unsafe { + extern "system" fn destroy_descriptor_set_layout( + _device: Device, + _descriptor_set_layout: DescriptorSetLayout, + _p_allocator: *const AllocationCallbacks, + ) -> c_void { + panic!(concat!( + "Unable to load ", + stringify!(destroy_descriptor_set_layout) + )) + } + let raw_name = stringify!(vkDestroyDescriptorSetLayout); + let cname = ::std::ffi::CString::new(raw_name).unwrap(); + let val = _f(&cname); + if val.is_null() { + destroy_descriptor_set_layout + } else { + ::std::mem::transmute(val) + } + }, + create_descriptor_pool: unsafe { + extern "system" fn create_descriptor_pool( + _device: Device, + _p_create_info: *const DescriptorPoolCreateInfo, + _p_allocator: *const AllocationCallbacks, + _p_descriptor_pool: *mut DescriptorPool, + ) -> Result { + panic!(concat!( + "Unable to load ", + stringify!(create_descriptor_pool) + )) + } + let raw_name = stringify!(vkCreateDescriptorPool); + let cname = ::std::ffi::CString::new(raw_name).unwrap(); + let val = _f(&cname); + if val.is_null() { + create_descriptor_pool + } else { + ::std::mem::transmute(val) + } + }, + destroy_descriptor_pool: unsafe { + extern "system" fn destroy_descriptor_pool( + _device: Device, + _descriptor_pool: DescriptorPool, + _p_allocator: *const AllocationCallbacks, + ) -> c_void { + panic!(concat!( + "Unable to load ", + stringify!(destroy_descriptor_pool) + )) + } + let raw_name = stringify!(vkDestroyDescriptorPool); + let cname 
= ::std::ffi::CString::new(raw_name).unwrap(); + let val = _f(&cname); + if val.is_null() { + destroy_descriptor_pool + } else { + ::std::mem::transmute(val) + } + }, + reset_descriptor_pool: unsafe { + extern "system" fn reset_descriptor_pool( + _device: Device, + _descriptor_pool: DescriptorPool, + _flags: DescriptorPoolResetFlags, + ) -> Result { + panic!(concat!( + "Unable to load ", + stringify!(reset_descriptor_pool) + )) + } + let raw_name = stringify!(vkResetDescriptorPool); + let cname = ::std::ffi::CString::new(raw_name).unwrap(); + let val = _f(&cname); + if val.is_null() { + reset_descriptor_pool + } else { + ::std::mem::transmute(val) + } + }, + allocate_descriptor_sets: unsafe { + extern "system" fn allocate_descriptor_sets( + _device: Device, + _p_allocate_info: *const DescriptorSetAllocateInfo, + _p_descriptor_sets: *mut DescriptorSet, + ) -> Result { + panic!(concat!( + "Unable to load ", + stringify!(allocate_descriptor_sets) + )) + } + let raw_name = stringify!(vkAllocateDescriptorSets); + let cname = ::std::ffi::CString::new(raw_name).unwrap(); + let val = _f(&cname); + if val.is_null() { + allocate_descriptor_sets + } else { + ::std::mem::transmute(val) + } + }, + free_descriptor_sets: unsafe { + extern "system" fn free_descriptor_sets( + _device: Device, + _descriptor_pool: DescriptorPool, + _descriptor_set_count: u32, + _p_descriptor_sets: *const DescriptorSet, + ) -> Result { + panic!(concat!("Unable to load ", stringify!(free_descriptor_sets))) + } + let raw_name = stringify!(vkFreeDescriptorSets); + let cname = ::std::ffi::CString::new(raw_name).unwrap(); + let val = _f(&cname); + if val.is_null() { + free_descriptor_sets + } else { + ::std::mem::transmute(val) + } + }, + update_descriptor_sets: unsafe { + extern "system" fn update_descriptor_sets( + _device: Device, + _descriptor_write_count: u32, + _p_descriptor_writes: *const WriteDescriptorSet, + _descriptor_copy_count: u32, + _p_descriptor_copies: *const CopyDescriptorSet, + ) -> c_void { + panic!(concat!( + "Unable to load ", + stringify!(update_descriptor_sets) + )) + } + let raw_name = stringify!(vkUpdateDescriptorSets); + let cname = ::std::ffi::CString::new(raw_name).unwrap(); + let val = _f(&cname); + if val.is_null() { + update_descriptor_sets + } else { + ::std::mem::transmute(val) + } + }, + create_framebuffer: unsafe { + extern "system" fn create_framebuffer( + _device: Device, + _p_create_info: *const FramebufferCreateInfo, + _p_allocator: *const AllocationCallbacks, + _p_framebuffer: *mut Framebuffer, + ) -> Result { + panic!(concat!("Unable to load ", stringify!(create_framebuffer))) + } + let raw_name = stringify!(vkCreateFramebuffer); + let cname = ::std::ffi::CString::new(raw_name).unwrap(); + let val = _f(&cname); + if val.is_null() { + create_framebuffer + } else { + ::std::mem::transmute(val) + } + }, + destroy_framebuffer: unsafe { + extern "system" fn destroy_framebuffer( + _device: Device, + _framebuffer: Framebuffer, + _p_allocator: *const AllocationCallbacks, + ) -> c_void { + panic!(concat!("Unable to load ", stringify!(destroy_framebuffer))) + } + let raw_name = stringify!(vkDestroyFramebuffer); + let cname = ::std::ffi::CString::new(raw_name).unwrap(); + let val = _f(&cname); + if val.is_null() { + destroy_framebuffer + } else { + ::std::mem::transmute(val) + } + }, + create_render_pass: unsafe { + extern "system" fn create_render_pass( + _device: Device, + _p_create_info: *const RenderPassCreateInfo, + _p_allocator: *const AllocationCallbacks, + _p_render_pass: *mut RenderPass, + ) 
-> Result { + panic!(concat!("Unable to load ", stringify!(create_render_pass))) + } + let raw_name = stringify!(vkCreateRenderPass); + let cname = ::std::ffi::CString::new(raw_name).unwrap(); + let val = _f(&cname); + if val.is_null() { + create_render_pass + } else { + ::std::mem::transmute(val) + } + }, + destroy_render_pass: unsafe { + extern "system" fn destroy_render_pass( + _device: Device, + _render_pass: RenderPass, + _p_allocator: *const AllocationCallbacks, + ) -> c_void { + panic!(concat!("Unable to load ", stringify!(destroy_render_pass))) + } + let raw_name = stringify!(vkDestroyRenderPass); + let cname = ::std::ffi::CString::new(raw_name).unwrap(); + let val = _f(&cname); + if val.is_null() { + destroy_render_pass + } else { + ::std::mem::transmute(val) + } + }, + get_render_area_granularity: unsafe { + extern "system" fn get_render_area_granularity( + _device: Device, + _render_pass: RenderPass, + _p_granularity: *mut Extent2D, + ) -> c_void { + panic!(concat!( + "Unable to load ", + stringify!(get_render_area_granularity) + )) + } + let raw_name = stringify!(vkGetRenderAreaGranularity); + let cname = ::std::ffi::CString::new(raw_name).unwrap(); + let val = _f(&cname); + if val.is_null() { + get_render_area_granularity + } else { + ::std::mem::transmute(val) + } + }, + create_command_pool: unsafe { + extern "system" fn create_command_pool( + _device: Device, + _p_create_info: *const CommandPoolCreateInfo, + _p_allocator: *const AllocationCallbacks, + _p_command_pool: *mut CommandPool, + ) -> Result { + panic!(concat!("Unable to load ", stringify!(create_command_pool))) + } + let raw_name = stringify!(vkCreateCommandPool); + let cname = ::std::ffi::CString::new(raw_name).unwrap(); + let val = _f(&cname); + if val.is_null() { + create_command_pool + } else { + ::std::mem::transmute(val) + } + }, + destroy_command_pool: unsafe { + extern "system" fn destroy_command_pool( + _device: Device, + _command_pool: CommandPool, + _p_allocator: *const AllocationCallbacks, + ) -> c_void { + panic!(concat!("Unable to load ", stringify!(destroy_command_pool))) + } + let raw_name = stringify!(vkDestroyCommandPool); + let cname = ::std::ffi::CString::new(raw_name).unwrap(); + let val = _f(&cname); + if val.is_null() { + destroy_command_pool + } else { + ::std::mem::transmute(val) + } + }, + reset_command_pool: unsafe { + extern "system" fn reset_command_pool( + _device: Device, + _command_pool: CommandPool, + _flags: CommandPoolResetFlags, + ) -> Result { + panic!(concat!("Unable to load ", stringify!(reset_command_pool))) + } + let raw_name = stringify!(vkResetCommandPool); + let cname = ::std::ffi::CString::new(raw_name).unwrap(); + let val = _f(&cname); + if val.is_null() { + reset_command_pool + } else { + ::std::mem::transmute(val) + } + }, + allocate_command_buffers: unsafe { + extern "system" fn allocate_command_buffers( + _device: Device, + _p_allocate_info: *const CommandBufferAllocateInfo, + _p_command_buffers: *mut CommandBuffer, + ) -> Result { + panic!(concat!( + "Unable to load ", + stringify!(allocate_command_buffers) + )) + } + let raw_name = stringify!(vkAllocateCommandBuffers); + let cname = ::std::ffi::CString::new(raw_name).unwrap(); + let val = _f(&cname); + if val.is_null() { + allocate_command_buffers + } else { + ::std::mem::transmute(val) + } + }, + free_command_buffers: unsafe { + extern "system" fn free_command_buffers( + _device: Device, + _command_pool: CommandPool, + _command_buffer_count: u32, + _p_command_buffers: *const CommandBuffer, + ) -> c_void { + 
panic!(concat!("Unable to load ", stringify!(free_command_buffers))) + } + let raw_name = stringify!(vkFreeCommandBuffers); + let cname = ::std::ffi::CString::new(raw_name).unwrap(); + let val = _f(&cname); + if val.is_null() { + free_command_buffers + } else { + ::std::mem::transmute(val) + } + }, + begin_command_buffer: unsafe { + extern "system" fn begin_command_buffer( + _command_buffer: CommandBuffer, + _p_begin_info: *const CommandBufferBeginInfo, + ) -> Result { + panic!(concat!("Unable to load ", stringify!(begin_command_buffer))) + } + let raw_name = stringify!(vkBeginCommandBuffer); + let cname = ::std::ffi::CString::new(raw_name).unwrap(); + let val = _f(&cname); + if val.is_null() { + begin_command_buffer + } else { + ::std::mem::transmute(val) + } + }, + end_command_buffer: unsafe { + extern "system" fn end_command_buffer(_command_buffer: CommandBuffer) -> Result { + panic!(concat!("Unable to load ", stringify!(end_command_buffer))) + } + let raw_name = stringify!(vkEndCommandBuffer); + let cname = ::std::ffi::CString::new(raw_name).unwrap(); + let val = _f(&cname); + if val.is_null() { + end_command_buffer + } else { + ::std::mem::transmute(val) + } + }, + reset_command_buffer: unsafe { + extern "system" fn reset_command_buffer( + _command_buffer: CommandBuffer, + _flags: CommandBufferResetFlags, + ) -> Result { + panic!(concat!("Unable to load ", stringify!(reset_command_buffer))) + } + let raw_name = stringify!(vkResetCommandBuffer); + let cname = ::std::ffi::CString::new(raw_name).unwrap(); + let val = _f(&cname); + if val.is_null() { + reset_command_buffer + } else { + ::std::mem::transmute(val) + } + }, + cmd_bind_pipeline: unsafe { + extern "system" fn cmd_bind_pipeline( + _command_buffer: CommandBuffer, + _pipeline_bind_point: PipelineBindPoint, + _pipeline: Pipeline, + ) -> c_void { + panic!(concat!("Unable to load ", stringify!(cmd_bind_pipeline))) + } + let raw_name = stringify!(vkCmdBindPipeline); + let cname = ::std::ffi::CString::new(raw_name).unwrap(); + let val = _f(&cname); + if val.is_null() { + cmd_bind_pipeline + } else { + ::std::mem::transmute(val) + } + }, + cmd_set_viewport: unsafe { + extern "system" fn cmd_set_viewport( + _command_buffer: CommandBuffer, + _first_viewport: u32, + _viewport_count: u32, + _p_viewports: *const Viewport, + ) -> c_void { + panic!(concat!("Unable to load ", stringify!(cmd_set_viewport))) + } + let raw_name = stringify!(vkCmdSetViewport); + let cname = ::std::ffi::CString::new(raw_name).unwrap(); + let val = _f(&cname); + if val.is_null() { + cmd_set_viewport + } else { + ::std::mem::transmute(val) + } + }, + cmd_set_scissor: unsafe { + extern "system" fn cmd_set_scissor( + _command_buffer: CommandBuffer, + _first_scissor: u32, + _scissor_count: u32, + _p_scissors: *const Rect2D, + ) -> c_void { + panic!(concat!("Unable to load ", stringify!(cmd_set_scissor))) + } + let raw_name = stringify!(vkCmdSetScissor); + let cname = ::std::ffi::CString::new(raw_name).unwrap(); + let val = _f(&cname); + if val.is_null() { + cmd_set_scissor + } else { + ::std::mem::transmute(val) + } + }, + cmd_set_line_width: unsafe { + extern "system" fn cmd_set_line_width( + _command_buffer: CommandBuffer, + _line_width: f32, + ) -> c_void { + panic!(concat!("Unable to load ", stringify!(cmd_set_line_width))) + } + let raw_name = stringify!(vkCmdSetLineWidth); + let cname = ::std::ffi::CString::new(raw_name).unwrap(); + let val = _f(&cname); + if val.is_null() { + cmd_set_line_width + } else { + ::std::mem::transmute(val) + } + }, + 
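+ // (editorial note) Every field in this table is initialized by the same
+ // generated pattern: define an `extern "system"` stub that panics with the
+ // command's name, ask the loader closure `_f` for the real entry point by
+ // its Vulkan symbol name, and `transmute` the pointer when it is non-null.
+ // Schematically, for a hypothetical command `vkFoo` (illustrative only,
+ // not part of the vendored code):
+ //     foo: unsafe {
+ //         extern "system" fn foo(/* ... */) -> c_void {
+ //             panic!(concat!("Unable to load ", stringify!(foo)))
+ //         }
+ //         let cname = ::std::ffi::CString::new(stringify!(vkFoo)).unwrap();
+ //         let val = _f(&cname);
+ //         if val.is_null() { foo } else { ::std::mem::transmute(val) }
+ //     },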
cmd_set_depth_bias: unsafe { + extern "system" fn cmd_set_depth_bias( + _command_buffer: CommandBuffer, + _depth_bias_constant_factor: f32, + _depth_bias_clamp: f32, + _depth_bias_slope_factor: f32, + ) -> c_void { + panic!(concat!("Unable to load ", stringify!(cmd_set_depth_bias))) + } + let raw_name = stringify!(vkCmdSetDepthBias); + let cname = ::std::ffi::CString::new(raw_name).unwrap(); + let val = _f(&cname); + if val.is_null() { + cmd_set_depth_bias + } else { + ::std::mem::transmute(val) + } + }, + cmd_set_blend_constants: unsafe { + extern "system" fn cmd_set_blend_constants( + _command_buffer: CommandBuffer, + _blend_constants: &[f32; 4], + ) -> c_void { + panic!(concat!( + "Unable to load ", + stringify!(cmd_set_blend_constants) + )) + } + let raw_name = stringify!(vkCmdSetBlendConstants); + let cname = ::std::ffi::CString::new(raw_name).unwrap(); + let val = _f(&cname); + if val.is_null() { + cmd_set_blend_constants + } else { + ::std::mem::transmute(val) + } + }, + cmd_set_depth_bounds: unsafe { + extern "system" fn cmd_set_depth_bounds( + _command_buffer: CommandBuffer, + _min_depth_bounds: f32, + _max_depth_bounds: f32, + ) -> c_void { + panic!(concat!("Unable to load ", stringify!(cmd_set_depth_bounds))) + } + let raw_name = stringify!(vkCmdSetDepthBounds); + let cname = ::std::ffi::CString::new(raw_name).unwrap(); + let val = _f(&cname); + if val.is_null() { + cmd_set_depth_bounds + } else { + ::std::mem::transmute(val) + } + }, + cmd_set_stencil_compare_mask: unsafe { + extern "system" fn cmd_set_stencil_compare_mask( + _command_buffer: CommandBuffer, + _face_mask: StencilFaceFlags, + _compare_mask: u32, + ) -> c_void { + panic!(concat!( + "Unable to load ", + stringify!(cmd_set_stencil_compare_mask) + )) + } + let raw_name = stringify!(vkCmdSetStencilCompareMask); + let cname = ::std::ffi::CString::new(raw_name).unwrap(); + let val = _f(&cname); + if val.is_null() { + cmd_set_stencil_compare_mask + } else { + ::std::mem::transmute(val) + } + }, + cmd_set_stencil_write_mask: unsafe { + extern "system" fn cmd_set_stencil_write_mask( + _command_buffer: CommandBuffer, + _face_mask: StencilFaceFlags, + _write_mask: u32, + ) -> c_void { + panic!(concat!( + "Unable to load ", + stringify!(cmd_set_stencil_write_mask) + )) + } + let raw_name = stringify!(vkCmdSetStencilWriteMask); + let cname = ::std::ffi::CString::new(raw_name).unwrap(); + let val = _f(&cname); + if val.is_null() { + cmd_set_stencil_write_mask + } else { + ::std::mem::transmute(val) + } + }, + cmd_set_stencil_reference: unsafe { + extern "system" fn cmd_set_stencil_reference( + _command_buffer: CommandBuffer, + _face_mask: StencilFaceFlags, + _reference: u32, + ) -> c_void { + panic!(concat!( + "Unable to load ", + stringify!(cmd_set_stencil_reference) + )) + } + let raw_name = stringify!(vkCmdSetStencilReference); + let cname = ::std::ffi::CString::new(raw_name).unwrap(); + let val = _f(&cname); + if val.is_null() { + cmd_set_stencil_reference + } else { + ::std::mem::transmute(val) + } + }, + cmd_bind_descriptor_sets: unsafe { + extern "system" fn cmd_bind_descriptor_sets( + _command_buffer: CommandBuffer, + _pipeline_bind_point: PipelineBindPoint, + _layout: PipelineLayout, + _first_set: u32, + _descriptor_set_count: u32, + _p_descriptor_sets: *const DescriptorSet, + _dynamic_offset_count: u32, + _p_dynamic_offsets: *const u32, + ) -> c_void { + panic!(concat!( + "Unable to load ", + stringify!(cmd_bind_descriptor_sets) + )) + } + let raw_name = stringify!(vkCmdBindDescriptorSets); + let cname = 
::std::ffi::CString::new(raw_name).unwrap(); + let val = _f(&cname); + if val.is_null() { + cmd_bind_descriptor_sets + } else { + ::std::mem::transmute(val) + } + }, + cmd_bind_index_buffer: unsafe { + extern "system" fn cmd_bind_index_buffer( + _command_buffer: CommandBuffer, + _buffer: Buffer, + _offset: DeviceSize, + _index_type: IndexType, + ) -> c_void { + panic!(concat!( + "Unable to load ", + stringify!(cmd_bind_index_buffer) + )) + } + let raw_name = stringify!(vkCmdBindIndexBuffer); + let cname = ::std::ffi::CString::new(raw_name).unwrap(); + let val = _f(&cname); + if val.is_null() { + cmd_bind_index_buffer + } else { + ::std::mem::transmute(val) + } + }, + cmd_bind_vertex_buffers: unsafe { + extern "system" fn cmd_bind_vertex_buffers( + _command_buffer: CommandBuffer, + _first_binding: u32, + _binding_count: u32, + _p_buffers: *const Buffer, + _p_offsets: *const DeviceSize, + ) -> c_void { + panic!(concat!( + "Unable to load ", + stringify!(cmd_bind_vertex_buffers) + )) + } + let raw_name = stringify!(vkCmdBindVertexBuffers); + let cname = ::std::ffi::CString::new(raw_name).unwrap(); + let val = _f(&cname); + if val.is_null() { + cmd_bind_vertex_buffers + } else { + ::std::mem::transmute(val) + } + }, + cmd_draw: unsafe { + extern "system" fn cmd_draw( + _command_buffer: CommandBuffer, + _vertex_count: u32, + _instance_count: u32, + _first_vertex: u32, + _first_instance: u32, + ) -> c_void { + panic!(concat!("Unable to load ", stringify!(cmd_draw))) + } + let raw_name = stringify!(vkCmdDraw); + let cname = ::std::ffi::CString::new(raw_name).unwrap(); + let val = _f(&cname); + if val.is_null() { + cmd_draw + } else { + ::std::mem::transmute(val) + } + }, + cmd_draw_indexed: unsafe { + extern "system" fn cmd_draw_indexed( + _command_buffer: CommandBuffer, + _index_count: u32, + _instance_count: u32, + _first_index: u32, + _vertex_offset: i32, + _first_instance: u32, + ) -> c_void { + panic!(concat!("Unable to load ", stringify!(cmd_draw_indexed))) + } + let raw_name = stringify!(vkCmdDrawIndexed); + let cname = ::std::ffi::CString::new(raw_name).unwrap(); + let val = _f(&cname); + if val.is_null() { + cmd_draw_indexed + } else { + ::std::mem::transmute(val) + } + }, + cmd_draw_indirect: unsafe { + extern "system" fn cmd_draw_indirect( + _command_buffer: CommandBuffer, + _buffer: Buffer, + _offset: DeviceSize, + _draw_count: u32, + _stride: u32, + ) -> c_void { + panic!(concat!("Unable to load ", stringify!(cmd_draw_indirect))) + } + let raw_name = stringify!(vkCmdDrawIndirect); + let cname = ::std::ffi::CString::new(raw_name).unwrap(); + let val = _f(&cname); + if val.is_null() { + cmd_draw_indirect + } else { + ::std::mem::transmute(val) + } + }, + cmd_draw_indexed_indirect: unsafe { + extern "system" fn cmd_draw_indexed_indirect( + _command_buffer: CommandBuffer, + _buffer: Buffer, + _offset: DeviceSize, + _draw_count: u32, + _stride: u32, + ) -> c_void { + panic!(concat!( + "Unable to load ", + stringify!(cmd_draw_indexed_indirect) + )) + } + let raw_name = stringify!(vkCmdDrawIndexedIndirect); + let cname = ::std::ffi::CString::new(raw_name).unwrap(); + let val = _f(&cname); + if val.is_null() { + cmd_draw_indexed_indirect + } else { + ::std::mem::transmute(val) + } + }, + cmd_dispatch: unsafe { + extern "system" fn cmd_dispatch( + _command_buffer: CommandBuffer, + _group_count_x: u32, + _group_count_y: u32, + _group_count_z: u32, + ) -> c_void { + panic!(concat!("Unable to load ", stringify!(cmd_dispatch))) + } + let raw_name = stringify!(vkCmdDispatch); + let cname = 
::std::ffi::CString::new(raw_name).unwrap(); + let val = _f(&cname); + if val.is_null() { + cmd_dispatch + } else { + ::std::mem::transmute(val) + } + }, + cmd_dispatch_indirect: unsafe { + extern "system" fn cmd_dispatch_indirect( + _command_buffer: CommandBuffer, + _buffer: Buffer, + _offset: DeviceSize, + ) -> c_void { + panic!(concat!( + "Unable to load ", + stringify!(cmd_dispatch_indirect) + )) + } + let raw_name = stringify!(vkCmdDispatchIndirect); + let cname = ::std::ffi::CString::new(raw_name).unwrap(); + let val = _f(&cname); + if val.is_null() { + cmd_dispatch_indirect + } else { + ::std::mem::transmute(val) + } + }, + cmd_copy_buffer: unsafe { + extern "system" fn cmd_copy_buffer( + _command_buffer: CommandBuffer, + _src_buffer: Buffer, + _dst_buffer: Buffer, + _region_count: u32, + _p_regions: *const BufferCopy, + ) -> c_void { + panic!(concat!("Unable to load ", stringify!(cmd_copy_buffer))) + } + let raw_name = stringify!(vkCmdCopyBuffer); + let cname = ::std::ffi::CString::new(raw_name).unwrap(); + let val = _f(&cname); + if val.is_null() { + cmd_copy_buffer + } else { + ::std::mem::transmute(val) + } + }, + cmd_copy_image: unsafe { + extern "system" fn cmd_copy_image( + _command_buffer: CommandBuffer, + _src_image: Image, + _src_image_layout: ImageLayout, + _dst_image: Image, + _dst_image_layout: ImageLayout, + _region_count: u32, + _p_regions: *const ImageCopy, + ) -> c_void { + panic!(concat!("Unable to load ", stringify!(cmd_copy_image))) + } + let raw_name = stringify!(vkCmdCopyImage); + let cname = ::std::ffi::CString::new(raw_name).unwrap(); + let val = _f(&cname); + if val.is_null() { + cmd_copy_image + } else { + ::std::mem::transmute(val) + } + }, + cmd_blit_image: unsafe { + extern "system" fn cmd_blit_image( + _command_buffer: CommandBuffer, + _src_image: Image, + _src_image_layout: ImageLayout, + _dst_image: Image, + _dst_image_layout: ImageLayout, + _region_count: u32, + _p_regions: *const ImageBlit, + _filter: Filter, + ) -> c_void { + panic!(concat!("Unable to load ", stringify!(cmd_blit_image))) + } + let raw_name = stringify!(vkCmdBlitImage); + let cname = ::std::ffi::CString::new(raw_name).unwrap(); + let val = _f(&cname); + if val.is_null() { + cmd_blit_image + } else { + ::std::mem::transmute(val) + } + }, + cmd_copy_buffer_to_image: unsafe { + extern "system" fn cmd_copy_buffer_to_image( + _command_buffer: CommandBuffer, + _src_buffer: Buffer, + _dst_image: Image, + _dst_image_layout: ImageLayout, + _region_count: u32, + _p_regions: *const BufferImageCopy, + ) -> c_void { + panic!(concat!( + "Unable to load ", + stringify!(cmd_copy_buffer_to_image) + )) + } + let raw_name = stringify!(vkCmdCopyBufferToImage); + let cname = ::std::ffi::CString::new(raw_name).unwrap(); + let val = _f(&cname); + if val.is_null() { + cmd_copy_buffer_to_image + } else { + ::std::mem::transmute(val) + } + }, + cmd_copy_image_to_buffer: unsafe { + extern "system" fn cmd_copy_image_to_buffer( + _command_buffer: CommandBuffer, + _src_image: Image, + _src_image_layout: ImageLayout, + _dst_buffer: Buffer, + _region_count: u32, + _p_regions: *const BufferImageCopy, + ) -> c_void { + panic!(concat!( + "Unable to load ", + stringify!(cmd_copy_image_to_buffer) + )) + } + let raw_name = stringify!(vkCmdCopyImageToBuffer); + let cname = ::std::ffi::CString::new(raw_name).unwrap(); + let val = _f(&cname); + if val.is_null() { + cmd_copy_image_to_buffer + } else { + ::std::mem::transmute(val) + } + }, + cmd_update_buffer: unsafe { + extern "system" fn cmd_update_buffer( + 
_command_buffer: CommandBuffer, + _dst_buffer: Buffer, + _dst_offset: DeviceSize, + _data_size: DeviceSize, + _p_data: *const c_void, + ) -> c_void { + panic!(concat!("Unable to load ", stringify!(cmd_update_buffer))) + } + let raw_name = stringify!(vkCmdUpdateBuffer); + let cname = ::std::ffi::CString::new(raw_name).unwrap(); + let val = _f(&cname); + if val.is_null() { + cmd_update_buffer + } else { + ::std::mem::transmute(val) + } + }, + cmd_fill_buffer: unsafe { + extern "system" fn cmd_fill_buffer( + _command_buffer: CommandBuffer, + _dst_buffer: Buffer, + _dst_offset: DeviceSize, + _size: DeviceSize, + _data: u32, + ) -> c_void { + panic!(concat!("Unable to load ", stringify!(cmd_fill_buffer))) + } + let raw_name = stringify!(vkCmdFillBuffer); + let cname = ::std::ffi::CString::new(raw_name).unwrap(); + let val = _f(&cname); + if val.is_null() { + cmd_fill_buffer + } else { + ::std::mem::transmute(val) + } + }, + cmd_clear_color_image: unsafe { + extern "system" fn cmd_clear_color_image( + _command_buffer: CommandBuffer, + _image: Image, + _image_layout: ImageLayout, + _p_color: *const ClearColorValue, + _range_count: u32, + _p_ranges: *const ImageSubresourceRange, + ) -> c_void { + panic!(concat!( + "Unable to load ", + stringify!(cmd_clear_color_image) + )) + } + let raw_name = stringify!(vkCmdClearColorImage); + let cname = ::std::ffi::CString::new(raw_name).unwrap(); + let val = _f(&cname); + if val.is_null() { + cmd_clear_color_image + } else { + ::std::mem::transmute(val) + } + }, + cmd_clear_depth_stencil_image: unsafe { + extern "system" fn cmd_clear_depth_stencil_image( + _command_buffer: CommandBuffer, + _image: Image, + _image_layout: ImageLayout, + _p_depth_stencil: *const ClearDepthStencilValue, + _range_count: u32, + _p_ranges: *const ImageSubresourceRange, + ) -> c_void { + panic!(concat!( + "Unable to load ", + stringify!(cmd_clear_depth_stencil_image) + )) + } + let raw_name = stringify!(vkCmdClearDepthStencilImage); + let cname = ::std::ffi::CString::new(raw_name).unwrap(); + let val = _f(&cname); + if val.is_null() { + cmd_clear_depth_stencil_image + } else { + ::std::mem::transmute(val) + } + }, + cmd_clear_attachments: unsafe { + extern "system" fn cmd_clear_attachments( + _command_buffer: CommandBuffer, + _attachment_count: u32, + _p_attachments: *const ClearAttachment, + _rect_count: u32, + _p_rects: *const ClearRect, + ) -> c_void { + panic!(concat!( + "Unable to load ", + stringify!(cmd_clear_attachments) + )) + } + let raw_name = stringify!(vkCmdClearAttachments); + let cname = ::std::ffi::CString::new(raw_name).unwrap(); + let val = _f(&cname); + if val.is_null() { + cmd_clear_attachments + } else { + ::std::mem::transmute(val) + } + }, + cmd_resolve_image: unsafe { + extern "system" fn cmd_resolve_image( + _command_buffer: CommandBuffer, + _src_image: Image, + _src_image_layout: ImageLayout, + _dst_image: Image, + _dst_image_layout: ImageLayout, + _region_count: u32, + _p_regions: *const ImageResolve, + ) -> c_void { + panic!(concat!("Unable to load ", stringify!(cmd_resolve_image))) + } + let raw_name = stringify!(vkCmdResolveImage); + let cname = ::std::ffi::CString::new(raw_name).unwrap(); + let val = _f(&cname); + if val.is_null() { + cmd_resolve_image + } else { + ::std::mem::transmute(val) + } + }, + cmd_set_event: unsafe { + extern "system" fn cmd_set_event( + _command_buffer: CommandBuffer, + _event: Event, + _stage_mask: PipelineStageFlags, + ) -> c_void { + panic!(concat!("Unable to load ", stringify!(cmd_set_event))) + } + let raw_name = 
stringify!(vkCmdSetEvent); + let cname = ::std::ffi::CString::new(raw_name).unwrap(); + let val = _f(&cname); + if val.is_null() { + cmd_set_event + } else { + ::std::mem::transmute(val) + } + }, + cmd_reset_event: unsafe { + extern "system" fn cmd_reset_event( + _command_buffer: CommandBuffer, + _event: Event, + _stage_mask: PipelineStageFlags, + ) -> c_void { + panic!(concat!("Unable to load ", stringify!(cmd_reset_event))) + } + let raw_name = stringify!(vkCmdResetEvent); + let cname = ::std::ffi::CString::new(raw_name).unwrap(); + let val = _f(&cname); + if val.is_null() { + cmd_reset_event + } else { + ::std::mem::transmute(val) + } + }, + cmd_wait_events: unsafe { + extern "system" fn cmd_wait_events( + _command_buffer: CommandBuffer, + _event_count: u32, + _p_events: *const Event, + _src_stage_mask: PipelineStageFlags, + _dst_stage_mask: PipelineStageFlags, + _memory_barrier_count: u32, + _p_memory_barriers: *const MemoryBarrier, + _buffer_memory_barrier_count: u32, + _p_buffer_memory_barriers: *const BufferMemoryBarrier, + _image_memory_barrier_count: u32, + _p_image_memory_barriers: *const ImageMemoryBarrier, + ) -> c_void { + panic!(concat!("Unable to load ", stringify!(cmd_wait_events))) + } + let raw_name = stringify!(vkCmdWaitEvents); + let cname = ::std::ffi::CString::new(raw_name).unwrap(); + let val = _f(&cname); + if val.is_null() { + cmd_wait_events + } else { + ::std::mem::transmute(val) + } + }, + cmd_pipeline_barrier: unsafe { + extern "system" fn cmd_pipeline_barrier( + _command_buffer: CommandBuffer, + _src_stage_mask: PipelineStageFlags, + _dst_stage_mask: PipelineStageFlags, + _dependency_flags: DependencyFlags, + _memory_barrier_count: u32, + _p_memory_barriers: *const MemoryBarrier, + _buffer_memory_barrier_count: u32, + _p_buffer_memory_barriers: *const BufferMemoryBarrier, + _image_memory_barrier_count: u32, + _p_image_memory_barriers: *const ImageMemoryBarrier, + ) -> c_void { + panic!(concat!("Unable to load ", stringify!(cmd_pipeline_barrier))) + } + let raw_name = stringify!(vkCmdPipelineBarrier); + let cname = ::std::ffi::CString::new(raw_name).unwrap(); + let val = _f(&cname); + if val.is_null() { + cmd_pipeline_barrier + } else { + ::std::mem::transmute(val) + } + }, + cmd_begin_query: unsafe { + extern "system" fn cmd_begin_query( + _command_buffer: CommandBuffer, + _query_pool: QueryPool, + _query: u32, + _flags: QueryControlFlags, + ) -> c_void { + panic!(concat!("Unable to load ", stringify!(cmd_begin_query))) + } + let raw_name = stringify!(vkCmdBeginQuery); + let cname = ::std::ffi::CString::new(raw_name).unwrap(); + let val = _f(&cname); + if val.is_null() { + cmd_begin_query + } else { + ::std::mem::transmute(val) + } + }, + cmd_end_query: unsafe { + extern "system" fn cmd_end_query( + _command_buffer: CommandBuffer, + _query_pool: QueryPool, + _query: u32, + ) -> c_void { + panic!(concat!("Unable to load ", stringify!(cmd_end_query))) + } + let raw_name = stringify!(vkCmdEndQuery); + let cname = ::std::ffi::CString::new(raw_name).unwrap(); + let val = _f(&cname); + if val.is_null() { + cmd_end_query + } else { + ::std::mem::transmute(val) + } + }, + cmd_reset_query_pool: unsafe { + extern "system" fn cmd_reset_query_pool( + _command_buffer: CommandBuffer, + _query_pool: QueryPool, + _first_query: u32, + _query_count: u32, + ) -> c_void { + panic!(concat!("Unable to load ", stringify!(cmd_reset_query_pool))) + } + let raw_name = stringify!(vkCmdResetQueryPool); + let cname = ::std::ffi::CString::new(raw_name).unwrap(); + let val = _f(&cname); + 
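+ // If the loader could not resolve vkCmdResetQueryPool, fall back to the
+ // panicking stub above so a stray call fails loudly rather than jumping
+ // through a null pointer.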
if val.is_null() { + cmd_reset_query_pool + } else { + ::std::mem::transmute(val) + } + }, + cmd_write_timestamp: unsafe { + extern "system" fn cmd_write_timestamp( + _command_buffer: CommandBuffer, + _pipeline_stage: PipelineStageFlags, + _query_pool: QueryPool, + _query: u32, + ) -> c_void { + panic!(concat!("Unable to load ", stringify!(cmd_write_timestamp))) + } + let raw_name = stringify!(vkCmdWriteTimestamp); + let cname = ::std::ffi::CString::new(raw_name).unwrap(); + let val = _f(&cname); + if val.is_null() { + cmd_write_timestamp + } else { + ::std::mem::transmute(val) + } + }, + cmd_copy_query_pool_results: unsafe { + extern "system" fn cmd_copy_query_pool_results( + _command_buffer: CommandBuffer, + _query_pool: QueryPool, + _first_query: u32, + _query_count: u32, + _dst_buffer: Buffer, + _dst_offset: DeviceSize, + _stride: DeviceSize, + _flags: QueryResultFlags, + ) -> c_void { + panic!(concat!( + "Unable to load ", + stringify!(cmd_copy_query_pool_results) + )) + } + let raw_name = stringify!(vkCmdCopyQueryPoolResults); + let cname = ::std::ffi::CString::new(raw_name).unwrap(); + let val = _f(&cname); + if val.is_null() { + cmd_copy_query_pool_results + } else { + ::std::mem::transmute(val) + } + }, + cmd_push_constants: unsafe { + extern "system" fn cmd_push_constants( + _command_buffer: CommandBuffer, + _layout: PipelineLayout, + _stage_flags: ShaderStageFlags, + _offset: u32, + _size: u32, + _p_values: *const c_void, + ) -> c_void { + panic!(concat!("Unable to load ", stringify!(cmd_push_constants))) + } + let raw_name = stringify!(vkCmdPushConstants); + let cname = ::std::ffi::CString::new(raw_name).unwrap(); + let val = _f(&cname); + if val.is_null() { + cmd_push_constants + } else { + ::std::mem::transmute(val) + } + }, + cmd_begin_render_pass: unsafe { + extern "system" fn cmd_begin_render_pass( + _command_buffer: CommandBuffer, + _p_render_pass_begin: *const RenderPassBeginInfo, + _contents: SubpassContents, + ) -> c_void { + panic!(concat!( + "Unable to load ", + stringify!(cmd_begin_render_pass) + )) + } + let raw_name = stringify!(vkCmdBeginRenderPass); + let cname = ::std::ffi::CString::new(raw_name).unwrap(); + let val = _f(&cname); + if val.is_null() { + cmd_begin_render_pass + } else { + ::std::mem::transmute(val) + } + }, + cmd_next_subpass: unsafe { + extern "system" fn cmd_next_subpass( + _command_buffer: CommandBuffer, + _contents: SubpassContents, + ) -> c_void { + panic!(concat!("Unable to load ", stringify!(cmd_next_subpass))) + } + let raw_name = stringify!(vkCmdNextSubpass); + let cname = ::std::ffi::CString::new(raw_name).unwrap(); + let val = _f(&cname); + if val.is_null() { + cmd_next_subpass + } else { + ::std::mem::transmute(val) + } + }, + cmd_end_render_pass: unsafe { + extern "system" fn cmd_end_render_pass(_command_buffer: CommandBuffer) -> c_void { + panic!(concat!("Unable to load ", stringify!(cmd_end_render_pass))) + } + let raw_name = stringify!(vkCmdEndRenderPass); + let cname = ::std::ffi::CString::new(raw_name).unwrap(); + let val = _f(&cname); + if val.is_null() { + cmd_end_render_pass + } else { + ::std::mem::transmute(val) + } + }, + cmd_execute_commands: unsafe { + extern "system" fn cmd_execute_commands( + _command_buffer: CommandBuffer, + _command_buffer_count: u32, + _p_command_buffers: *const CommandBuffer, + ) -> c_void { + panic!(concat!("Unable to load ", stringify!(cmd_execute_commands))) + } + let raw_name = stringify!(vkCmdExecuteCommands); + let cname = ::std::ffi::CString::new(raw_name).unwrap(); + let val = _f(&cname); + 
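+ // cmd_execute_commands is the final entry of this table; the struct
+ // literal and `load` close right after it, and the wrapper methods that
+ // follow dispatch through these loaded pointers.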
if val.is_null() { + cmd_execute_commands + } else { + ::std::mem::transmute(val) + } + }, + } + } + #[doc = ""] + pub unsafe fn destroy_device( + &self, + device: Device, + p_allocator: *const AllocationCallbacks, + ) -> c_void { + (self.destroy_device)(device, p_allocator) + } + #[doc = ""] + pub unsafe fn get_device_queue( + &self, + device: Device, + queue_family_index: u32, + queue_index: u32, + p_queue: *mut Queue, + ) -> c_void { + (self.get_device_queue)(device, queue_family_index, queue_index, p_queue) + } + #[doc = ""] + pub unsafe fn queue_submit( + &self, + queue: Queue, + submit_count: u32, + p_submits: *const SubmitInfo, + fence: Fence, + ) -> Result { + (self.queue_submit)(queue, submit_count, p_submits, fence) + } + #[doc = ""] + pub unsafe fn queue_wait_idle(&self, queue: Queue) -> Result { + (self.queue_wait_idle)(queue) + } + #[doc = ""] + pub unsafe fn device_wait_idle(&self, device: Device) -> Result { + (self.device_wait_idle)(device) + } + #[doc = ""] + pub unsafe fn allocate_memory( + &self, + device: Device, + p_allocate_info: *const MemoryAllocateInfo, + p_allocator: *const AllocationCallbacks, + p_memory: *mut DeviceMemory, + ) -> Result { + (self.allocate_memory)(device, p_allocate_info, p_allocator, p_memory) + } + #[doc = ""] + pub unsafe fn free_memory( + &self, + device: Device, + memory: DeviceMemory, + p_allocator: *const AllocationCallbacks, + ) -> c_void { + (self.free_memory)(device, memory, p_allocator) + } + #[doc = ""] + pub unsafe fn map_memory( + &self, + device: Device, + memory: DeviceMemory, + offset: DeviceSize, + size: DeviceSize, + flags: MemoryMapFlags, + pp_data: *mut *mut c_void, + ) -> Result { + (self.map_memory)(device, memory, offset, size, flags, pp_data) + } + #[doc = ""] + pub unsafe fn unmap_memory(&self, device: Device, memory: DeviceMemory) -> c_void { + (self.unmap_memory)(device, memory) + } + #[doc = ""] + pub unsafe fn flush_mapped_memory_ranges( + &self, + device: Device, + memory_range_count: u32, + p_memory_ranges: *const MappedMemoryRange, + ) -> Result { + (self.flush_mapped_memory_ranges)(device, memory_range_count, p_memory_ranges) + } + #[doc = ""] + pub unsafe fn invalidate_mapped_memory_ranges( + &self, + device: Device, + memory_range_count: u32, + p_memory_ranges: *const MappedMemoryRange, + ) -> Result { + (self.invalidate_mapped_memory_ranges)(device, memory_range_count, p_memory_ranges) + } + #[doc = ""] + pub unsafe fn get_device_memory_commitment( + &self, + device: Device, + memory: DeviceMemory, + p_committed_memory_in_bytes: *mut DeviceSize, + ) -> c_void { + (self.get_device_memory_commitment)(device, memory, p_committed_memory_in_bytes) + } + #[doc = ""] + pub unsafe fn bind_buffer_memory( + &self, + device: Device, + buffer: Buffer, + memory: DeviceMemory, + memory_offset: DeviceSize, + ) -> Result { + (self.bind_buffer_memory)(device, buffer, memory, memory_offset) + } + #[doc = ""] + pub unsafe fn bind_image_memory( + &self, + device: Device, + image: Image, + memory: DeviceMemory, + memory_offset: DeviceSize, + ) -> Result { + (self.bind_image_memory)(device, image, memory, memory_offset) + } + #[doc = ""] + pub unsafe fn get_buffer_memory_requirements( + &self, + device: Device, + buffer: Buffer, + p_memory_requirements: *mut MemoryRequirements, + ) -> c_void { + (self.get_buffer_memory_requirements)(device, buffer, p_memory_requirements) + } + #[doc = ""] + pub unsafe fn get_image_memory_requirements( + &self, + device: Device, + image: Image, + p_memory_requirements: *mut MemoryRequirements, + ) 
-> c_void { + (self.get_image_memory_requirements)(device, image, p_memory_requirements) + } + #[doc = ""] + pub unsafe fn get_image_sparse_memory_requirements( + &self, + device: Device, + image: Image, + p_sparse_memory_requirement_count: *mut u32, + p_sparse_memory_requirements: *mut SparseImageMemoryRequirements, + ) -> c_void { + (self.get_image_sparse_memory_requirements)( + device, + image, + p_sparse_memory_requirement_count, + p_sparse_memory_requirements, + ) + } + #[doc = ""] + pub unsafe fn queue_bind_sparse( + &self, + queue: Queue, + bind_info_count: u32, + p_bind_info: *const BindSparseInfo, + fence: Fence, + ) -> Result { + (self.queue_bind_sparse)(queue, bind_info_count, p_bind_info, fence) + } + #[doc = ""] + pub unsafe fn create_fence( + &self, + device: Device, + p_create_info: *const FenceCreateInfo, + p_allocator: *const AllocationCallbacks, + p_fence: *mut Fence, + ) -> Result { + (self.create_fence)(device, p_create_info, p_allocator, p_fence) + } + #[doc = ""] + pub unsafe fn destroy_fence( + &self, + device: Device, + fence: Fence, + p_allocator: *const AllocationCallbacks, + ) -> c_void { + (self.destroy_fence)(device, fence, p_allocator) + } + #[doc = ""] + pub unsafe fn reset_fences( + &self, + device: Device, + fence_count: u32, + p_fences: *const Fence, + ) -> Result { + (self.reset_fences)(device, fence_count, p_fences) + } + #[doc = ""] + pub unsafe fn get_fence_status(&self, device: Device, fence: Fence) -> Result { + (self.get_fence_status)(device, fence) + } + #[doc = ""] + pub unsafe fn wait_for_fences( + &self, + device: Device, + fence_count: u32, + p_fences: *const Fence, + wait_all: Bool32, + timeout: u64, + ) -> Result { + (self.wait_for_fences)(device, fence_count, p_fences, wait_all, timeout) + } + #[doc = ""] + pub unsafe fn create_semaphore( + &self, + device: Device, + p_create_info: *const SemaphoreCreateInfo, + p_allocator: *const AllocationCallbacks, + p_semaphore: *mut Semaphore, + ) -> Result { + (self.create_semaphore)(device, p_create_info, p_allocator, p_semaphore) + } + #[doc = ""] + pub unsafe fn destroy_semaphore( + &self, + device: Device, + semaphore: Semaphore, + p_allocator: *const AllocationCallbacks, + ) -> c_void { + (self.destroy_semaphore)(device, semaphore, p_allocator) + } + #[doc = ""] + pub unsafe fn create_event( + &self, + device: Device, + p_create_info: *const EventCreateInfo, + p_allocator: *const AllocationCallbacks, + p_event: *mut Event, + ) -> Result { + (self.create_event)(device, p_create_info, p_allocator, p_event) + } + #[doc = ""] + pub unsafe fn destroy_event( + &self, + device: Device, + event: Event, + p_allocator: *const AllocationCallbacks, + ) -> c_void { + (self.destroy_event)(device, event, p_allocator) + } + #[doc = ""] + pub unsafe fn get_event_status(&self, device: Device, event: Event) -> Result { + (self.get_event_status)(device, event) + } + #[doc = ""] + pub unsafe fn set_event(&self, device: Device, event: Event) -> Result { + (self.set_event)(device, event) + } + #[doc = ""] + pub unsafe fn reset_event(&self, device: Device, event: Event) -> Result { + (self.reset_event)(device, event) + } + #[doc = ""] + pub unsafe fn create_query_pool( + &self, + device: Device, + p_create_info: *const QueryPoolCreateInfo, + p_allocator: *const AllocationCallbacks, + p_query_pool: *mut QueryPool, + ) -> Result { + (self.create_query_pool)(device, p_create_info, p_allocator, p_query_pool) + } + #[doc = ""] + pub unsafe fn destroy_query_pool( + &self, + device: Device, + query_pool: QueryPool, + 
p_allocator: *const AllocationCallbacks, + ) -> c_void { + (self.destroy_query_pool)(device, query_pool, p_allocator) + } + #[doc = ""] + pub unsafe fn get_query_pool_results( + &self, + device: Device, + query_pool: QueryPool, + first_query: u32, + query_count: u32, + data_size: usize, + p_data: *mut c_void, + stride: DeviceSize, + flags: QueryResultFlags, + ) -> Result { + (self.get_query_pool_results)( + device, + query_pool, + first_query, + query_count, + data_size, + p_data, + stride, + flags, + ) + } + #[doc = ""] + pub unsafe fn create_buffer( + &self, + device: Device, + p_create_info: *const BufferCreateInfo, + p_allocator: *const AllocationCallbacks, + p_buffer: *mut Buffer, + ) -> Result { + (self.create_buffer)(device, p_create_info, p_allocator, p_buffer) + } + #[doc = ""] + pub unsafe fn destroy_buffer( + &self, + device: Device, + buffer: Buffer, + p_allocator: *const AllocationCallbacks, + ) -> c_void { + (self.destroy_buffer)(device, buffer, p_allocator) + } + #[doc = ""] + pub unsafe fn create_buffer_view( + &self, + device: Device, + p_create_info: *const BufferViewCreateInfo, + p_allocator: *const AllocationCallbacks, + p_view: *mut BufferView, + ) -> Result { + (self.create_buffer_view)(device, p_create_info, p_allocator, p_view) + } + #[doc = ""] + pub unsafe fn destroy_buffer_view( + &self, + device: Device, + buffer_view: BufferView, + p_allocator: *const AllocationCallbacks, + ) -> c_void { + (self.destroy_buffer_view)(device, buffer_view, p_allocator) + } + #[doc = ""] + pub unsafe fn create_image( + &self, + device: Device, + p_create_info: *const ImageCreateInfo, + p_allocator: *const AllocationCallbacks, + p_image: *mut Image, + ) -> Result { + (self.create_image)(device, p_create_info, p_allocator, p_image) + } + #[doc = ""] + pub unsafe fn destroy_image( + &self, + device: Device, + image: Image, + p_allocator: *const AllocationCallbacks, + ) -> c_void { + (self.destroy_image)(device, image, p_allocator) + } + #[doc = ""] + pub unsafe fn get_image_subresource_layout( + &self, + device: Device, + image: Image, + p_subresource: *const ImageSubresource, + p_layout: *mut SubresourceLayout, + ) -> c_void { + (self.get_image_subresource_layout)(device, image, p_subresource, p_layout) + } + #[doc = ""] + pub unsafe fn create_image_view( + &self, + device: Device, + p_create_info: *const ImageViewCreateInfo, + p_allocator: *const AllocationCallbacks, + p_view: *mut ImageView, + ) -> Result { + (self.create_image_view)(device, p_create_info, p_allocator, p_view) + } + #[doc = ""] + pub unsafe fn destroy_image_view( + &self, + device: Device, + image_view: ImageView, + p_allocator: *const AllocationCallbacks, + ) -> c_void { + (self.destroy_image_view)(device, image_view, p_allocator) + } + #[doc = ""] + pub unsafe fn create_shader_module( + &self, + device: Device, + p_create_info: *const ShaderModuleCreateInfo, + p_allocator: *const AllocationCallbacks, + p_shader_module: *mut ShaderModule, + ) -> Result { + (self.create_shader_module)(device, p_create_info, p_allocator, p_shader_module) + } + #[doc = ""] + pub unsafe fn destroy_shader_module( + &self, + device: Device, + shader_module: ShaderModule, + p_allocator: *const AllocationCallbacks, + ) -> c_void { + (self.destroy_shader_module)(device, shader_module, p_allocator) + } + #[doc = ""] + pub unsafe fn create_pipeline_cache( + &self, + device: Device, + p_create_info: *const PipelineCacheCreateInfo, + p_allocator: *const AllocationCallbacks, + p_pipeline_cache: *mut PipelineCache, + ) -> Result { + 
(self.create_pipeline_cache)(device, p_create_info, p_allocator, p_pipeline_cache) + } + #[doc = ""] + pub unsafe fn destroy_pipeline_cache( + &self, + device: Device, + pipeline_cache: PipelineCache, + p_allocator: *const AllocationCallbacks, + ) -> c_void { + (self.destroy_pipeline_cache)(device, pipeline_cache, p_allocator) + } + #[doc = ""] + pub unsafe fn get_pipeline_cache_data( + &self, + device: Device, + pipeline_cache: PipelineCache, + p_data_size: *mut usize, + p_data: *mut c_void, + ) -> Result { + (self.get_pipeline_cache_data)(device, pipeline_cache, p_data_size, p_data) + } + #[doc = ""] + pub unsafe fn merge_pipeline_caches( + &self, + device: Device, + dst_cache: PipelineCache, + src_cache_count: u32, + p_src_caches: *const PipelineCache, + ) -> Result { + (self.merge_pipeline_caches)(device, dst_cache, src_cache_count, p_src_caches) + } + #[doc = ""] + pub unsafe fn create_graphics_pipelines( + &self, + device: Device, + pipeline_cache: PipelineCache, + create_info_count: u32, + p_create_infos: *const GraphicsPipelineCreateInfo, + p_allocator: *const AllocationCallbacks, + p_pipelines: *mut Pipeline, + ) -> Result { + (self.create_graphics_pipelines)( + device, + pipeline_cache, + create_info_count, + p_create_infos, + p_allocator, + p_pipelines, + ) + } + #[doc = ""] + pub unsafe fn create_compute_pipelines( + &self, + device: Device, + pipeline_cache: PipelineCache, + create_info_count: u32, + p_create_infos: *const ComputePipelineCreateInfo, + p_allocator: *const AllocationCallbacks, + p_pipelines: *mut Pipeline, + ) -> Result { + (self.create_compute_pipelines)( + device, + pipeline_cache, + create_info_count, + p_create_infos, + p_allocator, + p_pipelines, + ) + } + #[doc = ""] + pub unsafe fn destroy_pipeline( + &self, + device: Device, + pipeline: Pipeline, + p_allocator: *const AllocationCallbacks, + ) -> c_void { + (self.destroy_pipeline)(device, pipeline, p_allocator) + } + #[doc = ""] + pub unsafe fn create_pipeline_layout( + &self, + device: Device, + p_create_info: *const PipelineLayoutCreateInfo, + p_allocator: *const AllocationCallbacks, + p_pipeline_layout: *mut PipelineLayout, + ) -> Result { + (self.create_pipeline_layout)(device, p_create_info, p_allocator, p_pipeline_layout) + } + #[doc = ""] + pub unsafe fn destroy_pipeline_layout( + &self, + device: Device, + pipeline_layout: PipelineLayout, + p_allocator: *const AllocationCallbacks, + ) -> c_void { + (self.destroy_pipeline_layout)(device, pipeline_layout, p_allocator) + } + #[doc = ""] + pub unsafe fn create_sampler( + &self, + device: Device, + p_create_info: *const SamplerCreateInfo, + p_allocator: *const AllocationCallbacks, + p_sampler: *mut Sampler, + ) -> Result { + (self.create_sampler)(device, p_create_info, p_allocator, p_sampler) + } + #[doc = ""] + pub unsafe fn destroy_sampler( + &self, + device: Device, + sampler: Sampler, + p_allocator: *const AllocationCallbacks, + ) -> c_void { + (self.destroy_sampler)(device, sampler, p_allocator) + } + #[doc = ""] + pub unsafe fn create_descriptor_set_layout( + &self, + device: Device, + p_create_info: *const DescriptorSetLayoutCreateInfo, + p_allocator: *const AllocationCallbacks, + p_set_layout: *mut DescriptorSetLayout, + ) -> Result { + (self.create_descriptor_set_layout)(device, p_create_info, p_allocator, p_set_layout) + } + #[doc = ""] + pub unsafe fn destroy_descriptor_set_layout( + &self, + device: Device, + descriptor_set_layout: DescriptorSetLayout, + p_allocator: *const AllocationCallbacks, + ) -> c_void { + 
(self.destroy_descriptor_set_layout)(device, descriptor_set_layout, p_allocator) + } + #[doc = ""] + pub unsafe fn create_descriptor_pool( + &self, + device: Device, + p_create_info: *const DescriptorPoolCreateInfo, + p_allocator: *const AllocationCallbacks, + p_descriptor_pool: *mut DescriptorPool, + ) -> Result { + (self.create_descriptor_pool)(device, p_create_info, p_allocator, p_descriptor_pool) + } + #[doc = ""] + pub unsafe fn destroy_descriptor_pool( + &self, + device: Device, + descriptor_pool: DescriptorPool, + p_allocator: *const AllocationCallbacks, + ) -> c_void { + (self.destroy_descriptor_pool)(device, descriptor_pool, p_allocator) + } + #[doc = ""] + pub unsafe fn reset_descriptor_pool( + &self, + device: Device, + descriptor_pool: DescriptorPool, + flags: DescriptorPoolResetFlags, + ) -> Result { + (self.reset_descriptor_pool)(device, descriptor_pool, flags) + } + #[doc = ""] + pub unsafe fn allocate_descriptor_sets( + &self, + device: Device, + p_allocate_info: *const DescriptorSetAllocateInfo, + p_descriptor_sets: *mut DescriptorSet, + ) -> Result { + (self.allocate_descriptor_sets)(device, p_allocate_info, p_descriptor_sets) + } + #[doc = ""] + pub unsafe fn free_descriptor_sets( + &self, + device: Device, + descriptor_pool: DescriptorPool, + descriptor_set_count: u32, + p_descriptor_sets: *const DescriptorSet, + ) -> Result { + (self.free_descriptor_sets)( + device, + descriptor_pool, + descriptor_set_count, + p_descriptor_sets, + ) + } + #[doc = ""] + pub unsafe fn update_descriptor_sets( + &self, + device: Device, + descriptor_write_count: u32, + p_descriptor_writes: *const WriteDescriptorSet, + descriptor_copy_count: u32, + p_descriptor_copies: *const CopyDescriptorSet, + ) -> c_void { + (self.update_descriptor_sets)( + device, + descriptor_write_count, + p_descriptor_writes, + descriptor_copy_count, + p_descriptor_copies, + ) + } + #[doc = ""] + pub unsafe fn create_framebuffer( + &self, + device: Device, + p_create_info: *const FramebufferCreateInfo, + p_allocator: *const AllocationCallbacks, + p_framebuffer: *mut Framebuffer, + ) -> Result { + (self.create_framebuffer)(device, p_create_info, p_allocator, p_framebuffer) + } + #[doc = ""] + pub unsafe fn destroy_framebuffer( + &self, + device: Device, + framebuffer: Framebuffer, + p_allocator: *const AllocationCallbacks, + ) -> c_void { + (self.destroy_framebuffer)(device, framebuffer, p_allocator) + } + #[doc = ""] + pub unsafe fn create_render_pass( + &self, + device: Device, + p_create_info: *const RenderPassCreateInfo, + p_allocator: *const AllocationCallbacks, + p_render_pass: *mut RenderPass, + ) -> Result { + (self.create_render_pass)(device, p_create_info, p_allocator, p_render_pass) + } + #[doc = ""] + pub unsafe fn destroy_render_pass( + &self, + device: Device, + render_pass: RenderPass, + p_allocator: *const AllocationCallbacks, + ) -> c_void { + (self.destroy_render_pass)(device, render_pass, p_allocator) + } + #[doc = ""] + pub unsafe fn get_render_area_granularity( + &self, + device: Device, + render_pass: RenderPass, + p_granularity: *mut Extent2D, + ) -> c_void { + (self.get_render_area_granularity)(device, render_pass, p_granularity) + } + #[doc = ""] + pub unsafe fn create_command_pool( + &self, + device: Device, + p_create_info: *const CommandPoolCreateInfo, + p_allocator: *const AllocationCallbacks, + p_command_pool: *mut CommandPool, + ) -> Result { + (self.create_command_pool)(device, p_create_info, p_allocator, p_command_pool) + } + #[doc = ""] + pub unsafe fn destroy_command_pool( + &self, 
+ device: Device, + command_pool: CommandPool, + p_allocator: *const AllocationCallbacks, + ) -> c_void { + (self.destroy_command_pool)(device, command_pool, p_allocator) + } + #[doc = ""] + pub unsafe fn reset_command_pool( + &self, + device: Device, + command_pool: CommandPool, + flags: CommandPoolResetFlags, + ) -> Result { + (self.reset_command_pool)(device, command_pool, flags) + } + #[doc = ""] + pub unsafe fn allocate_command_buffers( + &self, + device: Device, + p_allocate_info: *const CommandBufferAllocateInfo, + p_command_buffers: *mut CommandBuffer, + ) -> Result { + (self.allocate_command_buffers)(device, p_allocate_info, p_command_buffers) + } + #[doc = ""] + pub unsafe fn free_command_buffers( + &self, + device: Device, + command_pool: CommandPool, + command_buffer_count: u32, + p_command_buffers: *const CommandBuffer, + ) -> c_void { + (self.free_command_buffers)( + device, + command_pool, + command_buffer_count, + p_command_buffers, + ) + } + #[doc = ""] + pub unsafe fn begin_command_buffer( + &self, + command_buffer: CommandBuffer, + p_begin_info: *const CommandBufferBeginInfo, + ) -> Result { + (self.begin_command_buffer)(command_buffer, p_begin_info) + } + #[doc = ""] + pub unsafe fn end_command_buffer(&self, command_buffer: CommandBuffer) -> Result { + (self.end_command_buffer)(command_buffer) + } + #[doc = ""] + pub unsafe fn reset_command_buffer( + &self, + command_buffer: CommandBuffer, + flags: CommandBufferResetFlags, + ) -> Result { + (self.reset_command_buffer)(command_buffer, flags) + } + #[doc = ""] + pub unsafe fn cmd_bind_pipeline( + &self, + command_buffer: CommandBuffer, + pipeline_bind_point: PipelineBindPoint, + pipeline: Pipeline, + ) -> c_void { + (self.cmd_bind_pipeline)(command_buffer, pipeline_bind_point, pipeline) + } + #[doc = ""] + pub unsafe fn cmd_set_viewport( + &self, + command_buffer: CommandBuffer, + first_viewport: u32, + viewport_count: u32, + p_viewports: *const Viewport, + ) -> c_void { + (self.cmd_set_viewport)(command_buffer, first_viewport, viewport_count, p_viewports) + } + #[doc = ""] + pub unsafe fn cmd_set_scissor( + &self, + command_buffer: CommandBuffer, + first_scissor: u32, + scissor_count: u32, + p_scissors: *const Rect2D, + ) -> c_void { + (self.cmd_set_scissor)(command_buffer, first_scissor, scissor_count, p_scissors) + } + #[doc = ""] + pub unsafe fn cmd_set_line_width( + &self, + command_buffer: CommandBuffer, + line_width: f32, + ) -> c_void { + (self.cmd_set_line_width)(command_buffer, line_width) + } + #[doc = ""] + pub unsafe fn cmd_set_depth_bias( + &self, + command_buffer: CommandBuffer, + depth_bias_constant_factor: f32, + depth_bias_clamp: f32, + depth_bias_slope_factor: f32, + ) -> c_void { + (self.cmd_set_depth_bias)( + command_buffer, + depth_bias_constant_factor, + depth_bias_clamp, + depth_bias_slope_factor, + ) + } + #[doc = ""] + pub unsafe fn cmd_set_blend_constants( + &self, + command_buffer: CommandBuffer, + blend_constants: &[f32; 4], + ) -> c_void { + (self.cmd_set_blend_constants)(command_buffer, blend_constants) + } + #[doc = ""] + pub unsafe fn cmd_set_depth_bounds( + &self, + command_buffer: CommandBuffer, + min_depth_bounds: f32, + max_depth_bounds: f32, + ) -> c_void { + (self.cmd_set_depth_bounds)(command_buffer, min_depth_bounds, max_depth_bounds) + } + #[doc = ""] + pub unsafe fn cmd_set_stencil_compare_mask( + &self, + command_buffer: CommandBuffer, + face_mask: StencilFaceFlags, + compare_mask: u32, + ) -> c_void { + (self.cmd_set_stencil_compare_mask)(command_buffer, face_mask, 
compare_mask) + } + #[doc = ""] + pub unsafe fn cmd_set_stencil_write_mask( + &self, + command_buffer: CommandBuffer, + face_mask: StencilFaceFlags, + write_mask: u32, + ) -> c_void { + (self.cmd_set_stencil_write_mask)(command_buffer, face_mask, write_mask) + } + #[doc = ""] + pub unsafe fn cmd_set_stencil_reference( + &self, + command_buffer: CommandBuffer, + face_mask: StencilFaceFlags, + reference: u32, + ) -> c_void { + (self.cmd_set_stencil_reference)(command_buffer, face_mask, reference) + } + #[doc = ""] + pub unsafe fn cmd_bind_descriptor_sets( + &self, + command_buffer: CommandBuffer, + pipeline_bind_point: PipelineBindPoint, + layout: PipelineLayout, + first_set: u32, + descriptor_set_count: u32, + p_descriptor_sets: *const DescriptorSet, + dynamic_offset_count: u32, + p_dynamic_offsets: *const u32, + ) -> c_void { + (self.cmd_bind_descriptor_sets)( + command_buffer, + pipeline_bind_point, + layout, + first_set, + descriptor_set_count, + p_descriptor_sets, + dynamic_offset_count, + p_dynamic_offsets, + ) + } + #[doc = ""] + pub unsafe fn cmd_bind_index_buffer( + &self, + command_buffer: CommandBuffer, + buffer: Buffer, + offset: DeviceSize, + index_type: IndexType, + ) -> c_void { + (self.cmd_bind_index_buffer)(command_buffer, buffer, offset, index_type) + } + #[doc = ""] + pub unsafe fn cmd_bind_vertex_buffers( + &self, + command_buffer: CommandBuffer, + first_binding: u32, + binding_count: u32, + p_buffers: *const Buffer, + p_offsets: *const DeviceSize, + ) -> c_void { + (self.cmd_bind_vertex_buffers)( + command_buffer, + first_binding, + binding_count, + p_buffers, + p_offsets, + ) + } + #[doc = ""] + pub unsafe fn cmd_draw( + &self, + command_buffer: CommandBuffer, + vertex_count: u32, + instance_count: u32, + first_vertex: u32, + first_instance: u32, + ) -> c_void { + (self.cmd_draw)( + command_buffer, + vertex_count, + instance_count, + first_vertex, + first_instance, + ) + } + #[doc = ""] + pub unsafe fn cmd_draw_indexed( + &self, + command_buffer: CommandBuffer, + index_count: u32, + instance_count: u32, + first_index: u32, + vertex_offset: i32, + first_instance: u32, + ) -> c_void { + (self.cmd_draw_indexed)( + command_buffer, + index_count, + instance_count, + first_index, + vertex_offset, + first_instance, + ) + } + #[doc = ""] + pub unsafe fn cmd_draw_indirect( + &self, + command_buffer: CommandBuffer, + buffer: Buffer, + offset: DeviceSize, + draw_count: u32, + stride: u32, + ) -> c_void { + (self.cmd_draw_indirect)(command_buffer, buffer, offset, draw_count, stride) + } + #[doc = ""] + pub unsafe fn cmd_draw_indexed_indirect( + &self, + command_buffer: CommandBuffer, + buffer: Buffer, + offset: DeviceSize, + draw_count: u32, + stride: u32, + ) -> c_void { + (self.cmd_draw_indexed_indirect)(command_buffer, buffer, offset, draw_count, stride) + } + #[doc = ""] + pub unsafe fn cmd_dispatch( + &self, + command_buffer: CommandBuffer, + group_count_x: u32, + group_count_y: u32, + group_count_z: u32, + ) -> c_void { + (self.cmd_dispatch)(command_buffer, group_count_x, group_count_y, group_count_z) + } + #[doc = ""] + pub unsafe fn cmd_dispatch_indirect( + &self, + command_buffer: CommandBuffer, + buffer: Buffer, + offset: DeviceSize, + ) -> c_void { + (self.cmd_dispatch_indirect)(command_buffer, buffer, offset) + } + #[doc = ""] + pub unsafe fn cmd_copy_buffer( + &self, + command_buffer: CommandBuffer, + src_buffer: Buffer, + dst_buffer: Buffer, + region_count: u32, + p_regions: *const BufferCopy, + ) -> c_void { + (self.cmd_copy_buffer)( + command_buffer, + src_buffer, + 
dst_buffer, + region_count, + p_regions, + ) + } + #[doc = ""] + pub unsafe fn cmd_copy_image( + &self, + command_buffer: CommandBuffer, + src_image: Image, + src_image_layout: ImageLayout, + dst_image: Image, + dst_image_layout: ImageLayout, + region_count: u32, + p_regions: *const ImageCopy, + ) -> c_void { + (self.cmd_copy_image)( + command_buffer, + src_image, + src_image_layout, + dst_image, + dst_image_layout, + region_count, + p_regions, + ) + } + #[doc = ""] + pub unsafe fn cmd_blit_image( + &self, + command_buffer: CommandBuffer, + src_image: Image, + src_image_layout: ImageLayout, + dst_image: Image, + dst_image_layout: ImageLayout, + region_count: u32, + p_regions: *const ImageBlit, + filter: Filter, + ) -> c_void { + (self.cmd_blit_image)( + command_buffer, + src_image, + src_image_layout, + dst_image, + dst_image_layout, + region_count, + p_regions, + filter, + ) + } + #[doc = ""] + pub unsafe fn cmd_copy_buffer_to_image( + &self, + command_buffer: CommandBuffer, + src_buffer: Buffer, + dst_image: Image, + dst_image_layout: ImageLayout, + region_count: u32, + p_regions: *const BufferImageCopy, + ) -> c_void { + (self.cmd_copy_buffer_to_image)( + command_buffer, + src_buffer, + dst_image, + dst_image_layout, + region_count, + p_regions, + ) + } + #[doc = ""] + pub unsafe fn cmd_copy_image_to_buffer( + &self, + command_buffer: CommandBuffer, + src_image: Image, + src_image_layout: ImageLayout, + dst_buffer: Buffer, + region_count: u32, + p_regions: *const BufferImageCopy, + ) -> c_void { + (self.cmd_copy_image_to_buffer)( + command_buffer, + src_image, + src_image_layout, + dst_buffer, + region_count, + p_regions, + ) + } + #[doc = ""] + pub unsafe fn cmd_update_buffer( + &self, + command_buffer: CommandBuffer, + dst_buffer: Buffer, + dst_offset: DeviceSize, + data_size: DeviceSize, + p_data: *const c_void, + ) -> c_void { + (self.cmd_update_buffer)(command_buffer, dst_buffer, dst_offset, data_size, p_data) + } + #[doc = ""] + pub unsafe fn cmd_fill_buffer( + &self, + command_buffer: CommandBuffer, + dst_buffer: Buffer, + dst_offset: DeviceSize, + size: DeviceSize, + data: u32, + ) -> c_void { + (self.cmd_fill_buffer)(command_buffer, dst_buffer, dst_offset, size, data) + } + #[doc = ""] + pub unsafe fn cmd_clear_color_image( + &self, + command_buffer: CommandBuffer, + image: Image, + image_layout: ImageLayout, + p_color: *const ClearColorValue, + range_count: u32, + p_ranges: *const ImageSubresourceRange, + ) -> c_void { + (self.cmd_clear_color_image)( + command_buffer, + image, + image_layout, + p_color, + range_count, + p_ranges, + ) + } + #[doc = ""] + pub unsafe fn cmd_clear_depth_stencil_image( + &self, + command_buffer: CommandBuffer, + image: Image, + image_layout: ImageLayout, + p_depth_stencil: *const ClearDepthStencilValue, + range_count: u32, + p_ranges: *const ImageSubresourceRange, + ) -> c_void { + (self.cmd_clear_depth_stencil_image)( + command_buffer, + image, + image_layout, + p_depth_stencil, + range_count, + p_ranges, + ) + } + #[doc = ""] + pub unsafe fn cmd_clear_attachments( + &self, + command_buffer: CommandBuffer, + attachment_count: u32, + p_attachments: *const ClearAttachment, + rect_count: u32, + p_rects: *const ClearRect, + ) -> c_void { + (self.cmd_clear_attachments)( + command_buffer, + attachment_count, + p_attachments, + rect_count, + p_rects, + ) + } + #[doc = ""] + pub unsafe fn cmd_resolve_image( + &self, + command_buffer: CommandBuffer, + src_image: Image, + src_image_layout: ImageLayout, + dst_image: Image, + dst_image_layout: ImageLayout, + 
region_count: u32, + p_regions: *const ImageResolve, + ) -> c_void { + (self.cmd_resolve_image)( + command_buffer, + src_image, + src_image_layout, + dst_image, + dst_image_layout, + region_count, + p_regions, + ) + } + #[doc = ""] + pub unsafe fn cmd_set_event( + &self, + command_buffer: CommandBuffer, + event: Event, + stage_mask: PipelineStageFlags, + ) -> c_void { + (self.cmd_set_event)(command_buffer, event, stage_mask) + } + #[doc = ""] + pub unsafe fn cmd_reset_event( + &self, + command_buffer: CommandBuffer, + event: Event, + stage_mask: PipelineStageFlags, + ) -> c_void { + (self.cmd_reset_event)(command_buffer, event, stage_mask) + } + #[doc = ""] + pub unsafe fn cmd_wait_events( + &self, + command_buffer: CommandBuffer, + event_count: u32, + p_events: *const Event, + src_stage_mask: PipelineStageFlags, + dst_stage_mask: PipelineStageFlags, + memory_barrier_count: u32, + p_memory_barriers: *const MemoryBarrier, + buffer_memory_barrier_count: u32, + p_buffer_memory_barriers: *const BufferMemoryBarrier, + image_memory_barrier_count: u32, + p_image_memory_barriers: *const ImageMemoryBarrier, + ) -> c_void { + (self.cmd_wait_events)( + command_buffer, + event_count, + p_events, + src_stage_mask, + dst_stage_mask, + memory_barrier_count, + p_memory_barriers, + buffer_memory_barrier_count, + p_buffer_memory_barriers, + image_memory_barrier_count, + p_image_memory_barriers, + ) + } + #[doc = ""] + pub unsafe fn cmd_pipeline_barrier( + &self, + command_buffer: CommandBuffer, + src_stage_mask: PipelineStageFlags, + dst_stage_mask: PipelineStageFlags, + dependency_flags: DependencyFlags, + memory_barrier_count: u32, + p_memory_barriers: *const MemoryBarrier, + buffer_memory_barrier_count: u32, + p_buffer_memory_barriers: *const BufferMemoryBarrier, + image_memory_barrier_count: u32, + p_image_memory_barriers: *const ImageMemoryBarrier, + ) -> c_void { + (self.cmd_pipeline_barrier)( + command_buffer, + src_stage_mask, + dst_stage_mask, + dependency_flags, + memory_barrier_count, + p_memory_barriers, + buffer_memory_barrier_count, + p_buffer_memory_barriers, + image_memory_barrier_count, + p_image_memory_barriers, + ) + } + #[doc = ""] + pub unsafe fn cmd_begin_query( + &self, + command_buffer: CommandBuffer, + query_pool: QueryPool, + query: u32, + flags: QueryControlFlags, + ) -> c_void { + (self.cmd_begin_query)(command_buffer, query_pool, query, flags) + } + #[doc = ""] + pub unsafe fn cmd_end_query( + &self, + command_buffer: CommandBuffer, + query_pool: QueryPool, + query: u32, + ) -> c_void { + (self.cmd_end_query)(command_buffer, query_pool, query) + } + #[doc = ""] + pub unsafe fn cmd_reset_query_pool( + &self, + command_buffer: CommandBuffer, + query_pool: QueryPool, + first_query: u32, + query_count: u32, + ) -> c_void { + (self.cmd_reset_query_pool)(command_buffer, query_pool, first_query, query_count) + } + #[doc = ""] + pub unsafe fn cmd_write_timestamp( + &self, + command_buffer: CommandBuffer, + pipeline_stage: PipelineStageFlags, + query_pool: QueryPool, + query: u32, + ) -> c_void { + (self.cmd_write_timestamp)(command_buffer, pipeline_stage, query_pool, query) + } + #[doc = ""] + pub unsafe fn cmd_copy_query_pool_results( + &self, + command_buffer: CommandBuffer, + query_pool: QueryPool, + first_query: u32, + query_count: u32, + dst_buffer: Buffer, + dst_offset: DeviceSize, + stride: DeviceSize, + flags: QueryResultFlags, + ) -> c_void { + (self.cmd_copy_query_pool_results)( + command_buffer, + query_pool, + first_query, + query_count, + dst_buffer, + dst_offset, + stride, 
+ flags, + ) + } + #[doc = ""] + pub unsafe fn cmd_push_constants( + &self, + command_buffer: CommandBuffer, + layout: PipelineLayout, + stage_flags: ShaderStageFlags, + offset: u32, + size: u32, + p_values: *const c_void, + ) -> c_void { + (self.cmd_push_constants)(command_buffer, layout, stage_flags, offset, size, p_values) + } + #[doc = ""] + pub unsafe fn cmd_begin_render_pass( + &self, + command_buffer: CommandBuffer, + p_render_pass_begin: *const RenderPassBeginInfo, + contents: SubpassContents, + ) -> c_void { + (self.cmd_begin_render_pass)(command_buffer, p_render_pass_begin, contents) + } + #[doc = ""] + pub unsafe fn cmd_next_subpass( + &self, + command_buffer: CommandBuffer, + contents: SubpassContents, + ) -> c_void { + (self.cmd_next_subpass)(command_buffer, contents) + } + #[doc = ""] + pub unsafe fn cmd_end_render_pass(&self, command_buffer: CommandBuffer) -> c_void { + (self.cmd_end_render_pass)(command_buffer) + } + #[doc = ""] + pub unsafe fn cmd_execute_commands( + &self, + command_buffer: CommandBuffer, + command_buffer_count: u32, + p_command_buffers: *const CommandBuffer, + ) -> c_void { + (self.cmd_execute_commands)(command_buffer, command_buffer_count, p_command_buffers) + } +} +#[allow(non_camel_case_types)] +pub type PFN_vkEnumerateInstanceVersion = extern "system" fn(p_api_version: *mut u32) -> Result; +pub struct EntryFnV1_1 { + pub enumerate_instance_version: extern "system" fn(p_api_version: *mut u32) -> Result, +} +unsafe impl Send for EntryFnV1_1 {} +unsafe impl Sync for EntryFnV1_1 {} +impl ::std::clone::Clone for EntryFnV1_1 { + fn clone(&self) -> Self { + EntryFnV1_1 { + enumerate_instance_version: self.enumerate_instance_version, + } + } +} +impl EntryFnV1_1 { + pub fn load<F>(mut _f: F) -> Self + where + F: FnMut(&::std::ffi::CStr) -> *const c_void, + { + EntryFnV1_1 { + enumerate_instance_version: unsafe { + extern "system" fn enumerate_instance_version(_p_api_version: *mut u32) -> Result { + panic!(concat!( + "Unable to load ", + stringify!(enumerate_instance_version) + )) + } + let raw_name = stringify!(vkEnumerateInstanceVersion); + let cname = ::std::ffi::CString::new(raw_name).unwrap(); + let val = _f(&cname); + if val.is_null() { + enumerate_instance_version + } else { + ::std::mem::transmute(val) + } + }, + } + } + #[doc = ""] + pub unsafe fn enumerate_instance_version(&self, p_api_version: *mut u32) -> Result { + (self.enumerate_instance_version)(p_api_version) + } +} +#[allow(non_camel_case_types)] +pub type PFN_vkEnumeratePhysicalDeviceGroups = extern "system" fn( + instance: Instance, + p_physical_device_group_count: *mut u32, + p_physical_device_group_properties: *mut PhysicalDeviceGroupProperties, +) -> Result; +#[allow(non_camel_case_types)] +pub type PFN_vkGetPhysicalDeviceFeatures2 = extern "system" fn( + physical_device: PhysicalDevice, + p_features: *mut PhysicalDeviceFeatures2, +) -> c_void; +#[allow(non_camel_case_types)] +pub type PFN_vkGetPhysicalDeviceProperties2 = extern "system" fn( + physical_device: PhysicalDevice, + p_properties: *mut PhysicalDeviceProperties2, +) -> c_void; +#[allow(non_camel_case_types)] +pub type PFN_vkGetPhysicalDeviceFormatProperties2 = extern "system" fn( + physical_device: PhysicalDevice, + format: Format, + p_format_properties: *mut FormatProperties2, +) -> c_void; +#[allow(non_camel_case_types)] +pub type PFN_vkGetPhysicalDeviceImageFormatProperties2 = extern "system" fn( + physical_device: PhysicalDevice, + p_image_format_info: *const PhysicalDeviceImageFormatInfo2, + p_image_format_properties: *mut
ImageFormatProperties2, +) -> Result; +#[allow(non_camel_case_types)] +pub type PFN_vkGetPhysicalDeviceQueueFamilyProperties2 = extern "system" fn( + physical_device: PhysicalDevice, + p_queue_family_property_count: *mut u32, + p_queue_family_properties: *mut QueueFamilyProperties2, +) -> c_void; +#[allow(non_camel_case_types)] +pub type PFN_vkGetPhysicalDeviceMemoryProperties2 = extern "system" fn( + physical_device: PhysicalDevice, + p_memory_properties: *mut PhysicalDeviceMemoryProperties2, +) -> c_void; +#[allow(non_camel_case_types)] +pub type PFN_vkGetPhysicalDeviceSparseImageFormatProperties2 = extern "system" fn( + physical_device: PhysicalDevice, + p_format_info: *const PhysicalDeviceSparseImageFormatInfo2, + p_property_count: *mut u32, + p_properties: *mut SparseImageFormatProperties2, +) -> c_void; +#[allow(non_camel_case_types)] +pub type PFN_vkGetPhysicalDeviceExternalBufferProperties = extern "system" fn( + physical_device: PhysicalDevice, + p_external_buffer_info: *const PhysicalDeviceExternalBufferInfo, + p_external_buffer_properties: *mut ExternalBufferProperties, +) -> c_void; +#[allow(non_camel_case_types)] +pub type PFN_vkGetPhysicalDeviceExternalFenceProperties = extern "system" fn( + physical_device: PhysicalDevice, + p_external_fence_info: *const PhysicalDeviceExternalFenceInfo, + p_external_fence_properties: *mut ExternalFenceProperties, +) -> c_void; +#[allow(non_camel_case_types)] +pub type PFN_vkGetPhysicalDeviceExternalSemaphoreProperties = extern "system" fn( + physical_device: PhysicalDevice, + p_external_semaphore_info: *const PhysicalDeviceExternalSemaphoreInfo, + p_external_semaphore_properties: *mut ExternalSemaphoreProperties, +) -> c_void; +pub struct InstanceFnV1_1 { + pub enumerate_physical_device_groups: extern "system" fn( + instance: Instance, + p_physical_device_group_count: *mut u32, + p_physical_device_group_properties: *mut PhysicalDeviceGroupProperties, + ) -> Result, + pub get_physical_device_features2: extern "system" fn( + physical_device: PhysicalDevice, + p_features: *mut PhysicalDeviceFeatures2, + ) -> c_void, + pub get_physical_device_properties2: extern "system" fn( + physical_device: PhysicalDevice, + p_properties: *mut PhysicalDeviceProperties2, + ) -> c_void, + pub get_physical_device_format_properties2: extern "system" fn( + physical_device: PhysicalDevice, + format: Format, + p_format_properties: *mut FormatProperties2, + ) -> c_void, + pub get_physical_device_image_format_properties2: extern "system" fn( + physical_device: PhysicalDevice, + p_image_format_info: *const PhysicalDeviceImageFormatInfo2, + p_image_format_properties: *mut ImageFormatProperties2, + ) -> Result, + pub get_physical_device_queue_family_properties2: extern "system" fn( + physical_device: PhysicalDevice, + p_queue_family_property_count: *mut u32, + p_queue_family_properties: *mut QueueFamilyProperties2, + ) -> c_void, + pub get_physical_device_memory_properties2: extern "system" fn( + physical_device: PhysicalDevice, + p_memory_properties: *mut PhysicalDeviceMemoryProperties2, + ) -> c_void, + pub get_physical_device_sparse_image_format_properties2: extern "system" fn( + physical_device: PhysicalDevice, + p_format_info: *const PhysicalDeviceSparseImageFormatInfo2, + p_property_count: *mut u32, + p_properties: *mut SparseImageFormatProperties2, + ) -> c_void, + pub get_physical_device_external_buffer_properties: extern "system" fn( + physical_device: PhysicalDevice, + p_external_buffer_info: *const PhysicalDeviceExternalBufferInfo, + 
p_external_buffer_properties: *mut ExternalBufferProperties, + ) -> c_void, + pub get_physical_device_external_fence_properties: extern "system" fn( + physical_device: PhysicalDevice, + p_external_fence_info: *const PhysicalDeviceExternalFenceInfo, + p_external_fence_properties: *mut ExternalFenceProperties, + ) -> c_void, + pub get_physical_device_external_semaphore_properties: extern "system" fn( + physical_device: PhysicalDevice, + p_external_semaphore_info: *const PhysicalDeviceExternalSemaphoreInfo, + p_external_semaphore_properties: *mut ExternalSemaphoreProperties, + ) -> c_void, +} +unsafe impl Send for InstanceFnV1_1 {} +unsafe impl Sync for InstanceFnV1_1 {} +impl ::std::clone::Clone for InstanceFnV1_1 { + fn clone(&self) -> Self { + InstanceFnV1_1 { + enumerate_physical_device_groups: self.enumerate_physical_device_groups, + get_physical_device_features2: self.get_physical_device_features2, + get_physical_device_properties2: self.get_physical_device_properties2, + get_physical_device_format_properties2: self.get_physical_device_format_properties2, + get_physical_device_image_format_properties2: self + .get_physical_device_image_format_properties2, + get_physical_device_queue_family_properties2: self + .get_physical_device_queue_family_properties2, + get_physical_device_memory_properties2: self.get_physical_device_memory_properties2, + get_physical_device_sparse_image_format_properties2: self + .get_physical_device_sparse_image_format_properties2, + get_physical_device_external_buffer_properties: self + .get_physical_device_external_buffer_properties, + get_physical_device_external_fence_properties: self + .get_physical_device_external_fence_properties, + get_physical_device_external_semaphore_properties: self + .get_physical_device_external_semaphore_properties, + } + } +} +impl InstanceFnV1_1 { + pub fn load<F>(mut _f: F) -> Self + where + F: FnMut(&::std::ffi::CStr) -> *const c_void, + { + InstanceFnV1_1 { + enumerate_physical_device_groups: unsafe { + extern "system" fn enumerate_physical_device_groups( + _instance: Instance, + _p_physical_device_group_count: *mut u32, + _p_physical_device_group_properties: *mut PhysicalDeviceGroupProperties, + ) -> Result { + panic!(concat!( + "Unable to load ", + stringify!(enumerate_physical_device_groups) + )) + } + let raw_name = stringify!(vkEnumeratePhysicalDeviceGroups); + let cname = ::std::ffi::CString::new(raw_name).unwrap(); + let val = _f(&cname); + if val.is_null() { + enumerate_physical_device_groups + } else { + ::std::mem::transmute(val) + } + }, + get_physical_device_features2: unsafe { + extern "system" fn get_physical_device_features2( + _physical_device: PhysicalDevice, + _p_features: *mut PhysicalDeviceFeatures2, + ) -> c_void { + panic!(concat!( + "Unable to load ", + stringify!(get_physical_device_features2) + )) + } + let raw_name = stringify!(vkGetPhysicalDeviceFeatures2); + let cname = ::std::ffi::CString::new(raw_name).unwrap(); + let val = _f(&cname); + if val.is_null() { + get_physical_device_features2 + } else { + ::std::mem::transmute(val) + } + }, + get_physical_device_properties2: unsafe { + extern "system" fn get_physical_device_properties2( + _physical_device: PhysicalDevice, + _p_properties: *mut PhysicalDeviceProperties2, + ) -> c_void { + panic!(concat!( + "Unable to load ", + stringify!(get_physical_device_properties2) + )) + } + let raw_name = stringify!(vkGetPhysicalDeviceProperties2); + let cname = ::std::ffi::CString::new(raw_name).unwrap(); + let val = _f(&cname); + if val.is_null() { +
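+ // [Editorial note] This is the loader pattern generated for every entry
+ // point in these function tables: `_f` resolves the symbol by name, and
+ // if it returns null the field keeps the local `extern "system"` stub
+ // that panics with "Unable to load ..."; otherwise the raw pointer is
+ // transmuted into the typed function pointer.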
get_physical_device_properties2 + } else { + ::std::mem::transmute(val) + } + }, + get_physical_device_format_properties2: unsafe { + extern "system" fn get_physical_device_format_properties2( + _physical_device: PhysicalDevice, + _format: Format, + _p_format_properties: *mut FormatProperties2, + ) -> c_void { + panic!(concat!( + "Unable to load ", + stringify!(get_physical_device_format_properties2) + )) + } + let raw_name = stringify!(vkGetPhysicalDeviceFormatProperties2); + let cname = ::std::ffi::CString::new(raw_name).unwrap(); + let val = _f(&cname); + if val.is_null() { + get_physical_device_format_properties2 + } else { + ::std::mem::transmute(val) + } + }, + get_physical_device_image_format_properties2: unsafe { + extern "system" fn get_physical_device_image_format_properties2( + _physical_device: PhysicalDevice, + _p_image_format_info: *const PhysicalDeviceImageFormatInfo2, + _p_image_format_properties: *mut ImageFormatProperties2, + ) -> Result { + panic!(concat!( + "Unable to load ", + stringify!(get_physical_device_image_format_properties2) + )) + } + let raw_name = stringify!(vkGetPhysicalDeviceImageFormatProperties2); + let cname = ::std::ffi::CString::new(raw_name).unwrap(); + let val = _f(&cname); + if val.is_null() { + get_physical_device_image_format_properties2 + } else { + ::std::mem::transmute(val) + } + }, + get_physical_device_queue_family_properties2: unsafe { + extern "system" fn get_physical_device_queue_family_properties2( + _physical_device: PhysicalDevice, + _p_queue_family_property_count: *mut u32, + _p_queue_family_properties: *mut QueueFamilyProperties2, + ) -> c_void { + panic!(concat!( + "Unable to load ", + stringify!(get_physical_device_queue_family_properties2) + )) + } + let raw_name = stringify!(vkGetPhysicalDeviceQueueFamilyProperties2); + let cname = ::std::ffi::CString::new(raw_name).unwrap(); + let val = _f(&cname); + if val.is_null() { + get_physical_device_queue_family_properties2 + } else { + ::std::mem::transmute(val) + } + }, + get_physical_device_memory_properties2: unsafe { + extern "system" fn get_physical_device_memory_properties2( + _physical_device: PhysicalDevice, + _p_memory_properties: *mut PhysicalDeviceMemoryProperties2, + ) -> c_void { + panic!(concat!( + "Unable to load ", + stringify!(get_physical_device_memory_properties2) + )) + } + let raw_name = stringify!(vkGetPhysicalDeviceMemoryProperties2); + let cname = ::std::ffi::CString::new(raw_name).unwrap(); + let val = _f(&cname); + if val.is_null() { + get_physical_device_memory_properties2 + } else { + ::std::mem::transmute(val) + } + }, + get_physical_device_sparse_image_format_properties2: unsafe { + extern "system" fn get_physical_device_sparse_image_format_properties2( + _physical_device: PhysicalDevice, + _p_format_info: *const PhysicalDeviceSparseImageFormatInfo2, + _p_property_count: *mut u32, + _p_properties: *mut SparseImageFormatProperties2, + ) -> c_void { + panic!(concat!( + "Unable to load ", + stringify!(get_physical_device_sparse_image_format_properties2) + )) + } + let raw_name = stringify!(vkGetPhysicalDeviceSparseImageFormatProperties2); + let cname = ::std::ffi::CString::new(raw_name).unwrap(); + let val = _f(&cname); + if val.is_null() { + get_physical_device_sparse_image_format_properties2 + } else { + ::std::mem::transmute(val) + } + }, + get_physical_device_external_buffer_properties: unsafe { + extern "system" fn get_physical_device_external_buffer_properties( + _physical_device: PhysicalDevice, + _p_external_buffer_info: *const 
PhysicalDeviceExternalBufferInfo, + _p_external_buffer_properties: *mut ExternalBufferProperties, + ) -> c_void { + panic!(concat!( + "Unable to load ", + stringify!(get_physical_device_external_buffer_properties) + )) + } + let raw_name = stringify!(vkGetPhysicalDeviceExternalBufferProperties); + let cname = ::std::ffi::CString::new(raw_name).unwrap(); + let val = _f(&cname); + if val.is_null() { + get_physical_device_external_buffer_properties + } else { + ::std::mem::transmute(val) + } + }, + get_physical_device_external_fence_properties: unsafe { + extern "system" fn get_physical_device_external_fence_properties( + _physical_device: PhysicalDevice, + _p_external_fence_info: *const PhysicalDeviceExternalFenceInfo, + _p_external_fence_properties: *mut ExternalFenceProperties, + ) -> c_void { + panic!(concat!( + "Unable to load ", + stringify!(get_physical_device_external_fence_properties) + )) + } + let raw_name = stringify!(vkGetPhysicalDeviceExternalFenceProperties); + let cname = ::std::ffi::CString::new(raw_name).unwrap(); + let val = _f(&cname); + if val.is_null() { + get_physical_device_external_fence_properties + } else { + ::std::mem::transmute(val) + } + }, + get_physical_device_external_semaphore_properties: unsafe { + extern "system" fn get_physical_device_external_semaphore_properties( + _physical_device: PhysicalDevice, + _p_external_semaphore_info: *const PhysicalDeviceExternalSemaphoreInfo, + _p_external_semaphore_properties: *mut ExternalSemaphoreProperties, + ) -> c_void { + panic!(concat!( + "Unable to load ", + stringify!(get_physical_device_external_semaphore_properties) + )) + } + let raw_name = stringify!(vkGetPhysicalDeviceExternalSemaphoreProperties); + let cname = ::std::ffi::CString::new(raw_name).unwrap(); + let val = _f(&cname); + if val.is_null() { + get_physical_device_external_semaphore_properties + } else { + ::std::mem::transmute(val) + } + }, + } + } + #[doc = ""] + pub unsafe fn enumerate_physical_device_groups( + &self, + instance: Instance, + p_physical_device_group_count: *mut u32, + p_physical_device_group_properties: *mut PhysicalDeviceGroupProperties, + ) -> Result { + (self.enumerate_physical_device_groups)( + instance, + p_physical_device_group_count, + p_physical_device_group_properties, + ) + } + #[doc = ""] + pub unsafe fn get_physical_device_features2( + &self, + physical_device: PhysicalDevice, + p_features: *mut PhysicalDeviceFeatures2, + ) -> c_void { + (self.get_physical_device_features2)(physical_device, p_features) + } + #[doc = ""] + pub unsafe fn get_physical_device_properties2( + &self, + physical_device: PhysicalDevice, + p_properties: *mut PhysicalDeviceProperties2, + ) -> c_void { + (self.get_physical_device_properties2)(physical_device, p_properties) + } + #[doc = ""] + pub unsafe fn get_physical_device_format_properties2( + &self, + physical_device: PhysicalDevice, + format: Format, + p_format_properties: *mut FormatProperties2, + ) -> c_void { + (self.get_physical_device_format_properties2)(physical_device, format, p_format_properties) + } + #[doc = ""] + pub unsafe fn get_physical_device_image_format_properties2( + &self, + physical_device: PhysicalDevice, + p_image_format_info: *const PhysicalDeviceImageFormatInfo2, + p_image_format_properties: *mut ImageFormatProperties2, + ) -> Result { + (self.get_physical_device_image_format_properties2)( + physical_device, + p_image_format_info, + p_image_format_properties, + ) + } + #[doc = ""] + pub unsafe fn get_physical_device_queue_family_properties2( + &self, + physical_device: 
PhysicalDevice, + p_queue_family_property_count: *mut u32, + p_queue_family_properties: *mut QueueFamilyProperties2, + ) -> c_void { + (self.get_physical_device_queue_family_properties2)( + physical_device, + p_queue_family_property_count, + p_queue_family_properties, + ) + } + #[doc = ""] + pub unsafe fn get_physical_device_memory_properties2( + &self, + physical_device: PhysicalDevice, + p_memory_properties: *mut PhysicalDeviceMemoryProperties2, + ) -> c_void { + (self.get_physical_device_memory_properties2)(physical_device, p_memory_properties) + } + #[doc = ""] + pub unsafe fn get_physical_device_sparse_image_format_properties2( + &self, + physical_device: PhysicalDevice, + p_format_info: *const PhysicalDeviceSparseImageFormatInfo2, + p_property_count: *mut u32, + p_properties: *mut SparseImageFormatProperties2, + ) -> c_void { + (self.get_physical_device_sparse_image_format_properties2)( + physical_device, + p_format_info, + p_property_count, + p_properties, + ) + } + #[doc = ""] + pub unsafe fn get_physical_device_external_buffer_properties( + &self, + physical_device: PhysicalDevice, + p_external_buffer_info: *const PhysicalDeviceExternalBufferInfo, + p_external_buffer_properties: *mut ExternalBufferProperties, + ) -> c_void { + (self.get_physical_device_external_buffer_properties)( + physical_device, + p_external_buffer_info, + p_external_buffer_properties, + ) + } + #[doc = ""] + pub unsafe fn get_physical_device_external_fence_properties( + &self, + physical_device: PhysicalDevice, + p_external_fence_info: *const PhysicalDeviceExternalFenceInfo, + p_external_fence_properties: *mut ExternalFenceProperties, + ) -> c_void { + (self.get_physical_device_external_fence_properties)( + physical_device, + p_external_fence_info, + p_external_fence_properties, + ) + } + #[doc = ""] + pub unsafe fn get_physical_device_external_semaphore_properties( + &self, + physical_device: PhysicalDevice, + p_external_semaphore_info: *const PhysicalDeviceExternalSemaphoreInfo, + p_external_semaphore_properties: *mut ExternalSemaphoreProperties, + ) -> c_void { + (self.get_physical_device_external_semaphore_properties)( + physical_device, + p_external_semaphore_info, + p_external_semaphore_properties, + ) + } +} +#[allow(non_camel_case_types)] +pub type PFN_vkBindBufferMemory2 = extern "system" fn( + device: Device, + bind_info_count: u32, + p_bind_infos: *const BindBufferMemoryInfo, +) -> Result; +#[allow(non_camel_case_types)] +pub type PFN_vkBindImageMemory2 = extern "system" fn( + device: Device, + bind_info_count: u32, + p_bind_infos: *const BindImageMemoryInfo, +) -> Result; +#[allow(non_camel_case_types)] +pub type PFN_vkGetDeviceGroupPeerMemoryFeatures = extern "system" fn( + device: Device, + heap_index: u32, + local_device_index: u32, + remote_device_index: u32, + p_peer_memory_features: *mut PeerMemoryFeatureFlags, +) -> c_void; +#[allow(non_camel_case_types)] +pub type PFN_vkCmdSetDeviceMask = + extern "system" fn(command_buffer: CommandBuffer, device_mask: u32) -> c_void; +#[allow(non_camel_case_types)] +pub type PFN_vkCmdDispatchBase = extern "system" fn( + command_buffer: CommandBuffer, + base_group_x: u32, + base_group_y: u32, + base_group_z: u32, + group_count_x: u32, + group_count_y: u32, + group_count_z: u32, +) -> c_void; +#[allow(non_camel_case_types)] +pub type PFN_vkGetImageMemoryRequirements2 = extern "system" fn( + device: Device, + p_info: *const ImageMemoryRequirementsInfo2, + p_memory_requirements: *mut MemoryRequirements2, +) -> c_void; +#[allow(non_camel_case_types)] +pub type 
PFN_vkGetBufferMemoryRequirements2 = extern "system" fn( + device: Device, + p_info: *const BufferMemoryRequirementsInfo2, + p_memory_requirements: *mut MemoryRequirements2, +) -> c_void; +#[allow(non_camel_case_types)] +pub type PFN_vkGetImageSparseMemoryRequirements2 = extern "system" fn( + device: Device, + p_info: *const ImageSparseMemoryRequirementsInfo2, + p_sparse_memory_requirement_count: *mut u32, + p_sparse_memory_requirements: *mut SparseImageMemoryRequirements2, +) -> c_void; +#[allow(non_camel_case_types)] +pub type PFN_vkTrimCommandPool = extern "system" fn( + device: Device, + command_pool: CommandPool, + flags: CommandPoolTrimFlags, +) -> c_void; +#[allow(non_camel_case_types)] +pub type PFN_vkGetDeviceQueue2 = extern "system" fn( + device: Device, + p_queue_info: *const DeviceQueueInfo2, + p_queue: *mut Queue, +) -> c_void; +#[allow(non_camel_case_types)] +pub type PFN_vkCreateSamplerYcbcrConversion = extern "system" fn( + device: Device, + p_create_info: *const SamplerYcbcrConversionCreateInfo, + p_allocator: *const AllocationCallbacks, + p_ycbcr_conversion: *mut SamplerYcbcrConversion, +) -> Result; +#[allow(non_camel_case_types)] +pub type PFN_vkDestroySamplerYcbcrConversion = extern "system" fn( + device: Device, + ycbcr_conversion: SamplerYcbcrConversion, + p_allocator: *const AllocationCallbacks, +) -> c_void; +#[allow(non_camel_case_types)] +pub type PFN_vkCreateDescriptorUpdateTemplate = extern "system" fn( + device: Device, + p_create_info: *const DescriptorUpdateTemplateCreateInfo, + p_allocator: *const AllocationCallbacks, + p_descriptor_update_template: *mut DescriptorUpdateTemplate, +) -> Result; +#[allow(non_camel_case_types)] +pub type PFN_vkDestroyDescriptorUpdateTemplate = extern "system" fn( + device: Device, + descriptor_update_template: DescriptorUpdateTemplate, + p_allocator: *const AllocationCallbacks, +) -> c_void; +#[allow(non_camel_case_types)] +pub type PFN_vkUpdateDescriptorSetWithTemplate = extern "system" fn( + device: Device, + descriptor_set: DescriptorSet, + descriptor_update_template: DescriptorUpdateTemplate, + p_data: *const c_void, +) -> c_void; +#[allow(non_camel_case_types)] +pub type PFN_vkGetDescriptorSetLayoutSupport = extern "system" fn( + device: Device, + p_create_info: *const DescriptorSetLayoutCreateInfo, + p_support: *mut DescriptorSetLayoutSupport, +) -> c_void; +pub struct DeviceFnV1_1 { + pub bind_buffer_memory2: extern "system" fn( + device: Device, + bind_info_count: u32, + p_bind_infos: *const BindBufferMemoryInfo, + ) -> Result, + pub bind_image_memory2: extern "system" fn( + device: Device, + bind_info_count: u32, + p_bind_infos: *const BindImageMemoryInfo, + ) -> Result, + pub get_device_group_peer_memory_features: extern "system" fn( + device: Device, + heap_index: u32, + local_device_index: u32, + remote_device_index: u32, + p_peer_memory_features: *mut PeerMemoryFeatureFlags, + ) -> c_void, + pub cmd_set_device_mask: + extern "system" fn(command_buffer: CommandBuffer, device_mask: u32) -> c_void, + pub cmd_dispatch_base: extern "system" fn( + command_buffer: CommandBuffer, + base_group_x: u32, + base_group_y: u32, + base_group_z: u32, + group_count_x: u32, + group_count_y: u32, + group_count_z: u32, + ) -> c_void, + pub get_image_memory_requirements2: extern "system" fn( + device: Device, + p_info: *const ImageMemoryRequirementsInfo2, + p_memory_requirements: *mut MemoryRequirements2, + ) -> c_void, + pub get_buffer_memory_requirements2: extern "system" fn( + device: Device, + p_info: *const 
BufferMemoryRequirementsInfo2, + p_memory_requirements: *mut MemoryRequirements2, + ) -> c_void, + pub get_image_sparse_memory_requirements2: extern "system" fn( + device: Device, + p_info: *const ImageSparseMemoryRequirementsInfo2, + p_sparse_memory_requirement_count: *mut u32, + p_sparse_memory_requirements: *mut SparseImageMemoryRequirements2, + ) -> c_void, + pub trim_command_pool: extern "system" fn( + device: Device, + command_pool: CommandPool, + flags: CommandPoolTrimFlags, + ) -> c_void, + pub get_device_queue2: extern "system" fn( + device: Device, + p_queue_info: *const DeviceQueueInfo2, + p_queue: *mut Queue, + ) -> c_void, + pub create_sampler_ycbcr_conversion: extern "system" fn( + device: Device, + p_create_info: *const SamplerYcbcrConversionCreateInfo, + p_allocator: *const AllocationCallbacks, + p_ycbcr_conversion: *mut SamplerYcbcrConversion, + ) -> Result, + pub destroy_sampler_ycbcr_conversion: extern "system" fn( + device: Device, + ycbcr_conversion: SamplerYcbcrConversion, + p_allocator: *const AllocationCallbacks, + ) -> c_void, + pub create_descriptor_update_template: extern "system" fn( + device: Device, + p_create_info: *const DescriptorUpdateTemplateCreateInfo, + p_allocator: *const AllocationCallbacks, + p_descriptor_update_template: *mut DescriptorUpdateTemplate, + ) -> Result, + pub destroy_descriptor_update_template: extern "system" fn( + device: Device, + descriptor_update_template: DescriptorUpdateTemplate, + p_allocator: *const AllocationCallbacks, + ) -> c_void, + pub update_descriptor_set_with_template: extern "system" fn( + device: Device, + descriptor_set: DescriptorSet, + descriptor_update_template: DescriptorUpdateTemplate, + p_data: *const c_void, + ) -> c_void, + pub get_descriptor_set_layout_support: extern "system" fn( + device: Device, + p_create_info: *const DescriptorSetLayoutCreateInfo, + p_support: *mut DescriptorSetLayoutSupport, + ) -> c_void, +} +unsafe impl Send for DeviceFnV1_1 {} +unsafe impl Sync for DeviceFnV1_1 {} +impl ::std::clone::Clone for DeviceFnV1_1 { + fn clone(&self) -> Self { + DeviceFnV1_1 { + bind_buffer_memory2: self.bind_buffer_memory2, + bind_image_memory2: self.bind_image_memory2, + get_device_group_peer_memory_features: self.get_device_group_peer_memory_features, + cmd_set_device_mask: self.cmd_set_device_mask, + cmd_dispatch_base: self.cmd_dispatch_base, + get_image_memory_requirements2: self.get_image_memory_requirements2, + get_buffer_memory_requirements2: self.get_buffer_memory_requirements2, + get_image_sparse_memory_requirements2: self.get_image_sparse_memory_requirements2, + trim_command_pool: self.trim_command_pool, + get_device_queue2: self.get_device_queue2, + create_sampler_ycbcr_conversion: self.create_sampler_ycbcr_conversion, + destroy_sampler_ycbcr_conversion: self.destroy_sampler_ycbcr_conversion, + create_descriptor_update_template: self.create_descriptor_update_template, + destroy_descriptor_update_template: self.destroy_descriptor_update_template, + update_descriptor_set_with_template: self.update_descriptor_set_with_template, + get_descriptor_set_layout_support: self.get_descriptor_set_layout_support, + } + } +} +impl DeviceFnV1_1 { + pub fn load<F>(mut _f: F) -> Self + where + F: FnMut(&::std::ffi::CStr) -> *const c_void, + { + DeviceFnV1_1 { + bind_buffer_memory2: unsafe { + extern "system" fn bind_buffer_memory2( + _device: Device, + _bind_info_count: u32, + _p_bind_infos: *const BindBufferMemoryInfo, + ) -> Result { + panic!(concat!("Unable to load ", stringify!(bind_buffer_memory2))) + } + let
raw_name = stringify!(vkBindBufferMemory2); + let cname = ::std::ffi::CString::new(raw_name).unwrap(); + let val = _f(&cname); + if val.is_null() { + bind_buffer_memory2 + } else { + ::std::mem::transmute(val) + } + }, + bind_image_memory2: unsafe { + extern "system" fn bind_image_memory2( + _device: Device, + _bind_info_count: u32, + _p_bind_infos: *const BindImageMemoryInfo, + ) -> Result { + panic!(concat!("Unable to load ", stringify!(bind_image_memory2))) + } + let raw_name = stringify!(vkBindImageMemory2); + let cname = ::std::ffi::CString::new(raw_name).unwrap(); + let val = _f(&cname); + if val.is_null() { + bind_image_memory2 + } else { + ::std::mem::transmute(val) + } + }, + get_device_group_peer_memory_features: unsafe { + extern "system" fn get_device_group_peer_memory_features( + _device: Device, + _heap_index: u32, + _local_device_index: u32, + _remote_device_index: u32, + _p_peer_memory_features: *mut PeerMemoryFeatureFlags, + ) -> c_void { + panic!(concat!( + "Unable to load ", + stringify!(get_device_group_peer_memory_features) + )) + } + let raw_name = stringify!(vkGetDeviceGroupPeerMemoryFeatures); + let cname = ::std::ffi::CString::new(raw_name).unwrap(); + let val = _f(&cname); + if val.is_null() { + get_device_group_peer_memory_features + } else { + ::std::mem::transmute(val) + } + }, + cmd_set_device_mask: unsafe { + extern "system" fn cmd_set_device_mask( + _command_buffer: CommandBuffer, + _device_mask: u32, + ) -> c_void { + panic!(concat!("Unable to load ", stringify!(cmd_set_device_mask))) + } + let raw_name = stringify!(vkCmdSetDeviceMask); + let cname = ::std::ffi::CString::new(raw_name).unwrap(); + let val = _f(&cname); + if val.is_null() { + cmd_set_device_mask + } else { + ::std::mem::transmute(val) + } + }, + cmd_dispatch_base: unsafe { + extern "system" fn cmd_dispatch_base( + _command_buffer: CommandBuffer, + _base_group_x: u32, + _base_group_y: u32, + _base_group_z: u32, + _group_count_x: u32, + _group_count_y: u32, + _group_count_z: u32, + ) -> c_void { + panic!(concat!("Unable to load ", stringify!(cmd_dispatch_base))) + } + let raw_name = stringify!(vkCmdDispatchBase); + let cname = ::std::ffi::CString::new(raw_name).unwrap(); + let val = _f(&cname); + if val.is_null() { + cmd_dispatch_base + } else { + ::std::mem::transmute(val) + } + }, + get_image_memory_requirements2: unsafe { + extern "system" fn get_image_memory_requirements2( + _device: Device, + _p_info: *const ImageMemoryRequirementsInfo2, + _p_memory_requirements: *mut MemoryRequirements2, + ) -> c_void { + panic!(concat!( + "Unable to load ", + stringify!(get_image_memory_requirements2) + )) + } + let raw_name = stringify!(vkGetImageMemoryRequirements2); + let cname = ::std::ffi::CString::new(raw_name).unwrap(); + let val = _f(&cname); + if val.is_null() { + get_image_memory_requirements2 + } else { + ::std::mem::transmute(val) + } + }, + get_buffer_memory_requirements2: unsafe { + extern "system" fn get_buffer_memory_requirements2( + _device: Device, + _p_info: *const BufferMemoryRequirementsInfo2, + _p_memory_requirements: *mut MemoryRequirements2, + ) -> c_void { + panic!(concat!( + "Unable to load ", + stringify!(get_buffer_memory_requirements2) + )) + } + let raw_name = stringify!(vkGetBufferMemoryRequirements2); + let cname = ::std::ffi::CString::new(raw_name).unwrap(); + let val = _f(&cname); + if val.is_null() { + get_buffer_memory_requirements2 + } else { + ::std::mem::transmute(val) + } + }, + get_image_sparse_memory_requirements2: unsafe { + extern "system" fn 
get_image_sparse_memory_requirements2( + _device: Device, + _p_info: *const ImageSparseMemoryRequirementsInfo2, + _p_sparse_memory_requirement_count: *mut u32, + _p_sparse_memory_requirements: *mut SparseImageMemoryRequirements2, + ) -> c_void { + panic!(concat!( + "Unable to load ", + stringify!(get_image_sparse_memory_requirements2) + )) + } + let raw_name = stringify!(vkGetImageSparseMemoryRequirements2); + let cname = ::std::ffi::CString::new(raw_name).unwrap(); + let val = _f(&cname); + if val.is_null() { + get_image_sparse_memory_requirements2 + } else { + ::std::mem::transmute(val) + } + }, + trim_command_pool: unsafe { + extern "system" fn trim_command_pool( + _device: Device, + _command_pool: CommandPool, + _flags: CommandPoolTrimFlags, + ) -> c_void { + panic!(concat!("Unable to load ", stringify!(trim_command_pool))) + } + let raw_name = stringify!(vkTrimCommandPool); + let cname = ::std::ffi::CString::new(raw_name).unwrap(); + let val = _f(&cname); + if val.is_null() { + trim_command_pool + } else { + ::std::mem::transmute(val) + } + }, + get_device_queue2: unsafe { + extern "system" fn get_device_queue2( + _device: Device, + _p_queue_info: *const DeviceQueueInfo2, + _p_queue: *mut Queue, + ) -> c_void { + panic!(concat!("Unable to load ", stringify!(get_device_queue2))) + } + let raw_name = stringify!(vkGetDeviceQueue2); + let cname = ::std::ffi::CString::new(raw_name).unwrap(); + let val = _f(&cname); + if val.is_null() { + get_device_queue2 + } else { + ::std::mem::transmute(val) + } + }, + create_sampler_ycbcr_conversion: unsafe { + extern "system" fn create_sampler_ycbcr_conversion( + _device: Device, + _p_create_info: *const SamplerYcbcrConversionCreateInfo, + _p_allocator: *const AllocationCallbacks, + _p_ycbcr_conversion: *mut SamplerYcbcrConversion, + ) -> Result { + panic!(concat!( + "Unable to load ", + stringify!(create_sampler_ycbcr_conversion) + )) + } + let raw_name = stringify!(vkCreateSamplerYcbcrConversion); + let cname = ::std::ffi::CString::new(raw_name).unwrap(); + let val = _f(&cname); + if val.is_null() { + create_sampler_ycbcr_conversion + } else { + ::std::mem::transmute(val) + } + }, + destroy_sampler_ycbcr_conversion: unsafe { + extern "system" fn destroy_sampler_ycbcr_conversion( + _device: Device, + _ycbcr_conversion: SamplerYcbcrConversion, + _p_allocator: *const AllocationCallbacks, + ) -> c_void { + panic!(concat!( + "Unable to load ", + stringify!(destroy_sampler_ycbcr_conversion) + )) + } + let raw_name = stringify!(vkDestroySamplerYcbcrConversion); + let cname = ::std::ffi::CString::new(raw_name).unwrap(); + let val = _f(&cname); + if val.is_null() { + destroy_sampler_ycbcr_conversion + } else { + ::std::mem::transmute(val) + } + }, + create_descriptor_update_template: unsafe { + extern "system" fn create_descriptor_update_template( + _device: Device, + _p_create_info: *const DescriptorUpdateTemplateCreateInfo, + _p_allocator: *const AllocationCallbacks, + _p_descriptor_update_template: *mut DescriptorUpdateTemplate, + ) -> Result { + panic!(concat!( + "Unable to load ", + stringify!(create_descriptor_update_template) + )) + } + let raw_name = stringify!(vkCreateDescriptorUpdateTemplate); + let cname = ::std::ffi::CString::new(raw_name).unwrap(); + let val = _f(&cname); + if val.is_null() { + create_descriptor_update_template + } else { + ::std::mem::transmute(val) + } + }, + destroy_descriptor_update_template: unsafe { + extern "system" fn destroy_descriptor_update_template( + _device: Device, + _descriptor_update_template: 
DescriptorUpdateTemplate, + _p_allocator: *const AllocationCallbacks, + ) -> c_void { + panic!(concat!( + "Unable to load ", + stringify!(destroy_descriptor_update_template) + )) + } + let raw_name = stringify!(vkDestroyDescriptorUpdateTemplate); + let cname = ::std::ffi::CString::new(raw_name).unwrap(); + let val = _f(&cname); + if val.is_null() { + destroy_descriptor_update_template + } else { + ::std::mem::transmute(val) + } + }, + update_descriptor_set_with_template: unsafe { + extern "system" fn update_descriptor_set_with_template( + _device: Device, + _descriptor_set: DescriptorSet, + _descriptor_update_template: DescriptorUpdateTemplate, + _p_data: *const c_void, + ) -> c_void { + panic!(concat!( + "Unable to load ", + stringify!(update_descriptor_set_with_template) + )) + } + let raw_name = stringify!(vkUpdateDescriptorSetWithTemplate); + let cname = ::std::ffi::CString::new(raw_name).unwrap(); + let val = _f(&cname); + if val.is_null() { + update_descriptor_set_with_template + } else { + ::std::mem::transmute(val) + } + }, + get_descriptor_set_layout_support: unsafe { + extern "system" fn get_descriptor_set_layout_support( + _device: Device, + _p_create_info: *const DescriptorSetLayoutCreateInfo, + _p_support: *mut DescriptorSetLayoutSupport, + ) -> c_void { + panic!(concat!( + "Unable to load ", + stringify!(get_descriptor_set_layout_support) + )) + } + let raw_name = stringify!(vkGetDescriptorSetLayoutSupport); + let cname = ::std::ffi::CString::new(raw_name).unwrap(); + let val = _f(&cname); + if val.is_null() { + get_descriptor_set_layout_support + } else { + ::std::mem::transmute(val) + } + }, + } + } + #[doc = ""] + pub unsafe fn bind_buffer_memory2( + &self, + device: Device, + bind_info_count: u32, + p_bind_infos: *const BindBufferMemoryInfo, + ) -> Result { + (self.bind_buffer_memory2)(device, bind_info_count, p_bind_infos) + } + #[doc = ""] + pub unsafe fn bind_image_memory2( + &self, + device: Device, + bind_info_count: u32, + p_bind_infos: *const BindImageMemoryInfo, + ) -> Result { + (self.bind_image_memory2)(device, bind_info_count, p_bind_infos) + } + #[doc = ""] + pub unsafe fn get_device_group_peer_memory_features( + &self, + device: Device, + heap_index: u32, + local_device_index: u32, + remote_device_index: u32, + p_peer_memory_features: *mut PeerMemoryFeatureFlags, + ) -> c_void { + (self.get_device_group_peer_memory_features)( + device, + heap_index, + local_device_index, + remote_device_index, + p_peer_memory_features, + ) + } + #[doc = ""] + pub unsafe fn cmd_set_device_mask( + &self, + command_buffer: CommandBuffer, + device_mask: u32, + ) -> c_void { + (self.cmd_set_device_mask)(command_buffer, device_mask) + } + #[doc = ""] + pub unsafe fn cmd_dispatch_base( + &self, + command_buffer: CommandBuffer, + base_group_x: u32, + base_group_y: u32, + base_group_z: u32, + group_count_x: u32, + group_count_y: u32, + group_count_z: u32, + ) -> c_void { + (self.cmd_dispatch_base)( + command_buffer, + base_group_x, + base_group_y, + base_group_z, + group_count_x, + group_count_y, + group_count_z, + ) + } + #[doc = ""] + pub unsafe fn get_image_memory_requirements2( + &self, + device: Device, + p_info: *const ImageMemoryRequirementsInfo2, + p_memory_requirements: *mut MemoryRequirements2, + ) -> c_void { + (self.get_image_memory_requirements2)(device, p_info, p_memory_requirements) + } + #[doc = ""] + pub unsafe fn get_buffer_memory_requirements2( + &self, + device: Device, + p_info: *const BufferMemoryRequirementsInfo2, + p_memory_requirements: *mut 
MemoryRequirements2, + ) -> c_void { + (self.get_buffer_memory_requirements2)(device, p_info, p_memory_requirements) + } + #[doc = ""] + pub unsafe fn get_image_sparse_memory_requirements2( + &self, + device: Device, + p_info: *const ImageSparseMemoryRequirementsInfo2, + p_sparse_memory_requirement_count: *mut u32, + p_sparse_memory_requirements: *mut SparseImageMemoryRequirements2, + ) -> c_void { + (self.get_image_sparse_memory_requirements2)( + device, + p_info, + p_sparse_memory_requirement_count, + p_sparse_memory_requirements, + ) + } + #[doc = ""] + pub unsafe fn trim_command_pool( + &self, + device: Device, + command_pool: CommandPool, + flags: CommandPoolTrimFlags, + ) -> c_void { + (self.trim_command_pool)(device, command_pool, flags) + } + #[doc = ""] + pub unsafe fn get_device_queue2( + &self, + device: Device, + p_queue_info: *const DeviceQueueInfo2, + p_queue: *mut Queue, + ) -> c_void { + (self.get_device_queue2)(device, p_queue_info, p_queue) + } + #[doc = ""] + pub unsafe fn create_sampler_ycbcr_conversion( + &self, + device: Device, + p_create_info: *const SamplerYcbcrConversionCreateInfo, + p_allocator: *const AllocationCallbacks, + p_ycbcr_conversion: *mut SamplerYcbcrConversion, + ) -> Result { + (self.create_sampler_ycbcr_conversion)( + device, + p_create_info, + p_allocator, + p_ycbcr_conversion, + ) + } + #[doc = ""] + pub unsafe fn destroy_sampler_ycbcr_conversion( + &self, + device: Device, + ycbcr_conversion: SamplerYcbcrConversion, + p_allocator: *const AllocationCallbacks, + ) -> c_void { + (self.destroy_sampler_ycbcr_conversion)(device, ycbcr_conversion, p_allocator) + } + #[doc = ""] + pub unsafe fn create_descriptor_update_template( + &self, + device: Device, + p_create_info: *const DescriptorUpdateTemplateCreateInfo, + p_allocator: *const AllocationCallbacks, + p_descriptor_update_template: *mut DescriptorUpdateTemplate, + ) -> Result { + (self.create_descriptor_update_template)( + device, + p_create_info, + p_allocator, + p_descriptor_update_template, + ) + } + #[doc = ""] + pub unsafe fn destroy_descriptor_update_template( + &self, + device: Device, + descriptor_update_template: DescriptorUpdateTemplate, + p_allocator: *const AllocationCallbacks, + ) -> c_void { + (self.destroy_descriptor_update_template)(device, descriptor_update_template, p_allocator) + } + #[doc = ""] + pub unsafe fn update_descriptor_set_with_template( + &self, + device: Device, + descriptor_set: DescriptorSet, + descriptor_update_template: DescriptorUpdateTemplate, + p_data: *const c_void, + ) -> c_void { + (self.update_descriptor_set_with_template)( + device, + descriptor_set, + descriptor_update_template, + p_data, + ) + } + #[doc = ""] + pub unsafe fn get_descriptor_set_layout_support( + &self, + device: Device, + p_create_info: *const DescriptorSetLayoutCreateInfo, + p_support: *mut DescriptorSetLayoutSupport, + ) -> c_void { + (self.get_descriptor_set_layout_support)(device, p_create_info, p_support) + } +} +#[doc = ""] +pub type SampleMask = u32; +#[doc = ""] +pub type Bool32 = u32; +#[doc = ""] +pub type Flags = u32; +#[doc = ""] +pub type DeviceSize = u64; +#[doc = ""] +pub type DeviceAddress = u64; +#[repr(transparent)] +#[derive(Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash)] +#[doc = ""] +pub struct FramebufferCreateFlags(Flags); +vk_bitflags_wrapped!(FramebufferCreateFlags, 0b0, Flags); +#[repr(transparent)] +#[derive(Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash)] +#[doc = ""] +pub struct QueryPoolCreateFlags(Flags); +vk_bitflags_wrapped!(QueryPoolCreateFlags, 
0b0, Flags); +#[repr(transparent)] +#[derive(Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash)] +#[doc = ""] +pub struct PipelineLayoutCreateFlags(Flags); +vk_bitflags_wrapped!(PipelineLayoutCreateFlags, 0b0, Flags); +#[repr(transparent)] +#[derive(Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash)] +#[doc = ""] +pub struct PipelineCacheCreateFlags(Flags); +vk_bitflags_wrapped!(PipelineCacheCreateFlags, 0b0, Flags); +#[repr(transparent)] +#[derive(Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash)] +#[doc = ""] +pub struct PipelineDepthStencilStateCreateFlags(Flags); +vk_bitflags_wrapped!(PipelineDepthStencilStateCreateFlags, 0b0, Flags); +#[repr(transparent)] +#[derive(Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash)] +#[doc = ""] +pub struct PipelineDynamicStateCreateFlags(Flags); +vk_bitflags_wrapped!(PipelineDynamicStateCreateFlags, 0b0, Flags); +#[repr(transparent)] +#[derive(Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash)] +#[doc = ""] +pub struct PipelineColorBlendStateCreateFlags(Flags); +vk_bitflags_wrapped!(PipelineColorBlendStateCreateFlags, 0b0, Flags); +#[repr(transparent)] +#[derive(Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash)] +#[doc = ""] +pub struct PipelineMultisampleStateCreateFlags(Flags); +vk_bitflags_wrapped!(PipelineMultisampleStateCreateFlags, 0b0, Flags); +#[repr(transparent)] +#[derive(Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash)] +#[doc = ""] +pub struct PipelineRasterizationStateCreateFlags(Flags); +vk_bitflags_wrapped!(PipelineRasterizationStateCreateFlags, 0b0, Flags); +#[repr(transparent)] +#[derive(Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash)] +#[doc = ""] +pub struct PipelineViewportStateCreateFlags(Flags); +vk_bitflags_wrapped!(PipelineViewportStateCreateFlags, 0b0, Flags); +#[repr(transparent)] +#[derive(Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash)] +#[doc = ""] +pub struct PipelineTessellationStateCreateFlags(Flags); +vk_bitflags_wrapped!(PipelineTessellationStateCreateFlags, 0b0, Flags); +#[repr(transparent)] +#[derive(Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash)] +#[doc = ""] +pub struct PipelineInputAssemblyStateCreateFlags(Flags); +vk_bitflags_wrapped!(PipelineInputAssemblyStateCreateFlags, 0b0, Flags); +#[repr(transparent)] +#[derive(Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash)] +#[doc = ""] +pub struct PipelineVertexInputStateCreateFlags(Flags); +vk_bitflags_wrapped!(PipelineVertexInputStateCreateFlags, 0b0, Flags); +#[repr(transparent)] +#[derive(Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash)] +#[doc = ""] +pub struct PipelineShaderStageCreateFlags(Flags); +vk_bitflags_wrapped!(PipelineShaderStageCreateFlags, 0b0, Flags); +#[repr(transparent)] +#[derive(Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash)] +#[doc = ""] +pub struct BufferViewCreateFlags(Flags); +vk_bitflags_wrapped!(BufferViewCreateFlags, 0b0, Flags); +#[repr(transparent)] +#[derive(Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash)] +#[doc = ""] +pub struct InstanceCreateFlags(Flags); +vk_bitflags_wrapped!(InstanceCreateFlags, 0b0, Flags); +#[repr(transparent)] +#[derive(Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash)] +#[doc = ""] +pub struct DeviceCreateFlags(Flags); +vk_bitflags_wrapped!(DeviceCreateFlags, 0b0, Flags); +#[repr(transparent)] +#[derive(Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash)] +#[doc = ""] +pub struct SemaphoreCreateFlags(Flags); +vk_bitflags_wrapped!(SemaphoreCreateFlags, 0b0, Flags); +#[repr(transparent)] +#[derive(Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash)] +#[doc = ""] +pub struct 
ShaderModuleCreateFlags(Flags); +vk_bitflags_wrapped!(ShaderModuleCreateFlags, 0b0, Flags); +#[repr(transparent)] +#[derive(Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash)] +#[doc = ""] +pub struct EventCreateFlags(Flags); +vk_bitflags_wrapped!(EventCreateFlags, 0b0, Flags); +#[repr(transparent)] +#[derive(Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash)] +#[doc = ""] +pub struct MemoryMapFlags(Flags); +vk_bitflags_wrapped!(MemoryMapFlags, 0b0, Flags); +#[repr(transparent)] +#[derive(Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash)] +#[doc = ""] +pub struct DescriptorPoolResetFlags(Flags); +vk_bitflags_wrapped!(DescriptorPoolResetFlags, 0b0, Flags); +#[repr(transparent)] +#[derive(Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash)] +#[doc = ""] +pub struct DescriptorUpdateTemplateCreateFlags(Flags); +vk_bitflags_wrapped!(DescriptorUpdateTemplateCreateFlags, 0b0, Flags); +#[repr(transparent)] +#[derive(Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash)] +#[doc = ""] +pub struct DisplayModeCreateFlagsKHR(Flags); +vk_bitflags_wrapped!(DisplayModeCreateFlagsKHR, 0b0, Flags); +#[repr(transparent)] +#[derive(Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash)] +#[doc = ""] +pub struct DisplaySurfaceCreateFlagsKHR(Flags); +vk_bitflags_wrapped!(DisplaySurfaceCreateFlagsKHR, 0b0, Flags); +#[repr(transparent)] +#[derive(Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash)] +#[doc = ""] +pub struct AndroidSurfaceCreateFlagsKHR(Flags); +vk_bitflags_wrapped!(AndroidSurfaceCreateFlagsKHR, 0b0, Flags); +#[repr(transparent)] +#[derive(Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash)] +#[doc = ""] +pub struct ViSurfaceCreateFlagsNN(Flags); +vk_bitflags_wrapped!(ViSurfaceCreateFlagsNN, 0b0, Flags); +#[repr(transparent)] +#[derive(Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash)] +#[doc = ""] +pub struct WaylandSurfaceCreateFlagsKHR(Flags); +vk_bitflags_wrapped!(WaylandSurfaceCreateFlagsKHR, 0b0, Flags); +#[repr(transparent)] +#[derive(Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash)] +#[doc = ""] +pub struct Win32SurfaceCreateFlagsKHR(Flags); +vk_bitflags_wrapped!(Win32SurfaceCreateFlagsKHR, 0b0, Flags); +#[repr(transparent)] +#[derive(Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash)] +#[doc = ""] +pub struct XlibSurfaceCreateFlagsKHR(Flags); +vk_bitflags_wrapped!(XlibSurfaceCreateFlagsKHR, 0b0, Flags); +#[repr(transparent)] +#[derive(Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash)] +#[doc = ""] +pub struct XcbSurfaceCreateFlagsKHR(Flags); +vk_bitflags_wrapped!(XcbSurfaceCreateFlagsKHR, 0b0, Flags); +#[repr(transparent)] +#[derive(Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash)] +#[doc = ""] +pub struct IOSSurfaceCreateFlagsMVK(Flags); +vk_bitflags_wrapped!(IOSSurfaceCreateFlagsMVK, 0b0, Flags); +#[repr(transparent)] +#[derive(Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash)] +#[doc = ""] +pub struct MacOSSurfaceCreateFlagsMVK(Flags); +vk_bitflags_wrapped!(MacOSSurfaceCreateFlagsMVK, 0b0, Flags); +#[repr(transparent)] +#[derive(Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash)] +#[doc = ""] +pub struct ImagePipeSurfaceCreateFlagsFUCHSIA(Flags); +vk_bitflags_wrapped!(ImagePipeSurfaceCreateFlagsFUCHSIA, 0b0, Flags); +#[repr(transparent)] +#[derive(Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash)] +#[doc = ""] +pub struct CommandPoolTrimFlags(Flags); +vk_bitflags_wrapped!(CommandPoolTrimFlags, 0b0, Flags); +#[repr(transparent)] +#[derive(Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash)] +#[doc = ""] +pub struct PipelineViewportSwizzleStateCreateFlagsNV(Flags); 
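+// [Editorial illustration - not part of the vendored bindings.] Every flag
+// type in this block is a `#[repr(transparent)]` newtype over the raw
+// `Flags` (u32), and `vk_bitflags_wrapped!` fills in the usual bitflag
+// surface (`empty`/`from_raw`/`as_raw`/`contains` plus the bitwise
+// operators). A minimal hand-written sketch of that pattern, using a
+// hypothetical `ExampleFlags` type:
+//
+//     #[repr(transparent)]
+//     #[derive(Clone, Copy, PartialEq, Eq)]
+//     pub struct ExampleFlags(Flags);
+//     impl ExampleFlags {
+//         pub const fn empty() -> Self { ExampleFlags(0) }
+//         pub const fn from_raw(x: Flags) -> Self { ExampleFlags(x) }
+//         pub const fn as_raw(self) -> Flags { self.0 }
+//         pub fn contains(self, other: Self) -> bool {
+//             self.0 & other.0 == other.0
+//         }
+//     }
+//     impl ::std::ops::BitOr for ExampleFlags {
+//         type Output = Self;
+//         fn bitor(self, rhs: Self) -> Self { ExampleFlags(self.0 | rhs.0) }
+//     }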
+vk_bitflags_wrapped!(PipelineViewportSwizzleStateCreateFlagsNV, 0b0, Flags); +#[repr(transparent)] +#[derive(Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash)] +#[doc = ""] +pub struct PipelineDiscardRectangleStateCreateFlagsEXT(Flags); +vk_bitflags_wrapped!(PipelineDiscardRectangleStateCreateFlagsEXT, 0b0, Flags); +#[repr(transparent)] +#[derive(Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash)] +#[doc = ""] +pub struct PipelineCoverageToColorStateCreateFlagsNV(Flags); +vk_bitflags_wrapped!(PipelineCoverageToColorStateCreateFlagsNV, 0b0, Flags); +#[repr(transparent)] +#[derive(Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash)] +#[doc = ""] +pub struct PipelineCoverageModulationStateCreateFlagsNV(Flags); +vk_bitflags_wrapped!(PipelineCoverageModulationStateCreateFlagsNV, 0b0, Flags); +#[repr(transparent)] +#[derive(Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash)] +#[doc = ""] +pub struct ValidationCacheCreateFlagsEXT(Flags); +vk_bitflags_wrapped!(ValidationCacheCreateFlagsEXT, 0b0, Flags); +#[repr(transparent)] +#[derive(Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash)] +#[doc = ""] +pub struct DebugUtilsMessengerCreateFlagsEXT(Flags); +vk_bitflags_wrapped!(DebugUtilsMessengerCreateFlagsEXT, 0b0, Flags); +#[repr(transparent)] +#[derive(Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash)] +#[doc = ""] +pub struct DebugUtilsMessengerCallbackDataFlagsEXT(Flags); +vk_bitflags_wrapped!(DebugUtilsMessengerCallbackDataFlagsEXT, 0b0, Flags); +#[repr(transparent)] +#[derive(Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash)] +#[doc = ""] +pub struct PipelineRasterizationConservativeStateCreateFlagsEXT(Flags); +vk_bitflags_wrapped!( + PipelineRasterizationConservativeStateCreateFlagsEXT, + 0b0, + Flags +); +#[repr(transparent)] +#[derive(Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash)] +#[doc = ""] +pub struct PipelineRasterizationStateStreamCreateFlagsEXT(Flags); +vk_bitflags_wrapped!(PipelineRasterizationStateStreamCreateFlagsEXT, 0b0, Flags); +define_handle!( + Instance, + INSTANCE, + doc = "" +); +define_handle!(PhysicalDevice, PHYSICAL_DEVICE, doc = ""); +define_handle!( + Device, + DEVICE, + doc = "" +); +define_handle!( + Queue, + QUEUE, + doc = "" +); +define_handle!(CommandBuffer, COMMAND_BUFFER, doc = ""); +handle_nondispatchable!(DeviceMemory, DEVICE_MEMORY, doc = ""); +handle_nondispatchable!(CommandPool, COMMAND_POOL, doc = ""); +handle_nondispatchable!( + Buffer, + BUFFER, + doc = "" +); +handle_nondispatchable!( + BufferView, + BUFFER_VIEW, + doc = + "" +); +handle_nondispatchable!( + Image, + IMAGE, + doc = "" +); +handle_nondispatchable!( + ImageView, + IMAGE_VIEW, + doc = + "" +); +handle_nondispatchable!(ShaderModule, SHADER_MODULE, doc = ""); +handle_nondispatchable!( + Pipeline, + PIPELINE, + doc = "" +); +handle_nondispatchable!(PipelineLayout, PIPELINE_LAYOUT, doc = ""); +handle_nondispatchable!( + Sampler, + SAMPLER, + doc = "" +); +handle_nondispatchable!(DescriptorSet, DESCRIPTOR_SET, doc = ""); +handle_nondispatchable!(DescriptorSetLayout, DESCRIPTOR_SET_LAYOUT, doc = ""); +handle_nondispatchable!(DescriptorPool, DESCRIPTOR_POOL, doc = ""); +handle_nondispatchable!( + Fence, + FENCE, + doc = "" +); +handle_nondispatchable!( + Semaphore, + SEMAPHORE, + doc = + "" +); +handle_nondispatchable!( + Event, + EVENT, + doc = "" +); +handle_nondispatchable!( + QueryPool, + QUERY_POOL, + doc = + "" +); +handle_nondispatchable!(Framebuffer, FRAMEBUFFER, doc = ""); +handle_nondispatchable!( + RenderPass, + RENDER_PASS, + doc = + "" +); +handle_nondispatchable!(PipelineCache, PIPELINE_CACHE, doc = ""); +handle_nondispatchable!(ObjectTableNVX, OBJECT_TABLE_NVX, doc = ""); +handle_nondispatchable!(IndirectCommandsLayoutNVX, INDIRECT_COMMANDS_LAYOUT_NVX, doc = ""); +handle_nondispatchable!(DescriptorUpdateTemplate, DESCRIPTOR_UPDATE_TEMPLATE, doc = ""); +handle_nondispatchable!(SamplerYcbcrConversion, SAMPLER_YCBCR_CONVERSION, doc = ""); +handle_nondispatchable!(ValidationCacheEXT, VALIDATION_CACHE_EXT, doc = ""); +handle_nondispatchable!(AccelerationStructureNV, ACCELERATION_STRUCTURE_NV, doc = ""); +handle_nondispatchable!( + DisplayKHR, + DISPLAY_KHR, + doc = + "" +); +handle_nondispatchable!(DisplayModeKHR, DISPLAY_MODE_KHR, doc = ""); +handle_nondispatchable!( + SurfaceKHR, + SURFACE_KHR, + doc = + "" +); +handle_nondispatchable!(SwapchainKHR, SWAPCHAIN_KHR, doc = ""); +handle_nondispatchable!(DebugReportCallbackEXT, DEBUG_REPORT_CALLBACK_EXT, doc = ""); +handle_nondispatchable!(DebugUtilsMessengerEXT, DEBUG_UTILS_MESSENGER_EXT, doc = ""); +#[allow(non_camel_case_types)] +#[doc = ""] +pub type PFN_vkInternalAllocationNotification = Option< + unsafe extern "system" fn( + p_user_data: *mut c_void, + size: usize, + allocation_type: InternalAllocationType, + allocation_scope: SystemAllocationScope, + ) -> c_void, +>; +#[allow(non_camel_case_types)] +#[doc = ""] +pub type PFN_vkInternalFreeNotification = Option< + unsafe extern "system" fn( + p_user_data: *mut c_void, + size: usize, + allocation_type: InternalAllocationType, + allocation_scope: SystemAllocationScope, + ) -> c_void, +>; +#[allow(non_camel_case_types)] +#[doc = ""] +pub type PFN_vkReallocationFunction = Option< + unsafe extern "system" fn( + p_user_data: *mut c_void, + p_original: *mut c_void, + size: usize, + alignment: usize, + allocation_scope: SystemAllocationScope, + ) -> *mut c_void, +>; +#[allow(non_camel_case_types)] +#[doc = ""] +pub type PFN_vkAllocationFunction = Option< + unsafe extern "system" fn( + p_user_data: *mut c_void, + size: usize, + alignment: usize, + allocation_scope: SystemAllocationScope, + ) -> *mut c_void, +>; +#[allow(non_camel_case_types)] +#[doc = ""] +pub type PFN_vkFreeFunction = + Option<unsafe extern "system" fn(p_user_data: *mut c_void, p_memory: *mut c_void) -> c_void>; +#[allow(non_camel_case_types)] +#[doc = ""] +pub type PFN_vkVoidFunction = Option<unsafe extern "system" fn() -> c_void>; +#[allow(non_camel_case_types)] +#[doc = ""] +pub type PFN_vkDebugReportCallbackEXT = Option< + unsafe extern "system" fn( + flags: DebugReportFlagsEXT, + object_type: DebugReportObjectTypeEXT, + object: u64, + location: usize, + message_code: i32, + p_layer_prefix: *const c_char, + p_message: *const c_char, + p_user_data: *mut c_void, + ) -> Bool32, +>; +#[allow(non_camel_case_types)] +#[doc = ""] +pub type PFN_vkDebugUtilsMessengerCallbackEXT = Option< + unsafe extern "system" fn( + message_severity: DebugUtilsMessageSeverityFlagsEXT, + message_types: DebugUtilsMessageTypeFlagsEXT, + p_callback_data: *const DebugUtilsMessengerCallbackDataEXT, + p_user_data: *mut c_void, + ) -> Bool32, +>; +#[repr(C)] +#[derive(Copy, Clone, Debug)] +#[doc = ""] +pub struct BaseOutStructure { + pub s_type: StructureType, + pub p_next: *mut BaseOutStructure, +} +impl ::std::default::Default for BaseOutStructure { + fn default() -> BaseOutStructure { + BaseOutStructure { + s_type: unsafe { ::std::mem::zeroed() }, + p_next: ::std::ptr::null_mut(), + } + } +} +#[repr(C)]
+#[derive(Copy, Clone, Debug)] +#[doc = ""] +pub struct BaseInStructure { + pub s_type: StructureType, + pub p_next: *const BaseInStructure, +} +impl ::std::default::Default for BaseInStructure { + fn default() -> BaseInStructure { + BaseInStructure { + s_type: unsafe { ::std::mem::zeroed() }, + p_next: ::std::ptr::null(), + } + } +} +#[repr(C)] +#[derive(Copy, Clone, Default, Debug)] +#[doc = ""] +pub struct Offset2D { + pub x: i32, + pub y: i32, +} +impl Offset2D { + pub fn builder<'a>() -> Offset2DBuilder<'a> { + Offset2DBuilder { + inner: Offset2D::default(), + marker: ::std::marker::PhantomData, + } + } +} +#[repr(transparent)] +pub struct Offset2DBuilder<'a> { + inner: Offset2D, + marker: ::std::marker::PhantomData<&'a ()>, +} +impl<'a> ::std::ops::Deref for Offset2DBuilder<'a> { + type Target = Offset2D; + fn deref(&self) -> &Self::Target { + &self.inner + } +} +impl<'a> ::std::ops::DerefMut for Offset2DBuilder<'a> { + fn deref_mut(&mut self) -> &mut Self::Target { + &mut self.inner + } +} +impl<'a> Offset2DBuilder<'a> { + pub fn x(mut self, x: i32) -> Offset2DBuilder<'a> { + self.inner.x = x; + self + } + pub fn y(mut self, y: i32) -> Offset2DBuilder<'a> { + self.inner.y = y; + self + } + #[doc = r" Calling build will **discard** all the lifetime information. Only call this if"] + #[doc = r" necessary! Builders implement `Deref` targeting their corresponding Vulkan struct,"] + #[doc = r" so references to builders can be passed directly to Vulkan functions."] + pub fn build(self) -> Offset2D { + self.inner + } +} +#[repr(C)] +#[derive(Copy, Clone, Default, Debug)] +#[doc = ""] +pub struct Offset3D { + pub x: i32, + pub y: i32, + pub z: i32, +} +impl Offset3D { + pub fn builder<'a>() -> Offset3DBuilder<'a> { + Offset3DBuilder { + inner: Offset3D::default(), + marker: ::std::marker::PhantomData, + } + } +} +#[repr(transparent)] +pub struct Offset3DBuilder<'a> { + inner: Offset3D, + marker: ::std::marker::PhantomData<&'a ()>, +} +impl<'a> ::std::ops::Deref for Offset3DBuilder<'a> { + type Target = Offset3D; + fn deref(&self) -> &Self::Target { + &self.inner + } +} +impl<'a> ::std::ops::DerefMut for Offset3DBuilder<'a> { + fn deref_mut(&mut self) -> &mut Self::Target { + &mut self.inner + } +} +impl<'a> Offset3DBuilder<'a> { + pub fn x(mut self, x: i32) -> Offset3DBuilder<'a> { + self.inner.x = x; + self + } + pub fn y(mut self, y: i32) -> Offset3DBuilder<'a> { + self.inner.y = y; + self + } + pub fn z(mut self, z: i32) -> Offset3DBuilder<'a> { + self.inner.z = z; + self + } + #[doc = r" Calling build will **discard** all the lifetime information. Only call this if"] + #[doc = r" necessary! 
Builders implement `Deref` targeting their corresponding Vulkan struct,"] + #[doc = r" so references to builders can be passed directly to Vulkan functions."] + pub fn build(self) -> Offset3D { + self.inner + } +} +#[repr(C)] +#[derive(Copy, Clone, Default, Debug)] +#[doc = ""] +pub struct Extent2D { + pub width: u32, + pub height: u32, +} +impl Extent2D { + pub fn builder<'a>() -> Extent2DBuilder<'a> { + Extent2DBuilder { + inner: Extent2D::default(), + marker: ::std::marker::PhantomData, + } + } +} +#[repr(transparent)] +pub struct Extent2DBuilder<'a> { + inner: Extent2D, + marker: ::std::marker::PhantomData<&'a ()>, +} +impl<'a> ::std::ops::Deref for Extent2DBuilder<'a> { + type Target = Extent2D; + fn deref(&self) -> &Self::Target { + &self.inner + } +} +impl<'a> ::std::ops::DerefMut for Extent2DBuilder<'a> { + fn deref_mut(&mut self) -> &mut Self::Target { + &mut self.inner + } +} +impl<'a> Extent2DBuilder<'a> { + pub fn width(mut self, width: u32) -> Extent2DBuilder<'a> { + self.inner.width = width; + self + } + pub fn height(mut self, height: u32) -> Extent2DBuilder<'a> { + self.inner.height = height; + self + } + #[doc = r" Calling build will **discard** all the lifetime information. Only call this if"] + #[doc = r" necessary! Builders implement `Deref` targeting their corresponding Vulkan struct,"] + #[doc = r" so references to builders can be passed directly to Vulkan functions."] + pub fn build(self) -> Extent2D { + self.inner + } +} +#[repr(C)] +#[derive(Copy, Clone, Default, Debug, PartialEq, Eq, Hash)] +#[doc = ""] +pub struct Extent3D { + pub width: u32, + pub height: u32, + pub depth: u32, +} +impl Extent3D { + pub fn builder<'a>() -> Extent3DBuilder<'a> { + Extent3DBuilder { + inner: Extent3D::default(), + marker: ::std::marker::PhantomData, + } + } +} +#[repr(transparent)] +pub struct Extent3DBuilder<'a> { + inner: Extent3D, + marker: ::std::marker::PhantomData<&'a ()>, +} +impl<'a> ::std::ops::Deref for Extent3DBuilder<'a> { + type Target = Extent3D; + fn deref(&self) -> &Self::Target { + &self.inner + } +} +impl<'a> ::std::ops::DerefMut for Extent3DBuilder<'a> { + fn deref_mut(&mut self) -> &mut Self::Target { + &mut self.inner + } +} +impl<'a> Extent3DBuilder<'a> { + pub fn width(mut self, width: u32) -> Extent3DBuilder<'a> { + self.inner.width = width; + self + } + pub fn height(mut self, height: u32) -> Extent3DBuilder<'a> { + self.inner.height = height; + self + } + pub fn depth(mut self, depth: u32) -> Extent3DBuilder<'a> { + self.inner.depth = depth; + self + } + #[doc = r" Calling build will **discard** all the lifetime information. Only call this if"] + #[doc = r" necessary! 
Builders implement `Deref` targeting their corresponding Vulkan struct,"] + #[doc = r" so references to builders can be passed directly to Vulkan functions."] + pub fn build(self) -> Extent3D { + self.inner + } +} +#[repr(C)] +#[derive(Copy, Clone, Default, Debug)] +#[doc = ""] +pub struct Viewport { + pub x: f32, + pub y: f32, + pub width: f32, + pub height: f32, + pub min_depth: f32, + pub max_depth: f32, +} +impl Viewport { + pub fn builder<'a>() -> ViewportBuilder<'a> { + ViewportBuilder { + inner: Viewport::default(), + marker: ::std::marker::PhantomData, + } + } +} +#[repr(transparent)] +pub struct ViewportBuilder<'a> { + inner: Viewport, + marker: ::std::marker::PhantomData<&'a ()>, +} +impl<'a> ::std::ops::Deref for ViewportBuilder<'a> { + type Target = Viewport; + fn deref(&self) -> &Self::Target { + &self.inner + } +} +impl<'a> ::std::ops::DerefMut for ViewportBuilder<'a> { + fn deref_mut(&mut self) -> &mut Self::Target { + &mut self.inner + } +} +impl<'a> ViewportBuilder<'a> { + pub fn x(mut self, x: f32) -> ViewportBuilder<'a> { + self.inner.x = x; + self + } + pub fn y(mut self, y: f32) -> ViewportBuilder<'a> { + self.inner.y = y; + self + } + pub fn width(mut self, width: f32) -> ViewportBuilder<'a> { + self.inner.width = width; + self + } + pub fn height(mut self, height: f32) -> ViewportBuilder<'a> { + self.inner.height = height; + self + } + pub fn min_depth(mut self, min_depth: f32) -> ViewportBuilder<'a> { + self.inner.min_depth = min_depth; + self + } + pub fn max_depth(mut self, max_depth: f32) -> ViewportBuilder<'a> { + self.inner.max_depth = max_depth; + self + } + #[doc = r" Calling build will **discard** all the lifetime information. Only call this if"] + #[doc = r" necessary! Builders implement `Deref` targeting their corresponding Vulkan struct,"] + #[doc = r" so references to builders can be passed directly to Vulkan functions."] + pub fn build(self) -> Viewport { + self.inner + } +} +#[repr(C)] +#[derive(Copy, Clone, Default, Debug)] +#[doc = ""] +pub struct Rect2D { + pub offset: Offset2D, + pub extent: Extent2D, +} +impl Rect2D { + pub fn builder<'a>() -> Rect2DBuilder<'a> { + Rect2DBuilder { + inner: Rect2D::default(), + marker: ::std::marker::PhantomData, + } + } +} +#[repr(transparent)] +pub struct Rect2DBuilder<'a> { + inner: Rect2D, + marker: ::std::marker::PhantomData<&'a ()>, +} +impl<'a> ::std::ops::Deref for Rect2DBuilder<'a> { + type Target = Rect2D; + fn deref(&self) -> &Self::Target { + &self.inner + } +} +impl<'a> ::std::ops::DerefMut for Rect2DBuilder<'a> { + fn deref_mut(&mut self) -> &mut Self::Target { + &mut self.inner + } +} +impl<'a> Rect2DBuilder<'a> { + pub fn offset(mut self, offset: Offset2D) -> Rect2DBuilder<'a> { + self.inner.offset = offset; + self + } + pub fn extent(mut self, extent: Extent2D) -> Rect2DBuilder<'a> { + self.inner.extent = extent; + self + } + #[doc = r" Calling build will **discard** all the lifetime information. Only call this if"] + #[doc = r" necessary! 
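// A short usage sketch under the builder definitions above (the values are
// illustrative): nested builders compose, and `build()` returns the plain
// Vulkan struct.
fn example_scissor() -> Rect2D {
    Rect2D::builder()
        .offset(Offset2D::builder().x(0).y(0).build())
        .extent(Extent2D::builder().width(640).height(480).build())
        .build()
}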
Builders implement `Deref` targeting their corresponding Vulkan struct,"] + #[doc = r" so references to builders can be passed directly to Vulkan functions."] + pub fn build(self) -> Rect2D { + self.inner + } +} +#[repr(C)] +#[derive(Copy, Clone, Default, Debug)] +#[doc = ""] +pub struct ClearRect { + pub rect: Rect2D, + pub base_array_layer: u32, + pub layer_count: u32, +} +impl ClearRect { + pub fn builder<'a>() -> ClearRectBuilder<'a> { + ClearRectBuilder { + inner: ClearRect::default(), + marker: ::std::marker::PhantomData, + } + } +} +#[repr(transparent)] +pub struct ClearRectBuilder<'a> { + inner: ClearRect, + marker: ::std::marker::PhantomData<&'a ()>, +} +impl<'a> ::std::ops::Deref for ClearRectBuilder<'a> { + type Target = ClearRect; + fn deref(&self) -> &Self::Target { + &self.inner + } +} +impl<'a> ::std::ops::DerefMut for ClearRectBuilder<'a> { + fn deref_mut(&mut self) -> &mut Self::Target { + &mut self.inner + } +} +impl<'a> ClearRectBuilder<'a> { + pub fn rect(mut self, rect: Rect2D) -> ClearRectBuilder<'a> { + self.inner.rect = rect; + self + } + pub fn base_array_layer(mut self, base_array_layer: u32) -> ClearRectBuilder<'a> { + self.inner.base_array_layer = base_array_layer; + self + } + pub fn layer_count(mut self, layer_count: u32) -> ClearRectBuilder<'a> { + self.inner.layer_count = layer_count; + self + } + #[doc = r" Calling build will **discard** all the lifetime information. Only call this if"] + #[doc = r" necessary! Builders implement `Deref` targeting their corresponding Vulkan struct,"] + #[doc = r" so references to builders can be passed directly to Vulkan functions."] + pub fn build(self) -> ClearRect { + self.inner + } +} +#[repr(C)] +#[derive(Copy, Clone, Default, Debug)] +#[doc = ""] +pub struct ComponentMapping { + pub r: ComponentSwizzle, + pub g: ComponentSwizzle, + pub b: ComponentSwizzle, + pub a: ComponentSwizzle, +} +impl ComponentMapping { + pub fn builder<'a>() -> ComponentMappingBuilder<'a> { + ComponentMappingBuilder { + inner: ComponentMapping::default(), + marker: ::std::marker::PhantomData, + } + } +} +#[repr(transparent)] +pub struct ComponentMappingBuilder<'a> { + inner: ComponentMapping, + marker: ::std::marker::PhantomData<&'a ()>, +} +impl<'a> ::std::ops::Deref for ComponentMappingBuilder<'a> { + type Target = ComponentMapping; + fn deref(&self) -> &Self::Target { + &self.inner + } +} +impl<'a> ::std::ops::DerefMut for ComponentMappingBuilder<'a> { + fn deref_mut(&mut self) -> &mut Self::Target { + &mut self.inner + } +} +impl<'a> ComponentMappingBuilder<'a> { + pub fn r(mut self, r: ComponentSwizzle) -> ComponentMappingBuilder<'a> { + self.inner.r = r; + self + } + pub fn g(mut self, g: ComponentSwizzle) -> ComponentMappingBuilder<'a> { + self.inner.g = g; + self + } + pub fn b(mut self, b: ComponentSwizzle) -> ComponentMappingBuilder<'a> { + self.inner.b = b; + self + } + pub fn a(mut self, a: ComponentSwizzle) -> ComponentMappingBuilder<'a> { + self.inner.a = a; + self + } + #[doc = r" Calling build will **discard** all the lifetime information. Only call this if"] + #[doc = r" necessary! 
Builders implement `Deref` targeting their corresponding Vulkan struct,"] + #[doc = r" so references to builders can be passed directly to Vulkan functions."] + pub fn build(self) -> ComponentMapping { + self.inner + } +} +#[repr(C)] +#[derive(Copy, Clone)] +#[doc = ""] +pub struct PhysicalDeviceProperties { + pub api_version: u32, + pub driver_version: u32, + pub vendor_id: u32, + pub device_id: u32, + pub device_type: PhysicalDeviceType, + pub device_name: [c_char; MAX_PHYSICAL_DEVICE_NAME_SIZE], + pub pipeline_cache_uuid: [u8; UUID_SIZE], + pub limits: PhysicalDeviceLimits, + pub sparse_properties: PhysicalDeviceSparseProperties, +} +impl fmt::Debug for PhysicalDeviceProperties { + fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result { + fmt.debug_struct("PhysicalDeviceProperties") + .field("api_version", &self.api_version) + .field("driver_version", &self.driver_version) + .field("vendor_id", &self.vendor_id) + .field("device_id", &self.device_id) + .field("device_type", &self.device_type) + .field("device_name", &unsafe { + ::std::ffi::CStr::from_ptr(self.device_name.as_ptr() as *const c_char) + }) + .field("pipeline_cache_uuid", &self.pipeline_cache_uuid) + .field("limits", &self.limits) + .field("sparse_properties", &self.sparse_properties) + .finish() + } +} +impl ::std::default::Default for PhysicalDeviceProperties { + fn default() -> PhysicalDeviceProperties { + PhysicalDeviceProperties { + api_version: u32::default(), + driver_version: u32::default(), + vendor_id: u32::default(), + device_id: u32::default(), + device_type: PhysicalDeviceType::default(), + device_name: unsafe { ::std::mem::zeroed() }, + pipeline_cache_uuid: unsafe { ::std::mem::zeroed() }, + limits: PhysicalDeviceLimits::default(), + sparse_properties: PhysicalDeviceSparseProperties::default(), + } + } +} +impl PhysicalDeviceProperties { + pub fn builder<'a>() -> PhysicalDevicePropertiesBuilder<'a> { + PhysicalDevicePropertiesBuilder { + inner: PhysicalDeviceProperties::default(), + marker: ::std::marker::PhantomData, + } + } +} +#[repr(transparent)] +pub struct PhysicalDevicePropertiesBuilder<'a> { + inner: PhysicalDeviceProperties, + marker: ::std::marker::PhantomData<&'a ()>, +} +impl<'a> ::std::ops::Deref for PhysicalDevicePropertiesBuilder<'a> { + type Target = PhysicalDeviceProperties; + fn deref(&self) -> &Self::Target { + &self.inner + } +} +impl<'a> ::std::ops::DerefMut for PhysicalDevicePropertiesBuilder<'a> { + fn deref_mut(&mut self) -> &mut Self::Target { + &mut self.inner + } +} +impl<'a> PhysicalDevicePropertiesBuilder<'a> { + pub fn api_version(mut self, api_version: u32) -> PhysicalDevicePropertiesBuilder<'a> { + self.inner.api_version = api_version; + self + } + pub fn driver_version(mut self, driver_version: u32) -> PhysicalDevicePropertiesBuilder<'a> { + self.inner.driver_version = driver_version; + self + } + pub fn vendor_id(mut self, vendor_id: u32) -> PhysicalDevicePropertiesBuilder<'a> { + self.inner.vendor_id = vendor_id; + self + } + pub fn device_id(mut self, device_id: u32) -> PhysicalDevicePropertiesBuilder<'a> { + self.inner.device_id = device_id; + self + } + pub fn device_type( + mut self, + device_type: PhysicalDeviceType, + ) -> PhysicalDevicePropertiesBuilder<'a> { + self.inner.device_type = device_type; + self + } + pub fn device_name( + mut self, + device_name: [c_char; MAX_PHYSICAL_DEVICE_NAME_SIZE], + ) -> PhysicalDevicePropertiesBuilder<'a> { + self.inner.device_name = device_name; + self + } + pub fn pipeline_cache_uuid( + mut self, + pipeline_cache_uuid: [u8; 
UUID_SIZE], + ) -> PhysicalDevicePropertiesBuilder<'a> { + self.inner.pipeline_cache_uuid = pipeline_cache_uuid; + self + } + pub fn limits(mut self, limits: PhysicalDeviceLimits) -> PhysicalDevicePropertiesBuilder<'a> { + self.inner.limits = limits; + self + } + pub fn sparse_properties( + mut self, + sparse_properties: PhysicalDeviceSparseProperties, + ) -> PhysicalDevicePropertiesBuilder<'a> { + self.inner.sparse_properties = sparse_properties; + self + } + #[doc = r" Calling build will **discard** all the lifetime information. Only call this if"] + #[doc = r" necessary! Builders implement `Deref` targeting their corresponding Vulkan struct,"] + #[doc = r" so references to builders can be passed directly to Vulkan functions."] + pub fn build(self) -> PhysicalDeviceProperties { + self.inner + } +} +#[repr(C)] +#[derive(Copy, Clone)] +#[doc = ""] +pub struct ExtensionProperties { + pub extension_name: [c_char; MAX_EXTENSION_NAME_SIZE], + pub spec_version: u32, +} +impl fmt::Debug for ExtensionProperties { + fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result { + fmt.debug_struct("ExtensionProperties") + .field("extension_name", &unsafe { + ::std::ffi::CStr::from_ptr(self.extension_name.as_ptr() as *const c_char) + }) + .field("spec_version", &self.spec_version) + .finish() + } +} +impl ::std::default::Default for ExtensionProperties { + fn default() -> ExtensionProperties { + ExtensionProperties { + extension_name: unsafe { ::std::mem::zeroed() }, + spec_version: u32::default(), + } + } +} +impl ExtensionProperties { + pub fn builder<'a>() -> ExtensionPropertiesBuilder<'a> { + ExtensionPropertiesBuilder { + inner: ExtensionProperties::default(), + marker: ::std::marker::PhantomData, + } + } +} +#[repr(transparent)] +pub struct ExtensionPropertiesBuilder<'a> { + inner: ExtensionProperties, + marker: ::std::marker::PhantomData<&'a ()>, +} +impl<'a> ::std::ops::Deref for ExtensionPropertiesBuilder<'a> { + type Target = ExtensionProperties; + fn deref(&self) -> &Self::Target { + &self.inner + } +} +impl<'a> ::std::ops::DerefMut for ExtensionPropertiesBuilder<'a> { + fn deref_mut(&mut self) -> &mut Self::Target { + &mut self.inner + } +} +impl<'a> ExtensionPropertiesBuilder<'a> { + pub fn extension_name( + mut self, + extension_name: [c_char; MAX_EXTENSION_NAME_SIZE], + ) -> ExtensionPropertiesBuilder<'a> { + self.inner.extension_name = extension_name; + self + } + pub fn spec_version(mut self, spec_version: u32) -> ExtensionPropertiesBuilder<'a> { + self.inner.spec_version = spec_version; + self + } + #[doc = r" Calling build will **discard** all the lifetime information. Only call this if"] + #[doc = r" necessary! 
Builders implement `Deref` targeting their corresponding Vulkan struct,"] + #[doc = r" so references to builders can be passed directly to Vulkan functions."] + pub fn build(self) -> ExtensionProperties { + self.inner + } +} +#[repr(C)] +#[derive(Copy, Clone)] +#[doc = ""] +pub struct LayerProperties { + pub layer_name: [c_char; MAX_EXTENSION_NAME_SIZE], + pub spec_version: u32, + pub implementation_version: u32, + pub description: [c_char; MAX_DESCRIPTION_SIZE], +} +impl fmt::Debug for LayerProperties { + fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result { + fmt.debug_struct("LayerProperties") + .field("layer_name", &unsafe { + ::std::ffi::CStr::from_ptr(self.layer_name.as_ptr() as *const c_char) + }) + .field("spec_version", &self.spec_version) + .field("implementation_version", &self.implementation_version) + .field("description", &unsafe { + ::std::ffi::CStr::from_ptr(self.description.as_ptr() as *const c_char) + }) + .finish() + } +} +impl ::std::default::Default for LayerProperties { + fn default() -> LayerProperties { + LayerProperties { + layer_name: unsafe { ::std::mem::zeroed() }, + spec_version: u32::default(), + implementation_version: u32::default(), + description: unsafe { ::std::mem::zeroed() }, + } + } +} +impl LayerProperties { + pub fn builder<'a>() -> LayerPropertiesBuilder<'a> { + LayerPropertiesBuilder { + inner: LayerProperties::default(), + marker: ::std::marker::PhantomData, + } + } +} +#[repr(transparent)] +pub struct LayerPropertiesBuilder<'a> { + inner: LayerProperties, + marker: ::std::marker::PhantomData<&'a ()>, +} +impl<'a> ::std::ops::Deref for LayerPropertiesBuilder<'a> { + type Target = LayerProperties; + fn deref(&self) -> &Self::Target { + &self.inner + } +} +impl<'a> ::std::ops::DerefMut for LayerPropertiesBuilder<'a> { + fn deref_mut(&mut self) -> &mut Self::Target { + &mut self.inner + } +} +impl<'a> LayerPropertiesBuilder<'a> { + pub fn layer_name( + mut self, + layer_name: [c_char; MAX_EXTENSION_NAME_SIZE], + ) -> LayerPropertiesBuilder<'a> { + self.inner.layer_name = layer_name; + self + } + pub fn spec_version(mut self, spec_version: u32) -> LayerPropertiesBuilder<'a> { + self.inner.spec_version = spec_version; + self + } + pub fn implementation_version( + mut self, + implementation_version: u32, + ) -> LayerPropertiesBuilder<'a> { + self.inner.implementation_version = implementation_version; + self + } + pub fn description( + mut self, + description: [c_char; MAX_DESCRIPTION_SIZE], + ) -> LayerPropertiesBuilder<'a> { + self.inner.description = description; + self + } + #[doc = r" Calling build will **discard** all the lifetime information. Only call this if"] + #[doc = r" necessary! 
Builders implement `Deref` targeting their corresponding Vulkan struct,"] + #[doc = r" so references to builders can be passed directly to Vulkan functions."] + pub fn build(self) -> LayerProperties { + self.inner + } +} +#[repr(C)] +#[derive(Copy, Clone, Debug)] +#[doc = ""] +pub struct ApplicationInfo { + pub s_type: StructureType, + pub p_next: *const c_void, + pub p_application_name: *const c_char, + pub application_version: u32, + pub p_engine_name: *const c_char, + pub engine_version: u32, + pub api_version: u32, +} +impl ::std::default::Default for ApplicationInfo { + fn default() -> ApplicationInfo { + ApplicationInfo { + s_type: StructureType::APPLICATION_INFO, + p_next: ::std::ptr::null(), + p_application_name: ::std::ptr::null(), + application_version: u32::default(), + p_engine_name: ::std::ptr::null(), + engine_version: u32::default(), + api_version: u32::default(), + } + } +} +impl ApplicationInfo { + pub fn builder<'a>() -> ApplicationInfoBuilder<'a> { + ApplicationInfoBuilder { + inner: ApplicationInfo::default(), + marker: ::std::marker::PhantomData, + } + } +} +#[repr(transparent)] +pub struct ApplicationInfoBuilder<'a> { + inner: ApplicationInfo, + marker: ::std::marker::PhantomData<&'a ()>, +} +pub unsafe trait ExtendsApplicationInfo {} +impl<'a> ::std::ops::Deref for ApplicationInfoBuilder<'a> { + type Target = ApplicationInfo; + fn deref(&self) -> &Self::Target { + &self.inner + } +} +impl<'a> ::std::ops::DerefMut for ApplicationInfoBuilder<'a> { + fn deref_mut(&mut self) -> &mut Self::Target { + &mut self.inner + } +} +impl<'a> ApplicationInfoBuilder<'a> { + pub fn application_name( + mut self, + application_name: &'a ::std::ffi::CStr, + ) -> ApplicationInfoBuilder<'a> { + self.inner.p_application_name = application_name.as_ptr(); + self + } + pub fn application_version(mut self, application_version: u32) -> ApplicationInfoBuilder<'a> { + self.inner.application_version = application_version; + self + } + pub fn engine_name(mut self, engine_name: &'a ::std::ffi::CStr) -> ApplicationInfoBuilder<'a> { + self.inner.p_engine_name = engine_name.as_ptr(); + self + } + pub fn engine_version(mut self, engine_version: u32) -> ApplicationInfoBuilder<'a> { + self.inner.engine_version = engine_version; + self + } + pub fn api_version(mut self, api_version: u32) -> ApplicationInfoBuilder<'a> { + self.inner.api_version = api_version; + self + } + #[doc = r" Prepends the given extension struct between the root and the first pointer. This"] + #[doc = r" method only exists on structs that can be passed to a function directly. Only"] + #[doc = r" valid extension structs can be pushed into the chain."] + #[doc = r" If the chain looks like `A -> B -> C`, and you call `builder.push_next(&mut D)`, then the"] + #[doc = r" chain will look like `A -> D -> B -> C`."] + pub fn push_next<T: ExtendsApplicationInfo>( + mut self, + next: &'a mut T, + ) -> ApplicationInfoBuilder<'a> { + unsafe { + let next_ptr = next as *mut T as *mut BaseOutStructure; + let last_next = ptr_chain_iter(next).last().unwrap(); + (*last_next).p_next = self.inner.p_next as _; + self.inner.p_next = next_ptr as _; + } + self + } + #[doc = r" Calling build will **discard** all the lifetime information. Only call this if"] + #[doc = r" necessary! 
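// A sketch of the `ApplicationInfo` builder above. The CStr-based setters store
// raw pointers, which is what the 'a lifetime tracks until `build()` discards it;
// the version numbers are illustrative.
fn example_app_info(name: &::std::ffi::CStr) -> ApplicationInfo {
    // The returned struct still points at `name`, so the caller must keep it alive.
    ApplicationInfo::builder()
        .application_name(name)
        .application_version(1)
        .engine_name(name)
        .engine_version(1)
        .build()
}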
Builders implement `Deref` targeting their corresponding Vulkan struct,"] + #[doc = r" so references to builders can be passed directly to Vulkan functions."] + pub fn build(self) -> ApplicationInfo { + self.inner + } +} +#[repr(C)] +#[derive(Copy, Clone)] +#[doc = ""] +pub struct AllocationCallbacks { + pub p_user_data: *mut c_void, + pub pfn_allocation: PFN_vkAllocationFunction, + pub pfn_reallocation: PFN_vkReallocationFunction, + pub pfn_free: PFN_vkFreeFunction, + pub pfn_internal_allocation: PFN_vkInternalAllocationNotification, + pub pfn_internal_free: PFN_vkInternalFreeNotification, +} +impl fmt::Debug for AllocationCallbacks { + fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result { + fmt.debug_struct("AllocationCallbacks") + .field("p_user_data", &self.p_user_data) + .field( + "pfn_allocation", + &(self.pfn_allocation.map(|x| x as *const ())), + ) + .field( + "pfn_reallocation", + &(self.pfn_reallocation.map(|x| x as *const ())), + ) + .field("pfn_free", &(self.pfn_free.map(|x| x as *const ()))) + .field( + "pfn_internal_allocation", + &(self.pfn_internal_allocation.map(|x| x as *const ())), + ) + .field( + "pfn_internal_free", + &(self.pfn_internal_free.map(|x| x as *const ())), + ) + .finish() + } +} +impl ::std::default::Default for AllocationCallbacks { + fn default() -> AllocationCallbacks { + AllocationCallbacks { + p_user_data: ::std::ptr::null_mut(), + pfn_allocation: PFN_vkAllocationFunction::default(), + pfn_reallocation: PFN_vkReallocationFunction::default(), + pfn_free: PFN_vkFreeFunction::default(), + pfn_internal_allocation: PFN_vkInternalAllocationNotification::default(), + pfn_internal_free: PFN_vkInternalFreeNotification::default(), + } + } +} +impl AllocationCallbacks { + pub fn builder<'a>() -> AllocationCallbacksBuilder<'a> { + AllocationCallbacksBuilder { + inner: AllocationCallbacks::default(), + marker: ::std::marker::PhantomData, + } + } +} +#[repr(transparent)] +pub struct AllocationCallbacksBuilder<'a> { + inner: AllocationCallbacks, + marker: ::std::marker::PhantomData<&'a ()>, +} +impl<'a> ::std::ops::Deref for AllocationCallbacksBuilder<'a> { + type Target = AllocationCallbacks; + fn deref(&self) -> &Self::Target { + &self.inner + } +} +impl<'a> ::std::ops::DerefMut for AllocationCallbacksBuilder<'a> { + fn deref_mut(&mut self) -> &mut Self::Target { + &mut self.inner + } +} +impl<'a> AllocationCallbacksBuilder<'a> { + pub fn user_data(mut self, user_data: *mut c_void) -> AllocationCallbacksBuilder<'a> { + self.inner.p_user_data = user_data; + self + } + pub fn pfn_allocation( + mut self, + pfn_allocation: PFN_vkAllocationFunction, + ) -> AllocationCallbacksBuilder<'a> { + self.inner.pfn_allocation = pfn_allocation; + self + } + pub fn pfn_reallocation( + mut self, + pfn_reallocation: PFN_vkReallocationFunction, + ) -> AllocationCallbacksBuilder<'a> { + self.inner.pfn_reallocation = pfn_reallocation; + self + } + pub fn pfn_free(mut self, pfn_free: PFN_vkFreeFunction) -> AllocationCallbacksBuilder<'a> { + self.inner.pfn_free = pfn_free; + self + } + pub fn pfn_internal_allocation( + mut self, + pfn_internal_allocation: PFN_vkInternalAllocationNotification, + ) -> AllocationCallbacksBuilder<'a> { + self.inner.pfn_internal_allocation = pfn_internal_allocation; + self + } + pub fn pfn_internal_free( + mut self, + pfn_internal_free: PFN_vkInternalFreeNotification, + ) -> AllocationCallbacksBuilder<'a> { + self.inner.pfn_internal_free = pfn_internal_free; + self + } + #[doc = r" Calling build will **discard** all the lifetime information. 
Only call this if"] + #[doc = r" necessary! Builders implement `Deref` targeting their corresponding Vulkan struct,"] + #[doc = r" so references to builders can be passed directly to Vulkan functions."] + pub fn build(self) -> AllocationCallbacks { + self.inner + } +} +#[repr(C)] +#[derive(Copy, Clone, Debug)] +#[doc = ""] +pub struct DeviceQueueCreateInfo { + pub s_type: StructureType, + pub p_next: *const c_void, + pub flags: DeviceQueueCreateFlags, + pub queue_family_index: u32, + pub queue_count: u32, + pub p_queue_priorities: *const f32, +} +impl ::std::default::Default for DeviceQueueCreateInfo { + fn default() -> DeviceQueueCreateInfo { + DeviceQueueCreateInfo { + s_type: StructureType::DEVICE_QUEUE_CREATE_INFO, + p_next: ::std::ptr::null(), + flags: DeviceQueueCreateFlags::default(), + queue_family_index: u32::default(), + queue_count: u32::default(), + p_queue_priorities: ::std::ptr::null(), + } + } +} +impl DeviceQueueCreateInfo { + pub fn builder<'a>() -> DeviceQueueCreateInfoBuilder<'a> { + DeviceQueueCreateInfoBuilder { + inner: DeviceQueueCreateInfo::default(), + marker: ::std::marker::PhantomData, + } + } +} +#[repr(transparent)] +pub struct DeviceQueueCreateInfoBuilder<'a> { + inner: DeviceQueueCreateInfo, + marker: ::std::marker::PhantomData<&'a ()>, +} +pub unsafe trait ExtendsDeviceQueueCreateInfo {} +impl<'a> ::std::ops::Deref for DeviceQueueCreateInfoBuilder<'a> { + type Target = DeviceQueueCreateInfo; + fn deref(&self) -> &Self::Target { + &self.inner + } +} +impl<'a> ::std::ops::DerefMut for DeviceQueueCreateInfoBuilder<'a> { + fn deref_mut(&mut self) -> &mut Self::Target { + &mut self.inner + } +} +impl<'a> DeviceQueueCreateInfoBuilder<'a> { + pub fn flags(mut self, flags: DeviceQueueCreateFlags) -> DeviceQueueCreateInfoBuilder<'a> { + self.inner.flags = flags; + self + } + pub fn queue_family_index( + mut self, + queue_family_index: u32, + ) -> DeviceQueueCreateInfoBuilder<'a> { + self.inner.queue_family_index = queue_family_index; + self + } + pub fn queue_priorities( + mut self, + queue_priorities: &'a [f32], + ) -> DeviceQueueCreateInfoBuilder<'a> { + self.inner.queue_count = queue_priorities.len() as _; + self.inner.p_queue_priorities = queue_priorities.as_ptr(); + self + } + #[doc = r" Prepends the given extension struct between the root and the first pointer. This"] + #[doc = r" method only exists on structs that can be passed to a function directly. Only"] + #[doc = r" valid extension structs can be pushed into the chain."] + #[doc = r" If the chain looks like `A -> B -> C`, and you call `builder.push_next(&mut D)`, then the"] + #[doc = r" chain will look like `A -> D -> B -> C`."] + pub fn push_next( + mut self, + next: &'a mut T, + ) -> DeviceQueueCreateInfoBuilder<'a> { + unsafe { + let next_ptr = next as *mut T as *mut BaseOutStructure; + let last_next = ptr_chain_iter(next).last().unwrap(); + (*last_next).p_next = self.inner.p_next as _; + self.inner.p_next = next_ptr as _; + } + self + } + #[doc = r" Calling build will **discard** all the lifetime information. Only call this if"] + #[doc = r" necessary! 
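// A sketch of the slice setter above: `queue_priorities` fills both `queue_count`
// and `p_queue_priorities` from one slice, so the count and pointer cannot drift
// apart. The queue family index is an illustrative assumption.
fn example_queue_info(priorities: &[f32]) -> DeviceQueueCreateInfoBuilder<'_> {
    DeviceQueueCreateInfo::builder()
        .queue_family_index(0)
        .queue_priorities(priorities) // also sets queue_count = priorities.len()
}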
Builders implement `Deref` targeting their corresponding Vulkan struct,"] + #[doc = r" so references to builders can be passed directly to Vulkan functions."] + pub fn build(self) -> DeviceQueueCreateInfo { + self.inner + } +} +#[repr(C)] +#[derive(Copy, Clone, Debug)] +#[doc = ""] +pub struct DeviceCreateInfo { + pub s_type: StructureType, + pub p_next: *const c_void, + pub flags: DeviceCreateFlags, + pub queue_create_info_count: u32, + pub p_queue_create_infos: *const DeviceQueueCreateInfo, + pub enabled_layer_count: u32, + pub pp_enabled_layer_names: *const *const c_char, + pub enabled_extension_count: u32, + pub pp_enabled_extension_names: *const *const c_char, + pub p_enabled_features: *const PhysicalDeviceFeatures, +} +impl ::std::default::Default for DeviceCreateInfo { + fn default() -> DeviceCreateInfo { + DeviceCreateInfo { + s_type: StructureType::DEVICE_CREATE_INFO, + p_next: ::std::ptr::null(), + flags: DeviceCreateFlags::default(), + queue_create_info_count: u32::default(), + p_queue_create_infos: ::std::ptr::null(), + enabled_layer_count: u32::default(), + pp_enabled_layer_names: ::std::ptr::null(), + enabled_extension_count: u32::default(), + pp_enabled_extension_names: ::std::ptr::null(), + p_enabled_features: ::std::ptr::null(), + } + } +} +impl DeviceCreateInfo { + pub fn builder<'a>() -> DeviceCreateInfoBuilder<'a> { + DeviceCreateInfoBuilder { + inner: DeviceCreateInfo::default(), + marker: ::std::marker::PhantomData, + } + } +} +#[repr(transparent)] +pub struct DeviceCreateInfoBuilder<'a> { + inner: DeviceCreateInfo, + marker: ::std::marker::PhantomData<&'a ()>, +} +pub unsafe trait ExtendsDeviceCreateInfo {} +impl<'a> ::std::ops::Deref for DeviceCreateInfoBuilder<'a> { + type Target = DeviceCreateInfo; + fn deref(&self) -> &Self::Target { + &self.inner + } +} +impl<'a> ::std::ops::DerefMut for DeviceCreateInfoBuilder<'a> { + fn deref_mut(&mut self) -> &mut Self::Target { + &mut self.inner + } +} +impl<'a> DeviceCreateInfoBuilder<'a> { + pub fn flags(mut self, flags: DeviceCreateFlags) -> DeviceCreateInfoBuilder<'a> { + self.inner.flags = flags; + self + } + pub fn queue_create_infos( + mut self, + queue_create_infos: &'a [DeviceQueueCreateInfo], + ) -> DeviceCreateInfoBuilder<'a> { + self.inner.queue_create_info_count = queue_create_infos.len() as _; + self.inner.p_queue_create_infos = queue_create_infos.as_ptr(); + self + } + pub fn enabled_layer_names( + mut self, + enabled_layer_names: &'a [*const c_char], + ) -> DeviceCreateInfoBuilder<'a> { + self.inner.pp_enabled_layer_names = enabled_layer_names.as_ptr(); + self.inner.enabled_layer_count = enabled_layer_names.len() as _; + self + } + pub fn enabled_extension_names( + mut self, + enabled_extension_names: &'a [*const c_char], + ) -> DeviceCreateInfoBuilder<'a> { + self.inner.pp_enabled_extension_names = enabled_extension_names.as_ptr(); + self.inner.enabled_extension_count = enabled_extension_names.len() as _; + self + } + pub fn enabled_features( + mut self, + enabled_features: &'a PhysicalDeviceFeatures, + ) -> DeviceCreateInfoBuilder<'a> { + self.inner.p_enabled_features = enabled_features; + self + } + #[doc = r" Prepends the given extension struct between the root and the first pointer. This"] + #[doc = r" method only exists on structs that can be passed to a function directly. 
Only"] + #[doc = r" valid extension structs can be pushed into the chain."] + #[doc = r" If the chain looks like `A -> B -> C`, and you call `builder.push_next(&mut D)`, then the"] + #[doc = r" chain will look like `A -> D -> B -> C`."] + pub fn push_next( + mut self, + next: &'a mut T, + ) -> DeviceCreateInfoBuilder<'a> { + unsafe { + let next_ptr = next as *mut T as *mut BaseOutStructure; + let last_next = ptr_chain_iter(next).last().unwrap(); + (*last_next).p_next = self.inner.p_next as _; + self.inner.p_next = next_ptr as _; + } + self + } + #[doc = r" Calling build will **discard** all the lifetime information. Only call this if"] + #[doc = r" necessary! Builders implement `Deref` targeting their corresponding Vulkan struct,"] + #[doc = r" so references to builders can be passed directly to Vulkan functions."] + pub fn build(self) -> DeviceCreateInfo { + self.inner + } +} +#[repr(C)] +#[derive(Copy, Clone, Debug)] +#[doc = ""] +pub struct InstanceCreateInfo { + pub s_type: StructureType, + pub p_next: *const c_void, + pub flags: InstanceCreateFlags, + pub p_application_info: *const ApplicationInfo, + pub enabled_layer_count: u32, + pub pp_enabled_layer_names: *const *const c_char, + pub enabled_extension_count: u32, + pub pp_enabled_extension_names: *const *const c_char, +} +impl ::std::default::Default for InstanceCreateInfo { + fn default() -> InstanceCreateInfo { + InstanceCreateInfo { + s_type: StructureType::INSTANCE_CREATE_INFO, + p_next: ::std::ptr::null(), + flags: InstanceCreateFlags::default(), + p_application_info: ::std::ptr::null(), + enabled_layer_count: u32::default(), + pp_enabled_layer_names: ::std::ptr::null(), + enabled_extension_count: u32::default(), + pp_enabled_extension_names: ::std::ptr::null(), + } + } +} +impl InstanceCreateInfo { + pub fn builder<'a>() -> InstanceCreateInfoBuilder<'a> { + InstanceCreateInfoBuilder { + inner: InstanceCreateInfo::default(), + marker: ::std::marker::PhantomData, + } + } +} +#[repr(transparent)] +pub struct InstanceCreateInfoBuilder<'a> { + inner: InstanceCreateInfo, + marker: ::std::marker::PhantomData<&'a ()>, +} +pub unsafe trait ExtendsInstanceCreateInfo {} +impl<'a> ::std::ops::Deref for InstanceCreateInfoBuilder<'a> { + type Target = InstanceCreateInfo; + fn deref(&self) -> &Self::Target { + &self.inner + } +} +impl<'a> ::std::ops::DerefMut for InstanceCreateInfoBuilder<'a> { + fn deref_mut(&mut self) -> &mut Self::Target { + &mut self.inner + } +} +impl<'a> InstanceCreateInfoBuilder<'a> { + pub fn flags(mut self, flags: InstanceCreateFlags) -> InstanceCreateInfoBuilder<'a> { + self.inner.flags = flags; + self + } + pub fn application_info( + mut self, + application_info: &'a ApplicationInfo, + ) -> InstanceCreateInfoBuilder<'a> { + self.inner.p_application_info = application_info; + self + } + pub fn enabled_layer_names( + mut self, + enabled_layer_names: &'a [*const c_char], + ) -> InstanceCreateInfoBuilder<'a> { + self.inner.pp_enabled_layer_names = enabled_layer_names.as_ptr(); + self.inner.enabled_layer_count = enabled_layer_names.len() as _; + self + } + pub fn enabled_extension_names( + mut self, + enabled_extension_names: &'a [*const c_char], + ) -> InstanceCreateInfoBuilder<'a> { + self.inner.pp_enabled_extension_names = enabled_extension_names.as_ptr(); + self.inner.enabled_extension_count = enabled_extension_names.len() as _; + self + } + #[doc = r" Prepends the given extension struct between the root and the first pointer. 
This"] + #[doc = r" method only exists on structs that can be passed to a function directly. Only"] + #[doc = r" valid extension structs can be pushed into the chain."] + #[doc = r" If the chain looks like `A -> B -> C`, and you call `builder.push_next(&mut D)`, then the"] + #[doc = r" chain will look like `A -> D -> B -> C`."] + pub fn push_next( + mut self, + next: &'a mut T, + ) -> InstanceCreateInfoBuilder<'a> { + unsafe { + let next_ptr = next as *mut T as *mut BaseOutStructure; + let last_next = ptr_chain_iter(next).last().unwrap(); + (*last_next).p_next = self.inner.p_next as _; + self.inner.p_next = next_ptr as _; + } + self + } + #[doc = r" Calling build will **discard** all the lifetime information. Only call this if"] + #[doc = r" necessary! Builders implement `Deref` targeting their corresponding Vulkan struct,"] + #[doc = r" so references to builders can be passed directly to Vulkan functions."] + pub fn build(self) -> InstanceCreateInfo { + self.inner + } +} +#[repr(C)] +#[derive(Copy, Clone, Default, Debug)] +#[doc = ""] +pub struct QueueFamilyProperties { + pub queue_flags: QueueFlags, + pub queue_count: u32, + pub timestamp_valid_bits: u32, + pub min_image_transfer_granularity: Extent3D, +} +impl QueueFamilyProperties { + pub fn builder<'a>() -> QueueFamilyPropertiesBuilder<'a> { + QueueFamilyPropertiesBuilder { + inner: QueueFamilyProperties::default(), + marker: ::std::marker::PhantomData, + } + } +} +#[repr(transparent)] +pub struct QueueFamilyPropertiesBuilder<'a> { + inner: QueueFamilyProperties, + marker: ::std::marker::PhantomData<&'a ()>, +} +impl<'a> ::std::ops::Deref for QueueFamilyPropertiesBuilder<'a> { + type Target = QueueFamilyProperties; + fn deref(&self) -> &Self::Target { + &self.inner + } +} +impl<'a> ::std::ops::DerefMut for QueueFamilyPropertiesBuilder<'a> { + fn deref_mut(&mut self) -> &mut Self::Target { + &mut self.inner + } +} +impl<'a> QueueFamilyPropertiesBuilder<'a> { + pub fn queue_flags(mut self, queue_flags: QueueFlags) -> QueueFamilyPropertiesBuilder<'a> { + self.inner.queue_flags = queue_flags; + self + } + pub fn queue_count(mut self, queue_count: u32) -> QueueFamilyPropertiesBuilder<'a> { + self.inner.queue_count = queue_count; + self + } + pub fn timestamp_valid_bits( + mut self, + timestamp_valid_bits: u32, + ) -> QueueFamilyPropertiesBuilder<'a> { + self.inner.timestamp_valid_bits = timestamp_valid_bits; + self + } + pub fn min_image_transfer_granularity( + mut self, + min_image_transfer_granularity: Extent3D, + ) -> QueueFamilyPropertiesBuilder<'a> { + self.inner.min_image_transfer_granularity = min_image_transfer_granularity; + self + } + #[doc = r" Calling build will **discard** all the lifetime information. Only call this if"] + #[doc = r" necessary! 
Builders implement `Deref` targeting their corresponding Vulkan struct,"] + #[doc = r" so references to builders can be passed directly to Vulkan functions."] + pub fn build(self) -> QueueFamilyProperties { + self.inner + } +} +#[repr(C)] +#[derive(Copy, Clone, Debug)] +#[doc = ""] +pub struct PhysicalDeviceMemoryProperties { + pub memory_type_count: u32, + pub memory_types: [MemoryType; MAX_MEMORY_TYPES], + pub memory_heap_count: u32, + pub memory_heaps: [MemoryHeap; MAX_MEMORY_HEAPS], +} +impl ::std::default::Default for PhysicalDeviceMemoryProperties { + fn default() -> PhysicalDeviceMemoryProperties { + PhysicalDeviceMemoryProperties { + memory_type_count: u32::default(), + memory_types: unsafe { ::std::mem::zeroed() }, + memory_heap_count: u32::default(), + memory_heaps: unsafe { ::std::mem::zeroed() }, + } + } +} +impl PhysicalDeviceMemoryProperties { + pub fn builder<'a>() -> PhysicalDeviceMemoryPropertiesBuilder<'a> { + PhysicalDeviceMemoryPropertiesBuilder { + inner: PhysicalDeviceMemoryProperties::default(), + marker: ::std::marker::PhantomData, + } + } +} +#[repr(transparent)] +pub struct PhysicalDeviceMemoryPropertiesBuilder<'a> { + inner: PhysicalDeviceMemoryProperties, + marker: ::std::marker::PhantomData<&'a ()>, +} +impl<'a> ::std::ops::Deref for PhysicalDeviceMemoryPropertiesBuilder<'a> { + type Target = PhysicalDeviceMemoryProperties; + fn deref(&self) -> &Self::Target { + &self.inner + } +} +impl<'a> ::std::ops::DerefMut for PhysicalDeviceMemoryPropertiesBuilder<'a> { + fn deref_mut(&mut self) -> &mut Self::Target { + &mut self.inner + } +} +impl<'a> PhysicalDeviceMemoryPropertiesBuilder<'a> { + pub fn memory_type_count( + mut self, + memory_type_count: u32, + ) -> PhysicalDeviceMemoryPropertiesBuilder<'a> { + self.inner.memory_type_count = memory_type_count; + self + } + pub fn memory_types( + mut self, + memory_types: [MemoryType; MAX_MEMORY_TYPES], + ) -> PhysicalDeviceMemoryPropertiesBuilder<'a> { + self.inner.memory_types = memory_types; + self + } + pub fn memory_heap_count( + mut self, + memory_heap_count: u32, + ) -> PhysicalDeviceMemoryPropertiesBuilder<'a> { + self.inner.memory_heap_count = memory_heap_count; + self + } + pub fn memory_heaps( + mut self, + memory_heaps: [MemoryHeap; MAX_MEMORY_HEAPS], + ) -> PhysicalDeviceMemoryPropertiesBuilder<'a> { + self.inner.memory_heaps = memory_heaps; + self + } + #[doc = r" Calling build will **discard** all the lifetime information. Only call this if"] + #[doc = r" necessary! 
Builders implement `Deref` targeting their corresponding Vulkan struct,"] + #[doc = r" so references to builders can be passed directly to Vulkan functions."] + pub fn build(self) -> PhysicalDeviceMemoryProperties { + self.inner + } +} +#[repr(C)] +#[derive(Copy, Clone, Debug)] +#[doc = ""] +pub struct MemoryAllocateInfo { + pub s_type: StructureType, + pub p_next: *const c_void, + pub allocation_size: DeviceSize, + pub memory_type_index: u32, +} +impl ::std::default::Default for MemoryAllocateInfo { + fn default() -> MemoryAllocateInfo { + MemoryAllocateInfo { + s_type: StructureType::MEMORY_ALLOCATE_INFO, + p_next: ::std::ptr::null(), + allocation_size: DeviceSize::default(), + memory_type_index: u32::default(), + } + } +} +impl MemoryAllocateInfo { + pub fn builder<'a>() -> MemoryAllocateInfoBuilder<'a> { + MemoryAllocateInfoBuilder { + inner: MemoryAllocateInfo::default(), + marker: ::std::marker::PhantomData, + } + } +} +#[repr(transparent)] +pub struct MemoryAllocateInfoBuilder<'a> { + inner: MemoryAllocateInfo, + marker: ::std::marker::PhantomData<&'a ()>, +} +pub unsafe trait ExtendsMemoryAllocateInfo {} +impl<'a> ::std::ops::Deref for MemoryAllocateInfoBuilder<'a> { + type Target = MemoryAllocateInfo; + fn deref(&self) -> &Self::Target { + &self.inner + } +} +impl<'a> ::std::ops::DerefMut for MemoryAllocateInfoBuilder<'a> { + fn deref_mut(&mut self) -> &mut Self::Target { + &mut self.inner + } +} +impl<'a> MemoryAllocateInfoBuilder<'a> { + pub fn allocation_size(mut self, allocation_size: DeviceSize) -> MemoryAllocateInfoBuilder<'a> { + self.inner.allocation_size = allocation_size; + self + } + pub fn memory_type_index(mut self, memory_type_index: u32) -> MemoryAllocateInfoBuilder<'a> { + self.inner.memory_type_index = memory_type_index; + self + } + #[doc = r" Prepends the given extension struct between the root and the first pointer. This"] + #[doc = r" method only exists on structs that can be passed to a function directly. Only"] + #[doc = r" valid extension structs can be pushed into the chain."] + #[doc = r" If the chain looks like `A -> B -> C`, and you call `builder.push_next(&mut D)`, then the"] + #[doc = r" chain will look like `A -> D -> B -> C`."] + pub fn push_next<T: ExtendsMemoryAllocateInfo>( + mut self, + next: &'a mut T, + ) -> MemoryAllocateInfoBuilder<'a> { + unsafe { + let next_ptr = next as *mut T as *mut BaseOutStructure; + let last_next = ptr_chain_iter(next).last().unwrap(); + (*last_next).p_next = self.inner.p_next as _; + self.inner.p_next = next_ptr as _; + } + self + } + #[doc = r" Calling build will **discard** all the lifetime information. Only call this if"] + #[doc = r" necessary! 
Builders implement `Deref` targeting their corresponding Vulkan struct,"] + #[doc = r" so references to builders can be passed directly to Vulkan functions."] + pub fn build(self) -> MemoryAllocateInfo { + self.inner + } +} +#[repr(C)] +#[derive(Copy, Clone, Default, Debug)] +#[doc = ""] +pub struct MemoryRequirements { + pub size: DeviceSize, + pub alignment: DeviceSize, + pub memory_type_bits: u32, +} +impl MemoryRequirements { + pub fn builder<'a>() -> MemoryRequirementsBuilder<'a> { + MemoryRequirementsBuilder { + inner: MemoryRequirements::default(), + marker: ::std::marker::PhantomData, + } + } +} +#[repr(transparent)] +pub struct MemoryRequirementsBuilder<'a> { + inner: MemoryRequirements, + marker: ::std::marker::PhantomData<&'a ()>, +} +impl<'a> ::std::ops::Deref for MemoryRequirementsBuilder<'a> { + type Target = MemoryRequirements; + fn deref(&self) -> &Self::Target { + &self.inner + } +} +impl<'a> ::std::ops::DerefMut for MemoryRequirementsBuilder<'a> { + fn deref_mut(&mut self) -> &mut Self::Target { + &mut self.inner + } +} +impl<'a> MemoryRequirementsBuilder<'a> { + pub fn size(mut self, size: DeviceSize) -> MemoryRequirementsBuilder<'a> { + self.inner.size = size; + self + } + pub fn alignment(mut self, alignment: DeviceSize) -> MemoryRequirementsBuilder<'a> { + self.inner.alignment = alignment; + self + } + pub fn memory_type_bits(mut self, memory_type_bits: u32) -> MemoryRequirementsBuilder<'a> { + self.inner.memory_type_bits = memory_type_bits; + self + } + #[doc = r" Calling build will **discard** all the lifetime information. Only call this if"] + #[doc = r" necessary! Builders implement `Deref` targeting their corresponding Vulkan struct,"] + #[doc = r" so references to builders can be passed directly to Vulkan functions."] + pub fn build(self) -> MemoryRequirements { + self.inner + } +} +#[repr(C)] +#[derive(Copy, Clone, Default, Debug)] +#[doc = ""] +pub struct SparseImageFormatProperties { + pub aspect_mask: ImageAspectFlags, + pub image_granularity: Extent3D, + pub flags: SparseImageFormatFlags, +} +impl SparseImageFormatProperties { + pub fn builder<'a>() -> SparseImageFormatPropertiesBuilder<'a> { + SparseImageFormatPropertiesBuilder { + inner: SparseImageFormatProperties::default(), + marker: ::std::marker::PhantomData, + } + } +} +#[repr(transparent)] +pub struct SparseImageFormatPropertiesBuilder<'a> { + inner: SparseImageFormatProperties, + marker: ::std::marker::PhantomData<&'a ()>, +} +impl<'a> ::std::ops::Deref for SparseImageFormatPropertiesBuilder<'a> { + type Target = SparseImageFormatProperties; + fn deref(&self) -> &Self::Target { + &self.inner + } +} +impl<'a> ::std::ops::DerefMut for SparseImageFormatPropertiesBuilder<'a> { + fn deref_mut(&mut self) -> &mut Self::Target { + &mut self.inner + } +} +impl<'a> SparseImageFormatPropertiesBuilder<'a> { + pub fn aspect_mask( + mut self, + aspect_mask: ImageAspectFlags, + ) -> SparseImageFormatPropertiesBuilder<'a> { + self.inner.aspect_mask = aspect_mask; + self + } + pub fn image_granularity( + mut self, + image_granularity: Extent3D, + ) -> SparseImageFormatPropertiesBuilder<'a> { + self.inner.image_granularity = image_granularity; + self + } + pub fn flags( + mut self, + flags: SparseImageFormatFlags, + ) -> SparseImageFormatPropertiesBuilder<'a> { + self.inner.flags = flags; + self + } + #[doc = r" Calling build will **discard** all the lifetime information. Only call this if"] + #[doc = r" necessary! 
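// A sketch tying `PhysicalDeviceMemoryProperties`, `MemoryRequirements`, and
// `MemoryAllocateInfo` above together: scan the memory types for one allowed by
// `memory_type_bits` that also has the requested property flags. The helper name
// is an illustrative assumption.
fn example_allocate_info(
    props: &PhysicalDeviceMemoryProperties,
    reqs: &MemoryRequirements,
    flags: MemoryPropertyFlags,
) -> Option<MemoryAllocateInfo> {
    (0..props.memory_type_count)
        .find(|&i| {
            reqs.memory_type_bits & (1 << i) != 0
                && props.memory_types[i as usize].property_flags.contains(flags)
        })
        .map(|i| {
            MemoryAllocateInfo::builder()
                .allocation_size(reqs.size)
                .memory_type_index(i)
                .build()
        })
}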
Builders implement `Deref` targeting their corresponding Vulkan struct,"] + #[doc = r" so references to builders can be passed directly to Vulkan functions."] + pub fn build(self) -> SparseImageFormatProperties { + self.inner + } +} +#[repr(C)] +#[derive(Copy, Clone, Default, Debug)] +#[doc = ""] +pub struct SparseImageMemoryRequirements { + pub format_properties: SparseImageFormatProperties, + pub image_mip_tail_first_lod: u32, + pub image_mip_tail_size: DeviceSize, + pub image_mip_tail_offset: DeviceSize, + pub image_mip_tail_stride: DeviceSize, +} +impl SparseImageMemoryRequirements { + pub fn builder<'a>() -> SparseImageMemoryRequirementsBuilder<'a> { + SparseImageMemoryRequirementsBuilder { + inner: SparseImageMemoryRequirements::default(), + marker: ::std::marker::PhantomData, + } + } +} +#[repr(transparent)] +pub struct SparseImageMemoryRequirementsBuilder<'a> { + inner: SparseImageMemoryRequirements, + marker: ::std::marker::PhantomData<&'a ()>, +} +impl<'a> ::std::ops::Deref for SparseImageMemoryRequirementsBuilder<'a> { + type Target = SparseImageMemoryRequirements; + fn deref(&self) -> &Self::Target { + &self.inner + } +} +impl<'a> ::std::ops::DerefMut for SparseImageMemoryRequirementsBuilder<'a> { + fn deref_mut(&mut self) -> &mut Self::Target { + &mut self.inner + } +} +impl<'a> SparseImageMemoryRequirementsBuilder<'a> { + pub fn format_properties( + mut self, + format_properties: SparseImageFormatProperties, + ) -> SparseImageMemoryRequirementsBuilder<'a> { + self.inner.format_properties = format_properties; + self + } + pub fn image_mip_tail_first_lod( + mut self, + image_mip_tail_first_lod: u32, + ) -> SparseImageMemoryRequirementsBuilder<'a> { + self.inner.image_mip_tail_first_lod = image_mip_tail_first_lod; + self + } + pub fn image_mip_tail_size( + mut self, + image_mip_tail_size: DeviceSize, + ) -> SparseImageMemoryRequirementsBuilder<'a> { + self.inner.image_mip_tail_size = image_mip_tail_size; + self + } + pub fn image_mip_tail_offset( + mut self, + image_mip_tail_offset: DeviceSize, + ) -> SparseImageMemoryRequirementsBuilder<'a> { + self.inner.image_mip_tail_offset = image_mip_tail_offset; + self + } + pub fn image_mip_tail_stride( + mut self, + image_mip_tail_stride: DeviceSize, + ) -> SparseImageMemoryRequirementsBuilder<'a> { + self.inner.image_mip_tail_stride = image_mip_tail_stride; + self + } + #[doc = r" Calling build will **discard** all the lifetime information. Only call this if"] + #[doc = r" necessary! 
Builders implement `Deref` targeting their corresponding Vulkan struct,"] + #[doc = r" so references to builders can be passed directly to Vulkan functions."] + pub fn build(self) -> SparseImageMemoryRequirements { + self.inner + } +} +#[repr(C)] +#[derive(Copy, Clone, Default, Debug)] +#[doc = ""] +pub struct MemoryType { + pub property_flags: MemoryPropertyFlags, + pub heap_index: u32, +} +impl MemoryType { + pub fn builder<'a>() -> MemoryTypeBuilder<'a> { + MemoryTypeBuilder { + inner: MemoryType::default(), + marker: ::std::marker::PhantomData, + } + } +} +#[repr(transparent)] +pub struct MemoryTypeBuilder<'a> { + inner: MemoryType, + marker: ::std::marker::PhantomData<&'a ()>, +} +impl<'a> ::std::ops::Deref for MemoryTypeBuilder<'a> { + type Target = MemoryType; + fn deref(&self) -> &Self::Target { + &self.inner + } +} +impl<'a> ::std::ops::DerefMut for MemoryTypeBuilder<'a> { + fn deref_mut(&mut self) -> &mut Self::Target { + &mut self.inner + } +} +impl<'a> MemoryTypeBuilder<'a> { + pub fn property_flags(mut self, property_flags: MemoryPropertyFlags) -> MemoryTypeBuilder<'a> { + self.inner.property_flags = property_flags; + self + } + pub fn heap_index(mut self, heap_index: u32) -> MemoryTypeBuilder<'a> { + self.inner.heap_index = heap_index; + self + } + #[doc = r" Calling build will **discard** all the lifetime information. Only call this if"] + #[doc = r" necessary! Builders implement `Deref` targeting their corresponding Vulkan struct,"] + #[doc = r" so references to builders can be passed directly to Vulkan functions."] + pub fn build(self) -> MemoryType { + self.inner + } +} +#[repr(C)] +#[derive(Copy, Clone, Default, Debug)] +#[doc = ""] +pub struct MemoryHeap { + pub size: DeviceSize, + pub flags: MemoryHeapFlags, +} +impl MemoryHeap { + pub fn builder<'a>() -> MemoryHeapBuilder<'a> { + MemoryHeapBuilder { + inner: MemoryHeap::default(), + marker: ::std::marker::PhantomData, + } + } +} +#[repr(transparent)] +pub struct MemoryHeapBuilder<'a> { + inner: MemoryHeap, + marker: ::std::marker::PhantomData<&'a ()>, +} +impl<'a> ::std::ops::Deref for MemoryHeapBuilder<'a> { + type Target = MemoryHeap; + fn deref(&self) -> &Self::Target { + &self.inner + } +} +impl<'a> ::std::ops::DerefMut for MemoryHeapBuilder<'a> { + fn deref_mut(&mut self) -> &mut Self::Target { + &mut self.inner + } +} +impl<'a> MemoryHeapBuilder<'a> { + pub fn size(mut self, size: DeviceSize) -> MemoryHeapBuilder<'a> { + self.inner.size = size; + self + } + pub fn flags(mut self, flags: MemoryHeapFlags) -> MemoryHeapBuilder<'a> { + self.inner.flags = flags; + self + } + #[doc = r" Calling build will **discard** all the lifetime information. Only call this if"] + #[doc = r" necessary! 
Builders implement `Deref` targeting their corresponding Vulkan struct,"] + #[doc = r" so references to builders can be passed directly to Vulkan functions."] + pub fn build(self) -> MemoryHeap { + self.inner + } +} +#[repr(C)] +#[derive(Copy, Clone, Debug)] +#[doc = ""] +pub struct MappedMemoryRange { + pub s_type: StructureType, + pub p_next: *const c_void, + pub memory: DeviceMemory, + pub offset: DeviceSize, + pub size: DeviceSize, +} +impl ::std::default::Default for MappedMemoryRange { + fn default() -> MappedMemoryRange { + MappedMemoryRange { + s_type: StructureType::MAPPED_MEMORY_RANGE, + p_next: ::std::ptr::null(), + memory: DeviceMemory::default(), + offset: DeviceSize::default(), + size: DeviceSize::default(), + } + } +} +impl MappedMemoryRange { + pub fn builder<'a>() -> MappedMemoryRangeBuilder<'a> { + MappedMemoryRangeBuilder { + inner: MappedMemoryRange::default(), + marker: ::std::marker::PhantomData, + } + } +} +#[repr(transparent)] +pub struct MappedMemoryRangeBuilder<'a> { + inner: MappedMemoryRange, + marker: ::std::marker::PhantomData<&'a ()>, +} +pub unsafe trait ExtendsMappedMemoryRange {} +impl<'a> ::std::ops::Deref for MappedMemoryRangeBuilder<'a> { + type Target = MappedMemoryRange; + fn deref(&self) -> &Self::Target { + &self.inner + } +} +impl<'a> ::std::ops::DerefMut for MappedMemoryRangeBuilder<'a> { + fn deref_mut(&mut self) -> &mut Self::Target { + &mut self.inner + } +} +impl<'a> MappedMemoryRangeBuilder<'a> { + pub fn memory(mut self, memory: DeviceMemory) -> MappedMemoryRangeBuilder<'a> { + self.inner.memory = memory; + self + } + pub fn offset(mut self, offset: DeviceSize) -> MappedMemoryRangeBuilder<'a> { + self.inner.offset = offset; + self + } + pub fn size(mut self, size: DeviceSize) -> MappedMemoryRangeBuilder<'a> { + self.inner.size = size; + self + } + #[doc = r" Prepends the given extension struct between the root and the first pointer. This"] + #[doc = r" method only exists on structs that can be passed to a function directly. Only"] + #[doc = r" valid extension structs can be pushed into the chain."] + #[doc = r" If the chain looks like `A -> B -> C`, and you call `builder.push_next(&mut D)`, then the"] + #[doc = r" chain will look like `A -> D -> B -> C`."] + pub fn push_next<T: ExtendsMappedMemoryRange>( + mut self, + next: &'a mut T, + ) -> MappedMemoryRangeBuilder<'a> { + unsafe { + let next_ptr = next as *mut T as *mut BaseOutStructure; + let last_next = ptr_chain_iter(next).last().unwrap(); + (*last_next).p_next = self.inner.p_next as _; + self.inner.p_next = next_ptr as _; + } + self + } + #[doc = r" Calling build will **discard** all the lifetime information. Only call this if"] + #[doc = r" necessary! 
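// A sketch of `MappedMemoryRange` above, describing a whole-allocation flush
// range; it assumes the `WHOLE_SIZE` sentinel these bindings define elsewhere.
fn example_full_range(memory: DeviceMemory) -> MappedMemoryRange {
    MappedMemoryRange::builder()
        .memory(memory)
        .offset(0)
        .size(WHOLE_SIZE)
        .build()
}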
Builders implement `Deref` targeting their corresponding Vulkan struct,"] + #[doc = r" so references to builders can be passed directly to Vulkan functions."] + pub fn build(self) -> MappedMemoryRange { + self.inner + } +} +#[repr(C)] +#[derive(Copy, Clone, Default, Debug)] +#[doc = ""] +pub struct FormatProperties { + pub linear_tiling_features: FormatFeatureFlags, + pub optimal_tiling_features: FormatFeatureFlags, + pub buffer_features: FormatFeatureFlags, +} +impl FormatProperties { + pub fn builder<'a>() -> FormatPropertiesBuilder<'a> { + FormatPropertiesBuilder { + inner: FormatProperties::default(), + marker: ::std::marker::PhantomData, + } + } +} +#[repr(transparent)] +pub struct FormatPropertiesBuilder<'a> { + inner: FormatProperties, + marker: ::std::marker::PhantomData<&'a ()>, +} +impl<'a> ::std::ops::Deref for FormatPropertiesBuilder<'a> { + type Target = FormatProperties; + fn deref(&self) -> &Self::Target { + &self.inner + } +} +impl<'a> ::std::ops::DerefMut for FormatPropertiesBuilder<'a> { + fn deref_mut(&mut self) -> &mut Self::Target { + &mut self.inner + } +} +impl<'a> FormatPropertiesBuilder<'a> { + pub fn linear_tiling_features( + mut self, + linear_tiling_features: FormatFeatureFlags, + ) -> FormatPropertiesBuilder<'a> { + self.inner.linear_tiling_features = linear_tiling_features; + self + } + pub fn optimal_tiling_features( + mut self, + optimal_tiling_features: FormatFeatureFlags, + ) -> FormatPropertiesBuilder<'a> { + self.inner.optimal_tiling_features = optimal_tiling_features; + self + } + pub fn buffer_features( + mut self, + buffer_features: FormatFeatureFlags, + ) -> FormatPropertiesBuilder<'a> { + self.inner.buffer_features = buffer_features; + self + } + #[doc = r" Calling build will **discard** all the lifetime information. Only call this if"] + #[doc = r" necessary! 
Builders implement `Deref` targeting their corresponding Vulkan struct,"] + #[doc = r" so references to builders can be passed directly to Vulkan functions."] + pub fn build(self) -> FormatProperties { + self.inner + } +} +#[repr(C)] +#[derive(Copy, Clone, Default, Debug)] +#[doc = ""] +pub struct ImageFormatProperties { + pub max_extent: Extent3D, + pub max_mip_levels: u32, + pub max_array_layers: u32, + pub sample_counts: SampleCountFlags, + pub max_resource_size: DeviceSize, +} +impl ImageFormatProperties { + pub fn builder<'a>() -> ImageFormatPropertiesBuilder<'a> { + ImageFormatPropertiesBuilder { + inner: ImageFormatProperties::default(), + marker: ::std::marker::PhantomData, + } + } +} +#[repr(transparent)] +pub struct ImageFormatPropertiesBuilder<'a> { + inner: ImageFormatProperties, + marker: ::std::marker::PhantomData<&'a ()>, +} +impl<'a> ::std::ops::Deref for ImageFormatPropertiesBuilder<'a> { + type Target = ImageFormatProperties; + fn deref(&self) -> &Self::Target { + &self.inner + } +} +impl<'a> ::std::ops::DerefMut for ImageFormatPropertiesBuilder<'a> { + fn deref_mut(&mut self) -> &mut Self::Target { + &mut self.inner + } +} +impl<'a> ImageFormatPropertiesBuilder<'a> { + pub fn max_extent(mut self, max_extent: Extent3D) -> ImageFormatPropertiesBuilder<'a> { + self.inner.max_extent = max_extent; + self + } + pub fn max_mip_levels(mut self, max_mip_levels: u32) -> ImageFormatPropertiesBuilder<'a> { + self.inner.max_mip_levels = max_mip_levels; + self + } + pub fn max_array_layers(mut self, max_array_layers: u32) -> ImageFormatPropertiesBuilder<'a> { + self.inner.max_array_layers = max_array_layers; + self + } + pub fn sample_counts( + mut self, + sample_counts: SampleCountFlags, + ) -> ImageFormatPropertiesBuilder<'a> { + self.inner.sample_counts = sample_counts; + self + } + pub fn max_resource_size( + mut self, + max_resource_size: DeviceSize, + ) -> ImageFormatPropertiesBuilder<'a> { + self.inner.max_resource_size = max_resource_size; + self + } + #[doc = r" Calling build will **discard** all the lifetime information. Only call this if"] + #[doc = r" necessary! 
Builders implement `Deref` targeting their corresponding Vulkan struct,"] + #[doc = r" so references to builders can be passed directly to Vulkan functions."] + pub fn build(self) -> ImageFormatProperties { + self.inner + } +} +#[repr(C)] +#[derive(Copy, Clone, Default, Debug)] +#[doc = ""] +pub struct DescriptorBufferInfo { + pub buffer: Buffer, + pub offset: DeviceSize, + pub range: DeviceSize, +} +impl DescriptorBufferInfo { + pub fn builder<'a>() -> DescriptorBufferInfoBuilder<'a> { + DescriptorBufferInfoBuilder { + inner: DescriptorBufferInfo::default(), + marker: ::std::marker::PhantomData, + } + } +} +#[repr(transparent)] +pub struct DescriptorBufferInfoBuilder<'a> { + inner: DescriptorBufferInfo, + marker: ::std::marker::PhantomData<&'a ()>, +} +impl<'a> ::std::ops::Deref for DescriptorBufferInfoBuilder<'a> { + type Target = DescriptorBufferInfo; + fn deref(&self) -> &Self::Target { + &self.inner + } +} +impl<'a> ::std::ops::DerefMut for DescriptorBufferInfoBuilder<'a> { + fn deref_mut(&mut self) -> &mut Self::Target { + &mut self.inner + } +} +impl<'a> DescriptorBufferInfoBuilder<'a> { + pub fn buffer(mut self, buffer: Buffer) -> DescriptorBufferInfoBuilder<'a> { + self.inner.buffer = buffer; + self + } + pub fn offset(mut self, offset: DeviceSize) -> DescriptorBufferInfoBuilder<'a> { + self.inner.offset = offset; + self + } + pub fn range(mut self, range: DeviceSize) -> DescriptorBufferInfoBuilder<'a> { + self.inner.range = range; + self + } + #[doc = r" Calling build will **discard** all the lifetime information. Only call this if"] + #[doc = r" necessary! Builders implement `Deref` targeting their corresponding Vulkan struct,"] + #[doc = r" so references to builders can be passed directly to Vulkan functions."] + pub fn build(self) -> DescriptorBufferInfo { + self.inner + } +} +#[repr(C)] +#[derive(Copy, Clone, Default, Debug)] +#[doc = ""] +pub struct DescriptorImageInfo { + pub sampler: Sampler, + pub image_view: ImageView, + pub image_layout: ImageLayout, +} +impl DescriptorImageInfo { + pub fn builder<'a>() -> DescriptorImageInfoBuilder<'a> { + DescriptorImageInfoBuilder { + inner: DescriptorImageInfo::default(), + marker: ::std::marker::PhantomData, + } + } +} +#[repr(transparent)] +pub struct DescriptorImageInfoBuilder<'a> { + inner: DescriptorImageInfo, + marker: ::std::marker::PhantomData<&'a ()>, +} +impl<'a> ::std::ops::Deref for DescriptorImageInfoBuilder<'a> { + type Target = DescriptorImageInfo; + fn deref(&self) -> &Self::Target { + &self.inner + } +} +impl<'a> ::std::ops::DerefMut for DescriptorImageInfoBuilder<'a> { + fn deref_mut(&mut self) -> &mut Self::Target { + &mut self.inner + } +} +impl<'a> DescriptorImageInfoBuilder<'a> { + pub fn sampler(mut self, sampler: Sampler) -> DescriptorImageInfoBuilder<'a> { + self.inner.sampler = sampler; + self + } + pub fn image_view(mut self, image_view: ImageView) -> DescriptorImageInfoBuilder<'a> { + self.inner.image_view = image_view; + self + } + pub fn image_layout(mut self, image_layout: ImageLayout) -> DescriptorImageInfoBuilder<'a> { + self.inner.image_layout = image_layout; + self + } + #[doc = r" Calling build will **discard** all the lifetime information. Only call this if"] + #[doc = r" necessary! 
Builders implement `Deref` targeting their corresponding Vulkan struct,"] + #[doc = r" so references to builders can be passed directly to Vulkan functions."] + pub fn build(self) -> DescriptorImageInfo { + self.inner + } +} +#[repr(C)] +#[derive(Copy, Clone, Debug)] +#[doc = ""] +pub struct WriteDescriptorSet { + pub s_type: StructureType, + pub p_next: *const c_void, + pub dst_set: DescriptorSet, + pub dst_binding: u32, + pub dst_array_element: u32, + pub descriptor_count: u32, + pub descriptor_type: DescriptorType, + pub p_image_info: *const DescriptorImageInfo, + pub p_buffer_info: *const DescriptorBufferInfo, + pub p_texel_buffer_view: *const BufferView, +} +impl ::std::default::Default for WriteDescriptorSet { + fn default() -> WriteDescriptorSet { + WriteDescriptorSet { + s_type: StructureType::WRITE_DESCRIPTOR_SET, + p_next: ::std::ptr::null(), + dst_set: DescriptorSet::default(), + dst_binding: u32::default(), + dst_array_element: u32::default(), + descriptor_count: u32::default(), + descriptor_type: DescriptorType::default(), + p_image_info: ::std::ptr::null(), + p_buffer_info: ::std::ptr::null(), + p_texel_buffer_view: ::std::ptr::null(), + } + } +} +impl WriteDescriptorSet { + pub fn builder<'a>() -> WriteDescriptorSetBuilder<'a> { + WriteDescriptorSetBuilder { + inner: WriteDescriptorSet::default(), + marker: ::std::marker::PhantomData, + } + } +} +#[repr(transparent)] +pub struct WriteDescriptorSetBuilder<'a> { + inner: WriteDescriptorSet, + marker: ::std::marker::PhantomData<&'a ()>, +} +pub unsafe trait ExtendsWriteDescriptorSet {} +impl<'a> ::std::ops::Deref for WriteDescriptorSetBuilder<'a> { + type Target = WriteDescriptorSet; + fn deref(&self) -> &Self::Target { + &self.inner + } +} +impl<'a> ::std::ops::DerefMut for WriteDescriptorSetBuilder<'a> { + fn deref_mut(&mut self) -> &mut Self::Target { + &mut self.inner + } +} +impl<'a> WriteDescriptorSetBuilder<'a> { + pub fn dst_set(mut self, dst_set: DescriptorSet) -> WriteDescriptorSetBuilder<'a> { + self.inner.dst_set = dst_set; + self + } + pub fn dst_binding(mut self, dst_binding: u32) -> WriteDescriptorSetBuilder<'a> { + self.inner.dst_binding = dst_binding; + self + } + pub fn dst_array_element(mut self, dst_array_element: u32) -> WriteDescriptorSetBuilder<'a> { + self.inner.dst_array_element = dst_array_element; + self + } + pub fn descriptor_type( + mut self, + descriptor_type: DescriptorType, + ) -> WriteDescriptorSetBuilder<'a> { + self.inner.descriptor_type = descriptor_type; + self + } + pub fn image_info( + mut self, + image_info: &'a [DescriptorImageInfo], + ) -> WriteDescriptorSetBuilder<'a> { + self.inner.descriptor_count = image_info.len() as _; + self.inner.p_image_info = image_info.as_ptr(); + self + } + pub fn buffer_info( + mut self, + buffer_info: &'a [DescriptorBufferInfo], + ) -> WriteDescriptorSetBuilder<'a> { + self.inner.descriptor_count = buffer_info.len() as _; + self.inner.p_buffer_info = buffer_info.as_ptr(); + self + } + pub fn texel_buffer_view( + mut self, + texel_buffer_view: &'a [BufferView], + ) -> WriteDescriptorSetBuilder<'a> { + self.inner.descriptor_count = texel_buffer_view.len() as _; + self.inner.p_texel_buffer_view = texel_buffer_view.as_ptr(); + self + } + #[doc = r" Prepends the given extension struct between the root and the first pointer. This"] + #[doc = r" method only exists on structs that can be passed to a function directly. 
Only"] + #[doc = r" valid extension structs can be pushed into the chain."] + #[doc = r" If the chain looks like `A -> B -> C`, and you call `builder.push_next(&mut D)`, then the"] + #[doc = r" chain will look like `A -> D -> B -> C`."] + pub fn push_next<T: ExtendsWriteDescriptorSet>( + mut self, + next: &'a mut T, + ) -> WriteDescriptorSetBuilder<'a> { + unsafe { + let next_ptr = next as *mut T as *mut BaseOutStructure; + let last_next = ptr_chain_iter(next).last().unwrap(); + (*last_next).p_next = self.inner.p_next as _; + self.inner.p_next = next_ptr as _; + } + self + } + #[doc = r" Calling build will **discard** all the lifetime information. Only call this if"] + #[doc = r" necessary! Builders implement `Deref` targeting their corresponding Vulkan struct,"] + #[doc = r" so references to builders can be passed directly to Vulkan functions."] + pub fn build(self) -> WriteDescriptorSet { + self.inner + } +} +#[repr(C)] +#[derive(Copy, Clone, Debug)] +#[doc = ""] +pub struct CopyDescriptorSet { + pub s_type: StructureType, + pub p_next: *const c_void, + pub src_set: DescriptorSet, + pub src_binding: u32, + pub src_array_element: u32, + pub dst_set: DescriptorSet, + pub dst_binding: u32, + pub dst_array_element: u32, + pub descriptor_count: u32, +} +impl ::std::default::Default for CopyDescriptorSet { + fn default() -> CopyDescriptorSet { + CopyDescriptorSet { + s_type: StructureType::COPY_DESCRIPTOR_SET, + p_next: ::std::ptr::null(), + src_set: DescriptorSet::default(), + src_binding: u32::default(), + src_array_element: u32::default(), + dst_set: DescriptorSet::default(), + dst_binding: u32::default(), + dst_array_element: u32::default(), + descriptor_count: u32::default(), + } + } +} +impl CopyDescriptorSet { + pub fn builder<'a>() -> CopyDescriptorSetBuilder<'a> { + CopyDescriptorSetBuilder { + inner: CopyDescriptorSet::default(), + marker: ::std::marker::PhantomData, + } + } +} +#[repr(transparent)] +pub struct CopyDescriptorSetBuilder<'a> { + inner: CopyDescriptorSet, + marker: ::std::marker::PhantomData<&'a ()>, +} +pub unsafe trait ExtendsCopyDescriptorSet {} +impl<'a> ::std::ops::Deref for CopyDescriptorSetBuilder<'a> { + type Target = CopyDescriptorSet; + fn deref(&self) -> &Self::Target { + &self.inner + } +} +impl<'a> ::std::ops::DerefMut for CopyDescriptorSetBuilder<'a> { + fn deref_mut(&mut self) -> &mut Self::Target { + &mut self.inner + } +} +impl<'a> CopyDescriptorSetBuilder<'a> { + pub fn src_set(mut self, src_set: DescriptorSet) -> CopyDescriptorSetBuilder<'a> { + self.inner.src_set = src_set; + self + } + pub fn src_binding(mut self, src_binding: u32) -> CopyDescriptorSetBuilder<'a> { + self.inner.src_binding = src_binding; + self + } + pub fn src_array_element(mut self, src_array_element: u32) -> CopyDescriptorSetBuilder<'a> { + self.inner.src_array_element = src_array_element; + self + } + pub fn dst_set(mut self, dst_set: DescriptorSet) -> CopyDescriptorSetBuilder<'a> { + self.inner.dst_set = dst_set; + self + } + pub fn dst_binding(mut self, dst_binding: u32) -> CopyDescriptorSetBuilder<'a> { + self.inner.dst_binding = dst_binding; + self + } + pub fn dst_array_element(mut self, dst_array_element: u32) -> CopyDescriptorSetBuilder<'a> { + self.inner.dst_array_element = dst_array_element; + self + } + pub fn descriptor_count(mut self, descriptor_count: u32) -> CopyDescriptorSetBuilder<'a> { + self.inner.descriptor_count = descriptor_count; + self + } + #[doc = r" Prepends the given extension struct between the root and the first pointer. 
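The slice setters on `WriteDescriptorSetBuilder` (`image_info`, `buffer_info`, `texel_buffer_view`) each derive `descriptor_count` and the matching raw pointer from a single `&'a [..]`, which is exactly what the builder's `'a` lifetime guards. A sketch, with hypothetical handles, of why the `build()` warning matters here:

fn write_uniform(dst_set: DescriptorSet, info: &[DescriptorBufferInfo]) -> WriteDescriptorSet {
    WriteDescriptorSet::builder()
        .dst_set(dst_set)
        .dst_binding(0)
        .descriptor_type(DescriptorType::UNIFORM_BUFFER)
        .buffer_info(info) // sets descriptor_count = info.len() and p_buffer_info = info.as_ptr()
        .build() // the returned struct still points into `info`; the caller must keep it alive
}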
This"] + #[doc = r" method only exists on structs that can be passed to a function directly. Only"] + #[doc = r" valid extension structs can be pushed into the chain."] + #[doc = r" If the chain looks like `A -> B -> C`, and you call `builder.push_next(&mut D)`, then the"] + #[doc = r" chain will look like `A -> D -> B -> C`."] + pub fn push_next<T: ExtendsCopyDescriptorSet>( + mut self, + next: &'a mut T, + ) -> CopyDescriptorSetBuilder<'a> { + unsafe { + let next_ptr = next as *mut T as *mut BaseOutStructure; + let last_next = ptr_chain_iter(next).last().unwrap(); + (*last_next).p_next = self.inner.p_next as _; + self.inner.p_next = next_ptr as _; + } + self + } + #[doc = r" Calling build will **discard** all the lifetime information. Only call this if"] + #[doc = r" necessary! Builders implement `Deref` targeting their corresponding Vulkan struct,"] + #[doc = r" so references to builders can be passed directly to Vulkan functions."] + pub fn build(self) -> CopyDescriptorSet { + self.inner + } +} +#[repr(C)] +#[derive(Copy, Clone, Debug)] +#[doc = ""] +pub struct BufferCreateInfo { + pub s_type: StructureType, + pub p_next: *const c_void, + pub flags: BufferCreateFlags, + pub size: DeviceSize, + pub usage: BufferUsageFlags, + pub sharing_mode: SharingMode, + pub queue_family_index_count: u32, + pub p_queue_family_indices: *const u32, +} +impl ::std::default::Default for BufferCreateInfo { + fn default() -> BufferCreateInfo { + BufferCreateInfo { + s_type: StructureType::BUFFER_CREATE_INFO, + p_next: ::std::ptr::null(), + flags: BufferCreateFlags::default(), + size: DeviceSize::default(), + usage: BufferUsageFlags::default(), + sharing_mode: SharingMode::default(), + queue_family_index_count: u32::default(), + p_queue_family_indices: ::std::ptr::null(), + } + } +} +impl BufferCreateInfo { + pub fn builder<'a>() -> BufferCreateInfoBuilder<'a> { + BufferCreateInfoBuilder { + inner: BufferCreateInfo::default(), + marker: ::std::marker::PhantomData, + } + } +} +#[repr(transparent)] +pub struct BufferCreateInfoBuilder<'a> { + inner: BufferCreateInfo, + marker: ::std::marker::PhantomData<&'a ()>, +} +pub unsafe trait ExtendsBufferCreateInfo {} +impl<'a> ::std::ops::Deref for BufferCreateInfoBuilder<'a> { + type Target = BufferCreateInfo; + fn deref(&self) -> &Self::Target { + &self.inner + } +} +impl<'a> ::std::ops::DerefMut for BufferCreateInfoBuilder<'a> { + fn deref_mut(&mut self) -> &mut Self::Target { + &mut self.inner + } +} +impl<'a> BufferCreateInfoBuilder<'a> { + pub fn flags(mut self, flags: BufferCreateFlags) -> BufferCreateInfoBuilder<'a> { + self.inner.flags = flags; + self + } + pub fn size(mut self, size: DeviceSize) -> BufferCreateInfoBuilder<'a> { + self.inner.size = size; + self + } + pub fn usage(mut self, usage: BufferUsageFlags) -> BufferCreateInfoBuilder<'a> { + self.inner.usage = usage; + self + } + pub fn sharing_mode(mut self, sharing_mode: SharingMode) -> BufferCreateInfoBuilder<'a> { + self.inner.sharing_mode = sharing_mode; + self + } + pub fn queue_family_indices( + mut self, + queue_family_indices: &'a [u32], + ) -> BufferCreateInfoBuilder<'a> { + self.inner.queue_family_index_count = queue_family_indices.len() as _; + self.inner.p_queue_family_indices = queue_family_indices.as_ptr(); + self + } + #[doc = r" Prepends the given extension struct between the root and the first pointer. This"] + #[doc = r" method only exists on structs that can be passed to a function directly. 
Only"] + #[doc = r" valid extension structs can be pushed into the chain."] + #[doc = r" If the chain looks like `A -> B -> C`, and you call `builder.push_next(&mut D)`, then the"] + #[doc = r" chain will look like `A -> D -> B -> C`."] + pub fn push_next<T: ExtendsBufferCreateInfo>( + mut self, + next: &'a mut T, + ) -> BufferCreateInfoBuilder<'a> { + unsafe { + let next_ptr = next as *mut T as *mut BaseOutStructure; + let last_next = ptr_chain_iter(next).last().unwrap(); + (*last_next).p_next = self.inner.p_next as _; + self.inner.p_next = next_ptr as _; + } + self + } + #[doc = r" Calling build will **discard** all the lifetime information. Only call this if"] + #[doc = r" necessary! Builders implement `Deref` targeting their corresponding Vulkan struct,"] + #[doc = r" so references to builders can be passed directly to Vulkan functions."] + pub fn build(self) -> BufferCreateInfo { + self.inner + } +} +#[repr(C)] +#[derive(Copy, Clone, Debug)] +#[doc = ""] +pub struct BufferViewCreateInfo { + pub s_type: StructureType, + pub p_next: *const c_void, + pub flags: BufferViewCreateFlags, + pub buffer: Buffer, + pub format: Format, + pub offset: DeviceSize, + pub range: DeviceSize, +} +impl ::std::default::Default for BufferViewCreateInfo { + fn default() -> BufferViewCreateInfo { + BufferViewCreateInfo { + s_type: StructureType::BUFFER_VIEW_CREATE_INFO, + p_next: ::std::ptr::null(), + flags: BufferViewCreateFlags::default(), + buffer: Buffer::default(), + format: Format::default(), + offset: DeviceSize::default(), + range: DeviceSize::default(), + } + } +} +impl BufferViewCreateInfo { + pub fn builder<'a>() -> BufferViewCreateInfoBuilder<'a> { + BufferViewCreateInfoBuilder { + inner: BufferViewCreateInfo::default(), + marker: ::std::marker::PhantomData, + } + } +} +#[repr(transparent)] +pub struct BufferViewCreateInfoBuilder<'a> { + inner: BufferViewCreateInfo, + marker: ::std::marker::PhantomData<&'a ()>, +} +pub unsafe trait ExtendsBufferViewCreateInfo {} +impl<'a> ::std::ops::Deref for BufferViewCreateInfoBuilder<'a> { + type Target = BufferViewCreateInfo; + fn deref(&self) -> &Self::Target { + &self.inner + } +} +impl<'a> ::std::ops::DerefMut for BufferViewCreateInfoBuilder<'a> { + fn deref_mut(&mut self) -> &mut Self::Target { + &mut self.inner + } +} +impl<'a> BufferViewCreateInfoBuilder<'a> { + pub fn flags(mut self, flags: BufferViewCreateFlags) -> BufferViewCreateInfoBuilder<'a> { + self.inner.flags = flags; + self + } + pub fn buffer(mut self, buffer: Buffer) -> BufferViewCreateInfoBuilder<'a> { + self.inner.buffer = buffer; + self + } + pub fn format(mut self, format: Format) -> BufferViewCreateInfoBuilder<'a> { + self.inner.format = format; + self + } + pub fn offset(mut self, offset: DeviceSize) -> BufferViewCreateInfoBuilder<'a> { + self.inner.offset = offset; + self + } + pub fn range(mut self, range: DeviceSize) -> BufferViewCreateInfoBuilder<'a> { + self.inner.range = range; + self + } + #[doc = r" Prepends the given extension struct between the root and the first pointer. This"] + #[doc = r" method only exists on structs that can be passed to a function directly. 
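The `push_next` mechanics are easiest to see in a sketch. Nothing in this hunk implements `ExtendsBufferCreateInfo`, so the extension type below is left generic; any `E` with such an impl elsewhere in the module would do:

fn chained<'a, E: ExtendsBufferCreateInfo>(ext: &'a mut E) -> BufferCreateInfoBuilder<'a> {
    BufferCreateInfo::builder()
        .size(4096)
        .usage(BufferUsageFlags::VERTEX_BUFFER) // assumed flag constant from this module
        .sharing_mode(SharingMode::EXCLUSIVE)
        // Splices `ext` in directly behind the root struct: root -> E -> (E's old chain).
        .push_next(ext)
}

Note that `push_next` walks `ext`'s own chain via `ptr_chain_iter` and attaches the previous `p_next` to its tail, so an already-chained extension struct is preserved rather than overwritten.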
Only"] + #[doc = r" valid extension structs can be pushed into the chain."] + #[doc = r" If the chain looks like `A -> B -> C`, and you call `builder.push_next(&mut D)`, then the"] + #[doc = r" chain will look like `A -> D -> B -> C`."] + pub fn push_next<T: ExtendsBufferViewCreateInfo>( + mut self, + next: &'a mut T, + ) -> BufferViewCreateInfoBuilder<'a> { + unsafe { + let next_ptr = next as *mut T as *mut BaseOutStructure; + let last_next = ptr_chain_iter(next).last().unwrap(); + (*last_next).p_next = self.inner.p_next as _; + self.inner.p_next = next_ptr as _; + } + self + } + #[doc = r" Calling build will **discard** all the lifetime information. Only call this if"] + #[doc = r" necessary! Builders implement `Deref` targeting their corresponding Vulkan struct,"] + #[doc = r" so references to builders can be passed directly to Vulkan functions."] + pub fn build(self) -> BufferViewCreateInfo { + self.inner + } +} +#[repr(C)] +#[derive(Copy, Clone, Default, Debug)] +#[doc = ""] +pub struct ImageSubresource { + pub aspect_mask: ImageAspectFlags, + pub mip_level: u32, + pub array_layer: u32, +} +impl ImageSubresource { + pub fn builder<'a>() -> ImageSubresourceBuilder<'a> { + ImageSubresourceBuilder { + inner: ImageSubresource::default(), + marker: ::std::marker::PhantomData, + } + } +} +#[repr(transparent)] +pub struct ImageSubresourceBuilder<'a> { + inner: ImageSubresource, + marker: ::std::marker::PhantomData<&'a ()>, +} +impl<'a> ::std::ops::Deref for ImageSubresourceBuilder<'a> { + type Target = ImageSubresource; + fn deref(&self) -> &Self::Target { + &self.inner + } +} +impl<'a> ::std::ops::DerefMut for ImageSubresourceBuilder<'a> { + fn deref_mut(&mut self) -> &mut Self::Target { + &mut self.inner + } +} +impl<'a> ImageSubresourceBuilder<'a> { + pub fn aspect_mask(mut self, aspect_mask: ImageAspectFlags) -> ImageSubresourceBuilder<'a> { + self.inner.aspect_mask = aspect_mask; + self + } + pub fn mip_level(mut self, mip_level: u32) -> ImageSubresourceBuilder<'a> { + self.inner.mip_level = mip_level; + self + } + pub fn array_layer(mut self, array_layer: u32) -> ImageSubresourceBuilder<'a> { + self.inner.array_layer = array_layer; + self + } + #[doc = r" Calling build will **discard** all the lifetime information. Only call this if"] + #[doc = r" necessary! 
Builders implement `Deref` targeting their corresponding Vulkan struct,"] + #[doc = r" so references to builders can be passed directly to Vulkan functions."] + pub fn build(self) -> ImageSubresource { + self.inner + } +} +#[repr(C)] +#[derive(Copy, Clone, Default, Debug)] +#[doc = ""] +pub struct ImageSubresourceLayers { + pub aspect_mask: ImageAspectFlags, + pub mip_level: u32, + pub base_array_layer: u32, + pub layer_count: u32, +} +impl ImageSubresourceLayers { + pub fn builder<'a>() -> ImageSubresourceLayersBuilder<'a> { + ImageSubresourceLayersBuilder { + inner: ImageSubresourceLayers::default(), + marker: ::std::marker::PhantomData, + } + } +} +#[repr(transparent)] +pub struct ImageSubresourceLayersBuilder<'a> { + inner: ImageSubresourceLayers, + marker: ::std::marker::PhantomData<&'a ()>, +} +impl<'a> ::std::ops::Deref for ImageSubresourceLayersBuilder<'a> { + type Target = ImageSubresourceLayers; + fn deref(&self) -> &Self::Target { + &self.inner + } +} +impl<'a> ::std::ops::DerefMut for ImageSubresourceLayersBuilder<'a> { + fn deref_mut(&mut self) -> &mut Self::Target { + &mut self.inner + } +} +impl<'a> ImageSubresourceLayersBuilder<'a> { + pub fn aspect_mask( + mut self, + aspect_mask: ImageAspectFlags, + ) -> ImageSubresourceLayersBuilder<'a> { + self.inner.aspect_mask = aspect_mask; + self + } + pub fn mip_level(mut self, mip_level: u32) -> ImageSubresourceLayersBuilder<'a> { + self.inner.mip_level = mip_level; + self + } + pub fn base_array_layer(mut self, base_array_layer: u32) -> ImageSubresourceLayersBuilder<'a> { + self.inner.base_array_layer = base_array_layer; + self + } + pub fn layer_count(mut self, layer_count: u32) -> ImageSubresourceLayersBuilder<'a> { + self.inner.layer_count = layer_count; + self + } + #[doc = r" Calling build will **discard** all the lifetime information. Only call this if"] + #[doc = r" necessary! 
Builders implement `Deref` targeting their corresponding Vulkan struct,"] + #[doc = r" so references to builders can be passed directly to Vulkan functions."] + pub fn build(self) -> ImageSubresourceLayers { + self.inner + } +} +#[repr(C)] +#[derive(Copy, Clone, Default, Debug)] +#[doc = ""] +pub struct ImageSubresourceRange { + pub aspect_mask: ImageAspectFlags, + pub base_mip_level: u32, + pub level_count: u32, + pub base_array_layer: u32, + pub layer_count: u32, +} +impl ImageSubresourceRange { + pub fn builder<'a>() -> ImageSubresourceRangeBuilder<'a> { + ImageSubresourceRangeBuilder { + inner: ImageSubresourceRange::default(), + marker: ::std::marker::PhantomData, + } + } +} +#[repr(transparent)] +pub struct ImageSubresourceRangeBuilder<'a> { + inner: ImageSubresourceRange, + marker: ::std::marker::PhantomData<&'a ()>, +} +impl<'a> ::std::ops::Deref for ImageSubresourceRangeBuilder<'a> { + type Target = ImageSubresourceRange; + fn deref(&self) -> &Self::Target { + &self.inner + } +} +impl<'a> ::std::ops::DerefMut for ImageSubresourceRangeBuilder<'a> { + fn deref_mut(&mut self) -> &mut Self::Target { + &mut self.inner + } +} +impl<'a> ImageSubresourceRangeBuilder<'a> { + pub fn aspect_mask( + mut self, + aspect_mask: ImageAspectFlags, + ) -> ImageSubresourceRangeBuilder<'a> { + self.inner.aspect_mask = aspect_mask; + self + } + pub fn base_mip_level(mut self, base_mip_level: u32) -> ImageSubresourceRangeBuilder<'a> { + self.inner.base_mip_level = base_mip_level; + self + } + pub fn level_count(mut self, level_count: u32) -> ImageSubresourceRangeBuilder<'a> { + self.inner.level_count = level_count; + self + } + pub fn base_array_layer(mut self, base_array_layer: u32) -> ImageSubresourceRangeBuilder<'a> { + self.inner.base_array_layer = base_array_layer; + self + } + pub fn layer_count(mut self, layer_count: u32) -> ImageSubresourceRangeBuilder<'a> { + self.inner.layer_count = layer_count; + self + } + #[doc = r" Calling build will **discard** all the lifetime information. Only call this if"] + #[doc = r" necessary! 
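`ImageSubresourceRange` is one of the few structs here with a canonical value; a sketch of the usual whole-image, color-aspect range:

fn full_color_range(mip_count: u32, layer_count: u32) -> ImageSubresourceRange {
    ImageSubresourceRange::builder()
        .aspect_mask(ImageAspectFlags::COLOR) // assumed flag constant from this module
        .base_mip_level(0)
        .level_count(mip_count)
        .base_array_layer(0)
        .layer_count(layer_count)
        .build()
}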
Builders implement `Deref` targeting their corresponding Vulkan struct,"] + #[doc = r" so references to builders can be passed directly to Vulkan functions."] + pub fn build(self) -> ImageSubresourceRange { + self.inner + } +} +#[repr(C)] +#[derive(Copy, Clone, Debug)] +#[doc = ""] +pub struct MemoryBarrier { + pub s_type: StructureType, + pub p_next: *const c_void, + pub src_access_mask: AccessFlags, + pub dst_access_mask: AccessFlags, +} +impl ::std::default::Default for MemoryBarrier { + fn default() -> MemoryBarrier { + MemoryBarrier { + s_type: StructureType::MEMORY_BARRIER, + p_next: ::std::ptr::null(), + src_access_mask: AccessFlags::default(), + dst_access_mask: AccessFlags::default(), + } + } +} +impl MemoryBarrier { + pub fn builder<'a>() -> MemoryBarrierBuilder<'a> { + MemoryBarrierBuilder { + inner: MemoryBarrier::default(), + marker: ::std::marker::PhantomData, + } + } +} +#[repr(transparent)] +pub struct MemoryBarrierBuilder<'a> { + inner: MemoryBarrier, + marker: ::std::marker::PhantomData<&'a ()>, +} +pub unsafe trait ExtendsMemoryBarrier {} +impl<'a> ::std::ops::Deref for MemoryBarrierBuilder<'a> { + type Target = MemoryBarrier; + fn deref(&self) -> &Self::Target { + &self.inner + } +} +impl<'a> ::std::ops::DerefMut for MemoryBarrierBuilder<'a> { + fn deref_mut(&mut self) -> &mut Self::Target { + &mut self.inner + } +} +impl<'a> MemoryBarrierBuilder<'a> { + pub fn src_access_mask(mut self, src_access_mask: AccessFlags) -> MemoryBarrierBuilder<'a> { + self.inner.src_access_mask = src_access_mask; + self + } + pub fn dst_access_mask(mut self, dst_access_mask: AccessFlags) -> MemoryBarrierBuilder<'a> { + self.inner.dst_access_mask = dst_access_mask; + self + } + #[doc = r" Prepends the given extension struct between the root and the first pointer. This"] + #[doc = r" method only exists on structs that can be passed to a function directly. Only"] + #[doc = r" valid extension structs can be pushed into the chain."] + #[doc = r" If the chain looks like `A -> B -> C`, and you call `builder.push_next(&mut D)`, then the"] + #[doc = r" chain will look like `A -> D -> B -> C`."] + pub fn push_next<T: ExtendsMemoryBarrier>( + mut self, + next: &'a mut T, + ) -> MemoryBarrierBuilder<'a> { + unsafe { + let next_ptr = next as *mut T as *mut BaseOutStructure; + let last_next = ptr_chain_iter(next).last().unwrap(); + (*last_next).p_next = self.inner.p_next as _; + self.inner.p_next = next_ptr as _; + } + self + } + #[doc = r" Calling build will **discard** all the lifetime information. Only call this if"] + #[doc = r" necessary! 
Builders implement `Deref` targeting their corresponding Vulkan struct,"] + #[doc = r" so references to builders can be passed directly to Vulkan functions."] + pub fn build(self) -> MemoryBarrier { + self.inner + } +} +#[repr(C)] +#[derive(Copy, Clone, Debug)] +#[doc = ""] +pub struct BufferMemoryBarrier { + pub s_type: StructureType, + pub p_next: *const c_void, + pub src_access_mask: AccessFlags, + pub dst_access_mask: AccessFlags, + pub src_queue_family_index: u32, + pub dst_queue_family_index: u32, + pub buffer: Buffer, + pub offset: DeviceSize, + pub size: DeviceSize, +} +impl ::std::default::Default for BufferMemoryBarrier { + fn default() -> BufferMemoryBarrier { + BufferMemoryBarrier { + s_type: StructureType::BUFFER_MEMORY_BARRIER, + p_next: ::std::ptr::null(), + src_access_mask: AccessFlags::default(), + dst_access_mask: AccessFlags::default(), + src_queue_family_index: u32::default(), + dst_queue_family_index: u32::default(), + buffer: Buffer::default(), + offset: DeviceSize::default(), + size: DeviceSize::default(), + } + } +} +impl BufferMemoryBarrier { + pub fn builder<'a>() -> BufferMemoryBarrierBuilder<'a> { + BufferMemoryBarrierBuilder { + inner: BufferMemoryBarrier::default(), + marker: ::std::marker::PhantomData, + } + } +} +#[repr(transparent)] +pub struct BufferMemoryBarrierBuilder<'a> { + inner: BufferMemoryBarrier, + marker: ::std::marker::PhantomData<&'a ()>, +} +pub unsafe trait ExtendsBufferMemoryBarrier {} +impl<'a> ::std::ops::Deref for BufferMemoryBarrierBuilder<'a> { + type Target = BufferMemoryBarrier; + fn deref(&self) -> &Self::Target { + &self.inner + } +} +impl<'a> ::std::ops::DerefMut for BufferMemoryBarrierBuilder<'a> { + fn deref_mut(&mut self) -> &mut Self::Target { + &mut self.inner + } +} +impl<'a> BufferMemoryBarrierBuilder<'a> { + pub fn src_access_mask( + mut self, + src_access_mask: AccessFlags, + ) -> BufferMemoryBarrierBuilder<'a> { + self.inner.src_access_mask = src_access_mask; + self + } + pub fn dst_access_mask( + mut self, + dst_access_mask: AccessFlags, + ) -> BufferMemoryBarrierBuilder<'a> { + self.inner.dst_access_mask = dst_access_mask; + self + } + pub fn src_queue_family_index( + mut self, + src_queue_family_index: u32, + ) -> BufferMemoryBarrierBuilder<'a> { + self.inner.src_queue_family_index = src_queue_family_index; + self + } + pub fn dst_queue_family_index( + mut self, + dst_queue_family_index: u32, + ) -> BufferMemoryBarrierBuilder<'a> { + self.inner.dst_queue_family_index = dst_queue_family_index; + self + } + pub fn buffer(mut self, buffer: Buffer) -> BufferMemoryBarrierBuilder<'a> { + self.inner.buffer = buffer; + self + } + pub fn offset(mut self, offset: DeviceSize) -> BufferMemoryBarrierBuilder<'a> { + self.inner.offset = offset; + self + } + pub fn size(mut self, size: DeviceSize) -> BufferMemoryBarrierBuilder<'a> { + self.inner.size = size; + self + } + #[doc = r" Prepends the given extension struct between the root and the first pointer. This"] + #[doc = r" method only exists on structs that can be passed to a function directly. 
Only"] + #[doc = r" valid extension structs can be pushed into the chain."] + #[doc = r" If the chain looks like `A -> B -> C`, and you call `builder.push_next(&mut D)`, then the"] + #[doc = r" chain will look like `A -> D -> B -> C`."] + pub fn push_next<T: ExtendsBufferMemoryBarrier>( + mut self, + next: &'a mut T, + ) -> BufferMemoryBarrierBuilder<'a> { + unsafe { + let next_ptr = next as *mut T as *mut BaseOutStructure; + let last_next = ptr_chain_iter(next).last().unwrap(); + (*last_next).p_next = self.inner.p_next as _; + self.inner.p_next = next_ptr as _; + } + self + } + #[doc = r" Calling build will **discard** all the lifetime information. Only call this if"] + #[doc = r" necessary! Builders implement `Deref` targeting their corresponding Vulkan struct,"] + #[doc = r" so references to builders can be passed directly to Vulkan functions."] + pub fn build(self) -> BufferMemoryBarrier { + self.inner + } +} +#[repr(C)] +#[derive(Copy, Clone, Debug)] +#[doc = ""] +pub struct ImageMemoryBarrier { + pub s_type: StructureType, + pub p_next: *const c_void, + pub src_access_mask: AccessFlags, + pub dst_access_mask: AccessFlags, + pub old_layout: ImageLayout, + pub new_layout: ImageLayout, + pub src_queue_family_index: u32, + pub dst_queue_family_index: u32, + pub image: Image, + pub subresource_range: ImageSubresourceRange, +} +impl ::std::default::Default for ImageMemoryBarrier { + fn default() -> ImageMemoryBarrier { + ImageMemoryBarrier { + s_type: StructureType::IMAGE_MEMORY_BARRIER, + p_next: ::std::ptr::null(), + src_access_mask: AccessFlags::default(), + dst_access_mask: AccessFlags::default(), + old_layout: ImageLayout::default(), + new_layout: ImageLayout::default(), + src_queue_family_index: u32::default(), + dst_queue_family_index: u32::default(), + image: Image::default(), + subresource_range: ImageSubresourceRange::default(), + } + } +} +impl ImageMemoryBarrier { + pub fn builder<'a>() -> ImageMemoryBarrierBuilder<'a> { + ImageMemoryBarrierBuilder { + inner: ImageMemoryBarrier::default(), + marker: ::std::marker::PhantomData, + } + } +} +#[repr(transparent)] +pub struct ImageMemoryBarrierBuilder<'a> { + inner: ImageMemoryBarrier, + marker: ::std::marker::PhantomData<&'a ()>, +} +pub unsafe trait ExtendsImageMemoryBarrier {} +impl<'a> ::std::ops::Deref for ImageMemoryBarrierBuilder<'a> { + type Target = ImageMemoryBarrier; + fn deref(&self) -> &Self::Target { + &self.inner + } +} +impl<'a> ::std::ops::DerefMut for ImageMemoryBarrierBuilder<'a> { + fn deref_mut(&mut self) -> &mut Self::Target { + &mut self.inner + } +} +impl<'a> ImageMemoryBarrierBuilder<'a> { + pub fn src_access_mask( + mut self, + src_access_mask: AccessFlags, + ) -> ImageMemoryBarrierBuilder<'a> { + self.inner.src_access_mask = src_access_mask; + self + } + pub fn dst_access_mask( + mut self, + dst_access_mask: AccessFlags, + ) -> ImageMemoryBarrierBuilder<'a> { + self.inner.dst_access_mask = dst_access_mask; + self + } + pub fn old_layout(mut self, old_layout: ImageLayout) -> ImageMemoryBarrierBuilder<'a> { + self.inner.old_layout = old_layout; + self + } + pub fn new_layout(mut self, new_layout: ImageLayout) -> ImageMemoryBarrierBuilder<'a> { + self.inner.new_layout = new_layout; + self + } + pub fn src_queue_family_index( + mut self, + src_queue_family_index: u32, + ) -> ImageMemoryBarrierBuilder<'a> { + self.inner.src_queue_family_index = src_queue_family_index; + self + } + pub fn dst_queue_family_index( + mut self, + dst_queue_family_index: u32, + ) -> ImageMemoryBarrierBuilder<'a> { + self.inner.dst_queue_family_index = 
dst_queue_family_index; + self + } + pub fn image(mut self, image: Image) -> ImageMemoryBarrierBuilder<'a> { + self.inner.image = image; + self + } + pub fn subresource_range( + mut self, + subresource_range: ImageSubresourceRange, + ) -> ImageMemoryBarrierBuilder<'a> { + self.inner.subresource_range = subresource_range; + self + } + #[doc = r" Prepends the given extension struct between the root and the first pointer. This"] + #[doc = r" method only exists on structs that can be passed to a function directly. Only"] + #[doc = r" valid extension structs can be pushed into the chain."] + #[doc = r" If the chain looks like `A -> B -> C`, and you call `builder.push_next(&mut D)`, then the"] + #[doc = r" chain will look like `A -> D -> B -> C`."] + pub fn push_next<T: ExtendsImageMemoryBarrier>( + mut self, + next: &'a mut T, + ) -> ImageMemoryBarrierBuilder<'a> { + unsafe { + let next_ptr = next as *mut T as *mut BaseOutStructure; + let last_next = ptr_chain_iter(next).last().unwrap(); + (*last_next).p_next = self.inner.p_next as _; + self.inner.p_next = next_ptr as _; + } + self + } + #[doc = r" Calling build will **discard** all the lifetime information. Only call this if"] + #[doc = r" necessary! Builders implement `Deref` targeting their corresponding Vulkan struct,"] + #[doc = r" so references to builders can be passed directly to Vulkan functions."] + pub fn build(self) -> ImageMemoryBarrier { + self.inner + } +} +#[repr(C)] +#[derive(Copy, Clone, Debug)] +#[doc = ""] +pub struct ImageCreateInfo { + pub s_type: StructureType, + pub p_next: *const c_void, + pub flags: ImageCreateFlags, + pub image_type: ImageType, + pub format: Format, + pub extent: Extent3D, + pub mip_levels: u32, + pub array_layers: u32, + pub samples: SampleCountFlags, + pub tiling: ImageTiling, + pub usage: ImageUsageFlags, + pub sharing_mode: SharingMode, + pub queue_family_index_count: u32, + pub p_queue_family_indices: *const u32, + pub initial_layout: ImageLayout, +} +impl ::std::default::Default for ImageCreateInfo { + fn default() -> ImageCreateInfo { + ImageCreateInfo { + s_type: StructureType::IMAGE_CREATE_INFO, + p_next: ::std::ptr::null(), + flags: ImageCreateFlags::default(), + image_type: ImageType::default(), + format: Format::default(), + extent: Extent3D::default(), + mip_levels: u32::default(), + array_layers: u32::default(), + samples: SampleCountFlags::default(), + tiling: ImageTiling::default(), + usage: ImageUsageFlags::default(), + sharing_mode: SharingMode::default(), + queue_family_index_count: u32::default(), + p_queue_family_indices: ::std::ptr::null(), + initial_layout: ImageLayout::default(), + } + } +} +impl ImageCreateInfo { + pub fn builder<'a>() -> ImageCreateInfoBuilder<'a> { + ImageCreateInfoBuilder { + inner: ImageCreateInfo::default(), + marker: ::std::marker::PhantomData, + } + } +} +#[repr(transparent)] +pub struct ImageCreateInfoBuilder<'a> { + inner: ImageCreateInfo, + marker: ::std::marker::PhantomData<&'a ()>, +} +pub unsafe trait ExtendsImageCreateInfo {} +impl<'a> ::std::ops::Deref for ImageCreateInfoBuilder<'a> { + type Target = ImageCreateInfo; + fn deref(&self) -> &Self::Target { + &self.inner + } +} +impl<'a> ::std::ops::DerefMut for ImageCreateInfoBuilder<'a> { + fn deref_mut(&mut self) -> &mut Self::Target { + &mut self.inner + } +} +impl<'a> ImageCreateInfoBuilder<'a> { + pub fn flags(mut self, flags: ImageCreateFlags) -> ImageCreateInfoBuilder<'a> { + self.inner.flags = flags; + self + } + pub fn image_type(mut self, image_type: ImageType) -> ImageCreateInfoBuilder<'a> { + self.inner.image_type = 
image_type; + self + } + pub fn format(mut self, format: Format) -> ImageCreateInfoBuilder<'a> { + self.inner.format = format; + self + } + pub fn extent(mut self, extent: Extent3D) -> ImageCreateInfoBuilder<'a> { + self.inner.extent = extent; + self + } + pub fn mip_levels(mut self, mip_levels: u32) -> ImageCreateInfoBuilder<'a> { + self.inner.mip_levels = mip_levels; + self + } + pub fn array_layers(mut self, array_layers: u32) -> ImageCreateInfoBuilder<'a> { + self.inner.array_layers = array_layers; + self + } + pub fn samples(mut self, samples: SampleCountFlags) -> ImageCreateInfoBuilder<'a> { + self.inner.samples = samples; + self + } + pub fn tiling(mut self, tiling: ImageTiling) -> ImageCreateInfoBuilder<'a> { + self.inner.tiling = tiling; + self + } + pub fn usage(mut self, usage: ImageUsageFlags) -> ImageCreateInfoBuilder<'a> { + self.inner.usage = usage; + self + } + pub fn sharing_mode(mut self, sharing_mode: SharingMode) -> ImageCreateInfoBuilder<'a> { + self.inner.sharing_mode = sharing_mode; + self + } + pub fn queue_family_indices( + mut self, + queue_family_indices: &'a [u32], + ) -> ImageCreateInfoBuilder<'a> { + self.inner.queue_family_index_count = queue_family_indices.len() as _; + self.inner.p_queue_family_indices = queue_family_indices.as_ptr(); + self + } + pub fn initial_layout(mut self, initial_layout: ImageLayout) -> ImageCreateInfoBuilder<'a> { + self.inner.initial_layout = initial_layout; + self + } + #[doc = r" Prepends the given extension struct between the root and the first pointer. This"] + #[doc = r" method only exists on structs that can be passed to a function directly. Only"] + #[doc = r" valid extension structs can be pushed into the chain."] + #[doc = r" If the chain looks like `A -> B -> C`, and you call `builder.push_next(&mut D)`, then the"] + #[doc = r" chain will look like `A -> D -> B -> C`."] + pub fn push_next<T: ExtendsImageCreateInfo>( + mut self, + next: &'a mut T, + ) -> ImageCreateInfoBuilder<'a> { + unsafe { + let next_ptr = next as *mut T as *mut BaseOutStructure; + let last_next = ptr_chain_iter(next).last().unwrap(); + (*last_next).p_next = self.inner.p_next as _; + self.inner.p_next = next_ptr as _; + } + self + } + #[doc = r" Calling build will **discard** all the lifetime information. Only call this if"] + #[doc = r" necessary! 
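As a sketch of how these setters combine in practice, a plain 2D sampled texture description (the enum and flag constants used are assumed to exist elsewhere in this module; they are not part of this hunk):

fn tex2d_info(width: u32, height: u32) -> ImageCreateInfo {
    ImageCreateInfo::builder()
        .image_type(ImageType::TYPE_2D)
        .format(Format::R8G8B8A8_UNORM)
        .extent(Extent3D { width, height, depth: 1 })
        .mip_levels(1)
        .array_layers(1)
        .samples(SampleCountFlags::TYPE_1)
        .tiling(ImageTiling::OPTIMAL)
        .usage(ImageUsageFlags::SAMPLED | ImageUsageFlags::TRANSFER_DST)
        .sharing_mode(SharingMode::EXCLUSIVE) // queue_family_indices can stay unset for EXCLUSIVE
        .initial_layout(ImageLayout::UNDEFINED)
        .build()
}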
Builders implement `Deref` targeting their corresponding Vulkan struct,"] + #[doc = r" so references to builders can be passed directly to Vulkan functions."] + pub fn build(self) -> ImageCreateInfo { + self.inner + } +} +#[repr(C)] +#[derive(Copy, Clone, Default, Debug)] +#[doc = ""] +pub struct SubresourceLayout { + pub offset: DeviceSize, + pub size: DeviceSize, + pub row_pitch: DeviceSize, + pub array_pitch: DeviceSize, + pub depth_pitch: DeviceSize, +} +impl SubresourceLayout { + pub fn builder<'a>() -> SubresourceLayoutBuilder<'a> { + SubresourceLayoutBuilder { + inner: SubresourceLayout::default(), + marker: ::std::marker::PhantomData, + } + } +} +#[repr(transparent)] +pub struct SubresourceLayoutBuilder<'a> { + inner: SubresourceLayout, + marker: ::std::marker::PhantomData<&'a ()>, +} +impl<'a> ::std::ops::Deref for SubresourceLayoutBuilder<'a> { + type Target = SubresourceLayout; + fn deref(&self) -> &Self::Target { + &self.inner + } +} +impl<'a> ::std::ops::DerefMut for SubresourceLayoutBuilder<'a> { + fn deref_mut(&mut self) -> &mut Self::Target { + &mut self.inner + } +} +impl<'a> SubresourceLayoutBuilder<'a> { + pub fn offset(mut self, offset: DeviceSize) -> SubresourceLayoutBuilder<'a> { + self.inner.offset = offset; + self + } + pub fn size(mut self, size: DeviceSize) -> SubresourceLayoutBuilder<'a> { + self.inner.size = size; + self + } + pub fn row_pitch(mut self, row_pitch: DeviceSize) -> SubresourceLayoutBuilder<'a> { + self.inner.row_pitch = row_pitch; + self + } + pub fn array_pitch(mut self, array_pitch: DeviceSize) -> SubresourceLayoutBuilder<'a> { + self.inner.array_pitch = array_pitch; + self + } + pub fn depth_pitch(mut self, depth_pitch: DeviceSize) -> SubresourceLayoutBuilder<'a> { + self.inner.depth_pitch = depth_pitch; + self + } + #[doc = r" Calling build will **discard** all the lifetime information. Only call this if"] + #[doc = r" necessary! 
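`SubresourceLayout` describes where a linearly-tiled subresource lives in mapped memory; the standard addressing arithmetic, as a sketch:

// Byte offset of texel (x, y) in a linear image: start of the subresource,
// plus whole rows, plus texels within the row (bytes_per_texel depends on the format).
fn texel_byte_offset(layout: &SubresourceLayout, x: u64, y: u64, bytes_per_texel: u64) -> u64 {
    layout.offset + y * layout.row_pitch + x * bytes_per_texel
}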
Builders implement `Deref` targeting their corresponding Vulkan struct,"] + #[doc = r" so references to builders can be passed directly to Vulkan functions."] + pub fn build(self) -> SubresourceLayout { + self.inner + } +} +#[repr(C)] +#[derive(Copy, Clone, Debug)] +#[doc = ""] +pub struct ImageViewCreateInfo { + pub s_type: StructureType, + pub p_next: *const c_void, + pub flags: ImageViewCreateFlags, + pub image: Image, + pub view_type: ImageViewType, + pub format: Format, + pub components: ComponentMapping, + pub subresource_range: ImageSubresourceRange, +} +impl ::std::default::Default for ImageViewCreateInfo { + fn default() -> ImageViewCreateInfo { + ImageViewCreateInfo { + s_type: StructureType::IMAGE_VIEW_CREATE_INFO, + p_next: ::std::ptr::null(), + flags: ImageViewCreateFlags::default(), + image: Image::default(), + view_type: ImageViewType::default(), + format: Format::default(), + components: ComponentMapping::default(), + subresource_range: ImageSubresourceRange::default(), + } + } +} +impl ImageViewCreateInfo { + pub fn builder<'a>() -> ImageViewCreateInfoBuilder<'a> { + ImageViewCreateInfoBuilder { + inner: ImageViewCreateInfo::default(), + marker: ::std::marker::PhantomData, + } + } +} +#[repr(transparent)] +pub struct ImageViewCreateInfoBuilder<'a> { + inner: ImageViewCreateInfo, + marker: ::std::marker::PhantomData<&'a ()>, +} +pub unsafe trait ExtendsImageViewCreateInfo {} +impl<'a> ::std::ops::Deref for ImageViewCreateInfoBuilder<'a> { + type Target = ImageViewCreateInfo; + fn deref(&self) -> &Self::Target { + &self.inner + } +} +impl<'a> ::std::ops::DerefMut for ImageViewCreateInfoBuilder<'a> { + fn deref_mut(&mut self) -> &mut Self::Target { + &mut self.inner + } +} +impl<'a> ImageViewCreateInfoBuilder<'a> { + pub fn flags(mut self, flags: ImageViewCreateFlags) -> ImageViewCreateInfoBuilder<'a> { + self.inner.flags = flags; + self + } + pub fn image(mut self, image: Image) -> ImageViewCreateInfoBuilder<'a> { + self.inner.image = image; + self + } + pub fn view_type(mut self, view_type: ImageViewType) -> ImageViewCreateInfoBuilder<'a> { + self.inner.view_type = view_type; + self + } + pub fn format(mut self, format: Format) -> ImageViewCreateInfoBuilder<'a> { + self.inner.format = format; + self + } + pub fn components(mut self, components: ComponentMapping) -> ImageViewCreateInfoBuilder<'a> { + self.inner.components = components; + self + } + pub fn subresource_range( + mut self, + subresource_range: ImageSubresourceRange, + ) -> ImageViewCreateInfoBuilder<'a> { + self.inner.subresource_range = subresource_range; + self + } + #[doc = r" Prepends the given extension struct between the root and the first pointer. This"] + #[doc = r" method only exists on structs that can be passed to a function directly. Only"] + #[doc = r" valid extension structs can be pushed into the chain."] + #[doc = r" If the chain looks like `A -> B -> C`, and you call `builder.push_next(&mut D)`, then the"] + #[doc = r" chain will look like `A -> D -> B -> C`."] + pub fn push_next<T: ExtendsImageViewCreateInfo>( + mut self, + next: &'a mut T, + ) -> ImageViewCreateInfoBuilder<'a> { + unsafe { + let next_ptr = next as *mut T as *mut BaseOutStructure; + let last_next = ptr_chain_iter(next).last().unwrap(); + (*last_next).p_next = self.inner.p_next as _; + self.inner.p_next = next_ptr as _; + } + self + } + #[doc = r" Calling build will **discard** all the lifetime information. Only call this if"] + #[doc = r" necessary! 
Builders implement `Deref` targeting their corresponding Vulkan struct,"] + #[doc = r" so references to builders can be passed directly to Vulkan functions."] + pub fn build(self) -> ImageViewCreateInfo { + self.inner + } +} +#[repr(C)] +#[derive(Copy, Clone, Default, Debug)] +#[doc = ""] +pub struct BufferCopy { + pub src_offset: DeviceSize, + pub dst_offset: DeviceSize, + pub size: DeviceSize, +} +impl BufferCopy { + pub fn builder<'a>() -> BufferCopyBuilder<'a> { + BufferCopyBuilder { + inner: BufferCopy::default(), + marker: ::std::marker::PhantomData, + } + } +} +#[repr(transparent)] +pub struct BufferCopyBuilder<'a> { + inner: BufferCopy, + marker: ::std::marker::PhantomData<&'a ()>, +} +impl<'a> ::std::ops::Deref for BufferCopyBuilder<'a> { + type Target = BufferCopy; + fn deref(&self) -> &Self::Target { + &self.inner + } +} +impl<'a> ::std::ops::DerefMut for BufferCopyBuilder<'a> { + fn deref_mut(&mut self) -> &mut Self::Target { + &mut self.inner + } +} +impl<'a> BufferCopyBuilder<'a> { + pub fn src_offset(mut self, src_offset: DeviceSize) -> BufferCopyBuilder<'a> { + self.inner.src_offset = src_offset; + self + } + pub fn dst_offset(mut self, dst_offset: DeviceSize) -> BufferCopyBuilder<'a> { + self.inner.dst_offset = dst_offset; + self + } + pub fn size(mut self, size: DeviceSize) -> BufferCopyBuilder<'a> { + self.inner.size = size; + self + } + #[doc = r" Calling build will **discard** all the lifetime information. Only call this if"] + #[doc = r" necessary! Builders implement `Deref` targeting their corresponding Vulkan struct,"] + #[doc = r" so references to builders can be passed directly to Vulkan functions."] + pub fn build(self) -> BufferCopy { + self.inner + } +} +#[repr(C)] +#[derive(Copy, Clone, Default, Debug)] +#[doc = ""] +pub struct SparseMemoryBind { + pub resource_offset: DeviceSize, + pub size: DeviceSize, + pub memory: DeviceMemory, + pub memory_offset: DeviceSize, + pub flags: SparseMemoryBindFlags, +} +impl SparseMemoryBind { + pub fn builder<'a>() -> SparseMemoryBindBuilder<'a> { + SparseMemoryBindBuilder { + inner: SparseMemoryBind::default(), + marker: ::std::marker::PhantomData, + } + } +} +#[repr(transparent)] +pub struct SparseMemoryBindBuilder<'a> { + inner: SparseMemoryBind, + marker: ::std::marker::PhantomData<&'a ()>, +} +impl<'a> ::std::ops::Deref for SparseMemoryBindBuilder<'a> { + type Target = SparseMemoryBind; + fn deref(&self) -> &Self::Target { + &self.inner + } +} +impl<'a> ::std::ops::DerefMut for SparseMemoryBindBuilder<'a> { + fn deref_mut(&mut self) -> &mut Self::Target { + &mut self.inner + } +} +impl<'a> SparseMemoryBindBuilder<'a> { + pub fn resource_offset(mut self, resource_offset: DeviceSize) -> SparseMemoryBindBuilder<'a> { + self.inner.resource_offset = resource_offset; + self + } + pub fn size(mut self, size: DeviceSize) -> SparseMemoryBindBuilder<'a> { + self.inner.size = size; + self + } + pub fn memory(mut self, memory: DeviceMemory) -> SparseMemoryBindBuilder<'a> { + self.inner.memory = memory; + self + } + pub fn memory_offset(mut self, memory_offset: DeviceSize) -> SparseMemoryBindBuilder<'a> { + self.inner.memory_offset = memory_offset; + self + } + pub fn flags(mut self, flags: SparseMemoryBindFlags) -> SparseMemoryBindBuilder<'a> { + self.inner.flags = flags; + self + } + #[doc = r" Calling build will **discard** all the lifetime information. Only call this if"] + #[doc = r" necessary! 
Builders implement `Deref` targeting their corresponding Vulkan struct,"] + #[doc = r" so references to builders can be passed directly to Vulkan functions."] + pub fn build(self) -> SparseMemoryBind { + self.inner + } +} +#[repr(C)] +#[derive(Copy, Clone, Default, Debug)] +#[doc = ""] +pub struct SparseImageMemoryBind { + pub subresource: ImageSubresource, + pub offset: Offset3D, + pub extent: Extent3D, + pub memory: DeviceMemory, + pub memory_offset: DeviceSize, + pub flags: SparseMemoryBindFlags, +} +impl SparseImageMemoryBind { + pub fn builder<'a>() -> SparseImageMemoryBindBuilder<'a> { + SparseImageMemoryBindBuilder { + inner: SparseImageMemoryBind::default(), + marker: ::std::marker::PhantomData, + } + } +} +#[repr(transparent)] +pub struct SparseImageMemoryBindBuilder<'a> { + inner: SparseImageMemoryBind, + marker: ::std::marker::PhantomData<&'a ()>, +} +impl<'a> ::std::ops::Deref for SparseImageMemoryBindBuilder<'a> { + type Target = SparseImageMemoryBind; + fn deref(&self) -> &Self::Target { + &self.inner + } +} +impl<'a> ::std::ops::DerefMut for SparseImageMemoryBindBuilder<'a> { + fn deref_mut(&mut self) -> &mut Self::Target { + &mut self.inner + } +} +impl<'a> SparseImageMemoryBindBuilder<'a> { + pub fn subresource( + mut self, + subresource: ImageSubresource, + ) -> SparseImageMemoryBindBuilder<'a> { + self.inner.subresource = subresource; + self + } + pub fn offset(mut self, offset: Offset3D) -> SparseImageMemoryBindBuilder<'a> { + self.inner.offset = offset; + self + } + pub fn extent(mut self, extent: Extent3D) -> SparseImageMemoryBindBuilder<'a> { + self.inner.extent = extent; + self + } + pub fn memory(mut self, memory: DeviceMemory) -> SparseImageMemoryBindBuilder<'a> { + self.inner.memory = memory; + self + } + pub fn memory_offset(mut self, memory_offset: DeviceSize) -> SparseImageMemoryBindBuilder<'a> { + self.inner.memory_offset = memory_offset; + self + } + pub fn flags(mut self, flags: SparseMemoryBindFlags) -> SparseImageMemoryBindBuilder<'a> { + self.inner.flags = flags; + self + } + #[doc = r" Calling build will **discard** all the lifetime information. Only call this if"] + #[doc = r" necessary! 
Builders implement `Deref` targeting their corresponding Vulkan struct,"] + #[doc = r" so references to builders can be passed directly to Vulkan functions."] + pub fn build(self) -> SparseImageMemoryBind { + self.inner + } +} +#[repr(C)] +#[derive(Copy, Clone, Debug)] +#[doc = ""] +pub struct SparseBufferMemoryBindInfo { + pub buffer: Buffer, + pub bind_count: u32, + pub p_binds: *const SparseMemoryBind, +} +impl ::std::default::Default for SparseBufferMemoryBindInfo { + fn default() -> SparseBufferMemoryBindInfo { + SparseBufferMemoryBindInfo { + buffer: Buffer::default(), + bind_count: u32::default(), + p_binds: ::std::ptr::null(), + } + } +} +impl SparseBufferMemoryBindInfo { + pub fn builder<'a>() -> SparseBufferMemoryBindInfoBuilder<'a> { + SparseBufferMemoryBindInfoBuilder { + inner: SparseBufferMemoryBindInfo::default(), + marker: ::std::marker::PhantomData, + } + } +} +#[repr(transparent)] +pub struct SparseBufferMemoryBindInfoBuilder<'a> { + inner: SparseBufferMemoryBindInfo, + marker: ::std::marker::PhantomData<&'a ()>, +} +impl<'a> ::std::ops::Deref for SparseBufferMemoryBindInfoBuilder<'a> { + type Target = SparseBufferMemoryBindInfo; + fn deref(&self) -> &Self::Target { + &self.inner + } +} +impl<'a> ::std::ops::DerefMut for SparseBufferMemoryBindInfoBuilder<'a> { + fn deref_mut(&mut self) -> &mut Self::Target { + &mut self.inner + } +} +impl<'a> SparseBufferMemoryBindInfoBuilder<'a> { + pub fn buffer(mut self, buffer: Buffer) -> SparseBufferMemoryBindInfoBuilder<'a> { + self.inner.buffer = buffer; + self + } + pub fn binds(mut self, binds: &'a [SparseMemoryBind]) -> SparseBufferMemoryBindInfoBuilder<'a> { + self.inner.bind_count = binds.len() as _; + self.inner.p_binds = binds.as_ptr(); + self + } + #[doc = r" Calling build will **discard** all the lifetime information. Only call this if"] + #[doc = r" necessary! 
Builders implement `Deref` targeting their corresponding Vulkan struct,"] + #[doc = r" so references to builders can be passed directly to Vulkan functions."] + pub fn build(self) -> SparseBufferMemoryBindInfo { + self.inner + } +} +#[repr(C)] +#[derive(Copy, Clone, Debug)] +#[doc = ""] +pub struct SparseImageOpaqueMemoryBindInfo { + pub image: Image, + pub bind_count: u32, + pub p_binds: *const SparseMemoryBind, +} +impl ::std::default::Default for SparseImageOpaqueMemoryBindInfo { + fn default() -> SparseImageOpaqueMemoryBindInfo { + SparseImageOpaqueMemoryBindInfo { + image: Image::default(), + bind_count: u32::default(), + p_binds: ::std::ptr::null(), + } + } +} +impl SparseImageOpaqueMemoryBindInfo { + pub fn builder<'a>() -> SparseImageOpaqueMemoryBindInfoBuilder<'a> { + SparseImageOpaqueMemoryBindInfoBuilder { + inner: SparseImageOpaqueMemoryBindInfo::default(), + marker: ::std::marker::PhantomData, + } + } +} +#[repr(transparent)] +pub struct SparseImageOpaqueMemoryBindInfoBuilder<'a> { + inner: SparseImageOpaqueMemoryBindInfo, + marker: ::std::marker::PhantomData<&'a ()>, +} +impl<'a> ::std::ops::Deref for SparseImageOpaqueMemoryBindInfoBuilder<'a> { + type Target = SparseImageOpaqueMemoryBindInfo; + fn deref(&self) -> &Self::Target { + &self.inner + } +} +impl<'a> ::std::ops::DerefMut for SparseImageOpaqueMemoryBindInfoBuilder<'a> { + fn deref_mut(&mut self) -> &mut Self::Target { + &mut self.inner + } +} +impl<'a> SparseImageOpaqueMemoryBindInfoBuilder<'a> { + pub fn image(mut self, image: Image) -> SparseImageOpaqueMemoryBindInfoBuilder<'a> { + self.inner.image = image; + self + } + pub fn binds( + mut self, + binds: &'a [SparseMemoryBind], + ) -> SparseImageOpaqueMemoryBindInfoBuilder<'a> { + self.inner.bind_count = binds.len() as _; + self.inner.p_binds = binds.as_ptr(); + self + } + #[doc = r" Calling build will **discard** all the lifetime information. Only call this if"] + #[doc = r" necessary! 
Builders implement `Deref` targeting their corresponding Vulkan struct,"] + #[doc = r" so references to builders can be passed directly to Vulkan functions."] + pub fn build(self) -> SparseImageOpaqueMemoryBindInfo { + self.inner + } +} +#[repr(C)] +#[derive(Copy, Clone, Debug)] +#[doc = ""] +pub struct SparseImageMemoryBindInfo { + pub image: Image, + pub bind_count: u32, + pub p_binds: *const SparseImageMemoryBind, +} +impl ::std::default::Default for SparseImageMemoryBindInfo { + fn default() -> SparseImageMemoryBindInfo { + SparseImageMemoryBindInfo { + image: Image::default(), + bind_count: u32::default(), + p_binds: ::std::ptr::null(), + } + } +} +impl SparseImageMemoryBindInfo { + pub fn builder<'a>() -> SparseImageMemoryBindInfoBuilder<'a> { + SparseImageMemoryBindInfoBuilder { + inner: SparseImageMemoryBindInfo::default(), + marker: ::std::marker::PhantomData, + } + } +} +#[repr(transparent)] +pub struct SparseImageMemoryBindInfoBuilder<'a> { + inner: SparseImageMemoryBindInfo, + marker: ::std::marker::PhantomData<&'a ()>, +} +impl<'a> ::std::ops::Deref for SparseImageMemoryBindInfoBuilder<'a> { + type Target = SparseImageMemoryBindInfo; + fn deref(&self) -> &Self::Target { + &self.inner + } +} +impl<'a> ::std::ops::DerefMut for SparseImageMemoryBindInfoBuilder<'a> { + fn deref_mut(&mut self) -> &mut Self::Target { + &mut self.inner + } +} +impl<'a> SparseImageMemoryBindInfoBuilder<'a> { + pub fn image(mut self, image: Image) -> SparseImageMemoryBindInfoBuilder<'a> { + self.inner.image = image; + self + } + pub fn binds( + mut self, + binds: &'a [SparseImageMemoryBind], + ) -> SparseImageMemoryBindInfoBuilder<'a> { + self.inner.bind_count = binds.len() as _; + self.inner.p_binds = binds.as_ptr(); + self + } + #[doc = r" Calling build will **discard** all the lifetime information. Only call this if"] + #[doc = r" necessary! 
Builders implement `Deref` targeting their corresponding Vulkan struct,"] + #[doc = r" so references to builders can be passed directly to Vulkan functions."] + pub fn build(self) -> SparseImageMemoryBindInfo { + self.inner + } +} +#[repr(C)] +#[derive(Copy, Clone, Debug)] +#[doc = ""] +pub struct BindSparseInfo { + pub s_type: StructureType, + pub p_next: *const c_void, + pub wait_semaphore_count: u32, + pub p_wait_semaphores: *const Semaphore, + pub buffer_bind_count: u32, + pub p_buffer_binds: *const SparseBufferMemoryBindInfo, + pub image_opaque_bind_count: u32, + pub p_image_opaque_binds: *const SparseImageOpaqueMemoryBindInfo, + pub image_bind_count: u32, + pub p_image_binds: *const SparseImageMemoryBindInfo, + pub signal_semaphore_count: u32, + pub p_signal_semaphores: *const Semaphore, +} +impl ::std::default::Default for BindSparseInfo { + fn default() -> BindSparseInfo { + BindSparseInfo { + s_type: StructureType::BIND_SPARSE_INFO, + p_next: ::std::ptr::null(), + wait_semaphore_count: u32::default(), + p_wait_semaphores: ::std::ptr::null(), + buffer_bind_count: u32::default(), + p_buffer_binds: ::std::ptr::null(), + image_opaque_bind_count: u32::default(), + p_image_opaque_binds: ::std::ptr::null(), + image_bind_count: u32::default(), + p_image_binds: ::std::ptr::null(), + signal_semaphore_count: u32::default(), + p_signal_semaphores: ::std::ptr::null(), + } + } +} +impl BindSparseInfo { + pub fn builder<'a>() -> BindSparseInfoBuilder<'a> { + BindSparseInfoBuilder { + inner: BindSparseInfo::default(), + marker: ::std::marker::PhantomData, + } + } +} +#[repr(transparent)] +pub struct BindSparseInfoBuilder<'a> { + inner: BindSparseInfo, + marker: ::std::marker::PhantomData<&'a ()>, +} +pub unsafe trait ExtendsBindSparseInfo {} +impl<'a> ::std::ops::Deref for BindSparseInfoBuilder<'a> { + type Target = BindSparseInfo; + fn deref(&self) -> &Self::Target { + &self.inner + } +} +impl<'a> ::std::ops::DerefMut for BindSparseInfoBuilder<'a> { + fn deref_mut(&mut self) -> &mut Self::Target { + &mut self.inner + } +} +impl<'a> BindSparseInfoBuilder<'a> { + pub fn wait_semaphores( + mut self, + wait_semaphores: &'a [Semaphore], + ) -> BindSparseInfoBuilder<'a> { + self.inner.wait_semaphore_count = wait_semaphores.len() as _; + self.inner.p_wait_semaphores = wait_semaphores.as_ptr(); + self + } + pub fn buffer_binds( + mut self, + buffer_binds: &'a [SparseBufferMemoryBindInfo], + ) -> BindSparseInfoBuilder<'a> { + self.inner.buffer_bind_count = buffer_binds.len() as _; + self.inner.p_buffer_binds = buffer_binds.as_ptr(); + self + } + pub fn image_opaque_binds( + mut self, + image_opaque_binds: &'a [SparseImageOpaqueMemoryBindInfo], + ) -> BindSparseInfoBuilder<'a> { + self.inner.image_opaque_bind_count = image_opaque_binds.len() as _; + self.inner.p_image_opaque_binds = image_opaque_binds.as_ptr(); + self + } + pub fn image_binds( + mut self, + image_binds: &'a [SparseImageMemoryBindInfo], + ) -> BindSparseInfoBuilder<'a> { + self.inner.image_bind_count = image_binds.len() as _; + self.inner.p_image_binds = image_binds.as_ptr(); + self + } + pub fn signal_semaphores( + mut self, + signal_semaphores: &'a [Semaphore], + ) -> BindSparseInfoBuilder<'a> { + self.inner.signal_semaphore_count = signal_semaphores.len() as _; + self.inner.p_signal_semaphores = signal_semaphores.as_ptr(); + self + } + #[doc = r" Prepends the given extension struct between the root and the first pointer. This"] + #[doc = r" method only exists on structs that can be passed to a function directly. 
Only"] + #[doc = r" valid extension structs can be pushed into the chain."] + #[doc = r" If the chain looks like `A -> B -> C`, and you call `builder.push_next(&mut D)`, then the"] + #[doc = r" chain will look like `A -> D -> B -> C`."] + pub fn push_next( + mut self, + next: &'a mut T, + ) -> BindSparseInfoBuilder<'a> { + unsafe { + let next_ptr = next as *mut T as *mut BaseOutStructure; + let last_next = ptr_chain_iter(next).last().unwrap(); + (*last_next).p_next = self.inner.p_next as _; + self.inner.p_next = next_ptr as _; + } + self + } + #[doc = r" Calling build will **discard** all the lifetime information. Only call this if"] + #[doc = r" necessary! Builders implement `Deref` targeting their corresponding Vulkan struct,"] + #[doc = r" so references to builders can be passed directly to Vulkan functions."] + pub fn build(self) -> BindSparseInfo { + self.inner + } +} +#[repr(C)] +#[derive(Copy, Clone, Default, Debug)] +#[doc = ""] +pub struct ImageCopy { + pub src_subresource: ImageSubresourceLayers, + pub src_offset: Offset3D, + pub dst_subresource: ImageSubresourceLayers, + pub dst_offset: Offset3D, + pub extent: Extent3D, +} +impl ImageCopy { + pub fn builder<'a>() -> ImageCopyBuilder<'a> { + ImageCopyBuilder { + inner: ImageCopy::default(), + marker: ::std::marker::PhantomData, + } + } +} +#[repr(transparent)] +pub struct ImageCopyBuilder<'a> { + inner: ImageCopy, + marker: ::std::marker::PhantomData<&'a ()>, +} +impl<'a> ::std::ops::Deref for ImageCopyBuilder<'a> { + type Target = ImageCopy; + fn deref(&self) -> &Self::Target { + &self.inner + } +} +impl<'a> ::std::ops::DerefMut for ImageCopyBuilder<'a> { + fn deref_mut(&mut self) -> &mut Self::Target { + &mut self.inner + } +} +impl<'a> ImageCopyBuilder<'a> { + pub fn src_subresource( + mut self, + src_subresource: ImageSubresourceLayers, + ) -> ImageCopyBuilder<'a> { + self.inner.src_subresource = src_subresource; + self + } + pub fn src_offset(mut self, src_offset: Offset3D) -> ImageCopyBuilder<'a> { + self.inner.src_offset = src_offset; + self + } + pub fn dst_subresource( + mut self, + dst_subresource: ImageSubresourceLayers, + ) -> ImageCopyBuilder<'a> { + self.inner.dst_subresource = dst_subresource; + self + } + pub fn dst_offset(mut self, dst_offset: Offset3D) -> ImageCopyBuilder<'a> { + self.inner.dst_offset = dst_offset; + self + } + pub fn extent(mut self, extent: Extent3D) -> ImageCopyBuilder<'a> { + self.inner.extent = extent; + self + } + #[doc = r" Calling build will **discard** all the lifetime information. Only call this if"] + #[doc = r" necessary! 
Builders implement `Deref` targeting their corresponding Vulkan struct,"] + #[doc = r" so references to builders can be passed directly to Vulkan functions."] + pub fn build(self) -> ImageCopy { + self.inner + } +} +#[repr(C)] +#[derive(Copy, Clone, Debug)] +#[doc = ""] +pub struct ImageBlit { + pub src_subresource: ImageSubresourceLayers, + pub src_offsets: [Offset3D; 2], + pub dst_subresource: ImageSubresourceLayers, + pub dst_offsets: [Offset3D; 2], +} +impl ::std::default::Default for ImageBlit { + fn default() -> ImageBlit { + ImageBlit { + src_subresource: ImageSubresourceLayers::default(), + src_offsets: unsafe { ::std::mem::zeroed() }, + dst_subresource: ImageSubresourceLayers::default(), + dst_offsets: unsafe { ::std::mem::zeroed() }, + } + } +} +impl ImageBlit { + pub fn builder<'a>() -> ImageBlitBuilder<'a> { + ImageBlitBuilder { + inner: ImageBlit::default(), + marker: ::std::marker::PhantomData, + } + } +} +#[repr(transparent)] +pub struct ImageBlitBuilder<'a> { + inner: ImageBlit, + marker: ::std::marker::PhantomData<&'a ()>, +} +impl<'a> ::std::ops::Deref for ImageBlitBuilder<'a> { + type Target = ImageBlit; + fn deref(&self) -> &Self::Target { + &self.inner + } +} +impl<'a> ::std::ops::DerefMut for ImageBlitBuilder<'a> { + fn deref_mut(&mut self) -> &mut Self::Target { + &mut self.inner + } +} +impl<'a> ImageBlitBuilder<'a> { + pub fn src_subresource( + mut self, + src_subresource: ImageSubresourceLayers, + ) -> ImageBlitBuilder<'a> { + self.inner.src_subresource = src_subresource; + self + } + pub fn src_offsets(mut self, src_offsets: [Offset3D; 2]) -> ImageBlitBuilder<'a> { + self.inner.src_offsets = src_offsets; + self + } + pub fn dst_subresource( + mut self, + dst_subresource: ImageSubresourceLayers, + ) -> ImageBlitBuilder<'a> { + self.inner.dst_subresource = dst_subresource; + self + } + pub fn dst_offsets(mut self, dst_offsets: [Offset3D; 2]) -> ImageBlitBuilder<'a> { + self.inner.dst_offsets = dst_offsets; + self + } + #[doc = r" Calling build will **discard** all the lifetime information. Only call this if"] + #[doc = r" necessary! 
Builders implement `Deref` targeting their corresponding Vulkan struct,"] + #[doc = r" so references to builders can be passed directly to Vulkan functions."] + pub fn build(self) -> ImageBlit { + self.inner + } +} +#[repr(C)] +#[derive(Copy, Clone, Default, Debug)] +#[doc = ""] +pub struct BufferImageCopy { + pub buffer_offset: DeviceSize, + pub buffer_row_length: u32, + pub buffer_image_height: u32, + pub image_subresource: ImageSubresourceLayers, + pub image_offset: Offset3D, + pub image_extent: Extent3D, +} +impl BufferImageCopy { + pub fn builder<'a>() -> BufferImageCopyBuilder<'a> { + BufferImageCopyBuilder { + inner: BufferImageCopy::default(), + marker: ::std::marker::PhantomData, + } + } +} +#[repr(transparent)] +pub struct BufferImageCopyBuilder<'a> { + inner: BufferImageCopy, + marker: ::std::marker::PhantomData<&'a ()>, +} +impl<'a> ::std::ops::Deref for BufferImageCopyBuilder<'a> { + type Target = BufferImageCopy; + fn deref(&self) -> &Self::Target { + &self.inner + } +} +impl<'a> ::std::ops::DerefMut for BufferImageCopyBuilder<'a> { + fn deref_mut(&mut self) -> &mut Self::Target { + &mut self.inner + } +} +impl<'a> BufferImageCopyBuilder<'a> { + pub fn buffer_offset(mut self, buffer_offset: DeviceSize) -> BufferImageCopyBuilder<'a> { + self.inner.buffer_offset = buffer_offset; + self + } + pub fn buffer_row_length(mut self, buffer_row_length: u32) -> BufferImageCopyBuilder<'a> { + self.inner.buffer_row_length = buffer_row_length; + self + } + pub fn buffer_image_height(mut self, buffer_image_height: u32) -> BufferImageCopyBuilder<'a> { + self.inner.buffer_image_height = buffer_image_height; + self + } + pub fn image_subresource( + mut self, + image_subresource: ImageSubresourceLayers, + ) -> BufferImageCopyBuilder<'a> { + self.inner.image_subresource = image_subresource; + self + } + pub fn image_offset(mut self, image_offset: Offset3D) -> BufferImageCopyBuilder<'a> { + self.inner.image_offset = image_offset; + self + } + pub fn image_extent(mut self, image_extent: Extent3D) -> BufferImageCopyBuilder<'a> { + self.inner.image_extent = image_extent; + self + } + #[doc = r" Calling build will **discard** all the lifetime information. Only call this if"] + #[doc = r" necessary! 
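`BufferImageCopy` is a plain region descriptor with no `p_next`, so its builder has no `push_next`. One Vulkan convention worth calling out: a `buffer_row_length` or `buffer_image_height` of 0 means "tightly packed", i.e. derived from `image_extent`. A sketch of a full-image region, assuming `ImageSubresourceLayers`, `Offset3D`, and `Extent3D` from earlier in this module:

```rust
// One tightly packed region, as consumed by a vkCmdCopyBufferToImage-style call.
let region = BufferImageCopy::builder()
    .buffer_offset(0)
    .buffer_row_length(0)       // 0 = rows tightly packed (Vulkan convention)
    .buffer_image_height(0)     // 0 = 2D slices tightly packed
    .image_subresource(ImageSubresourceLayers::default())
    .image_offset(Offset3D { x: 0, y: 0, z: 0 })
    .image_extent(Extent3D { width: 256, height: 256, depth: 1 })
    .build();
assert_eq!(region.image_extent.width, 256);
```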
Builders implement `Deref` targeting their corresponding Vulkan struct,"] + #[doc = r" so references to builders can be passed directly to Vulkan functions."] + pub fn build(self) -> BufferImageCopy { + self.inner + } +} +#[repr(C)] +#[derive(Copy, Clone, Default, Debug)] +#[doc = ""] +pub struct ImageResolve { + pub src_subresource: ImageSubresourceLayers, + pub src_offset: Offset3D, + pub dst_subresource: ImageSubresourceLayers, + pub dst_offset: Offset3D, + pub extent: Extent3D, +} +impl ImageResolve { + pub fn builder<'a>() -> ImageResolveBuilder<'a> { + ImageResolveBuilder { + inner: ImageResolve::default(), + marker: ::std::marker::PhantomData, + } + } +} +#[repr(transparent)] +pub struct ImageResolveBuilder<'a> { + inner: ImageResolve, + marker: ::std::marker::PhantomData<&'a ()>, +} +impl<'a> ::std::ops::Deref for ImageResolveBuilder<'a> { + type Target = ImageResolve; + fn deref(&self) -> &Self::Target { + &self.inner + } +} +impl<'a> ::std::ops::DerefMut for ImageResolveBuilder<'a> { + fn deref_mut(&mut self) -> &mut Self::Target { + &mut self.inner + } +} +impl<'a> ImageResolveBuilder<'a> { + pub fn src_subresource( + mut self, + src_subresource: ImageSubresourceLayers, + ) -> ImageResolveBuilder<'a> { + self.inner.src_subresource = src_subresource; + self + } + pub fn src_offset(mut self, src_offset: Offset3D) -> ImageResolveBuilder<'a> { + self.inner.src_offset = src_offset; + self + } + pub fn dst_subresource( + mut self, + dst_subresource: ImageSubresourceLayers, + ) -> ImageResolveBuilder<'a> { + self.inner.dst_subresource = dst_subresource; + self + } + pub fn dst_offset(mut self, dst_offset: Offset3D) -> ImageResolveBuilder<'a> { + self.inner.dst_offset = dst_offset; + self + } + pub fn extent(mut self, extent: Extent3D) -> ImageResolveBuilder<'a> { + self.inner.extent = extent; + self + } + #[doc = r" Calling build will **discard** all the lifetime information. Only call this if"] + #[doc = r" necessary! 
Builders implement `Deref` targeting their corresponding Vulkan struct,"] + #[doc = r" so references to builders can be passed directly to Vulkan functions."] + pub fn build(self) -> ImageResolve { + self.inner + } +} +#[repr(C)] +#[derive(Copy, Clone, Debug)] +#[doc = ""] +pub struct ShaderModuleCreateInfo { + pub s_type: StructureType, + pub p_next: *const c_void, + pub flags: ShaderModuleCreateFlags, + pub code_size: usize, + pub p_code: *const u32, +} +impl ::std::default::Default for ShaderModuleCreateInfo { + fn default() -> ShaderModuleCreateInfo { + ShaderModuleCreateInfo { + s_type: StructureType::SHADER_MODULE_CREATE_INFO, + p_next: ::std::ptr::null(), + flags: ShaderModuleCreateFlags::default(), + code_size: usize::default(), + p_code: ::std::ptr::null(), + } + } +} +impl ShaderModuleCreateInfo { + pub fn builder<'a>() -> ShaderModuleCreateInfoBuilder<'a> { + ShaderModuleCreateInfoBuilder { + inner: ShaderModuleCreateInfo::default(), + marker: ::std::marker::PhantomData, + } + } +} +#[repr(transparent)] +pub struct ShaderModuleCreateInfoBuilder<'a> { + inner: ShaderModuleCreateInfo, + marker: ::std::marker::PhantomData<&'a ()>, +} +pub unsafe trait ExtendsShaderModuleCreateInfo {} +impl<'a> ::std::ops::Deref for ShaderModuleCreateInfoBuilder<'a> { + type Target = ShaderModuleCreateInfo; + fn deref(&self) -> &Self::Target { + &self.inner + } +} +impl<'a> ::std::ops::DerefMut for ShaderModuleCreateInfoBuilder<'a> { + fn deref_mut(&mut self) -> &mut Self::Target { + &mut self.inner + } +} +impl<'a> ShaderModuleCreateInfoBuilder<'a> { + pub fn flags(mut self, flags: ShaderModuleCreateFlags) -> ShaderModuleCreateInfoBuilder<'a> { + self.inner.flags = flags; + self + } + pub fn code(mut self, code: &'a [u32]) -> ShaderModuleCreateInfoBuilder<'a> { + self.inner.code_size = code.len() * 4; + self.inner.p_code = code.as_ptr() as *const u32; + self + } + #[doc = r" Prepends the given extension struct between the root and the first pointer. This"] + #[doc = r" method only exists on structs that can be passed to a function directly. Only"] + #[doc = r" valid extension structs can be pushed into the chain."] + #[doc = r" If the chain looks like `A -> B -> C`, and you call `builder.push_next(&mut D)`, then the"] + #[doc = r" chain will look like `A -> D -> B -> C`."] + pub fn push_next<T: ExtendsShaderModuleCreateInfo>( + mut self, + next: &'a mut T, + ) -> ShaderModuleCreateInfoBuilder<'a> { + unsafe { + let next_ptr = next as *mut T as *mut BaseOutStructure; + let last_next = ptr_chain_iter(next).last().unwrap(); + (*last_next).p_next = self.inner.p_next as _; + self.inner.p_next = next_ptr as _; + } + self + } + #[doc = r" Calling build will **discard** all the lifetime information. Only call this if"] + #[doc = r" necessary! 
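Note the unit change inside `code`: the slice is `&[u32]` words of SPIR-V, but Vulkan's `codeSize` is in bytes, hence `code.len() * 4`. A sketch, using the SPIR-V magic number as a stand-in for a real shader binary:

```rust
// 0x0723_0203 is the SPIR-V magic word; a real module would hold full bytecode.
let spirv: [u32; 1] = [0x0723_0203];
let info = ShaderModuleCreateInfo::builder()
    .code(&spirv)                    // code_size is stored in BYTES: len * 4
    .build();
assert_eq!(info.code_size, 4);
assert_eq!(info.p_code, spirv.as_ptr());
```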
Builders implement `Deref` targeting their corresponding Vulkan struct,"] + #[doc = r" so references to builders can be passed directly to Vulkan functions."] + pub fn build(self) -> ShaderModuleCreateInfo { + self.inner + } +} +#[repr(C)] +#[derive(Copy, Clone, Debug)] +#[doc = ""] +pub struct DescriptorSetLayoutBinding { + pub binding: u32, + pub descriptor_type: DescriptorType, + pub descriptor_count: u32, + pub stage_flags: ShaderStageFlags, + pub p_immutable_samplers: *const Sampler, +} +impl ::std::default::Default for DescriptorSetLayoutBinding { + fn default() -> DescriptorSetLayoutBinding { + DescriptorSetLayoutBinding { + binding: u32::default(), + descriptor_type: DescriptorType::default(), + descriptor_count: u32::default(), + stage_flags: ShaderStageFlags::default(), + p_immutable_samplers: ::std::ptr::null(), + } + } +} +impl DescriptorSetLayoutBinding { + pub fn builder<'a>() -> DescriptorSetLayoutBindingBuilder<'a> { + DescriptorSetLayoutBindingBuilder { + inner: DescriptorSetLayoutBinding::default(), + marker: ::std::marker::PhantomData, + } + } +} +#[repr(transparent)] +pub struct DescriptorSetLayoutBindingBuilder<'a> { + inner: DescriptorSetLayoutBinding, + marker: ::std::marker::PhantomData<&'a ()>, +} +impl<'a> ::std::ops::Deref for DescriptorSetLayoutBindingBuilder<'a> { + type Target = DescriptorSetLayoutBinding; + fn deref(&self) -> &Self::Target { + &self.inner + } +} +impl<'a> ::std::ops::DerefMut for DescriptorSetLayoutBindingBuilder<'a> { + fn deref_mut(&mut self) -> &mut Self::Target { + &mut self.inner + } +} +impl<'a> DescriptorSetLayoutBindingBuilder<'a> { + pub fn binding(mut self, binding: u32) -> DescriptorSetLayoutBindingBuilder<'a> { + self.inner.binding = binding; + self + } + pub fn descriptor_type( + mut self, + descriptor_type: DescriptorType, + ) -> DescriptorSetLayoutBindingBuilder<'a> { + self.inner.descriptor_type = descriptor_type; + self + } + pub fn descriptor_count( + mut self, + descriptor_count: u32, + ) -> DescriptorSetLayoutBindingBuilder<'a> { + self.inner.descriptor_count = descriptor_count; + self + } + pub fn stage_flags( + mut self, + stage_flags: ShaderStageFlags, + ) -> DescriptorSetLayoutBindingBuilder<'a> { + self.inner.stage_flags = stage_flags; + self + } + pub fn immutable_samplers( + mut self, + immutable_samplers: &'a [Sampler], + ) -> DescriptorSetLayoutBindingBuilder<'a> { + self.inner.descriptor_count = immutable_samplers.len() as _; + self.inner.p_immutable_samplers = immutable_samplers.as_ptr(); + self + } + #[doc = r" Calling build will **discard** all the lifetime information. Only call this if"] + #[doc = r" necessary! 
Builders implement `Deref` targeting their corresponding Vulkan struct,"] + #[doc = r" so references to builders can be passed directly to Vulkan functions."] + pub fn build(self) -> DescriptorSetLayoutBinding { + self.inner + } +} +#[repr(C)] +#[derive(Copy, Clone, Debug)] +#[doc = ""] +pub struct DescriptorSetLayoutCreateInfo { + pub s_type: StructureType, + pub p_next: *const c_void, + pub flags: DescriptorSetLayoutCreateFlags, + pub binding_count: u32, + pub p_bindings: *const DescriptorSetLayoutBinding, +} +impl ::std::default::Default for DescriptorSetLayoutCreateInfo { + fn default() -> DescriptorSetLayoutCreateInfo { + DescriptorSetLayoutCreateInfo { + s_type: StructureType::DESCRIPTOR_SET_LAYOUT_CREATE_INFO, + p_next: ::std::ptr::null(), + flags: DescriptorSetLayoutCreateFlags::default(), + binding_count: u32::default(), + p_bindings: ::std::ptr::null(), + } + } +} +impl DescriptorSetLayoutCreateInfo { + pub fn builder<'a>() -> DescriptorSetLayoutCreateInfoBuilder<'a> { + DescriptorSetLayoutCreateInfoBuilder { + inner: DescriptorSetLayoutCreateInfo::default(), + marker: ::std::marker::PhantomData, + } + } +} +#[repr(transparent)] +pub struct DescriptorSetLayoutCreateInfoBuilder<'a> { + inner: DescriptorSetLayoutCreateInfo, + marker: ::std::marker::PhantomData<&'a ()>, +} +pub unsafe trait ExtendsDescriptorSetLayoutCreateInfo {} +impl<'a> ::std::ops::Deref for DescriptorSetLayoutCreateInfoBuilder<'a> { + type Target = DescriptorSetLayoutCreateInfo; + fn deref(&self) -> &Self::Target { + &self.inner + } +} +impl<'a> ::std::ops::DerefMut for DescriptorSetLayoutCreateInfoBuilder<'a> { + fn deref_mut(&mut self) -> &mut Self::Target { + &mut self.inner + } +} +impl<'a> DescriptorSetLayoutCreateInfoBuilder<'a> { + pub fn flags( + mut self, + flags: DescriptorSetLayoutCreateFlags, + ) -> DescriptorSetLayoutCreateInfoBuilder<'a> { + self.inner.flags = flags; + self + } + pub fn bindings( + mut self, + bindings: &'a [DescriptorSetLayoutBinding], + ) -> DescriptorSetLayoutCreateInfoBuilder<'a> { + self.inner.binding_count = bindings.len() as _; + self.inner.p_bindings = bindings.as_ptr(); + self + } + #[doc = r" Prepends the given extension struct between the root and the first pointer. This"] + #[doc = r" method only exists on structs that can be passed to a function directly. Only"] + #[doc = r" valid extension structs can be pushed into the chain."] + #[doc = r" If the chain looks like `A -> B -> C`, and you call `builder.push_next(&mut D)`, then the"] + #[doc = r" chain will look like `A -> D -> B -> C`."] + pub fn push_next( + mut self, + next: &'a mut T, + ) -> DescriptorSetLayoutCreateInfoBuilder<'a> { + unsafe { + let next_ptr = next as *mut T as *mut BaseOutStructure; + let last_next = ptr_chain_iter(next).last().unwrap(); + (*last_next).p_next = self.inner.p_next as _; + self.inner.p_next = next_ptr as _; + } + self + } + #[doc = r" Calling build will **discard** all the lifetime information. Only call this if"] + #[doc = r" necessary! 
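One quirk to keep in mind from the generated setters above: `immutable_samplers` overwrites `descriptor_count` with the sampler slice's length, so call it after (or instead of) `descriptor_count` when immutable samplers are in play. A minimal layout with a single uniform buffer visible to the compute stage, using the standard Vulkan constant names:

```rust
// Binding 0: one uniform buffer, compute-stage only, no immutable samplers.
let bindings = [DescriptorSetLayoutBinding::builder()
    .binding(0)
    .descriptor_type(DescriptorType::UNIFORM_BUFFER)
    .descriptor_count(1)
    .stage_flags(ShaderStageFlags::COMPUTE)
    .build()];
let layout_info = DescriptorSetLayoutCreateInfo::builder()
    .bindings(&bindings)             // sets binding_count and p_bindings together
    .build();
assert_eq!(layout_info.binding_count, 1);
```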
Builders implement `Deref` targeting their corresponding Vulkan struct,"] + #[doc = r" so references to builders can be passed directly to Vulkan functions."] + pub fn build(self) -> DescriptorSetLayoutCreateInfo { + self.inner + } +} +#[repr(C)] +#[derive(Copy, Clone, Default, Debug)] +#[doc = ""] +pub struct DescriptorPoolSize { + pub ty: DescriptorType, + pub descriptor_count: u32, +} +impl DescriptorPoolSize { + pub fn builder<'a>() -> DescriptorPoolSizeBuilder<'a> { + DescriptorPoolSizeBuilder { + inner: DescriptorPoolSize::default(), + marker: ::std::marker::PhantomData, + } + } +} +#[repr(transparent)] +pub struct DescriptorPoolSizeBuilder<'a> { + inner: DescriptorPoolSize, + marker: ::std::marker::PhantomData<&'a ()>, +} +impl<'a> ::std::ops::Deref for DescriptorPoolSizeBuilder<'a> { + type Target = DescriptorPoolSize; + fn deref(&self) -> &Self::Target { + &self.inner + } +} +impl<'a> ::std::ops::DerefMut for DescriptorPoolSizeBuilder<'a> { + fn deref_mut(&mut self) -> &mut Self::Target { + &mut self.inner + } +} +impl<'a> DescriptorPoolSizeBuilder<'a> { + pub fn ty(mut self, ty: DescriptorType) -> DescriptorPoolSizeBuilder<'a> { + self.inner.ty = ty; + self + } + pub fn descriptor_count(mut self, descriptor_count: u32) -> DescriptorPoolSizeBuilder<'a> { + self.inner.descriptor_count = descriptor_count; + self + } + #[doc = r" Calling build will **discard** all the lifetime information. Only call this if"] + #[doc = r" necessary! Builders implement `Deref` targeting their corresponding Vulkan struct,"] + #[doc = r" so references to builders can be passed directly to Vulkan functions."] + pub fn build(self) -> DescriptorPoolSize { + self.inner + } +} +#[repr(C)] +#[derive(Copy, Clone, Debug)] +#[doc = ""] +pub struct DescriptorPoolCreateInfo { + pub s_type: StructureType, + pub p_next: *const c_void, + pub flags: DescriptorPoolCreateFlags, + pub max_sets: u32, + pub pool_size_count: u32, + pub p_pool_sizes: *const DescriptorPoolSize, +} +impl ::std::default::Default for DescriptorPoolCreateInfo { + fn default() -> DescriptorPoolCreateInfo { + DescriptorPoolCreateInfo { + s_type: StructureType::DESCRIPTOR_POOL_CREATE_INFO, + p_next: ::std::ptr::null(), + flags: DescriptorPoolCreateFlags::default(), + max_sets: u32::default(), + pool_size_count: u32::default(), + p_pool_sizes: ::std::ptr::null(), + } + } +} +impl DescriptorPoolCreateInfo { + pub fn builder<'a>() -> DescriptorPoolCreateInfoBuilder<'a> { + DescriptorPoolCreateInfoBuilder { + inner: DescriptorPoolCreateInfo::default(), + marker: ::std::marker::PhantomData, + } + } +} +#[repr(transparent)] +pub struct DescriptorPoolCreateInfoBuilder<'a> { + inner: DescriptorPoolCreateInfo, + marker: ::std::marker::PhantomData<&'a ()>, +} +pub unsafe trait ExtendsDescriptorPoolCreateInfo {} +impl<'a> ::std::ops::Deref for DescriptorPoolCreateInfoBuilder<'a> { + type Target = DescriptorPoolCreateInfo; + fn deref(&self) -> &Self::Target { + &self.inner + } +} +impl<'a> ::std::ops::DerefMut for DescriptorPoolCreateInfoBuilder<'a> { + fn deref_mut(&mut self) -> &mut Self::Target { + &mut self.inner + } +} +impl<'a> DescriptorPoolCreateInfoBuilder<'a> { + pub fn flags( + mut self, + flags: DescriptorPoolCreateFlags, + ) -> DescriptorPoolCreateInfoBuilder<'a> { + self.inner.flags = flags; + self + } + pub fn max_sets(mut self, max_sets: u32) -> DescriptorPoolCreateInfoBuilder<'a> { + self.inner.max_sets = max_sets; + self + } + pub fn pool_sizes( + mut self, + pool_sizes: &'a [DescriptorPoolSize], + ) -> DescriptorPoolCreateInfoBuilder<'a> { + 
self.inner.pool_size_count = pool_sizes.len() as _; + self.inner.p_pool_sizes = pool_sizes.as_ptr(); + self + } + #[doc = r" Prepends the given extension struct between the root and the first pointer. This"] + #[doc = r" method only exists on structs that can be passed to a function directly. Only"] + #[doc = r" valid extension structs can be pushed into the chain."] + #[doc = r" If the chain looks like `A -> B -> C`, and you call `builder.push_next(&mut D)`, then the"] + #[doc = r" chain will look like `A -> D -> B -> C`."] + pub fn push_next( + mut self, + next: &'a mut T, + ) -> DescriptorPoolCreateInfoBuilder<'a> { + unsafe { + let next_ptr = next as *mut T as *mut BaseOutStructure; + let last_next = ptr_chain_iter(next).last().unwrap(); + (*last_next).p_next = self.inner.p_next as _; + self.inner.p_next = next_ptr as _; + } + self + } + #[doc = r" Calling build will **discard** all the lifetime information. Only call this if"] + #[doc = r" necessary! Builders implement `Deref` targeting their corresponding Vulkan struct,"] + #[doc = r" so references to builders can be passed directly to Vulkan functions."] + pub fn build(self) -> DescriptorPoolCreateInfo { + self.inner + } +} +#[repr(C)] +#[derive(Copy, Clone, Debug)] +#[doc = ""] +pub struct DescriptorSetAllocateInfo { + pub s_type: StructureType, + pub p_next: *const c_void, + pub descriptor_pool: DescriptorPool, + pub descriptor_set_count: u32, + pub p_set_layouts: *const DescriptorSetLayout, +} +impl ::std::default::Default for DescriptorSetAllocateInfo { + fn default() -> DescriptorSetAllocateInfo { + DescriptorSetAllocateInfo { + s_type: StructureType::DESCRIPTOR_SET_ALLOCATE_INFO, + p_next: ::std::ptr::null(), + descriptor_pool: DescriptorPool::default(), + descriptor_set_count: u32::default(), + p_set_layouts: ::std::ptr::null(), + } + } +} +impl DescriptorSetAllocateInfo { + pub fn builder<'a>() -> DescriptorSetAllocateInfoBuilder<'a> { + DescriptorSetAllocateInfoBuilder { + inner: DescriptorSetAllocateInfo::default(), + marker: ::std::marker::PhantomData, + } + } +} +#[repr(transparent)] +pub struct DescriptorSetAllocateInfoBuilder<'a> { + inner: DescriptorSetAllocateInfo, + marker: ::std::marker::PhantomData<&'a ()>, +} +pub unsafe trait ExtendsDescriptorSetAllocateInfo {} +impl<'a> ::std::ops::Deref for DescriptorSetAllocateInfoBuilder<'a> { + type Target = DescriptorSetAllocateInfo; + fn deref(&self) -> &Self::Target { + &self.inner + } +} +impl<'a> ::std::ops::DerefMut for DescriptorSetAllocateInfoBuilder<'a> { + fn deref_mut(&mut self) -> &mut Self::Target { + &mut self.inner + } +} +impl<'a> DescriptorSetAllocateInfoBuilder<'a> { + pub fn descriptor_pool( + mut self, + descriptor_pool: DescriptorPool, + ) -> DescriptorSetAllocateInfoBuilder<'a> { + self.inner.descriptor_pool = descriptor_pool; + self + } + pub fn set_layouts( + mut self, + set_layouts: &'a [DescriptorSetLayout], + ) -> DescriptorSetAllocateInfoBuilder<'a> { + self.inner.descriptor_set_count = set_layouts.len() as _; + self.inner.p_set_layouts = set_layouts.as_ptr(); + self + } + #[doc = r" Prepends the given extension struct between the root and the first pointer. This"] + #[doc = r" method only exists on structs that can be passed to a function directly. 
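Pool creation and set allocation follow the same slice pattern: `pool_sizes` fills `pool_size_count`/`p_pool_sizes`, and `set_layouts` makes `descriptor_set_count` equal to the number of layout handles, one per set to allocate. A sketch with placeholder (null) handles standing in for real objects:

```rust
// Enough pool capacity for one set holding one uniform buffer.
let pool_sizes = [DescriptorPoolSize::builder()
    .ty(DescriptorType::UNIFORM_BUFFER)
    .descriptor_count(1)
    .build()];
let pool_info = DescriptorPoolCreateInfo::builder()
    .max_sets(1)
    .pool_sizes(&pool_sizes)
    .build();
// Allocation mirrors the pattern: one layout handle per set to allocate.
let set_layouts = [DescriptorSetLayout::default()];   // placeholder handle
let alloc_info = DescriptorSetAllocateInfo::builder()
    .descriptor_pool(DescriptorPool::default())       // placeholder handle
    .set_layouts(&set_layouts)   // descriptor_set_count = set_layouts.len()
    .build();
assert_eq!(alloc_info.descriptor_set_count, 1);
```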
Only"] + #[doc = r" valid extension structs can be pushed into the chain."] + #[doc = r" If the chain looks like `A -> B -> C`, and you call `builder.push_next(&mut D)`, then the"] + #[doc = r" chain will look like `A -> D -> B -> C`."] + pub fn push_next( + mut self, + next: &'a mut T, + ) -> DescriptorSetAllocateInfoBuilder<'a> { + unsafe { + let next_ptr = next as *mut T as *mut BaseOutStructure; + let last_next = ptr_chain_iter(next).last().unwrap(); + (*last_next).p_next = self.inner.p_next as _; + self.inner.p_next = next_ptr as _; + } + self + } + #[doc = r" Calling build will **discard** all the lifetime information. Only call this if"] + #[doc = r" necessary! Builders implement `Deref` targeting their corresponding Vulkan struct,"] + #[doc = r" so references to builders can be passed directly to Vulkan functions."] + pub fn build(self) -> DescriptorSetAllocateInfo { + self.inner + } +} +#[repr(C)] +#[derive(Copy, Clone, Default, Debug)] +#[doc = ""] +pub struct SpecializationMapEntry { + pub constant_id: u32, + pub offset: u32, + pub size: usize, +} +impl SpecializationMapEntry { + pub fn builder<'a>() -> SpecializationMapEntryBuilder<'a> { + SpecializationMapEntryBuilder { + inner: SpecializationMapEntry::default(), + marker: ::std::marker::PhantomData, + } + } +} +#[repr(transparent)] +pub struct SpecializationMapEntryBuilder<'a> { + inner: SpecializationMapEntry, + marker: ::std::marker::PhantomData<&'a ()>, +} +impl<'a> ::std::ops::Deref for SpecializationMapEntryBuilder<'a> { + type Target = SpecializationMapEntry; + fn deref(&self) -> &Self::Target { + &self.inner + } +} +impl<'a> ::std::ops::DerefMut for SpecializationMapEntryBuilder<'a> { + fn deref_mut(&mut self) -> &mut Self::Target { + &mut self.inner + } +} +impl<'a> SpecializationMapEntryBuilder<'a> { + pub fn constant_id(mut self, constant_id: u32) -> SpecializationMapEntryBuilder<'a> { + self.inner.constant_id = constant_id; + self + } + pub fn offset(mut self, offset: u32) -> SpecializationMapEntryBuilder<'a> { + self.inner.offset = offset; + self + } + pub fn size(mut self, size: usize) -> SpecializationMapEntryBuilder<'a> { + self.inner.size = size; + self + } + #[doc = r" Calling build will **discard** all the lifetime information. Only call this if"] + #[doc = r" necessary! 
Builders implement `Deref` targeting their corresponding Vulkan struct,"] + #[doc = r" so references to builders can be passed directly to Vulkan functions."] + pub fn build(self) -> SpecializationMapEntry { + self.inner + } +} +#[repr(C)] +#[derive(Copy, Clone, Debug)] +#[doc = ""] +pub struct SpecializationInfo { + pub map_entry_count: u32, + pub p_map_entries: *const SpecializationMapEntry, + pub data_size: usize, + pub p_data: *const c_void, +} +impl ::std::default::Default for SpecializationInfo { + fn default() -> SpecializationInfo { + SpecializationInfo { + map_entry_count: u32::default(), + p_map_entries: ::std::ptr::null(), + data_size: usize::default(), + p_data: ::std::ptr::null(), + } + } +} +impl SpecializationInfo { + pub fn builder<'a>() -> SpecializationInfoBuilder<'a> { + SpecializationInfoBuilder { + inner: SpecializationInfo::default(), + marker: ::std::marker::PhantomData, + } + } +} +#[repr(transparent)] +pub struct SpecializationInfoBuilder<'a> { + inner: SpecializationInfo, + marker: ::std::marker::PhantomData<&'a ()>, +} +impl<'a> ::std::ops::Deref for SpecializationInfoBuilder<'a> { + type Target = SpecializationInfo; + fn deref(&self) -> &Self::Target { + &self.inner + } +} +impl<'a> ::std::ops::DerefMut for SpecializationInfoBuilder<'a> { + fn deref_mut(&mut self) -> &mut Self::Target { + &mut self.inner + } +} +impl<'a> SpecializationInfoBuilder<'a> { + pub fn map_entries( + mut self, + map_entries: &'a [SpecializationMapEntry], + ) -> SpecializationInfoBuilder<'a> { + self.inner.map_entry_count = map_entries.len() as _; + self.inner.p_map_entries = map_entries.as_ptr(); + self + } + pub fn data(mut self, data: &'a [u8]) -> SpecializationInfoBuilder<'a> { + self.inner.data_size = data.len() as _; + self.inner.p_data = data.as_ptr() as *const c_void; + self + } + #[doc = r" Calling build will **discard** all the lifetime information. Only call this if"] + #[doc = r" necessary! 
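`SpecializationInfo` pairs an opaque byte blob with map entries that slice it: `offset` and `size` are in bytes, and `constant_id` matches the `SpecId` decoration in the SPIR-V. A sketch specializing one `u32` constant:

```rust
// One u32 specialization constant (id 0) taken from a 4-byte blob.
let data: [u8; 4] = 7u32.to_ne_bytes();
let entries = [SpecializationMapEntry::builder()
    .constant_id(0)
    .offset(0)                   // byte offset into `data`
    .size(4)                     // byte size of the constant
    .build()];
let spec = SpecializationInfo::builder()
    .map_entries(&entries)
    .data(&data)
    .build();
assert_eq!(spec.data_size, 4);
```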
Builders implement `Deref` targeting their corresponding Vulkan struct,"] + #[doc = r" so references to builders can be passed directly to Vulkan functions."] + pub fn build(self) -> SpecializationInfo { + self.inner + } +} +#[repr(C)] +#[derive(Copy, Clone, Debug)] +#[doc = ""] +pub struct PipelineShaderStageCreateInfo { + pub s_type: StructureType, + pub p_next: *const c_void, + pub flags: PipelineShaderStageCreateFlags, + pub stage: ShaderStageFlags, + pub module: ShaderModule, + pub p_name: *const c_char, + pub p_specialization_info: *const SpecializationInfo, +} +impl ::std::default::Default for PipelineShaderStageCreateInfo { + fn default() -> PipelineShaderStageCreateInfo { + PipelineShaderStageCreateInfo { + s_type: StructureType::PIPELINE_SHADER_STAGE_CREATE_INFO, + p_next: ::std::ptr::null(), + flags: PipelineShaderStageCreateFlags::default(), + stage: ShaderStageFlags::default(), + module: ShaderModule::default(), + p_name: ::std::ptr::null(), + p_specialization_info: ::std::ptr::null(), + } + } +} +impl PipelineShaderStageCreateInfo { + pub fn builder<'a>() -> PipelineShaderStageCreateInfoBuilder<'a> { + PipelineShaderStageCreateInfoBuilder { + inner: PipelineShaderStageCreateInfo::default(), + marker: ::std::marker::PhantomData, + } + } +} +#[repr(transparent)] +pub struct PipelineShaderStageCreateInfoBuilder<'a> { + inner: PipelineShaderStageCreateInfo, + marker: ::std::marker::PhantomData<&'a ()>, +} +pub unsafe trait ExtendsPipelineShaderStageCreateInfo {} +impl<'a> ::std::ops::Deref for PipelineShaderStageCreateInfoBuilder<'a> { + type Target = PipelineShaderStageCreateInfo; + fn deref(&self) -> &Self::Target { + &self.inner + } +} +impl<'a> ::std::ops::DerefMut for PipelineShaderStageCreateInfoBuilder<'a> { + fn deref_mut(&mut self) -> &mut Self::Target { + &mut self.inner + } +} +impl<'a> PipelineShaderStageCreateInfoBuilder<'a> { + pub fn flags( + mut self, + flags: PipelineShaderStageCreateFlags, + ) -> PipelineShaderStageCreateInfoBuilder<'a> { + self.inner.flags = flags; + self + } + pub fn stage(mut self, stage: ShaderStageFlags) -> PipelineShaderStageCreateInfoBuilder<'a> { + self.inner.stage = stage; + self + } + pub fn module(mut self, module: ShaderModule) -> PipelineShaderStageCreateInfoBuilder<'a> { + self.inner.module = module; + self + } + pub fn name(mut self, name: &'a ::std::ffi::CStr) -> PipelineShaderStageCreateInfoBuilder<'a> { + self.inner.p_name = name.as_ptr(); + self + } + pub fn specialization_info( + mut self, + specialization_info: &'a SpecializationInfo, + ) -> PipelineShaderStageCreateInfoBuilder<'a> { + self.inner.p_specialization_info = specialization_info; + self + } + #[doc = r" Prepends the given extension struct between the root and the first pointer. This"] + #[doc = r" method only exists on structs that can be passed to a function directly. Only"] + #[doc = r" valid extension structs can be pushed into the chain."] + #[doc = r" If the chain looks like `A -> B -> C`, and you call `builder.push_next(&mut D)`, then the"] + #[doc = r" chain will look like `A -> D -> B -> C`."] + pub fn push_next( + mut self, + next: &'a mut T, + ) -> PipelineShaderStageCreateInfoBuilder<'a> { + unsafe { + let next_ptr = next as *mut T as *mut BaseOutStructure; + let last_next = ptr_chain_iter(next).last().unwrap(); + (*last_next).p_next = self.inner.p_next as _; + self.inner.p_next = next_ptr as _; + } + self + } + #[doc = r" Calling build will **discard** all the lifetime information. Only call this if"] + #[doc = r" necessary! 
Builders implement `Deref` targeting their corresponding Vulkan struct,"] + #[doc = r" so references to builders can be passed directly to Vulkan functions."] + pub fn build(self) -> PipelineShaderStageCreateInfo { + self.inner + } +} +#[repr(C)] +#[derive(Copy, Clone, Debug)] +#[doc = ""] +pub struct ComputePipelineCreateInfo { + pub s_type: StructureType, + pub p_next: *const c_void, + pub flags: PipelineCreateFlags, + pub stage: PipelineShaderStageCreateInfo, + pub layout: PipelineLayout, + pub base_pipeline_handle: Pipeline, + pub base_pipeline_index: i32, +} +impl ::std::default::Default for ComputePipelineCreateInfo { + fn default() -> ComputePipelineCreateInfo { + ComputePipelineCreateInfo { + s_type: StructureType::COMPUTE_PIPELINE_CREATE_INFO, + p_next: ::std::ptr::null(), + flags: PipelineCreateFlags::default(), + stage: PipelineShaderStageCreateInfo::default(), + layout: PipelineLayout::default(), + base_pipeline_handle: Pipeline::default(), + base_pipeline_index: i32::default(), + } + } +} +impl ComputePipelineCreateInfo { + pub fn builder<'a>() -> ComputePipelineCreateInfoBuilder<'a> { + ComputePipelineCreateInfoBuilder { + inner: ComputePipelineCreateInfo::default(), + marker: ::std::marker::PhantomData, + } + } +} +#[repr(transparent)] +pub struct ComputePipelineCreateInfoBuilder<'a> { + inner: ComputePipelineCreateInfo, + marker: ::std::marker::PhantomData<&'a ()>, +} +pub unsafe trait ExtendsComputePipelineCreateInfo {} +impl<'a> ::std::ops::Deref for ComputePipelineCreateInfoBuilder<'a> { + type Target = ComputePipelineCreateInfo; + fn deref(&self) -> &Self::Target { + &self.inner + } +} +impl<'a> ::std::ops::DerefMut for ComputePipelineCreateInfoBuilder<'a> { + fn deref_mut(&mut self) -> &mut Self::Target { + &mut self.inner + } +} +impl<'a> ComputePipelineCreateInfoBuilder<'a> { + pub fn flags(mut self, flags: PipelineCreateFlags) -> ComputePipelineCreateInfoBuilder<'a> { + self.inner.flags = flags; + self + } + pub fn stage( + mut self, + stage: PipelineShaderStageCreateInfo, + ) -> ComputePipelineCreateInfoBuilder<'a> { + self.inner.stage = stage; + self + } + pub fn layout(mut self, layout: PipelineLayout) -> ComputePipelineCreateInfoBuilder<'a> { + self.inner.layout = layout; + self + } + pub fn base_pipeline_handle( + mut self, + base_pipeline_handle: Pipeline, + ) -> ComputePipelineCreateInfoBuilder<'a> { + self.inner.base_pipeline_handle = base_pipeline_handle; + self + } + pub fn base_pipeline_index( + mut self, + base_pipeline_index: i32, + ) -> ComputePipelineCreateInfoBuilder<'a> { + self.inner.base_pipeline_index = base_pipeline_index; + self + } + #[doc = r" Prepends the given extension struct between the root and the first pointer. This"] + #[doc = r" method only exists on structs that can be passed to a function directly. Only"] + #[doc = r" valid extension structs can be pushed into the chain."] + #[doc = r" If the chain looks like `A -> B -> C`, and you call `builder.push_next(&mut D)`, then the"] + #[doc = r" chain will look like `A -> D -> B -> C`."] + pub fn push_next( + mut self, + next: &'a mut T, + ) -> ComputePipelineCreateInfoBuilder<'a> { + unsafe { + let next_ptr = next as *mut T as *mut BaseOutStructure; + let last_next = ptr_chain_iter(next).last().unwrap(); + (*last_next).p_next = self.inner.p_next as _; + self.inner.p_next = next_ptr as _; + } + self + } + #[doc = r" Calling build will **discard** all the lifetime information. Only call this if"] + #[doc = r" necessary! 
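Putting the last two structs together: a stage description wants a NUL-terminated entry-point name (hence `&CStr`, kept alive by the `'a` borrow), and `ComputePipelineCreateInfo` embeds the stage by value. A sketch with placeholder handles; `-1` is the conventional `base_pipeline_index` when no parent pipeline is used:

```rust
// The entry point must be NUL-terminated, which is why `name` takes &CStr.
let entry = ::std::ffi::CStr::from_bytes_with_nul(b"main\0").unwrap();
let stage = PipelineShaderStageCreateInfo::builder()
    .stage(ShaderStageFlags::COMPUTE)
    .module(ShaderModule::default())    // placeholder handle
    .name(entry)
    .build();
let pipeline_info = ComputePipelineCreateInfo::builder()
    .stage(stage)
    .layout(PipelineLayout::default())  // placeholder handle
    .base_pipeline_index(-1)            // "no parent pipeline"
    .build();
assert_eq!(pipeline_info.stage.stage, ShaderStageFlags::COMPUTE);
```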
Builders implement `Deref` targeting their corresponding Vulkan struct,"] + #[doc = r" so references to builders can be passed directly to Vulkan functions."] + pub fn build(self) -> ComputePipelineCreateInfo { + self.inner + } +} +#[repr(C)] +#[derive(Copy, Clone, Default, Debug)] +#[doc = ""] +pub struct VertexInputBindingDescription { + pub binding: u32, + pub stride: u32, + pub input_rate: VertexInputRate, +} +impl VertexInputBindingDescription { + pub fn builder<'a>() -> VertexInputBindingDescriptionBuilder<'a> { + VertexInputBindingDescriptionBuilder { + inner: VertexInputBindingDescription::default(), + marker: ::std::marker::PhantomData, + } + } +} +#[repr(transparent)] +pub struct VertexInputBindingDescriptionBuilder<'a> { + inner: VertexInputBindingDescription, + marker: ::std::marker::PhantomData<&'a ()>, +} +impl<'a> ::std::ops::Deref for VertexInputBindingDescriptionBuilder<'a> { + type Target = VertexInputBindingDescription; + fn deref(&self) -> &Self::Target { + &self.inner + } +} +impl<'a> ::std::ops::DerefMut for VertexInputBindingDescriptionBuilder<'a> { + fn deref_mut(&mut self) -> &mut Self::Target { + &mut self.inner + } +} +impl<'a> VertexInputBindingDescriptionBuilder<'a> { + pub fn binding(mut self, binding: u32) -> VertexInputBindingDescriptionBuilder<'a> { + self.inner.binding = binding; + self + } + pub fn stride(mut self, stride: u32) -> VertexInputBindingDescriptionBuilder<'a> { + self.inner.stride = stride; + self + } + pub fn input_rate( + mut self, + input_rate: VertexInputRate, + ) -> VertexInputBindingDescriptionBuilder<'a> { + self.inner.input_rate = input_rate; + self + } + #[doc = r" Calling build will **discard** all the lifetime information. Only call this if"] + #[doc = r" necessary! Builders implement `Deref` targeting their corresponding Vulkan struct,"] + #[doc = r" so references to builders can be passed directly to Vulkan functions."] + pub fn build(self) -> VertexInputBindingDescription { + self.inner + } +} +#[repr(C)] +#[derive(Copy, Clone, Default, Debug)] +#[doc = ""] +pub struct VertexInputAttributeDescription { + pub location: u32, + pub binding: u32, + pub format: Format, + pub offset: u32, +} +impl VertexInputAttributeDescription { + pub fn builder<'a>() -> VertexInputAttributeDescriptionBuilder<'a> { + VertexInputAttributeDescriptionBuilder { + inner: VertexInputAttributeDescription::default(), + marker: ::std::marker::PhantomData, + } + } +} +#[repr(transparent)] +pub struct VertexInputAttributeDescriptionBuilder<'a> { + inner: VertexInputAttributeDescription, + marker: ::std::marker::PhantomData<&'a ()>, +} +impl<'a> ::std::ops::Deref for VertexInputAttributeDescriptionBuilder<'a> { + type Target = VertexInputAttributeDescription; + fn deref(&self) -> &Self::Target { + &self.inner + } +} +impl<'a> ::std::ops::DerefMut for VertexInputAttributeDescriptionBuilder<'a> { + fn deref_mut(&mut self) -> &mut Self::Target { + &mut self.inner + } +} +impl<'a> VertexInputAttributeDescriptionBuilder<'a> { + pub fn location(mut self, location: u32) -> VertexInputAttributeDescriptionBuilder<'a> { + self.inner.location = location; + self + } + pub fn binding(mut self, binding: u32) -> VertexInputAttributeDescriptionBuilder<'a> { + self.inner.binding = binding; + self + } + pub fn format(mut self, format: Format) -> VertexInputAttributeDescriptionBuilder<'a> { + self.inner.format = format; + self + } + pub fn offset(mut self, offset: u32) -> VertexInputAttributeDescriptionBuilder<'a> { + self.inner.offset = offset; + self + } + #[doc = r" Calling 
build will **discard** all the lifetime information. Only call this if"] + #[doc = r" necessary! Builders implement `Deref` targeting their corresponding Vulkan struct,"] + #[doc = r" so references to builders can be passed directly to Vulkan functions."] + pub fn build(self) -> VertexInputAttributeDescription { + self.inner + } +} +#[repr(C)] +#[derive(Copy, Clone, Debug)] +#[doc = ""] +pub struct PipelineVertexInputStateCreateInfo { + pub s_type: StructureType, + pub p_next: *const c_void, + pub flags: PipelineVertexInputStateCreateFlags, + pub vertex_binding_description_count: u32, + pub p_vertex_binding_descriptions: *const VertexInputBindingDescription, + pub vertex_attribute_description_count: u32, + pub p_vertex_attribute_descriptions: *const VertexInputAttributeDescription, +} +impl ::std::default::Default for PipelineVertexInputStateCreateInfo { + fn default() -> PipelineVertexInputStateCreateInfo { + PipelineVertexInputStateCreateInfo { + s_type: StructureType::PIPELINE_VERTEX_INPUT_STATE_CREATE_INFO, + p_next: ::std::ptr::null(), + flags: PipelineVertexInputStateCreateFlags::default(), + vertex_binding_description_count: u32::default(), + p_vertex_binding_descriptions: ::std::ptr::null(), + vertex_attribute_description_count: u32::default(), + p_vertex_attribute_descriptions: ::std::ptr::null(), + } + } +} +impl PipelineVertexInputStateCreateInfo { + pub fn builder<'a>() -> PipelineVertexInputStateCreateInfoBuilder<'a> { + PipelineVertexInputStateCreateInfoBuilder { + inner: PipelineVertexInputStateCreateInfo::default(), + marker: ::std::marker::PhantomData, + } + } +} +#[repr(transparent)] +pub struct PipelineVertexInputStateCreateInfoBuilder<'a> { + inner: PipelineVertexInputStateCreateInfo, + marker: ::std::marker::PhantomData<&'a ()>, +} +pub unsafe trait ExtendsPipelineVertexInputStateCreateInfo {} +impl<'a> ::std::ops::Deref for PipelineVertexInputStateCreateInfoBuilder<'a> { + type Target = PipelineVertexInputStateCreateInfo; + fn deref(&self) -> &Self::Target { + &self.inner + } +} +impl<'a> ::std::ops::DerefMut for PipelineVertexInputStateCreateInfoBuilder<'a> { + fn deref_mut(&mut self) -> &mut Self::Target { + &mut self.inner + } +} +impl<'a> PipelineVertexInputStateCreateInfoBuilder<'a> { + pub fn flags( + mut self, + flags: PipelineVertexInputStateCreateFlags, + ) -> PipelineVertexInputStateCreateInfoBuilder<'a> { + self.inner.flags = flags; + self + } + pub fn vertex_binding_descriptions( + mut self, + vertex_binding_descriptions: &'a [VertexInputBindingDescription], + ) -> PipelineVertexInputStateCreateInfoBuilder<'a> { + self.inner.vertex_binding_description_count = vertex_binding_descriptions.len() as _; + self.inner.p_vertex_binding_descriptions = vertex_binding_descriptions.as_ptr(); + self + } + pub fn vertex_attribute_descriptions( + mut self, + vertex_attribute_descriptions: &'a [VertexInputAttributeDescription], + ) -> PipelineVertexInputStateCreateInfoBuilder<'a> { + self.inner.vertex_attribute_description_count = vertex_attribute_descriptions.len() as _; + self.inner.p_vertex_attribute_descriptions = vertex_attribute_descriptions.as_ptr(); + self + } + #[doc = r" Prepends the given extension struct between the root and the first pointer. This"] + #[doc = r" method only exists on structs that can be passed to a function directly. 
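The two description types slot into `PipelineVertexInputStateCreateInfo` through the slice setters above. A sketch describing a single interleaved `vec2` position stream, with format and rate taken from the standard Vulkan constant names:

```rust
// Binding 0 carries 8-byte vertices; location 0 reads them as two f32s.
let bindings = [VertexInputBindingDescription::builder()
    .binding(0)
    .stride(8)                              // size of one vec2 vertex
    .input_rate(VertexInputRate::VERTEX)
    .build()];
let attributes = [VertexInputAttributeDescription::builder()
    .location(0)
    .binding(0)
    .format(Format::R32G32_SFLOAT)
    .offset(0)
    .build()];
let vertex_input = PipelineVertexInputStateCreateInfo::builder()
    .vertex_binding_descriptions(&bindings)
    .vertex_attribute_descriptions(&attributes)
    .build();
assert_eq!(vertex_input.vertex_attribute_description_count, 1);
```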
Only"] + #[doc = r" valid extension structs can be pushed into the chain."] + #[doc = r" If the chain looks like `A -> B -> C`, and you call `builder.push_next(&mut D)`, then the"] + #[doc = r" chain will look like `A -> D -> B -> C`."] + pub fn push_next( + mut self, + next: &'a mut T, + ) -> PipelineVertexInputStateCreateInfoBuilder<'a> { + unsafe { + let next_ptr = next as *mut T as *mut BaseOutStructure; + let last_next = ptr_chain_iter(next).last().unwrap(); + (*last_next).p_next = self.inner.p_next as _; + self.inner.p_next = next_ptr as _; + } + self + } + #[doc = r" Calling build will **discard** all the lifetime information. Only call this if"] + #[doc = r" necessary! Builders implement `Deref` targeting their corresponding Vulkan struct,"] + #[doc = r" so references to builders can be passed directly to Vulkan functions."] + pub fn build(self) -> PipelineVertexInputStateCreateInfo { + self.inner + } +} +#[repr(C)] +#[derive(Copy, Clone, Debug)] +#[doc = ""] +pub struct PipelineInputAssemblyStateCreateInfo { + pub s_type: StructureType, + pub p_next: *const c_void, + pub flags: PipelineInputAssemblyStateCreateFlags, + pub topology: PrimitiveTopology, + pub primitive_restart_enable: Bool32, +} +impl ::std::default::Default for PipelineInputAssemblyStateCreateInfo { + fn default() -> PipelineInputAssemblyStateCreateInfo { + PipelineInputAssemblyStateCreateInfo { + s_type: StructureType::PIPELINE_INPUT_ASSEMBLY_STATE_CREATE_INFO, + p_next: ::std::ptr::null(), + flags: PipelineInputAssemblyStateCreateFlags::default(), + topology: PrimitiveTopology::default(), + primitive_restart_enable: Bool32::default(), + } + } +} +impl PipelineInputAssemblyStateCreateInfo { + pub fn builder<'a>() -> PipelineInputAssemblyStateCreateInfoBuilder<'a> { + PipelineInputAssemblyStateCreateInfoBuilder { + inner: PipelineInputAssemblyStateCreateInfo::default(), + marker: ::std::marker::PhantomData, + } + } +} +#[repr(transparent)] +pub struct PipelineInputAssemblyStateCreateInfoBuilder<'a> { + inner: PipelineInputAssemblyStateCreateInfo, + marker: ::std::marker::PhantomData<&'a ()>, +} +pub unsafe trait ExtendsPipelineInputAssemblyStateCreateInfo {} +impl<'a> ::std::ops::Deref for PipelineInputAssemblyStateCreateInfoBuilder<'a> { + type Target = PipelineInputAssemblyStateCreateInfo; + fn deref(&self) -> &Self::Target { + &self.inner + } +} +impl<'a> ::std::ops::DerefMut for PipelineInputAssemblyStateCreateInfoBuilder<'a> { + fn deref_mut(&mut self) -> &mut Self::Target { + &mut self.inner + } +} +impl<'a> PipelineInputAssemblyStateCreateInfoBuilder<'a> { + pub fn flags( + mut self, + flags: PipelineInputAssemblyStateCreateFlags, + ) -> PipelineInputAssemblyStateCreateInfoBuilder<'a> { + self.inner.flags = flags; + self + } + pub fn topology( + mut self, + topology: PrimitiveTopology, + ) -> PipelineInputAssemblyStateCreateInfoBuilder<'a> { + self.inner.topology = topology; + self + } + pub fn primitive_restart_enable( + mut self, + primitive_restart_enable: bool, + ) -> PipelineInputAssemblyStateCreateInfoBuilder<'a> { + self.inner.primitive_restart_enable = primitive_restart_enable.into(); + self + } + #[doc = r" Prepends the given extension struct between the root and the first pointer. This"] + #[doc = r" method only exists on structs that can be passed to a function directly. 
Only"] + #[doc = r" valid extension structs can be pushed into the chain."] + #[doc = r" If the chain looks like `A -> B -> C`, and you call `builder.push_next(&mut D)`, then the"] + #[doc = r" chain will look like `A -> D -> B -> C`."] + pub fn push_next( + mut self, + next: &'a mut T, + ) -> PipelineInputAssemblyStateCreateInfoBuilder<'a> { + unsafe { + let next_ptr = next as *mut T as *mut BaseOutStructure; + let last_next = ptr_chain_iter(next).last().unwrap(); + (*last_next).p_next = self.inner.p_next as _; + self.inner.p_next = next_ptr as _; + } + self + } + #[doc = r" Calling build will **discard** all the lifetime information. Only call this if"] + #[doc = r" necessary! Builders implement `Deref` targeting their corresponding Vulkan struct,"] + #[doc = r" so references to builders can be passed directly to Vulkan functions."] + pub fn build(self) -> PipelineInputAssemblyStateCreateInfo { + self.inner + } +} +#[repr(C)] +#[derive(Copy, Clone, Debug)] +#[doc = ""] +pub struct PipelineTessellationStateCreateInfo { + pub s_type: StructureType, + pub p_next: *const c_void, + pub flags: PipelineTessellationStateCreateFlags, + pub patch_control_points: u32, +} +impl ::std::default::Default for PipelineTessellationStateCreateInfo { + fn default() -> PipelineTessellationStateCreateInfo { + PipelineTessellationStateCreateInfo { + s_type: StructureType::PIPELINE_TESSELLATION_STATE_CREATE_INFO, + p_next: ::std::ptr::null(), + flags: PipelineTessellationStateCreateFlags::default(), + patch_control_points: u32::default(), + } + } +} +impl PipelineTessellationStateCreateInfo { + pub fn builder<'a>() -> PipelineTessellationStateCreateInfoBuilder<'a> { + PipelineTessellationStateCreateInfoBuilder { + inner: PipelineTessellationStateCreateInfo::default(), + marker: ::std::marker::PhantomData, + } + } +} +#[repr(transparent)] +pub struct PipelineTessellationStateCreateInfoBuilder<'a> { + inner: PipelineTessellationStateCreateInfo, + marker: ::std::marker::PhantomData<&'a ()>, +} +pub unsafe trait ExtendsPipelineTessellationStateCreateInfo {} +impl<'a> ::std::ops::Deref for PipelineTessellationStateCreateInfoBuilder<'a> { + type Target = PipelineTessellationStateCreateInfo; + fn deref(&self) -> &Self::Target { + &self.inner + } +} +impl<'a> ::std::ops::DerefMut for PipelineTessellationStateCreateInfoBuilder<'a> { + fn deref_mut(&mut self) -> &mut Self::Target { + &mut self.inner + } +} +impl<'a> PipelineTessellationStateCreateInfoBuilder<'a> { + pub fn flags( + mut self, + flags: PipelineTessellationStateCreateFlags, + ) -> PipelineTessellationStateCreateInfoBuilder<'a> { + self.inner.flags = flags; + self + } + pub fn patch_control_points( + mut self, + patch_control_points: u32, + ) -> PipelineTessellationStateCreateInfoBuilder<'a> { + self.inner.patch_control_points = patch_control_points; + self + } + #[doc = r" Prepends the given extension struct between the root and the first pointer. This"] + #[doc = r" method only exists on structs that can be passed to a function directly. 
Only"] + #[doc = r" valid extension structs can be pushed into the chain."] + #[doc = r" If the chain looks like `A -> B -> C`, and you call `builder.push_next(&mut D)`, then the"] + #[doc = r" chain will look like `A -> D -> B -> C`."] + pub fn push_next( + mut self, + next: &'a mut T, + ) -> PipelineTessellationStateCreateInfoBuilder<'a> { + unsafe { + let next_ptr = next as *mut T as *mut BaseOutStructure; + let last_next = ptr_chain_iter(next).last().unwrap(); + (*last_next).p_next = self.inner.p_next as _; + self.inner.p_next = next_ptr as _; + } + self + } + #[doc = r" Calling build will **discard** all the lifetime information. Only call this if"] + #[doc = r" necessary! Builders implement `Deref` targeting their corresponding Vulkan struct,"] + #[doc = r" so references to builders can be passed directly to Vulkan functions."] + pub fn build(self) -> PipelineTessellationStateCreateInfo { + self.inner + } +} +#[repr(C)] +#[derive(Copy, Clone, Debug)] +#[doc = ""] +pub struct PipelineViewportStateCreateInfo { + pub s_type: StructureType, + pub p_next: *const c_void, + pub flags: PipelineViewportStateCreateFlags, + pub viewport_count: u32, + pub p_viewports: *const Viewport, + pub scissor_count: u32, + pub p_scissors: *const Rect2D, +} +impl ::std::default::Default for PipelineViewportStateCreateInfo { + fn default() -> PipelineViewportStateCreateInfo { + PipelineViewportStateCreateInfo { + s_type: StructureType::PIPELINE_VIEWPORT_STATE_CREATE_INFO, + p_next: ::std::ptr::null(), + flags: PipelineViewportStateCreateFlags::default(), + viewport_count: u32::default(), + p_viewports: ::std::ptr::null(), + scissor_count: u32::default(), + p_scissors: ::std::ptr::null(), + } + } +} +impl PipelineViewportStateCreateInfo { + pub fn builder<'a>() -> PipelineViewportStateCreateInfoBuilder<'a> { + PipelineViewportStateCreateInfoBuilder { + inner: PipelineViewportStateCreateInfo::default(), + marker: ::std::marker::PhantomData, + } + } +} +#[repr(transparent)] +pub struct PipelineViewportStateCreateInfoBuilder<'a> { + inner: PipelineViewportStateCreateInfo, + marker: ::std::marker::PhantomData<&'a ()>, +} +pub unsafe trait ExtendsPipelineViewportStateCreateInfo {} +impl<'a> ::std::ops::Deref for PipelineViewportStateCreateInfoBuilder<'a> { + type Target = PipelineViewportStateCreateInfo; + fn deref(&self) -> &Self::Target { + &self.inner + } +} +impl<'a> ::std::ops::DerefMut for PipelineViewportStateCreateInfoBuilder<'a> { + fn deref_mut(&mut self) -> &mut Self::Target { + &mut self.inner + } +} +impl<'a> PipelineViewportStateCreateInfoBuilder<'a> { + pub fn flags( + mut self, + flags: PipelineViewportStateCreateFlags, + ) -> PipelineViewportStateCreateInfoBuilder<'a> { + self.inner.flags = flags; + self + } + pub fn viewport_count( + mut self, + viewport_count: u32, + ) -> PipelineViewportStateCreateInfoBuilder<'a> { + self.inner.viewport_count = viewport_count; + self + } + pub fn viewports( + mut self, + viewports: &'a [Viewport], + ) -> PipelineViewportStateCreateInfoBuilder<'a> { + self.inner.viewport_count = viewports.len() as _; + self.inner.p_viewports = viewports.as_ptr(); + self + } + pub fn scissor_count( + mut self, + scissor_count: u32, + ) -> PipelineViewportStateCreateInfoBuilder<'a> { + self.inner.scissor_count = scissor_count; + self + } + pub fn scissors( + mut self, + scissors: &'a [Rect2D], + ) -> PipelineViewportStateCreateInfoBuilder<'a> { + self.inner.scissor_count = scissors.len() as _; + self.inner.p_scissors = scissors.as_ptr(); + self + } + #[doc = r" Prepends the given 
extension struct between the root and the first pointer. This"] + #[doc = r" method only exists on structs that can be passed to a function directly. Only"] + #[doc = r" valid extension structs can be pushed into the chain."] + #[doc = r" If the chain looks like `A -> B -> C`, and you call `builder.push_next(&mut D)`, then the"] + #[doc = r" chain will look like `A -> D -> B -> C`."] + pub fn push_next( + mut self, + next: &'a mut T, + ) -> PipelineViewportStateCreateInfoBuilder<'a> { + unsafe { + let next_ptr = next as *mut T as *mut BaseOutStructure; + let last_next = ptr_chain_iter(next).last().unwrap(); + (*last_next).p_next = self.inner.p_next as _; + self.inner.p_next = next_ptr as _; + } + self + } + #[doc = r" Calling build will **discard** all the lifetime information. Only call this if"] + #[doc = r" necessary! Builders implement `Deref` targeting their corresponding Vulkan struct,"] + #[doc = r" so references to builders can be passed directly to Vulkan functions."] + pub fn build(self) -> PipelineViewportStateCreateInfo { + self.inner + } +} +#[repr(C)] +#[derive(Copy, Clone, Debug)] +#[doc = ""] +pub struct PipelineRasterizationStateCreateInfo { + pub s_type: StructureType, + pub p_next: *const c_void, + pub flags: PipelineRasterizationStateCreateFlags, + pub depth_clamp_enable: Bool32, + pub rasterizer_discard_enable: Bool32, + pub polygon_mode: PolygonMode, + pub cull_mode: CullModeFlags, + pub front_face: FrontFace, + pub depth_bias_enable: Bool32, + pub depth_bias_constant_factor: f32, + pub depth_bias_clamp: f32, + pub depth_bias_slope_factor: f32, + pub line_width: f32, +} +impl ::std::default::Default for PipelineRasterizationStateCreateInfo { + fn default() -> PipelineRasterizationStateCreateInfo { + PipelineRasterizationStateCreateInfo { + s_type: StructureType::PIPELINE_RASTERIZATION_STATE_CREATE_INFO, + p_next: ::std::ptr::null(), + flags: PipelineRasterizationStateCreateFlags::default(), + depth_clamp_enable: Bool32::default(), + rasterizer_discard_enable: Bool32::default(), + polygon_mode: PolygonMode::default(), + cull_mode: CullModeFlags::default(), + front_face: FrontFace::default(), + depth_bias_enable: Bool32::default(), + depth_bias_constant_factor: f32::default(), + depth_bias_clamp: f32::default(), + depth_bias_slope_factor: f32::default(), + line_width: f32::default(), + } + } +} +impl PipelineRasterizationStateCreateInfo { + pub fn builder<'a>() -> PipelineRasterizationStateCreateInfoBuilder<'a> { + PipelineRasterizationStateCreateInfoBuilder { + inner: PipelineRasterizationStateCreateInfo::default(), + marker: ::std::marker::PhantomData, + } + } +} +#[repr(transparent)] +pub struct PipelineRasterizationStateCreateInfoBuilder<'a> { + inner: PipelineRasterizationStateCreateInfo, + marker: ::std::marker::PhantomData<&'a ()>, +} +pub unsafe trait ExtendsPipelineRasterizationStateCreateInfo {} +impl<'a> ::std::ops::Deref for PipelineRasterizationStateCreateInfoBuilder<'a> { + type Target = PipelineRasterizationStateCreateInfo; + fn deref(&self) -> &Self::Target { + &self.inner + } +} +impl<'a> ::std::ops::DerefMut for PipelineRasterizationStateCreateInfoBuilder<'a> { + fn deref_mut(&mut self) -> &mut Self::Target { + &mut self.inner + } +} +impl<'a> PipelineRasterizationStateCreateInfoBuilder<'a> { + pub fn flags( + mut self, + flags: PipelineRasterizationStateCreateFlags, + ) -> PipelineRasterizationStateCreateInfoBuilder<'a> { + self.inner.flags = flags; + self + } + pub fn depth_clamp_enable( + mut self, + depth_clamp_enable: bool, + ) -> 
PipelineRasterizationStateCreateInfoBuilder<'a> { + self.inner.depth_clamp_enable = depth_clamp_enable.into(); + self + } + pub fn rasterizer_discard_enable( + mut self, + rasterizer_discard_enable: bool, + ) -> PipelineRasterizationStateCreateInfoBuilder<'a> { + self.inner.rasterizer_discard_enable = rasterizer_discard_enable.into(); + self + } + pub fn polygon_mode( + mut self, + polygon_mode: PolygonMode, + ) -> PipelineRasterizationStateCreateInfoBuilder<'a> { + self.inner.polygon_mode = polygon_mode; + self + } + pub fn cull_mode( + mut self, + cull_mode: CullModeFlags, + ) -> PipelineRasterizationStateCreateInfoBuilder<'a> { + self.inner.cull_mode = cull_mode; + self + } + pub fn front_face( + mut self, + front_face: FrontFace, + ) -> PipelineRasterizationStateCreateInfoBuilder<'a> { + self.inner.front_face = front_face; + self + } + pub fn depth_bias_enable( + mut self, + depth_bias_enable: bool, + ) -> PipelineRasterizationStateCreateInfoBuilder<'a> { + self.inner.depth_bias_enable = depth_bias_enable.into(); + self + } + pub fn depth_bias_constant_factor( + mut self, + depth_bias_constant_factor: f32, + ) -> PipelineRasterizationStateCreateInfoBuilder<'a> { + self.inner.depth_bias_constant_factor = depth_bias_constant_factor; + self + } + pub fn depth_bias_clamp( + mut self, + depth_bias_clamp: f32, + ) -> PipelineRasterizationStateCreateInfoBuilder<'a> { + self.inner.depth_bias_clamp = depth_bias_clamp; + self + } + pub fn depth_bias_slope_factor( + mut self, + depth_bias_slope_factor: f32, + ) -> PipelineRasterizationStateCreateInfoBuilder<'a> { + self.inner.depth_bias_slope_factor = depth_bias_slope_factor; + self + } + pub fn line_width( + mut self, + line_width: f32, + ) -> PipelineRasterizationStateCreateInfoBuilder<'a> { + self.inner.line_width = line_width; + self + } + #[doc = r" Prepends the given extension struct between the root and the first pointer. This"] + #[doc = r" method only exists on structs that can be passed to a function directly. Only"] + #[doc = r" valid extension structs can be pushed into the chain."] + #[doc = r" If the chain looks like `A -> B -> C`, and you call `builder.push_next(&mut D)`, then the"] + #[doc = r" chain will look like `A -> D -> B -> C`."] + pub fn push_next( + mut self, + next: &'a mut T, + ) -> PipelineRasterizationStateCreateInfoBuilder<'a> { + unsafe { + let next_ptr = next as *mut T as *mut BaseOutStructure; + let last_next = ptr_chain_iter(next).last().unwrap(); + (*last_next).p_next = self.inner.p_next as _; + self.inner.p_next = next_ptr as _; + } + self + } + #[doc = r" Calling build will **discard** all the lifetime information. Only call this if"] + #[doc = r" necessary! 
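All fields here are plain values, so the builder is a straight chain. Note that `Default` leaves every `Bool32` at 0 (`VK_FALSE`) and `line_width` at 0.0, which Vulkan generally rejects: `line_width` is expected to be 1.0 unless the wide-lines feature is enabled. A conservative fill-mode setup:

```rust
// Back-face culling with counter-clockwise front faces, no depth bias.
let raster = PipelineRasterizationStateCreateInfo::builder()
    .polygon_mode(PolygonMode::FILL)
    .cull_mode(CullModeFlags::BACK)
    .front_face(FrontFace::COUNTER_CLOCKWISE)
    .line_width(1.0)            // 1.0 required unless wide lines are enabled
    .build();
assert_eq!(raster.depth_clamp_enable, 0); // booleans default to VK_FALSE
```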
Builders implement `Deref` targeting their corresponding Vulkan struct,"] + #[doc = r" so references to builders can be passed directly to Vulkan functions."] + pub fn build(self) -> PipelineRasterizationStateCreateInfo { + self.inner + } +} +#[repr(C)] +#[derive(Copy, Clone, Debug)] +#[doc = ""] +pub struct PipelineMultisampleStateCreateInfo { + pub s_type: StructureType, + pub p_next: *const c_void, + pub flags: PipelineMultisampleStateCreateFlags, + pub rasterization_samples: SampleCountFlags, + pub sample_shading_enable: Bool32, + pub min_sample_shading: f32, + pub p_sample_mask: *const SampleMask, + pub alpha_to_coverage_enable: Bool32, + pub alpha_to_one_enable: Bool32, +} +impl ::std::default::Default for PipelineMultisampleStateCreateInfo { + fn default() -> PipelineMultisampleStateCreateInfo { + PipelineMultisampleStateCreateInfo { + s_type: StructureType::PIPELINE_MULTISAMPLE_STATE_CREATE_INFO, + p_next: ::std::ptr::null(), + flags: PipelineMultisampleStateCreateFlags::default(), + rasterization_samples: SampleCountFlags::default(), + sample_shading_enable: Bool32::default(), + min_sample_shading: f32::default(), + p_sample_mask: ::std::ptr::null(), + alpha_to_coverage_enable: Bool32::default(), + alpha_to_one_enable: Bool32::default(), + } + } +} +impl PipelineMultisampleStateCreateInfo { + pub fn builder<'a>() -> PipelineMultisampleStateCreateInfoBuilder<'a> { + PipelineMultisampleStateCreateInfoBuilder { + inner: PipelineMultisampleStateCreateInfo::default(), + marker: ::std::marker::PhantomData, + } + } +} +#[repr(transparent)] +pub struct PipelineMultisampleStateCreateInfoBuilder<'a> { + inner: PipelineMultisampleStateCreateInfo, + marker: ::std::marker::PhantomData<&'a ()>, +} +pub unsafe trait ExtendsPipelineMultisampleStateCreateInfo {} +impl<'a> ::std::ops::Deref for PipelineMultisampleStateCreateInfoBuilder<'a> { + type Target = PipelineMultisampleStateCreateInfo; + fn deref(&self) -> &Self::Target { + &self.inner + } +} +impl<'a> ::std::ops::DerefMut for PipelineMultisampleStateCreateInfoBuilder<'a> { + fn deref_mut(&mut self) -> &mut Self::Target { + &mut self.inner + } +} +impl<'a> PipelineMultisampleStateCreateInfoBuilder<'a> { + pub fn flags( + mut self, + flags: PipelineMultisampleStateCreateFlags, + ) -> PipelineMultisampleStateCreateInfoBuilder<'a> { + self.inner.flags = flags; + self + } + pub fn rasterization_samples( + mut self, + rasterization_samples: SampleCountFlags, + ) -> PipelineMultisampleStateCreateInfoBuilder<'a> { + self.inner.rasterization_samples = rasterization_samples; + self + } + pub fn sample_shading_enable( + mut self, + sample_shading_enable: bool, + ) -> PipelineMultisampleStateCreateInfoBuilder<'a> { + self.inner.sample_shading_enable = sample_shading_enable.into(); + self + } + pub fn min_sample_shading( + mut self, + min_sample_shading: f32, + ) -> PipelineMultisampleStateCreateInfoBuilder<'a> { + self.inner.min_sample_shading = min_sample_shading; + self + } + pub fn sample_mask( + mut self, + sample_mask: &'a [SampleMask], + ) -> PipelineMultisampleStateCreateInfoBuilder<'a> { + self.inner.p_sample_mask = sample_mask.as_ptr() as *const SampleMask; + self + } + pub fn alpha_to_coverage_enable( + mut self, + alpha_to_coverage_enable: bool, + ) -> PipelineMultisampleStateCreateInfoBuilder<'a> { + self.inner.alpha_to_coverage_enable = alpha_to_coverage_enable.into(); + self + } + pub fn alpha_to_one_enable( + mut self, + alpha_to_one_enable: bool, + ) -> PipelineMultisampleStateCreateInfoBuilder<'a> { + self.inner.alpha_to_one_enable = 
alpha_to_one_enable.into(); + self + } + #[doc = r" Prepends the given extension struct between the root and the first pointer. This"] + #[doc = r" method only exists on structs that can be passed to a function directly. Only"] + #[doc = r" valid extension structs can be pushed into the chain."] + #[doc = r" If the chain looks like `A -> B -> C`, and you call `builder.push_next(&mut D)`, then the"] + #[doc = r" chain will look like `A -> D -> B -> C`."] + pub fn push_next( + mut self, + next: &'a mut T, + ) -> PipelineMultisampleStateCreateInfoBuilder<'a> { + unsafe { + let next_ptr = next as *mut T as *mut BaseOutStructure; + let last_next = ptr_chain_iter(next).last().unwrap(); + (*last_next).p_next = self.inner.p_next as _; + self.inner.p_next = next_ptr as _; + } + self + } + #[doc = r" Calling build will **discard** all the lifetime information. Only call this if"] + #[doc = r" necessary! Builders implement `Deref` targeting their corresponding Vulkan struct,"] + #[doc = r" so references to builders can be passed directly to Vulkan functions."] + pub fn build(self) -> PipelineMultisampleStateCreateInfo { + self.inner + } +} +#[repr(C)] +#[derive(Copy, Clone, Default, Debug)] +#[doc = ""] +pub struct PipelineColorBlendAttachmentState { + pub blend_enable: Bool32, + pub src_color_blend_factor: BlendFactor, + pub dst_color_blend_factor: BlendFactor, + pub color_blend_op: BlendOp, + pub src_alpha_blend_factor: BlendFactor, + pub dst_alpha_blend_factor: BlendFactor, + pub alpha_blend_op: BlendOp, + pub color_write_mask: ColorComponentFlags, +} +impl PipelineColorBlendAttachmentState { + pub fn builder<'a>() -> PipelineColorBlendAttachmentStateBuilder<'a> { + PipelineColorBlendAttachmentStateBuilder { + inner: PipelineColorBlendAttachmentState::default(), + marker: ::std::marker::PhantomData, + } + } +} +#[repr(transparent)] +pub struct PipelineColorBlendAttachmentStateBuilder<'a> { + inner: PipelineColorBlendAttachmentState, + marker: ::std::marker::PhantomData<&'a ()>, +} +impl<'a> ::std::ops::Deref for PipelineColorBlendAttachmentStateBuilder<'a> { + type Target = PipelineColorBlendAttachmentState; + fn deref(&self) -> &Self::Target { + &self.inner + } +} +impl<'a> ::std::ops::DerefMut for PipelineColorBlendAttachmentStateBuilder<'a> { + fn deref_mut(&mut self) -> &mut Self::Target { + &mut self.inner + } +} +impl<'a> PipelineColorBlendAttachmentStateBuilder<'a> { + pub fn blend_enable( + mut self, + blend_enable: bool, + ) -> PipelineColorBlendAttachmentStateBuilder<'a> { + self.inner.blend_enable = blend_enable.into(); + self + } + pub fn src_color_blend_factor( + mut self, + src_color_blend_factor: BlendFactor, + ) -> PipelineColorBlendAttachmentStateBuilder<'a> { + self.inner.src_color_blend_factor = src_color_blend_factor; + self + } + pub fn dst_color_blend_factor( + mut self, + dst_color_blend_factor: BlendFactor, + ) -> PipelineColorBlendAttachmentStateBuilder<'a> { + self.inner.dst_color_blend_factor = dst_color_blend_factor; + self + } + pub fn color_blend_op( + mut self, + color_blend_op: BlendOp, + ) -> PipelineColorBlendAttachmentStateBuilder<'a> { + self.inner.color_blend_op = color_blend_op; + self + } + pub fn src_alpha_blend_factor( + mut self, + src_alpha_blend_factor: BlendFactor, + ) -> PipelineColorBlendAttachmentStateBuilder<'a> { + self.inner.src_alpha_blend_factor = src_alpha_blend_factor; + self + } + pub fn dst_alpha_blend_factor( + mut self, + dst_alpha_blend_factor: BlendFactor, + ) -> PipelineColorBlendAttachmentStateBuilder<'a> { + 
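As a usage sketch (not part of the vendored file): these structs match the `ash` crate's generated Vulkan bindings, which is what this vendored code appears to be, so a caller would drive the builders roughly as below. The `multisample_state` helper name is hypothetical.

// Minimal sketch, assuming the types above are exposed as `ash::vk`.
// The builder borrows `mask` for its `'a` lifetime because `sample_mask`
// stores a raw pointer into the slice; `.build()` is what discards that
// lifetime, which is why the doc comments advise against calling it early.
use ash::vk;

fn multisample_state<'a>(
    mask: &'a [vk::SampleMask],
) -> vk::PipelineMultisampleStateCreateInfoBuilder<'a> {
    vk::PipelineMultisampleStateCreateInfo::builder()
        .rasterization_samples(vk::SampleCountFlags::TYPE_4)
        .sample_shading_enable(false)
        .sample_mask(mask)
}

Because each builder implements `Deref` to its create-info struct, a reference to the builder deref-coerces to `&vk::PipelineMultisampleStateCreateInfo` where one is expected, so `.build()` is rarely needed.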
self.inner.dst_alpha_blend_factor = dst_alpha_blend_factor; + self + } + pub fn alpha_blend_op( + mut self, + alpha_blend_op: BlendOp, + ) -> PipelineColorBlendAttachmentStateBuilder<'a> { + self.inner.alpha_blend_op = alpha_blend_op; + self + } + pub fn color_write_mask( + mut self, + color_write_mask: ColorComponentFlags, + ) -> PipelineColorBlendAttachmentStateBuilder<'a> { + self.inner.color_write_mask = color_write_mask; + self + } + #[doc = r" Calling build will **discard** all the lifetime information. Only call this if"] + #[doc = r" necessary! Builders implement `Deref` targeting their corresponding Vulkan struct,"] + #[doc = r" so references to builders can be passed directly to Vulkan functions."] + pub fn build(self) -> PipelineColorBlendAttachmentState { + self.inner + } +} +#[repr(C)] +#[derive(Copy, Clone, Debug)] +#[doc = ""] +pub struct PipelineColorBlendStateCreateInfo { + pub s_type: StructureType, + pub p_next: *const c_void, + pub flags: PipelineColorBlendStateCreateFlags, + pub logic_op_enable: Bool32, + pub logic_op: LogicOp, + pub attachment_count: u32, + pub p_attachments: *const PipelineColorBlendAttachmentState, + pub blend_constants: [f32; 4], +} +impl ::std::default::Default for PipelineColorBlendStateCreateInfo { + fn default() -> PipelineColorBlendStateCreateInfo { + PipelineColorBlendStateCreateInfo { + s_type: StructureType::PIPELINE_COLOR_BLEND_STATE_CREATE_INFO, + p_next: ::std::ptr::null(), + flags: PipelineColorBlendStateCreateFlags::default(), + logic_op_enable: Bool32::default(), + logic_op: LogicOp::default(), + attachment_count: u32::default(), + p_attachments: ::std::ptr::null(), + blend_constants: unsafe { ::std::mem::zeroed() }, + } + } +} +impl PipelineColorBlendStateCreateInfo { + pub fn builder<'a>() -> PipelineColorBlendStateCreateInfoBuilder<'a> { + PipelineColorBlendStateCreateInfoBuilder { + inner: PipelineColorBlendStateCreateInfo::default(), + marker: ::std::marker::PhantomData, + } + } +} +#[repr(transparent)] +pub struct PipelineColorBlendStateCreateInfoBuilder<'a> { + inner: PipelineColorBlendStateCreateInfo, + marker: ::std::marker::PhantomData<&'a ()>, +} +pub unsafe trait ExtendsPipelineColorBlendStateCreateInfo {} +impl<'a> ::std::ops::Deref for PipelineColorBlendStateCreateInfoBuilder<'a> { + type Target = PipelineColorBlendStateCreateInfo; + fn deref(&self) -> &Self::Target { + &self.inner + } +} +impl<'a> ::std::ops::DerefMut for PipelineColorBlendStateCreateInfoBuilder<'a> { + fn deref_mut(&mut self) -> &mut Self::Target { + &mut self.inner + } +} +impl<'a> PipelineColorBlendStateCreateInfoBuilder<'a> { + pub fn flags( + mut self, + flags: PipelineColorBlendStateCreateFlags, + ) -> PipelineColorBlendStateCreateInfoBuilder<'a> { + self.inner.flags = flags; + self + } + pub fn logic_op_enable( + mut self, + logic_op_enable: bool, + ) -> PipelineColorBlendStateCreateInfoBuilder<'a> { + self.inner.logic_op_enable = logic_op_enable.into(); + self + } + pub fn logic_op(mut self, logic_op: LogicOp) -> PipelineColorBlendStateCreateInfoBuilder<'a> { + self.inner.logic_op = logic_op; + self + } + pub fn attachments( + mut self, + attachments: &'a [PipelineColorBlendAttachmentState], + ) -> PipelineColorBlendStateCreateInfoBuilder<'a> { + self.inner.attachment_count = attachments.len() as _; + self.inner.p_attachments = attachments.as_ptr(); + self + } + pub fn blend_constants( + mut self, + blend_constants: [f32; 4], + ) -> PipelineColorBlendStateCreateInfoBuilder<'a> { + self.inner.blend_constants = blend_constants; + self + } + #[doc 
= r" Prepends the given extension struct between the root and the first pointer. This"] + #[doc = r" method only exists on structs that can be passed to a function directly. Only"] + #[doc = r" valid extension structs can be pushed into the chain."] + #[doc = r" If the chain looks like `A -> B -> C`, and you call `builder.push_next(&mut D)`, then the"] + #[doc = r" chain will look like `A -> D -> B -> C`."] + pub fn push_next( + mut self, + next: &'a mut T, + ) -> PipelineColorBlendStateCreateInfoBuilder<'a> { + unsafe { + let next_ptr = next as *mut T as *mut BaseOutStructure; + let last_next = ptr_chain_iter(next).last().unwrap(); + (*last_next).p_next = self.inner.p_next as _; + self.inner.p_next = next_ptr as _; + } + self + } + #[doc = r" Calling build will **discard** all the lifetime information. Only call this if"] + #[doc = r" necessary! Builders implement `Deref` targeting their corresponding Vulkan struct,"] + #[doc = r" so references to builders can be passed directly to Vulkan functions."] + pub fn build(self) -> PipelineColorBlendStateCreateInfo { + self.inner + } +} +#[repr(C)] +#[derive(Copy, Clone, Debug)] +#[doc = ""] +pub struct PipelineDynamicStateCreateInfo { + pub s_type: StructureType, + pub p_next: *const c_void, + pub flags: PipelineDynamicStateCreateFlags, + pub dynamic_state_count: u32, + pub p_dynamic_states: *const DynamicState, +} +impl ::std::default::Default for PipelineDynamicStateCreateInfo { + fn default() -> PipelineDynamicStateCreateInfo { + PipelineDynamicStateCreateInfo { + s_type: StructureType::PIPELINE_DYNAMIC_STATE_CREATE_INFO, + p_next: ::std::ptr::null(), + flags: PipelineDynamicStateCreateFlags::default(), + dynamic_state_count: u32::default(), + p_dynamic_states: ::std::ptr::null(), + } + } +} +impl PipelineDynamicStateCreateInfo { + pub fn builder<'a>() -> PipelineDynamicStateCreateInfoBuilder<'a> { + PipelineDynamicStateCreateInfoBuilder { + inner: PipelineDynamicStateCreateInfo::default(), + marker: ::std::marker::PhantomData, + } + } +} +#[repr(transparent)] +pub struct PipelineDynamicStateCreateInfoBuilder<'a> { + inner: PipelineDynamicStateCreateInfo, + marker: ::std::marker::PhantomData<&'a ()>, +} +pub unsafe trait ExtendsPipelineDynamicStateCreateInfo {} +impl<'a> ::std::ops::Deref for PipelineDynamicStateCreateInfoBuilder<'a> { + type Target = PipelineDynamicStateCreateInfo; + fn deref(&self) -> &Self::Target { + &self.inner + } +} +impl<'a> ::std::ops::DerefMut for PipelineDynamicStateCreateInfoBuilder<'a> { + fn deref_mut(&mut self) -> &mut Self::Target { + &mut self.inner + } +} +impl<'a> PipelineDynamicStateCreateInfoBuilder<'a> { + pub fn flags( + mut self, + flags: PipelineDynamicStateCreateFlags, + ) -> PipelineDynamicStateCreateInfoBuilder<'a> { + self.inner.flags = flags; + self + } + pub fn dynamic_states( + mut self, + dynamic_states: &'a [DynamicState], + ) -> PipelineDynamicStateCreateInfoBuilder<'a> { + self.inner.dynamic_state_count = dynamic_states.len() as _; + self.inner.p_dynamic_states = dynamic_states.as_ptr(); + self + } + #[doc = r" Prepends the given extension struct between the root and the first pointer. This"] + #[doc = r" method only exists on structs that can be passed to a function directly. 
Only"] + #[doc = r" valid extension structs can be pushed into the chain."] + #[doc = r" If the chain looks like `A -> B -> C`, and you call `builder.push_next(&mut D)`, then the"] + #[doc = r" chain will look like `A -> D -> B -> C`."] + pub fn push_next( + mut self, + next: &'a mut T, + ) -> PipelineDynamicStateCreateInfoBuilder<'a> { + unsafe { + let next_ptr = next as *mut T as *mut BaseOutStructure; + let last_next = ptr_chain_iter(next).last().unwrap(); + (*last_next).p_next = self.inner.p_next as _; + self.inner.p_next = next_ptr as _; + } + self + } + #[doc = r" Calling build will **discard** all the lifetime information. Only call this if"] + #[doc = r" necessary! Builders implement `Deref` targeting their corresponding Vulkan struct,"] + #[doc = r" so references to builders can be passed directly to Vulkan functions."] + pub fn build(self) -> PipelineDynamicStateCreateInfo { + self.inner + } +} +#[repr(C)] +#[derive(Copy, Clone, Default, Debug)] +#[doc = ""] +pub struct StencilOpState { + pub fail_op: StencilOp, + pub pass_op: StencilOp, + pub depth_fail_op: StencilOp, + pub compare_op: CompareOp, + pub compare_mask: u32, + pub write_mask: u32, + pub reference: u32, +} +impl StencilOpState { + pub fn builder<'a>() -> StencilOpStateBuilder<'a> { + StencilOpStateBuilder { + inner: StencilOpState::default(), + marker: ::std::marker::PhantomData, + } + } +} +#[repr(transparent)] +pub struct StencilOpStateBuilder<'a> { + inner: StencilOpState, + marker: ::std::marker::PhantomData<&'a ()>, +} +impl<'a> ::std::ops::Deref for StencilOpStateBuilder<'a> { + type Target = StencilOpState; + fn deref(&self) -> &Self::Target { + &self.inner + } +} +impl<'a> ::std::ops::DerefMut for StencilOpStateBuilder<'a> { + fn deref_mut(&mut self) -> &mut Self::Target { + &mut self.inner + } +} +impl<'a> StencilOpStateBuilder<'a> { + pub fn fail_op(mut self, fail_op: StencilOp) -> StencilOpStateBuilder<'a> { + self.inner.fail_op = fail_op; + self + } + pub fn pass_op(mut self, pass_op: StencilOp) -> StencilOpStateBuilder<'a> { + self.inner.pass_op = pass_op; + self + } + pub fn depth_fail_op(mut self, depth_fail_op: StencilOp) -> StencilOpStateBuilder<'a> { + self.inner.depth_fail_op = depth_fail_op; + self + } + pub fn compare_op(mut self, compare_op: CompareOp) -> StencilOpStateBuilder<'a> { + self.inner.compare_op = compare_op; + self + } + pub fn compare_mask(mut self, compare_mask: u32) -> StencilOpStateBuilder<'a> { + self.inner.compare_mask = compare_mask; + self + } + pub fn write_mask(mut self, write_mask: u32) -> StencilOpStateBuilder<'a> { + self.inner.write_mask = write_mask; + self + } + pub fn reference(mut self, reference: u32) -> StencilOpStateBuilder<'a> { + self.inner.reference = reference; + self + } + #[doc = r" Calling build will **discard** all the lifetime information. Only call this if"] + #[doc = r" necessary! 
Builders implement `Deref` targeting their corresponding Vulkan struct,"] + #[doc = r" so references to builders can be passed directly to Vulkan functions."] + pub fn build(self) -> StencilOpState { + self.inner + } +} +#[repr(C)] +#[derive(Copy, Clone, Debug)] +#[doc = ""] +pub struct PipelineDepthStencilStateCreateInfo { + pub s_type: StructureType, + pub p_next: *const c_void, + pub flags: PipelineDepthStencilStateCreateFlags, + pub depth_test_enable: Bool32, + pub depth_write_enable: Bool32, + pub depth_compare_op: CompareOp, + pub depth_bounds_test_enable: Bool32, + pub stencil_test_enable: Bool32, + pub front: StencilOpState, + pub back: StencilOpState, + pub min_depth_bounds: f32, + pub max_depth_bounds: f32, +} +impl ::std::default::Default for PipelineDepthStencilStateCreateInfo { + fn default() -> PipelineDepthStencilStateCreateInfo { + PipelineDepthStencilStateCreateInfo { + s_type: StructureType::PIPELINE_DEPTH_STENCIL_STATE_CREATE_INFO, + p_next: ::std::ptr::null(), + flags: PipelineDepthStencilStateCreateFlags::default(), + depth_test_enable: Bool32::default(), + depth_write_enable: Bool32::default(), + depth_compare_op: CompareOp::default(), + depth_bounds_test_enable: Bool32::default(), + stencil_test_enable: Bool32::default(), + front: StencilOpState::default(), + back: StencilOpState::default(), + min_depth_bounds: f32::default(), + max_depth_bounds: f32::default(), + } + } +} +impl PipelineDepthStencilStateCreateInfo { + pub fn builder<'a>() -> PipelineDepthStencilStateCreateInfoBuilder<'a> { + PipelineDepthStencilStateCreateInfoBuilder { + inner: PipelineDepthStencilStateCreateInfo::default(), + marker: ::std::marker::PhantomData, + } + } +} +#[repr(transparent)] +pub struct PipelineDepthStencilStateCreateInfoBuilder<'a> { + inner: PipelineDepthStencilStateCreateInfo, + marker: ::std::marker::PhantomData<&'a ()>, +} +pub unsafe trait ExtendsPipelineDepthStencilStateCreateInfo {} +impl<'a> ::std::ops::Deref for PipelineDepthStencilStateCreateInfoBuilder<'a> { + type Target = PipelineDepthStencilStateCreateInfo; + fn deref(&self) -> &Self::Target { + &self.inner + } +} +impl<'a> ::std::ops::DerefMut for PipelineDepthStencilStateCreateInfoBuilder<'a> { + fn deref_mut(&mut self) -> &mut Self::Target { + &mut self.inner + } +} +impl<'a> PipelineDepthStencilStateCreateInfoBuilder<'a> { + pub fn flags( + mut self, + flags: PipelineDepthStencilStateCreateFlags, + ) -> PipelineDepthStencilStateCreateInfoBuilder<'a> { + self.inner.flags = flags; + self + } + pub fn depth_test_enable( + mut self, + depth_test_enable: bool, + ) -> PipelineDepthStencilStateCreateInfoBuilder<'a> { + self.inner.depth_test_enable = depth_test_enable.into(); + self + } + pub fn depth_write_enable( + mut self, + depth_write_enable: bool, + ) -> PipelineDepthStencilStateCreateInfoBuilder<'a> { + self.inner.depth_write_enable = depth_write_enable.into(); + self + } + pub fn depth_compare_op( + mut self, + depth_compare_op: CompareOp, + ) -> PipelineDepthStencilStateCreateInfoBuilder<'a> { + self.inner.depth_compare_op = depth_compare_op; + self + } + pub fn depth_bounds_test_enable( + mut self, + depth_bounds_test_enable: bool, + ) -> PipelineDepthStencilStateCreateInfoBuilder<'a> { + self.inner.depth_bounds_test_enable = depth_bounds_test_enable.into(); + self + } + pub fn stencil_test_enable( + mut self, + stencil_test_enable: bool, + ) -> PipelineDepthStencilStateCreateInfoBuilder<'a> { + self.inner.stencil_test_enable = stencil_test_enable.into(); + self + } + pub fn front( + mut self, + front: 
StencilOpState, + ) -> PipelineDepthStencilStateCreateInfoBuilder<'a> { + self.inner.front = front; + self + } + pub fn back(mut self, back: StencilOpState) -> PipelineDepthStencilStateCreateInfoBuilder<'a> { + self.inner.back = back; + self + } + pub fn min_depth_bounds( + mut self, + min_depth_bounds: f32, + ) -> PipelineDepthStencilStateCreateInfoBuilder<'a> { + self.inner.min_depth_bounds = min_depth_bounds; + self + } + pub fn max_depth_bounds( + mut self, + max_depth_bounds: f32, + ) -> PipelineDepthStencilStateCreateInfoBuilder<'a> { + self.inner.max_depth_bounds = max_depth_bounds; + self + } + #[doc = r" Prepends the given extension struct between the root and the first pointer. This"] + #[doc = r" method only exists on structs that can be passed to a function directly. Only"] + #[doc = r" valid extension structs can be pushed into the chain."] + #[doc = r" If the chain looks like `A -> B -> C`, and you call `builder.push_next(&mut D)`, then the"] + #[doc = r" chain will look like `A -> D -> B -> C`."] + pub fn push_next( + mut self, + next: &'a mut T, + ) -> PipelineDepthStencilStateCreateInfoBuilder<'a> { + unsafe { + let next_ptr = next as *mut T as *mut BaseOutStructure; + let last_next = ptr_chain_iter(next).last().unwrap(); + (*last_next).p_next = self.inner.p_next as _; + self.inner.p_next = next_ptr as _; + } + self + } + #[doc = r" Calling build will **discard** all the lifetime information. Only call this if"] + #[doc = r" necessary! Builders implement `Deref` targeting their corresponding Vulkan struct,"] + #[doc = r" so references to builders can be passed directly to Vulkan functions."] + pub fn build(self) -> PipelineDepthStencilStateCreateInfo { + self.inner + } +} +#[repr(C)] +#[derive(Copy, Clone, Debug)] +#[doc = ""] +pub struct GraphicsPipelineCreateInfo { + pub s_type: StructureType, + pub p_next: *const c_void, + pub flags: PipelineCreateFlags, + pub stage_count: u32, + pub p_stages: *const PipelineShaderStageCreateInfo, + pub p_vertex_input_state: *const PipelineVertexInputStateCreateInfo, + pub p_input_assembly_state: *const PipelineInputAssemblyStateCreateInfo, + pub p_tessellation_state: *const PipelineTessellationStateCreateInfo, + pub p_viewport_state: *const PipelineViewportStateCreateInfo, + pub p_rasterization_state: *const PipelineRasterizationStateCreateInfo, + pub p_multisample_state: *const PipelineMultisampleStateCreateInfo, + pub p_depth_stencil_state: *const PipelineDepthStencilStateCreateInfo, + pub p_color_blend_state: *const PipelineColorBlendStateCreateInfo, + pub p_dynamic_state: *const PipelineDynamicStateCreateInfo, + pub layout: PipelineLayout, + pub render_pass: RenderPass, + pub subpass: u32, + pub base_pipeline_handle: Pipeline, + pub base_pipeline_index: i32, +} +impl ::std::default::Default for GraphicsPipelineCreateInfo { + fn default() -> GraphicsPipelineCreateInfo { + GraphicsPipelineCreateInfo { + s_type: StructureType::GRAPHICS_PIPELINE_CREATE_INFO, + p_next: ::std::ptr::null(), + flags: PipelineCreateFlags::default(), + stage_count: u32::default(), + p_stages: ::std::ptr::null(), + p_vertex_input_state: ::std::ptr::null(), + p_input_assembly_state: ::std::ptr::null(), + p_tessellation_state: ::std::ptr::null(), + p_viewport_state: ::std::ptr::null(), + p_rasterization_state: ::std::ptr::null(), + p_multisample_state: ::std::ptr::null(), + p_depth_stencil_state: ::std::ptr::null(), + p_color_blend_state: ::std::ptr::null(), + p_dynamic_state: ::std::ptr::null(), + layout: PipelineLayout::default(), + render_pass: 
RenderPass::default(), + subpass: u32::default(), + base_pipeline_handle: Pipeline::default(), + base_pipeline_index: i32::default(), + } + } +} +impl GraphicsPipelineCreateInfo { + pub fn builder<'a>() -> GraphicsPipelineCreateInfoBuilder<'a> { + GraphicsPipelineCreateInfoBuilder { + inner: GraphicsPipelineCreateInfo::default(), + marker: ::std::marker::PhantomData, + } + } +} +#[repr(transparent)] +pub struct GraphicsPipelineCreateInfoBuilder<'a> { + inner: GraphicsPipelineCreateInfo, + marker: ::std::marker::PhantomData<&'a ()>, +} +pub unsafe trait ExtendsGraphicsPipelineCreateInfo {} +impl<'a> ::std::ops::Deref for GraphicsPipelineCreateInfoBuilder<'a> { + type Target = GraphicsPipelineCreateInfo; + fn deref(&self) -> &Self::Target { + &self.inner + } +} +impl<'a> ::std::ops::DerefMut for GraphicsPipelineCreateInfoBuilder<'a> { + fn deref_mut(&mut self) -> &mut Self::Target { + &mut self.inner + } +} +impl<'a> GraphicsPipelineCreateInfoBuilder<'a> { + pub fn flags(mut self, flags: PipelineCreateFlags) -> GraphicsPipelineCreateInfoBuilder<'a> { + self.inner.flags = flags; + self + } + pub fn stages( + mut self, + stages: &'a [PipelineShaderStageCreateInfo], + ) -> GraphicsPipelineCreateInfoBuilder<'a> { + self.inner.stage_count = stages.len() as _; + self.inner.p_stages = stages.as_ptr(); + self + } + pub fn vertex_input_state( + mut self, + vertex_input_state: &'a PipelineVertexInputStateCreateInfo, + ) -> GraphicsPipelineCreateInfoBuilder<'a> { + self.inner.p_vertex_input_state = vertex_input_state; + self + } + pub fn input_assembly_state( + mut self, + input_assembly_state: &'a PipelineInputAssemblyStateCreateInfo, + ) -> GraphicsPipelineCreateInfoBuilder<'a> { + self.inner.p_input_assembly_state = input_assembly_state; + self + } + pub fn tessellation_state( + mut self, + tessellation_state: &'a PipelineTessellationStateCreateInfo, + ) -> GraphicsPipelineCreateInfoBuilder<'a> { + self.inner.p_tessellation_state = tessellation_state; + self + } + pub fn viewport_state( + mut self, + viewport_state: &'a PipelineViewportStateCreateInfo, + ) -> GraphicsPipelineCreateInfoBuilder<'a> { + self.inner.p_viewport_state = viewport_state; + self + } + pub fn rasterization_state( + mut self, + rasterization_state: &'a PipelineRasterizationStateCreateInfo, + ) -> GraphicsPipelineCreateInfoBuilder<'a> { + self.inner.p_rasterization_state = rasterization_state; + self + } + pub fn multisample_state( + mut self, + multisample_state: &'a PipelineMultisampleStateCreateInfo, + ) -> GraphicsPipelineCreateInfoBuilder<'a> { + self.inner.p_multisample_state = multisample_state; + self + } + pub fn depth_stencil_state( + mut self, + depth_stencil_state: &'a PipelineDepthStencilStateCreateInfo, + ) -> GraphicsPipelineCreateInfoBuilder<'a> { + self.inner.p_depth_stencil_state = depth_stencil_state; + self + } + pub fn color_blend_state( + mut self, + color_blend_state: &'a PipelineColorBlendStateCreateInfo, + ) -> GraphicsPipelineCreateInfoBuilder<'a> { + self.inner.p_color_blend_state = color_blend_state; + self + } + pub fn dynamic_state( + mut self, + dynamic_state: &'a PipelineDynamicStateCreateInfo, + ) -> GraphicsPipelineCreateInfoBuilder<'a> { + self.inner.p_dynamic_state = dynamic_state; + self + } + pub fn layout(mut self, layout: PipelineLayout) -> GraphicsPipelineCreateInfoBuilder<'a> { + self.inner.layout = layout; + self + } + pub fn render_pass(mut self, render_pass: RenderPass) -> GraphicsPipelineCreateInfoBuilder<'a> { + self.inner.render_pass = render_pass; + self + } + pub fn subpass(mut 
self, subpass: u32) -> GraphicsPipelineCreateInfoBuilder<'a> { + self.inner.subpass = subpass; + self + } + pub fn base_pipeline_handle( + mut self, + base_pipeline_handle: Pipeline, + ) -> GraphicsPipelineCreateInfoBuilder<'a> { + self.inner.base_pipeline_handle = base_pipeline_handle; + self + } + pub fn base_pipeline_index( + mut self, + base_pipeline_index: i32, + ) -> GraphicsPipelineCreateInfoBuilder<'a> { + self.inner.base_pipeline_index = base_pipeline_index; + self + } + #[doc = r" Prepends the given extension struct between the root and the first pointer. This"] + #[doc = r" method only exists on structs that can be passed to a function directly. Only"] + #[doc = r" valid extension structs can be pushed into the chain."] + #[doc = r" If the chain looks like `A -> B -> C`, and you call `builder.push_next(&mut D)`, then the"] + #[doc = r" chain will look like `A -> D -> B -> C`."] + pub fn push_next( + mut self, + next: &'a mut T, + ) -> GraphicsPipelineCreateInfoBuilder<'a> { + unsafe { + let next_ptr = next as *mut T as *mut BaseOutStructure; + let last_next = ptr_chain_iter(next).last().unwrap(); + (*last_next).p_next = self.inner.p_next as _; + self.inner.p_next = next_ptr as _; + } + self + } + #[doc = r" Calling build will **discard** all the lifetime information. Only call this if"] + #[doc = r" necessary! Builders implement `Deref` targeting their corresponding Vulkan struct,"] + #[doc = r" so references to builders can be passed directly to Vulkan functions."] + pub fn build(self) -> GraphicsPipelineCreateInfo { + self.inner + } +} +#[repr(C)] +#[derive(Copy, Clone, Debug)] +#[doc = ""] +pub struct PipelineCacheCreateInfo { + pub s_type: StructureType, + pub p_next: *const c_void, + pub flags: PipelineCacheCreateFlags, + pub initial_data_size: usize, + pub p_initial_data: *const c_void, +} +impl ::std::default::Default for PipelineCacheCreateInfo { + fn default() -> PipelineCacheCreateInfo { + PipelineCacheCreateInfo { + s_type: StructureType::PIPELINE_CACHE_CREATE_INFO, + p_next: ::std::ptr::null(), + flags: PipelineCacheCreateFlags::default(), + initial_data_size: usize::default(), + p_initial_data: ::std::ptr::null(), + } + } +} +impl PipelineCacheCreateInfo { + pub fn builder<'a>() -> PipelineCacheCreateInfoBuilder<'a> { + PipelineCacheCreateInfoBuilder { + inner: PipelineCacheCreateInfo::default(), + marker: ::std::marker::PhantomData, + } + } +} +#[repr(transparent)] +pub struct PipelineCacheCreateInfoBuilder<'a> { + inner: PipelineCacheCreateInfo, + marker: ::std::marker::PhantomData<&'a ()>, +} +pub unsafe trait ExtendsPipelineCacheCreateInfo {} +impl<'a> ::std::ops::Deref for PipelineCacheCreateInfoBuilder<'a> { + type Target = PipelineCacheCreateInfo; + fn deref(&self) -> &Self::Target { + &self.inner + } +} +impl<'a> ::std::ops::DerefMut for PipelineCacheCreateInfoBuilder<'a> { + fn deref_mut(&mut self) -> &mut Self::Target { + &mut self.inner + } +} +impl<'a> PipelineCacheCreateInfoBuilder<'a> { + pub fn flags(mut self, flags: PipelineCacheCreateFlags) -> PipelineCacheCreateInfoBuilder<'a> { + self.inner.flags = flags; + self + } + pub fn initial_data(mut self, initial_data: &'a [u8]) -> PipelineCacheCreateInfoBuilder<'a> { + self.inner.initial_data_size = initial_data.len() as _; + self.inner.p_initial_data = initial_data.as_ptr() as *const c_void; + self + } + #[doc = r" Prepends the given extension struct between the root and the first pointer. This"] + #[doc = r" method only exists on structs that can be passed to a function directly. 
Only"] + #[doc = r" valid extension structs can be pushed into the chain."] + #[doc = r" If the chain looks like `A -> B -> C`, and you call `builder.push_next(&mut D)`, then the"] + #[doc = r" chain will look like `A -> D -> B -> C`."] + pub fn push_next( + mut self, + next: &'a mut T, + ) -> PipelineCacheCreateInfoBuilder<'a> { + unsafe { + let next_ptr = next as *mut T as *mut BaseOutStructure; + let last_next = ptr_chain_iter(next).last().unwrap(); + (*last_next).p_next = self.inner.p_next as _; + self.inner.p_next = next_ptr as _; + } + self + } + #[doc = r" Calling build will **discard** all the lifetime information. Only call this if"] + #[doc = r" necessary! Builders implement `Deref` targeting their corresponding Vulkan struct,"] + #[doc = r" so references to builders can be passed directly to Vulkan functions."] + pub fn build(self) -> PipelineCacheCreateInfo { + self.inner + } +} +#[repr(C)] +#[derive(Copy, Clone, Default, Debug)] +#[doc = ""] +pub struct PushConstantRange { + pub stage_flags: ShaderStageFlags, + pub offset: u32, + pub size: u32, +} +impl PushConstantRange { + pub fn builder<'a>() -> PushConstantRangeBuilder<'a> { + PushConstantRangeBuilder { + inner: PushConstantRange::default(), + marker: ::std::marker::PhantomData, + } + } +} +#[repr(transparent)] +pub struct PushConstantRangeBuilder<'a> { + inner: PushConstantRange, + marker: ::std::marker::PhantomData<&'a ()>, +} +impl<'a> ::std::ops::Deref for PushConstantRangeBuilder<'a> { + type Target = PushConstantRange; + fn deref(&self) -> &Self::Target { + &self.inner + } +} +impl<'a> ::std::ops::DerefMut for PushConstantRangeBuilder<'a> { + fn deref_mut(&mut self) -> &mut Self::Target { + &mut self.inner + } +} +impl<'a> PushConstantRangeBuilder<'a> { + pub fn stage_flags(mut self, stage_flags: ShaderStageFlags) -> PushConstantRangeBuilder<'a> { + self.inner.stage_flags = stage_flags; + self + } + pub fn offset(mut self, offset: u32) -> PushConstantRangeBuilder<'a> { + self.inner.offset = offset; + self + } + pub fn size(mut self, size: u32) -> PushConstantRangeBuilder<'a> { + self.inner.size = size; + self + } + #[doc = r" Calling build will **discard** all the lifetime information. Only call this if"] + #[doc = r" necessary! 
Builders implement `Deref` targeting their corresponding Vulkan struct,"] + #[doc = r" so references to builders can be passed directly to Vulkan functions."] + pub fn build(self) -> PushConstantRange { + self.inner + } +} +#[repr(C)] +#[derive(Copy, Clone, Debug)] +#[doc = ""] +pub struct PipelineLayoutCreateInfo { + pub s_type: StructureType, + pub p_next: *const c_void, + pub flags: PipelineLayoutCreateFlags, + pub set_layout_count: u32, + pub p_set_layouts: *const DescriptorSetLayout, + pub push_constant_range_count: u32, + pub p_push_constant_ranges: *const PushConstantRange, +} +impl ::std::default::Default for PipelineLayoutCreateInfo { + fn default() -> PipelineLayoutCreateInfo { + PipelineLayoutCreateInfo { + s_type: StructureType::PIPELINE_LAYOUT_CREATE_INFO, + p_next: ::std::ptr::null(), + flags: PipelineLayoutCreateFlags::default(), + set_layout_count: u32::default(), + p_set_layouts: ::std::ptr::null(), + push_constant_range_count: u32::default(), + p_push_constant_ranges: ::std::ptr::null(), + } + } +} +impl PipelineLayoutCreateInfo { + pub fn builder<'a>() -> PipelineLayoutCreateInfoBuilder<'a> { + PipelineLayoutCreateInfoBuilder { + inner: PipelineLayoutCreateInfo::default(), + marker: ::std::marker::PhantomData, + } + } +} +#[repr(transparent)] +pub struct PipelineLayoutCreateInfoBuilder<'a> { + inner: PipelineLayoutCreateInfo, + marker: ::std::marker::PhantomData<&'a ()>, +} +pub unsafe trait ExtendsPipelineLayoutCreateInfo {} +impl<'a> ::std::ops::Deref for PipelineLayoutCreateInfoBuilder<'a> { + type Target = PipelineLayoutCreateInfo; + fn deref(&self) -> &Self::Target { + &self.inner + } +} +impl<'a> ::std::ops::DerefMut for PipelineLayoutCreateInfoBuilder<'a> { + fn deref_mut(&mut self) -> &mut Self::Target { + &mut self.inner + } +} +impl<'a> PipelineLayoutCreateInfoBuilder<'a> { + pub fn flags( + mut self, + flags: PipelineLayoutCreateFlags, + ) -> PipelineLayoutCreateInfoBuilder<'a> { + self.inner.flags = flags; + self + } + pub fn set_layouts( + mut self, + set_layouts: &'a [DescriptorSetLayout], + ) -> PipelineLayoutCreateInfoBuilder<'a> { + self.inner.set_layout_count = set_layouts.len() as _; + self.inner.p_set_layouts = set_layouts.as_ptr(); + self + } + pub fn push_constant_ranges( + mut self, + push_constant_ranges: &'a [PushConstantRange], + ) -> PipelineLayoutCreateInfoBuilder<'a> { + self.inner.push_constant_range_count = push_constant_ranges.len() as _; + self.inner.p_push_constant_ranges = push_constant_ranges.as_ptr(); + self + } + #[doc = r" Prepends the given extension struct between the root and the first pointer. This"] + #[doc = r" method only exists on structs that can be passed to a function directly. Only"] + #[doc = r" valid extension structs can be pushed into the chain."] + #[doc = r" If the chain looks like `A -> B -> C`, and you call `builder.push_next(&mut D)`, then the"] + #[doc = r" chain will look like `A -> D -> B -> C`."] + pub fn push_next( + mut self, + next: &'a mut T, + ) -> PipelineLayoutCreateInfoBuilder<'a> { + unsafe { + let next_ptr = next as *mut T as *mut BaseOutStructure; + let last_next = ptr_chain_iter(next).last().unwrap(); + (*last_next).p_next = self.inner.p_next as _; + self.inner.p_next = next_ptr as _; + } + self + } + #[doc = r" Calling build will **discard** all the lifetime information. Only call this if"] + #[doc = r" necessary! 
Builders implement `Deref` targeting their corresponding Vulkan struct,"] + #[doc = r" so references to builders can be passed directly to Vulkan functions."] + pub fn build(self) -> PipelineLayoutCreateInfo { + self.inner + } +} +#[repr(C)] +#[derive(Copy, Clone, Debug)] +#[doc = ""] +pub struct SamplerCreateInfo { + pub s_type: StructureType, + pub p_next: *const c_void, + pub flags: SamplerCreateFlags, + pub mag_filter: Filter, + pub min_filter: Filter, + pub mipmap_mode: SamplerMipmapMode, + pub address_mode_u: SamplerAddressMode, + pub address_mode_v: SamplerAddressMode, + pub address_mode_w: SamplerAddressMode, + pub mip_lod_bias: f32, + pub anisotropy_enable: Bool32, + pub max_anisotropy: f32, + pub compare_enable: Bool32, + pub compare_op: CompareOp, + pub min_lod: f32, + pub max_lod: f32, + pub border_color: BorderColor, + pub unnormalized_coordinates: Bool32, +} +impl ::std::default::Default for SamplerCreateInfo { + fn default() -> SamplerCreateInfo { + SamplerCreateInfo { + s_type: StructureType::SAMPLER_CREATE_INFO, + p_next: ::std::ptr::null(), + flags: SamplerCreateFlags::default(), + mag_filter: Filter::default(), + min_filter: Filter::default(), + mipmap_mode: SamplerMipmapMode::default(), + address_mode_u: SamplerAddressMode::default(), + address_mode_v: SamplerAddressMode::default(), + address_mode_w: SamplerAddressMode::default(), + mip_lod_bias: f32::default(), + anisotropy_enable: Bool32::default(), + max_anisotropy: f32::default(), + compare_enable: Bool32::default(), + compare_op: CompareOp::default(), + min_lod: f32::default(), + max_lod: f32::default(), + border_color: BorderColor::default(), + unnormalized_coordinates: Bool32::default(), + } + } +} +impl SamplerCreateInfo { + pub fn builder<'a>() -> SamplerCreateInfoBuilder<'a> { + SamplerCreateInfoBuilder { + inner: SamplerCreateInfo::default(), + marker: ::std::marker::PhantomData, + } + } +} +#[repr(transparent)] +pub struct SamplerCreateInfoBuilder<'a> { + inner: SamplerCreateInfo, + marker: ::std::marker::PhantomData<&'a ()>, +} +pub unsafe trait ExtendsSamplerCreateInfo {} +impl<'a> ::std::ops::Deref for SamplerCreateInfoBuilder<'a> { + type Target = SamplerCreateInfo; + fn deref(&self) -> &Self::Target { + &self.inner + } +} +impl<'a> ::std::ops::DerefMut for SamplerCreateInfoBuilder<'a> { + fn deref_mut(&mut self) -> &mut Self::Target { + &mut self.inner + } +} +impl<'a> SamplerCreateInfoBuilder<'a> { + pub fn flags(mut self, flags: SamplerCreateFlags) -> SamplerCreateInfoBuilder<'a> { + self.inner.flags = flags; + self + } + pub fn mag_filter(mut self, mag_filter: Filter) -> SamplerCreateInfoBuilder<'a> { + self.inner.mag_filter = mag_filter; + self + } + pub fn min_filter(mut self, min_filter: Filter) -> SamplerCreateInfoBuilder<'a> { + self.inner.min_filter = min_filter; + self + } + pub fn mipmap_mode(mut self, mipmap_mode: SamplerMipmapMode) -> SamplerCreateInfoBuilder<'a> { + self.inner.mipmap_mode = mipmap_mode; + self + } + pub fn address_mode_u( + mut self, + address_mode_u: SamplerAddressMode, + ) -> SamplerCreateInfoBuilder<'a> { + self.inner.address_mode_u = address_mode_u; + self + } + pub fn address_mode_v( + mut self, + address_mode_v: SamplerAddressMode, + ) -> SamplerCreateInfoBuilder<'a> { + self.inner.address_mode_v = address_mode_v; + self + } + pub fn address_mode_w( + mut self, + address_mode_w: SamplerAddressMode, + ) -> SamplerCreateInfoBuilder<'a> { + self.inner.address_mode_w = address_mode_w; + self + } + pub fn mip_lod_bias(mut self, mip_lod_bias: f32) -> 
SamplerCreateInfoBuilder<'a> { + self.inner.mip_lod_bias = mip_lod_bias; + self + } + pub fn anisotropy_enable(mut self, anisotropy_enable: bool) -> SamplerCreateInfoBuilder<'a> { + self.inner.anisotropy_enable = anisotropy_enable.into(); + self + } + pub fn max_anisotropy(mut self, max_anisotropy: f32) -> SamplerCreateInfoBuilder<'a> { + self.inner.max_anisotropy = max_anisotropy; + self + } + pub fn compare_enable(mut self, compare_enable: bool) -> SamplerCreateInfoBuilder<'a> { + self.inner.compare_enable = compare_enable.into(); + self + } + pub fn compare_op(mut self, compare_op: CompareOp) -> SamplerCreateInfoBuilder<'a> { + self.inner.compare_op = compare_op; + self + } + pub fn min_lod(mut self, min_lod: f32) -> SamplerCreateInfoBuilder<'a> { + self.inner.min_lod = min_lod; + self + } + pub fn max_lod(mut self, max_lod: f32) -> SamplerCreateInfoBuilder<'a> { + self.inner.max_lod = max_lod; + self + } + pub fn border_color(mut self, border_color: BorderColor) -> SamplerCreateInfoBuilder<'a> { + self.inner.border_color = border_color; + self + } + pub fn unnormalized_coordinates( + mut self, + unnormalized_coordinates: bool, + ) -> SamplerCreateInfoBuilder<'a> { + self.inner.unnormalized_coordinates = unnormalized_coordinates.into(); + self + } + #[doc = r" Prepends the given extension struct between the root and the first pointer. This"] + #[doc = r" method only exists on structs that can be passed to a function directly. Only"] + #[doc = r" valid extension structs can be pushed into the chain."] + #[doc = r" If the chain looks like `A -> B -> C`, and you call `builder.push_next(&mut D)`, then the"] + #[doc = r" chain will look like `A -> D -> B -> C`."] + pub fn push_next( + mut self, + next: &'a mut T, + ) -> SamplerCreateInfoBuilder<'a> { + unsafe { + let next_ptr = next as *mut T as *mut BaseOutStructure; + let last_next = ptr_chain_iter(next).last().unwrap(); + (*last_next).p_next = self.inner.p_next as _; + self.inner.p_next = next_ptr as _; + } + self + } + #[doc = r" Calling build will **discard** all the lifetime information. Only call this if"] + #[doc = r" necessary! 
Builders implement `Deref` targeting their corresponding Vulkan struct,"] + #[doc = r" so references to builders can be passed directly to Vulkan functions."] + pub fn build(self) -> SamplerCreateInfo { + self.inner + } +} +#[repr(C)] +#[derive(Copy, Clone, Debug)] +#[doc = ""] +pub struct CommandPoolCreateInfo { + pub s_type: StructureType, + pub p_next: *const c_void, + pub flags: CommandPoolCreateFlags, + pub queue_family_index: u32, +} +impl ::std::default::Default for CommandPoolCreateInfo { + fn default() -> CommandPoolCreateInfo { + CommandPoolCreateInfo { + s_type: StructureType::COMMAND_POOL_CREATE_INFO, + p_next: ::std::ptr::null(), + flags: CommandPoolCreateFlags::default(), + queue_family_index: u32::default(), + } + } +} +impl CommandPoolCreateInfo { + pub fn builder<'a>() -> CommandPoolCreateInfoBuilder<'a> { + CommandPoolCreateInfoBuilder { + inner: CommandPoolCreateInfo::default(), + marker: ::std::marker::PhantomData, + } + } +} +#[repr(transparent)] +pub struct CommandPoolCreateInfoBuilder<'a> { + inner: CommandPoolCreateInfo, + marker: ::std::marker::PhantomData<&'a ()>, +} +pub unsafe trait ExtendsCommandPoolCreateInfo {} +impl<'a> ::std::ops::Deref for CommandPoolCreateInfoBuilder<'a> { + type Target = CommandPoolCreateInfo; + fn deref(&self) -> &Self::Target { + &self.inner + } +} +impl<'a> ::std::ops::DerefMut for CommandPoolCreateInfoBuilder<'a> { + fn deref_mut(&mut self) -> &mut Self::Target { + &mut self.inner + } +} +impl<'a> CommandPoolCreateInfoBuilder<'a> { + pub fn flags(mut self, flags: CommandPoolCreateFlags) -> CommandPoolCreateInfoBuilder<'a> { + self.inner.flags = flags; + self + } + pub fn queue_family_index( + mut self, + queue_family_index: u32, + ) -> CommandPoolCreateInfoBuilder<'a> { + self.inner.queue_family_index = queue_family_index; + self + } + #[doc = r" Prepends the given extension struct between the root and the first pointer. This"] + #[doc = r" method only exists on structs that can be passed to a function directly. Only"] + #[doc = r" valid extension structs can be pushed into the chain."] + #[doc = r" If the chain looks like `A -> B -> C`, and you call `builder.push_next(&mut D)`, then the"] + #[doc = r" chain will look like `A -> D -> B -> C`."] + pub fn push_next( + mut self, + next: &'a mut T, + ) -> CommandPoolCreateInfoBuilder<'a> { + unsafe { + let next_ptr = next as *mut T as *mut BaseOutStructure; + let last_next = ptr_chain_iter(next).last().unwrap(); + (*last_next).p_next = self.inner.p_next as _; + self.inner.p_next = next_ptr as _; + } + self + } + #[doc = r" Calling build will **discard** all the lifetime information. Only call this if"] + #[doc = r" necessary! 
Builders implement `Deref` targeting their corresponding Vulkan struct,"] + #[doc = r" so references to builders can be passed directly to Vulkan functions."] + pub fn build(self) -> CommandPoolCreateInfo { + self.inner + } +} +#[repr(C)] +#[derive(Copy, Clone, Debug)] +#[doc = ""] +pub struct CommandBufferAllocateInfo { + pub s_type: StructureType, + pub p_next: *const c_void, + pub command_pool: CommandPool, + pub level: CommandBufferLevel, + pub command_buffer_count: u32, +} +impl ::std::default::Default for CommandBufferAllocateInfo { + fn default() -> CommandBufferAllocateInfo { + CommandBufferAllocateInfo { + s_type: StructureType::COMMAND_BUFFER_ALLOCATE_INFO, + p_next: ::std::ptr::null(), + command_pool: CommandPool::default(), + level: CommandBufferLevel::default(), + command_buffer_count: u32::default(), + } + } +} +impl CommandBufferAllocateInfo { + pub fn builder<'a>() -> CommandBufferAllocateInfoBuilder<'a> { + CommandBufferAllocateInfoBuilder { + inner: CommandBufferAllocateInfo::default(), + marker: ::std::marker::PhantomData, + } + } +} +#[repr(transparent)] +pub struct CommandBufferAllocateInfoBuilder<'a> { + inner: CommandBufferAllocateInfo, + marker: ::std::marker::PhantomData<&'a ()>, +} +pub unsafe trait ExtendsCommandBufferAllocateInfo {} +impl<'a> ::std::ops::Deref for CommandBufferAllocateInfoBuilder<'a> { + type Target = CommandBufferAllocateInfo; + fn deref(&self) -> &Self::Target { + &self.inner + } +} +impl<'a> ::std::ops::DerefMut for CommandBufferAllocateInfoBuilder<'a> { + fn deref_mut(&mut self) -> &mut Self::Target { + &mut self.inner + } +} +impl<'a> CommandBufferAllocateInfoBuilder<'a> { + pub fn command_pool( + mut self, + command_pool: CommandPool, + ) -> CommandBufferAllocateInfoBuilder<'a> { + self.inner.command_pool = command_pool; + self + } + pub fn level(mut self, level: CommandBufferLevel) -> CommandBufferAllocateInfoBuilder<'a> { + self.inner.level = level; + self + } + pub fn command_buffer_count( + mut self, + command_buffer_count: u32, + ) -> CommandBufferAllocateInfoBuilder<'a> { + self.inner.command_buffer_count = command_buffer_count; + self + } + #[doc = r" Prepends the given extension struct between the root and the first pointer. This"] + #[doc = r" method only exists on structs that can be passed to a function directly. Only"] + #[doc = r" valid extension structs can be pushed into the chain."] + #[doc = r" If the chain looks like `A -> B -> C`, and you call `builder.push_next(&mut D)`, then the"] + #[doc = r" chain will look like `A -> D -> B -> C`."] + pub fn push_next( + mut self, + next: &'a mut T, + ) -> CommandBufferAllocateInfoBuilder<'a> { + unsafe { + let next_ptr = next as *mut T as *mut BaseOutStructure; + let last_next = ptr_chain_iter(next).last().unwrap(); + (*last_next).p_next = self.inner.p_next as _; + self.inner.p_next = next_ptr as _; + } + self + } + #[doc = r" Calling build will **discard** all the lifetime information. Only call this if"] + #[doc = r" necessary! 
Builders implement `Deref` targeting their corresponding Vulkan struct,"] + #[doc = r" so references to builders can be passed directly to Vulkan functions."] + pub fn build(self) -> CommandBufferAllocateInfo { + self.inner + } +} +#[repr(C)] +#[derive(Copy, Clone, Debug)] +#[doc = ""] +pub struct CommandBufferInheritanceInfo { + pub s_type: StructureType, + pub p_next: *const c_void, + pub render_pass: RenderPass, + pub subpass: u32, + pub framebuffer: Framebuffer, + pub occlusion_query_enable: Bool32, + pub query_flags: QueryControlFlags, + pub pipeline_statistics: QueryPipelineStatisticFlags, +} +impl ::std::default::Default for CommandBufferInheritanceInfo { + fn default() -> CommandBufferInheritanceInfo { + CommandBufferInheritanceInfo { + s_type: StructureType::COMMAND_BUFFER_INHERITANCE_INFO, + p_next: ::std::ptr::null(), + render_pass: RenderPass::default(), + subpass: u32::default(), + framebuffer: Framebuffer::default(), + occlusion_query_enable: Bool32::default(), + query_flags: QueryControlFlags::default(), + pipeline_statistics: QueryPipelineStatisticFlags::default(), + } + } +} +impl CommandBufferInheritanceInfo { + pub fn builder<'a>() -> CommandBufferInheritanceInfoBuilder<'a> { + CommandBufferInheritanceInfoBuilder { + inner: CommandBufferInheritanceInfo::default(), + marker: ::std::marker::PhantomData, + } + } +} +#[repr(transparent)] +pub struct CommandBufferInheritanceInfoBuilder<'a> { + inner: CommandBufferInheritanceInfo, + marker: ::std::marker::PhantomData<&'a ()>, +} +pub unsafe trait ExtendsCommandBufferInheritanceInfo {} +impl<'a> ::std::ops::Deref for CommandBufferInheritanceInfoBuilder<'a> { + type Target = CommandBufferInheritanceInfo; + fn deref(&self) -> &Self::Target { + &self.inner + } +} +impl<'a> ::std::ops::DerefMut for CommandBufferInheritanceInfoBuilder<'a> { + fn deref_mut(&mut self) -> &mut Self::Target { + &mut self.inner + } +} +impl<'a> CommandBufferInheritanceInfoBuilder<'a> { + pub fn render_pass( + mut self, + render_pass: RenderPass, + ) -> CommandBufferInheritanceInfoBuilder<'a> { + self.inner.render_pass = render_pass; + self + } + pub fn subpass(mut self, subpass: u32) -> CommandBufferInheritanceInfoBuilder<'a> { + self.inner.subpass = subpass; + self + } + pub fn framebuffer( + mut self, + framebuffer: Framebuffer, + ) -> CommandBufferInheritanceInfoBuilder<'a> { + self.inner.framebuffer = framebuffer; + self + } + pub fn occlusion_query_enable( + mut self, + occlusion_query_enable: bool, + ) -> CommandBufferInheritanceInfoBuilder<'a> { + self.inner.occlusion_query_enable = occlusion_query_enable.into(); + self + } + pub fn query_flags( + mut self, + query_flags: QueryControlFlags, + ) -> CommandBufferInheritanceInfoBuilder<'a> { + self.inner.query_flags = query_flags; + self + } + pub fn pipeline_statistics( + mut self, + pipeline_statistics: QueryPipelineStatisticFlags, + ) -> CommandBufferInheritanceInfoBuilder<'a> { + self.inner.pipeline_statistics = pipeline_statistics; + self + } + #[doc = r" Prepends the given extension struct between the root and the first pointer. This"] + #[doc = r" method only exists on structs that can be passed to a function directly. 
Only"] + #[doc = r" valid extension structs can be pushed into the chain."] + #[doc = r" If the chain looks like `A -> B -> C`, and you call `builder.push_next(&mut D)`, then the"] + #[doc = r" chain will look like `A -> D -> B -> C`."] + pub fn push_next( + mut self, + next: &'a mut T, + ) -> CommandBufferInheritanceInfoBuilder<'a> { + unsafe { + let next_ptr = next as *mut T as *mut BaseOutStructure; + let last_next = ptr_chain_iter(next).last().unwrap(); + (*last_next).p_next = self.inner.p_next as _; + self.inner.p_next = next_ptr as _; + } + self + } + #[doc = r" Calling build will **discard** all the lifetime information. Only call this if"] + #[doc = r" necessary! Builders implement `Deref` targeting their corresponding Vulkan struct,"] + #[doc = r" so references to builders can be passed directly to Vulkan functions."] + pub fn build(self) -> CommandBufferInheritanceInfo { + self.inner + } +} +#[repr(C)] +#[derive(Copy, Clone, Debug)] +#[doc = ""] +pub struct CommandBufferBeginInfo { + pub s_type: StructureType, + pub p_next: *const c_void, + pub flags: CommandBufferUsageFlags, + pub p_inheritance_info: *const CommandBufferInheritanceInfo, +} +impl ::std::default::Default for CommandBufferBeginInfo { + fn default() -> CommandBufferBeginInfo { + CommandBufferBeginInfo { + s_type: StructureType::COMMAND_BUFFER_BEGIN_INFO, + p_next: ::std::ptr::null(), + flags: CommandBufferUsageFlags::default(), + p_inheritance_info: ::std::ptr::null(), + } + } +} +impl CommandBufferBeginInfo { + pub fn builder<'a>() -> CommandBufferBeginInfoBuilder<'a> { + CommandBufferBeginInfoBuilder { + inner: CommandBufferBeginInfo::default(), + marker: ::std::marker::PhantomData, + } + } +} +#[repr(transparent)] +pub struct CommandBufferBeginInfoBuilder<'a> { + inner: CommandBufferBeginInfo, + marker: ::std::marker::PhantomData<&'a ()>, +} +pub unsafe trait ExtendsCommandBufferBeginInfo {} +impl<'a> ::std::ops::Deref for CommandBufferBeginInfoBuilder<'a> { + type Target = CommandBufferBeginInfo; + fn deref(&self) -> &Self::Target { + &self.inner + } +} +impl<'a> ::std::ops::DerefMut for CommandBufferBeginInfoBuilder<'a> { + fn deref_mut(&mut self) -> &mut Self::Target { + &mut self.inner + } +} +impl<'a> CommandBufferBeginInfoBuilder<'a> { + pub fn flags(mut self, flags: CommandBufferUsageFlags) -> CommandBufferBeginInfoBuilder<'a> { + self.inner.flags = flags; + self + } + pub fn inheritance_info( + mut self, + inheritance_info: &'a CommandBufferInheritanceInfo, + ) -> CommandBufferBeginInfoBuilder<'a> { + self.inner.p_inheritance_info = inheritance_info; + self + } + #[doc = r" Prepends the given extension struct between the root and the first pointer. This"] + #[doc = r" method only exists on structs that can be passed to a function directly. Only"] + #[doc = r" valid extension structs can be pushed into the chain."] + #[doc = r" If the chain looks like `A -> B -> C`, and you call `builder.push_next(&mut D)`, then the"] + #[doc = r" chain will look like `A -> D -> B -> C`."] + pub fn push_next( + mut self, + next: &'a mut T, + ) -> CommandBufferBeginInfoBuilder<'a> { + unsafe { + let next_ptr = next as *mut T as *mut BaseOutStructure; + let last_next = ptr_chain_iter(next).last().unwrap(); + (*last_next).p_next = self.inner.p_next as _; + self.inner.p_next = next_ptr as _; + } + self + } + #[doc = r" Calling build will **discard** all the lifetime information. Only call this if"] + #[doc = r" necessary! 
Builders implement `Deref` targeting their corresponding Vulkan struct,"] + #[doc = r" so references to builders can be passed directly to Vulkan functions."] + pub fn build(self) -> CommandBufferBeginInfo { + self.inner + } +} +#[repr(C)] +#[derive(Copy, Clone)] +#[doc = ""] +pub struct RenderPassBeginInfo { + pub s_type: StructureType, + pub p_next: *const c_void, + pub render_pass: RenderPass, + pub framebuffer: Framebuffer, + pub render_area: Rect2D, + pub clear_value_count: u32, + pub p_clear_values: *const ClearValue, +} +impl fmt::Debug for RenderPassBeginInfo { + fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result { + fmt.debug_struct("RenderPassBeginInfo") + .field("s_type", &self.s_type) + .field("p_next", &self.p_next) + .field("render_pass", &self.render_pass) + .field("framebuffer", &self.framebuffer) + .field("render_area", &self.render_area) + .field("clear_value_count", &self.clear_value_count) + .field("p_clear_values", &"union") + .finish() + } +} +impl ::std::default::Default for RenderPassBeginInfo { + fn default() -> RenderPassBeginInfo { + RenderPassBeginInfo { + s_type: StructureType::RENDER_PASS_BEGIN_INFO, + p_next: ::std::ptr::null(), + render_pass: RenderPass::default(), + framebuffer: Framebuffer::default(), + render_area: Rect2D::default(), + clear_value_count: u32::default(), + p_clear_values: ::std::ptr::null(), + } + } +} +impl RenderPassBeginInfo { + pub fn builder<'a>() -> RenderPassBeginInfoBuilder<'a> { + RenderPassBeginInfoBuilder { + inner: RenderPassBeginInfo::default(), + marker: ::std::marker::PhantomData, + } + } +} +#[repr(transparent)] +pub struct RenderPassBeginInfoBuilder<'a> { + inner: RenderPassBeginInfo, + marker: ::std::marker::PhantomData<&'a ()>, +} +pub unsafe trait ExtendsRenderPassBeginInfo {} +impl<'a> ::std::ops::Deref for RenderPassBeginInfoBuilder<'a> { + type Target = RenderPassBeginInfo; + fn deref(&self) -> &Self::Target { + &self.inner + } +} +impl<'a> ::std::ops::DerefMut for RenderPassBeginInfoBuilder<'a> { + fn deref_mut(&mut self) -> &mut Self::Target { + &mut self.inner + } +} +impl<'a> RenderPassBeginInfoBuilder<'a> { + pub fn render_pass(mut self, render_pass: RenderPass) -> RenderPassBeginInfoBuilder<'a> { + self.inner.render_pass = render_pass; + self + } + pub fn framebuffer(mut self, framebuffer: Framebuffer) -> RenderPassBeginInfoBuilder<'a> { + self.inner.framebuffer = framebuffer; + self + } + pub fn render_area(mut self, render_area: Rect2D) -> RenderPassBeginInfoBuilder<'a> { + self.inner.render_area = render_area; + self + } + pub fn clear_values( + mut self, + clear_values: &'a [ClearValue], + ) -> RenderPassBeginInfoBuilder<'a> { + self.inner.clear_value_count = clear_values.len() as _; + self.inner.p_clear_values = clear_values.as_ptr(); + self + } + #[doc = r" Prepends the given extension struct between the root and the first pointer. This"] + #[doc = r" method only exists on structs that can be passed to a function directly. 
Only"] + #[doc = r" valid extension structs can be pushed into the chain."] + #[doc = r" If the chain looks like `A -> B -> C`, and you call `builder.push_next(&mut D)`, then the"] + #[doc = r" chain will look like `A -> D -> B -> C`."] + pub fn push_next( + mut self, + next: &'a mut T, + ) -> RenderPassBeginInfoBuilder<'a> { + unsafe { + let next_ptr = next as *mut T as *mut BaseOutStructure; + let last_next = ptr_chain_iter(next).last().unwrap(); + (*last_next).p_next = self.inner.p_next as _; + self.inner.p_next = next_ptr as _; + } + self + } + #[doc = r" Calling build will **discard** all the lifetime information. Only call this if"] + #[doc = r" necessary! Builders implement `Deref` targeting their corresponding Vulkan struct,"] + #[doc = r" so references to builders can be passed directly to Vulkan functions."] + pub fn build(self) -> RenderPassBeginInfo { + self.inner + } +} +#[repr(C)] +#[derive(Copy, Clone)] +#[doc = ""] +pub union ClearColorValue { + pub float32: [f32; 4], + pub int32: [i32; 4], + pub uint32: [u32; 4], +} +impl ::std::default::Default for ClearColorValue { + fn default() -> ClearColorValue { + unsafe { ::std::mem::zeroed() } + } +} +#[repr(C)] +#[derive(Copy, Clone, Default, Debug)] +#[doc = ""] +pub struct ClearDepthStencilValue { + pub depth: f32, + pub stencil: u32, +} +impl ClearDepthStencilValue { + pub fn builder<'a>() -> ClearDepthStencilValueBuilder<'a> { + ClearDepthStencilValueBuilder { + inner: ClearDepthStencilValue::default(), + marker: ::std::marker::PhantomData, + } + } +} +#[repr(transparent)] +pub struct ClearDepthStencilValueBuilder<'a> { + inner: ClearDepthStencilValue, + marker: ::std::marker::PhantomData<&'a ()>, +} +impl<'a> ::std::ops::Deref for ClearDepthStencilValueBuilder<'a> { + type Target = ClearDepthStencilValue; + fn deref(&self) -> &Self::Target { + &self.inner + } +} +impl<'a> ::std::ops::DerefMut for ClearDepthStencilValueBuilder<'a> { + fn deref_mut(&mut self) -> &mut Self::Target { + &mut self.inner + } +} +impl<'a> ClearDepthStencilValueBuilder<'a> { + pub fn depth(mut self, depth: f32) -> ClearDepthStencilValueBuilder<'a> { + self.inner.depth = depth; + self + } + pub fn stencil(mut self, stencil: u32) -> ClearDepthStencilValueBuilder<'a> { + self.inner.stencil = stencil; + self + } + #[doc = r" Calling build will **discard** all the lifetime information. Only call this if"] + #[doc = r" necessary! 
Builders implement `Deref` targeting their corresponding Vulkan struct,"] + #[doc = r" so references to builders can be passed directly to Vulkan functions."] + pub fn build(self) -> ClearDepthStencilValue { + self.inner + } +} +#[repr(C)] +#[derive(Copy, Clone)] +#[doc = ""] +pub union ClearValue { + pub color: ClearColorValue, + pub depth_stencil: ClearDepthStencilValue, +} +impl ::std::default::Default for ClearValue { + fn default() -> ClearValue { + unsafe { ::std::mem::zeroed() } + } +} +#[repr(C)] +#[derive(Copy, Clone, Default)] +#[doc = ""] +pub struct ClearAttachment { + pub aspect_mask: ImageAspectFlags, + pub color_attachment: u32, + pub clear_value: ClearValue, +} +impl fmt::Debug for ClearAttachment { + fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result { + fmt.debug_struct("ClearAttachment") + .field("aspect_mask", &self.aspect_mask) + .field("color_attachment", &self.color_attachment) + .field("clear_value", &"union") + .finish() + } +} +impl ClearAttachment { + pub fn builder<'a>() -> ClearAttachmentBuilder<'a> { + ClearAttachmentBuilder { + inner: ClearAttachment::default(), + marker: ::std::marker::PhantomData, + } + } +} +#[repr(transparent)] +pub struct ClearAttachmentBuilder<'a> { + inner: ClearAttachment, + marker: ::std::marker::PhantomData<&'a ()>, +} +impl<'a> ::std::ops::Deref for ClearAttachmentBuilder<'a> { + type Target = ClearAttachment; + fn deref(&self) -> &Self::Target { + &self.inner + } +} +impl<'a> ::std::ops::DerefMut for ClearAttachmentBuilder<'a> { + fn deref_mut(&mut self) -> &mut Self::Target { + &mut self.inner + } +} +impl<'a> ClearAttachmentBuilder<'a> { + pub fn aspect_mask(mut self, aspect_mask: ImageAspectFlags) -> ClearAttachmentBuilder<'a> { + self.inner.aspect_mask = aspect_mask; + self + } + pub fn color_attachment(mut self, color_attachment: u32) -> ClearAttachmentBuilder<'a> { + self.inner.color_attachment = color_attachment; + self + } + pub fn clear_value(mut self, clear_value: ClearValue) -> ClearAttachmentBuilder<'a> { + self.inner.clear_value = clear_value; + self + } + #[doc = r" Calling build will **discard** all the lifetime information. Only call this if"] + #[doc = r" necessary! 
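
[Editor's aside, not part of the vendored file.] A sketch of how `ClearValue` pairs with the `clear_values` slice setter of `RenderPassBeginInfoBuilder` above, assuming this generated module is consumed the way the `ash` crate exposes it (as `ash::vk`). The setter records the count and the pointer together; `render_pass` and `framebuffer` are left as default null handles since no device is involved, and reading a union field back is unsafe:

use ash::vk;

fn main() {
    // One ClearValue per attachment: a color clear and a depth/stencil clear.
    let clears = [
        vk::ClearValue {
            color: vk::ClearColorValue { float32: [0.0, 0.0, 0.0, 1.0] },
        },
        vk::ClearValue {
            depth_stencil: vk::ClearDepthStencilValue { depth: 1.0, stencil: 0 },
        },
    ];
    let begin = vk::RenderPassBeginInfo::builder()
        .render_area(vk::Rect2D {
            offset: vk::Offset2D { x: 0, y: 0 },
            extent: vk::Extent2D { width: 800, height: 600 },
        })
        .clear_values(&clears); // fills clear_value_count and p_clear_values
    // The builder derefs to the raw struct, so fields are readable in place.
    assert_eq!(begin.clear_value_count, 2);
    // Which union variant is meaningful depends on the attachment's format.
    unsafe { assert_eq!(clears[1].depth_stencil.stencil, 0) };
}
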
Builders implement `Deref` targeting their corresponding Vulkan struct,"] + #[doc = r" so references to builders can be passed directly to Vulkan functions."] + pub fn build(self) -> ClearAttachment { + self.inner + } +} +#[repr(C)] +#[derive(Copy, Clone, Default, Debug)] +#[doc = ""] +pub struct AttachmentDescription { + pub flags: AttachmentDescriptionFlags, + pub format: Format, + pub samples: SampleCountFlags, + pub load_op: AttachmentLoadOp, + pub store_op: AttachmentStoreOp, + pub stencil_load_op: AttachmentLoadOp, + pub stencil_store_op: AttachmentStoreOp, + pub initial_layout: ImageLayout, + pub final_layout: ImageLayout, +} +impl AttachmentDescription { + pub fn builder<'a>() -> AttachmentDescriptionBuilder<'a> { + AttachmentDescriptionBuilder { + inner: AttachmentDescription::default(), + marker: ::std::marker::PhantomData, + } + } +} +#[repr(transparent)] +pub struct AttachmentDescriptionBuilder<'a> { + inner: AttachmentDescription, + marker: ::std::marker::PhantomData<&'a ()>, +} +impl<'a> ::std::ops::Deref for AttachmentDescriptionBuilder<'a> { + type Target = AttachmentDescription; + fn deref(&self) -> &Self::Target { + &self.inner + } +} +impl<'a> ::std::ops::DerefMut for AttachmentDescriptionBuilder<'a> { + fn deref_mut(&mut self) -> &mut Self::Target { + &mut self.inner + } +} +impl<'a> AttachmentDescriptionBuilder<'a> { + pub fn flags(mut self, flags: AttachmentDescriptionFlags) -> AttachmentDescriptionBuilder<'a> { + self.inner.flags = flags; + self + } + pub fn format(mut self, format: Format) -> AttachmentDescriptionBuilder<'a> { + self.inner.format = format; + self + } + pub fn samples(mut self, samples: SampleCountFlags) -> AttachmentDescriptionBuilder<'a> { + self.inner.samples = samples; + self + } + pub fn load_op(mut self, load_op: AttachmentLoadOp) -> AttachmentDescriptionBuilder<'a> { + self.inner.load_op = load_op; + self + } + pub fn store_op(mut self, store_op: AttachmentStoreOp) -> AttachmentDescriptionBuilder<'a> { + self.inner.store_op = store_op; + self + } + pub fn stencil_load_op( + mut self, + stencil_load_op: AttachmentLoadOp, + ) -> AttachmentDescriptionBuilder<'a> { + self.inner.stencil_load_op = stencil_load_op; + self + } + pub fn stencil_store_op( + mut self, + stencil_store_op: AttachmentStoreOp, + ) -> AttachmentDescriptionBuilder<'a> { + self.inner.stencil_store_op = stencil_store_op; + self + } + pub fn initial_layout( + mut self, + initial_layout: ImageLayout, + ) -> AttachmentDescriptionBuilder<'a> { + self.inner.initial_layout = initial_layout; + self + } + pub fn final_layout(mut self, final_layout: ImageLayout) -> AttachmentDescriptionBuilder<'a> { + self.inner.final_layout = final_layout; + self + } + #[doc = r" Calling build will **discard** all the lifetime information. Only call this if"] + #[doc = r" necessary! 
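
[Editor's aside, not part of the vendored file.] The hand-written `Debug` impl for `ClearAttachment` above exists because `clear_value` is a union, which cannot derive `Debug`; it prints the placeholder string `"union"` instead. A quick sketch, again assuming the `ash::vk` path:

use ash::vk;

fn main() {
    let att = vk::ClearAttachment::builder()
        .aspect_mask(vk::ImageAspectFlags::COLOR)
        .color_attachment(0)
        .clear_value(vk::ClearValue {
            color: vk::ClearColorValue { float32: [1.0, 0.0, 0.0, 1.0] },
        })
        .build();
    // Prints `clear_value: "union"` via the manual Debug impl.
    println!("{:?}", att);
}
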
Builders implement `Deref` targeting their corresponding Vulkan struct,"] + #[doc = r" so references to builders can be passed directly to Vulkan functions."] + pub fn build(self) -> AttachmentDescription { + self.inner + } +} +#[repr(C)] +#[derive(Copy, Clone, Default, Debug)] +#[doc = ""] +pub struct AttachmentReference { + pub attachment: u32, + pub layout: ImageLayout, +} +impl AttachmentReference { + pub fn builder<'a>() -> AttachmentReferenceBuilder<'a> { + AttachmentReferenceBuilder { + inner: AttachmentReference::default(), + marker: ::std::marker::PhantomData, + } + } +} +#[repr(transparent)] +pub struct AttachmentReferenceBuilder<'a> { + inner: AttachmentReference, + marker: ::std::marker::PhantomData<&'a ()>, +} +impl<'a> ::std::ops::Deref for AttachmentReferenceBuilder<'a> { + type Target = AttachmentReference; + fn deref(&self) -> &Self::Target { + &self.inner + } +} +impl<'a> ::std::ops::DerefMut for AttachmentReferenceBuilder<'a> { + fn deref_mut(&mut self) -> &mut Self::Target { + &mut self.inner + } +} +impl<'a> AttachmentReferenceBuilder<'a> { + pub fn attachment(mut self, attachment: u32) -> AttachmentReferenceBuilder<'a> { + self.inner.attachment = attachment; + self + } + pub fn layout(mut self, layout: ImageLayout) -> AttachmentReferenceBuilder<'a> { + self.inner.layout = layout; + self + } + #[doc = r" Calling build will **discard** all the lifetime information. Only call this if"] + #[doc = r" necessary! Builders implement `Deref` targeting their corresponding Vulkan struct,"] + #[doc = r" so references to builders can be passed directly to Vulkan functions."] + pub fn build(self) -> AttachmentReference { + self.inner + } +} +#[repr(C)] +#[derive(Copy, Clone, Debug)] +#[doc = ""] +pub struct SubpassDescription { + pub flags: SubpassDescriptionFlags, + pub pipeline_bind_point: PipelineBindPoint, + pub input_attachment_count: u32, + pub p_input_attachments: *const AttachmentReference, + pub color_attachment_count: u32, + pub p_color_attachments: *const AttachmentReference, + pub p_resolve_attachments: *const AttachmentReference, + pub p_depth_stencil_attachment: *const AttachmentReference, + pub preserve_attachment_count: u32, + pub p_preserve_attachments: *const u32, +} +impl ::std::default::Default for SubpassDescription { + fn default() -> SubpassDescription { + SubpassDescription { + flags: SubpassDescriptionFlags::default(), + pipeline_bind_point: PipelineBindPoint::default(), + input_attachment_count: u32::default(), + p_input_attachments: ::std::ptr::null(), + color_attachment_count: u32::default(), + p_color_attachments: ::std::ptr::null(), + p_resolve_attachments: ::std::ptr::null(), + p_depth_stencil_attachment: ::std::ptr::null(), + preserve_attachment_count: u32::default(), + p_preserve_attachments: ::std::ptr::null(), + } + } +} +impl SubpassDescription { + pub fn builder<'a>() -> SubpassDescriptionBuilder<'a> { + SubpassDescriptionBuilder { + inner: SubpassDescription::default(), + marker: ::std::marker::PhantomData, + } + } +} +#[repr(transparent)] +pub struct SubpassDescriptionBuilder<'a> { + inner: SubpassDescription, + marker: ::std::marker::PhantomData<&'a ()>, +} +impl<'a> ::std::ops::Deref for SubpassDescriptionBuilder<'a> { + type Target = SubpassDescription; + fn deref(&self) -> &Self::Target { + &self.inner + } +} +impl<'a> ::std::ops::DerefMut for SubpassDescriptionBuilder<'a> { + fn deref_mut(&mut self) -> &mut Self::Target { + &mut self.inner + } +} +impl<'a> SubpassDescriptionBuilder<'a> { + pub fn flags(mut self, flags: 
SubpassDescriptionFlags) -> SubpassDescriptionBuilder<'a> { + self.inner.flags = flags; + self + } + pub fn pipeline_bind_point( + mut self, + pipeline_bind_point: PipelineBindPoint, + ) -> SubpassDescriptionBuilder<'a> { + self.inner.pipeline_bind_point = pipeline_bind_point; + self + } + pub fn input_attachments( + mut self, + input_attachments: &'a [AttachmentReference], + ) -> SubpassDescriptionBuilder<'a> { + self.inner.input_attachment_count = input_attachments.len() as _; + self.inner.p_input_attachments = input_attachments.as_ptr(); + self + } + pub fn color_attachments( + mut self, + color_attachments: &'a [AttachmentReference], + ) -> SubpassDescriptionBuilder<'a> { + self.inner.color_attachment_count = color_attachments.len() as _; + self.inner.p_color_attachments = color_attachments.as_ptr(); + self + } + pub fn resolve_attachments( + mut self, + resolve_attachments: &'a [AttachmentReference], + ) -> SubpassDescriptionBuilder<'a> { + self.inner.color_attachment_count = resolve_attachments.len() as _; + self.inner.p_resolve_attachments = resolve_attachments.as_ptr(); + self + } + pub fn depth_stencil_attachment( + mut self, + depth_stencil_attachment: &'a AttachmentReference, + ) -> SubpassDescriptionBuilder<'a> { + self.inner.p_depth_stencil_attachment = depth_stencil_attachment; + self + } + pub fn preserve_attachments( + mut self, + preserve_attachments: &'a [u32], + ) -> SubpassDescriptionBuilder<'a> { + self.inner.preserve_attachment_count = preserve_attachments.len() as _; + self.inner.p_preserve_attachments = preserve_attachments.as_ptr(); + self + } + #[doc = r" Calling build will **discard** all the lifetime information. Only call this if"] + #[doc = r" necessary! Builders implement `Deref` targeting their corresponding Vulkan struct,"] + #[doc = r" so references to builders can be passed directly to Vulkan functions."] + pub fn build(self) -> SubpassDescription { + self.inner + } +} +#[repr(C)] +#[derive(Copy, Clone, Default, Debug)] +#[doc = ""] +pub struct SubpassDependency { + pub src_subpass: u32, + pub dst_subpass: u32, + pub src_stage_mask: PipelineStageFlags, + pub dst_stage_mask: PipelineStageFlags, + pub src_access_mask: AccessFlags, + pub dst_access_mask: AccessFlags, + pub dependency_flags: DependencyFlags, +} +impl SubpassDependency { + pub fn builder<'a>() -> SubpassDependencyBuilder<'a> { + SubpassDependencyBuilder { + inner: SubpassDependency::default(), + marker: ::std::marker::PhantomData, + } + } +} +#[repr(transparent)] +pub struct SubpassDependencyBuilder<'a> { + inner: SubpassDependency, + marker: ::std::marker::PhantomData<&'a ()>, +} +impl<'a> ::std::ops::Deref for SubpassDependencyBuilder<'a> { + type Target = SubpassDependency; + fn deref(&self) -> &Self::Target { + &self.inner + } +} +impl<'a> ::std::ops::DerefMut for SubpassDependencyBuilder<'a> { + fn deref_mut(&mut self) -> &mut Self::Target { + &mut self.inner + } +} +impl<'a> SubpassDependencyBuilder<'a> { + pub fn src_subpass(mut self, src_subpass: u32) -> SubpassDependencyBuilder<'a> { + self.inner.src_subpass = src_subpass; + self + } + pub fn dst_subpass(mut self, dst_subpass: u32) -> SubpassDependencyBuilder<'a> { + self.inner.dst_subpass = dst_subpass; + self + } + pub fn src_stage_mask( + mut self, + src_stage_mask: PipelineStageFlags, + ) -> SubpassDependencyBuilder<'a> { + self.inner.src_stage_mask = src_stage_mask; + self + } + pub fn dst_stage_mask( + mut self, + dst_stage_mask: PipelineStageFlags, + ) -> SubpassDependencyBuilder<'a> { + self.inner.dst_stage_mask = 
dst_stage_mask; + self + } + pub fn src_access_mask(mut self, src_access_mask: AccessFlags) -> SubpassDependencyBuilder<'a> { + self.inner.src_access_mask = src_access_mask; + self + } + pub fn dst_access_mask(mut self, dst_access_mask: AccessFlags) -> SubpassDependencyBuilder<'a> { + self.inner.dst_access_mask = dst_access_mask; + self + } + pub fn dependency_flags( + mut self, + dependency_flags: DependencyFlags, + ) -> SubpassDependencyBuilder<'a> { + self.inner.dependency_flags = dependency_flags; + self + } + #[doc = r" Calling build will **discard** all the lifetime information. Only call this if"] + #[doc = r" necessary! Builders implement `Deref` targeting their corresponding Vulkan struct,"] + #[doc = r" so references to builders can be passed directly to Vulkan functions."] + pub fn build(self) -> SubpassDependency { + self.inner + } +} +#[repr(C)] +#[derive(Copy, Clone, Debug)] +#[doc = ""] +pub struct RenderPassCreateInfo { + pub s_type: StructureType, + pub p_next: *const c_void, + pub flags: RenderPassCreateFlags, + pub attachment_count: u32, + pub p_attachments: *const AttachmentDescription, + pub subpass_count: u32, + pub p_subpasses: *const SubpassDescription, + pub dependency_count: u32, + pub p_dependencies: *const SubpassDependency, +} +impl ::std::default::Default for RenderPassCreateInfo { + fn default() -> RenderPassCreateInfo { + RenderPassCreateInfo { + s_type: StructureType::RENDER_PASS_CREATE_INFO, + p_next: ::std::ptr::null(), + flags: RenderPassCreateFlags::default(), + attachment_count: u32::default(), + p_attachments: ::std::ptr::null(), + subpass_count: u32::default(), + p_subpasses: ::std::ptr::null(), + dependency_count: u32::default(), + p_dependencies: ::std::ptr::null(), + } + } +} +impl RenderPassCreateInfo { + pub fn builder<'a>() -> RenderPassCreateInfoBuilder<'a> { + RenderPassCreateInfoBuilder { + inner: RenderPassCreateInfo::default(), + marker: ::std::marker::PhantomData, + } + } +} +#[repr(transparent)] +pub struct RenderPassCreateInfoBuilder<'a> { + inner: RenderPassCreateInfo, + marker: ::std::marker::PhantomData<&'a ()>, +} +pub unsafe trait ExtendsRenderPassCreateInfo {} +impl<'a> ::std::ops::Deref for RenderPassCreateInfoBuilder<'a> { + type Target = RenderPassCreateInfo; + fn deref(&self) -> &Self::Target { + &self.inner + } +} +impl<'a> ::std::ops::DerefMut for RenderPassCreateInfoBuilder<'a> { + fn deref_mut(&mut self) -> &mut Self::Target { + &mut self.inner + } +} +impl<'a> RenderPassCreateInfoBuilder<'a> { + pub fn flags(mut self, flags: RenderPassCreateFlags) -> RenderPassCreateInfoBuilder<'a> { + self.inner.flags = flags; + self + } + pub fn attachments( + mut self, + attachments: &'a [AttachmentDescription], + ) -> RenderPassCreateInfoBuilder<'a> { + self.inner.attachment_count = attachments.len() as _; + self.inner.p_attachments = attachments.as_ptr(); + self + } + pub fn subpasses( + mut self, + subpasses: &'a [SubpassDescription], + ) -> RenderPassCreateInfoBuilder<'a> { + self.inner.subpass_count = subpasses.len() as _; + self.inner.p_subpasses = subpasses.as_ptr(); + self + } + pub fn dependencies( + mut self, + dependencies: &'a [SubpassDependency], + ) -> RenderPassCreateInfoBuilder<'a> { + self.inner.dependency_count = dependencies.len() as _; + self.inner.p_dependencies = dependencies.as_ptr(); + self + } + #[doc = r" Prepends the given extension struct between the root and the first pointer. This"] + #[doc = r" method only exists on structs that can be passed to a function directly. 
Only"] + #[doc = r" valid extension structs can be pushed into the chain."] + #[doc = r" If the chain looks like `A -> B -> C`, and you call `builder.push_next(&mut D)`, then the"] + #[doc = r" chain will look like `A -> D -> B -> C`."] + pub fn push_next( + mut self, + next: &'a mut T, + ) -> RenderPassCreateInfoBuilder<'a> { + unsafe { + let next_ptr = next as *mut T as *mut BaseOutStructure; + let last_next = ptr_chain_iter(next).last().unwrap(); + (*last_next).p_next = self.inner.p_next as _; + self.inner.p_next = next_ptr as _; + } + self + } + #[doc = r" Calling build will **discard** all the lifetime information. Only call this if"] + #[doc = r" necessary! Builders implement `Deref` targeting their corresponding Vulkan struct,"] + #[doc = r" so references to builders can be passed directly to Vulkan functions."] + pub fn build(self) -> RenderPassCreateInfo { + self.inner + } +} +#[repr(C)] +#[derive(Copy, Clone, Debug)] +#[doc = ""] +pub struct EventCreateInfo { + pub s_type: StructureType, + pub p_next: *const c_void, + pub flags: EventCreateFlags, +} +impl ::std::default::Default for EventCreateInfo { + fn default() -> EventCreateInfo { + EventCreateInfo { + s_type: StructureType::EVENT_CREATE_INFO, + p_next: ::std::ptr::null(), + flags: EventCreateFlags::default(), + } + } +} +impl EventCreateInfo { + pub fn builder<'a>() -> EventCreateInfoBuilder<'a> { + EventCreateInfoBuilder { + inner: EventCreateInfo::default(), + marker: ::std::marker::PhantomData, + } + } +} +#[repr(transparent)] +pub struct EventCreateInfoBuilder<'a> { + inner: EventCreateInfo, + marker: ::std::marker::PhantomData<&'a ()>, +} +pub unsafe trait ExtendsEventCreateInfo {} +impl<'a> ::std::ops::Deref for EventCreateInfoBuilder<'a> { + type Target = EventCreateInfo; + fn deref(&self) -> &Self::Target { + &self.inner + } +} +impl<'a> ::std::ops::DerefMut for EventCreateInfoBuilder<'a> { + fn deref_mut(&mut self) -> &mut Self::Target { + &mut self.inner + } +} +impl<'a> EventCreateInfoBuilder<'a> { + pub fn flags(mut self, flags: EventCreateFlags) -> EventCreateInfoBuilder<'a> { + self.inner.flags = flags; + self + } + #[doc = r" Prepends the given extension struct between the root and the first pointer. This"] + #[doc = r" method only exists on structs that can be passed to a function directly. Only"] + #[doc = r" valid extension structs can be pushed into the chain."] + #[doc = r" If the chain looks like `A -> B -> C`, and you call `builder.push_next(&mut D)`, then the"] + #[doc = r" chain will look like `A -> D -> B -> C`."] + pub fn push_next( + mut self, + next: &'a mut T, + ) -> EventCreateInfoBuilder<'a> { + unsafe { + let next_ptr = next as *mut T as *mut BaseOutStructure; + let last_next = ptr_chain_iter(next).last().unwrap(); + (*last_next).p_next = self.inner.p_next as _; + self.inner.p_next = next_ptr as _; + } + self + } + #[doc = r" Calling build will **discard** all the lifetime information. Only call this if"] + #[doc = r" necessary! 
Builders implement `Deref` targeting their corresponding Vulkan struct,"] + #[doc = r" so references to builders can be passed directly to Vulkan functions."] + pub fn build(self) -> EventCreateInfo { + self.inner + } +} +#[repr(C)] +#[derive(Copy, Clone, Debug)] +#[doc = ""] +pub struct FenceCreateInfo { + pub s_type: StructureType, + pub p_next: *const c_void, + pub flags: FenceCreateFlags, +} +impl ::std::default::Default for FenceCreateInfo { + fn default() -> FenceCreateInfo { + FenceCreateInfo { + s_type: StructureType::FENCE_CREATE_INFO, + p_next: ::std::ptr::null(), + flags: FenceCreateFlags::default(), + } + } +} +impl FenceCreateInfo { + pub fn builder<'a>() -> FenceCreateInfoBuilder<'a> { + FenceCreateInfoBuilder { + inner: FenceCreateInfo::default(), + marker: ::std::marker::PhantomData, + } + } +} +#[repr(transparent)] +pub struct FenceCreateInfoBuilder<'a> { + inner: FenceCreateInfo, + marker: ::std::marker::PhantomData<&'a ()>, +} +pub unsafe trait ExtendsFenceCreateInfo {} +impl<'a> ::std::ops::Deref for FenceCreateInfoBuilder<'a> { + type Target = FenceCreateInfo; + fn deref(&self) -> &Self::Target { + &self.inner + } +} +impl<'a> ::std::ops::DerefMut for FenceCreateInfoBuilder<'a> { + fn deref_mut(&mut self) -> &mut Self::Target { + &mut self.inner + } +} +impl<'a> FenceCreateInfoBuilder<'a> { + pub fn flags(mut self, flags: FenceCreateFlags) -> FenceCreateInfoBuilder<'a> { + self.inner.flags = flags; + self + } + #[doc = r" Prepends the given extension struct between the root and the first pointer. This"] + #[doc = r" method only exists on structs that can be passed to a function directly. Only"] + #[doc = r" valid extension structs can be pushed into the chain."] + #[doc = r" If the chain looks like `A -> B -> C`, and you call `builder.push_next(&mut D)`, then the"] + #[doc = r" chain will look like `A -> D -> B -> C`."] + pub fn push_next<T: ExtendsFenceCreateInfo>( + mut self, + next: &'a mut T, + ) -> FenceCreateInfoBuilder<'a> { + unsafe { + let next_ptr = next as *mut T as *mut BaseOutStructure; + let last_next = ptr_chain_iter(next).last().unwrap(); + (*last_next).p_next = self.inner.p_next as _; + self.inner.p_next = next_ptr as _; + } + self + } + #[doc = r" Calling build will **discard** all the lifetime information. Only call this if"] + #[doc = r" necessary! 
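
[Editor's aside, not part of the vendored file.] `FenceCreateInfo` is the usual first customer for a `flags` setter: per-frame fences are commonly created pre-signaled so the first wait returns immediately. A sketch assuming the `ash::vk` path; the info struct is only constructed here, not submitted to a device:

use ash::vk;

fn main() {
    let fence_info = vk::FenceCreateInfo::builder()
        .flags(vk::FenceCreateFlags::SIGNALED);
    assert_eq!(fence_info.s_type, vk::StructureType::FENCE_CREATE_INFO);
    // With a live ash device this would be consumed as, e.g.,
    //   unsafe { device.create_fence(&fence_info, None) }
    // where the Deref impl lets &fence_info stand in for &vk::FenceCreateInfo
    // without calling .build().
}
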
Builders implement `Deref` targeting their corresponding Vulkan struct,"] + #[doc = r" so references to builders can be passed directly to Vulkan functions."] + pub fn build(self) -> FenceCreateInfo { + self.inner + } +} +#[repr(C)] +#[derive(Copy, Clone, Default, Debug)] +#[doc = ""] +pub struct PhysicalDeviceFeatures { + pub robust_buffer_access: Bool32, + pub full_draw_index_uint32: Bool32, + pub image_cube_array: Bool32, + pub independent_blend: Bool32, + pub geometry_shader: Bool32, + pub tessellation_shader: Bool32, + pub sample_rate_shading: Bool32, + pub dual_src_blend: Bool32, + pub logic_op: Bool32, + pub multi_draw_indirect: Bool32, + pub draw_indirect_first_instance: Bool32, + pub depth_clamp: Bool32, + pub depth_bias_clamp: Bool32, + pub fill_mode_non_solid: Bool32, + pub depth_bounds: Bool32, + pub wide_lines: Bool32, + pub large_points: Bool32, + pub alpha_to_one: Bool32, + pub multi_viewport: Bool32, + pub sampler_anisotropy: Bool32, + pub texture_compression_etc2: Bool32, + pub texture_compression_astc_ldr: Bool32, + pub texture_compression_bc: Bool32, + pub occlusion_query_precise: Bool32, + pub pipeline_statistics_query: Bool32, + pub vertex_pipeline_stores_and_atomics: Bool32, + pub fragment_stores_and_atomics: Bool32, + pub shader_tessellation_and_geometry_point_size: Bool32, + pub shader_image_gather_extended: Bool32, + pub shader_storage_image_extended_formats: Bool32, + pub shader_storage_image_multisample: Bool32, + pub shader_storage_image_read_without_format: Bool32, + pub shader_storage_image_write_without_format: Bool32, + pub shader_uniform_buffer_array_dynamic_indexing: Bool32, + pub shader_sampled_image_array_dynamic_indexing: Bool32, + pub shader_storage_buffer_array_dynamic_indexing: Bool32, + pub shader_storage_image_array_dynamic_indexing: Bool32, + pub shader_clip_distance: Bool32, + pub shader_cull_distance: Bool32, + pub shader_float64: Bool32, + pub shader_int64: Bool32, + pub shader_int16: Bool32, + pub shader_resource_residency: Bool32, + pub shader_resource_min_lod: Bool32, + pub sparse_binding: Bool32, + pub sparse_residency_buffer: Bool32, + pub sparse_residency_image2_d: Bool32, + pub sparse_residency_image3_d: Bool32, + pub sparse_residency2_samples: Bool32, + pub sparse_residency4_samples: Bool32, + pub sparse_residency8_samples: Bool32, + pub sparse_residency16_samples: Bool32, + pub sparse_residency_aliased: Bool32, + pub variable_multisample_rate: Bool32, + pub inherited_queries: Bool32, +} +impl PhysicalDeviceFeatures { + pub fn builder<'a>() -> PhysicalDeviceFeaturesBuilder<'a> { + PhysicalDeviceFeaturesBuilder { + inner: PhysicalDeviceFeatures::default(), + marker: ::std::marker::PhantomData, + } + } +} +#[repr(transparent)] +pub struct PhysicalDeviceFeaturesBuilder<'a> { + inner: PhysicalDeviceFeatures, + marker: ::std::marker::PhantomData<&'a ()>, +} +impl<'a> ::std::ops::Deref for PhysicalDeviceFeaturesBuilder<'a> { + type Target = PhysicalDeviceFeatures; + fn deref(&self) -> &Self::Target { + &self.inner + } +} +impl<'a> ::std::ops::DerefMut for PhysicalDeviceFeaturesBuilder<'a> { + fn deref_mut(&mut self) -> &mut Self::Target { + &mut self.inner + } +} +impl<'a> PhysicalDeviceFeaturesBuilder<'a> { + pub fn robust_buffer_access( + mut self, + robust_buffer_access: bool, + ) -> PhysicalDeviceFeaturesBuilder<'a> { + self.inner.robust_buffer_access = robust_buffer_access.into(); + self + } + pub fn full_draw_index_uint32( + mut self, + full_draw_index_uint32: bool, + ) -> PhysicalDeviceFeaturesBuilder<'a> { + 
self.inner.full_draw_index_uint32 = full_draw_index_uint32.into(); + self + } + pub fn image_cube_array(mut self, image_cube_array: bool) -> PhysicalDeviceFeaturesBuilder<'a> { + self.inner.image_cube_array = image_cube_array.into(); + self + } + pub fn independent_blend( + mut self, + independent_blend: bool, + ) -> PhysicalDeviceFeaturesBuilder<'a> { + self.inner.independent_blend = independent_blend.into(); + self + } + pub fn geometry_shader(mut self, geometry_shader: bool) -> PhysicalDeviceFeaturesBuilder<'a> { + self.inner.geometry_shader = geometry_shader.into(); + self + } + pub fn tessellation_shader( + mut self, + tessellation_shader: bool, + ) -> PhysicalDeviceFeaturesBuilder<'a> { + self.inner.tessellation_shader = tessellation_shader.into(); + self + } + pub fn sample_rate_shading( + mut self, + sample_rate_shading: bool, + ) -> PhysicalDeviceFeaturesBuilder<'a> { + self.inner.sample_rate_shading = sample_rate_shading.into(); + self + } + pub fn dual_src_blend(mut self, dual_src_blend: bool) -> PhysicalDeviceFeaturesBuilder<'a> { + self.inner.dual_src_blend = dual_src_blend.into(); + self + } + pub fn logic_op(mut self, logic_op: bool) -> PhysicalDeviceFeaturesBuilder<'a> { + self.inner.logic_op = logic_op.into(); + self + } + pub fn multi_draw_indirect( + mut self, + multi_draw_indirect: bool, + ) -> PhysicalDeviceFeaturesBuilder<'a> { + self.inner.multi_draw_indirect = multi_draw_indirect.into(); + self + } + pub fn draw_indirect_first_instance( + mut self, + draw_indirect_first_instance: bool, + ) -> PhysicalDeviceFeaturesBuilder<'a> { + self.inner.draw_indirect_first_instance = draw_indirect_first_instance.into(); + self + } + pub fn depth_clamp(mut self, depth_clamp: bool) -> PhysicalDeviceFeaturesBuilder<'a> { + self.inner.depth_clamp = depth_clamp.into(); + self + } + pub fn depth_bias_clamp(mut self, depth_bias_clamp: bool) -> PhysicalDeviceFeaturesBuilder<'a> { + self.inner.depth_bias_clamp = depth_bias_clamp.into(); + self + } + pub fn fill_mode_non_solid( + mut self, + fill_mode_non_solid: bool, + ) -> PhysicalDeviceFeaturesBuilder<'a> { + self.inner.fill_mode_non_solid = fill_mode_non_solid.into(); + self + } + pub fn depth_bounds(mut self, depth_bounds: bool) -> PhysicalDeviceFeaturesBuilder<'a> { + self.inner.depth_bounds = depth_bounds.into(); + self + } + pub fn wide_lines(mut self, wide_lines: bool) -> PhysicalDeviceFeaturesBuilder<'a> { + self.inner.wide_lines = wide_lines.into(); + self + } + pub fn large_points(mut self, large_points: bool) -> PhysicalDeviceFeaturesBuilder<'a> { + self.inner.large_points = large_points.into(); + self + } + pub fn alpha_to_one(mut self, alpha_to_one: bool) -> PhysicalDeviceFeaturesBuilder<'a> { + self.inner.alpha_to_one = alpha_to_one.into(); + self + } + pub fn multi_viewport(mut self, multi_viewport: bool) -> PhysicalDeviceFeaturesBuilder<'a> { + self.inner.multi_viewport = multi_viewport.into(); + self + } + pub fn sampler_anisotropy( + mut self, + sampler_anisotropy: bool, + ) -> PhysicalDeviceFeaturesBuilder<'a> { + self.inner.sampler_anisotropy = sampler_anisotropy.into(); + self + } + pub fn texture_compression_etc2( + mut self, + texture_compression_etc2: bool, + ) -> PhysicalDeviceFeaturesBuilder<'a> { + self.inner.texture_compression_etc2 = texture_compression_etc2.into(); + self + } + pub fn texture_compression_astc_ldr( + mut self, + texture_compression_astc_ldr: bool, + ) -> PhysicalDeviceFeaturesBuilder<'a> { + self.inner.texture_compression_astc_ldr = texture_compression_astc_ldr.into(); + self + } + pub fn 
texture_compression_bc( + mut self, + texture_compression_bc: bool, + ) -> PhysicalDeviceFeaturesBuilder<'a> { + self.inner.texture_compression_bc = texture_compression_bc.into(); + self + } + pub fn occlusion_query_precise( + mut self, + occlusion_query_precise: bool, + ) -> PhysicalDeviceFeaturesBuilder<'a> { + self.inner.occlusion_query_precise = occlusion_query_precise.into(); + self + } + pub fn pipeline_statistics_query( + mut self, + pipeline_statistics_query: bool, + ) -> PhysicalDeviceFeaturesBuilder<'a> { + self.inner.pipeline_statistics_query = pipeline_statistics_query.into(); + self + } + pub fn vertex_pipeline_stores_and_atomics( + mut self, + vertex_pipeline_stores_and_atomics: bool, + ) -> PhysicalDeviceFeaturesBuilder<'a> { + self.inner.vertex_pipeline_stores_and_atomics = vertex_pipeline_stores_and_atomics.into(); + self + } + pub fn fragment_stores_and_atomics( + mut self, + fragment_stores_and_atomics: bool, + ) -> PhysicalDeviceFeaturesBuilder<'a> { + self.inner.fragment_stores_and_atomics = fragment_stores_and_atomics.into(); + self + } + pub fn shader_tessellation_and_geometry_point_size( + mut self, + shader_tessellation_and_geometry_point_size: bool, + ) -> PhysicalDeviceFeaturesBuilder<'a> { + self.inner.shader_tessellation_and_geometry_point_size = + shader_tessellation_and_geometry_point_size.into(); + self + } + pub fn shader_image_gather_extended( + mut self, + shader_image_gather_extended: bool, + ) -> PhysicalDeviceFeaturesBuilder<'a> { + self.inner.shader_image_gather_extended = shader_image_gather_extended.into(); + self + } + pub fn shader_storage_image_extended_formats( + mut self, + shader_storage_image_extended_formats: bool, + ) -> PhysicalDeviceFeaturesBuilder<'a> { + self.inner.shader_storage_image_extended_formats = + shader_storage_image_extended_formats.into(); + self + } + pub fn shader_storage_image_multisample( + mut self, + shader_storage_image_multisample: bool, + ) -> PhysicalDeviceFeaturesBuilder<'a> { + self.inner.shader_storage_image_multisample = shader_storage_image_multisample.into(); + self + } + pub fn shader_storage_image_read_without_format( + mut self, + shader_storage_image_read_without_format: bool, + ) -> PhysicalDeviceFeaturesBuilder<'a> { + self.inner.shader_storage_image_read_without_format = + shader_storage_image_read_without_format.into(); + self + } + pub fn shader_storage_image_write_without_format( + mut self, + shader_storage_image_write_without_format: bool, + ) -> PhysicalDeviceFeaturesBuilder<'a> { + self.inner.shader_storage_image_write_without_format = + shader_storage_image_write_without_format.into(); + self + } + pub fn shader_uniform_buffer_array_dynamic_indexing( + mut self, + shader_uniform_buffer_array_dynamic_indexing: bool, + ) -> PhysicalDeviceFeaturesBuilder<'a> { + self.inner.shader_uniform_buffer_array_dynamic_indexing = + shader_uniform_buffer_array_dynamic_indexing.into(); + self + } + pub fn shader_sampled_image_array_dynamic_indexing( + mut self, + shader_sampled_image_array_dynamic_indexing: bool, + ) -> PhysicalDeviceFeaturesBuilder<'a> { + self.inner.shader_sampled_image_array_dynamic_indexing = + shader_sampled_image_array_dynamic_indexing.into(); + self + } + pub fn shader_storage_buffer_array_dynamic_indexing( + mut self, + shader_storage_buffer_array_dynamic_indexing: bool, + ) -> PhysicalDeviceFeaturesBuilder<'a> { + self.inner.shader_storage_buffer_array_dynamic_indexing = + shader_storage_buffer_array_dynamic_indexing.into(); + self + } + pub fn 
shader_storage_image_array_dynamic_indexing( + mut self, + shader_storage_image_array_dynamic_indexing: bool, + ) -> PhysicalDeviceFeaturesBuilder<'a> { + self.inner.shader_storage_image_array_dynamic_indexing = + shader_storage_image_array_dynamic_indexing.into(); + self + } + pub fn shader_clip_distance( + mut self, + shader_clip_distance: bool, + ) -> PhysicalDeviceFeaturesBuilder<'a> { + self.inner.shader_clip_distance = shader_clip_distance.into(); + self + } + pub fn shader_cull_distance( + mut self, + shader_cull_distance: bool, + ) -> PhysicalDeviceFeaturesBuilder<'a> { + self.inner.shader_cull_distance = shader_cull_distance.into(); + self + } + pub fn shader_float64(mut self, shader_float64: bool) -> PhysicalDeviceFeaturesBuilder<'a> { + self.inner.shader_float64 = shader_float64.into(); + self + } + pub fn shader_int64(mut self, shader_int64: bool) -> PhysicalDeviceFeaturesBuilder<'a> { + self.inner.shader_int64 = shader_int64.into(); + self + } + pub fn shader_int16(mut self, shader_int16: bool) -> PhysicalDeviceFeaturesBuilder<'a> { + self.inner.shader_int16 = shader_int16.into(); + self + } + pub fn shader_resource_residency( + mut self, + shader_resource_residency: bool, + ) -> PhysicalDeviceFeaturesBuilder<'a> { + self.inner.shader_resource_residency = shader_resource_residency.into(); + self + } + pub fn shader_resource_min_lod( + mut self, + shader_resource_min_lod: bool, + ) -> PhysicalDeviceFeaturesBuilder<'a> { + self.inner.shader_resource_min_lod = shader_resource_min_lod.into(); + self + } + pub fn sparse_binding(mut self, sparse_binding: bool) -> PhysicalDeviceFeaturesBuilder<'a> { + self.inner.sparse_binding = sparse_binding.into(); + self + } + pub fn sparse_residency_buffer( + mut self, + sparse_residency_buffer: bool, + ) -> PhysicalDeviceFeaturesBuilder<'a> { + self.inner.sparse_residency_buffer = sparse_residency_buffer.into(); + self + } + pub fn sparse_residency_image2_d( + mut self, + sparse_residency_image2_d: bool, + ) -> PhysicalDeviceFeaturesBuilder<'a> { + self.inner.sparse_residency_image2_d = sparse_residency_image2_d.into(); + self + } + pub fn sparse_residency_image3_d( + mut self, + sparse_residency_image3_d: bool, + ) -> PhysicalDeviceFeaturesBuilder<'a> { + self.inner.sparse_residency_image3_d = sparse_residency_image3_d.into(); + self + } + pub fn sparse_residency2_samples( + mut self, + sparse_residency2_samples: bool, + ) -> PhysicalDeviceFeaturesBuilder<'a> { + self.inner.sparse_residency2_samples = sparse_residency2_samples.into(); + self + } + pub fn sparse_residency4_samples( + mut self, + sparse_residency4_samples: bool, + ) -> PhysicalDeviceFeaturesBuilder<'a> { + self.inner.sparse_residency4_samples = sparse_residency4_samples.into(); + self + } + pub fn sparse_residency8_samples( + mut self, + sparse_residency8_samples: bool, + ) -> PhysicalDeviceFeaturesBuilder<'a> { + self.inner.sparse_residency8_samples = sparse_residency8_samples.into(); + self + } + pub fn sparse_residency16_samples( + mut self, + sparse_residency16_samples: bool, + ) -> PhysicalDeviceFeaturesBuilder<'a> { + self.inner.sparse_residency16_samples = sparse_residency16_samples.into(); + self + } + pub fn sparse_residency_aliased( + mut self, + sparse_residency_aliased: bool, + ) -> PhysicalDeviceFeaturesBuilder<'a> { + self.inner.sparse_residency_aliased = sparse_residency_aliased.into(); + self + } + pub fn variable_multisample_rate( + mut self, + variable_multisample_rate: bool, + ) -> PhysicalDeviceFeaturesBuilder<'a> { + self.inner.variable_multisample_rate = 
variable_multisample_rate.into(); + self + } + pub fn inherited_queries( + mut self, + inherited_queries: bool, + ) -> PhysicalDeviceFeaturesBuilder<'a> { + self.inner.inherited_queries = inherited_queries.into(); + self + } + #[doc = r" Calling build will **discard** all the lifetime information. Only call this if"] + #[doc = r" necessary! Builders implement `Deref` targeting their corresponding Vulkan struct,"] + #[doc = r" so references to builders can be passed directly to Vulkan functions."] + pub fn build(self) -> PhysicalDeviceFeatures { + self.inner + } +} +#[repr(C)] +#[derive(Copy, Clone, Default, Debug)] +#[doc = ""] +pub struct PhysicalDeviceSparseProperties { + pub residency_standard2_d_block_shape: Bool32, + pub residency_standard2_d_multisample_block_shape: Bool32, + pub residency_standard3_d_block_shape: Bool32, + pub residency_aligned_mip_size: Bool32, + pub residency_non_resident_strict: Bool32, +} +impl PhysicalDeviceSparseProperties { + pub fn builder<'a>() -> PhysicalDeviceSparsePropertiesBuilder<'a> { + PhysicalDeviceSparsePropertiesBuilder { + inner: PhysicalDeviceSparseProperties::default(), + marker: ::std::marker::PhantomData, + } + } +} +#[repr(transparent)] +pub struct PhysicalDeviceSparsePropertiesBuilder<'a> { + inner: PhysicalDeviceSparseProperties, + marker: ::std::marker::PhantomData<&'a ()>, +} +impl<'a> ::std::ops::Deref for PhysicalDeviceSparsePropertiesBuilder<'a> { + type Target = PhysicalDeviceSparseProperties; + fn deref(&self) -> &Self::Target { + &self.inner + } +} +impl<'a> ::std::ops::DerefMut for PhysicalDeviceSparsePropertiesBuilder<'a> { + fn deref_mut(&mut self) -> &mut Self::Target { + &mut self.inner + } +} +impl<'a> PhysicalDeviceSparsePropertiesBuilder<'a> { + pub fn residency_standard2_d_block_shape( + mut self, + residency_standard2_d_block_shape: bool, + ) -> PhysicalDeviceSparsePropertiesBuilder<'a> { + self.inner.residency_standard2_d_block_shape = residency_standard2_d_block_shape.into(); + self + } + pub fn residency_standard2_d_multisample_block_shape( + mut self, + residency_standard2_d_multisample_block_shape: bool, + ) -> PhysicalDeviceSparsePropertiesBuilder<'a> { + self.inner.residency_standard2_d_multisample_block_shape = + residency_standard2_d_multisample_block_shape.into(); + self + } + pub fn residency_standard3_d_block_shape( + mut self, + residency_standard3_d_block_shape: bool, + ) -> PhysicalDeviceSparsePropertiesBuilder<'a> { + self.inner.residency_standard3_d_block_shape = residency_standard3_d_block_shape.into(); + self + } + pub fn residency_aligned_mip_size( + mut self, + residency_aligned_mip_size: bool, + ) -> PhysicalDeviceSparsePropertiesBuilder<'a> { + self.inner.residency_aligned_mip_size = residency_aligned_mip_size.into(); + self + } + pub fn residency_non_resident_strict( + mut self, + residency_non_resident_strict: bool, + ) -> PhysicalDeviceSparsePropertiesBuilder<'a> { + self.inner.residency_non_resident_strict = residency_non_resident_strict.into(); + self + } + #[doc = r" Calling build will **discard** all the lifetime information. Only call this if"] + #[doc = r" necessary! 
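
[Editor's aside, not part of the vendored file.] All of the `PhysicalDeviceFeatures` setters above share one shape: they accept `bool` and store `Bool32` (a `u32`) through `.into()`, so any field you never touch stays `0`/`VK_FALSE`. A sketch assuming the `ash::vk` path and its `TRUE`/`FALSE` constants:

use ash::vk;

fn main() {
    let features = vk::PhysicalDeviceFeatures::builder()
        .sampler_anisotropy(true)
        .fill_mode_non_solid(true)
        .build();
    assert_eq!(features.sampler_anisotropy, vk::TRUE);
    // Fields that were not set remain false.
    assert_eq!(features.geometry_shader, vk::FALSE);
}
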
Builders implement `Deref` targeting their corresponding Vulkan struct,"] + #[doc = r" so references to builders can be passed directly to Vulkan functions."] + pub fn build(self) -> PhysicalDeviceSparseProperties { + self.inner + } +} +#[repr(C)] +#[derive(Copy, Clone, Debug)] +#[doc = ""] +pub struct PhysicalDeviceLimits { + pub max_image_dimension1_d: u32, + pub max_image_dimension2_d: u32, + pub max_image_dimension3_d: u32, + pub max_image_dimension_cube: u32, + pub max_image_array_layers: u32, + pub max_texel_buffer_elements: u32, + pub max_uniform_buffer_range: u32, + pub max_storage_buffer_range: u32, + pub max_push_constants_size: u32, + pub max_memory_allocation_count: u32, + pub max_sampler_allocation_count: u32, + pub buffer_image_granularity: DeviceSize, + pub sparse_address_space_size: DeviceSize, + pub max_bound_descriptor_sets: u32, + pub max_per_stage_descriptor_samplers: u32, + pub max_per_stage_descriptor_uniform_buffers: u32, + pub max_per_stage_descriptor_storage_buffers: u32, + pub max_per_stage_descriptor_sampled_images: u32, + pub max_per_stage_descriptor_storage_images: u32, + pub max_per_stage_descriptor_input_attachments: u32, + pub max_per_stage_resources: u32, + pub max_descriptor_set_samplers: u32, + pub max_descriptor_set_uniform_buffers: u32, + pub max_descriptor_set_uniform_buffers_dynamic: u32, + pub max_descriptor_set_storage_buffers: u32, + pub max_descriptor_set_storage_buffers_dynamic: u32, + pub max_descriptor_set_sampled_images: u32, + pub max_descriptor_set_storage_images: u32, + pub max_descriptor_set_input_attachments: u32, + pub max_vertex_input_attributes: u32, + pub max_vertex_input_bindings: u32, + pub max_vertex_input_attribute_offset: u32, + pub max_vertex_input_binding_stride: u32, + pub max_vertex_output_components: u32, + pub max_tessellation_generation_level: u32, + pub max_tessellation_patch_size: u32, + pub max_tessellation_control_per_vertex_input_components: u32, + pub max_tessellation_control_per_vertex_output_components: u32, + pub max_tessellation_control_per_patch_output_components: u32, + pub max_tessellation_control_total_output_components: u32, + pub max_tessellation_evaluation_input_components: u32, + pub max_tessellation_evaluation_output_components: u32, + pub max_geometry_shader_invocations: u32, + pub max_geometry_input_components: u32, + pub max_geometry_output_components: u32, + pub max_geometry_output_vertices: u32, + pub max_geometry_total_output_components: u32, + pub max_fragment_input_components: u32, + pub max_fragment_output_attachments: u32, + pub max_fragment_dual_src_attachments: u32, + pub max_fragment_combined_output_resources: u32, + pub max_compute_shared_memory_size: u32, + pub max_compute_work_group_count: [u32; 3], + pub max_compute_work_group_invocations: u32, + pub max_compute_work_group_size: [u32; 3], + pub sub_pixel_precision_bits: u32, + pub sub_texel_precision_bits: u32, + pub mipmap_precision_bits: u32, + pub max_draw_indexed_index_value: u32, + pub max_draw_indirect_count: u32, + pub max_sampler_lod_bias: f32, + pub max_sampler_anisotropy: f32, + pub max_viewports: u32, + pub max_viewport_dimensions: [u32; 2], + pub viewport_bounds_range: [f32; 2], + pub viewport_sub_pixel_bits: u32, + pub min_memory_map_alignment: usize, + pub min_texel_buffer_offset_alignment: DeviceSize, + pub min_uniform_buffer_offset_alignment: DeviceSize, + pub min_storage_buffer_offset_alignment: DeviceSize, + pub min_texel_offset: i32, + pub max_texel_offset: u32, + pub min_texel_gather_offset: i32, + pub 
max_texel_gather_offset: u32, + pub min_interpolation_offset: f32, + pub max_interpolation_offset: f32, + pub sub_pixel_interpolation_offset_bits: u32, + pub max_framebuffer_width: u32, + pub max_framebuffer_height: u32, + pub max_framebuffer_layers: u32, + pub framebuffer_color_sample_counts: SampleCountFlags, + pub framebuffer_depth_sample_counts: SampleCountFlags, + pub framebuffer_stencil_sample_counts: SampleCountFlags, + pub framebuffer_no_attachments_sample_counts: SampleCountFlags, + pub max_color_attachments: u32, + pub sampled_image_color_sample_counts: SampleCountFlags, + pub sampled_image_integer_sample_counts: SampleCountFlags, + pub sampled_image_depth_sample_counts: SampleCountFlags, + pub sampled_image_stencil_sample_counts: SampleCountFlags, + pub storage_image_sample_counts: SampleCountFlags, + pub max_sample_mask_words: u32, + pub timestamp_compute_and_graphics: Bool32, + pub timestamp_period: f32, + pub max_clip_distances: u32, + pub max_cull_distances: u32, + pub max_combined_clip_and_cull_distances: u32, + pub discrete_queue_priorities: u32, + pub point_size_range: [f32; 2], + pub line_width_range: [f32; 2], + pub point_size_granularity: f32, + pub line_width_granularity: f32, + pub strict_lines: Bool32, + pub standard_sample_locations: Bool32, + pub optimal_buffer_copy_offset_alignment: DeviceSize, + pub optimal_buffer_copy_row_pitch_alignment: DeviceSize, + pub non_coherent_atom_size: DeviceSize, +} +impl ::std::default::Default for PhysicalDeviceLimits { + fn default() -> PhysicalDeviceLimits { + PhysicalDeviceLimits { + max_image_dimension1_d: u32::default(), + max_image_dimension2_d: u32::default(), + max_image_dimension3_d: u32::default(), + max_image_dimension_cube: u32::default(), + max_image_array_layers: u32::default(), + max_texel_buffer_elements: u32::default(), + max_uniform_buffer_range: u32::default(), + max_storage_buffer_range: u32::default(), + max_push_constants_size: u32::default(), + max_memory_allocation_count: u32::default(), + max_sampler_allocation_count: u32::default(), + buffer_image_granularity: DeviceSize::default(), + sparse_address_space_size: DeviceSize::default(), + max_bound_descriptor_sets: u32::default(), + max_per_stage_descriptor_samplers: u32::default(), + max_per_stage_descriptor_uniform_buffers: u32::default(), + max_per_stage_descriptor_storage_buffers: u32::default(), + max_per_stage_descriptor_sampled_images: u32::default(), + max_per_stage_descriptor_storage_images: u32::default(), + max_per_stage_descriptor_input_attachments: u32::default(), + max_per_stage_resources: u32::default(), + max_descriptor_set_samplers: u32::default(), + max_descriptor_set_uniform_buffers: u32::default(), + max_descriptor_set_uniform_buffers_dynamic: u32::default(), + max_descriptor_set_storage_buffers: u32::default(), + max_descriptor_set_storage_buffers_dynamic: u32::default(), + max_descriptor_set_sampled_images: u32::default(), + max_descriptor_set_storage_images: u32::default(), + max_descriptor_set_input_attachments: u32::default(), + max_vertex_input_attributes: u32::default(), + max_vertex_input_bindings: u32::default(), + max_vertex_input_attribute_offset: u32::default(), + max_vertex_input_binding_stride: u32::default(), + max_vertex_output_components: u32::default(), + max_tessellation_generation_level: u32::default(), + max_tessellation_patch_size: u32::default(), + max_tessellation_control_per_vertex_input_components: u32::default(), + max_tessellation_control_per_vertex_output_components: u32::default(), + 
max_tessellation_control_per_patch_output_components: u32::default(), + max_tessellation_control_total_output_components: u32::default(), + max_tessellation_evaluation_input_components: u32::default(), + max_tessellation_evaluation_output_components: u32::default(), + max_geometry_shader_invocations: u32::default(), + max_geometry_input_components: u32::default(), + max_geometry_output_components: u32::default(), + max_geometry_output_vertices: u32::default(), + max_geometry_total_output_components: u32::default(), + max_fragment_input_components: u32::default(), + max_fragment_output_attachments: u32::default(), + max_fragment_dual_src_attachments: u32::default(), + max_fragment_combined_output_resources: u32::default(), + max_compute_shared_memory_size: u32::default(), + max_compute_work_group_count: unsafe { ::std::mem::zeroed() }, + max_compute_work_group_invocations: u32::default(), + max_compute_work_group_size: unsafe { ::std::mem::zeroed() }, + sub_pixel_precision_bits: u32::default(), + sub_texel_precision_bits: u32::default(), + mipmap_precision_bits: u32::default(), + max_draw_indexed_index_value: u32::default(), + max_draw_indirect_count: u32::default(), + max_sampler_lod_bias: f32::default(), + max_sampler_anisotropy: f32::default(), + max_viewports: u32::default(), + max_viewport_dimensions: unsafe { ::std::mem::zeroed() }, + viewport_bounds_range: unsafe { ::std::mem::zeroed() }, + viewport_sub_pixel_bits: u32::default(), + min_memory_map_alignment: usize::default(), + min_texel_buffer_offset_alignment: DeviceSize::default(), + min_uniform_buffer_offset_alignment: DeviceSize::default(), + min_storage_buffer_offset_alignment: DeviceSize::default(), + min_texel_offset: i32::default(), + max_texel_offset: u32::default(), + min_texel_gather_offset: i32::default(), + max_texel_gather_offset: u32::default(), + min_interpolation_offset: f32::default(), + max_interpolation_offset: f32::default(), + sub_pixel_interpolation_offset_bits: u32::default(), + max_framebuffer_width: u32::default(), + max_framebuffer_height: u32::default(), + max_framebuffer_layers: u32::default(), + framebuffer_color_sample_counts: SampleCountFlags::default(), + framebuffer_depth_sample_counts: SampleCountFlags::default(), + framebuffer_stencil_sample_counts: SampleCountFlags::default(), + framebuffer_no_attachments_sample_counts: SampleCountFlags::default(), + max_color_attachments: u32::default(), + sampled_image_color_sample_counts: SampleCountFlags::default(), + sampled_image_integer_sample_counts: SampleCountFlags::default(), + sampled_image_depth_sample_counts: SampleCountFlags::default(), + sampled_image_stencil_sample_counts: SampleCountFlags::default(), + storage_image_sample_counts: SampleCountFlags::default(), + max_sample_mask_words: u32::default(), + timestamp_compute_and_graphics: Bool32::default(), + timestamp_period: f32::default(), + max_clip_distances: u32::default(), + max_cull_distances: u32::default(), + max_combined_clip_and_cull_distances: u32::default(), + discrete_queue_priorities: u32::default(), + point_size_range: unsafe { ::std::mem::zeroed() }, + line_width_range: unsafe { ::std::mem::zeroed() }, + point_size_granularity: f32::default(), + line_width_granularity: f32::default(), + strict_lines: Bool32::default(), + standard_sample_locations: Bool32::default(), + optimal_buffer_copy_offset_alignment: DeviceSize::default(), + optimal_buffer_copy_row_pitch_alignment: DeviceSize::default(), + non_coherent_atom_size: DeviceSize::default(), + } + } +} +impl PhysicalDeviceLimits { + 
pub fn builder<'a>() -> PhysicalDeviceLimitsBuilder<'a> { + PhysicalDeviceLimitsBuilder { + inner: PhysicalDeviceLimits::default(), + marker: ::std::marker::PhantomData, + } + } +} +#[repr(transparent)] +pub struct PhysicalDeviceLimitsBuilder<'a> { + inner: PhysicalDeviceLimits, + marker: ::std::marker::PhantomData<&'a ()>, +} +impl<'a> ::std::ops::Deref for PhysicalDeviceLimitsBuilder<'a> { + type Target = PhysicalDeviceLimits; + fn deref(&self) -> &Self::Target { + &self.inner + } +} +impl<'a> ::std::ops::DerefMut for PhysicalDeviceLimitsBuilder<'a> { + fn deref_mut(&mut self) -> &mut Self::Target { + &mut self.inner + } +} +impl<'a> PhysicalDeviceLimitsBuilder<'a> { + pub fn max_image_dimension1_d( + mut self, + max_image_dimension1_d: u32, + ) -> PhysicalDeviceLimitsBuilder<'a> { + self.inner.max_image_dimension1_d = max_image_dimension1_d; + self + } + pub fn max_image_dimension2_d( + mut self, + max_image_dimension2_d: u32, + ) -> PhysicalDeviceLimitsBuilder<'a> { + self.inner.max_image_dimension2_d = max_image_dimension2_d; + self + } + pub fn max_image_dimension3_d( + mut self, + max_image_dimension3_d: u32, + ) -> PhysicalDeviceLimitsBuilder<'a> { + self.inner.max_image_dimension3_d = max_image_dimension3_d; + self + } + pub fn max_image_dimension_cube( + mut self, + max_image_dimension_cube: u32, + ) -> PhysicalDeviceLimitsBuilder<'a> { + self.inner.max_image_dimension_cube = max_image_dimension_cube; + self + } + pub fn max_image_array_layers( + mut self, + max_image_array_layers: u32, + ) -> PhysicalDeviceLimitsBuilder<'a> { + self.inner.max_image_array_layers = max_image_array_layers; + self + } + pub fn max_texel_buffer_elements( + mut self, + max_texel_buffer_elements: u32, + ) -> PhysicalDeviceLimitsBuilder<'a> { + self.inner.max_texel_buffer_elements = max_texel_buffer_elements; + self + } + pub fn max_uniform_buffer_range( + mut self, + max_uniform_buffer_range: u32, + ) -> PhysicalDeviceLimitsBuilder<'a> { + self.inner.max_uniform_buffer_range = max_uniform_buffer_range; + self + } + pub fn max_storage_buffer_range( + mut self, + max_storage_buffer_range: u32, + ) -> PhysicalDeviceLimitsBuilder<'a> { + self.inner.max_storage_buffer_range = max_storage_buffer_range; + self + } + pub fn max_push_constants_size( + mut self, + max_push_constants_size: u32, + ) -> PhysicalDeviceLimitsBuilder<'a> { + self.inner.max_push_constants_size = max_push_constants_size; + self + } + pub fn max_memory_allocation_count( + mut self, + max_memory_allocation_count: u32, + ) -> PhysicalDeviceLimitsBuilder<'a> { + self.inner.max_memory_allocation_count = max_memory_allocation_count; + self + } + pub fn max_sampler_allocation_count( + mut self, + max_sampler_allocation_count: u32, + ) -> PhysicalDeviceLimitsBuilder<'a> { + self.inner.max_sampler_allocation_count = max_sampler_allocation_count; + self + } + pub fn buffer_image_granularity( + mut self, + buffer_image_granularity: DeviceSize, + ) -> PhysicalDeviceLimitsBuilder<'a> { + self.inner.buffer_image_granularity = buffer_image_granularity; + self + } + pub fn sparse_address_space_size( + mut self, + sparse_address_space_size: DeviceSize, + ) -> PhysicalDeviceLimitsBuilder<'a> { + self.inner.sparse_address_space_size = sparse_address_space_size; + self + } + pub fn max_bound_descriptor_sets( + mut self, + max_bound_descriptor_sets: u32, + ) -> PhysicalDeviceLimitsBuilder<'a> { + self.inner.max_bound_descriptor_sets = max_bound_descriptor_sets; + self + } + pub fn max_per_stage_descriptor_samplers( + mut self, + 
max_per_stage_descriptor_samplers: u32, + ) -> PhysicalDeviceLimitsBuilder<'a> { + self.inner.max_per_stage_descriptor_samplers = max_per_stage_descriptor_samplers; + self + } + pub fn max_per_stage_descriptor_uniform_buffers( + mut self, + max_per_stage_descriptor_uniform_buffers: u32, + ) -> PhysicalDeviceLimitsBuilder<'a> { + self.inner.max_per_stage_descriptor_uniform_buffers = + max_per_stage_descriptor_uniform_buffers; + self + } + pub fn max_per_stage_descriptor_storage_buffers( + mut self, + max_per_stage_descriptor_storage_buffers: u32, + ) -> PhysicalDeviceLimitsBuilder<'a> { + self.inner.max_per_stage_descriptor_storage_buffers = + max_per_stage_descriptor_storage_buffers; + self + } + pub fn max_per_stage_descriptor_sampled_images( + mut self, + max_per_stage_descriptor_sampled_images: u32, + ) -> PhysicalDeviceLimitsBuilder<'a> { + self.inner.max_per_stage_descriptor_sampled_images = + max_per_stage_descriptor_sampled_images; + self + } + pub fn max_per_stage_descriptor_storage_images( + mut self, + max_per_stage_descriptor_storage_images: u32, + ) -> PhysicalDeviceLimitsBuilder<'a> { + self.inner.max_per_stage_descriptor_storage_images = + max_per_stage_descriptor_storage_images; + self + } + pub fn max_per_stage_descriptor_input_attachments( + mut self, + max_per_stage_descriptor_input_attachments: u32, + ) -> PhysicalDeviceLimitsBuilder<'a> { + self.inner.max_per_stage_descriptor_input_attachments = + max_per_stage_descriptor_input_attachments; + self + } + pub fn max_per_stage_resources( + mut self, + max_per_stage_resources: u32, + ) -> PhysicalDeviceLimitsBuilder<'a> { + self.inner.max_per_stage_resources = max_per_stage_resources; + self + } + pub fn max_descriptor_set_samplers( + mut self, + max_descriptor_set_samplers: u32, + ) -> PhysicalDeviceLimitsBuilder<'a> { + self.inner.max_descriptor_set_samplers = max_descriptor_set_samplers; + self + } + pub fn max_descriptor_set_uniform_buffers( + mut self, + max_descriptor_set_uniform_buffers: u32, + ) -> PhysicalDeviceLimitsBuilder<'a> { + self.inner.max_descriptor_set_uniform_buffers = max_descriptor_set_uniform_buffers; + self + } + pub fn max_descriptor_set_uniform_buffers_dynamic( + mut self, + max_descriptor_set_uniform_buffers_dynamic: u32, + ) -> PhysicalDeviceLimitsBuilder<'a> { + self.inner.max_descriptor_set_uniform_buffers_dynamic = + max_descriptor_set_uniform_buffers_dynamic; + self + } + pub fn max_descriptor_set_storage_buffers( + mut self, + max_descriptor_set_storage_buffers: u32, + ) -> PhysicalDeviceLimitsBuilder<'a> { + self.inner.max_descriptor_set_storage_buffers = max_descriptor_set_storage_buffers; + self + } + pub fn max_descriptor_set_storage_buffers_dynamic( + mut self, + max_descriptor_set_storage_buffers_dynamic: u32, + ) -> PhysicalDeviceLimitsBuilder<'a> { + self.inner.max_descriptor_set_storage_buffers_dynamic = + max_descriptor_set_storage_buffers_dynamic; + self + } + pub fn max_descriptor_set_sampled_images( + mut self, + max_descriptor_set_sampled_images: u32, + ) -> PhysicalDeviceLimitsBuilder<'a> { + self.inner.max_descriptor_set_sampled_images = max_descriptor_set_sampled_images; + self + } + pub fn max_descriptor_set_storage_images( + mut self, + max_descriptor_set_storage_images: u32, + ) -> PhysicalDeviceLimitsBuilder<'a> { + self.inner.max_descriptor_set_storage_images = max_descriptor_set_storage_images; + self + } + pub fn max_descriptor_set_input_attachments( + mut self, + max_descriptor_set_input_attachments: u32, + ) -> PhysicalDeviceLimitsBuilder<'a> { + 
self.inner.max_descriptor_set_input_attachments = max_descriptor_set_input_attachments; + self + } + pub fn max_vertex_input_attributes( + mut self, + max_vertex_input_attributes: u32, + ) -> PhysicalDeviceLimitsBuilder<'a> { + self.inner.max_vertex_input_attributes = max_vertex_input_attributes; + self + } + pub fn max_vertex_input_bindings( + mut self, + max_vertex_input_bindings: u32, + ) -> PhysicalDeviceLimitsBuilder<'a> { + self.inner.max_vertex_input_bindings = max_vertex_input_bindings; + self + } + pub fn max_vertex_input_attribute_offset( + mut self, + max_vertex_input_attribute_offset: u32, + ) -> PhysicalDeviceLimitsBuilder<'a> { + self.inner.max_vertex_input_attribute_offset = max_vertex_input_attribute_offset; + self + } + pub fn max_vertex_input_binding_stride( + mut self, + max_vertex_input_binding_stride: u32, + ) -> PhysicalDeviceLimitsBuilder<'a> { + self.inner.max_vertex_input_binding_stride = max_vertex_input_binding_stride; + self + } + pub fn max_vertex_output_components( + mut self, + max_vertex_output_components: u32, + ) -> PhysicalDeviceLimitsBuilder<'a> { + self.inner.max_vertex_output_components = max_vertex_output_components; + self + } + pub fn max_tessellation_generation_level( + mut self, + max_tessellation_generation_level: u32, + ) -> PhysicalDeviceLimitsBuilder<'a> { + self.inner.max_tessellation_generation_level = max_tessellation_generation_level; + self + } + pub fn max_tessellation_patch_size( + mut self, + max_tessellation_patch_size: u32, + ) -> PhysicalDeviceLimitsBuilder<'a> { + self.inner.max_tessellation_patch_size = max_tessellation_patch_size; + self + } + pub fn max_tessellation_control_per_vertex_input_components( + mut self, + max_tessellation_control_per_vertex_input_components: u32, + ) -> PhysicalDeviceLimitsBuilder<'a> { + self.inner + .max_tessellation_control_per_vertex_input_components = + max_tessellation_control_per_vertex_input_components; + self + } + pub fn max_tessellation_control_per_vertex_output_components( + mut self, + max_tessellation_control_per_vertex_output_components: u32, + ) -> PhysicalDeviceLimitsBuilder<'a> { + self.inner + .max_tessellation_control_per_vertex_output_components = + max_tessellation_control_per_vertex_output_components; + self + } + pub fn max_tessellation_control_per_patch_output_components( + mut self, + max_tessellation_control_per_patch_output_components: u32, + ) -> PhysicalDeviceLimitsBuilder<'a> { + self.inner + .max_tessellation_control_per_patch_output_components = + max_tessellation_control_per_patch_output_components; + self + } + pub fn max_tessellation_control_total_output_components( + mut self, + max_tessellation_control_total_output_components: u32, + ) -> PhysicalDeviceLimitsBuilder<'a> { + self.inner.max_tessellation_control_total_output_components = + max_tessellation_control_total_output_components; + self + } + pub fn max_tessellation_evaluation_input_components( + mut self, + max_tessellation_evaluation_input_components: u32, + ) -> PhysicalDeviceLimitsBuilder<'a> { + self.inner.max_tessellation_evaluation_input_components = + max_tessellation_evaluation_input_components; + self + } + pub fn max_tessellation_evaluation_output_components( + mut self, + max_tessellation_evaluation_output_components: u32, + ) -> PhysicalDeviceLimitsBuilder<'a> { + self.inner.max_tessellation_evaluation_output_components = + max_tessellation_evaluation_output_components; + self + } + pub fn max_geometry_shader_invocations( + mut self, + max_geometry_shader_invocations: u32, + ) -> 
PhysicalDeviceLimitsBuilder<'a> { + self.inner.max_geometry_shader_invocations = max_geometry_shader_invocations; + self + } + pub fn max_geometry_input_components( + mut self, + max_geometry_input_components: u32, + ) -> PhysicalDeviceLimitsBuilder<'a> { + self.inner.max_geometry_input_components = max_geometry_input_components; + self + } + pub fn max_geometry_output_components( + mut self, + max_geometry_output_components: u32, + ) -> PhysicalDeviceLimitsBuilder<'a> { + self.inner.max_geometry_output_components = max_geometry_output_components; + self + } + pub fn max_geometry_output_vertices( + mut self, + max_geometry_output_vertices: u32, + ) -> PhysicalDeviceLimitsBuilder<'a> { + self.inner.max_geometry_output_vertices = max_geometry_output_vertices; + self + } + pub fn max_geometry_total_output_components( + mut self, + max_geometry_total_output_components: u32, + ) -> PhysicalDeviceLimitsBuilder<'a> { + self.inner.max_geometry_total_output_components = max_geometry_total_output_components; + self + } + pub fn max_fragment_input_components( + mut self, + max_fragment_input_components: u32, + ) -> PhysicalDeviceLimitsBuilder<'a> { + self.inner.max_fragment_input_components = max_fragment_input_components; + self + } + pub fn max_fragment_output_attachments( + mut self, + max_fragment_output_attachments: u32, + ) -> PhysicalDeviceLimitsBuilder<'a> { + self.inner.max_fragment_output_attachments = max_fragment_output_attachments; + self + } + pub fn max_fragment_dual_src_attachments( + mut self, + max_fragment_dual_src_attachments: u32, + ) -> PhysicalDeviceLimitsBuilder<'a> { + self.inner.max_fragment_dual_src_attachments = max_fragment_dual_src_attachments; + self + } + pub fn max_fragment_combined_output_resources( + mut self, + max_fragment_combined_output_resources: u32, + ) -> PhysicalDeviceLimitsBuilder<'a> { + self.inner.max_fragment_combined_output_resources = max_fragment_combined_output_resources; + self + } + pub fn max_compute_shared_memory_size( + mut self, + max_compute_shared_memory_size: u32, + ) -> PhysicalDeviceLimitsBuilder<'a> { + self.inner.max_compute_shared_memory_size = max_compute_shared_memory_size; + self + } + pub fn max_compute_work_group_count( + mut self, + max_compute_work_group_count: [u32; 3], + ) -> PhysicalDeviceLimitsBuilder<'a> { + self.inner.max_compute_work_group_count = max_compute_work_group_count; + self + } + pub fn max_compute_work_group_invocations( + mut self, + max_compute_work_group_invocations: u32, + ) -> PhysicalDeviceLimitsBuilder<'a> { + self.inner.max_compute_work_group_invocations = max_compute_work_group_invocations; + self + } + pub fn max_compute_work_group_size( + mut self, + max_compute_work_group_size: [u32; 3], + ) -> PhysicalDeviceLimitsBuilder<'a> { + self.inner.max_compute_work_group_size = max_compute_work_group_size; + self + } + pub fn sub_pixel_precision_bits( + mut self, + sub_pixel_precision_bits: u32, + ) -> PhysicalDeviceLimitsBuilder<'a> { + self.inner.sub_pixel_precision_bits = sub_pixel_precision_bits; + self + } + pub fn sub_texel_precision_bits( + mut self, + sub_texel_precision_bits: u32, + ) -> PhysicalDeviceLimitsBuilder<'a> { + self.inner.sub_texel_precision_bits = sub_texel_precision_bits; + self + } + pub fn mipmap_precision_bits( + mut self, + mipmap_precision_bits: u32, + ) -> PhysicalDeviceLimitsBuilder<'a> { + self.inner.mipmap_precision_bits = mipmap_precision_bits; + self + } + pub fn max_draw_indexed_index_value( + mut self, + max_draw_indexed_index_value: u32, + ) -> 
PhysicalDeviceLimitsBuilder<'a> { + self.inner.max_draw_indexed_index_value = max_draw_indexed_index_value; + self + } + pub fn max_draw_indirect_count( + mut self, + max_draw_indirect_count: u32, + ) -> PhysicalDeviceLimitsBuilder<'a> { + self.inner.max_draw_indirect_count = max_draw_indirect_count; + self + } + pub fn max_sampler_lod_bias( + mut self, + max_sampler_lod_bias: f32, + ) -> PhysicalDeviceLimitsBuilder<'a> { + self.inner.max_sampler_lod_bias = max_sampler_lod_bias; + self + } + pub fn max_sampler_anisotropy( + mut self, + max_sampler_anisotropy: f32, + ) -> PhysicalDeviceLimitsBuilder<'a> { + self.inner.max_sampler_anisotropy = max_sampler_anisotropy; + self + } + pub fn max_viewports(mut self, max_viewports: u32) -> PhysicalDeviceLimitsBuilder<'a> { + self.inner.max_viewports = max_viewports; + self + } + pub fn max_viewport_dimensions( + mut self, + max_viewport_dimensions: [u32; 2], + ) -> PhysicalDeviceLimitsBuilder<'a> { + self.inner.max_viewport_dimensions = max_viewport_dimensions; + self + } + pub fn viewport_bounds_range( + mut self, + viewport_bounds_range: [f32; 2], + ) -> PhysicalDeviceLimitsBuilder<'a> { + self.inner.viewport_bounds_range = viewport_bounds_range; + self + } + pub fn viewport_sub_pixel_bits( + mut self, + viewport_sub_pixel_bits: u32, + ) -> PhysicalDeviceLimitsBuilder<'a> { + self.inner.viewport_sub_pixel_bits = viewport_sub_pixel_bits; + self + } + pub fn min_memory_map_alignment( + mut self, + min_memory_map_alignment: usize, + ) -> PhysicalDeviceLimitsBuilder<'a> { + self.inner.min_memory_map_alignment = min_memory_map_alignment; + self + } + pub fn min_texel_buffer_offset_alignment( + mut self, + min_texel_buffer_offset_alignment: DeviceSize, + ) -> PhysicalDeviceLimitsBuilder<'a> { + self.inner.min_texel_buffer_offset_alignment = min_texel_buffer_offset_alignment; + self + } + pub fn min_uniform_buffer_offset_alignment( + mut self, + min_uniform_buffer_offset_alignment: DeviceSize, + ) -> PhysicalDeviceLimitsBuilder<'a> { + self.inner.min_uniform_buffer_offset_alignment = min_uniform_buffer_offset_alignment; + self + } + pub fn min_storage_buffer_offset_alignment( + mut self, + min_storage_buffer_offset_alignment: DeviceSize, + ) -> PhysicalDeviceLimitsBuilder<'a> { + self.inner.min_storage_buffer_offset_alignment = min_storage_buffer_offset_alignment; + self + } + pub fn min_texel_offset(mut self, min_texel_offset: i32) -> PhysicalDeviceLimitsBuilder<'a> { + self.inner.min_texel_offset = min_texel_offset; + self + } + pub fn max_texel_offset(mut self, max_texel_offset: u32) -> PhysicalDeviceLimitsBuilder<'a> { + self.inner.max_texel_offset = max_texel_offset; + self + } + pub fn min_texel_gather_offset( + mut self, + min_texel_gather_offset: i32, + ) -> PhysicalDeviceLimitsBuilder<'a> { + self.inner.min_texel_gather_offset = min_texel_gather_offset; + self + } + pub fn max_texel_gather_offset( + mut self, + max_texel_gather_offset: u32, + ) -> PhysicalDeviceLimitsBuilder<'a> { + self.inner.max_texel_gather_offset = max_texel_gather_offset; + self + } + pub fn min_interpolation_offset( + mut self, + min_interpolation_offset: f32, + ) -> PhysicalDeviceLimitsBuilder<'a> { + self.inner.min_interpolation_offset = min_interpolation_offset; + self + } + pub fn max_interpolation_offset( + mut self, + max_interpolation_offset: f32, + ) -> PhysicalDeviceLimitsBuilder<'a> { + self.inner.max_interpolation_offset = max_interpolation_offset; + self + } + pub fn sub_pixel_interpolation_offset_bits( + mut self, + sub_pixel_interpolation_offset_bits: u32, + 
) -> PhysicalDeviceLimitsBuilder<'a> { + self.inner.sub_pixel_interpolation_offset_bits = sub_pixel_interpolation_offset_bits; + self + } + pub fn max_framebuffer_width( + mut self, + max_framebuffer_width: u32, + ) -> PhysicalDeviceLimitsBuilder<'a> { + self.inner.max_framebuffer_width = max_framebuffer_width; + self + } + pub fn max_framebuffer_height( + mut self, + max_framebuffer_height: u32, + ) -> PhysicalDeviceLimitsBuilder<'a> { + self.inner.max_framebuffer_height = max_framebuffer_height; + self + } + pub fn max_framebuffer_layers( + mut self, + max_framebuffer_layers: u32, + ) -> PhysicalDeviceLimitsBuilder<'a> { + self.inner.max_framebuffer_layers = max_framebuffer_layers; + self + } + pub fn framebuffer_color_sample_counts( + mut self, + framebuffer_color_sample_counts: SampleCountFlags, + ) -> PhysicalDeviceLimitsBuilder<'a> { + self.inner.framebuffer_color_sample_counts = framebuffer_color_sample_counts; + self + } + pub fn framebuffer_depth_sample_counts( + mut self, + framebuffer_depth_sample_counts: SampleCountFlags, + ) -> PhysicalDeviceLimitsBuilder<'a> { + self.inner.framebuffer_depth_sample_counts = framebuffer_depth_sample_counts; + self + } + pub fn framebuffer_stencil_sample_counts( + mut self, + framebuffer_stencil_sample_counts: SampleCountFlags, + ) -> PhysicalDeviceLimitsBuilder<'a> { + self.inner.framebuffer_stencil_sample_counts = framebuffer_stencil_sample_counts; + self + } + pub fn framebuffer_no_attachments_sample_counts( + mut self, + framebuffer_no_attachments_sample_counts: SampleCountFlags, + ) -> PhysicalDeviceLimitsBuilder<'a> { + self.inner.framebuffer_no_attachments_sample_counts = + framebuffer_no_attachments_sample_counts; + self + } + pub fn max_color_attachments( + mut self, + max_color_attachments: u32, + ) -> PhysicalDeviceLimitsBuilder<'a> { + self.inner.max_color_attachments = max_color_attachments; + self + } + pub fn sampled_image_color_sample_counts( + mut self, + sampled_image_color_sample_counts: SampleCountFlags, + ) -> PhysicalDeviceLimitsBuilder<'a> { + self.inner.sampled_image_color_sample_counts = sampled_image_color_sample_counts; + self + } + pub fn sampled_image_integer_sample_counts( + mut self, + sampled_image_integer_sample_counts: SampleCountFlags, + ) -> PhysicalDeviceLimitsBuilder<'a> { + self.inner.sampled_image_integer_sample_counts = sampled_image_integer_sample_counts; + self + } + pub fn sampled_image_depth_sample_counts( + mut self, + sampled_image_depth_sample_counts: SampleCountFlags, + ) -> PhysicalDeviceLimitsBuilder<'a> { + self.inner.sampled_image_depth_sample_counts = sampled_image_depth_sample_counts; + self + } + pub fn sampled_image_stencil_sample_counts( + mut self, + sampled_image_stencil_sample_counts: SampleCountFlags, + ) -> PhysicalDeviceLimitsBuilder<'a> { + self.inner.sampled_image_stencil_sample_counts = sampled_image_stencil_sample_counts; + self + } + pub fn storage_image_sample_counts( + mut self, + storage_image_sample_counts: SampleCountFlags, + ) -> PhysicalDeviceLimitsBuilder<'a> { + self.inner.storage_image_sample_counts = storage_image_sample_counts; + self + } + pub fn max_sample_mask_words( + mut self, + max_sample_mask_words: u32, + ) -> PhysicalDeviceLimitsBuilder<'a> { + self.inner.max_sample_mask_words = max_sample_mask_words; + self + } + pub fn timestamp_compute_and_graphics( + mut self, + timestamp_compute_and_graphics: bool, + ) -> PhysicalDeviceLimitsBuilder<'a> { + self.inner.timestamp_compute_and_graphics = timestamp_compute_and_graphics.into(); + self + } + pub fn 
timestamp_period(mut self, timestamp_period: f32) -> PhysicalDeviceLimitsBuilder<'a> { + self.inner.timestamp_period = timestamp_period; + self + } + pub fn max_clip_distances( + mut self, + max_clip_distances: u32, + ) -> PhysicalDeviceLimitsBuilder<'a> { + self.inner.max_clip_distances = max_clip_distances; + self + } + pub fn max_cull_distances( + mut self, + max_cull_distances: u32, + ) -> PhysicalDeviceLimitsBuilder<'a> { + self.inner.max_cull_distances = max_cull_distances; + self + } + pub fn max_combined_clip_and_cull_distances( + mut self, + max_combined_clip_and_cull_distances: u32, + ) -> PhysicalDeviceLimitsBuilder<'a> { + self.inner.max_combined_clip_and_cull_distances = max_combined_clip_and_cull_distances; + self + } + pub fn discrete_queue_priorities( + mut self, + discrete_queue_priorities: u32, + ) -> PhysicalDeviceLimitsBuilder<'a> { + self.inner.discrete_queue_priorities = discrete_queue_priorities; + self + } + pub fn point_size_range( + mut self, + point_size_range: [f32; 2], + ) -> PhysicalDeviceLimitsBuilder<'a> { + self.inner.point_size_range = point_size_range; + self + } + pub fn line_width_range( + mut self, + line_width_range: [f32; 2], + ) -> PhysicalDeviceLimitsBuilder<'a> { + self.inner.line_width_range = line_width_range; + self + } + pub fn point_size_granularity( + mut self, + point_size_granularity: f32, + ) -> PhysicalDeviceLimitsBuilder<'a> { + self.inner.point_size_granularity = point_size_granularity; + self + } + pub fn line_width_granularity( + mut self, + line_width_granularity: f32, + ) -> PhysicalDeviceLimitsBuilder<'a> { + self.inner.line_width_granularity = line_width_granularity; + self + } + pub fn strict_lines(mut self, strict_lines: bool) -> PhysicalDeviceLimitsBuilder<'a> { + self.inner.strict_lines = strict_lines.into(); + self + } + pub fn standard_sample_locations( + mut self, + standard_sample_locations: bool, + ) -> PhysicalDeviceLimitsBuilder<'a> { + self.inner.standard_sample_locations = standard_sample_locations.into(); + self + } + pub fn optimal_buffer_copy_offset_alignment( + mut self, + optimal_buffer_copy_offset_alignment: DeviceSize, + ) -> PhysicalDeviceLimitsBuilder<'a> { + self.inner.optimal_buffer_copy_offset_alignment = optimal_buffer_copy_offset_alignment; + self + } + pub fn optimal_buffer_copy_row_pitch_alignment( + mut self, + optimal_buffer_copy_row_pitch_alignment: DeviceSize, + ) -> PhysicalDeviceLimitsBuilder<'a> { + self.inner.optimal_buffer_copy_row_pitch_alignment = + optimal_buffer_copy_row_pitch_alignment; + self + } + pub fn non_coherent_atom_size( + mut self, + non_coherent_atom_size: DeviceSize, + ) -> PhysicalDeviceLimitsBuilder<'a> { + self.inner.non_coherent_atom_size = non_coherent_atom_size; + self + } + #[doc = r" Calling build will **discard** all the lifetime information. Only call this if"] + #[doc = r" necessary! 
Builders implement `Deref` targeting their corresponding Vulkan struct,"]
+ #[doc = r" so references to builders can be passed directly to Vulkan functions."]
+ pub fn build(self) -> PhysicalDeviceLimits {
+ self.inner
+ }
+}
+#[repr(C)]
+#[derive(Copy, Clone, Debug)]
+#[doc = ""]
+pub struct SemaphoreCreateInfo {
+ pub s_type: StructureType,
+ pub p_next: *const c_void,
+ pub flags: SemaphoreCreateFlags,
+}
+impl ::std::default::Default for SemaphoreCreateInfo {
+ fn default() -> SemaphoreCreateInfo {
+ SemaphoreCreateInfo {
+ s_type: StructureType::SEMAPHORE_CREATE_INFO,
+ p_next: ::std::ptr::null(),
+ flags: SemaphoreCreateFlags::default(),
+ }
+ }
+}
+impl SemaphoreCreateInfo {
+ pub fn builder<'a>() -> SemaphoreCreateInfoBuilder<'a> {
+ SemaphoreCreateInfoBuilder {
+ inner: SemaphoreCreateInfo::default(),
+ marker: ::std::marker::PhantomData,
+ }
+ }
+}
+#[repr(transparent)]
+pub struct SemaphoreCreateInfoBuilder<'a> {
+ inner: SemaphoreCreateInfo,
+ marker: ::std::marker::PhantomData<&'a ()>,
+}
+pub unsafe trait ExtendsSemaphoreCreateInfo {}
+impl<'a> ::std::ops::Deref for SemaphoreCreateInfoBuilder<'a> {
+ type Target = SemaphoreCreateInfo;
+ fn deref(&self) -> &Self::Target {
+ &self.inner
+ }
+}
+impl<'a> ::std::ops::DerefMut for SemaphoreCreateInfoBuilder<'a> {
+ fn deref_mut(&mut self) -> &mut Self::Target {
+ &mut self.inner
+ }
+}
+impl<'a> SemaphoreCreateInfoBuilder<'a> {
+ pub fn flags(mut self, flags: SemaphoreCreateFlags) -> SemaphoreCreateInfoBuilder<'a> {
+ self.inner.flags = flags;
+ self
+ }
+ #[doc = r" Prepends the given extension struct between the root and the first pointer. This"]
+ #[doc = r" method only exists on structs that can be passed to a function directly. Only"]
+ #[doc = r" valid extension structs can be pushed into the chain."]
+ #[doc = r" If the chain looks like `A -> B -> C`, and you call `builder.push_next(&mut D)`, then the"]
+ #[doc = r" chain will look like `A -> D -> B -> C`."]
+ pub fn push_next<T: ExtendsSemaphoreCreateInfo>(
+ mut self,
+ next: &'a mut T,
+ ) -> SemaphoreCreateInfoBuilder<'a> {
+ unsafe {
+ let next_ptr = next as *mut T as *mut BaseOutStructure;
+ let last_next = ptr_chain_iter(next).last().unwrap();
+ (*last_next).p_next = self.inner.p_next as _;
+ self.inner.p_next = next_ptr as _;
+ }
+ self
+ }
+ #[doc = r" Calling build will **discard** all the lifetime information. Only call this if"]
+ #[doc = r" necessary! Builders implement `Deref` targeting their corresponding Vulkan struct,"]
+ #[doc = r" so references to builders can be passed directly to Vulkan functions."]
+ pub fn build(self) -> SemaphoreCreateInfo {
+ self.inner
+ }
+}
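+// Illustrative usage sketch; `device` and the surrounding setup are assumed.
+// The builder can be consumed with `build()`, or passed by reference directly,
+// since `Deref<Target = SemaphoreCreateInfo>` lets `&builder` stand in for `&info`:
+//
+// let info = SemaphoreCreateInfo::builder().flags(SemaphoreCreateFlags::empty());
+// let semaphore = unsafe { device.create_semaphore(&info, None) };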
+#[repr(C)]
+#[derive(Copy, Clone, Debug)]
+#[doc = ""]
+pub struct QueryPoolCreateInfo {
+ pub s_type: StructureType,
+ pub p_next: *const c_void,
+ pub flags: QueryPoolCreateFlags,
+ pub query_type: QueryType,
+ pub query_count: u32,
+ pub pipeline_statistics: QueryPipelineStatisticFlags,
+}
+impl ::std::default::Default for QueryPoolCreateInfo {
+ fn default() -> QueryPoolCreateInfo {
+ QueryPoolCreateInfo {
+ s_type: StructureType::QUERY_POOL_CREATE_INFO,
+ p_next: ::std::ptr::null(),
+ flags: QueryPoolCreateFlags::default(),
+ query_type: QueryType::default(),
+ query_count: u32::default(),
+ pipeline_statistics: QueryPipelineStatisticFlags::default(),
+ }
+ }
+}
+impl QueryPoolCreateInfo {
+ pub fn builder<'a>() -> QueryPoolCreateInfoBuilder<'a> {
+ QueryPoolCreateInfoBuilder {
+ inner: QueryPoolCreateInfo::default(),
+ marker: ::std::marker::PhantomData,
+ }
+ }
+}
+#[repr(transparent)]
+pub struct QueryPoolCreateInfoBuilder<'a> {
+ inner: QueryPoolCreateInfo,
+ marker: ::std::marker::PhantomData<&'a ()>,
+}
+pub unsafe trait ExtendsQueryPoolCreateInfo {}
+impl<'a> ::std::ops::Deref for QueryPoolCreateInfoBuilder<'a> {
+ type Target = QueryPoolCreateInfo;
+ fn deref(&self) -> &Self::Target {
+ &self.inner
+ }
+}
+impl<'a> ::std::ops::DerefMut for QueryPoolCreateInfoBuilder<'a> {
+ fn deref_mut(&mut self) -> &mut Self::Target {
+ &mut self.inner
+ }
+}
+impl<'a> QueryPoolCreateInfoBuilder<'a> {
+ pub fn flags(mut self, flags: QueryPoolCreateFlags) -> QueryPoolCreateInfoBuilder<'a> {
+ self.inner.flags = flags;
+ self
+ }
+ pub fn query_type(mut self, query_type: QueryType) -> QueryPoolCreateInfoBuilder<'a> {
+ self.inner.query_type = query_type;
+ self
+ }
+ pub fn query_count(mut self, query_count: u32) -> QueryPoolCreateInfoBuilder<'a> {
+ self.inner.query_count = query_count;
+ self
+ }
+ pub fn pipeline_statistics(
+ mut self,
+ pipeline_statistics: QueryPipelineStatisticFlags,
+ ) -> QueryPoolCreateInfoBuilder<'a> {
+ self.inner.pipeline_statistics = pipeline_statistics;
+ self
+ }
+ #[doc = r" Prepends the given extension struct between the root and the first pointer. This"]
+ #[doc = r" method only exists on structs that can be passed to a function directly. Only"]
+ #[doc = r" valid extension structs can be pushed into the chain."]
+ #[doc = r" If the chain looks like `A -> B -> C`, and you call `builder.push_next(&mut D)`, then the"]
+ #[doc = r" chain will look like `A -> D -> B -> C`."]
+ pub fn push_next<T: ExtendsQueryPoolCreateInfo>(
+ mut self,
+ next: &'a mut T,
+ ) -> QueryPoolCreateInfoBuilder<'a> {
+ unsafe {
+ let next_ptr = next as *mut T as *mut BaseOutStructure;
+ let last_next = ptr_chain_iter(next).last().unwrap();
+ (*last_next).p_next = self.inner.p_next as _;
+ self.inner.p_next = next_ptr as _;
+ }
+ self
+ }
+ #[doc = r" Calling build will **discard** all the lifetime information. Only call this if"]
+ #[doc = r" necessary! Builders implement `Deref` targeting their corresponding Vulkan struct,"]
+ #[doc = r" so references to builders can be passed directly to Vulkan functions."]
+ pub fn build(self) -> QueryPoolCreateInfo {
+ self.inner
+ }
+}
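+// Sketch of the chain behaviour documented on `push_next` (names illustrative):
+// if `d` and `e` both implement `ExtendsQueryPoolCreateInfo`, then
+// `builder.push_next(&mut d).push_next(&mut e)` yields root -> e -> d, because
+// each call prepends its argument directly behind the root struct.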
+#[repr(C)]
+#[derive(Copy, Clone, Debug)]
+#[doc = ""]
+pub struct FramebufferCreateInfo {
+ pub s_type: StructureType,
+ pub p_next: *const c_void,
+ pub flags: FramebufferCreateFlags,
+ pub render_pass: RenderPass,
+ pub attachment_count: u32,
+ pub p_attachments: *const ImageView,
+ pub width: u32,
+ pub height: u32,
+ pub layers: u32,
+}
+impl ::std::default::Default for FramebufferCreateInfo {
+ fn default() -> FramebufferCreateInfo {
+ FramebufferCreateInfo {
+ s_type: StructureType::FRAMEBUFFER_CREATE_INFO,
+ p_next: ::std::ptr::null(),
+ flags: FramebufferCreateFlags::default(),
+ render_pass: RenderPass::default(),
+ attachment_count: u32::default(),
+ p_attachments: ::std::ptr::null(),
+ width: u32::default(),
+ height: u32::default(),
+ layers: u32::default(),
+ }
+ }
+}
+impl FramebufferCreateInfo {
+ pub fn builder<'a>() -> FramebufferCreateInfoBuilder<'a> {
+ FramebufferCreateInfoBuilder {
+ inner: FramebufferCreateInfo::default(),
+ marker: ::std::marker::PhantomData,
+ }
+ }
+}
+#[repr(transparent)]
+pub struct FramebufferCreateInfoBuilder<'a> {
+ inner: FramebufferCreateInfo,
+ marker: ::std::marker::PhantomData<&'a ()>,
+}
+pub unsafe trait ExtendsFramebufferCreateInfo {}
+impl<'a> ::std::ops::Deref for FramebufferCreateInfoBuilder<'a> {
+ type Target = FramebufferCreateInfo;
+ fn deref(&self) -> &Self::Target {
+ &self.inner
+ }
+}
+impl<'a> ::std::ops::DerefMut for FramebufferCreateInfoBuilder<'a> {
+ fn deref_mut(&mut self) -> &mut Self::Target {
+ &mut self.inner
+ }
+}
+impl<'a> FramebufferCreateInfoBuilder<'a> {
+ pub fn flags(mut self, flags: FramebufferCreateFlags) -> FramebufferCreateInfoBuilder<'a> {
+ self.inner.flags = flags;
+ self
+ }
+ pub fn render_pass(mut self, render_pass: RenderPass) -> FramebufferCreateInfoBuilder<'a> {
+ self.inner.render_pass = render_pass;
+ self
+ }
+ pub fn attachments(mut self, attachments: &'a [ImageView]) -> FramebufferCreateInfoBuilder<'a> {
+ self.inner.attachment_count = attachments.len() as _;
+ self.inner.p_attachments = attachments.as_ptr();
+ self
+ }
+ pub fn width(mut self, width: u32) -> FramebufferCreateInfoBuilder<'a> {
+ self.inner.width = width;
+ self
+ }
+ pub fn height(mut self, height: u32) -> FramebufferCreateInfoBuilder<'a> {
+ self.inner.height = height;
+ self
+ }
+ pub fn layers(mut self, layers: u32) -> FramebufferCreateInfoBuilder<'a> {
+ self.inner.layers = layers;
+ self
+ }
+ #[doc = r" Prepends the given extension struct between the root and the first pointer. This"]
+ #[doc = r" method only exists on structs that can be passed to a function directly. Only"]
+ #[doc = r" valid extension structs can be pushed into the chain."]
+ #[doc = r" If the chain looks like `A -> B -> C`, and you call `builder.push_next(&mut D)`, then the"]
+ #[doc = r" chain will look like `A -> D -> B -> C`."]
+ pub fn push_next<T: ExtendsFramebufferCreateInfo>(
+ mut self,
+ next: &'a mut T,
+ ) -> FramebufferCreateInfoBuilder<'a> {
+ unsafe {
+ let next_ptr = next as *mut T as *mut BaseOutStructure;
+ let last_next = ptr_chain_iter(next).last().unwrap();
+ (*last_next).p_next = self.inner.p_next as _;
+ self.inner.p_next = next_ptr as _;
+ }
+ self
+ }
+ #[doc = r" Calling build will **discard** all the lifetime information. Only call this if"]
+ #[doc = r" necessary!
Builders implement `Deref` targeting their corresponding Vulkan struct,"] + #[doc = r" so references to builders can be passed directly to Vulkan functions."] + pub fn build(self) -> FramebufferCreateInfo { + self.inner + } +} +#[repr(C)] +#[derive(Copy, Clone, Default, Debug)] +#[doc = ""] +pub struct DrawIndirectCommand { + pub vertex_count: u32, + pub instance_count: u32, + pub first_vertex: u32, + pub first_instance: u32, +} +impl DrawIndirectCommand { + pub fn builder<'a>() -> DrawIndirectCommandBuilder<'a> { + DrawIndirectCommandBuilder { + inner: DrawIndirectCommand::default(), + marker: ::std::marker::PhantomData, + } + } +} +#[repr(transparent)] +pub struct DrawIndirectCommandBuilder<'a> { + inner: DrawIndirectCommand, + marker: ::std::marker::PhantomData<&'a ()>, +} +impl<'a> ::std::ops::Deref for DrawIndirectCommandBuilder<'a> { + type Target = DrawIndirectCommand; + fn deref(&self) -> &Self::Target { + &self.inner + } +} +impl<'a> ::std::ops::DerefMut for DrawIndirectCommandBuilder<'a> { + fn deref_mut(&mut self) -> &mut Self::Target { + &mut self.inner + } +} +impl<'a> DrawIndirectCommandBuilder<'a> { + pub fn vertex_count(mut self, vertex_count: u32) -> DrawIndirectCommandBuilder<'a> { + self.inner.vertex_count = vertex_count; + self + } + pub fn instance_count(mut self, instance_count: u32) -> DrawIndirectCommandBuilder<'a> { + self.inner.instance_count = instance_count; + self + } + pub fn first_vertex(mut self, first_vertex: u32) -> DrawIndirectCommandBuilder<'a> { + self.inner.first_vertex = first_vertex; + self + } + pub fn first_instance(mut self, first_instance: u32) -> DrawIndirectCommandBuilder<'a> { + self.inner.first_instance = first_instance; + self + } + #[doc = r" Calling build will **discard** all the lifetime information. Only call this if"] + #[doc = r" necessary! 
Builders implement `Deref` targeting their corresponding Vulkan struct,"] + #[doc = r" so references to builders can be passed directly to Vulkan functions."] + pub fn build(self) -> DrawIndirectCommand { + self.inner + } +} +#[repr(C)] +#[derive(Copy, Clone, Default, Debug)] +#[doc = ""] +pub struct DrawIndexedIndirectCommand { + pub index_count: u32, + pub instance_count: u32, + pub first_index: u32, + pub vertex_offset: i32, + pub first_instance: u32, +} +impl DrawIndexedIndirectCommand { + pub fn builder<'a>() -> DrawIndexedIndirectCommandBuilder<'a> { + DrawIndexedIndirectCommandBuilder { + inner: DrawIndexedIndirectCommand::default(), + marker: ::std::marker::PhantomData, + } + } +} +#[repr(transparent)] +pub struct DrawIndexedIndirectCommandBuilder<'a> { + inner: DrawIndexedIndirectCommand, + marker: ::std::marker::PhantomData<&'a ()>, +} +impl<'a> ::std::ops::Deref for DrawIndexedIndirectCommandBuilder<'a> { + type Target = DrawIndexedIndirectCommand; + fn deref(&self) -> &Self::Target { + &self.inner + } +} +impl<'a> ::std::ops::DerefMut for DrawIndexedIndirectCommandBuilder<'a> { + fn deref_mut(&mut self) -> &mut Self::Target { + &mut self.inner + } +} +impl<'a> DrawIndexedIndirectCommandBuilder<'a> { + pub fn index_count(mut self, index_count: u32) -> DrawIndexedIndirectCommandBuilder<'a> { + self.inner.index_count = index_count; + self + } + pub fn instance_count(mut self, instance_count: u32) -> DrawIndexedIndirectCommandBuilder<'a> { + self.inner.instance_count = instance_count; + self + } + pub fn first_index(mut self, first_index: u32) -> DrawIndexedIndirectCommandBuilder<'a> { + self.inner.first_index = first_index; + self + } + pub fn vertex_offset(mut self, vertex_offset: i32) -> DrawIndexedIndirectCommandBuilder<'a> { + self.inner.vertex_offset = vertex_offset; + self + } + pub fn first_instance(mut self, first_instance: u32) -> DrawIndexedIndirectCommandBuilder<'a> { + self.inner.first_instance = first_instance; + self + } + #[doc = r" Calling build will **discard** all the lifetime information. Only call this if"] + #[doc = r" necessary! 
Builders implement `Deref` targeting their corresponding Vulkan struct,"] + #[doc = r" so references to builders can be passed directly to Vulkan functions."] + pub fn build(self) -> DrawIndexedIndirectCommand { + self.inner + } +} +#[repr(C)] +#[derive(Copy, Clone, Default, Debug)] +#[doc = ""] +pub struct DispatchIndirectCommand { + pub x: u32, + pub y: u32, + pub z: u32, +} +impl DispatchIndirectCommand { + pub fn builder<'a>() -> DispatchIndirectCommandBuilder<'a> { + DispatchIndirectCommandBuilder { + inner: DispatchIndirectCommand::default(), + marker: ::std::marker::PhantomData, + } + } +} +#[repr(transparent)] +pub struct DispatchIndirectCommandBuilder<'a> { + inner: DispatchIndirectCommand, + marker: ::std::marker::PhantomData<&'a ()>, +} +impl<'a> ::std::ops::Deref for DispatchIndirectCommandBuilder<'a> { + type Target = DispatchIndirectCommand; + fn deref(&self) -> &Self::Target { + &self.inner + } +} +impl<'a> ::std::ops::DerefMut for DispatchIndirectCommandBuilder<'a> { + fn deref_mut(&mut self) -> &mut Self::Target { + &mut self.inner + } +} +impl<'a> DispatchIndirectCommandBuilder<'a> { + pub fn x(mut self, x: u32) -> DispatchIndirectCommandBuilder<'a> { + self.inner.x = x; + self + } + pub fn y(mut self, y: u32) -> DispatchIndirectCommandBuilder<'a> { + self.inner.y = y; + self + } + pub fn z(mut self, z: u32) -> DispatchIndirectCommandBuilder<'a> { + self.inner.z = z; + self + } + #[doc = r" Calling build will **discard** all the lifetime information. Only call this if"] + #[doc = r" necessary! Builders implement `Deref` targeting their corresponding Vulkan struct,"] + #[doc = r" so references to builders can be passed directly to Vulkan functions."] + pub fn build(self) -> DispatchIndirectCommand { + self.inner + } +} +#[repr(C)] +#[derive(Copy, Clone, Debug)] +#[doc = ""] +pub struct SubmitInfo { + pub s_type: StructureType, + pub p_next: *const c_void, + pub wait_semaphore_count: u32, + pub p_wait_semaphores: *const Semaphore, + pub p_wait_dst_stage_mask: *const PipelineStageFlags, + pub command_buffer_count: u32, + pub p_command_buffers: *const CommandBuffer, + pub signal_semaphore_count: u32, + pub p_signal_semaphores: *const Semaphore, +} +impl ::std::default::Default for SubmitInfo { + fn default() -> SubmitInfo { + SubmitInfo { + s_type: StructureType::SUBMIT_INFO, + p_next: ::std::ptr::null(), + wait_semaphore_count: u32::default(), + p_wait_semaphores: ::std::ptr::null(), + p_wait_dst_stage_mask: ::std::ptr::null(), + command_buffer_count: u32::default(), + p_command_buffers: ::std::ptr::null(), + signal_semaphore_count: u32::default(), + p_signal_semaphores: ::std::ptr::null(), + } + } +} +impl SubmitInfo { + pub fn builder<'a>() -> SubmitInfoBuilder<'a> { + SubmitInfoBuilder { + inner: SubmitInfo::default(), + marker: ::std::marker::PhantomData, + } + } +} +#[repr(transparent)] +pub struct SubmitInfoBuilder<'a> { + inner: SubmitInfo, + marker: ::std::marker::PhantomData<&'a ()>, +} +pub unsafe trait ExtendsSubmitInfo {} +impl<'a> ::std::ops::Deref for SubmitInfoBuilder<'a> { + type Target = SubmitInfo; + fn deref(&self) -> &Self::Target { + &self.inner + } +} +impl<'a> ::std::ops::DerefMut for SubmitInfoBuilder<'a> { + fn deref_mut(&mut self) -> &mut Self::Target { + &mut self.inner + } +} +impl<'a> SubmitInfoBuilder<'a> { + pub fn wait_semaphores(mut self, wait_semaphores: &'a [Semaphore]) -> SubmitInfoBuilder<'a> { + self.inner.wait_semaphore_count = wait_semaphores.len() as _; + self.inner.p_wait_semaphores = wait_semaphores.as_ptr(); + self + } + pub fn 
wait_dst_stage_mask(
+ mut self,
+ wait_dst_stage_mask: &'a [PipelineStageFlags],
+ ) -> SubmitInfoBuilder<'a> {
+ self.inner.wait_semaphore_count = wait_dst_stage_mask.len() as _;
+ self.inner.p_wait_dst_stage_mask = wait_dst_stage_mask.as_ptr();
+ self
+ }
+ pub fn command_buffers(
+ mut self,
+ command_buffers: &'a [CommandBuffer],
+ ) -> SubmitInfoBuilder<'a> {
+ self.inner.command_buffer_count = command_buffers.len() as _;
+ self.inner.p_command_buffers = command_buffers.as_ptr();
+ self
+ }
+ pub fn signal_semaphores(
+ mut self,
+ signal_semaphores: &'a [Semaphore],
+ ) -> SubmitInfoBuilder<'a> {
+ self.inner.signal_semaphore_count = signal_semaphores.len() as _;
+ self.inner.p_signal_semaphores = signal_semaphores.as_ptr();
+ self
+ }
+ #[doc = r" Prepends the given extension struct between the root and the first pointer. This"]
+ #[doc = r" method only exists on structs that can be passed to a function directly. Only"]
+ #[doc = r" valid extension structs can be pushed into the chain."]
+ #[doc = r" If the chain looks like `A -> B -> C`, and you call `builder.push_next(&mut D)`, then the"]
+ #[doc = r" chain will look like `A -> D -> B -> C`."]
+ pub fn push_next<T: ExtendsSubmitInfo>(mut self, next: &'a mut T) -> SubmitInfoBuilder<'a> {
+ unsafe {
+ let next_ptr = next as *mut T as *mut BaseOutStructure;
+ let last_next = ptr_chain_iter(next).last().unwrap();
+ (*last_next).p_next = self.inner.p_next as _;
+ self.inner.p_next = next_ptr as _;
+ }
+ self
+ }
+ #[doc = r" Calling build will **discard** all the lifetime information. Only call this if"]
+ #[doc = r" necessary! Builders implement `Deref` targeting their corresponding Vulkan struct,"]
+ #[doc = r" so references to builders can be passed directly to Vulkan functions."]
+ pub fn build(self) -> SubmitInfo {
+ self.inner
+ }
+}
+#[repr(C)]
+#[derive(Copy, Clone, Debug)]
+#[doc = ""]
+pub struct DisplayPropertiesKHR {
+ pub display: DisplayKHR,
+ pub display_name: *const c_char,
+ pub physical_dimensions: Extent2D,
+ pub physical_resolution: Extent2D,
+ pub supported_transforms: SurfaceTransformFlagsKHR,
+ pub plane_reorder_possible: Bool32,
+ pub persistent_content: Bool32,
+}
+impl ::std::default::Default for DisplayPropertiesKHR {
+ fn default() -> DisplayPropertiesKHR {
+ DisplayPropertiesKHR {
+ display: DisplayKHR::default(),
+ display_name: ::std::ptr::null(),
+ physical_dimensions: Extent2D::default(),
+ physical_resolution: Extent2D::default(),
+ supported_transforms: SurfaceTransformFlagsKHR::default(),
+ plane_reorder_possible: Bool32::default(),
+ persistent_content: Bool32::default(),
+ }
+ }
+}
+impl DisplayPropertiesKHR {
+ pub fn builder<'a>() -> DisplayPropertiesKHRBuilder<'a> {
+ DisplayPropertiesKHRBuilder {
+ inner: DisplayPropertiesKHR::default(),
+ marker: ::std::marker::PhantomData,
+ }
+ }
+}
+#[repr(transparent)]
+pub struct DisplayPropertiesKHRBuilder<'a> {
+ inner: DisplayPropertiesKHR,
+ marker: ::std::marker::PhantomData<&'a ()>,
+}
+impl<'a> ::std::ops::Deref for DisplayPropertiesKHRBuilder<'a> {
+ type Target = DisplayPropertiesKHR;
+ fn deref(&self) -> &Self::Target {
+ &self.inner
+ }
+}
+impl<'a> ::std::ops::DerefMut for DisplayPropertiesKHRBuilder<'a> {
+ fn deref_mut(&mut self) -> &mut Self::Target {
+ &mut self.inner
+ }
+}
+impl<'a> DisplayPropertiesKHRBuilder<'a> {
+ pub fn display(mut self, display: DisplayKHR) -> DisplayPropertiesKHRBuilder<'a> {
+ self.inner.display = display;
+ self
+ }
+ pub fn display_name(mut self, display_name: *const c_char) -> DisplayPropertiesKHRBuilder<'a> {
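+ // Note: `display_name` is a raw `*const c_char` that is not tied to the
+ // builder's `'a` lifetime, so the caller must keep the string alive for as
+ // long as the resulting struct is read.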
self.inner.display_name = display_name; + self + } + pub fn physical_dimensions( + mut self, + physical_dimensions: Extent2D, + ) -> DisplayPropertiesKHRBuilder<'a> { + self.inner.physical_dimensions = physical_dimensions; + self + } + pub fn physical_resolution( + mut self, + physical_resolution: Extent2D, + ) -> DisplayPropertiesKHRBuilder<'a> { + self.inner.physical_resolution = physical_resolution; + self + } + pub fn supported_transforms( + mut self, + supported_transforms: SurfaceTransformFlagsKHR, + ) -> DisplayPropertiesKHRBuilder<'a> { + self.inner.supported_transforms = supported_transforms; + self + } + pub fn plane_reorder_possible( + mut self, + plane_reorder_possible: bool, + ) -> DisplayPropertiesKHRBuilder<'a> { + self.inner.plane_reorder_possible = plane_reorder_possible.into(); + self + } + pub fn persistent_content( + mut self, + persistent_content: bool, + ) -> DisplayPropertiesKHRBuilder<'a> { + self.inner.persistent_content = persistent_content.into(); + self + } + #[doc = r" Calling build will **discard** all the lifetime information. Only call this if"] + #[doc = r" necessary! Builders implement `Deref` targeting their corresponding Vulkan struct,"] + #[doc = r" so references to builders can be passed directly to Vulkan functions."] + pub fn build(self) -> DisplayPropertiesKHR { + self.inner + } +} +#[repr(C)] +#[derive(Copy, Clone, Default, Debug)] +#[doc = ""] +pub struct DisplayPlanePropertiesKHR { + pub current_display: DisplayKHR, + pub current_stack_index: u32, +} +impl DisplayPlanePropertiesKHR { + pub fn builder<'a>() -> DisplayPlanePropertiesKHRBuilder<'a> { + DisplayPlanePropertiesKHRBuilder { + inner: DisplayPlanePropertiesKHR::default(), + marker: ::std::marker::PhantomData, + } + } +} +#[repr(transparent)] +pub struct DisplayPlanePropertiesKHRBuilder<'a> { + inner: DisplayPlanePropertiesKHR, + marker: ::std::marker::PhantomData<&'a ()>, +} +impl<'a> ::std::ops::Deref for DisplayPlanePropertiesKHRBuilder<'a> { + type Target = DisplayPlanePropertiesKHR; + fn deref(&self) -> &Self::Target { + &self.inner + } +} +impl<'a> ::std::ops::DerefMut for DisplayPlanePropertiesKHRBuilder<'a> { + fn deref_mut(&mut self) -> &mut Self::Target { + &mut self.inner + } +} +impl<'a> DisplayPlanePropertiesKHRBuilder<'a> { + pub fn current_display( + mut self, + current_display: DisplayKHR, + ) -> DisplayPlanePropertiesKHRBuilder<'a> { + self.inner.current_display = current_display; + self + } + pub fn current_stack_index( + mut self, + current_stack_index: u32, + ) -> DisplayPlanePropertiesKHRBuilder<'a> { + self.inner.current_stack_index = current_stack_index; + self + } + #[doc = r" Calling build will **discard** all the lifetime information. Only call this if"] + #[doc = r" necessary! 
Builders implement `Deref` targeting their corresponding Vulkan struct,"] + #[doc = r" so references to builders can be passed directly to Vulkan functions."] + pub fn build(self) -> DisplayPlanePropertiesKHR { + self.inner + } +} +#[repr(C)] +#[derive(Copy, Clone, Default, Debug)] +#[doc = ""] +pub struct DisplayModeParametersKHR { + pub visible_region: Extent2D, + pub refresh_rate: u32, +} +impl DisplayModeParametersKHR { + pub fn builder<'a>() -> DisplayModeParametersKHRBuilder<'a> { + DisplayModeParametersKHRBuilder { + inner: DisplayModeParametersKHR::default(), + marker: ::std::marker::PhantomData, + } + } +} +#[repr(transparent)] +pub struct DisplayModeParametersKHRBuilder<'a> { + inner: DisplayModeParametersKHR, + marker: ::std::marker::PhantomData<&'a ()>, +} +impl<'a> ::std::ops::Deref for DisplayModeParametersKHRBuilder<'a> { + type Target = DisplayModeParametersKHR; + fn deref(&self) -> &Self::Target { + &self.inner + } +} +impl<'a> ::std::ops::DerefMut for DisplayModeParametersKHRBuilder<'a> { + fn deref_mut(&mut self) -> &mut Self::Target { + &mut self.inner + } +} +impl<'a> DisplayModeParametersKHRBuilder<'a> { + pub fn visible_region( + mut self, + visible_region: Extent2D, + ) -> DisplayModeParametersKHRBuilder<'a> { + self.inner.visible_region = visible_region; + self + } + pub fn refresh_rate(mut self, refresh_rate: u32) -> DisplayModeParametersKHRBuilder<'a> { + self.inner.refresh_rate = refresh_rate; + self + } + #[doc = r" Calling build will **discard** all the lifetime information. Only call this if"] + #[doc = r" necessary! Builders implement `Deref` targeting their corresponding Vulkan struct,"] + #[doc = r" so references to builders can be passed directly to Vulkan functions."] + pub fn build(self) -> DisplayModeParametersKHR { + self.inner + } +} +#[repr(C)] +#[derive(Copy, Clone, Default, Debug)] +#[doc = ""] +pub struct DisplayModePropertiesKHR { + pub display_mode: DisplayModeKHR, + pub parameters: DisplayModeParametersKHR, +} +impl DisplayModePropertiesKHR { + pub fn builder<'a>() -> DisplayModePropertiesKHRBuilder<'a> { + DisplayModePropertiesKHRBuilder { + inner: DisplayModePropertiesKHR::default(), + marker: ::std::marker::PhantomData, + } + } +} +#[repr(transparent)] +pub struct DisplayModePropertiesKHRBuilder<'a> { + inner: DisplayModePropertiesKHR, + marker: ::std::marker::PhantomData<&'a ()>, +} +impl<'a> ::std::ops::Deref for DisplayModePropertiesKHRBuilder<'a> { + type Target = DisplayModePropertiesKHR; + fn deref(&self) -> &Self::Target { + &self.inner + } +} +impl<'a> ::std::ops::DerefMut for DisplayModePropertiesKHRBuilder<'a> { + fn deref_mut(&mut self) -> &mut Self::Target { + &mut self.inner + } +} +impl<'a> DisplayModePropertiesKHRBuilder<'a> { + pub fn display_mode( + mut self, + display_mode: DisplayModeKHR, + ) -> DisplayModePropertiesKHRBuilder<'a> { + self.inner.display_mode = display_mode; + self + } + pub fn parameters( + mut self, + parameters: DisplayModeParametersKHR, + ) -> DisplayModePropertiesKHRBuilder<'a> { + self.inner.parameters = parameters; + self + } + #[doc = r" Calling build will **discard** all the lifetime information. Only call this if"] + #[doc = r" necessary! 
Builders implement `Deref` targeting their corresponding Vulkan struct,"]
+ #[doc = r" so references to builders can be passed directly to Vulkan functions."]
+ pub fn build(self) -> DisplayModePropertiesKHR {
+ self.inner
+ }
+}
+#[repr(C)]
+#[derive(Copy, Clone, Debug)]
+#[doc = ""]
+pub struct DisplayModeCreateInfoKHR {
+ pub s_type: StructureType,
+ pub p_next: *const c_void,
+ pub flags: DisplayModeCreateFlagsKHR,
+ pub parameters: DisplayModeParametersKHR,
+}
+impl ::std::default::Default for DisplayModeCreateInfoKHR {
+ fn default() -> DisplayModeCreateInfoKHR {
+ DisplayModeCreateInfoKHR {
+ s_type: StructureType::DISPLAY_MODE_CREATE_INFO_KHR,
+ p_next: ::std::ptr::null(),
+ flags: DisplayModeCreateFlagsKHR::default(),
+ parameters: DisplayModeParametersKHR::default(),
+ }
+ }
+}
+impl DisplayModeCreateInfoKHR {
+ pub fn builder<'a>() -> DisplayModeCreateInfoKHRBuilder<'a> {
+ DisplayModeCreateInfoKHRBuilder {
+ inner: DisplayModeCreateInfoKHR::default(),
+ marker: ::std::marker::PhantomData,
+ }
+ }
+}
+#[repr(transparent)]
+pub struct DisplayModeCreateInfoKHRBuilder<'a> {
+ inner: DisplayModeCreateInfoKHR,
+ marker: ::std::marker::PhantomData<&'a ()>,
+}
+pub unsafe trait ExtendsDisplayModeCreateInfoKHR {}
+impl<'a> ::std::ops::Deref for DisplayModeCreateInfoKHRBuilder<'a> {
+ type Target = DisplayModeCreateInfoKHR;
+ fn deref(&self) -> &Self::Target {
+ &self.inner
+ }
+}
+impl<'a> ::std::ops::DerefMut for DisplayModeCreateInfoKHRBuilder<'a> {
+ fn deref_mut(&mut self) -> &mut Self::Target {
+ &mut self.inner
+ }
+}
+impl<'a> DisplayModeCreateInfoKHRBuilder<'a> {
+ pub fn flags(
+ mut self,
+ flags: DisplayModeCreateFlagsKHR,
+ ) -> DisplayModeCreateInfoKHRBuilder<'a> {
+ self.inner.flags = flags;
+ self
+ }
+ pub fn parameters(
+ mut self,
+ parameters: DisplayModeParametersKHR,
+ ) -> DisplayModeCreateInfoKHRBuilder<'a> {
+ self.inner.parameters = parameters;
+ self
+ }
+ #[doc = r" Prepends the given extension struct between the root and the first pointer. This"]
+ #[doc = r" method only exists on structs that can be passed to a function directly. Only"]
+ #[doc = r" valid extension structs can be pushed into the chain."]
+ #[doc = r" If the chain looks like `A -> B -> C`, and you call `builder.push_next(&mut D)`, then the"]
+ #[doc = r" chain will look like `A -> D -> B -> C`."]
+ pub fn push_next<T: ExtendsDisplayModeCreateInfoKHR>(
+ mut self,
+ next: &'a mut T,
+ ) -> DisplayModeCreateInfoKHRBuilder<'a> {
+ unsafe {
+ let next_ptr = next as *mut T as *mut BaseOutStructure;
+ let last_next = ptr_chain_iter(next).last().unwrap();
+ (*last_next).p_next = self.inner.p_next as _;
+ self.inner.p_next = next_ptr as _;
+ }
+ self
+ }
+ #[doc = r" Calling build will **discard** all the lifetime information. Only call this if"]
+ #[doc = r" necessary!
Builders implement `Deref` targeting their corresponding Vulkan struct,"] + #[doc = r" so references to builders can be passed directly to Vulkan functions."] + pub fn build(self) -> DisplayModeCreateInfoKHR { + self.inner + } +} +#[repr(C)] +#[derive(Copy, Clone, Default, Debug)] +#[doc = ""] +pub struct DisplayPlaneCapabilitiesKHR { + pub supported_alpha: DisplayPlaneAlphaFlagsKHR, + pub min_src_position: Offset2D, + pub max_src_position: Offset2D, + pub min_src_extent: Extent2D, + pub max_src_extent: Extent2D, + pub min_dst_position: Offset2D, + pub max_dst_position: Offset2D, + pub min_dst_extent: Extent2D, + pub max_dst_extent: Extent2D, +} +impl DisplayPlaneCapabilitiesKHR { + pub fn builder<'a>() -> DisplayPlaneCapabilitiesKHRBuilder<'a> { + DisplayPlaneCapabilitiesKHRBuilder { + inner: DisplayPlaneCapabilitiesKHR::default(), + marker: ::std::marker::PhantomData, + } + } +} +#[repr(transparent)] +pub struct DisplayPlaneCapabilitiesKHRBuilder<'a> { + inner: DisplayPlaneCapabilitiesKHR, + marker: ::std::marker::PhantomData<&'a ()>, +} +impl<'a> ::std::ops::Deref for DisplayPlaneCapabilitiesKHRBuilder<'a> { + type Target = DisplayPlaneCapabilitiesKHR; + fn deref(&self) -> &Self::Target { + &self.inner + } +} +impl<'a> ::std::ops::DerefMut for DisplayPlaneCapabilitiesKHRBuilder<'a> { + fn deref_mut(&mut self) -> &mut Self::Target { + &mut self.inner + } +} +impl<'a> DisplayPlaneCapabilitiesKHRBuilder<'a> { + pub fn supported_alpha( + mut self, + supported_alpha: DisplayPlaneAlphaFlagsKHR, + ) -> DisplayPlaneCapabilitiesKHRBuilder<'a> { + self.inner.supported_alpha = supported_alpha; + self + } + pub fn min_src_position( + mut self, + min_src_position: Offset2D, + ) -> DisplayPlaneCapabilitiesKHRBuilder<'a> { + self.inner.min_src_position = min_src_position; + self + } + pub fn max_src_position( + mut self, + max_src_position: Offset2D, + ) -> DisplayPlaneCapabilitiesKHRBuilder<'a> { + self.inner.max_src_position = max_src_position; + self + } + pub fn min_src_extent( + mut self, + min_src_extent: Extent2D, + ) -> DisplayPlaneCapabilitiesKHRBuilder<'a> { + self.inner.min_src_extent = min_src_extent; + self + } + pub fn max_src_extent( + mut self, + max_src_extent: Extent2D, + ) -> DisplayPlaneCapabilitiesKHRBuilder<'a> { + self.inner.max_src_extent = max_src_extent; + self + } + pub fn min_dst_position( + mut self, + min_dst_position: Offset2D, + ) -> DisplayPlaneCapabilitiesKHRBuilder<'a> { + self.inner.min_dst_position = min_dst_position; + self + } + pub fn max_dst_position( + mut self, + max_dst_position: Offset2D, + ) -> DisplayPlaneCapabilitiesKHRBuilder<'a> { + self.inner.max_dst_position = max_dst_position; + self + } + pub fn min_dst_extent( + mut self, + min_dst_extent: Extent2D, + ) -> DisplayPlaneCapabilitiesKHRBuilder<'a> { + self.inner.min_dst_extent = min_dst_extent; + self + } + pub fn max_dst_extent( + mut self, + max_dst_extent: Extent2D, + ) -> DisplayPlaneCapabilitiesKHRBuilder<'a> { + self.inner.max_dst_extent = max_dst_extent; + self + } + #[doc = r" Calling build will **discard** all the lifetime information. Only call this if"] + #[doc = r" necessary! 
Builders implement `Deref` targeting their corresponding Vulkan struct,"] + #[doc = r" so references to builders can be passed directly to Vulkan functions."] + pub fn build(self) -> DisplayPlaneCapabilitiesKHR { + self.inner + } +} +#[repr(C)] +#[derive(Copy, Clone, Debug)] +#[doc = ""] +pub struct DisplaySurfaceCreateInfoKHR { + pub s_type: StructureType, + pub p_next: *const c_void, + pub flags: DisplaySurfaceCreateFlagsKHR, + pub display_mode: DisplayModeKHR, + pub plane_index: u32, + pub plane_stack_index: u32, + pub transform: SurfaceTransformFlagsKHR, + pub global_alpha: f32, + pub alpha_mode: DisplayPlaneAlphaFlagsKHR, + pub image_extent: Extent2D, +} +impl ::std::default::Default for DisplaySurfaceCreateInfoKHR { + fn default() -> DisplaySurfaceCreateInfoKHR { + DisplaySurfaceCreateInfoKHR { + s_type: StructureType::DISPLAY_SURFACE_CREATE_INFO_KHR, + p_next: ::std::ptr::null(), + flags: DisplaySurfaceCreateFlagsKHR::default(), + display_mode: DisplayModeKHR::default(), + plane_index: u32::default(), + plane_stack_index: u32::default(), + transform: SurfaceTransformFlagsKHR::default(), + global_alpha: f32::default(), + alpha_mode: DisplayPlaneAlphaFlagsKHR::default(), + image_extent: Extent2D::default(), + } + } +} +impl DisplaySurfaceCreateInfoKHR { + pub fn builder<'a>() -> DisplaySurfaceCreateInfoKHRBuilder<'a> { + DisplaySurfaceCreateInfoKHRBuilder { + inner: DisplaySurfaceCreateInfoKHR::default(), + marker: ::std::marker::PhantomData, + } + } +} +#[repr(transparent)] +pub struct DisplaySurfaceCreateInfoKHRBuilder<'a> { + inner: DisplaySurfaceCreateInfoKHR, + marker: ::std::marker::PhantomData<&'a ()>, +} +pub unsafe trait ExtendsDisplaySurfaceCreateInfoKHR {} +impl<'a> ::std::ops::Deref for DisplaySurfaceCreateInfoKHRBuilder<'a> { + type Target = DisplaySurfaceCreateInfoKHR; + fn deref(&self) -> &Self::Target { + &self.inner + } +} +impl<'a> ::std::ops::DerefMut for DisplaySurfaceCreateInfoKHRBuilder<'a> { + fn deref_mut(&mut self) -> &mut Self::Target { + &mut self.inner + } +} +impl<'a> DisplaySurfaceCreateInfoKHRBuilder<'a> { + pub fn flags( + mut self, + flags: DisplaySurfaceCreateFlagsKHR, + ) -> DisplaySurfaceCreateInfoKHRBuilder<'a> { + self.inner.flags = flags; + self + } + pub fn display_mode( + mut self, + display_mode: DisplayModeKHR, + ) -> DisplaySurfaceCreateInfoKHRBuilder<'a> { + self.inner.display_mode = display_mode; + self + } + pub fn plane_index(mut self, plane_index: u32) -> DisplaySurfaceCreateInfoKHRBuilder<'a> { + self.inner.plane_index = plane_index; + self + } + pub fn plane_stack_index( + mut self, + plane_stack_index: u32, + ) -> DisplaySurfaceCreateInfoKHRBuilder<'a> { + self.inner.plane_stack_index = plane_stack_index; + self + } + pub fn transform( + mut self, + transform: SurfaceTransformFlagsKHR, + ) -> DisplaySurfaceCreateInfoKHRBuilder<'a> { + self.inner.transform = transform; + self + } + pub fn global_alpha(mut self, global_alpha: f32) -> DisplaySurfaceCreateInfoKHRBuilder<'a> { + self.inner.global_alpha = global_alpha; + self + } + pub fn alpha_mode( + mut self, + alpha_mode: DisplayPlaneAlphaFlagsKHR, + ) -> DisplaySurfaceCreateInfoKHRBuilder<'a> { + self.inner.alpha_mode = alpha_mode; + self + } + pub fn image_extent( + mut self, + image_extent: Extent2D, + ) -> DisplaySurfaceCreateInfoKHRBuilder<'a> { + self.inner.image_extent = image_extent; + self + } + #[doc = r" Prepends the given extension struct between the root and the first pointer. This"] + #[doc = r" method only exists on structs that can be passed to a function directly. 
Only"] + #[doc = r" valid extension structs can be pushed into the chain."] + #[doc = r" If the chain looks like `A -> B -> C`, and you call `builder.push_next(&mut D)`, then the"] + #[doc = r" chain will look like `A -> D -> B -> C`."] + pub fn push_next( + mut self, + next: &'a mut T, + ) -> DisplaySurfaceCreateInfoKHRBuilder<'a> { + unsafe { + let next_ptr = next as *mut T as *mut BaseOutStructure; + let last_next = ptr_chain_iter(next).last().unwrap(); + (*last_next).p_next = self.inner.p_next as _; + self.inner.p_next = next_ptr as _; + } + self + } + #[doc = r" Calling build will **discard** all the lifetime information. Only call this if"] + #[doc = r" necessary! Builders implement `Deref` targeting their corresponding Vulkan struct,"] + #[doc = r" so references to builders can be passed directly to Vulkan functions."] + pub fn build(self) -> DisplaySurfaceCreateInfoKHR { + self.inner + } +} +#[repr(C)] +#[derive(Copy, Clone, Debug)] +#[doc = ""] +pub struct DisplayPresentInfoKHR { + pub s_type: StructureType, + pub p_next: *const c_void, + pub src_rect: Rect2D, + pub dst_rect: Rect2D, + pub persistent: Bool32, +} +impl ::std::default::Default for DisplayPresentInfoKHR { + fn default() -> DisplayPresentInfoKHR { + DisplayPresentInfoKHR { + s_type: StructureType::DISPLAY_PRESENT_INFO_KHR, + p_next: ::std::ptr::null(), + src_rect: Rect2D::default(), + dst_rect: Rect2D::default(), + persistent: Bool32::default(), + } + } +} +impl DisplayPresentInfoKHR { + pub fn builder<'a>() -> DisplayPresentInfoKHRBuilder<'a> { + DisplayPresentInfoKHRBuilder { + inner: DisplayPresentInfoKHR::default(), + marker: ::std::marker::PhantomData, + } + } +} +#[repr(transparent)] +pub struct DisplayPresentInfoKHRBuilder<'a> { + inner: DisplayPresentInfoKHR, + marker: ::std::marker::PhantomData<&'a ()>, +} +unsafe impl ExtendsPresentInfoKHR for DisplayPresentInfoKHRBuilder<'_> {} +unsafe impl ExtendsPresentInfoKHR for DisplayPresentInfoKHR {} +impl<'a> ::std::ops::Deref for DisplayPresentInfoKHRBuilder<'a> { + type Target = DisplayPresentInfoKHR; + fn deref(&self) -> &Self::Target { + &self.inner + } +} +impl<'a> ::std::ops::DerefMut for DisplayPresentInfoKHRBuilder<'a> { + fn deref_mut(&mut self) -> &mut Self::Target { + &mut self.inner + } +} +impl<'a> DisplayPresentInfoKHRBuilder<'a> { + pub fn src_rect(mut self, src_rect: Rect2D) -> DisplayPresentInfoKHRBuilder<'a> { + self.inner.src_rect = src_rect; + self + } + pub fn dst_rect(mut self, dst_rect: Rect2D) -> DisplayPresentInfoKHRBuilder<'a> { + self.inner.dst_rect = dst_rect; + self + } + pub fn persistent(mut self, persistent: bool) -> DisplayPresentInfoKHRBuilder<'a> { + self.inner.persistent = persistent.into(); + self + } + #[doc = r" Calling build will **discard** all the lifetime information. Only call this if"] + #[doc = r" necessary! 
Builders implement `Deref` targeting their corresponding Vulkan struct,"] + #[doc = r" so references to builders can be passed directly to Vulkan functions."] + pub fn build(self) -> DisplayPresentInfoKHR { + self.inner + } +} +#[repr(C)] +#[derive(Copy, Clone, Default, Debug)] +#[doc = ""] +pub struct SurfaceCapabilitiesKHR { + pub min_image_count: u32, + pub max_image_count: u32, + pub current_extent: Extent2D, + pub min_image_extent: Extent2D, + pub max_image_extent: Extent2D, + pub max_image_array_layers: u32, + pub supported_transforms: SurfaceTransformFlagsKHR, + pub current_transform: SurfaceTransformFlagsKHR, + pub supported_composite_alpha: CompositeAlphaFlagsKHR, + pub supported_usage_flags: ImageUsageFlags, +} +impl SurfaceCapabilitiesKHR { + pub fn builder<'a>() -> SurfaceCapabilitiesKHRBuilder<'a> { + SurfaceCapabilitiesKHRBuilder { + inner: SurfaceCapabilitiesKHR::default(), + marker: ::std::marker::PhantomData, + } + } +} +#[repr(transparent)] +pub struct SurfaceCapabilitiesKHRBuilder<'a> { + inner: SurfaceCapabilitiesKHR, + marker: ::std::marker::PhantomData<&'a ()>, +} +impl<'a> ::std::ops::Deref for SurfaceCapabilitiesKHRBuilder<'a> { + type Target = SurfaceCapabilitiesKHR; + fn deref(&self) -> &Self::Target { + &self.inner + } +} +impl<'a> ::std::ops::DerefMut for SurfaceCapabilitiesKHRBuilder<'a> { + fn deref_mut(&mut self) -> &mut Self::Target { + &mut self.inner + } +} +impl<'a> SurfaceCapabilitiesKHRBuilder<'a> { + pub fn min_image_count(mut self, min_image_count: u32) -> SurfaceCapabilitiesKHRBuilder<'a> { + self.inner.min_image_count = min_image_count; + self + } + pub fn max_image_count(mut self, max_image_count: u32) -> SurfaceCapabilitiesKHRBuilder<'a> { + self.inner.max_image_count = max_image_count; + self + } + pub fn current_extent(mut self, current_extent: Extent2D) -> SurfaceCapabilitiesKHRBuilder<'a> { + self.inner.current_extent = current_extent; + self + } + pub fn min_image_extent( + mut self, + min_image_extent: Extent2D, + ) -> SurfaceCapabilitiesKHRBuilder<'a> { + self.inner.min_image_extent = min_image_extent; + self + } + pub fn max_image_extent( + mut self, + max_image_extent: Extent2D, + ) -> SurfaceCapabilitiesKHRBuilder<'a> { + self.inner.max_image_extent = max_image_extent; + self + } + pub fn max_image_array_layers( + mut self, + max_image_array_layers: u32, + ) -> SurfaceCapabilitiesKHRBuilder<'a> { + self.inner.max_image_array_layers = max_image_array_layers; + self + } + pub fn supported_transforms( + mut self, + supported_transforms: SurfaceTransformFlagsKHR, + ) -> SurfaceCapabilitiesKHRBuilder<'a> { + self.inner.supported_transforms = supported_transforms; + self + } + pub fn current_transform( + mut self, + current_transform: SurfaceTransformFlagsKHR, + ) -> SurfaceCapabilitiesKHRBuilder<'a> { + self.inner.current_transform = current_transform; + self + } + pub fn supported_composite_alpha( + mut self, + supported_composite_alpha: CompositeAlphaFlagsKHR, + ) -> SurfaceCapabilitiesKHRBuilder<'a> { + self.inner.supported_composite_alpha = supported_composite_alpha; + self + } + pub fn supported_usage_flags( + mut self, + supported_usage_flags: ImageUsageFlags, + ) -> SurfaceCapabilitiesKHRBuilder<'a> { + self.inner.supported_usage_flags = supported_usage_flags; + self + } + #[doc = r" Calling build will **discard** all the lifetime information. Only call this if"] + #[doc = r" necessary! 
Builders implement `Deref` targeting their corresponding Vulkan struct,"] + #[doc = r" so references to builders can be passed directly to Vulkan functions."] + pub fn build(self) -> SurfaceCapabilitiesKHR { + self.inner + } +} +#[repr(C)] +#[derive(Copy, Clone, Debug)] +#[doc = ""] +pub struct AndroidSurfaceCreateInfoKHR { + pub s_type: StructureType, + pub p_next: *const c_void, + pub flags: AndroidSurfaceCreateFlagsKHR, + pub window: *mut ANativeWindow, +} +impl ::std::default::Default for AndroidSurfaceCreateInfoKHR { + fn default() -> AndroidSurfaceCreateInfoKHR { + AndroidSurfaceCreateInfoKHR { + s_type: StructureType::ANDROID_SURFACE_CREATE_INFO_KHR, + p_next: ::std::ptr::null(), + flags: AndroidSurfaceCreateFlagsKHR::default(), + window: ::std::ptr::null_mut(), + } + } +} +impl AndroidSurfaceCreateInfoKHR { + pub fn builder<'a>() -> AndroidSurfaceCreateInfoKHRBuilder<'a> { + AndroidSurfaceCreateInfoKHRBuilder { + inner: AndroidSurfaceCreateInfoKHR::default(), + marker: ::std::marker::PhantomData, + } + } +} +#[repr(transparent)] +pub struct AndroidSurfaceCreateInfoKHRBuilder<'a> { + inner: AndroidSurfaceCreateInfoKHR, + marker: ::std::marker::PhantomData<&'a ()>, +} +pub unsafe trait ExtendsAndroidSurfaceCreateInfoKHR {} +impl<'a> ::std::ops::Deref for AndroidSurfaceCreateInfoKHRBuilder<'a> { + type Target = AndroidSurfaceCreateInfoKHR; + fn deref(&self) -> &Self::Target { + &self.inner + } +} +impl<'a> ::std::ops::DerefMut for AndroidSurfaceCreateInfoKHRBuilder<'a> { + fn deref_mut(&mut self) -> &mut Self::Target { + &mut self.inner + } +} +impl<'a> AndroidSurfaceCreateInfoKHRBuilder<'a> { + pub fn flags( + mut self, + flags: AndroidSurfaceCreateFlagsKHR, + ) -> AndroidSurfaceCreateInfoKHRBuilder<'a> { + self.inner.flags = flags; + self + } + pub fn window(mut self, window: *mut ANativeWindow) -> AndroidSurfaceCreateInfoKHRBuilder<'a> { + self.inner.window = window; + self + } + #[doc = r" Prepends the given extension struct between the root and the first pointer. This"] + #[doc = r" method only exists on structs that can be passed to a function directly. Only"] + #[doc = r" valid extension structs can be pushed into the chain."] + #[doc = r" If the chain looks like `A -> B -> C`, and you call `builder.push_next(&mut D)`, then the"] + #[doc = r" chain will look like `A -> D -> B -> C`."] + pub fn push_next( + mut self, + next: &'a mut T, + ) -> AndroidSurfaceCreateInfoKHRBuilder<'a> { + unsafe { + let next_ptr = next as *mut T as *mut BaseOutStructure; + let last_next = ptr_chain_iter(next).last().unwrap(); + (*last_next).p_next = self.inner.p_next as _; + self.inner.p_next = next_ptr as _; + } + self + } + #[doc = r" Calling build will **discard** all the lifetime information. Only call this if"] + #[doc = r" necessary! 
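// Illustrative sketch, not part of this patch: the Android path only needs the raw
// ANativeWindow pointer handed over from the embedder.
fn android_surface_info<'a>(
    window: *mut ANativeWindow,
) -> AndroidSurfaceCreateInfoKHRBuilder<'a> {
    AndroidSurfaceCreateInfoKHR::builder().window(window)
}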
Builders implement `Deref` targeting their corresponding Vulkan struct,"]
+    #[doc = r" so references to builders can be passed directly to Vulkan functions."]
+    pub fn build(self) -> AndroidSurfaceCreateInfoKHR {
+        self.inner
+    }
+}
+#[repr(C)]
+#[derive(Copy, Clone, Debug)]
+#[doc = ""]
+pub struct ViSurfaceCreateInfoNN {
+    pub s_type: StructureType,
+    pub p_next: *const c_void,
+    pub flags: ViSurfaceCreateFlagsNN,
+    pub window: *mut c_void,
+}
+impl ::std::default::Default for ViSurfaceCreateInfoNN {
+    fn default() -> ViSurfaceCreateInfoNN {
+        ViSurfaceCreateInfoNN {
+            s_type: StructureType::VI_SURFACE_CREATE_INFO_NN,
+            p_next: ::std::ptr::null(),
+            flags: ViSurfaceCreateFlagsNN::default(),
+            window: ::std::ptr::null_mut(),
+        }
+    }
+}
+impl ViSurfaceCreateInfoNN {
+    pub fn builder<'a>() -> ViSurfaceCreateInfoNNBuilder<'a> {
+        ViSurfaceCreateInfoNNBuilder {
+            inner: ViSurfaceCreateInfoNN::default(),
+            marker: ::std::marker::PhantomData,
+        }
+    }
+}
+#[repr(transparent)]
+pub struct ViSurfaceCreateInfoNNBuilder<'a> {
+    inner: ViSurfaceCreateInfoNN,
+    marker: ::std::marker::PhantomData<&'a ()>,
+}
+pub unsafe trait ExtendsViSurfaceCreateInfoNN {}
+impl<'a> ::std::ops::Deref for ViSurfaceCreateInfoNNBuilder<'a> {
+    type Target = ViSurfaceCreateInfoNN;
+    fn deref(&self) -> &Self::Target {
+        &self.inner
+    }
+}
+impl<'a> ::std::ops::DerefMut for ViSurfaceCreateInfoNNBuilder<'a> {
+    fn deref_mut(&mut self) -> &mut Self::Target {
+        &mut self.inner
+    }
+}
+impl<'a> ViSurfaceCreateInfoNNBuilder<'a> {
+    pub fn flags(mut self, flags: ViSurfaceCreateFlagsNN) -> ViSurfaceCreateInfoNNBuilder<'a> {
+        self.inner.flags = flags;
+        self
+    }
+    pub fn window(mut self, window: *mut c_void) -> ViSurfaceCreateInfoNNBuilder<'a> {
+        self.inner.window = window;
+        self
+    }
+    #[doc = r" Prepends the given extension struct between the root and the first pointer. This"]
+    #[doc = r" method only exists on structs that can be passed to a function directly. Only"]
+    #[doc = r" valid extension structs can be pushed into the chain."]
+    #[doc = r" If the chain looks like `A -> B -> C`, and you call `builder.push_next(&mut D)`, then the"]
+    #[doc = r" chain will look like `A -> D -> B -> C`."]
+    pub fn push_next<T: ExtendsViSurfaceCreateInfoNN>(
+        mut self,
+        next: &'a mut T,
+    ) -> ViSurfaceCreateInfoNNBuilder<'a> {
+        unsafe {
+            let next_ptr = next as *mut T as *mut BaseOutStructure;
+            let last_next = ptr_chain_iter(next).last().unwrap();
+            (*last_next).p_next = self.inner.p_next as _;
+            self.inner.p_next = next_ptr as _;
+        }
+        self
+    }
+    #[doc = r" Calling build will **discard** all the lifetime information. Only call this if"]
+    #[doc = r" necessary! 
Builders implement `Deref` targeting their corresponding Vulkan struct,"] + #[doc = r" so references to builders can be passed directly to Vulkan functions."] + pub fn build(self) -> ViSurfaceCreateInfoNN { + self.inner + } +} +#[repr(C)] +#[derive(Copy, Clone, Debug)] +#[doc = ""] +pub struct WaylandSurfaceCreateInfoKHR { + pub s_type: StructureType, + pub p_next: *const c_void, + pub flags: WaylandSurfaceCreateFlagsKHR, + pub display: *mut wl_display, + pub surface: *mut wl_surface, +} +impl ::std::default::Default for WaylandSurfaceCreateInfoKHR { + fn default() -> WaylandSurfaceCreateInfoKHR { + WaylandSurfaceCreateInfoKHR { + s_type: StructureType::WAYLAND_SURFACE_CREATE_INFO_KHR, + p_next: ::std::ptr::null(), + flags: WaylandSurfaceCreateFlagsKHR::default(), + display: ::std::ptr::null_mut(), + surface: ::std::ptr::null_mut(), + } + } +} +impl WaylandSurfaceCreateInfoKHR { + pub fn builder<'a>() -> WaylandSurfaceCreateInfoKHRBuilder<'a> { + WaylandSurfaceCreateInfoKHRBuilder { + inner: WaylandSurfaceCreateInfoKHR::default(), + marker: ::std::marker::PhantomData, + } + } +} +#[repr(transparent)] +pub struct WaylandSurfaceCreateInfoKHRBuilder<'a> { + inner: WaylandSurfaceCreateInfoKHR, + marker: ::std::marker::PhantomData<&'a ()>, +} +pub unsafe trait ExtendsWaylandSurfaceCreateInfoKHR {} +impl<'a> ::std::ops::Deref for WaylandSurfaceCreateInfoKHRBuilder<'a> { + type Target = WaylandSurfaceCreateInfoKHR; + fn deref(&self) -> &Self::Target { + &self.inner + } +} +impl<'a> ::std::ops::DerefMut for WaylandSurfaceCreateInfoKHRBuilder<'a> { + fn deref_mut(&mut self) -> &mut Self::Target { + &mut self.inner + } +} +impl<'a> WaylandSurfaceCreateInfoKHRBuilder<'a> { + pub fn flags( + mut self, + flags: WaylandSurfaceCreateFlagsKHR, + ) -> WaylandSurfaceCreateInfoKHRBuilder<'a> { + self.inner.flags = flags; + self + } + pub fn display(mut self, display: *mut wl_display) -> WaylandSurfaceCreateInfoKHRBuilder<'a> { + self.inner.display = display; + self + } + pub fn surface(mut self, surface: *mut wl_surface) -> WaylandSurfaceCreateInfoKHRBuilder<'a> { + self.inner.surface = surface; + self + } + #[doc = r" Prepends the given extension struct between the root and the first pointer. This"] + #[doc = r" method only exists on structs that can be passed to a function directly. Only"] + #[doc = r" valid extension structs can be pushed into the chain."] + #[doc = r" If the chain looks like `A -> B -> C`, and you call `builder.push_next(&mut D)`, then the"] + #[doc = r" chain will look like `A -> D -> B -> C`."] + pub fn push_next( + mut self, + next: &'a mut T, + ) -> WaylandSurfaceCreateInfoKHRBuilder<'a> { + unsafe { + let next_ptr = next as *mut T as *mut BaseOutStructure; + let last_next = ptr_chain_iter(next).last().unwrap(); + (*last_next).p_next = self.inner.p_next as _; + self.inner.p_next = next_ptr as _; + } + self + } + #[doc = r" Calling build will **discard** all the lifetime information. Only call this if"] + #[doc = r" necessary! 
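// Illustrative sketch, not part of this patch: Wayland needs both the compositor
// connection and the surface proxy, as raw pointers from the windowing layer.
fn wayland_surface_info<'a>(
    display: *mut wl_display,
    surface: *mut wl_surface,
) -> WaylandSurfaceCreateInfoKHRBuilder<'a> {
    WaylandSurfaceCreateInfoKHR::builder()
        .display(display)
        .surface(surface)
}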
Builders implement `Deref` targeting their corresponding Vulkan struct,"] + #[doc = r" so references to builders can be passed directly to Vulkan functions."] + pub fn build(self) -> WaylandSurfaceCreateInfoKHR { + self.inner + } +} +#[repr(C)] +#[derive(Copy, Clone, Debug)] +#[doc = ""] +pub struct Win32SurfaceCreateInfoKHR { + pub s_type: StructureType, + pub p_next: *const c_void, + pub flags: Win32SurfaceCreateFlagsKHR, + pub hinstance: HINSTANCE, + pub hwnd: HWND, +} +impl ::std::default::Default for Win32SurfaceCreateInfoKHR { + fn default() -> Win32SurfaceCreateInfoKHR { + Win32SurfaceCreateInfoKHR { + s_type: StructureType::WIN32_SURFACE_CREATE_INFO_KHR, + p_next: ::std::ptr::null(), + flags: Win32SurfaceCreateFlagsKHR::default(), + hinstance: unsafe { ::std::mem::zeroed() }, + hwnd: unsafe { ::std::mem::zeroed() }, + } + } +} +impl Win32SurfaceCreateInfoKHR { + pub fn builder<'a>() -> Win32SurfaceCreateInfoKHRBuilder<'a> { + Win32SurfaceCreateInfoKHRBuilder { + inner: Win32SurfaceCreateInfoKHR::default(), + marker: ::std::marker::PhantomData, + } + } +} +#[repr(transparent)] +pub struct Win32SurfaceCreateInfoKHRBuilder<'a> { + inner: Win32SurfaceCreateInfoKHR, + marker: ::std::marker::PhantomData<&'a ()>, +} +pub unsafe trait ExtendsWin32SurfaceCreateInfoKHR {} +impl<'a> ::std::ops::Deref for Win32SurfaceCreateInfoKHRBuilder<'a> { + type Target = Win32SurfaceCreateInfoKHR; + fn deref(&self) -> &Self::Target { + &self.inner + } +} +impl<'a> ::std::ops::DerefMut for Win32SurfaceCreateInfoKHRBuilder<'a> { + fn deref_mut(&mut self) -> &mut Self::Target { + &mut self.inner + } +} +impl<'a> Win32SurfaceCreateInfoKHRBuilder<'a> { + pub fn flags( + mut self, + flags: Win32SurfaceCreateFlagsKHR, + ) -> Win32SurfaceCreateInfoKHRBuilder<'a> { + self.inner.flags = flags; + self + } + pub fn hinstance(mut self, hinstance: HINSTANCE) -> Win32SurfaceCreateInfoKHRBuilder<'a> { + self.inner.hinstance = hinstance; + self + } + pub fn hwnd(mut self, hwnd: HWND) -> Win32SurfaceCreateInfoKHRBuilder<'a> { + self.inner.hwnd = hwnd; + self + } + #[doc = r" Prepends the given extension struct between the root and the first pointer. This"] + #[doc = r" method only exists on structs that can be passed to a function directly. Only"] + #[doc = r" valid extension structs can be pushed into the chain."] + #[doc = r" If the chain looks like `A -> B -> C`, and you call `builder.push_next(&mut D)`, then the"] + #[doc = r" chain will look like `A -> D -> B -> C`."] + pub fn push_next( + mut self, + next: &'a mut T, + ) -> Win32SurfaceCreateInfoKHRBuilder<'a> { + unsafe { + let next_ptr = next as *mut T as *mut BaseOutStructure; + let last_next = ptr_chain_iter(next).last().unwrap(); + (*last_next).p_next = self.inner.p_next as _; + self.inner.p_next = next_ptr as _; + } + self + } + #[doc = r" Calling build will **discard** all the lifetime information. Only call this if"] + #[doc = r" necessary! 
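// Illustrative sketch, not part of this patch: on Windows the surface is tied to an
// HINSTANCE/HWND pair obtained from the window system.
fn win32_surface_info<'a>(
    hinstance: HINSTANCE,
    hwnd: HWND,
) -> Win32SurfaceCreateInfoKHRBuilder<'a> {
    Win32SurfaceCreateInfoKHR::builder()
        .hinstance(hinstance)
        .hwnd(hwnd)
}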
Builders implement `Deref` targeting their corresponding Vulkan struct,"] + #[doc = r" so references to builders can be passed directly to Vulkan functions."] + pub fn build(self) -> Win32SurfaceCreateInfoKHR { + self.inner + } +} +#[repr(C)] +#[derive(Copy, Clone, Debug)] +#[doc = ""] +pub struct XlibSurfaceCreateInfoKHR { + pub s_type: StructureType, + pub p_next: *const c_void, + pub flags: XlibSurfaceCreateFlagsKHR, + pub dpy: *mut Display, + pub window: Window, +} +impl ::std::default::Default for XlibSurfaceCreateInfoKHR { + fn default() -> XlibSurfaceCreateInfoKHR { + XlibSurfaceCreateInfoKHR { + s_type: StructureType::XLIB_SURFACE_CREATE_INFO_KHR, + p_next: ::std::ptr::null(), + flags: XlibSurfaceCreateFlagsKHR::default(), + dpy: ::std::ptr::null_mut(), + window: Window::default(), + } + } +} +impl XlibSurfaceCreateInfoKHR { + pub fn builder<'a>() -> XlibSurfaceCreateInfoKHRBuilder<'a> { + XlibSurfaceCreateInfoKHRBuilder { + inner: XlibSurfaceCreateInfoKHR::default(), + marker: ::std::marker::PhantomData, + } + } +} +#[repr(transparent)] +pub struct XlibSurfaceCreateInfoKHRBuilder<'a> { + inner: XlibSurfaceCreateInfoKHR, + marker: ::std::marker::PhantomData<&'a ()>, +} +pub unsafe trait ExtendsXlibSurfaceCreateInfoKHR {} +impl<'a> ::std::ops::Deref for XlibSurfaceCreateInfoKHRBuilder<'a> { + type Target = XlibSurfaceCreateInfoKHR; + fn deref(&self) -> &Self::Target { + &self.inner + } +} +impl<'a> ::std::ops::DerefMut for XlibSurfaceCreateInfoKHRBuilder<'a> { + fn deref_mut(&mut self) -> &mut Self::Target { + &mut self.inner + } +} +impl<'a> XlibSurfaceCreateInfoKHRBuilder<'a> { + pub fn flags( + mut self, + flags: XlibSurfaceCreateFlagsKHR, + ) -> XlibSurfaceCreateInfoKHRBuilder<'a> { + self.inner.flags = flags; + self + } + pub fn dpy(mut self, dpy: *mut Display) -> XlibSurfaceCreateInfoKHRBuilder<'a> { + self.inner.dpy = dpy; + self + } + pub fn window(mut self, window: Window) -> XlibSurfaceCreateInfoKHRBuilder<'a> { + self.inner.window = window; + self + } + #[doc = r" Prepends the given extension struct between the root and the first pointer. This"] + #[doc = r" method only exists on structs that can be passed to a function directly. Only"] + #[doc = r" valid extension structs can be pushed into the chain."] + #[doc = r" If the chain looks like `A -> B -> C`, and you call `builder.push_next(&mut D)`, then the"] + #[doc = r" chain will look like `A -> D -> B -> C`."] + pub fn push_next( + mut self, + next: &'a mut T, + ) -> XlibSurfaceCreateInfoKHRBuilder<'a> { + unsafe { + let next_ptr = next as *mut T as *mut BaseOutStructure; + let last_next = ptr_chain_iter(next).last().unwrap(); + (*last_next).p_next = self.inner.p_next as _; + self.inner.p_next = next_ptr as _; + } + self + } + #[doc = r" Calling build will **discard** all the lifetime information. Only call this if"] + #[doc = r" necessary! 
Builders implement `Deref` targeting their corresponding Vulkan struct,"] + #[doc = r" so references to builders can be passed directly to Vulkan functions."] + pub fn build(self) -> XlibSurfaceCreateInfoKHR { + self.inner + } +} +#[repr(C)] +#[derive(Copy, Clone, Debug)] +#[doc = ""] +pub struct XcbSurfaceCreateInfoKHR { + pub s_type: StructureType, + pub p_next: *const c_void, + pub flags: XcbSurfaceCreateFlagsKHR, + pub connection: *mut xcb_connection_t, + pub window: xcb_window_t, +} +impl ::std::default::Default for XcbSurfaceCreateInfoKHR { + fn default() -> XcbSurfaceCreateInfoKHR { + XcbSurfaceCreateInfoKHR { + s_type: StructureType::XCB_SURFACE_CREATE_INFO_KHR, + p_next: ::std::ptr::null(), + flags: XcbSurfaceCreateFlagsKHR::default(), + connection: ::std::ptr::null_mut(), + window: xcb_window_t::default(), + } + } +} +impl XcbSurfaceCreateInfoKHR { + pub fn builder<'a>() -> XcbSurfaceCreateInfoKHRBuilder<'a> { + XcbSurfaceCreateInfoKHRBuilder { + inner: XcbSurfaceCreateInfoKHR::default(), + marker: ::std::marker::PhantomData, + } + } +} +#[repr(transparent)] +pub struct XcbSurfaceCreateInfoKHRBuilder<'a> { + inner: XcbSurfaceCreateInfoKHR, + marker: ::std::marker::PhantomData<&'a ()>, +} +pub unsafe trait ExtendsXcbSurfaceCreateInfoKHR {} +impl<'a> ::std::ops::Deref for XcbSurfaceCreateInfoKHRBuilder<'a> { + type Target = XcbSurfaceCreateInfoKHR; + fn deref(&self) -> &Self::Target { + &self.inner + } +} +impl<'a> ::std::ops::DerefMut for XcbSurfaceCreateInfoKHRBuilder<'a> { + fn deref_mut(&mut self) -> &mut Self::Target { + &mut self.inner + } +} +impl<'a> XcbSurfaceCreateInfoKHRBuilder<'a> { + pub fn flags(mut self, flags: XcbSurfaceCreateFlagsKHR) -> XcbSurfaceCreateInfoKHRBuilder<'a> { + self.inner.flags = flags; + self + } + pub fn connection( + mut self, + connection: *mut xcb_connection_t, + ) -> XcbSurfaceCreateInfoKHRBuilder<'a> { + self.inner.connection = connection; + self + } + pub fn window(mut self, window: xcb_window_t) -> XcbSurfaceCreateInfoKHRBuilder<'a> { + self.inner.window = window; + self + } + #[doc = r" Prepends the given extension struct between the root and the first pointer. This"] + #[doc = r" method only exists on structs that can be passed to a function directly. Only"] + #[doc = r" valid extension structs can be pushed into the chain."] + #[doc = r" If the chain looks like `A -> B -> C`, and you call `builder.push_next(&mut D)`, then the"] + #[doc = r" chain will look like `A -> D -> B -> C`."] + pub fn push_next( + mut self, + next: &'a mut T, + ) -> XcbSurfaceCreateInfoKHRBuilder<'a> { + unsafe { + let next_ptr = next as *mut T as *mut BaseOutStructure; + let last_next = ptr_chain_iter(next).last().unwrap(); + (*last_next).p_next = self.inner.p_next as _; + self.inner.p_next = next_ptr as _; + } + self + } + #[doc = r" Calling build will **discard** all the lifetime information. Only call this if"] + #[doc = r" necessary! 
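// Illustrative sketch, not part of this patch: the XCB variant mirrors the Xlib one
// above, trading Display/Window for xcb_connection_t/xcb_window_t.
fn xcb_surface_info<'a>(
    connection: *mut xcb_connection_t,
    window: xcb_window_t,
) -> XcbSurfaceCreateInfoKHRBuilder<'a> {
    XcbSurfaceCreateInfoKHR::builder()
        .connection(connection)
        .window(window)
}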
Builders implement `Deref` targeting their corresponding Vulkan struct,"] + #[doc = r" so references to builders can be passed directly to Vulkan functions."] + pub fn build(self) -> XcbSurfaceCreateInfoKHR { + self.inner + } +} +#[repr(C)] +#[derive(Copy, Clone, Debug)] +#[doc = ""] +pub struct ImagePipeSurfaceCreateInfoFUCHSIA { + pub s_type: StructureType, + pub p_next: *const c_void, + pub flags: ImagePipeSurfaceCreateFlagsFUCHSIA, + pub image_pipe_handle: zx_handle_t, +} +impl ::std::default::Default for ImagePipeSurfaceCreateInfoFUCHSIA { + fn default() -> ImagePipeSurfaceCreateInfoFUCHSIA { + ImagePipeSurfaceCreateInfoFUCHSIA { + s_type: StructureType::IMAGEPIPE_SURFACE_CREATE_INFO_FUCHSIA, + p_next: ::std::ptr::null(), + flags: ImagePipeSurfaceCreateFlagsFUCHSIA::default(), + image_pipe_handle: zx_handle_t::default(), + } + } +} +impl ImagePipeSurfaceCreateInfoFUCHSIA { + pub fn builder<'a>() -> ImagePipeSurfaceCreateInfoFUCHSIABuilder<'a> { + ImagePipeSurfaceCreateInfoFUCHSIABuilder { + inner: ImagePipeSurfaceCreateInfoFUCHSIA::default(), + marker: ::std::marker::PhantomData, + } + } +} +#[repr(transparent)] +pub struct ImagePipeSurfaceCreateInfoFUCHSIABuilder<'a> { + inner: ImagePipeSurfaceCreateInfoFUCHSIA, + marker: ::std::marker::PhantomData<&'a ()>, +} +pub unsafe trait ExtendsImagePipeSurfaceCreateInfoFUCHSIA {} +impl<'a> ::std::ops::Deref for ImagePipeSurfaceCreateInfoFUCHSIABuilder<'a> { + type Target = ImagePipeSurfaceCreateInfoFUCHSIA; + fn deref(&self) -> &Self::Target { + &self.inner + } +} +impl<'a> ::std::ops::DerefMut for ImagePipeSurfaceCreateInfoFUCHSIABuilder<'a> { + fn deref_mut(&mut self) -> &mut Self::Target { + &mut self.inner + } +} +impl<'a> ImagePipeSurfaceCreateInfoFUCHSIABuilder<'a> { + pub fn flags( + mut self, + flags: ImagePipeSurfaceCreateFlagsFUCHSIA, + ) -> ImagePipeSurfaceCreateInfoFUCHSIABuilder<'a> { + self.inner.flags = flags; + self + } + pub fn image_pipe_handle( + mut self, + image_pipe_handle: zx_handle_t, + ) -> ImagePipeSurfaceCreateInfoFUCHSIABuilder<'a> { + self.inner.image_pipe_handle = image_pipe_handle; + self + } + #[doc = r" Prepends the given extension struct between the root and the first pointer. This"] + #[doc = r" method only exists on structs that can be passed to a function directly. Only"] + #[doc = r" valid extension structs can be pushed into the chain."] + #[doc = r" If the chain looks like `A -> B -> C`, and you call `builder.push_next(&mut D)`, then the"] + #[doc = r" chain will look like `A -> D -> B -> C`."] + pub fn push_next( + mut self, + next: &'a mut T, + ) -> ImagePipeSurfaceCreateInfoFUCHSIABuilder<'a> { + unsafe { + let next_ptr = next as *mut T as *mut BaseOutStructure; + let last_next = ptr_chain_iter(next).last().unwrap(); + (*last_next).p_next = self.inner.p_next as _; + self.inner.p_next = next_ptr as _; + } + self + } + #[doc = r" Calling build will **discard** all the lifetime information. Only call this if"] + #[doc = r" necessary! 
Builders implement `Deref` targeting their corresponding Vulkan struct,"] + #[doc = r" so references to builders can be passed directly to Vulkan functions."] + pub fn build(self) -> ImagePipeSurfaceCreateInfoFUCHSIA { + self.inner + } +} +#[repr(C)] +#[derive(Copy, Clone, Default, Debug)] +#[doc = ""] +pub struct SurfaceFormatKHR { + pub format: Format, + pub color_space: ColorSpaceKHR, +} +impl SurfaceFormatKHR { + pub fn builder<'a>() -> SurfaceFormatKHRBuilder<'a> { + SurfaceFormatKHRBuilder { + inner: SurfaceFormatKHR::default(), + marker: ::std::marker::PhantomData, + } + } +} +#[repr(transparent)] +pub struct SurfaceFormatKHRBuilder<'a> { + inner: SurfaceFormatKHR, + marker: ::std::marker::PhantomData<&'a ()>, +} +impl<'a> ::std::ops::Deref for SurfaceFormatKHRBuilder<'a> { + type Target = SurfaceFormatKHR; + fn deref(&self) -> &Self::Target { + &self.inner + } +} +impl<'a> ::std::ops::DerefMut for SurfaceFormatKHRBuilder<'a> { + fn deref_mut(&mut self) -> &mut Self::Target { + &mut self.inner + } +} +impl<'a> SurfaceFormatKHRBuilder<'a> { + pub fn format(mut self, format: Format) -> SurfaceFormatKHRBuilder<'a> { + self.inner.format = format; + self + } + pub fn color_space(mut self, color_space: ColorSpaceKHR) -> SurfaceFormatKHRBuilder<'a> { + self.inner.color_space = color_space; + self + } + #[doc = r" Calling build will **discard** all the lifetime information. Only call this if"] + #[doc = r" necessary! Builders implement `Deref` targeting their corresponding Vulkan struct,"] + #[doc = r" so references to builders can be passed directly to Vulkan functions."] + pub fn build(self) -> SurfaceFormatKHR { + self.inner + } +} +#[repr(C)] +#[derive(Copy, Clone, Debug)] +#[doc = ""] +pub struct SwapchainCreateInfoKHR { + pub s_type: StructureType, + pub p_next: *const c_void, + pub flags: SwapchainCreateFlagsKHR, + pub surface: SurfaceKHR, + pub min_image_count: u32, + pub image_format: Format, + pub image_color_space: ColorSpaceKHR, + pub image_extent: Extent2D, + pub image_array_layers: u32, + pub image_usage: ImageUsageFlags, + pub image_sharing_mode: SharingMode, + pub queue_family_index_count: u32, + pub p_queue_family_indices: *const u32, + pub pre_transform: SurfaceTransformFlagsKHR, + pub composite_alpha: CompositeAlphaFlagsKHR, + pub present_mode: PresentModeKHR, + pub clipped: Bool32, + pub old_swapchain: SwapchainKHR, +} +impl ::std::default::Default for SwapchainCreateInfoKHR { + fn default() -> SwapchainCreateInfoKHR { + SwapchainCreateInfoKHR { + s_type: StructureType::SWAPCHAIN_CREATE_INFO_KHR, + p_next: ::std::ptr::null(), + flags: SwapchainCreateFlagsKHR::default(), + surface: SurfaceKHR::default(), + min_image_count: u32::default(), + image_format: Format::default(), + image_color_space: ColorSpaceKHR::default(), + image_extent: Extent2D::default(), + image_array_layers: u32::default(), + image_usage: ImageUsageFlags::default(), + image_sharing_mode: SharingMode::default(), + queue_family_index_count: u32::default(), + p_queue_family_indices: ::std::ptr::null(), + pre_transform: SurfaceTransformFlagsKHR::default(), + composite_alpha: CompositeAlphaFlagsKHR::default(), + present_mode: PresentModeKHR::default(), + clipped: Bool32::default(), + old_swapchain: SwapchainKHR::default(), + } + } +} +impl SwapchainCreateInfoKHR { + pub fn builder<'a>() -> SwapchainCreateInfoKHRBuilder<'a> { + SwapchainCreateInfoKHRBuilder { + inner: SwapchainCreateInfoKHR::default(), + marker: ::std::marker::PhantomData, + } + } +} +#[repr(transparent)] +pub struct 
SwapchainCreateInfoKHRBuilder<'a> { + inner: SwapchainCreateInfoKHR, + marker: ::std::marker::PhantomData<&'a ()>, +} +pub unsafe trait ExtendsSwapchainCreateInfoKHR {} +impl<'a> ::std::ops::Deref for SwapchainCreateInfoKHRBuilder<'a> { + type Target = SwapchainCreateInfoKHR; + fn deref(&self) -> &Self::Target { + &self.inner + } +} +impl<'a> ::std::ops::DerefMut for SwapchainCreateInfoKHRBuilder<'a> { + fn deref_mut(&mut self) -> &mut Self::Target { + &mut self.inner + } +} +impl<'a> SwapchainCreateInfoKHRBuilder<'a> { + pub fn flags(mut self, flags: SwapchainCreateFlagsKHR) -> SwapchainCreateInfoKHRBuilder<'a> { + self.inner.flags = flags; + self + } + pub fn surface(mut self, surface: SurfaceKHR) -> SwapchainCreateInfoKHRBuilder<'a> { + self.inner.surface = surface; + self + } + pub fn min_image_count(mut self, min_image_count: u32) -> SwapchainCreateInfoKHRBuilder<'a> { + self.inner.min_image_count = min_image_count; + self + } + pub fn image_format(mut self, image_format: Format) -> SwapchainCreateInfoKHRBuilder<'a> { + self.inner.image_format = image_format; + self + } + pub fn image_color_space( + mut self, + image_color_space: ColorSpaceKHR, + ) -> SwapchainCreateInfoKHRBuilder<'a> { + self.inner.image_color_space = image_color_space; + self + } + pub fn image_extent(mut self, image_extent: Extent2D) -> SwapchainCreateInfoKHRBuilder<'a> { + self.inner.image_extent = image_extent; + self + } + pub fn image_array_layers( + mut self, + image_array_layers: u32, + ) -> SwapchainCreateInfoKHRBuilder<'a> { + self.inner.image_array_layers = image_array_layers; + self + } + pub fn image_usage( + mut self, + image_usage: ImageUsageFlags, + ) -> SwapchainCreateInfoKHRBuilder<'a> { + self.inner.image_usage = image_usage; + self + } + pub fn image_sharing_mode( + mut self, + image_sharing_mode: SharingMode, + ) -> SwapchainCreateInfoKHRBuilder<'a> { + self.inner.image_sharing_mode = image_sharing_mode; + self + } + pub fn queue_family_indices( + mut self, + queue_family_indices: &'a [u32], + ) -> SwapchainCreateInfoKHRBuilder<'a> { + self.inner.queue_family_index_count = queue_family_indices.len() as _; + self.inner.p_queue_family_indices = queue_family_indices.as_ptr(); + self + } + pub fn pre_transform( + mut self, + pre_transform: SurfaceTransformFlagsKHR, + ) -> SwapchainCreateInfoKHRBuilder<'a> { + self.inner.pre_transform = pre_transform; + self + } + pub fn composite_alpha( + mut self, + composite_alpha: CompositeAlphaFlagsKHR, + ) -> SwapchainCreateInfoKHRBuilder<'a> { + self.inner.composite_alpha = composite_alpha; + self + } + pub fn present_mode( + mut self, + present_mode: PresentModeKHR, + ) -> SwapchainCreateInfoKHRBuilder<'a> { + self.inner.present_mode = present_mode; + self + } + pub fn clipped(mut self, clipped: bool) -> SwapchainCreateInfoKHRBuilder<'a> { + self.inner.clipped = clipped.into(); + self + } + pub fn old_swapchain( + mut self, + old_swapchain: SwapchainKHR, + ) -> SwapchainCreateInfoKHRBuilder<'a> { + self.inner.old_swapchain = old_swapchain; + self + } + #[doc = r" Prepends the given extension struct between the root and the first pointer. This"] + #[doc = r" method only exists on structs that can be passed to a function directly. 
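// Illustrative sketch, not part of this patch: a minimal SwapchainCreateInfoKHR
// assembled from previously queried surface data. The flag and mode choices here
// are common defaults, not requirements.
fn basic_swapchain_info<'a>(
    surface: SurfaceKHR,
    format: SurfaceFormatKHR,
    caps: &SurfaceCapabilitiesKHR,
) -> SwapchainCreateInfoKHRBuilder<'a> {
    SwapchainCreateInfoKHR::builder()
        .surface(surface)
        .min_image_count(caps.min_image_count + 1)
        .image_format(format.format)
        .image_color_space(format.color_space)
        .image_extent(caps.current_extent)
        .image_array_layers(1)
        .image_usage(ImageUsageFlags::COLOR_ATTACHMENT)
        .image_sharing_mode(SharingMode::EXCLUSIVE) // no queue_family_indices needed
        .pre_transform(caps.current_transform)
        .composite_alpha(CompositeAlphaFlagsKHR::OPAQUE)
        .present_mode(PresentModeKHR::FIFO)
        .clipped(true)
}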
Only"] + #[doc = r" valid extension structs can be pushed into the chain."] + #[doc = r" If the chain looks like `A -> B -> C`, and you call `builder.push_next(&mut D)`, then the"] + #[doc = r" chain will look like `A -> D -> B -> C`."] + pub fn push_next( + mut self, + next: &'a mut T, + ) -> SwapchainCreateInfoKHRBuilder<'a> { + unsafe { + let next_ptr = next as *mut T as *mut BaseOutStructure; + let last_next = ptr_chain_iter(next).last().unwrap(); + (*last_next).p_next = self.inner.p_next as _; + self.inner.p_next = next_ptr as _; + } + self + } + #[doc = r" Calling build will **discard** all the lifetime information. Only call this if"] + #[doc = r" necessary! Builders implement `Deref` targeting their corresponding Vulkan struct,"] + #[doc = r" so references to builders can be passed directly to Vulkan functions."] + pub fn build(self) -> SwapchainCreateInfoKHR { + self.inner + } +} +#[repr(C)] +#[derive(Copy, Clone, Debug)] +#[doc = ""] +pub struct PresentInfoKHR { + pub s_type: StructureType, + pub p_next: *const c_void, + pub wait_semaphore_count: u32, + pub p_wait_semaphores: *const Semaphore, + pub swapchain_count: u32, + pub p_swapchains: *const SwapchainKHR, + pub p_image_indices: *const u32, + pub p_results: *mut Result, +} +impl ::std::default::Default for PresentInfoKHR { + fn default() -> PresentInfoKHR { + PresentInfoKHR { + s_type: StructureType::PRESENT_INFO_KHR, + p_next: ::std::ptr::null(), + wait_semaphore_count: u32::default(), + p_wait_semaphores: ::std::ptr::null(), + swapchain_count: u32::default(), + p_swapchains: ::std::ptr::null(), + p_image_indices: ::std::ptr::null(), + p_results: ::std::ptr::null_mut(), + } + } +} +impl PresentInfoKHR { + pub fn builder<'a>() -> PresentInfoKHRBuilder<'a> { + PresentInfoKHRBuilder { + inner: PresentInfoKHR::default(), + marker: ::std::marker::PhantomData, + } + } +} +#[repr(transparent)] +pub struct PresentInfoKHRBuilder<'a> { + inner: PresentInfoKHR, + marker: ::std::marker::PhantomData<&'a ()>, +} +pub unsafe trait ExtendsPresentInfoKHR {} +impl<'a> ::std::ops::Deref for PresentInfoKHRBuilder<'a> { + type Target = PresentInfoKHR; + fn deref(&self) -> &Self::Target { + &self.inner + } +} +impl<'a> ::std::ops::DerefMut for PresentInfoKHRBuilder<'a> { + fn deref_mut(&mut self) -> &mut Self::Target { + &mut self.inner + } +} +impl<'a> PresentInfoKHRBuilder<'a> { + pub fn wait_semaphores( + mut self, + wait_semaphores: &'a [Semaphore], + ) -> PresentInfoKHRBuilder<'a> { + self.inner.wait_semaphore_count = wait_semaphores.len() as _; + self.inner.p_wait_semaphores = wait_semaphores.as_ptr(); + self + } + pub fn swapchains(mut self, swapchains: &'a [SwapchainKHR]) -> PresentInfoKHRBuilder<'a> { + self.inner.swapchain_count = swapchains.len() as _; + self.inner.p_swapchains = swapchains.as_ptr(); + self + } + pub fn image_indices(mut self, image_indices: &'a [u32]) -> PresentInfoKHRBuilder<'a> { + self.inner.swapchain_count = image_indices.len() as _; + self.inner.p_image_indices = image_indices.as_ptr(); + self + } + pub fn results(mut self, results: &'a mut [Result]) -> PresentInfoKHRBuilder<'a> { + self.inner.swapchain_count = results.len() as _; + self.inner.p_results = results.as_mut_ptr(); + self + } + #[doc = r" Prepends the given extension struct between the root and the first pointer. This"] + #[doc = r" method only exists on structs that can be passed to a function directly. 
Only"] + #[doc = r" valid extension structs can be pushed into the chain."] + #[doc = r" If the chain looks like `A -> B -> C`, and you call `builder.push_next(&mut D)`, then the"] + #[doc = r" chain will look like `A -> D -> B -> C`."] + pub fn push_next( + mut self, + next: &'a mut T, + ) -> PresentInfoKHRBuilder<'a> { + unsafe { + let next_ptr = next as *mut T as *mut BaseOutStructure; + let last_next = ptr_chain_iter(next).last().unwrap(); + (*last_next).p_next = self.inner.p_next as _; + self.inner.p_next = next_ptr as _; + } + self + } + #[doc = r" Calling build will **discard** all the lifetime information. Only call this if"] + #[doc = r" necessary! Builders implement `Deref` targeting their corresponding Vulkan struct,"] + #[doc = r" so references to builders can be passed directly to Vulkan functions."] + pub fn build(self) -> PresentInfoKHR { + self.inner + } +} +#[repr(C)] +#[derive(Copy, Clone)] +#[doc = ""] +pub struct DebugReportCallbackCreateInfoEXT { + pub s_type: StructureType, + pub p_next: *const c_void, + pub flags: DebugReportFlagsEXT, + pub pfn_callback: PFN_vkDebugReportCallbackEXT, + pub p_user_data: *mut c_void, +} +impl fmt::Debug for DebugReportCallbackCreateInfoEXT { + fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result { + fmt.debug_struct("DebugReportCallbackCreateInfoEXT") + .field("s_type", &self.s_type) + .field("p_next", &self.p_next) + .field("flags", &self.flags) + .field("pfn_callback", &(self.pfn_callback.map(|x| x as *const ()))) + .field("p_user_data", &self.p_user_data) + .finish() + } +} +impl ::std::default::Default for DebugReportCallbackCreateInfoEXT { + fn default() -> DebugReportCallbackCreateInfoEXT { + DebugReportCallbackCreateInfoEXT { + s_type: StructureType::DEBUG_REPORT_CALLBACK_CREATE_INFO_EXT, + p_next: ::std::ptr::null(), + flags: DebugReportFlagsEXT::default(), + pfn_callback: PFN_vkDebugReportCallbackEXT::default(), + p_user_data: ::std::ptr::null_mut(), + } + } +} +impl DebugReportCallbackCreateInfoEXT { + pub fn builder<'a>() -> DebugReportCallbackCreateInfoEXTBuilder<'a> { + DebugReportCallbackCreateInfoEXTBuilder { + inner: DebugReportCallbackCreateInfoEXT::default(), + marker: ::std::marker::PhantomData, + } + } +} +#[repr(transparent)] +pub struct DebugReportCallbackCreateInfoEXTBuilder<'a> { + inner: DebugReportCallbackCreateInfoEXT, + marker: ::std::marker::PhantomData<&'a ()>, +} +unsafe impl ExtendsInstanceCreateInfo for DebugReportCallbackCreateInfoEXTBuilder<'_> {} +unsafe impl ExtendsInstanceCreateInfo for DebugReportCallbackCreateInfoEXT {} +impl<'a> ::std::ops::Deref for DebugReportCallbackCreateInfoEXTBuilder<'a> { + type Target = DebugReportCallbackCreateInfoEXT; + fn deref(&self) -> &Self::Target { + &self.inner + } +} +impl<'a> ::std::ops::DerefMut for DebugReportCallbackCreateInfoEXTBuilder<'a> { + fn deref_mut(&mut self) -> &mut Self::Target { + &mut self.inner + } +} +impl<'a> DebugReportCallbackCreateInfoEXTBuilder<'a> { + pub fn flags( + mut self, + flags: DebugReportFlagsEXT, + ) -> DebugReportCallbackCreateInfoEXTBuilder<'a> { + self.inner.flags = flags; + self + } + pub fn pfn_callback( + mut self, + pfn_callback: PFN_vkDebugReportCallbackEXT, + ) -> DebugReportCallbackCreateInfoEXTBuilder<'a> { + self.inner.pfn_callback = pfn_callback; + self + } + pub fn user_data( + mut self, + user_data: *mut c_void, + ) -> DebugReportCallbackCreateInfoEXTBuilder<'a> { + self.inner.p_user_data = user_data; + self + } + #[doc = r" Calling build will **discard** all the lifetime information. 
Only call this if"] + #[doc = r" necessary! Builders implement `Deref` targeting their corresponding Vulkan struct,"] + #[doc = r" so references to builders can be passed directly to Vulkan functions."] + pub fn build(self) -> DebugReportCallbackCreateInfoEXT { + self.inner + } +} +#[repr(C)] +#[derive(Copy, Clone, Debug)] +#[doc = ""] +pub struct ValidationFlagsEXT { + pub s_type: StructureType, + pub p_next: *const c_void, + pub disabled_validation_check_count: u32, + pub p_disabled_validation_checks: *const ValidationCheckEXT, +} +impl ::std::default::Default for ValidationFlagsEXT { + fn default() -> ValidationFlagsEXT { + ValidationFlagsEXT { + s_type: StructureType::VALIDATION_FLAGS_EXT, + p_next: ::std::ptr::null(), + disabled_validation_check_count: u32::default(), + p_disabled_validation_checks: ::std::ptr::null(), + } + } +} +impl ValidationFlagsEXT { + pub fn builder<'a>() -> ValidationFlagsEXTBuilder<'a> { + ValidationFlagsEXTBuilder { + inner: ValidationFlagsEXT::default(), + marker: ::std::marker::PhantomData, + } + } +} +#[repr(transparent)] +pub struct ValidationFlagsEXTBuilder<'a> { + inner: ValidationFlagsEXT, + marker: ::std::marker::PhantomData<&'a ()>, +} +unsafe impl ExtendsInstanceCreateInfo for ValidationFlagsEXTBuilder<'_> {} +unsafe impl ExtendsInstanceCreateInfo for ValidationFlagsEXT {} +impl<'a> ::std::ops::Deref for ValidationFlagsEXTBuilder<'a> { + type Target = ValidationFlagsEXT; + fn deref(&self) -> &Self::Target { + &self.inner + } +} +impl<'a> ::std::ops::DerefMut for ValidationFlagsEXTBuilder<'a> { + fn deref_mut(&mut self) -> &mut Self::Target { + &mut self.inner + } +} +impl<'a> ValidationFlagsEXTBuilder<'a> { + pub fn disabled_validation_checks( + mut self, + disabled_validation_checks: &'a [ValidationCheckEXT], + ) -> ValidationFlagsEXTBuilder<'a> { + self.inner.disabled_validation_check_count = disabled_validation_checks.len() as _; + self.inner.p_disabled_validation_checks = disabled_validation_checks.as_ptr(); + self + } + #[doc = r" Calling build will **discard** all the lifetime information. Only call this if"] + #[doc = r" necessary! 
Builders implement `Deref` targeting their corresponding Vulkan struct,"] + #[doc = r" so references to builders can be passed directly to Vulkan functions."] + pub fn build(self) -> ValidationFlagsEXT { + self.inner + } +} +#[repr(C)] +#[derive(Copy, Clone, Debug)] +#[doc = ""] +pub struct ValidationFeaturesEXT { + pub s_type: StructureType, + pub p_next: *const c_void, + pub enabled_validation_feature_count: u32, + pub p_enabled_validation_features: *const ValidationFeatureEnableEXT, + pub disabled_validation_feature_count: u32, + pub p_disabled_validation_features: *const ValidationFeatureDisableEXT, +} +impl ::std::default::Default for ValidationFeaturesEXT { + fn default() -> ValidationFeaturesEXT { + ValidationFeaturesEXT { + s_type: StructureType::VALIDATION_FEATURES_EXT, + p_next: ::std::ptr::null(), + enabled_validation_feature_count: u32::default(), + p_enabled_validation_features: ::std::ptr::null(), + disabled_validation_feature_count: u32::default(), + p_disabled_validation_features: ::std::ptr::null(), + } + } +} +impl ValidationFeaturesEXT { + pub fn builder<'a>() -> ValidationFeaturesEXTBuilder<'a> { + ValidationFeaturesEXTBuilder { + inner: ValidationFeaturesEXT::default(), + marker: ::std::marker::PhantomData, + } + } +} +#[repr(transparent)] +pub struct ValidationFeaturesEXTBuilder<'a> { + inner: ValidationFeaturesEXT, + marker: ::std::marker::PhantomData<&'a ()>, +} +unsafe impl ExtendsInstanceCreateInfo for ValidationFeaturesEXTBuilder<'_> {} +unsafe impl ExtendsInstanceCreateInfo for ValidationFeaturesEXT {} +impl<'a> ::std::ops::Deref for ValidationFeaturesEXTBuilder<'a> { + type Target = ValidationFeaturesEXT; + fn deref(&self) -> &Self::Target { + &self.inner + } +} +impl<'a> ::std::ops::DerefMut for ValidationFeaturesEXTBuilder<'a> { + fn deref_mut(&mut self) -> &mut Self::Target { + &mut self.inner + } +} +impl<'a> ValidationFeaturesEXTBuilder<'a> { + pub fn enabled_validation_features( + mut self, + enabled_validation_features: &'a [ValidationFeatureEnableEXT], + ) -> ValidationFeaturesEXTBuilder<'a> { + self.inner.enabled_validation_feature_count = enabled_validation_features.len() as _; + self.inner.p_enabled_validation_features = enabled_validation_features.as_ptr(); + self + } + pub fn disabled_validation_features( + mut self, + disabled_validation_features: &'a [ValidationFeatureDisableEXT], + ) -> ValidationFeaturesEXTBuilder<'a> { + self.inner.disabled_validation_feature_count = disabled_validation_features.len() as _; + self.inner.p_disabled_validation_features = disabled_validation_features.as_ptr(); + self + } + #[doc = r" Calling build will **discard** all the lifetime information. Only call this if"] + #[doc = r" necessary! 
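// Illustrative sketch, not part of this patch: opting into extra validation, e.g.
// let enables = [ValidationFeatureEnableEXT::GPU_ASSISTED]; the returned builder
// borrows the slice, so it must outlive the instance-creation call.
fn validation_features<'a>(
    enables: &'a [ValidationFeatureEnableEXT],
) -> ValidationFeaturesEXTBuilder<'a> {
    ValidationFeaturesEXT::builder().enabled_validation_features(enables)
}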
Builders implement `Deref` targeting their corresponding Vulkan struct,"] + #[doc = r" so references to builders can be passed directly to Vulkan functions."] + pub fn build(self) -> ValidationFeaturesEXT { + self.inner + } +} +#[repr(C)] +#[derive(Copy, Clone, Debug)] +#[doc = ""] +pub struct PipelineRasterizationStateRasterizationOrderAMD { + pub s_type: StructureType, + pub p_next: *const c_void, + pub rasterization_order: RasterizationOrderAMD, +} +impl ::std::default::Default for PipelineRasterizationStateRasterizationOrderAMD { + fn default() -> PipelineRasterizationStateRasterizationOrderAMD { + PipelineRasterizationStateRasterizationOrderAMD { + s_type: StructureType::PIPELINE_RASTERIZATION_STATE_RASTERIZATION_ORDER_AMD, + p_next: ::std::ptr::null(), + rasterization_order: RasterizationOrderAMD::default(), + } + } +} +impl PipelineRasterizationStateRasterizationOrderAMD { + pub fn builder<'a>() -> PipelineRasterizationStateRasterizationOrderAMDBuilder<'a> { + PipelineRasterizationStateRasterizationOrderAMDBuilder { + inner: PipelineRasterizationStateRasterizationOrderAMD::default(), + marker: ::std::marker::PhantomData, + } + } +} +#[repr(transparent)] +pub struct PipelineRasterizationStateRasterizationOrderAMDBuilder<'a> { + inner: PipelineRasterizationStateRasterizationOrderAMD, + marker: ::std::marker::PhantomData<&'a ()>, +} +unsafe impl ExtendsPipelineRasterizationStateCreateInfo + for PipelineRasterizationStateRasterizationOrderAMDBuilder<'_> +{ +} +unsafe impl ExtendsPipelineRasterizationStateCreateInfo + for PipelineRasterizationStateRasterizationOrderAMD +{ +} +impl<'a> ::std::ops::Deref for PipelineRasterizationStateRasterizationOrderAMDBuilder<'a> { + type Target = PipelineRasterizationStateRasterizationOrderAMD; + fn deref(&self) -> &Self::Target { + &self.inner + } +} +impl<'a> ::std::ops::DerefMut for PipelineRasterizationStateRasterizationOrderAMDBuilder<'a> { + fn deref_mut(&mut self) -> &mut Self::Target { + &mut self.inner + } +} +impl<'a> PipelineRasterizationStateRasterizationOrderAMDBuilder<'a> { + pub fn rasterization_order( + mut self, + rasterization_order: RasterizationOrderAMD, + ) -> PipelineRasterizationStateRasterizationOrderAMDBuilder<'a> { + self.inner.rasterization_order = rasterization_order; + self + } + #[doc = r" Calling build will **discard** all the lifetime information. Only call this if"] + #[doc = r" necessary! 
Builders implement `Deref` targeting their corresponding Vulkan struct,"] + #[doc = r" so references to builders can be passed directly to Vulkan functions."] + pub fn build(self) -> PipelineRasterizationStateRasterizationOrderAMD { + self.inner + } +} +#[repr(C)] +#[derive(Copy, Clone, Debug)] +#[doc = ""] +pub struct DebugMarkerObjectNameInfoEXT { + pub s_type: StructureType, + pub p_next: *const c_void, + pub object_type: DebugReportObjectTypeEXT, + pub object: u64, + pub p_object_name: *const c_char, +} +impl ::std::default::Default for DebugMarkerObjectNameInfoEXT { + fn default() -> DebugMarkerObjectNameInfoEXT { + DebugMarkerObjectNameInfoEXT { + s_type: StructureType::DEBUG_MARKER_OBJECT_NAME_INFO_EXT, + p_next: ::std::ptr::null(), + object_type: DebugReportObjectTypeEXT::default(), + object: u64::default(), + p_object_name: ::std::ptr::null(), + } + } +} +impl DebugMarkerObjectNameInfoEXT { + pub fn builder<'a>() -> DebugMarkerObjectNameInfoEXTBuilder<'a> { + DebugMarkerObjectNameInfoEXTBuilder { + inner: DebugMarkerObjectNameInfoEXT::default(), + marker: ::std::marker::PhantomData, + } + } +} +#[repr(transparent)] +pub struct DebugMarkerObjectNameInfoEXTBuilder<'a> { + inner: DebugMarkerObjectNameInfoEXT, + marker: ::std::marker::PhantomData<&'a ()>, +} +pub unsafe trait ExtendsDebugMarkerObjectNameInfoEXT {} +impl<'a> ::std::ops::Deref for DebugMarkerObjectNameInfoEXTBuilder<'a> { + type Target = DebugMarkerObjectNameInfoEXT; + fn deref(&self) -> &Self::Target { + &self.inner + } +} +impl<'a> ::std::ops::DerefMut for DebugMarkerObjectNameInfoEXTBuilder<'a> { + fn deref_mut(&mut self) -> &mut Self::Target { + &mut self.inner + } +} +impl<'a> DebugMarkerObjectNameInfoEXTBuilder<'a> { + pub fn object_type( + mut self, + object_type: DebugReportObjectTypeEXT, + ) -> DebugMarkerObjectNameInfoEXTBuilder<'a> { + self.inner.object_type = object_type; + self + } + pub fn object(mut self, object: u64) -> DebugMarkerObjectNameInfoEXTBuilder<'a> { + self.inner.object = object; + self + } + pub fn object_name( + mut self, + object_name: &'a ::std::ffi::CStr, + ) -> DebugMarkerObjectNameInfoEXTBuilder<'a> { + self.inner.p_object_name = object_name.as_ptr(); + self + } + #[doc = r" Prepends the given extension struct between the root and the first pointer. This"] + #[doc = r" method only exists on structs that can be passed to a function directly. Only"] + #[doc = r" valid extension structs can be pushed into the chain."] + #[doc = r" If the chain looks like `A -> B -> C`, and you call `builder.push_next(&mut D)`, then the"] + #[doc = r" chain will look like `A -> D -> B -> C`."] + pub fn push_next( + mut self, + next: &'a mut T, + ) -> DebugMarkerObjectNameInfoEXTBuilder<'a> { + unsafe { + let next_ptr = next as *mut T as *mut BaseOutStructure; + let last_next = ptr_chain_iter(next).last().unwrap(); + (*last_next).p_next = self.inner.p_next as _; + self.inner.p_next = next_ptr as _; + } + self + } + #[doc = r" Calling build will **discard** all the lifetime information. Only call this if"] + #[doc = r" necessary! 
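// Illustrative sketch, not part of this patch: giving a handle a human-readable
// name for debuggers. `raw_handle` stands in for the u64 value of some Vulkan
// object (e.g. an image handle's raw value at the call site).
fn name_image<'a>(
    raw_handle: u64,
    name: &'a ::std::ffi::CStr,
) -> DebugMarkerObjectNameInfoEXTBuilder<'a> {
    DebugMarkerObjectNameInfoEXT::builder()
        .object_type(DebugReportObjectTypeEXT::IMAGE)
        .object(raw_handle)
        .object_name(name)
}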
Builders implement `Deref` targeting their corresponding Vulkan struct,"] + #[doc = r" so references to builders can be passed directly to Vulkan functions."] + pub fn build(self) -> DebugMarkerObjectNameInfoEXT { + self.inner + } +} +#[repr(C)] +#[derive(Copy, Clone, Debug)] +#[doc = ""] +pub struct DebugMarkerObjectTagInfoEXT { + pub s_type: StructureType, + pub p_next: *const c_void, + pub object_type: DebugReportObjectTypeEXT, + pub object: u64, + pub tag_name: u64, + pub tag_size: usize, + pub p_tag: *const c_void, +} +impl ::std::default::Default for DebugMarkerObjectTagInfoEXT { + fn default() -> DebugMarkerObjectTagInfoEXT { + DebugMarkerObjectTagInfoEXT { + s_type: StructureType::DEBUG_MARKER_OBJECT_TAG_INFO_EXT, + p_next: ::std::ptr::null(), + object_type: DebugReportObjectTypeEXT::default(), + object: u64::default(), + tag_name: u64::default(), + tag_size: usize::default(), + p_tag: ::std::ptr::null(), + } + } +} +impl DebugMarkerObjectTagInfoEXT { + pub fn builder<'a>() -> DebugMarkerObjectTagInfoEXTBuilder<'a> { + DebugMarkerObjectTagInfoEXTBuilder { + inner: DebugMarkerObjectTagInfoEXT::default(), + marker: ::std::marker::PhantomData, + } + } +} +#[repr(transparent)] +pub struct DebugMarkerObjectTagInfoEXTBuilder<'a> { + inner: DebugMarkerObjectTagInfoEXT, + marker: ::std::marker::PhantomData<&'a ()>, +} +pub unsafe trait ExtendsDebugMarkerObjectTagInfoEXT {} +impl<'a> ::std::ops::Deref for DebugMarkerObjectTagInfoEXTBuilder<'a> { + type Target = DebugMarkerObjectTagInfoEXT; + fn deref(&self) -> &Self::Target { + &self.inner + } +} +impl<'a> ::std::ops::DerefMut for DebugMarkerObjectTagInfoEXTBuilder<'a> { + fn deref_mut(&mut self) -> &mut Self::Target { + &mut self.inner + } +} +impl<'a> DebugMarkerObjectTagInfoEXTBuilder<'a> { + pub fn object_type( + mut self, + object_type: DebugReportObjectTypeEXT, + ) -> DebugMarkerObjectTagInfoEXTBuilder<'a> { + self.inner.object_type = object_type; + self + } + pub fn object(mut self, object: u64) -> DebugMarkerObjectTagInfoEXTBuilder<'a> { + self.inner.object = object; + self + } + pub fn tag_name(mut self, tag_name: u64) -> DebugMarkerObjectTagInfoEXTBuilder<'a> { + self.inner.tag_name = tag_name; + self + } + pub fn tag(mut self, tag: &'a [u8]) -> DebugMarkerObjectTagInfoEXTBuilder<'a> { + self.inner.tag_size = tag.len() as _; + self.inner.p_tag = tag.as_ptr() as *const c_void; + self + } + #[doc = r" Prepends the given extension struct between the root and the first pointer. This"] + #[doc = r" method only exists on structs that can be passed to a function directly. Only"] + #[doc = r" valid extension structs can be pushed into the chain."] + #[doc = r" If the chain looks like `A -> B -> C`, and you call `builder.push_next(&mut D)`, then the"] + #[doc = r" chain will look like `A -> D -> B -> C`."] + pub fn push_next( + mut self, + next: &'a mut T, + ) -> DebugMarkerObjectTagInfoEXTBuilder<'a> { + unsafe { + let next_ptr = next as *mut T as *mut BaseOutStructure; + let last_next = ptr_chain_iter(next).last().unwrap(); + (*last_next).p_next = self.inner.p_next as _; + self.inner.p_next = next_ptr as _; + } + self + } + #[doc = r" Calling build will **discard** all the lifetime information. Only call this if"] + #[doc = r" necessary! 
Builders implement `Deref` targeting their corresponding Vulkan struct,"]
+    #[doc = r" so references to builders can be passed directly to Vulkan functions."]
+    pub fn build(self) -> DebugMarkerObjectTagInfoEXT {
+        self.inner
+    }
+}
+#[repr(C)]
+#[derive(Copy, Clone, Debug)]
+#[doc = ""]
+pub struct DebugMarkerMarkerInfoEXT {
+    pub s_type: StructureType,
+    pub p_next: *const c_void,
+    pub p_marker_name: *const c_char,
+    pub color: [f32; 4],
+}
+impl ::std::default::Default for DebugMarkerMarkerInfoEXT {
+    fn default() -> DebugMarkerMarkerInfoEXT {
+        DebugMarkerMarkerInfoEXT {
+            s_type: StructureType::DEBUG_MARKER_MARKER_INFO_EXT,
+            p_next: ::std::ptr::null(),
+            p_marker_name: ::std::ptr::null(),
+            color: unsafe { ::std::mem::zeroed() },
+        }
+    }
+}
+impl DebugMarkerMarkerInfoEXT {
+    pub fn builder<'a>() -> DebugMarkerMarkerInfoEXTBuilder<'a> {
+        DebugMarkerMarkerInfoEXTBuilder {
+            inner: DebugMarkerMarkerInfoEXT::default(),
+            marker: ::std::marker::PhantomData,
+        }
+    }
+}
+#[repr(transparent)]
+pub struct DebugMarkerMarkerInfoEXTBuilder<'a> {
+    inner: DebugMarkerMarkerInfoEXT,
+    marker: ::std::marker::PhantomData<&'a ()>,
+}
+pub unsafe trait ExtendsDebugMarkerMarkerInfoEXT {}
+impl<'a> ::std::ops::Deref for DebugMarkerMarkerInfoEXTBuilder<'a> {
+    type Target = DebugMarkerMarkerInfoEXT;
+    fn deref(&self) -> &Self::Target {
+        &self.inner
+    }
+}
+impl<'a> ::std::ops::DerefMut for DebugMarkerMarkerInfoEXTBuilder<'a> {
+    fn deref_mut(&mut self) -> &mut Self::Target {
+        &mut self.inner
+    }
+}
+impl<'a> DebugMarkerMarkerInfoEXTBuilder<'a> {
+    pub fn marker_name(
+        mut self,
+        marker_name: &'a ::std::ffi::CStr,
+    ) -> DebugMarkerMarkerInfoEXTBuilder<'a> {
+        self.inner.p_marker_name = marker_name.as_ptr();
+        self
+    }
+    pub fn color(mut self, color: [f32; 4]) -> DebugMarkerMarkerInfoEXTBuilder<'a> {
+        self.inner.color = color;
+        self
+    }
+    #[doc = r" Prepends the given extension struct between the root and the first pointer. This"]
+    #[doc = r" method only exists on structs that can be passed to a function directly. Only"]
+    #[doc = r" valid extension structs can be pushed into the chain."]
+    #[doc = r" If the chain looks like `A -> B -> C`, and you call `builder.push_next(&mut D)`, then the"]
+    #[doc = r" chain will look like `A -> D -> B -> C`."]
+    pub fn push_next<T: ExtendsDebugMarkerMarkerInfoEXT>(
+        mut self,
+        next: &'a mut T,
+    ) -> DebugMarkerMarkerInfoEXTBuilder<'a> {
+        unsafe {
+            let next_ptr = next as *mut T as *mut BaseOutStructure;
+            let last_next = ptr_chain_iter(next).last().unwrap();
+            (*last_next).p_next = self.inner.p_next as _;
+            self.inner.p_next = next_ptr as _;
+        }
+        self
+    }
+    #[doc = r" Calling build will **discard** all the lifetime information. Only call this if"]
+    #[doc = r" necessary! 
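// Illustrative sketch, not part of this patch: opening a labelled region for tools
// like RenderDoc. The color is RGBA; an all-zero color means "no preference".
fn marker_info<'a>(label: &'a ::std::ffi::CStr) -> DebugMarkerMarkerInfoEXTBuilder<'a> {
    DebugMarkerMarkerInfoEXT::builder()
        .marker_name(label)
        .color([0.0, 1.0, 0.0, 1.0])
}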
Builders implement `Deref` targeting their corresponding Vulkan struct,"] + #[doc = r" so references to builders can be passed directly to Vulkan functions."] + pub fn build(self) -> DebugMarkerMarkerInfoEXT { + self.inner + } +} +#[repr(C)] +#[derive(Copy, Clone, Debug)] +#[doc = ""] +pub struct DedicatedAllocationImageCreateInfoNV { + pub s_type: StructureType, + pub p_next: *const c_void, + pub dedicated_allocation: Bool32, +} +impl ::std::default::Default for DedicatedAllocationImageCreateInfoNV { + fn default() -> DedicatedAllocationImageCreateInfoNV { + DedicatedAllocationImageCreateInfoNV { + s_type: StructureType::DEDICATED_ALLOCATION_IMAGE_CREATE_INFO_NV, + p_next: ::std::ptr::null(), + dedicated_allocation: Bool32::default(), + } + } +} +impl DedicatedAllocationImageCreateInfoNV { + pub fn builder<'a>() -> DedicatedAllocationImageCreateInfoNVBuilder<'a> { + DedicatedAllocationImageCreateInfoNVBuilder { + inner: DedicatedAllocationImageCreateInfoNV::default(), + marker: ::std::marker::PhantomData, + } + } +} +#[repr(transparent)] +pub struct DedicatedAllocationImageCreateInfoNVBuilder<'a> { + inner: DedicatedAllocationImageCreateInfoNV, + marker: ::std::marker::PhantomData<&'a ()>, +} +unsafe impl ExtendsImageCreateInfo for DedicatedAllocationImageCreateInfoNVBuilder<'_> {} +unsafe impl ExtendsImageCreateInfo for DedicatedAllocationImageCreateInfoNV {} +impl<'a> ::std::ops::Deref for DedicatedAllocationImageCreateInfoNVBuilder<'a> { + type Target = DedicatedAllocationImageCreateInfoNV; + fn deref(&self) -> &Self::Target { + &self.inner + } +} +impl<'a> ::std::ops::DerefMut for DedicatedAllocationImageCreateInfoNVBuilder<'a> { + fn deref_mut(&mut self) -> &mut Self::Target { + &mut self.inner + } +} +impl<'a> DedicatedAllocationImageCreateInfoNVBuilder<'a> { + pub fn dedicated_allocation( + mut self, + dedicated_allocation: bool, + ) -> DedicatedAllocationImageCreateInfoNVBuilder<'a> { + self.inner.dedicated_allocation = dedicated_allocation.into(); + self + } + #[doc = r" Calling build will **discard** all the lifetime information. Only call this if"] + #[doc = r" necessary! 
Builders implement `Deref` targeting their corresponding Vulkan struct,"] + #[doc = r" so references to builders can be passed directly to Vulkan functions."] + pub fn build(self) -> DedicatedAllocationImageCreateInfoNV { + self.inner + } +} +#[repr(C)] +#[derive(Copy, Clone, Debug)] +#[doc = ""] +pub struct DedicatedAllocationBufferCreateInfoNV { + pub s_type: StructureType, + pub p_next: *const c_void, + pub dedicated_allocation: Bool32, +} +impl ::std::default::Default for DedicatedAllocationBufferCreateInfoNV { + fn default() -> DedicatedAllocationBufferCreateInfoNV { + DedicatedAllocationBufferCreateInfoNV { + s_type: StructureType::DEDICATED_ALLOCATION_BUFFER_CREATE_INFO_NV, + p_next: ::std::ptr::null(), + dedicated_allocation: Bool32::default(), + } + } +} +impl DedicatedAllocationBufferCreateInfoNV { + pub fn builder<'a>() -> DedicatedAllocationBufferCreateInfoNVBuilder<'a> { + DedicatedAllocationBufferCreateInfoNVBuilder { + inner: DedicatedAllocationBufferCreateInfoNV::default(), + marker: ::std::marker::PhantomData, + } + } +} +#[repr(transparent)] +pub struct DedicatedAllocationBufferCreateInfoNVBuilder<'a> { + inner: DedicatedAllocationBufferCreateInfoNV, + marker: ::std::marker::PhantomData<&'a ()>, +} +unsafe impl ExtendsBufferCreateInfo for DedicatedAllocationBufferCreateInfoNVBuilder<'_> {} +unsafe impl ExtendsBufferCreateInfo for DedicatedAllocationBufferCreateInfoNV {} +impl<'a> ::std::ops::Deref for DedicatedAllocationBufferCreateInfoNVBuilder<'a> { + type Target = DedicatedAllocationBufferCreateInfoNV; + fn deref(&self) -> &Self::Target { + &self.inner + } +} +impl<'a> ::std::ops::DerefMut for DedicatedAllocationBufferCreateInfoNVBuilder<'a> { + fn deref_mut(&mut self) -> &mut Self::Target { + &mut self.inner + } +} +impl<'a> DedicatedAllocationBufferCreateInfoNVBuilder<'a> { + pub fn dedicated_allocation( + mut self, + dedicated_allocation: bool, + ) -> DedicatedAllocationBufferCreateInfoNVBuilder<'a> { + self.inner.dedicated_allocation = dedicated_allocation.into(); + self + } + #[doc = r" Calling build will **discard** all the lifetime information. Only call this if"] + #[doc = r" necessary! 
Builders implement `Deref` targeting their corresponding Vulkan struct,"] + #[doc = r" so references to builders can be passed directly to Vulkan functions."] + pub fn build(self) -> DedicatedAllocationBufferCreateInfoNV { + self.inner + } +} +#[repr(C)] +#[derive(Copy, Clone, Debug)] +#[doc = ""] +pub struct DedicatedAllocationMemoryAllocateInfoNV { + pub s_type: StructureType, + pub p_next: *const c_void, + pub image: Image, + pub buffer: Buffer, +} +impl ::std::default::Default for DedicatedAllocationMemoryAllocateInfoNV { + fn default() -> DedicatedAllocationMemoryAllocateInfoNV { + DedicatedAllocationMemoryAllocateInfoNV { + s_type: StructureType::DEDICATED_ALLOCATION_MEMORY_ALLOCATE_INFO_NV, + p_next: ::std::ptr::null(), + image: Image::default(), + buffer: Buffer::default(), + } + } +} +impl DedicatedAllocationMemoryAllocateInfoNV { + pub fn builder<'a>() -> DedicatedAllocationMemoryAllocateInfoNVBuilder<'a> { + DedicatedAllocationMemoryAllocateInfoNVBuilder { + inner: DedicatedAllocationMemoryAllocateInfoNV::default(), + marker: ::std::marker::PhantomData, + } + } +} +#[repr(transparent)] +pub struct DedicatedAllocationMemoryAllocateInfoNVBuilder<'a> { + inner: DedicatedAllocationMemoryAllocateInfoNV, + marker: ::std::marker::PhantomData<&'a ()>, +} +unsafe impl ExtendsMemoryAllocateInfo for DedicatedAllocationMemoryAllocateInfoNVBuilder<'_> {} +unsafe impl ExtendsMemoryAllocateInfo for DedicatedAllocationMemoryAllocateInfoNV {} +impl<'a> ::std::ops::Deref for DedicatedAllocationMemoryAllocateInfoNVBuilder<'a> { + type Target = DedicatedAllocationMemoryAllocateInfoNV; + fn deref(&self) -> &Self::Target { + &self.inner + } +} +impl<'a> ::std::ops::DerefMut for DedicatedAllocationMemoryAllocateInfoNVBuilder<'a> { + fn deref_mut(&mut self) -> &mut Self::Target { + &mut self.inner + } +} +impl<'a> DedicatedAllocationMemoryAllocateInfoNVBuilder<'a> { + pub fn image(mut self, image: Image) -> DedicatedAllocationMemoryAllocateInfoNVBuilder<'a> { + self.inner.image = image; + self + } + pub fn buffer(mut self, buffer: Buffer) -> DedicatedAllocationMemoryAllocateInfoNVBuilder<'a> { + self.inner.buffer = buffer; + self + } + #[doc = r" Calling build will **discard** all the lifetime information. Only call this if"] + #[doc = r" necessary! 
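// Illustrative sketch, not part of this patch: the NV dedicated-allocation structs
// act as paired hints. The image/buffer create-info hints mark the resource, and
// this struct rides on MemoryAllocateInfo (defined earlier in this module) via
// push_next; the unused image/buffer field stays at its null default.
fn dedicated_image_alloc<'a>(
    image: Image,
    size: DeviceSize,
    memory_type_index: u32,
    dedicated: &'a mut DedicatedAllocationMemoryAllocateInfoNV,
) -> MemoryAllocateInfoBuilder<'a> {
    dedicated.image = image;
    MemoryAllocateInfo::builder()
        .allocation_size(size)
        .memory_type_index(memory_type_index)
        .push_next(dedicated)
}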
Builders implement `Deref` targeting their corresponding Vulkan struct,"] + #[doc = r" so references to builders can be passed directly to Vulkan functions."] + pub fn build(self) -> DedicatedAllocationMemoryAllocateInfoNV { + self.inner + } +} +#[repr(C)] +#[derive(Copy, Clone, Default, Debug)] +#[doc = ""] +pub struct ExternalImageFormatPropertiesNV { + pub image_format_properties: ImageFormatProperties, + pub external_memory_features: ExternalMemoryFeatureFlagsNV, + pub export_from_imported_handle_types: ExternalMemoryHandleTypeFlagsNV, + pub compatible_handle_types: ExternalMemoryHandleTypeFlagsNV, +} +impl ExternalImageFormatPropertiesNV { + pub fn builder<'a>() -> ExternalImageFormatPropertiesNVBuilder<'a> { + ExternalImageFormatPropertiesNVBuilder { + inner: ExternalImageFormatPropertiesNV::default(), + marker: ::std::marker::PhantomData, + } + } +} +#[repr(transparent)] +pub struct ExternalImageFormatPropertiesNVBuilder<'a> { + inner: ExternalImageFormatPropertiesNV, + marker: ::std::marker::PhantomData<&'a ()>, +} +impl<'a> ::std::ops::Deref for ExternalImageFormatPropertiesNVBuilder<'a> { + type Target = ExternalImageFormatPropertiesNV; + fn deref(&self) -> &Self::Target { + &self.inner + } +} +impl<'a> ::std::ops::DerefMut for ExternalImageFormatPropertiesNVBuilder<'a> { + fn deref_mut(&mut self) -> &mut Self::Target { + &mut self.inner + } +} +impl<'a> ExternalImageFormatPropertiesNVBuilder<'a> { + pub fn image_format_properties( + mut self, + image_format_properties: ImageFormatProperties, + ) -> ExternalImageFormatPropertiesNVBuilder<'a> { + self.inner.image_format_properties = image_format_properties; + self + } + pub fn external_memory_features( + mut self, + external_memory_features: ExternalMemoryFeatureFlagsNV, + ) -> ExternalImageFormatPropertiesNVBuilder<'a> { + self.inner.external_memory_features = external_memory_features; + self + } + pub fn export_from_imported_handle_types( + mut self, + export_from_imported_handle_types: ExternalMemoryHandleTypeFlagsNV, + ) -> ExternalImageFormatPropertiesNVBuilder<'a> { + self.inner.export_from_imported_handle_types = export_from_imported_handle_types; + self + } + pub fn compatible_handle_types( + mut self, + compatible_handle_types: ExternalMemoryHandleTypeFlagsNV, + ) -> ExternalImageFormatPropertiesNVBuilder<'a> { + self.inner.compatible_handle_types = compatible_handle_types; + self + } + #[doc = r" Calling build will **discard** all the lifetime information. Only call this if"] + #[doc = r" necessary! 
Builders implement `Deref` targeting their corresponding Vulkan struct,"] + #[doc = r" so references to builders can be passed directly to Vulkan functions."] + pub fn build(self) -> ExternalImageFormatPropertiesNV { + self.inner + } +} +#[repr(C)] +#[derive(Copy, Clone, Debug)] +#[doc = ""] +pub struct ExternalMemoryImageCreateInfoNV { + pub s_type: StructureType, + pub p_next: *const c_void, + pub handle_types: ExternalMemoryHandleTypeFlagsNV, +} +impl ::std::default::Default for ExternalMemoryImageCreateInfoNV { + fn default() -> ExternalMemoryImageCreateInfoNV { + ExternalMemoryImageCreateInfoNV { + s_type: StructureType::EXTERNAL_MEMORY_IMAGE_CREATE_INFO_NV, + p_next: ::std::ptr::null(), + handle_types: ExternalMemoryHandleTypeFlagsNV::default(), + } + } +} +impl ExternalMemoryImageCreateInfoNV { + pub fn builder<'a>() -> ExternalMemoryImageCreateInfoNVBuilder<'a> { + ExternalMemoryImageCreateInfoNVBuilder { + inner: ExternalMemoryImageCreateInfoNV::default(), + marker: ::std::marker::PhantomData, + } + } +} +#[repr(transparent)] +pub struct ExternalMemoryImageCreateInfoNVBuilder<'a> { + inner: ExternalMemoryImageCreateInfoNV, + marker: ::std::marker::PhantomData<&'a ()>, +} +unsafe impl ExtendsImageCreateInfo for ExternalMemoryImageCreateInfoNVBuilder<'_> {} +unsafe impl ExtendsImageCreateInfo for ExternalMemoryImageCreateInfoNV {} +impl<'a> ::std::ops::Deref for ExternalMemoryImageCreateInfoNVBuilder<'a> { + type Target = ExternalMemoryImageCreateInfoNV; + fn deref(&self) -> &Self::Target { + &self.inner + } +} +impl<'a> ::std::ops::DerefMut for ExternalMemoryImageCreateInfoNVBuilder<'a> { + fn deref_mut(&mut self) -> &mut Self::Target { + &mut self.inner + } +} +impl<'a> ExternalMemoryImageCreateInfoNVBuilder<'a> { + pub fn handle_types( + mut self, + handle_types: ExternalMemoryHandleTypeFlagsNV, + ) -> ExternalMemoryImageCreateInfoNVBuilder<'a> { + self.inner.handle_types = handle_types; + self + } + #[doc = r" Calling build will **discard** all the lifetime information. Only call this if"] + #[doc = r" necessary! 
Builders implement `Deref` targeting their corresponding Vulkan struct,"] + #[doc = r" so references to builders can be passed directly to Vulkan functions."] + pub fn build(self) -> ExternalMemoryImageCreateInfoNV { + self.inner + } +} +#[repr(C)] +#[derive(Copy, Clone, Debug)] +#[doc = ""] +pub struct ExportMemoryAllocateInfoNV { + pub s_type: StructureType, + pub p_next: *const c_void, + pub handle_types: ExternalMemoryHandleTypeFlagsNV, +} +impl ::std::default::Default for ExportMemoryAllocateInfoNV { + fn default() -> ExportMemoryAllocateInfoNV { + ExportMemoryAllocateInfoNV { + s_type: StructureType::EXPORT_MEMORY_ALLOCATE_INFO_NV, + p_next: ::std::ptr::null(), + handle_types: ExternalMemoryHandleTypeFlagsNV::default(), + } + } +} +impl ExportMemoryAllocateInfoNV { + pub fn builder<'a>() -> ExportMemoryAllocateInfoNVBuilder<'a> { + ExportMemoryAllocateInfoNVBuilder { + inner: ExportMemoryAllocateInfoNV::default(), + marker: ::std::marker::PhantomData, + } + } +} +#[repr(transparent)] +pub struct ExportMemoryAllocateInfoNVBuilder<'a> { + inner: ExportMemoryAllocateInfoNV, + marker: ::std::marker::PhantomData<&'a ()>, +} +unsafe impl ExtendsMemoryAllocateInfo for ExportMemoryAllocateInfoNVBuilder<'_> {} +unsafe impl ExtendsMemoryAllocateInfo for ExportMemoryAllocateInfoNV {} +impl<'a> ::std::ops::Deref for ExportMemoryAllocateInfoNVBuilder<'a> { + type Target = ExportMemoryAllocateInfoNV; + fn deref(&self) -> &Self::Target { + &self.inner + } +} +impl<'a> ::std::ops::DerefMut for ExportMemoryAllocateInfoNVBuilder<'a> { + fn deref_mut(&mut self) -> &mut Self::Target { + &mut self.inner + } +} +impl<'a> ExportMemoryAllocateInfoNVBuilder<'a> { + pub fn handle_types( + mut self, + handle_types: ExternalMemoryHandleTypeFlagsNV, + ) -> ExportMemoryAllocateInfoNVBuilder<'a> { + self.inner.handle_types = handle_types; + self + } + #[doc = r" Calling build will **discard** all the lifetime information. Only call this if"] + #[doc = r" necessary! 
Builders implement `Deref` targeting their corresponding Vulkan struct,"] + #[doc = r" so references to builders can be passed directly to Vulkan functions."] + pub fn build(self) -> ExportMemoryAllocateInfoNV { + self.inner + } +} +#[repr(C)] +#[derive(Copy, Clone, Debug)] +#[doc = ""] +pub struct ImportMemoryWin32HandleInfoNV { + pub s_type: StructureType, + pub p_next: *const c_void, + pub handle_type: ExternalMemoryHandleTypeFlagsNV, + pub handle: HANDLE, +} +impl ::std::default::Default for ImportMemoryWin32HandleInfoNV { + fn default() -> ImportMemoryWin32HandleInfoNV { + ImportMemoryWin32HandleInfoNV { + s_type: StructureType::IMPORT_MEMORY_WIN32_HANDLE_INFO_NV, + p_next: ::std::ptr::null(), + handle_type: ExternalMemoryHandleTypeFlagsNV::default(), + handle: unsafe { ::std::mem::zeroed() }, + } + } +} +impl ImportMemoryWin32HandleInfoNV { + pub fn builder<'a>() -> ImportMemoryWin32HandleInfoNVBuilder<'a> { + ImportMemoryWin32HandleInfoNVBuilder { + inner: ImportMemoryWin32HandleInfoNV::default(), + marker: ::std::marker::PhantomData, + } + } +} +#[repr(transparent)] +pub struct ImportMemoryWin32HandleInfoNVBuilder<'a> { + inner: ImportMemoryWin32HandleInfoNV, + marker: ::std::marker::PhantomData<&'a ()>, +} +unsafe impl ExtendsMemoryAllocateInfo for ImportMemoryWin32HandleInfoNVBuilder<'_> {} +unsafe impl ExtendsMemoryAllocateInfo for ImportMemoryWin32HandleInfoNV {} +impl<'a> ::std::ops::Deref for ImportMemoryWin32HandleInfoNVBuilder<'a> { + type Target = ImportMemoryWin32HandleInfoNV; + fn deref(&self) -> &Self::Target { + &self.inner + } +} +impl<'a> ::std::ops::DerefMut for ImportMemoryWin32HandleInfoNVBuilder<'a> { + fn deref_mut(&mut self) -> &mut Self::Target { + &mut self.inner + } +} +impl<'a> ImportMemoryWin32HandleInfoNVBuilder<'a> { + pub fn handle_type( + mut self, + handle_type: ExternalMemoryHandleTypeFlagsNV, + ) -> ImportMemoryWin32HandleInfoNVBuilder<'a> { + self.inner.handle_type = handle_type; + self + } + pub fn handle(mut self, handle: HANDLE) -> ImportMemoryWin32HandleInfoNVBuilder<'a> { + self.inner.handle = handle; + self + } + #[doc = r" Calling build will **discard** all the lifetime information. Only call this if"] + #[doc = r" necessary! 
Builders implement `Deref` targeting their corresponding Vulkan struct,"] + #[doc = r" so references to builders can be passed directly to Vulkan functions."] + pub fn build(self) -> ImportMemoryWin32HandleInfoNV { + self.inner + } +} +#[repr(C)] +#[derive(Copy, Clone, Debug)] +#[doc = ""] +pub struct ExportMemoryWin32HandleInfoNV { + pub s_type: StructureType, + pub p_next: *const c_void, + pub p_attributes: *const SECURITY_ATTRIBUTES, + pub dw_access: DWORD, +} +impl ::std::default::Default for ExportMemoryWin32HandleInfoNV { + fn default() -> ExportMemoryWin32HandleInfoNV { + ExportMemoryWin32HandleInfoNV { + s_type: StructureType::EXPORT_MEMORY_WIN32_HANDLE_INFO_NV, + p_next: ::std::ptr::null(), + p_attributes: ::std::ptr::null(), + dw_access: DWORD::default(), + } + } +} +impl ExportMemoryWin32HandleInfoNV { + pub fn builder<'a>() -> ExportMemoryWin32HandleInfoNVBuilder<'a> { + ExportMemoryWin32HandleInfoNVBuilder { + inner: ExportMemoryWin32HandleInfoNV::default(), + marker: ::std::marker::PhantomData, + } + } +} +#[repr(transparent)] +pub struct ExportMemoryWin32HandleInfoNVBuilder<'a> { + inner: ExportMemoryWin32HandleInfoNV, + marker: ::std::marker::PhantomData<&'a ()>, +} +unsafe impl ExtendsMemoryAllocateInfo for ExportMemoryWin32HandleInfoNVBuilder<'_> {} +unsafe impl ExtendsMemoryAllocateInfo for ExportMemoryWin32HandleInfoNV {} +impl<'a> ::std::ops::Deref for ExportMemoryWin32HandleInfoNVBuilder<'a> { + type Target = ExportMemoryWin32HandleInfoNV; + fn deref(&self) -> &Self::Target { + &self.inner + } +} +impl<'a> ::std::ops::DerefMut for ExportMemoryWin32HandleInfoNVBuilder<'a> { + fn deref_mut(&mut self) -> &mut Self::Target { + &mut self.inner + } +} +impl<'a> ExportMemoryWin32HandleInfoNVBuilder<'a> { + pub fn attributes( + mut self, + attributes: &'a SECURITY_ATTRIBUTES, + ) -> ExportMemoryWin32HandleInfoNVBuilder<'a> { + self.inner.p_attributes = attributes; + self + } + pub fn dw_access(mut self, dw_access: DWORD) -> ExportMemoryWin32HandleInfoNVBuilder<'a> { + self.inner.dw_access = dw_access; + self + } + #[doc = r" Calling build will **discard** all the lifetime information. Only call this if"] + #[doc = r" necessary! 
Builders implement `Deref` targeting their corresponding Vulkan struct,"] + #[doc = r" so references to builders can be passed directly to Vulkan functions."] + pub fn build(self) -> ExportMemoryWin32HandleInfoNV { + self.inner + } +} +#[repr(C)] +#[derive(Copy, Clone, Debug)] +#[doc = ""] +pub struct Win32KeyedMutexAcquireReleaseInfoNV { + pub s_type: StructureType, + pub p_next: *const c_void, + pub acquire_count: u32, + pub p_acquire_syncs: *const DeviceMemory, + pub p_acquire_keys: *const u64, + pub p_acquire_timeout_milliseconds: *const u32, + pub release_count: u32, + pub p_release_syncs: *const DeviceMemory, + pub p_release_keys: *const u64, +} +impl ::std::default::Default for Win32KeyedMutexAcquireReleaseInfoNV { + fn default() -> Win32KeyedMutexAcquireReleaseInfoNV { + Win32KeyedMutexAcquireReleaseInfoNV { + s_type: StructureType::WIN32_KEYED_MUTEX_ACQUIRE_RELEASE_INFO_NV, + p_next: ::std::ptr::null(), + acquire_count: u32::default(), + p_acquire_syncs: ::std::ptr::null(), + p_acquire_keys: ::std::ptr::null(), + p_acquire_timeout_milliseconds: ::std::ptr::null(), + release_count: u32::default(), + p_release_syncs: ::std::ptr::null(), + p_release_keys: ::std::ptr::null(), + } + } +} +impl Win32KeyedMutexAcquireReleaseInfoNV { + pub fn builder<'a>() -> Win32KeyedMutexAcquireReleaseInfoNVBuilder<'a> { + Win32KeyedMutexAcquireReleaseInfoNVBuilder { + inner: Win32KeyedMutexAcquireReleaseInfoNV::default(), + marker: ::std::marker::PhantomData, + } + } +} +#[repr(transparent)] +pub struct Win32KeyedMutexAcquireReleaseInfoNVBuilder<'a> { + inner: Win32KeyedMutexAcquireReleaseInfoNV, + marker: ::std::marker::PhantomData<&'a ()>, +} +unsafe impl ExtendsSubmitInfo for Win32KeyedMutexAcquireReleaseInfoNVBuilder<'_> {} +unsafe impl ExtendsSubmitInfo for Win32KeyedMutexAcquireReleaseInfoNV {} +impl<'a> ::std::ops::Deref for Win32KeyedMutexAcquireReleaseInfoNVBuilder<'a> { + type Target = Win32KeyedMutexAcquireReleaseInfoNV; + fn deref(&self) -> &Self::Target { + &self.inner + } +} +impl<'a> ::std::ops::DerefMut for Win32KeyedMutexAcquireReleaseInfoNVBuilder<'a> { + fn deref_mut(&mut self) -> &mut Self::Target { + &mut self.inner + } +} +impl<'a> Win32KeyedMutexAcquireReleaseInfoNVBuilder<'a> { + pub fn acquire_syncs( + mut self, + acquire_syncs: &'a [DeviceMemory], + ) -> Win32KeyedMutexAcquireReleaseInfoNVBuilder<'a> { + self.inner.acquire_count = acquire_syncs.len() as _; + self.inner.p_acquire_syncs = acquire_syncs.as_ptr(); + self + } + pub fn acquire_keys( + mut self, + acquire_keys: &'a [u64], + ) -> Win32KeyedMutexAcquireReleaseInfoNVBuilder<'a> { + self.inner.acquire_count = acquire_keys.len() as _; + self.inner.p_acquire_keys = acquire_keys.as_ptr(); + self + } + pub fn acquire_timeout_milliseconds( + mut self, + acquire_timeout_milliseconds: &'a [u32], + ) -> Win32KeyedMutexAcquireReleaseInfoNVBuilder<'a> { + self.inner.acquire_count = acquire_timeout_milliseconds.len() as _; + self.inner.p_acquire_timeout_milliseconds = acquire_timeout_milliseconds.as_ptr(); + self + } + pub fn release_syncs( + mut self, + release_syncs: &'a [DeviceMemory], + ) -> Win32KeyedMutexAcquireReleaseInfoNVBuilder<'a> { + self.inner.release_count = release_syncs.len() as _; + self.inner.p_release_syncs = release_syncs.as_ptr(); + self + } + pub fn release_keys( + mut self, + release_keys: &'a [u64], + ) -> Win32KeyedMutexAcquireReleaseInfoNVBuilder<'a> { + self.inner.release_count = release_keys.len() as _; + self.inner.p_release_keys = release_keys.as_ptr(); + self + } + #[doc = r" Calling build will 
**discard** all the lifetime information. Only call this if"]
+    #[doc = r" necessary! Builders implement `Deref` targeting their corresponding Vulkan struct,"]
+    #[doc = r" so references to builders can be passed directly to Vulkan functions."]
+    pub fn build(self) -> Win32KeyedMutexAcquireReleaseInfoNV {
+        self.inner
+    }
+}
+#[repr(C)]
+#[derive(Copy, Clone, Debug)]
+#[doc = ""]
+pub struct DeviceGeneratedCommandsFeaturesNVX {
+    pub s_type: StructureType,
+    pub p_next: *const c_void,
+    pub compute_binding_point_support: Bool32,
+}
+impl ::std::default::Default for DeviceGeneratedCommandsFeaturesNVX {
+    fn default() -> DeviceGeneratedCommandsFeaturesNVX {
+        DeviceGeneratedCommandsFeaturesNVX {
+            s_type: StructureType::DEVICE_GENERATED_COMMANDS_FEATURES_NVX,
+            p_next: ::std::ptr::null(),
+            compute_binding_point_support: Bool32::default(),
+        }
+    }
+}
+impl DeviceGeneratedCommandsFeaturesNVX {
+    pub fn builder<'a>() -> DeviceGeneratedCommandsFeaturesNVXBuilder<'a> {
+        DeviceGeneratedCommandsFeaturesNVXBuilder {
+            inner: DeviceGeneratedCommandsFeaturesNVX::default(),
+            marker: ::std::marker::PhantomData,
+        }
+    }
+}
+#[repr(transparent)]
+pub struct DeviceGeneratedCommandsFeaturesNVXBuilder<'a> {
+    inner: DeviceGeneratedCommandsFeaturesNVX,
+    marker: ::std::marker::PhantomData<&'a ()>,
+}
+pub unsafe trait ExtendsDeviceGeneratedCommandsFeaturesNVX {}
+impl<'a> ::std::ops::Deref for DeviceGeneratedCommandsFeaturesNVXBuilder<'a> {
+    type Target = DeviceGeneratedCommandsFeaturesNVX;
+    fn deref(&self) -> &Self::Target {
+        &self.inner
+    }
+}
+impl<'a> ::std::ops::DerefMut for DeviceGeneratedCommandsFeaturesNVXBuilder<'a> {
+    fn deref_mut(&mut self) -> &mut Self::Target {
+        &mut self.inner
+    }
+}
+impl<'a> DeviceGeneratedCommandsFeaturesNVXBuilder<'a> {
+    pub fn compute_binding_point_support(
+        mut self,
+        compute_binding_point_support: bool,
+    ) -> DeviceGeneratedCommandsFeaturesNVXBuilder<'a> {
+        self.inner.compute_binding_point_support = compute_binding_point_support.into();
+        self
+    }
+    #[doc = r" Prepends the given extension struct between the root and the first pointer. This"]
+    #[doc = r" method only exists on structs that can be passed to a function directly. Only"]
+    #[doc = r" valid extension structs can be pushed into the chain."]
+    #[doc = r" If the chain looks like `A -> B -> C`, and you call `builder.push_next(&mut D)`, then the"]
+    #[doc = r" chain will look like `A -> D -> B -> C`."]
+    pub fn push_next<T: ExtendsDeviceGeneratedCommandsFeaturesNVX>(
+        mut self,
+        next: &'a mut T,
+    ) -> DeviceGeneratedCommandsFeaturesNVXBuilder<'a> {
+        unsafe {
+            let next_ptr = next as *mut T as *mut BaseOutStructure;
+            let last_next = ptr_chain_iter(next).last().unwrap();
+            (*last_next).p_next = self.inner.p_next as _;
+            self.inner.p_next = next_ptr as _;
+        }
+        self
+    }
+    #[doc = r" Calling build will **discard** all the lifetime information. Only call this if"]
+    #[doc = r" necessary! 
Builders implement `Deref` targeting their corresponding Vulkan struct,"] + #[doc = r" so references to builders can be passed directly to Vulkan functions."] + pub fn build(self) -> DeviceGeneratedCommandsFeaturesNVX { + self.inner + } +} +#[repr(C)] +#[derive(Copy, Clone, Debug)] +#[doc = ""] +pub struct DeviceGeneratedCommandsLimitsNVX { + pub s_type: StructureType, + pub p_next: *const c_void, + pub max_indirect_commands_layout_token_count: u32, + pub max_object_entry_counts: u32, + pub min_sequence_count_buffer_offset_alignment: u32, + pub min_sequence_index_buffer_offset_alignment: u32, + pub min_commands_token_buffer_offset_alignment: u32, +} +impl ::std::default::Default for DeviceGeneratedCommandsLimitsNVX { + fn default() -> DeviceGeneratedCommandsLimitsNVX { + DeviceGeneratedCommandsLimitsNVX { + s_type: StructureType::DEVICE_GENERATED_COMMANDS_LIMITS_NVX, + p_next: ::std::ptr::null(), + max_indirect_commands_layout_token_count: u32::default(), + max_object_entry_counts: u32::default(), + min_sequence_count_buffer_offset_alignment: u32::default(), + min_sequence_index_buffer_offset_alignment: u32::default(), + min_commands_token_buffer_offset_alignment: u32::default(), + } + } +} +impl DeviceGeneratedCommandsLimitsNVX { + pub fn builder<'a>() -> DeviceGeneratedCommandsLimitsNVXBuilder<'a> { + DeviceGeneratedCommandsLimitsNVXBuilder { + inner: DeviceGeneratedCommandsLimitsNVX::default(), + marker: ::std::marker::PhantomData, + } + } +} +#[repr(transparent)] +pub struct DeviceGeneratedCommandsLimitsNVXBuilder<'a> { + inner: DeviceGeneratedCommandsLimitsNVX, + marker: ::std::marker::PhantomData<&'a ()>, +} +pub unsafe trait ExtendsDeviceGeneratedCommandsLimitsNVX {} +impl<'a> ::std::ops::Deref for DeviceGeneratedCommandsLimitsNVXBuilder<'a> { + type Target = DeviceGeneratedCommandsLimitsNVX; + fn deref(&self) -> &Self::Target { + &self.inner + } +} +impl<'a> ::std::ops::DerefMut for DeviceGeneratedCommandsLimitsNVXBuilder<'a> { + fn deref_mut(&mut self) -> &mut Self::Target { + &mut self.inner + } +} +impl<'a> DeviceGeneratedCommandsLimitsNVXBuilder<'a> { + pub fn max_indirect_commands_layout_token_count( + mut self, + max_indirect_commands_layout_token_count: u32, + ) -> DeviceGeneratedCommandsLimitsNVXBuilder<'a> { + self.inner.max_indirect_commands_layout_token_count = + max_indirect_commands_layout_token_count; + self + } + pub fn max_object_entry_counts( + mut self, + max_object_entry_counts: u32, + ) -> DeviceGeneratedCommandsLimitsNVXBuilder<'a> { + self.inner.max_object_entry_counts = max_object_entry_counts; + self + } + pub fn min_sequence_count_buffer_offset_alignment( + mut self, + min_sequence_count_buffer_offset_alignment: u32, + ) -> DeviceGeneratedCommandsLimitsNVXBuilder<'a> { + self.inner.min_sequence_count_buffer_offset_alignment = + min_sequence_count_buffer_offset_alignment; + self + } + pub fn min_sequence_index_buffer_offset_alignment( + mut self, + min_sequence_index_buffer_offset_alignment: u32, + ) -> DeviceGeneratedCommandsLimitsNVXBuilder<'a> { + self.inner.min_sequence_index_buffer_offset_alignment = + min_sequence_index_buffer_offset_alignment; + self + } + pub fn min_commands_token_buffer_offset_alignment( + mut self, + min_commands_token_buffer_offset_alignment: u32, + ) -> DeviceGeneratedCommandsLimitsNVXBuilder<'a> { + self.inner.min_commands_token_buffer_offset_alignment = + min_commands_token_buffer_offset_alignment; + self + } + #[doc = r" Prepends the given extension struct between the root and the first pointer. 
This"] + #[doc = r" method only exists on structs that can be passed to a function directly. Only"] + #[doc = r" valid extension structs can be pushed into the chain."] + #[doc = r" If the chain looks like `A -> B -> C`, and you call `builder.push_next(&mut D)`, then the"] + #[doc = r" chain will look like `A -> D -> B -> C`."] + pub fn push_next( + mut self, + next: &'a mut T, + ) -> DeviceGeneratedCommandsLimitsNVXBuilder<'a> { + unsafe { + let next_ptr = next as *mut T as *mut BaseOutStructure; + let last_next = ptr_chain_iter(next).last().unwrap(); + (*last_next).p_next = self.inner.p_next as _; + self.inner.p_next = next_ptr as _; + } + self + } + #[doc = r" Calling build will **discard** all the lifetime information. Only call this if"] + #[doc = r" necessary! Builders implement `Deref` targeting their corresponding Vulkan struct,"] + #[doc = r" so references to builders can be passed directly to Vulkan functions."] + pub fn build(self) -> DeviceGeneratedCommandsLimitsNVX { + self.inner + } +} +#[repr(C)] +#[derive(Copy, Clone, Default, Debug)] +#[doc = ""] +pub struct IndirectCommandsTokenNVX { + pub token_type: IndirectCommandsTokenTypeNVX, + pub buffer: Buffer, + pub offset: DeviceSize, +} +impl IndirectCommandsTokenNVX { + pub fn builder<'a>() -> IndirectCommandsTokenNVXBuilder<'a> { + IndirectCommandsTokenNVXBuilder { + inner: IndirectCommandsTokenNVX::default(), + marker: ::std::marker::PhantomData, + } + } +} +#[repr(transparent)] +pub struct IndirectCommandsTokenNVXBuilder<'a> { + inner: IndirectCommandsTokenNVX, + marker: ::std::marker::PhantomData<&'a ()>, +} +impl<'a> ::std::ops::Deref for IndirectCommandsTokenNVXBuilder<'a> { + type Target = IndirectCommandsTokenNVX; + fn deref(&self) -> &Self::Target { + &self.inner + } +} +impl<'a> ::std::ops::DerefMut for IndirectCommandsTokenNVXBuilder<'a> { + fn deref_mut(&mut self) -> &mut Self::Target { + &mut self.inner + } +} +impl<'a> IndirectCommandsTokenNVXBuilder<'a> { + pub fn token_type( + mut self, + token_type: IndirectCommandsTokenTypeNVX, + ) -> IndirectCommandsTokenNVXBuilder<'a> { + self.inner.token_type = token_type; + self + } + pub fn buffer(mut self, buffer: Buffer) -> IndirectCommandsTokenNVXBuilder<'a> { + self.inner.buffer = buffer; + self + } + pub fn offset(mut self, offset: DeviceSize) -> IndirectCommandsTokenNVXBuilder<'a> { + self.inner.offset = offset; + self + } + #[doc = r" Calling build will **discard** all the lifetime information. Only call this if"] + #[doc = r" necessary! 
Builders implement `Deref` targeting their corresponding Vulkan struct,"] + #[doc = r" so references to builders can be passed directly to Vulkan functions."] + pub fn build(self) -> IndirectCommandsTokenNVX { + self.inner + } +} +#[repr(C)] +#[derive(Copy, Clone, Default, Debug)] +#[doc = ""] +pub struct IndirectCommandsLayoutTokenNVX { + pub token_type: IndirectCommandsTokenTypeNVX, + pub binding_unit: u32, + pub dynamic_count: u32, + pub divisor: u32, +} +impl IndirectCommandsLayoutTokenNVX { + pub fn builder<'a>() -> IndirectCommandsLayoutTokenNVXBuilder<'a> { + IndirectCommandsLayoutTokenNVXBuilder { + inner: IndirectCommandsLayoutTokenNVX::default(), + marker: ::std::marker::PhantomData, + } + } +} +#[repr(transparent)] +pub struct IndirectCommandsLayoutTokenNVXBuilder<'a> { + inner: IndirectCommandsLayoutTokenNVX, + marker: ::std::marker::PhantomData<&'a ()>, +} +impl<'a> ::std::ops::Deref for IndirectCommandsLayoutTokenNVXBuilder<'a> { + type Target = IndirectCommandsLayoutTokenNVX; + fn deref(&self) -> &Self::Target { + &self.inner + } +} +impl<'a> ::std::ops::DerefMut for IndirectCommandsLayoutTokenNVXBuilder<'a> { + fn deref_mut(&mut self) -> &mut Self::Target { + &mut self.inner + } +} +impl<'a> IndirectCommandsLayoutTokenNVXBuilder<'a> { + pub fn token_type( + mut self, + token_type: IndirectCommandsTokenTypeNVX, + ) -> IndirectCommandsLayoutTokenNVXBuilder<'a> { + self.inner.token_type = token_type; + self + } + pub fn binding_unit(mut self, binding_unit: u32) -> IndirectCommandsLayoutTokenNVXBuilder<'a> { + self.inner.binding_unit = binding_unit; + self + } + pub fn dynamic_count( + mut self, + dynamic_count: u32, + ) -> IndirectCommandsLayoutTokenNVXBuilder<'a> { + self.inner.dynamic_count = dynamic_count; + self + } + pub fn divisor(mut self, divisor: u32) -> IndirectCommandsLayoutTokenNVXBuilder<'a> { + self.inner.divisor = divisor; + self + } + #[doc = r" Calling build will **discard** all the lifetime information. Only call this if"] + #[doc = r" necessary! 
Builders implement `Deref` targeting their corresponding Vulkan struct,"]
+    #[doc = r" so references to builders can be passed directly to Vulkan functions."]
+    pub fn build(self) -> IndirectCommandsLayoutTokenNVX {
+        self.inner
+    }
+}
+#[repr(C)]
+#[derive(Copy, Clone, Debug)]
+#[doc = ""]
+pub struct IndirectCommandsLayoutCreateInfoNVX {
+    pub s_type: StructureType,
+    pub p_next: *const c_void,
+    pub pipeline_bind_point: PipelineBindPoint,
+    pub flags: IndirectCommandsLayoutUsageFlagsNVX,
+    pub token_count: u32,
+    pub p_tokens: *const IndirectCommandsLayoutTokenNVX,
+}
+impl ::std::default::Default for IndirectCommandsLayoutCreateInfoNVX {
+    fn default() -> IndirectCommandsLayoutCreateInfoNVX {
+        IndirectCommandsLayoutCreateInfoNVX {
+            s_type: StructureType::INDIRECT_COMMANDS_LAYOUT_CREATE_INFO_NVX,
+            p_next: ::std::ptr::null(),
+            pipeline_bind_point: PipelineBindPoint::default(),
+            flags: IndirectCommandsLayoutUsageFlagsNVX::default(),
+            token_count: u32::default(),
+            p_tokens: ::std::ptr::null(),
+        }
+    }
+}
+impl IndirectCommandsLayoutCreateInfoNVX {
+    pub fn builder<'a>() -> IndirectCommandsLayoutCreateInfoNVXBuilder<'a> {
+        IndirectCommandsLayoutCreateInfoNVXBuilder {
+            inner: IndirectCommandsLayoutCreateInfoNVX::default(),
+            marker: ::std::marker::PhantomData,
+        }
+    }
+}
+#[repr(transparent)]
+pub struct IndirectCommandsLayoutCreateInfoNVXBuilder<'a> {
+    inner: IndirectCommandsLayoutCreateInfoNVX,
+    marker: ::std::marker::PhantomData<&'a ()>,
+}
+pub unsafe trait ExtendsIndirectCommandsLayoutCreateInfoNVX {}
+impl<'a> ::std::ops::Deref for IndirectCommandsLayoutCreateInfoNVXBuilder<'a> {
+    type Target = IndirectCommandsLayoutCreateInfoNVX;
+    fn deref(&self) -> &Self::Target {
+        &self.inner
+    }
+}
+impl<'a> ::std::ops::DerefMut for IndirectCommandsLayoutCreateInfoNVXBuilder<'a> {
+    fn deref_mut(&mut self) -> &mut Self::Target {
+        &mut self.inner
+    }
+}
+impl<'a> IndirectCommandsLayoutCreateInfoNVXBuilder<'a> {
+    pub fn pipeline_bind_point(
+        mut self,
+        pipeline_bind_point: PipelineBindPoint,
+    ) -> IndirectCommandsLayoutCreateInfoNVXBuilder<'a> {
+        self.inner.pipeline_bind_point = pipeline_bind_point;
+        self
+    }
+    pub fn flags(
+        mut self,
+        flags: IndirectCommandsLayoutUsageFlagsNVX,
+    ) -> IndirectCommandsLayoutCreateInfoNVXBuilder<'a> {
+        self.inner.flags = flags;
+        self
+    }
+    pub fn tokens(
+        mut self,
+        tokens: &'a [IndirectCommandsLayoutTokenNVX],
+    ) -> IndirectCommandsLayoutCreateInfoNVXBuilder<'a> {
+        self.inner.token_count = tokens.len() as _;
+        self.inner.p_tokens = tokens.as_ptr();
+        self
+    }
+    #[doc = r" Prepends the given extension struct between the root and the first pointer. This"]
+    #[doc = r" method only exists on structs that can be passed to a function directly. Only"]
+    #[doc = r" valid extension structs can be pushed into the chain."]
+    #[doc = r" If the chain looks like `A -> B -> C`, and you call `builder.push_next(&mut D)`, then the"]
+    #[doc = r" chain will look like `A -> D -> B -> C`."]
+    pub fn push_next<T: ExtendsIndirectCommandsLayoutCreateInfoNVX>(
+        mut self,
+        next: &'a mut T,
+    ) -> IndirectCommandsLayoutCreateInfoNVXBuilder<'a> {
+        unsafe {
+            let next_ptr = next as *mut T as *mut BaseOutStructure;
+            let last_next = ptr_chain_iter(next).last().unwrap();
+            (*last_next).p_next = self.inner.p_next as _;
+            self.inner.p_next = next_ptr as _;
+        }
+        self
+    }
+    #[doc = r" Calling build will **discard** all the lifetime information. Only call this if"]
+    #[doc = r" necessary! 
Builders implement `Deref` targeting their corresponding Vulkan struct,"] + #[doc = r" so references to builders can be passed directly to Vulkan functions."] + pub fn build(self) -> IndirectCommandsLayoutCreateInfoNVX { + self.inner + } +} +#[repr(C)] +#[derive(Copy, Clone, Debug)] +#[doc = ""] +pub struct CmdProcessCommandsInfoNVX { + pub s_type: StructureType, + pub p_next: *const c_void, + pub object_table: ObjectTableNVX, + pub indirect_commands_layout: IndirectCommandsLayoutNVX, + pub indirect_commands_token_count: u32, + pub p_indirect_commands_tokens: *const IndirectCommandsTokenNVX, + pub max_sequences_count: u32, + pub target_command_buffer: CommandBuffer, + pub sequences_count_buffer: Buffer, + pub sequences_count_offset: DeviceSize, + pub sequences_index_buffer: Buffer, + pub sequences_index_offset: DeviceSize, +} +impl ::std::default::Default for CmdProcessCommandsInfoNVX { + fn default() -> CmdProcessCommandsInfoNVX { + CmdProcessCommandsInfoNVX { + s_type: StructureType::CMD_PROCESS_COMMANDS_INFO_NVX, + p_next: ::std::ptr::null(), + object_table: ObjectTableNVX::default(), + indirect_commands_layout: IndirectCommandsLayoutNVX::default(), + indirect_commands_token_count: u32::default(), + p_indirect_commands_tokens: ::std::ptr::null(), + max_sequences_count: u32::default(), + target_command_buffer: CommandBuffer::default(), + sequences_count_buffer: Buffer::default(), + sequences_count_offset: DeviceSize::default(), + sequences_index_buffer: Buffer::default(), + sequences_index_offset: DeviceSize::default(), + } + } +} +impl CmdProcessCommandsInfoNVX { + pub fn builder<'a>() -> CmdProcessCommandsInfoNVXBuilder<'a> { + CmdProcessCommandsInfoNVXBuilder { + inner: CmdProcessCommandsInfoNVX::default(), + marker: ::std::marker::PhantomData, + } + } +} +#[repr(transparent)] +pub struct CmdProcessCommandsInfoNVXBuilder<'a> { + inner: CmdProcessCommandsInfoNVX, + marker: ::std::marker::PhantomData<&'a ()>, +} +pub unsafe trait ExtendsCmdProcessCommandsInfoNVX {} +impl<'a> ::std::ops::Deref for CmdProcessCommandsInfoNVXBuilder<'a> { + type Target = CmdProcessCommandsInfoNVX; + fn deref(&self) -> &Self::Target { + &self.inner + } +} +impl<'a> ::std::ops::DerefMut for CmdProcessCommandsInfoNVXBuilder<'a> { + fn deref_mut(&mut self) -> &mut Self::Target { + &mut self.inner + } +} +impl<'a> CmdProcessCommandsInfoNVXBuilder<'a> { + pub fn object_table( + mut self, + object_table: ObjectTableNVX, + ) -> CmdProcessCommandsInfoNVXBuilder<'a> { + self.inner.object_table = object_table; + self + } + pub fn indirect_commands_layout( + mut self, + indirect_commands_layout: IndirectCommandsLayoutNVX, + ) -> CmdProcessCommandsInfoNVXBuilder<'a> { + self.inner.indirect_commands_layout = indirect_commands_layout; + self + } + pub fn indirect_commands_tokens( + mut self, + indirect_commands_tokens: &'a [IndirectCommandsTokenNVX], + ) -> CmdProcessCommandsInfoNVXBuilder<'a> { + self.inner.indirect_commands_token_count = indirect_commands_tokens.len() as _; + self.inner.p_indirect_commands_tokens = indirect_commands_tokens.as_ptr(); + self + } + pub fn max_sequences_count( + mut self, + max_sequences_count: u32, + ) -> CmdProcessCommandsInfoNVXBuilder<'a> { + self.inner.max_sequences_count = max_sequences_count; + self + } + pub fn target_command_buffer( + mut self, + target_command_buffer: CommandBuffer, + ) -> CmdProcessCommandsInfoNVXBuilder<'a> { + self.inner.target_command_buffer = target_command_buffer; + self + } + pub fn sequences_count_buffer( + mut self, + sequences_count_buffer: Buffer, + ) 
-> CmdProcessCommandsInfoNVXBuilder<'a> {
+        self.inner.sequences_count_buffer = sequences_count_buffer;
+        self
+    }
+    pub fn sequences_count_offset(
+        mut self,
+        sequences_count_offset: DeviceSize,
+    ) -> CmdProcessCommandsInfoNVXBuilder<'a> {
+        self.inner.sequences_count_offset = sequences_count_offset;
+        self
+    }
+    pub fn sequences_index_buffer(
+        mut self,
+        sequences_index_buffer: Buffer,
+    ) -> CmdProcessCommandsInfoNVXBuilder<'a> {
+        self.inner.sequences_index_buffer = sequences_index_buffer;
+        self
+    }
+    pub fn sequences_index_offset(
+        mut self,
+        sequences_index_offset: DeviceSize,
+    ) -> CmdProcessCommandsInfoNVXBuilder<'a> {
+        self.inner.sequences_index_offset = sequences_index_offset;
+        self
+    }
+    #[doc = r" Prepends the given extension struct between the root and the first pointer. This"]
+    #[doc = r" method only exists on structs that can be passed to a function directly. Only"]
+    #[doc = r" valid extension structs can be pushed into the chain."]
+    #[doc = r" If the chain looks like `A -> B -> C`, and you call `builder.push_next(&mut D)`, then the"]
+    #[doc = r" chain will look like `A -> D -> B -> C`."]
+    pub fn push_next<T: ExtendsCmdProcessCommandsInfoNVX>(
+        mut self,
+        next: &'a mut T,
+    ) -> CmdProcessCommandsInfoNVXBuilder<'a> {
+        unsafe {
+            let next_ptr = next as *mut T as *mut BaseOutStructure;
+            let last_next = ptr_chain_iter(next).last().unwrap();
+            (*last_next).p_next = self.inner.p_next as _;
+            self.inner.p_next = next_ptr as _;
+        }
+        self
+    }
+    #[doc = r" Calling build will **discard** all the lifetime information. Only call this if"]
+    #[doc = r" necessary! Builders implement `Deref` targeting their corresponding Vulkan struct,"]
+    #[doc = r" so references to builders can be passed directly to Vulkan functions."]
+    pub fn build(self) -> CmdProcessCommandsInfoNVX {
+        self.inner
+    }
+}
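The slice setters above are why these builders carry a lifetime: each one writes both the count field and the raw pointer from a single `&'a [..]` borrow, so the two can never disagree and the borrowed data must outlive the builder. A minimal usage sketch (the `object_table`, `layout`, and `tokens` arguments are hypothetical inputs, not part of this patch):

fn make_process_info<'a>(
    object_table: ObjectTableNVX,
    layout: IndirectCommandsLayoutNVX,
    tokens: &'a [IndirectCommandsTokenNVX],
) -> CmdProcessCommandsInfoNVXBuilder<'a> {
    // indirect_commands_tokens() stores tokens.len() into
    // indirect_commands_token_count and tokens.as_ptr() into
    // p_indirect_commands_tokens in one call.
    CmdProcessCommandsInfoNVX::builder()
        .object_table(object_table)
        .indirect_commands_layout(layout)
        .indirect_commands_tokens(tokens)
        .max_sequences_count(tokens.len() as u32)
}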
+#[repr(C)]
+#[derive(Copy, Clone, Debug)]
+#[doc = ""]
+pub struct CmdReserveSpaceForCommandsInfoNVX {
+    pub s_type: StructureType,
+    pub p_next: *const c_void,
+    pub object_table: ObjectTableNVX,
+    pub indirect_commands_layout: IndirectCommandsLayoutNVX,
+    pub max_sequences_count: u32,
+}
+impl ::std::default::Default for CmdReserveSpaceForCommandsInfoNVX {
+    fn default() -> CmdReserveSpaceForCommandsInfoNVX {
+        CmdReserveSpaceForCommandsInfoNVX {
+            s_type: StructureType::CMD_RESERVE_SPACE_FOR_COMMANDS_INFO_NVX,
+            p_next: ::std::ptr::null(),
+            object_table: ObjectTableNVX::default(),
+            indirect_commands_layout: IndirectCommandsLayoutNVX::default(),
+            max_sequences_count: u32::default(),
+        }
+    }
+}
+impl CmdReserveSpaceForCommandsInfoNVX {
+    pub fn builder<'a>() -> CmdReserveSpaceForCommandsInfoNVXBuilder<'a> {
+        CmdReserveSpaceForCommandsInfoNVXBuilder {
+            inner: CmdReserveSpaceForCommandsInfoNVX::default(),
+            marker: ::std::marker::PhantomData,
+        }
+    }
+}
+#[repr(transparent)]
+pub struct CmdReserveSpaceForCommandsInfoNVXBuilder<'a> {
+    inner: CmdReserveSpaceForCommandsInfoNVX,
+    marker: ::std::marker::PhantomData<&'a ()>,
+}
+pub unsafe trait ExtendsCmdReserveSpaceForCommandsInfoNVX {}
+impl<'a> ::std::ops::Deref for CmdReserveSpaceForCommandsInfoNVXBuilder<'a> {
+    type Target = CmdReserveSpaceForCommandsInfoNVX;
+    fn deref(&self) -> &Self::Target {
+        &self.inner
+    }
+}
+impl<'a> ::std::ops::DerefMut for CmdReserveSpaceForCommandsInfoNVXBuilder<'a> {
+    fn deref_mut(&mut self) -> &mut Self::Target {
+        &mut self.inner
+    }
+}
+impl<'a> CmdReserveSpaceForCommandsInfoNVXBuilder<'a> {
+    pub fn object_table(
+        mut self,
+        object_table: ObjectTableNVX,
+    ) -> CmdReserveSpaceForCommandsInfoNVXBuilder<'a> {
+        self.inner.object_table = object_table;
+        self
+    }
+    pub fn indirect_commands_layout(
+        mut self,
+        indirect_commands_layout: IndirectCommandsLayoutNVX,
+    ) -> CmdReserveSpaceForCommandsInfoNVXBuilder<'a> {
+        self.inner.indirect_commands_layout = indirect_commands_layout;
+        self
+    }
+    pub fn max_sequences_count(
+        mut self,
+        max_sequences_count: u32,
+    ) -> CmdReserveSpaceForCommandsInfoNVXBuilder<'a> {
+        self.inner.max_sequences_count = max_sequences_count;
+        self
+    }
+    #[doc = r" Prepends the given extension struct between the root and the first pointer. This"]
+    #[doc = r" method only exists on structs that can be passed to a function directly. Only"]
+    #[doc = r" valid extension structs can be pushed into the chain."]
+    #[doc = r" If the chain looks like `A -> B -> C`, and you call `builder.push_next(&mut D)`, then the"]
+    #[doc = r" chain will look like `A -> D -> B -> C`."]
+    pub fn push_next<T: ExtendsCmdReserveSpaceForCommandsInfoNVX>(
+        mut self,
+        next: &'a mut T,
+    ) -> CmdReserveSpaceForCommandsInfoNVXBuilder<'a> {
+        unsafe {
+            let next_ptr = next as *mut T as *mut BaseOutStructure;
+            let last_next = ptr_chain_iter(next).last().unwrap();
+            (*last_next).p_next = self.inner.p_next as _;
+            self.inner.p_next = next_ptr as _;
+        }
+        self
+    }
+    #[doc = r" Calling build will **discard** all the lifetime information. Only call this if"]
+    #[doc = r" necessary! Builders implement `Deref` targeting their corresponding Vulkan struct,"]
+    #[doc = r" so references to builders can be passed directly to Vulkan functions."]
+    pub fn build(self) -> CmdReserveSpaceForCommandsInfoNVX {
+        self.inner
+    }
+}
+#[repr(C)]
+#[derive(Copy, Clone, Debug)]
+#[doc = ""]
+pub struct ObjectTableCreateInfoNVX {
+    pub s_type: StructureType,
+    pub p_next: *const c_void,
+    pub object_count: u32,
+    pub p_object_entry_types: *const ObjectEntryTypeNVX,
+    pub p_object_entry_counts: *const u32,
+    pub p_object_entry_usage_flags: *const ObjectEntryUsageFlagsNVX,
+    pub max_uniform_buffers_per_descriptor: u32,
+    pub max_storage_buffers_per_descriptor: u32,
+    pub max_storage_images_per_descriptor: u32,
+    pub max_sampled_images_per_descriptor: u32,
+    pub max_pipeline_layouts: u32,
+}
+impl ::std::default::Default for ObjectTableCreateInfoNVX {
+    fn default() -> ObjectTableCreateInfoNVX {
+        ObjectTableCreateInfoNVX {
+            s_type: StructureType::OBJECT_TABLE_CREATE_INFO_NVX,
+            p_next: ::std::ptr::null(),
+            object_count: u32::default(),
+            p_object_entry_types: ::std::ptr::null(),
+            p_object_entry_counts: ::std::ptr::null(),
+            p_object_entry_usage_flags: ::std::ptr::null(),
+            max_uniform_buffers_per_descriptor: u32::default(),
+            max_storage_buffers_per_descriptor: u32::default(),
+            max_storage_images_per_descriptor: u32::default(),
+            max_sampled_images_per_descriptor: u32::default(),
+            max_pipeline_layouts: u32::default(),
+        }
+    }
+}
+impl ObjectTableCreateInfoNVX {
+    pub fn builder<'a>() -> ObjectTableCreateInfoNVXBuilder<'a> {
+        ObjectTableCreateInfoNVXBuilder {
+            inner: ObjectTableCreateInfoNVX::default(),
+            marker: ::std::marker::PhantomData,
+        }
+    }
+}
+#[repr(transparent)]
+pub struct ObjectTableCreateInfoNVXBuilder<'a> {
+    inner: ObjectTableCreateInfoNVX,
+    marker: ::std::marker::PhantomData<&'a ()>,
+}
+pub unsafe trait ExtendsObjectTableCreateInfoNVX {}
+impl<'a> ::std::ops::Deref for ObjectTableCreateInfoNVXBuilder<'a> {
+    type Target = ObjectTableCreateInfoNVX;
+    fn deref(&self) -> &Self::Target {
+        &self.inner
+    }
+}
+impl<'a> ::std::ops::DerefMut for ObjectTableCreateInfoNVXBuilder<'a> {
+    fn deref_mut(&mut self) -> &mut Self::Target {
+        &mut self.inner
+    }
+}
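Because every builder derefs to its Vulkan struct, a `&Builder` coerces to `&Struct` and can be handed to FFI without calling `build()`, which would discard the lifetime information. A sketch under that assumption (`consume` is a hypothetical stand-in for a raw Vulkan call, not part of this patch):

fn consume(info: &CmdReserveSpaceForCommandsInfoNVX) {
    // A real caller would pass info to vkCmdReserveSpaceForCommandsNVX;
    // reading a field is enough to show the deref coercion.
    let _ = info.max_sequences_count;
}

fn reserve(object_table: ObjectTableNVX, layout: IndirectCommandsLayoutNVX) {
    let builder = CmdReserveSpaceForCommandsInfoNVX::builder()
        .object_table(object_table)
        .indirect_commands_layout(layout)
        .max_sequences_count(64);
    // &builder auto-derefs to &CmdReserveSpaceForCommandsInfoNVX.
    consume(&builder);
}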
+impl<'a> ObjectTableCreateInfoNVXBuilder<'a> {
+    pub fn object_entry_types(
+        mut self,
+        object_entry_types: &'a [ObjectEntryTypeNVX],
+    ) -> ObjectTableCreateInfoNVXBuilder<'a> {
+        self.inner.object_count = object_entry_types.len() as _;
+        self.inner.p_object_entry_types = object_entry_types.as_ptr();
+        self
+    }
+    pub fn object_entry_counts(
+        mut self,
+        object_entry_counts: &'a [u32],
+    ) -> ObjectTableCreateInfoNVXBuilder<'a> {
+        self.inner.object_count = object_entry_counts.len() as _;
+        self.inner.p_object_entry_counts = object_entry_counts.as_ptr();
+        self
+    }
+    pub fn object_entry_usage_flags(
+        mut self,
+        object_entry_usage_flags: &'a [ObjectEntryUsageFlagsNVX],
+    ) -> ObjectTableCreateInfoNVXBuilder<'a> {
+        self.inner.object_count = object_entry_usage_flags.len() as _;
+        self.inner.p_object_entry_usage_flags = object_entry_usage_flags.as_ptr();
+        self
+    }
+    pub fn max_uniform_buffers_per_descriptor(
+        mut self,
+        max_uniform_buffers_per_descriptor: u32,
+    ) -> ObjectTableCreateInfoNVXBuilder<'a> {
+        self.inner.max_uniform_buffers_per_descriptor = max_uniform_buffers_per_descriptor;
+        self
+    }
+    pub fn max_storage_buffers_per_descriptor(
+        mut self,
+        max_storage_buffers_per_descriptor: u32,
+    ) -> ObjectTableCreateInfoNVXBuilder<'a> {
+        self.inner.max_storage_buffers_per_descriptor = max_storage_buffers_per_descriptor;
+        self
+    }
+    pub fn max_storage_images_per_descriptor(
+        mut self,
+        max_storage_images_per_descriptor: u32,
+    ) -> ObjectTableCreateInfoNVXBuilder<'a> {
+        self.inner.max_storage_images_per_descriptor = max_storage_images_per_descriptor;
+        self
+    }
+    pub fn max_sampled_images_per_descriptor(
+        mut self,
+        max_sampled_images_per_descriptor: u32,
+    ) -> ObjectTableCreateInfoNVXBuilder<'a> {
+        self.inner.max_sampled_images_per_descriptor = max_sampled_images_per_descriptor;
+        self
+    }
+    pub fn max_pipeline_layouts(
+        mut self,
+        max_pipeline_layouts: u32,
+    ) -> ObjectTableCreateInfoNVXBuilder<'a> {
+        self.inner.max_pipeline_layouts = max_pipeline_layouts;
+        self
+    }
+    #[doc = r" Prepends the given extension struct between the root and the first pointer. This"]
+    #[doc = r" method only exists on structs that can be passed to a function directly. Only"]
+    #[doc = r" valid extension structs can be pushed into the chain."]
+    #[doc = r" If the chain looks like `A -> B -> C`, and you call `builder.push_next(&mut D)`, then the"]
+    #[doc = r" chain will look like `A -> D -> B -> C`."]
+    pub fn push_next<T: ExtendsObjectTableCreateInfoNVX>(
+        mut self,
+        next: &'a mut T,
+    ) -> ObjectTableCreateInfoNVXBuilder<'a> {
+        unsafe {
+            let next_ptr = next as *mut T as *mut BaseOutStructure;
+            let last_next = ptr_chain_iter(next).last().unwrap();
+            (*last_next).p_next = self.inner.p_next as _;
+            self.inner.p_next = next_ptr as _;
+        }
+        self
+    }
+    #[doc = r" Calling build will **discard** all the lifetime information. Only call this if"]
+    #[doc = r" necessary! 
Builders implement `Deref` targeting their corresponding Vulkan struct,"] + #[doc = r" so references to builders can be passed directly to Vulkan functions."] + pub fn build(self) -> ObjectTableCreateInfoNVX { + self.inner + } +} +#[repr(C)] +#[derive(Copy, Clone, Default, Debug)] +#[doc = ""] +pub struct ObjectTableEntryNVX { + pub ty: ObjectEntryTypeNVX, + pub flags: ObjectEntryUsageFlagsNVX, +} +impl ObjectTableEntryNVX { + pub fn builder<'a>() -> ObjectTableEntryNVXBuilder<'a> { + ObjectTableEntryNVXBuilder { + inner: ObjectTableEntryNVX::default(), + marker: ::std::marker::PhantomData, + } + } +} +#[repr(transparent)] +pub struct ObjectTableEntryNVXBuilder<'a> { + inner: ObjectTableEntryNVX, + marker: ::std::marker::PhantomData<&'a ()>, +} +impl<'a> ::std::ops::Deref for ObjectTableEntryNVXBuilder<'a> { + type Target = ObjectTableEntryNVX; + fn deref(&self) -> &Self::Target { + &self.inner + } +} +impl<'a> ::std::ops::DerefMut for ObjectTableEntryNVXBuilder<'a> { + fn deref_mut(&mut self) -> &mut Self::Target { + &mut self.inner + } +} +impl<'a> ObjectTableEntryNVXBuilder<'a> { + pub fn ty(mut self, ty: ObjectEntryTypeNVX) -> ObjectTableEntryNVXBuilder<'a> { + self.inner.ty = ty; + self + } + pub fn flags(mut self, flags: ObjectEntryUsageFlagsNVX) -> ObjectTableEntryNVXBuilder<'a> { + self.inner.flags = flags; + self + } + #[doc = r" Calling build will **discard** all the lifetime information. Only call this if"] + #[doc = r" necessary! Builders implement `Deref` targeting their corresponding Vulkan struct,"] + #[doc = r" so references to builders can be passed directly to Vulkan functions."] + pub fn build(self) -> ObjectTableEntryNVX { + self.inner + } +} +#[repr(C)] +#[derive(Copy, Clone, Default, Debug)] +#[doc = ""] +pub struct ObjectTablePipelineEntryNVX { + pub ty: ObjectEntryTypeNVX, + pub flags: ObjectEntryUsageFlagsNVX, + pub pipeline: Pipeline, +} +impl ObjectTablePipelineEntryNVX { + pub fn builder<'a>() -> ObjectTablePipelineEntryNVXBuilder<'a> { + ObjectTablePipelineEntryNVXBuilder { + inner: ObjectTablePipelineEntryNVX::default(), + marker: ::std::marker::PhantomData, + } + } +} +#[repr(transparent)] +pub struct ObjectTablePipelineEntryNVXBuilder<'a> { + inner: ObjectTablePipelineEntryNVX, + marker: ::std::marker::PhantomData<&'a ()>, +} +impl<'a> ::std::ops::Deref for ObjectTablePipelineEntryNVXBuilder<'a> { + type Target = ObjectTablePipelineEntryNVX; + fn deref(&self) -> &Self::Target { + &self.inner + } +} +impl<'a> ::std::ops::DerefMut for ObjectTablePipelineEntryNVXBuilder<'a> { + fn deref_mut(&mut self) -> &mut Self::Target { + &mut self.inner + } +} +impl<'a> ObjectTablePipelineEntryNVXBuilder<'a> { + pub fn ty(mut self, ty: ObjectEntryTypeNVX) -> ObjectTablePipelineEntryNVXBuilder<'a> { + self.inner.ty = ty; + self + } + pub fn flags( + mut self, + flags: ObjectEntryUsageFlagsNVX, + ) -> ObjectTablePipelineEntryNVXBuilder<'a> { + self.inner.flags = flags; + self + } + pub fn pipeline(mut self, pipeline: Pipeline) -> ObjectTablePipelineEntryNVXBuilder<'a> { + self.inner.pipeline = pipeline; + self + } + #[doc = r" Calling build will **discard** all the lifetime information. Only call this if"] + #[doc = r" necessary! 
Builders implement `Deref` targeting their corresponding Vulkan struct,"] + #[doc = r" so references to builders can be passed directly to Vulkan functions."] + pub fn build(self) -> ObjectTablePipelineEntryNVX { + self.inner + } +} +#[repr(C)] +#[derive(Copy, Clone, Default, Debug)] +#[doc = ""] +pub struct ObjectTableDescriptorSetEntryNVX { + pub ty: ObjectEntryTypeNVX, + pub flags: ObjectEntryUsageFlagsNVX, + pub pipeline_layout: PipelineLayout, + pub descriptor_set: DescriptorSet, +} +impl ObjectTableDescriptorSetEntryNVX { + pub fn builder<'a>() -> ObjectTableDescriptorSetEntryNVXBuilder<'a> { + ObjectTableDescriptorSetEntryNVXBuilder { + inner: ObjectTableDescriptorSetEntryNVX::default(), + marker: ::std::marker::PhantomData, + } + } +} +#[repr(transparent)] +pub struct ObjectTableDescriptorSetEntryNVXBuilder<'a> { + inner: ObjectTableDescriptorSetEntryNVX, + marker: ::std::marker::PhantomData<&'a ()>, +} +impl<'a> ::std::ops::Deref for ObjectTableDescriptorSetEntryNVXBuilder<'a> { + type Target = ObjectTableDescriptorSetEntryNVX; + fn deref(&self) -> &Self::Target { + &self.inner + } +} +impl<'a> ::std::ops::DerefMut for ObjectTableDescriptorSetEntryNVXBuilder<'a> { + fn deref_mut(&mut self) -> &mut Self::Target { + &mut self.inner + } +} +impl<'a> ObjectTableDescriptorSetEntryNVXBuilder<'a> { + pub fn ty(mut self, ty: ObjectEntryTypeNVX) -> ObjectTableDescriptorSetEntryNVXBuilder<'a> { + self.inner.ty = ty; + self + } + pub fn flags( + mut self, + flags: ObjectEntryUsageFlagsNVX, + ) -> ObjectTableDescriptorSetEntryNVXBuilder<'a> { + self.inner.flags = flags; + self + } + pub fn pipeline_layout( + mut self, + pipeline_layout: PipelineLayout, + ) -> ObjectTableDescriptorSetEntryNVXBuilder<'a> { + self.inner.pipeline_layout = pipeline_layout; + self + } + pub fn descriptor_set( + mut self, + descriptor_set: DescriptorSet, + ) -> ObjectTableDescriptorSetEntryNVXBuilder<'a> { + self.inner.descriptor_set = descriptor_set; + self + } + #[doc = r" Calling build will **discard** all the lifetime information. Only call this if"] + #[doc = r" necessary! 
Builders implement `Deref` targeting their corresponding Vulkan struct,"] + #[doc = r" so references to builders can be passed directly to Vulkan functions."] + pub fn build(self) -> ObjectTableDescriptorSetEntryNVX { + self.inner + } +} +#[repr(C)] +#[derive(Copy, Clone, Default, Debug)] +#[doc = ""] +pub struct ObjectTableVertexBufferEntryNVX { + pub ty: ObjectEntryTypeNVX, + pub flags: ObjectEntryUsageFlagsNVX, + pub buffer: Buffer, +} +impl ObjectTableVertexBufferEntryNVX { + pub fn builder<'a>() -> ObjectTableVertexBufferEntryNVXBuilder<'a> { + ObjectTableVertexBufferEntryNVXBuilder { + inner: ObjectTableVertexBufferEntryNVX::default(), + marker: ::std::marker::PhantomData, + } + } +} +#[repr(transparent)] +pub struct ObjectTableVertexBufferEntryNVXBuilder<'a> { + inner: ObjectTableVertexBufferEntryNVX, + marker: ::std::marker::PhantomData<&'a ()>, +} +impl<'a> ::std::ops::Deref for ObjectTableVertexBufferEntryNVXBuilder<'a> { + type Target = ObjectTableVertexBufferEntryNVX; + fn deref(&self) -> &Self::Target { + &self.inner + } +} +impl<'a> ::std::ops::DerefMut for ObjectTableVertexBufferEntryNVXBuilder<'a> { + fn deref_mut(&mut self) -> &mut Self::Target { + &mut self.inner + } +} +impl<'a> ObjectTableVertexBufferEntryNVXBuilder<'a> { + pub fn ty(mut self, ty: ObjectEntryTypeNVX) -> ObjectTableVertexBufferEntryNVXBuilder<'a> { + self.inner.ty = ty; + self + } + pub fn flags( + mut self, + flags: ObjectEntryUsageFlagsNVX, + ) -> ObjectTableVertexBufferEntryNVXBuilder<'a> { + self.inner.flags = flags; + self + } + pub fn buffer(mut self, buffer: Buffer) -> ObjectTableVertexBufferEntryNVXBuilder<'a> { + self.inner.buffer = buffer; + self + } + #[doc = r" Calling build will **discard** all the lifetime information. Only call this if"] + #[doc = r" necessary! 
Builders implement `Deref` targeting their corresponding Vulkan struct,"] + #[doc = r" so references to builders can be passed directly to Vulkan functions."] + pub fn build(self) -> ObjectTableVertexBufferEntryNVX { + self.inner + } +} +#[repr(C)] +#[derive(Copy, Clone, Default, Debug)] +#[doc = ""] +pub struct ObjectTableIndexBufferEntryNVX { + pub ty: ObjectEntryTypeNVX, + pub flags: ObjectEntryUsageFlagsNVX, + pub buffer: Buffer, + pub index_type: IndexType, +} +impl ObjectTableIndexBufferEntryNVX { + pub fn builder<'a>() -> ObjectTableIndexBufferEntryNVXBuilder<'a> { + ObjectTableIndexBufferEntryNVXBuilder { + inner: ObjectTableIndexBufferEntryNVX::default(), + marker: ::std::marker::PhantomData, + } + } +} +#[repr(transparent)] +pub struct ObjectTableIndexBufferEntryNVXBuilder<'a> { + inner: ObjectTableIndexBufferEntryNVX, + marker: ::std::marker::PhantomData<&'a ()>, +} +impl<'a> ::std::ops::Deref for ObjectTableIndexBufferEntryNVXBuilder<'a> { + type Target = ObjectTableIndexBufferEntryNVX; + fn deref(&self) -> &Self::Target { + &self.inner + } +} +impl<'a> ::std::ops::DerefMut for ObjectTableIndexBufferEntryNVXBuilder<'a> { + fn deref_mut(&mut self) -> &mut Self::Target { + &mut self.inner + } +} +impl<'a> ObjectTableIndexBufferEntryNVXBuilder<'a> { + pub fn ty(mut self, ty: ObjectEntryTypeNVX) -> ObjectTableIndexBufferEntryNVXBuilder<'a> { + self.inner.ty = ty; + self + } + pub fn flags( + mut self, + flags: ObjectEntryUsageFlagsNVX, + ) -> ObjectTableIndexBufferEntryNVXBuilder<'a> { + self.inner.flags = flags; + self + } + pub fn buffer(mut self, buffer: Buffer) -> ObjectTableIndexBufferEntryNVXBuilder<'a> { + self.inner.buffer = buffer; + self + } + pub fn index_type( + mut self, + index_type: IndexType, + ) -> ObjectTableIndexBufferEntryNVXBuilder<'a> { + self.inner.index_type = index_type; + self + } + #[doc = r" Calling build will **discard** all the lifetime information. Only call this if"] + #[doc = r" necessary! 
Builders implement `Deref` targeting their corresponding Vulkan struct,"] + #[doc = r" so references to builders can be passed directly to Vulkan functions."] + pub fn build(self) -> ObjectTableIndexBufferEntryNVX { + self.inner + } +} +#[repr(C)] +#[derive(Copy, Clone, Default, Debug)] +#[doc = ""] +pub struct ObjectTablePushConstantEntryNVX { + pub ty: ObjectEntryTypeNVX, + pub flags: ObjectEntryUsageFlagsNVX, + pub pipeline_layout: PipelineLayout, + pub stage_flags: ShaderStageFlags, +} +impl ObjectTablePushConstantEntryNVX { + pub fn builder<'a>() -> ObjectTablePushConstantEntryNVXBuilder<'a> { + ObjectTablePushConstantEntryNVXBuilder { + inner: ObjectTablePushConstantEntryNVX::default(), + marker: ::std::marker::PhantomData, + } + } +} +#[repr(transparent)] +pub struct ObjectTablePushConstantEntryNVXBuilder<'a> { + inner: ObjectTablePushConstantEntryNVX, + marker: ::std::marker::PhantomData<&'a ()>, +} +impl<'a> ::std::ops::Deref for ObjectTablePushConstantEntryNVXBuilder<'a> { + type Target = ObjectTablePushConstantEntryNVX; + fn deref(&self) -> &Self::Target { + &self.inner + } +} +impl<'a> ::std::ops::DerefMut for ObjectTablePushConstantEntryNVXBuilder<'a> { + fn deref_mut(&mut self) -> &mut Self::Target { + &mut self.inner + } +} +impl<'a> ObjectTablePushConstantEntryNVXBuilder<'a> { + pub fn ty(mut self, ty: ObjectEntryTypeNVX) -> ObjectTablePushConstantEntryNVXBuilder<'a> { + self.inner.ty = ty; + self + } + pub fn flags( + mut self, + flags: ObjectEntryUsageFlagsNVX, + ) -> ObjectTablePushConstantEntryNVXBuilder<'a> { + self.inner.flags = flags; + self + } + pub fn pipeline_layout( + mut self, + pipeline_layout: PipelineLayout, + ) -> ObjectTablePushConstantEntryNVXBuilder<'a> { + self.inner.pipeline_layout = pipeline_layout; + self + } + pub fn stage_flags( + mut self, + stage_flags: ShaderStageFlags, + ) -> ObjectTablePushConstantEntryNVXBuilder<'a> { + self.inner.stage_flags = stage_flags; + self + } + #[doc = r" Calling build will **discard** all the lifetime information. Only call this if"] + #[doc = r" necessary! 
Builders implement `Deref` targeting their corresponding Vulkan struct,"] + #[doc = r" so references to builders can be passed directly to Vulkan functions."] + pub fn build(self) -> ObjectTablePushConstantEntryNVX { + self.inner + } +} +#[repr(C)] +#[derive(Copy, Clone, Debug)] +#[doc = ""] +pub struct PhysicalDeviceFeatures2 { + pub s_type: StructureType, + pub p_next: *mut c_void, + pub features: PhysicalDeviceFeatures, +} +impl ::std::default::Default for PhysicalDeviceFeatures2 { + fn default() -> PhysicalDeviceFeatures2 { + PhysicalDeviceFeatures2 { + s_type: StructureType::PHYSICAL_DEVICE_FEATURES_2, + p_next: ::std::ptr::null_mut(), + features: PhysicalDeviceFeatures::default(), + } + } +} +impl PhysicalDeviceFeatures2 { + pub fn builder<'a>() -> PhysicalDeviceFeatures2Builder<'a> { + PhysicalDeviceFeatures2Builder { + inner: PhysicalDeviceFeatures2::default(), + marker: ::std::marker::PhantomData, + } + } +} +#[repr(transparent)] +pub struct PhysicalDeviceFeatures2Builder<'a> { + inner: PhysicalDeviceFeatures2, + marker: ::std::marker::PhantomData<&'a ()>, +} +unsafe impl ExtendsDeviceCreateInfo for PhysicalDeviceFeatures2Builder<'_> {} +unsafe impl ExtendsDeviceCreateInfo for PhysicalDeviceFeatures2 {} +impl<'a> ::std::ops::Deref for PhysicalDeviceFeatures2Builder<'a> { + type Target = PhysicalDeviceFeatures2; + fn deref(&self) -> &Self::Target { + &self.inner + } +} +impl<'a> ::std::ops::DerefMut for PhysicalDeviceFeatures2Builder<'a> { + fn deref_mut(&mut self) -> &mut Self::Target { + &mut self.inner + } +} +impl<'a> PhysicalDeviceFeatures2Builder<'a> { + pub fn features( + mut self, + features: PhysicalDeviceFeatures, + ) -> PhysicalDeviceFeatures2Builder<'a> { + self.inner.features = features; + self + } + #[doc = r" Calling build will **discard** all the lifetime information. Only call this if"] + #[doc = r" necessary! 
Builders implement `Deref` targeting their corresponding Vulkan struct,"]
+    #[doc = r" so references to builders can be passed directly to Vulkan functions."]
+    pub fn build(self) -> PhysicalDeviceFeatures2 {
+        self.inner
+    }
+}
+#[repr(C)]
+#[derive(Copy, Clone, Debug)]
+#[doc = ""]
+pub struct PhysicalDeviceProperties2 {
+    pub s_type: StructureType,
+    pub p_next: *mut c_void,
+    pub properties: PhysicalDeviceProperties,
+}
+impl ::std::default::Default for PhysicalDeviceProperties2 {
+    fn default() -> PhysicalDeviceProperties2 {
+        PhysicalDeviceProperties2 {
+            s_type: StructureType::PHYSICAL_DEVICE_PROPERTIES_2,
+            p_next: ::std::ptr::null_mut(),
+            properties: PhysicalDeviceProperties::default(),
+        }
+    }
+}
+impl PhysicalDeviceProperties2 {
+    pub fn builder<'a>() -> PhysicalDeviceProperties2Builder<'a> {
+        PhysicalDeviceProperties2Builder {
+            inner: PhysicalDeviceProperties2::default(),
+            marker: ::std::marker::PhantomData,
+        }
+    }
+}
+#[repr(transparent)]
+pub struct PhysicalDeviceProperties2Builder<'a> {
+    inner: PhysicalDeviceProperties2,
+    marker: ::std::marker::PhantomData<&'a ()>,
+}
+pub unsafe trait ExtendsPhysicalDeviceProperties2 {}
+impl<'a> ::std::ops::Deref for PhysicalDeviceProperties2Builder<'a> {
+    type Target = PhysicalDeviceProperties2;
+    fn deref(&self) -> &Self::Target {
+        &self.inner
+    }
+}
+impl<'a> ::std::ops::DerefMut for PhysicalDeviceProperties2Builder<'a> {
+    fn deref_mut(&mut self) -> &mut Self::Target {
+        &mut self.inner
+    }
+}
+impl<'a> PhysicalDeviceProperties2Builder<'a> {
+    pub fn properties(
+        mut self,
+        properties: PhysicalDeviceProperties,
+    ) -> PhysicalDeviceProperties2Builder<'a> {
+        self.inner.properties = properties;
+        self
+    }
+    #[doc = r" Prepends the given extension struct between the root and the first pointer. This"]
+    #[doc = r" method only exists on structs that can be passed to a function directly. Only"]
+    #[doc = r" valid extension structs can be pushed into the chain."]
+    #[doc = r" If the chain looks like `A -> B -> C`, and you call `builder.push_next(&mut D)`, then the"]
+    #[doc = r" chain will look like `A -> D -> B -> C`."]
+    pub fn push_next<T: ExtendsPhysicalDeviceProperties2>(
+        mut self,
+        next: &'a mut T,
+    ) -> PhysicalDeviceProperties2Builder<'a> {
+        unsafe {
+            let next_ptr = next as *mut T as *mut BaseOutStructure;
+            let last_next = ptr_chain_iter(next).last().unwrap();
+            (*last_next).p_next = self.inner.p_next as _;
+            self.inner.p_next = next_ptr as _;
+        }
+        self
+    }
+    #[doc = r" Calling build will **discard** all the lifetime information. Only call this if"]
+    #[doc = r" necessary! Builders implement `Deref` targeting their corresponding Vulkan struct,"]
+    #[doc = r" so references to builders can be passed directly to Vulkan functions."]
+    pub fn build(self) -> PhysicalDeviceProperties2 {
+        self.inner
+    }
+}
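The `A -> D -> B -> C` splice documented on `push_next` holds for every root builder in this file. A sketch using the NV memory-allocation extension structs vendored earlier, and assuming the `MemoryAllocateInfo` root struct and its builder defined elsewhere in this generated module:

fn chained_allocate_info<'a>(
    dedicated: &'a mut DedicatedAllocationMemoryAllocateInfoNV,
    export: &'a mut ExportMemoryAllocateInfoNV,
) -> MemoryAllocateInfoBuilder<'a> {
    // Each push_next splices its argument directly behind the root, so
    // after both calls the chain reads: root -> export -> dedicated.
    MemoryAllocateInfo::builder()
        .allocation_size(1 << 20)
        .memory_type_index(0)
        .push_next(dedicated)
        .push_next(export)
}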
+#[repr(C)]
+#[derive(Copy, Clone, Debug)]
+#[doc = ""]
+pub struct FormatProperties2 {
+    pub s_type: StructureType,
+    pub p_next: *mut c_void,
+    pub format_properties: FormatProperties,
+}
+impl ::std::default::Default for FormatProperties2 {
+    fn default() -> FormatProperties2 {
+        FormatProperties2 {
+            s_type: StructureType::FORMAT_PROPERTIES_2,
+            p_next: ::std::ptr::null_mut(),
+            format_properties: FormatProperties::default(),
+        }
+    }
+}
+impl FormatProperties2 {
+    pub fn builder<'a>() -> FormatProperties2Builder<'a> {
+        FormatProperties2Builder {
+            inner: FormatProperties2::default(),
+            marker: ::std::marker::PhantomData,
+        }
+    }
+}
+#[repr(transparent)]
+pub struct FormatProperties2Builder<'a> {
+    inner: FormatProperties2,
+    marker: ::std::marker::PhantomData<&'a ()>,
+}
+pub unsafe trait ExtendsFormatProperties2 {}
+impl<'a> ::std::ops::Deref for FormatProperties2Builder<'a> {
+    type Target = FormatProperties2;
+    fn deref(&self) -> &Self::Target {
+        &self.inner
+    }
+}
+impl<'a> ::std::ops::DerefMut for FormatProperties2Builder<'a> {
+    fn deref_mut(&mut self) -> &mut Self::Target {
+        &mut self.inner
+    }
+}
+impl<'a> FormatProperties2Builder<'a> {
+    pub fn format_properties(
+        mut self,
+        format_properties: FormatProperties,
+    ) -> FormatProperties2Builder<'a> {
+        self.inner.format_properties = format_properties;
+        self
+    }
+    #[doc = r" Prepends the given extension struct between the root and the first pointer. This"]
+    #[doc = r" method only exists on structs that can be passed to a function directly. Only"]
+    #[doc = r" valid extension structs can be pushed into the chain."]
+    #[doc = r" If the chain looks like `A -> B -> C`, and you call `builder.push_next(&mut D)`, then the"]
+    #[doc = r" chain will look like `A -> D -> B -> C`."]
+    pub fn push_next<T: ExtendsFormatProperties2>(
+        mut self,
+        next: &'a mut T,
+    ) -> FormatProperties2Builder<'a> {
+        unsafe {
+            let next_ptr = next as *mut T as *mut BaseOutStructure;
+            let last_next = ptr_chain_iter(next).last().unwrap();
+            (*last_next).p_next = self.inner.p_next as _;
+            self.inner.p_next = next_ptr as _;
+        }
+        self
+    }
+    #[doc = r" Calling build will **discard** all the lifetime information. Only call this if"]
+    #[doc = r" necessary! Builders implement `Deref` targeting their corresponding Vulkan struct,"]
+    #[doc = r" so references to builders can be passed directly to Vulkan functions."]
+    pub fn build(self) -> FormatProperties2 {
+        self.inner
+    }
+}
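`FormatProperties2` is an output structure: its `p_next` is `*mut c_void` and the caller provides zero-initialized storage for the driver to fill. A sketch of the calling pattern (`get_fn` is a hypothetical stand-in for the loaded `vkGetPhysicalDeviceFormatProperties2` pointer, not part of this patch):

unsafe fn query_format_properties(
    get_fn: unsafe extern "system" fn(PhysicalDevice, Format, *mut FormatProperties2),
    physical_device: PhysicalDevice,
    format: Format,
) -> FormatProperties {
    // Default::default() sets the required s_type and a null p_next,
    // making the struct valid for the driver to write into.
    let mut props2 = FormatProperties2::default();
    get_fn(physical_device, format, &mut props2);
    props2.format_properties
}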
Builders implement `Deref` targeting their corresponding Vulkan struct,"] + #[doc = r" so references to builders can be passed directly to Vulkan functions."] + pub fn build(self) -> FormatProperties2 { + self.inner + } +} +#[repr(C)] +#[derive(Copy, Clone, Debug)] +#[doc = ""] +pub struct ImageFormatProperties2 { + pub s_type: StructureType, + pub p_next: *mut c_void, + pub image_format_properties: ImageFormatProperties, +} +impl ::std::default::Default for ImageFormatProperties2 { + fn default() -> ImageFormatProperties2 { + ImageFormatProperties2 { + s_type: StructureType::IMAGE_FORMAT_PROPERTIES_2, + p_next: ::std::ptr::null_mut(), + image_format_properties: ImageFormatProperties::default(), + } + } +} +impl ImageFormatProperties2 { + pub fn builder<'a>() -> ImageFormatProperties2Builder<'a> { + ImageFormatProperties2Builder { + inner: ImageFormatProperties2::default(), + marker: ::std::marker::PhantomData, + } + } +} +#[repr(transparent)] +pub struct ImageFormatProperties2Builder<'a> { + inner: ImageFormatProperties2, + marker: ::std::marker::PhantomData<&'a ()>, +} +pub unsafe trait ExtendsImageFormatProperties2 {} +impl<'a> ::std::ops::Deref for ImageFormatProperties2Builder<'a> { + type Target = ImageFormatProperties2; + fn deref(&self) -> &Self::Target { + &self.inner + } +} +impl<'a> ::std::ops::DerefMut for ImageFormatProperties2Builder<'a> { + fn deref_mut(&mut self) -> &mut Self::Target { + &mut self.inner + } +} +impl<'a> ImageFormatProperties2Builder<'a> { + pub fn image_format_properties( + mut self, + image_format_properties: ImageFormatProperties, + ) -> ImageFormatProperties2Builder<'a> { + self.inner.image_format_properties = image_format_properties; + self + } + #[doc = r" Prepends the given extension struct between the root and the first pointer. This"] + #[doc = r" method only exists on structs that can be passed to a function directly. Only"] + #[doc = r" valid extension structs can be pushed into the chain."] + #[doc = r" If the chain looks like `A -> B -> C`, and you call `builder.push_next(&mut D)`, then the"] + #[doc = r" chain will look like `A -> D -> B -> C`."] + pub fn push_next<T: ExtendsImageFormatProperties2>( + mut self, + next: &'a mut T, + ) -> ImageFormatProperties2Builder<'a> { + unsafe { + let next_ptr = next as *mut T as *mut BaseOutStructure; + let last_next = ptr_chain_iter(next).last().unwrap(); + (*last_next).p_next = self.inner.p_next as _; + self.inner.p_next = next_ptr as _; + } + self + } + #[doc = r" Calling build will **discard** all the lifetime information. Only call this if"] + #[doc = r" necessary! Builders implement `Deref` targeting their corresponding Vulkan struct,"] + #[doc = r" so references to builders can be passed directly to Vulkan functions."] + pub fn build(self) -> ImageFormatProperties2 { + self.inner + } +}
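// Editorial sketch (not part of the vendored ash output): chaining an
// extension struct onto an output struct. `ExternalImageFormatProperties`
// (declared further down in this file) implements
// `ExtendsImageFormatProperties2`, so it can be pushed onto this builder;
// the `T: ExtendsImageFormatProperties2` bound on `push_next` rejects
// structs that are not valid for this chain at compile time.
#[allow(dead_code)]
fn _push_next_output_sketch() {
    let mut external = ExternalImageFormatProperties::default();
    let props = ImageFormatProperties2::builder()
        .push_next(&mut external)
        .build();
    // After the push, the root's p_next points at `external`.
    debug_assert!(!props.p_next.is_null());
}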
+#[repr(C)] +#[derive(Copy, Clone, Debug)] +#[doc = ""] +pub struct PhysicalDeviceImageFormatInfo2 { + pub s_type: StructureType, + pub p_next: *const c_void, + pub format: Format, + pub ty: ImageType, + pub tiling: ImageTiling, + pub usage: ImageUsageFlags, + pub flags: ImageCreateFlags, +} +impl ::std::default::Default for PhysicalDeviceImageFormatInfo2 { + fn default() -> PhysicalDeviceImageFormatInfo2 { + PhysicalDeviceImageFormatInfo2 { + s_type: StructureType::PHYSICAL_DEVICE_IMAGE_FORMAT_INFO_2, + p_next: ::std::ptr::null(), + format: Format::default(), + ty: ImageType::default(), + tiling: ImageTiling::default(), + usage: ImageUsageFlags::default(), + flags: ImageCreateFlags::default(), + } + } +} +impl PhysicalDeviceImageFormatInfo2 { + pub fn builder<'a>() -> PhysicalDeviceImageFormatInfo2Builder<'a> { + PhysicalDeviceImageFormatInfo2Builder { + inner: PhysicalDeviceImageFormatInfo2::default(), + marker: ::std::marker::PhantomData, + } + } +} +#[repr(transparent)] +pub struct PhysicalDeviceImageFormatInfo2Builder<'a> { + inner: PhysicalDeviceImageFormatInfo2, + marker: ::std::marker::PhantomData<&'a ()>, +} +pub unsafe trait ExtendsPhysicalDeviceImageFormatInfo2 {} +impl<'a> ::std::ops::Deref for PhysicalDeviceImageFormatInfo2Builder<'a> { + type Target = PhysicalDeviceImageFormatInfo2; + fn deref(&self) -> &Self::Target { + &self.inner + } +} +impl<'a> ::std::ops::DerefMut for PhysicalDeviceImageFormatInfo2Builder<'a> { + fn deref_mut(&mut self) -> &mut Self::Target { + &mut self.inner + } +} +impl<'a> PhysicalDeviceImageFormatInfo2Builder<'a> { + pub fn format(mut self, format: Format) -> PhysicalDeviceImageFormatInfo2Builder<'a> { + self.inner.format = format; + self + } + pub fn ty(mut self, ty: ImageType) -> PhysicalDeviceImageFormatInfo2Builder<'a> { + self.inner.ty = ty; + self + } + pub fn tiling(mut self, tiling: ImageTiling) -> PhysicalDeviceImageFormatInfo2Builder<'a> { + self.inner.tiling = tiling; + self + } + pub fn usage(mut self, usage: ImageUsageFlags) -> PhysicalDeviceImageFormatInfo2Builder<'a> { + self.inner.usage = usage; + self + } + pub fn flags(mut self, flags: ImageCreateFlags) -> PhysicalDeviceImageFormatInfo2Builder<'a> { + self.inner.flags = flags; + self + } + #[doc = r" Prepends the given extension struct between the root and the first pointer. This"] + #[doc = r" method only exists on structs that can be passed to a function directly. Only"] + #[doc = r" valid extension structs can be pushed into the chain."] + #[doc = r" If the chain looks like `A -> B -> C`, and you call `builder.push_next(&mut D)`, then the"] + #[doc = r" chain will look like `A -> D -> B -> C`."] + pub fn push_next<T: ExtendsPhysicalDeviceImageFormatInfo2>( + mut self, + next: &'a mut T, + ) -> PhysicalDeviceImageFormatInfo2Builder<'a> { + unsafe { + let next_ptr = next as *mut T as *mut BaseOutStructure; + let last_next = ptr_chain_iter(next).last().unwrap(); + (*last_next).p_next = self.inner.p_next as _; + self.inner.p_next = next_ptr as _; + } + self + } + #[doc = r" Calling build will **discard** all the lifetime information. Only call this if"] + #[doc = r" necessary! Builders implement `Deref` targeting their corresponding Vulkan struct,"] + #[doc = r" so references to builders can be passed directly to Vulkan functions."] + pub fn build(self) -> PhysicalDeviceImageFormatInfo2 { + self.inner + } +}
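// Editorial sketch (not part of the vendored ash output): the input-struct
// side of the same pattern. `PhysicalDeviceExternalImageFormatInfo` (declared
// below) implements `ExtendsPhysicalDeviceImageFormatInfo2`, so it can extend
// this query. Each `push_next` prepends directly behind the root, so if the
// chain is `A -> B` and you push `D`, it becomes `A -> D -> B`. The default
// flag/enum values here are placeholders purely for illustration.
#[allow(dead_code)]
fn _push_next_input_sketch() {
    let mut external_info = PhysicalDeviceExternalImageFormatInfo::builder()
        .handle_type(ExternalMemoryHandleTypeFlags::default())
        .build();
    let _info = PhysicalDeviceImageFormatInfo2::builder()
        .format(Format::default())
        .ty(ImageType::default())
        .push_next(&mut external_info)
        .build();
}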
+#[repr(C)] +#[derive(Copy, Clone, Debug)] +#[doc = ""] +pub struct QueueFamilyProperties2 { + pub s_type: StructureType, + pub p_next: *mut c_void, + pub queue_family_properties: QueueFamilyProperties, +} +impl ::std::default::Default for QueueFamilyProperties2 { + fn default() -> QueueFamilyProperties2 { + QueueFamilyProperties2 { + s_type: StructureType::QUEUE_FAMILY_PROPERTIES_2, + p_next: ::std::ptr::null_mut(), + queue_family_properties: QueueFamilyProperties::default(), + } + } +} +impl QueueFamilyProperties2 { + pub fn builder<'a>() -> QueueFamilyProperties2Builder<'a> { + QueueFamilyProperties2Builder { + inner: QueueFamilyProperties2::default(), + marker: ::std::marker::PhantomData, + } + } +} +#[repr(transparent)] +pub struct QueueFamilyProperties2Builder<'a> { + inner: QueueFamilyProperties2, + marker: ::std::marker::PhantomData<&'a ()>, +} +pub unsafe trait ExtendsQueueFamilyProperties2 {} +impl<'a> ::std::ops::Deref for QueueFamilyProperties2Builder<'a> { + type Target = QueueFamilyProperties2; + fn deref(&self) -> &Self::Target { + &self.inner + } +} +impl<'a> ::std::ops::DerefMut for QueueFamilyProperties2Builder<'a> { + fn deref_mut(&mut self) -> &mut Self::Target { + &mut self.inner + } +} +impl<'a> QueueFamilyProperties2Builder<'a> { + pub fn queue_family_properties( + mut self, + queue_family_properties: QueueFamilyProperties, + ) -> QueueFamilyProperties2Builder<'a> { + self.inner.queue_family_properties = queue_family_properties; + self + } + #[doc = r" Prepends the given extension struct between the root and the first pointer. This"] + #[doc = r" method only exists on structs that can be passed to a function directly. Only"] + #[doc = r" valid extension structs can be pushed into the chain."] + #[doc = r" If the chain looks like `A -> B -> C`, and you call `builder.push_next(&mut D)`, then the"] + #[doc = r" chain will look like `A -> D -> B -> C`."] + pub fn push_next<T: ExtendsQueueFamilyProperties2>( + mut self, + next: &'a mut T, + ) -> QueueFamilyProperties2Builder<'a> { + unsafe { + let next_ptr = next as *mut T as *mut BaseOutStructure; + let last_next = ptr_chain_iter(next).last().unwrap(); + (*last_next).p_next = self.inner.p_next as _; + self.inner.p_next = next_ptr as _; + } + self + } + #[doc = r" Calling build will **discard** all the lifetime information. Only call this if"] + #[doc = r" necessary! 
Builders implement `Deref` targeting their corresponding Vulkan struct,"] + #[doc = r" so references to builders can be passed directly to Vulkan functions."] + pub fn build(self) -> QueueFamilyProperties2 { + self.inner + } +} +#[repr(C)] +#[derive(Copy, Clone, Debug)] +#[doc = ""] +pub struct PhysicalDeviceMemoryProperties2 { + pub s_type: StructureType, + pub p_next: *mut c_void, + pub memory_properties: PhysicalDeviceMemoryProperties, +} +impl ::std::default::Default for PhysicalDeviceMemoryProperties2 { + fn default() -> PhysicalDeviceMemoryProperties2 { + PhysicalDeviceMemoryProperties2 { + s_type: StructureType::PHYSICAL_DEVICE_MEMORY_PROPERTIES_2, + p_next: ::std::ptr::null_mut(), + memory_properties: PhysicalDeviceMemoryProperties::default(), + } + } +} +impl PhysicalDeviceMemoryProperties2 { + pub fn builder<'a>() -> PhysicalDeviceMemoryProperties2Builder<'a> { + PhysicalDeviceMemoryProperties2Builder { + inner: PhysicalDeviceMemoryProperties2::default(), + marker: ::std::marker::PhantomData, + } + } +} +#[repr(transparent)] +pub struct PhysicalDeviceMemoryProperties2Builder<'a> { + inner: PhysicalDeviceMemoryProperties2, + marker: ::std::marker::PhantomData<&'a ()>, +} +pub unsafe trait ExtendsPhysicalDeviceMemoryProperties2 {} +impl<'a> ::std::ops::Deref for PhysicalDeviceMemoryProperties2Builder<'a> { + type Target = PhysicalDeviceMemoryProperties2; + fn deref(&self) -> &Self::Target { + &self.inner + } +} +impl<'a> ::std::ops::DerefMut for PhysicalDeviceMemoryProperties2Builder<'a> { + fn deref_mut(&mut self) -> &mut Self::Target { + &mut self.inner + } +} +impl<'a> PhysicalDeviceMemoryProperties2Builder<'a> { + pub fn memory_properties( + mut self, + memory_properties: PhysicalDeviceMemoryProperties, + ) -> PhysicalDeviceMemoryProperties2Builder<'a> { + self.inner.memory_properties = memory_properties; + self + } + #[doc = r" Prepends the given extension struct between the root and the first pointer. This"] + #[doc = r" method only exists on structs that can be passed to a function directly. Only"] + #[doc = r" valid extension structs can be pushed into the chain."] + #[doc = r" If the chain looks like `A -> B -> C`, and you call `builder.push_next(&mut D)`, then the"] + #[doc = r" chain will look like `A -> D -> B -> C`."] + pub fn push_next<T: ExtendsPhysicalDeviceMemoryProperties2>( + mut self, + next: &'a mut T, + ) -> PhysicalDeviceMemoryProperties2Builder<'a> { + unsafe { + let next_ptr = next as *mut T as *mut BaseOutStructure; + let last_next = ptr_chain_iter(next).last().unwrap(); + (*last_next).p_next = self.inner.p_next as _; + self.inner.p_next = next_ptr as _; + } + self + } + #[doc = r" Calling build will **discard** all the lifetime information. Only call this if"] + #[doc = r" necessary! Builders implement `Deref` targeting their corresponding Vulkan struct,"] + #[doc = r" so references to builders can be passed directly to Vulkan functions."] + pub fn build(self) -> PhysicalDeviceMemoryProperties2 { + self.inner + } +}
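// Editorial note (not part of the vendored ash output): what the unsafe body
// of `push_next` above actually does. `ptr_chain_iter(next)` yields `next`
// and everything already hanging off it, so `last_next` is the tail of the
// pushed struct's own chain. Splicing then takes two pointer writes:
//
//     root -> B -> C          push D (whose own chain is D -> E)
//     (*tail_of_D).p_next = root.p_next;   // now E -> B -> C
//     root.p_next = &mut D;                // now root -> D -> E -> B -> C
//
// so a struct that already carries extensions is spliced in whole rather
// than flattened one element at a time.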
+#[repr(C)] +#[derive(Copy, Clone, Debug)] +#[doc = ""] +pub struct SparseImageFormatProperties2 { + pub s_type: StructureType, + pub p_next: *mut c_void, + pub properties: SparseImageFormatProperties, +} +impl ::std::default::Default for SparseImageFormatProperties2 { + fn default() -> SparseImageFormatProperties2 { + SparseImageFormatProperties2 { + s_type: StructureType::SPARSE_IMAGE_FORMAT_PROPERTIES_2, + p_next: ::std::ptr::null_mut(), + properties: SparseImageFormatProperties::default(), + } + } +} +impl SparseImageFormatProperties2 { + pub fn builder<'a>() -> SparseImageFormatProperties2Builder<'a> { + SparseImageFormatProperties2Builder { + inner: SparseImageFormatProperties2::default(), + marker: ::std::marker::PhantomData, + } + } +} +#[repr(transparent)] +pub struct SparseImageFormatProperties2Builder<'a> { + inner: SparseImageFormatProperties2, + marker: ::std::marker::PhantomData<&'a ()>, +} +pub unsafe trait ExtendsSparseImageFormatProperties2 {} +impl<'a> ::std::ops::Deref for SparseImageFormatProperties2Builder<'a> { + type Target = SparseImageFormatProperties2; + fn deref(&self) -> &Self::Target { + &self.inner + } +} +impl<'a> ::std::ops::DerefMut for SparseImageFormatProperties2Builder<'a> { + fn deref_mut(&mut self) -> &mut Self::Target { + &mut self.inner + } +} +impl<'a> SparseImageFormatProperties2Builder<'a> { + pub fn properties( + mut self, + properties: SparseImageFormatProperties, + ) -> SparseImageFormatProperties2Builder<'a> { + self.inner.properties = properties; + self + } + #[doc = r" Prepends the given extension struct between the root and the first pointer. This"] + #[doc = r" method only exists on structs that can be passed to a function directly. Only"] + #[doc = r" valid extension structs can be pushed into the chain."] + #[doc = r" If the chain looks like `A -> B -> C`, and you call `builder.push_next(&mut D)`, then the"] + #[doc = r" chain will look like `A -> D -> B -> C`."] + pub fn push_next<T: ExtendsSparseImageFormatProperties2>( + mut self, + next: &'a mut T, + ) -> SparseImageFormatProperties2Builder<'a> { + unsafe { + let next_ptr = next as *mut T as *mut BaseOutStructure; + let last_next = ptr_chain_iter(next).last().unwrap(); + (*last_next).p_next = self.inner.p_next as _; + self.inner.p_next = next_ptr as _; + } + self + } + #[doc = r" Calling build will **discard** all the lifetime information. Only call this if"] + #[doc = r" necessary! 
Builders implement `Deref` targeting their corresponding Vulkan struct,"] + #[doc = r" so references to builders can be passed directly to Vulkan functions."] + pub fn build(self) -> SparseImageFormatProperties2 { + self.inner + } +} +#[repr(C)] +#[derive(Copy, Clone, Debug)] +#[doc = ""] +pub struct PhysicalDeviceSparseImageFormatInfo2 { + pub s_type: StructureType, + pub p_next: *const c_void, + pub format: Format, + pub ty: ImageType, + pub samples: SampleCountFlags, + pub usage: ImageUsageFlags, + pub tiling: ImageTiling, +} +impl ::std::default::Default for PhysicalDeviceSparseImageFormatInfo2 { + fn default() -> PhysicalDeviceSparseImageFormatInfo2 { + PhysicalDeviceSparseImageFormatInfo2 { + s_type: StructureType::PHYSICAL_DEVICE_SPARSE_IMAGE_FORMAT_INFO_2, + p_next: ::std::ptr::null(), + format: Format::default(), + ty: ImageType::default(), + samples: SampleCountFlags::default(), + usage: ImageUsageFlags::default(), + tiling: ImageTiling::default(), + } + } +} +impl PhysicalDeviceSparseImageFormatInfo2 { + pub fn builder<'a>() -> PhysicalDeviceSparseImageFormatInfo2Builder<'a> { + PhysicalDeviceSparseImageFormatInfo2Builder { + inner: PhysicalDeviceSparseImageFormatInfo2::default(), + marker: ::std::marker::PhantomData, + } + } +} +#[repr(transparent)] +pub struct PhysicalDeviceSparseImageFormatInfo2Builder<'a> { + inner: PhysicalDeviceSparseImageFormatInfo2, + marker: ::std::marker::PhantomData<&'a ()>, +} +pub unsafe trait ExtendsPhysicalDeviceSparseImageFormatInfo2 {} +impl<'a> ::std::ops::Deref for PhysicalDeviceSparseImageFormatInfo2Builder<'a> { + type Target = PhysicalDeviceSparseImageFormatInfo2; + fn deref(&self) -> &Self::Target { + &self.inner + } +} +impl<'a> ::std::ops::DerefMut for PhysicalDeviceSparseImageFormatInfo2Builder<'a> { + fn deref_mut(&mut self) -> &mut Self::Target { + &mut self.inner + } +} +impl<'a> PhysicalDeviceSparseImageFormatInfo2Builder<'a> { + pub fn format(mut self, format: Format) -> PhysicalDeviceSparseImageFormatInfo2Builder<'a> { + self.inner.format = format; + self + } + pub fn ty(mut self, ty: ImageType) -> PhysicalDeviceSparseImageFormatInfo2Builder<'a> { + self.inner.ty = ty; + self + } + pub fn samples( + mut self, + samples: SampleCountFlags, + ) -> PhysicalDeviceSparseImageFormatInfo2Builder<'a> { + self.inner.samples = samples; + self + } + pub fn usage( + mut self, + usage: ImageUsageFlags, + ) -> PhysicalDeviceSparseImageFormatInfo2Builder<'a> { + self.inner.usage = usage; + self + } + pub fn tiling( + mut self, + tiling: ImageTiling, + ) -> PhysicalDeviceSparseImageFormatInfo2Builder<'a> { + self.inner.tiling = tiling; + self + } + #[doc = r" Prepends the given extension struct between the root and the first pointer. This"] + #[doc = r" method only exists on structs that can be passed to a function directly. Only"] + #[doc = r" valid extension structs can be pushed into the chain."] + #[doc = r" If the chain looks like `A -> B -> C`, and you call `builder.push_next(&mut D)`, then the"] + #[doc = r" chain will look like `A -> D -> B -> C`."] + pub fn push_next<T: ExtendsPhysicalDeviceSparseImageFormatInfo2>( + mut self, + next: &'a mut T, + ) -> PhysicalDeviceSparseImageFormatInfo2Builder<'a> { + unsafe { + let next_ptr = next as *mut T as *mut BaseOutStructure; + let last_next = ptr_chain_iter(next).last().unwrap(); + (*last_next).p_next = self.inner.p_next as _; + self.inner.p_next = next_ptr as _; + } + self + } + #[doc = r" Calling build will **discard** all the lifetime information. Only call this if"] + #[doc = r" necessary! 
Builders implement `Deref` targeting their corresponding Vulkan struct,"] + #[doc = r" so references to builders can be passed directly to Vulkan functions."] + pub fn build(self) -> PhysicalDeviceSparseImageFormatInfo2 { + self.inner + } +} +#[repr(C)] +#[derive(Copy, Clone, Debug)] +#[doc = ""] +pub struct PhysicalDevicePushDescriptorPropertiesKHR { + pub s_type: StructureType, + pub p_next: *mut c_void, + pub max_push_descriptors: u32, +} +impl ::std::default::Default for PhysicalDevicePushDescriptorPropertiesKHR { + fn default() -> PhysicalDevicePushDescriptorPropertiesKHR { + PhysicalDevicePushDescriptorPropertiesKHR { + s_type: StructureType::PHYSICAL_DEVICE_PUSH_DESCRIPTOR_PROPERTIES_KHR, + p_next: ::std::ptr::null_mut(), + max_push_descriptors: u32::default(), + } + } +} +impl PhysicalDevicePushDescriptorPropertiesKHR { + pub fn builder<'a>() -> PhysicalDevicePushDescriptorPropertiesKHRBuilder<'a> { + PhysicalDevicePushDescriptorPropertiesKHRBuilder { + inner: PhysicalDevicePushDescriptorPropertiesKHR::default(), + marker: ::std::marker::PhantomData, + } + } +} +#[repr(transparent)] +pub struct PhysicalDevicePushDescriptorPropertiesKHRBuilder<'a> { + inner: PhysicalDevicePushDescriptorPropertiesKHR, + marker: ::std::marker::PhantomData<&'a ()>, +} +unsafe impl ExtendsPhysicalDeviceProperties2 + for PhysicalDevicePushDescriptorPropertiesKHRBuilder<'_> +{ +} +unsafe impl ExtendsPhysicalDeviceProperties2 for PhysicalDevicePushDescriptorPropertiesKHR {} +impl<'a> ::std::ops::Deref for PhysicalDevicePushDescriptorPropertiesKHRBuilder<'a> { + type Target = PhysicalDevicePushDescriptorPropertiesKHR; + fn deref(&self) -> &Self::Target { + &self.inner + } +} +impl<'a> ::std::ops::DerefMut for PhysicalDevicePushDescriptorPropertiesKHRBuilder<'a> { + fn deref_mut(&mut self) -> &mut Self::Target { + &mut self.inner + } +} +impl<'a> PhysicalDevicePushDescriptorPropertiesKHRBuilder<'a> { + pub fn max_push_descriptors( + mut self, + max_push_descriptors: u32, + ) -> PhysicalDevicePushDescriptorPropertiesKHRBuilder<'a> { + self.inner.max_push_descriptors = max_push_descriptors; + self + } + #[doc = r" Calling build will **discard** all the lifetime information. Only call this if"] + #[doc = r" necessary! 
Builders implement `Deref` targeting their corresponding Vulkan struct,"] + #[doc = r" so references to builders can be passed directly to Vulkan functions."] + pub fn build(self) -> PhysicalDevicePushDescriptorPropertiesKHR { + self.inner + } +} +#[repr(C)] +#[derive(Copy, Clone, Default, Debug)] +#[doc = ""] +pub struct ConformanceVersionKHR { + pub major: u8, + pub minor: u8, + pub subminor: u8, + pub patch: u8, +} +impl ConformanceVersionKHR { + pub fn builder<'a>() -> ConformanceVersionKHRBuilder<'a> { + ConformanceVersionKHRBuilder { + inner: ConformanceVersionKHR::default(), + marker: ::std::marker::PhantomData, + } + } +} +#[repr(transparent)] +pub struct ConformanceVersionKHRBuilder<'a> { + inner: ConformanceVersionKHR, + marker: ::std::marker::PhantomData<&'a ()>, +} +impl<'a> ::std::ops::Deref for ConformanceVersionKHRBuilder<'a> { + type Target = ConformanceVersionKHR; + fn deref(&self) -> &Self::Target { + &self.inner + } +} +impl<'a> ::std::ops::DerefMut for ConformanceVersionKHRBuilder<'a> { + fn deref_mut(&mut self) -> &mut Self::Target { + &mut self.inner + } +} +impl<'a> ConformanceVersionKHRBuilder<'a> { + pub fn major(mut self, major: u8) -> ConformanceVersionKHRBuilder<'a> { + self.inner.major = major; + self + } + pub fn minor(mut self, minor: u8) -> ConformanceVersionKHRBuilder<'a> { + self.inner.minor = minor; + self + } + pub fn subminor(mut self, subminor: u8) -> ConformanceVersionKHRBuilder<'a> { + self.inner.subminor = subminor; + self + } + pub fn patch(mut self, patch: u8) -> ConformanceVersionKHRBuilder<'a> { + self.inner.patch = patch; + self + } + #[doc = r" Calling build will **discard** all the lifetime information. Only call this if"] + #[doc = r" necessary! Builders implement `Deref` targeting their corresponding Vulkan struct,"] + #[doc = r" so references to builders can be passed directly to Vulkan functions."] + pub fn build(self) -> ConformanceVersionKHR { + self.inner + } +} +#[repr(C)] +#[derive(Copy, Clone)] +#[doc = ""] +pub struct PhysicalDeviceDriverPropertiesKHR { + pub s_type: StructureType, + pub p_next: *mut c_void, + pub driver_id: DriverIdKHR, + pub driver_name: [c_char; MAX_DRIVER_NAME_SIZE_KHR], + pub driver_info: [c_char; MAX_DRIVER_INFO_SIZE_KHR], + pub conformance_version: ConformanceVersionKHR, +} +impl fmt::Debug for PhysicalDeviceDriverPropertiesKHR { + fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result { + fmt.debug_struct("PhysicalDeviceDriverPropertiesKHR") + .field("s_type", &self.s_type) + .field("p_next", &self.p_next) + .field("driver_id", &self.driver_id) + .field("driver_name", &unsafe { + ::std::ffi::CStr::from_ptr(self.driver_name.as_ptr() as *const c_char) + }) + .field("driver_info", &unsafe { + ::std::ffi::CStr::from_ptr(self.driver_info.as_ptr() as *const c_char) + }) + .field("conformance_version", &self.conformance_version) + .finish() + } +} +impl ::std::default::Default for PhysicalDeviceDriverPropertiesKHR { + fn default() -> PhysicalDeviceDriverPropertiesKHR { + PhysicalDeviceDriverPropertiesKHR { + s_type: StructureType::PHYSICAL_DEVICE_DRIVER_PROPERTIES_KHR, + p_next: ::std::ptr::null_mut(), + driver_id: DriverIdKHR::default(), + driver_name: unsafe { ::std::mem::zeroed() }, + driver_info: unsafe { ::std::mem::zeroed() }, + conformance_version: ConformanceVersionKHR::default(), + } + } +} +impl PhysicalDeviceDriverPropertiesKHR { + pub fn builder<'a>() -> PhysicalDeviceDriverPropertiesKHRBuilder<'a> { + PhysicalDeviceDriverPropertiesKHRBuilder { + inner: PhysicalDeviceDriverPropertiesKHR::default(), + 
marker: ::std::marker::PhantomData, + } + } +} +#[repr(transparent)] +pub struct PhysicalDeviceDriverPropertiesKHRBuilder<'a> { + inner: PhysicalDeviceDriverPropertiesKHR, + marker: ::std::marker::PhantomData<&'a ()>, +} +unsafe impl ExtendsPhysicalDeviceProperties2 for PhysicalDeviceDriverPropertiesKHRBuilder<'_> {} +unsafe impl ExtendsPhysicalDeviceProperties2 for PhysicalDeviceDriverPropertiesKHR {} +impl<'a> ::std::ops::Deref for PhysicalDeviceDriverPropertiesKHRBuilder<'a> { + type Target = PhysicalDeviceDriverPropertiesKHR; + fn deref(&self) -> &Self::Target { + &self.inner + } +} +impl<'a> ::std::ops::DerefMut for PhysicalDeviceDriverPropertiesKHRBuilder<'a> { + fn deref_mut(&mut self) -> &mut Self::Target { + &mut self.inner + } +} +impl<'a> PhysicalDeviceDriverPropertiesKHRBuilder<'a> { + pub fn driver_id( + mut self, + driver_id: DriverIdKHR, + ) -> PhysicalDeviceDriverPropertiesKHRBuilder<'a> { + self.inner.driver_id = driver_id; + self + } + pub fn driver_name( + mut self, + driver_name: [c_char; MAX_DRIVER_NAME_SIZE_KHR], + ) -> PhysicalDeviceDriverPropertiesKHRBuilder<'a> { + self.inner.driver_name = driver_name; + self + } + pub fn driver_info( + mut self, + driver_info: [c_char; MAX_DRIVER_INFO_SIZE_KHR], + ) -> PhysicalDeviceDriverPropertiesKHRBuilder<'a> { + self.inner.driver_info = driver_info; + self + } + pub fn conformance_version( + mut self, + conformance_version: ConformanceVersionKHR, + ) -> PhysicalDeviceDriverPropertiesKHRBuilder<'a> { + self.inner.conformance_version = conformance_version; + self + } + #[doc = r" Calling build will **discard** all the lifetime information. Only call this if"] + #[doc = r" necessary! Builders implement `Deref` targeting their corresponding Vulkan struct,"] + #[doc = r" so references to builders can be passed directly to Vulkan functions."] + pub fn build(self) -> PhysicalDeviceDriverPropertiesKHR { + self.inner + } +} +#[repr(C)] +#[derive(Copy, Clone, Debug)] +#[doc = ""] +pub struct PresentRegionsKHR { + pub s_type: StructureType, + pub p_next: *const c_void, + pub swapchain_count: u32, + pub p_regions: *const PresentRegionKHR, +} +impl ::std::default::Default for PresentRegionsKHR { + fn default() -> PresentRegionsKHR { + PresentRegionsKHR { + s_type: StructureType::PRESENT_REGIONS_KHR, + p_next: ::std::ptr::null(), + swapchain_count: u32::default(), + p_regions: ::std::ptr::null(), + } + } +} +impl PresentRegionsKHR { + pub fn builder<'a>() -> PresentRegionsKHRBuilder<'a> { + PresentRegionsKHRBuilder { + inner: PresentRegionsKHR::default(), + marker: ::std::marker::PhantomData, + } + } +} +#[repr(transparent)] +pub struct PresentRegionsKHRBuilder<'a> { + inner: PresentRegionsKHR, + marker: ::std::marker::PhantomData<&'a ()>, +} +unsafe impl ExtendsPresentInfoKHR for PresentRegionsKHRBuilder<'_> {} +unsafe impl ExtendsPresentInfoKHR for PresentRegionsKHR {} +impl<'a> ::std::ops::Deref for PresentRegionsKHRBuilder<'a> { + type Target = PresentRegionsKHR; + fn deref(&self) -> &Self::Target { + &self.inner + } +} +impl<'a> ::std::ops::DerefMut for PresentRegionsKHRBuilder<'a> { + fn deref_mut(&mut self) -> &mut Self::Target { + &mut self.inner + } +} +impl<'a> PresentRegionsKHRBuilder<'a> { + pub fn regions(mut self, regions: &'a [PresentRegionKHR]) -> PresentRegionsKHRBuilder<'a> { + self.inner.swapchain_count = regions.len() as _; + self.inner.p_regions = regions.as_ptr(); + self + } + #[doc = r" Calling build will **discard** all the lifetime information. Only call this if"] + #[doc = r" necessary! 
Builders implement `Deref` targeting their corresponding Vulkan struct,"] + #[doc = r" so references to builders can be passed directly to Vulkan functions."] + pub fn build(self) -> PresentRegionsKHR { + self.inner + } +} +#[repr(C)] +#[derive(Copy, Clone, Debug)] +#[doc = ""] +pub struct PresentRegionKHR { + pub rectangle_count: u32, + pub p_rectangles: *const RectLayerKHR, +} +impl ::std::default::Default for PresentRegionKHR { + fn default() -> PresentRegionKHR { + PresentRegionKHR { + rectangle_count: u32::default(), + p_rectangles: ::std::ptr::null(), + } + } +} +impl PresentRegionKHR { + pub fn builder<'a>() -> PresentRegionKHRBuilder<'a> { + PresentRegionKHRBuilder { + inner: PresentRegionKHR::default(), + marker: ::std::marker::PhantomData, + } + } +} +#[repr(transparent)] +pub struct PresentRegionKHRBuilder<'a> { + inner: PresentRegionKHR, + marker: ::std::marker::PhantomData<&'a ()>, +} +impl<'a> ::std::ops::Deref for PresentRegionKHRBuilder<'a> { + type Target = PresentRegionKHR; + fn deref(&self) -> &Self::Target { + &self.inner + } +} +impl<'a> ::std::ops::DerefMut for PresentRegionKHRBuilder<'a> { + fn deref_mut(&mut self) -> &mut Self::Target { + &mut self.inner + } +} +impl<'a> PresentRegionKHRBuilder<'a> { + pub fn rectangles(mut self, rectangles: &'a [RectLayerKHR]) -> PresentRegionKHRBuilder<'a> { + self.inner.rectangle_count = rectangles.len() as _; + self.inner.p_rectangles = rectangles.as_ptr(); + self + } + #[doc = r" Calling build will **discard** all the lifetime information. Only call this if"] + #[doc = r" necessary! Builders implement `Deref` targeting their corresponding Vulkan struct,"] + #[doc = r" so references to builders can be passed directly to Vulkan functions."] + pub fn build(self) -> PresentRegionKHR { + self.inner + } +} +#[repr(C)] +#[derive(Copy, Clone, Default, Debug)] +#[doc = ""] +pub struct RectLayerKHR { + pub offset: Offset2D, + pub extent: Extent2D, + pub layer: u32, +} +impl RectLayerKHR { + pub fn builder<'a>() -> RectLayerKHRBuilder<'a> { + RectLayerKHRBuilder { + inner: RectLayerKHR::default(), + marker: ::std::marker::PhantomData, + } + } +} +#[repr(transparent)] +pub struct RectLayerKHRBuilder<'a> { + inner: RectLayerKHR, + marker: ::std::marker::PhantomData<&'a ()>, +} +impl<'a> ::std::ops::Deref for RectLayerKHRBuilder<'a> { + type Target = RectLayerKHR; + fn deref(&self) -> &Self::Target { + &self.inner + } +} +impl<'a> ::std::ops::DerefMut for RectLayerKHRBuilder<'a> { + fn deref_mut(&mut self) -> &mut Self::Target { + &mut self.inner + } +} +impl<'a> RectLayerKHRBuilder<'a> { + pub fn offset(mut self, offset: Offset2D) -> RectLayerKHRBuilder<'a> { + self.inner.offset = offset; + self + } + pub fn extent(mut self, extent: Extent2D) -> RectLayerKHRBuilder<'a> { + self.inner.extent = extent; + self + } + pub fn layer(mut self, layer: u32) -> RectLayerKHRBuilder<'a> { + self.inner.layer = layer; + self + } + #[doc = r" Calling build will **discard** all the lifetime information. Only call this if"] + #[doc = r" necessary! 
Builders implement `Deref` targeting their corresponding Vulkan struct,"] + #[doc = r" so references to builders can be passed directly to Vulkan functions."] + pub fn build(self) -> RectLayerKHR { + self.inner + } +} +#[repr(C)] +#[derive(Copy, Clone, Debug)] +#[doc = ""] +pub struct PhysicalDeviceVariablePointerFeatures { + pub s_type: StructureType, + pub p_next: *mut c_void, + pub variable_pointers_storage_buffer: Bool32, + pub variable_pointers: Bool32, +} +impl ::std::default::Default for PhysicalDeviceVariablePointerFeatures { + fn default() -> PhysicalDeviceVariablePointerFeatures { + PhysicalDeviceVariablePointerFeatures { + s_type: StructureType::PHYSICAL_DEVICE_VARIABLE_POINTER_FEATURES, + p_next: ::std::ptr::null_mut(), + variable_pointers_storage_buffer: Bool32::default(), + variable_pointers: Bool32::default(), + } + } +} +impl PhysicalDeviceVariablePointerFeatures { + pub fn builder<'a>() -> PhysicalDeviceVariablePointerFeaturesBuilder<'a> { + PhysicalDeviceVariablePointerFeaturesBuilder { + inner: PhysicalDeviceVariablePointerFeatures::default(), + marker: ::std::marker::PhantomData, + } + } +} +#[repr(transparent)] +pub struct PhysicalDeviceVariablePointerFeaturesBuilder<'a> { + inner: PhysicalDeviceVariablePointerFeatures, + marker: ::std::marker::PhantomData<&'a ()>, +} +unsafe impl ExtendsDeviceCreateInfo for PhysicalDeviceVariablePointerFeaturesBuilder<'_> {} +unsafe impl ExtendsDeviceCreateInfo for PhysicalDeviceVariablePointerFeatures {} +impl<'a> ::std::ops::Deref for PhysicalDeviceVariablePointerFeaturesBuilder<'a> { + type Target = PhysicalDeviceVariablePointerFeatures; + fn deref(&self) -> &Self::Target { + &self.inner + } +} +impl<'a> ::std::ops::DerefMut for PhysicalDeviceVariablePointerFeaturesBuilder<'a> { + fn deref_mut(&mut self) -> &mut Self::Target { + &mut self.inner + } +} +impl<'a> PhysicalDeviceVariablePointerFeaturesBuilder<'a> { + pub fn variable_pointers_storage_buffer( + mut self, + variable_pointers_storage_buffer: bool, + ) -> PhysicalDeviceVariablePointerFeaturesBuilder<'a> { + self.inner.variable_pointers_storage_buffer = variable_pointers_storage_buffer.into(); + self + } + pub fn variable_pointers( + mut self, + variable_pointers: bool, + ) -> PhysicalDeviceVariablePointerFeaturesBuilder<'a> { + self.inner.variable_pointers = variable_pointers.into(); + self + } + #[doc = r" Calling build will **discard** all the lifetime information. Only call this if"] + #[doc = r" necessary! 
Builders implement `Deref` targeting their corresponding Vulkan struct,"] + #[doc = r" so references to builders can be passed directly to Vulkan functions."] + pub fn build(self) -> PhysicalDeviceVariablePointerFeatures { + self.inner + } +} +#[repr(C)] +#[derive(Copy, Clone, Default, Debug)] +#[doc = ""] +pub struct ExternalMemoryProperties { + pub external_memory_features: ExternalMemoryFeatureFlags, + pub export_from_imported_handle_types: ExternalMemoryHandleTypeFlags, + pub compatible_handle_types: ExternalMemoryHandleTypeFlags, +} +impl ExternalMemoryProperties { + pub fn builder<'a>() -> ExternalMemoryPropertiesBuilder<'a> { + ExternalMemoryPropertiesBuilder { + inner: ExternalMemoryProperties::default(), + marker: ::std::marker::PhantomData, + } + } +} +#[repr(transparent)] +pub struct ExternalMemoryPropertiesBuilder<'a> { + inner: ExternalMemoryProperties, + marker: ::std::marker::PhantomData<&'a ()>, +} +impl<'a> ::std::ops::Deref for ExternalMemoryPropertiesBuilder<'a> { + type Target = ExternalMemoryProperties; + fn deref(&self) -> &Self::Target { + &self.inner + } +} +impl<'a> ::std::ops::DerefMut for ExternalMemoryPropertiesBuilder<'a> { + fn deref_mut(&mut self) -> &mut Self::Target { + &mut self.inner + } +} +impl<'a> ExternalMemoryPropertiesBuilder<'a> { + pub fn external_memory_features( + mut self, + external_memory_features: ExternalMemoryFeatureFlags, + ) -> ExternalMemoryPropertiesBuilder<'a> { + self.inner.external_memory_features = external_memory_features; + self + } + pub fn export_from_imported_handle_types( + mut self, + export_from_imported_handle_types: ExternalMemoryHandleTypeFlags, + ) -> ExternalMemoryPropertiesBuilder<'a> { + self.inner.export_from_imported_handle_types = export_from_imported_handle_types; + self + } + pub fn compatible_handle_types( + mut self, + compatible_handle_types: ExternalMemoryHandleTypeFlags, + ) -> ExternalMemoryPropertiesBuilder<'a> { + self.inner.compatible_handle_types = compatible_handle_types; + self + } + #[doc = r" Calling build will **discard** all the lifetime information. Only call this if"] + #[doc = r" necessary! 
Builders implement `Deref` targeting their corresponding Vulkan struct,"] + #[doc = r" so references to builders can be passed directly to Vulkan functions."] + pub fn build(self) -> ExternalMemoryProperties { + self.inner + } +} +#[repr(C)] +#[derive(Copy, Clone, Debug)] +#[doc = ""] +pub struct PhysicalDeviceExternalImageFormatInfo { + pub s_type: StructureType, + pub p_next: *const c_void, + pub handle_type: ExternalMemoryHandleTypeFlags, +} +impl ::std::default::Default for PhysicalDeviceExternalImageFormatInfo { + fn default() -> PhysicalDeviceExternalImageFormatInfo { + PhysicalDeviceExternalImageFormatInfo { + s_type: StructureType::PHYSICAL_DEVICE_EXTERNAL_IMAGE_FORMAT_INFO, + p_next: ::std::ptr::null(), + handle_type: ExternalMemoryHandleTypeFlags::default(), + } + } +} +impl PhysicalDeviceExternalImageFormatInfo { + pub fn builder<'a>() -> PhysicalDeviceExternalImageFormatInfoBuilder<'a> { + PhysicalDeviceExternalImageFormatInfoBuilder { + inner: PhysicalDeviceExternalImageFormatInfo::default(), + marker: ::std::marker::PhantomData, + } + } +} +#[repr(transparent)] +pub struct PhysicalDeviceExternalImageFormatInfoBuilder<'a> { + inner: PhysicalDeviceExternalImageFormatInfo, + marker: ::std::marker::PhantomData<&'a ()>, +} +unsafe impl ExtendsPhysicalDeviceImageFormatInfo2 + for PhysicalDeviceExternalImageFormatInfoBuilder<'_> +{ +} +unsafe impl ExtendsPhysicalDeviceImageFormatInfo2 for PhysicalDeviceExternalImageFormatInfo {} +impl<'a> ::std::ops::Deref for PhysicalDeviceExternalImageFormatInfoBuilder<'a> { + type Target = PhysicalDeviceExternalImageFormatInfo; + fn deref(&self) -> &Self::Target { + &self.inner + } +} +impl<'a> ::std::ops::DerefMut for PhysicalDeviceExternalImageFormatInfoBuilder<'a> { + fn deref_mut(&mut self) -> &mut Self::Target { + &mut self.inner + } +} +impl<'a> PhysicalDeviceExternalImageFormatInfoBuilder<'a> { + pub fn handle_type( + mut self, + handle_type: ExternalMemoryHandleTypeFlags, + ) -> PhysicalDeviceExternalImageFormatInfoBuilder<'a> { + self.inner.handle_type = handle_type; + self + } + #[doc = r" Calling build will **discard** all the lifetime information. Only call this if"] + #[doc = r" necessary! 
Builders implement `Deref` targeting their corresponding Vulkan struct,"] + #[doc = r" so references to builders can be passed directly to Vulkan functions."] + pub fn build(self) -> PhysicalDeviceExternalImageFormatInfo { + self.inner + } +} +#[repr(C)] +#[derive(Copy, Clone, Debug)] +#[doc = ""] +pub struct ExternalImageFormatProperties { + pub s_type: StructureType, + pub p_next: *mut c_void, + pub external_memory_properties: ExternalMemoryProperties, +} +impl ::std::default::Default for ExternalImageFormatProperties { + fn default() -> ExternalImageFormatProperties { + ExternalImageFormatProperties { + s_type: StructureType::EXTERNAL_IMAGE_FORMAT_PROPERTIES, + p_next: ::std::ptr::null_mut(), + external_memory_properties: ExternalMemoryProperties::default(), + } + } +} +impl ExternalImageFormatProperties { + pub fn builder<'a>() -> ExternalImageFormatPropertiesBuilder<'a> { + ExternalImageFormatPropertiesBuilder { + inner: ExternalImageFormatProperties::default(), + marker: ::std::marker::PhantomData, + } + } +} +#[repr(transparent)] +pub struct ExternalImageFormatPropertiesBuilder<'a> { + inner: ExternalImageFormatProperties, + marker: ::std::marker::PhantomData<&'a ()>, +} +unsafe impl ExtendsImageFormatProperties2 for ExternalImageFormatPropertiesBuilder<'_> {} +unsafe impl ExtendsImageFormatProperties2 for ExternalImageFormatProperties {} +impl<'a> ::std::ops::Deref for ExternalImageFormatPropertiesBuilder<'a> { + type Target = ExternalImageFormatProperties; + fn deref(&self) -> &Self::Target { + &self.inner + } +} +impl<'a> ::std::ops::DerefMut for ExternalImageFormatPropertiesBuilder<'a> { + fn deref_mut(&mut self) -> &mut Self::Target { + &mut self.inner + } +} +impl<'a> ExternalImageFormatPropertiesBuilder<'a> { + pub fn external_memory_properties( + mut self, + external_memory_properties: ExternalMemoryProperties, + ) -> ExternalImageFormatPropertiesBuilder<'a> { + self.inner.external_memory_properties = external_memory_properties; + self + } + #[doc = r" Calling build will **discard** all the lifetime information. Only call this if"] + #[doc = r" necessary! 
Builders implement `Deref` targeting their corresponding Vulkan struct,"] + #[doc = r" so references to builders can be passed directly to Vulkan functions."] + pub fn build(self) -> ExternalImageFormatProperties { + self.inner + } +} +#[repr(C)] +#[derive(Copy, Clone, Debug)] +#[doc = ""] +pub struct PhysicalDeviceExternalBufferInfo { + pub s_type: StructureType, + pub p_next: *const c_void, + pub flags: BufferCreateFlags, + pub usage: BufferUsageFlags, + pub handle_type: ExternalMemoryHandleTypeFlags, +} +impl ::std::default::Default for PhysicalDeviceExternalBufferInfo { + fn default() -> PhysicalDeviceExternalBufferInfo { + PhysicalDeviceExternalBufferInfo { + s_type: StructureType::PHYSICAL_DEVICE_EXTERNAL_BUFFER_INFO, + p_next: ::std::ptr::null(), + flags: BufferCreateFlags::default(), + usage: BufferUsageFlags::default(), + handle_type: ExternalMemoryHandleTypeFlags::default(), + } + } +} +impl PhysicalDeviceExternalBufferInfo { + pub fn builder<'a>() -> PhysicalDeviceExternalBufferInfoBuilder<'a> { + PhysicalDeviceExternalBufferInfoBuilder { + inner: PhysicalDeviceExternalBufferInfo::default(), + marker: ::std::marker::PhantomData, + } + } +} +#[repr(transparent)] +pub struct PhysicalDeviceExternalBufferInfoBuilder<'a> { + inner: PhysicalDeviceExternalBufferInfo, + marker: ::std::marker::PhantomData<&'a ()>, +} +pub unsafe trait ExtendsPhysicalDeviceExternalBufferInfo {} +impl<'a> ::std::ops::Deref for PhysicalDeviceExternalBufferInfoBuilder<'a> { + type Target = PhysicalDeviceExternalBufferInfo; + fn deref(&self) -> &Self::Target { + &self.inner + } +} +impl<'a> ::std::ops::DerefMut for PhysicalDeviceExternalBufferInfoBuilder<'a> { + fn deref_mut(&mut self) -> &mut Self::Target { + &mut self.inner + } +} +impl<'a> PhysicalDeviceExternalBufferInfoBuilder<'a> { + pub fn flags( + mut self, + flags: BufferCreateFlags, + ) -> PhysicalDeviceExternalBufferInfoBuilder<'a> { + self.inner.flags = flags; + self + } + pub fn usage(mut self, usage: BufferUsageFlags) -> PhysicalDeviceExternalBufferInfoBuilder<'a> { + self.inner.usage = usage; + self + } + pub fn handle_type( + mut self, + handle_type: ExternalMemoryHandleTypeFlags, + ) -> PhysicalDeviceExternalBufferInfoBuilder<'a> { + self.inner.handle_type = handle_type; + self + } + #[doc = r" Prepends the given extension struct between the root and the first pointer. This"] + #[doc = r" method only exists on structs that can be passed to a function directly. Only"] + #[doc = r" valid extension structs can be pushed into the chain."] + #[doc = r" If the chain looks like `A -> B -> C`, and you call `builder.push_next(&mut D)`, then the"] + #[doc = r" chain will look like `A -> D -> B -> C`."] + pub fn push_next<T: ExtendsPhysicalDeviceExternalBufferInfo>( + mut self, + next: &'a mut T, + ) -> PhysicalDeviceExternalBufferInfoBuilder<'a> { + unsafe { + let next_ptr = next as *mut T as *mut BaseOutStructure; + let last_next = ptr_chain_iter(next).last().unwrap(); + (*last_next).p_next = self.inner.p_next as _; + self.inner.p_next = next_ptr as _; + } + self + } + #[doc = r" Calling build will **discard** all the lifetime information. Only call this if"] + #[doc = r" necessary! Builders implement `Deref` targeting their corresponding Vulkan struct,"] + #[doc = r" so references to builders can be passed directly to Vulkan functions."] + pub fn build(self) -> PhysicalDeviceExternalBufferInfo { + self.inner + } +}
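// Editorial sketch (not part of the vendored ash output): a query input
// built with several chained setters. The default flag values here are
// placeholders purely for illustration; a real caller would pass meaningful
// usage and handle-type bits before handing the struct to a query call.
#[allow(dead_code)]
fn _external_buffer_info_sketch() {
    let info = PhysicalDeviceExternalBufferInfo::builder()
        .flags(BufferCreateFlags::default())
        .usage(BufferUsageFlags::default())
        .handle_type(ExternalMemoryHandleTypeFlags::default())
        .build();
    debug_assert_eq!(info.s_type, StructureType::PHYSICAL_DEVICE_EXTERNAL_BUFFER_INFO);
}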
+#[repr(C)] +#[derive(Copy, Clone, Debug)] +#[doc = ""] +pub struct ExternalBufferProperties { + pub s_type: StructureType, + pub p_next: *mut c_void, + pub external_memory_properties: ExternalMemoryProperties, +} +impl ::std::default::Default for ExternalBufferProperties { + fn default() -> ExternalBufferProperties { + ExternalBufferProperties { + s_type: StructureType::EXTERNAL_BUFFER_PROPERTIES, + p_next: ::std::ptr::null_mut(), + external_memory_properties: ExternalMemoryProperties::default(), + } + } +} +impl ExternalBufferProperties { + pub fn builder<'a>() -> ExternalBufferPropertiesBuilder<'a> { + ExternalBufferPropertiesBuilder { + inner: ExternalBufferProperties::default(), + marker: ::std::marker::PhantomData, + } + } +} +#[repr(transparent)] +pub struct ExternalBufferPropertiesBuilder<'a> { + inner: ExternalBufferProperties, + marker: ::std::marker::PhantomData<&'a ()>, +} +pub unsafe trait ExtendsExternalBufferProperties {} +impl<'a> ::std::ops::Deref for ExternalBufferPropertiesBuilder<'a> { + type Target = ExternalBufferProperties; + fn deref(&self) -> &Self::Target { + &self.inner + } +} +impl<'a> ::std::ops::DerefMut for ExternalBufferPropertiesBuilder<'a> { + fn deref_mut(&mut self) -> &mut Self::Target { + &mut self.inner + } +} +impl<'a> ExternalBufferPropertiesBuilder<'a> { + pub fn external_memory_properties( + mut self, + external_memory_properties: ExternalMemoryProperties, + ) -> ExternalBufferPropertiesBuilder<'a> { + self.inner.external_memory_properties = external_memory_properties; + self + } + #[doc = r" Prepends the given extension struct between the root and the first pointer. This"] + #[doc = r" method only exists on structs that can be passed to a function directly. Only"] + #[doc = r" valid extension structs can be pushed into the chain."] + #[doc = r" If the chain looks like `A -> B -> C`, and you call `builder.push_next(&mut D)`, then the"] + #[doc = r" chain will look like `A -> D -> B -> C`."] + pub fn push_next<T: ExtendsExternalBufferProperties>( + mut self, + next: &'a mut T, + ) -> ExternalBufferPropertiesBuilder<'a> { + unsafe { + let next_ptr = next as *mut T as *mut BaseOutStructure; + let last_next = ptr_chain_iter(next).last().unwrap(); + (*last_next).p_next = self.inner.p_next as _; + self.inner.p_next = next_ptr as _; + } + self + } + #[doc = r" Calling build will **discard** all the lifetime information. Only call this if"] + #[doc = r" necessary! 
Builders implement `Deref` targeting their corresponding Vulkan struct,"] + #[doc = r" so references to builders can be passed directly to Vulkan functions."] + pub fn build(self) -> ExternalBufferProperties { + self.inner + } +} +#[repr(C)] +#[derive(Copy, Clone, Debug)] +#[doc = ""] +pub struct PhysicalDeviceIDProperties { + pub s_type: StructureType, + pub p_next: *mut c_void, + pub device_uuid: [u8; UUID_SIZE], + pub driver_uuid: [u8; UUID_SIZE], + pub device_luid: [u8; LUID_SIZE], + pub device_node_mask: u32, + pub device_luid_valid: Bool32, +} +impl ::std::default::Default for PhysicalDeviceIDProperties { + fn default() -> PhysicalDeviceIDProperties { + PhysicalDeviceIDProperties { + s_type: StructureType::PHYSICAL_DEVICE_ID_PROPERTIES, + p_next: ::std::ptr::null_mut(), + device_uuid: unsafe { ::std::mem::zeroed() }, + driver_uuid: unsafe { ::std::mem::zeroed() }, + device_luid: unsafe { ::std::mem::zeroed() }, + device_node_mask: u32::default(), + device_luid_valid: Bool32::default(), + } + } +} +impl PhysicalDeviceIDProperties { + pub fn builder<'a>() -> PhysicalDeviceIDPropertiesBuilder<'a> { + PhysicalDeviceIDPropertiesBuilder { + inner: PhysicalDeviceIDProperties::default(), + marker: ::std::marker::PhantomData, + } + } +} +#[repr(transparent)] +pub struct PhysicalDeviceIDPropertiesBuilder<'a> { + inner: PhysicalDeviceIDProperties, + marker: ::std::marker::PhantomData<&'a ()>, +} +unsafe impl ExtendsPhysicalDeviceProperties2 for PhysicalDeviceIDPropertiesBuilder<'_> {} +unsafe impl ExtendsPhysicalDeviceProperties2 for PhysicalDeviceIDProperties {} +impl<'a> ::std::ops::Deref for PhysicalDeviceIDPropertiesBuilder<'a> { + type Target = PhysicalDeviceIDProperties; + fn deref(&self) -> &Self::Target { + &self.inner + } +} +impl<'a> ::std::ops::DerefMut for PhysicalDeviceIDPropertiesBuilder<'a> { + fn deref_mut(&mut self) -> &mut Self::Target { + &mut self.inner + } +} +impl<'a> PhysicalDeviceIDPropertiesBuilder<'a> { + pub fn device_uuid( + mut self, + device_uuid: [u8; UUID_SIZE], + ) -> PhysicalDeviceIDPropertiesBuilder<'a> { + self.inner.device_uuid = device_uuid; + self + } + pub fn driver_uuid( + mut self, + driver_uuid: [u8; UUID_SIZE], + ) -> PhysicalDeviceIDPropertiesBuilder<'a> { + self.inner.driver_uuid = driver_uuid; + self + } + pub fn device_luid( + mut self, + device_luid: [u8; LUID_SIZE], + ) -> PhysicalDeviceIDPropertiesBuilder<'a> { + self.inner.device_luid = device_luid; + self + } + pub fn device_node_mask( + mut self, + device_node_mask: u32, + ) -> PhysicalDeviceIDPropertiesBuilder<'a> { + self.inner.device_node_mask = device_node_mask; + self + } + pub fn device_luid_valid( + mut self, + device_luid_valid: bool, + ) -> PhysicalDeviceIDPropertiesBuilder<'a> { + self.inner.device_luid_valid = device_luid_valid.into(); + self + } + #[doc = r" Calling build will **discard** all the lifetime information. Only call this if"] + #[doc = r" necessary! 
Builders implement `Deref` targeting their corresponding Vulkan struct,"] + #[doc = r" so references to builders can be passed directly to Vulkan functions."] + pub fn build(self) -> PhysicalDeviceIDProperties { + self.inner + } +} +#[repr(C)] +#[derive(Copy, Clone, Debug)] +#[doc = ""] +pub struct ExternalMemoryImageCreateInfo { + pub s_type: StructureType, + pub p_next: *const c_void, + pub handle_types: ExternalMemoryHandleTypeFlags, +} +impl ::std::default::Default for ExternalMemoryImageCreateInfo { + fn default() -> ExternalMemoryImageCreateInfo { + ExternalMemoryImageCreateInfo { + s_type: StructureType::EXTERNAL_MEMORY_IMAGE_CREATE_INFO, + p_next: ::std::ptr::null(), + handle_types: ExternalMemoryHandleTypeFlags::default(), + } + } +} +impl ExternalMemoryImageCreateInfo { + pub fn builder<'a>() -> ExternalMemoryImageCreateInfoBuilder<'a> { + ExternalMemoryImageCreateInfoBuilder { + inner: ExternalMemoryImageCreateInfo::default(), + marker: ::std::marker::PhantomData, + } + } +} +#[repr(transparent)] +pub struct ExternalMemoryImageCreateInfoBuilder<'a> { + inner: ExternalMemoryImageCreateInfo, + marker: ::std::marker::PhantomData<&'a ()>, +} +unsafe impl ExtendsImageCreateInfo for ExternalMemoryImageCreateInfoBuilder<'_> {} +unsafe impl ExtendsImageCreateInfo for ExternalMemoryImageCreateInfo {} +impl<'a> ::std::ops::Deref for ExternalMemoryImageCreateInfoBuilder<'a> { + type Target = ExternalMemoryImageCreateInfo; + fn deref(&self) -> &Self::Target { + &self.inner + } +} +impl<'a> ::std::ops::DerefMut for ExternalMemoryImageCreateInfoBuilder<'a> { + fn deref_mut(&mut self) -> &mut Self::Target { + &mut self.inner + } +} +impl<'a> ExternalMemoryImageCreateInfoBuilder<'a> { + pub fn handle_types( + mut self, + handle_types: ExternalMemoryHandleTypeFlags, + ) -> ExternalMemoryImageCreateInfoBuilder<'a> { + self.inner.handle_types = handle_types; + self + } + #[doc = r" Calling build will **discard** all the lifetime information. Only call this if"] + #[doc = r" necessary! 
Builders implement `Deref` targeting their corresponding Vulkan struct,"] + #[doc = r" so references to builders can be passed directly to Vulkan functions."] + pub fn build(self) -> ExternalMemoryImageCreateInfo { + self.inner + } +} +#[repr(C)] +#[derive(Copy, Clone, Debug)] +#[doc = ""] +pub struct ExternalMemoryBufferCreateInfo { + pub s_type: StructureType, + pub p_next: *const c_void, + pub handle_types: ExternalMemoryHandleTypeFlags, +} +impl ::std::default::Default for ExternalMemoryBufferCreateInfo { + fn default() -> ExternalMemoryBufferCreateInfo { + ExternalMemoryBufferCreateInfo { + s_type: StructureType::EXTERNAL_MEMORY_BUFFER_CREATE_INFO, + p_next: ::std::ptr::null(), + handle_types: ExternalMemoryHandleTypeFlags::default(), + } + } +} +impl ExternalMemoryBufferCreateInfo { + pub fn builder<'a>() -> ExternalMemoryBufferCreateInfoBuilder<'a> { + ExternalMemoryBufferCreateInfoBuilder { + inner: ExternalMemoryBufferCreateInfo::default(), + marker: ::std::marker::PhantomData, + } + } +} +#[repr(transparent)] +pub struct ExternalMemoryBufferCreateInfoBuilder<'a> { + inner: ExternalMemoryBufferCreateInfo, + marker: ::std::marker::PhantomData<&'a ()>, +} +unsafe impl ExtendsBufferCreateInfo for ExternalMemoryBufferCreateInfoBuilder<'_> {} +unsafe impl ExtendsBufferCreateInfo for ExternalMemoryBufferCreateInfo {} +impl<'a> ::std::ops::Deref for ExternalMemoryBufferCreateInfoBuilder<'a> { + type Target = ExternalMemoryBufferCreateInfo; + fn deref(&self) -> &Self::Target { + &self.inner + } +} +impl<'a> ::std::ops::DerefMut for ExternalMemoryBufferCreateInfoBuilder<'a> { + fn deref_mut(&mut self) -> &mut Self::Target { + &mut self.inner + } +} +impl<'a> ExternalMemoryBufferCreateInfoBuilder<'a> { + pub fn handle_types( + mut self, + handle_types: ExternalMemoryHandleTypeFlags, + ) -> ExternalMemoryBufferCreateInfoBuilder<'a> { + self.inner.handle_types = handle_types; + self + } + #[doc = r" Calling build will **discard** all the lifetime information. Only call this if"] + #[doc = r" necessary! 
Builders implement `Deref` targeting their corresponding Vulkan struct,"] + #[doc = r" so references to builders can be passed directly to Vulkan functions."] + pub fn build(self) -> ExternalMemoryBufferCreateInfo { + self.inner + } +} +#[repr(C)] +#[derive(Copy, Clone, Debug)] +#[doc = ""] +pub struct ExportMemoryAllocateInfo { + pub s_type: StructureType, + pub p_next: *const c_void, + pub handle_types: ExternalMemoryHandleTypeFlags, +} +impl ::std::default::Default for ExportMemoryAllocateInfo { + fn default() -> ExportMemoryAllocateInfo { + ExportMemoryAllocateInfo { + s_type: StructureType::EXPORT_MEMORY_ALLOCATE_INFO, + p_next: ::std::ptr::null(), + handle_types: ExternalMemoryHandleTypeFlags::default(), + } + } +} +impl ExportMemoryAllocateInfo { + pub fn builder<'a>() -> ExportMemoryAllocateInfoBuilder<'a> { + ExportMemoryAllocateInfoBuilder { + inner: ExportMemoryAllocateInfo::default(), + marker: ::std::marker::PhantomData, + } + } +} +#[repr(transparent)] +pub struct ExportMemoryAllocateInfoBuilder<'a> { + inner: ExportMemoryAllocateInfo, + marker: ::std::marker::PhantomData<&'a ()>, +} +unsafe impl ExtendsMemoryAllocateInfo for ExportMemoryAllocateInfoBuilder<'_> {} +unsafe impl ExtendsMemoryAllocateInfo for ExportMemoryAllocateInfo {} +impl<'a> ::std::ops::Deref for ExportMemoryAllocateInfoBuilder<'a> { + type Target = ExportMemoryAllocateInfo; + fn deref(&self) -> &Self::Target { + &self.inner + } +} +impl<'a> ::std::ops::DerefMut for ExportMemoryAllocateInfoBuilder<'a> { + fn deref_mut(&mut self) -> &mut Self::Target { + &mut self.inner + } +} +impl<'a> ExportMemoryAllocateInfoBuilder<'a> { + pub fn handle_types( + mut self, + handle_types: ExternalMemoryHandleTypeFlags, + ) -> ExportMemoryAllocateInfoBuilder<'a> { + self.inner.handle_types = handle_types; + self + } + #[doc = r" Calling build will **discard** all the lifetime information. Only call this if"] + #[doc = r" necessary! 
Builders implement `Deref` targeting their corresponding Vulkan struct,"] + #[doc = r" so references to builders can be passed directly to Vulkan functions."] + pub fn build(self) -> ExportMemoryAllocateInfo { + self.inner + } +} +#[repr(C)] +#[derive(Copy, Clone, Debug)] +#[doc = ""] +pub struct ImportMemoryWin32HandleInfoKHR { + pub s_type: StructureType, + pub p_next: *const c_void, + pub handle_type: ExternalMemoryHandleTypeFlags, + pub handle: HANDLE, + pub name: LPCWSTR, +} +impl ::std::default::Default for ImportMemoryWin32HandleInfoKHR { + fn default() -> ImportMemoryWin32HandleInfoKHR { + ImportMemoryWin32HandleInfoKHR { + s_type: StructureType::IMPORT_MEMORY_WIN32_HANDLE_INFO_KHR, + p_next: ::std::ptr::null(), + handle_type: ExternalMemoryHandleTypeFlags::default(), + handle: unsafe { ::std::mem::zeroed() }, + name: unsafe { ::std::mem::zeroed() }, + } + } +} +impl ImportMemoryWin32HandleInfoKHR { + pub fn builder<'a>() -> ImportMemoryWin32HandleInfoKHRBuilder<'a> { + ImportMemoryWin32HandleInfoKHRBuilder { + inner: ImportMemoryWin32HandleInfoKHR::default(), + marker: ::std::marker::PhantomData, + } + } +} +#[repr(transparent)] +pub struct ImportMemoryWin32HandleInfoKHRBuilder<'a> { + inner: ImportMemoryWin32HandleInfoKHR, + marker: ::std::marker::PhantomData<&'a ()>, +} +unsafe impl ExtendsMemoryAllocateInfo for ImportMemoryWin32HandleInfoKHRBuilder<'_> {} +unsafe impl ExtendsMemoryAllocateInfo for ImportMemoryWin32HandleInfoKHR {} +impl<'a> ::std::ops::Deref for ImportMemoryWin32HandleInfoKHRBuilder<'a> { + type Target = ImportMemoryWin32HandleInfoKHR; + fn deref(&self) -> &Self::Target { + &self.inner + } +} +impl<'a> ::std::ops::DerefMut for ImportMemoryWin32HandleInfoKHRBuilder<'a> { + fn deref_mut(&mut self) -> &mut Self::Target { + &mut self.inner + } +} +impl<'a> ImportMemoryWin32HandleInfoKHRBuilder<'a> { + pub fn handle_type( + mut self, + handle_type: ExternalMemoryHandleTypeFlags, + ) -> ImportMemoryWin32HandleInfoKHRBuilder<'a> { + self.inner.handle_type = handle_type; + self + } + pub fn handle(mut self, handle: HANDLE) -> ImportMemoryWin32HandleInfoKHRBuilder<'a> { + self.inner.handle = handle; + self + } + pub fn name(mut self, name: LPCWSTR) -> ImportMemoryWin32HandleInfoKHRBuilder<'a> { + self.inner.name = name; + self + } + #[doc = r" Calling build will **discard** all the lifetime information. Only call this if"] + #[doc = r" necessary! 
Builders implement `Deref` targeting their corresponding Vulkan struct,"] + #[doc = r" so references to builders can be passed directly to Vulkan functions."] + pub fn build(self) -> ImportMemoryWin32HandleInfoKHR { + self.inner + } +} +#[repr(C)] +#[derive(Copy, Clone, Debug)] +#[doc = ""] +pub struct ExportMemoryWin32HandleInfoKHR { + pub s_type: StructureType, + pub p_next: *const c_void, + pub p_attributes: *const SECURITY_ATTRIBUTES, + pub dw_access: DWORD, + pub name: LPCWSTR, +} +impl ::std::default::Default for ExportMemoryWin32HandleInfoKHR { + fn default() -> ExportMemoryWin32HandleInfoKHR { + ExportMemoryWin32HandleInfoKHR { + s_type: StructureType::EXPORT_MEMORY_WIN32_HANDLE_INFO_KHR, + p_next: ::std::ptr::null(), + p_attributes: ::std::ptr::null(), + dw_access: DWORD::default(), + name: unsafe { ::std::mem::zeroed() }, + } + } +} +impl ExportMemoryWin32HandleInfoKHR { + pub fn builder<'a>() -> ExportMemoryWin32HandleInfoKHRBuilder<'a> { + ExportMemoryWin32HandleInfoKHRBuilder { + inner: ExportMemoryWin32HandleInfoKHR::default(), + marker: ::std::marker::PhantomData, + } + } +} +#[repr(transparent)] +pub struct ExportMemoryWin32HandleInfoKHRBuilder<'a> { + inner: ExportMemoryWin32HandleInfoKHR, + marker: ::std::marker::PhantomData<&'a ()>, +} +unsafe impl ExtendsMemoryAllocateInfo for ExportMemoryWin32HandleInfoKHRBuilder<'_> {} +unsafe impl ExtendsMemoryAllocateInfo for ExportMemoryWin32HandleInfoKHR {} +impl<'a> ::std::ops::Deref for ExportMemoryWin32HandleInfoKHRBuilder<'a> { + type Target = ExportMemoryWin32HandleInfoKHR; + fn deref(&self) -> &Self::Target { + &self.inner + } +} +impl<'a> ::std::ops::DerefMut for ExportMemoryWin32HandleInfoKHRBuilder<'a> { + fn deref_mut(&mut self) -> &mut Self::Target { + &mut self.inner + } +} +impl<'a> ExportMemoryWin32HandleInfoKHRBuilder<'a> { + pub fn attributes( + mut self, + attributes: &'a SECURITY_ATTRIBUTES, + ) -> ExportMemoryWin32HandleInfoKHRBuilder<'a> { + self.inner.p_attributes = attributes; + self + } + pub fn dw_access(mut self, dw_access: DWORD) -> ExportMemoryWin32HandleInfoKHRBuilder<'a> { + self.inner.dw_access = dw_access; + self + } + pub fn name(mut self, name: LPCWSTR) -> ExportMemoryWin32HandleInfoKHRBuilder<'a> { + self.inner.name = name; + self + } + #[doc = r" Calling build will **discard** all the lifetime information. Only call this if"] + #[doc = r" necessary! 
+#[repr(C)]
+#[derive(Copy, Clone, Debug)]
+#[doc = ""]
+pub struct MemoryWin32HandlePropertiesKHR {
+    pub s_type: StructureType,
+    pub p_next: *mut c_void,
+    pub memory_type_bits: u32,
+}
+impl ::std::default::Default for MemoryWin32HandlePropertiesKHR {
+    fn default() -> MemoryWin32HandlePropertiesKHR {
+        MemoryWin32HandlePropertiesKHR {
+            s_type: StructureType::MEMORY_WIN32_HANDLE_PROPERTIES_KHR,
+            p_next: ::std::ptr::null_mut(),
+            memory_type_bits: u32::default(),
+        }
+    }
+}
+impl MemoryWin32HandlePropertiesKHR {
+    pub fn builder<'a>() -> MemoryWin32HandlePropertiesKHRBuilder<'a> {
+        MemoryWin32HandlePropertiesKHRBuilder {
+            inner: MemoryWin32HandlePropertiesKHR::default(),
+            marker: ::std::marker::PhantomData,
+        }
+    }
+}
+#[repr(transparent)]
+pub struct MemoryWin32HandlePropertiesKHRBuilder<'a> {
+    inner: MemoryWin32HandlePropertiesKHR,
+    marker: ::std::marker::PhantomData<&'a ()>,
+}
+pub unsafe trait ExtendsMemoryWin32HandlePropertiesKHR {}
+impl<'a> ::std::ops::Deref for MemoryWin32HandlePropertiesKHRBuilder<'a> {
+    type Target = MemoryWin32HandlePropertiesKHR;
+    fn deref(&self) -> &Self::Target {
+        &self.inner
+    }
+}
+impl<'a> ::std::ops::DerefMut for MemoryWin32HandlePropertiesKHRBuilder<'a> {
+    fn deref_mut(&mut self) -> &mut Self::Target {
+        &mut self.inner
+    }
+}
+impl<'a> MemoryWin32HandlePropertiesKHRBuilder<'a> {
+    pub fn memory_type_bits(
+        mut self,
+        memory_type_bits: u32,
+    ) -> MemoryWin32HandlePropertiesKHRBuilder<'a> {
+        self.inner.memory_type_bits = memory_type_bits;
+        self
+    }
+    #[doc = r" Prepends the given extension struct between the root and the first pointer. This"]
+    #[doc = r" method only exists on structs that can be passed to a function directly. Only"]
+    #[doc = r" valid extension structs can be pushed into the chain."]
+    #[doc = r" If the chain looks like `A -> B -> C`, and you call `builder.push_next(&mut D)`, then the"]
+    #[doc = r" chain will look like `A -> D -> B -> C`."]
+    pub fn push_next<T: ExtendsMemoryWin32HandlePropertiesKHR>(
+        mut self,
+        next: &'a mut T,
+    ) -> MemoryWin32HandlePropertiesKHRBuilder<'a> {
+        unsafe {
+            let next_ptr = next as *mut T as *mut BaseOutStructure;
+            let last_next = ptr_chain_iter(next).last().unwrap();
+            (*last_next).p_next = self.inner.p_next as _;
+            self.inner.p_next = next_ptr as _;
+        }
+        self
+    }
+    #[doc = r" Calling build will **discard** all the lifetime information. Only call this if"]
+    #[doc = r" necessary! 
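
The `push_next` splice above is easiest to see in isolation. A self-contained toy (plain Rust, no Vulkan types; not part of the patch) that performs the same pointer surgery, turning a chain `A -> B -> C` into `A -> D -> B -> C`:

    struct Node {
        tag: char,
        p_next: *mut Node,
    }

    fn main() {
        let mut c = Node { tag: 'C', p_next: std::ptr::null_mut() };
        let mut b = Node { tag: 'B', p_next: &mut c };
        let mut a = Node { tag: 'A', p_next: &mut b };
        let mut d = Node { tag: 'D', p_next: std::ptr::null_mut() };

        // The splice: the tail of D's (one-element) chain inherits A's old
        // p_next, then A points at D. This is what push_next does with
        // ptr_chain_iter(next).last() and the two p_next assignments.
        d.p_next = a.p_next;
        a.p_next = &mut d;

        // Walk the chain: prints "A D B C".
        let mut cur: *const Node = &a;
        while !cur.is_null() {
            unsafe {
                print!("{} ", (*cur).tag);
                cur = (*cur).p_next;
            }
        }
    }
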
Builders implement `Deref` targeting their corresponding Vulkan struct,"] + #[doc = r" so references to builders can be passed directly to Vulkan functions."] + pub fn build(self) -> MemoryWin32HandlePropertiesKHR { + self.inner + } +} +#[repr(C)] +#[derive(Copy, Clone, Debug)] +#[doc = ""] +pub struct MemoryGetWin32HandleInfoKHR { + pub s_type: StructureType, + pub p_next: *const c_void, + pub memory: DeviceMemory, + pub handle_type: ExternalMemoryHandleTypeFlags, +} +impl ::std::default::Default for MemoryGetWin32HandleInfoKHR { + fn default() -> MemoryGetWin32HandleInfoKHR { + MemoryGetWin32HandleInfoKHR { + s_type: StructureType::MEMORY_GET_WIN32_HANDLE_INFO_KHR, + p_next: ::std::ptr::null(), + memory: DeviceMemory::default(), + handle_type: ExternalMemoryHandleTypeFlags::default(), + } + } +} +impl MemoryGetWin32HandleInfoKHR { + pub fn builder<'a>() -> MemoryGetWin32HandleInfoKHRBuilder<'a> { + MemoryGetWin32HandleInfoKHRBuilder { + inner: MemoryGetWin32HandleInfoKHR::default(), + marker: ::std::marker::PhantomData, + } + } +} +#[repr(transparent)] +pub struct MemoryGetWin32HandleInfoKHRBuilder<'a> { + inner: MemoryGetWin32HandleInfoKHR, + marker: ::std::marker::PhantomData<&'a ()>, +} +pub unsafe trait ExtendsMemoryGetWin32HandleInfoKHR {} +impl<'a> ::std::ops::Deref for MemoryGetWin32HandleInfoKHRBuilder<'a> { + type Target = MemoryGetWin32HandleInfoKHR; + fn deref(&self) -> &Self::Target { + &self.inner + } +} +impl<'a> ::std::ops::DerefMut for MemoryGetWin32HandleInfoKHRBuilder<'a> { + fn deref_mut(&mut self) -> &mut Self::Target { + &mut self.inner + } +} +impl<'a> MemoryGetWin32HandleInfoKHRBuilder<'a> { + pub fn memory(mut self, memory: DeviceMemory) -> MemoryGetWin32HandleInfoKHRBuilder<'a> { + self.inner.memory = memory; + self + } + pub fn handle_type( + mut self, + handle_type: ExternalMemoryHandleTypeFlags, + ) -> MemoryGetWin32HandleInfoKHRBuilder<'a> { + self.inner.handle_type = handle_type; + self + } + #[doc = r" Prepends the given extension struct between the root and the first pointer. This"] + #[doc = r" method only exists on structs that can be passed to a function directly. Only"] + #[doc = r" valid extension structs can be pushed into the chain."] + #[doc = r" If the chain looks like `A -> B -> C`, and you call `builder.push_next(&mut D)`, then the"] + #[doc = r" chain will look like `A -> D -> B -> C`."] + pub fn push_next( + mut self, + next: &'a mut T, + ) -> MemoryGetWin32HandleInfoKHRBuilder<'a> { + unsafe { + let next_ptr = next as *mut T as *mut BaseOutStructure; + let last_next = ptr_chain_iter(next).last().unwrap(); + (*last_next).p_next = self.inner.p_next as _; + self.inner.p_next = next_ptr as _; + } + self + } + #[doc = r" Calling build will **discard** all the lifetime information. Only call this if"] + #[doc = r" necessary! 
Builders implement `Deref` targeting their corresponding Vulkan struct,"] + #[doc = r" so references to builders can be passed directly to Vulkan functions."] + pub fn build(self) -> MemoryGetWin32HandleInfoKHR { + self.inner + } +} +#[repr(C)] +#[derive(Copy, Clone, Debug)] +#[doc = ""] +pub struct ImportMemoryFdInfoKHR { + pub s_type: StructureType, + pub p_next: *const c_void, + pub handle_type: ExternalMemoryHandleTypeFlags, + pub fd: c_int, +} +impl ::std::default::Default for ImportMemoryFdInfoKHR { + fn default() -> ImportMemoryFdInfoKHR { + ImportMemoryFdInfoKHR { + s_type: StructureType::IMPORT_MEMORY_FD_INFO_KHR, + p_next: ::std::ptr::null(), + handle_type: ExternalMemoryHandleTypeFlags::default(), + fd: c_int::default(), + } + } +} +impl ImportMemoryFdInfoKHR { + pub fn builder<'a>() -> ImportMemoryFdInfoKHRBuilder<'a> { + ImportMemoryFdInfoKHRBuilder { + inner: ImportMemoryFdInfoKHR::default(), + marker: ::std::marker::PhantomData, + } + } +} +#[repr(transparent)] +pub struct ImportMemoryFdInfoKHRBuilder<'a> { + inner: ImportMemoryFdInfoKHR, + marker: ::std::marker::PhantomData<&'a ()>, +} +unsafe impl ExtendsMemoryAllocateInfo for ImportMemoryFdInfoKHRBuilder<'_> {} +unsafe impl ExtendsMemoryAllocateInfo for ImportMemoryFdInfoKHR {} +impl<'a> ::std::ops::Deref for ImportMemoryFdInfoKHRBuilder<'a> { + type Target = ImportMemoryFdInfoKHR; + fn deref(&self) -> &Self::Target { + &self.inner + } +} +impl<'a> ::std::ops::DerefMut for ImportMemoryFdInfoKHRBuilder<'a> { + fn deref_mut(&mut self) -> &mut Self::Target { + &mut self.inner + } +} +impl<'a> ImportMemoryFdInfoKHRBuilder<'a> { + pub fn handle_type( + mut self, + handle_type: ExternalMemoryHandleTypeFlags, + ) -> ImportMemoryFdInfoKHRBuilder<'a> { + self.inner.handle_type = handle_type; + self + } + pub fn fd(mut self, fd: c_int) -> ImportMemoryFdInfoKHRBuilder<'a> { + self.inner.fd = fd; + self + } + #[doc = r" Calling build will **discard** all the lifetime information. Only call this if"] + #[doc = r" necessary! 
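
A sketch for the POSIX side (assumption: per the Vulkan spec for VK_KHR_external_memory_fd, ownership of the file descriptor passes to the implementation when the import succeeds). The import struct is chained into a memory allocation via the `ExtendsMemoryAllocateInfo` impl shown above:

    fn make_fd_import<'a>(
        fd: c_int,
        handle_type: ExternalMemoryHandleTypeFlags,
    ) -> ImportMemoryFdInfoKHRBuilder<'a> {
        // If the subsequent allocation succeeds, the driver owns `fd`;
        // do not close it afterwards.
        ImportMemoryFdInfoKHR::builder()
            .handle_type(handle_type)
            .fd(fd)
    }
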
Builders implement `Deref` targeting their corresponding Vulkan struct,"] + #[doc = r" so references to builders can be passed directly to Vulkan functions."] + pub fn build(self) -> ImportMemoryFdInfoKHR { + self.inner + } +} +#[repr(C)] +#[derive(Copy, Clone, Debug)] +#[doc = ""] +pub struct MemoryFdPropertiesKHR { + pub s_type: StructureType, + pub p_next: *mut c_void, + pub memory_type_bits: u32, +} +impl ::std::default::Default for MemoryFdPropertiesKHR { + fn default() -> MemoryFdPropertiesKHR { + MemoryFdPropertiesKHR { + s_type: StructureType::MEMORY_FD_PROPERTIES_KHR, + p_next: ::std::ptr::null_mut(), + memory_type_bits: u32::default(), + } + } +} +impl MemoryFdPropertiesKHR { + pub fn builder<'a>() -> MemoryFdPropertiesKHRBuilder<'a> { + MemoryFdPropertiesKHRBuilder { + inner: MemoryFdPropertiesKHR::default(), + marker: ::std::marker::PhantomData, + } + } +} +#[repr(transparent)] +pub struct MemoryFdPropertiesKHRBuilder<'a> { + inner: MemoryFdPropertiesKHR, + marker: ::std::marker::PhantomData<&'a ()>, +} +pub unsafe trait ExtendsMemoryFdPropertiesKHR {} +impl<'a> ::std::ops::Deref for MemoryFdPropertiesKHRBuilder<'a> { + type Target = MemoryFdPropertiesKHR; + fn deref(&self) -> &Self::Target { + &self.inner + } +} +impl<'a> ::std::ops::DerefMut for MemoryFdPropertiesKHRBuilder<'a> { + fn deref_mut(&mut self) -> &mut Self::Target { + &mut self.inner + } +} +impl<'a> MemoryFdPropertiesKHRBuilder<'a> { + pub fn memory_type_bits(mut self, memory_type_bits: u32) -> MemoryFdPropertiesKHRBuilder<'a> { + self.inner.memory_type_bits = memory_type_bits; + self + } + #[doc = r" Prepends the given extension struct between the root and the first pointer. This"] + #[doc = r" method only exists on structs that can be passed to a function directly. Only"] + #[doc = r" valid extension structs can be pushed into the chain."] + #[doc = r" If the chain looks like `A -> B -> C`, and you call `builder.push_next(&mut D)`, then the"] + #[doc = r" chain will look like `A -> D -> B -> C`."] + pub fn push_next( + mut self, + next: &'a mut T, + ) -> MemoryFdPropertiesKHRBuilder<'a> { + unsafe { + let next_ptr = next as *mut T as *mut BaseOutStructure; + let last_next = ptr_chain_iter(next).last().unwrap(); + (*last_next).p_next = self.inner.p_next as _; + self.inner.p_next = next_ptr as _; + } + self + } + #[doc = r" Calling build will **discard** all the lifetime information. Only call this if"] + #[doc = r" necessary! 
Builders implement `Deref` targeting their corresponding Vulkan struct,"] + #[doc = r" so references to builders can be passed directly to Vulkan functions."] + pub fn build(self) -> MemoryFdPropertiesKHR { + self.inner + } +} +#[repr(C)] +#[derive(Copy, Clone, Debug)] +#[doc = ""] +pub struct MemoryGetFdInfoKHR { + pub s_type: StructureType, + pub p_next: *const c_void, + pub memory: DeviceMemory, + pub handle_type: ExternalMemoryHandleTypeFlags, +} +impl ::std::default::Default for MemoryGetFdInfoKHR { + fn default() -> MemoryGetFdInfoKHR { + MemoryGetFdInfoKHR { + s_type: StructureType::MEMORY_GET_FD_INFO_KHR, + p_next: ::std::ptr::null(), + memory: DeviceMemory::default(), + handle_type: ExternalMemoryHandleTypeFlags::default(), + } + } +} +impl MemoryGetFdInfoKHR { + pub fn builder<'a>() -> MemoryGetFdInfoKHRBuilder<'a> { + MemoryGetFdInfoKHRBuilder { + inner: MemoryGetFdInfoKHR::default(), + marker: ::std::marker::PhantomData, + } + } +} +#[repr(transparent)] +pub struct MemoryGetFdInfoKHRBuilder<'a> { + inner: MemoryGetFdInfoKHR, + marker: ::std::marker::PhantomData<&'a ()>, +} +pub unsafe trait ExtendsMemoryGetFdInfoKHR {} +impl<'a> ::std::ops::Deref for MemoryGetFdInfoKHRBuilder<'a> { + type Target = MemoryGetFdInfoKHR; + fn deref(&self) -> &Self::Target { + &self.inner + } +} +impl<'a> ::std::ops::DerefMut for MemoryGetFdInfoKHRBuilder<'a> { + fn deref_mut(&mut self) -> &mut Self::Target { + &mut self.inner + } +} +impl<'a> MemoryGetFdInfoKHRBuilder<'a> { + pub fn memory(mut self, memory: DeviceMemory) -> MemoryGetFdInfoKHRBuilder<'a> { + self.inner.memory = memory; + self + } + pub fn handle_type( + mut self, + handle_type: ExternalMemoryHandleTypeFlags, + ) -> MemoryGetFdInfoKHRBuilder<'a> { + self.inner.handle_type = handle_type; + self + } + #[doc = r" Prepends the given extension struct between the root and the first pointer. This"] + #[doc = r" method only exists on structs that can be passed to a function directly. Only"] + #[doc = r" valid extension structs can be pushed into the chain."] + #[doc = r" If the chain looks like `A -> B -> C`, and you call `builder.push_next(&mut D)`, then the"] + #[doc = r" chain will look like `A -> D -> B -> C`."] + pub fn push_next( + mut self, + next: &'a mut T, + ) -> MemoryGetFdInfoKHRBuilder<'a> { + unsafe { + let next_ptr = next as *mut T as *mut BaseOutStructure; + let last_next = ptr_chain_iter(next).last().unwrap(); + (*last_next).p_next = self.inner.p_next as _; + self.inner.p_next = next_ptr as _; + } + self + } + #[doc = r" Calling build will **discard** all the lifetime information. Only call this if"] + #[doc = r" necessary! 
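
The export-side counterpart, sketched under the assumption that the dispatchable `vkGetMemoryFdKHR` entry point (not part of this hunk) consumes this struct and returns a caller-owned file descriptor:

    fn make_fd_export<'a>(
        memory: DeviceMemory,
        handle_type: ExternalMemoryHandleTypeFlags,
    ) -> MemoryGetFdInfoKHRBuilder<'a> {
        // Input block for vkGetMemoryFdKHR; the fd it yields belongs to
        // the caller and must eventually be closed or imported elsewhere.
        MemoryGetFdInfoKHR::builder()
            .memory(memory)
            .handle_type(handle_type)
    }
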
Builders implement `Deref` targeting their corresponding Vulkan struct,"] + #[doc = r" so references to builders can be passed directly to Vulkan functions."] + pub fn build(self) -> MemoryGetFdInfoKHR { + self.inner + } +} +#[repr(C)] +#[derive(Copy, Clone, Debug)] +#[doc = ""] +pub struct Win32KeyedMutexAcquireReleaseInfoKHR { + pub s_type: StructureType, + pub p_next: *const c_void, + pub acquire_count: u32, + pub p_acquire_syncs: *const DeviceMemory, + pub p_acquire_keys: *const u64, + pub p_acquire_timeouts: *const u32, + pub release_count: u32, + pub p_release_syncs: *const DeviceMemory, + pub p_release_keys: *const u64, +} +impl ::std::default::Default for Win32KeyedMutexAcquireReleaseInfoKHR { + fn default() -> Win32KeyedMutexAcquireReleaseInfoKHR { + Win32KeyedMutexAcquireReleaseInfoKHR { + s_type: StructureType::WIN32_KEYED_MUTEX_ACQUIRE_RELEASE_INFO_KHR, + p_next: ::std::ptr::null(), + acquire_count: u32::default(), + p_acquire_syncs: ::std::ptr::null(), + p_acquire_keys: ::std::ptr::null(), + p_acquire_timeouts: ::std::ptr::null(), + release_count: u32::default(), + p_release_syncs: ::std::ptr::null(), + p_release_keys: ::std::ptr::null(), + } + } +} +impl Win32KeyedMutexAcquireReleaseInfoKHR { + pub fn builder<'a>() -> Win32KeyedMutexAcquireReleaseInfoKHRBuilder<'a> { + Win32KeyedMutexAcquireReleaseInfoKHRBuilder { + inner: Win32KeyedMutexAcquireReleaseInfoKHR::default(), + marker: ::std::marker::PhantomData, + } + } +} +#[repr(transparent)] +pub struct Win32KeyedMutexAcquireReleaseInfoKHRBuilder<'a> { + inner: Win32KeyedMutexAcquireReleaseInfoKHR, + marker: ::std::marker::PhantomData<&'a ()>, +} +unsafe impl ExtendsSubmitInfo for Win32KeyedMutexAcquireReleaseInfoKHRBuilder<'_> {} +unsafe impl ExtendsSubmitInfo for Win32KeyedMutexAcquireReleaseInfoKHR {} +impl<'a> ::std::ops::Deref for Win32KeyedMutexAcquireReleaseInfoKHRBuilder<'a> { + type Target = Win32KeyedMutexAcquireReleaseInfoKHR; + fn deref(&self) -> &Self::Target { + &self.inner + } +} +impl<'a> ::std::ops::DerefMut for Win32KeyedMutexAcquireReleaseInfoKHRBuilder<'a> { + fn deref_mut(&mut self) -> &mut Self::Target { + &mut self.inner + } +} +impl<'a> Win32KeyedMutexAcquireReleaseInfoKHRBuilder<'a> { + pub fn acquire_syncs( + mut self, + acquire_syncs: &'a [DeviceMemory], + ) -> Win32KeyedMutexAcquireReleaseInfoKHRBuilder<'a> { + self.inner.acquire_count = acquire_syncs.len() as _; + self.inner.p_acquire_syncs = acquire_syncs.as_ptr(); + self + } + pub fn acquire_keys( + mut self, + acquire_keys: &'a [u64], + ) -> Win32KeyedMutexAcquireReleaseInfoKHRBuilder<'a> { + self.inner.acquire_count = acquire_keys.len() as _; + self.inner.p_acquire_keys = acquire_keys.as_ptr(); + self + } + pub fn acquire_timeouts( + mut self, + acquire_timeouts: &'a [u32], + ) -> Win32KeyedMutexAcquireReleaseInfoKHRBuilder<'a> { + self.inner.acquire_count = acquire_timeouts.len() as _; + self.inner.p_acquire_timeouts = acquire_timeouts.as_ptr(); + self + } + pub fn release_syncs( + mut self, + release_syncs: &'a [DeviceMemory], + ) -> Win32KeyedMutexAcquireReleaseInfoKHRBuilder<'a> { + self.inner.release_count = release_syncs.len() as _; + self.inner.p_release_syncs = release_syncs.as_ptr(); + self + } + pub fn release_keys( + mut self, + release_keys: &'a [u64], + ) -> Win32KeyedMutexAcquireReleaseInfoKHRBuilder<'a> { + self.inner.release_count = release_keys.len() as _; + self.inner.p_release_keys = release_keys.as_ptr(); + self + } + #[doc = r" Calling build will **discard** all the lifetime information. 
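
One subtlety worth calling out: `acquire_syncs`, `acquire_keys`, and `acquire_timeouts` each rewrite `acquire_count` from their slice's length (and `release_syncs`/`release_keys` do the same for `release_count`), so the paired slices must be equally long or the last setter silently wins. A guarded sketch (timeouts are in milliseconds per the Vulkan spec):

    fn keyed_mutex_acquire<'a>(
        syncs: &'a [DeviceMemory],
        keys: &'a [u64],
        timeouts_ms: &'a [u32],
    ) -> Win32KeyedMutexAcquireReleaseInfoKHRBuilder<'a> {
        // Each setter overwrites acquire_count, so enforce matching lengths.
        debug_assert!(syncs.len() == keys.len() && keys.len() == timeouts_ms.len());
        Win32KeyedMutexAcquireReleaseInfoKHR::builder()
            .acquire_syncs(syncs)
            .acquire_keys(keys)
            .acquire_timeouts(timeouts_ms)
        // The release side mirrors this with release_syncs / release_keys.
    }
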
Only call this if"] + #[doc = r" necessary! Builders implement `Deref` targeting their corresponding Vulkan struct,"] + #[doc = r" so references to builders can be passed directly to Vulkan functions."] + pub fn build(self) -> Win32KeyedMutexAcquireReleaseInfoKHR { + self.inner + } +} +#[repr(C)] +#[derive(Copy, Clone, Debug)] +#[doc = ""] +pub struct PhysicalDeviceExternalSemaphoreInfo { + pub s_type: StructureType, + pub p_next: *const c_void, + pub handle_type: ExternalSemaphoreHandleTypeFlags, +} +impl ::std::default::Default for PhysicalDeviceExternalSemaphoreInfo { + fn default() -> PhysicalDeviceExternalSemaphoreInfo { + PhysicalDeviceExternalSemaphoreInfo { + s_type: StructureType::PHYSICAL_DEVICE_EXTERNAL_SEMAPHORE_INFO, + p_next: ::std::ptr::null(), + handle_type: ExternalSemaphoreHandleTypeFlags::default(), + } + } +} +impl PhysicalDeviceExternalSemaphoreInfo { + pub fn builder<'a>() -> PhysicalDeviceExternalSemaphoreInfoBuilder<'a> { + PhysicalDeviceExternalSemaphoreInfoBuilder { + inner: PhysicalDeviceExternalSemaphoreInfo::default(), + marker: ::std::marker::PhantomData, + } + } +} +#[repr(transparent)] +pub struct PhysicalDeviceExternalSemaphoreInfoBuilder<'a> { + inner: PhysicalDeviceExternalSemaphoreInfo, + marker: ::std::marker::PhantomData<&'a ()>, +} +pub unsafe trait ExtendsPhysicalDeviceExternalSemaphoreInfo {} +impl<'a> ::std::ops::Deref for PhysicalDeviceExternalSemaphoreInfoBuilder<'a> { + type Target = PhysicalDeviceExternalSemaphoreInfo; + fn deref(&self) -> &Self::Target { + &self.inner + } +} +impl<'a> ::std::ops::DerefMut for PhysicalDeviceExternalSemaphoreInfoBuilder<'a> { + fn deref_mut(&mut self) -> &mut Self::Target { + &mut self.inner + } +} +impl<'a> PhysicalDeviceExternalSemaphoreInfoBuilder<'a> { + pub fn handle_type( + mut self, + handle_type: ExternalSemaphoreHandleTypeFlags, + ) -> PhysicalDeviceExternalSemaphoreInfoBuilder<'a> { + self.inner.handle_type = handle_type; + self + } + #[doc = r" Prepends the given extension struct between the root and the first pointer. This"] + #[doc = r" method only exists on structs that can be passed to a function directly. Only"] + #[doc = r" valid extension structs can be pushed into the chain."] + #[doc = r" If the chain looks like `A -> B -> C`, and you call `builder.push_next(&mut D)`, then the"] + #[doc = r" chain will look like `A -> D -> B -> C`."] + pub fn push_next( + mut self, + next: &'a mut T, + ) -> PhysicalDeviceExternalSemaphoreInfoBuilder<'a> { + unsafe { + let next_ptr = next as *mut T as *mut BaseOutStructure; + let last_next = ptr_chain_iter(next).last().unwrap(); + (*last_next).p_next = self.inner.p_next as _; + self.inner.p_next = next_ptr as _; + } + self + } + #[doc = r" Calling build will **discard** all the lifetime information. Only call this if"] + #[doc = r" necessary! 
Builders implement `Deref` targeting their corresponding Vulkan struct,"] + #[doc = r" so references to builders can be passed directly to Vulkan functions."] + pub fn build(self) -> PhysicalDeviceExternalSemaphoreInfo { + self.inner + } +} +#[repr(C)] +#[derive(Copy, Clone, Debug)] +#[doc = ""] +pub struct ExternalSemaphoreProperties { + pub s_type: StructureType, + pub p_next: *mut c_void, + pub export_from_imported_handle_types: ExternalSemaphoreHandleTypeFlags, + pub compatible_handle_types: ExternalSemaphoreHandleTypeFlags, + pub external_semaphore_features: ExternalSemaphoreFeatureFlags, +} +impl ::std::default::Default for ExternalSemaphoreProperties { + fn default() -> ExternalSemaphoreProperties { + ExternalSemaphoreProperties { + s_type: StructureType::EXTERNAL_SEMAPHORE_PROPERTIES, + p_next: ::std::ptr::null_mut(), + export_from_imported_handle_types: ExternalSemaphoreHandleTypeFlags::default(), + compatible_handle_types: ExternalSemaphoreHandleTypeFlags::default(), + external_semaphore_features: ExternalSemaphoreFeatureFlags::default(), + } + } +} +impl ExternalSemaphoreProperties { + pub fn builder<'a>() -> ExternalSemaphorePropertiesBuilder<'a> { + ExternalSemaphorePropertiesBuilder { + inner: ExternalSemaphoreProperties::default(), + marker: ::std::marker::PhantomData, + } + } +} +#[repr(transparent)] +pub struct ExternalSemaphorePropertiesBuilder<'a> { + inner: ExternalSemaphoreProperties, + marker: ::std::marker::PhantomData<&'a ()>, +} +pub unsafe trait ExtendsExternalSemaphoreProperties {} +impl<'a> ::std::ops::Deref for ExternalSemaphorePropertiesBuilder<'a> { + type Target = ExternalSemaphoreProperties; + fn deref(&self) -> &Self::Target { + &self.inner + } +} +impl<'a> ::std::ops::DerefMut for ExternalSemaphorePropertiesBuilder<'a> { + fn deref_mut(&mut self) -> &mut Self::Target { + &mut self.inner + } +} +impl<'a> ExternalSemaphorePropertiesBuilder<'a> { + pub fn export_from_imported_handle_types( + mut self, + export_from_imported_handle_types: ExternalSemaphoreHandleTypeFlags, + ) -> ExternalSemaphorePropertiesBuilder<'a> { + self.inner.export_from_imported_handle_types = export_from_imported_handle_types; + self + } + pub fn compatible_handle_types( + mut self, + compatible_handle_types: ExternalSemaphoreHandleTypeFlags, + ) -> ExternalSemaphorePropertiesBuilder<'a> { + self.inner.compatible_handle_types = compatible_handle_types; + self + } + pub fn external_semaphore_features( + mut self, + external_semaphore_features: ExternalSemaphoreFeatureFlags, + ) -> ExternalSemaphorePropertiesBuilder<'a> { + self.inner.external_semaphore_features = external_semaphore_features; + self + } + #[doc = r" Prepends the given extension struct between the root and the first pointer. This"] + #[doc = r" method only exists on structs that can be passed to a function directly. Only"] + #[doc = r" valid extension structs can be pushed into the chain."] + #[doc = r" If the chain looks like `A -> B -> C`, and you call `builder.push_next(&mut D)`, then the"] + #[doc = r" chain will look like `A -> D -> B -> C`."] + pub fn push_next( + mut self, + next: &'a mut T, + ) -> ExternalSemaphorePropertiesBuilder<'a> { + unsafe { + let next_ptr = next as *mut T as *mut BaseOutStructure; + let last_next = ptr_chain_iter(next).last().unwrap(); + (*last_next).p_next = self.inner.p_next as _; + self.inner.p_next = next_ptr as _; + } + self + } + #[doc = r" Calling build will **discard** all the lifetime information. Only call this if"] + #[doc = r" necessary! 
Builders implement `Deref` targeting their corresponding Vulkan struct,"] + #[doc = r" so references to builders can be passed directly to Vulkan functions."] + pub fn build(self) -> ExternalSemaphoreProperties { + self.inner + } +} +#[repr(C)] +#[derive(Copy, Clone, Debug)] +#[doc = ""] +pub struct ExportSemaphoreCreateInfo { + pub s_type: StructureType, + pub p_next: *const c_void, + pub handle_types: ExternalSemaphoreHandleTypeFlags, +} +impl ::std::default::Default for ExportSemaphoreCreateInfo { + fn default() -> ExportSemaphoreCreateInfo { + ExportSemaphoreCreateInfo { + s_type: StructureType::EXPORT_SEMAPHORE_CREATE_INFO, + p_next: ::std::ptr::null(), + handle_types: ExternalSemaphoreHandleTypeFlags::default(), + } + } +} +impl ExportSemaphoreCreateInfo { + pub fn builder<'a>() -> ExportSemaphoreCreateInfoBuilder<'a> { + ExportSemaphoreCreateInfoBuilder { + inner: ExportSemaphoreCreateInfo::default(), + marker: ::std::marker::PhantomData, + } + } +} +#[repr(transparent)] +pub struct ExportSemaphoreCreateInfoBuilder<'a> { + inner: ExportSemaphoreCreateInfo, + marker: ::std::marker::PhantomData<&'a ()>, +} +unsafe impl ExtendsSemaphoreCreateInfo for ExportSemaphoreCreateInfoBuilder<'_> {} +unsafe impl ExtendsSemaphoreCreateInfo for ExportSemaphoreCreateInfo {} +impl<'a> ::std::ops::Deref for ExportSemaphoreCreateInfoBuilder<'a> { + type Target = ExportSemaphoreCreateInfo; + fn deref(&self) -> &Self::Target { + &self.inner + } +} +impl<'a> ::std::ops::DerefMut for ExportSemaphoreCreateInfoBuilder<'a> { + fn deref_mut(&mut self) -> &mut Self::Target { + &mut self.inner + } +} +impl<'a> ExportSemaphoreCreateInfoBuilder<'a> { + pub fn handle_types( + mut self, + handle_types: ExternalSemaphoreHandleTypeFlags, + ) -> ExportSemaphoreCreateInfoBuilder<'a> { + self.inner.handle_types = handle_types; + self + } + #[doc = r" Calling build will **discard** all the lifetime information. Only call this if"] + #[doc = r" necessary! 
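
A small sketch of the intended use (assumption: `SemaphoreCreateInfo` and its builder are defined elsewhere in this file, and its `push_next` accepts any `ExtendsSemaphoreCreateInfo` impl, which `ExportSemaphoreCreateInfo` provides above):

    fn exportable_semaphore_info<'a>(
        handle_types: ExternalSemaphoreHandleTypeFlags,
    ) -> ExportSemaphoreCreateInfoBuilder<'a> {
        // Chained onto a SemaphoreCreateInfo, this marks the semaphore as
        // exportable with the given handle types at creation time.
        ExportSemaphoreCreateInfo::builder().handle_types(handle_types)
    }
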
Builders implement `Deref` targeting their corresponding Vulkan struct,"] + #[doc = r" so references to builders can be passed directly to Vulkan functions."] + pub fn build(self) -> ExportSemaphoreCreateInfo { + self.inner + } +} +#[repr(C)] +#[derive(Copy, Clone, Debug)] +#[doc = ""] +pub struct ImportSemaphoreWin32HandleInfoKHR { + pub s_type: StructureType, + pub p_next: *const c_void, + pub semaphore: Semaphore, + pub flags: SemaphoreImportFlags, + pub handle_type: ExternalSemaphoreHandleTypeFlags, + pub handle: HANDLE, + pub name: LPCWSTR, +} +impl ::std::default::Default for ImportSemaphoreWin32HandleInfoKHR { + fn default() -> ImportSemaphoreWin32HandleInfoKHR { + ImportSemaphoreWin32HandleInfoKHR { + s_type: StructureType::IMPORT_SEMAPHORE_WIN32_HANDLE_INFO_KHR, + p_next: ::std::ptr::null(), + semaphore: Semaphore::default(), + flags: SemaphoreImportFlags::default(), + handle_type: ExternalSemaphoreHandleTypeFlags::default(), + handle: unsafe { ::std::mem::zeroed() }, + name: unsafe { ::std::mem::zeroed() }, + } + } +} +impl ImportSemaphoreWin32HandleInfoKHR { + pub fn builder<'a>() -> ImportSemaphoreWin32HandleInfoKHRBuilder<'a> { + ImportSemaphoreWin32HandleInfoKHRBuilder { + inner: ImportSemaphoreWin32HandleInfoKHR::default(), + marker: ::std::marker::PhantomData, + } + } +} +#[repr(transparent)] +pub struct ImportSemaphoreWin32HandleInfoKHRBuilder<'a> { + inner: ImportSemaphoreWin32HandleInfoKHR, + marker: ::std::marker::PhantomData<&'a ()>, +} +pub unsafe trait ExtendsImportSemaphoreWin32HandleInfoKHR {} +impl<'a> ::std::ops::Deref for ImportSemaphoreWin32HandleInfoKHRBuilder<'a> { + type Target = ImportSemaphoreWin32HandleInfoKHR; + fn deref(&self) -> &Self::Target { + &self.inner + } +} +impl<'a> ::std::ops::DerefMut for ImportSemaphoreWin32HandleInfoKHRBuilder<'a> { + fn deref_mut(&mut self) -> &mut Self::Target { + &mut self.inner + } +} +impl<'a> ImportSemaphoreWin32HandleInfoKHRBuilder<'a> { + pub fn semaphore( + mut self, + semaphore: Semaphore, + ) -> ImportSemaphoreWin32HandleInfoKHRBuilder<'a> { + self.inner.semaphore = semaphore; + self + } + pub fn flags( + mut self, + flags: SemaphoreImportFlags, + ) -> ImportSemaphoreWin32HandleInfoKHRBuilder<'a> { + self.inner.flags = flags; + self + } + pub fn handle_type( + mut self, + handle_type: ExternalSemaphoreHandleTypeFlags, + ) -> ImportSemaphoreWin32HandleInfoKHRBuilder<'a> { + self.inner.handle_type = handle_type; + self + } + pub fn handle(mut self, handle: HANDLE) -> ImportSemaphoreWin32HandleInfoKHRBuilder<'a> { + self.inner.handle = handle; + self + } + pub fn name(mut self, name: LPCWSTR) -> ImportSemaphoreWin32HandleInfoKHRBuilder<'a> { + self.inner.name = name; + self + } + #[doc = r" Prepends the given extension struct between the root and the first pointer. This"] + #[doc = r" method only exists on structs that can be passed to a function directly. Only"] + #[doc = r" valid extension structs can be pushed into the chain."] + #[doc = r" If the chain looks like `A -> B -> C`, and you call `builder.push_next(&mut D)`, then the"] + #[doc = r" chain will look like `A -> D -> B -> C`."] + pub fn push_next( + mut self, + next: &'a mut T, + ) -> ImportSemaphoreWin32HandleInfoKHRBuilder<'a> { + unsafe { + let next_ptr = next as *mut T as *mut BaseOutStructure; + let last_next = ptr_chain_iter(next).last().unwrap(); + (*last_next).p_next = self.inner.p_next as _; + self.inner.p_next = next_ptr as _; + } + self + } + #[doc = r" Calling build will **discard** all the lifetime information. 
Only call this if"] + #[doc = r" necessary! Builders implement `Deref` targeting their corresponding Vulkan struct,"] + #[doc = r" so references to builders can be passed directly to Vulkan functions."] + pub fn build(self) -> ImportSemaphoreWin32HandleInfoKHR { + self.inner + } +} +#[repr(C)] +#[derive(Copy, Clone, Debug)] +#[doc = ""] +pub struct ExportSemaphoreWin32HandleInfoKHR { + pub s_type: StructureType, + pub p_next: *const c_void, + pub p_attributes: *const SECURITY_ATTRIBUTES, + pub dw_access: DWORD, + pub name: LPCWSTR, +} +impl ::std::default::Default for ExportSemaphoreWin32HandleInfoKHR { + fn default() -> ExportSemaphoreWin32HandleInfoKHR { + ExportSemaphoreWin32HandleInfoKHR { + s_type: StructureType::EXPORT_SEMAPHORE_WIN32_HANDLE_INFO_KHR, + p_next: ::std::ptr::null(), + p_attributes: ::std::ptr::null(), + dw_access: DWORD::default(), + name: unsafe { ::std::mem::zeroed() }, + } + } +} +impl ExportSemaphoreWin32HandleInfoKHR { + pub fn builder<'a>() -> ExportSemaphoreWin32HandleInfoKHRBuilder<'a> { + ExportSemaphoreWin32HandleInfoKHRBuilder { + inner: ExportSemaphoreWin32HandleInfoKHR::default(), + marker: ::std::marker::PhantomData, + } + } +} +#[repr(transparent)] +pub struct ExportSemaphoreWin32HandleInfoKHRBuilder<'a> { + inner: ExportSemaphoreWin32HandleInfoKHR, + marker: ::std::marker::PhantomData<&'a ()>, +} +unsafe impl ExtendsSemaphoreCreateInfo for ExportSemaphoreWin32HandleInfoKHRBuilder<'_> {} +unsafe impl ExtendsSemaphoreCreateInfo for ExportSemaphoreWin32HandleInfoKHR {} +impl<'a> ::std::ops::Deref for ExportSemaphoreWin32HandleInfoKHRBuilder<'a> { + type Target = ExportSemaphoreWin32HandleInfoKHR; + fn deref(&self) -> &Self::Target { + &self.inner + } +} +impl<'a> ::std::ops::DerefMut for ExportSemaphoreWin32HandleInfoKHRBuilder<'a> { + fn deref_mut(&mut self) -> &mut Self::Target { + &mut self.inner + } +} +impl<'a> ExportSemaphoreWin32HandleInfoKHRBuilder<'a> { + pub fn attributes( + mut self, + attributes: &'a SECURITY_ATTRIBUTES, + ) -> ExportSemaphoreWin32HandleInfoKHRBuilder<'a> { + self.inner.p_attributes = attributes; + self + } + pub fn dw_access(mut self, dw_access: DWORD) -> ExportSemaphoreWin32HandleInfoKHRBuilder<'a> { + self.inner.dw_access = dw_access; + self + } + pub fn name(mut self, name: LPCWSTR) -> ExportSemaphoreWin32HandleInfoKHRBuilder<'a> { + self.inner.name = name; + self + } + #[doc = r" Calling build will **discard** all the lifetime information. Only call this if"] + #[doc = r" necessary! 
Builders implement `Deref` targeting their corresponding Vulkan struct,"] + #[doc = r" so references to builders can be passed directly to Vulkan functions."] + pub fn build(self) -> ExportSemaphoreWin32HandleInfoKHR { + self.inner + } +} +#[repr(C)] +#[derive(Copy, Clone, Debug)] +#[doc = ""] +pub struct D3D12FenceSubmitInfoKHR { + pub s_type: StructureType, + pub p_next: *const c_void, + pub wait_semaphore_values_count: u32, + pub p_wait_semaphore_values: *const u64, + pub signal_semaphore_values_count: u32, + pub p_signal_semaphore_values: *const u64, +} +impl ::std::default::Default for D3D12FenceSubmitInfoKHR { + fn default() -> D3D12FenceSubmitInfoKHR { + D3D12FenceSubmitInfoKHR { + s_type: StructureType::D3D12_FENCE_SUBMIT_INFO_KHR, + p_next: ::std::ptr::null(), + wait_semaphore_values_count: u32::default(), + p_wait_semaphore_values: ::std::ptr::null(), + signal_semaphore_values_count: u32::default(), + p_signal_semaphore_values: ::std::ptr::null(), + } + } +} +impl D3D12FenceSubmitInfoKHR { + pub fn builder<'a>() -> D3D12FenceSubmitInfoKHRBuilder<'a> { + D3D12FenceSubmitInfoKHRBuilder { + inner: D3D12FenceSubmitInfoKHR::default(), + marker: ::std::marker::PhantomData, + } + } +} +#[repr(transparent)] +pub struct D3D12FenceSubmitInfoKHRBuilder<'a> { + inner: D3D12FenceSubmitInfoKHR, + marker: ::std::marker::PhantomData<&'a ()>, +} +unsafe impl ExtendsSubmitInfo for D3D12FenceSubmitInfoKHRBuilder<'_> {} +unsafe impl ExtendsSubmitInfo for D3D12FenceSubmitInfoKHR {} +impl<'a> ::std::ops::Deref for D3D12FenceSubmitInfoKHRBuilder<'a> { + type Target = D3D12FenceSubmitInfoKHR; + fn deref(&self) -> &Self::Target { + &self.inner + } +} +impl<'a> ::std::ops::DerefMut for D3D12FenceSubmitInfoKHRBuilder<'a> { + fn deref_mut(&mut self) -> &mut Self::Target { + &mut self.inner + } +} +impl<'a> D3D12FenceSubmitInfoKHRBuilder<'a> { + pub fn wait_semaphore_values( + mut self, + wait_semaphore_values: &'a [u64], + ) -> D3D12FenceSubmitInfoKHRBuilder<'a> { + self.inner.wait_semaphore_values_count = wait_semaphore_values.len() as _; + self.inner.p_wait_semaphore_values = wait_semaphore_values.as_ptr(); + self + } + pub fn signal_semaphore_values( + mut self, + signal_semaphore_values: &'a [u64], + ) -> D3D12FenceSubmitInfoKHRBuilder<'a> { + self.inner.signal_semaphore_values_count = signal_semaphore_values.len() as _; + self.inner.p_signal_semaphore_values = signal_semaphore_values.as_ptr(); + self + } + #[doc = r" Calling build will **discard** all the lifetime information. Only call this if"] + #[doc = r" necessary! 
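
A sketch for the D3D12 fence values (grounded in the `ExtendsSubmitInfo` impls above; the `SubmitInfo` root struct itself is outside this hunk). The counts are taken from the slice lengths, one value per wait/signal semaphore of the submission being extended:

    fn d3d12_fence_values<'a>(
        wait_values: &'a [u64],
        signal_values: &'a [u64],
    ) -> D3D12FenceSubmitInfoKHRBuilder<'a> {
        // Chained onto a SubmitInfo, these supply the D3D12 fence values
        // corresponding to its wait and signal semaphores.
        D3D12FenceSubmitInfoKHR::builder()
            .wait_semaphore_values(wait_values)
            .signal_semaphore_values(signal_values)
    }
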
Builders implement `Deref` targeting their corresponding Vulkan struct,"] + #[doc = r" so references to builders can be passed directly to Vulkan functions."] + pub fn build(self) -> D3D12FenceSubmitInfoKHR { + self.inner + } +} +#[repr(C)] +#[derive(Copy, Clone, Debug)] +#[doc = ""] +pub struct SemaphoreGetWin32HandleInfoKHR { + pub s_type: StructureType, + pub p_next: *const c_void, + pub semaphore: Semaphore, + pub handle_type: ExternalSemaphoreHandleTypeFlags, +} +impl ::std::default::Default for SemaphoreGetWin32HandleInfoKHR { + fn default() -> SemaphoreGetWin32HandleInfoKHR { + SemaphoreGetWin32HandleInfoKHR { + s_type: StructureType::SEMAPHORE_GET_WIN32_HANDLE_INFO_KHR, + p_next: ::std::ptr::null(), + semaphore: Semaphore::default(), + handle_type: ExternalSemaphoreHandleTypeFlags::default(), + } + } +} +impl SemaphoreGetWin32HandleInfoKHR { + pub fn builder<'a>() -> SemaphoreGetWin32HandleInfoKHRBuilder<'a> { + SemaphoreGetWin32HandleInfoKHRBuilder { + inner: SemaphoreGetWin32HandleInfoKHR::default(), + marker: ::std::marker::PhantomData, + } + } +} +#[repr(transparent)] +pub struct SemaphoreGetWin32HandleInfoKHRBuilder<'a> { + inner: SemaphoreGetWin32HandleInfoKHR, + marker: ::std::marker::PhantomData<&'a ()>, +} +pub unsafe trait ExtendsSemaphoreGetWin32HandleInfoKHR {} +impl<'a> ::std::ops::Deref for SemaphoreGetWin32HandleInfoKHRBuilder<'a> { + type Target = SemaphoreGetWin32HandleInfoKHR; + fn deref(&self) -> &Self::Target { + &self.inner + } +} +impl<'a> ::std::ops::DerefMut for SemaphoreGetWin32HandleInfoKHRBuilder<'a> { + fn deref_mut(&mut self) -> &mut Self::Target { + &mut self.inner + } +} +impl<'a> SemaphoreGetWin32HandleInfoKHRBuilder<'a> { + pub fn semaphore(mut self, semaphore: Semaphore) -> SemaphoreGetWin32HandleInfoKHRBuilder<'a> { + self.inner.semaphore = semaphore; + self + } + pub fn handle_type( + mut self, + handle_type: ExternalSemaphoreHandleTypeFlags, + ) -> SemaphoreGetWin32HandleInfoKHRBuilder<'a> { + self.inner.handle_type = handle_type; + self + } + #[doc = r" Prepends the given extension struct between the root and the first pointer. This"] + #[doc = r" method only exists on structs that can be passed to a function directly. Only"] + #[doc = r" valid extension structs can be pushed into the chain."] + #[doc = r" If the chain looks like `A -> B -> C`, and you call `builder.push_next(&mut D)`, then the"] + #[doc = r" chain will look like `A -> D -> B -> C`."] + pub fn push_next( + mut self, + next: &'a mut T, + ) -> SemaphoreGetWin32HandleInfoKHRBuilder<'a> { + unsafe { + let next_ptr = next as *mut T as *mut BaseOutStructure; + let last_next = ptr_chain_iter(next).last().unwrap(); + (*last_next).p_next = self.inner.p_next as _; + self.inner.p_next = next_ptr as _; + } + self + } + #[doc = r" Calling build will **discard** all the lifetime information. Only call this if"] + #[doc = r" necessary! 
Builders implement `Deref` targeting their corresponding Vulkan struct,"] + #[doc = r" so references to builders can be passed directly to Vulkan functions."] + pub fn build(self) -> SemaphoreGetWin32HandleInfoKHR { + self.inner + } +} +#[repr(C)] +#[derive(Copy, Clone, Debug)] +#[doc = ""] +pub struct ImportSemaphoreFdInfoKHR { + pub s_type: StructureType, + pub p_next: *const c_void, + pub semaphore: Semaphore, + pub flags: SemaphoreImportFlags, + pub handle_type: ExternalSemaphoreHandleTypeFlags, + pub fd: c_int, +} +impl ::std::default::Default for ImportSemaphoreFdInfoKHR { + fn default() -> ImportSemaphoreFdInfoKHR { + ImportSemaphoreFdInfoKHR { + s_type: StructureType::IMPORT_SEMAPHORE_FD_INFO_KHR, + p_next: ::std::ptr::null(), + semaphore: Semaphore::default(), + flags: SemaphoreImportFlags::default(), + handle_type: ExternalSemaphoreHandleTypeFlags::default(), + fd: c_int::default(), + } + } +} +impl ImportSemaphoreFdInfoKHR { + pub fn builder<'a>() -> ImportSemaphoreFdInfoKHRBuilder<'a> { + ImportSemaphoreFdInfoKHRBuilder { + inner: ImportSemaphoreFdInfoKHR::default(), + marker: ::std::marker::PhantomData, + } + } +} +#[repr(transparent)] +pub struct ImportSemaphoreFdInfoKHRBuilder<'a> { + inner: ImportSemaphoreFdInfoKHR, + marker: ::std::marker::PhantomData<&'a ()>, +} +pub unsafe trait ExtendsImportSemaphoreFdInfoKHR {} +impl<'a> ::std::ops::Deref for ImportSemaphoreFdInfoKHRBuilder<'a> { + type Target = ImportSemaphoreFdInfoKHR; + fn deref(&self) -> &Self::Target { + &self.inner + } +} +impl<'a> ::std::ops::DerefMut for ImportSemaphoreFdInfoKHRBuilder<'a> { + fn deref_mut(&mut self) -> &mut Self::Target { + &mut self.inner + } +} +impl<'a> ImportSemaphoreFdInfoKHRBuilder<'a> { + pub fn semaphore(mut self, semaphore: Semaphore) -> ImportSemaphoreFdInfoKHRBuilder<'a> { + self.inner.semaphore = semaphore; + self + } + pub fn flags(mut self, flags: SemaphoreImportFlags) -> ImportSemaphoreFdInfoKHRBuilder<'a> { + self.inner.flags = flags; + self + } + pub fn handle_type( + mut self, + handle_type: ExternalSemaphoreHandleTypeFlags, + ) -> ImportSemaphoreFdInfoKHRBuilder<'a> { + self.inner.handle_type = handle_type; + self + } + pub fn fd(mut self, fd: c_int) -> ImportSemaphoreFdInfoKHRBuilder<'a> { + self.inner.fd = fd; + self + } + #[doc = r" Prepends the given extension struct between the root and the first pointer. This"] + #[doc = r" method only exists on structs that can be passed to a function directly. Only"] + #[doc = r" valid extension structs can be pushed into the chain."] + #[doc = r" If the chain looks like `A -> B -> C`, and you call `builder.push_next(&mut D)`, then the"] + #[doc = r" chain will look like `A -> D -> B -> C`."] + pub fn push_next( + mut self, + next: &'a mut T, + ) -> ImportSemaphoreFdInfoKHRBuilder<'a> { + unsafe { + let next_ptr = next as *mut T as *mut BaseOutStructure; + let last_next = ptr_chain_iter(next).last().unwrap(); + (*last_next).p_next = self.inner.p_next as _; + self.inner.p_next = next_ptr as _; + } + self + } + #[doc = r" Calling build will **discard** all the lifetime information. Only call this if"] + #[doc = r" necessary! 
Builders implement `Deref` targeting their corresponding Vulkan struct,"] + #[doc = r" so references to builders can be passed directly to Vulkan functions."] + pub fn build(self) -> ImportSemaphoreFdInfoKHR { + self.inner + } +} +#[repr(C)] +#[derive(Copy, Clone, Debug)] +#[doc = ""] +pub struct SemaphoreGetFdInfoKHR { + pub s_type: StructureType, + pub p_next: *const c_void, + pub semaphore: Semaphore, + pub handle_type: ExternalSemaphoreHandleTypeFlags, +} +impl ::std::default::Default for SemaphoreGetFdInfoKHR { + fn default() -> SemaphoreGetFdInfoKHR { + SemaphoreGetFdInfoKHR { + s_type: StructureType::SEMAPHORE_GET_FD_INFO_KHR, + p_next: ::std::ptr::null(), + semaphore: Semaphore::default(), + handle_type: ExternalSemaphoreHandleTypeFlags::default(), + } + } +} +impl SemaphoreGetFdInfoKHR { + pub fn builder<'a>() -> SemaphoreGetFdInfoKHRBuilder<'a> { + SemaphoreGetFdInfoKHRBuilder { + inner: SemaphoreGetFdInfoKHR::default(), + marker: ::std::marker::PhantomData, + } + } +} +#[repr(transparent)] +pub struct SemaphoreGetFdInfoKHRBuilder<'a> { + inner: SemaphoreGetFdInfoKHR, + marker: ::std::marker::PhantomData<&'a ()>, +} +pub unsafe trait ExtendsSemaphoreGetFdInfoKHR {} +impl<'a> ::std::ops::Deref for SemaphoreGetFdInfoKHRBuilder<'a> { + type Target = SemaphoreGetFdInfoKHR; + fn deref(&self) -> &Self::Target { + &self.inner + } +} +impl<'a> ::std::ops::DerefMut for SemaphoreGetFdInfoKHRBuilder<'a> { + fn deref_mut(&mut self) -> &mut Self::Target { + &mut self.inner + } +} +impl<'a> SemaphoreGetFdInfoKHRBuilder<'a> { + pub fn semaphore(mut self, semaphore: Semaphore) -> SemaphoreGetFdInfoKHRBuilder<'a> { + self.inner.semaphore = semaphore; + self + } + pub fn handle_type( + mut self, + handle_type: ExternalSemaphoreHandleTypeFlags, + ) -> SemaphoreGetFdInfoKHRBuilder<'a> { + self.inner.handle_type = handle_type; + self + } + #[doc = r" Prepends the given extension struct between the root and the first pointer. This"] + #[doc = r" method only exists on structs that can be passed to a function directly. Only"] + #[doc = r" valid extension structs can be pushed into the chain."] + #[doc = r" If the chain looks like `A -> B -> C`, and you call `builder.push_next(&mut D)`, then the"] + #[doc = r" chain will look like `A -> D -> B -> C`."] + pub fn push_next( + mut self, + next: &'a mut T, + ) -> SemaphoreGetFdInfoKHRBuilder<'a> { + unsafe { + let next_ptr = next as *mut T as *mut BaseOutStructure; + let last_next = ptr_chain_iter(next).last().unwrap(); + (*last_next).p_next = self.inner.p_next as _; + self.inner.p_next = next_ptr as _; + } + self + } + #[doc = r" Calling build will **discard** all the lifetime information. Only call this if"] + #[doc = r" necessary! 
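
A round-trip sketch for fd-based semaphores (the dispatchable `vkGetSemaphoreFdKHR` / `vkImportSemaphoreFdKHR` calls are not part of this hunk; fd ownership passes to the driver on a successful import, as on the memory path):

    fn semaphore_export_request<'a>(
        sem: Semaphore,
        ty: ExternalSemaphoreHandleTypeFlags,
    ) -> SemaphoreGetFdInfoKHRBuilder<'a> {
        SemaphoreGetFdInfoKHR::builder().semaphore(sem).handle_type(ty)
    }

    fn semaphore_import_request<'a>(
        sem: Semaphore,
        ty: ExternalSemaphoreHandleTypeFlags,
        fd: c_int,
    ) -> ImportSemaphoreFdInfoKHRBuilder<'a> {
        // `fd` is consumed by the driver if the import succeeds.
        ImportSemaphoreFdInfoKHR::builder()
            .semaphore(sem)
            .handle_type(ty)
            .fd(fd)
    }
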
Builders implement `Deref` targeting their corresponding Vulkan struct,"] + #[doc = r" so references to builders can be passed directly to Vulkan functions."] + pub fn build(self) -> SemaphoreGetFdInfoKHR { + self.inner + } +} +#[repr(C)] +#[derive(Copy, Clone, Debug)] +#[doc = ""] +pub struct PhysicalDeviceExternalFenceInfo { + pub s_type: StructureType, + pub p_next: *const c_void, + pub handle_type: ExternalFenceHandleTypeFlags, +} +impl ::std::default::Default for PhysicalDeviceExternalFenceInfo { + fn default() -> PhysicalDeviceExternalFenceInfo { + PhysicalDeviceExternalFenceInfo { + s_type: StructureType::PHYSICAL_DEVICE_EXTERNAL_FENCE_INFO, + p_next: ::std::ptr::null(), + handle_type: ExternalFenceHandleTypeFlags::default(), + } + } +} +impl PhysicalDeviceExternalFenceInfo { + pub fn builder<'a>() -> PhysicalDeviceExternalFenceInfoBuilder<'a> { + PhysicalDeviceExternalFenceInfoBuilder { + inner: PhysicalDeviceExternalFenceInfo::default(), + marker: ::std::marker::PhantomData, + } + } +} +#[repr(transparent)] +pub struct PhysicalDeviceExternalFenceInfoBuilder<'a> { + inner: PhysicalDeviceExternalFenceInfo, + marker: ::std::marker::PhantomData<&'a ()>, +} +pub unsafe trait ExtendsPhysicalDeviceExternalFenceInfo {} +impl<'a> ::std::ops::Deref for PhysicalDeviceExternalFenceInfoBuilder<'a> { + type Target = PhysicalDeviceExternalFenceInfo; + fn deref(&self) -> &Self::Target { + &self.inner + } +} +impl<'a> ::std::ops::DerefMut for PhysicalDeviceExternalFenceInfoBuilder<'a> { + fn deref_mut(&mut self) -> &mut Self::Target { + &mut self.inner + } +} +impl<'a> PhysicalDeviceExternalFenceInfoBuilder<'a> { + pub fn handle_type( + mut self, + handle_type: ExternalFenceHandleTypeFlags, + ) -> PhysicalDeviceExternalFenceInfoBuilder<'a> { + self.inner.handle_type = handle_type; + self + } + #[doc = r" Prepends the given extension struct between the root and the first pointer. This"] + #[doc = r" method only exists on structs that can be passed to a function directly. Only"] + #[doc = r" valid extension structs can be pushed into the chain."] + #[doc = r" If the chain looks like `A -> B -> C`, and you call `builder.push_next(&mut D)`, then the"] + #[doc = r" chain will look like `A -> D -> B -> C`."] + pub fn push_next( + mut self, + next: &'a mut T, + ) -> PhysicalDeviceExternalFenceInfoBuilder<'a> { + unsafe { + let next_ptr = next as *mut T as *mut BaseOutStructure; + let last_next = ptr_chain_iter(next).last().unwrap(); + (*last_next).p_next = self.inner.p_next as _; + self.inner.p_next = next_ptr as _; + } + self + } + #[doc = r" Calling build will **discard** all the lifetime information. Only call this if"] + #[doc = r" necessary! 
Builders implement `Deref` targeting their corresponding Vulkan struct,"] + #[doc = r" so references to builders can be passed directly to Vulkan functions."] + pub fn build(self) -> PhysicalDeviceExternalFenceInfo { + self.inner + } +} +#[repr(C)] +#[derive(Copy, Clone, Debug)] +#[doc = ""] +pub struct ExternalFenceProperties { + pub s_type: StructureType, + pub p_next: *mut c_void, + pub export_from_imported_handle_types: ExternalFenceHandleTypeFlags, + pub compatible_handle_types: ExternalFenceHandleTypeFlags, + pub external_fence_features: ExternalFenceFeatureFlags, +} +impl ::std::default::Default for ExternalFenceProperties { + fn default() -> ExternalFenceProperties { + ExternalFenceProperties { + s_type: StructureType::EXTERNAL_FENCE_PROPERTIES, + p_next: ::std::ptr::null_mut(), + export_from_imported_handle_types: ExternalFenceHandleTypeFlags::default(), + compatible_handle_types: ExternalFenceHandleTypeFlags::default(), + external_fence_features: ExternalFenceFeatureFlags::default(), + } + } +} +impl ExternalFenceProperties { + pub fn builder<'a>() -> ExternalFencePropertiesBuilder<'a> { + ExternalFencePropertiesBuilder { + inner: ExternalFenceProperties::default(), + marker: ::std::marker::PhantomData, + } + } +} +#[repr(transparent)] +pub struct ExternalFencePropertiesBuilder<'a> { + inner: ExternalFenceProperties, + marker: ::std::marker::PhantomData<&'a ()>, +} +pub unsafe trait ExtendsExternalFenceProperties {} +impl<'a> ::std::ops::Deref for ExternalFencePropertiesBuilder<'a> { + type Target = ExternalFenceProperties; + fn deref(&self) -> &Self::Target { + &self.inner + } +} +impl<'a> ::std::ops::DerefMut for ExternalFencePropertiesBuilder<'a> { + fn deref_mut(&mut self) -> &mut Self::Target { + &mut self.inner + } +} +impl<'a> ExternalFencePropertiesBuilder<'a> { + pub fn export_from_imported_handle_types( + mut self, + export_from_imported_handle_types: ExternalFenceHandleTypeFlags, + ) -> ExternalFencePropertiesBuilder<'a> { + self.inner.export_from_imported_handle_types = export_from_imported_handle_types; + self + } + pub fn compatible_handle_types( + mut self, + compatible_handle_types: ExternalFenceHandleTypeFlags, + ) -> ExternalFencePropertiesBuilder<'a> { + self.inner.compatible_handle_types = compatible_handle_types; + self + } + pub fn external_fence_features( + mut self, + external_fence_features: ExternalFenceFeatureFlags, + ) -> ExternalFencePropertiesBuilder<'a> { + self.inner.external_fence_features = external_fence_features; + self + } + #[doc = r" Prepends the given extension struct between the root and the first pointer. This"] + #[doc = r" method only exists on structs that can be passed to a function directly. Only"] + #[doc = r" valid extension structs can be pushed into the chain."] + #[doc = r" If the chain looks like `A -> B -> C`, and you call `builder.push_next(&mut D)`, then the"] + #[doc = r" chain will look like `A -> D -> B -> C`."] + pub fn push_next( + mut self, + next: &'a mut T, + ) -> ExternalFencePropertiesBuilder<'a> { + unsafe { + let next_ptr = next as *mut T as *mut BaseOutStructure; + let last_next = ptr_chain_iter(next).last().unwrap(); + (*last_next).p_next = self.inner.p_next as _; + self.inner.p_next = next_ptr as _; + } + self + } + #[doc = r" Calling build will **discard** all the lifetime information. Only call this if"] + #[doc = r" necessary! 
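
The `*const`/`*mut` split on `p_next` marks the query direction: `PhysicalDeviceExternalFenceInfo` is the input chain and `ExternalFenceProperties` the output chain of `vkGetPhysicalDeviceExternalFenceProperties` (the raw dispatch lives outside this hunk). A sketch of preparing the pair:

    fn fence_query_pair<'a>(
        ty: ExternalFenceHandleTypeFlags,
    ) -> (PhysicalDeviceExternalFenceInfoBuilder<'a>, ExternalFenceProperties) {
        let info = PhysicalDeviceExternalFenceInfo::builder().handle_type(ty);
        // Zero-initialized apart from s_type; the driver fills in the
        // supported handle types and feature flags.
        let props = ExternalFenceProperties::default();
        (info, props)
    }
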
Builders implement `Deref` targeting their corresponding Vulkan struct,"] + #[doc = r" so references to builders can be passed directly to Vulkan functions."] + pub fn build(self) -> ExternalFenceProperties { + self.inner + } +} +#[repr(C)] +#[derive(Copy, Clone, Debug)] +#[doc = ""] +pub struct ExportFenceCreateInfo { + pub s_type: StructureType, + pub p_next: *const c_void, + pub handle_types: ExternalFenceHandleTypeFlags, +} +impl ::std::default::Default for ExportFenceCreateInfo { + fn default() -> ExportFenceCreateInfo { + ExportFenceCreateInfo { + s_type: StructureType::EXPORT_FENCE_CREATE_INFO, + p_next: ::std::ptr::null(), + handle_types: ExternalFenceHandleTypeFlags::default(), + } + } +} +impl ExportFenceCreateInfo { + pub fn builder<'a>() -> ExportFenceCreateInfoBuilder<'a> { + ExportFenceCreateInfoBuilder { + inner: ExportFenceCreateInfo::default(), + marker: ::std::marker::PhantomData, + } + } +} +#[repr(transparent)] +pub struct ExportFenceCreateInfoBuilder<'a> { + inner: ExportFenceCreateInfo, + marker: ::std::marker::PhantomData<&'a ()>, +} +unsafe impl ExtendsFenceCreateInfo for ExportFenceCreateInfoBuilder<'_> {} +unsafe impl ExtendsFenceCreateInfo for ExportFenceCreateInfo {} +impl<'a> ::std::ops::Deref for ExportFenceCreateInfoBuilder<'a> { + type Target = ExportFenceCreateInfo; + fn deref(&self) -> &Self::Target { + &self.inner + } +} +impl<'a> ::std::ops::DerefMut for ExportFenceCreateInfoBuilder<'a> { + fn deref_mut(&mut self) -> &mut Self::Target { + &mut self.inner + } +} +impl<'a> ExportFenceCreateInfoBuilder<'a> { + pub fn handle_types( + mut self, + handle_types: ExternalFenceHandleTypeFlags, + ) -> ExportFenceCreateInfoBuilder<'a> { + self.inner.handle_types = handle_types; + self + } + #[doc = r" Calling build will **discard** all the lifetime information. Only call this if"] + #[doc = r" necessary! 
Builders implement `Deref` targeting their corresponding Vulkan struct,"] + #[doc = r" so references to builders can be passed directly to Vulkan functions."] + pub fn build(self) -> ExportFenceCreateInfo { + self.inner + } +} +#[repr(C)] +#[derive(Copy, Clone, Debug)] +#[doc = ""] +pub struct ImportFenceWin32HandleInfoKHR { + pub s_type: StructureType, + pub p_next: *const c_void, + pub fence: Fence, + pub flags: FenceImportFlags, + pub handle_type: ExternalFenceHandleTypeFlags, + pub handle: HANDLE, + pub name: LPCWSTR, +} +impl ::std::default::Default for ImportFenceWin32HandleInfoKHR { + fn default() -> ImportFenceWin32HandleInfoKHR { + ImportFenceWin32HandleInfoKHR { + s_type: StructureType::IMPORT_FENCE_WIN32_HANDLE_INFO_KHR, + p_next: ::std::ptr::null(), + fence: Fence::default(), + flags: FenceImportFlags::default(), + handle_type: ExternalFenceHandleTypeFlags::default(), + handle: unsafe { ::std::mem::zeroed() }, + name: unsafe { ::std::mem::zeroed() }, + } + } +} +impl ImportFenceWin32HandleInfoKHR { + pub fn builder<'a>() -> ImportFenceWin32HandleInfoKHRBuilder<'a> { + ImportFenceWin32HandleInfoKHRBuilder { + inner: ImportFenceWin32HandleInfoKHR::default(), + marker: ::std::marker::PhantomData, + } + } +} +#[repr(transparent)] +pub struct ImportFenceWin32HandleInfoKHRBuilder<'a> { + inner: ImportFenceWin32HandleInfoKHR, + marker: ::std::marker::PhantomData<&'a ()>, +} +pub unsafe trait ExtendsImportFenceWin32HandleInfoKHR {} +impl<'a> ::std::ops::Deref for ImportFenceWin32HandleInfoKHRBuilder<'a> { + type Target = ImportFenceWin32HandleInfoKHR; + fn deref(&self) -> &Self::Target { + &self.inner + } +} +impl<'a> ::std::ops::DerefMut for ImportFenceWin32HandleInfoKHRBuilder<'a> { + fn deref_mut(&mut self) -> &mut Self::Target { + &mut self.inner + } +} +impl<'a> ImportFenceWin32HandleInfoKHRBuilder<'a> { + pub fn fence(mut self, fence: Fence) -> ImportFenceWin32HandleInfoKHRBuilder<'a> { + self.inner.fence = fence; + self + } + pub fn flags(mut self, flags: FenceImportFlags) -> ImportFenceWin32HandleInfoKHRBuilder<'a> { + self.inner.flags = flags; + self + } + pub fn handle_type( + mut self, + handle_type: ExternalFenceHandleTypeFlags, + ) -> ImportFenceWin32HandleInfoKHRBuilder<'a> { + self.inner.handle_type = handle_type; + self + } + pub fn handle(mut self, handle: HANDLE) -> ImportFenceWin32HandleInfoKHRBuilder<'a> { + self.inner.handle = handle; + self + } + pub fn name(mut self, name: LPCWSTR) -> ImportFenceWin32HandleInfoKHRBuilder<'a> { + self.inner.name = name; + self + } + #[doc = r" Prepends the given extension struct between the root and the first pointer. This"] + #[doc = r" method only exists on structs that can be passed to a function directly. Only"] + #[doc = r" valid extension structs can be pushed into the chain."] + #[doc = r" If the chain looks like `A -> B -> C`, and you call `builder.push_next(&mut D)`, then the"] + #[doc = r" chain will look like `A -> D -> B -> C`."] + pub fn push_next( + mut self, + next: &'a mut T, + ) -> ImportFenceWin32HandleInfoKHRBuilder<'a> { + unsafe { + let next_ptr = next as *mut T as *mut BaseOutStructure; + let last_next = ptr_chain_iter(next).last().unwrap(); + (*last_next).p_next = self.inner.p_next as _; + self.inner.p_next = next_ptr as _; + } + self + } + #[doc = r" Calling build will **discard** all the lifetime information. Only call this if"] + #[doc = r" necessary! 
Builders implement `Deref` targeting their corresponding Vulkan struct,"] + #[doc = r" so references to builders can be passed directly to Vulkan functions."] + pub fn build(self) -> ImportFenceWin32HandleInfoKHR { + self.inner + } +} +#[repr(C)] +#[derive(Copy, Clone, Debug)] +#[doc = ""] +pub struct ExportFenceWin32HandleInfoKHR { + pub s_type: StructureType, + pub p_next: *const c_void, + pub p_attributes: *const SECURITY_ATTRIBUTES, + pub dw_access: DWORD, + pub name: LPCWSTR, +} +impl ::std::default::Default for ExportFenceWin32HandleInfoKHR { + fn default() -> ExportFenceWin32HandleInfoKHR { + ExportFenceWin32HandleInfoKHR { + s_type: StructureType::EXPORT_FENCE_WIN32_HANDLE_INFO_KHR, + p_next: ::std::ptr::null(), + p_attributes: ::std::ptr::null(), + dw_access: DWORD::default(), + name: unsafe { ::std::mem::zeroed() }, + } + } +} +impl ExportFenceWin32HandleInfoKHR { + pub fn builder<'a>() -> ExportFenceWin32HandleInfoKHRBuilder<'a> { + ExportFenceWin32HandleInfoKHRBuilder { + inner: ExportFenceWin32HandleInfoKHR::default(), + marker: ::std::marker::PhantomData, + } + } +} +#[repr(transparent)] +pub struct ExportFenceWin32HandleInfoKHRBuilder<'a> { + inner: ExportFenceWin32HandleInfoKHR, + marker: ::std::marker::PhantomData<&'a ()>, +} +unsafe impl ExtendsFenceCreateInfo for ExportFenceWin32HandleInfoKHRBuilder<'_> {} +unsafe impl ExtendsFenceCreateInfo for ExportFenceWin32HandleInfoKHR {} +impl<'a> ::std::ops::Deref for ExportFenceWin32HandleInfoKHRBuilder<'a> { + type Target = ExportFenceWin32HandleInfoKHR; + fn deref(&self) -> &Self::Target { + &self.inner + } +} +impl<'a> ::std::ops::DerefMut for ExportFenceWin32HandleInfoKHRBuilder<'a> { + fn deref_mut(&mut self) -> &mut Self::Target { + &mut self.inner + } +} +impl<'a> ExportFenceWin32HandleInfoKHRBuilder<'a> { + pub fn attributes( + mut self, + attributes: &'a SECURITY_ATTRIBUTES, + ) -> ExportFenceWin32HandleInfoKHRBuilder<'a> { + self.inner.p_attributes = attributes; + self + } + pub fn dw_access(mut self, dw_access: DWORD) -> ExportFenceWin32HandleInfoKHRBuilder<'a> { + self.inner.dw_access = dw_access; + self + } + pub fn name(mut self, name: LPCWSTR) -> ExportFenceWin32HandleInfoKHRBuilder<'a> { + self.inner.name = name; + self + } + #[doc = r" Calling build will **discard** all the lifetime information. Only call this if"] + #[doc = r" necessary! 
Builders implement `Deref` targeting their corresponding Vulkan struct,"] + #[doc = r" so references to builders can be passed directly to Vulkan functions."] + pub fn build(self) -> ExportFenceWin32HandleInfoKHR { + self.inner + } +} +#[repr(C)] +#[derive(Copy, Clone, Debug)] +#[doc = ""] +pub struct FenceGetWin32HandleInfoKHR { + pub s_type: StructureType, + pub p_next: *const c_void, + pub fence: Fence, + pub handle_type: ExternalFenceHandleTypeFlags, +} +impl ::std::default::Default for FenceGetWin32HandleInfoKHR { + fn default() -> FenceGetWin32HandleInfoKHR { + FenceGetWin32HandleInfoKHR { + s_type: StructureType::FENCE_GET_WIN32_HANDLE_INFO_KHR, + p_next: ::std::ptr::null(), + fence: Fence::default(), + handle_type: ExternalFenceHandleTypeFlags::default(), + } + } +} +impl FenceGetWin32HandleInfoKHR { + pub fn builder<'a>() -> FenceGetWin32HandleInfoKHRBuilder<'a> { + FenceGetWin32HandleInfoKHRBuilder { + inner: FenceGetWin32HandleInfoKHR::default(), + marker: ::std::marker::PhantomData, + } + } +} +#[repr(transparent)] +pub struct FenceGetWin32HandleInfoKHRBuilder<'a> { + inner: FenceGetWin32HandleInfoKHR, + marker: ::std::marker::PhantomData<&'a ()>, +} +pub unsafe trait ExtendsFenceGetWin32HandleInfoKHR {} +impl<'a> ::std::ops::Deref for FenceGetWin32HandleInfoKHRBuilder<'a> { + type Target = FenceGetWin32HandleInfoKHR; + fn deref(&self) -> &Self::Target { + &self.inner + } +} +impl<'a> ::std::ops::DerefMut for FenceGetWin32HandleInfoKHRBuilder<'a> { + fn deref_mut(&mut self) -> &mut Self::Target { + &mut self.inner + } +} +impl<'a> FenceGetWin32HandleInfoKHRBuilder<'a> { + pub fn fence(mut self, fence: Fence) -> FenceGetWin32HandleInfoKHRBuilder<'a> { + self.inner.fence = fence; + self + } + pub fn handle_type( + mut self, + handle_type: ExternalFenceHandleTypeFlags, + ) -> FenceGetWin32HandleInfoKHRBuilder<'a> { + self.inner.handle_type = handle_type; + self + } + #[doc = r" Prepends the given extension struct between the root and the first pointer. This"] + #[doc = r" method only exists on structs that can be passed to a function directly. Only"] + #[doc = r" valid extension structs can be pushed into the chain."] + #[doc = r" If the chain looks like `A -> B -> C`, and you call `builder.push_next(&mut D)`, then the"] + #[doc = r" chain will look like `A -> D -> B -> C`."] + pub fn push_next( + mut self, + next: &'a mut T, + ) -> FenceGetWin32HandleInfoKHRBuilder<'a> { + unsafe { + let next_ptr = next as *mut T as *mut BaseOutStructure; + let last_next = ptr_chain_iter(next).last().unwrap(); + (*last_next).p_next = self.inner.p_next as _; + self.inner.p_next = next_ptr as _; + } + self + } + #[doc = r" Calling build will **discard** all the lifetime information. Only call this if"] + #[doc = r" necessary! 
+#[repr(C)]
+#[derive(Copy, Clone, Debug)]
+#[doc = ""]
+pub struct ImportFenceFdInfoKHR {
+    pub s_type: StructureType,
+    pub p_next: *const c_void,
+    pub fence: Fence,
+    pub flags: FenceImportFlags,
+    pub handle_type: ExternalFenceHandleTypeFlags,
+    pub fd: c_int,
+}
+impl ::std::default::Default for ImportFenceFdInfoKHR {
+    fn default() -> ImportFenceFdInfoKHR {
+        ImportFenceFdInfoKHR {
+            s_type: StructureType::IMPORT_FENCE_FD_INFO_KHR,
+            p_next: ::std::ptr::null(),
+            fence: Fence::default(),
+            flags: FenceImportFlags::default(),
+            handle_type: ExternalFenceHandleTypeFlags::default(),
+            fd: c_int::default(),
+        }
+    }
+}
+impl ImportFenceFdInfoKHR {
+    pub fn builder<'a>() -> ImportFenceFdInfoKHRBuilder<'a> {
+        ImportFenceFdInfoKHRBuilder {
+            inner: ImportFenceFdInfoKHR::default(),
+            marker: ::std::marker::PhantomData,
+        }
+    }
+}
+#[repr(transparent)]
+pub struct ImportFenceFdInfoKHRBuilder<'a> {
+    inner: ImportFenceFdInfoKHR,
+    marker: ::std::marker::PhantomData<&'a ()>,
+}
+pub unsafe trait ExtendsImportFenceFdInfoKHR {}
+impl<'a> ::std::ops::Deref for ImportFenceFdInfoKHRBuilder<'a> {
+    type Target = ImportFenceFdInfoKHR;
+    fn deref(&self) -> &Self::Target {
+        &self.inner
+    }
+}
+impl<'a> ::std::ops::DerefMut for ImportFenceFdInfoKHRBuilder<'a> {
+    fn deref_mut(&mut self) -> &mut Self::Target {
+        &mut self.inner
+    }
+}
+impl<'a> ImportFenceFdInfoKHRBuilder<'a> {
+    pub fn fence(mut self, fence: Fence) -> ImportFenceFdInfoKHRBuilder<'a> {
+        self.inner.fence = fence;
+        self
+    }
+    pub fn flags(mut self, flags: FenceImportFlags) -> ImportFenceFdInfoKHRBuilder<'a> {
+        self.inner.flags = flags;
+        self
+    }
+    pub fn handle_type(
+        mut self,
+        handle_type: ExternalFenceHandleTypeFlags,
+    ) -> ImportFenceFdInfoKHRBuilder<'a> {
+        self.inner.handle_type = handle_type;
+        self
+    }
+    pub fn fd(mut self, fd: c_int) -> ImportFenceFdInfoKHRBuilder<'a> {
+        self.inner.fd = fd;
+        self
+    }
+    #[doc = r" Prepends the given extension struct between the root and the first pointer. This"]
+    #[doc = r" method only exists on structs that can be passed to a function directly. Only"]
+    #[doc = r" valid extension structs can be pushed into the chain."]
+    #[doc = r" If the chain looks like `A -> B -> C`, and you call `builder.push_next(&mut D)`, then the"]
+    #[doc = r" chain will look like `A -> D -> B -> C`."]
+    pub fn push_next<T: ExtendsImportFenceFdInfoKHR>(
+        mut self,
+        next: &'a mut T,
+    ) -> ImportFenceFdInfoKHRBuilder<'a> {
+        unsafe {
+            let next_ptr = next as *mut T as *mut BaseOutStructure;
+            let last_next = ptr_chain_iter(next).last().unwrap();
+            (*last_next).p_next = self.inner.p_next as _;
+            self.inner.p_next = next_ptr as _;
+        }
+        self
+    }
+    #[doc = r" Calling build will **discard** all the lifetime information. Only call this if"]
+    #[doc = r" necessary! Builders implement `Deref` targeting their corresponding Vulkan struct,"]
+    #[doc = r" so references to builders can be passed directly to Vulkan functions."]
+    pub fn build(self) -> ImportFenceFdInfoKHR {
+        self.inner
+    }
+}
+#[repr(C)]
+#[derive(Copy, Clone, Debug)]
+#[doc = ""]
+pub struct FenceGetFdInfoKHR {
+    pub s_type: StructureType,
+    pub p_next: *const c_void,
+    pub fence: Fence,
+    pub handle_type: ExternalFenceHandleTypeFlags,
+}
+impl ::std::default::Default for FenceGetFdInfoKHR {
+    fn default() -> FenceGetFdInfoKHR {
+        FenceGetFdInfoKHR {
+            s_type: StructureType::FENCE_GET_FD_INFO_KHR,
+            p_next: ::std::ptr::null(),
+            fence: Fence::default(),
+            handle_type: ExternalFenceHandleTypeFlags::default(),
+        }
+    }
+}
+impl FenceGetFdInfoKHR {
+    pub fn builder<'a>() -> FenceGetFdInfoKHRBuilder<'a> {
+        FenceGetFdInfoKHRBuilder {
+            inner: FenceGetFdInfoKHR::default(),
+            marker: ::std::marker::PhantomData,
+        }
+    }
+}
+#[repr(transparent)]
+pub struct FenceGetFdInfoKHRBuilder<'a> {
+    inner: FenceGetFdInfoKHR,
+    marker: ::std::marker::PhantomData<&'a ()>,
+}
+pub unsafe trait ExtendsFenceGetFdInfoKHR {}
+impl<'a> ::std::ops::Deref for FenceGetFdInfoKHRBuilder<'a> {
+    type Target = FenceGetFdInfoKHR;
+    fn deref(&self) -> &Self::Target {
+        &self.inner
+    }
+}
+impl<'a> ::std::ops::DerefMut for FenceGetFdInfoKHRBuilder<'a> {
+    fn deref_mut(&mut self) -> &mut Self::Target {
+        &mut self.inner
+    }
+}
+impl<'a> FenceGetFdInfoKHRBuilder<'a> {
+    pub fn fence(mut self, fence: Fence) -> FenceGetFdInfoKHRBuilder<'a> {
+        self.inner.fence = fence;
+        self
+    }
+    pub fn handle_type(
+        mut self,
+        handle_type: ExternalFenceHandleTypeFlags,
+    ) -> FenceGetFdInfoKHRBuilder<'a> {
+        self.inner.handle_type = handle_type;
+        self
+    }
+    #[doc = r" Prepends the given extension struct between the root and the first pointer. This"]
+    #[doc = r" method only exists on structs that can be passed to a function directly. Only"]
+    #[doc = r" valid extension structs can be pushed into the chain."]
+    #[doc = r" If the chain looks like `A -> B -> C`, and you call `builder.push_next(&mut D)`, then the"]
+    #[doc = r" chain will look like `A -> D -> B -> C`."]
+    pub fn push_next<T: ExtendsFenceGetFdInfoKHR>(
+        mut self,
+        next: &'a mut T,
+    ) -> FenceGetFdInfoKHRBuilder<'a> {
+        unsafe {
+            let next_ptr = next as *mut T as *mut BaseOutStructure;
+            let last_next = ptr_chain_iter(next).last().unwrap();
+            (*last_next).p_next = self.inner.p_next as _;
+            self.inner.p_next = next_ptr as _;
+        }
+        self
+    }
+    #[doc = r" Calling build will **discard** all the lifetime information. Only call this if"]
+    #[doc = r" necessary! Builders implement `Deref` targeting their corresponding Vulkan struct,"]
+    #[doc = r" so references to builders can be passed directly to Vulkan functions."]
+    pub fn build(self) -> FenceGetFdInfoKHR {
+        self.inner
+    }
+}
+#[repr(C)]
+#[derive(Copy, Clone, Debug)]
+#[doc = ""]
+pub struct PhysicalDeviceMultiviewFeatures {
+    pub s_type: StructureType,
+    pub p_next: *mut c_void,
+    pub multiview: Bool32,
+    pub multiview_geometry_shader: Bool32,
+    pub multiview_tessellation_shader: Bool32,
+}
+impl ::std::default::Default for PhysicalDeviceMultiviewFeatures {
+    fn default() -> PhysicalDeviceMultiviewFeatures {
+        PhysicalDeviceMultiviewFeatures {
+            s_type: StructureType::PHYSICAL_DEVICE_MULTIVIEW_FEATURES,
+            p_next: ::std::ptr::null_mut(),
+            multiview: Bool32::default(),
+            multiview_geometry_shader: Bool32::default(),
+            multiview_tessellation_shader: Bool32::default(),
+        }
+    }
+}
+impl PhysicalDeviceMultiviewFeatures {
+    pub fn builder<'a>() -> PhysicalDeviceMultiviewFeaturesBuilder<'a> {
+        PhysicalDeviceMultiviewFeaturesBuilder {
+            inner: PhysicalDeviceMultiviewFeatures::default(),
+            marker: ::std::marker::PhantomData,
+        }
+    }
+}
+#[repr(transparent)]
+pub struct PhysicalDeviceMultiviewFeaturesBuilder<'a> {
+    inner: PhysicalDeviceMultiviewFeatures,
+    marker: ::std::marker::PhantomData<&'a ()>,
+}
+unsafe impl ExtendsDeviceCreateInfo for PhysicalDeviceMultiviewFeaturesBuilder<'_> {}
+unsafe impl ExtendsDeviceCreateInfo for PhysicalDeviceMultiviewFeatures {}
+impl<'a> ::std::ops::Deref for PhysicalDeviceMultiviewFeaturesBuilder<'a> {
+    type Target = PhysicalDeviceMultiviewFeatures;
+    fn deref(&self) -> &Self::Target {
+        &self.inner
+    }
+}
+impl<'a> ::std::ops::DerefMut for PhysicalDeviceMultiviewFeaturesBuilder<'a> {
+    fn deref_mut(&mut self) -> &mut Self::Target {
+        &mut self.inner
+    }
+}
+impl<'a> PhysicalDeviceMultiviewFeaturesBuilder<'a> {
+    pub fn multiview(mut self, multiview: bool) -> PhysicalDeviceMultiviewFeaturesBuilder<'a> {
+        self.inner.multiview = multiview.into();
+        self
+    }
+    pub fn multiview_geometry_shader(
+        mut self,
+        multiview_geometry_shader: bool,
+    ) -> PhysicalDeviceMultiviewFeaturesBuilder<'a> {
+        self.inner.multiview_geometry_shader = multiview_geometry_shader.into();
+        self
+    }
+    pub fn multiview_tessellation_shader(
+        mut self,
+        multiview_tessellation_shader: bool,
+    ) -> PhysicalDeviceMultiviewFeaturesBuilder<'a> {
+        self.inner.multiview_tessellation_shader = multiview_tessellation_shader.into();
+        self
+    }
+    #[doc = r" Calling build will **discard** all the lifetime information. Only call this if"]
+    #[doc = r" necessary! Builders implement `Deref` targeting their corresponding Vulkan struct,"]
+    #[doc = r" so references to builders can be passed directly to Vulkan functions."]
+    pub fn build(self) -> PhysicalDeviceMultiviewFeatures {
+        self.inner
+    }
+}
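+// Editor's note, not vendored `ash` code: a minimal usage sketch of the
+// generated builder above. `build` hands back the plain Vulkan struct and
+// discards the builder's lifetime tracking; because the builder implements
+// `Deref`, passing `&builder` where a `&PhysicalDeviceMultiviewFeatures` is
+// expected avoids `build` and keeps the borrow checking intact.
+//
+//     let features = PhysicalDeviceMultiviewFeatures::builder()
+//         .multiview(true)                  // bool is converted to Bool32
+//         .multiview_geometry_shader(false)
+//         .build();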
+#[repr(C)]
+#[derive(Copy, Clone, Debug)]
+#[doc = ""]
+pub struct PhysicalDeviceMultiviewProperties {
+    pub s_type: StructureType,
+    pub p_next: *mut c_void,
+    pub max_multiview_view_count: u32,
+    pub max_multiview_instance_index: u32,
+}
+impl ::std::default::Default for PhysicalDeviceMultiviewProperties {
+    fn default() -> PhysicalDeviceMultiviewProperties {
+        PhysicalDeviceMultiviewProperties {
+            s_type: StructureType::PHYSICAL_DEVICE_MULTIVIEW_PROPERTIES,
+            p_next: ::std::ptr::null_mut(),
+            max_multiview_view_count: u32::default(),
+            max_multiview_instance_index: u32::default(),
+        }
+    }
+}
+impl PhysicalDeviceMultiviewProperties {
+    pub fn builder<'a>() -> PhysicalDeviceMultiviewPropertiesBuilder<'a> {
+        PhysicalDeviceMultiviewPropertiesBuilder {
+            inner: PhysicalDeviceMultiviewProperties::default(),
+            marker: ::std::marker::PhantomData,
+        }
+    }
+}
+#[repr(transparent)]
+pub struct PhysicalDeviceMultiviewPropertiesBuilder<'a> {
+    inner: PhysicalDeviceMultiviewProperties,
+    marker: ::std::marker::PhantomData<&'a ()>,
+}
+unsafe impl ExtendsPhysicalDeviceProperties2 for PhysicalDeviceMultiviewPropertiesBuilder<'_> {}
+unsafe impl ExtendsPhysicalDeviceProperties2 for PhysicalDeviceMultiviewProperties {}
+impl<'a> ::std::ops::Deref for PhysicalDeviceMultiviewPropertiesBuilder<'a> {
+    type Target = PhysicalDeviceMultiviewProperties;
+    fn deref(&self) -> &Self::Target {
+        &self.inner
+    }
+}
+impl<'a> ::std::ops::DerefMut for PhysicalDeviceMultiviewPropertiesBuilder<'a> {
+    fn deref_mut(&mut self) -> &mut Self::Target {
+        &mut self.inner
+    }
+}
+impl<'a> PhysicalDeviceMultiviewPropertiesBuilder<'a> {
+    pub fn max_multiview_view_count(
+        mut self,
+        max_multiview_view_count: u32,
+    ) -> PhysicalDeviceMultiviewPropertiesBuilder<'a> {
+        self.inner.max_multiview_view_count = max_multiview_view_count;
+        self
+    }
+    pub fn max_multiview_instance_index(
+        mut self,
+        max_multiview_instance_index: u32,
+    ) -> PhysicalDeviceMultiviewPropertiesBuilder<'a> {
+        self.inner.max_multiview_instance_index = max_multiview_instance_index;
+        self
+    }
+    #[doc = r" Calling build will **discard** all the lifetime information. Only call this if"]
+    #[doc = r" necessary! Builders implement `Deref` targeting their corresponding Vulkan struct,"]
+    #[doc = r" so references to builders can be passed directly to Vulkan functions."]
+    pub fn build(self) -> PhysicalDeviceMultiviewProperties {
+        self.inner
+    }
+}
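+// Editor's note, not vendored `ash` code: unlike the input structs above, this
+// is an output struct (`p_next: *mut c_void`) that the driver fills in rather
+// than reads. A hedged sketch of the query pattern, assuming the
+// `PhysicalDeviceProperties2` builder defined elsewhere in this file plus
+// `instance` and `pdev` handles in scope:
+//
+//     let mut multiview = PhysicalDeviceMultiviewProperties::default();
+//     let mut props2 = PhysicalDeviceProperties2::builder()
+//         .push_next(&mut multiview)
+//         .build();
+//     unsafe { instance.get_physical_device_properties2(pdev, &mut props2) };
+//     // multiview.max_multiview_view_count is now populated by the driver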
+#[repr(C)]
+#[derive(Copy, Clone, Debug)]
+#[doc = ""]
+pub struct RenderPassMultiviewCreateInfo {
+    pub s_type: StructureType,
+    pub p_next: *const c_void,
+    pub subpass_count: u32,
+    pub p_view_masks: *const u32,
+    pub dependency_count: u32,
+    pub p_view_offsets: *const i32,
+    pub correlation_mask_count: u32,
+    pub p_correlation_masks: *const u32,
+}
+impl ::std::default::Default for RenderPassMultiviewCreateInfo {
+    fn default() -> RenderPassMultiviewCreateInfo {
+        RenderPassMultiviewCreateInfo {
+            s_type: StructureType::RENDER_PASS_MULTIVIEW_CREATE_INFO,
+            p_next: ::std::ptr::null(),
+            subpass_count: u32::default(),
+            p_view_masks: ::std::ptr::null(),
+            dependency_count: u32::default(),
+            p_view_offsets: ::std::ptr::null(),
+            correlation_mask_count: u32::default(),
+            p_correlation_masks: ::std::ptr::null(),
+        }
+    }
+}
+impl RenderPassMultiviewCreateInfo {
+    pub fn builder<'a>() -> RenderPassMultiviewCreateInfoBuilder<'a> {
+        RenderPassMultiviewCreateInfoBuilder {
+            inner: RenderPassMultiviewCreateInfo::default(),
+            marker: ::std::marker::PhantomData,
+        }
+    }
+}
+#[repr(transparent)]
+pub struct RenderPassMultiviewCreateInfoBuilder<'a> {
+    inner: RenderPassMultiviewCreateInfo,
+    marker: ::std::marker::PhantomData<&'a ()>,
+}
+unsafe impl ExtendsRenderPassCreateInfo for RenderPassMultiviewCreateInfoBuilder<'_> {}
+unsafe impl ExtendsRenderPassCreateInfo for RenderPassMultiviewCreateInfo {}
+impl<'a> ::std::ops::Deref for RenderPassMultiviewCreateInfoBuilder<'a> {
+    type Target = RenderPassMultiviewCreateInfo;
+    fn deref(&self) -> &Self::Target {
+        &self.inner
+    }
+}
+impl<'a> ::std::ops::DerefMut for RenderPassMultiviewCreateInfoBuilder<'a> {
+    fn deref_mut(&mut self) -> &mut Self::Target {
+        &mut self.inner
+    }
+}
+impl<'a> RenderPassMultiviewCreateInfoBuilder<'a> {
+    pub fn view_masks(mut self, view_masks: &'a [u32]) -> RenderPassMultiviewCreateInfoBuilder<'a> {
+        self.inner.subpass_count = view_masks.len() as _;
+        self.inner.p_view_masks = view_masks.as_ptr();
+        self
+    }
+    pub fn view_offsets(
+        mut self,
+        view_offsets: &'a [i32],
+    ) -> RenderPassMultiviewCreateInfoBuilder<'a> {
+        self.inner.dependency_count = view_offsets.len() as _;
+        self.inner.p_view_offsets = view_offsets.as_ptr();
+        self
+    }
+    pub fn correlation_masks(
+        mut self,
+        correlation_masks: &'a [u32],
+    ) -> RenderPassMultiviewCreateInfoBuilder<'a> {
+        self.inner.correlation_mask_count = correlation_masks.len() as _;
+        self.inner.p_correlation_masks = correlation_masks.as_ptr();
+        self
+    }
+    #[doc = r" Calling build will **discard** all the lifetime information. Only call this if"]
+    #[doc = r" necessary! Builders implement `Deref` targeting their corresponding Vulkan struct,"]
+    #[doc = r" so references to builders can be passed directly to Vulkan functions."]
+    pub fn build(self) -> RenderPassMultiviewCreateInfo {
+        self.inner
+    }
+}
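+// Editor's note, not vendored `ash` code: the slice setters above write the
+// count and pointer fields together from one `&'a [T]`, so the two cannot
+// drift apart while the builder is alive. The mask and offset values below
+// are invented for illustration:
+//
+//     let view_masks = [0b11u32]; // one subpass rendering views 0 and 1
+//     let view_offsets = [0i32];  // one subpass dependency
+//     let info = RenderPassMultiviewCreateInfo::builder()
+//         .view_masks(&view_masks)      // sets subpass_count and p_view_masks
+//         .view_offsets(&view_offsets); // sets dependency_count and p_view_offsets
+//     // the slices must outlive every use of `info` (or of `info.build()`)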
+#[repr(C)]
+#[derive(Copy, Clone, Debug)]
+#[doc = ""]
+pub struct SurfaceCapabilities2EXT {
+    pub s_type: StructureType,
+    pub p_next: *mut c_void,
+    pub min_image_count: u32,
+    pub max_image_count: u32,
+    pub current_extent: Extent2D,
+    pub min_image_extent: Extent2D,
+    pub max_image_extent: Extent2D,
+    pub max_image_array_layers: u32,
+    pub supported_transforms: SurfaceTransformFlagsKHR,
+    pub current_transform: SurfaceTransformFlagsKHR,
+    pub supported_composite_alpha: CompositeAlphaFlagsKHR,
+    pub supported_usage_flags: ImageUsageFlags,
+    pub supported_surface_counters: SurfaceCounterFlagsEXT,
+}
+impl ::std::default::Default for SurfaceCapabilities2EXT {
+    fn default() -> SurfaceCapabilities2EXT {
+        SurfaceCapabilities2EXT {
+            s_type: StructureType::SURFACE_CAPABILITIES_2_EXT,
+            p_next: ::std::ptr::null_mut(),
+            min_image_count: u32::default(),
+            max_image_count: u32::default(),
+            current_extent: Extent2D::default(),
+            min_image_extent: Extent2D::default(),
+            max_image_extent: Extent2D::default(),
+            max_image_array_layers: u32::default(),
+            supported_transforms: SurfaceTransformFlagsKHR::default(),
+            current_transform: SurfaceTransformFlagsKHR::default(),
+            supported_composite_alpha: CompositeAlphaFlagsKHR::default(),
+            supported_usage_flags: ImageUsageFlags::default(),
+            supported_surface_counters: SurfaceCounterFlagsEXT::default(),
+        }
+    }
+}
+impl SurfaceCapabilities2EXT {
+    pub fn builder<'a>() -> SurfaceCapabilities2EXTBuilder<'a> {
+        SurfaceCapabilities2EXTBuilder {
+            inner: SurfaceCapabilities2EXT::default(),
+            marker: ::std::marker::PhantomData,
+        }
+    }
+}
+#[repr(transparent)]
+pub struct SurfaceCapabilities2EXTBuilder<'a> {
+    inner: SurfaceCapabilities2EXT,
+    marker: ::std::marker::PhantomData<&'a ()>,
+}
+pub unsafe trait ExtendsSurfaceCapabilities2EXT {}
+impl<'a> ::std::ops::Deref for SurfaceCapabilities2EXTBuilder<'a> {
+    type Target = SurfaceCapabilities2EXT;
+    fn deref(&self) -> &Self::Target {
+        &self.inner
+    }
+}
+impl<'a> ::std::ops::DerefMut for SurfaceCapabilities2EXTBuilder<'a> {
+    fn deref_mut(&mut self) -> &mut Self::Target {
+        &mut self.inner
+    }
+}
+impl<'a> SurfaceCapabilities2EXTBuilder<'a> {
+    pub fn min_image_count(mut self, min_image_count: u32) -> SurfaceCapabilities2EXTBuilder<'a> {
+        self.inner.min_image_count = min_image_count;
+        self
+    }
+    pub fn max_image_count(mut self, max_image_count: u32) -> SurfaceCapabilities2EXTBuilder<'a> {
+        self.inner.max_image_count = max_image_count;
+        self
+    }
+    pub fn current_extent(
+        mut self,
+        current_extent: Extent2D,
+    ) -> SurfaceCapabilities2EXTBuilder<'a> {
+        self.inner.current_extent = current_extent;
+        self
+    }
+    pub fn min_image_extent(
+        mut self,
+        min_image_extent: Extent2D,
+    ) -> SurfaceCapabilities2EXTBuilder<'a> {
+        self.inner.min_image_extent = min_image_extent;
+        self
+    }
+    pub fn max_image_extent(
+        mut self,
+        max_image_extent: Extent2D,
+    ) -> SurfaceCapabilities2EXTBuilder<'a> {
+        self.inner.max_image_extent = max_image_extent;
+        self
+    }
+    pub fn max_image_array_layers(
+        mut self,
+        max_image_array_layers: u32,
+    ) -> SurfaceCapabilities2EXTBuilder<'a> {
+        self.inner.max_image_array_layers = max_image_array_layers;
+        self
+    }
+    pub fn supported_transforms(
+        mut self,
+        supported_transforms: SurfaceTransformFlagsKHR,
+    ) -> SurfaceCapabilities2EXTBuilder<'a>
{ + self.inner.supported_transforms = supported_transforms; + self + } + pub fn current_transform( + mut self, + current_transform: SurfaceTransformFlagsKHR, + ) -> SurfaceCapabilities2EXTBuilder<'a> { + self.inner.current_transform = current_transform; + self + } + pub fn supported_composite_alpha( + mut self, + supported_composite_alpha: CompositeAlphaFlagsKHR, + ) -> SurfaceCapabilities2EXTBuilder<'a> { + self.inner.supported_composite_alpha = supported_composite_alpha; + self + } + pub fn supported_usage_flags( + mut self, + supported_usage_flags: ImageUsageFlags, + ) -> SurfaceCapabilities2EXTBuilder<'a> { + self.inner.supported_usage_flags = supported_usage_flags; + self + } + pub fn supported_surface_counters( + mut self, + supported_surface_counters: SurfaceCounterFlagsEXT, + ) -> SurfaceCapabilities2EXTBuilder<'a> { + self.inner.supported_surface_counters = supported_surface_counters; + self + } + #[doc = r" Prepends the given extension struct between the root and the first pointer. This"] + #[doc = r" method only exists on structs that can be passed to a function directly. Only"] + #[doc = r" valid extension structs can be pushed into the chain."] + #[doc = r" If the chain looks like `A -> B -> C`, and you call `builder.push_next(&mut D)`, then the"] + #[doc = r" chain will look like `A -> D -> B -> C`."] + pub fn push_next( + mut self, + next: &'a mut T, + ) -> SurfaceCapabilities2EXTBuilder<'a> { + unsafe { + let next_ptr = next as *mut T as *mut BaseOutStructure; + let last_next = ptr_chain_iter(next).last().unwrap(); + (*last_next).p_next = self.inner.p_next as _; + self.inner.p_next = next_ptr as _; + } + self + } + #[doc = r" Calling build will **discard** all the lifetime information. Only call this if"] + #[doc = r" necessary! Builders implement `Deref` targeting their corresponding Vulkan struct,"] + #[doc = r" so references to builders can be passed directly to Vulkan functions."] + pub fn build(self) -> SurfaceCapabilities2EXT { + self.inner + } +} +#[repr(C)] +#[derive(Copy, Clone, Debug)] +#[doc = ""] +pub struct DisplayPowerInfoEXT { + pub s_type: StructureType, + pub p_next: *const c_void, + pub power_state: DisplayPowerStateEXT, +} +impl ::std::default::Default for DisplayPowerInfoEXT { + fn default() -> DisplayPowerInfoEXT { + DisplayPowerInfoEXT { + s_type: StructureType::DISPLAY_POWER_INFO_EXT, + p_next: ::std::ptr::null(), + power_state: DisplayPowerStateEXT::default(), + } + } +} +impl DisplayPowerInfoEXT { + pub fn builder<'a>() -> DisplayPowerInfoEXTBuilder<'a> { + DisplayPowerInfoEXTBuilder { + inner: DisplayPowerInfoEXT::default(), + marker: ::std::marker::PhantomData, + } + } +} +#[repr(transparent)] +pub struct DisplayPowerInfoEXTBuilder<'a> { + inner: DisplayPowerInfoEXT, + marker: ::std::marker::PhantomData<&'a ()>, +} +pub unsafe trait ExtendsDisplayPowerInfoEXT {} +impl<'a> ::std::ops::Deref for DisplayPowerInfoEXTBuilder<'a> { + type Target = DisplayPowerInfoEXT; + fn deref(&self) -> &Self::Target { + &self.inner + } +} +impl<'a> ::std::ops::DerefMut for DisplayPowerInfoEXTBuilder<'a> { + fn deref_mut(&mut self) -> &mut Self::Target { + &mut self.inner + } +} +impl<'a> DisplayPowerInfoEXTBuilder<'a> { + pub fn power_state( + mut self, + power_state: DisplayPowerStateEXT, + ) -> DisplayPowerInfoEXTBuilder<'a> { + self.inner.power_state = power_state; + self + } + #[doc = r" Prepends the given extension struct between the root and the first pointer. This"] + #[doc = r" method only exists on structs that can be passed to a function directly. 
Only"] + #[doc = r" valid extension structs can be pushed into the chain."] + #[doc = r" If the chain looks like `A -> B -> C`, and you call `builder.push_next(&mut D)`, then the"] + #[doc = r" chain will look like `A -> D -> B -> C`."] + pub fn push_next( + mut self, + next: &'a mut T, + ) -> DisplayPowerInfoEXTBuilder<'a> { + unsafe { + let next_ptr = next as *mut T as *mut BaseOutStructure; + let last_next = ptr_chain_iter(next).last().unwrap(); + (*last_next).p_next = self.inner.p_next as _; + self.inner.p_next = next_ptr as _; + } + self + } + #[doc = r" Calling build will **discard** all the lifetime information. Only call this if"] + #[doc = r" necessary! Builders implement `Deref` targeting their corresponding Vulkan struct,"] + #[doc = r" so references to builders can be passed directly to Vulkan functions."] + pub fn build(self) -> DisplayPowerInfoEXT { + self.inner + } +} +#[repr(C)] +#[derive(Copy, Clone, Debug)] +#[doc = ""] +pub struct DeviceEventInfoEXT { + pub s_type: StructureType, + pub p_next: *const c_void, + pub device_event: DeviceEventTypeEXT, +} +impl ::std::default::Default for DeviceEventInfoEXT { + fn default() -> DeviceEventInfoEXT { + DeviceEventInfoEXT { + s_type: StructureType::DEVICE_EVENT_INFO_EXT, + p_next: ::std::ptr::null(), + device_event: DeviceEventTypeEXT::default(), + } + } +} +impl DeviceEventInfoEXT { + pub fn builder<'a>() -> DeviceEventInfoEXTBuilder<'a> { + DeviceEventInfoEXTBuilder { + inner: DeviceEventInfoEXT::default(), + marker: ::std::marker::PhantomData, + } + } +} +#[repr(transparent)] +pub struct DeviceEventInfoEXTBuilder<'a> { + inner: DeviceEventInfoEXT, + marker: ::std::marker::PhantomData<&'a ()>, +} +pub unsafe trait ExtendsDeviceEventInfoEXT {} +impl<'a> ::std::ops::Deref for DeviceEventInfoEXTBuilder<'a> { + type Target = DeviceEventInfoEXT; + fn deref(&self) -> &Self::Target { + &self.inner + } +} +impl<'a> ::std::ops::DerefMut for DeviceEventInfoEXTBuilder<'a> { + fn deref_mut(&mut self) -> &mut Self::Target { + &mut self.inner + } +} +impl<'a> DeviceEventInfoEXTBuilder<'a> { + pub fn device_event( + mut self, + device_event: DeviceEventTypeEXT, + ) -> DeviceEventInfoEXTBuilder<'a> { + self.inner.device_event = device_event; + self + } + #[doc = r" Prepends the given extension struct between the root and the first pointer. This"] + #[doc = r" method only exists on structs that can be passed to a function directly. Only"] + #[doc = r" valid extension structs can be pushed into the chain."] + #[doc = r" If the chain looks like `A -> B -> C`, and you call `builder.push_next(&mut D)`, then the"] + #[doc = r" chain will look like `A -> D -> B -> C`."] + pub fn push_next( + mut self, + next: &'a mut T, + ) -> DeviceEventInfoEXTBuilder<'a> { + unsafe { + let next_ptr = next as *mut T as *mut BaseOutStructure; + let last_next = ptr_chain_iter(next).last().unwrap(); + (*last_next).p_next = self.inner.p_next as _; + self.inner.p_next = next_ptr as _; + } + self + } + #[doc = r" Calling build will **discard** all the lifetime information. Only call this if"] + #[doc = r" necessary! 
Builders implement `Deref` targeting their corresponding Vulkan struct,"]
+    #[doc = r" so references to builders can be passed directly to Vulkan functions."]
+    pub fn build(self) -> DeviceEventInfoEXT {
+        self.inner
+    }
+}
+#[repr(C)]
+#[derive(Copy, Clone, Debug)]
+#[doc = ""]
+pub struct DisplayEventInfoEXT {
+    pub s_type: StructureType,
+    pub p_next: *const c_void,
+    pub display_event: DisplayEventTypeEXT,
+}
+impl ::std::default::Default for DisplayEventInfoEXT {
+    fn default() -> DisplayEventInfoEXT {
+        DisplayEventInfoEXT {
+            s_type: StructureType::DISPLAY_EVENT_INFO_EXT,
+            p_next: ::std::ptr::null(),
+            display_event: DisplayEventTypeEXT::default(),
+        }
+    }
+}
+impl DisplayEventInfoEXT {
+    pub fn builder<'a>() -> DisplayEventInfoEXTBuilder<'a> {
+        DisplayEventInfoEXTBuilder {
+            inner: DisplayEventInfoEXT::default(),
+            marker: ::std::marker::PhantomData,
+        }
+    }
+}
+#[repr(transparent)]
+pub struct DisplayEventInfoEXTBuilder<'a> {
+    inner: DisplayEventInfoEXT,
+    marker: ::std::marker::PhantomData<&'a ()>,
+}
+pub unsafe trait ExtendsDisplayEventInfoEXT {}
+impl<'a> ::std::ops::Deref for DisplayEventInfoEXTBuilder<'a> {
+    type Target = DisplayEventInfoEXT;
+    fn deref(&self) -> &Self::Target {
+        &self.inner
+    }
+}
+impl<'a> ::std::ops::DerefMut for DisplayEventInfoEXTBuilder<'a> {
+    fn deref_mut(&mut self) -> &mut Self::Target {
+        &mut self.inner
+    }
+}
+impl<'a> DisplayEventInfoEXTBuilder<'a> {
+    pub fn display_event(
+        mut self,
+        display_event: DisplayEventTypeEXT,
+    ) -> DisplayEventInfoEXTBuilder<'a> {
+        self.inner.display_event = display_event;
+        self
+    }
+    #[doc = r" Prepends the given extension struct between the root and the first pointer. This"]
+    #[doc = r" method only exists on structs that can be passed to a function directly. Only"]
+    #[doc = r" valid extension structs can be pushed into the chain."]
+    #[doc = r" If the chain looks like `A -> B -> C`, and you call `builder.push_next(&mut D)`, then the"]
+    #[doc = r" chain will look like `A -> D -> B -> C`."]
+    pub fn push_next<T: ExtendsDisplayEventInfoEXT>(
+        mut self,
+        next: &'a mut T,
+    ) -> DisplayEventInfoEXTBuilder<'a> {
+        unsafe {
+            let next_ptr = next as *mut T as *mut BaseOutStructure;
+            let last_next = ptr_chain_iter(next).last().unwrap();
+            (*last_next).p_next = self.inner.p_next as _;
+            self.inner.p_next = next_ptr as _;
+        }
+        self
+    }
+    #[doc = r" Calling build will **discard** all the lifetime information. Only call this if"]
+    #[doc = r" necessary! 
Builders implement `Deref` targeting their corresponding Vulkan struct,"] + #[doc = r" so references to builders can be passed directly to Vulkan functions."] + pub fn build(self) -> DisplayEventInfoEXT { + self.inner + } +} +#[repr(C)] +#[derive(Copy, Clone, Debug)] +#[doc = ""] +pub struct SwapchainCounterCreateInfoEXT { + pub s_type: StructureType, + pub p_next: *const c_void, + pub surface_counters: SurfaceCounterFlagsEXT, +} +impl ::std::default::Default for SwapchainCounterCreateInfoEXT { + fn default() -> SwapchainCounterCreateInfoEXT { + SwapchainCounterCreateInfoEXT { + s_type: StructureType::SWAPCHAIN_COUNTER_CREATE_INFO_EXT, + p_next: ::std::ptr::null(), + surface_counters: SurfaceCounterFlagsEXT::default(), + } + } +} +impl SwapchainCounterCreateInfoEXT { + pub fn builder<'a>() -> SwapchainCounterCreateInfoEXTBuilder<'a> { + SwapchainCounterCreateInfoEXTBuilder { + inner: SwapchainCounterCreateInfoEXT::default(), + marker: ::std::marker::PhantomData, + } + } +} +#[repr(transparent)] +pub struct SwapchainCounterCreateInfoEXTBuilder<'a> { + inner: SwapchainCounterCreateInfoEXT, + marker: ::std::marker::PhantomData<&'a ()>, +} +unsafe impl ExtendsSwapchainCreateInfoKHR for SwapchainCounterCreateInfoEXTBuilder<'_> {} +unsafe impl ExtendsSwapchainCreateInfoKHR for SwapchainCounterCreateInfoEXT {} +impl<'a> ::std::ops::Deref for SwapchainCounterCreateInfoEXTBuilder<'a> { + type Target = SwapchainCounterCreateInfoEXT; + fn deref(&self) -> &Self::Target { + &self.inner + } +} +impl<'a> ::std::ops::DerefMut for SwapchainCounterCreateInfoEXTBuilder<'a> { + fn deref_mut(&mut self) -> &mut Self::Target { + &mut self.inner + } +} +impl<'a> SwapchainCounterCreateInfoEXTBuilder<'a> { + pub fn surface_counters( + mut self, + surface_counters: SurfaceCounterFlagsEXT, + ) -> SwapchainCounterCreateInfoEXTBuilder<'a> { + self.inner.surface_counters = surface_counters; + self + } + #[doc = r" Calling build will **discard** all the lifetime information. Only call this if"] + #[doc = r" necessary! 
Builders implement `Deref` targeting their corresponding Vulkan struct,"] + #[doc = r" so references to builders can be passed directly to Vulkan functions."] + pub fn build(self) -> SwapchainCounterCreateInfoEXT { + self.inner + } +} +#[repr(C)] +#[derive(Copy, Clone, Debug)] +#[doc = ""] +pub struct PhysicalDeviceGroupProperties { + pub s_type: StructureType, + pub p_next: *mut c_void, + pub physical_device_count: u32, + pub physical_devices: [PhysicalDevice; MAX_DEVICE_GROUP_SIZE], + pub subset_allocation: Bool32, +} +impl ::std::default::Default for PhysicalDeviceGroupProperties { + fn default() -> PhysicalDeviceGroupProperties { + PhysicalDeviceGroupProperties { + s_type: StructureType::PHYSICAL_DEVICE_GROUP_PROPERTIES, + p_next: ::std::ptr::null_mut(), + physical_device_count: u32::default(), + physical_devices: unsafe { ::std::mem::zeroed() }, + subset_allocation: Bool32::default(), + } + } +} +impl PhysicalDeviceGroupProperties { + pub fn builder<'a>() -> PhysicalDeviceGroupPropertiesBuilder<'a> { + PhysicalDeviceGroupPropertiesBuilder { + inner: PhysicalDeviceGroupProperties::default(), + marker: ::std::marker::PhantomData, + } + } +} +#[repr(transparent)] +pub struct PhysicalDeviceGroupPropertiesBuilder<'a> { + inner: PhysicalDeviceGroupProperties, + marker: ::std::marker::PhantomData<&'a ()>, +} +pub unsafe trait ExtendsPhysicalDeviceGroupProperties {} +impl<'a> ::std::ops::Deref for PhysicalDeviceGroupPropertiesBuilder<'a> { + type Target = PhysicalDeviceGroupProperties; + fn deref(&self) -> &Self::Target { + &self.inner + } +} +impl<'a> ::std::ops::DerefMut for PhysicalDeviceGroupPropertiesBuilder<'a> { + fn deref_mut(&mut self) -> &mut Self::Target { + &mut self.inner + } +} +impl<'a> PhysicalDeviceGroupPropertiesBuilder<'a> { + pub fn physical_device_count( + mut self, + physical_device_count: u32, + ) -> PhysicalDeviceGroupPropertiesBuilder<'a> { + self.inner.physical_device_count = physical_device_count; + self + } + pub fn physical_devices( + mut self, + physical_devices: [PhysicalDevice; MAX_DEVICE_GROUP_SIZE], + ) -> PhysicalDeviceGroupPropertiesBuilder<'a> { + self.inner.physical_devices = physical_devices; + self + } + pub fn subset_allocation( + mut self, + subset_allocation: bool, + ) -> PhysicalDeviceGroupPropertiesBuilder<'a> { + self.inner.subset_allocation = subset_allocation.into(); + self + } + #[doc = r" Prepends the given extension struct between the root and the first pointer. This"] + #[doc = r" method only exists on structs that can be passed to a function directly. Only"] + #[doc = r" valid extension structs can be pushed into the chain."] + #[doc = r" If the chain looks like `A -> B -> C`, and you call `builder.push_next(&mut D)`, then the"] + #[doc = r" chain will look like `A -> D -> B -> C`."] + pub fn push_next( + mut self, + next: &'a mut T, + ) -> PhysicalDeviceGroupPropertiesBuilder<'a> { + unsafe { + let next_ptr = next as *mut T as *mut BaseOutStructure; + let last_next = ptr_chain_iter(next).last().unwrap(); + (*last_next).p_next = self.inner.p_next as _; + self.inner.p_next = next_ptr as _; + } + self + } + #[doc = r" Calling build will **discard** all the lifetime information. Only call this if"] + #[doc = r" necessary! 
Builders implement `Deref` targeting their corresponding Vulkan struct,"] + #[doc = r" so references to builders can be passed directly to Vulkan functions."] + pub fn build(self) -> PhysicalDeviceGroupProperties { + self.inner + } +} +#[repr(C)] +#[derive(Copy, Clone, Debug)] +#[doc = ""] +pub struct MemoryAllocateFlagsInfo { + pub s_type: StructureType, + pub p_next: *const c_void, + pub flags: MemoryAllocateFlags, + pub device_mask: u32, +} +impl ::std::default::Default for MemoryAllocateFlagsInfo { + fn default() -> MemoryAllocateFlagsInfo { + MemoryAllocateFlagsInfo { + s_type: StructureType::MEMORY_ALLOCATE_FLAGS_INFO, + p_next: ::std::ptr::null(), + flags: MemoryAllocateFlags::default(), + device_mask: u32::default(), + } + } +} +impl MemoryAllocateFlagsInfo { + pub fn builder<'a>() -> MemoryAllocateFlagsInfoBuilder<'a> { + MemoryAllocateFlagsInfoBuilder { + inner: MemoryAllocateFlagsInfo::default(), + marker: ::std::marker::PhantomData, + } + } +} +#[repr(transparent)] +pub struct MemoryAllocateFlagsInfoBuilder<'a> { + inner: MemoryAllocateFlagsInfo, + marker: ::std::marker::PhantomData<&'a ()>, +} +unsafe impl ExtendsMemoryAllocateInfo for MemoryAllocateFlagsInfoBuilder<'_> {} +unsafe impl ExtendsMemoryAllocateInfo for MemoryAllocateFlagsInfo {} +impl<'a> ::std::ops::Deref for MemoryAllocateFlagsInfoBuilder<'a> { + type Target = MemoryAllocateFlagsInfo; + fn deref(&self) -> &Self::Target { + &self.inner + } +} +impl<'a> ::std::ops::DerefMut for MemoryAllocateFlagsInfoBuilder<'a> { + fn deref_mut(&mut self) -> &mut Self::Target { + &mut self.inner + } +} +impl<'a> MemoryAllocateFlagsInfoBuilder<'a> { + pub fn flags(mut self, flags: MemoryAllocateFlags) -> MemoryAllocateFlagsInfoBuilder<'a> { + self.inner.flags = flags; + self + } + pub fn device_mask(mut self, device_mask: u32) -> MemoryAllocateFlagsInfoBuilder<'a> { + self.inner.device_mask = device_mask; + self + } + #[doc = r" Calling build will **discard** all the lifetime information. Only call this if"] + #[doc = r" necessary! 
Builders implement `Deref` targeting their corresponding Vulkan struct,"] + #[doc = r" so references to builders can be passed directly to Vulkan functions."] + pub fn build(self) -> MemoryAllocateFlagsInfo { + self.inner + } +} +#[repr(C)] +#[derive(Copy, Clone, Debug)] +#[doc = ""] +pub struct BindBufferMemoryInfo { + pub s_type: StructureType, + pub p_next: *const c_void, + pub buffer: Buffer, + pub memory: DeviceMemory, + pub memory_offset: DeviceSize, +} +impl ::std::default::Default for BindBufferMemoryInfo { + fn default() -> BindBufferMemoryInfo { + BindBufferMemoryInfo { + s_type: StructureType::BIND_BUFFER_MEMORY_INFO, + p_next: ::std::ptr::null(), + buffer: Buffer::default(), + memory: DeviceMemory::default(), + memory_offset: DeviceSize::default(), + } + } +} +impl BindBufferMemoryInfo { + pub fn builder<'a>() -> BindBufferMemoryInfoBuilder<'a> { + BindBufferMemoryInfoBuilder { + inner: BindBufferMemoryInfo::default(), + marker: ::std::marker::PhantomData, + } + } +} +#[repr(transparent)] +pub struct BindBufferMemoryInfoBuilder<'a> { + inner: BindBufferMemoryInfo, + marker: ::std::marker::PhantomData<&'a ()>, +} +pub unsafe trait ExtendsBindBufferMemoryInfo {} +impl<'a> ::std::ops::Deref for BindBufferMemoryInfoBuilder<'a> { + type Target = BindBufferMemoryInfo; + fn deref(&self) -> &Self::Target { + &self.inner + } +} +impl<'a> ::std::ops::DerefMut for BindBufferMemoryInfoBuilder<'a> { + fn deref_mut(&mut self) -> &mut Self::Target { + &mut self.inner + } +} +impl<'a> BindBufferMemoryInfoBuilder<'a> { + pub fn buffer(mut self, buffer: Buffer) -> BindBufferMemoryInfoBuilder<'a> { + self.inner.buffer = buffer; + self + } + pub fn memory(mut self, memory: DeviceMemory) -> BindBufferMemoryInfoBuilder<'a> { + self.inner.memory = memory; + self + } + pub fn memory_offset(mut self, memory_offset: DeviceSize) -> BindBufferMemoryInfoBuilder<'a> { + self.inner.memory_offset = memory_offset; + self + } + #[doc = r" Prepends the given extension struct between the root and the first pointer. This"] + #[doc = r" method only exists on structs that can be passed to a function directly. Only"] + #[doc = r" valid extension structs can be pushed into the chain."] + #[doc = r" If the chain looks like `A -> B -> C`, and you call `builder.push_next(&mut D)`, then the"] + #[doc = r" chain will look like `A -> D -> B -> C`."] + pub fn push_next( + mut self, + next: &'a mut T, + ) -> BindBufferMemoryInfoBuilder<'a> { + unsafe { + let next_ptr = next as *mut T as *mut BaseOutStructure; + let last_next = ptr_chain_iter(next).last().unwrap(); + (*last_next).p_next = self.inner.p_next as _; + self.inner.p_next = next_ptr as _; + } + self + } + #[doc = r" Calling build will **discard** all the lifetime information. Only call this if"] + #[doc = r" necessary! 
Builders implement `Deref` targeting their corresponding Vulkan struct,"] + #[doc = r" so references to builders can be passed directly to Vulkan functions."] + pub fn build(self) -> BindBufferMemoryInfo { + self.inner + } +} +#[repr(C)] +#[derive(Copy, Clone, Debug)] +#[doc = ""] +pub struct BindBufferMemoryDeviceGroupInfo { + pub s_type: StructureType, + pub p_next: *const c_void, + pub device_index_count: u32, + pub p_device_indices: *const u32, +} +impl ::std::default::Default for BindBufferMemoryDeviceGroupInfo { + fn default() -> BindBufferMemoryDeviceGroupInfo { + BindBufferMemoryDeviceGroupInfo { + s_type: StructureType::BIND_BUFFER_MEMORY_DEVICE_GROUP_INFO, + p_next: ::std::ptr::null(), + device_index_count: u32::default(), + p_device_indices: ::std::ptr::null(), + } + } +} +impl BindBufferMemoryDeviceGroupInfo { + pub fn builder<'a>() -> BindBufferMemoryDeviceGroupInfoBuilder<'a> { + BindBufferMemoryDeviceGroupInfoBuilder { + inner: BindBufferMemoryDeviceGroupInfo::default(), + marker: ::std::marker::PhantomData, + } + } +} +#[repr(transparent)] +pub struct BindBufferMemoryDeviceGroupInfoBuilder<'a> { + inner: BindBufferMemoryDeviceGroupInfo, + marker: ::std::marker::PhantomData<&'a ()>, +} +unsafe impl ExtendsBindBufferMemoryInfo for BindBufferMemoryDeviceGroupInfoBuilder<'_> {} +unsafe impl ExtendsBindBufferMemoryInfo for BindBufferMemoryDeviceGroupInfo {} +impl<'a> ::std::ops::Deref for BindBufferMemoryDeviceGroupInfoBuilder<'a> { + type Target = BindBufferMemoryDeviceGroupInfo; + fn deref(&self) -> &Self::Target { + &self.inner + } +} +impl<'a> ::std::ops::DerefMut for BindBufferMemoryDeviceGroupInfoBuilder<'a> { + fn deref_mut(&mut self) -> &mut Self::Target { + &mut self.inner + } +} +impl<'a> BindBufferMemoryDeviceGroupInfoBuilder<'a> { + pub fn device_indices( + mut self, + device_indices: &'a [u32], + ) -> BindBufferMemoryDeviceGroupInfoBuilder<'a> { + self.inner.device_index_count = device_indices.len() as _; + self.inner.p_device_indices = device_indices.as_ptr(); + self + } + #[doc = r" Calling build will **discard** all the lifetime information. Only call this if"] + #[doc = r" necessary! 
Builders implement `Deref` targeting their corresponding Vulkan struct,"] + #[doc = r" so references to builders can be passed directly to Vulkan functions."] + pub fn build(self) -> BindBufferMemoryDeviceGroupInfo { + self.inner + } +} +#[repr(C)] +#[derive(Copy, Clone, Debug)] +#[doc = ""] +pub struct BindImageMemoryInfo { + pub s_type: StructureType, + pub p_next: *const c_void, + pub image: Image, + pub memory: DeviceMemory, + pub memory_offset: DeviceSize, +} +impl ::std::default::Default for BindImageMemoryInfo { + fn default() -> BindImageMemoryInfo { + BindImageMemoryInfo { + s_type: StructureType::BIND_IMAGE_MEMORY_INFO, + p_next: ::std::ptr::null(), + image: Image::default(), + memory: DeviceMemory::default(), + memory_offset: DeviceSize::default(), + } + } +} +impl BindImageMemoryInfo { + pub fn builder<'a>() -> BindImageMemoryInfoBuilder<'a> { + BindImageMemoryInfoBuilder { + inner: BindImageMemoryInfo::default(), + marker: ::std::marker::PhantomData, + } + } +} +#[repr(transparent)] +pub struct BindImageMemoryInfoBuilder<'a> { + inner: BindImageMemoryInfo, + marker: ::std::marker::PhantomData<&'a ()>, +} +pub unsafe trait ExtendsBindImageMemoryInfo {} +impl<'a> ::std::ops::Deref for BindImageMemoryInfoBuilder<'a> { + type Target = BindImageMemoryInfo; + fn deref(&self) -> &Self::Target { + &self.inner + } +} +impl<'a> ::std::ops::DerefMut for BindImageMemoryInfoBuilder<'a> { + fn deref_mut(&mut self) -> &mut Self::Target { + &mut self.inner + } +} +impl<'a> BindImageMemoryInfoBuilder<'a> { + pub fn image(mut self, image: Image) -> BindImageMemoryInfoBuilder<'a> { + self.inner.image = image; + self + } + pub fn memory(mut self, memory: DeviceMemory) -> BindImageMemoryInfoBuilder<'a> { + self.inner.memory = memory; + self + } + pub fn memory_offset(mut self, memory_offset: DeviceSize) -> BindImageMemoryInfoBuilder<'a> { + self.inner.memory_offset = memory_offset; + self + } + #[doc = r" Prepends the given extension struct between the root and the first pointer. This"] + #[doc = r" method only exists on structs that can be passed to a function directly. Only"] + #[doc = r" valid extension structs can be pushed into the chain."] + #[doc = r" If the chain looks like `A -> B -> C`, and you call `builder.push_next(&mut D)`, then the"] + #[doc = r" chain will look like `A -> D -> B -> C`."] + pub fn push_next( + mut self, + next: &'a mut T, + ) -> BindImageMemoryInfoBuilder<'a> { + unsafe { + let next_ptr = next as *mut T as *mut BaseOutStructure; + let last_next = ptr_chain_iter(next).last().unwrap(); + (*last_next).p_next = self.inner.p_next as _; + self.inner.p_next = next_ptr as _; + } + self + } + #[doc = r" Calling build will **discard** all the lifetime information. Only call this if"] + #[doc = r" necessary! 
Builders implement `Deref` targeting their corresponding Vulkan struct,"] + #[doc = r" so references to builders can be passed directly to Vulkan functions."] + pub fn build(self) -> BindImageMemoryInfo { + self.inner + } +} +#[repr(C)] +#[derive(Copy, Clone, Debug)] +#[doc = ""] +pub struct BindImageMemoryDeviceGroupInfo { + pub s_type: StructureType, + pub p_next: *const c_void, + pub device_index_count: u32, + pub p_device_indices: *const u32, + pub split_instance_bind_region_count: u32, + pub p_split_instance_bind_regions: *const Rect2D, +} +impl ::std::default::Default for BindImageMemoryDeviceGroupInfo { + fn default() -> BindImageMemoryDeviceGroupInfo { + BindImageMemoryDeviceGroupInfo { + s_type: StructureType::BIND_IMAGE_MEMORY_DEVICE_GROUP_INFO, + p_next: ::std::ptr::null(), + device_index_count: u32::default(), + p_device_indices: ::std::ptr::null(), + split_instance_bind_region_count: u32::default(), + p_split_instance_bind_regions: ::std::ptr::null(), + } + } +} +impl BindImageMemoryDeviceGroupInfo { + pub fn builder<'a>() -> BindImageMemoryDeviceGroupInfoBuilder<'a> { + BindImageMemoryDeviceGroupInfoBuilder { + inner: BindImageMemoryDeviceGroupInfo::default(), + marker: ::std::marker::PhantomData, + } + } +} +#[repr(transparent)] +pub struct BindImageMemoryDeviceGroupInfoBuilder<'a> { + inner: BindImageMemoryDeviceGroupInfo, + marker: ::std::marker::PhantomData<&'a ()>, +} +unsafe impl ExtendsBindImageMemoryInfo for BindImageMemoryDeviceGroupInfoBuilder<'_> {} +unsafe impl ExtendsBindImageMemoryInfo for BindImageMemoryDeviceGroupInfo {} +impl<'a> ::std::ops::Deref for BindImageMemoryDeviceGroupInfoBuilder<'a> { + type Target = BindImageMemoryDeviceGroupInfo; + fn deref(&self) -> &Self::Target { + &self.inner + } +} +impl<'a> ::std::ops::DerefMut for BindImageMemoryDeviceGroupInfoBuilder<'a> { + fn deref_mut(&mut self) -> &mut Self::Target { + &mut self.inner + } +} +impl<'a> BindImageMemoryDeviceGroupInfoBuilder<'a> { + pub fn device_indices( + mut self, + device_indices: &'a [u32], + ) -> BindImageMemoryDeviceGroupInfoBuilder<'a> { + self.inner.device_index_count = device_indices.len() as _; + self.inner.p_device_indices = device_indices.as_ptr(); + self + } + pub fn split_instance_bind_regions( + mut self, + split_instance_bind_regions: &'a [Rect2D], + ) -> BindImageMemoryDeviceGroupInfoBuilder<'a> { + self.inner.split_instance_bind_region_count = split_instance_bind_regions.len() as _; + self.inner.p_split_instance_bind_regions = split_instance_bind_regions.as_ptr(); + self + } + #[doc = r" Calling build will **discard** all the lifetime information. Only call this if"] + #[doc = r" necessary! 
Builders implement `Deref` targeting their corresponding Vulkan struct,"] + #[doc = r" so references to builders can be passed directly to Vulkan functions."] + pub fn build(self) -> BindImageMemoryDeviceGroupInfo { + self.inner + } +} +#[repr(C)] +#[derive(Copy, Clone, Debug)] +#[doc = ""] +pub struct DeviceGroupRenderPassBeginInfo { + pub s_type: StructureType, + pub p_next: *const c_void, + pub device_mask: u32, + pub device_render_area_count: u32, + pub p_device_render_areas: *const Rect2D, +} +impl ::std::default::Default for DeviceGroupRenderPassBeginInfo { + fn default() -> DeviceGroupRenderPassBeginInfo { + DeviceGroupRenderPassBeginInfo { + s_type: StructureType::DEVICE_GROUP_RENDER_PASS_BEGIN_INFO, + p_next: ::std::ptr::null(), + device_mask: u32::default(), + device_render_area_count: u32::default(), + p_device_render_areas: ::std::ptr::null(), + } + } +} +impl DeviceGroupRenderPassBeginInfo { + pub fn builder<'a>() -> DeviceGroupRenderPassBeginInfoBuilder<'a> { + DeviceGroupRenderPassBeginInfoBuilder { + inner: DeviceGroupRenderPassBeginInfo::default(), + marker: ::std::marker::PhantomData, + } + } +} +#[repr(transparent)] +pub struct DeviceGroupRenderPassBeginInfoBuilder<'a> { + inner: DeviceGroupRenderPassBeginInfo, + marker: ::std::marker::PhantomData<&'a ()>, +} +unsafe impl ExtendsRenderPassBeginInfo for DeviceGroupRenderPassBeginInfoBuilder<'_> {} +unsafe impl ExtendsRenderPassBeginInfo for DeviceGroupRenderPassBeginInfo {} +impl<'a> ::std::ops::Deref for DeviceGroupRenderPassBeginInfoBuilder<'a> { + type Target = DeviceGroupRenderPassBeginInfo; + fn deref(&self) -> &Self::Target { + &self.inner + } +} +impl<'a> ::std::ops::DerefMut for DeviceGroupRenderPassBeginInfoBuilder<'a> { + fn deref_mut(&mut self) -> &mut Self::Target { + &mut self.inner + } +} +impl<'a> DeviceGroupRenderPassBeginInfoBuilder<'a> { + pub fn device_mask(mut self, device_mask: u32) -> DeviceGroupRenderPassBeginInfoBuilder<'a> { + self.inner.device_mask = device_mask; + self + } + pub fn device_render_areas( + mut self, + device_render_areas: &'a [Rect2D], + ) -> DeviceGroupRenderPassBeginInfoBuilder<'a> { + self.inner.device_render_area_count = device_render_areas.len() as _; + self.inner.p_device_render_areas = device_render_areas.as_ptr(); + self + } + #[doc = r" Calling build will **discard** all the lifetime information. Only call this if"] + #[doc = r" necessary! 
Builders implement `Deref` targeting their corresponding Vulkan struct,"] + #[doc = r" so references to builders can be passed directly to Vulkan functions."] + pub fn build(self) -> DeviceGroupRenderPassBeginInfo { + self.inner + } +} +#[repr(C)] +#[derive(Copy, Clone, Debug)] +#[doc = ""] +pub struct DeviceGroupCommandBufferBeginInfo { + pub s_type: StructureType, + pub p_next: *const c_void, + pub device_mask: u32, +} +impl ::std::default::Default for DeviceGroupCommandBufferBeginInfo { + fn default() -> DeviceGroupCommandBufferBeginInfo { + DeviceGroupCommandBufferBeginInfo { + s_type: StructureType::DEVICE_GROUP_COMMAND_BUFFER_BEGIN_INFO, + p_next: ::std::ptr::null(), + device_mask: u32::default(), + } + } +} +impl DeviceGroupCommandBufferBeginInfo { + pub fn builder<'a>() -> DeviceGroupCommandBufferBeginInfoBuilder<'a> { + DeviceGroupCommandBufferBeginInfoBuilder { + inner: DeviceGroupCommandBufferBeginInfo::default(), + marker: ::std::marker::PhantomData, + } + } +} +#[repr(transparent)] +pub struct DeviceGroupCommandBufferBeginInfoBuilder<'a> { + inner: DeviceGroupCommandBufferBeginInfo, + marker: ::std::marker::PhantomData<&'a ()>, +} +unsafe impl ExtendsCommandBufferBeginInfo for DeviceGroupCommandBufferBeginInfoBuilder<'_> {} +unsafe impl ExtendsCommandBufferBeginInfo for DeviceGroupCommandBufferBeginInfo {} +impl<'a> ::std::ops::Deref for DeviceGroupCommandBufferBeginInfoBuilder<'a> { + type Target = DeviceGroupCommandBufferBeginInfo; + fn deref(&self) -> &Self::Target { + &self.inner + } +} +impl<'a> ::std::ops::DerefMut for DeviceGroupCommandBufferBeginInfoBuilder<'a> { + fn deref_mut(&mut self) -> &mut Self::Target { + &mut self.inner + } +} +impl<'a> DeviceGroupCommandBufferBeginInfoBuilder<'a> { + pub fn device_mask(mut self, device_mask: u32) -> DeviceGroupCommandBufferBeginInfoBuilder<'a> { + self.inner.device_mask = device_mask; + self + } + #[doc = r" Calling build will **discard** all the lifetime information. Only call this if"] + #[doc = r" necessary! 
Builders implement `Deref` targeting their corresponding Vulkan struct,"] + #[doc = r" so references to builders can be passed directly to Vulkan functions."] + pub fn build(self) -> DeviceGroupCommandBufferBeginInfo { + self.inner + } +} +#[repr(C)] +#[derive(Copy, Clone, Debug)] +#[doc = ""] +pub struct DeviceGroupSubmitInfo { + pub s_type: StructureType, + pub p_next: *const c_void, + pub wait_semaphore_count: u32, + pub p_wait_semaphore_device_indices: *const u32, + pub command_buffer_count: u32, + pub p_command_buffer_device_masks: *const u32, + pub signal_semaphore_count: u32, + pub p_signal_semaphore_device_indices: *const u32, +} +impl ::std::default::Default for DeviceGroupSubmitInfo { + fn default() -> DeviceGroupSubmitInfo { + DeviceGroupSubmitInfo { + s_type: StructureType::DEVICE_GROUP_SUBMIT_INFO, + p_next: ::std::ptr::null(), + wait_semaphore_count: u32::default(), + p_wait_semaphore_device_indices: ::std::ptr::null(), + command_buffer_count: u32::default(), + p_command_buffer_device_masks: ::std::ptr::null(), + signal_semaphore_count: u32::default(), + p_signal_semaphore_device_indices: ::std::ptr::null(), + } + } +} +impl DeviceGroupSubmitInfo { + pub fn builder<'a>() -> DeviceGroupSubmitInfoBuilder<'a> { + DeviceGroupSubmitInfoBuilder { + inner: DeviceGroupSubmitInfo::default(), + marker: ::std::marker::PhantomData, + } + } +} +#[repr(transparent)] +pub struct DeviceGroupSubmitInfoBuilder<'a> { + inner: DeviceGroupSubmitInfo, + marker: ::std::marker::PhantomData<&'a ()>, +} +unsafe impl ExtendsSubmitInfo for DeviceGroupSubmitInfoBuilder<'_> {} +unsafe impl ExtendsSubmitInfo for DeviceGroupSubmitInfo {} +impl<'a> ::std::ops::Deref for DeviceGroupSubmitInfoBuilder<'a> { + type Target = DeviceGroupSubmitInfo; + fn deref(&self) -> &Self::Target { + &self.inner + } +} +impl<'a> ::std::ops::DerefMut for DeviceGroupSubmitInfoBuilder<'a> { + fn deref_mut(&mut self) -> &mut Self::Target { + &mut self.inner + } +} +impl<'a> DeviceGroupSubmitInfoBuilder<'a> { + pub fn wait_semaphore_device_indices( + mut self, + wait_semaphore_device_indices: &'a [u32], + ) -> DeviceGroupSubmitInfoBuilder<'a> { + self.inner.wait_semaphore_count = wait_semaphore_device_indices.len() as _; + self.inner.p_wait_semaphore_device_indices = wait_semaphore_device_indices.as_ptr(); + self + } + pub fn command_buffer_device_masks( + mut self, + command_buffer_device_masks: &'a [u32], + ) -> DeviceGroupSubmitInfoBuilder<'a> { + self.inner.command_buffer_count = command_buffer_device_masks.len() as _; + self.inner.p_command_buffer_device_masks = command_buffer_device_masks.as_ptr(); + self + } + pub fn signal_semaphore_device_indices( + mut self, + signal_semaphore_device_indices: &'a [u32], + ) -> DeviceGroupSubmitInfoBuilder<'a> { + self.inner.signal_semaphore_count = signal_semaphore_device_indices.len() as _; + self.inner.p_signal_semaphore_device_indices = signal_semaphore_device_indices.as_ptr(); + self + } + #[doc = r" Calling build will **discard** all the lifetime information. Only call this if"] + #[doc = r" necessary! 
Builders implement `Deref` targeting their corresponding Vulkan struct,"] + #[doc = r" so references to builders can be passed directly to Vulkan functions."] + pub fn build(self) -> DeviceGroupSubmitInfo { + self.inner + } +} +#[repr(C)] +#[derive(Copy, Clone, Debug)] +#[doc = ""] +pub struct DeviceGroupBindSparseInfo { + pub s_type: StructureType, + pub p_next: *const c_void, + pub resource_device_index: u32, + pub memory_device_index: u32, +} +impl ::std::default::Default for DeviceGroupBindSparseInfo { + fn default() -> DeviceGroupBindSparseInfo { + DeviceGroupBindSparseInfo { + s_type: StructureType::DEVICE_GROUP_BIND_SPARSE_INFO, + p_next: ::std::ptr::null(), + resource_device_index: u32::default(), + memory_device_index: u32::default(), + } + } +} +impl DeviceGroupBindSparseInfo { + pub fn builder<'a>() -> DeviceGroupBindSparseInfoBuilder<'a> { + DeviceGroupBindSparseInfoBuilder { + inner: DeviceGroupBindSparseInfo::default(), + marker: ::std::marker::PhantomData, + } + } +} +#[repr(transparent)] +pub struct DeviceGroupBindSparseInfoBuilder<'a> { + inner: DeviceGroupBindSparseInfo, + marker: ::std::marker::PhantomData<&'a ()>, +} +unsafe impl ExtendsBindSparseInfo for DeviceGroupBindSparseInfoBuilder<'_> {} +unsafe impl ExtendsBindSparseInfo for DeviceGroupBindSparseInfo {} +impl<'a> ::std::ops::Deref for DeviceGroupBindSparseInfoBuilder<'a> { + type Target = DeviceGroupBindSparseInfo; + fn deref(&self) -> &Self::Target { + &self.inner + } +} +impl<'a> ::std::ops::DerefMut for DeviceGroupBindSparseInfoBuilder<'a> { + fn deref_mut(&mut self) -> &mut Self::Target { + &mut self.inner + } +} +impl<'a> DeviceGroupBindSparseInfoBuilder<'a> { + pub fn resource_device_index( + mut self, + resource_device_index: u32, + ) -> DeviceGroupBindSparseInfoBuilder<'a> { + self.inner.resource_device_index = resource_device_index; + self + } + pub fn memory_device_index( + mut self, + memory_device_index: u32, + ) -> DeviceGroupBindSparseInfoBuilder<'a> { + self.inner.memory_device_index = memory_device_index; + self + } + #[doc = r" Calling build will **discard** all the lifetime information. Only call this if"] + #[doc = r" necessary! 
Builders implement `Deref` targeting their corresponding Vulkan struct,"] + #[doc = r" so references to builders can be passed directly to Vulkan functions."] + pub fn build(self) -> DeviceGroupBindSparseInfo { + self.inner + } +} +#[repr(C)] +#[derive(Copy, Clone, Debug)] +#[doc = ""] +pub struct DeviceGroupPresentCapabilitiesKHR { + pub s_type: StructureType, + pub p_next: *const c_void, + pub present_mask: [u32; MAX_DEVICE_GROUP_SIZE], + pub modes: DeviceGroupPresentModeFlagsKHR, +} +impl ::std::default::Default for DeviceGroupPresentCapabilitiesKHR { + fn default() -> DeviceGroupPresentCapabilitiesKHR { + DeviceGroupPresentCapabilitiesKHR { + s_type: StructureType::DEVICE_GROUP_PRESENT_CAPABILITIES_KHR, + p_next: ::std::ptr::null(), + present_mask: unsafe { ::std::mem::zeroed() }, + modes: DeviceGroupPresentModeFlagsKHR::default(), + } + } +} +impl DeviceGroupPresentCapabilitiesKHR { + pub fn builder<'a>() -> DeviceGroupPresentCapabilitiesKHRBuilder<'a> { + DeviceGroupPresentCapabilitiesKHRBuilder { + inner: DeviceGroupPresentCapabilitiesKHR::default(), + marker: ::std::marker::PhantomData, + } + } +} +#[repr(transparent)] +pub struct DeviceGroupPresentCapabilitiesKHRBuilder<'a> { + inner: DeviceGroupPresentCapabilitiesKHR, + marker: ::std::marker::PhantomData<&'a ()>, +} +pub unsafe trait ExtendsDeviceGroupPresentCapabilitiesKHR {} +impl<'a> ::std::ops::Deref for DeviceGroupPresentCapabilitiesKHRBuilder<'a> { + type Target = DeviceGroupPresentCapabilitiesKHR; + fn deref(&self) -> &Self::Target { + &self.inner + } +} +impl<'a> ::std::ops::DerefMut for DeviceGroupPresentCapabilitiesKHRBuilder<'a> { + fn deref_mut(&mut self) -> &mut Self::Target { + &mut self.inner + } +} +impl<'a> DeviceGroupPresentCapabilitiesKHRBuilder<'a> { + pub fn present_mask( + mut self, + present_mask: [u32; MAX_DEVICE_GROUP_SIZE], + ) -> DeviceGroupPresentCapabilitiesKHRBuilder<'a> { + self.inner.present_mask = present_mask; + self + } + pub fn modes( + mut self, + modes: DeviceGroupPresentModeFlagsKHR, + ) -> DeviceGroupPresentCapabilitiesKHRBuilder<'a> { + self.inner.modes = modes; + self + } + #[doc = r" Prepends the given extension struct between the root and the first pointer. This"] + #[doc = r" method only exists on structs that can be passed to a function directly. Only"] + #[doc = r" valid extension structs can be pushed into the chain."] + #[doc = r" If the chain looks like `A -> B -> C`, and you call `builder.push_next(&mut D)`, then the"] + #[doc = r" chain will look like `A -> D -> B -> C`."] + pub fn push_next( + mut self, + next: &'a mut T, + ) -> DeviceGroupPresentCapabilitiesKHRBuilder<'a> { + unsafe { + let next_ptr = next as *mut T as *mut BaseOutStructure; + let last_next = ptr_chain_iter(next).last().unwrap(); + (*last_next).p_next = self.inner.p_next as _; + self.inner.p_next = next_ptr as _; + } + self + } + #[doc = r" Calling build will **discard** all the lifetime information. Only call this if"] + #[doc = r" necessary! 
Builders implement `Deref` targeting their corresponding Vulkan struct,"] + #[doc = r" so references to builders can be passed directly to Vulkan functions."] + pub fn build(self) -> DeviceGroupPresentCapabilitiesKHR { + self.inner + } +} +#[repr(C)] +#[derive(Copy, Clone, Debug)] +#[doc = ""] +pub struct ImageSwapchainCreateInfoKHR { + pub s_type: StructureType, + pub p_next: *const c_void, + pub swapchain: SwapchainKHR, +} +impl ::std::default::Default for ImageSwapchainCreateInfoKHR { + fn default() -> ImageSwapchainCreateInfoKHR { + ImageSwapchainCreateInfoKHR { + s_type: StructureType::IMAGE_SWAPCHAIN_CREATE_INFO_KHR, + p_next: ::std::ptr::null(), + swapchain: SwapchainKHR::default(), + } + } +} +impl ImageSwapchainCreateInfoKHR { + pub fn builder<'a>() -> ImageSwapchainCreateInfoKHRBuilder<'a> { + ImageSwapchainCreateInfoKHRBuilder { + inner: ImageSwapchainCreateInfoKHR::default(), + marker: ::std::marker::PhantomData, + } + } +} +#[repr(transparent)] +pub struct ImageSwapchainCreateInfoKHRBuilder<'a> { + inner: ImageSwapchainCreateInfoKHR, + marker: ::std::marker::PhantomData<&'a ()>, +} +unsafe impl ExtendsImageCreateInfo for ImageSwapchainCreateInfoKHRBuilder<'_> {} +unsafe impl ExtendsImageCreateInfo for ImageSwapchainCreateInfoKHR {} +impl<'a> ::std::ops::Deref for ImageSwapchainCreateInfoKHRBuilder<'a> { + type Target = ImageSwapchainCreateInfoKHR; + fn deref(&self) -> &Self::Target { + &self.inner + } +} +impl<'a> ::std::ops::DerefMut for ImageSwapchainCreateInfoKHRBuilder<'a> { + fn deref_mut(&mut self) -> &mut Self::Target { + &mut self.inner + } +} +impl<'a> ImageSwapchainCreateInfoKHRBuilder<'a> { + pub fn swapchain(mut self, swapchain: SwapchainKHR) -> ImageSwapchainCreateInfoKHRBuilder<'a> { + self.inner.swapchain = swapchain; + self + } + #[doc = r" Calling build will **discard** all the lifetime information. Only call this if"] + #[doc = r" necessary! 
Builders implement `Deref` targeting their corresponding Vulkan struct,"] + #[doc = r" so references to builders can be passed directly to Vulkan functions."] + pub fn build(self) -> ImageSwapchainCreateInfoKHR { + self.inner + } +} +#[repr(C)] +#[derive(Copy, Clone, Debug)] +#[doc = ""] +pub struct BindImageMemorySwapchainInfoKHR { + pub s_type: StructureType, + pub p_next: *const c_void, + pub swapchain: SwapchainKHR, + pub image_index: u32, +} +impl ::std::default::Default for BindImageMemorySwapchainInfoKHR { + fn default() -> BindImageMemorySwapchainInfoKHR { + BindImageMemorySwapchainInfoKHR { + s_type: StructureType::BIND_IMAGE_MEMORY_SWAPCHAIN_INFO_KHR, + p_next: ::std::ptr::null(), + swapchain: SwapchainKHR::default(), + image_index: u32::default(), + } + } +} +impl BindImageMemorySwapchainInfoKHR { + pub fn builder<'a>() -> BindImageMemorySwapchainInfoKHRBuilder<'a> { + BindImageMemorySwapchainInfoKHRBuilder { + inner: BindImageMemorySwapchainInfoKHR::default(), + marker: ::std::marker::PhantomData, + } + } +} +#[repr(transparent)] +pub struct BindImageMemorySwapchainInfoKHRBuilder<'a> { + inner: BindImageMemorySwapchainInfoKHR, + marker: ::std::marker::PhantomData<&'a ()>, +} +unsafe impl ExtendsBindImageMemoryInfo for BindImageMemorySwapchainInfoKHRBuilder<'_> {} +unsafe impl ExtendsBindImageMemoryInfo for BindImageMemorySwapchainInfoKHR {} +impl<'a> ::std::ops::Deref for BindImageMemorySwapchainInfoKHRBuilder<'a> { + type Target = BindImageMemorySwapchainInfoKHR; + fn deref(&self) -> &Self::Target { + &self.inner + } +} +impl<'a> ::std::ops::DerefMut for BindImageMemorySwapchainInfoKHRBuilder<'a> { + fn deref_mut(&mut self) -> &mut Self::Target { + &mut self.inner + } +} +impl<'a> BindImageMemorySwapchainInfoKHRBuilder<'a> { + pub fn swapchain( + mut self, + swapchain: SwapchainKHR, + ) -> BindImageMemorySwapchainInfoKHRBuilder<'a> { + self.inner.swapchain = swapchain; + self + } + pub fn image_index(mut self, image_index: u32) -> BindImageMemorySwapchainInfoKHRBuilder<'a> { + self.inner.image_index = image_index; + self + } + #[doc = r" Calling build will **discard** all the lifetime information. Only call this if"] + #[doc = r" necessary! 
Builders implement `Deref` targeting their corresponding Vulkan struct,"]
+    #[doc = r" so references to builders can be passed directly to Vulkan functions."]
+    pub fn build(self) -> BindImageMemorySwapchainInfoKHR {
+        self.inner
+    }
+}
+#[repr(C)]
+#[derive(Copy, Clone, Debug)]
+#[doc = ""]
+pub struct AcquireNextImageInfoKHR {
+    pub s_type: StructureType,
+    pub p_next: *const c_void,
+    pub swapchain: SwapchainKHR,
+    pub timeout: u64,
+    pub semaphore: Semaphore,
+    pub fence: Fence,
+    pub device_mask: u32,
+}
+impl ::std::default::Default for AcquireNextImageInfoKHR {
+    fn default() -> AcquireNextImageInfoKHR {
+        AcquireNextImageInfoKHR {
+            s_type: StructureType::ACQUIRE_NEXT_IMAGE_INFO_KHR,
+            p_next: ::std::ptr::null(),
+            swapchain: SwapchainKHR::default(),
+            timeout: u64::default(),
+            semaphore: Semaphore::default(),
+            fence: Fence::default(),
+            device_mask: u32::default(),
+        }
+    }
+}
+impl AcquireNextImageInfoKHR {
+    pub fn builder<'a>() -> AcquireNextImageInfoKHRBuilder<'a> {
+        AcquireNextImageInfoKHRBuilder {
+            inner: AcquireNextImageInfoKHR::default(),
+            marker: ::std::marker::PhantomData,
+        }
+    }
+}
+#[repr(transparent)]
+pub struct AcquireNextImageInfoKHRBuilder<'a> {
+    inner: AcquireNextImageInfoKHR,
+    marker: ::std::marker::PhantomData<&'a ()>,
+}
+pub unsafe trait ExtendsAcquireNextImageInfoKHR {}
+impl<'a> ::std::ops::Deref for AcquireNextImageInfoKHRBuilder<'a> {
+    type Target = AcquireNextImageInfoKHR;
+    fn deref(&self) -> &Self::Target {
+        &self.inner
+    }
+}
+impl<'a> ::std::ops::DerefMut for AcquireNextImageInfoKHRBuilder<'a> {
+    fn deref_mut(&mut self) -> &mut Self::Target {
+        &mut self.inner
+    }
+}
+impl<'a> AcquireNextImageInfoKHRBuilder<'a> {
+    pub fn swapchain(mut self, swapchain: SwapchainKHR) -> AcquireNextImageInfoKHRBuilder<'a> {
+        self.inner.swapchain = swapchain;
+        self
+    }
+    pub fn timeout(mut self, timeout: u64) -> AcquireNextImageInfoKHRBuilder<'a> {
+        self.inner.timeout = timeout;
+        self
+    }
+    pub fn semaphore(mut self, semaphore: Semaphore) -> AcquireNextImageInfoKHRBuilder<'a> {
+        self.inner.semaphore = semaphore;
+        self
+    }
+    pub fn fence(mut self, fence: Fence) -> AcquireNextImageInfoKHRBuilder<'a> {
+        self.inner.fence = fence;
+        self
+    }
+    pub fn device_mask(mut self, device_mask: u32) -> AcquireNextImageInfoKHRBuilder<'a> {
+        self.inner.device_mask = device_mask;
+        self
+    }
+    #[doc = r" Prepends the given extension struct between the root and the first pointer. This"]
+    #[doc = r" method only exists on structs that can be passed to a function directly. Only"]
+    #[doc = r" valid extension structs can be pushed into the chain."]
+    #[doc = r" If the chain looks like `A -> B -> C`, and you call `builder.push_next(&mut D)`, then the"]
+    #[doc = r" chain will look like `A -> D -> B -> C`."]
+    pub fn push_next<T: ExtendsAcquireNextImageInfoKHR>(
+        mut self,
+        next: &'a mut T,
+    ) -> AcquireNextImageInfoKHRBuilder<'a> {
+        unsafe {
+            let next_ptr = next as *mut T as *mut BaseOutStructure;
+            let last_next = ptr_chain_iter(next).last().unwrap();
+            (*last_next).p_next = self.inner.p_next as _;
+            self.inner.p_next = next_ptr as _;
+        }
+        self
+    }
+    #[doc = r" Calling build will **discard** all the lifetime information. Only call this if"]
+    #[doc = r" necessary! Builders implement `Deref` targeting their corresponding Vulkan struct,"]
+    #[doc = r" so references to builders can be passed directly to Vulkan functions."]
+    pub fn build(self) -> AcquireNextImageInfoKHR {
+        self.inner
+    }
+}
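// ---------------------------------------------------------------------------
// Editorial usage sketch, not part of the vendored diff: because the builder
// derefs to AcquireNextImageInfoKHR, `&*builder` can be handed straight to an
// entry point expecting `&AcquireNextImageInfoKHR`, with no `.build()` call
// and no loss of lifetime tracking. `acquire_next_image2` stands in for a
// loaded vkAcquireNextImage2KHR wrapper, and `swapchain` / `acquire_semaphore`
// are assumed handles; none of these are defined in this file.
let info = AcquireNextImageInfoKHR::builder()
    .swapchain(swapchain)
    .timeout(u64::max_value())
    .semaphore(acquire_semaphore)
    .device_mask(1);
// `&*info` coerces the builder into a plain &AcquireNextImageInfoKHR.
let image_index = unsafe { acquire_next_image2(&*info) };
// ---------------------------------------------------------------------------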
+#[repr(C)]
+#[derive(Copy, Clone, Debug)]
+#[doc = ""]
+pub struct DeviceGroupPresentInfoKHR {
+    pub s_type: StructureType,
+    pub p_next: *const c_void,
+    pub swapchain_count: u32,
+    pub p_device_masks: *const u32,
+    pub mode: DeviceGroupPresentModeFlagsKHR,
+}
+impl ::std::default::Default for DeviceGroupPresentInfoKHR {
+    fn default() -> DeviceGroupPresentInfoKHR {
+        DeviceGroupPresentInfoKHR {
+            s_type: StructureType::DEVICE_GROUP_PRESENT_INFO_KHR,
+            p_next: ::std::ptr::null(),
+            swapchain_count: u32::default(),
+            p_device_masks: ::std::ptr::null(),
+            mode: DeviceGroupPresentModeFlagsKHR::default(),
+        }
+    }
+}
+impl DeviceGroupPresentInfoKHR {
+    pub fn builder<'a>() -> DeviceGroupPresentInfoKHRBuilder<'a> {
+        DeviceGroupPresentInfoKHRBuilder {
+            inner: DeviceGroupPresentInfoKHR::default(),
+            marker: ::std::marker::PhantomData,
+        }
+    }
+}
+#[repr(transparent)]
+pub struct DeviceGroupPresentInfoKHRBuilder<'a> {
+    inner: DeviceGroupPresentInfoKHR,
+    marker: ::std::marker::PhantomData<&'a ()>,
+}
+unsafe impl ExtendsPresentInfoKHR for DeviceGroupPresentInfoKHRBuilder<'_> {}
+unsafe impl ExtendsPresentInfoKHR for DeviceGroupPresentInfoKHR {}
+impl<'a> ::std::ops::Deref for DeviceGroupPresentInfoKHRBuilder<'a> {
+    type Target = DeviceGroupPresentInfoKHR;
+    fn deref(&self) -> &Self::Target {
+        &self.inner
+    }
+}
+impl<'a> ::std::ops::DerefMut for DeviceGroupPresentInfoKHRBuilder<'a> {
+    fn deref_mut(&mut self) -> &mut Self::Target {
+        &mut self.inner
+    }
+}
+impl<'a> DeviceGroupPresentInfoKHRBuilder<'a> {
+    pub fn device_masks(mut self, device_masks: &'a [u32]) -> DeviceGroupPresentInfoKHRBuilder<'a> {
+        self.inner.swapchain_count = device_masks.len() as _;
+        self.inner.p_device_masks = device_masks.as_ptr();
+        self
+    }
+    pub fn mode(
+        mut self,
+        mode: DeviceGroupPresentModeFlagsKHR,
+    ) -> DeviceGroupPresentInfoKHRBuilder<'a> {
+        self.inner.mode = mode;
+        self
+    }
+    #[doc = r" Calling build will **discard** all the lifetime information. Only call this if"]
+    #[doc = r" necessary! Builders implement `Deref` targeting their corresponding Vulkan struct,"]
+    #[doc = r" so references to builders can be passed directly to Vulkan functions."]
+    pub fn build(self) -> DeviceGroupPresentInfoKHR {
+        self.inner
+    }
+}
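// ---------------------------------------------------------------------------
// Editorial usage sketch, not part of the vendored diff: slice setters such
// as `device_masks` above store a count plus a raw pointer, and the builder's
// `'a` lifetime keeps the backing storage borrowed. `.build()` erases that
// lifetime (the warning in the generated docs), so the caller must keep the
// slice alive for as long as the built struct is read. The `LOCAL` flag
// constant is assumed from the generated flag definitions.
let device_masks = vec![1u32, 2];
let info = DeviceGroupPresentInfoKHR::builder()
    .device_masks(&device_masks) // writes swapchain_count = 2 and p_device_masks
    .mode(DeviceGroupPresentModeFlagsKHR::LOCAL)
    .build();
// `info.p_device_masks` now points into `device_masks`; dropping the Vec
// while `info` is still handed to Vulkan would leave a dangling pointer.
// ---------------------------------------------------------------------------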
+#[repr(C)]
+#[derive(Copy, Clone, Debug)]
+#[doc = ""]
+pub struct DeviceGroupDeviceCreateInfo {
+    pub s_type: StructureType,
+    pub p_next: *const c_void,
+    pub physical_device_count: u32,
+    pub p_physical_devices: *const PhysicalDevice,
+}
+impl ::std::default::Default for DeviceGroupDeviceCreateInfo {
+    fn default() -> DeviceGroupDeviceCreateInfo {
+        DeviceGroupDeviceCreateInfo {
+            s_type: StructureType::DEVICE_GROUP_DEVICE_CREATE_INFO,
+            p_next: ::std::ptr::null(),
+            physical_device_count: u32::default(),
+            p_physical_devices: ::std::ptr::null(),
+        }
+    }
+}
+impl DeviceGroupDeviceCreateInfo {
+    pub fn builder<'a>() -> DeviceGroupDeviceCreateInfoBuilder<'a> {
+        DeviceGroupDeviceCreateInfoBuilder {
+            inner: DeviceGroupDeviceCreateInfo::default(),
+            marker: ::std::marker::PhantomData,
+        }
+    }
+}
+#[repr(transparent)]
+pub struct DeviceGroupDeviceCreateInfoBuilder<'a> {
+    inner: DeviceGroupDeviceCreateInfo,
+    marker: ::std::marker::PhantomData<&'a ()>,
+}
+unsafe impl ExtendsDeviceCreateInfo for DeviceGroupDeviceCreateInfoBuilder<'_> {}
+unsafe impl ExtendsDeviceCreateInfo for DeviceGroupDeviceCreateInfo {}
+impl<'a> ::std::ops::Deref for DeviceGroupDeviceCreateInfoBuilder<'a> {
+    type Target = DeviceGroupDeviceCreateInfo;
+    fn deref(&self) -> &Self::Target {
+        &self.inner
+    }
+}
+impl<'a> ::std::ops::DerefMut for DeviceGroupDeviceCreateInfoBuilder<'a> {
+    fn deref_mut(&mut self) -> &mut Self::Target {
+        &mut self.inner
+    }
+}
+impl<'a> DeviceGroupDeviceCreateInfoBuilder<'a> {
+    pub fn physical_devices(
+        mut self,
+        physical_devices: &'a [PhysicalDevice],
+    ) -> DeviceGroupDeviceCreateInfoBuilder<'a> {
+        self.inner.physical_device_count = physical_devices.len() as _;
+        self.inner.p_physical_devices = physical_devices.as_ptr();
+        self
+    }
+    #[doc = r" Calling build will **discard** all the lifetime information. Only call this if"]
+    #[doc = r" necessary! 
Builders implement `Deref` targeting their corresponding Vulkan struct,"] + #[doc = r" so references to builders can be passed directly to Vulkan functions."] + pub fn build(self) -> DeviceGroupDeviceCreateInfo { + self.inner + } +} +#[repr(C)] +#[derive(Copy, Clone, Debug)] +#[doc = ""] +pub struct DeviceGroupSwapchainCreateInfoKHR { + pub s_type: StructureType, + pub p_next: *const c_void, + pub modes: DeviceGroupPresentModeFlagsKHR, +} +impl ::std::default::Default for DeviceGroupSwapchainCreateInfoKHR { + fn default() -> DeviceGroupSwapchainCreateInfoKHR { + DeviceGroupSwapchainCreateInfoKHR { + s_type: StructureType::DEVICE_GROUP_SWAPCHAIN_CREATE_INFO_KHR, + p_next: ::std::ptr::null(), + modes: DeviceGroupPresentModeFlagsKHR::default(), + } + } +} +impl DeviceGroupSwapchainCreateInfoKHR { + pub fn builder<'a>() -> DeviceGroupSwapchainCreateInfoKHRBuilder<'a> { + DeviceGroupSwapchainCreateInfoKHRBuilder { + inner: DeviceGroupSwapchainCreateInfoKHR::default(), + marker: ::std::marker::PhantomData, + } + } +} +#[repr(transparent)] +pub struct DeviceGroupSwapchainCreateInfoKHRBuilder<'a> { + inner: DeviceGroupSwapchainCreateInfoKHR, + marker: ::std::marker::PhantomData<&'a ()>, +} +unsafe impl ExtendsSwapchainCreateInfoKHR for DeviceGroupSwapchainCreateInfoKHRBuilder<'_> {} +unsafe impl ExtendsSwapchainCreateInfoKHR for DeviceGroupSwapchainCreateInfoKHR {} +impl<'a> ::std::ops::Deref for DeviceGroupSwapchainCreateInfoKHRBuilder<'a> { + type Target = DeviceGroupSwapchainCreateInfoKHR; + fn deref(&self) -> &Self::Target { + &self.inner + } +} +impl<'a> ::std::ops::DerefMut for DeviceGroupSwapchainCreateInfoKHRBuilder<'a> { + fn deref_mut(&mut self) -> &mut Self::Target { + &mut self.inner + } +} +impl<'a> DeviceGroupSwapchainCreateInfoKHRBuilder<'a> { + pub fn modes( + mut self, + modes: DeviceGroupPresentModeFlagsKHR, + ) -> DeviceGroupSwapchainCreateInfoKHRBuilder<'a> { + self.inner.modes = modes; + self + } + #[doc = r" Calling build will **discard** all the lifetime information. Only call this if"] + #[doc = r" necessary! 
Builders implement `Deref` targeting their corresponding Vulkan struct,"] + #[doc = r" so references to builders can be passed directly to Vulkan functions."] + pub fn build(self) -> DeviceGroupSwapchainCreateInfoKHR { + self.inner + } +} +#[repr(C)] +#[derive(Copy, Clone, Default, Debug)] +#[doc = ""] +pub struct DescriptorUpdateTemplateEntry { + pub dst_binding: u32, + pub dst_array_element: u32, + pub descriptor_count: u32, + pub descriptor_type: DescriptorType, + pub offset: usize, + pub stride: usize, +} +impl DescriptorUpdateTemplateEntry { + pub fn builder<'a>() -> DescriptorUpdateTemplateEntryBuilder<'a> { + DescriptorUpdateTemplateEntryBuilder { + inner: DescriptorUpdateTemplateEntry::default(), + marker: ::std::marker::PhantomData, + } + } +} +#[repr(transparent)] +pub struct DescriptorUpdateTemplateEntryBuilder<'a> { + inner: DescriptorUpdateTemplateEntry, + marker: ::std::marker::PhantomData<&'a ()>, +} +impl<'a> ::std::ops::Deref for DescriptorUpdateTemplateEntryBuilder<'a> { + type Target = DescriptorUpdateTemplateEntry; + fn deref(&self) -> &Self::Target { + &self.inner + } +} +impl<'a> ::std::ops::DerefMut for DescriptorUpdateTemplateEntryBuilder<'a> { + fn deref_mut(&mut self) -> &mut Self::Target { + &mut self.inner + } +} +impl<'a> DescriptorUpdateTemplateEntryBuilder<'a> { + pub fn dst_binding(mut self, dst_binding: u32) -> DescriptorUpdateTemplateEntryBuilder<'a> { + self.inner.dst_binding = dst_binding; + self + } + pub fn dst_array_element( + mut self, + dst_array_element: u32, + ) -> DescriptorUpdateTemplateEntryBuilder<'a> { + self.inner.dst_array_element = dst_array_element; + self + } + pub fn descriptor_count( + mut self, + descriptor_count: u32, + ) -> DescriptorUpdateTemplateEntryBuilder<'a> { + self.inner.descriptor_count = descriptor_count; + self + } + pub fn descriptor_type( + mut self, + descriptor_type: DescriptorType, + ) -> DescriptorUpdateTemplateEntryBuilder<'a> { + self.inner.descriptor_type = descriptor_type; + self + } + pub fn offset(mut self, offset: usize) -> DescriptorUpdateTemplateEntryBuilder<'a> { + self.inner.offset = offset; + self + } + pub fn stride(mut self, stride: usize) -> DescriptorUpdateTemplateEntryBuilder<'a> { + self.inner.stride = stride; + self + } + #[doc = r" Calling build will **discard** all the lifetime information. Only call this if"] + #[doc = r" necessary! 
Builders implement `Deref` targeting their corresponding Vulkan struct,"] + #[doc = r" so references to builders can be passed directly to Vulkan functions."] + pub fn build(self) -> DescriptorUpdateTemplateEntry { + self.inner + } +} +#[repr(C)] +#[derive(Copy, Clone, Debug)] +#[doc = ""] +pub struct DescriptorUpdateTemplateCreateInfo { + pub s_type: StructureType, + pub p_next: *const c_void, + pub flags: DescriptorUpdateTemplateCreateFlags, + pub descriptor_update_entry_count: u32, + pub p_descriptor_update_entries: *const DescriptorUpdateTemplateEntry, + pub template_type: DescriptorUpdateTemplateType, + pub descriptor_set_layout: DescriptorSetLayout, + pub pipeline_bind_point: PipelineBindPoint, + pub pipeline_layout: PipelineLayout, + pub set: u32, +} +impl ::std::default::Default for DescriptorUpdateTemplateCreateInfo { + fn default() -> DescriptorUpdateTemplateCreateInfo { + DescriptorUpdateTemplateCreateInfo { + s_type: StructureType::DESCRIPTOR_UPDATE_TEMPLATE_CREATE_INFO, + p_next: ::std::ptr::null(), + flags: DescriptorUpdateTemplateCreateFlags::default(), + descriptor_update_entry_count: u32::default(), + p_descriptor_update_entries: ::std::ptr::null(), + template_type: DescriptorUpdateTemplateType::default(), + descriptor_set_layout: DescriptorSetLayout::default(), + pipeline_bind_point: PipelineBindPoint::default(), + pipeline_layout: PipelineLayout::default(), + set: u32::default(), + } + } +} +impl DescriptorUpdateTemplateCreateInfo { + pub fn builder<'a>() -> DescriptorUpdateTemplateCreateInfoBuilder<'a> { + DescriptorUpdateTemplateCreateInfoBuilder { + inner: DescriptorUpdateTemplateCreateInfo::default(), + marker: ::std::marker::PhantomData, + } + } +} +#[repr(transparent)] +pub struct DescriptorUpdateTemplateCreateInfoBuilder<'a> { + inner: DescriptorUpdateTemplateCreateInfo, + marker: ::std::marker::PhantomData<&'a ()>, +} +pub unsafe trait ExtendsDescriptorUpdateTemplateCreateInfo {} +impl<'a> ::std::ops::Deref for DescriptorUpdateTemplateCreateInfoBuilder<'a> { + type Target = DescriptorUpdateTemplateCreateInfo; + fn deref(&self) -> &Self::Target { + &self.inner + } +} +impl<'a> ::std::ops::DerefMut for DescriptorUpdateTemplateCreateInfoBuilder<'a> { + fn deref_mut(&mut self) -> &mut Self::Target { + &mut self.inner + } +} +impl<'a> DescriptorUpdateTemplateCreateInfoBuilder<'a> { + pub fn flags( + mut self, + flags: DescriptorUpdateTemplateCreateFlags, + ) -> DescriptorUpdateTemplateCreateInfoBuilder<'a> { + self.inner.flags = flags; + self + } + pub fn descriptor_update_entries( + mut self, + descriptor_update_entries: &'a [DescriptorUpdateTemplateEntry], + ) -> DescriptorUpdateTemplateCreateInfoBuilder<'a> { + self.inner.descriptor_update_entry_count = descriptor_update_entries.len() as _; + self.inner.p_descriptor_update_entries = descriptor_update_entries.as_ptr(); + self + } + pub fn template_type( + mut self, + template_type: DescriptorUpdateTemplateType, + ) -> DescriptorUpdateTemplateCreateInfoBuilder<'a> { + self.inner.template_type = template_type; + self + } + pub fn descriptor_set_layout( + mut self, + descriptor_set_layout: DescriptorSetLayout, + ) -> DescriptorUpdateTemplateCreateInfoBuilder<'a> { + self.inner.descriptor_set_layout = descriptor_set_layout; + self + } + pub fn pipeline_bind_point( + mut self, + pipeline_bind_point: PipelineBindPoint, + ) -> DescriptorUpdateTemplateCreateInfoBuilder<'a> { + self.inner.pipeline_bind_point = pipeline_bind_point; + self + } + pub fn pipeline_layout( + mut self, + pipeline_layout: PipelineLayout, + ) -> 
DescriptorUpdateTemplateCreateInfoBuilder<'a> {
+        self.inner.pipeline_layout = pipeline_layout;
+        self
+    }
+    pub fn set(mut self, set: u32) -> DescriptorUpdateTemplateCreateInfoBuilder<'a> {
+        self.inner.set = set;
+        self
+    }
+    #[doc = r" Prepends the given extension struct between the root and the first pointer. This"]
+    #[doc = r" method only exists on structs that can be passed to a function directly. Only"]
+    #[doc = r" valid extension structs can be pushed into the chain."]
+    #[doc = r" If the chain looks like `A -> B -> C`, and you call `builder.push_next(&mut D)`, then the"]
+    #[doc = r" chain will look like `A -> D -> B -> C`."]
+    pub fn push_next<T: ExtendsDescriptorUpdateTemplateCreateInfo>(
+        mut self,
+        next: &'a mut T,
+    ) -> DescriptorUpdateTemplateCreateInfoBuilder<'a> {
+        unsafe {
+            let next_ptr = next as *mut T as *mut BaseOutStructure;
+            let last_next = ptr_chain_iter(next).last().unwrap();
+            (*last_next).p_next = self.inner.p_next as _;
+            self.inner.p_next = next_ptr as _;
+        }
+        self
+    }
+    #[doc = r" Calling build will **discard** all the lifetime information. Only call this if"]
+    #[doc = r" necessary! Builders implement `Deref` targeting their corresponding Vulkan struct,"]
+    #[doc = r" so references to builders can be passed directly to Vulkan functions."]
+    pub fn build(self) -> DescriptorUpdateTemplateCreateInfo {
+        self.inner
+    }
+}
+#[repr(C)]
+#[derive(Copy, Clone, Default, Debug)]
+#[doc = ""]
+pub struct XYColorEXT {
+    pub x: f32,
+    pub y: f32,
+}
+impl XYColorEXT {
+    pub fn builder<'a>() -> XYColorEXTBuilder<'a> {
+        XYColorEXTBuilder {
+            inner: XYColorEXT::default(),
+            marker: ::std::marker::PhantomData,
+        }
+    }
+}
+#[repr(transparent)]
+pub struct XYColorEXTBuilder<'a> {
+    inner: XYColorEXT,
+    marker: ::std::marker::PhantomData<&'a ()>,
+}
+impl<'a> ::std::ops::Deref for XYColorEXTBuilder<'a> {
+    type Target = XYColorEXT;
+    fn deref(&self) -> &Self::Target {
+        &self.inner
+    }
+}
+impl<'a> ::std::ops::DerefMut for XYColorEXTBuilder<'a> {
+    fn deref_mut(&mut self) -> &mut Self::Target {
+        &mut self.inner
+    }
+}
+impl<'a> XYColorEXTBuilder<'a> {
+    pub fn x(mut self, x: f32) -> XYColorEXTBuilder<'a> {
+        self.inner.x = x;
+        self
+    }
+    pub fn y(mut self, y: f32) -> XYColorEXTBuilder<'a> {
+        self.inner.y = y;
+        self
+    }
+    #[doc = r" Calling build will **discard** all the lifetime information. Only call this if"]
+    #[doc = r" necessary! 
Builders implement `Deref` targeting their corresponding Vulkan struct,"] + #[doc = r" so references to builders can be passed directly to Vulkan functions."] + pub fn build(self) -> XYColorEXT { + self.inner + } +} +#[repr(C)] +#[derive(Copy, Clone, Debug)] +#[doc = ""] +pub struct HdrMetadataEXT { + pub s_type: StructureType, + pub p_next: *const c_void, + pub display_primary_red: XYColorEXT, + pub display_primary_green: XYColorEXT, + pub display_primary_blue: XYColorEXT, + pub white_point: XYColorEXT, + pub max_luminance: f32, + pub min_luminance: f32, + pub max_content_light_level: f32, + pub max_frame_average_light_level: f32, +} +impl ::std::default::Default for HdrMetadataEXT { + fn default() -> HdrMetadataEXT { + HdrMetadataEXT { + s_type: StructureType::HDR_METADATA_EXT, + p_next: ::std::ptr::null(), + display_primary_red: XYColorEXT::default(), + display_primary_green: XYColorEXT::default(), + display_primary_blue: XYColorEXT::default(), + white_point: XYColorEXT::default(), + max_luminance: f32::default(), + min_luminance: f32::default(), + max_content_light_level: f32::default(), + max_frame_average_light_level: f32::default(), + } + } +} +impl HdrMetadataEXT { + pub fn builder<'a>() -> HdrMetadataEXTBuilder<'a> { + HdrMetadataEXTBuilder { + inner: HdrMetadataEXT::default(), + marker: ::std::marker::PhantomData, + } + } +} +#[repr(transparent)] +pub struct HdrMetadataEXTBuilder<'a> { + inner: HdrMetadataEXT, + marker: ::std::marker::PhantomData<&'a ()>, +} +pub unsafe trait ExtendsHdrMetadataEXT {} +impl<'a> ::std::ops::Deref for HdrMetadataEXTBuilder<'a> { + type Target = HdrMetadataEXT; + fn deref(&self) -> &Self::Target { + &self.inner + } +} +impl<'a> ::std::ops::DerefMut for HdrMetadataEXTBuilder<'a> { + fn deref_mut(&mut self) -> &mut Self::Target { + &mut self.inner + } +} +impl<'a> HdrMetadataEXTBuilder<'a> { + pub fn display_primary_red( + mut self, + display_primary_red: XYColorEXT, + ) -> HdrMetadataEXTBuilder<'a> { + self.inner.display_primary_red = display_primary_red; + self + } + pub fn display_primary_green( + mut self, + display_primary_green: XYColorEXT, + ) -> HdrMetadataEXTBuilder<'a> { + self.inner.display_primary_green = display_primary_green; + self + } + pub fn display_primary_blue( + mut self, + display_primary_blue: XYColorEXT, + ) -> HdrMetadataEXTBuilder<'a> { + self.inner.display_primary_blue = display_primary_blue; + self + } + pub fn white_point(mut self, white_point: XYColorEXT) -> HdrMetadataEXTBuilder<'a> { + self.inner.white_point = white_point; + self + } + pub fn max_luminance(mut self, max_luminance: f32) -> HdrMetadataEXTBuilder<'a> { + self.inner.max_luminance = max_luminance; + self + } + pub fn min_luminance(mut self, min_luminance: f32) -> HdrMetadataEXTBuilder<'a> { + self.inner.min_luminance = min_luminance; + self + } + pub fn max_content_light_level( + mut self, + max_content_light_level: f32, + ) -> HdrMetadataEXTBuilder<'a> { + self.inner.max_content_light_level = max_content_light_level; + self + } + pub fn max_frame_average_light_level( + mut self, + max_frame_average_light_level: f32, + ) -> HdrMetadataEXTBuilder<'a> { + self.inner.max_frame_average_light_level = max_frame_average_light_level; + self + } + #[doc = r" Prepends the given extension struct between the root and the first pointer. This"] + #[doc = r" method only exists on structs that can be passed to a function directly. 
Only"] + #[doc = r" valid extension structs can be pushed into the chain."] + #[doc = r" If the chain looks like `A -> B -> C`, and you call `builder.push_next(&mut D)`, then the"] + #[doc = r" chain will look like `A -> D -> B -> C`."] + pub fn push_next( + mut self, + next: &'a mut T, + ) -> HdrMetadataEXTBuilder<'a> { + unsafe { + let next_ptr = next as *mut T as *mut BaseOutStructure; + let last_next = ptr_chain_iter(next).last().unwrap(); + (*last_next).p_next = self.inner.p_next as _; + self.inner.p_next = next_ptr as _; + } + self + } + #[doc = r" Calling build will **discard** all the lifetime information. Only call this if"] + #[doc = r" necessary! Builders implement `Deref` targeting their corresponding Vulkan struct,"] + #[doc = r" so references to builders can be passed directly to Vulkan functions."] + pub fn build(self) -> HdrMetadataEXT { + self.inner + } +} +#[repr(C)] +#[derive(Copy, Clone, Default, Debug)] +#[doc = ""] +pub struct RefreshCycleDurationGOOGLE { + pub refresh_duration: u64, +} +impl RefreshCycleDurationGOOGLE { + pub fn builder<'a>() -> RefreshCycleDurationGOOGLEBuilder<'a> { + RefreshCycleDurationGOOGLEBuilder { + inner: RefreshCycleDurationGOOGLE::default(), + marker: ::std::marker::PhantomData, + } + } +} +#[repr(transparent)] +pub struct RefreshCycleDurationGOOGLEBuilder<'a> { + inner: RefreshCycleDurationGOOGLE, + marker: ::std::marker::PhantomData<&'a ()>, +} +impl<'a> ::std::ops::Deref for RefreshCycleDurationGOOGLEBuilder<'a> { + type Target = RefreshCycleDurationGOOGLE; + fn deref(&self) -> &Self::Target { + &self.inner + } +} +impl<'a> ::std::ops::DerefMut for RefreshCycleDurationGOOGLEBuilder<'a> { + fn deref_mut(&mut self) -> &mut Self::Target { + &mut self.inner + } +} +impl<'a> RefreshCycleDurationGOOGLEBuilder<'a> { + pub fn refresh_duration( + mut self, + refresh_duration: u64, + ) -> RefreshCycleDurationGOOGLEBuilder<'a> { + self.inner.refresh_duration = refresh_duration; + self + } + #[doc = r" Calling build will **discard** all the lifetime information. Only call this if"] + #[doc = r" necessary! 
Builders implement `Deref` targeting their corresponding Vulkan struct,"] + #[doc = r" so references to builders can be passed directly to Vulkan functions."] + pub fn build(self) -> RefreshCycleDurationGOOGLE { + self.inner + } +} +#[repr(C)] +#[derive(Copy, Clone, Default, Debug)] +#[doc = ""] +pub struct PastPresentationTimingGOOGLE { + pub present_id: u32, + pub desired_present_time: u64, + pub actual_present_time: u64, + pub earliest_present_time: u64, + pub present_margin: u64, +} +impl PastPresentationTimingGOOGLE { + pub fn builder<'a>() -> PastPresentationTimingGOOGLEBuilder<'a> { + PastPresentationTimingGOOGLEBuilder { + inner: PastPresentationTimingGOOGLE::default(), + marker: ::std::marker::PhantomData, + } + } +} +#[repr(transparent)] +pub struct PastPresentationTimingGOOGLEBuilder<'a> { + inner: PastPresentationTimingGOOGLE, + marker: ::std::marker::PhantomData<&'a ()>, +} +impl<'a> ::std::ops::Deref for PastPresentationTimingGOOGLEBuilder<'a> { + type Target = PastPresentationTimingGOOGLE; + fn deref(&self) -> &Self::Target { + &self.inner + } +} +impl<'a> ::std::ops::DerefMut for PastPresentationTimingGOOGLEBuilder<'a> { + fn deref_mut(&mut self) -> &mut Self::Target { + &mut self.inner + } +} +impl<'a> PastPresentationTimingGOOGLEBuilder<'a> { + pub fn present_id(mut self, present_id: u32) -> PastPresentationTimingGOOGLEBuilder<'a> { + self.inner.present_id = present_id; + self + } + pub fn desired_present_time( + mut self, + desired_present_time: u64, + ) -> PastPresentationTimingGOOGLEBuilder<'a> { + self.inner.desired_present_time = desired_present_time; + self + } + pub fn actual_present_time( + mut self, + actual_present_time: u64, + ) -> PastPresentationTimingGOOGLEBuilder<'a> { + self.inner.actual_present_time = actual_present_time; + self + } + pub fn earliest_present_time( + mut self, + earliest_present_time: u64, + ) -> PastPresentationTimingGOOGLEBuilder<'a> { + self.inner.earliest_present_time = earliest_present_time; + self + } + pub fn present_margin( + mut self, + present_margin: u64, + ) -> PastPresentationTimingGOOGLEBuilder<'a> { + self.inner.present_margin = present_margin; + self + } + #[doc = r" Calling build will **discard** all the lifetime information. Only call this if"] + #[doc = r" necessary! 
Builders implement `Deref` targeting their corresponding Vulkan struct,"] + #[doc = r" so references to builders can be passed directly to Vulkan functions."] + pub fn build(self) -> PastPresentationTimingGOOGLE { + self.inner + } +} +#[repr(C)] +#[derive(Copy, Clone, Debug)] +#[doc = ""] +pub struct PresentTimesInfoGOOGLE { + pub s_type: StructureType, + pub p_next: *const c_void, + pub swapchain_count: u32, + pub p_times: *const PresentTimeGOOGLE, +} +impl ::std::default::Default for PresentTimesInfoGOOGLE { + fn default() -> PresentTimesInfoGOOGLE { + PresentTimesInfoGOOGLE { + s_type: StructureType::PRESENT_TIMES_INFO_GOOGLE, + p_next: ::std::ptr::null(), + swapchain_count: u32::default(), + p_times: ::std::ptr::null(), + } + } +} +impl PresentTimesInfoGOOGLE { + pub fn builder<'a>() -> PresentTimesInfoGOOGLEBuilder<'a> { + PresentTimesInfoGOOGLEBuilder { + inner: PresentTimesInfoGOOGLE::default(), + marker: ::std::marker::PhantomData, + } + } +} +#[repr(transparent)] +pub struct PresentTimesInfoGOOGLEBuilder<'a> { + inner: PresentTimesInfoGOOGLE, + marker: ::std::marker::PhantomData<&'a ()>, +} +unsafe impl ExtendsPresentInfoKHR for PresentTimesInfoGOOGLEBuilder<'_> {} +unsafe impl ExtendsPresentInfoKHR for PresentTimesInfoGOOGLE {} +impl<'a> ::std::ops::Deref for PresentTimesInfoGOOGLEBuilder<'a> { + type Target = PresentTimesInfoGOOGLE; + fn deref(&self) -> &Self::Target { + &self.inner + } +} +impl<'a> ::std::ops::DerefMut for PresentTimesInfoGOOGLEBuilder<'a> { + fn deref_mut(&mut self) -> &mut Self::Target { + &mut self.inner + } +} +impl<'a> PresentTimesInfoGOOGLEBuilder<'a> { + pub fn times(mut self, times: &'a [PresentTimeGOOGLE]) -> PresentTimesInfoGOOGLEBuilder<'a> { + self.inner.swapchain_count = times.len() as _; + self.inner.p_times = times.as_ptr(); + self + } + #[doc = r" Calling build will **discard** all the lifetime information. Only call this if"] + #[doc = r" necessary! Builders implement `Deref` targeting their corresponding Vulkan struct,"] + #[doc = r" so references to builders can be passed directly to Vulkan functions."] + pub fn build(self) -> PresentTimesInfoGOOGLE { + self.inner + } +} +#[repr(C)] +#[derive(Copy, Clone, Default, Debug)] +#[doc = ""] +pub struct PresentTimeGOOGLE { + pub present_id: u32, + pub desired_present_time: u64, +} +impl PresentTimeGOOGLE { + pub fn builder<'a>() -> PresentTimeGOOGLEBuilder<'a> { + PresentTimeGOOGLEBuilder { + inner: PresentTimeGOOGLE::default(), + marker: ::std::marker::PhantomData, + } + } +} +#[repr(transparent)] +pub struct PresentTimeGOOGLEBuilder<'a> { + inner: PresentTimeGOOGLE, + marker: ::std::marker::PhantomData<&'a ()>, +} +impl<'a> ::std::ops::Deref for PresentTimeGOOGLEBuilder<'a> { + type Target = PresentTimeGOOGLE; + fn deref(&self) -> &Self::Target { + &self.inner + } +} +impl<'a> ::std::ops::DerefMut for PresentTimeGOOGLEBuilder<'a> { + fn deref_mut(&mut self) -> &mut Self::Target { + &mut self.inner + } +} +impl<'a> PresentTimeGOOGLEBuilder<'a> { + pub fn present_id(mut self, present_id: u32) -> PresentTimeGOOGLEBuilder<'a> { + self.inner.present_id = present_id; + self + } + pub fn desired_present_time( + mut self, + desired_present_time: u64, + ) -> PresentTimeGOOGLEBuilder<'a> { + self.inner.desired_present_time = desired_present_time; + self + } + #[doc = r" Calling build will **discard** all the lifetime information. Only call this if"] + #[doc = r" necessary! 
Builders implement `Deref` targeting their corresponding Vulkan struct,"]
+    #[doc = r" so references to builders can be passed directly to Vulkan functions."]
+    pub fn build(self) -> PresentTimeGOOGLE {
+        self.inner
+    }
+}
+#[repr(C)]
+#[derive(Copy, Clone, Debug)]
+#[doc = ""]
+pub struct IOSSurfaceCreateInfoMVK {
+    pub s_type: StructureType,
+    pub p_next: *const c_void,
+    pub flags: IOSSurfaceCreateFlagsMVK,
+    pub p_view: *const c_void,
+}
+impl ::std::default::Default for IOSSurfaceCreateInfoMVK {
+    fn default() -> IOSSurfaceCreateInfoMVK {
+        IOSSurfaceCreateInfoMVK {
+            s_type: StructureType::IOS_SURFACE_CREATE_INFO_M,
+            p_next: ::std::ptr::null(),
+            flags: IOSSurfaceCreateFlagsMVK::default(),
+            p_view: ::std::ptr::null(),
+        }
+    }
+}
+impl IOSSurfaceCreateInfoMVK {
+    pub fn builder<'a>() -> IOSSurfaceCreateInfoMVKBuilder<'a> {
+        IOSSurfaceCreateInfoMVKBuilder {
+            inner: IOSSurfaceCreateInfoMVK::default(),
+            marker: ::std::marker::PhantomData,
+        }
+    }
+}
+#[repr(transparent)]
+pub struct IOSSurfaceCreateInfoMVKBuilder<'a> {
+    inner: IOSSurfaceCreateInfoMVK,
+    marker: ::std::marker::PhantomData<&'a ()>,
+}
+pub unsafe trait ExtendsIOSSurfaceCreateInfoMVK {}
+impl<'a> ::std::ops::Deref for IOSSurfaceCreateInfoMVKBuilder<'a> {
+    type Target = IOSSurfaceCreateInfoMVK;
+    fn deref(&self) -> &Self::Target {
+        &self.inner
+    }
+}
+impl<'a> ::std::ops::DerefMut for IOSSurfaceCreateInfoMVKBuilder<'a> {
+    fn deref_mut(&mut self) -> &mut Self::Target {
+        &mut self.inner
+    }
+}
+impl<'a> IOSSurfaceCreateInfoMVKBuilder<'a> {
+    pub fn flags(mut self, flags: IOSSurfaceCreateFlagsMVK) -> IOSSurfaceCreateInfoMVKBuilder<'a> {
+        self.inner.flags = flags;
+        self
+    }
+    pub fn view(mut self, view: &'a c_void) -> IOSSurfaceCreateInfoMVKBuilder<'a> {
+        self.inner.p_view = view;
+        self
+    }
+    #[doc = r" Prepends the given extension struct between the root and the first pointer. This"]
+    #[doc = r" method only exists on structs that can be passed to a function directly. Only"]
+    #[doc = r" valid extension structs can be pushed into the chain."]
+    #[doc = r" If the chain looks like `A -> B -> C`, and you call `builder.push_next(&mut D)`, then the"]
+    #[doc = r" chain will look like `A -> D -> B -> C`."]
+    pub fn push_next<T: ExtendsIOSSurfaceCreateInfoMVK>(
+        mut self,
+        next: &'a mut T,
+    ) -> IOSSurfaceCreateInfoMVKBuilder<'a> {
+        unsafe {
+            let next_ptr = next as *mut T as *mut BaseOutStructure;
+            let last_next = ptr_chain_iter(next).last().unwrap();
+            (*last_next).p_next = self.inner.p_next as _;
+            self.inner.p_next = next_ptr as _;
+        }
+        self
+    }
+    #[doc = r" Calling build will **discard** all the lifetime information. Only call this if"]
+    #[doc = r" necessary! 
Builders implement `Deref` targeting their corresponding Vulkan struct,"]
+    #[doc = r" so references to builders can be passed directly to Vulkan functions."]
+    pub fn build(self) -> IOSSurfaceCreateInfoMVK {
+        self.inner
+    }
+}
+#[repr(C)]
+#[derive(Copy, Clone, Debug)]
+#[doc = ""]
+pub struct MacOSSurfaceCreateInfoMVK {
+    pub s_type: StructureType,
+    pub p_next: *const c_void,
+    pub flags: MacOSSurfaceCreateFlagsMVK,
+    pub p_view: *const c_void,
+}
+impl ::std::default::Default for MacOSSurfaceCreateInfoMVK {
+    fn default() -> MacOSSurfaceCreateInfoMVK {
+        MacOSSurfaceCreateInfoMVK {
+            s_type: StructureType::MACOS_SURFACE_CREATE_INFO_M,
+            p_next: ::std::ptr::null(),
+            flags: MacOSSurfaceCreateFlagsMVK::default(),
+            p_view: ::std::ptr::null(),
+        }
+    }
+}
+impl MacOSSurfaceCreateInfoMVK {
+    pub fn builder<'a>() -> MacOSSurfaceCreateInfoMVKBuilder<'a> {
+        MacOSSurfaceCreateInfoMVKBuilder {
+            inner: MacOSSurfaceCreateInfoMVK::default(),
+            marker: ::std::marker::PhantomData,
+        }
+    }
+}
+#[repr(transparent)]
+pub struct MacOSSurfaceCreateInfoMVKBuilder<'a> {
+    inner: MacOSSurfaceCreateInfoMVK,
+    marker: ::std::marker::PhantomData<&'a ()>,
+}
+pub unsafe trait ExtendsMacOSSurfaceCreateInfoMVK {}
+impl<'a> ::std::ops::Deref for MacOSSurfaceCreateInfoMVKBuilder<'a> {
+    type Target = MacOSSurfaceCreateInfoMVK;
+    fn deref(&self) -> &Self::Target {
+        &self.inner
+    }
+}
+impl<'a> ::std::ops::DerefMut for MacOSSurfaceCreateInfoMVKBuilder<'a> {
+    fn deref_mut(&mut self) -> &mut Self::Target {
+        &mut self.inner
+    }
+}
+impl<'a> MacOSSurfaceCreateInfoMVKBuilder<'a> {
+    pub fn flags(
+        mut self,
+        flags: MacOSSurfaceCreateFlagsMVK,
+    ) -> MacOSSurfaceCreateInfoMVKBuilder<'a> {
+        self.inner.flags = flags;
+        self
+    }
+    pub fn view(mut self, view: &'a c_void) -> MacOSSurfaceCreateInfoMVKBuilder<'a> {
+        self.inner.p_view = view;
+        self
+    }
+    #[doc = r" Prepends the given extension struct between the root and the first pointer. This"]
+    #[doc = r" method only exists on structs that can be passed to a function directly. Only"]
+    #[doc = r" valid extension structs can be pushed into the chain."]
+    #[doc = r" If the chain looks like `A -> B -> C`, and you call `builder.push_next(&mut D)`, then the"]
+    #[doc = r" chain will look like `A -> D -> B -> C`."]
+    pub fn push_next<T: ExtendsMacOSSurfaceCreateInfoMVK>(
+        mut self,
+        next: &'a mut T,
+    ) -> MacOSSurfaceCreateInfoMVKBuilder<'a> {
+        unsafe {
+            let next_ptr = next as *mut T as *mut BaseOutStructure;
+            let last_next = ptr_chain_iter(next).last().unwrap();
+            (*last_next).p_next = self.inner.p_next as _;
+            self.inner.p_next = next_ptr as _;
+        }
+        self
+    }
+    #[doc = r" Calling build will **discard** all the lifetime information. Only call this if"]
+    #[doc = r" necessary! 
Builders implement `Deref` targeting their corresponding Vulkan struct,"] + #[doc = r" so references to builders can be passed directly to Vulkan functions."] + pub fn build(self) -> MacOSSurfaceCreateInfoMVK { + self.inner + } +} +#[repr(C)] +#[derive(Copy, Clone, Default, Debug)] +#[doc = ""] +pub struct ViewportWScalingNV { + pub xcoeff: f32, + pub ycoeff: f32, +} +impl ViewportWScalingNV { + pub fn builder<'a>() -> ViewportWScalingNVBuilder<'a> { + ViewportWScalingNVBuilder { + inner: ViewportWScalingNV::default(), + marker: ::std::marker::PhantomData, + } + } +} +#[repr(transparent)] +pub struct ViewportWScalingNVBuilder<'a> { + inner: ViewportWScalingNV, + marker: ::std::marker::PhantomData<&'a ()>, +} +impl<'a> ::std::ops::Deref for ViewportWScalingNVBuilder<'a> { + type Target = ViewportWScalingNV; + fn deref(&self) -> &Self::Target { + &self.inner + } +} +impl<'a> ::std::ops::DerefMut for ViewportWScalingNVBuilder<'a> { + fn deref_mut(&mut self) -> &mut Self::Target { + &mut self.inner + } +} +impl<'a> ViewportWScalingNVBuilder<'a> { + pub fn xcoeff(mut self, xcoeff: f32) -> ViewportWScalingNVBuilder<'a> { + self.inner.xcoeff = xcoeff; + self + } + pub fn ycoeff(mut self, ycoeff: f32) -> ViewportWScalingNVBuilder<'a> { + self.inner.ycoeff = ycoeff; + self + } + #[doc = r" Calling build will **discard** all the lifetime information. Only call this if"] + #[doc = r" necessary! Builders implement `Deref` targeting their corresponding Vulkan struct,"] + #[doc = r" so references to builders can be passed directly to Vulkan functions."] + pub fn build(self) -> ViewportWScalingNV { + self.inner + } +} +#[repr(C)] +#[derive(Copy, Clone, Debug)] +#[doc = ""] +pub struct PipelineViewportWScalingStateCreateInfoNV { + pub s_type: StructureType, + pub p_next: *const c_void, + pub viewport_w_scaling_enable: Bool32, + pub viewport_count: u32, + pub p_viewport_w_scalings: *const ViewportWScalingNV, +} +impl ::std::default::Default for PipelineViewportWScalingStateCreateInfoNV { + fn default() -> PipelineViewportWScalingStateCreateInfoNV { + PipelineViewportWScalingStateCreateInfoNV { + s_type: StructureType::PIPELINE_VIEWPORT_W_SCALING_STATE_CREATE_INFO_NV, + p_next: ::std::ptr::null(), + viewport_w_scaling_enable: Bool32::default(), + viewport_count: u32::default(), + p_viewport_w_scalings: ::std::ptr::null(), + } + } +} +impl PipelineViewportWScalingStateCreateInfoNV { + pub fn builder<'a>() -> PipelineViewportWScalingStateCreateInfoNVBuilder<'a> { + PipelineViewportWScalingStateCreateInfoNVBuilder { + inner: PipelineViewportWScalingStateCreateInfoNV::default(), + marker: ::std::marker::PhantomData, + } + } +} +#[repr(transparent)] +pub struct PipelineViewportWScalingStateCreateInfoNVBuilder<'a> { + inner: PipelineViewportWScalingStateCreateInfoNV, + marker: ::std::marker::PhantomData<&'a ()>, +} +unsafe impl ExtendsPipelineViewportStateCreateInfo + for PipelineViewportWScalingStateCreateInfoNVBuilder<'_> +{ +} +unsafe impl ExtendsPipelineViewportStateCreateInfo for PipelineViewportWScalingStateCreateInfoNV {} +impl<'a> ::std::ops::Deref for PipelineViewportWScalingStateCreateInfoNVBuilder<'a> { + type Target = PipelineViewportWScalingStateCreateInfoNV; + fn deref(&self) -> &Self::Target { + &self.inner + } +} +impl<'a> ::std::ops::DerefMut for PipelineViewportWScalingStateCreateInfoNVBuilder<'a> { + fn deref_mut(&mut self) -> &mut Self::Target { + &mut self.inner + } +} +impl<'a> PipelineViewportWScalingStateCreateInfoNVBuilder<'a> { + pub fn viewport_w_scaling_enable( + mut self, + 
viewport_w_scaling_enable: bool, + ) -> PipelineViewportWScalingStateCreateInfoNVBuilder<'a> { + self.inner.viewport_w_scaling_enable = viewport_w_scaling_enable.into(); + self + } + pub fn viewport_w_scalings( + mut self, + viewport_w_scalings: &'a [ViewportWScalingNV], + ) -> PipelineViewportWScalingStateCreateInfoNVBuilder<'a> { + self.inner.viewport_count = viewport_w_scalings.len() as _; + self.inner.p_viewport_w_scalings = viewport_w_scalings.as_ptr(); + self + } + #[doc = r" Calling build will **discard** all the lifetime information. Only call this if"] + #[doc = r" necessary! Builders implement `Deref` targeting their corresponding Vulkan struct,"] + #[doc = r" so references to builders can be passed directly to Vulkan functions."] + pub fn build(self) -> PipelineViewportWScalingStateCreateInfoNV { + self.inner + } +} +#[repr(C)] +#[derive(Copy, Clone, Default, Debug)] +#[doc = ""] +pub struct ViewportSwizzleNV { + pub x: ViewportCoordinateSwizzleNV, + pub y: ViewportCoordinateSwizzleNV, + pub z: ViewportCoordinateSwizzleNV, + pub w: ViewportCoordinateSwizzleNV, +} +impl ViewportSwizzleNV { + pub fn builder<'a>() -> ViewportSwizzleNVBuilder<'a> { + ViewportSwizzleNVBuilder { + inner: ViewportSwizzleNV::default(), + marker: ::std::marker::PhantomData, + } + } +} +#[repr(transparent)] +pub struct ViewportSwizzleNVBuilder<'a> { + inner: ViewportSwizzleNV, + marker: ::std::marker::PhantomData<&'a ()>, +} +impl<'a> ::std::ops::Deref for ViewportSwizzleNVBuilder<'a> { + type Target = ViewportSwizzleNV; + fn deref(&self) -> &Self::Target { + &self.inner + } +} +impl<'a> ::std::ops::DerefMut for ViewportSwizzleNVBuilder<'a> { + fn deref_mut(&mut self) -> &mut Self::Target { + &mut self.inner + } +} +impl<'a> ViewportSwizzleNVBuilder<'a> { + pub fn x(mut self, x: ViewportCoordinateSwizzleNV) -> ViewportSwizzleNVBuilder<'a> { + self.inner.x = x; + self + } + pub fn y(mut self, y: ViewportCoordinateSwizzleNV) -> ViewportSwizzleNVBuilder<'a> { + self.inner.y = y; + self + } + pub fn z(mut self, z: ViewportCoordinateSwizzleNV) -> ViewportSwizzleNVBuilder<'a> { + self.inner.z = z; + self + } + pub fn w(mut self, w: ViewportCoordinateSwizzleNV) -> ViewportSwizzleNVBuilder<'a> { + self.inner.w = w; + self + } + #[doc = r" Calling build will **discard** all the lifetime information. Only call this if"] + #[doc = r" necessary! 
Builders implement `Deref` targeting their corresponding Vulkan struct,"] + #[doc = r" so references to builders can be passed directly to Vulkan functions."] + pub fn build(self) -> ViewportSwizzleNV { + self.inner + } +} +#[repr(C)] +#[derive(Copy, Clone, Debug)] +#[doc = ""] +pub struct PipelineViewportSwizzleStateCreateInfoNV { + pub s_type: StructureType, + pub p_next: *const c_void, + pub flags: PipelineViewportSwizzleStateCreateFlagsNV, + pub viewport_count: u32, + pub p_viewport_swizzles: *const ViewportSwizzleNV, +} +impl ::std::default::Default for PipelineViewportSwizzleStateCreateInfoNV { + fn default() -> PipelineViewportSwizzleStateCreateInfoNV { + PipelineViewportSwizzleStateCreateInfoNV { + s_type: StructureType::PIPELINE_VIEWPORT_SWIZZLE_STATE_CREATE_INFO_NV, + p_next: ::std::ptr::null(), + flags: PipelineViewportSwizzleStateCreateFlagsNV::default(), + viewport_count: u32::default(), + p_viewport_swizzles: ::std::ptr::null(), + } + } +} +impl PipelineViewportSwizzleStateCreateInfoNV { + pub fn builder<'a>() -> PipelineViewportSwizzleStateCreateInfoNVBuilder<'a> { + PipelineViewportSwizzleStateCreateInfoNVBuilder { + inner: PipelineViewportSwizzleStateCreateInfoNV::default(), + marker: ::std::marker::PhantomData, + } + } +} +#[repr(transparent)] +pub struct PipelineViewportSwizzleStateCreateInfoNVBuilder<'a> { + inner: PipelineViewportSwizzleStateCreateInfoNV, + marker: ::std::marker::PhantomData<&'a ()>, +} +unsafe impl ExtendsPipelineViewportStateCreateInfo + for PipelineViewportSwizzleStateCreateInfoNVBuilder<'_> +{ +} +unsafe impl ExtendsPipelineViewportStateCreateInfo for PipelineViewportSwizzleStateCreateInfoNV {} +impl<'a> ::std::ops::Deref for PipelineViewportSwizzleStateCreateInfoNVBuilder<'a> { + type Target = PipelineViewportSwizzleStateCreateInfoNV; + fn deref(&self) -> &Self::Target { + &self.inner + } +} +impl<'a> ::std::ops::DerefMut for PipelineViewportSwizzleStateCreateInfoNVBuilder<'a> { + fn deref_mut(&mut self) -> &mut Self::Target { + &mut self.inner + } +} +impl<'a> PipelineViewportSwizzleStateCreateInfoNVBuilder<'a> { + pub fn flags( + mut self, + flags: PipelineViewportSwizzleStateCreateFlagsNV, + ) -> PipelineViewportSwizzleStateCreateInfoNVBuilder<'a> { + self.inner.flags = flags; + self + } + pub fn viewport_swizzles( + mut self, + viewport_swizzles: &'a [ViewportSwizzleNV], + ) -> PipelineViewportSwizzleStateCreateInfoNVBuilder<'a> { + self.inner.viewport_count = viewport_swizzles.len() as _; + self.inner.p_viewport_swizzles = viewport_swizzles.as_ptr(); + self + } + #[doc = r" Calling build will **discard** all the lifetime information. Only call this if"] + #[doc = r" necessary! 
Builders implement `Deref` targeting their corresponding Vulkan struct,"] + #[doc = r" so references to builders can be passed directly to Vulkan functions."] + pub fn build(self) -> PipelineViewportSwizzleStateCreateInfoNV { + self.inner + } +} +#[repr(C)] +#[derive(Copy, Clone, Debug)] +#[doc = ""] +pub struct PhysicalDeviceDiscardRectanglePropertiesEXT { + pub s_type: StructureType, + pub p_next: *mut c_void, + pub max_discard_rectangles: u32, +} +impl ::std::default::Default for PhysicalDeviceDiscardRectanglePropertiesEXT { + fn default() -> PhysicalDeviceDiscardRectanglePropertiesEXT { + PhysicalDeviceDiscardRectanglePropertiesEXT { + s_type: StructureType::PHYSICAL_DEVICE_DISCARD_RECTANGLE_PROPERTIES_EXT, + p_next: ::std::ptr::null_mut(), + max_discard_rectangles: u32::default(), + } + } +} +impl PhysicalDeviceDiscardRectanglePropertiesEXT { + pub fn builder<'a>() -> PhysicalDeviceDiscardRectanglePropertiesEXTBuilder<'a> { + PhysicalDeviceDiscardRectanglePropertiesEXTBuilder { + inner: PhysicalDeviceDiscardRectanglePropertiesEXT::default(), + marker: ::std::marker::PhantomData, + } + } +} +#[repr(transparent)] +pub struct PhysicalDeviceDiscardRectanglePropertiesEXTBuilder<'a> { + inner: PhysicalDeviceDiscardRectanglePropertiesEXT, + marker: ::std::marker::PhantomData<&'a ()>, +} +unsafe impl ExtendsPhysicalDeviceProperties2 + for PhysicalDeviceDiscardRectanglePropertiesEXTBuilder<'_> +{ +} +unsafe impl ExtendsPhysicalDeviceProperties2 for PhysicalDeviceDiscardRectanglePropertiesEXT {} +impl<'a> ::std::ops::Deref for PhysicalDeviceDiscardRectanglePropertiesEXTBuilder<'a> { + type Target = PhysicalDeviceDiscardRectanglePropertiesEXT; + fn deref(&self) -> &Self::Target { + &self.inner + } +} +impl<'a> ::std::ops::DerefMut for PhysicalDeviceDiscardRectanglePropertiesEXTBuilder<'a> { + fn deref_mut(&mut self) -> &mut Self::Target { + &mut self.inner + } +} +impl<'a> PhysicalDeviceDiscardRectanglePropertiesEXTBuilder<'a> { + pub fn max_discard_rectangles( + mut self, + max_discard_rectangles: u32, + ) -> PhysicalDeviceDiscardRectanglePropertiesEXTBuilder<'a> { + self.inner.max_discard_rectangles = max_discard_rectangles; + self + } + #[doc = r" Calling build will **discard** all the lifetime information. Only call this if"] + #[doc = r" necessary! 
Builders implement `Deref` targeting their corresponding Vulkan struct,"] + #[doc = r" so references to builders can be passed directly to Vulkan functions."] + pub fn build(self) -> PhysicalDeviceDiscardRectanglePropertiesEXT { + self.inner + } +} +#[repr(C)] +#[derive(Copy, Clone, Debug)] +#[doc = ""] +pub struct PipelineDiscardRectangleStateCreateInfoEXT { + pub s_type: StructureType, + pub p_next: *const c_void, + pub flags: PipelineDiscardRectangleStateCreateFlagsEXT, + pub discard_rectangle_mode: DiscardRectangleModeEXT, + pub discard_rectangle_count: u32, + pub p_discard_rectangles: *const Rect2D, +} +impl ::std::default::Default for PipelineDiscardRectangleStateCreateInfoEXT { + fn default() -> PipelineDiscardRectangleStateCreateInfoEXT { + PipelineDiscardRectangleStateCreateInfoEXT { + s_type: StructureType::PIPELINE_DISCARD_RECTANGLE_STATE_CREATE_INFO_EXT, + p_next: ::std::ptr::null(), + flags: PipelineDiscardRectangleStateCreateFlagsEXT::default(), + discard_rectangle_mode: DiscardRectangleModeEXT::default(), + discard_rectangle_count: u32::default(), + p_discard_rectangles: ::std::ptr::null(), + } + } +} +impl PipelineDiscardRectangleStateCreateInfoEXT { + pub fn builder<'a>() -> PipelineDiscardRectangleStateCreateInfoEXTBuilder<'a> { + PipelineDiscardRectangleStateCreateInfoEXTBuilder { + inner: PipelineDiscardRectangleStateCreateInfoEXT::default(), + marker: ::std::marker::PhantomData, + } + } +} +#[repr(transparent)] +pub struct PipelineDiscardRectangleStateCreateInfoEXTBuilder<'a> { + inner: PipelineDiscardRectangleStateCreateInfoEXT, + marker: ::std::marker::PhantomData<&'a ()>, +} +unsafe impl ExtendsGraphicsPipelineCreateInfo + for PipelineDiscardRectangleStateCreateInfoEXTBuilder<'_> +{ +} +unsafe impl ExtendsGraphicsPipelineCreateInfo for PipelineDiscardRectangleStateCreateInfoEXT {} +impl<'a> ::std::ops::Deref for PipelineDiscardRectangleStateCreateInfoEXTBuilder<'a> { + type Target = PipelineDiscardRectangleStateCreateInfoEXT; + fn deref(&self) -> &Self::Target { + &self.inner + } +} +impl<'a> ::std::ops::DerefMut for PipelineDiscardRectangleStateCreateInfoEXTBuilder<'a> { + fn deref_mut(&mut self) -> &mut Self::Target { + &mut self.inner + } +} +impl<'a> PipelineDiscardRectangleStateCreateInfoEXTBuilder<'a> { + pub fn flags( + mut self, + flags: PipelineDiscardRectangleStateCreateFlagsEXT, + ) -> PipelineDiscardRectangleStateCreateInfoEXTBuilder<'a> { + self.inner.flags = flags; + self + } + pub fn discard_rectangle_mode( + mut self, + discard_rectangle_mode: DiscardRectangleModeEXT, + ) -> PipelineDiscardRectangleStateCreateInfoEXTBuilder<'a> { + self.inner.discard_rectangle_mode = discard_rectangle_mode; + self + } + pub fn discard_rectangles( + mut self, + discard_rectangles: &'a [Rect2D], + ) -> PipelineDiscardRectangleStateCreateInfoEXTBuilder<'a> { + self.inner.discard_rectangle_count = discard_rectangles.len() as _; + self.inner.p_discard_rectangles = discard_rectangles.as_ptr(); + self + } + #[doc = r" Calling build will **discard** all the lifetime information. Only call this if"] + #[doc = r" necessary! 
Builders implement `Deref` targeting their corresponding Vulkan struct,"] + #[doc = r" so references to builders can be passed directly to Vulkan functions."] + pub fn build(self) -> PipelineDiscardRectangleStateCreateInfoEXT { + self.inner + } +} +#[repr(C)] +#[derive(Copy, Clone, Debug)] +#[doc = ""] +pub struct PhysicalDeviceMultiviewPerViewAttributesPropertiesNVX { + pub s_type: StructureType, + pub p_next: *mut c_void, + pub per_view_position_all_components: Bool32, +} +impl ::std::default::Default for PhysicalDeviceMultiviewPerViewAttributesPropertiesNVX { + fn default() -> PhysicalDeviceMultiviewPerViewAttributesPropertiesNVX { + PhysicalDeviceMultiviewPerViewAttributesPropertiesNVX { + s_type: StructureType::PHYSICAL_DEVICE_MULTIVIEW_PER_VIEW_ATTRIBUTES_PROPERTIES_NVX, + p_next: ::std::ptr::null_mut(), + per_view_position_all_components: Bool32::default(), + } + } +} +impl PhysicalDeviceMultiviewPerViewAttributesPropertiesNVX { + pub fn builder<'a>() -> PhysicalDeviceMultiviewPerViewAttributesPropertiesNVXBuilder<'a> { + PhysicalDeviceMultiviewPerViewAttributesPropertiesNVXBuilder { + inner: PhysicalDeviceMultiviewPerViewAttributesPropertiesNVX::default(), + marker: ::std::marker::PhantomData, + } + } +} +#[repr(transparent)] +pub struct PhysicalDeviceMultiviewPerViewAttributesPropertiesNVXBuilder<'a> { + inner: PhysicalDeviceMultiviewPerViewAttributesPropertiesNVX, + marker: ::std::marker::PhantomData<&'a ()>, +} +unsafe impl ExtendsPhysicalDeviceProperties2 + for PhysicalDeviceMultiviewPerViewAttributesPropertiesNVXBuilder<'_> +{ +} +unsafe impl ExtendsPhysicalDeviceProperties2 + for PhysicalDeviceMultiviewPerViewAttributesPropertiesNVX +{ +} +impl<'a> ::std::ops::Deref for PhysicalDeviceMultiviewPerViewAttributesPropertiesNVXBuilder<'a> { + type Target = PhysicalDeviceMultiviewPerViewAttributesPropertiesNVX; + fn deref(&self) -> &Self::Target { + &self.inner + } +} +impl<'a> ::std::ops::DerefMut for PhysicalDeviceMultiviewPerViewAttributesPropertiesNVXBuilder<'a> { + fn deref_mut(&mut self) -> &mut Self::Target { + &mut self.inner + } +} +impl<'a> PhysicalDeviceMultiviewPerViewAttributesPropertiesNVXBuilder<'a> { + pub fn per_view_position_all_components( + mut self, + per_view_position_all_components: bool, + ) -> PhysicalDeviceMultiviewPerViewAttributesPropertiesNVXBuilder<'a> { + self.inner.per_view_position_all_components = per_view_position_all_components.into(); + self + } + #[doc = r" Calling build will **discard** all the lifetime information. Only call this if"] + #[doc = r" necessary! 
Builders implement `Deref` targeting their corresponding Vulkan struct,"] + #[doc = r" so references to builders can be passed directly to Vulkan functions."] + pub fn build(self) -> PhysicalDeviceMultiviewPerViewAttributesPropertiesNVX { + self.inner + } +} +#[repr(C)] +#[derive(Copy, Clone, Default, Debug)] +#[doc = ""] +pub struct InputAttachmentAspectReference { + pub subpass: u32, + pub input_attachment_index: u32, + pub aspect_mask: ImageAspectFlags, +} +impl InputAttachmentAspectReference { + pub fn builder<'a>() -> InputAttachmentAspectReferenceBuilder<'a> { + InputAttachmentAspectReferenceBuilder { + inner: InputAttachmentAspectReference::default(), + marker: ::std::marker::PhantomData, + } + } +} +#[repr(transparent)] +pub struct InputAttachmentAspectReferenceBuilder<'a> { + inner: InputAttachmentAspectReference, + marker: ::std::marker::PhantomData<&'a ()>, +} +impl<'a> ::std::ops::Deref for InputAttachmentAspectReferenceBuilder<'a> { + type Target = InputAttachmentAspectReference; + fn deref(&self) -> &Self::Target { + &self.inner + } +} +impl<'a> ::std::ops::DerefMut for InputAttachmentAspectReferenceBuilder<'a> { + fn deref_mut(&mut self) -> &mut Self::Target { + &mut self.inner + } +} +impl<'a> InputAttachmentAspectReferenceBuilder<'a> { + pub fn subpass(mut self, subpass: u32) -> InputAttachmentAspectReferenceBuilder<'a> { + self.inner.subpass = subpass; + self + } + pub fn input_attachment_index( + mut self, + input_attachment_index: u32, + ) -> InputAttachmentAspectReferenceBuilder<'a> { + self.inner.input_attachment_index = input_attachment_index; + self + } + pub fn aspect_mask( + mut self, + aspect_mask: ImageAspectFlags, + ) -> InputAttachmentAspectReferenceBuilder<'a> { + self.inner.aspect_mask = aspect_mask; + self + } + #[doc = r" Calling build will **discard** all the lifetime information. Only call this if"] + #[doc = r" necessary! 
Builders implement `Deref` targeting their corresponding Vulkan struct,"] + #[doc = r" so references to builders can be passed directly to Vulkan functions."] + pub fn build(self) -> InputAttachmentAspectReference { + self.inner + } +} +#[repr(C)] +#[derive(Copy, Clone, Debug)] +#[doc = ""] +pub struct RenderPassInputAttachmentAspectCreateInfo { + pub s_type: StructureType, + pub p_next: *const c_void, + pub aspect_reference_count: u32, + pub p_aspect_references: *const InputAttachmentAspectReference, +} +impl ::std::default::Default for RenderPassInputAttachmentAspectCreateInfo { + fn default() -> RenderPassInputAttachmentAspectCreateInfo { + RenderPassInputAttachmentAspectCreateInfo { + s_type: StructureType::RENDER_PASS_INPUT_ATTACHMENT_ASPECT_CREATE_INFO, + p_next: ::std::ptr::null(), + aspect_reference_count: u32::default(), + p_aspect_references: ::std::ptr::null(), + } + } +} +impl RenderPassInputAttachmentAspectCreateInfo { + pub fn builder<'a>() -> RenderPassInputAttachmentAspectCreateInfoBuilder<'a> { + RenderPassInputAttachmentAspectCreateInfoBuilder { + inner: RenderPassInputAttachmentAspectCreateInfo::default(), + marker: ::std::marker::PhantomData, + } + } +} +#[repr(transparent)] +pub struct RenderPassInputAttachmentAspectCreateInfoBuilder<'a> { + inner: RenderPassInputAttachmentAspectCreateInfo, + marker: ::std::marker::PhantomData<&'a ()>, +} +unsafe impl ExtendsRenderPassCreateInfo for RenderPassInputAttachmentAspectCreateInfoBuilder<'_> {} +unsafe impl ExtendsRenderPassCreateInfo for RenderPassInputAttachmentAspectCreateInfo {} +impl<'a> ::std::ops::Deref for RenderPassInputAttachmentAspectCreateInfoBuilder<'a> { + type Target = RenderPassInputAttachmentAspectCreateInfo; + fn deref(&self) -> &Self::Target { + &self.inner + } +} +impl<'a> ::std::ops::DerefMut for RenderPassInputAttachmentAspectCreateInfoBuilder<'a> { + fn deref_mut(&mut self) -> &mut Self::Target { + &mut self.inner + } +} +impl<'a> RenderPassInputAttachmentAspectCreateInfoBuilder<'a> { + pub fn aspect_references( + mut self, + aspect_references: &'a [InputAttachmentAspectReference], + ) -> RenderPassInputAttachmentAspectCreateInfoBuilder<'a> { + self.inner.aspect_reference_count = aspect_references.len() as _; + self.inner.p_aspect_references = aspect_references.as_ptr(); + self + } + #[doc = r" Calling build will **discard** all the lifetime information. Only call this if"] + #[doc = r" necessary! 
Builders implement `Deref` targeting their corresponding Vulkan struct,"] + #[doc = r" so references to builders can be passed directly to Vulkan functions."] + pub fn build(self) -> RenderPassInputAttachmentAspectCreateInfo { + self.inner + } +} +#[repr(C)] +#[derive(Copy, Clone, Debug)] +#[doc = ""] +pub struct PhysicalDeviceSurfaceInfo2KHR { + pub s_type: StructureType, + pub p_next: *const c_void, + pub surface: SurfaceKHR, +} +impl ::std::default::Default for PhysicalDeviceSurfaceInfo2KHR { + fn default() -> PhysicalDeviceSurfaceInfo2KHR { + PhysicalDeviceSurfaceInfo2KHR { + s_type: StructureType::PHYSICAL_DEVICE_SURFACE_INFO_2_KHR, + p_next: ::std::ptr::null(), + surface: SurfaceKHR::default(), + } + } +} +impl PhysicalDeviceSurfaceInfo2KHR { + pub fn builder<'a>() -> PhysicalDeviceSurfaceInfo2KHRBuilder<'a> { + PhysicalDeviceSurfaceInfo2KHRBuilder { + inner: PhysicalDeviceSurfaceInfo2KHR::default(), + marker: ::std::marker::PhantomData, + } + } +} +#[repr(transparent)] +pub struct PhysicalDeviceSurfaceInfo2KHRBuilder<'a> { + inner: PhysicalDeviceSurfaceInfo2KHR, + marker: ::std::marker::PhantomData<&'a ()>, +} +pub unsafe trait ExtendsPhysicalDeviceSurfaceInfo2KHR {} +impl<'a> ::std::ops::Deref for PhysicalDeviceSurfaceInfo2KHRBuilder<'a> { + type Target = PhysicalDeviceSurfaceInfo2KHR; + fn deref(&self) -> &Self::Target { + &self.inner + } +} +impl<'a> ::std::ops::DerefMut for PhysicalDeviceSurfaceInfo2KHRBuilder<'a> { + fn deref_mut(&mut self) -> &mut Self::Target { + &mut self.inner + } +} +impl<'a> PhysicalDeviceSurfaceInfo2KHRBuilder<'a> { + pub fn surface(mut self, surface: SurfaceKHR) -> PhysicalDeviceSurfaceInfo2KHRBuilder<'a> { + self.inner.surface = surface; + self + } + #[doc = r" Prepends the given extension struct between the root and the first pointer. This"] + #[doc = r" method only exists on structs that can be passed to a function directly. Only"] + #[doc = r" valid extension structs can be pushed into the chain."] + #[doc = r" If the chain looks like `A -> B -> C`, and you call `builder.push_next(&mut D)`, then the"] + #[doc = r" chain will look like `A -> D -> B -> C`."] + pub fn push_next<T: ExtendsPhysicalDeviceSurfaceInfo2KHR>( + mut self, + next: &'a mut T, + ) -> PhysicalDeviceSurfaceInfo2KHRBuilder<'a> { + unsafe { + let next_ptr = next as *mut T as *mut BaseOutStructure; + let last_next = ptr_chain_iter(next).last().unwrap(); + (*last_next).p_next = self.inner.p_next as _; + self.inner.p_next = next_ptr as _; + } + self + } + #[doc = r" Calling build will **discard** all the lifetime information. Only call this if"] + #[doc = r" necessary! 
Builders implement `Deref` targeting their corresponding Vulkan struct,"] + #[doc = r" so references to builders can be passed directly to Vulkan functions."] + pub fn build(self) -> PhysicalDeviceSurfaceInfo2KHR { + self.inner + } +} +#[repr(C)] +#[derive(Copy, Clone, Debug)] +#[doc = ""] +pub struct SurfaceCapabilities2KHR { + pub s_type: StructureType, + pub p_next: *mut c_void, + pub surface_capabilities: SurfaceCapabilitiesKHR, +} +impl ::std::default::Default for SurfaceCapabilities2KHR { + fn default() -> SurfaceCapabilities2KHR { + SurfaceCapabilities2KHR { + s_type: StructureType::SURFACE_CAPABILITIES_2_KHR, + p_next: ::std::ptr::null_mut(), + surface_capabilities: SurfaceCapabilitiesKHR::default(), + } + } +} +impl SurfaceCapabilities2KHR { + pub fn builder<'a>() -> SurfaceCapabilities2KHRBuilder<'a> { + SurfaceCapabilities2KHRBuilder { + inner: SurfaceCapabilities2KHR::default(), + marker: ::std::marker::PhantomData, + } + } +} +#[repr(transparent)] +pub struct SurfaceCapabilities2KHRBuilder<'a> { + inner: SurfaceCapabilities2KHR, + marker: ::std::marker::PhantomData<&'a ()>, +} +pub unsafe trait ExtendsSurfaceCapabilities2KHR {} +impl<'a> ::std::ops::Deref for SurfaceCapabilities2KHRBuilder<'a> { + type Target = SurfaceCapabilities2KHR; + fn deref(&self) -> &Self::Target { + &self.inner + } +} +impl<'a> ::std::ops::DerefMut for SurfaceCapabilities2KHRBuilder<'a> { + fn deref_mut(&mut self) -> &mut Self::Target { + &mut self.inner + } +} +impl<'a> SurfaceCapabilities2KHRBuilder<'a> { + pub fn surface_capabilities( + mut self, + surface_capabilities: SurfaceCapabilitiesKHR, + ) -> SurfaceCapabilities2KHRBuilder<'a> { + self.inner.surface_capabilities = surface_capabilities; + self + } + #[doc = r" Prepends the given extension struct between the root and the first pointer. This"] + #[doc = r" method only exists on structs that can be passed to a function directly. Only"] + #[doc = r" valid extension structs can be pushed into the chain."] + #[doc = r" If the chain looks like `A -> B -> C`, and you call `builder.push_next(&mut D)`, then the"] + #[doc = r" chain will look like `A -> D -> B -> C`."] + pub fn push_next<T: ExtendsSurfaceCapabilities2KHR>( + mut self, + next: &'a mut T, + ) -> SurfaceCapabilities2KHRBuilder<'a> { + unsafe { + let next_ptr = next as *mut T as *mut BaseOutStructure; + let last_next = ptr_chain_iter(next).last().unwrap(); + (*last_next).p_next = self.inner.p_next as _; + self.inner.p_next = next_ptr as _; + } + self + } + #[doc = r" Calling build will **discard** all the lifetime information. Only call this if"] + #[doc = r" necessary! 
Builders implement `Deref` targeting their corresponding Vulkan struct,"] + #[doc = r" so references to builders can be passed directly to Vulkan functions."] + pub fn build(self) -> SurfaceCapabilities2KHR { + self.inner + } +} +#[repr(C)] +#[derive(Copy, Clone, Debug)] +#[doc = ""] +pub struct SurfaceFormat2KHR { + pub s_type: StructureType, + pub p_next: *mut c_void, + pub surface_format: SurfaceFormatKHR, +} +impl ::std::default::Default for SurfaceFormat2KHR { + fn default() -> SurfaceFormat2KHR { + SurfaceFormat2KHR { + s_type: StructureType::SURFACE_FORMAT_2_KHR, + p_next: ::std::ptr::null_mut(), + surface_format: SurfaceFormatKHR::default(), + } + } +} +impl SurfaceFormat2KHR { + pub fn builder<'a>() -> SurfaceFormat2KHRBuilder<'a> { + SurfaceFormat2KHRBuilder { + inner: SurfaceFormat2KHR::default(), + marker: ::std::marker::PhantomData, + } + } +} +#[repr(transparent)] +pub struct SurfaceFormat2KHRBuilder<'a> { + inner: SurfaceFormat2KHR, + marker: ::std::marker::PhantomData<&'a ()>, +} +pub unsafe trait ExtendsSurfaceFormat2KHR {} +impl<'a> ::std::ops::Deref for SurfaceFormat2KHRBuilder<'a> { + type Target = SurfaceFormat2KHR; + fn deref(&self) -> &Self::Target { + &self.inner + } +} +impl<'a> ::std::ops::DerefMut for SurfaceFormat2KHRBuilder<'a> { + fn deref_mut(&mut self) -> &mut Self::Target { + &mut self.inner + } +} +impl<'a> SurfaceFormat2KHRBuilder<'a> { + pub fn surface_format( + mut self, + surface_format: SurfaceFormatKHR, + ) -> SurfaceFormat2KHRBuilder<'a> { + self.inner.surface_format = surface_format; + self + } + #[doc = r" Prepends the given extension struct between the root and the first pointer. This"] + #[doc = r" method only exists on structs that can be passed to a function directly. Only"] + #[doc = r" valid extension structs can be pushed into the chain."] + #[doc = r" If the chain looks like `A -> B -> C`, and you call `builder.push_next(&mut D)`, then the"] + #[doc = r" chain will look like `A -> D -> B -> C`."] + pub fn push_next<T: ExtendsSurfaceFormat2KHR>( + mut self, + next: &'a mut T, + ) -> SurfaceFormat2KHRBuilder<'a> { + unsafe { + let next_ptr = next as *mut T as *mut BaseOutStructure; + let last_next = ptr_chain_iter(next).last().unwrap(); + (*last_next).p_next = self.inner.p_next as _; + self.inner.p_next = next_ptr as _; + } + self + } + #[doc = r" Calling build will **discard** all the lifetime information. Only call this if"] + #[doc = r" necessary! 
Builders implement `Deref` targeting their corresponding Vulkan struct,"] + #[doc = r" so references to builders can be passed directly to Vulkan functions."] + pub fn build(self) -> SurfaceFormat2KHR { + self.inner + } +} +#[repr(C)] +#[derive(Copy, Clone, Debug)] +#[doc = ""] +pub struct DisplayProperties2KHR { + pub s_type: StructureType, + pub p_next: *mut c_void, + pub display_properties: DisplayPropertiesKHR, +} +impl ::std::default::Default for DisplayProperties2KHR { + fn default() -> DisplayProperties2KHR { + DisplayProperties2KHR { + s_type: StructureType::DISPLAY_PROPERTIES_2_KHR, + p_next: ::std::ptr::null_mut(), + display_properties: DisplayPropertiesKHR::default(), + } + } +} +impl DisplayProperties2KHR { + pub fn builder<'a>() -> DisplayProperties2KHRBuilder<'a> { + DisplayProperties2KHRBuilder { + inner: DisplayProperties2KHR::default(), + marker: ::std::marker::PhantomData, + } + } +} +#[repr(transparent)] +pub struct DisplayProperties2KHRBuilder<'a> { + inner: DisplayProperties2KHR, + marker: ::std::marker::PhantomData<&'a ()>, +} +pub unsafe trait ExtendsDisplayProperties2KHR {} +impl<'a> ::std::ops::Deref for DisplayProperties2KHRBuilder<'a> { + type Target = DisplayProperties2KHR; + fn deref(&self) -> &Self::Target { + &self.inner + } +} +impl<'a> ::std::ops::DerefMut for DisplayProperties2KHRBuilder<'a> { + fn deref_mut(&mut self) -> &mut Self::Target { + &mut self.inner + } +} +impl<'a> DisplayProperties2KHRBuilder<'a> { + pub fn display_properties( + mut self, + display_properties: DisplayPropertiesKHR, + ) -> DisplayProperties2KHRBuilder<'a> { + self.inner.display_properties = display_properties; + self + } + #[doc = r" Prepends the given extension struct between the root and the first pointer. This"] + #[doc = r" method only exists on structs that can be passed to a function directly. Only"] + #[doc = r" valid extension structs can be pushed into the chain."] + #[doc = r" If the chain looks like `A -> B -> C`, and you call `builder.push_next(&mut D)`, then the"] + #[doc = r" chain will look like `A -> D -> B -> C`."] + pub fn push_next<T: ExtendsDisplayProperties2KHR>( + mut self, + next: &'a mut T, + ) -> DisplayProperties2KHRBuilder<'a> { + unsafe { + let next_ptr = next as *mut T as *mut BaseOutStructure; + let last_next = ptr_chain_iter(next).last().unwrap(); + (*last_next).p_next = self.inner.p_next as _; + self.inner.p_next = next_ptr as _; + } + self + } + #[doc = r" Calling build will **discard** all the lifetime information. Only call this if"] + #[doc = r" necessary! 
Builders implement `Deref` targeting their corresponding Vulkan struct,"] + #[doc = r" so references to builders can be passed directly to Vulkan functions."] + pub fn build(self) -> DisplayProperties2KHR { + self.inner + } +} +#[repr(C)] +#[derive(Copy, Clone, Debug)] +#[doc = ""] +pub struct DisplayPlaneProperties2KHR { + pub s_type: StructureType, + pub p_next: *mut c_void, + pub display_plane_properties: DisplayPlanePropertiesKHR, +} +impl ::std::default::Default for DisplayPlaneProperties2KHR { + fn default() -> DisplayPlaneProperties2KHR { + DisplayPlaneProperties2KHR { + s_type: StructureType::DISPLAY_PLANE_PROPERTIES_2_KHR, + p_next: ::std::ptr::null_mut(), + display_plane_properties: DisplayPlanePropertiesKHR::default(), + } + } +} +impl DisplayPlaneProperties2KHR { + pub fn builder<'a>() -> DisplayPlaneProperties2KHRBuilder<'a> { + DisplayPlaneProperties2KHRBuilder { + inner: DisplayPlaneProperties2KHR::default(), + marker: ::std::marker::PhantomData, + } + } +} +#[repr(transparent)] +pub struct DisplayPlaneProperties2KHRBuilder<'a> { + inner: DisplayPlaneProperties2KHR, + marker: ::std::marker::PhantomData<&'a ()>, +} +pub unsafe trait ExtendsDisplayPlaneProperties2KHR {} +impl<'a> ::std::ops::Deref for DisplayPlaneProperties2KHRBuilder<'a> { + type Target = DisplayPlaneProperties2KHR; + fn deref(&self) -> &Self::Target { + &self.inner + } +} +impl<'a> ::std::ops::DerefMut for DisplayPlaneProperties2KHRBuilder<'a> { + fn deref_mut(&mut self) -> &mut Self::Target { + &mut self.inner + } +} +impl<'a> DisplayPlaneProperties2KHRBuilder<'a> { + pub fn display_plane_properties( + mut self, + display_plane_properties: DisplayPlanePropertiesKHR, + ) -> DisplayPlaneProperties2KHRBuilder<'a> { + self.inner.display_plane_properties = display_plane_properties; + self + } + #[doc = r" Prepends the given extension struct between the root and the first pointer. This"] + #[doc = r" method only exists on structs that can be passed to a function directly. Only"] + #[doc = r" valid extension structs can be pushed into the chain."] + #[doc = r" If the chain looks like `A -> B -> C`, and you call `builder.push_next(&mut D)`, then the"] + #[doc = r" chain will look like `A -> D -> B -> C`."] + pub fn push_next<T: ExtendsDisplayPlaneProperties2KHR>( + mut self, + next: &'a mut T, + ) -> DisplayPlaneProperties2KHRBuilder<'a> { + unsafe { + let next_ptr = next as *mut T as *mut BaseOutStructure; + let last_next = ptr_chain_iter(next).last().unwrap(); + (*last_next).p_next = self.inner.p_next as _; + self.inner.p_next = next_ptr as _; + } + self + } + #[doc = r" Calling build will **discard** all the lifetime information. Only call this if"] + #[doc = r" necessary! 
Builders implement `Deref` targeting their corresponding Vulkan struct,"] + #[doc = r" so references to builders can be passed directly to Vulkan functions."] + pub fn build(self) -> DisplayPlaneProperties2KHR { + self.inner + } +} +#[repr(C)] +#[derive(Copy, Clone, Debug)] +#[doc = ""] +pub struct DisplayModeProperties2KHR { + pub s_type: StructureType, + pub p_next: *mut c_void, + pub display_mode_properties: DisplayModePropertiesKHR, +} +impl ::std::default::Default for DisplayModeProperties2KHR { + fn default() -> DisplayModeProperties2KHR { + DisplayModeProperties2KHR { + s_type: StructureType::DISPLAY_MODE_PROPERTIES_2_KHR, + p_next: ::std::ptr::null_mut(), + display_mode_properties: DisplayModePropertiesKHR::default(), + } + } +} +impl DisplayModeProperties2KHR { + pub fn builder<'a>() -> DisplayModeProperties2KHRBuilder<'a> { + DisplayModeProperties2KHRBuilder { + inner: DisplayModeProperties2KHR::default(), + marker: ::std::marker::PhantomData, + } + } +} +#[repr(transparent)] +pub struct DisplayModeProperties2KHRBuilder<'a> { + inner: DisplayModeProperties2KHR, + marker: ::std::marker::PhantomData<&'a ()>, +} +pub unsafe trait ExtendsDisplayModeProperties2KHR {} +impl<'a> ::std::ops::Deref for DisplayModeProperties2KHRBuilder<'a> { + type Target = DisplayModeProperties2KHR; + fn deref(&self) -> &Self::Target { + &self.inner + } +} +impl<'a> ::std::ops::DerefMut for DisplayModeProperties2KHRBuilder<'a> { + fn deref_mut(&mut self) -> &mut Self::Target { + &mut self.inner + } +} +impl<'a> DisplayModeProperties2KHRBuilder<'a> { + pub fn display_mode_properties( + mut self, + display_mode_properties: DisplayModePropertiesKHR, + ) -> DisplayModeProperties2KHRBuilder<'a> { + self.inner.display_mode_properties = display_mode_properties; + self + } + #[doc = r" Prepends the given extension struct between the root and the first pointer. This"] + #[doc = r" method only exists on structs that can be passed to a function directly. Only"] + #[doc = r" valid extension structs can be pushed into the chain."] + #[doc = r" If the chain looks like `A -> B -> C`, and you call `builder.push_next(&mut D)`, then the"] + #[doc = r" chain will look like `A -> D -> B -> C`."] + pub fn push_next<T: ExtendsDisplayModeProperties2KHR>( + mut self, + next: &'a mut T, + ) -> DisplayModeProperties2KHRBuilder<'a> { + unsafe { + let next_ptr = next as *mut T as *mut BaseOutStructure; + let last_next = ptr_chain_iter(next).last().unwrap(); + (*last_next).p_next = self.inner.p_next as _; + self.inner.p_next = next_ptr as _; + } + self + } + #[doc = r" Calling build will **discard** all the lifetime information. Only call this if"] + #[doc = r" necessary! 
Builders implement `Deref` targeting their corresponding Vulkan struct,"] + #[doc = r" so references to builders can be passed directly to Vulkan functions."] + pub fn build(self) -> DisplayModeProperties2KHR { + self.inner + } +} +#[repr(C)] +#[derive(Copy, Clone, Debug)] +#[doc = ""] +pub struct DisplayPlaneInfo2KHR { + pub s_type: StructureType, + pub p_next: *const c_void, + pub mode: DisplayModeKHR, + pub plane_index: u32, +} +impl ::std::default::Default for DisplayPlaneInfo2KHR { + fn default() -> DisplayPlaneInfo2KHR { + DisplayPlaneInfo2KHR { + s_type: StructureType::DISPLAY_PLANE_INFO_2_KHR, + p_next: ::std::ptr::null(), + mode: DisplayModeKHR::default(), + plane_index: u32::default(), + } + } +} +impl DisplayPlaneInfo2KHR { + pub fn builder<'a>() -> DisplayPlaneInfo2KHRBuilder<'a> { + DisplayPlaneInfo2KHRBuilder { + inner: DisplayPlaneInfo2KHR::default(), + marker: ::std::marker::PhantomData, + } + } +} +#[repr(transparent)] +pub struct DisplayPlaneInfo2KHRBuilder<'a> { + inner: DisplayPlaneInfo2KHR, + marker: ::std::marker::PhantomData<&'a ()>, +} +pub unsafe trait ExtendsDisplayPlaneInfo2KHR {} +impl<'a> ::std::ops::Deref for DisplayPlaneInfo2KHRBuilder<'a> { + type Target = DisplayPlaneInfo2KHR; + fn deref(&self) -> &Self::Target { + &self.inner + } +} +impl<'a> ::std::ops::DerefMut for DisplayPlaneInfo2KHRBuilder<'a> { + fn deref_mut(&mut self) -> &mut Self::Target { + &mut self.inner + } +} +impl<'a> DisplayPlaneInfo2KHRBuilder<'a> { + pub fn mode(mut self, mode: DisplayModeKHR) -> DisplayPlaneInfo2KHRBuilder<'a> { + self.inner.mode = mode; + self + } + pub fn plane_index(mut self, plane_index: u32) -> DisplayPlaneInfo2KHRBuilder<'a> { + self.inner.plane_index = plane_index; + self + } + #[doc = r" Prepends the given extension struct between the root and the first pointer. This"] + #[doc = r" method only exists on structs that can be passed to a function directly. Only"] + #[doc = r" valid extension structs can be pushed into the chain."] + #[doc = r" If the chain looks like `A -> B -> C`, and you call `builder.push_next(&mut D)`, then the"] + #[doc = r" chain will look like `A -> D -> B -> C`."] + pub fn push_next<T: ExtendsDisplayPlaneInfo2KHR>( + mut self, + next: &'a mut T, + ) -> DisplayPlaneInfo2KHRBuilder<'a> { + unsafe { + let next_ptr = next as *mut T as *mut BaseOutStructure; + let last_next = ptr_chain_iter(next).last().unwrap(); + (*last_next).p_next = self.inner.p_next as _; + self.inner.p_next = next_ptr as _; + } + self + } + #[doc = r" Calling build will **discard** all the lifetime information. Only call this if"] + #[doc = r" necessary! 
Builders implement `Deref` targeting their corresponding Vulkan struct,"] + #[doc = r" so references to builders can be passed directly to Vulkan functions."] + pub fn build(self) -> DisplayPlaneInfo2KHR { + self.inner + } +} +#[repr(C)] +#[derive(Copy, Clone, Debug)] +#[doc = ""] +pub struct DisplayPlaneCapabilities2KHR { + pub s_type: StructureType, + pub p_next: *mut c_void, + pub capabilities: DisplayPlaneCapabilitiesKHR, +} +impl ::std::default::Default for DisplayPlaneCapabilities2KHR { + fn default() -> DisplayPlaneCapabilities2KHR { + DisplayPlaneCapabilities2KHR { + s_type: StructureType::DISPLAY_PLANE_CAPABILITIES_2_KHR, + p_next: ::std::ptr::null_mut(), + capabilities: DisplayPlaneCapabilitiesKHR::default(), + } + } +} +impl DisplayPlaneCapabilities2KHR { + pub fn builder<'a>() -> DisplayPlaneCapabilities2KHRBuilder<'a> { + DisplayPlaneCapabilities2KHRBuilder { + inner: DisplayPlaneCapabilities2KHR::default(), + marker: ::std::marker::PhantomData, + } + } +} +#[repr(transparent)] +pub struct DisplayPlaneCapabilities2KHRBuilder<'a> { + inner: DisplayPlaneCapabilities2KHR, + marker: ::std::marker::PhantomData<&'a ()>, +} +pub unsafe trait ExtendsDisplayPlaneCapabilities2KHR {} +impl<'a> ::std::ops::Deref for DisplayPlaneCapabilities2KHRBuilder<'a> { + type Target = DisplayPlaneCapabilities2KHR; + fn deref(&self) -> &Self::Target { + &self.inner + } +} +impl<'a> ::std::ops::DerefMut for DisplayPlaneCapabilities2KHRBuilder<'a> { + fn deref_mut(&mut self) -> &mut Self::Target { + &mut self.inner + } +} +impl<'a> DisplayPlaneCapabilities2KHRBuilder<'a> { + pub fn capabilities( + mut self, + capabilities: DisplayPlaneCapabilitiesKHR, + ) -> DisplayPlaneCapabilities2KHRBuilder<'a> { + self.inner.capabilities = capabilities; + self + } + #[doc = r" Prepends the given extension struct between the root and the first pointer. This"] + #[doc = r" method only exists on structs that can be passed to a function directly. Only"] + #[doc = r" valid extension structs can be pushed into the chain."] + #[doc = r" If the chain looks like `A -> B -> C`, and you call `builder.push_next(&mut D)`, then the"] + #[doc = r" chain will look like `A -> D -> B -> C`."] + pub fn push_next<T: ExtendsDisplayPlaneCapabilities2KHR>( + mut self, + next: &'a mut T, + ) -> DisplayPlaneCapabilities2KHRBuilder<'a> { + unsafe { + let next_ptr = next as *mut T as *mut BaseOutStructure; + let last_next = ptr_chain_iter(next).last().unwrap(); + (*last_next).p_next = self.inner.p_next as _; + self.inner.p_next = next_ptr as _; + } + self + } + #[doc = r" Calling build will **discard** all the lifetime information. Only call this if"] + #[doc = r" necessary! 
Builders implement `Deref` targeting their corresponding Vulkan struct,"] + #[doc = r" so references to builders can be passed directly to Vulkan functions."] + pub fn build(self) -> DisplayPlaneCapabilities2KHR { + self.inner + } +} +#[repr(C)] +#[derive(Copy, Clone, Debug)] +#[doc = ""] +pub struct SharedPresentSurfaceCapabilitiesKHR { + pub s_type: StructureType, + pub p_next: *mut c_void, + pub shared_present_supported_usage_flags: ImageUsageFlags, +} +impl ::std::default::Default for SharedPresentSurfaceCapabilitiesKHR { + fn default() -> SharedPresentSurfaceCapabilitiesKHR { + SharedPresentSurfaceCapabilitiesKHR { + s_type: StructureType::SHARED_PRESENT_SURFACE_CAPABILITIES_KHR, + p_next: ::std::ptr::null_mut(), + shared_present_supported_usage_flags: ImageUsageFlags::default(), + } + } +} +impl SharedPresentSurfaceCapabilitiesKHR { + pub fn builder<'a>() -> SharedPresentSurfaceCapabilitiesKHRBuilder<'a> { + SharedPresentSurfaceCapabilitiesKHRBuilder { + inner: SharedPresentSurfaceCapabilitiesKHR::default(), + marker: ::std::marker::PhantomData, + } + } +} +#[repr(transparent)] +pub struct SharedPresentSurfaceCapabilitiesKHRBuilder<'a> { + inner: SharedPresentSurfaceCapabilitiesKHR, + marker: ::std::marker::PhantomData<&'a ()>, +} +unsafe impl ExtendsSurfaceCapabilities2KHR for SharedPresentSurfaceCapabilitiesKHRBuilder<'_> {} +unsafe impl ExtendsSurfaceCapabilities2KHR for SharedPresentSurfaceCapabilitiesKHR {} +impl<'a> ::std::ops::Deref for SharedPresentSurfaceCapabilitiesKHRBuilder<'a> { + type Target = SharedPresentSurfaceCapabilitiesKHR; + fn deref(&self) -> &Self::Target { + &self.inner + } +} +impl<'a> ::std::ops::DerefMut for SharedPresentSurfaceCapabilitiesKHRBuilder<'a> { + fn deref_mut(&mut self) -> &mut Self::Target { + &mut self.inner + } +} +impl<'a> SharedPresentSurfaceCapabilitiesKHRBuilder<'a> { + pub fn shared_present_supported_usage_flags( + mut self, + shared_present_supported_usage_flags: ImageUsageFlags, + ) -> SharedPresentSurfaceCapabilitiesKHRBuilder<'a> { + self.inner.shared_present_supported_usage_flags = shared_present_supported_usage_flags; + self + } + #[doc = r" Calling build will **discard** all the lifetime information. Only call this if"] + #[doc = r" necessary! 
Builders implement `Deref` targeting their corresponding Vulkan struct,"] + #[doc = r" so references to builders can be passed directly to Vulkan functions."] + pub fn build(self) -> SharedPresentSurfaceCapabilitiesKHR { + self.inner + } +} +#[repr(C)] +#[derive(Copy, Clone, Debug)] +#[doc = ""] +pub struct PhysicalDevice16BitStorageFeatures { + pub s_type: StructureType, + pub p_next: *mut c_void, + pub storage_buffer16_bit_access: Bool32, + pub uniform_and_storage_buffer16_bit_access: Bool32, + pub storage_push_constant16: Bool32, + pub storage_input_output16: Bool32, +} +impl ::std::default::Default for PhysicalDevice16BitStorageFeatures { + fn default() -> PhysicalDevice16BitStorageFeatures { + PhysicalDevice16BitStorageFeatures { + s_type: StructureType::PHYSICAL_DEVICE_16BIT_STORAGE_FEATURES, + p_next: ::std::ptr::null_mut(), + storage_buffer16_bit_access: Bool32::default(), + uniform_and_storage_buffer16_bit_access: Bool32::default(), + storage_push_constant16: Bool32::default(), + storage_input_output16: Bool32::default(), + } + } +} +impl PhysicalDevice16BitStorageFeatures { + pub fn builder<'a>() -> PhysicalDevice16BitStorageFeaturesBuilder<'a> { + PhysicalDevice16BitStorageFeaturesBuilder { + inner: PhysicalDevice16BitStorageFeatures::default(), + marker: ::std::marker::PhantomData, + } + } +} +#[repr(transparent)] +pub struct PhysicalDevice16BitStorageFeaturesBuilder<'a> { + inner: PhysicalDevice16BitStorageFeatures, + marker: ::std::marker::PhantomData<&'a ()>, +} +unsafe impl ExtendsDeviceCreateInfo for PhysicalDevice16BitStorageFeaturesBuilder<'_> {} +unsafe impl ExtendsDeviceCreateInfo for PhysicalDevice16BitStorageFeatures {} +impl<'a> ::std::ops::Deref for PhysicalDevice16BitStorageFeaturesBuilder<'a> { + type Target = PhysicalDevice16BitStorageFeatures; + fn deref(&self) -> &Self::Target { + &self.inner + } +} +impl<'a> ::std::ops::DerefMut for PhysicalDevice16BitStorageFeaturesBuilder<'a> { + fn deref_mut(&mut self) -> &mut Self::Target { + &mut self.inner + } +} +impl<'a> PhysicalDevice16BitStorageFeaturesBuilder<'a> { + pub fn storage_buffer16_bit_access( + mut self, + storage_buffer16_bit_access: bool, + ) -> PhysicalDevice16BitStorageFeaturesBuilder<'a> { + self.inner.storage_buffer16_bit_access = storage_buffer16_bit_access.into(); + self + } + pub fn uniform_and_storage_buffer16_bit_access( + mut self, + uniform_and_storage_buffer16_bit_access: bool, + ) -> PhysicalDevice16BitStorageFeaturesBuilder<'a> { + self.inner.uniform_and_storage_buffer16_bit_access = + uniform_and_storage_buffer16_bit_access.into(); + self + } + pub fn storage_push_constant16( + mut self, + storage_push_constant16: bool, + ) -> PhysicalDevice16BitStorageFeaturesBuilder<'a> { + self.inner.storage_push_constant16 = storage_push_constant16.into(); + self + } + pub fn storage_input_output16( + mut self, + storage_input_output16: bool, + ) -> PhysicalDevice16BitStorageFeaturesBuilder<'a> { + self.inner.storage_input_output16 = storage_input_output16.into(); + self + } + #[doc = r" Calling build will **discard** all the lifetime information. Only call this if"] + #[doc = r" necessary! 
Builders implement `Deref` targeting their corresponding Vulkan struct,"] + #[doc = r" so references to builders can be passed directly to Vulkan functions."] + pub fn build(self) -> PhysicalDevice16BitStorageFeatures { + self.inner + } +} +#[repr(C)] +#[derive(Copy, Clone, Debug)] +#[doc = ""] +pub struct PhysicalDeviceSubgroupProperties { + pub s_type: StructureType, + pub p_next: *mut c_void, + pub subgroup_size: u32, + pub supported_stages: ShaderStageFlags, + pub supported_operations: SubgroupFeatureFlags, + pub quad_operations_in_all_stages: Bool32, +} +impl ::std::default::Default for PhysicalDeviceSubgroupProperties { + fn default() -> PhysicalDeviceSubgroupProperties { + PhysicalDeviceSubgroupProperties { + s_type: StructureType::PHYSICAL_DEVICE_SUBGROUP_PROPERTIES, + p_next: ::std::ptr::null_mut(), + subgroup_size: u32::default(), + supported_stages: ShaderStageFlags::default(), + supported_operations: SubgroupFeatureFlags::default(), + quad_operations_in_all_stages: Bool32::default(), + } + } +} +impl PhysicalDeviceSubgroupProperties { + pub fn builder<'a>() -> PhysicalDeviceSubgroupPropertiesBuilder<'a> { + PhysicalDeviceSubgroupPropertiesBuilder { + inner: PhysicalDeviceSubgroupProperties::default(), + marker: ::std::marker::PhantomData, + } + } +} +#[repr(transparent)] +pub struct PhysicalDeviceSubgroupPropertiesBuilder<'a> { + inner: PhysicalDeviceSubgroupProperties, + marker: ::std::marker::PhantomData<&'a ()>, +} +unsafe impl ExtendsPhysicalDeviceProperties2 for PhysicalDeviceSubgroupPropertiesBuilder<'_> {} +unsafe impl ExtendsPhysicalDeviceProperties2 for PhysicalDeviceSubgroupProperties {} +impl<'a> ::std::ops::Deref for PhysicalDeviceSubgroupPropertiesBuilder<'a> { + type Target = PhysicalDeviceSubgroupProperties; + fn deref(&self) -> &Self::Target { + &self.inner + } +} +impl<'a> ::std::ops::DerefMut for PhysicalDeviceSubgroupPropertiesBuilder<'a> { + fn deref_mut(&mut self) -> &mut Self::Target { + &mut self.inner + } +} +impl<'a> PhysicalDeviceSubgroupPropertiesBuilder<'a> { + pub fn subgroup_size( + mut self, + subgroup_size: u32, + ) -> PhysicalDeviceSubgroupPropertiesBuilder<'a> { + self.inner.subgroup_size = subgroup_size; + self + } + pub fn supported_stages( + mut self, + supported_stages: ShaderStageFlags, + ) -> PhysicalDeviceSubgroupPropertiesBuilder<'a> { + self.inner.supported_stages = supported_stages; + self + } + pub fn supported_operations( + mut self, + supported_operations: SubgroupFeatureFlags, + ) -> PhysicalDeviceSubgroupPropertiesBuilder<'a> { + self.inner.supported_operations = supported_operations; + self + } + pub fn quad_operations_in_all_stages( + mut self, + quad_operations_in_all_stages: bool, + ) -> PhysicalDeviceSubgroupPropertiesBuilder<'a> { + self.inner.quad_operations_in_all_stages = quad_operations_in_all_stages.into(); + self + } + #[doc = r" Calling build will **discard** all the lifetime information. Only call this if"] + #[doc = r" necessary! 
Builders implement `Deref` targeting their corresponding Vulkan struct,"] + #[doc = r" so references to builders can be passed directly to Vulkan functions."] + pub fn build(self) -> PhysicalDeviceSubgroupProperties { + self.inner + } +} +#[repr(C)] +#[derive(Copy, Clone, Debug)] +#[doc = ""] +pub struct BufferMemoryRequirementsInfo2 { + pub s_type: StructureType, + pub p_next: *const c_void, + pub buffer: Buffer, +} +impl ::std::default::Default for BufferMemoryRequirementsInfo2 { + fn default() -> BufferMemoryRequirementsInfo2 { + BufferMemoryRequirementsInfo2 { + s_type: StructureType::BUFFER_MEMORY_REQUIREMENTS_INFO_2, + p_next: ::std::ptr::null(), + buffer: Buffer::default(), + } + } +} +impl BufferMemoryRequirementsInfo2 { + pub fn builder<'a>() -> BufferMemoryRequirementsInfo2Builder<'a> { + BufferMemoryRequirementsInfo2Builder { + inner: BufferMemoryRequirementsInfo2::default(), + marker: ::std::marker::PhantomData, + } + } +} +#[repr(transparent)] +pub struct BufferMemoryRequirementsInfo2Builder<'a> { + inner: BufferMemoryRequirementsInfo2, + marker: ::std::marker::PhantomData<&'a ()>, +} +pub unsafe trait ExtendsBufferMemoryRequirementsInfo2 {} +impl<'a> ::std::ops::Deref for BufferMemoryRequirementsInfo2Builder<'a> { + type Target = BufferMemoryRequirementsInfo2; + fn deref(&self) -> &Self::Target { + &self.inner + } +} +impl<'a> ::std::ops::DerefMut for BufferMemoryRequirementsInfo2Builder<'a> { + fn deref_mut(&mut self) -> &mut Self::Target { + &mut self.inner + } +} +impl<'a> BufferMemoryRequirementsInfo2Builder<'a> { + pub fn buffer(mut self, buffer: Buffer) -> BufferMemoryRequirementsInfo2Builder<'a> { + self.inner.buffer = buffer; + self + } + #[doc = r" Prepends the given extension struct between the root and the first pointer. This"] + #[doc = r" method only exists on structs that can be passed to a function directly. Only"] + #[doc = r" valid extension structs can be pushed into the chain."] + #[doc = r" If the chain looks like `A -> B -> C`, and you call `builder.push_next(&mut D)`, then the"] + #[doc = r" chain will look like `A -> D -> B -> C`."] + pub fn push_next<T: ExtendsBufferMemoryRequirementsInfo2>( + mut self, + next: &'a mut T, + ) -> BufferMemoryRequirementsInfo2Builder<'a> { + unsafe { + let next_ptr = next as *mut T as *mut BaseOutStructure; + let last_next = ptr_chain_iter(next).last().unwrap(); + (*last_next).p_next = self.inner.p_next as _; + self.inner.p_next = next_ptr as _; + } + self + } + #[doc = r" Calling build will **discard** all the lifetime information. Only call this if"] + #[doc = r" necessary! 
Builders implement `Deref` targeting their corresponding Vulkan struct,"] + #[doc = r" so references to builders can be passed directly to Vulkan functions."] + pub fn build(self) -> BufferMemoryRequirementsInfo2 { + self.inner + } +} +#[repr(C)] +#[derive(Copy, Clone, Debug)] +#[doc = ""] +pub struct ImageMemoryRequirementsInfo2 { + pub s_type: StructureType, + pub p_next: *const c_void, + pub image: Image, +} +impl ::std::default::Default for ImageMemoryRequirementsInfo2 { + fn default() -> ImageMemoryRequirementsInfo2 { + ImageMemoryRequirementsInfo2 { + s_type: StructureType::IMAGE_MEMORY_REQUIREMENTS_INFO_2, + p_next: ::std::ptr::null(), + image: Image::default(), + } + } +} +impl ImageMemoryRequirementsInfo2 { + pub fn builder<'a>() -> ImageMemoryRequirementsInfo2Builder<'a> { + ImageMemoryRequirementsInfo2Builder { + inner: ImageMemoryRequirementsInfo2::default(), + marker: ::std::marker::PhantomData, + } + } +} +#[repr(transparent)] +pub struct ImageMemoryRequirementsInfo2Builder<'a> { + inner: ImageMemoryRequirementsInfo2, + marker: ::std::marker::PhantomData<&'a ()>, +} +pub unsafe trait ExtendsImageMemoryRequirementsInfo2 {} +impl<'a> ::std::ops::Deref for ImageMemoryRequirementsInfo2Builder<'a> { + type Target = ImageMemoryRequirementsInfo2; + fn deref(&self) -> &Self::Target { + &self.inner + } +} +impl<'a> ::std::ops::DerefMut for ImageMemoryRequirementsInfo2Builder<'a> { + fn deref_mut(&mut self) -> &mut Self::Target { + &mut self.inner + } +} +impl<'a> ImageMemoryRequirementsInfo2Builder<'a> { + pub fn image(mut self, image: Image) -> ImageMemoryRequirementsInfo2Builder<'a> { + self.inner.image = image; + self + } + #[doc = r" Prepends the given extension struct between the root and the first pointer. This"] + #[doc = r" method only exists on structs that can be passed to a function directly. Only"] + #[doc = r" valid extension structs can be pushed into the chain."] + #[doc = r" If the chain looks like `A -> B -> C`, and you call `builder.push_next(&mut D)`, then the"] + #[doc = r" chain will look like `A -> D -> B -> C`."] + pub fn push_next<T: ExtendsImageMemoryRequirementsInfo2>( + mut self, + next: &'a mut T, + ) -> ImageMemoryRequirementsInfo2Builder<'a> { + unsafe { + let next_ptr = next as *mut T as *mut BaseOutStructure; + let last_next = ptr_chain_iter(next).last().unwrap(); + (*last_next).p_next = self.inner.p_next as _; + self.inner.p_next = next_ptr as _; + } + self + } + #[doc = r" Calling build will **discard** all the lifetime information. Only call this if"] + #[doc = r" necessary! 
Builders implement `Deref` targeting their corresponding Vulkan struct,"] + #[doc = r" so references to builders can be passed directly to Vulkan functions."] + pub fn build(self) -> ImageMemoryRequirementsInfo2 { + self.inner + } +} +#[repr(C)] +#[derive(Copy, Clone, Debug)] +#[doc = ""] +pub struct ImageSparseMemoryRequirementsInfo2 { + pub s_type: StructureType, + pub p_next: *const c_void, + pub image: Image, +} +impl ::std::default::Default for ImageSparseMemoryRequirementsInfo2 { + fn default() -> ImageSparseMemoryRequirementsInfo2 { + ImageSparseMemoryRequirementsInfo2 { + s_type: StructureType::IMAGE_SPARSE_MEMORY_REQUIREMENTS_INFO_2, + p_next: ::std::ptr::null(), + image: Image::default(), + } + } +} +impl ImageSparseMemoryRequirementsInfo2 { + pub fn builder<'a>() -> ImageSparseMemoryRequirementsInfo2Builder<'a> { + ImageSparseMemoryRequirementsInfo2Builder { + inner: ImageSparseMemoryRequirementsInfo2::default(), + marker: ::std::marker::PhantomData, + } + } +} +#[repr(transparent)] +pub struct ImageSparseMemoryRequirementsInfo2Builder<'a> { + inner: ImageSparseMemoryRequirementsInfo2, + marker: ::std::marker::PhantomData<&'a ()>, +} +pub unsafe trait ExtendsImageSparseMemoryRequirementsInfo2 {} +impl<'a> ::std::ops::Deref for ImageSparseMemoryRequirementsInfo2Builder<'a> { + type Target = ImageSparseMemoryRequirementsInfo2; + fn deref(&self) -> &Self::Target { + &self.inner + } +} +impl<'a> ::std::ops::DerefMut for ImageSparseMemoryRequirementsInfo2Builder<'a> { + fn deref_mut(&mut self) -> &mut Self::Target { + &mut self.inner + } +} +impl<'a> ImageSparseMemoryRequirementsInfo2Builder<'a> { + pub fn image(mut self, image: Image) -> ImageSparseMemoryRequirementsInfo2Builder<'a> { + self.inner.image = image; + self + } + #[doc = r" Prepends the given extension struct between the root and the first pointer. This"] + #[doc = r" method only exists on structs that can be passed to a function directly. Only"] + #[doc = r" valid extension structs can be pushed into the chain."] + #[doc = r" If the chain looks like `A -> B -> C`, and you call `builder.push_next(&mut D)`, then the"] + #[doc = r" chain will look like `A -> D -> B -> C`."] + pub fn push_next<T: ExtendsImageSparseMemoryRequirementsInfo2>( + mut self, + next: &'a mut T, + ) -> ImageSparseMemoryRequirementsInfo2Builder<'a> { + unsafe { + let next_ptr = next as *mut T as *mut BaseOutStructure; + let last_next = ptr_chain_iter(next).last().unwrap(); + (*last_next).p_next = self.inner.p_next as _; + self.inner.p_next = next_ptr as _; + } + self + } + #[doc = r" Calling build will **discard** all the lifetime information. Only call this if"] + #[doc = r" necessary! 
Builders implement `Deref` targeting their corresponding Vulkan struct,"] + #[doc = r" so references to builders can be passed directly to Vulkan functions."] + pub fn build(self) -> ImageSparseMemoryRequirementsInfo2 { + self.inner + } +} +#[repr(C)] +#[derive(Copy, Clone, Debug)] +#[doc = ""] +pub struct MemoryRequirements2 { + pub s_type: StructureType, + pub p_next: *mut c_void, + pub memory_requirements: MemoryRequirements, +} +impl ::std::default::Default for MemoryRequirements2 { + fn default() -> MemoryRequirements2 { + MemoryRequirements2 { + s_type: StructureType::MEMORY_REQUIREMENTS_2, + p_next: ::std::ptr::null_mut(), + memory_requirements: MemoryRequirements::default(), + } + } +} +impl MemoryRequirements2 { + pub fn builder<'a>() -> MemoryRequirements2Builder<'a> { + MemoryRequirements2Builder { + inner: MemoryRequirements2::default(), + marker: ::std::marker::PhantomData, + } + } +} +#[repr(transparent)] +pub struct MemoryRequirements2Builder<'a> { + inner: MemoryRequirements2, + marker: ::std::marker::PhantomData<&'a ()>, +} +pub unsafe trait ExtendsMemoryRequirements2 {} +impl<'a> ::std::ops::Deref for MemoryRequirements2Builder<'a> { + type Target = MemoryRequirements2; + fn deref(&self) -> &Self::Target { + &self.inner + } +} +impl<'a> ::std::ops::DerefMut for MemoryRequirements2Builder<'a> { + fn deref_mut(&mut self) -> &mut Self::Target { + &mut self.inner + } +} +impl<'a> MemoryRequirements2Builder<'a> { + pub fn memory_requirements( + mut self, + memory_requirements: MemoryRequirements, + ) -> MemoryRequirements2Builder<'a> { + self.inner.memory_requirements = memory_requirements; + self + } + #[doc = r" Prepends the given extension struct between the root and the first pointer. This"] + #[doc = r" method only exists on structs that can be passed to a function directly. Only"] + #[doc = r" valid extension structs can be pushed into the chain."] + #[doc = r" If the chain looks like `A -> B -> C`, and you call `builder.push_next(&mut D)`, then the"] + #[doc = r" chain will look like `A -> D -> B -> C`."] + pub fn push_next<T: ExtendsMemoryRequirements2>( + mut self, + next: &'a mut T, + ) -> MemoryRequirements2Builder<'a> { + unsafe { + let next_ptr = next as *mut T as *mut BaseOutStructure; + let last_next = ptr_chain_iter(next).last().unwrap(); + (*last_next).p_next = self.inner.p_next as _; + self.inner.p_next = next_ptr as _; + } + self + } + #[doc = r" Calling build will **discard** all the lifetime information. Only call this if"] + #[doc = r" necessary! 
Builders implement `Deref` targeting their corresponding Vulkan struct,"] + #[doc = r" so references to builders can be passed directly to Vulkan functions."] + pub fn build(self) -> MemoryRequirements2 { + self.inner + } +} +#[repr(C)] +#[derive(Copy, Clone, Debug)] +#[doc = ""] +pub struct SparseImageMemoryRequirements2 { + pub s_type: StructureType, + pub p_next: *mut c_void, + pub memory_requirements: SparseImageMemoryRequirements, +} +impl ::std::default::Default for SparseImageMemoryRequirements2 { + fn default() -> SparseImageMemoryRequirements2 { + SparseImageMemoryRequirements2 { + s_type: StructureType::SPARSE_IMAGE_MEMORY_REQUIREMENTS_2, + p_next: ::std::ptr::null_mut(), + memory_requirements: SparseImageMemoryRequirements::default(), + } + } +} +impl SparseImageMemoryRequirements2 { + pub fn builder<'a>() -> SparseImageMemoryRequirements2Builder<'a> { + SparseImageMemoryRequirements2Builder { + inner: SparseImageMemoryRequirements2::default(), + marker: ::std::marker::PhantomData, + } + } +} +#[repr(transparent)] +pub struct SparseImageMemoryRequirements2Builder<'a> { + inner: SparseImageMemoryRequirements2, + marker: ::std::marker::PhantomData<&'a ()>, +} +pub unsafe trait ExtendsSparseImageMemoryRequirements2 {} +impl<'a> ::std::ops::Deref for SparseImageMemoryRequirements2Builder<'a> { + type Target = SparseImageMemoryRequirements2; + fn deref(&self) -> &Self::Target { + &self.inner + } +} +impl<'a> ::std::ops::DerefMut for SparseImageMemoryRequirements2Builder<'a> { + fn deref_mut(&mut self) -> &mut Self::Target { + &mut self.inner + } +} +impl<'a> SparseImageMemoryRequirements2Builder<'a> { + pub fn memory_requirements( + mut self, + memory_requirements: SparseImageMemoryRequirements, + ) -> SparseImageMemoryRequirements2Builder<'a> { + self.inner.memory_requirements = memory_requirements; + self + } + #[doc = r" Prepends the given extension struct between the root and the first pointer. This"] + #[doc = r" method only exists on structs that can be passed to a function directly. Only"] + #[doc = r" valid extension structs can be pushed into the chain."] + #[doc = r" If the chain looks like `A -> B -> C`, and you call `builder.push_next(&mut D)`, then the"] + #[doc = r" chain will look like `A -> D -> B -> C`."] + pub fn push_next<T: ExtendsSparseImageMemoryRequirements2>( + mut self, + next: &'a mut T, + ) -> SparseImageMemoryRequirements2Builder<'a> { + unsafe { + let next_ptr = next as *mut T as *mut BaseOutStructure; + let last_next = ptr_chain_iter(next).last().unwrap(); + (*last_next).p_next = self.inner.p_next as _; + self.inner.p_next = next_ptr as _; + } + self + } + #[doc = r" Calling build will **discard** all the lifetime information. Only call this if"] + #[doc = r" necessary! 
Builders implement `Deref` targeting their corresponding Vulkan struct,"] + #[doc = r" so references to builders can be passed directly to Vulkan functions."] + pub fn build(self) -> SparseImageMemoryRequirements2 { + self.inner + } +} +#[repr(C)] +#[derive(Copy, Clone, Debug)] +#[doc = ""] +pub struct PhysicalDevicePointClippingProperties { + pub s_type: StructureType, + pub p_next: *mut c_void, + pub point_clipping_behavior: PointClippingBehavior, +} +impl ::std::default::Default for PhysicalDevicePointClippingProperties { + fn default() -> PhysicalDevicePointClippingProperties { + PhysicalDevicePointClippingProperties { + s_type: StructureType::PHYSICAL_DEVICE_POINT_CLIPPING_PROPERTIES, + p_next: ::std::ptr::null_mut(), + point_clipping_behavior: PointClippingBehavior::default(), + } + } +} +impl PhysicalDevicePointClippingProperties { + pub fn builder<'a>() -> PhysicalDevicePointClippingPropertiesBuilder<'a> { + PhysicalDevicePointClippingPropertiesBuilder { + inner: PhysicalDevicePointClippingProperties::default(), + marker: ::std::marker::PhantomData, + } + } +} +#[repr(transparent)] +pub struct PhysicalDevicePointClippingPropertiesBuilder<'a> { + inner: PhysicalDevicePointClippingProperties, + marker: ::std::marker::PhantomData<&'a ()>, +} +unsafe impl ExtendsPhysicalDeviceProperties2 for PhysicalDevicePointClippingPropertiesBuilder<'_> {} +unsafe impl ExtendsPhysicalDeviceProperties2 for PhysicalDevicePointClippingProperties {} +impl<'a> ::std::ops::Deref for PhysicalDevicePointClippingPropertiesBuilder<'a> { + type Target = PhysicalDevicePointClippingProperties; + fn deref(&self) -> &Self::Target { + &self.inner + } +} +impl<'a> ::std::ops::DerefMut for PhysicalDevicePointClippingPropertiesBuilder<'a> { + fn deref_mut(&mut self) -> &mut Self::Target { + &mut self.inner + } +} +impl<'a> PhysicalDevicePointClippingPropertiesBuilder<'a> { + pub fn point_clipping_behavior( + mut self, + point_clipping_behavior: PointClippingBehavior, + ) -> PhysicalDevicePointClippingPropertiesBuilder<'a> { + self.inner.point_clipping_behavior = point_clipping_behavior; + self + } + #[doc = r" Calling build will **discard** all the lifetime information. Only call this if"] + #[doc = r" necessary! 
Builders implement `Deref` targeting their corresponding Vulkan struct,"] + #[doc = r" so references to builders can be passed directly to Vulkan functions."] + pub fn build(self) -> PhysicalDevicePointClippingProperties { + self.inner + } +} +#[repr(C)] +#[derive(Copy, Clone, Debug)] +#[doc = ""] +pub struct MemoryDedicatedRequirements { + pub s_type: StructureType, + pub p_next: *mut c_void, + pub prefers_dedicated_allocation: Bool32, + pub requires_dedicated_allocation: Bool32, +} +impl ::std::default::Default for MemoryDedicatedRequirements { + fn default() -> MemoryDedicatedRequirements { + MemoryDedicatedRequirements { + s_type: StructureType::MEMORY_DEDICATED_REQUIREMENTS, + p_next: ::std::ptr::null_mut(), + prefers_dedicated_allocation: Bool32::default(), + requires_dedicated_allocation: Bool32::default(), + } + } +} +impl MemoryDedicatedRequirements { + pub fn builder<'a>() -> MemoryDedicatedRequirementsBuilder<'a> { + MemoryDedicatedRequirementsBuilder { + inner: MemoryDedicatedRequirements::default(), + marker: ::std::marker::PhantomData, + } + } +} +#[repr(transparent)] +pub struct MemoryDedicatedRequirementsBuilder<'a> { + inner: MemoryDedicatedRequirements, + marker: ::std::marker::PhantomData<&'a ()>, +} +unsafe impl ExtendsMemoryRequirements2 for MemoryDedicatedRequirementsBuilder<'_> {} +unsafe impl ExtendsMemoryRequirements2 for MemoryDedicatedRequirements {} +impl<'a> ::std::ops::Deref for MemoryDedicatedRequirementsBuilder<'a> { + type Target = MemoryDedicatedRequirements; + fn deref(&self) -> &Self::Target { + &self.inner + } +} +impl<'a> ::std::ops::DerefMut for MemoryDedicatedRequirementsBuilder<'a> { + fn deref_mut(&mut self) -> &mut Self::Target { + &mut self.inner + } +} +impl<'a> MemoryDedicatedRequirementsBuilder<'a> { + pub fn prefers_dedicated_allocation( + mut self, + prefers_dedicated_allocation: bool, + ) -> MemoryDedicatedRequirementsBuilder<'a> { + self.inner.prefers_dedicated_allocation = prefers_dedicated_allocation.into(); + self + } + pub fn requires_dedicated_allocation( + mut self, + requires_dedicated_allocation: bool, + ) -> MemoryDedicatedRequirementsBuilder<'a> { + self.inner.requires_dedicated_allocation = requires_dedicated_allocation.into(); + self + } + #[doc = r" Calling build will **discard** all the lifetime information. Only call this if"] + #[doc = r" necessary! 
Builders implement `Deref` targeting their corresponding Vulkan struct,"] + #[doc = r" so references to builders can be passed directly to Vulkan functions."] + pub fn build(self) -> MemoryDedicatedRequirements { + self.inner + } +} +#[repr(C)] +#[derive(Copy, Clone, Debug)] +#[doc = ""] +pub struct MemoryDedicatedAllocateInfo { + pub s_type: StructureType, + pub p_next: *const c_void, + pub image: Image, + pub buffer: Buffer, +} +impl ::std::default::Default for MemoryDedicatedAllocateInfo { + fn default() -> MemoryDedicatedAllocateInfo { + MemoryDedicatedAllocateInfo { + s_type: StructureType::MEMORY_DEDICATED_ALLOCATE_INFO, + p_next: ::std::ptr::null(), + image: Image::default(), + buffer: Buffer::default(), + } + } +} +impl MemoryDedicatedAllocateInfo { + pub fn builder<'a>() -> MemoryDedicatedAllocateInfoBuilder<'a> { + MemoryDedicatedAllocateInfoBuilder { + inner: MemoryDedicatedAllocateInfo::default(), + marker: ::std::marker::PhantomData, + } + } +} +#[repr(transparent)] +pub struct MemoryDedicatedAllocateInfoBuilder<'a> { + inner: MemoryDedicatedAllocateInfo, + marker: ::std::marker::PhantomData<&'a ()>, +} +unsafe impl ExtendsMemoryAllocateInfo for MemoryDedicatedAllocateInfoBuilder<'_> {} +unsafe impl ExtendsMemoryAllocateInfo for MemoryDedicatedAllocateInfo {} +impl<'a> ::std::ops::Deref for MemoryDedicatedAllocateInfoBuilder<'a> { + type Target = MemoryDedicatedAllocateInfo; + fn deref(&self) -> &Self::Target { + &self.inner + } +} +impl<'a> ::std::ops::DerefMut for MemoryDedicatedAllocateInfoBuilder<'a> { + fn deref_mut(&mut self) -> &mut Self::Target { + &mut self.inner + } +} +impl<'a> MemoryDedicatedAllocateInfoBuilder<'a> { + pub fn image(mut self, image: Image) -> MemoryDedicatedAllocateInfoBuilder<'a> { + self.inner.image = image; + self + } + pub fn buffer(mut self, buffer: Buffer) -> MemoryDedicatedAllocateInfoBuilder<'a> { + self.inner.buffer = buffer; + self + } + #[doc = r" Calling build will **discard** all the lifetime information. Only call this if"] + #[doc = r" necessary! 
Builders implement `Deref` targeting their corresponding Vulkan struct,"] + #[doc = r" so references to builders can be passed directly to Vulkan functions."] + pub fn build(self) -> MemoryDedicatedAllocateInfo { + self.inner + } +} +#[repr(C)] +#[derive(Copy, Clone, Debug)] +#[doc = ""] +pub struct ImageViewUsageCreateInfo { + pub s_type: StructureType, + pub p_next: *const c_void, + pub usage: ImageUsageFlags, +} +impl ::std::default::Default for ImageViewUsageCreateInfo { + fn default() -> ImageViewUsageCreateInfo { + ImageViewUsageCreateInfo { + s_type: StructureType::IMAGE_VIEW_USAGE_CREATE_INFO, + p_next: ::std::ptr::null(), + usage: ImageUsageFlags::default(), + } + } +} +impl ImageViewUsageCreateInfo { + pub fn builder<'a>() -> ImageViewUsageCreateInfoBuilder<'a> { + ImageViewUsageCreateInfoBuilder { + inner: ImageViewUsageCreateInfo::default(), + marker: ::std::marker::PhantomData, + } + } +} +#[repr(transparent)] +pub struct ImageViewUsageCreateInfoBuilder<'a> { + inner: ImageViewUsageCreateInfo, + marker: ::std::marker::PhantomData<&'a ()>, +} +unsafe impl ExtendsImageViewCreateInfo for ImageViewUsageCreateInfoBuilder<'_> {} +unsafe impl ExtendsImageViewCreateInfo for ImageViewUsageCreateInfo {} +impl<'a> ::std::ops::Deref for ImageViewUsageCreateInfoBuilder<'a> { + type Target = ImageViewUsageCreateInfo; + fn deref(&self) -> &Self::Target { + &self.inner + } +} +impl<'a> ::std::ops::DerefMut for ImageViewUsageCreateInfoBuilder<'a> { + fn deref_mut(&mut self) -> &mut Self::Target { + &mut self.inner + } +} +impl<'a> ImageViewUsageCreateInfoBuilder<'a> { + pub fn usage(mut self, usage: ImageUsageFlags) -> ImageViewUsageCreateInfoBuilder<'a> { + self.inner.usage = usage; + self + } + #[doc = r" Calling build will **discard** all the lifetime information. Only call this if"] + #[doc = r" necessary! 
Builders implement `Deref` targeting their corresponding Vulkan struct,"] + #[doc = r" so references to builders can be passed directly to Vulkan functions."] + pub fn build(self) -> ImageViewUsageCreateInfo { + self.inner + } +} +#[repr(C)] +#[derive(Copy, Clone, Debug)] +#[doc = ""] +pub struct PipelineTessellationDomainOriginStateCreateInfo { + pub s_type: StructureType, + pub p_next: *const c_void, + pub domain_origin: TessellationDomainOrigin, +} +impl ::std::default::Default for PipelineTessellationDomainOriginStateCreateInfo { + fn default() -> PipelineTessellationDomainOriginStateCreateInfo { + PipelineTessellationDomainOriginStateCreateInfo { + s_type: StructureType::PIPELINE_TESSELLATION_DOMAIN_ORIGIN_STATE_CREATE_INFO, + p_next: ::std::ptr::null(), + domain_origin: TessellationDomainOrigin::default(), + } + } +} +impl PipelineTessellationDomainOriginStateCreateInfo { + pub fn builder<'a>() -> PipelineTessellationDomainOriginStateCreateInfoBuilder<'a> { + PipelineTessellationDomainOriginStateCreateInfoBuilder { + inner: PipelineTessellationDomainOriginStateCreateInfo::default(), + marker: ::std::marker::PhantomData, + } + } +} +#[repr(transparent)] +pub struct PipelineTessellationDomainOriginStateCreateInfoBuilder<'a> { + inner: PipelineTessellationDomainOriginStateCreateInfo, + marker: ::std::marker::PhantomData<&'a ()>, +} +unsafe impl ExtendsPipelineTessellationStateCreateInfo + for PipelineTessellationDomainOriginStateCreateInfoBuilder<'_> +{ +} +unsafe impl ExtendsPipelineTessellationStateCreateInfo + for PipelineTessellationDomainOriginStateCreateInfo +{ +} +impl<'a> ::std::ops::Deref for PipelineTessellationDomainOriginStateCreateInfoBuilder<'a> { + type Target = PipelineTessellationDomainOriginStateCreateInfo; + fn deref(&self) -> &Self::Target { + &self.inner + } +} +impl<'a> ::std::ops::DerefMut for PipelineTessellationDomainOriginStateCreateInfoBuilder<'a> { + fn deref_mut(&mut self) -> &mut Self::Target { + &mut self.inner + } +} +impl<'a> PipelineTessellationDomainOriginStateCreateInfoBuilder<'a> { + pub fn domain_origin( + mut self, + domain_origin: TessellationDomainOrigin, + ) -> PipelineTessellationDomainOriginStateCreateInfoBuilder<'a> { + self.inner.domain_origin = domain_origin; + self + } + #[doc = r" Calling build will **discard** all the lifetime information. Only call this if"] + #[doc = r" necessary! 
Builders implement `Deref` targeting their corresponding Vulkan struct,"] + #[doc = r" so references to builders can be passed directly to Vulkan functions."] + pub fn build(self) -> PipelineTessellationDomainOriginStateCreateInfo { + self.inner + } +} +#[repr(C)] +#[derive(Copy, Clone, Debug)] +#[doc = ""] +pub struct SamplerYcbcrConversionInfo { + pub s_type: StructureType, + pub p_next: *const c_void, + pub conversion: SamplerYcbcrConversion, +} +impl ::std::default::Default for SamplerYcbcrConversionInfo { + fn default() -> SamplerYcbcrConversionInfo { + SamplerYcbcrConversionInfo { + s_type: StructureType::SAMPLER_YCBCR_CONVERSION_INFO, + p_next: ::std::ptr::null(), + conversion: SamplerYcbcrConversion::default(), + } + } +} +impl SamplerYcbcrConversionInfo { + pub fn builder<'a>() -> SamplerYcbcrConversionInfoBuilder<'a> { + SamplerYcbcrConversionInfoBuilder { + inner: SamplerYcbcrConversionInfo::default(), + marker: ::std::marker::PhantomData, + } + } +} +#[repr(transparent)] +pub struct SamplerYcbcrConversionInfoBuilder<'a> { + inner: SamplerYcbcrConversionInfo, + marker: ::std::marker::PhantomData<&'a ()>, +} +unsafe impl ExtendsSamplerCreateInfo for SamplerYcbcrConversionInfoBuilder<'_> {} +unsafe impl ExtendsSamplerCreateInfo for SamplerYcbcrConversionInfo {} +unsafe impl ExtendsImageViewCreateInfo for SamplerYcbcrConversionInfoBuilder<'_> {} +unsafe impl ExtendsImageViewCreateInfo for SamplerYcbcrConversionInfo {} +impl<'a> ::std::ops::Deref for SamplerYcbcrConversionInfoBuilder<'a> { + type Target = SamplerYcbcrConversionInfo; + fn deref(&self) -> &Self::Target { + &self.inner + } +} +impl<'a> ::std::ops::DerefMut for SamplerYcbcrConversionInfoBuilder<'a> { + fn deref_mut(&mut self) -> &mut Self::Target { + &mut self.inner + } +} +impl<'a> SamplerYcbcrConversionInfoBuilder<'a> { + pub fn conversion( + mut self, + conversion: SamplerYcbcrConversion, + ) -> SamplerYcbcrConversionInfoBuilder<'a> { + self.inner.conversion = conversion; + self + } + #[doc = r" Calling build will **discard** all the lifetime information. Only call this if"] + #[doc = r" necessary! 
Builders implement `Deref` targeting their corresponding Vulkan struct,"] + #[doc = r" so references to builders can be passed directly to Vulkan functions."] + pub fn build(self) -> SamplerYcbcrConversionInfo { + self.inner + } +} +#[repr(C)] +#[derive(Copy, Clone, Debug)] +#[doc = ""] +pub struct SamplerYcbcrConversionCreateInfo { + pub s_type: StructureType, + pub p_next: *const c_void, + pub format: Format, + pub ycbcr_model: SamplerYcbcrModelConversion, + pub ycbcr_range: SamplerYcbcrRange, + pub components: ComponentMapping, + pub x_chroma_offset: ChromaLocation, + pub y_chroma_offset: ChromaLocation, + pub chroma_filter: Filter, + pub force_explicit_reconstruction: Bool32, +} +impl ::std::default::Default for SamplerYcbcrConversionCreateInfo { + fn default() -> SamplerYcbcrConversionCreateInfo { + SamplerYcbcrConversionCreateInfo { + s_type: StructureType::SAMPLER_YCBCR_CONVERSION_CREATE_INFO, + p_next: ::std::ptr::null(), + format: Format::default(), + ycbcr_model: SamplerYcbcrModelConversion::default(), + ycbcr_range: SamplerYcbcrRange::default(), + components: ComponentMapping::default(), + x_chroma_offset: ChromaLocation::default(), + y_chroma_offset: ChromaLocation::default(), + chroma_filter: Filter::default(), + force_explicit_reconstruction: Bool32::default(), + } + } +} +impl SamplerYcbcrConversionCreateInfo { + pub fn builder<'a>() -> SamplerYcbcrConversionCreateInfoBuilder<'a> { + SamplerYcbcrConversionCreateInfoBuilder { + inner: SamplerYcbcrConversionCreateInfo::default(), + marker: ::std::marker::PhantomData, + } + } +} +#[repr(transparent)] +pub struct SamplerYcbcrConversionCreateInfoBuilder<'a> { + inner: SamplerYcbcrConversionCreateInfo, + marker: ::std::marker::PhantomData<&'a ()>, +} +pub unsafe trait ExtendsSamplerYcbcrConversionCreateInfo {} +impl<'a> ::std::ops::Deref for SamplerYcbcrConversionCreateInfoBuilder<'a> { + type Target = SamplerYcbcrConversionCreateInfo; + fn deref(&self) -> &Self::Target { + &self.inner + } +} +impl<'a> ::std::ops::DerefMut for SamplerYcbcrConversionCreateInfoBuilder<'a> { + fn deref_mut(&mut self) -> &mut Self::Target { + &mut self.inner + } +} +impl<'a> SamplerYcbcrConversionCreateInfoBuilder<'a> { + pub fn format(mut self, format: Format) -> SamplerYcbcrConversionCreateInfoBuilder<'a> { + self.inner.format = format; + self + } + pub fn ycbcr_model( + mut self, + ycbcr_model: SamplerYcbcrModelConversion, + ) -> SamplerYcbcrConversionCreateInfoBuilder<'a> { + self.inner.ycbcr_model = ycbcr_model; + self + } + pub fn ycbcr_range( + mut self, + ycbcr_range: SamplerYcbcrRange, + ) -> SamplerYcbcrConversionCreateInfoBuilder<'a> { + self.inner.ycbcr_range = ycbcr_range; + self + } + pub fn components( + mut self, + components: ComponentMapping, + ) -> SamplerYcbcrConversionCreateInfoBuilder<'a> { + self.inner.components = components; + self + } + pub fn x_chroma_offset( + mut self, + x_chroma_offset: ChromaLocation, + ) -> SamplerYcbcrConversionCreateInfoBuilder<'a> { + self.inner.x_chroma_offset = x_chroma_offset; + self + } + pub fn y_chroma_offset( + mut self, + y_chroma_offset: ChromaLocation, + ) -> SamplerYcbcrConversionCreateInfoBuilder<'a> { + self.inner.y_chroma_offset = y_chroma_offset; + self + } + pub fn chroma_filter( + mut self, + chroma_filter: Filter, + ) -> SamplerYcbcrConversionCreateInfoBuilder<'a> { + self.inner.chroma_filter = chroma_filter; + self + } + pub fn force_explicit_reconstruction( + mut self, + force_explicit_reconstruction: bool, + ) -> SamplerYcbcrConversionCreateInfoBuilder<'a> { + 
self.inner.force_explicit_reconstruction = force_explicit_reconstruction.into();
+        self
+    }
+    #[doc = r" Prepends the given extension struct between the root and the first pointer. This"]
+    #[doc = r" method only exists on structs that can be passed to a function directly. Only"]
+    #[doc = r" valid extension structs can be pushed into the chain."]
+    #[doc = r" If the chain looks like `A -> B -> C`, and you call `builder.push_next(&mut D)`, then the"]
+    #[doc = r" chain will look like `A -> D -> B -> C`."]
+    pub fn push_next<T: ExtendsSamplerYcbcrConversionCreateInfo>(
+        mut self,
+        next: &'a mut T,
+    ) -> SamplerYcbcrConversionCreateInfoBuilder<'a> {
+        unsafe {
+            let next_ptr = next as *mut T as *mut BaseOutStructure;
+            let last_next = ptr_chain_iter(next).last().unwrap();
+            (*last_next).p_next = self.inner.p_next as _;
+            self.inner.p_next = next_ptr as _;
+        }
+        self
+    }
+    #[doc = r" Calling build will **discard** all the lifetime information. Only call this if"]
+    #[doc = r" necessary! Builders implement `Deref` targeting their corresponding Vulkan struct,"]
+    #[doc = r" so references to builders can be passed directly to Vulkan functions."]
+    pub fn build(self) -> SamplerYcbcrConversionCreateInfo {
+        self.inner
+    }
+}
+#[repr(C)]
+#[derive(Copy, Clone, Debug)]
+#[doc = ""]
+pub struct BindImagePlaneMemoryInfo {
+    pub s_type: StructureType,
+    pub p_next: *const c_void,
+    pub plane_aspect: ImageAspectFlags,
+}
+impl ::std::default::Default for BindImagePlaneMemoryInfo {
+    fn default() -> BindImagePlaneMemoryInfo {
+        BindImagePlaneMemoryInfo {
+            s_type: StructureType::BIND_IMAGE_PLANE_MEMORY_INFO,
+            p_next: ::std::ptr::null(),
+            plane_aspect: ImageAspectFlags::default(),
+        }
+    }
+}
+impl BindImagePlaneMemoryInfo {
+    pub fn builder<'a>() -> BindImagePlaneMemoryInfoBuilder<'a> {
+        BindImagePlaneMemoryInfoBuilder {
+            inner: BindImagePlaneMemoryInfo::default(),
+            marker: ::std::marker::PhantomData,
+        }
+    }
+}
+#[repr(transparent)]
+pub struct BindImagePlaneMemoryInfoBuilder<'a> {
+    inner: BindImagePlaneMemoryInfo,
+    marker: ::std::marker::PhantomData<&'a ()>,
+}
+unsafe impl ExtendsBindImageMemoryInfo for BindImagePlaneMemoryInfoBuilder<'_> {}
+unsafe impl ExtendsBindImageMemoryInfo for BindImagePlaneMemoryInfo {}
+impl<'a> ::std::ops::Deref for BindImagePlaneMemoryInfoBuilder<'a> {
+    type Target = BindImagePlaneMemoryInfo;
+    fn deref(&self) -> &Self::Target {
+        &self.inner
+    }
+}
+impl<'a> ::std::ops::DerefMut for BindImagePlaneMemoryInfoBuilder<'a> {
+    fn deref_mut(&mut self) -> &mut Self::Target {
+        &mut self.inner
+    }
+}
+impl<'a> BindImagePlaneMemoryInfoBuilder<'a> {
+    pub fn plane_aspect(
+        mut self,
+        plane_aspect: ImageAspectFlags,
+    ) -> BindImagePlaneMemoryInfoBuilder<'a> {
+        self.inner.plane_aspect = plane_aspect;
+        self
+    }
+    #[doc = r" Calling build will **discard** all the lifetime information. Only call this if"]
+    #[doc = r" necessary! Builders implement `Deref` targeting their corresponding Vulkan struct,"]
+    #[doc = r" so references to builders can be passed directly to Vulkan functions."]
+    pub fn build(self) -> BindImagePlaneMemoryInfo {
+        self.inner
+    }
+}
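The `push_next` doc comments above describe splicing into the `p_next` chain. The following sketch shows the intended call shape; it assumes the analogous generated `SamplerCreateInfo` builder from elsewhere in this module, which is reasonable since `SamplerYcbcrConversionInfo` is declared `ExtendsSamplerCreateInfo` above:

    // A -> B -> C plus push_next(&mut D) yields A -> D -> B -> C:
    let mut ycbcr = SamplerYcbcrConversionInfo::builder()
        .conversion(SamplerYcbcrConversion::default()) // placeholder handle
        .build();
    let sampler_info = SamplerCreateInfo::builder().push_next(&mut ycbcr);
    // `sampler_info.p_next` now points at `ycbcr`, whose own `p_next` keeps any
    // previously-pushed extension structs reachable.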
+#[repr(C)]
+#[derive(Copy, Clone, Debug)]
+#[doc = ""]
+pub struct ImagePlaneMemoryRequirementsInfo {
+    pub s_type: StructureType,
+    pub p_next: *const c_void,
+    pub plane_aspect: ImageAspectFlags,
+}
+impl ::std::default::Default for ImagePlaneMemoryRequirementsInfo {
+    fn default() -> ImagePlaneMemoryRequirementsInfo {
+        ImagePlaneMemoryRequirementsInfo {
+            s_type: StructureType::IMAGE_PLANE_MEMORY_REQUIREMENTS_INFO,
+            p_next: ::std::ptr::null(),
+            plane_aspect: ImageAspectFlags::default(),
+        }
+    }
+}
+impl ImagePlaneMemoryRequirementsInfo {
+    pub fn builder<'a>() -> ImagePlaneMemoryRequirementsInfoBuilder<'a> {
+        ImagePlaneMemoryRequirementsInfoBuilder {
+            inner: ImagePlaneMemoryRequirementsInfo::default(),
+            marker: ::std::marker::PhantomData,
+        }
+    }
+}
+#[repr(transparent)]
+pub struct ImagePlaneMemoryRequirementsInfoBuilder<'a> {
+    inner: ImagePlaneMemoryRequirementsInfo,
+    marker: ::std::marker::PhantomData<&'a ()>,
+}
+unsafe impl ExtendsImageMemoryRequirementsInfo2 for ImagePlaneMemoryRequirementsInfoBuilder<'_> {}
+unsafe impl ExtendsImageMemoryRequirementsInfo2 for ImagePlaneMemoryRequirementsInfo {}
+impl<'a> ::std::ops::Deref for ImagePlaneMemoryRequirementsInfoBuilder<'a> {
+    type Target = ImagePlaneMemoryRequirementsInfo;
+    fn deref(&self) -> &Self::Target {
+        &self.inner
+    }
+}
+impl<'a> ::std::ops::DerefMut for ImagePlaneMemoryRequirementsInfoBuilder<'a> {
+    fn deref_mut(&mut self) -> &mut Self::Target {
+        &mut self.inner
+    }
+}
+impl<'a> ImagePlaneMemoryRequirementsInfoBuilder<'a> {
+    pub fn plane_aspect(
+        mut self,
+        plane_aspect: ImageAspectFlags,
+    ) -> ImagePlaneMemoryRequirementsInfoBuilder<'a> {
+        self.inner.plane_aspect = plane_aspect;
+        self
+    }
+    #[doc = r" Calling build will **discard** all the lifetime information. Only call this if"]
+    #[doc = r" necessary!
Builders implement `Deref` targeting their corresponding Vulkan struct,"] + #[doc = r" so references to builders can be passed directly to Vulkan functions."] + pub fn build(self) -> ImagePlaneMemoryRequirementsInfo { + self.inner + } +} +#[repr(C)] +#[derive(Copy, Clone, Debug)] +#[doc = ""] +pub struct PhysicalDeviceSamplerYcbcrConversionFeatures { + pub s_type: StructureType, + pub p_next: *mut c_void, + pub sampler_ycbcr_conversion: Bool32, +} +impl ::std::default::Default for PhysicalDeviceSamplerYcbcrConversionFeatures { + fn default() -> PhysicalDeviceSamplerYcbcrConversionFeatures { + PhysicalDeviceSamplerYcbcrConversionFeatures { + s_type: StructureType::PHYSICAL_DEVICE_SAMPLER_YCBCR_CONVERSION_FEATURES, + p_next: ::std::ptr::null_mut(), + sampler_ycbcr_conversion: Bool32::default(), + } + } +} +impl PhysicalDeviceSamplerYcbcrConversionFeatures { + pub fn builder<'a>() -> PhysicalDeviceSamplerYcbcrConversionFeaturesBuilder<'a> { + PhysicalDeviceSamplerYcbcrConversionFeaturesBuilder { + inner: PhysicalDeviceSamplerYcbcrConversionFeatures::default(), + marker: ::std::marker::PhantomData, + } + } +} +#[repr(transparent)] +pub struct PhysicalDeviceSamplerYcbcrConversionFeaturesBuilder<'a> { + inner: PhysicalDeviceSamplerYcbcrConversionFeatures, + marker: ::std::marker::PhantomData<&'a ()>, +} +unsafe impl ExtendsDeviceCreateInfo for PhysicalDeviceSamplerYcbcrConversionFeaturesBuilder<'_> {} +unsafe impl ExtendsDeviceCreateInfo for PhysicalDeviceSamplerYcbcrConversionFeatures {} +impl<'a> ::std::ops::Deref for PhysicalDeviceSamplerYcbcrConversionFeaturesBuilder<'a> { + type Target = PhysicalDeviceSamplerYcbcrConversionFeatures; + fn deref(&self) -> &Self::Target { + &self.inner + } +} +impl<'a> ::std::ops::DerefMut for PhysicalDeviceSamplerYcbcrConversionFeaturesBuilder<'a> { + fn deref_mut(&mut self) -> &mut Self::Target { + &mut self.inner + } +} +impl<'a> PhysicalDeviceSamplerYcbcrConversionFeaturesBuilder<'a> { + pub fn sampler_ycbcr_conversion( + mut self, + sampler_ycbcr_conversion: bool, + ) -> PhysicalDeviceSamplerYcbcrConversionFeaturesBuilder<'a> { + self.inner.sampler_ycbcr_conversion = sampler_ycbcr_conversion.into(); + self + } + #[doc = r" Calling build will **discard** all the lifetime information. Only call this if"] + #[doc = r" necessary! 
Builders implement `Deref` targeting their corresponding Vulkan struct,"] + #[doc = r" so references to builders can be passed directly to Vulkan functions."] + pub fn build(self) -> PhysicalDeviceSamplerYcbcrConversionFeatures { + self.inner + } +} +#[repr(C)] +#[derive(Copy, Clone, Debug)] +#[doc = ""] +pub struct SamplerYcbcrConversionImageFormatProperties { + pub s_type: StructureType, + pub p_next: *mut c_void, + pub combined_image_sampler_descriptor_count: u32, +} +impl ::std::default::Default for SamplerYcbcrConversionImageFormatProperties { + fn default() -> SamplerYcbcrConversionImageFormatProperties { + SamplerYcbcrConversionImageFormatProperties { + s_type: StructureType::SAMPLER_YCBCR_CONVERSION_IMAGE_FORMAT_PROPERTIES, + p_next: ::std::ptr::null_mut(), + combined_image_sampler_descriptor_count: u32::default(), + } + } +} +impl SamplerYcbcrConversionImageFormatProperties { + pub fn builder<'a>() -> SamplerYcbcrConversionImageFormatPropertiesBuilder<'a> { + SamplerYcbcrConversionImageFormatPropertiesBuilder { + inner: SamplerYcbcrConversionImageFormatProperties::default(), + marker: ::std::marker::PhantomData, + } + } +} +#[repr(transparent)] +pub struct SamplerYcbcrConversionImageFormatPropertiesBuilder<'a> { + inner: SamplerYcbcrConversionImageFormatProperties, + marker: ::std::marker::PhantomData<&'a ()>, +} +unsafe impl ExtendsImageFormatProperties2 + for SamplerYcbcrConversionImageFormatPropertiesBuilder<'_> +{ +} +unsafe impl ExtendsImageFormatProperties2 for SamplerYcbcrConversionImageFormatProperties {} +impl<'a> ::std::ops::Deref for SamplerYcbcrConversionImageFormatPropertiesBuilder<'a> { + type Target = SamplerYcbcrConversionImageFormatProperties; + fn deref(&self) -> &Self::Target { + &self.inner + } +} +impl<'a> ::std::ops::DerefMut for SamplerYcbcrConversionImageFormatPropertiesBuilder<'a> { + fn deref_mut(&mut self) -> &mut Self::Target { + &mut self.inner + } +} +impl<'a> SamplerYcbcrConversionImageFormatPropertiesBuilder<'a> { + pub fn combined_image_sampler_descriptor_count( + mut self, + combined_image_sampler_descriptor_count: u32, + ) -> SamplerYcbcrConversionImageFormatPropertiesBuilder<'a> { + self.inner.combined_image_sampler_descriptor_count = + combined_image_sampler_descriptor_count; + self + } + #[doc = r" Calling build will **discard** all the lifetime information. Only call this if"] + #[doc = r" necessary! 
Builders implement `Deref` targeting their corresponding Vulkan struct,"] + #[doc = r" so references to builders can be passed directly to Vulkan functions."] + pub fn build(self) -> SamplerYcbcrConversionImageFormatProperties { + self.inner + } +} +#[repr(C)] +#[derive(Copy, Clone, Debug)] +#[doc = ""] +pub struct TextureLODGatherFormatPropertiesAMD { + pub s_type: StructureType, + pub p_next: *mut c_void, + pub supports_texture_gather_lod_bias_amd: Bool32, +} +impl ::std::default::Default for TextureLODGatherFormatPropertiesAMD { + fn default() -> TextureLODGatherFormatPropertiesAMD { + TextureLODGatherFormatPropertiesAMD { + s_type: StructureType::TEXTURE_LOD_GATHER_FORMAT_PROPERTIES_AMD, + p_next: ::std::ptr::null_mut(), + supports_texture_gather_lod_bias_amd: Bool32::default(), + } + } +} +impl TextureLODGatherFormatPropertiesAMD { + pub fn builder<'a>() -> TextureLODGatherFormatPropertiesAMDBuilder<'a> { + TextureLODGatherFormatPropertiesAMDBuilder { + inner: TextureLODGatherFormatPropertiesAMD::default(), + marker: ::std::marker::PhantomData, + } + } +} +#[repr(transparent)] +pub struct TextureLODGatherFormatPropertiesAMDBuilder<'a> { + inner: TextureLODGatherFormatPropertiesAMD, + marker: ::std::marker::PhantomData<&'a ()>, +} +unsafe impl ExtendsImageFormatProperties2 for TextureLODGatherFormatPropertiesAMDBuilder<'_> {} +unsafe impl ExtendsImageFormatProperties2 for TextureLODGatherFormatPropertiesAMD {} +impl<'a> ::std::ops::Deref for TextureLODGatherFormatPropertiesAMDBuilder<'a> { + type Target = TextureLODGatherFormatPropertiesAMD; + fn deref(&self) -> &Self::Target { + &self.inner + } +} +impl<'a> ::std::ops::DerefMut for TextureLODGatherFormatPropertiesAMDBuilder<'a> { + fn deref_mut(&mut self) -> &mut Self::Target { + &mut self.inner + } +} +impl<'a> TextureLODGatherFormatPropertiesAMDBuilder<'a> { + pub fn supports_texture_gather_lod_bias_amd( + mut self, + supports_texture_gather_lod_bias_amd: bool, + ) -> TextureLODGatherFormatPropertiesAMDBuilder<'a> { + self.inner.supports_texture_gather_lod_bias_amd = + supports_texture_gather_lod_bias_amd.into(); + self + } + #[doc = r" Calling build will **discard** all the lifetime information. Only call this if"] + #[doc = r" necessary! 
Builders implement `Deref` targeting their corresponding Vulkan struct,"]
+    #[doc = r" so references to builders can be passed directly to Vulkan functions."]
+    pub fn build(self) -> TextureLODGatherFormatPropertiesAMD {
+        self.inner
+    }
+}
+#[repr(C)]
+#[derive(Copy, Clone, Debug)]
+#[doc = ""]
+pub struct ConditionalRenderingBeginInfoEXT {
+    pub s_type: StructureType,
+    pub p_next: *const c_void,
+    pub buffer: Buffer,
+    pub offset: DeviceSize,
+    pub flags: ConditionalRenderingFlagsEXT,
+}
+impl ::std::default::Default for ConditionalRenderingBeginInfoEXT {
+    fn default() -> ConditionalRenderingBeginInfoEXT {
+        ConditionalRenderingBeginInfoEXT {
+            s_type: StructureType::CONDITIONAL_RENDERING_BEGIN_INFO_EXT,
+            p_next: ::std::ptr::null(),
+            buffer: Buffer::default(),
+            offset: DeviceSize::default(),
+            flags: ConditionalRenderingFlagsEXT::default(),
+        }
+    }
+}
+impl ConditionalRenderingBeginInfoEXT {
+    pub fn builder<'a>() -> ConditionalRenderingBeginInfoEXTBuilder<'a> {
+        ConditionalRenderingBeginInfoEXTBuilder {
+            inner: ConditionalRenderingBeginInfoEXT::default(),
+            marker: ::std::marker::PhantomData,
+        }
+    }
+}
+#[repr(transparent)]
+pub struct ConditionalRenderingBeginInfoEXTBuilder<'a> {
+    inner: ConditionalRenderingBeginInfoEXT,
+    marker: ::std::marker::PhantomData<&'a ()>,
+}
+pub unsafe trait ExtendsConditionalRenderingBeginInfoEXT {}
+impl<'a> ::std::ops::Deref for ConditionalRenderingBeginInfoEXTBuilder<'a> {
+    type Target = ConditionalRenderingBeginInfoEXT;
+    fn deref(&self) -> &Self::Target {
+        &self.inner
+    }
+}
+impl<'a> ::std::ops::DerefMut for ConditionalRenderingBeginInfoEXTBuilder<'a> {
+    fn deref_mut(&mut self) -> &mut Self::Target {
+        &mut self.inner
+    }
+}
+impl<'a> ConditionalRenderingBeginInfoEXTBuilder<'a> {
+    pub fn buffer(mut self, buffer: Buffer) -> ConditionalRenderingBeginInfoEXTBuilder<'a> {
+        self.inner.buffer = buffer;
+        self
+    }
+    pub fn offset(mut self, offset: DeviceSize) -> ConditionalRenderingBeginInfoEXTBuilder<'a> {
+        self.inner.offset = offset;
+        self
+    }
+    pub fn flags(
+        mut self,
+        flags: ConditionalRenderingFlagsEXT,
+    ) -> ConditionalRenderingBeginInfoEXTBuilder<'a> {
+        self.inner.flags = flags;
+        self
+    }
+    #[doc = r" Prepends the given extension struct between the root and the first pointer. This"]
+    #[doc = r" method only exists on structs that can be passed to a function directly. Only"]
+    #[doc = r" valid extension structs can be pushed into the chain."]
+    #[doc = r" If the chain looks like `A -> B -> C`, and you call `builder.push_next(&mut D)`, then the"]
+    #[doc = r" chain will look like `A -> D -> B -> C`."]
+    pub fn push_next<T: ExtendsConditionalRenderingBeginInfoEXT>(
+        mut self,
+        next: &'a mut T,
+    ) -> ConditionalRenderingBeginInfoEXTBuilder<'a> {
+        unsafe {
+            let next_ptr = next as *mut T as *mut BaseOutStructure;
+            let last_next = ptr_chain_iter(next).last().unwrap();
+            (*last_next).p_next = self.inner.p_next as _;
+            self.inner.p_next = next_ptr as _;
+        }
+        self
+    }
+    #[doc = r" Calling build will **discard** all the lifetime information. Only call this if"]
+    #[doc = r" necessary!
Builders implement `Deref` targeting their corresponding Vulkan struct,"] + #[doc = r" so references to builders can be passed directly to Vulkan functions."] + pub fn build(self) -> ConditionalRenderingBeginInfoEXT { + self.inner + } +} +#[repr(C)] +#[derive(Copy, Clone, Debug)] +#[doc = ""] +pub struct ProtectedSubmitInfo { + pub s_type: StructureType, + pub p_next: *const c_void, + pub protected_submit: Bool32, +} +impl ::std::default::Default for ProtectedSubmitInfo { + fn default() -> ProtectedSubmitInfo { + ProtectedSubmitInfo { + s_type: StructureType::PROTECTED_SUBMIT_INFO, + p_next: ::std::ptr::null(), + protected_submit: Bool32::default(), + } + } +} +impl ProtectedSubmitInfo { + pub fn builder<'a>() -> ProtectedSubmitInfoBuilder<'a> { + ProtectedSubmitInfoBuilder { + inner: ProtectedSubmitInfo::default(), + marker: ::std::marker::PhantomData, + } + } +} +#[repr(transparent)] +pub struct ProtectedSubmitInfoBuilder<'a> { + inner: ProtectedSubmitInfo, + marker: ::std::marker::PhantomData<&'a ()>, +} +unsafe impl ExtendsSubmitInfo for ProtectedSubmitInfoBuilder<'_> {} +unsafe impl ExtendsSubmitInfo for ProtectedSubmitInfo {} +impl<'a> ::std::ops::Deref for ProtectedSubmitInfoBuilder<'a> { + type Target = ProtectedSubmitInfo; + fn deref(&self) -> &Self::Target { + &self.inner + } +} +impl<'a> ::std::ops::DerefMut for ProtectedSubmitInfoBuilder<'a> { + fn deref_mut(&mut self) -> &mut Self::Target { + &mut self.inner + } +} +impl<'a> ProtectedSubmitInfoBuilder<'a> { + pub fn protected_submit(mut self, protected_submit: bool) -> ProtectedSubmitInfoBuilder<'a> { + self.inner.protected_submit = protected_submit.into(); + self + } + #[doc = r" Calling build will **discard** all the lifetime information. Only call this if"] + #[doc = r" necessary! Builders implement `Deref` targeting their corresponding Vulkan struct,"] + #[doc = r" so references to builders can be passed directly to Vulkan functions."] + pub fn build(self) -> ProtectedSubmitInfo { + self.inner + } +} +#[repr(C)] +#[derive(Copy, Clone, Debug)] +#[doc = ""] +pub struct PhysicalDeviceProtectedMemoryFeatures { + pub s_type: StructureType, + pub p_next: *mut c_void, + pub protected_memory: Bool32, +} +impl ::std::default::Default for PhysicalDeviceProtectedMemoryFeatures { + fn default() -> PhysicalDeviceProtectedMemoryFeatures { + PhysicalDeviceProtectedMemoryFeatures { + s_type: StructureType::PHYSICAL_DEVICE_PROTECTED_MEMORY_FEATURES, + p_next: ::std::ptr::null_mut(), + protected_memory: Bool32::default(), + } + } +} +impl PhysicalDeviceProtectedMemoryFeatures { + pub fn builder<'a>() -> PhysicalDeviceProtectedMemoryFeaturesBuilder<'a> { + PhysicalDeviceProtectedMemoryFeaturesBuilder { + inner: PhysicalDeviceProtectedMemoryFeatures::default(), + marker: ::std::marker::PhantomData, + } + } +} +#[repr(transparent)] +pub struct PhysicalDeviceProtectedMemoryFeaturesBuilder<'a> { + inner: PhysicalDeviceProtectedMemoryFeatures, + marker: ::std::marker::PhantomData<&'a ()>, +} +unsafe impl ExtendsDeviceCreateInfo for PhysicalDeviceProtectedMemoryFeaturesBuilder<'_> {} +unsafe impl ExtendsDeviceCreateInfo for PhysicalDeviceProtectedMemoryFeatures {} +impl<'a> ::std::ops::Deref for PhysicalDeviceProtectedMemoryFeaturesBuilder<'a> { + type Target = PhysicalDeviceProtectedMemoryFeatures; + fn deref(&self) -> &Self::Target { + &self.inner + } +} +impl<'a> ::std::ops::DerefMut for PhysicalDeviceProtectedMemoryFeaturesBuilder<'a> { + fn deref_mut(&mut self) -> &mut Self::Target { + &mut self.inner + } +} +impl<'a> 
PhysicalDeviceProtectedMemoryFeaturesBuilder<'a> { + pub fn protected_memory( + mut self, + protected_memory: bool, + ) -> PhysicalDeviceProtectedMemoryFeaturesBuilder<'a> { + self.inner.protected_memory = protected_memory.into(); + self + } + #[doc = r" Calling build will **discard** all the lifetime information. Only call this if"] + #[doc = r" necessary! Builders implement `Deref` targeting their corresponding Vulkan struct,"] + #[doc = r" so references to builders can be passed directly to Vulkan functions."] + pub fn build(self) -> PhysicalDeviceProtectedMemoryFeatures { + self.inner + } +} +#[repr(C)] +#[derive(Copy, Clone, Debug)] +#[doc = ""] +pub struct PhysicalDeviceProtectedMemoryProperties { + pub s_type: StructureType, + pub p_next: *mut c_void, + pub protected_no_fault: Bool32, +} +impl ::std::default::Default for PhysicalDeviceProtectedMemoryProperties { + fn default() -> PhysicalDeviceProtectedMemoryProperties { + PhysicalDeviceProtectedMemoryProperties { + s_type: StructureType::PHYSICAL_DEVICE_PROTECTED_MEMORY_PROPERTIES, + p_next: ::std::ptr::null_mut(), + protected_no_fault: Bool32::default(), + } + } +} +impl PhysicalDeviceProtectedMemoryProperties { + pub fn builder<'a>() -> PhysicalDeviceProtectedMemoryPropertiesBuilder<'a> { + PhysicalDeviceProtectedMemoryPropertiesBuilder { + inner: PhysicalDeviceProtectedMemoryProperties::default(), + marker: ::std::marker::PhantomData, + } + } +} +#[repr(transparent)] +pub struct PhysicalDeviceProtectedMemoryPropertiesBuilder<'a> { + inner: PhysicalDeviceProtectedMemoryProperties, + marker: ::std::marker::PhantomData<&'a ()>, +} +unsafe impl ExtendsPhysicalDeviceProperties2 + for PhysicalDeviceProtectedMemoryPropertiesBuilder<'_> +{ +} +unsafe impl ExtendsPhysicalDeviceProperties2 for PhysicalDeviceProtectedMemoryProperties {} +impl<'a> ::std::ops::Deref for PhysicalDeviceProtectedMemoryPropertiesBuilder<'a> { + type Target = PhysicalDeviceProtectedMemoryProperties; + fn deref(&self) -> &Self::Target { + &self.inner + } +} +impl<'a> ::std::ops::DerefMut for PhysicalDeviceProtectedMemoryPropertiesBuilder<'a> { + fn deref_mut(&mut self) -> &mut Self::Target { + &mut self.inner + } +} +impl<'a> PhysicalDeviceProtectedMemoryPropertiesBuilder<'a> { + pub fn protected_no_fault( + mut self, + protected_no_fault: bool, + ) -> PhysicalDeviceProtectedMemoryPropertiesBuilder<'a> { + self.inner.protected_no_fault = protected_no_fault.into(); + self + } + #[doc = r" Calling build will **discard** all the lifetime information. Only call this if"] + #[doc = r" necessary! 
Builders implement `Deref` targeting their corresponding Vulkan struct,"]
+    #[doc = r" so references to builders can be passed directly to Vulkan functions."]
+    pub fn build(self) -> PhysicalDeviceProtectedMemoryProperties {
+        self.inner
+    }
+}
+#[repr(C)]
+#[derive(Copy, Clone, Debug)]
+#[doc = ""]
+pub struct DeviceQueueInfo2 {
+    pub s_type: StructureType,
+    pub p_next: *const c_void,
+    pub flags: DeviceQueueCreateFlags,
+    pub queue_family_index: u32,
+    pub queue_index: u32,
+}
+impl ::std::default::Default for DeviceQueueInfo2 {
+    fn default() -> DeviceQueueInfo2 {
+        DeviceQueueInfo2 {
+            s_type: StructureType::DEVICE_QUEUE_INFO_2,
+            p_next: ::std::ptr::null(),
+            flags: DeviceQueueCreateFlags::default(),
+            queue_family_index: u32::default(),
+            queue_index: u32::default(),
+        }
+    }
+}
+impl DeviceQueueInfo2 {
+    pub fn builder<'a>() -> DeviceQueueInfo2Builder<'a> {
+        DeviceQueueInfo2Builder {
+            inner: DeviceQueueInfo2::default(),
+            marker: ::std::marker::PhantomData,
+        }
+    }
+}
+#[repr(transparent)]
+pub struct DeviceQueueInfo2Builder<'a> {
+    inner: DeviceQueueInfo2,
+    marker: ::std::marker::PhantomData<&'a ()>,
+}
+pub unsafe trait ExtendsDeviceQueueInfo2 {}
+impl<'a> ::std::ops::Deref for DeviceQueueInfo2Builder<'a> {
+    type Target = DeviceQueueInfo2;
+    fn deref(&self) -> &Self::Target {
+        &self.inner
+    }
+}
+impl<'a> ::std::ops::DerefMut for DeviceQueueInfo2Builder<'a> {
+    fn deref_mut(&mut self) -> &mut Self::Target {
+        &mut self.inner
+    }
+}
+impl<'a> DeviceQueueInfo2Builder<'a> {
+    pub fn flags(mut self, flags: DeviceQueueCreateFlags) -> DeviceQueueInfo2Builder<'a> {
+        self.inner.flags = flags;
+        self
+    }
+    pub fn queue_family_index(mut self, queue_family_index: u32) -> DeviceQueueInfo2Builder<'a> {
+        self.inner.queue_family_index = queue_family_index;
+        self
+    }
+    pub fn queue_index(mut self, queue_index: u32) -> DeviceQueueInfo2Builder<'a> {
+        self.inner.queue_index = queue_index;
+        self
+    }
+    #[doc = r" Prepends the given extension struct between the root and the first pointer. This"]
+    #[doc = r" method only exists on structs that can be passed to a function directly. Only"]
+    #[doc = r" valid extension structs can be pushed into the chain."]
+    #[doc = r" If the chain looks like `A -> B -> C`, and you call `builder.push_next(&mut D)`, then the"]
+    #[doc = r" chain will look like `A -> D -> B -> C`."]
+    pub fn push_next<T: ExtendsDeviceQueueInfo2>(
+        mut self,
+        next: &'a mut T,
+    ) -> DeviceQueueInfo2Builder<'a> {
+        unsafe {
+            let next_ptr = next as *mut T as *mut BaseOutStructure;
+            let last_next = ptr_chain_iter(next).last().unwrap();
+            (*last_next).p_next = self.inner.p_next as _;
+            self.inner.p_next = next_ptr as _;
+        }
+        self
+    }
+    #[doc = r" Calling build will **discard** all the lifetime information. Only call this if"]
+    #[doc = r" necessary! Builders implement `Deref` targeting their corresponding Vulkan struct,"]
+    #[doc = r" so references to builders can be passed directly to Vulkan functions."]
+    pub fn build(self) -> DeviceQueueInfo2 {
+        self.inner
+    }
+}
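`DeviceQueueInfo2` is the input structure for `vkGetDeviceQueue2`. A hedged sketch of filling it follows; the `PROTECTED` flag constant and whatever device wrapper ultimately consumes the struct are assumptions, as neither appears in this patch:

    let queue_info = DeviceQueueInfo2::builder()
        .flags(DeviceQueueCreateFlags::PROTECTED) // assumed flag constant
        .queue_family_index(0)
        .queue_index(0);
    // Via `Deref`, `&queue_info` already serves as `&DeviceQueueInfo2`, so `build()`
    // is only needed when an owned struct must outlive the builder.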
+#[repr(C)]
+#[derive(Copy, Clone, Debug)]
+#[doc = ""]
+pub struct PipelineCoverageToColorStateCreateInfoNV {
+    pub s_type: StructureType,
+    pub p_next: *const c_void,
+    pub flags: PipelineCoverageToColorStateCreateFlagsNV,
+    pub coverage_to_color_enable: Bool32,
+    pub coverage_to_color_location: u32,
+}
+impl ::std::default::Default for PipelineCoverageToColorStateCreateInfoNV {
+    fn default() -> PipelineCoverageToColorStateCreateInfoNV {
+        PipelineCoverageToColorStateCreateInfoNV {
+            s_type: StructureType::PIPELINE_COVERAGE_TO_COLOR_STATE_CREATE_INFO_NV,
+            p_next: ::std::ptr::null(),
+            flags: PipelineCoverageToColorStateCreateFlagsNV::default(),
+            coverage_to_color_enable: Bool32::default(),
+            coverage_to_color_location: u32::default(),
+        }
+    }
+}
+impl PipelineCoverageToColorStateCreateInfoNV {
+    pub fn builder<'a>() -> PipelineCoverageToColorStateCreateInfoNVBuilder<'a> {
+        PipelineCoverageToColorStateCreateInfoNVBuilder {
+            inner: PipelineCoverageToColorStateCreateInfoNV::default(),
+            marker: ::std::marker::PhantomData,
+        }
+    }
+}
+#[repr(transparent)]
+pub struct PipelineCoverageToColorStateCreateInfoNVBuilder<'a> {
+    inner: PipelineCoverageToColorStateCreateInfoNV,
+    marker: ::std::marker::PhantomData<&'a ()>,
+}
+unsafe impl ExtendsPipelineMultisampleStateCreateInfo
+    for PipelineCoverageToColorStateCreateInfoNVBuilder<'_>
+{
+}
+unsafe impl ExtendsPipelineMultisampleStateCreateInfo for PipelineCoverageToColorStateCreateInfoNV {}
+impl<'a> ::std::ops::Deref for PipelineCoverageToColorStateCreateInfoNVBuilder<'a> {
+    type Target = PipelineCoverageToColorStateCreateInfoNV;
+    fn deref(&self) -> &Self::Target {
+        &self.inner
+    }
+}
+impl<'a> ::std::ops::DerefMut for PipelineCoverageToColorStateCreateInfoNVBuilder<'a> {
+    fn deref_mut(&mut self) -> &mut Self::Target {
+        &mut self.inner
+    }
+}
+impl<'a> PipelineCoverageToColorStateCreateInfoNVBuilder<'a> {
+    pub fn flags(
+        mut self,
+        flags: PipelineCoverageToColorStateCreateFlagsNV,
+    ) -> PipelineCoverageToColorStateCreateInfoNVBuilder<'a> {
+        self.inner.flags = flags;
+        self
+    }
+    pub fn coverage_to_color_enable(
+        mut self,
+        coverage_to_color_enable: bool,
+    ) -> PipelineCoverageToColorStateCreateInfoNVBuilder<'a> {
+        self.inner.coverage_to_color_enable = coverage_to_color_enable.into();
+        self
+    }
+    pub fn coverage_to_color_location(
+        mut self,
+        coverage_to_color_location: u32,
+    ) -> PipelineCoverageToColorStateCreateInfoNVBuilder<'a> {
+        self.inner.coverage_to_color_location = coverage_to_color_location;
+        self
+    }
+    #[doc = r" Calling build will **discard** all the lifetime information. Only call this if"]
+    #[doc = r" necessary!
Builders implement `Deref` targeting their corresponding Vulkan struct,"] + #[doc = r" so references to builders can be passed directly to Vulkan functions."] + pub fn build(self) -> PipelineCoverageToColorStateCreateInfoNV { + self.inner + } +} +#[repr(C)] +#[derive(Copy, Clone, Debug)] +#[doc = ""] +pub struct PhysicalDeviceSamplerFilterMinmaxPropertiesEXT { + pub s_type: StructureType, + pub p_next: *mut c_void, + pub filter_minmax_single_component_formats: Bool32, + pub filter_minmax_image_component_mapping: Bool32, +} +impl ::std::default::Default for PhysicalDeviceSamplerFilterMinmaxPropertiesEXT { + fn default() -> PhysicalDeviceSamplerFilterMinmaxPropertiesEXT { + PhysicalDeviceSamplerFilterMinmaxPropertiesEXT { + s_type: StructureType::PHYSICAL_DEVICE_SAMPLER_FILTER_MINMAX_PROPERTIES_EXT, + p_next: ::std::ptr::null_mut(), + filter_minmax_single_component_formats: Bool32::default(), + filter_minmax_image_component_mapping: Bool32::default(), + } + } +} +impl PhysicalDeviceSamplerFilterMinmaxPropertiesEXT { + pub fn builder<'a>() -> PhysicalDeviceSamplerFilterMinmaxPropertiesEXTBuilder<'a> { + PhysicalDeviceSamplerFilterMinmaxPropertiesEXTBuilder { + inner: PhysicalDeviceSamplerFilterMinmaxPropertiesEXT::default(), + marker: ::std::marker::PhantomData, + } + } +} +#[repr(transparent)] +pub struct PhysicalDeviceSamplerFilterMinmaxPropertiesEXTBuilder<'a> { + inner: PhysicalDeviceSamplerFilterMinmaxPropertiesEXT, + marker: ::std::marker::PhantomData<&'a ()>, +} +unsafe impl ExtendsPhysicalDeviceProperties2 + for PhysicalDeviceSamplerFilterMinmaxPropertiesEXTBuilder<'_> +{ +} +unsafe impl ExtendsPhysicalDeviceProperties2 for PhysicalDeviceSamplerFilterMinmaxPropertiesEXT {} +impl<'a> ::std::ops::Deref for PhysicalDeviceSamplerFilterMinmaxPropertiesEXTBuilder<'a> { + type Target = PhysicalDeviceSamplerFilterMinmaxPropertiesEXT; + fn deref(&self) -> &Self::Target { + &self.inner + } +} +impl<'a> ::std::ops::DerefMut for PhysicalDeviceSamplerFilterMinmaxPropertiesEXTBuilder<'a> { + fn deref_mut(&mut self) -> &mut Self::Target { + &mut self.inner + } +} +impl<'a> PhysicalDeviceSamplerFilterMinmaxPropertiesEXTBuilder<'a> { + pub fn filter_minmax_single_component_formats( + mut self, + filter_minmax_single_component_formats: bool, + ) -> PhysicalDeviceSamplerFilterMinmaxPropertiesEXTBuilder<'a> { + self.inner.filter_minmax_single_component_formats = + filter_minmax_single_component_formats.into(); + self + } + pub fn filter_minmax_image_component_mapping( + mut self, + filter_minmax_image_component_mapping: bool, + ) -> PhysicalDeviceSamplerFilterMinmaxPropertiesEXTBuilder<'a> { + self.inner.filter_minmax_image_component_mapping = + filter_minmax_image_component_mapping.into(); + self + } + #[doc = r" Calling build will **discard** all the lifetime information. Only call this if"] + #[doc = r" necessary! 
Builders implement `Deref` targeting their corresponding Vulkan struct,"] + #[doc = r" so references to builders can be passed directly to Vulkan functions."] + pub fn build(self) -> PhysicalDeviceSamplerFilterMinmaxPropertiesEXT { + self.inner + } +} +#[repr(C)] +#[derive(Copy, Clone, Default, Debug)] +#[doc = ""] +pub struct SampleLocationEXT { + pub x: f32, + pub y: f32, +} +impl SampleLocationEXT { + pub fn builder<'a>() -> SampleLocationEXTBuilder<'a> { + SampleLocationEXTBuilder { + inner: SampleLocationEXT::default(), + marker: ::std::marker::PhantomData, + } + } +} +#[repr(transparent)] +pub struct SampleLocationEXTBuilder<'a> { + inner: SampleLocationEXT, + marker: ::std::marker::PhantomData<&'a ()>, +} +impl<'a> ::std::ops::Deref for SampleLocationEXTBuilder<'a> { + type Target = SampleLocationEXT; + fn deref(&self) -> &Self::Target { + &self.inner + } +} +impl<'a> ::std::ops::DerefMut for SampleLocationEXTBuilder<'a> { + fn deref_mut(&mut self) -> &mut Self::Target { + &mut self.inner + } +} +impl<'a> SampleLocationEXTBuilder<'a> { + pub fn x(mut self, x: f32) -> SampleLocationEXTBuilder<'a> { + self.inner.x = x; + self + } + pub fn y(mut self, y: f32) -> SampleLocationEXTBuilder<'a> { + self.inner.y = y; + self + } + #[doc = r" Calling build will **discard** all the lifetime information. Only call this if"] + #[doc = r" necessary! Builders implement `Deref` targeting their corresponding Vulkan struct,"] + #[doc = r" so references to builders can be passed directly to Vulkan functions."] + pub fn build(self) -> SampleLocationEXT { + self.inner + } +} +#[repr(C)] +#[derive(Copy, Clone, Debug)] +#[doc = ""] +pub struct SampleLocationsInfoEXT { + pub s_type: StructureType, + pub p_next: *const c_void, + pub sample_locations_per_pixel: SampleCountFlags, + pub sample_location_grid_size: Extent2D, + pub sample_locations_count: u32, + pub p_sample_locations: *const SampleLocationEXT, +} +impl ::std::default::Default for SampleLocationsInfoEXT { + fn default() -> SampleLocationsInfoEXT { + SampleLocationsInfoEXT { + s_type: StructureType::SAMPLE_LOCATIONS_INFO_EXT, + p_next: ::std::ptr::null(), + sample_locations_per_pixel: SampleCountFlags::default(), + sample_location_grid_size: Extent2D::default(), + sample_locations_count: u32::default(), + p_sample_locations: ::std::ptr::null(), + } + } +} +impl SampleLocationsInfoEXT { + pub fn builder<'a>() -> SampleLocationsInfoEXTBuilder<'a> { + SampleLocationsInfoEXTBuilder { + inner: SampleLocationsInfoEXT::default(), + marker: ::std::marker::PhantomData, + } + } +} +#[repr(transparent)] +pub struct SampleLocationsInfoEXTBuilder<'a> { + inner: SampleLocationsInfoEXT, + marker: ::std::marker::PhantomData<&'a ()>, +} +unsafe impl ExtendsImageMemoryBarrier for SampleLocationsInfoEXTBuilder<'_> {} +unsafe impl ExtendsImageMemoryBarrier for SampleLocationsInfoEXT {} +impl<'a> ::std::ops::Deref for SampleLocationsInfoEXTBuilder<'a> { + type Target = SampleLocationsInfoEXT; + fn deref(&self) -> &Self::Target { + &self.inner + } +} +impl<'a> ::std::ops::DerefMut for SampleLocationsInfoEXTBuilder<'a> { + fn deref_mut(&mut self) -> &mut Self::Target { + &mut self.inner + } +} +impl<'a> SampleLocationsInfoEXTBuilder<'a> { + pub fn sample_locations_per_pixel( + mut self, + sample_locations_per_pixel: SampleCountFlags, + ) -> SampleLocationsInfoEXTBuilder<'a> { + self.inner.sample_locations_per_pixel = sample_locations_per_pixel; + self + } + pub fn sample_location_grid_size( + mut self, + sample_location_grid_size: Extent2D, + ) -> 
SampleLocationsInfoEXTBuilder<'a> {
+        self.inner.sample_location_grid_size = sample_location_grid_size;
+        self
+    }
+    pub fn sample_locations(
+        mut self,
+        sample_locations: &'a [SampleLocationEXT],
+    ) -> SampleLocationsInfoEXTBuilder<'a> {
+        self.inner.sample_locations_count = sample_locations.len() as _;
+        self.inner.p_sample_locations = sample_locations.as_ptr();
+        self
+    }
+    #[doc = r" Calling build will **discard** all the lifetime information. Only call this if"]
+    #[doc = r" necessary! Builders implement `Deref` targeting their corresponding Vulkan struct,"]
+    #[doc = r" so references to builders can be passed directly to Vulkan functions."]
+    pub fn build(self) -> SampleLocationsInfoEXT {
+        self.inner
+    }
+}
+#[repr(C)]
+#[derive(Copy, Clone, Default, Debug)]
+#[doc = ""]
+pub struct AttachmentSampleLocationsEXT {
+    pub attachment_index: u32,
+    pub sample_locations_info: SampleLocationsInfoEXT,
+}
+impl AttachmentSampleLocationsEXT {
+    pub fn builder<'a>() -> AttachmentSampleLocationsEXTBuilder<'a> {
+        AttachmentSampleLocationsEXTBuilder {
+            inner: AttachmentSampleLocationsEXT::default(),
+            marker: ::std::marker::PhantomData,
+        }
+    }
+}
+#[repr(transparent)]
+pub struct AttachmentSampleLocationsEXTBuilder<'a> {
+    inner: AttachmentSampleLocationsEXT,
+    marker: ::std::marker::PhantomData<&'a ()>,
+}
+impl<'a> ::std::ops::Deref for AttachmentSampleLocationsEXTBuilder<'a> {
+    type Target = AttachmentSampleLocationsEXT;
+    fn deref(&self) -> &Self::Target {
+        &self.inner
+    }
+}
+impl<'a> ::std::ops::DerefMut for AttachmentSampleLocationsEXTBuilder<'a> {
+    fn deref_mut(&mut self) -> &mut Self::Target {
+        &mut self.inner
+    }
+}
+impl<'a> AttachmentSampleLocationsEXTBuilder<'a> {
+    pub fn attachment_index(
+        mut self,
+        attachment_index: u32,
+    ) -> AttachmentSampleLocationsEXTBuilder<'a> {
+        self.inner.attachment_index = attachment_index;
+        self
+    }
+    pub fn sample_locations_info(
+        mut self,
+        sample_locations_info: SampleLocationsInfoEXT,
+    ) -> AttachmentSampleLocationsEXTBuilder<'a> {
+        self.inner.sample_locations_info = sample_locations_info;
+        self
+    }
+    #[doc = r" Calling build will **discard** all the lifetime information. Only call this if"]
+    #[doc = r" necessary! Builders implement `Deref` targeting their corresponding Vulkan struct,"]
+    #[doc = r" so references to builders can be passed directly to Vulkan functions."]
+    pub fn build(self) -> AttachmentSampleLocationsEXT {
+        self.inner
+    }
+}
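The `sample_locations` slice setter above records both an element count and a raw pointer, which is why the builder carries the `'a` borrow. A minimal sketch, assuming only items visible in this hunk:

    let locations = [
        SampleLocationEXT::builder().x(0.5).y(0.5).build(),
        SampleLocationEXT::builder().x(0.25).y(0.75).build(),
    ];
    let info = SampleLocationsInfoEXT::builder()
        .sample_location_grid_size(Extent2D::default())
        .sample_locations(&locations); // sets sample_locations_count = 2 and p_sample_locations
    // `info` must not outlive `locations`; calling `build()` here would discard
    // exactly that lifetime guarantee, as the doc comments warn.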
+#[repr(C)]
+#[derive(Copy, Clone, Default, Debug)]
+#[doc = ""]
+pub struct SubpassSampleLocationsEXT {
+    pub subpass_index: u32,
+    pub sample_locations_info: SampleLocationsInfoEXT,
+}
+impl SubpassSampleLocationsEXT {
+    pub fn builder<'a>() -> SubpassSampleLocationsEXTBuilder<'a> {
+        SubpassSampleLocationsEXTBuilder {
+            inner: SubpassSampleLocationsEXT::default(),
+            marker: ::std::marker::PhantomData,
+        }
+    }
+}
+#[repr(transparent)]
+pub struct SubpassSampleLocationsEXTBuilder<'a> {
+    inner: SubpassSampleLocationsEXT,
+    marker: ::std::marker::PhantomData<&'a ()>,
+}
+impl<'a> ::std::ops::Deref for SubpassSampleLocationsEXTBuilder<'a> {
+    type Target = SubpassSampleLocationsEXT;
+    fn deref(&self) -> &Self::Target {
+        &self.inner
+    }
+}
+impl<'a> ::std::ops::DerefMut for SubpassSampleLocationsEXTBuilder<'a> {
+    fn deref_mut(&mut self) -> &mut Self::Target {
+        &mut self.inner
+    }
+}
+impl<'a> SubpassSampleLocationsEXTBuilder<'a> {
+    pub fn subpass_index(mut self, subpass_index: u32) -> SubpassSampleLocationsEXTBuilder<'a> {
+        self.inner.subpass_index = subpass_index;
+        self
+    }
+    pub fn sample_locations_info(
+        mut self,
+        sample_locations_info: SampleLocationsInfoEXT,
+    ) -> SubpassSampleLocationsEXTBuilder<'a> {
+        self.inner.sample_locations_info = sample_locations_info;
+        self
+    }
+    #[doc = r" Calling build will **discard** all the lifetime information. Only call this if"]
+    #[doc = r" necessary! Builders implement `Deref` targeting their corresponding Vulkan struct,"]
+    #[doc = r" so references to builders can be passed directly to Vulkan functions."]
+    pub fn build(self) -> SubpassSampleLocationsEXT {
+        self.inner
+    }
+}
+#[repr(C)]
+#[derive(Copy, Clone, Debug)]
+#[doc = ""]
+pub struct RenderPassSampleLocationsBeginInfoEXT {
+    pub s_type: StructureType,
+    pub p_next: *const c_void,
+    pub attachment_initial_sample_locations_count: u32,
+    pub p_attachment_initial_sample_locations: *const AttachmentSampleLocationsEXT,
+    pub post_subpass_sample_locations_count: u32,
+    pub p_post_subpass_sample_locations: *const SubpassSampleLocationsEXT,
+}
+impl ::std::default::Default for RenderPassSampleLocationsBeginInfoEXT {
+    fn default() -> RenderPassSampleLocationsBeginInfoEXT {
+        RenderPassSampleLocationsBeginInfoEXT {
+            s_type: StructureType::RENDER_PASS_SAMPLE_LOCATIONS_BEGIN_INFO_EXT,
+            p_next: ::std::ptr::null(),
+            attachment_initial_sample_locations_count: u32::default(),
+            p_attachment_initial_sample_locations: ::std::ptr::null(),
+            post_subpass_sample_locations_count: u32::default(),
+            p_post_subpass_sample_locations: ::std::ptr::null(),
+        }
+    }
+}
+impl RenderPassSampleLocationsBeginInfoEXT {
+    pub fn builder<'a>() -> RenderPassSampleLocationsBeginInfoEXTBuilder<'a> {
+        RenderPassSampleLocationsBeginInfoEXTBuilder {
+            inner: RenderPassSampleLocationsBeginInfoEXT::default(),
+            marker: ::std::marker::PhantomData,
+        }
+    }
+}
+#[repr(transparent)]
+pub struct RenderPassSampleLocationsBeginInfoEXTBuilder<'a> {
+    inner: RenderPassSampleLocationsBeginInfoEXT,
+    marker: ::std::marker::PhantomData<&'a ()>,
+}
+unsafe impl ExtendsRenderPassBeginInfo for RenderPassSampleLocationsBeginInfoEXTBuilder<'_> {}
+unsafe impl ExtendsRenderPassBeginInfo for RenderPassSampleLocationsBeginInfoEXT {}
+impl<'a> ::std::ops::Deref for
RenderPassSampleLocationsBeginInfoEXTBuilder<'a> { + type Target = RenderPassSampleLocationsBeginInfoEXT; + fn deref(&self) -> &Self::Target { + &self.inner + } +} +impl<'a> ::std::ops::DerefMut for RenderPassSampleLocationsBeginInfoEXTBuilder<'a> { + fn deref_mut(&mut self) -> &mut Self::Target { + &mut self.inner + } +} +impl<'a> RenderPassSampleLocationsBeginInfoEXTBuilder<'a> { + pub fn attachment_initial_sample_locations( + mut self, + attachment_initial_sample_locations: &'a [AttachmentSampleLocationsEXT], + ) -> RenderPassSampleLocationsBeginInfoEXTBuilder<'a> { + self.inner.attachment_initial_sample_locations_count = + attachment_initial_sample_locations.len() as _; + self.inner.p_attachment_initial_sample_locations = + attachment_initial_sample_locations.as_ptr(); + self + } + pub fn post_subpass_sample_locations( + mut self, + post_subpass_sample_locations: &'a [SubpassSampleLocationsEXT], + ) -> RenderPassSampleLocationsBeginInfoEXTBuilder<'a> { + self.inner.post_subpass_sample_locations_count = post_subpass_sample_locations.len() as _; + self.inner.p_post_subpass_sample_locations = post_subpass_sample_locations.as_ptr(); + self + } + #[doc = r" Calling build will **discard** all the lifetime information. Only call this if"] + #[doc = r" necessary! Builders implement `Deref` targeting their corresponding Vulkan struct,"] + #[doc = r" so references to builders can be passed directly to Vulkan functions."] + pub fn build(self) -> RenderPassSampleLocationsBeginInfoEXT { + self.inner + } +} +#[repr(C)] +#[derive(Copy, Clone, Debug)] +#[doc = ""] +pub struct PipelineSampleLocationsStateCreateInfoEXT { + pub s_type: StructureType, + pub p_next: *const c_void, + pub sample_locations_enable: Bool32, + pub sample_locations_info: SampleLocationsInfoEXT, +} +impl ::std::default::Default for PipelineSampleLocationsStateCreateInfoEXT { + fn default() -> PipelineSampleLocationsStateCreateInfoEXT { + PipelineSampleLocationsStateCreateInfoEXT { + s_type: StructureType::PIPELINE_SAMPLE_LOCATIONS_STATE_CREATE_INFO_EXT, + p_next: ::std::ptr::null(), + sample_locations_enable: Bool32::default(), + sample_locations_info: SampleLocationsInfoEXT::default(), + } + } +} +impl PipelineSampleLocationsStateCreateInfoEXT { + pub fn builder<'a>() -> PipelineSampleLocationsStateCreateInfoEXTBuilder<'a> { + PipelineSampleLocationsStateCreateInfoEXTBuilder { + inner: PipelineSampleLocationsStateCreateInfoEXT::default(), + marker: ::std::marker::PhantomData, + } + } +} +#[repr(transparent)] +pub struct PipelineSampleLocationsStateCreateInfoEXTBuilder<'a> { + inner: PipelineSampleLocationsStateCreateInfoEXT, + marker: ::std::marker::PhantomData<&'a ()>, +} +unsafe impl ExtendsPipelineMultisampleStateCreateInfo + for PipelineSampleLocationsStateCreateInfoEXTBuilder<'_> +{ +} +unsafe impl ExtendsPipelineMultisampleStateCreateInfo + for PipelineSampleLocationsStateCreateInfoEXT +{ +} +impl<'a> ::std::ops::Deref for PipelineSampleLocationsStateCreateInfoEXTBuilder<'a> { + type Target = PipelineSampleLocationsStateCreateInfoEXT; + fn deref(&self) -> &Self::Target { + &self.inner + } +} +impl<'a> ::std::ops::DerefMut for PipelineSampleLocationsStateCreateInfoEXTBuilder<'a> { + fn deref_mut(&mut self) -> &mut Self::Target { + &mut self.inner + } +} +impl<'a> PipelineSampleLocationsStateCreateInfoEXTBuilder<'a> { + pub fn sample_locations_enable( + mut self, + sample_locations_enable: bool, + ) -> PipelineSampleLocationsStateCreateInfoEXTBuilder<'a> { + self.inner.sample_locations_enable = sample_locations_enable.into(); 
+ self + } + pub fn sample_locations_info( + mut self, + sample_locations_info: SampleLocationsInfoEXT, + ) -> PipelineSampleLocationsStateCreateInfoEXTBuilder<'a> { + self.inner.sample_locations_info = sample_locations_info; + self + } + #[doc = r" Calling build will **discard** all the lifetime information. Only call this if"] + #[doc = r" necessary! Builders implement `Deref` targeting their corresponding Vulkan struct,"] + #[doc = r" so references to builders can be passed directly to Vulkan functions."] + pub fn build(self) -> PipelineSampleLocationsStateCreateInfoEXT { + self.inner + } +} +#[repr(C)] +#[derive(Copy, Clone, Debug)] +#[doc = ""] +pub struct PhysicalDeviceSampleLocationsPropertiesEXT { + pub s_type: StructureType, + pub p_next: *mut c_void, + pub sample_location_sample_counts: SampleCountFlags, + pub max_sample_location_grid_size: Extent2D, + pub sample_location_coordinate_range: [f32; 2], + pub sample_location_sub_pixel_bits: u32, + pub variable_sample_locations: Bool32, +} +impl ::std::default::Default for PhysicalDeviceSampleLocationsPropertiesEXT { + fn default() -> PhysicalDeviceSampleLocationsPropertiesEXT { + PhysicalDeviceSampleLocationsPropertiesEXT { + s_type: StructureType::PHYSICAL_DEVICE_SAMPLE_LOCATIONS_PROPERTIES_EXT, + p_next: ::std::ptr::null_mut(), + sample_location_sample_counts: SampleCountFlags::default(), + max_sample_location_grid_size: Extent2D::default(), + sample_location_coordinate_range: unsafe { ::std::mem::zeroed() }, + sample_location_sub_pixel_bits: u32::default(), + variable_sample_locations: Bool32::default(), + } + } +} +impl PhysicalDeviceSampleLocationsPropertiesEXT { + pub fn builder<'a>() -> PhysicalDeviceSampleLocationsPropertiesEXTBuilder<'a> { + PhysicalDeviceSampleLocationsPropertiesEXTBuilder { + inner: PhysicalDeviceSampleLocationsPropertiesEXT::default(), + marker: ::std::marker::PhantomData, + } + } +} +#[repr(transparent)] +pub struct PhysicalDeviceSampleLocationsPropertiesEXTBuilder<'a> { + inner: PhysicalDeviceSampleLocationsPropertiesEXT, + marker: ::std::marker::PhantomData<&'a ()>, +} +unsafe impl ExtendsPhysicalDeviceProperties2 + for PhysicalDeviceSampleLocationsPropertiesEXTBuilder<'_> +{ +} +unsafe impl ExtendsPhysicalDeviceProperties2 for PhysicalDeviceSampleLocationsPropertiesEXT {} +impl<'a> ::std::ops::Deref for PhysicalDeviceSampleLocationsPropertiesEXTBuilder<'a> { + type Target = PhysicalDeviceSampleLocationsPropertiesEXT; + fn deref(&self) -> &Self::Target { + &self.inner + } +} +impl<'a> ::std::ops::DerefMut for PhysicalDeviceSampleLocationsPropertiesEXTBuilder<'a> { + fn deref_mut(&mut self) -> &mut Self::Target { + &mut self.inner + } +} +impl<'a> PhysicalDeviceSampleLocationsPropertiesEXTBuilder<'a> { + pub fn sample_location_sample_counts( + mut self, + sample_location_sample_counts: SampleCountFlags, + ) -> PhysicalDeviceSampleLocationsPropertiesEXTBuilder<'a> { + self.inner.sample_location_sample_counts = sample_location_sample_counts; + self + } + pub fn max_sample_location_grid_size( + mut self, + max_sample_location_grid_size: Extent2D, + ) -> PhysicalDeviceSampleLocationsPropertiesEXTBuilder<'a> { + self.inner.max_sample_location_grid_size = max_sample_location_grid_size; + self + } + pub fn sample_location_coordinate_range( + mut self, + sample_location_coordinate_range: [f32; 2], + ) -> PhysicalDeviceSampleLocationsPropertiesEXTBuilder<'a> { + self.inner.sample_location_coordinate_range = sample_location_coordinate_range; + self + } + pub fn sample_location_sub_pixel_bits( + mut self, + 
sample_location_sub_pixel_bits: u32,
+    ) -> PhysicalDeviceSampleLocationsPropertiesEXTBuilder<'a> {
+        self.inner.sample_location_sub_pixel_bits = sample_location_sub_pixel_bits;
+        self
+    }
+    pub fn variable_sample_locations(
+        mut self,
+        variable_sample_locations: bool,
+    ) -> PhysicalDeviceSampleLocationsPropertiesEXTBuilder<'a> {
+        self.inner.variable_sample_locations = variable_sample_locations.into();
+        self
+    }
+    #[doc = r" Calling build will **discard** all the lifetime information. Only call this if"]
+    #[doc = r" necessary! Builders implement `Deref` targeting their corresponding Vulkan struct,"]
+    #[doc = r" so references to builders can be passed directly to Vulkan functions."]
+    pub fn build(self) -> PhysicalDeviceSampleLocationsPropertiesEXT {
+        self.inner
+    }
+}
+#[repr(C)]
+#[derive(Copy, Clone, Debug)]
+#[doc = ""]
+pub struct MultisamplePropertiesEXT {
+    pub s_type: StructureType,
+    pub p_next: *mut c_void,
+    pub max_sample_location_grid_size: Extent2D,
+}
+impl ::std::default::Default for MultisamplePropertiesEXT {
+    fn default() -> MultisamplePropertiesEXT {
+        MultisamplePropertiesEXT {
+            s_type: StructureType::MULTISAMPLE_PROPERTIES_EXT,
+            p_next: ::std::ptr::null_mut(),
+            max_sample_location_grid_size: Extent2D::default(),
+        }
+    }
+}
+impl MultisamplePropertiesEXT {
+    pub fn builder<'a>() -> MultisamplePropertiesEXTBuilder<'a> {
+        MultisamplePropertiesEXTBuilder {
+            inner: MultisamplePropertiesEXT::default(),
+            marker: ::std::marker::PhantomData,
+        }
+    }
+}
+#[repr(transparent)]
+pub struct MultisamplePropertiesEXTBuilder<'a> {
+    inner: MultisamplePropertiesEXT,
+    marker: ::std::marker::PhantomData<&'a ()>,
+}
+pub unsafe trait ExtendsMultisamplePropertiesEXT {}
+impl<'a> ::std::ops::Deref for MultisamplePropertiesEXTBuilder<'a> {
+    type Target = MultisamplePropertiesEXT;
+    fn deref(&self) -> &Self::Target {
+        &self.inner
+    }
+}
+impl<'a> ::std::ops::DerefMut for MultisamplePropertiesEXTBuilder<'a> {
+    fn deref_mut(&mut self) -> &mut Self::Target {
+        &mut self.inner
+    }
+}
+impl<'a> MultisamplePropertiesEXTBuilder<'a> {
+    pub fn max_sample_location_grid_size(
+        mut self,
+        max_sample_location_grid_size: Extent2D,
+    ) -> MultisamplePropertiesEXTBuilder<'a> {
+        self.inner.max_sample_location_grid_size = max_sample_location_grid_size;
+        self
+    }
+    #[doc = r" Prepends the given extension struct between the root and the first pointer. This"]
+    #[doc = r" method only exists on structs that can be passed to a function directly. Only"]
+    #[doc = r" valid extension structs can be pushed into the chain."]
+    #[doc = r" If the chain looks like `A -> B -> C`, and you call `builder.push_next(&mut D)`, then the"]
+    #[doc = r" chain will look like `A -> D -> B -> C`."]
+    pub fn push_next<T: ExtendsMultisamplePropertiesEXT>(
+        mut self,
+        next: &'a mut T,
+    ) -> MultisamplePropertiesEXTBuilder<'a> {
+        unsafe {
+            let next_ptr = next as *mut T as *mut BaseOutStructure;
+            let last_next = ptr_chain_iter(next).last().unwrap();
+            (*last_next).p_next = self.inner.p_next as _;
+            self.inner.p_next = next_ptr as _;
+        }
+        self
+    }
+    #[doc = r" Calling build will **discard** all the lifetime information. Only call this if"]
+    #[doc = r" necessary!
Builders implement `Deref` targeting their corresponding Vulkan struct,"] + #[doc = r" so references to builders can be passed directly to Vulkan functions."] + pub fn build(self) -> MultisamplePropertiesEXT { + self.inner + } +} +#[repr(C)] +#[derive(Copy, Clone, Debug)] +#[doc = ""] +pub struct SamplerReductionModeCreateInfoEXT { + pub s_type: StructureType, + pub p_next: *const c_void, + pub reduction_mode: SamplerReductionModeEXT, +} +impl ::std::default::Default for SamplerReductionModeCreateInfoEXT { + fn default() -> SamplerReductionModeCreateInfoEXT { + SamplerReductionModeCreateInfoEXT { + s_type: StructureType::SAMPLER_REDUCTION_MODE_CREATE_INFO_EXT, + p_next: ::std::ptr::null(), + reduction_mode: SamplerReductionModeEXT::default(), + } + } +} +impl SamplerReductionModeCreateInfoEXT { + pub fn builder<'a>() -> SamplerReductionModeCreateInfoEXTBuilder<'a> { + SamplerReductionModeCreateInfoEXTBuilder { + inner: SamplerReductionModeCreateInfoEXT::default(), + marker: ::std::marker::PhantomData, + } + } +} +#[repr(transparent)] +pub struct SamplerReductionModeCreateInfoEXTBuilder<'a> { + inner: SamplerReductionModeCreateInfoEXT, + marker: ::std::marker::PhantomData<&'a ()>, +} +unsafe impl ExtendsSamplerCreateInfo for SamplerReductionModeCreateInfoEXTBuilder<'_> {} +unsafe impl ExtendsSamplerCreateInfo for SamplerReductionModeCreateInfoEXT {} +impl<'a> ::std::ops::Deref for SamplerReductionModeCreateInfoEXTBuilder<'a> { + type Target = SamplerReductionModeCreateInfoEXT; + fn deref(&self) -> &Self::Target { + &self.inner + } +} +impl<'a> ::std::ops::DerefMut for SamplerReductionModeCreateInfoEXTBuilder<'a> { + fn deref_mut(&mut self) -> &mut Self::Target { + &mut self.inner + } +} +impl<'a> SamplerReductionModeCreateInfoEXTBuilder<'a> { + pub fn reduction_mode( + mut self, + reduction_mode: SamplerReductionModeEXT, + ) -> SamplerReductionModeCreateInfoEXTBuilder<'a> { + self.inner.reduction_mode = reduction_mode; + self + } + #[doc = r" Calling build will **discard** all the lifetime information. Only call this if"] + #[doc = r" necessary! 
Builders implement `Deref` targeting their corresponding Vulkan struct,"] + #[doc = r" so references to builders can be passed directly to Vulkan functions."] + pub fn build(self) -> SamplerReductionModeCreateInfoEXT { + self.inner + } +} +#[repr(C)] +#[derive(Copy, Clone, Debug)] +#[doc = ""] +pub struct PhysicalDeviceBlendOperationAdvancedFeaturesEXT { + pub s_type: StructureType, + pub p_next: *mut c_void, + pub advanced_blend_coherent_operations: Bool32, +} +impl ::std::default::Default for PhysicalDeviceBlendOperationAdvancedFeaturesEXT { + fn default() -> PhysicalDeviceBlendOperationAdvancedFeaturesEXT { + PhysicalDeviceBlendOperationAdvancedFeaturesEXT { + s_type: StructureType::PHYSICAL_DEVICE_BLEND_OPERATION_ADVANCED_FEATURES_EXT, + p_next: ::std::ptr::null_mut(), + advanced_blend_coherent_operations: Bool32::default(), + } + } +} +impl PhysicalDeviceBlendOperationAdvancedFeaturesEXT { + pub fn builder<'a>() -> PhysicalDeviceBlendOperationAdvancedFeaturesEXTBuilder<'a> { + PhysicalDeviceBlendOperationAdvancedFeaturesEXTBuilder { + inner: PhysicalDeviceBlendOperationAdvancedFeaturesEXT::default(), + marker: ::std::marker::PhantomData, + } + } +} +#[repr(transparent)] +pub struct PhysicalDeviceBlendOperationAdvancedFeaturesEXTBuilder<'a> { + inner: PhysicalDeviceBlendOperationAdvancedFeaturesEXT, + marker: ::std::marker::PhantomData<&'a ()>, +} +unsafe impl ExtendsDeviceCreateInfo for PhysicalDeviceBlendOperationAdvancedFeaturesEXTBuilder<'_> {} +unsafe impl ExtendsDeviceCreateInfo for PhysicalDeviceBlendOperationAdvancedFeaturesEXT {} +impl<'a> ::std::ops::Deref for PhysicalDeviceBlendOperationAdvancedFeaturesEXTBuilder<'a> { + type Target = PhysicalDeviceBlendOperationAdvancedFeaturesEXT; + fn deref(&self) -> &Self::Target { + &self.inner + } +} +impl<'a> ::std::ops::DerefMut for PhysicalDeviceBlendOperationAdvancedFeaturesEXTBuilder<'a> { + fn deref_mut(&mut self) -> &mut Self::Target { + &mut self.inner + } +} +impl<'a> PhysicalDeviceBlendOperationAdvancedFeaturesEXTBuilder<'a> { + pub fn advanced_blend_coherent_operations( + mut self, + advanced_blend_coherent_operations: bool, + ) -> PhysicalDeviceBlendOperationAdvancedFeaturesEXTBuilder<'a> { + self.inner.advanced_blend_coherent_operations = advanced_blend_coherent_operations.into(); + self + } + #[doc = r" Calling build will **discard** all the lifetime information. Only call this if"] + #[doc = r" necessary! 
Builders implement `Deref` targeting their corresponding Vulkan struct,"] + #[doc = r" so references to builders can be passed directly to Vulkan functions."] + pub fn build(self) -> PhysicalDeviceBlendOperationAdvancedFeaturesEXT { + self.inner + } +} +#[repr(C)] +#[derive(Copy, Clone, Debug)] +#[doc = ""] +pub struct PhysicalDeviceBlendOperationAdvancedPropertiesEXT { + pub s_type: StructureType, + pub p_next: *mut c_void, + pub advanced_blend_max_color_attachments: u32, + pub advanced_blend_independent_blend: Bool32, + pub advanced_blend_non_premultiplied_src_color: Bool32, + pub advanced_blend_non_premultiplied_dst_color: Bool32, + pub advanced_blend_correlated_overlap: Bool32, + pub advanced_blend_all_operations: Bool32, +} +impl ::std::default::Default for PhysicalDeviceBlendOperationAdvancedPropertiesEXT { + fn default() -> PhysicalDeviceBlendOperationAdvancedPropertiesEXT { + PhysicalDeviceBlendOperationAdvancedPropertiesEXT { + s_type: StructureType::PHYSICAL_DEVICE_BLEND_OPERATION_ADVANCED_PROPERTIES_EXT, + p_next: ::std::ptr::null_mut(), + advanced_blend_max_color_attachments: u32::default(), + advanced_blend_independent_blend: Bool32::default(), + advanced_blend_non_premultiplied_src_color: Bool32::default(), + advanced_blend_non_premultiplied_dst_color: Bool32::default(), + advanced_blend_correlated_overlap: Bool32::default(), + advanced_blend_all_operations: Bool32::default(), + } + } +} +impl PhysicalDeviceBlendOperationAdvancedPropertiesEXT { + pub fn builder<'a>() -> PhysicalDeviceBlendOperationAdvancedPropertiesEXTBuilder<'a> { + PhysicalDeviceBlendOperationAdvancedPropertiesEXTBuilder { + inner: PhysicalDeviceBlendOperationAdvancedPropertiesEXT::default(), + marker: ::std::marker::PhantomData, + } + } +} +#[repr(transparent)] +pub struct PhysicalDeviceBlendOperationAdvancedPropertiesEXTBuilder<'a> { + inner: PhysicalDeviceBlendOperationAdvancedPropertiesEXT, + marker: ::std::marker::PhantomData<&'a ()>, +} +unsafe impl ExtendsPhysicalDeviceProperties2 + for PhysicalDeviceBlendOperationAdvancedPropertiesEXTBuilder<'_> +{ +} +unsafe impl ExtendsPhysicalDeviceProperties2 for PhysicalDeviceBlendOperationAdvancedPropertiesEXT {} +impl<'a> ::std::ops::Deref for PhysicalDeviceBlendOperationAdvancedPropertiesEXTBuilder<'a> { + type Target = PhysicalDeviceBlendOperationAdvancedPropertiesEXT; + fn deref(&self) -> &Self::Target { + &self.inner + } +} +impl<'a> ::std::ops::DerefMut for PhysicalDeviceBlendOperationAdvancedPropertiesEXTBuilder<'a> { + fn deref_mut(&mut self) -> &mut Self::Target { + &mut self.inner + } +} +impl<'a> PhysicalDeviceBlendOperationAdvancedPropertiesEXTBuilder<'a> { + pub fn advanced_blend_max_color_attachments( + mut self, + advanced_blend_max_color_attachments: u32, + ) -> PhysicalDeviceBlendOperationAdvancedPropertiesEXTBuilder<'a> { + self.inner.advanced_blend_max_color_attachments = advanced_blend_max_color_attachments; + self + } + pub fn advanced_blend_independent_blend( + mut self, + advanced_blend_independent_blend: bool, + ) -> PhysicalDeviceBlendOperationAdvancedPropertiesEXTBuilder<'a> { + self.inner.advanced_blend_independent_blend = advanced_blend_independent_blend.into(); + self + } + pub fn advanced_blend_non_premultiplied_src_color( + mut self, + advanced_blend_non_premultiplied_src_color: bool, + ) -> PhysicalDeviceBlendOperationAdvancedPropertiesEXTBuilder<'a> { + self.inner.advanced_blend_non_premultiplied_src_color = + advanced_blend_non_premultiplied_src_color.into(); + self + } + pub fn advanced_blend_non_premultiplied_dst_color( + 
mut self, + advanced_blend_non_premultiplied_dst_color: bool, + ) -> PhysicalDeviceBlendOperationAdvancedPropertiesEXTBuilder<'a> { + self.inner.advanced_blend_non_premultiplied_dst_color = + advanced_blend_non_premultiplied_dst_color.into(); + self + } + pub fn advanced_blend_correlated_overlap( + mut self, + advanced_blend_correlated_overlap: bool, + ) -> PhysicalDeviceBlendOperationAdvancedPropertiesEXTBuilder<'a> { + self.inner.advanced_blend_correlated_overlap = advanced_blend_correlated_overlap.into(); + self + } + pub fn advanced_blend_all_operations( + mut self, + advanced_blend_all_operations: bool, + ) -> PhysicalDeviceBlendOperationAdvancedPropertiesEXTBuilder<'a> { + self.inner.advanced_blend_all_operations = advanced_blend_all_operations.into(); + self + } + #[doc = r" Calling build will **discard** all the lifetime information. Only call this if"] + #[doc = r" necessary! Builders implement `Deref` targeting their corresponding Vulkan struct,"] + #[doc = r" so references to builders can be passed directly to Vulkan functions."] + pub fn build(self) -> PhysicalDeviceBlendOperationAdvancedPropertiesEXT { + self.inner + } +} +#[repr(C)] +#[derive(Copy, Clone, Debug)] +#[doc = ""] +pub struct PipelineColorBlendAdvancedStateCreateInfoEXT { + pub s_type: StructureType, + pub p_next: *const c_void, + pub src_premultiplied: Bool32, + pub dst_premultiplied: Bool32, + pub blend_overlap: BlendOverlapEXT, +} +impl ::std::default::Default for PipelineColorBlendAdvancedStateCreateInfoEXT { + fn default() -> PipelineColorBlendAdvancedStateCreateInfoEXT { + PipelineColorBlendAdvancedStateCreateInfoEXT { + s_type: StructureType::PIPELINE_COLOR_BLEND_ADVANCED_STATE_CREATE_INFO_EXT, + p_next: ::std::ptr::null(), + src_premultiplied: Bool32::default(), + dst_premultiplied: Bool32::default(), + blend_overlap: BlendOverlapEXT::default(), + } + } +} +impl PipelineColorBlendAdvancedStateCreateInfoEXT { + pub fn builder<'a>() -> PipelineColorBlendAdvancedStateCreateInfoEXTBuilder<'a> { + PipelineColorBlendAdvancedStateCreateInfoEXTBuilder { + inner: PipelineColorBlendAdvancedStateCreateInfoEXT::default(), + marker: ::std::marker::PhantomData, + } + } +} +#[repr(transparent)] +pub struct PipelineColorBlendAdvancedStateCreateInfoEXTBuilder<'a> { + inner: PipelineColorBlendAdvancedStateCreateInfoEXT, + marker: ::std::marker::PhantomData<&'a ()>, +} +unsafe impl ExtendsPipelineColorBlendStateCreateInfo + for PipelineColorBlendAdvancedStateCreateInfoEXTBuilder<'_> +{ +} +unsafe impl ExtendsPipelineColorBlendStateCreateInfo + for PipelineColorBlendAdvancedStateCreateInfoEXT +{ +} +impl<'a> ::std::ops::Deref for PipelineColorBlendAdvancedStateCreateInfoEXTBuilder<'a> { + type Target = PipelineColorBlendAdvancedStateCreateInfoEXT; + fn deref(&self) -> &Self::Target { + &self.inner + } +} +impl<'a> ::std::ops::DerefMut for PipelineColorBlendAdvancedStateCreateInfoEXTBuilder<'a> { + fn deref_mut(&mut self) -> &mut Self::Target { + &mut self.inner + } +} +impl<'a> PipelineColorBlendAdvancedStateCreateInfoEXTBuilder<'a> { + pub fn src_premultiplied( + mut self, + src_premultiplied: bool, + ) -> PipelineColorBlendAdvancedStateCreateInfoEXTBuilder<'a> { + self.inner.src_premultiplied = src_premultiplied.into(); + self + } + pub fn dst_premultiplied( + mut self, + dst_premultiplied: bool, + ) -> PipelineColorBlendAdvancedStateCreateInfoEXTBuilder<'a> { + self.inner.dst_premultiplied = dst_premultiplied.into(); + self + } + pub fn blend_overlap( + mut self, + blend_overlap: BlendOverlapEXT, + ) -> 
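// A minimal sketch of querying these limits, assuming ash's `vk` module and the
// `InstanceV1_1` trait (Vulkan 1.1 / VK_KHR_get_physical_device_properties2);
// `instance` and `pdevice` are placeholders created elsewhere.
unsafe fn max_advanced_blend_attachments(
    instance: &ash::Instance,
    pdevice: vk::PhysicalDevice,
) -> u32 {
    use ash::version::InstanceV1_1; // ash-0.29-era trait path
    let mut blend_props = vk::PhysicalDeviceBlendOperationAdvancedPropertiesEXT::default();
    let mut props2 = vk::PhysicalDeviceProperties2::builder().push_next(&mut blend_props);
    // The builder coerces through `DerefMut` to the raw struct the call expects.
    instance.get_physical_device_properties2(pdevice, &mut props2);
    blend_props.advanced_blend_max_color_attachments
}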
PipelineColorBlendAdvancedStateCreateInfoEXTBuilder<'a> { + self.inner.blend_overlap = blend_overlap; + self + } + #[doc = r" Calling build will **discard** all the lifetime information. Only call this if"] + #[doc = r" necessary! Builders implement `Deref` targeting their corresponding Vulkan struct,"] + #[doc = r" so references to builders can be passed directly to Vulkan functions."] + pub fn build(self) -> PipelineColorBlendAdvancedStateCreateInfoEXT { + self.inner + } +} +#[repr(C)] +#[derive(Copy, Clone, Debug)] +#[doc = ""] +pub struct PhysicalDeviceInlineUniformBlockFeaturesEXT { + pub s_type: StructureType, + pub p_next: *mut c_void, + pub inline_uniform_block: Bool32, + pub descriptor_binding_inline_uniform_block_update_after_bind: Bool32, +} +impl ::std::default::Default for PhysicalDeviceInlineUniformBlockFeaturesEXT { + fn default() -> PhysicalDeviceInlineUniformBlockFeaturesEXT { + PhysicalDeviceInlineUniformBlockFeaturesEXT { + s_type: StructureType::PHYSICAL_DEVICE_INLINE_UNIFORM_BLOCK_FEATURES_EXT, + p_next: ::std::ptr::null_mut(), + inline_uniform_block: Bool32::default(), + descriptor_binding_inline_uniform_block_update_after_bind: Bool32::default(), + } + } +} +impl PhysicalDeviceInlineUniformBlockFeaturesEXT { + pub fn builder<'a>() -> PhysicalDeviceInlineUniformBlockFeaturesEXTBuilder<'a> { + PhysicalDeviceInlineUniformBlockFeaturesEXTBuilder { + inner: PhysicalDeviceInlineUniformBlockFeaturesEXT::default(), + marker: ::std::marker::PhantomData, + } + } +} +#[repr(transparent)] +pub struct PhysicalDeviceInlineUniformBlockFeaturesEXTBuilder<'a> { + inner: PhysicalDeviceInlineUniformBlockFeaturesEXT, + marker: ::std::marker::PhantomData<&'a ()>, +} +unsafe impl ExtendsDeviceCreateInfo for PhysicalDeviceInlineUniformBlockFeaturesEXTBuilder<'_> {} +unsafe impl ExtendsDeviceCreateInfo for PhysicalDeviceInlineUniformBlockFeaturesEXT {} +impl<'a> ::std::ops::Deref for PhysicalDeviceInlineUniformBlockFeaturesEXTBuilder<'a> { + type Target = PhysicalDeviceInlineUniformBlockFeaturesEXT; + fn deref(&self) -> &Self::Target { + &self.inner + } +} +impl<'a> ::std::ops::DerefMut for PhysicalDeviceInlineUniformBlockFeaturesEXTBuilder<'a> { + fn deref_mut(&mut self) -> &mut Self::Target { + &mut self.inner + } +} +impl<'a> PhysicalDeviceInlineUniformBlockFeaturesEXTBuilder<'a> { + pub fn inline_uniform_block( + mut self, + inline_uniform_block: bool, + ) -> PhysicalDeviceInlineUniformBlockFeaturesEXTBuilder<'a> { + self.inner.inline_uniform_block = inline_uniform_block.into(); + self + } + pub fn descriptor_binding_inline_uniform_block_update_after_bind( + mut self, + descriptor_binding_inline_uniform_block_update_after_bind: bool, + ) -> PhysicalDeviceInlineUniformBlockFeaturesEXTBuilder<'a> { + self.inner + .descriptor_binding_inline_uniform_block_update_after_bind = + descriptor_binding_inline_uniform_block_update_after_bind.into(); + self + } + #[doc = r" Calling build will **discard** all the lifetime information. Only call this if"] + #[doc = r" necessary! 
Builders implement `Deref` targeting their corresponding Vulkan struct,"] + #[doc = r" so references to builders can be passed directly to Vulkan functions."] + pub fn build(self) -> PhysicalDeviceInlineUniformBlockFeaturesEXT { + self.inner + } +} +#[repr(C)] +#[derive(Copy, Clone, Debug)] +#[doc = ""] +pub struct PhysicalDeviceInlineUniformBlockPropertiesEXT { + pub s_type: StructureType, + pub p_next: *mut c_void, + pub max_inline_uniform_block_size: u32, + pub max_per_stage_descriptor_inline_uniform_blocks: u32, + pub max_per_stage_descriptor_update_after_bind_inline_uniform_blocks: u32, + pub max_descriptor_set_inline_uniform_blocks: u32, + pub max_descriptor_set_update_after_bind_inline_uniform_blocks: u32, +} +impl ::std::default::Default for PhysicalDeviceInlineUniformBlockPropertiesEXT { + fn default() -> PhysicalDeviceInlineUniformBlockPropertiesEXT { + PhysicalDeviceInlineUniformBlockPropertiesEXT { + s_type: StructureType::PHYSICAL_DEVICE_INLINE_UNIFORM_BLOCK_PROPERTIES_EXT, + p_next: ::std::ptr::null_mut(), + max_inline_uniform_block_size: u32::default(), + max_per_stage_descriptor_inline_uniform_blocks: u32::default(), + max_per_stage_descriptor_update_after_bind_inline_uniform_blocks: u32::default(), + max_descriptor_set_inline_uniform_blocks: u32::default(), + max_descriptor_set_update_after_bind_inline_uniform_blocks: u32::default(), + } + } +} +impl PhysicalDeviceInlineUniformBlockPropertiesEXT { + pub fn builder<'a>() -> PhysicalDeviceInlineUniformBlockPropertiesEXTBuilder<'a> { + PhysicalDeviceInlineUniformBlockPropertiesEXTBuilder { + inner: PhysicalDeviceInlineUniformBlockPropertiesEXT::default(), + marker: ::std::marker::PhantomData, + } + } +} +#[repr(transparent)] +pub struct PhysicalDeviceInlineUniformBlockPropertiesEXTBuilder<'a> { + inner: PhysicalDeviceInlineUniformBlockPropertiesEXT, + marker: ::std::marker::PhantomData<&'a ()>, +} +unsafe impl ExtendsPhysicalDeviceProperties2 + for PhysicalDeviceInlineUniformBlockPropertiesEXTBuilder<'_> +{ +} +unsafe impl ExtendsPhysicalDeviceProperties2 for PhysicalDeviceInlineUniformBlockPropertiesEXT {} +impl<'a> ::std::ops::Deref for PhysicalDeviceInlineUniformBlockPropertiesEXTBuilder<'a> { + type Target = PhysicalDeviceInlineUniformBlockPropertiesEXT; + fn deref(&self) -> &Self::Target { + &self.inner + } +} +impl<'a> ::std::ops::DerefMut for PhysicalDeviceInlineUniformBlockPropertiesEXTBuilder<'a> { + fn deref_mut(&mut self) -> &mut Self::Target { + &mut self.inner + } +} +impl<'a> PhysicalDeviceInlineUniformBlockPropertiesEXTBuilder<'a> { + pub fn max_inline_uniform_block_size( + mut self, + max_inline_uniform_block_size: u32, + ) -> PhysicalDeviceInlineUniformBlockPropertiesEXTBuilder<'a> { + self.inner.max_inline_uniform_block_size = max_inline_uniform_block_size; + self + } + pub fn max_per_stage_descriptor_inline_uniform_blocks( + mut self, + max_per_stage_descriptor_inline_uniform_blocks: u32, + ) -> PhysicalDeviceInlineUniformBlockPropertiesEXTBuilder<'a> { + self.inner.max_per_stage_descriptor_inline_uniform_blocks = + max_per_stage_descriptor_inline_uniform_blocks; + self + } + pub fn max_per_stage_descriptor_update_after_bind_inline_uniform_blocks( + mut self, + max_per_stage_descriptor_update_after_bind_inline_uniform_blocks: u32, + ) -> PhysicalDeviceInlineUniformBlockPropertiesEXTBuilder<'a> { + self.inner + .max_per_stage_descriptor_update_after_bind_inline_uniform_blocks = + max_per_stage_descriptor_update_after_bind_inline_uniform_blocks; + self + } + pub fn max_descriptor_set_inline_uniform_blocks( + 
mut self, + max_descriptor_set_inline_uniform_blocks: u32, + ) -> PhysicalDeviceInlineUniformBlockPropertiesEXTBuilder<'a> { + self.inner.max_descriptor_set_inline_uniform_blocks = + max_descriptor_set_inline_uniform_blocks; + self + } + pub fn max_descriptor_set_update_after_bind_inline_uniform_blocks( + mut self, + max_descriptor_set_update_after_bind_inline_uniform_blocks: u32, + ) -> PhysicalDeviceInlineUniformBlockPropertiesEXTBuilder<'a> { + self.inner + .max_descriptor_set_update_after_bind_inline_uniform_blocks = + max_descriptor_set_update_after_bind_inline_uniform_blocks; + self + } + #[doc = r" Calling build will **discard** all the lifetime information. Only call this if"] + #[doc = r" necessary! Builders implement `Deref` targeting their corresponding Vulkan struct,"] + #[doc = r" so references to builders can be passed directly to Vulkan functions."] + pub fn build(self) -> PhysicalDeviceInlineUniformBlockPropertiesEXT { + self.inner + } +} +#[repr(C)] +#[derive(Copy, Clone, Debug)] +#[doc = ""] +pub struct WriteDescriptorSetInlineUniformBlockEXT { + pub s_type: StructureType, + pub p_next: *const c_void, + pub data_size: u32, + pub p_data: *const c_void, +} +impl ::std::default::Default for WriteDescriptorSetInlineUniformBlockEXT { + fn default() -> WriteDescriptorSetInlineUniformBlockEXT { + WriteDescriptorSetInlineUniformBlockEXT { + s_type: StructureType::WRITE_DESCRIPTOR_SET_INLINE_UNIFORM_BLOCK_EXT, + p_next: ::std::ptr::null(), + data_size: u32::default(), + p_data: ::std::ptr::null(), + } + } +} +impl WriteDescriptorSetInlineUniformBlockEXT { + pub fn builder<'a>() -> WriteDescriptorSetInlineUniformBlockEXTBuilder<'a> { + WriteDescriptorSetInlineUniformBlockEXTBuilder { + inner: WriteDescriptorSetInlineUniformBlockEXT::default(), + marker: ::std::marker::PhantomData, + } + } +} +#[repr(transparent)] +pub struct WriteDescriptorSetInlineUniformBlockEXTBuilder<'a> { + inner: WriteDescriptorSetInlineUniformBlockEXT, + marker: ::std::marker::PhantomData<&'a ()>, +} +unsafe impl ExtendsWriteDescriptorSet for WriteDescriptorSetInlineUniformBlockEXTBuilder<'_> {} +unsafe impl ExtendsWriteDescriptorSet for WriteDescriptorSetInlineUniformBlockEXT {} +impl<'a> ::std::ops::Deref for WriteDescriptorSetInlineUniformBlockEXTBuilder<'a> { + type Target = WriteDescriptorSetInlineUniformBlockEXT; + fn deref(&self) -> &Self::Target { + &self.inner + } +} +impl<'a> ::std::ops::DerefMut for WriteDescriptorSetInlineUniformBlockEXTBuilder<'a> { + fn deref_mut(&mut self) -> &mut Self::Target { + &mut self.inner + } +} +impl<'a> WriteDescriptorSetInlineUniformBlockEXTBuilder<'a> { + pub fn data(mut self, data: &'a [u8]) -> WriteDescriptorSetInlineUniformBlockEXTBuilder<'a> { + self.inner.data_size = data.len() as _; + self.inner.p_data = data.as_ptr() as *const c_void; + self + } + #[doc = r" Calling build will **discard** all the lifetime information. Only call this if"] + #[doc = r" necessary! 
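// A minimal sketch of the `data()` slice setter above, assuming this module is
// in scope as ash's `vk`: one borrow fills both `data_size` and `p_data`, and
// the builder's `Deref` impl lets the raw fields be read back without `build()`.
fn demo_inline_uniform_data() {
    let bytes = [0u8; 16];
    let write = vk::WriteDescriptorSetInlineUniformBlockEXT::builder().data(&bytes);
    assert_eq!(write.data_size, 16); // field access goes through Deref
    assert!(!write.p_data.is_null());
}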
Builders implement `Deref` targeting their corresponding Vulkan struct,"] + #[doc = r" so references to builders can be passed directly to Vulkan functions."] + pub fn build(self) -> WriteDescriptorSetInlineUniformBlockEXT { + self.inner + } +} +#[repr(C)] +#[derive(Copy, Clone, Debug)] +#[doc = ""] +pub struct DescriptorPoolInlineUniformBlockCreateInfoEXT { + pub s_type: StructureType, + pub p_next: *const c_void, + pub max_inline_uniform_block_bindings: u32, +} +impl ::std::default::Default for DescriptorPoolInlineUniformBlockCreateInfoEXT { + fn default() -> DescriptorPoolInlineUniformBlockCreateInfoEXT { + DescriptorPoolInlineUniformBlockCreateInfoEXT { + s_type: StructureType::DESCRIPTOR_POOL_INLINE_UNIFORM_BLOCK_CREATE_INFO_EXT, + p_next: ::std::ptr::null(), + max_inline_uniform_block_bindings: u32::default(), + } + } +} +impl DescriptorPoolInlineUniformBlockCreateInfoEXT { + pub fn builder<'a>() -> DescriptorPoolInlineUniformBlockCreateInfoEXTBuilder<'a> { + DescriptorPoolInlineUniformBlockCreateInfoEXTBuilder { + inner: DescriptorPoolInlineUniformBlockCreateInfoEXT::default(), + marker: ::std::marker::PhantomData, + } + } +} +#[repr(transparent)] +pub struct DescriptorPoolInlineUniformBlockCreateInfoEXTBuilder<'a> { + inner: DescriptorPoolInlineUniformBlockCreateInfoEXT, + marker: ::std::marker::PhantomData<&'a ()>, +} +unsafe impl ExtendsDescriptorPoolCreateInfo + for DescriptorPoolInlineUniformBlockCreateInfoEXTBuilder<'_> +{ +} +unsafe impl ExtendsDescriptorPoolCreateInfo for DescriptorPoolInlineUniformBlockCreateInfoEXT {} +impl<'a> ::std::ops::Deref for DescriptorPoolInlineUniformBlockCreateInfoEXTBuilder<'a> { + type Target = DescriptorPoolInlineUniformBlockCreateInfoEXT; + fn deref(&self) -> &Self::Target { + &self.inner + } +} +impl<'a> ::std::ops::DerefMut for DescriptorPoolInlineUniformBlockCreateInfoEXTBuilder<'a> { + fn deref_mut(&mut self) -> &mut Self::Target { + &mut self.inner + } +} +impl<'a> DescriptorPoolInlineUniformBlockCreateInfoEXTBuilder<'a> { + pub fn max_inline_uniform_block_bindings( + mut self, + max_inline_uniform_block_bindings: u32, + ) -> DescriptorPoolInlineUniformBlockCreateInfoEXTBuilder<'a> { + self.inner.max_inline_uniform_block_bindings = max_inline_uniform_block_bindings; + self + } + #[doc = r" Calling build will **discard** all the lifetime information. Only call this if"] + #[doc = r" necessary! 
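// A minimal sketch, assuming ash's `vk` module: a pool meant for inline uniform
// blocks declares its byte capacity through the pool sizes (descriptorCount is
// a byte count for this descriptor type) and the binding limit through the EXT
// chain. `pool_sizes` and the counts here are placeholders.
fn inline_uniform_pool_info<'a>(
    pool_sizes: &'a [vk::DescriptorPoolSize],
    inline_info: &'a mut vk::DescriptorPoolInlineUniformBlockCreateInfoEXT,
) -> vk::DescriptorPoolCreateInfoBuilder<'a> {
    *inline_info = vk::DescriptorPoolInlineUniformBlockCreateInfoEXT::builder()
        .max_inline_uniform_block_bindings(4)
        .build();
    vk::DescriptorPoolCreateInfo::builder()
        .max_sets(16)
        .pool_sizes(pool_sizes)
        .push_next(inline_info)
}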
Builders implement `Deref` targeting their corresponding Vulkan struct,"] + #[doc = r" so references to builders can be passed directly to Vulkan functions."] + pub fn build(self) -> DescriptorPoolInlineUniformBlockCreateInfoEXT { + self.inner + } +} +#[repr(C)] +#[derive(Copy, Clone, Debug)] +#[doc = ""] +pub struct PipelineCoverageModulationStateCreateInfoNV { + pub s_type: StructureType, + pub p_next: *const c_void, + pub flags: PipelineCoverageModulationStateCreateFlagsNV, + pub coverage_modulation_mode: CoverageModulationModeNV, + pub coverage_modulation_table_enable: Bool32, + pub coverage_modulation_table_count: u32, + pub p_coverage_modulation_table: *const f32, +} +impl ::std::default::Default for PipelineCoverageModulationStateCreateInfoNV { + fn default() -> PipelineCoverageModulationStateCreateInfoNV { + PipelineCoverageModulationStateCreateInfoNV { + s_type: StructureType::PIPELINE_COVERAGE_MODULATION_STATE_CREATE_INFO_NV, + p_next: ::std::ptr::null(), + flags: PipelineCoverageModulationStateCreateFlagsNV::default(), + coverage_modulation_mode: CoverageModulationModeNV::default(), + coverage_modulation_table_enable: Bool32::default(), + coverage_modulation_table_count: u32::default(), + p_coverage_modulation_table: ::std::ptr::null(), + } + } +} +impl PipelineCoverageModulationStateCreateInfoNV { + pub fn builder<'a>() -> PipelineCoverageModulationStateCreateInfoNVBuilder<'a> { + PipelineCoverageModulationStateCreateInfoNVBuilder { + inner: PipelineCoverageModulationStateCreateInfoNV::default(), + marker: ::std::marker::PhantomData, + } + } +} +#[repr(transparent)] +pub struct PipelineCoverageModulationStateCreateInfoNVBuilder<'a> { + inner: PipelineCoverageModulationStateCreateInfoNV, + marker: ::std::marker::PhantomData<&'a ()>, +} +unsafe impl ExtendsPipelineMultisampleStateCreateInfo + for PipelineCoverageModulationStateCreateInfoNVBuilder<'_> +{ +} +unsafe impl ExtendsPipelineMultisampleStateCreateInfo + for PipelineCoverageModulationStateCreateInfoNV +{ +} +impl<'a> ::std::ops::Deref for PipelineCoverageModulationStateCreateInfoNVBuilder<'a> { + type Target = PipelineCoverageModulationStateCreateInfoNV; + fn deref(&self) -> &Self::Target { + &self.inner + } +} +impl<'a> ::std::ops::DerefMut for PipelineCoverageModulationStateCreateInfoNVBuilder<'a> { + fn deref_mut(&mut self) -> &mut Self::Target { + &mut self.inner + } +} +impl<'a> PipelineCoverageModulationStateCreateInfoNVBuilder<'a> { + pub fn flags( + mut self, + flags: PipelineCoverageModulationStateCreateFlagsNV, + ) -> PipelineCoverageModulationStateCreateInfoNVBuilder<'a> { + self.inner.flags = flags; + self + } + pub fn coverage_modulation_mode( + mut self, + coverage_modulation_mode: CoverageModulationModeNV, + ) -> PipelineCoverageModulationStateCreateInfoNVBuilder<'a> { + self.inner.coverage_modulation_mode = coverage_modulation_mode; + self + } + pub fn coverage_modulation_table_enable( + mut self, + coverage_modulation_table_enable: bool, + ) -> PipelineCoverageModulationStateCreateInfoNVBuilder<'a> { + self.inner.coverage_modulation_table_enable = coverage_modulation_table_enable.into(); + self + } + pub fn coverage_modulation_table( + mut self, + coverage_modulation_table: &'a [f32], + ) -> PipelineCoverageModulationStateCreateInfoNVBuilder<'a> { + self.inner.coverage_modulation_table_count = coverage_modulation_table.len() as _; + self.inner.p_coverage_modulation_table = coverage_modulation_table.as_ptr(); + self + } + #[doc = r" Calling build will **discard** all the lifetime information. 
Only call this if"] + #[doc = r" necessary! Builders implement `Deref` targeting their corresponding Vulkan struct,"] + #[doc = r" so references to builders can be passed directly to Vulkan functions."] + pub fn build(self) -> PipelineCoverageModulationStateCreateInfoNV { + self.inner + } +} +#[repr(C)] +#[derive(Copy, Clone, Debug)] +#[doc = ""] +pub struct ImageFormatListCreateInfoKHR { + pub s_type: StructureType, + pub p_next: *const c_void, + pub view_format_count: u32, + pub p_view_formats: *const Format, +} +impl ::std::default::Default for ImageFormatListCreateInfoKHR { + fn default() -> ImageFormatListCreateInfoKHR { + ImageFormatListCreateInfoKHR { + s_type: StructureType::IMAGE_FORMAT_LIST_CREATE_INFO_KHR, + p_next: ::std::ptr::null(), + view_format_count: u32::default(), + p_view_formats: ::std::ptr::null(), + } + } +} +impl ImageFormatListCreateInfoKHR { + pub fn builder<'a>() -> ImageFormatListCreateInfoKHRBuilder<'a> { + ImageFormatListCreateInfoKHRBuilder { + inner: ImageFormatListCreateInfoKHR::default(), + marker: ::std::marker::PhantomData, + } + } +} +#[repr(transparent)] +pub struct ImageFormatListCreateInfoKHRBuilder<'a> { + inner: ImageFormatListCreateInfoKHR, + marker: ::std::marker::PhantomData<&'a ()>, +} +unsafe impl ExtendsImageCreateInfo for ImageFormatListCreateInfoKHRBuilder<'_> {} +unsafe impl ExtendsImageCreateInfo for ImageFormatListCreateInfoKHR {} +unsafe impl ExtendsSwapchainCreateInfoKHR for ImageFormatListCreateInfoKHRBuilder<'_> {} +unsafe impl ExtendsSwapchainCreateInfoKHR for ImageFormatListCreateInfoKHR {} +unsafe impl ExtendsPhysicalDeviceImageFormatInfo2 for ImageFormatListCreateInfoKHRBuilder<'_> {} +unsafe impl ExtendsPhysicalDeviceImageFormatInfo2 for ImageFormatListCreateInfoKHR {} +impl<'a> ::std::ops::Deref for ImageFormatListCreateInfoKHRBuilder<'a> { + type Target = ImageFormatListCreateInfoKHR; + fn deref(&self) -> &Self::Target { + &self.inner + } +} +impl<'a> ::std::ops::DerefMut for ImageFormatListCreateInfoKHRBuilder<'a> { + fn deref_mut(&mut self) -> &mut Self::Target { + &mut self.inner + } +} +impl<'a> ImageFormatListCreateInfoKHRBuilder<'a> { + pub fn view_formats( + mut self, + view_formats: &'a [Format], + ) -> ImageFormatListCreateInfoKHRBuilder<'a> { + self.inner.view_format_count = view_formats.len() as _; + self.inner.p_view_formats = view_formats.as_ptr(); + self + } + #[doc = r" Calling build will **discard** all the lifetime information. Only call this if"] + #[doc = r" necessary! 
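// A minimal sketch, assuming ash's `vk` module: the same format-list struct
// extends image, swapchain and image-format-query create-infos (all three
// `Extends…` impls above); here it declares the view formats a MUTABLE_FORMAT
// image may be re-interpreted as.
fn demo_format_list(view_formats: &[vk::Format]) {
    let mut format_list =
        vk::ImageFormatListCreateInfoKHR::builder().view_formats(view_formats);
    let _info = vk::ImageCreateInfo::builder()
        .flags(vk::ImageCreateFlags::MUTABLE_FORMAT)
        .format(vk::Format::R8G8B8A8_UNORM)
        .push_next(&mut format_list);
}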
Builders implement `Deref` targeting their corresponding Vulkan struct,"] + #[doc = r" so references to builders can be passed directly to Vulkan functions."] + pub fn build(self) -> ImageFormatListCreateInfoKHR { + self.inner + } +} +#[repr(C)] +#[derive(Copy, Clone, Debug)] +#[doc = ""] +pub struct ValidationCacheCreateInfoEXT { + pub s_type: StructureType, + pub p_next: *const c_void, + pub flags: ValidationCacheCreateFlagsEXT, + pub initial_data_size: usize, + pub p_initial_data: *const c_void, +} +impl ::std::default::Default for ValidationCacheCreateInfoEXT { + fn default() -> ValidationCacheCreateInfoEXT { + ValidationCacheCreateInfoEXT { + s_type: StructureType::VALIDATION_CACHE_CREATE_INFO_EXT, + p_next: ::std::ptr::null(), + flags: ValidationCacheCreateFlagsEXT::default(), + initial_data_size: usize::default(), + p_initial_data: ::std::ptr::null(), + } + } +} +impl ValidationCacheCreateInfoEXT { + pub fn builder<'a>() -> ValidationCacheCreateInfoEXTBuilder<'a> { + ValidationCacheCreateInfoEXTBuilder { + inner: ValidationCacheCreateInfoEXT::default(), + marker: ::std::marker::PhantomData, + } + } +} +#[repr(transparent)] +pub struct ValidationCacheCreateInfoEXTBuilder<'a> { + inner: ValidationCacheCreateInfoEXT, + marker: ::std::marker::PhantomData<&'a ()>, +} +pub unsafe trait ExtendsValidationCacheCreateInfoEXT {} +impl<'a> ::std::ops::Deref for ValidationCacheCreateInfoEXTBuilder<'a> { + type Target = ValidationCacheCreateInfoEXT; + fn deref(&self) -> &Self::Target { + &self.inner + } +} +impl<'a> ::std::ops::DerefMut for ValidationCacheCreateInfoEXTBuilder<'a> { + fn deref_mut(&mut self) -> &mut Self::Target { + &mut self.inner + } +} +impl<'a> ValidationCacheCreateInfoEXTBuilder<'a> { + pub fn flags( + mut self, + flags: ValidationCacheCreateFlagsEXT, + ) -> ValidationCacheCreateInfoEXTBuilder<'a> { + self.inner.flags = flags; + self + } + pub fn initial_data( + mut self, + initial_data: &'a [u8], + ) -> ValidationCacheCreateInfoEXTBuilder<'a> { + self.inner.initial_data_size = initial_data.len() as _; + self.inner.p_initial_data = initial_data.as_ptr() as *const c_void; + self + } + #[doc = r" Prepends the given extension struct between the root and the first pointer. This"] + #[doc = r" method only exists on structs that can be passed to a function directly. Only"] + #[doc = r" valid extension structs can be pushed into the chain."] + #[doc = r" If the chain looks like `A -> B -> C`, and you call `builder.push_next(&mut D)`, then the"] + #[doc = r" chain will look like `A -> D -> B -> C`."] + pub fn push_next<T: ExtendsValidationCacheCreateInfoEXT>( + mut self, + next: &'a mut T, + ) -> ValidationCacheCreateInfoEXTBuilder<'a> { + unsafe { + let next_ptr = next as *mut T as *mut BaseOutStructure; + let last_next = ptr_chain_iter(next).last().unwrap(); + (*last_next).p_next = self.inner.p_next as _; + self.inner.p_next = next_ptr as _; + } + self + } + #[doc = r" Calling build will **discard** all the lifetime information. Only call this if"] + #[doc = r" necessary! 
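// A minimal sketch of the chain semantics the doc-comment above describes,
// assuming ash's `vk` module: each `push_next` splices its argument directly
// behind the root, so the *last* push ends up first in the chain.
fn demo_chain_order() {
    let mut blend = vk::PhysicalDeviceBlendOperationAdvancedFeaturesEXT::builder()
        .advanced_blend_coherent_operations(true);
    let mut inline = vk::PhysicalDeviceInlineUniformBlockFeaturesEXT::builder()
        .inline_uniform_block(true);
    // Resulting pNext chain: DeviceCreateInfo -> inline -> blend -> null.
    let _info = vk::DeviceCreateInfo::builder()
        .push_next(&mut blend)
        .push_next(&mut inline);
}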
Builders implement `Deref` targeting their corresponding Vulkan struct,"] + #[doc = r" so references to builders can be passed directly to Vulkan functions."] + pub fn build(self) -> ValidationCacheCreateInfoEXT { + self.inner + } +} +#[repr(C)] +#[derive(Copy, Clone, Debug)] +#[doc = ""] +pub struct ShaderModuleValidationCacheCreateInfoEXT { + pub s_type: StructureType, + pub p_next: *const c_void, + pub validation_cache: ValidationCacheEXT, +} +impl ::std::default::Default for ShaderModuleValidationCacheCreateInfoEXT { + fn default() -> ShaderModuleValidationCacheCreateInfoEXT { + ShaderModuleValidationCacheCreateInfoEXT { + s_type: StructureType::SHADER_MODULE_VALIDATION_CACHE_CREATE_INFO_EXT, + p_next: ::std::ptr::null(), + validation_cache: ValidationCacheEXT::default(), + } + } +} +impl ShaderModuleValidationCacheCreateInfoEXT { + pub fn builder<'a>() -> ShaderModuleValidationCacheCreateInfoEXTBuilder<'a> { + ShaderModuleValidationCacheCreateInfoEXTBuilder { + inner: ShaderModuleValidationCacheCreateInfoEXT::default(), + marker: ::std::marker::PhantomData, + } + } +} +#[repr(transparent)] +pub struct ShaderModuleValidationCacheCreateInfoEXTBuilder<'a> { + inner: ShaderModuleValidationCacheCreateInfoEXT, + marker: ::std::marker::PhantomData<&'a ()>, +} +unsafe impl ExtendsShaderModuleCreateInfo for ShaderModuleValidationCacheCreateInfoEXTBuilder<'_> {} +unsafe impl ExtendsShaderModuleCreateInfo for ShaderModuleValidationCacheCreateInfoEXT {} +impl<'a> ::std::ops::Deref for ShaderModuleValidationCacheCreateInfoEXTBuilder<'a> { + type Target = ShaderModuleValidationCacheCreateInfoEXT; + fn deref(&self) -> &Self::Target { + &self.inner + } +} +impl<'a> ::std::ops::DerefMut for ShaderModuleValidationCacheCreateInfoEXTBuilder<'a> { + fn deref_mut(&mut self) -> &mut Self::Target { + &mut self.inner + } +} +impl<'a> ShaderModuleValidationCacheCreateInfoEXTBuilder<'a> { + pub fn validation_cache( + mut self, + validation_cache: ValidationCacheEXT, + ) -> ShaderModuleValidationCacheCreateInfoEXTBuilder<'a> { + self.inner.validation_cache = validation_cache; + self + } + #[doc = r" Calling build will **discard** all the lifetime information. Only call this if"] + #[doc = r" necessary! 
Builders implement `Deref` targeting their corresponding Vulkan struct,"] + #[doc = r" so references to builders can be passed directly to Vulkan functions."] + pub fn build(self) -> ShaderModuleValidationCacheCreateInfoEXT { + self.inner + } +} +#[repr(C)] +#[derive(Copy, Clone, Debug)] +#[doc = ""] +pub struct PhysicalDeviceMaintenance3Properties { + pub s_type: StructureType, + pub p_next: *mut c_void, + pub max_per_set_descriptors: u32, + pub max_memory_allocation_size: DeviceSize, +} +impl ::std::default::Default for PhysicalDeviceMaintenance3Properties { + fn default() -> PhysicalDeviceMaintenance3Properties { + PhysicalDeviceMaintenance3Properties { + s_type: StructureType::PHYSICAL_DEVICE_MAINTENANCE_3_PROPERTIES, + p_next: ::std::ptr::null_mut(), + max_per_set_descriptors: u32::default(), + max_memory_allocation_size: DeviceSize::default(), + } + } +} +impl PhysicalDeviceMaintenance3Properties { + pub fn builder<'a>() -> PhysicalDeviceMaintenance3PropertiesBuilder<'a> { + PhysicalDeviceMaintenance3PropertiesBuilder { + inner: PhysicalDeviceMaintenance3Properties::default(), + marker: ::std::marker::PhantomData, + } + } +} +#[repr(transparent)] +pub struct PhysicalDeviceMaintenance3PropertiesBuilder<'a> { + inner: PhysicalDeviceMaintenance3Properties, + marker: ::std::marker::PhantomData<&'a ()>, +} +unsafe impl ExtendsPhysicalDeviceProperties2 for PhysicalDeviceMaintenance3PropertiesBuilder<'_> {} +unsafe impl ExtendsPhysicalDeviceProperties2 for PhysicalDeviceMaintenance3Properties {} +impl<'a> ::std::ops::Deref for PhysicalDeviceMaintenance3PropertiesBuilder<'a> { + type Target = PhysicalDeviceMaintenance3Properties; + fn deref(&self) -> &Self::Target { + &self.inner + } +} +impl<'a> ::std::ops::DerefMut for PhysicalDeviceMaintenance3PropertiesBuilder<'a> { + fn deref_mut(&mut self) -> &mut Self::Target { + &mut self.inner + } +} +impl<'a> PhysicalDeviceMaintenance3PropertiesBuilder<'a> { + pub fn max_per_set_descriptors( + mut self, + max_per_set_descriptors: u32, + ) -> PhysicalDeviceMaintenance3PropertiesBuilder<'a> { + self.inner.max_per_set_descriptors = max_per_set_descriptors; + self + } + pub fn max_memory_allocation_size( + mut self, + max_memory_allocation_size: DeviceSize, + ) -> PhysicalDeviceMaintenance3PropertiesBuilder<'a> { + self.inner.max_memory_allocation_size = max_memory_allocation_size; + self + } + #[doc = r" Calling build will **discard** all the lifetime information. Only call this if"] + #[doc = r" necessary! 
Builders implement `Deref` targeting their corresponding Vulkan struct,"] + #[doc = r" so references to builders can be passed directly to Vulkan functions."] + pub fn build(self) -> PhysicalDeviceMaintenance3Properties { + self.inner + } +} +#[repr(C)] +#[derive(Copy, Clone, Debug)] +#[doc = ""] +pub struct DescriptorSetLayoutSupport { + pub s_type: StructureType, + pub p_next: *mut c_void, + pub supported: Bool32, +} +impl ::std::default::Default for DescriptorSetLayoutSupport { + fn default() -> DescriptorSetLayoutSupport { + DescriptorSetLayoutSupport { + s_type: StructureType::DESCRIPTOR_SET_LAYOUT_SUPPORT, + p_next: ::std::ptr::null_mut(), + supported: Bool32::default(), + } + } +} +impl DescriptorSetLayoutSupport { + pub fn builder<'a>() -> DescriptorSetLayoutSupportBuilder<'a> { + DescriptorSetLayoutSupportBuilder { + inner: DescriptorSetLayoutSupport::default(), + marker: ::std::marker::PhantomData, + } + } +} +#[repr(transparent)] +pub struct DescriptorSetLayoutSupportBuilder<'a> { + inner: DescriptorSetLayoutSupport, + marker: ::std::marker::PhantomData<&'a ()>, +} +pub unsafe trait ExtendsDescriptorSetLayoutSupport {} +impl<'a> ::std::ops::Deref for DescriptorSetLayoutSupportBuilder<'a> { + type Target = DescriptorSetLayoutSupport; + fn deref(&self) -> &Self::Target { + &self.inner + } +} +impl<'a> ::std::ops::DerefMut for DescriptorSetLayoutSupportBuilder<'a> { + fn deref_mut(&mut self) -> &mut Self::Target { + &mut self.inner + } +} +impl<'a> DescriptorSetLayoutSupportBuilder<'a> { + pub fn supported(mut self, supported: bool) -> DescriptorSetLayoutSupportBuilder<'a> { + self.inner.supported = supported.into(); + self + } + #[doc = r" Prepends the given extension struct between the root and the first pointer. This"] + #[doc = r" method only exists on structs that can be passed to a function directly. Only"] + #[doc = r" valid extension structs can be pushed into the chain."] + #[doc = r" If the chain looks like `A -> B -> C`, and you call `builder.push_next(&mut D)`, then the"] + #[doc = r" chain will look like `A -> D -> B -> C`."] + pub fn push_next<T: ExtendsDescriptorSetLayoutSupport>( + mut self, + next: &'a mut T, + ) -> DescriptorSetLayoutSupportBuilder<'a> { + unsafe { + let next_ptr = next as *mut T as *mut BaseOutStructure; + let last_next = ptr_chain_iter(next).last().unwrap(); + (*last_next).p_next = self.inner.p_next as _; + self.inner.p_next = next_ptr as _; + } + self + } + #[doc = r" Calling build will **discard** all the lifetime information. Only call this if"] + #[doc = r" necessary! 
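// A minimal sketch, assuming ash's `vk` module and the `DeviceV1_1` trait:
// `DescriptorSetLayoutSupport` is an *output* struct that the driver fills in,
// which is why its only interesting parts are `supported` and the pNext chain.
unsafe fn layout_supported(
    device: &ash::Device,
    create_info: &vk::DescriptorSetLayoutCreateInfo,
) -> bool {
    use ash::version::DeviceV1_1; // ash-0.29-era trait path
    let mut support = vk::DescriptorSetLayoutSupport::default();
    device.get_descriptor_set_layout_support(create_info, &mut support);
    support.supported != vk::FALSE
}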
Builders implement `Deref` targeting their corresponding Vulkan struct,"] + #[doc = r" so references to builders can be passed directly to Vulkan functions."] + pub fn build(self) -> DescriptorSetLayoutSupport { + self.inner + } +} +#[repr(C)] +#[derive(Copy, Clone, Debug)] +#[doc = ""] +pub struct PhysicalDeviceShaderDrawParameterFeatures { + pub s_type: StructureType, + pub p_next: *mut c_void, + pub shader_draw_parameters: Bool32, +} +impl ::std::default::Default for PhysicalDeviceShaderDrawParameterFeatures { + fn default() -> PhysicalDeviceShaderDrawParameterFeatures { + PhysicalDeviceShaderDrawParameterFeatures { + s_type: StructureType::PHYSICAL_DEVICE_SHADER_DRAW_PARAMETER_FEATURES, + p_next: ::std::ptr::null_mut(), + shader_draw_parameters: Bool32::default(), + } + } +} +impl PhysicalDeviceShaderDrawParameterFeatures { + pub fn builder<'a>() -> PhysicalDeviceShaderDrawParameterFeaturesBuilder<'a> { + PhysicalDeviceShaderDrawParameterFeaturesBuilder { + inner: PhysicalDeviceShaderDrawParameterFeatures::default(), + marker: ::std::marker::PhantomData, + } + } +} +#[repr(transparent)] +pub struct PhysicalDeviceShaderDrawParameterFeaturesBuilder<'a> { + inner: PhysicalDeviceShaderDrawParameterFeatures, + marker: ::std::marker::PhantomData<&'a ()>, +} +unsafe impl ExtendsDeviceCreateInfo for PhysicalDeviceShaderDrawParameterFeaturesBuilder<'_> {} +unsafe impl ExtendsDeviceCreateInfo for PhysicalDeviceShaderDrawParameterFeatures {} +impl<'a> ::std::ops::Deref for PhysicalDeviceShaderDrawParameterFeaturesBuilder<'a> { + type Target = PhysicalDeviceShaderDrawParameterFeatures; + fn deref(&self) -> &Self::Target { + &self.inner + } +} +impl<'a> ::std::ops::DerefMut for PhysicalDeviceShaderDrawParameterFeaturesBuilder<'a> { + fn deref_mut(&mut self) -> &mut Self::Target { + &mut self.inner + } +} +impl<'a> PhysicalDeviceShaderDrawParameterFeaturesBuilder<'a> { + pub fn shader_draw_parameters( + mut self, + shader_draw_parameters: bool, + ) -> PhysicalDeviceShaderDrawParameterFeaturesBuilder<'a> { + self.inner.shader_draw_parameters = shader_draw_parameters.into(); + self + } + #[doc = r" Calling build will **discard** all the lifetime information. Only call this if"] + #[doc = r" necessary! 
Builders implement `Deref` targeting their corresponding Vulkan struct,"] + #[doc = r" so references to builders can be passed directly to Vulkan functions."] + pub fn build(self) -> PhysicalDeviceShaderDrawParameterFeatures { + self.inner + } +} +#[repr(C)] +#[derive(Copy, Clone, Debug)] +#[doc = ""] +pub struct PhysicalDeviceFloat16Int8FeaturesKHR { + pub s_type: StructureType, + pub p_next: *mut c_void, + pub shader_float16: Bool32, + pub shader_int8: Bool32, +} +impl ::std::default::Default for PhysicalDeviceFloat16Int8FeaturesKHR { + fn default() -> PhysicalDeviceFloat16Int8FeaturesKHR { + PhysicalDeviceFloat16Int8FeaturesKHR { + s_type: StructureType::PHYSICAL_DEVICE_FLOAT16_INT8_FEATURES_KHR, + p_next: ::std::ptr::null_mut(), + shader_float16: Bool32::default(), + shader_int8: Bool32::default(), + } + } +} +impl PhysicalDeviceFloat16Int8FeaturesKHR { + pub fn builder<'a>() -> PhysicalDeviceFloat16Int8FeaturesKHRBuilder<'a> { + PhysicalDeviceFloat16Int8FeaturesKHRBuilder { + inner: PhysicalDeviceFloat16Int8FeaturesKHR::default(), + marker: ::std::marker::PhantomData, + } + } +} +#[repr(transparent)] +pub struct PhysicalDeviceFloat16Int8FeaturesKHRBuilder<'a> { + inner: PhysicalDeviceFloat16Int8FeaturesKHR, + marker: ::std::marker::PhantomData<&'a ()>, +} +unsafe impl ExtendsDeviceCreateInfo for PhysicalDeviceFloat16Int8FeaturesKHRBuilder<'_> {} +unsafe impl ExtendsDeviceCreateInfo for PhysicalDeviceFloat16Int8FeaturesKHR {} +impl<'a> ::std::ops::Deref for PhysicalDeviceFloat16Int8FeaturesKHRBuilder<'a> { + type Target = PhysicalDeviceFloat16Int8FeaturesKHR; + fn deref(&self) -> &Self::Target { + &self.inner + } +} +impl<'a> ::std::ops::DerefMut for PhysicalDeviceFloat16Int8FeaturesKHRBuilder<'a> { + fn deref_mut(&mut self) -> &mut Self::Target { + &mut self.inner + } +} +impl<'a> PhysicalDeviceFloat16Int8FeaturesKHRBuilder<'a> { + pub fn shader_float16( + mut self, + shader_float16: bool, + ) -> PhysicalDeviceFloat16Int8FeaturesKHRBuilder<'a> { + self.inner.shader_float16 = shader_float16.into(); + self + } + pub fn shader_int8( + mut self, + shader_int8: bool, + ) -> PhysicalDeviceFloat16Int8FeaturesKHRBuilder<'a> { + self.inner.shader_int8 = shader_int8.into(); + self + } + #[doc = r" Calling build will **discard** all the lifetime information. Only call this if"] + #[doc = r" necessary! 
Builders implement `Deref` targeting their corresponding Vulkan struct,"] + #[doc = r" so references to builders can be passed directly to Vulkan functions."] + pub fn build(self) -> PhysicalDeviceFloat16Int8FeaturesKHR { + self.inner + } +} +#[repr(C)] +#[derive(Copy, Clone, Debug)] +#[doc = ""] +pub struct PhysicalDeviceFloatControlsPropertiesKHR { + pub s_type: StructureType, + pub p_next: *mut c_void, + pub separate_denorm_settings: Bool32, + pub separate_rounding_mode_settings: Bool32, + pub shader_signed_zero_inf_nan_preserve_float16: Bool32, + pub shader_signed_zero_inf_nan_preserve_float32: Bool32, + pub shader_signed_zero_inf_nan_preserve_float64: Bool32, + pub shader_denorm_preserve_float16: Bool32, + pub shader_denorm_preserve_float32: Bool32, + pub shader_denorm_preserve_float64: Bool32, + pub shader_denorm_flush_to_zero_float16: Bool32, + pub shader_denorm_flush_to_zero_float32: Bool32, + pub shader_denorm_flush_to_zero_float64: Bool32, + pub shader_rounding_mode_rte_float16: Bool32, + pub shader_rounding_mode_rte_float32: Bool32, + pub shader_rounding_mode_rte_float64: Bool32, + pub shader_rounding_mode_rtz_float16: Bool32, + pub shader_rounding_mode_rtz_float32: Bool32, + pub shader_rounding_mode_rtz_float64: Bool32, +} +impl ::std::default::Default for PhysicalDeviceFloatControlsPropertiesKHR { + fn default() -> PhysicalDeviceFloatControlsPropertiesKHR { + PhysicalDeviceFloatControlsPropertiesKHR { + s_type: StructureType::PHYSICAL_DEVICE_FLOAT_CONTROLS_PROPERTIES_KHR, + p_next: ::std::ptr::null_mut(), + separate_denorm_settings: Bool32::default(), + separate_rounding_mode_settings: Bool32::default(), + shader_signed_zero_inf_nan_preserve_float16: Bool32::default(), + shader_signed_zero_inf_nan_preserve_float32: Bool32::default(), + shader_signed_zero_inf_nan_preserve_float64: Bool32::default(), + shader_denorm_preserve_float16: Bool32::default(), + shader_denorm_preserve_float32: Bool32::default(), + shader_denorm_preserve_float64: Bool32::default(), + shader_denorm_flush_to_zero_float16: Bool32::default(), + shader_denorm_flush_to_zero_float32: Bool32::default(), + shader_denorm_flush_to_zero_float64: Bool32::default(), + shader_rounding_mode_rte_float16: Bool32::default(), + shader_rounding_mode_rte_float32: Bool32::default(), + shader_rounding_mode_rte_float64: Bool32::default(), + shader_rounding_mode_rtz_float16: Bool32::default(), + shader_rounding_mode_rtz_float32: Bool32::default(), + shader_rounding_mode_rtz_float64: Bool32::default(), + } + } +} +impl PhysicalDeviceFloatControlsPropertiesKHR { + pub fn builder<'a>() -> PhysicalDeviceFloatControlsPropertiesKHRBuilder<'a> { + PhysicalDeviceFloatControlsPropertiesKHRBuilder { + inner: PhysicalDeviceFloatControlsPropertiesKHR::default(), + marker: ::std::marker::PhantomData, + } + } +} +#[repr(transparent)] +pub struct PhysicalDeviceFloatControlsPropertiesKHRBuilder<'a> { + inner: PhysicalDeviceFloatControlsPropertiesKHR, + marker: ::std::marker::PhantomData<&'a ()>, +} +unsafe impl ExtendsPhysicalDeviceProperties2 + for PhysicalDeviceFloatControlsPropertiesKHRBuilder<'_> +{ +} +unsafe impl ExtendsPhysicalDeviceProperties2 for PhysicalDeviceFloatControlsPropertiesKHR {} +impl<'a> ::std::ops::Deref for PhysicalDeviceFloatControlsPropertiesKHRBuilder<'a> { + type Target = PhysicalDeviceFloatControlsPropertiesKHR; + fn deref(&self) -> &Self::Target { + &self.inner + } +} +impl<'a> ::std::ops::DerefMut for PhysicalDeviceFloatControlsPropertiesKHRBuilder<'a> { + fn deref_mut(&mut self) -> &mut Self::Target { + &mut 
self.inner + } +} +impl<'a> PhysicalDeviceFloatControlsPropertiesKHRBuilder<'a> { + pub fn separate_denorm_settings( + mut self, + separate_denorm_settings: bool, + ) -> PhysicalDeviceFloatControlsPropertiesKHRBuilder<'a> { + self.inner.separate_denorm_settings = separate_denorm_settings.into(); + self + } + pub fn separate_rounding_mode_settings( + mut self, + separate_rounding_mode_settings: bool, + ) -> PhysicalDeviceFloatControlsPropertiesKHRBuilder<'a> { + self.inner.separate_rounding_mode_settings = separate_rounding_mode_settings.into(); + self + } + pub fn shader_signed_zero_inf_nan_preserve_float16( + mut self, + shader_signed_zero_inf_nan_preserve_float16: bool, + ) -> PhysicalDeviceFloatControlsPropertiesKHRBuilder<'a> { + self.inner.shader_signed_zero_inf_nan_preserve_float16 = + shader_signed_zero_inf_nan_preserve_float16.into(); + self + } + pub fn shader_signed_zero_inf_nan_preserve_float32( + mut self, + shader_signed_zero_inf_nan_preserve_float32: bool, + ) -> PhysicalDeviceFloatControlsPropertiesKHRBuilder<'a> { + self.inner.shader_signed_zero_inf_nan_preserve_float32 = + shader_signed_zero_inf_nan_preserve_float32.into(); + self + } + pub fn shader_signed_zero_inf_nan_preserve_float64( + mut self, + shader_signed_zero_inf_nan_preserve_float64: bool, + ) -> PhysicalDeviceFloatControlsPropertiesKHRBuilder<'a> { + self.inner.shader_signed_zero_inf_nan_preserve_float64 = + shader_signed_zero_inf_nan_preserve_float64.into(); + self + } + pub fn shader_denorm_preserve_float16( + mut self, + shader_denorm_preserve_float16: bool, + ) -> PhysicalDeviceFloatControlsPropertiesKHRBuilder<'a> { + self.inner.shader_denorm_preserve_float16 = shader_denorm_preserve_float16.into(); + self + } + pub fn shader_denorm_preserve_float32( + mut self, + shader_denorm_preserve_float32: bool, + ) -> PhysicalDeviceFloatControlsPropertiesKHRBuilder<'a> { + self.inner.shader_denorm_preserve_float32 = shader_denorm_preserve_float32.into(); + self + } + pub fn shader_denorm_preserve_float64( + mut self, + shader_denorm_preserve_float64: bool, + ) -> PhysicalDeviceFloatControlsPropertiesKHRBuilder<'a> { + self.inner.shader_denorm_preserve_float64 = shader_denorm_preserve_float64.into(); + self + } + pub fn shader_denorm_flush_to_zero_float16( + mut self, + shader_denorm_flush_to_zero_float16: bool, + ) -> PhysicalDeviceFloatControlsPropertiesKHRBuilder<'a> { + self.inner.shader_denorm_flush_to_zero_float16 = shader_denorm_flush_to_zero_float16.into(); + self + } + pub fn shader_denorm_flush_to_zero_float32( + mut self, + shader_denorm_flush_to_zero_float32: bool, + ) -> PhysicalDeviceFloatControlsPropertiesKHRBuilder<'a> { + self.inner.shader_denorm_flush_to_zero_float32 = shader_denorm_flush_to_zero_float32.into(); + self + } + pub fn shader_denorm_flush_to_zero_float64( + mut self, + shader_denorm_flush_to_zero_float64: bool, + ) -> PhysicalDeviceFloatControlsPropertiesKHRBuilder<'a> { + self.inner.shader_denorm_flush_to_zero_float64 = shader_denorm_flush_to_zero_float64.into(); + self + } + pub fn shader_rounding_mode_rte_float16( + mut self, + shader_rounding_mode_rte_float16: bool, + ) -> PhysicalDeviceFloatControlsPropertiesKHRBuilder<'a> { + self.inner.shader_rounding_mode_rte_float16 = shader_rounding_mode_rte_float16.into(); + self + } + pub fn shader_rounding_mode_rte_float32( + mut self, + shader_rounding_mode_rte_float32: bool, + ) -> PhysicalDeviceFloatControlsPropertiesKHRBuilder<'a> { + self.inner.shader_rounding_mode_rte_float32 = shader_rounding_mode_rte_float32.into(); + self + } + 
pub fn shader_rounding_mode_rte_float64( + mut self, + shader_rounding_mode_rte_float64: bool, + ) -> PhysicalDeviceFloatControlsPropertiesKHRBuilder<'a> { + self.inner.shader_rounding_mode_rte_float64 = shader_rounding_mode_rte_float64.into(); + self + } + pub fn shader_rounding_mode_rtz_float16( + mut self, + shader_rounding_mode_rtz_float16: bool, + ) -> PhysicalDeviceFloatControlsPropertiesKHRBuilder<'a> { + self.inner.shader_rounding_mode_rtz_float16 = shader_rounding_mode_rtz_float16.into(); + self + } + pub fn shader_rounding_mode_rtz_float32( + mut self, + shader_rounding_mode_rtz_float32: bool, + ) -> PhysicalDeviceFloatControlsPropertiesKHRBuilder<'a> { + self.inner.shader_rounding_mode_rtz_float32 = shader_rounding_mode_rtz_float32.into(); + self + } + pub fn shader_rounding_mode_rtz_float64( + mut self, + shader_rounding_mode_rtz_float64: bool, + ) -> PhysicalDeviceFloatControlsPropertiesKHRBuilder<'a> { + self.inner.shader_rounding_mode_rtz_float64 = shader_rounding_mode_rtz_float64.into(); + self + } + #[doc = r" Calling build will **discard** all the lifetime information. Only call this if"] + #[doc = r" necessary! Builders implement `Deref` targeting their corresponding Vulkan struct,"] + #[doc = r" so references to builders can be passed directly to Vulkan functions."] + pub fn build(self) -> PhysicalDeviceFloatControlsPropertiesKHR { + self.inner + } +} +#[repr(C)] +#[derive(Copy, Clone, Debug)] +#[doc = ""] +pub struct NativeBufferANDROID { + pub s_type: StructureType, + pub p_next: *const c_void, + pub handle: *const c_void, + pub stride: c_int, + pub format: c_int, + pub usage: c_int, +} +impl ::std::default::Default for NativeBufferANDROID { + fn default() -> NativeBufferANDROID { + NativeBufferANDROID { + s_type: StructureType::NATIVE_BUFFER_ANDROID, + p_next: ::std::ptr::null(), + handle: ::std::ptr::null(), + stride: c_int::default(), + format: c_int::default(), + usage: c_int::default(), + } + } +} +impl NativeBufferANDROID { + pub fn builder<'a>() -> NativeBufferANDROIDBuilder<'a> { + NativeBufferANDROIDBuilder { + inner: NativeBufferANDROID::default(), + marker: ::std::marker::PhantomData, + } + } +} +#[repr(transparent)] +pub struct NativeBufferANDROIDBuilder<'a> { + inner: NativeBufferANDROID, + marker: ::std::marker::PhantomData<&'a ()>, +} +pub unsafe trait ExtendsNativeBufferANDROID {} +impl<'a> ::std::ops::Deref for NativeBufferANDROIDBuilder<'a> { + type Target = NativeBufferANDROID; + fn deref(&self) -> &Self::Target { + &self.inner + } +} +impl<'a> ::std::ops::DerefMut for NativeBufferANDROIDBuilder<'a> { + fn deref_mut(&mut self) -> &mut Self::Target { + &mut self.inner + } +} +impl<'a> NativeBufferANDROIDBuilder<'a> { + pub fn handle(mut self, handle: *const c_void) -> NativeBufferANDROIDBuilder<'a> { + self.inner.handle = handle; + self + } + pub fn stride(mut self, stride: c_int) -> NativeBufferANDROIDBuilder<'a> { + self.inner.stride = stride; + self + } + pub fn format(mut self, format: c_int) -> NativeBufferANDROIDBuilder<'a> { + self.inner.format = format; + self + } + pub fn usage(mut self, usage: c_int) -> NativeBufferANDROIDBuilder<'a> { + self.inner.usage = usage; + self + } + #[doc = r" Prepends the given extension struct between the root and the first pointer. This"] + #[doc = r" method only exists on structs that can be passed to a function directly. 
Only"] + #[doc = r" valid extension structs can be pushed into the chain."] + #[doc = r" If the chain looks like `A -> B -> C`, and you call `builder.push_next(&mut D)`, then the"] + #[doc = r" chain will look like `A -> D -> B -> C`."] + pub fn push_next( + mut self, + next: &'a mut T, + ) -> NativeBufferANDROIDBuilder<'a> { + unsafe { + let next_ptr = next as *mut T as *mut BaseOutStructure; + let last_next = ptr_chain_iter(next).last().unwrap(); + (*last_next).p_next = self.inner.p_next as _; + self.inner.p_next = next_ptr as _; + } + self + } + #[doc = r" Calling build will **discard** all the lifetime information. Only call this if"] + #[doc = r" necessary! Builders implement `Deref` targeting their corresponding Vulkan struct,"] + #[doc = r" so references to builders can be passed directly to Vulkan functions."] + pub fn build(self) -> NativeBufferANDROID { + self.inner + } +} +#[repr(C)] +#[derive(Copy, Clone, Default, Debug)] +#[doc = ""] +pub struct ShaderResourceUsageAMD { + pub num_used_vgprs: u32, + pub num_used_sgprs: u32, + pub lds_size_per_local_work_group: u32, + pub lds_usage_size_in_bytes: usize, + pub scratch_mem_usage_in_bytes: usize, +} +impl ShaderResourceUsageAMD { + pub fn builder<'a>() -> ShaderResourceUsageAMDBuilder<'a> { + ShaderResourceUsageAMDBuilder { + inner: ShaderResourceUsageAMD::default(), + marker: ::std::marker::PhantomData, + } + } +} +#[repr(transparent)] +pub struct ShaderResourceUsageAMDBuilder<'a> { + inner: ShaderResourceUsageAMD, + marker: ::std::marker::PhantomData<&'a ()>, +} +impl<'a> ::std::ops::Deref for ShaderResourceUsageAMDBuilder<'a> { + type Target = ShaderResourceUsageAMD; + fn deref(&self) -> &Self::Target { + &self.inner + } +} +impl<'a> ::std::ops::DerefMut for ShaderResourceUsageAMDBuilder<'a> { + fn deref_mut(&mut self) -> &mut Self::Target { + &mut self.inner + } +} +impl<'a> ShaderResourceUsageAMDBuilder<'a> { + pub fn num_used_vgprs(mut self, num_used_vgprs: u32) -> ShaderResourceUsageAMDBuilder<'a> { + self.inner.num_used_vgprs = num_used_vgprs; + self + } + pub fn num_used_sgprs(mut self, num_used_sgprs: u32) -> ShaderResourceUsageAMDBuilder<'a> { + self.inner.num_used_sgprs = num_used_sgprs; + self + } + pub fn lds_size_per_local_work_group( + mut self, + lds_size_per_local_work_group: u32, + ) -> ShaderResourceUsageAMDBuilder<'a> { + self.inner.lds_size_per_local_work_group = lds_size_per_local_work_group; + self + } + pub fn lds_usage_size_in_bytes( + mut self, + lds_usage_size_in_bytes: usize, + ) -> ShaderResourceUsageAMDBuilder<'a> { + self.inner.lds_usage_size_in_bytes = lds_usage_size_in_bytes; + self + } + pub fn scratch_mem_usage_in_bytes( + mut self, + scratch_mem_usage_in_bytes: usize, + ) -> ShaderResourceUsageAMDBuilder<'a> { + self.inner.scratch_mem_usage_in_bytes = scratch_mem_usage_in_bytes; + self + } + #[doc = r" Calling build will **discard** all the lifetime information. Only call this if"] + #[doc = r" necessary! 
Builders implement `Deref` targeting their corresponding Vulkan struct,"] + #[doc = r" so references to builders can be passed directly to Vulkan functions."] + pub fn build(self) -> ShaderResourceUsageAMD { + self.inner + } +} +#[repr(C)] +#[derive(Copy, Clone, Debug)] +#[doc = ""] +pub struct ShaderStatisticsInfoAMD { + pub shader_stage_mask: ShaderStageFlags, + pub resource_usage: ShaderResourceUsageAMD, + pub num_physical_vgprs: u32, + pub num_physical_sgprs: u32, + pub num_available_vgprs: u32, + pub num_available_sgprs: u32, + pub compute_work_group_size: [u32; 3], +} +impl ::std::default::Default for ShaderStatisticsInfoAMD { + fn default() -> ShaderStatisticsInfoAMD { + ShaderStatisticsInfoAMD { + shader_stage_mask: ShaderStageFlags::default(), + resource_usage: ShaderResourceUsageAMD::default(), + num_physical_vgprs: u32::default(), + num_physical_sgprs: u32::default(), + num_available_vgprs: u32::default(), + num_available_sgprs: u32::default(), + compute_work_group_size: unsafe { ::std::mem::zeroed() }, + } + } +} +impl ShaderStatisticsInfoAMD { + pub fn builder<'a>() -> ShaderStatisticsInfoAMDBuilder<'a> { + ShaderStatisticsInfoAMDBuilder { + inner: ShaderStatisticsInfoAMD::default(), + marker: ::std::marker::PhantomData, + } + } +} +#[repr(transparent)] +pub struct ShaderStatisticsInfoAMDBuilder<'a> { + inner: ShaderStatisticsInfoAMD, + marker: ::std::marker::PhantomData<&'a ()>, +} +impl<'a> ::std::ops::Deref for ShaderStatisticsInfoAMDBuilder<'a> { + type Target = ShaderStatisticsInfoAMD; + fn deref(&self) -> &Self::Target { + &self.inner + } +} +impl<'a> ::std::ops::DerefMut for ShaderStatisticsInfoAMDBuilder<'a> { + fn deref_mut(&mut self) -> &mut Self::Target { + &mut self.inner + } +} +impl<'a> ShaderStatisticsInfoAMDBuilder<'a> { + pub fn shader_stage_mask( + mut self, + shader_stage_mask: ShaderStageFlags, + ) -> ShaderStatisticsInfoAMDBuilder<'a> { + self.inner.shader_stage_mask = shader_stage_mask; + self + } + pub fn resource_usage( + mut self, + resource_usage: ShaderResourceUsageAMD, + ) -> ShaderStatisticsInfoAMDBuilder<'a> { + self.inner.resource_usage = resource_usage; + self + } + pub fn num_physical_vgprs( + mut self, + num_physical_vgprs: u32, + ) -> ShaderStatisticsInfoAMDBuilder<'a> { + self.inner.num_physical_vgprs = num_physical_vgprs; + self + } + pub fn num_physical_sgprs( + mut self, + num_physical_sgprs: u32, + ) -> ShaderStatisticsInfoAMDBuilder<'a> { + self.inner.num_physical_sgprs = num_physical_sgprs; + self + } + pub fn num_available_vgprs( + mut self, + num_available_vgprs: u32, + ) -> ShaderStatisticsInfoAMDBuilder<'a> { + self.inner.num_available_vgprs = num_available_vgprs; + self + } + pub fn num_available_sgprs( + mut self, + num_available_sgprs: u32, + ) -> ShaderStatisticsInfoAMDBuilder<'a> { + self.inner.num_available_sgprs = num_available_sgprs; + self + } + pub fn compute_work_group_size( + mut self, + compute_work_group_size: [u32; 3], + ) -> ShaderStatisticsInfoAMDBuilder<'a> { + self.inner.compute_work_group_size = compute_work_group_size; + self + } + #[doc = r" Calling build will **discard** all the lifetime information. Only call this if"] + #[doc = r" necessary! 
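// A minimal sketch: VK_AMD_shader_info returns these statistics as an opaque
// byte blob (info type VK_SHADER_INFO_TYPE_STATISTICS_AMD); a defensive
// reinterpretation of such a blob, assuming ash's `vk` module, looks like this.
fn parse_shader_statistics(blob: &[u8]) -> Option<vk::ShaderStatisticsInfoAMD> {
    if blob.len() < std::mem::size_of::<vk::ShaderStatisticsInfoAMD>() {
        return None;
    }
    // Copy out of the possibly unaligned buffer instead of casting and
    // dereferencing the pointer directly.
    Some(unsafe {
        std::ptr::read_unaligned(blob.as_ptr() as *const vk::ShaderStatisticsInfoAMD)
    })
}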
Builders implement `Deref` targeting their corresponding Vulkan struct,"] + #[doc = r" so references to builders can be passed directly to Vulkan functions."] + pub fn build(self) -> ShaderStatisticsInfoAMD { + self.inner + } +} +#[repr(C)] +#[derive(Copy, Clone, Debug)] +#[doc = ""] +pub struct DeviceQueueGlobalPriorityCreateInfoEXT { + pub s_type: StructureType, + pub p_next: *const c_void, + pub global_priority: QueueGlobalPriorityEXT, +} +impl ::std::default::Default for DeviceQueueGlobalPriorityCreateInfoEXT { + fn default() -> DeviceQueueGlobalPriorityCreateInfoEXT { + DeviceQueueGlobalPriorityCreateInfoEXT { + s_type: StructureType::DEVICE_QUEUE_GLOBAL_PRIORITY_CREATE_INFO_EXT, + p_next: ::std::ptr::null(), + global_priority: QueueGlobalPriorityEXT::default(), + } + } +} +impl DeviceQueueGlobalPriorityCreateInfoEXT { + pub fn builder<'a>() -> DeviceQueueGlobalPriorityCreateInfoEXTBuilder<'a> { + DeviceQueueGlobalPriorityCreateInfoEXTBuilder { + inner: DeviceQueueGlobalPriorityCreateInfoEXT::default(), + marker: ::std::marker::PhantomData, + } + } +} +#[repr(transparent)] +pub struct DeviceQueueGlobalPriorityCreateInfoEXTBuilder<'a> { + inner: DeviceQueueGlobalPriorityCreateInfoEXT, + marker: ::std::marker::PhantomData<&'a ()>, +} +unsafe impl ExtendsDeviceQueueCreateInfo for DeviceQueueGlobalPriorityCreateInfoEXTBuilder<'_> {} +unsafe impl ExtendsDeviceQueueCreateInfo for DeviceQueueGlobalPriorityCreateInfoEXT {} +impl<'a> ::std::ops::Deref for DeviceQueueGlobalPriorityCreateInfoEXTBuilder<'a> { + type Target = DeviceQueueGlobalPriorityCreateInfoEXT; + fn deref(&self) -> &Self::Target { + &self.inner + } +} +impl<'a> ::std::ops::DerefMut for DeviceQueueGlobalPriorityCreateInfoEXTBuilder<'a> { + fn deref_mut(&mut self) -> &mut Self::Target { + &mut self.inner + } +} +impl<'a> DeviceQueueGlobalPriorityCreateInfoEXTBuilder<'a> { + pub fn global_priority( + mut self, + global_priority: QueueGlobalPriorityEXT, + ) -> DeviceQueueGlobalPriorityCreateInfoEXTBuilder<'a> { + self.inner.global_priority = global_priority; + self + } + #[doc = r" Calling build will **discard** all the lifetime information. Only call this if"] + #[doc = r" necessary! 
Builders implement `Deref` targeting their corresponding Vulkan struct,"] + #[doc = r" so references to builders can be passed directly to Vulkan functions."] + pub fn build(self) -> DeviceQueueGlobalPriorityCreateInfoEXT { + self.inner + } +} +#[repr(C)] +#[derive(Copy, Clone, Debug)] +#[doc = ""] +pub struct DebugUtilsObjectNameInfoEXT { + pub s_type: StructureType, + pub p_next: *const c_void, + pub object_type: ObjectType, + pub object_handle: u64, + pub p_object_name: *const c_char, +} +impl ::std::default::Default for DebugUtilsObjectNameInfoEXT { + fn default() -> DebugUtilsObjectNameInfoEXT { + DebugUtilsObjectNameInfoEXT { + s_type: StructureType::DEBUG_UTILS_OBJECT_NAME_INFO_EXT, + p_next: ::std::ptr::null(), + object_type: ObjectType::default(), + object_handle: u64::default(), + p_object_name: ::std::ptr::null(), + } + } +} +impl DebugUtilsObjectNameInfoEXT { + pub fn builder<'a>() -> DebugUtilsObjectNameInfoEXTBuilder<'a> { + DebugUtilsObjectNameInfoEXTBuilder { + inner: DebugUtilsObjectNameInfoEXT::default(), + marker: ::std::marker::PhantomData, + } + } +} +#[repr(transparent)] +pub struct DebugUtilsObjectNameInfoEXTBuilder<'a> { + inner: DebugUtilsObjectNameInfoEXT, + marker: ::std::marker::PhantomData<&'a ()>, +} +pub unsafe trait ExtendsDebugUtilsObjectNameInfoEXT {} +impl<'a> ::std::ops::Deref for DebugUtilsObjectNameInfoEXTBuilder<'a> { + type Target = DebugUtilsObjectNameInfoEXT; + fn deref(&self) -> &Self::Target { + &self.inner + } +} +impl<'a> ::std::ops::DerefMut for DebugUtilsObjectNameInfoEXTBuilder<'a> { + fn deref_mut(&mut self) -> &mut Self::Target { + &mut self.inner + } +} +impl<'a> DebugUtilsObjectNameInfoEXTBuilder<'a> { + pub fn object_type( + mut self, + object_type: ObjectType, + ) -> DebugUtilsObjectNameInfoEXTBuilder<'a> { + self.inner.object_type = object_type; + self + } + pub fn object_handle(mut self, object_handle: u64) -> DebugUtilsObjectNameInfoEXTBuilder<'a> { + self.inner.object_handle = object_handle; + self + } + pub fn object_name( + mut self, + object_name: &'a ::std::ffi::CStr, + ) -> DebugUtilsObjectNameInfoEXTBuilder<'a> { + self.inner.p_object_name = object_name.as_ptr(); + self + } + #[doc = r" Prepends the given extension struct between the root and the first pointer. This"] + #[doc = r" method only exists on structs that can be passed to a function directly. Only"] + #[doc = r" valid extension structs can be pushed into the chain."] + #[doc = r" If the chain looks like `A -> B -> C`, and you call `builder.push_next(&mut D)`, then the"] + #[doc = r" chain will look like `A -> D -> B -> C`."] + pub fn push_next<T: ExtendsDebugUtilsObjectNameInfoEXT>( + mut self, + next: &'a mut T, + ) -> DebugUtilsObjectNameInfoEXTBuilder<'a> { + unsafe { + let next_ptr = next as *mut T as *mut BaseOutStructure; + let last_next = ptr_chain_iter(next).last().unwrap(); + (*last_next).p_next = self.inner.p_next as _; + self.inner.p_next = next_ptr as _; + } + self + } + #[doc = r" Calling build will **discard** all the lifetime information. Only call this if"] + #[doc = r" necessary! 
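// A minimal sketch of object naming, assuming ash's `vk` module, its
// `ash::extensions::ext::DebugUtils` loader with the era's
// `debug_utils_set_object_name(device, info)` entry point, and `vk::Handle`
// for `as_raw()`; the buffer name is an arbitrary placeholder.
unsafe fn name_buffer(
    debug_utils: &ash::extensions::ext::DebugUtils,
    device: vk::Device,
    buffer: vk::Buffer,
) -> Result<(), vk::Result> {
    use ash::vk::Handle;
    let name = std::ffi::CStr::from_bytes_with_nul(b"scene-vertices\0").unwrap();
    let info = vk::DebugUtilsObjectNameInfoEXT::builder()
        .object_type(vk::ObjectType::BUFFER)
        .object_handle(buffer.as_raw())
        .object_name(name);
    // `&info` coerces through `Deref` to &vk::DebugUtilsObjectNameInfoEXT.
    debug_utils.debug_utils_set_object_name(device, &info)
}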
+#[repr(C)]
+#[derive(Copy, Clone, Debug)]
+#[doc = ""]
+pub struct DebugUtilsObjectTagInfoEXT {
+    pub s_type: StructureType,
+    pub p_next: *const c_void,
+    pub object_type: ObjectType,
+    pub object_handle: u64,
+    pub tag_name: u64,
+    pub tag_size: usize,
+    pub p_tag: *const c_void,
+}
+impl ::std::default::Default for DebugUtilsObjectTagInfoEXT {
+    fn default() -> DebugUtilsObjectTagInfoEXT {
+        DebugUtilsObjectTagInfoEXT {
+            s_type: StructureType::DEBUG_UTILS_OBJECT_TAG_INFO_EXT,
+            p_next: ::std::ptr::null(),
+            object_type: ObjectType::default(),
+            object_handle: u64::default(),
+            tag_name: u64::default(),
+            tag_size: usize::default(),
+            p_tag: ::std::ptr::null(),
+        }
+    }
+}
+impl DebugUtilsObjectTagInfoEXT {
+    pub fn builder<'a>() -> DebugUtilsObjectTagInfoEXTBuilder<'a> {
+        DebugUtilsObjectTagInfoEXTBuilder {
+            inner: DebugUtilsObjectTagInfoEXT::default(),
+            marker: ::std::marker::PhantomData,
+        }
+    }
+}
+#[repr(transparent)]
+pub struct DebugUtilsObjectTagInfoEXTBuilder<'a> {
+    inner: DebugUtilsObjectTagInfoEXT,
+    marker: ::std::marker::PhantomData<&'a ()>,
+}
+pub unsafe trait ExtendsDebugUtilsObjectTagInfoEXT {}
+impl<'a> ::std::ops::Deref for DebugUtilsObjectTagInfoEXTBuilder<'a> {
+    type Target = DebugUtilsObjectTagInfoEXT;
+    fn deref(&self) -> &Self::Target {
+        &self.inner
+    }
+}
+impl<'a> ::std::ops::DerefMut for DebugUtilsObjectTagInfoEXTBuilder<'a> {
+    fn deref_mut(&mut self) -> &mut Self::Target {
+        &mut self.inner
+    }
+}
+impl<'a> DebugUtilsObjectTagInfoEXTBuilder<'a> {
+    pub fn object_type(mut self, object_type: ObjectType) -> DebugUtilsObjectTagInfoEXTBuilder<'a> {
+        self.inner.object_type = object_type;
+        self
+    }
+    pub fn object_handle(mut self, object_handle: u64) -> DebugUtilsObjectTagInfoEXTBuilder<'a> {
+        self.inner.object_handle = object_handle;
+        self
+    }
+    pub fn tag_name(mut self, tag_name: u64) -> DebugUtilsObjectTagInfoEXTBuilder<'a> {
+        self.inner.tag_name = tag_name;
+        self
+    }
+    pub fn tag(mut self, tag: &'a [u8]) -> DebugUtilsObjectTagInfoEXTBuilder<'a> {
+        self.inner.tag_size = tag.len() as _;
+        self.inner.p_tag = tag.as_ptr() as *const c_void;
+        self
+    }
+    #[doc = r" Prepends the given extension struct between the root and the first pointer. This"]
+    #[doc = r" method only exists on structs that can be passed to a function directly. Only"]
+    #[doc = r" valid extension structs can be pushed into the chain."]
+    #[doc = r" If the chain looks like `A -> B -> C`, and you call `builder.push_next(&mut D)`, then the"]
+    #[doc = r" chain will look like `A -> D -> B -> C`."]
+    pub fn push_next<T: ExtendsDebugUtilsObjectTagInfoEXT>(
+        mut self,
+        next: &'a mut T,
+    ) -> DebugUtilsObjectTagInfoEXTBuilder<'a> {
+        unsafe {
+            let next_ptr = next as *mut T as *mut BaseOutStructure;
+            let last_next = ptr_chain_iter(next).last().unwrap();
+            (*last_next).p_next = self.inner.p_next as _;
+            self.inner.p_next = next_ptr as _;
+        }
+        self
+    }
+    #[doc = r" Calling build will **discard** all the lifetime information. Only call this if"]
+    #[doc = r" necessary! Builders implement `Deref` targeting their corresponding Vulkan struct,"]
+    #[doc = r" so references to builders can be passed directly to Vulkan functions."]
+    pub fn build(self) -> DebugUtilsObjectTagInfoEXT {
+        self.inner
+    }
+}
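The slice setters are the reason the builders carry a lifetime at all: `tag()` records both the length and a raw pointer into the borrowed slice, and `build()` drops the borrow while the raw struct keeps pointing into it. A small sketch of that hazard, again assuming the module is imported as `vk`:

```rust
use vk::DebugUtilsObjectTagInfoEXT;

fn main() {
    let payload: [u8; 4] = *b"dbg0";
    // One call fills both tag_size and p_tag from the slice.
    let tag_info = DebugUtilsObjectTagInfoEXT::builder()
        .tag_name(1)
        .tag(&payload);
    assert_eq!(tag_info.tag_size, payload.len()); // fields readable via Deref
    // After build() the borrow of `payload` is gone, but the raw struct
    // still holds a pointer into it, so `payload` must outlive `raw`.
    let raw = tag_info.build();
    let _ = raw;
}
```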
+#[repr(C)]
+#[derive(Copy, Clone, Debug)]
+#[doc = ""]
+pub struct DebugUtilsLabelEXT {
+    pub s_type: StructureType,
+    pub p_next: *const c_void,
+    pub p_label_name: *const c_char,
+    pub color: [f32; 4],
+}
+impl ::std::default::Default for DebugUtilsLabelEXT {
+    fn default() -> DebugUtilsLabelEXT {
+        DebugUtilsLabelEXT {
+            s_type: StructureType::DEBUG_UTILS_LABEL_EXT,
+            p_next: ::std::ptr::null(),
+            p_label_name: ::std::ptr::null(),
+            color: unsafe { ::std::mem::zeroed() },
+        }
+    }
+}
+impl DebugUtilsLabelEXT {
+    pub fn builder<'a>() -> DebugUtilsLabelEXTBuilder<'a> {
+        DebugUtilsLabelEXTBuilder {
+            inner: DebugUtilsLabelEXT::default(),
+            marker: ::std::marker::PhantomData,
+        }
+    }
+}
+#[repr(transparent)]
+pub struct DebugUtilsLabelEXTBuilder<'a> {
+    inner: DebugUtilsLabelEXT,
+    marker: ::std::marker::PhantomData<&'a ()>,
+}
+pub unsafe trait ExtendsDebugUtilsLabelEXT {}
+impl<'a> ::std::ops::Deref for DebugUtilsLabelEXTBuilder<'a> {
+    type Target = DebugUtilsLabelEXT;
+    fn deref(&self) -> &Self::Target {
+        &self.inner
+    }
+}
+impl<'a> ::std::ops::DerefMut for DebugUtilsLabelEXTBuilder<'a> {
+    fn deref_mut(&mut self) -> &mut Self::Target {
+        &mut self.inner
+    }
+}
+impl<'a> DebugUtilsLabelEXTBuilder<'a> {
+    pub fn label_name(mut self, label_name: &'a ::std::ffi::CStr) -> DebugUtilsLabelEXTBuilder<'a> {
+        self.inner.p_label_name = label_name.as_ptr();
+        self
+    }
+    pub fn color(mut self, color: [f32; 4]) -> DebugUtilsLabelEXTBuilder<'a> {
+        self.inner.color = color;
+        self
+    }
+    #[doc = r" Prepends the given extension struct between the root and the first pointer. This"]
+    #[doc = r" method only exists on structs that can be passed to a function directly. Only"]
+    #[doc = r" valid extension structs can be pushed into the chain."]
+    #[doc = r" If the chain looks like `A -> B -> C`, and you call `builder.push_next(&mut D)`, then the"]
+    #[doc = r" chain will look like `A -> D -> B -> C`."]
+    pub fn push_next<T: ExtendsDebugUtilsLabelEXT>(
+        mut self,
+        next: &'a mut T,
+    ) -> DebugUtilsLabelEXTBuilder<'a> {
+        unsafe {
+            let next_ptr = next as *mut T as *mut BaseOutStructure;
+            let last_next = ptr_chain_iter(next).last().unwrap();
+            (*last_next).p_next = self.inner.p_next as _;
+            self.inner.p_next = next_ptr as _;
+        }
+        self
+    }
+    #[doc = r" Calling build will **discard** all the lifetime information. Only call this if"]
+    #[doc = r" necessary! Builders implement `Deref` targeting their corresponding Vulkan struct,"]
+    #[doc = r" so references to builders can be passed directly to Vulkan functions."]
+    pub fn build(self) -> DebugUtilsLabelEXT {
+        self.inner
+    }
+}
+#[repr(C)]
+#[derive(Copy, Clone)]
+#[doc = ""]
+pub struct DebugUtilsMessengerCreateInfoEXT {
+    pub s_type: StructureType,
+    pub p_next: *const c_void,
+    pub flags: DebugUtilsMessengerCreateFlagsEXT,
+    pub message_severity: DebugUtilsMessageSeverityFlagsEXT,
+    pub message_type: DebugUtilsMessageTypeFlagsEXT,
+    pub pfn_user_callback: PFN_vkDebugUtilsMessengerCallbackEXT,
+    pub p_user_data: *mut c_void,
+}
+impl fmt::Debug for DebugUtilsMessengerCreateInfoEXT {
+    fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result {
+        fmt.debug_struct("DebugUtilsMessengerCreateInfoEXT")
+            .field("s_type", &self.s_type)
+            .field("p_next", &self.p_next)
+            .field("flags", &self.flags)
+            .field("message_severity", &self.message_severity)
+            .field("message_type", &self.message_type)
+            .field(
+                "pfn_user_callback",
+                &(self.pfn_user_callback.map(|x| x as *const ())),
+            )
+            .field("p_user_data", &self.p_user_data)
+            .finish()
+    }
+}
+impl ::std::default::Default for DebugUtilsMessengerCreateInfoEXT {
+    fn default() -> DebugUtilsMessengerCreateInfoEXT {
+        DebugUtilsMessengerCreateInfoEXT {
+            s_type: StructureType::DEBUG_UTILS_MESSENGER_CREATE_INFO_EXT,
+            p_next: ::std::ptr::null(),
+            flags: DebugUtilsMessengerCreateFlagsEXT::default(),
+            message_severity: DebugUtilsMessageSeverityFlagsEXT::default(),
+            message_type: DebugUtilsMessageTypeFlagsEXT::default(),
+            pfn_user_callback: PFN_vkDebugUtilsMessengerCallbackEXT::default(),
+            p_user_data: ::std::ptr::null_mut(),
+        }
+    }
+}
+impl DebugUtilsMessengerCreateInfoEXT {
+    pub fn builder<'a>() -> DebugUtilsMessengerCreateInfoEXTBuilder<'a> {
+        DebugUtilsMessengerCreateInfoEXTBuilder {
+            inner: DebugUtilsMessengerCreateInfoEXT::default(),
+            marker: ::std::marker::PhantomData,
+        }
+    }
+}
+#[repr(transparent)]
+pub struct DebugUtilsMessengerCreateInfoEXTBuilder<'a> {
+    inner: DebugUtilsMessengerCreateInfoEXT,
+    marker: ::std::marker::PhantomData<&'a ()>,
+}
+unsafe impl ExtendsInstanceCreateInfo for DebugUtilsMessengerCreateInfoEXTBuilder<'_> {}
+unsafe impl ExtendsInstanceCreateInfo for DebugUtilsMessengerCreateInfoEXT {}
+impl<'a> ::std::ops::Deref for DebugUtilsMessengerCreateInfoEXTBuilder<'a> {
+    type Target = DebugUtilsMessengerCreateInfoEXT;
+    fn deref(&self) -> &Self::Target {
+        &self.inner
+    }
+}
+impl<'a> ::std::ops::DerefMut for DebugUtilsMessengerCreateInfoEXTBuilder<'a> {
+    fn deref_mut(&mut self) -> &mut Self::Target {
+        &mut self.inner
+    }
+}
+impl<'a> DebugUtilsMessengerCreateInfoEXTBuilder<'a> {
+    pub fn flags(
+        mut self,
+        flags: DebugUtilsMessengerCreateFlagsEXT,
+    ) -> DebugUtilsMessengerCreateInfoEXTBuilder<'a> {
+        self.inner.flags = flags;
+        self
+    }
+    pub fn message_severity(
+        mut self,
+        message_severity: DebugUtilsMessageSeverityFlagsEXT,
+    ) -> DebugUtilsMessengerCreateInfoEXTBuilder<'a> {
+        self.inner.message_severity = message_severity;
+        self
+    }
+    pub fn message_type(
+        mut self,
+        message_type: DebugUtilsMessageTypeFlagsEXT,
+    ) -> DebugUtilsMessengerCreateInfoEXTBuilder<'a> {
+        self.inner.message_type = message_type;
+        self
+    }
+    pub fn pfn_user_callback(
+        mut self,
+        pfn_user_callback: PFN_vkDebugUtilsMessengerCallbackEXT,
+    ) -> DebugUtilsMessengerCreateInfoEXTBuilder<'a> {
+        self.inner.pfn_user_callback = pfn_user_callback;
+        self
+    }
+    pub fn user_data(
+        mut self,
+        user_data: *mut c_void,
+    ) -> DebugUtilsMessengerCreateInfoEXTBuilder<'a> {
+        self.inner.p_user_data = user_data;
+        self
+    }
+    #[doc = r" Calling build will **discard** all the lifetime information. Only call this if"]
+    #[doc = r" necessary! Builders implement `Deref` targeting their corresponding Vulkan struct,"]
+    #[doc = r" so references to builders can be passed directly to Vulkan functions."]
+    pub fn build(self) -> DebugUtilsMessengerCreateInfoEXT {
+        self.inner
+    }
+}
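The `ExtendsInstanceCreateInfo` impls above are what let a messenger create-info be spliced into instance creation via `push_next`. A hedged sketch, under the assumption that these bindings also define `InstanceCreateInfo` with the same generated `push_next<T: ExtendsInstanceCreateInfo>` method (that struct is not part of this hunk):

```rust
use vk::{DebugUtilsMessengerCreateInfoEXT, InstanceCreateInfo};

fn main() {
    // Severity, type and the callback would be set via the setters above;
    // the default callback is None, per the Option-based PFN type.
    let mut messenger_info = DebugUtilsMessengerCreateInfoEXT::builder().build();
    // push_next splices right after the root: an existing chain `A -> B`
    // becomes `A -> messenger_info -> B`.
    let instance_info = InstanceCreateInfo::builder().push_next(&mut messenger_info);
    let _ = instance_info;
}
```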
+#[repr(C)]
+#[derive(Copy, Clone, Debug)]
+#[doc = ""]
+pub struct DebugUtilsMessengerCallbackDataEXT {
+    pub s_type: StructureType,
+    pub p_next: *const c_void,
+    pub flags: DebugUtilsMessengerCallbackDataFlagsEXT,
+    pub p_message_id_name: *const c_char,
+    pub message_id_number: i32,
+    pub p_message: *const c_char,
+    pub queue_label_count: u32,
+    pub p_queue_labels: *const DebugUtilsLabelEXT,
+    pub cmd_buf_label_count: u32,
+    pub p_cmd_buf_labels: *const DebugUtilsLabelEXT,
+    pub object_count: u32,
+    pub p_objects: *const DebugUtilsObjectNameInfoEXT,
+}
+impl ::std::default::Default for DebugUtilsMessengerCallbackDataEXT {
+    fn default() -> DebugUtilsMessengerCallbackDataEXT {
+        DebugUtilsMessengerCallbackDataEXT {
+            s_type: StructureType::DEBUG_UTILS_MESSENGER_CALLBACK_DATA_EXT,
+            p_next: ::std::ptr::null(),
+            flags: DebugUtilsMessengerCallbackDataFlagsEXT::default(),
+            p_message_id_name: ::std::ptr::null(),
+            message_id_number: i32::default(),
+            p_message: ::std::ptr::null(),
+            queue_label_count: u32::default(),
+            p_queue_labels: ::std::ptr::null(),
+            cmd_buf_label_count: u32::default(),
+            p_cmd_buf_labels: ::std::ptr::null(),
+            object_count: u32::default(),
+            p_objects: ::std::ptr::null(),
+        }
+    }
+}
+impl DebugUtilsMessengerCallbackDataEXT {
+    pub fn builder<'a>() -> DebugUtilsMessengerCallbackDataEXTBuilder<'a> {
+        DebugUtilsMessengerCallbackDataEXTBuilder {
+            inner: DebugUtilsMessengerCallbackDataEXT::default(),
+            marker: ::std::marker::PhantomData,
+        }
+    }
+}
+#[repr(transparent)]
+pub struct DebugUtilsMessengerCallbackDataEXTBuilder<'a> {
+    inner: DebugUtilsMessengerCallbackDataEXT,
+    marker: ::std::marker::PhantomData<&'a ()>,
+}
+pub unsafe trait ExtendsDebugUtilsMessengerCallbackDataEXT {}
+impl<'a> ::std::ops::Deref for DebugUtilsMessengerCallbackDataEXTBuilder<'a> {
+    type Target = DebugUtilsMessengerCallbackDataEXT;
+    fn deref(&self) -> &Self::Target {
+        &self.inner
+    }
+}
+impl<'a> ::std::ops::DerefMut for DebugUtilsMessengerCallbackDataEXTBuilder<'a> {
+    fn deref_mut(&mut self) -> &mut Self::Target {
+        &mut self.inner
+    }
+}
+impl<'a> DebugUtilsMessengerCallbackDataEXTBuilder<'a> {
+    pub fn flags(
+        mut self,
+        flags: DebugUtilsMessengerCallbackDataFlagsEXT,
+    ) -> DebugUtilsMessengerCallbackDataEXTBuilder<'a> {
+        self.inner.flags = flags;
+        self
+    }
+    pub fn message_id_name(
+        mut self,
+        message_id_name: &'a ::std::ffi::CStr,
+    ) -> DebugUtilsMessengerCallbackDataEXTBuilder<'a> {
+        self.inner.p_message_id_name = message_id_name.as_ptr();
+        self
+    }
+    pub fn message_id_number(
+        mut self,
+        message_id_number: i32,
+    ) -> DebugUtilsMessengerCallbackDataEXTBuilder<'a> {
+        self.inner.message_id_number = message_id_number;
+        self
+    }
+    pub fn message(
+        mut self,
+        message: &'a ::std::ffi::CStr,
+    ) -> DebugUtilsMessengerCallbackDataEXTBuilder<'a> {
+        self.inner.p_message = message.as_ptr();
+        self
+    }
+    pub fn queue_labels(
+        mut self,
+        queue_labels: &'a [DebugUtilsLabelEXT],
+    ) -> DebugUtilsMessengerCallbackDataEXTBuilder<'a> {
+        self.inner.queue_label_count = queue_labels.len() as _;
+        self.inner.p_queue_labels = queue_labels.as_ptr();
+        self
+    }
+    pub fn cmd_buf_labels(
+        mut self,
+        cmd_buf_labels: &'a [DebugUtilsLabelEXT],
+    ) -> DebugUtilsMessengerCallbackDataEXTBuilder<'a> {
+        self.inner.cmd_buf_label_count = cmd_buf_labels.len() as _;
+        self.inner.p_cmd_buf_labels = cmd_buf_labels.as_ptr();
+        self
+    }
+    pub fn objects(
+        mut self,
+        objects: &'a [DebugUtilsObjectNameInfoEXT],
+    ) -> DebugUtilsMessengerCallbackDataEXTBuilder<'a> {
+        self.inner.object_count = objects.len() as _;
+        self.inner.p_objects = objects.as_ptr();
+        self
+    }
+    #[doc = r" Prepends the given extension struct between the root and the first pointer. This"]
+    #[doc = r" method only exists on structs that can be passed to a function directly. Only"]
+    #[doc = r" valid extension structs can be pushed into the chain."]
+    #[doc = r" If the chain looks like `A -> B -> C`, and you call `builder.push_next(&mut D)`, then the"]
+    #[doc = r" chain will look like `A -> D -> B -> C`."]
+    pub fn push_next<T: ExtendsDebugUtilsMessengerCallbackDataEXT>(
+        mut self,
+        next: &'a mut T,
+    ) -> DebugUtilsMessengerCallbackDataEXTBuilder<'a> {
+        unsafe {
+            let next_ptr = next as *mut T as *mut BaseOutStructure;
+            let last_next = ptr_chain_iter(next).last().unwrap();
+            (*last_next).p_next = self.inner.p_next as _;
+            self.inner.p_next = next_ptr as _;
+        }
+        self
+    }
+    #[doc = r" Calling build will **discard** all the lifetime information. Only call this if"]
+    #[doc = r" necessary! Builders implement `Deref` targeting their corresponding Vulkan struct,"]
+    #[doc = r" so references to builders can be passed directly to Vulkan functions."]
+    pub fn build(self) -> DebugUtilsMessengerCallbackDataEXT {
+        self.inner
+    }
+}
+#[repr(C)]
+#[derive(Copy, Clone, Debug)]
+#[doc = ""]
+pub struct ImportMemoryHostPointerInfoEXT {
+    pub s_type: StructureType,
+    pub p_next: *const c_void,
+    pub handle_type: ExternalMemoryHandleTypeFlags,
+    pub p_host_pointer: *mut c_void,
+}
+impl ::std::default::Default for ImportMemoryHostPointerInfoEXT {
+    fn default() -> ImportMemoryHostPointerInfoEXT {
+        ImportMemoryHostPointerInfoEXT {
+            s_type: StructureType::IMPORT_MEMORY_HOST_POINTER_INFO_EXT,
+            p_next: ::std::ptr::null(),
+            handle_type: ExternalMemoryHandleTypeFlags::default(),
+            p_host_pointer: ::std::ptr::null_mut(),
+        }
+    }
+}
+impl ImportMemoryHostPointerInfoEXT {
+    pub fn builder<'a>() -> ImportMemoryHostPointerInfoEXTBuilder<'a> {
+        ImportMemoryHostPointerInfoEXTBuilder {
+            inner: ImportMemoryHostPointerInfoEXT::default(),
+            marker: ::std::marker::PhantomData,
+        }
+    }
+}
+#[repr(transparent)]
+pub struct ImportMemoryHostPointerInfoEXTBuilder<'a> {
+    inner: ImportMemoryHostPointerInfoEXT,
+    marker: ::std::marker::PhantomData<&'a ()>,
+}
+unsafe impl ExtendsMemoryAllocateInfo for ImportMemoryHostPointerInfoEXTBuilder<'_> {}
+unsafe impl ExtendsMemoryAllocateInfo for ImportMemoryHostPointerInfoEXT {}
+impl<'a> ::std::ops::Deref for ImportMemoryHostPointerInfoEXTBuilder<'a> {
+    type Target = ImportMemoryHostPointerInfoEXT;
+    fn deref(&self) -> &Self::Target {
+        &self.inner
+    }
+}
+impl<'a> ::std::ops::DerefMut for ImportMemoryHostPointerInfoEXTBuilder<'a> {
+    fn deref_mut(&mut self) -> &mut Self::Target {
+        &mut self.inner
+    }
+}
+impl<'a> ImportMemoryHostPointerInfoEXTBuilder<'a> {
+    pub fn handle_type(
+        mut self,
+        handle_type: ExternalMemoryHandleTypeFlags,
+    ) -> ImportMemoryHostPointerInfoEXTBuilder<'a> {
+        self.inner.handle_type = handle_type;
+        self
+    }
+    pub fn host_pointer(
+        mut self,
+        host_pointer: *mut c_void,
+    ) -> ImportMemoryHostPointerInfoEXTBuilder<'a> {
+        self.inner.p_host_pointer = host_pointer;
+        self
+    }
+    #[doc = r" Calling build will **discard** all the lifetime information. Only call this if"]
+    #[doc = r" necessary! Builders implement `Deref` targeting their corresponding Vulkan struct,"]
+    #[doc = r" so references to builders can be passed directly to Vulkan functions."]
+    pub fn build(self) -> ImportMemoryHostPointerInfoEXT {
+        self.inner
+    }
+}
+#[repr(C)]
+#[derive(Copy, Clone, Debug)]
+#[doc = ""]
+pub struct MemoryHostPointerPropertiesEXT {
+    pub s_type: StructureType,
+    pub p_next: *mut c_void,
+    pub memory_type_bits: u32,
+}
+impl ::std::default::Default for MemoryHostPointerPropertiesEXT {
+    fn default() -> MemoryHostPointerPropertiesEXT {
+        MemoryHostPointerPropertiesEXT {
+            s_type: StructureType::MEMORY_HOST_POINTER_PROPERTIES_EXT,
+            p_next: ::std::ptr::null_mut(),
+            memory_type_bits: u32::default(),
+        }
+    }
+}
+impl MemoryHostPointerPropertiesEXT {
+    pub fn builder<'a>() -> MemoryHostPointerPropertiesEXTBuilder<'a> {
+        MemoryHostPointerPropertiesEXTBuilder {
+            inner: MemoryHostPointerPropertiesEXT::default(),
+            marker: ::std::marker::PhantomData,
+        }
+    }
+}
+#[repr(transparent)]
+pub struct MemoryHostPointerPropertiesEXTBuilder<'a> {
+    inner: MemoryHostPointerPropertiesEXT,
+    marker: ::std::marker::PhantomData<&'a ()>,
+}
+pub unsafe trait ExtendsMemoryHostPointerPropertiesEXT {}
+impl<'a> ::std::ops::Deref for MemoryHostPointerPropertiesEXTBuilder<'a> {
+    type Target = MemoryHostPointerPropertiesEXT;
+    fn deref(&self) -> &Self::Target {
+        &self.inner
+    }
+}
+impl<'a> ::std::ops::DerefMut for MemoryHostPointerPropertiesEXTBuilder<'a> {
+    fn deref_mut(&mut self) -> &mut Self::Target {
+        &mut self.inner
+    }
+}
+impl<'a> MemoryHostPointerPropertiesEXTBuilder<'a> {
+    pub fn memory_type_bits(
+        mut self,
+        memory_type_bits: u32,
+    ) -> MemoryHostPointerPropertiesEXTBuilder<'a> {
+        self.inner.memory_type_bits = memory_type_bits;
+        self
+    }
+    #[doc = r" Prepends the given extension struct between the root and the first pointer. This"]
+    #[doc = r" method only exists on structs that can be passed to a function directly. Only"]
+    #[doc = r" valid extension structs can be pushed into the chain."]
+    #[doc = r" If the chain looks like `A -> B -> C`, and you call `builder.push_next(&mut D)`, then the"]
+    #[doc = r" chain will look like `A -> D -> B -> C`."]
+    pub fn push_next<T: ExtendsMemoryHostPointerPropertiesEXT>(
+        mut self,
+        next: &'a mut T,
+    ) -> MemoryHostPointerPropertiesEXTBuilder<'a> {
+        unsafe {
+            let next_ptr = next as *mut T as *mut BaseOutStructure;
+            let last_next = ptr_chain_iter(next).last().unwrap();
+            (*last_next).p_next = self.inner.p_next as _;
+            self.inner.p_next = next_ptr as _;
+        }
+        self
+    }
+    #[doc = r" Calling build will **discard** all the lifetime information. Only call this if"]
+    #[doc = r" necessary! Builders implement `Deref` targeting their corresponding Vulkan struct,"]
+    #[doc = r" so references to builders can be passed directly to Vulkan functions."]
+    pub fn build(self) -> MemoryHostPointerPropertiesEXT {
+        self.inner
+    }
+}
+#[repr(C)]
+#[derive(Copy, Clone, Debug)]
+#[doc = ""]
+pub struct PhysicalDeviceExternalMemoryHostPropertiesEXT {
+    pub s_type: StructureType,
+    pub p_next: *mut c_void,
+    pub min_imported_host_pointer_alignment: DeviceSize,
+}
+impl ::std::default::Default for PhysicalDeviceExternalMemoryHostPropertiesEXT {
+    fn default() -> PhysicalDeviceExternalMemoryHostPropertiesEXT {
+        PhysicalDeviceExternalMemoryHostPropertiesEXT {
+            s_type: StructureType::PHYSICAL_DEVICE_EXTERNAL_MEMORY_HOST_PROPERTIES_EXT,
+            p_next: ::std::ptr::null_mut(),
+            min_imported_host_pointer_alignment: DeviceSize::default(),
+        }
+    }
+}
+impl PhysicalDeviceExternalMemoryHostPropertiesEXT {
+    pub fn builder<'a>() -> PhysicalDeviceExternalMemoryHostPropertiesEXTBuilder<'a> {
+        PhysicalDeviceExternalMemoryHostPropertiesEXTBuilder {
+            inner: PhysicalDeviceExternalMemoryHostPropertiesEXT::default(),
+            marker: ::std::marker::PhantomData,
+        }
+    }
+}
+#[repr(transparent)]
+pub struct PhysicalDeviceExternalMemoryHostPropertiesEXTBuilder<'a> {
+    inner: PhysicalDeviceExternalMemoryHostPropertiesEXT,
+    marker: ::std::marker::PhantomData<&'a ()>,
+}
+unsafe impl ExtendsPhysicalDeviceProperties2
+    for PhysicalDeviceExternalMemoryHostPropertiesEXTBuilder<'_>
+{
+}
+unsafe impl ExtendsPhysicalDeviceProperties2 for PhysicalDeviceExternalMemoryHostPropertiesEXT {}
+impl<'a> ::std::ops::Deref for PhysicalDeviceExternalMemoryHostPropertiesEXTBuilder<'a> {
+    type Target = PhysicalDeviceExternalMemoryHostPropertiesEXT;
+    fn deref(&self) -> &Self::Target {
+        &self.inner
+    }
+}
+impl<'a> ::std::ops::DerefMut for PhysicalDeviceExternalMemoryHostPropertiesEXTBuilder<'a> {
+    fn deref_mut(&mut self) -> &mut Self::Target {
+        &mut self.inner
+    }
+}
+impl<'a> PhysicalDeviceExternalMemoryHostPropertiesEXTBuilder<'a> {
+    pub fn min_imported_host_pointer_alignment(
+        mut self,
+        min_imported_host_pointer_alignment: DeviceSize,
+    ) -> PhysicalDeviceExternalMemoryHostPropertiesEXTBuilder<'a> {
+        self.inner.min_imported_host_pointer_alignment = min_imported_host_pointer_alignment;
+        self
+    }
+    #[doc = r" Calling build will **discard** all the lifetime information. Only call this if"]
+    #[doc = r" necessary! Builders implement `Deref` targeting their corresponding Vulkan struct,"]
+    #[doc = r" so references to builders can be passed directly to Vulkan functions."]
+    pub fn build(self) -> PhysicalDeviceExternalMemoryHostPropertiesEXT {
+        self.inner
+    }
+}
+#[repr(C)]
+#[derive(Copy, Clone, Debug)]
+#[doc = ""]
+pub struct PhysicalDeviceConservativeRasterizationPropertiesEXT {
+    pub s_type: StructureType,
+    pub p_next: *mut c_void,
+    pub primitive_overestimation_size: f32,
+    pub max_extra_primitive_overestimation_size: f32,
+    pub extra_primitive_overestimation_size_granularity: f32,
+    pub primitive_underestimation: Bool32,
+    pub conservative_point_and_line_rasterization: Bool32,
+    pub degenerate_triangles_rasterized: Bool32,
+    pub degenerate_lines_rasterized: Bool32,
+    pub fully_covered_fragment_shader_input_variable: Bool32,
+    pub conservative_rasterization_post_depth_coverage: Bool32,
+}
+impl ::std::default::Default for PhysicalDeviceConservativeRasterizationPropertiesEXT {
+    fn default() -> PhysicalDeviceConservativeRasterizationPropertiesEXT {
+        PhysicalDeviceConservativeRasterizationPropertiesEXT {
+            s_type: StructureType::PHYSICAL_DEVICE_CONSERVATIVE_RASTERIZATION_PROPERTIES_EXT,
+            p_next: ::std::ptr::null_mut(),
+            primitive_overestimation_size: f32::default(),
+            max_extra_primitive_overestimation_size: f32::default(),
+            extra_primitive_overestimation_size_granularity: f32::default(),
+            primitive_underestimation: Bool32::default(),
+            conservative_point_and_line_rasterization: Bool32::default(),
+            degenerate_triangles_rasterized: Bool32::default(),
+            degenerate_lines_rasterized: Bool32::default(),
+            fully_covered_fragment_shader_input_variable: Bool32::default(),
+            conservative_rasterization_post_depth_coverage: Bool32::default(),
+        }
+    }
+}
+impl PhysicalDeviceConservativeRasterizationPropertiesEXT {
+    pub fn builder<'a>() -> PhysicalDeviceConservativeRasterizationPropertiesEXTBuilder<'a> {
+        PhysicalDeviceConservativeRasterizationPropertiesEXTBuilder {
+            inner: PhysicalDeviceConservativeRasterizationPropertiesEXT::default(),
+            marker: ::std::marker::PhantomData,
+        }
+    }
+}
+#[repr(transparent)]
+pub struct PhysicalDeviceConservativeRasterizationPropertiesEXTBuilder<'a> {
+    inner: PhysicalDeviceConservativeRasterizationPropertiesEXT,
+    marker: ::std::marker::PhantomData<&'a ()>,
+}
+unsafe impl ExtendsPhysicalDeviceProperties2
+    for PhysicalDeviceConservativeRasterizationPropertiesEXTBuilder<'_>
+{
+}
+unsafe impl ExtendsPhysicalDeviceProperties2
+    for PhysicalDeviceConservativeRasterizationPropertiesEXT
+{
+}
+impl<'a> ::std::ops::Deref for PhysicalDeviceConservativeRasterizationPropertiesEXTBuilder<'a> {
+    type Target = PhysicalDeviceConservativeRasterizationPropertiesEXT;
+    fn deref(&self) -> &Self::Target {
+        &self.inner
+    }
+}
+impl<'a> ::std::ops::DerefMut for PhysicalDeviceConservativeRasterizationPropertiesEXTBuilder<'a> {
+    fn deref_mut(&mut self) -> &mut Self::Target {
+        &mut self.inner
+    }
+}
+impl<'a> PhysicalDeviceConservativeRasterizationPropertiesEXTBuilder<'a> {
+    pub fn primitive_overestimation_size(
+        mut self,
+        primitive_overestimation_size: f32,
+    ) -> PhysicalDeviceConservativeRasterizationPropertiesEXTBuilder<'a> {
+        self.inner.primitive_overestimation_size = primitive_overestimation_size;
+        self
+    }
+    pub fn max_extra_primitive_overestimation_size(
+        mut self,
+        max_extra_primitive_overestimation_size: f32,
+    ) -> PhysicalDeviceConservativeRasterizationPropertiesEXTBuilder<'a> {
+        self.inner.max_extra_primitive_overestimation_size =
+            max_extra_primitive_overestimation_size;
+        self
+    }
+    pub fn extra_primitive_overestimation_size_granularity(
+        mut self,
+        extra_primitive_overestimation_size_granularity: f32,
+    ) -> PhysicalDeviceConservativeRasterizationPropertiesEXTBuilder<'a> {
+        self.inner.extra_primitive_overestimation_size_granularity =
+            extra_primitive_overestimation_size_granularity;
+        self
+    }
+    pub fn primitive_underestimation(
+        mut self,
+        primitive_underestimation: bool,
+    ) -> PhysicalDeviceConservativeRasterizationPropertiesEXTBuilder<'a> {
+        self.inner.primitive_underestimation = primitive_underestimation.into();
+        self
+    }
+    pub fn conservative_point_and_line_rasterization(
+        mut self,
+        conservative_point_and_line_rasterization: bool,
+    ) -> PhysicalDeviceConservativeRasterizationPropertiesEXTBuilder<'a> {
+        self.inner.conservative_point_and_line_rasterization =
+            conservative_point_and_line_rasterization.into();
+        self
+    }
+    pub fn degenerate_triangles_rasterized(
+        mut self,
+        degenerate_triangles_rasterized: bool,
+    ) -> PhysicalDeviceConservativeRasterizationPropertiesEXTBuilder<'a> {
+        self.inner.degenerate_triangles_rasterized = degenerate_triangles_rasterized.into();
+        self
+    }
+    pub fn degenerate_lines_rasterized(
+        mut self,
+        degenerate_lines_rasterized: bool,
+    ) -> PhysicalDeviceConservativeRasterizationPropertiesEXTBuilder<'a> {
+        self.inner.degenerate_lines_rasterized = degenerate_lines_rasterized.into();
+        self
+    }
+    pub fn fully_covered_fragment_shader_input_variable(
+        mut self,
+        fully_covered_fragment_shader_input_variable: bool,
+    ) -> PhysicalDeviceConservativeRasterizationPropertiesEXTBuilder<'a> {
+        self.inner.fully_covered_fragment_shader_input_variable =
+            fully_covered_fragment_shader_input_variable.into();
+        self
+    }
+    pub fn conservative_rasterization_post_depth_coverage(
+        mut self,
+        conservative_rasterization_post_depth_coverage: bool,
+    ) -> PhysicalDeviceConservativeRasterizationPropertiesEXTBuilder<'a> {
+        self.inner.conservative_rasterization_post_depth_coverage =
+            conservative_rasterization_post_depth_coverage.into();
+        self
+    }
+    #[doc = r" Calling build will **discard** all the lifetime information. Only call this if"]
+    #[doc = r" necessary! Builders implement `Deref` targeting their corresponding Vulkan struct,"]
+    #[doc = r" so references to builders can be passed directly to Vulkan functions."]
+    pub fn build(self) -> PhysicalDeviceConservativeRasterizationPropertiesEXT {
+        self.inner
+    }
+}
+#[repr(C)]
+#[derive(Copy, Clone, Debug)]
+#[doc = ""]
+pub struct CalibratedTimestampInfoEXT {
+    pub s_type: StructureType,
+    pub p_next: *const c_void,
+    pub time_domain: TimeDomainEXT,
+}
+impl ::std::default::Default for CalibratedTimestampInfoEXT {
+    fn default() -> CalibratedTimestampInfoEXT {
+        CalibratedTimestampInfoEXT {
+            s_type: StructureType::CALIBRATED_TIMESTAMP_INFO_EXT,
+            p_next: ::std::ptr::null(),
+            time_domain: TimeDomainEXT::default(),
+        }
+    }
+}
+impl CalibratedTimestampInfoEXT {
+    pub fn builder<'a>() -> CalibratedTimestampInfoEXTBuilder<'a> {
+        CalibratedTimestampInfoEXTBuilder {
+            inner: CalibratedTimestampInfoEXT::default(),
+            marker: ::std::marker::PhantomData,
+        }
+    }
+}
+#[repr(transparent)]
+pub struct CalibratedTimestampInfoEXTBuilder<'a> {
+    inner: CalibratedTimestampInfoEXT,
+    marker: ::std::marker::PhantomData<&'a ()>,
+}
+pub unsafe trait ExtendsCalibratedTimestampInfoEXT {}
+impl<'a> ::std::ops::Deref for CalibratedTimestampInfoEXTBuilder<'a> {
+    type Target = CalibratedTimestampInfoEXT;
+    fn deref(&self) -> &Self::Target {
+        &self.inner
+    }
+}
+impl<'a> ::std::ops::DerefMut for CalibratedTimestampInfoEXTBuilder<'a> {
+    fn deref_mut(&mut self) -> &mut Self::Target {
+        &mut self.inner
+    }
+}
+impl<'a> CalibratedTimestampInfoEXTBuilder<'a> {
+    pub fn time_domain(
+        mut self,
+        time_domain: TimeDomainEXT,
+    ) -> CalibratedTimestampInfoEXTBuilder<'a> {
+        self.inner.time_domain = time_domain;
+        self
+    }
+    #[doc = r" Prepends the given extension struct between the root and the first pointer. This"]
+    #[doc = r" method only exists on structs that can be passed to a function directly. Only"]
+    #[doc = r" valid extension structs can be pushed into the chain."]
+    #[doc = r" If the chain looks like `A -> B -> C`, and you call `builder.push_next(&mut D)`, then the"]
+    #[doc = r" chain will look like `A -> D -> B -> C`."]
+    pub fn push_next<T: ExtendsCalibratedTimestampInfoEXT>(
+        mut self,
+        next: &'a mut T,
+    ) -> CalibratedTimestampInfoEXTBuilder<'a> {
+        unsafe {
+            let next_ptr = next as *mut T as *mut BaseOutStructure;
+            let last_next = ptr_chain_iter(next).last().unwrap();
+            (*last_next).p_next = self.inner.p_next as _;
+            self.inner.p_next = next_ptr as _;
+        }
+        self
+    }
+    #[doc = r" Calling build will **discard** all the lifetime information. Only call this if"]
+    #[doc = r" necessary! Builders implement `Deref` targeting their corresponding Vulkan struct,"]
+    #[doc = r" so references to builders can be passed directly to Vulkan functions."]
+    pub fn build(self) -> CalibratedTimestampInfoEXT {
+        self.inner
+    }
+}
+#[repr(C)]
+#[derive(Copy, Clone, Debug)]
+#[doc = ""]
+pub struct PhysicalDeviceShaderCorePropertiesAMD {
+    pub s_type: StructureType,
+    pub p_next: *mut c_void,
+    pub shader_engine_count: u32,
+    pub shader_arrays_per_engine_count: u32,
+    pub compute_units_per_shader_array: u32,
+    pub simd_per_compute_unit: u32,
+    pub wavefronts_per_simd: u32,
+    pub wavefront_size: u32,
+    pub sgprs_per_simd: u32,
+    pub min_sgpr_allocation: u32,
+    pub max_sgpr_allocation: u32,
+    pub sgpr_allocation_granularity: u32,
+    pub vgprs_per_simd: u32,
+    pub min_vgpr_allocation: u32,
+    pub max_vgpr_allocation: u32,
+    pub vgpr_allocation_granularity: u32,
+}
+impl ::std::default::Default for PhysicalDeviceShaderCorePropertiesAMD {
+    fn default() -> PhysicalDeviceShaderCorePropertiesAMD {
+        PhysicalDeviceShaderCorePropertiesAMD {
+            s_type: StructureType::PHYSICAL_DEVICE_SHADER_CORE_PROPERTIES_AMD,
+            p_next: ::std::ptr::null_mut(),
+            shader_engine_count: u32::default(),
+            shader_arrays_per_engine_count: u32::default(),
+            compute_units_per_shader_array: u32::default(),
+            simd_per_compute_unit: u32::default(),
+            wavefronts_per_simd: u32::default(),
+            wavefront_size: u32::default(),
+            sgprs_per_simd: u32::default(),
+            min_sgpr_allocation: u32::default(),
+            max_sgpr_allocation: u32::default(),
+            sgpr_allocation_granularity: u32::default(),
+            vgprs_per_simd: u32::default(),
+            min_vgpr_allocation: u32::default(),
+            max_vgpr_allocation: u32::default(),
+            vgpr_allocation_granularity: u32::default(),
+        }
+    }
+}
+impl PhysicalDeviceShaderCorePropertiesAMD {
+    pub fn builder<'a>() -> PhysicalDeviceShaderCorePropertiesAMDBuilder<'a> {
+        PhysicalDeviceShaderCorePropertiesAMDBuilder {
+            inner: PhysicalDeviceShaderCorePropertiesAMD::default(),
+            marker: ::std::marker::PhantomData,
+        }
+    }
+}
+#[repr(transparent)]
+pub struct PhysicalDeviceShaderCorePropertiesAMDBuilder<'a> {
+    inner: PhysicalDeviceShaderCorePropertiesAMD,
+    marker: ::std::marker::PhantomData<&'a ()>,
+}
+unsafe impl ExtendsPhysicalDeviceProperties2 for PhysicalDeviceShaderCorePropertiesAMDBuilder<'_> {}
+unsafe impl ExtendsPhysicalDeviceProperties2 for PhysicalDeviceShaderCorePropertiesAMD {}
+impl<'a> ::std::ops::Deref for PhysicalDeviceShaderCorePropertiesAMDBuilder<'a> {
+    type Target = PhysicalDeviceShaderCorePropertiesAMD;
+    fn deref(&self) -> &Self::Target {
+        &self.inner
+    }
+}
+impl<'a> ::std::ops::DerefMut for PhysicalDeviceShaderCorePropertiesAMDBuilder<'a> {
+    fn deref_mut(&mut self) -> &mut Self::Target {
+        &mut self.inner
+    }
+}
+impl<'a> PhysicalDeviceShaderCorePropertiesAMDBuilder<'a> {
+    pub fn shader_engine_count(
+        mut self,
+        shader_engine_count: u32,
+    ) -> PhysicalDeviceShaderCorePropertiesAMDBuilder<'a> {
+        self.inner.shader_engine_count = shader_engine_count;
+        self
+    }
+    pub fn shader_arrays_per_engine_count(
+        mut self,
+        shader_arrays_per_engine_count: u32,
+    ) -> PhysicalDeviceShaderCorePropertiesAMDBuilder<'a> {
+        self.inner.shader_arrays_per_engine_count = shader_arrays_per_engine_count;
+        self
+    }
+    pub fn compute_units_per_shader_array(
+        mut self,
+        compute_units_per_shader_array: u32,
+    ) -> PhysicalDeviceShaderCorePropertiesAMDBuilder<'a> {
+        self.inner.compute_units_per_shader_array = compute_units_per_shader_array;
+        self
+    }
+    pub fn simd_per_compute_unit(
+        mut self,
+        simd_per_compute_unit: u32,
+    ) -> PhysicalDeviceShaderCorePropertiesAMDBuilder<'a> {
+        self.inner.simd_per_compute_unit = simd_per_compute_unit;
+        self
+    }
+    pub fn wavefronts_per_simd(
+        mut self,
+        wavefronts_per_simd: u32,
+    ) -> PhysicalDeviceShaderCorePropertiesAMDBuilder<'a> {
+        self.inner.wavefronts_per_simd = wavefronts_per_simd;
+        self
+    }
+    pub fn wavefront_size(
+        mut self,
+        wavefront_size: u32,
+    ) -> PhysicalDeviceShaderCorePropertiesAMDBuilder<'a> {
+        self.inner.wavefront_size = wavefront_size;
+        self
+    }
+    pub fn sgprs_per_simd(
+        mut self,
+        sgprs_per_simd: u32,
+    ) -> PhysicalDeviceShaderCorePropertiesAMDBuilder<'a> {
+        self.inner.sgprs_per_simd = sgprs_per_simd;
+        self
+    }
+    pub fn min_sgpr_allocation(
+        mut self,
+        min_sgpr_allocation: u32,
+    ) -> PhysicalDeviceShaderCorePropertiesAMDBuilder<'a> {
+        self.inner.min_sgpr_allocation = min_sgpr_allocation;
+        self
+    }
+    pub fn max_sgpr_allocation(
+        mut self,
+        max_sgpr_allocation: u32,
+    ) -> PhysicalDeviceShaderCorePropertiesAMDBuilder<'a> {
+        self.inner.max_sgpr_allocation = max_sgpr_allocation;
+        self
+    }
+    pub fn sgpr_allocation_granularity(
+        mut self,
+        sgpr_allocation_granularity: u32,
+    ) -> PhysicalDeviceShaderCorePropertiesAMDBuilder<'a> {
+        self.inner.sgpr_allocation_granularity = sgpr_allocation_granularity;
+        self
+    }
+    pub fn vgprs_per_simd(
+        mut self,
+        vgprs_per_simd: u32,
+    ) -> PhysicalDeviceShaderCorePropertiesAMDBuilder<'a> {
+        self.inner.vgprs_per_simd = vgprs_per_simd;
+        self
+    }
+    pub fn min_vgpr_allocation(
+        mut self,
+        min_vgpr_allocation: u32,
+    ) -> PhysicalDeviceShaderCorePropertiesAMDBuilder<'a> {
+        self.inner.min_vgpr_allocation = min_vgpr_allocation;
+        self
+    }
+    pub fn max_vgpr_allocation(
+        mut self,
+        max_vgpr_allocation: u32,
+    ) -> PhysicalDeviceShaderCorePropertiesAMDBuilder<'a> {
+        self.inner.max_vgpr_allocation = max_vgpr_allocation;
+        self
+    }
+    pub fn vgpr_allocation_granularity(
+        mut self,
+        vgpr_allocation_granularity: u32,
+    ) -> PhysicalDeviceShaderCorePropertiesAMDBuilder<'a> {
+        self.inner.vgpr_allocation_granularity = vgpr_allocation_granularity;
+        self
+    }
+    #[doc = r" Calling build will **discard** all the lifetime information. Only call this if"]
+    #[doc = r" necessary! Builders implement `Deref` targeting their corresponding Vulkan struct,"]
+    #[doc = r" so references to builders can be passed directly to Vulkan functions."]
+    pub fn build(self) -> PhysicalDeviceShaderCorePropertiesAMD {
+        self.inner
+    }
+}
+#[repr(C)]
+#[derive(Copy, Clone, Debug)]
+#[doc = ""]
+pub struct PipelineRasterizationConservativeStateCreateInfoEXT {
+    pub s_type: StructureType,
+    pub p_next: *const c_void,
+    pub flags: PipelineRasterizationConservativeStateCreateFlagsEXT,
+    pub conservative_rasterization_mode: ConservativeRasterizationModeEXT,
+    pub extra_primitive_overestimation_size: f32,
+}
+impl ::std::default::Default for PipelineRasterizationConservativeStateCreateInfoEXT {
+    fn default() -> PipelineRasterizationConservativeStateCreateInfoEXT {
+        PipelineRasterizationConservativeStateCreateInfoEXT {
+            s_type: StructureType::PIPELINE_RASTERIZATION_CONSERVATIVE_STATE_CREATE_INFO_EXT,
+            p_next: ::std::ptr::null(),
+            flags: PipelineRasterizationConservativeStateCreateFlagsEXT::default(),
+            conservative_rasterization_mode: ConservativeRasterizationModeEXT::default(),
+            extra_primitive_overestimation_size: f32::default(),
+        }
+    }
+}
+impl PipelineRasterizationConservativeStateCreateInfoEXT {
+    pub fn builder<'a>() -> PipelineRasterizationConservativeStateCreateInfoEXTBuilder<'a> {
+        PipelineRasterizationConservativeStateCreateInfoEXTBuilder {
+            inner: PipelineRasterizationConservativeStateCreateInfoEXT::default(),
+            marker: ::std::marker::PhantomData,
+        }
+    }
+}
+#[repr(transparent)]
+pub struct PipelineRasterizationConservativeStateCreateInfoEXTBuilder<'a> {
+    inner: PipelineRasterizationConservativeStateCreateInfoEXT,
+    marker: ::std::marker::PhantomData<&'a ()>,
+}
+unsafe impl ExtendsPipelineRasterizationStateCreateInfo
+    for PipelineRasterizationConservativeStateCreateInfoEXTBuilder<'_>
+{
+}
+unsafe impl ExtendsPipelineRasterizationStateCreateInfo
+    for PipelineRasterizationConservativeStateCreateInfoEXT
+{
+}
+impl<'a> ::std::ops::Deref for PipelineRasterizationConservativeStateCreateInfoEXTBuilder<'a> {
+    type Target = PipelineRasterizationConservativeStateCreateInfoEXT;
+    fn deref(&self) -> &Self::Target {
+        &self.inner
+    }
+}
+impl<'a> ::std::ops::DerefMut for PipelineRasterizationConservativeStateCreateInfoEXTBuilder<'a> {
+    fn deref_mut(&mut self) -> &mut Self::Target {
+        &mut self.inner
+    }
+}
+impl<'a> PipelineRasterizationConservativeStateCreateInfoEXTBuilder<'a> {
+    pub fn flags(
+        mut self,
+        flags: PipelineRasterizationConservativeStateCreateFlagsEXT,
+    ) -> PipelineRasterizationConservativeStateCreateInfoEXTBuilder<'a> {
+        self.inner.flags = flags;
+        self
+    }
+    pub fn conservative_rasterization_mode(
+        mut self,
+        conservative_rasterization_mode: ConservativeRasterizationModeEXT,
+    ) -> PipelineRasterizationConservativeStateCreateInfoEXTBuilder<'a> {
+        self.inner.conservative_rasterization_mode = conservative_rasterization_mode;
+        self
+    }
+    pub fn extra_primitive_overestimation_size(
+        mut self,
+        extra_primitive_overestimation_size: f32,
+    ) -> PipelineRasterizationConservativeStateCreateInfoEXTBuilder<'a> {
+        self.inner.extra_primitive_overestimation_size = extra_primitive_overestimation_size;
+        self
+    }
+    #[doc = r" Calling build will **discard** all the lifetime information. Only call this if"]
+    #[doc = r" necessary! Builders implement `Deref` targeting their corresponding Vulkan struct,"]
+    #[doc = r" so references to builders can be passed directly to Vulkan functions."]
+    pub fn build(self) -> PipelineRasterizationConservativeStateCreateInfoEXT {
+        self.inner
+    }
+}
+#[repr(C)]
+#[derive(Copy, Clone, Debug)]
+#[doc = ""]
+pub struct PhysicalDeviceDescriptorIndexingFeaturesEXT {
+    pub s_type: StructureType,
+    pub p_next: *mut c_void,
+    pub shader_input_attachment_array_dynamic_indexing: Bool32,
+    pub shader_uniform_texel_buffer_array_dynamic_indexing: Bool32,
+    pub shader_storage_texel_buffer_array_dynamic_indexing: Bool32,
+    pub shader_uniform_buffer_array_non_uniform_indexing: Bool32,
+    pub shader_sampled_image_array_non_uniform_indexing: Bool32,
+    pub shader_storage_buffer_array_non_uniform_indexing: Bool32,
+    pub shader_storage_image_array_non_uniform_indexing: Bool32,
+    pub shader_input_attachment_array_non_uniform_indexing: Bool32,
+    pub shader_uniform_texel_buffer_array_non_uniform_indexing: Bool32,
+    pub shader_storage_texel_buffer_array_non_uniform_indexing: Bool32,
+    pub descriptor_binding_uniform_buffer_update_after_bind: Bool32,
+    pub descriptor_binding_sampled_image_update_after_bind: Bool32,
+    pub descriptor_binding_storage_image_update_after_bind: Bool32,
+    pub descriptor_binding_storage_buffer_update_after_bind: Bool32,
+    pub descriptor_binding_uniform_texel_buffer_update_after_bind: Bool32,
+    pub descriptor_binding_storage_texel_buffer_update_after_bind: Bool32,
+    pub descriptor_binding_update_unused_while_pending: Bool32,
+    pub descriptor_binding_partially_bound: Bool32,
+    pub descriptor_binding_variable_descriptor_count: Bool32,
+    pub runtime_descriptor_array: Bool32,
+}
+impl ::std::default::Default for PhysicalDeviceDescriptorIndexingFeaturesEXT {
+    fn default() -> PhysicalDeviceDescriptorIndexingFeaturesEXT {
+        PhysicalDeviceDescriptorIndexingFeaturesEXT {
+            s_type: StructureType::PHYSICAL_DEVICE_DESCRIPTOR_INDEXING_FEATURES_EXT,
+            p_next: ::std::ptr::null_mut(),
+            shader_input_attachment_array_dynamic_indexing: Bool32::default(),
+            shader_uniform_texel_buffer_array_dynamic_indexing: Bool32::default(),
+            shader_storage_texel_buffer_array_dynamic_indexing: Bool32::default(),
+            shader_uniform_buffer_array_non_uniform_indexing: Bool32::default(),
+            shader_sampled_image_array_non_uniform_indexing: Bool32::default(),
+            shader_storage_buffer_array_non_uniform_indexing: Bool32::default(),
+            shader_storage_image_array_non_uniform_indexing: Bool32::default(),
+            shader_input_attachment_array_non_uniform_indexing: Bool32::default(),
+            shader_uniform_texel_buffer_array_non_uniform_indexing: Bool32::default(),
+            shader_storage_texel_buffer_array_non_uniform_indexing: Bool32::default(),
+            descriptor_binding_uniform_buffer_update_after_bind: Bool32::default(),
+            descriptor_binding_sampled_image_update_after_bind: Bool32::default(),
+            descriptor_binding_storage_image_update_after_bind: Bool32::default(),
+            descriptor_binding_storage_buffer_update_after_bind: Bool32::default(),
+            descriptor_binding_uniform_texel_buffer_update_after_bind: Bool32::default(),
+            descriptor_binding_storage_texel_buffer_update_after_bind: Bool32::default(),
+            descriptor_binding_update_unused_while_pending: Bool32::default(),
+            descriptor_binding_partially_bound: Bool32::default(),
+            descriptor_binding_variable_descriptor_count: Bool32::default(),
+            runtime_descriptor_array: Bool32::default(),
+        }
+    }
+}
+impl PhysicalDeviceDescriptorIndexingFeaturesEXT {
+    pub fn builder<'a>() -> PhysicalDeviceDescriptorIndexingFeaturesEXTBuilder<'a> {
+        PhysicalDeviceDescriptorIndexingFeaturesEXTBuilder {
+            inner: PhysicalDeviceDescriptorIndexingFeaturesEXT::default(),
+            marker: ::std::marker::PhantomData,
+        }
+    }
+}
+#[repr(transparent)]
+pub struct PhysicalDeviceDescriptorIndexingFeaturesEXTBuilder<'a> {
+    inner: PhysicalDeviceDescriptorIndexingFeaturesEXT,
+    marker: ::std::marker::PhantomData<&'a ()>,
+}
+unsafe impl ExtendsDeviceCreateInfo for PhysicalDeviceDescriptorIndexingFeaturesEXTBuilder<'_> {}
+unsafe impl ExtendsDeviceCreateInfo for PhysicalDeviceDescriptorIndexingFeaturesEXT {}
+impl<'a> ::std::ops::Deref for PhysicalDeviceDescriptorIndexingFeaturesEXTBuilder<'a> {
+    type Target = PhysicalDeviceDescriptorIndexingFeaturesEXT;
+    fn deref(&self) -> &Self::Target {
+        &self.inner
+    }
+}
+impl<'a> ::std::ops::DerefMut for PhysicalDeviceDescriptorIndexingFeaturesEXTBuilder<'a> {
+    fn deref_mut(&mut self) -> &mut Self::Target {
+        &mut self.inner
+    }
+}
+impl<'a> PhysicalDeviceDescriptorIndexingFeaturesEXTBuilder<'a> {
+    pub fn shader_input_attachment_array_dynamic_indexing(
+        mut self,
+        shader_input_attachment_array_dynamic_indexing: bool,
+    ) -> PhysicalDeviceDescriptorIndexingFeaturesEXTBuilder<'a> {
+        self.inner.shader_input_attachment_array_dynamic_indexing =
+            shader_input_attachment_array_dynamic_indexing.into();
+        self
+    }
+    pub fn shader_uniform_texel_buffer_array_dynamic_indexing(
+        mut self,
+        shader_uniform_texel_buffer_array_dynamic_indexing: bool,
+    ) -> PhysicalDeviceDescriptorIndexingFeaturesEXTBuilder<'a> {
+        self.inner
+            .shader_uniform_texel_buffer_array_dynamic_indexing =
+            shader_uniform_texel_buffer_array_dynamic_indexing.into();
+        self
+    }
+    pub fn shader_storage_texel_buffer_array_dynamic_indexing(
+        mut self,
+        shader_storage_texel_buffer_array_dynamic_indexing: bool,
+    ) -> PhysicalDeviceDescriptorIndexingFeaturesEXTBuilder<'a> {
+        self.inner
+            .shader_storage_texel_buffer_array_dynamic_indexing =
+            shader_storage_texel_buffer_array_dynamic_indexing.into();
+        self
+    }
+    pub fn shader_uniform_buffer_array_non_uniform_indexing(
+        mut self,
+        shader_uniform_buffer_array_non_uniform_indexing: bool,
+    ) -> PhysicalDeviceDescriptorIndexingFeaturesEXTBuilder<'a> {
+        self.inner.shader_uniform_buffer_array_non_uniform_indexing =
+            shader_uniform_buffer_array_non_uniform_indexing.into();
+        self
+    }
+    pub fn shader_sampled_image_array_non_uniform_indexing(
+        mut self,
+        shader_sampled_image_array_non_uniform_indexing: bool,
+    ) -> PhysicalDeviceDescriptorIndexingFeaturesEXTBuilder<'a> {
+        self.inner.shader_sampled_image_array_non_uniform_indexing =
+            shader_sampled_image_array_non_uniform_indexing.into();
+        self
+    }
+    pub fn shader_storage_buffer_array_non_uniform_indexing(
+        mut self,
+        shader_storage_buffer_array_non_uniform_indexing: bool,
+    ) -> PhysicalDeviceDescriptorIndexingFeaturesEXTBuilder<'a> {
+        self.inner.shader_storage_buffer_array_non_uniform_indexing =
+            shader_storage_buffer_array_non_uniform_indexing.into();
+        self
+    }
+    pub fn shader_storage_image_array_non_uniform_indexing(
+        mut self,
+        shader_storage_image_array_non_uniform_indexing: bool,
+    ) -> PhysicalDeviceDescriptorIndexingFeaturesEXTBuilder<'a> {
+        self.inner.shader_storage_image_array_non_uniform_indexing =
+            shader_storage_image_array_non_uniform_indexing.into();
+        self
+    }
+    pub fn shader_input_attachment_array_non_uniform_indexing(
+        mut self,
+        shader_input_attachment_array_non_uniform_indexing: bool,
+    ) -> PhysicalDeviceDescriptorIndexingFeaturesEXTBuilder<'a> {
+        self.inner
+            .shader_input_attachment_array_non_uniform_indexing =
+            shader_input_attachment_array_non_uniform_indexing.into();
+        self
+    }
+    pub fn shader_uniform_texel_buffer_array_non_uniform_indexing(
+        mut self,
+        shader_uniform_texel_buffer_array_non_uniform_indexing: bool,
+    ) -> PhysicalDeviceDescriptorIndexingFeaturesEXTBuilder<'a> {
+        self.inner
+            .shader_uniform_texel_buffer_array_non_uniform_indexing =
+            shader_uniform_texel_buffer_array_non_uniform_indexing.into();
+        self
+    }
+    pub fn shader_storage_texel_buffer_array_non_uniform_indexing(
+        mut self,
+        shader_storage_texel_buffer_array_non_uniform_indexing: bool,
+    ) -> PhysicalDeviceDescriptorIndexingFeaturesEXTBuilder<'a> {
+        self.inner
+            .shader_storage_texel_buffer_array_non_uniform_indexing =
+            shader_storage_texel_buffer_array_non_uniform_indexing.into();
+        self
+    }
+    pub fn descriptor_binding_uniform_buffer_update_after_bind(
+        mut self,
+        descriptor_binding_uniform_buffer_update_after_bind: bool,
+    ) -> PhysicalDeviceDescriptorIndexingFeaturesEXTBuilder<'a> {
+        self.inner
+            .descriptor_binding_uniform_buffer_update_after_bind =
+            descriptor_binding_uniform_buffer_update_after_bind.into();
+        self
+    }
+    pub fn descriptor_binding_sampled_image_update_after_bind(
+        mut self,
+        descriptor_binding_sampled_image_update_after_bind: bool,
+    ) -> PhysicalDeviceDescriptorIndexingFeaturesEXTBuilder<'a> {
+        self.inner
+            .descriptor_binding_sampled_image_update_after_bind =
+            descriptor_binding_sampled_image_update_after_bind.into();
+        self
+    }
+    pub fn descriptor_binding_storage_image_update_after_bind(
+        mut self,
+        descriptor_binding_storage_image_update_after_bind: bool,
+    ) -> PhysicalDeviceDescriptorIndexingFeaturesEXTBuilder<'a> {
+        self.inner
+            .descriptor_binding_storage_image_update_after_bind =
+            descriptor_binding_storage_image_update_after_bind.into();
+        self
+    }
+    pub fn descriptor_binding_storage_buffer_update_after_bind(
+        mut self,
+        descriptor_binding_storage_buffer_update_after_bind: bool,
+    ) -> PhysicalDeviceDescriptorIndexingFeaturesEXTBuilder<'a> {
+        self.inner
+            .descriptor_binding_storage_buffer_update_after_bind =
+            descriptor_binding_storage_buffer_update_after_bind.into();
+        self
+    }
+    pub fn descriptor_binding_uniform_texel_buffer_update_after_bind(
+        mut self,
+        descriptor_binding_uniform_texel_buffer_update_after_bind: bool,
+    ) -> PhysicalDeviceDescriptorIndexingFeaturesEXTBuilder<'a> {
+        self.inner
+            .descriptor_binding_uniform_texel_buffer_update_after_bind =
+            descriptor_binding_uniform_texel_buffer_update_after_bind.into();
+        self
+    }
+    pub fn descriptor_binding_storage_texel_buffer_update_after_bind(
+        mut self,
+        descriptor_binding_storage_texel_buffer_update_after_bind: bool,
+    ) -> PhysicalDeviceDescriptorIndexingFeaturesEXTBuilder<'a> {
+        self.inner
+            .descriptor_binding_storage_texel_buffer_update_after_bind =
+            descriptor_binding_storage_texel_buffer_update_after_bind.into();
+        self
+    }
+    pub fn descriptor_binding_update_unused_while_pending(
+        mut self,
+        descriptor_binding_update_unused_while_pending: bool,
+    ) -> PhysicalDeviceDescriptorIndexingFeaturesEXTBuilder<'a> {
+        self.inner.descriptor_binding_update_unused_while_pending =
+            descriptor_binding_update_unused_while_pending.into();
+        self
+    }
+    pub fn descriptor_binding_partially_bound(
+        mut self,
+        descriptor_binding_partially_bound: bool,
+    ) -> PhysicalDeviceDescriptorIndexingFeaturesEXTBuilder<'a> {
+        self.inner.descriptor_binding_partially_bound = descriptor_binding_partially_bound.into();
+        self
+    }
+    pub fn descriptor_binding_variable_descriptor_count(
+        mut self,
+        descriptor_binding_variable_descriptor_count: bool,
+    ) -> PhysicalDeviceDescriptorIndexingFeaturesEXTBuilder<'a> {
+        self.inner.descriptor_binding_variable_descriptor_count =
+            descriptor_binding_variable_descriptor_count.into();
+        self
+    }
+    pub fn runtime_descriptor_array(
+        mut self,
+        runtime_descriptor_array: bool,
+    ) -> PhysicalDeviceDescriptorIndexingFeaturesEXTBuilder<'a> {
+        self.inner.runtime_descriptor_array = runtime_descriptor_array.into();
+        self
+    }
+    #[doc = r" Calling build will **discard** all the lifetime information. Only call this if"]
+    #[doc = r" necessary! Builders implement `Deref` targeting their corresponding Vulkan struct,"]
+    #[doc = r" so references to builders can be passed directly to Vulkan functions."]
+    pub fn build(self) -> PhysicalDeviceDescriptorIndexingFeaturesEXT {
+        self.inner
+    }
+}
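The `ExtendsDeviceCreateInfo` impls above are what allow this features struct to be chained into device creation. A sketch under the assumption that `DeviceCreateInfo`, defined elsewhere in these bindings, exposes the same generated `push_next<T: ExtendsDeviceCreateInfo>`:

```rust
use vk::{DeviceCreateInfo, PhysicalDeviceDescriptorIndexingFeaturesEXT};

fn main() {
    // The bool setters convert to Bool32 via .into(), as generated above.
    let mut indexing_features = PhysicalDeviceDescriptorIndexingFeaturesEXT::builder()
        .runtime_descriptor_array(true)
        .descriptor_binding_partially_bound(true)
        .build();
    // Chained onto the device create info through the p_next pointer chain,
    // so the driver sees the requested descriptor-indexing features.
    let device_info = DeviceCreateInfo::builder().push_next(&mut indexing_features);
    let _ = device_info;
}
```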
+#[repr(C)]
+#[derive(Copy, Clone, Debug)]
+#[doc = ""]
+pub struct PhysicalDeviceDescriptorIndexingPropertiesEXT {
+    pub s_type: StructureType,
+    pub p_next: *mut c_void,
+    pub max_update_after_bind_descriptors_in_all_pools: u32,
+    pub shader_uniform_buffer_array_non_uniform_indexing_native: Bool32,
+    pub shader_sampled_image_array_non_uniform_indexing_native: Bool32,
+    pub shader_storage_buffer_array_non_uniform_indexing_native: Bool32,
+    pub shader_storage_image_array_non_uniform_indexing_native: Bool32,
+    pub shader_input_attachment_array_non_uniform_indexing_native: Bool32,
+    pub robust_buffer_access_update_after_bind: Bool32,
+    pub quad_divergent_implicit_lod: Bool32,
+    pub max_per_stage_descriptor_update_after_bind_samplers: u32,
+    pub max_per_stage_descriptor_update_after_bind_uniform_buffers: u32,
+    pub max_per_stage_descriptor_update_after_bind_storage_buffers: u32,
+    pub max_per_stage_descriptor_update_after_bind_sampled_images: u32,
+    pub max_per_stage_descriptor_update_after_bind_storage_images: u32,
+    pub max_per_stage_descriptor_update_after_bind_input_attachments: u32,
+    pub max_per_stage_update_after_bind_resources: u32,
+    pub max_descriptor_set_update_after_bind_samplers: u32,
+    pub max_descriptor_set_update_after_bind_uniform_buffers: u32,
+    pub max_descriptor_set_update_after_bind_uniform_buffers_dynamic: u32,
+    pub max_descriptor_set_update_after_bind_storage_buffers: u32,
+    pub max_descriptor_set_update_after_bind_storage_buffers_dynamic: u32,
+    pub max_descriptor_set_update_after_bind_sampled_images: u32,
+    pub max_descriptor_set_update_after_bind_storage_images: u32,
+    pub max_descriptor_set_update_after_bind_input_attachments: u32,
+}
+impl ::std::default::Default for PhysicalDeviceDescriptorIndexingPropertiesEXT {
+    fn default() -> PhysicalDeviceDescriptorIndexingPropertiesEXT {
+        PhysicalDeviceDescriptorIndexingPropertiesEXT {
+            s_type: StructureType::PHYSICAL_DEVICE_DESCRIPTOR_INDEXING_PROPERTIES_EXT,
+            p_next: ::std::ptr::null_mut(),
+            max_update_after_bind_descriptors_in_all_pools: u32::default(),
+            shader_uniform_buffer_array_non_uniform_indexing_native: Bool32::default(),
+            shader_sampled_image_array_non_uniform_indexing_native: Bool32::default(),
+            shader_storage_buffer_array_non_uniform_indexing_native: Bool32::default(),
+            shader_storage_image_array_non_uniform_indexing_native: Bool32::default(),
+            shader_input_attachment_array_non_uniform_indexing_native: Bool32::default(),
+            robust_buffer_access_update_after_bind: Bool32::default(),
+            quad_divergent_implicit_lod: Bool32::default(),
+            max_per_stage_descriptor_update_after_bind_samplers: u32::default(),
+            max_per_stage_descriptor_update_after_bind_uniform_buffers: u32::default(),
+            max_per_stage_descriptor_update_after_bind_storage_buffers: u32::default(),
+            max_per_stage_descriptor_update_after_bind_sampled_images: u32::default(),
+            max_per_stage_descriptor_update_after_bind_storage_images: u32::default(),
+            max_per_stage_descriptor_update_after_bind_input_attachments: u32::default(),
+            max_per_stage_update_after_bind_resources: u32::default(),
+            max_descriptor_set_update_after_bind_samplers: u32::default(),
+            max_descriptor_set_update_after_bind_uniform_buffers: u32::default(),
+            max_descriptor_set_update_after_bind_uniform_buffers_dynamic: u32::default(),
+            max_descriptor_set_update_after_bind_storage_buffers: u32::default(),
+            max_descriptor_set_update_after_bind_storage_buffers_dynamic: u32::default(),
+            max_descriptor_set_update_after_bind_sampled_images: u32::default(),
+            max_descriptor_set_update_after_bind_storage_images: u32::default(),
+            max_descriptor_set_update_after_bind_input_attachments: u32::default(),
+        }
+    }
+}
+impl PhysicalDeviceDescriptorIndexingPropertiesEXT {
+    pub fn builder<'a>() -> PhysicalDeviceDescriptorIndexingPropertiesEXTBuilder<'a> {
+        PhysicalDeviceDescriptorIndexingPropertiesEXTBuilder {
+            inner: PhysicalDeviceDescriptorIndexingPropertiesEXT::default(),
+            marker: ::std::marker::PhantomData,
+        }
+    }
+}
+#[repr(transparent)]
+pub struct PhysicalDeviceDescriptorIndexingPropertiesEXTBuilder<'a> {
+    inner: PhysicalDeviceDescriptorIndexingPropertiesEXT,
+    marker: ::std::marker::PhantomData<&'a ()>,
+}
+unsafe impl ExtendsPhysicalDeviceProperties2
+    for PhysicalDeviceDescriptorIndexingPropertiesEXTBuilder<'_>
+{
+}
+unsafe impl ExtendsPhysicalDeviceProperties2 for PhysicalDeviceDescriptorIndexingPropertiesEXT {}
+impl<'a> ::std::ops::Deref for PhysicalDeviceDescriptorIndexingPropertiesEXTBuilder<'a> {
+    type Target = PhysicalDeviceDescriptorIndexingPropertiesEXT;
+    fn deref(&self) -> &Self::Target {
+        &self.inner
+    }
+}
+impl<'a> ::std::ops::DerefMut for PhysicalDeviceDescriptorIndexingPropertiesEXTBuilder<'a> {
+    fn deref_mut(&mut self) -> &mut Self::Target {
+        &mut self.inner
+    }
+}
+impl<'a> PhysicalDeviceDescriptorIndexingPropertiesEXTBuilder<'a> {
+    pub fn max_update_after_bind_descriptors_in_all_pools(
+        mut self,
+        max_update_after_bind_descriptors_in_all_pools: u32,
+    ) -> PhysicalDeviceDescriptorIndexingPropertiesEXTBuilder<'a> {
+        self.inner.max_update_after_bind_descriptors_in_all_pools =
+            max_update_after_bind_descriptors_in_all_pools;
+        self
+    }
+    pub fn shader_uniform_buffer_array_non_uniform_indexing_native(
+        mut self,
+        shader_uniform_buffer_array_non_uniform_indexing_native: bool,
+    ) -> PhysicalDeviceDescriptorIndexingPropertiesEXTBuilder<'a> {
+        self.inner
+            .shader_uniform_buffer_array_non_uniform_indexing_native =
+            shader_uniform_buffer_array_non_uniform_indexing_native.into();
+        self
+    }
+    pub fn shader_sampled_image_array_non_uniform_indexing_native(
+        mut self,
+        shader_sampled_image_array_non_uniform_indexing_native: bool,
+    ) -> PhysicalDeviceDescriptorIndexingPropertiesEXTBuilder<'a> {
+        self.inner
+            .shader_sampled_image_array_non_uniform_indexing_native =
+            shader_sampled_image_array_non_uniform_indexing_native.into();
+        self
+    }
+    pub fn shader_storage_buffer_array_non_uniform_indexing_native(
+        mut self,
+        shader_storage_buffer_array_non_uniform_indexing_native: bool,
+    ) -> PhysicalDeviceDescriptorIndexingPropertiesEXTBuilder<'a> {
+        self.inner
+            .shader_storage_buffer_array_non_uniform_indexing_native =
+            shader_storage_buffer_array_non_uniform_indexing_native.into();
+        self
+    }
+    pub fn shader_storage_image_array_non_uniform_indexing_native(
+        mut self,
+        shader_storage_image_array_non_uniform_indexing_native: bool,
+    ) -> PhysicalDeviceDescriptorIndexingPropertiesEXTBuilder<'a> {
+        self.inner
+            .shader_storage_image_array_non_uniform_indexing_native =
+            shader_storage_image_array_non_uniform_indexing_native.into();
+        self
+    }
+    pub fn shader_input_attachment_array_non_uniform_indexing_native(
+        mut self,
+        shader_input_attachment_array_non_uniform_indexing_native: bool,
+    ) -> PhysicalDeviceDescriptorIndexingPropertiesEXTBuilder<'a> {
+        self.inner
+            .shader_input_attachment_array_non_uniform_indexing_native =
+            shader_input_attachment_array_non_uniform_indexing_native.into();
+        self
+    }
+    pub fn robust_buffer_access_update_after_bind(
+        mut self,
+        robust_buffer_access_update_after_bind: bool,
+    ) -> PhysicalDeviceDescriptorIndexingPropertiesEXTBuilder<'a> {
+        self.inner.robust_buffer_access_update_after_bind =
+            robust_buffer_access_update_after_bind.into();
+        self
+    }
+    pub fn quad_divergent_implicit_lod(
+        mut self,
+        quad_divergent_implicit_lod: bool,
+    ) -> PhysicalDeviceDescriptorIndexingPropertiesEXTBuilder<'a> {
+        self.inner.quad_divergent_implicit_lod = quad_divergent_implicit_lod.into();
+        self
+    }
+    pub fn max_per_stage_descriptor_update_after_bind_samplers(
+        mut self,
+        max_per_stage_descriptor_update_after_bind_samplers: u32,
+    ) -> PhysicalDeviceDescriptorIndexingPropertiesEXTBuilder<'a> {
+        self.inner
+            .max_per_stage_descriptor_update_after_bind_samplers =
+            max_per_stage_descriptor_update_after_bind_samplers;
+        self
+    }
+    pub fn max_per_stage_descriptor_update_after_bind_uniform_buffers(
+        mut self,
+        max_per_stage_descriptor_update_after_bind_uniform_buffers: u32,
+    ) -> PhysicalDeviceDescriptorIndexingPropertiesEXTBuilder<'a> {
+        self.inner
+            .max_per_stage_descriptor_update_after_bind_uniform_buffers =
+            max_per_stage_descriptor_update_after_bind_uniform_buffers;
+        self
+    }
+    pub fn max_per_stage_descriptor_update_after_bind_storage_buffers(
+        mut self,
+        max_per_stage_descriptor_update_after_bind_storage_buffers: u32,
+    ) -> PhysicalDeviceDescriptorIndexingPropertiesEXTBuilder<'a> {
+        self.inner
+            .max_per_stage_descriptor_update_after_bind_storage_buffers =
+            max_per_stage_descriptor_update_after_bind_storage_buffers;
+        self
+    }
+    pub fn max_per_stage_descriptor_update_after_bind_sampled_images(
+        mut self,
+        max_per_stage_descriptor_update_after_bind_sampled_images: u32,
+    ) -> PhysicalDeviceDescriptorIndexingPropertiesEXTBuilder<'a> {
+        self.inner
+            .max_per_stage_descriptor_update_after_bind_sampled_images =
+            max_per_stage_descriptor_update_after_bind_sampled_images;
+        self
+    }
+    pub fn max_per_stage_descriptor_update_after_bind_storage_images(
+        mut self,
+        max_per_stage_descriptor_update_after_bind_storage_images: u32,
+    ) -> PhysicalDeviceDescriptorIndexingPropertiesEXTBuilder<'a> {
+        self.inner
+            .max_per_stage_descriptor_update_after_bind_storage_images =
+            max_per_stage_descriptor_update_after_bind_storage_images;
+        self
+    }
+    pub fn max_per_stage_descriptor_update_after_bind_input_attachments(
+        mut self,
+        max_per_stage_descriptor_update_after_bind_input_attachments: u32,
+    ) -> PhysicalDeviceDescriptorIndexingPropertiesEXTBuilder<'a> {
+        self.inner
+            .max_per_stage_descriptor_update_after_bind_input_attachments =
+            max_per_stage_descriptor_update_after_bind_input_attachments;
+        self
+    }
+    pub fn max_per_stage_update_after_bind_resources(
+        mut self,
max_per_stage_update_after_bind_resources: u32,
+    ) -> PhysicalDeviceDescriptorIndexingPropertiesEXTBuilder<'a> {
+        self.inner.max_per_stage_update_after_bind_resources =
+            max_per_stage_update_after_bind_resources;
+        self
+    }
+    pub fn max_descriptor_set_update_after_bind_samplers(
+        mut self,
+        max_descriptor_set_update_after_bind_samplers: u32,
+    ) -> PhysicalDeviceDescriptorIndexingPropertiesEXTBuilder<'a> {
+        self.inner.max_descriptor_set_update_after_bind_samplers =
+            max_descriptor_set_update_after_bind_samplers;
+        self
+    }
+    pub fn max_descriptor_set_update_after_bind_uniform_buffers(
+        mut self,
+        max_descriptor_set_update_after_bind_uniform_buffers: u32,
+    ) -> PhysicalDeviceDescriptorIndexingPropertiesEXTBuilder<'a> {
+        self.inner
+            .max_descriptor_set_update_after_bind_uniform_buffers =
+            max_descriptor_set_update_after_bind_uniform_buffers;
+        self
+    }
+    pub fn max_descriptor_set_update_after_bind_uniform_buffers_dynamic(
+        mut self,
+        max_descriptor_set_update_after_bind_uniform_buffers_dynamic: u32,
+    ) -> PhysicalDeviceDescriptorIndexingPropertiesEXTBuilder<'a> {
+        self.inner
+            .max_descriptor_set_update_after_bind_uniform_buffers_dynamic =
+            max_descriptor_set_update_after_bind_uniform_buffers_dynamic;
+        self
+    }
+    pub fn max_descriptor_set_update_after_bind_storage_buffers(
+        mut self,
+        max_descriptor_set_update_after_bind_storage_buffers: u32,
+    ) -> PhysicalDeviceDescriptorIndexingPropertiesEXTBuilder<'a> {
+        self.inner
+            .max_descriptor_set_update_after_bind_storage_buffers =
+            max_descriptor_set_update_after_bind_storage_buffers;
+        self
+    }
+    pub fn max_descriptor_set_update_after_bind_storage_buffers_dynamic(
+        mut self,
+        max_descriptor_set_update_after_bind_storage_buffers_dynamic: u32,
+    ) -> PhysicalDeviceDescriptorIndexingPropertiesEXTBuilder<'a> {
+        self.inner
+            .max_descriptor_set_update_after_bind_storage_buffers_dynamic =
+            max_descriptor_set_update_after_bind_storage_buffers_dynamic;
+        self
+    }
+    pub fn max_descriptor_set_update_after_bind_sampled_images(
+        mut self,
+        max_descriptor_set_update_after_bind_sampled_images: u32,
+    ) -> PhysicalDeviceDescriptorIndexingPropertiesEXTBuilder<'a> {
+        self.inner
+            .max_descriptor_set_update_after_bind_sampled_images =
+            max_descriptor_set_update_after_bind_sampled_images;
+        self
+    }
+    pub fn max_descriptor_set_update_after_bind_storage_images(
+        mut self,
+        max_descriptor_set_update_after_bind_storage_images: u32,
+    ) -> PhysicalDeviceDescriptorIndexingPropertiesEXTBuilder<'a> {
+        self.inner
+            .max_descriptor_set_update_after_bind_storage_images =
+            max_descriptor_set_update_after_bind_storage_images;
+        self
+    }
+    pub fn max_descriptor_set_update_after_bind_input_attachments(
+        mut self,
+        max_descriptor_set_update_after_bind_input_attachments: u32,
+    ) -> PhysicalDeviceDescriptorIndexingPropertiesEXTBuilder<'a> {
+        self.inner
+            .max_descriptor_set_update_after_bind_input_attachments =
+            max_descriptor_set_update_after_bind_input_attachments;
+        self
+    }
+    #[doc = r" Calling build will **discard** all the lifetime information. Only call this if"]
+    #[doc = r" necessary! Builders implement `Deref` targeting their corresponding Vulkan struct,"]
+    #[doc = r" so references to builders can be passed directly to Vulkan functions."]
+    pub fn build(self) -> PhysicalDeviceDescriptorIndexingPropertiesEXT {
+        self.inner
+    }
+}
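
For orientation, these update-after-bind limits are query outputs: a caller chains the EXT struct into a `vkGetPhysicalDeviceProperties2` query through `p_next` rather than calling the setters above. A minimal sketch of that pattern, under the assumption that these bindings are consumed through an `ash`-style `vk` module (the `query_indexing_limits` helper is hypothetical, not code from this patch):

```rust
use ash::version::InstanceV1_1; // ash 0.29-era trait providing the query call
use ash::vk;

// Chain the EXT struct onto PhysicalDeviceProperties2; the driver walks the
// p_next chain and fills in the descriptor-indexing limits defaulted above.
fn query_indexing_limits(
    instance: &ash::Instance,
    phys: vk::PhysicalDevice,
) -> vk::PhysicalDeviceDescriptorIndexingPropertiesEXT {
    let mut indexing = vk::PhysicalDeviceDescriptorIndexingPropertiesEXT::default();
    let mut props2 = vk::PhysicalDeviceProperties2::builder().push_next(&mut indexing);
    unsafe { instance.get_physical_device_properties2(phys, &mut props2) };
    indexing
}
```
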
+#[repr(C)]
+#[derive(Copy, Clone, Debug)]
+#[doc = ""]
+pub struct DescriptorSetLayoutBindingFlagsCreateInfoEXT {
+    pub s_type: StructureType,
+    pub p_next: *const c_void,
+    pub binding_count: u32,
+    pub p_binding_flags: *const DescriptorBindingFlagsEXT,
+}
+impl ::std::default::Default for DescriptorSetLayoutBindingFlagsCreateInfoEXT {
+    fn default() -> DescriptorSetLayoutBindingFlagsCreateInfoEXT {
+        DescriptorSetLayoutBindingFlagsCreateInfoEXT {
+            s_type: StructureType::DESCRIPTOR_SET_LAYOUT_BINDING_FLAGS_CREATE_INFO_EXT,
+            p_next: ::std::ptr::null(),
+            binding_count: u32::default(),
+            p_binding_flags: ::std::ptr::null(),
+        }
+    }
+}
+impl DescriptorSetLayoutBindingFlagsCreateInfoEXT {
+    pub fn builder<'a>() -> DescriptorSetLayoutBindingFlagsCreateInfoEXTBuilder<'a> {
+        DescriptorSetLayoutBindingFlagsCreateInfoEXTBuilder {
+            inner: DescriptorSetLayoutBindingFlagsCreateInfoEXT::default(),
+            marker: ::std::marker::PhantomData,
+        }
+    }
+}
+#[repr(transparent)]
+pub struct DescriptorSetLayoutBindingFlagsCreateInfoEXTBuilder<'a> {
+    inner: DescriptorSetLayoutBindingFlagsCreateInfoEXT,
+    marker: ::std::marker::PhantomData<&'a ()>,
+}
+unsafe impl ExtendsDescriptorSetLayoutCreateInfo
+    for DescriptorSetLayoutBindingFlagsCreateInfoEXTBuilder<'_>
+{
+}
+unsafe impl ExtendsDescriptorSetLayoutCreateInfo for DescriptorSetLayoutBindingFlagsCreateInfoEXT {}
+impl<'a> ::std::ops::Deref for DescriptorSetLayoutBindingFlagsCreateInfoEXTBuilder<'a> {
+    type Target = DescriptorSetLayoutBindingFlagsCreateInfoEXT;
+    fn deref(&self) -> &Self::Target {
+        &self.inner
+    }
+}
+impl<'a> ::std::ops::DerefMut for DescriptorSetLayoutBindingFlagsCreateInfoEXTBuilder<'a> {
+    fn deref_mut(&mut self) -> &mut Self::Target {
+        &mut self.inner
+    }
+}
+impl<'a> DescriptorSetLayoutBindingFlagsCreateInfoEXTBuilder<'a> {
+    pub fn binding_flags(
+        mut self,
+        binding_flags: &'a [DescriptorBindingFlagsEXT],
+    ) -> DescriptorSetLayoutBindingFlagsCreateInfoEXTBuilder<'a> {
+        self.inner.binding_count = binding_flags.len() as _;
+        self.inner.p_binding_flags = binding_flags.as_ptr();
+        self
+    }
+    #[doc = r" Calling build will **discard** all the lifetime information. Only call this if"]
+    #[doc = r" necessary! 
Builders implement `Deref` targeting their corresponding Vulkan struct,"] + #[doc = r" so references to builders can be passed directly to Vulkan functions."] + pub fn build(self) -> DescriptorSetLayoutBindingFlagsCreateInfoEXT { + self.inner + } +} +#[repr(C)] +#[derive(Copy, Clone, Debug)] +#[doc = ""] +pub struct DescriptorSetVariableDescriptorCountAllocateInfoEXT { + pub s_type: StructureType, + pub p_next: *const c_void, + pub descriptor_set_count: u32, + pub p_descriptor_counts: *const u32, +} +impl ::std::default::Default for DescriptorSetVariableDescriptorCountAllocateInfoEXT { + fn default() -> DescriptorSetVariableDescriptorCountAllocateInfoEXT { + DescriptorSetVariableDescriptorCountAllocateInfoEXT { + s_type: StructureType::DESCRIPTOR_SET_VARIABLE_DESCRIPTOR_COUNT_ALLOCATE_INFO_EXT, + p_next: ::std::ptr::null(), + descriptor_set_count: u32::default(), + p_descriptor_counts: ::std::ptr::null(), + } + } +} +impl DescriptorSetVariableDescriptorCountAllocateInfoEXT { + pub fn builder<'a>() -> DescriptorSetVariableDescriptorCountAllocateInfoEXTBuilder<'a> { + DescriptorSetVariableDescriptorCountAllocateInfoEXTBuilder { + inner: DescriptorSetVariableDescriptorCountAllocateInfoEXT::default(), + marker: ::std::marker::PhantomData, + } + } +} +#[repr(transparent)] +pub struct DescriptorSetVariableDescriptorCountAllocateInfoEXTBuilder<'a> { + inner: DescriptorSetVariableDescriptorCountAllocateInfoEXT, + marker: ::std::marker::PhantomData<&'a ()>, +} +unsafe impl ExtendsDescriptorSetAllocateInfo + for DescriptorSetVariableDescriptorCountAllocateInfoEXTBuilder<'_> +{ +} +unsafe impl ExtendsDescriptorSetAllocateInfo + for DescriptorSetVariableDescriptorCountAllocateInfoEXT +{ +} +impl<'a> ::std::ops::Deref for DescriptorSetVariableDescriptorCountAllocateInfoEXTBuilder<'a> { + type Target = DescriptorSetVariableDescriptorCountAllocateInfoEXT; + fn deref(&self) -> &Self::Target { + &self.inner + } +} +impl<'a> ::std::ops::DerefMut for DescriptorSetVariableDescriptorCountAllocateInfoEXTBuilder<'a> { + fn deref_mut(&mut self) -> &mut Self::Target { + &mut self.inner + } +} +impl<'a> DescriptorSetVariableDescriptorCountAllocateInfoEXTBuilder<'a> { + pub fn descriptor_counts( + mut self, + descriptor_counts: &'a [u32], + ) -> DescriptorSetVariableDescriptorCountAllocateInfoEXTBuilder<'a> { + self.inner.descriptor_set_count = descriptor_counts.len() as _; + self.inner.p_descriptor_counts = descriptor_counts.as_ptr(); + self + } + #[doc = r" Calling build will **discard** all the lifetime information. Only call this if"] + #[doc = r" necessary! 
Builders implement `Deref` targeting their corresponding Vulkan struct,"] + #[doc = r" so references to builders can be passed directly to Vulkan functions."] + pub fn build(self) -> DescriptorSetVariableDescriptorCountAllocateInfoEXT { + self.inner + } +} +#[repr(C)] +#[derive(Copy, Clone, Debug)] +#[doc = ""] +pub struct DescriptorSetVariableDescriptorCountLayoutSupportEXT { + pub s_type: StructureType, + pub p_next: *mut c_void, + pub max_variable_descriptor_count: u32, +} +impl ::std::default::Default for DescriptorSetVariableDescriptorCountLayoutSupportEXT { + fn default() -> DescriptorSetVariableDescriptorCountLayoutSupportEXT { + DescriptorSetVariableDescriptorCountLayoutSupportEXT { + s_type: StructureType::DESCRIPTOR_SET_VARIABLE_DESCRIPTOR_COUNT_LAYOUT_SUPPORT_EXT, + p_next: ::std::ptr::null_mut(), + max_variable_descriptor_count: u32::default(), + } + } +} +impl DescriptorSetVariableDescriptorCountLayoutSupportEXT { + pub fn builder<'a>() -> DescriptorSetVariableDescriptorCountLayoutSupportEXTBuilder<'a> { + DescriptorSetVariableDescriptorCountLayoutSupportEXTBuilder { + inner: DescriptorSetVariableDescriptorCountLayoutSupportEXT::default(), + marker: ::std::marker::PhantomData, + } + } +} +#[repr(transparent)] +pub struct DescriptorSetVariableDescriptorCountLayoutSupportEXTBuilder<'a> { + inner: DescriptorSetVariableDescriptorCountLayoutSupportEXT, + marker: ::std::marker::PhantomData<&'a ()>, +} +unsafe impl ExtendsDescriptorSetLayoutSupport + for DescriptorSetVariableDescriptorCountLayoutSupportEXTBuilder<'_> +{ +} +unsafe impl ExtendsDescriptorSetLayoutSupport + for DescriptorSetVariableDescriptorCountLayoutSupportEXT +{ +} +impl<'a> ::std::ops::Deref for DescriptorSetVariableDescriptorCountLayoutSupportEXTBuilder<'a> { + type Target = DescriptorSetVariableDescriptorCountLayoutSupportEXT; + fn deref(&self) -> &Self::Target { + &self.inner + } +} +impl<'a> ::std::ops::DerefMut for DescriptorSetVariableDescriptorCountLayoutSupportEXTBuilder<'a> { + fn deref_mut(&mut self) -> &mut Self::Target { + &mut self.inner + } +} +impl<'a> DescriptorSetVariableDescriptorCountLayoutSupportEXTBuilder<'a> { + pub fn max_variable_descriptor_count( + mut self, + max_variable_descriptor_count: u32, + ) -> DescriptorSetVariableDescriptorCountLayoutSupportEXTBuilder<'a> { + self.inner.max_variable_descriptor_count = max_variable_descriptor_count; + self + } + #[doc = r" Calling build will **discard** all the lifetime information. Only call this if"] + #[doc = r" necessary! 
Builders implement `Deref` targeting their corresponding Vulkan struct,"] + #[doc = r" so references to builders can be passed directly to Vulkan functions."] + pub fn build(self) -> DescriptorSetVariableDescriptorCountLayoutSupportEXT { + self.inner + } +} +#[repr(C)] +#[derive(Copy, Clone, Debug)] +#[doc = ""] +pub struct AttachmentDescription2KHR { + pub s_type: StructureType, + pub p_next: *const c_void, + pub flags: AttachmentDescriptionFlags, + pub format: Format, + pub samples: SampleCountFlags, + pub load_op: AttachmentLoadOp, + pub store_op: AttachmentStoreOp, + pub stencil_load_op: AttachmentLoadOp, + pub stencil_store_op: AttachmentStoreOp, + pub initial_layout: ImageLayout, + pub final_layout: ImageLayout, +} +impl ::std::default::Default for AttachmentDescription2KHR { + fn default() -> AttachmentDescription2KHR { + AttachmentDescription2KHR { + s_type: StructureType::ATTACHMENT_DESCRIPTION_2_KHR, + p_next: ::std::ptr::null(), + flags: AttachmentDescriptionFlags::default(), + format: Format::default(), + samples: SampleCountFlags::default(), + load_op: AttachmentLoadOp::default(), + store_op: AttachmentStoreOp::default(), + stencil_load_op: AttachmentLoadOp::default(), + stencil_store_op: AttachmentStoreOp::default(), + initial_layout: ImageLayout::default(), + final_layout: ImageLayout::default(), + } + } +} +impl AttachmentDescription2KHR { + pub fn builder<'a>() -> AttachmentDescription2KHRBuilder<'a> { + AttachmentDescription2KHRBuilder { + inner: AttachmentDescription2KHR::default(), + marker: ::std::marker::PhantomData, + } + } +} +#[repr(transparent)] +pub struct AttachmentDescription2KHRBuilder<'a> { + inner: AttachmentDescription2KHR, + marker: ::std::marker::PhantomData<&'a ()>, +} +pub unsafe trait ExtendsAttachmentDescription2KHR {} +impl<'a> ::std::ops::Deref for AttachmentDescription2KHRBuilder<'a> { + type Target = AttachmentDescription2KHR; + fn deref(&self) -> &Self::Target { + &self.inner + } +} +impl<'a> ::std::ops::DerefMut for AttachmentDescription2KHRBuilder<'a> { + fn deref_mut(&mut self) -> &mut Self::Target { + &mut self.inner + } +} +impl<'a> AttachmentDescription2KHRBuilder<'a> { + pub fn flags( + mut self, + flags: AttachmentDescriptionFlags, + ) -> AttachmentDescription2KHRBuilder<'a> { + self.inner.flags = flags; + self + } + pub fn format(mut self, format: Format) -> AttachmentDescription2KHRBuilder<'a> { + self.inner.format = format; + self + } + pub fn samples(mut self, samples: SampleCountFlags) -> AttachmentDescription2KHRBuilder<'a> { + self.inner.samples = samples; + self + } + pub fn load_op(mut self, load_op: AttachmentLoadOp) -> AttachmentDescription2KHRBuilder<'a> { + self.inner.load_op = load_op; + self + } + pub fn store_op(mut self, store_op: AttachmentStoreOp) -> AttachmentDescription2KHRBuilder<'a> { + self.inner.store_op = store_op; + self + } + pub fn stencil_load_op( + mut self, + stencil_load_op: AttachmentLoadOp, + ) -> AttachmentDescription2KHRBuilder<'a> { + self.inner.stencil_load_op = stencil_load_op; + self + } + pub fn stencil_store_op( + mut self, + stencil_store_op: AttachmentStoreOp, + ) -> AttachmentDescription2KHRBuilder<'a> { + self.inner.stencil_store_op = stencil_store_op; + self + } + pub fn initial_layout( + mut self, + initial_layout: ImageLayout, + ) -> AttachmentDescription2KHRBuilder<'a> { + self.inner.initial_layout = initial_layout; + self + } + pub fn final_layout( + mut self, + final_layout: ImageLayout, + ) -> AttachmentDescription2KHRBuilder<'a> { + self.inner.final_layout = final_layout; + self + } + 
#[doc = r" Prepends the given extension struct between the root and the first pointer. This"] + #[doc = r" method only exists on structs that can be passed to a function directly. Only"] + #[doc = r" valid extension structs can be pushed into the chain."] + #[doc = r" If the chain looks like `A -> B -> C`, and you call `builder.push_next(&mut D)`, then the"] + #[doc = r" chain will look like `A -> D -> B -> C`."] + pub fn push_next( + mut self, + next: &'a mut T, + ) -> AttachmentDescription2KHRBuilder<'a> { + unsafe { + let next_ptr = next as *mut T as *mut BaseOutStructure; + let last_next = ptr_chain_iter(next).last().unwrap(); + (*last_next).p_next = self.inner.p_next as _; + self.inner.p_next = next_ptr as _; + } + self + } + #[doc = r" Calling build will **discard** all the lifetime information. Only call this if"] + #[doc = r" necessary! Builders implement `Deref` targeting their corresponding Vulkan struct,"] + #[doc = r" so references to builders can be passed directly to Vulkan functions."] + pub fn build(self) -> AttachmentDescription2KHR { + self.inner + } +} +#[repr(C)] +#[derive(Copy, Clone, Debug)] +#[doc = ""] +pub struct AttachmentReference2KHR { + pub s_type: StructureType, + pub p_next: *const c_void, + pub attachment: u32, + pub layout: ImageLayout, + pub aspect_mask: ImageAspectFlags, +} +impl ::std::default::Default for AttachmentReference2KHR { + fn default() -> AttachmentReference2KHR { + AttachmentReference2KHR { + s_type: StructureType::ATTACHMENT_REFERENCE_2_KHR, + p_next: ::std::ptr::null(), + attachment: u32::default(), + layout: ImageLayout::default(), + aspect_mask: ImageAspectFlags::default(), + } + } +} +impl AttachmentReference2KHR { + pub fn builder<'a>() -> AttachmentReference2KHRBuilder<'a> { + AttachmentReference2KHRBuilder { + inner: AttachmentReference2KHR::default(), + marker: ::std::marker::PhantomData, + } + } +} +#[repr(transparent)] +pub struct AttachmentReference2KHRBuilder<'a> { + inner: AttachmentReference2KHR, + marker: ::std::marker::PhantomData<&'a ()>, +} +pub unsafe trait ExtendsAttachmentReference2KHR {} +impl<'a> ::std::ops::Deref for AttachmentReference2KHRBuilder<'a> { + type Target = AttachmentReference2KHR; + fn deref(&self) -> &Self::Target { + &self.inner + } +} +impl<'a> ::std::ops::DerefMut for AttachmentReference2KHRBuilder<'a> { + fn deref_mut(&mut self) -> &mut Self::Target { + &mut self.inner + } +} +impl<'a> AttachmentReference2KHRBuilder<'a> { + pub fn attachment(mut self, attachment: u32) -> AttachmentReference2KHRBuilder<'a> { + self.inner.attachment = attachment; + self + } + pub fn layout(mut self, layout: ImageLayout) -> AttachmentReference2KHRBuilder<'a> { + self.inner.layout = layout; + self + } + pub fn aspect_mask( + mut self, + aspect_mask: ImageAspectFlags, + ) -> AttachmentReference2KHRBuilder<'a> { + self.inner.aspect_mask = aspect_mask; + self + } + #[doc = r" Prepends the given extension struct between the root and the first pointer. This"] + #[doc = r" method only exists on structs that can be passed to a function directly. 
Only"] + #[doc = r" valid extension structs can be pushed into the chain."] + #[doc = r" If the chain looks like `A -> B -> C`, and you call `builder.push_next(&mut D)`, then the"] + #[doc = r" chain will look like `A -> D -> B -> C`."] + pub fn push_next( + mut self, + next: &'a mut T, + ) -> AttachmentReference2KHRBuilder<'a> { + unsafe { + let next_ptr = next as *mut T as *mut BaseOutStructure; + let last_next = ptr_chain_iter(next).last().unwrap(); + (*last_next).p_next = self.inner.p_next as _; + self.inner.p_next = next_ptr as _; + } + self + } + #[doc = r" Calling build will **discard** all the lifetime information. Only call this if"] + #[doc = r" necessary! Builders implement `Deref` targeting their corresponding Vulkan struct,"] + #[doc = r" so references to builders can be passed directly to Vulkan functions."] + pub fn build(self) -> AttachmentReference2KHR { + self.inner + } +} +#[repr(C)] +#[derive(Copy, Clone, Debug)] +#[doc = ""] +pub struct SubpassDescription2KHR { + pub s_type: StructureType, + pub p_next: *const c_void, + pub flags: SubpassDescriptionFlags, + pub pipeline_bind_point: PipelineBindPoint, + pub view_mask: u32, + pub input_attachment_count: u32, + pub p_input_attachments: *const AttachmentReference2KHR, + pub color_attachment_count: u32, + pub p_color_attachments: *const AttachmentReference2KHR, + pub p_resolve_attachments: *const AttachmentReference2KHR, + pub p_depth_stencil_attachment: *const AttachmentReference2KHR, + pub preserve_attachment_count: u32, + pub p_preserve_attachments: *const u32, +} +impl ::std::default::Default for SubpassDescription2KHR { + fn default() -> SubpassDescription2KHR { + SubpassDescription2KHR { + s_type: StructureType::SUBPASS_DESCRIPTION_2_KHR, + p_next: ::std::ptr::null(), + flags: SubpassDescriptionFlags::default(), + pipeline_bind_point: PipelineBindPoint::default(), + view_mask: u32::default(), + input_attachment_count: u32::default(), + p_input_attachments: ::std::ptr::null(), + color_attachment_count: u32::default(), + p_color_attachments: ::std::ptr::null(), + p_resolve_attachments: ::std::ptr::null(), + p_depth_stencil_attachment: ::std::ptr::null(), + preserve_attachment_count: u32::default(), + p_preserve_attachments: ::std::ptr::null(), + } + } +} +impl SubpassDescription2KHR { + pub fn builder<'a>() -> SubpassDescription2KHRBuilder<'a> { + SubpassDescription2KHRBuilder { + inner: SubpassDescription2KHR::default(), + marker: ::std::marker::PhantomData, + } + } +} +#[repr(transparent)] +pub struct SubpassDescription2KHRBuilder<'a> { + inner: SubpassDescription2KHR, + marker: ::std::marker::PhantomData<&'a ()>, +} +pub unsafe trait ExtendsSubpassDescription2KHR {} +impl<'a> ::std::ops::Deref for SubpassDescription2KHRBuilder<'a> { + type Target = SubpassDescription2KHR; + fn deref(&self) -> &Self::Target { + &self.inner + } +} +impl<'a> ::std::ops::DerefMut for SubpassDescription2KHRBuilder<'a> { + fn deref_mut(&mut self) -> &mut Self::Target { + &mut self.inner + } +} +impl<'a> SubpassDescription2KHRBuilder<'a> { + pub fn flags(mut self, flags: SubpassDescriptionFlags) -> SubpassDescription2KHRBuilder<'a> { + self.inner.flags = flags; + self + } + pub fn pipeline_bind_point( + mut self, + pipeline_bind_point: PipelineBindPoint, + ) -> SubpassDescription2KHRBuilder<'a> { + self.inner.pipeline_bind_point = pipeline_bind_point; + self + } + pub fn view_mask(mut self, view_mask: u32) -> SubpassDescription2KHRBuilder<'a> { + self.inner.view_mask = view_mask; + self + } + pub fn input_attachments( + mut self, + 
input_attachments: &'a [AttachmentReference2KHR], + ) -> SubpassDescription2KHRBuilder<'a> { + self.inner.input_attachment_count = input_attachments.len() as _; + self.inner.p_input_attachments = input_attachments.as_ptr(); + self + } + pub fn color_attachments( + mut self, + color_attachments: &'a [AttachmentReference2KHR], + ) -> SubpassDescription2KHRBuilder<'a> { + self.inner.color_attachment_count = color_attachments.len() as _; + self.inner.p_color_attachments = color_attachments.as_ptr(); + self + } + pub fn resolve_attachments( + mut self, + resolve_attachments: &'a [AttachmentReference2KHR], + ) -> SubpassDescription2KHRBuilder<'a> { + self.inner.color_attachment_count = resolve_attachments.len() as _; + self.inner.p_resolve_attachments = resolve_attachments.as_ptr(); + self + } + pub fn depth_stencil_attachment( + mut self, + depth_stencil_attachment: &'a AttachmentReference2KHR, + ) -> SubpassDescription2KHRBuilder<'a> { + self.inner.p_depth_stencil_attachment = depth_stencil_attachment; + self + } + pub fn preserve_attachments( + mut self, + preserve_attachments: &'a [u32], + ) -> SubpassDescription2KHRBuilder<'a> { + self.inner.preserve_attachment_count = preserve_attachments.len() as _; + self.inner.p_preserve_attachments = preserve_attachments.as_ptr(); + self + } + #[doc = r" Prepends the given extension struct between the root and the first pointer. This"] + #[doc = r" method only exists on structs that can be passed to a function directly. Only"] + #[doc = r" valid extension structs can be pushed into the chain."] + #[doc = r" If the chain looks like `A -> B -> C`, and you call `builder.push_next(&mut D)`, then the"] + #[doc = r" chain will look like `A -> D -> B -> C`."] + pub fn push_next( + mut self, + next: &'a mut T, + ) -> SubpassDescription2KHRBuilder<'a> { + unsafe { + let next_ptr = next as *mut T as *mut BaseOutStructure; + let last_next = ptr_chain_iter(next).last().unwrap(); + (*last_next).p_next = self.inner.p_next as _; + self.inner.p_next = next_ptr as _; + } + self + } + #[doc = r" Calling build will **discard** all the lifetime information. Only call this if"] + #[doc = r" necessary! 
Builders implement `Deref` targeting their corresponding Vulkan struct,"] + #[doc = r" so references to builders can be passed directly to Vulkan functions."] + pub fn build(self) -> SubpassDescription2KHR { + self.inner + } +} +#[repr(C)] +#[derive(Copy, Clone, Debug)] +#[doc = ""] +pub struct SubpassDependency2KHR { + pub s_type: StructureType, + pub p_next: *const c_void, + pub src_subpass: u32, + pub dst_subpass: u32, + pub src_stage_mask: PipelineStageFlags, + pub dst_stage_mask: PipelineStageFlags, + pub src_access_mask: AccessFlags, + pub dst_access_mask: AccessFlags, + pub dependency_flags: DependencyFlags, + pub view_offset: i32, +} +impl ::std::default::Default for SubpassDependency2KHR { + fn default() -> SubpassDependency2KHR { + SubpassDependency2KHR { + s_type: StructureType::SUBPASS_DEPENDENCY_2_KHR, + p_next: ::std::ptr::null(), + src_subpass: u32::default(), + dst_subpass: u32::default(), + src_stage_mask: PipelineStageFlags::default(), + dst_stage_mask: PipelineStageFlags::default(), + src_access_mask: AccessFlags::default(), + dst_access_mask: AccessFlags::default(), + dependency_flags: DependencyFlags::default(), + view_offset: i32::default(), + } + } +} +impl SubpassDependency2KHR { + pub fn builder<'a>() -> SubpassDependency2KHRBuilder<'a> { + SubpassDependency2KHRBuilder { + inner: SubpassDependency2KHR::default(), + marker: ::std::marker::PhantomData, + } + } +} +#[repr(transparent)] +pub struct SubpassDependency2KHRBuilder<'a> { + inner: SubpassDependency2KHR, + marker: ::std::marker::PhantomData<&'a ()>, +} +pub unsafe trait ExtendsSubpassDependency2KHR {} +impl<'a> ::std::ops::Deref for SubpassDependency2KHRBuilder<'a> { + type Target = SubpassDependency2KHR; + fn deref(&self) -> &Self::Target { + &self.inner + } +} +impl<'a> ::std::ops::DerefMut for SubpassDependency2KHRBuilder<'a> { + fn deref_mut(&mut self) -> &mut Self::Target { + &mut self.inner + } +} +impl<'a> SubpassDependency2KHRBuilder<'a> { + pub fn src_subpass(mut self, src_subpass: u32) -> SubpassDependency2KHRBuilder<'a> { + self.inner.src_subpass = src_subpass; + self + } + pub fn dst_subpass(mut self, dst_subpass: u32) -> SubpassDependency2KHRBuilder<'a> { + self.inner.dst_subpass = dst_subpass; + self + } + pub fn src_stage_mask( + mut self, + src_stage_mask: PipelineStageFlags, + ) -> SubpassDependency2KHRBuilder<'a> { + self.inner.src_stage_mask = src_stage_mask; + self + } + pub fn dst_stage_mask( + mut self, + dst_stage_mask: PipelineStageFlags, + ) -> SubpassDependency2KHRBuilder<'a> { + self.inner.dst_stage_mask = dst_stage_mask; + self + } + pub fn src_access_mask( + mut self, + src_access_mask: AccessFlags, + ) -> SubpassDependency2KHRBuilder<'a> { + self.inner.src_access_mask = src_access_mask; + self + } + pub fn dst_access_mask( + mut self, + dst_access_mask: AccessFlags, + ) -> SubpassDependency2KHRBuilder<'a> { + self.inner.dst_access_mask = dst_access_mask; + self + } + pub fn dependency_flags( + mut self, + dependency_flags: DependencyFlags, + ) -> SubpassDependency2KHRBuilder<'a> { + self.inner.dependency_flags = dependency_flags; + self + } + pub fn view_offset(mut self, view_offset: i32) -> SubpassDependency2KHRBuilder<'a> { + self.inner.view_offset = view_offset; + self + } + #[doc = r" Prepends the given extension struct between the root and the first pointer. This"] + #[doc = r" method only exists on structs that can be passed to a function directly. 
Only"] + #[doc = r" valid extension structs can be pushed into the chain."] + #[doc = r" If the chain looks like `A -> B -> C`, and you call `builder.push_next(&mut D)`, then the"] + #[doc = r" chain will look like `A -> D -> B -> C`."] + pub fn push_next( + mut self, + next: &'a mut T, + ) -> SubpassDependency2KHRBuilder<'a> { + unsafe { + let next_ptr = next as *mut T as *mut BaseOutStructure; + let last_next = ptr_chain_iter(next).last().unwrap(); + (*last_next).p_next = self.inner.p_next as _; + self.inner.p_next = next_ptr as _; + } + self + } + #[doc = r" Calling build will **discard** all the lifetime information. Only call this if"] + #[doc = r" necessary! Builders implement `Deref` targeting their corresponding Vulkan struct,"] + #[doc = r" so references to builders can be passed directly to Vulkan functions."] + pub fn build(self) -> SubpassDependency2KHR { + self.inner + } +} +#[repr(C)] +#[derive(Copy, Clone, Debug)] +#[doc = ""] +pub struct RenderPassCreateInfo2KHR { + pub s_type: StructureType, + pub p_next: *const c_void, + pub flags: RenderPassCreateFlags, + pub attachment_count: u32, + pub p_attachments: *const AttachmentDescription2KHR, + pub subpass_count: u32, + pub p_subpasses: *const SubpassDescription2KHR, + pub dependency_count: u32, + pub p_dependencies: *const SubpassDependency2KHR, + pub correlated_view_mask_count: u32, + pub p_correlated_view_masks: *const u32, +} +impl ::std::default::Default for RenderPassCreateInfo2KHR { + fn default() -> RenderPassCreateInfo2KHR { + RenderPassCreateInfo2KHR { + s_type: StructureType::RENDER_PASS_CREATE_INFO_2_KHR, + p_next: ::std::ptr::null(), + flags: RenderPassCreateFlags::default(), + attachment_count: u32::default(), + p_attachments: ::std::ptr::null(), + subpass_count: u32::default(), + p_subpasses: ::std::ptr::null(), + dependency_count: u32::default(), + p_dependencies: ::std::ptr::null(), + correlated_view_mask_count: u32::default(), + p_correlated_view_masks: ::std::ptr::null(), + } + } +} +impl RenderPassCreateInfo2KHR { + pub fn builder<'a>() -> RenderPassCreateInfo2KHRBuilder<'a> { + RenderPassCreateInfo2KHRBuilder { + inner: RenderPassCreateInfo2KHR::default(), + marker: ::std::marker::PhantomData, + } + } +} +#[repr(transparent)] +pub struct RenderPassCreateInfo2KHRBuilder<'a> { + inner: RenderPassCreateInfo2KHR, + marker: ::std::marker::PhantomData<&'a ()>, +} +pub unsafe trait ExtendsRenderPassCreateInfo2KHR {} +impl<'a> ::std::ops::Deref for RenderPassCreateInfo2KHRBuilder<'a> { + type Target = RenderPassCreateInfo2KHR; + fn deref(&self) -> &Self::Target { + &self.inner + } +} +impl<'a> ::std::ops::DerefMut for RenderPassCreateInfo2KHRBuilder<'a> { + fn deref_mut(&mut self) -> &mut Self::Target { + &mut self.inner + } +} +impl<'a> RenderPassCreateInfo2KHRBuilder<'a> { + pub fn flags(mut self, flags: RenderPassCreateFlags) -> RenderPassCreateInfo2KHRBuilder<'a> { + self.inner.flags = flags; + self + } + pub fn attachments( + mut self, + attachments: &'a [AttachmentDescription2KHR], + ) -> RenderPassCreateInfo2KHRBuilder<'a> { + self.inner.attachment_count = attachments.len() as _; + self.inner.p_attachments = attachments.as_ptr(); + self + } + pub fn subpasses( + mut self, + subpasses: &'a [SubpassDescription2KHR], + ) -> RenderPassCreateInfo2KHRBuilder<'a> { + self.inner.subpass_count = subpasses.len() as _; + self.inner.p_subpasses = subpasses.as_ptr(); + self + } + pub fn dependencies( + mut self, + dependencies: &'a [SubpassDependency2KHR], + ) -> RenderPassCreateInfo2KHRBuilder<'a> { + 
+#[repr(C)]
+#[derive(Copy, Clone, Debug)]
+#[doc = ""]
+pub struct RenderPassCreateInfo2KHR {
+    pub s_type: StructureType,
+    pub p_next: *const c_void,
+    pub flags: RenderPassCreateFlags,
+    pub attachment_count: u32,
+    pub p_attachments: *const AttachmentDescription2KHR,
+    pub subpass_count: u32,
+    pub p_subpasses: *const SubpassDescription2KHR,
+    pub dependency_count: u32,
+    pub p_dependencies: *const SubpassDependency2KHR,
+    pub correlated_view_mask_count: u32,
+    pub p_correlated_view_masks: *const u32,
+}
+impl ::std::default::Default for RenderPassCreateInfo2KHR {
+    fn default() -> RenderPassCreateInfo2KHR {
+        RenderPassCreateInfo2KHR {
+            s_type: StructureType::RENDER_PASS_CREATE_INFO_2_KHR,
+            p_next: ::std::ptr::null(),
+            flags: RenderPassCreateFlags::default(),
+            attachment_count: u32::default(),
+            p_attachments: ::std::ptr::null(),
+            subpass_count: u32::default(),
+            p_subpasses: ::std::ptr::null(),
+            dependency_count: u32::default(),
+            p_dependencies: ::std::ptr::null(),
+            correlated_view_mask_count: u32::default(),
+            p_correlated_view_masks: ::std::ptr::null(),
+        }
+    }
+}
+impl RenderPassCreateInfo2KHR {
+    pub fn builder<'a>() -> RenderPassCreateInfo2KHRBuilder<'a> {
+        RenderPassCreateInfo2KHRBuilder {
+            inner: RenderPassCreateInfo2KHR::default(),
+            marker: ::std::marker::PhantomData,
+        }
+    }
+}
+#[repr(transparent)]
+pub struct RenderPassCreateInfo2KHRBuilder<'a> {
+    inner: RenderPassCreateInfo2KHR,
+    marker: ::std::marker::PhantomData<&'a ()>,
+}
+pub unsafe trait ExtendsRenderPassCreateInfo2KHR {}
+impl<'a> ::std::ops::Deref for RenderPassCreateInfo2KHRBuilder<'a> {
+    type Target = RenderPassCreateInfo2KHR;
+    fn deref(&self) -> &Self::Target {
+        &self.inner
+    }
+}
+impl<'a> ::std::ops::DerefMut for RenderPassCreateInfo2KHRBuilder<'a> {
+    fn deref_mut(&mut self) -> &mut Self::Target {
+        &mut self.inner
+    }
+}
+impl<'a> RenderPassCreateInfo2KHRBuilder<'a> {
+    pub fn flags(mut self, flags: RenderPassCreateFlags) -> RenderPassCreateInfo2KHRBuilder<'a> {
+        self.inner.flags = flags;
+        self
+    }
+    pub fn attachments(
+        mut self,
+        attachments: &'a [AttachmentDescription2KHR],
+    ) -> RenderPassCreateInfo2KHRBuilder<'a> {
+        self.inner.attachment_count = attachments.len() as _;
+        self.inner.p_attachments = attachments.as_ptr();
+        self
+    }
+    pub fn subpasses(
+        mut self,
+        subpasses: &'a [SubpassDescription2KHR],
+    ) -> RenderPassCreateInfo2KHRBuilder<'a> {
+        self.inner.subpass_count = subpasses.len() as _;
+        self.inner.p_subpasses = subpasses.as_ptr();
+        self
+    }
+    pub fn dependencies(
+        mut self,
+        dependencies: &'a [SubpassDependency2KHR],
+    ) -> RenderPassCreateInfo2KHRBuilder<'a> {
+        self.inner.dependency_count = dependencies.len() as _;
+        self.inner.p_dependencies = dependencies.as_ptr();
+        self
+    }
+    pub fn correlated_view_masks(
+        mut self,
+        correlated_view_masks: &'a [u32],
+    ) -> RenderPassCreateInfo2KHRBuilder<'a> {
+        self.inner.correlated_view_mask_count = correlated_view_masks.len() as _;
+        self.inner.p_correlated_view_masks = correlated_view_masks.as_ptr();
+        self
+    }
+    #[doc = r" Prepends the given extension struct between the root and the first pointer. This"]
+    #[doc = r" method only exists on structs that can be passed to a function directly. Only"]
+    #[doc = r" valid extension structs can be pushed into the chain."]
+    #[doc = r" If the chain looks like `A -> B -> C`, and you call `builder.push_next(&mut D)`, then the"]
+    #[doc = r" chain will look like `A -> D -> B -> C`."]
+    pub fn push_next<T: ExtendsRenderPassCreateInfo2KHR>(
+        mut self,
+        next: &'a mut T,
+    ) -> RenderPassCreateInfo2KHRBuilder<'a> {
+        unsafe {
+            let next_ptr = next as *mut T as *mut BaseOutStructure;
+            let last_next = ptr_chain_iter(next).last().unwrap();
+            (*last_next).p_next = self.inner.p_next as _;
+            self.inner.p_next = next_ptr as _;
+        }
+        self
+    }
+    #[doc = r" Calling build will **discard** all the lifetime information. Only call this if"]
+    #[doc = r" necessary! Builders implement `Deref` targeting their corresponding Vulkan struct,"]
+    #[doc = r" so references to builders can be passed directly to Vulkan functions."]
+    pub fn build(self) -> RenderPassCreateInfo2KHR {
+        self.inner
+    }
+}
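
The slice setters above record both the count field and the raw pointer, and the builder's `'a` parameter pins the borrowed slices for as long as the builder lives; only `build()` (or handing `&*builder` straight to Vulkan) erases that tracking. A sketch tying the pieces together (helper name hypothetical, `ash`-style `vk` module assumed):

```rust
use ash::vk;

// Returning the builder, not the built struct, keeps the slice borrows
// checked by the compiler until the caller actually submits the create info.
fn render_pass_info<'a>(
    attachments: &'a [vk::AttachmentDescription2KHR],
    subpasses: &'a [vk::SubpassDescription2KHR],
    dependencies: &'a [vk::SubpassDependency2KHR],
) -> vk::RenderPassCreateInfo2KHRBuilder<'a> {
    vk::RenderPassCreateInfo2KHR::builder()
        .attachments(attachments)
        .subpasses(subpasses)
        .dependencies(dependencies)
}
```
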
Only"] + #[doc = r" valid extension structs can be pushed into the chain."] + #[doc = r" If the chain looks like `A -> B -> C`, and you call `builder.push_next(&mut D)`, then the"] + #[doc = r" chain will look like `A -> D -> B -> C`."] + pub fn push_next( + mut self, + next: &'a mut T, + ) -> SubpassBeginInfoKHRBuilder<'a> { + unsafe { + let next_ptr = next as *mut T as *mut BaseOutStructure; + let last_next = ptr_chain_iter(next).last().unwrap(); + (*last_next).p_next = self.inner.p_next as _; + self.inner.p_next = next_ptr as _; + } + self + } + #[doc = r" Calling build will **discard** all the lifetime information. Only call this if"] + #[doc = r" necessary! Builders implement `Deref` targeting their corresponding Vulkan struct,"] + #[doc = r" so references to builders can be passed directly to Vulkan functions."] + pub fn build(self) -> SubpassBeginInfoKHR { + self.inner + } +} +#[repr(C)] +#[derive(Copy, Clone, Debug)] +#[doc = ""] +pub struct SubpassEndInfoKHR { + pub s_type: StructureType, + pub p_next: *const c_void, +} +impl ::std::default::Default for SubpassEndInfoKHR { + fn default() -> SubpassEndInfoKHR { + SubpassEndInfoKHR { + s_type: StructureType::SUBPASS_END_INFO_KHR, + p_next: ::std::ptr::null(), + } + } +} +impl SubpassEndInfoKHR { + pub fn builder<'a>() -> SubpassEndInfoKHRBuilder<'a> { + SubpassEndInfoKHRBuilder { + inner: SubpassEndInfoKHR::default(), + marker: ::std::marker::PhantomData, + } + } +} +#[repr(transparent)] +pub struct SubpassEndInfoKHRBuilder<'a> { + inner: SubpassEndInfoKHR, + marker: ::std::marker::PhantomData<&'a ()>, +} +pub unsafe trait ExtendsSubpassEndInfoKHR {} +impl<'a> ::std::ops::Deref for SubpassEndInfoKHRBuilder<'a> { + type Target = SubpassEndInfoKHR; + fn deref(&self) -> &Self::Target { + &self.inner + } +} +impl<'a> ::std::ops::DerefMut for SubpassEndInfoKHRBuilder<'a> { + fn deref_mut(&mut self) -> &mut Self::Target { + &mut self.inner + } +} +impl<'a> SubpassEndInfoKHRBuilder<'a> { + #[doc = r" Prepends the given extension struct between the root and the first pointer. This"] + #[doc = r" method only exists on structs that can be passed to a function directly. Only"] + #[doc = r" valid extension structs can be pushed into the chain."] + #[doc = r" If the chain looks like `A -> B -> C`, and you call `builder.push_next(&mut D)`, then the"] + #[doc = r" chain will look like `A -> D -> B -> C`."] + pub fn push_next( + mut self, + next: &'a mut T, + ) -> SubpassEndInfoKHRBuilder<'a> { + unsafe { + let next_ptr = next as *mut T as *mut BaseOutStructure; + let last_next = ptr_chain_iter(next).last().unwrap(); + (*last_next).p_next = self.inner.p_next as _; + self.inner.p_next = next_ptr as _; + } + self + } + #[doc = r" Calling build will **discard** all the lifetime information. Only call this if"] + #[doc = r" necessary! 
Builders implement `Deref` targeting their corresponding Vulkan struct,"] + #[doc = r" so references to builders can be passed directly to Vulkan functions."] + pub fn build(self) -> SubpassEndInfoKHR { + self.inner + } +} +#[repr(C)] +#[derive(Copy, Clone, Default, Debug)] +#[doc = ""] +pub struct VertexInputBindingDivisorDescriptionEXT { + pub binding: u32, + pub divisor: u32, +} +impl VertexInputBindingDivisorDescriptionEXT { + pub fn builder<'a>() -> VertexInputBindingDivisorDescriptionEXTBuilder<'a> { + VertexInputBindingDivisorDescriptionEXTBuilder { + inner: VertexInputBindingDivisorDescriptionEXT::default(), + marker: ::std::marker::PhantomData, + } + } +} +#[repr(transparent)] +pub struct VertexInputBindingDivisorDescriptionEXTBuilder<'a> { + inner: VertexInputBindingDivisorDescriptionEXT, + marker: ::std::marker::PhantomData<&'a ()>, +} +impl<'a> ::std::ops::Deref for VertexInputBindingDivisorDescriptionEXTBuilder<'a> { + type Target = VertexInputBindingDivisorDescriptionEXT; + fn deref(&self) -> &Self::Target { + &self.inner + } +} +impl<'a> ::std::ops::DerefMut for VertexInputBindingDivisorDescriptionEXTBuilder<'a> { + fn deref_mut(&mut self) -> &mut Self::Target { + &mut self.inner + } +} +impl<'a> VertexInputBindingDivisorDescriptionEXTBuilder<'a> { + pub fn binding(mut self, binding: u32) -> VertexInputBindingDivisorDescriptionEXTBuilder<'a> { + self.inner.binding = binding; + self + } + pub fn divisor(mut self, divisor: u32) -> VertexInputBindingDivisorDescriptionEXTBuilder<'a> { + self.inner.divisor = divisor; + self + } + #[doc = r" Calling build will **discard** all the lifetime information. Only call this if"] + #[doc = r" necessary! Builders implement `Deref` targeting their corresponding Vulkan struct,"] + #[doc = r" so references to builders can be passed directly to Vulkan functions."] + pub fn build(self) -> VertexInputBindingDivisorDescriptionEXT { + self.inner + } +} +#[repr(C)] +#[derive(Copy, Clone, Debug)] +#[doc = ""] +pub struct PipelineVertexInputDivisorStateCreateInfoEXT { + pub s_type: StructureType, + pub p_next: *const c_void, + pub vertex_binding_divisor_count: u32, + pub p_vertex_binding_divisors: *const VertexInputBindingDivisorDescriptionEXT, +} +impl ::std::default::Default for PipelineVertexInputDivisorStateCreateInfoEXT { + fn default() -> PipelineVertexInputDivisorStateCreateInfoEXT { + PipelineVertexInputDivisorStateCreateInfoEXT { + s_type: StructureType::PIPELINE_VERTEX_INPUT_DIVISOR_STATE_CREATE_INFO_EXT, + p_next: ::std::ptr::null(), + vertex_binding_divisor_count: u32::default(), + p_vertex_binding_divisors: ::std::ptr::null(), + } + } +} +impl PipelineVertexInputDivisorStateCreateInfoEXT { + pub fn builder<'a>() -> PipelineVertexInputDivisorStateCreateInfoEXTBuilder<'a> { + PipelineVertexInputDivisorStateCreateInfoEXTBuilder { + inner: PipelineVertexInputDivisorStateCreateInfoEXT::default(), + marker: ::std::marker::PhantomData, + } + } +} +#[repr(transparent)] +pub struct PipelineVertexInputDivisorStateCreateInfoEXTBuilder<'a> { + inner: PipelineVertexInputDivisorStateCreateInfoEXT, + marker: ::std::marker::PhantomData<&'a ()>, +} +unsafe impl ExtendsPipelineVertexInputStateCreateInfo + for PipelineVertexInputDivisorStateCreateInfoEXTBuilder<'_> +{ +} +unsafe impl ExtendsPipelineVertexInputStateCreateInfo + for PipelineVertexInputDivisorStateCreateInfoEXT +{ +} +impl<'a> ::std::ops::Deref for PipelineVertexInputDivisorStateCreateInfoEXTBuilder<'a> { + type Target = PipelineVertexInputDivisorStateCreateInfoEXT; + fn deref(&self) -> 
&Self::Target { + &self.inner + } +} +impl<'a> ::std::ops::DerefMut for PipelineVertexInputDivisorStateCreateInfoEXTBuilder<'a> { + fn deref_mut(&mut self) -> &mut Self::Target { + &mut self.inner + } +} +impl<'a> PipelineVertexInputDivisorStateCreateInfoEXTBuilder<'a> { + pub fn vertex_binding_divisors( + mut self, + vertex_binding_divisors: &'a [VertexInputBindingDivisorDescriptionEXT], + ) -> PipelineVertexInputDivisorStateCreateInfoEXTBuilder<'a> { + self.inner.vertex_binding_divisor_count = vertex_binding_divisors.len() as _; + self.inner.p_vertex_binding_divisors = vertex_binding_divisors.as_ptr(); + self + } + #[doc = r" Calling build will **discard** all the lifetime information. Only call this if"] + #[doc = r" necessary! Builders implement `Deref` targeting their corresponding Vulkan struct,"] + #[doc = r" so references to builders can be passed directly to Vulkan functions."] + pub fn build(self) -> PipelineVertexInputDivisorStateCreateInfoEXT { + self.inner + } +} +#[repr(C)] +#[derive(Copy, Clone, Debug)] +#[doc = ""] +pub struct PhysicalDeviceVertexAttributeDivisorPropertiesEXT { + pub s_type: StructureType, + pub p_next: *mut c_void, + pub max_vertex_attrib_divisor: u32, +} +impl ::std::default::Default for PhysicalDeviceVertexAttributeDivisorPropertiesEXT { + fn default() -> PhysicalDeviceVertexAttributeDivisorPropertiesEXT { + PhysicalDeviceVertexAttributeDivisorPropertiesEXT { + s_type: StructureType::PHYSICAL_DEVICE_VERTEX_ATTRIBUTE_DIVISOR_PROPERTIES_EXT, + p_next: ::std::ptr::null_mut(), + max_vertex_attrib_divisor: u32::default(), + } + } +} +impl PhysicalDeviceVertexAttributeDivisorPropertiesEXT { + pub fn builder<'a>() -> PhysicalDeviceVertexAttributeDivisorPropertiesEXTBuilder<'a> { + PhysicalDeviceVertexAttributeDivisorPropertiesEXTBuilder { + inner: PhysicalDeviceVertexAttributeDivisorPropertiesEXT::default(), + marker: ::std::marker::PhantomData, + } + } +} +#[repr(transparent)] +pub struct PhysicalDeviceVertexAttributeDivisorPropertiesEXTBuilder<'a> { + inner: PhysicalDeviceVertexAttributeDivisorPropertiesEXT, + marker: ::std::marker::PhantomData<&'a ()>, +} +unsafe impl ExtendsPhysicalDeviceProperties2 + for PhysicalDeviceVertexAttributeDivisorPropertiesEXTBuilder<'_> +{ +} +unsafe impl ExtendsPhysicalDeviceProperties2 for PhysicalDeviceVertexAttributeDivisorPropertiesEXT {} +impl<'a> ::std::ops::Deref for PhysicalDeviceVertexAttributeDivisorPropertiesEXTBuilder<'a> { + type Target = PhysicalDeviceVertexAttributeDivisorPropertiesEXT; + fn deref(&self) -> &Self::Target { + &self.inner + } +} +impl<'a> ::std::ops::DerefMut for PhysicalDeviceVertexAttributeDivisorPropertiesEXTBuilder<'a> { + fn deref_mut(&mut self) -> &mut Self::Target { + &mut self.inner + } +} +impl<'a> PhysicalDeviceVertexAttributeDivisorPropertiesEXTBuilder<'a> { + pub fn max_vertex_attrib_divisor( + mut self, + max_vertex_attrib_divisor: u32, + ) -> PhysicalDeviceVertexAttributeDivisorPropertiesEXTBuilder<'a> { + self.inner.max_vertex_attrib_divisor = max_vertex_attrib_divisor; + self + } + #[doc = r" Calling build will **discard** all the lifetime information. Only call this if"] + #[doc = r" necessary! 
Builders implement `Deref` targeting their corresponding Vulkan struct,"] + #[doc = r" so references to builders can be passed directly to Vulkan functions."] + pub fn build(self) -> PhysicalDeviceVertexAttributeDivisorPropertiesEXT { + self.inner + } +} +#[repr(C)] +#[derive(Copy, Clone, Debug)] +#[doc = ""] +pub struct PhysicalDevicePCIBusInfoPropertiesEXT { + pub s_type: StructureType, + pub p_next: *mut c_void, + pub pci_domain: u32, + pub pci_bus: u32, + pub pci_device: u32, + pub pci_function: u32, +} +impl ::std::default::Default for PhysicalDevicePCIBusInfoPropertiesEXT { + fn default() -> PhysicalDevicePCIBusInfoPropertiesEXT { + PhysicalDevicePCIBusInfoPropertiesEXT { + s_type: StructureType::PHYSICAL_DEVICE_PCI_BUS_INFO_PROPERTIES_EXT, + p_next: ::std::ptr::null_mut(), + pci_domain: u32::default(), + pci_bus: u32::default(), + pci_device: u32::default(), + pci_function: u32::default(), + } + } +} +impl PhysicalDevicePCIBusInfoPropertiesEXT { + pub fn builder<'a>() -> PhysicalDevicePCIBusInfoPropertiesEXTBuilder<'a> { + PhysicalDevicePCIBusInfoPropertiesEXTBuilder { + inner: PhysicalDevicePCIBusInfoPropertiesEXT::default(), + marker: ::std::marker::PhantomData, + } + } +} +#[repr(transparent)] +pub struct PhysicalDevicePCIBusInfoPropertiesEXTBuilder<'a> { + inner: PhysicalDevicePCIBusInfoPropertiesEXT, + marker: ::std::marker::PhantomData<&'a ()>, +} +unsafe impl ExtendsPhysicalDeviceProperties2 for PhysicalDevicePCIBusInfoPropertiesEXTBuilder<'_> {} +unsafe impl ExtendsPhysicalDeviceProperties2 for PhysicalDevicePCIBusInfoPropertiesEXT {} +impl<'a> ::std::ops::Deref for PhysicalDevicePCIBusInfoPropertiesEXTBuilder<'a> { + type Target = PhysicalDevicePCIBusInfoPropertiesEXT; + fn deref(&self) -> &Self::Target { + &self.inner + } +} +impl<'a> ::std::ops::DerefMut for PhysicalDevicePCIBusInfoPropertiesEXTBuilder<'a> { + fn deref_mut(&mut self) -> &mut Self::Target { + &mut self.inner + } +} +impl<'a> PhysicalDevicePCIBusInfoPropertiesEXTBuilder<'a> { + pub fn pci_domain( + mut self, + pci_domain: u32, + ) -> PhysicalDevicePCIBusInfoPropertiesEXTBuilder<'a> { + self.inner.pci_domain = pci_domain; + self + } + pub fn pci_bus(mut self, pci_bus: u32) -> PhysicalDevicePCIBusInfoPropertiesEXTBuilder<'a> { + self.inner.pci_bus = pci_bus; + self + } + pub fn pci_device( + mut self, + pci_device: u32, + ) -> PhysicalDevicePCIBusInfoPropertiesEXTBuilder<'a> { + self.inner.pci_device = pci_device; + self + } + pub fn pci_function( + mut self, + pci_function: u32, + ) -> PhysicalDevicePCIBusInfoPropertiesEXTBuilder<'a> { + self.inner.pci_function = pci_function; + self + } + #[doc = r" Calling build will **discard** all the lifetime information. Only call this if"] + #[doc = r" necessary! 
Builders implement `Deref` targeting their corresponding Vulkan struct,"] + #[doc = r" so references to builders can be passed directly to Vulkan functions."] + pub fn build(self) -> PhysicalDevicePCIBusInfoPropertiesEXT { + self.inner + } +} +#[repr(C)] +#[derive(Copy, Clone, Debug)] +#[doc = ""] +pub struct ImportAndroidHardwareBufferInfoANDROID { + pub s_type: StructureType, + pub p_next: *const c_void, + pub buffer: *mut AHardwareBuffer, +} +impl ::std::default::Default for ImportAndroidHardwareBufferInfoANDROID { + fn default() -> ImportAndroidHardwareBufferInfoANDROID { + ImportAndroidHardwareBufferInfoANDROID { + s_type: StructureType::IMPORT_ANDROID_HARDWARE_BUFFER_INFO_ANDROID, + p_next: ::std::ptr::null(), + buffer: ::std::ptr::null_mut(), + } + } +} +impl ImportAndroidHardwareBufferInfoANDROID { + pub fn builder<'a>() -> ImportAndroidHardwareBufferInfoANDROIDBuilder<'a> { + ImportAndroidHardwareBufferInfoANDROIDBuilder { + inner: ImportAndroidHardwareBufferInfoANDROID::default(), + marker: ::std::marker::PhantomData, + } + } +} +#[repr(transparent)] +pub struct ImportAndroidHardwareBufferInfoANDROIDBuilder<'a> { + inner: ImportAndroidHardwareBufferInfoANDROID, + marker: ::std::marker::PhantomData<&'a ()>, +} +unsafe impl ExtendsMemoryAllocateInfo for ImportAndroidHardwareBufferInfoANDROIDBuilder<'_> {} +unsafe impl ExtendsMemoryAllocateInfo for ImportAndroidHardwareBufferInfoANDROID {} +impl<'a> ::std::ops::Deref for ImportAndroidHardwareBufferInfoANDROIDBuilder<'a> { + type Target = ImportAndroidHardwareBufferInfoANDROID; + fn deref(&self) -> &Self::Target { + &self.inner + } +} +impl<'a> ::std::ops::DerefMut for ImportAndroidHardwareBufferInfoANDROIDBuilder<'a> { + fn deref_mut(&mut self) -> &mut Self::Target { + &mut self.inner + } +} +impl<'a> ImportAndroidHardwareBufferInfoANDROIDBuilder<'a> { + pub fn buffer( + mut self, + buffer: *mut AHardwareBuffer, + ) -> ImportAndroidHardwareBufferInfoANDROIDBuilder<'a> { + self.inner.buffer = buffer; + self + } + #[doc = r" Calling build will **discard** all the lifetime information. Only call this if"] + #[doc = r" necessary! 
Builders implement `Deref` targeting their corresponding Vulkan struct,"] + #[doc = r" so references to builders can be passed directly to Vulkan functions."] + pub fn build(self) -> ImportAndroidHardwareBufferInfoANDROID { + self.inner + } +} +#[repr(C)] +#[derive(Copy, Clone, Debug)] +#[doc = ""] +pub struct AndroidHardwareBufferUsageANDROID { + pub s_type: StructureType, + pub p_next: *mut c_void, + pub android_hardware_buffer_usage: u64, +} +impl ::std::default::Default for AndroidHardwareBufferUsageANDROID { + fn default() -> AndroidHardwareBufferUsageANDROID { + AndroidHardwareBufferUsageANDROID { + s_type: StructureType::ANDROID_HARDWARE_BUFFER_USAGE_ANDROID, + p_next: ::std::ptr::null_mut(), + android_hardware_buffer_usage: u64::default(), + } + } +} +impl AndroidHardwareBufferUsageANDROID { + pub fn builder<'a>() -> AndroidHardwareBufferUsageANDROIDBuilder<'a> { + AndroidHardwareBufferUsageANDROIDBuilder { + inner: AndroidHardwareBufferUsageANDROID::default(), + marker: ::std::marker::PhantomData, + } + } +} +#[repr(transparent)] +pub struct AndroidHardwareBufferUsageANDROIDBuilder<'a> { + inner: AndroidHardwareBufferUsageANDROID, + marker: ::std::marker::PhantomData<&'a ()>, +} +unsafe impl ExtendsImageFormatProperties2 for AndroidHardwareBufferUsageANDROIDBuilder<'_> {} +unsafe impl ExtendsImageFormatProperties2 for AndroidHardwareBufferUsageANDROID {} +impl<'a> ::std::ops::Deref for AndroidHardwareBufferUsageANDROIDBuilder<'a> { + type Target = AndroidHardwareBufferUsageANDROID; + fn deref(&self) -> &Self::Target { + &self.inner + } +} +impl<'a> ::std::ops::DerefMut for AndroidHardwareBufferUsageANDROIDBuilder<'a> { + fn deref_mut(&mut self) -> &mut Self::Target { + &mut self.inner + } +} +impl<'a> AndroidHardwareBufferUsageANDROIDBuilder<'a> { + pub fn android_hardware_buffer_usage( + mut self, + android_hardware_buffer_usage: u64, + ) -> AndroidHardwareBufferUsageANDROIDBuilder<'a> { + self.inner.android_hardware_buffer_usage = android_hardware_buffer_usage; + self + } + #[doc = r" Calling build will **discard** all the lifetime information. Only call this if"] + #[doc = r" necessary! 
Builders implement `Deref` targeting their corresponding Vulkan struct,"]
+    #[doc = r" so references to builders can be passed directly to Vulkan functions."]
+    pub fn build(self) -> AndroidHardwareBufferUsageANDROID {
+        self.inner
+    }
+}
+#[repr(C)]
+#[derive(Copy, Clone, Debug)]
+#[doc = ""]
+pub struct AndroidHardwareBufferPropertiesANDROID {
+    pub s_type: StructureType,
+    pub p_next: *mut c_void,
+    pub allocation_size: DeviceSize,
+    pub memory_type_bits: u32,
+}
+impl ::std::default::Default for AndroidHardwareBufferPropertiesANDROID {
+    fn default() -> AndroidHardwareBufferPropertiesANDROID {
+        AndroidHardwareBufferPropertiesANDROID {
+            s_type: StructureType::ANDROID_HARDWARE_BUFFER_PROPERTIES_ANDROID,
+            p_next: ::std::ptr::null_mut(),
+            allocation_size: DeviceSize::default(),
+            memory_type_bits: u32::default(),
+        }
+    }
+}
+impl AndroidHardwareBufferPropertiesANDROID {
+    pub fn builder<'a>() -> AndroidHardwareBufferPropertiesANDROIDBuilder<'a> {
+        AndroidHardwareBufferPropertiesANDROIDBuilder {
+            inner: AndroidHardwareBufferPropertiesANDROID::default(),
+            marker: ::std::marker::PhantomData,
+        }
+    }
+}
+#[repr(transparent)]
+pub struct AndroidHardwareBufferPropertiesANDROIDBuilder<'a> {
+    inner: AndroidHardwareBufferPropertiesANDROID,
+    marker: ::std::marker::PhantomData<&'a ()>,
+}
+pub unsafe trait ExtendsAndroidHardwareBufferPropertiesANDROID {}
+impl<'a> ::std::ops::Deref for AndroidHardwareBufferPropertiesANDROIDBuilder<'a> {
+    type Target = AndroidHardwareBufferPropertiesANDROID;
+    fn deref(&self) -> &Self::Target {
+        &self.inner
+    }
+}
+impl<'a> ::std::ops::DerefMut for AndroidHardwareBufferPropertiesANDROIDBuilder<'a> {
+    fn deref_mut(&mut self) -> &mut Self::Target {
+        &mut self.inner
+    }
+}
+impl<'a> AndroidHardwareBufferPropertiesANDROIDBuilder<'a> {
+    pub fn allocation_size(
+        mut self,
+        allocation_size: DeviceSize,
+    ) -> AndroidHardwareBufferPropertiesANDROIDBuilder<'a> {
+        self.inner.allocation_size = allocation_size;
+        self
+    }
+    pub fn memory_type_bits(
+        mut self,
+        memory_type_bits: u32,
+    ) -> AndroidHardwareBufferPropertiesANDROIDBuilder<'a> {
+        self.inner.memory_type_bits = memory_type_bits;
+        self
+    }
+    #[doc = r" Prepends the given extension struct between the root and the first pointer. This"]
+    #[doc = r" method only exists on structs that can be passed to a function directly. Only"]
+    #[doc = r" valid extension structs can be pushed into the chain."]
+    #[doc = r" If the chain looks like `A -> B -> C`, and you call `builder.push_next(&mut D)`, then the"]
+    #[doc = r" chain will look like `A -> D -> B -> C`."]
+    pub fn push_next<T: ExtendsAndroidHardwareBufferPropertiesANDROID>(
+        mut self,
+        next: &'a mut T,
+    ) -> AndroidHardwareBufferPropertiesANDROIDBuilder<'a> {
+        unsafe {
+            let next_ptr = next as *mut T as *mut BaseOutStructure;
+            let last_next = ptr_chain_iter(next).last().unwrap();
+            (*last_next).p_next = self.inner.p_next as _;
+            self.inner.p_next = next_ptr as _;
+        }
+        self
+    }
+    #[doc = r" Calling build will **discard** all the lifetime information. Only call this if"]
+    #[doc = r" necessary! Builders implement `Deref` targeting their corresponding Vulkan struct,"]
+    #[doc = r" so references to builders can be passed directly to Vulkan functions."]
+    pub fn build(self) -> AndroidHardwareBufferPropertiesANDROID {
+        self.inner
+    }
+}
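
Since `AndroidHardwareBufferFormatPropertiesANDROID` (defined later in this file) implements `ExtendsAndroidHardwareBufferPropertiesANDROID`, it can be spliced into the chain exactly as the `push_next` doc comment describes. A sketch of that query chain (illustrative only; the `device` and `buffer` handles in the comment are placeholders):

```rust
use ash::vk;

fn ahb_properties_chain() {
    // push_next inserts the extension struct right behind the root struct,
    // so a chain A -> B becomes A -> D -> B.
    let mut format_props = vk::AndroidHardwareBufferFormatPropertiesANDROID::default();
    let mut props = vk::AndroidHardwareBufferPropertiesANDROID::builder()
        .push_next(&mut format_props);
    // vkGetAndroidHardwareBufferPropertiesANDROID(device, buffer, &mut *props)
    // would then fill allocation_size, memory_type_bits, and the chained
    // format properties in one call.
    let _ = &mut props;
}
```
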
+#[repr(C)]
+#[derive(Copy, Clone, Debug)]
+#[doc = ""]
+pub struct MemoryGetAndroidHardwareBufferInfoANDROID {
+    pub s_type: StructureType,
+    pub p_next: *const c_void,
+    pub memory: DeviceMemory,
+}
+impl ::std::default::Default for MemoryGetAndroidHardwareBufferInfoANDROID {
+    fn default() -> MemoryGetAndroidHardwareBufferInfoANDROID {
+        MemoryGetAndroidHardwareBufferInfoANDROID {
+            s_type: StructureType::MEMORY_GET_ANDROID_HARDWARE_BUFFER_INFO_ANDROID,
+            p_next: ::std::ptr::null(),
+            memory: DeviceMemory::default(),
+        }
+    }
+}
+impl MemoryGetAndroidHardwareBufferInfoANDROID {
+    pub fn builder<'a>() -> MemoryGetAndroidHardwareBufferInfoANDROIDBuilder<'a> {
+        MemoryGetAndroidHardwareBufferInfoANDROIDBuilder {
+            inner: MemoryGetAndroidHardwareBufferInfoANDROID::default(),
+            marker: ::std::marker::PhantomData,
+        }
+    }
+}
+#[repr(transparent)]
+pub struct MemoryGetAndroidHardwareBufferInfoANDROIDBuilder<'a> {
+    inner: MemoryGetAndroidHardwareBufferInfoANDROID,
+    marker: ::std::marker::PhantomData<&'a ()>,
+}
+pub unsafe trait ExtendsMemoryGetAndroidHardwareBufferInfoANDROID {}
+impl<'a> ::std::ops::Deref for MemoryGetAndroidHardwareBufferInfoANDROIDBuilder<'a> {
+    type Target = MemoryGetAndroidHardwareBufferInfoANDROID;
+    fn deref(&self) -> &Self::Target {
+        &self.inner
+    }
+}
+impl<'a> ::std::ops::DerefMut for MemoryGetAndroidHardwareBufferInfoANDROIDBuilder<'a> {
+    fn deref_mut(&mut self) -> &mut Self::Target {
+        &mut self.inner
+    }
+}
+impl<'a> MemoryGetAndroidHardwareBufferInfoANDROIDBuilder<'a> {
+    pub fn memory(
+        mut self,
+        memory: DeviceMemory,
+    ) -> MemoryGetAndroidHardwareBufferInfoANDROIDBuilder<'a> {
+        self.inner.memory = memory;
+        self
+    }
+    #[doc = r" Prepends the given extension struct between the root and the first pointer. This"]
+    #[doc = r" method only exists on structs that can be passed to a function directly. Only"]
+    #[doc = r" valid extension structs can be pushed into the chain."]
+    #[doc = r" If the chain looks like `A -> B -> C`, and you call `builder.push_next(&mut D)`, then the"]
+    #[doc = r" chain will look like `A -> D -> B -> C`."]
+    pub fn push_next<T: ExtendsMemoryGetAndroidHardwareBufferInfoANDROID>(
+        mut self,
+        next: &'a mut T,
+    ) -> MemoryGetAndroidHardwareBufferInfoANDROIDBuilder<'a> {
+        unsafe {
+            let next_ptr = next as *mut T as *mut BaseOutStructure;
+            let last_next = ptr_chain_iter(next).last().unwrap();
+            (*last_next).p_next = self.inner.p_next as _;
+            self.inner.p_next = next_ptr as _;
+        }
+        self
+    }
+    #[doc = r" Calling build will **discard** all the lifetime information. Only call this if"]
+    #[doc = r" necessary! 
Builders implement `Deref` targeting their corresponding Vulkan struct,"] + #[doc = r" so references to builders can be passed directly to Vulkan functions."] + pub fn build(self) -> MemoryGetAndroidHardwareBufferInfoANDROID { + self.inner + } +} +#[repr(C)] +#[derive(Copy, Clone, Debug)] +#[doc = ""] +pub struct AndroidHardwareBufferFormatPropertiesANDROID { + pub s_type: StructureType, + pub p_next: *mut c_void, + pub format: Format, + pub external_format: u64, + pub format_features: FormatFeatureFlags, + pub sampler_ycbcr_conversion_components: ComponentMapping, + pub suggested_ycbcr_model: SamplerYcbcrModelConversion, + pub suggested_ycbcr_range: SamplerYcbcrRange, + pub suggested_x_chroma_offset: ChromaLocation, + pub suggested_y_chroma_offset: ChromaLocation, +} +impl ::std::default::Default for AndroidHardwareBufferFormatPropertiesANDROID { + fn default() -> AndroidHardwareBufferFormatPropertiesANDROID { + AndroidHardwareBufferFormatPropertiesANDROID { + s_type: StructureType::ANDROID_HARDWARE_BUFFER_FORMAT_PROPERTIES_ANDROID, + p_next: ::std::ptr::null_mut(), + format: Format::default(), + external_format: u64::default(), + format_features: FormatFeatureFlags::default(), + sampler_ycbcr_conversion_components: ComponentMapping::default(), + suggested_ycbcr_model: SamplerYcbcrModelConversion::default(), + suggested_ycbcr_range: SamplerYcbcrRange::default(), + suggested_x_chroma_offset: ChromaLocation::default(), + suggested_y_chroma_offset: ChromaLocation::default(), + } + } +} +impl AndroidHardwareBufferFormatPropertiesANDROID { + pub fn builder<'a>() -> AndroidHardwareBufferFormatPropertiesANDROIDBuilder<'a> { + AndroidHardwareBufferFormatPropertiesANDROIDBuilder { + inner: AndroidHardwareBufferFormatPropertiesANDROID::default(), + marker: ::std::marker::PhantomData, + } + } +} +#[repr(transparent)] +pub struct AndroidHardwareBufferFormatPropertiesANDROIDBuilder<'a> { + inner: AndroidHardwareBufferFormatPropertiesANDROID, + marker: ::std::marker::PhantomData<&'a ()>, +} +unsafe impl ExtendsAndroidHardwareBufferPropertiesANDROID + for AndroidHardwareBufferFormatPropertiesANDROIDBuilder<'_> +{ +} +unsafe impl ExtendsAndroidHardwareBufferPropertiesANDROID + for AndroidHardwareBufferFormatPropertiesANDROID +{ +} +impl<'a> ::std::ops::Deref for AndroidHardwareBufferFormatPropertiesANDROIDBuilder<'a> { + type Target = AndroidHardwareBufferFormatPropertiesANDROID; + fn deref(&self) -> &Self::Target { + &self.inner + } +} +impl<'a> ::std::ops::DerefMut for AndroidHardwareBufferFormatPropertiesANDROIDBuilder<'a> { + fn deref_mut(&mut self) -> &mut Self::Target { + &mut self.inner + } +} +impl<'a> AndroidHardwareBufferFormatPropertiesANDROIDBuilder<'a> { + pub fn format( + mut self, + format: Format, + ) -> AndroidHardwareBufferFormatPropertiesANDROIDBuilder<'a> { + self.inner.format = format; + self + } + pub fn external_format( + mut self, + external_format: u64, + ) -> AndroidHardwareBufferFormatPropertiesANDROIDBuilder<'a> { + self.inner.external_format = external_format; + self + } + pub fn format_features( + mut self, + format_features: FormatFeatureFlags, + ) -> AndroidHardwareBufferFormatPropertiesANDROIDBuilder<'a> { + self.inner.format_features = format_features; + self + } + pub fn sampler_ycbcr_conversion_components( + mut self, + sampler_ycbcr_conversion_components: ComponentMapping, + ) -> AndroidHardwareBufferFormatPropertiesANDROIDBuilder<'a> { + self.inner.sampler_ycbcr_conversion_components = sampler_ycbcr_conversion_components; + self + } + pub fn suggested_ycbcr_model( + 
mut self, + suggested_ycbcr_model: SamplerYcbcrModelConversion, + ) -> AndroidHardwareBufferFormatPropertiesANDROIDBuilder<'a> { + self.inner.suggested_ycbcr_model = suggested_ycbcr_model; + self + } + pub fn suggested_ycbcr_range( + mut self, + suggested_ycbcr_range: SamplerYcbcrRange, + ) -> AndroidHardwareBufferFormatPropertiesANDROIDBuilder<'a> { + self.inner.suggested_ycbcr_range = suggested_ycbcr_range; + self + } + pub fn suggested_x_chroma_offset( + mut self, + suggested_x_chroma_offset: ChromaLocation, + ) -> AndroidHardwareBufferFormatPropertiesANDROIDBuilder<'a> { + self.inner.suggested_x_chroma_offset = suggested_x_chroma_offset; + self + } + pub fn suggested_y_chroma_offset( + mut self, + suggested_y_chroma_offset: ChromaLocation, + ) -> AndroidHardwareBufferFormatPropertiesANDROIDBuilder<'a> { + self.inner.suggested_y_chroma_offset = suggested_y_chroma_offset; + self + } + #[doc = r" Calling build will **discard** all the lifetime information. Only call this if"] + #[doc = r" necessary! Builders implement `Deref` targeting their corresponding Vulkan struct,"] + #[doc = r" so references to builders can be passed directly to Vulkan functions."] + pub fn build(self) -> AndroidHardwareBufferFormatPropertiesANDROID { + self.inner + } +} +#[repr(C)] +#[derive(Copy, Clone, Debug)] +#[doc = ""] +pub struct CommandBufferInheritanceConditionalRenderingInfoEXT { + pub s_type: StructureType, + pub p_next: *const c_void, + pub conditional_rendering_enable: Bool32, +} +impl ::std::default::Default for CommandBufferInheritanceConditionalRenderingInfoEXT { + fn default() -> CommandBufferInheritanceConditionalRenderingInfoEXT { + CommandBufferInheritanceConditionalRenderingInfoEXT { + s_type: StructureType::COMMAND_BUFFER_INHERITANCE_CONDITIONAL_RENDERING_INFO_EXT, + p_next: ::std::ptr::null(), + conditional_rendering_enable: Bool32::default(), + } + } +} +impl CommandBufferInheritanceConditionalRenderingInfoEXT { + pub fn builder<'a>() -> CommandBufferInheritanceConditionalRenderingInfoEXTBuilder<'a> { + CommandBufferInheritanceConditionalRenderingInfoEXTBuilder { + inner: CommandBufferInheritanceConditionalRenderingInfoEXT::default(), + marker: ::std::marker::PhantomData, + } + } +} +#[repr(transparent)] +pub struct CommandBufferInheritanceConditionalRenderingInfoEXTBuilder<'a> { + inner: CommandBufferInheritanceConditionalRenderingInfoEXT, + marker: ::std::marker::PhantomData<&'a ()>, +} +unsafe impl ExtendsCommandBufferInheritanceInfo + for CommandBufferInheritanceConditionalRenderingInfoEXTBuilder<'_> +{ +} +unsafe impl ExtendsCommandBufferInheritanceInfo + for CommandBufferInheritanceConditionalRenderingInfoEXT +{ +} +impl<'a> ::std::ops::Deref for CommandBufferInheritanceConditionalRenderingInfoEXTBuilder<'a> { + type Target = CommandBufferInheritanceConditionalRenderingInfoEXT; + fn deref(&self) -> &Self::Target { + &self.inner + } +} +impl<'a> ::std::ops::DerefMut for CommandBufferInheritanceConditionalRenderingInfoEXTBuilder<'a> { + fn deref_mut(&mut self) -> &mut Self::Target { + &mut self.inner + } +} +impl<'a> CommandBufferInheritanceConditionalRenderingInfoEXTBuilder<'a> { + pub fn conditional_rendering_enable( + mut self, + conditional_rendering_enable: bool, + ) -> CommandBufferInheritanceConditionalRenderingInfoEXTBuilder<'a> { + self.inner.conditional_rendering_enable = conditional_rendering_enable.into(); + self + } + #[doc = r" Calling build will **discard** all the lifetime information. Only call this if"] + #[doc = r" necessary! 
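
`AndroidHardwareBufferFormatPropertiesANDROID` is an output struct: it implements `ExtendsAndroidHardwareBufferPropertiesANDROID`, so it is chained into the base `AndroidHardwareBufferPropertiesANDROID` query and filled in by the driver. A minimal sketch of that chaining, assuming these generated types are reachable through the `ash` crate's `vk` module and that the chain-root builder exposes the usual `push_next`:

```rust
use ash::vk;

// Chain the format-properties struct into the base properties struct so a
// single vkGetAndroidHardwareBufferPropertiesANDROID call fills in both.
let mut format_props = vk::AndroidHardwareBufferFormatPropertiesANDROID::default();
let props = vk::AndroidHardwareBufferPropertiesANDROID::builder()
    .push_next(&mut format_props);
// ... hand the chain to the (hypothetical here) extension query call ...
drop(props);
// After the query the driver has filled the chained struct; external_format
// is non-zero when the buffer has no exact VkFormat equivalent.
let _model = format_props.suggested_ycbcr_model;
let _external = format_props.external_format;
```
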
Builders implement `Deref` targeting their corresponding Vulkan struct,"] + #[doc = r" so references to builders can be passed directly to Vulkan functions."] + pub fn build(self) -> CommandBufferInheritanceConditionalRenderingInfoEXT { + self.inner + } +} +#[repr(C)] +#[derive(Copy, Clone, Debug)] +#[doc = ""] +pub struct ExternalFormatANDROID { + pub s_type: StructureType, + pub p_next: *mut c_void, + pub external_format: u64, +} +impl ::std::default::Default for ExternalFormatANDROID { + fn default() -> ExternalFormatANDROID { + ExternalFormatANDROID { + s_type: StructureType::EXTERNAL_FORMAT_ANDROID, + p_next: ::std::ptr::null_mut(), + external_format: u64::default(), + } + } +} +impl ExternalFormatANDROID { + pub fn builder<'a>() -> ExternalFormatANDROIDBuilder<'a> { + ExternalFormatANDROIDBuilder { + inner: ExternalFormatANDROID::default(), + marker: ::std::marker::PhantomData, + } + } +} +#[repr(transparent)] +pub struct ExternalFormatANDROIDBuilder<'a> { + inner: ExternalFormatANDROID, + marker: ::std::marker::PhantomData<&'a ()>, +} +unsafe impl ExtendsImageCreateInfo for ExternalFormatANDROIDBuilder<'_> {} +unsafe impl ExtendsImageCreateInfo for ExternalFormatANDROID {} +unsafe impl ExtendsSamplerYcbcrConversionCreateInfo for ExternalFormatANDROIDBuilder<'_> {} +unsafe impl ExtendsSamplerYcbcrConversionCreateInfo for ExternalFormatANDROID {} +impl<'a> ::std::ops::Deref for ExternalFormatANDROIDBuilder<'a> { + type Target = ExternalFormatANDROID; + fn deref(&self) -> &Self::Target { + &self.inner + } +} +impl<'a> ::std::ops::DerefMut for ExternalFormatANDROIDBuilder<'a> { + fn deref_mut(&mut self) -> &mut Self::Target { + &mut self.inner + } +} +impl<'a> ExternalFormatANDROIDBuilder<'a> { + pub fn external_format(mut self, external_format: u64) -> ExternalFormatANDROIDBuilder<'a> { + self.inner.external_format = external_format; + self + } + #[doc = r" Calling build will **discard** all the lifetime information. Only call this if"] + #[doc = r" necessary! 
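
`ExternalFormatANDROID` is notable for extending two different chain roots: the same struct can tag either an `ImageCreateInfo` or a `SamplerYcbcrConversionCreateInfo` with a driver-opaque `external_format` (typically the value reported by the format-properties query above). A hedged sketch of the image case, with a placeholder format value:

```rust
use ash::vk;

// Placeholder standing in for the external_format reported by the
// AndroidHardwareBufferFormatPropertiesANDROID query above.
let queried_external_format: u64 = 0;

let mut ext_format = vk::ExternalFormatANDROID::builder()
    .external_format(queried_external_format);
// The identical push also works on SamplerYcbcrConversionCreateInfo::builder().
let _image_info = vk::ImageCreateInfo::builder()
    .image_type(vk::ImageType::TYPE_2D)
    .push_next(&mut ext_format);
```
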
Builders implement `Deref` targeting their corresponding Vulkan struct,"] + #[doc = r" so references to builders can be passed directly to Vulkan functions."] + pub fn build(self) -> ExternalFormatANDROID { + self.inner + } +} +#[repr(C)] +#[derive(Copy, Clone, Debug)] +#[doc = ""] +pub struct PhysicalDevice8BitStorageFeaturesKHR { + pub s_type: StructureType, + pub p_next: *mut c_void, + pub storage_buffer8_bit_access: Bool32, + pub uniform_and_storage_buffer8_bit_access: Bool32, + pub storage_push_constant8: Bool32, +} +impl ::std::default::Default for PhysicalDevice8BitStorageFeaturesKHR { + fn default() -> PhysicalDevice8BitStorageFeaturesKHR { + PhysicalDevice8BitStorageFeaturesKHR { + s_type: StructureType::PHYSICAL_DEVICE_8BIT_STORAGE_FEATURES_KHR, + p_next: ::std::ptr::null_mut(), + storage_buffer8_bit_access: Bool32::default(), + uniform_and_storage_buffer8_bit_access: Bool32::default(), + storage_push_constant8: Bool32::default(), + } + } +} +impl PhysicalDevice8BitStorageFeaturesKHR { + pub fn builder<'a>() -> PhysicalDevice8BitStorageFeaturesKHRBuilder<'a> { + PhysicalDevice8BitStorageFeaturesKHRBuilder { + inner: PhysicalDevice8BitStorageFeaturesKHR::default(), + marker: ::std::marker::PhantomData, + } + } +} +#[repr(transparent)] +pub struct PhysicalDevice8BitStorageFeaturesKHRBuilder<'a> { + inner: PhysicalDevice8BitStorageFeaturesKHR, + marker: ::std::marker::PhantomData<&'a ()>, +} +unsafe impl ExtendsDeviceCreateInfo for PhysicalDevice8BitStorageFeaturesKHRBuilder<'_> {} +unsafe impl ExtendsDeviceCreateInfo for PhysicalDevice8BitStorageFeaturesKHR {} +impl<'a> ::std::ops::Deref for PhysicalDevice8BitStorageFeaturesKHRBuilder<'a> { + type Target = PhysicalDevice8BitStorageFeaturesKHR; + fn deref(&self) -> &Self::Target { + &self.inner + } +} +impl<'a> ::std::ops::DerefMut for PhysicalDevice8BitStorageFeaturesKHRBuilder<'a> { + fn deref_mut(&mut self) -> &mut Self::Target { + &mut self.inner + } +} +impl<'a> PhysicalDevice8BitStorageFeaturesKHRBuilder<'a> { + pub fn storage_buffer8_bit_access( + mut self, + storage_buffer8_bit_access: bool, + ) -> PhysicalDevice8BitStorageFeaturesKHRBuilder<'a> { + self.inner.storage_buffer8_bit_access = storage_buffer8_bit_access.into(); + self + } + pub fn uniform_and_storage_buffer8_bit_access( + mut self, + uniform_and_storage_buffer8_bit_access: bool, + ) -> PhysicalDevice8BitStorageFeaturesKHRBuilder<'a> { + self.inner.uniform_and_storage_buffer8_bit_access = + uniform_and_storage_buffer8_bit_access.into(); + self + } + pub fn storage_push_constant8( + mut self, + storage_push_constant8: bool, + ) -> PhysicalDevice8BitStorageFeaturesKHRBuilder<'a> { + self.inner.storage_push_constant8 = storage_push_constant8.into(); + self + } + #[doc = r" Calling build will **discard** all the lifetime information. Only call this if"] + #[doc = r" necessary! 
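
`PhysicalDevice8BitStorageFeaturesKHR` follows the pattern shared by every `ExtendsDeviceCreateInfo` feature struct in this file: fill it in with the builder (the `bool` setters convert to `Bool32` through `.into()`) and prepend it to the `DeviceCreateInfo` chain. A minimal sketch, assuming the standard `ash` `vk` paths:

```rust
use ash::vk;

// Request 8-bit storage access at device creation; the same three lines
// work for any of the ExtendsDeviceCreateInfo structs that follow.
let mut storage8 = vk::PhysicalDevice8BitStorageFeaturesKHR::builder()
    .storage_buffer8_bit_access(true) // bool -> Bool32 handled by the setter
    .uniform_and_storage_buffer8_bit_access(true);
// `&device_info` derefs to &vk::DeviceCreateInfo, so it can be passed to
// create_device without ever calling build().
let _device_info = vk::DeviceCreateInfo::builder()
    .push_next(&mut storage8);
```
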
Builders implement `Deref` targeting their corresponding Vulkan struct,"] + #[doc = r" so references to builders can be passed directly to Vulkan functions."] + pub fn build(self) -> PhysicalDevice8BitStorageFeaturesKHR { + self.inner + } +} +#[repr(C)] +#[derive(Copy, Clone, Debug)] +#[doc = ""] +pub struct PhysicalDeviceConditionalRenderingFeaturesEXT { + pub s_type: StructureType, + pub p_next: *mut c_void, + pub conditional_rendering: Bool32, + pub inherited_conditional_rendering: Bool32, +} +impl ::std::default::Default for PhysicalDeviceConditionalRenderingFeaturesEXT { + fn default() -> PhysicalDeviceConditionalRenderingFeaturesEXT { + PhysicalDeviceConditionalRenderingFeaturesEXT { + s_type: StructureType::PHYSICAL_DEVICE_CONDITIONAL_RENDERING_FEATURES_EXT, + p_next: ::std::ptr::null_mut(), + conditional_rendering: Bool32::default(), + inherited_conditional_rendering: Bool32::default(), + } + } +} +impl PhysicalDeviceConditionalRenderingFeaturesEXT { + pub fn builder<'a>() -> PhysicalDeviceConditionalRenderingFeaturesEXTBuilder<'a> { + PhysicalDeviceConditionalRenderingFeaturesEXTBuilder { + inner: PhysicalDeviceConditionalRenderingFeaturesEXT::default(), + marker: ::std::marker::PhantomData, + } + } +} +#[repr(transparent)] +pub struct PhysicalDeviceConditionalRenderingFeaturesEXTBuilder<'a> { + inner: PhysicalDeviceConditionalRenderingFeaturesEXT, + marker: ::std::marker::PhantomData<&'a ()>, +} +unsafe impl ExtendsDeviceCreateInfo for PhysicalDeviceConditionalRenderingFeaturesEXTBuilder<'_> {} +unsafe impl ExtendsDeviceCreateInfo for PhysicalDeviceConditionalRenderingFeaturesEXT {} +impl<'a> ::std::ops::Deref for PhysicalDeviceConditionalRenderingFeaturesEXTBuilder<'a> { + type Target = PhysicalDeviceConditionalRenderingFeaturesEXT; + fn deref(&self) -> &Self::Target { + &self.inner + } +} +impl<'a> ::std::ops::DerefMut for PhysicalDeviceConditionalRenderingFeaturesEXTBuilder<'a> { + fn deref_mut(&mut self) -> &mut Self::Target { + &mut self.inner + } +} +impl<'a> PhysicalDeviceConditionalRenderingFeaturesEXTBuilder<'a> { + pub fn conditional_rendering( + mut self, + conditional_rendering: bool, + ) -> PhysicalDeviceConditionalRenderingFeaturesEXTBuilder<'a> { + self.inner.conditional_rendering = conditional_rendering.into(); + self + } + pub fn inherited_conditional_rendering( + mut self, + inherited_conditional_rendering: bool, + ) -> PhysicalDeviceConditionalRenderingFeaturesEXTBuilder<'a> { + self.inner.inherited_conditional_rendering = inherited_conditional_rendering.into(); + self + } + #[doc = r" Calling build will **discard** all the lifetime information. Only call this if"] + #[doc = r" necessary! 
Builders implement `Deref` targeting their corresponding Vulkan struct,"] + #[doc = r" so references to builders can be passed directly to Vulkan functions."] + pub fn build(self) -> PhysicalDeviceConditionalRenderingFeaturesEXT { + self.inner + } +} +#[repr(C)] +#[derive(Copy, Clone, Debug)] +#[doc = ""] +pub struct PhysicalDeviceVulkanMemoryModelFeaturesKHR { + pub s_type: StructureType, + pub p_next: *mut c_void, + pub vulkan_memory_model: Bool32, + pub vulkan_memory_model_device_scope: Bool32, +} +impl ::std::default::Default for PhysicalDeviceVulkanMemoryModelFeaturesKHR { + fn default() -> PhysicalDeviceVulkanMemoryModelFeaturesKHR { + PhysicalDeviceVulkanMemoryModelFeaturesKHR { + s_type: StructureType::PHYSICAL_DEVICE_VULKAN_MEMORY_MODEL_FEATURES_KHR, + p_next: ::std::ptr::null_mut(), + vulkan_memory_model: Bool32::default(), + vulkan_memory_model_device_scope: Bool32::default(), + } + } +} +impl PhysicalDeviceVulkanMemoryModelFeaturesKHR { + pub fn builder<'a>() -> PhysicalDeviceVulkanMemoryModelFeaturesKHRBuilder<'a> { + PhysicalDeviceVulkanMemoryModelFeaturesKHRBuilder { + inner: PhysicalDeviceVulkanMemoryModelFeaturesKHR::default(), + marker: ::std::marker::PhantomData, + } + } +} +#[repr(transparent)] +pub struct PhysicalDeviceVulkanMemoryModelFeaturesKHRBuilder<'a> { + inner: PhysicalDeviceVulkanMemoryModelFeaturesKHR, + marker: ::std::marker::PhantomData<&'a ()>, +} +unsafe impl ExtendsDeviceCreateInfo for PhysicalDeviceVulkanMemoryModelFeaturesKHRBuilder<'_> {} +unsafe impl ExtendsDeviceCreateInfo for PhysicalDeviceVulkanMemoryModelFeaturesKHR {} +impl<'a> ::std::ops::Deref for PhysicalDeviceVulkanMemoryModelFeaturesKHRBuilder<'a> { + type Target = PhysicalDeviceVulkanMemoryModelFeaturesKHR; + fn deref(&self) -> &Self::Target { + &self.inner + } +} +impl<'a> ::std::ops::DerefMut for PhysicalDeviceVulkanMemoryModelFeaturesKHRBuilder<'a> { + fn deref_mut(&mut self) -> &mut Self::Target { + &mut self.inner + } +} +impl<'a> PhysicalDeviceVulkanMemoryModelFeaturesKHRBuilder<'a> { + pub fn vulkan_memory_model( + mut self, + vulkan_memory_model: bool, + ) -> PhysicalDeviceVulkanMemoryModelFeaturesKHRBuilder<'a> { + self.inner.vulkan_memory_model = vulkan_memory_model.into(); + self + } + pub fn vulkan_memory_model_device_scope( + mut self, + vulkan_memory_model_device_scope: bool, + ) -> PhysicalDeviceVulkanMemoryModelFeaturesKHRBuilder<'a> { + self.inner.vulkan_memory_model_device_scope = vulkan_memory_model_device_scope.into(); + self + } + #[doc = r" Calling build will **discard** all the lifetime information. Only call this if"] + #[doc = r" necessary! 
Builders implement `Deref` targeting their corresponding Vulkan struct,"] + #[doc = r" so references to builders can be passed directly to Vulkan functions."] + pub fn build(self) -> PhysicalDeviceVulkanMemoryModelFeaturesKHR { + self.inner + } +} +#[repr(C)] +#[derive(Copy, Clone, Debug)] +#[doc = ""] +pub struct PhysicalDeviceShaderAtomicInt64FeaturesKHR { + pub s_type: StructureType, + pub p_next: *mut c_void, + pub shader_buffer_int64_atomics: Bool32, + pub shader_shared_int64_atomics: Bool32, +} +impl ::std::default::Default for PhysicalDeviceShaderAtomicInt64FeaturesKHR { + fn default() -> PhysicalDeviceShaderAtomicInt64FeaturesKHR { + PhysicalDeviceShaderAtomicInt64FeaturesKHR { + s_type: StructureType::PHYSICAL_DEVICE_SHADER_ATOMIC_INT64_FEATURES_KHR, + p_next: ::std::ptr::null_mut(), + shader_buffer_int64_atomics: Bool32::default(), + shader_shared_int64_atomics: Bool32::default(), + } + } +} +impl PhysicalDeviceShaderAtomicInt64FeaturesKHR { + pub fn builder<'a>() -> PhysicalDeviceShaderAtomicInt64FeaturesKHRBuilder<'a> { + PhysicalDeviceShaderAtomicInt64FeaturesKHRBuilder { + inner: PhysicalDeviceShaderAtomicInt64FeaturesKHR::default(), + marker: ::std::marker::PhantomData, + } + } +} +#[repr(transparent)] +pub struct PhysicalDeviceShaderAtomicInt64FeaturesKHRBuilder<'a> { + inner: PhysicalDeviceShaderAtomicInt64FeaturesKHR, + marker: ::std::marker::PhantomData<&'a ()>, +} +unsafe impl ExtendsDeviceCreateInfo for PhysicalDeviceShaderAtomicInt64FeaturesKHRBuilder<'_> {} +unsafe impl ExtendsDeviceCreateInfo for PhysicalDeviceShaderAtomicInt64FeaturesKHR {} +impl<'a> ::std::ops::Deref for PhysicalDeviceShaderAtomicInt64FeaturesKHRBuilder<'a> { + type Target = PhysicalDeviceShaderAtomicInt64FeaturesKHR; + fn deref(&self) -> &Self::Target { + &self.inner + } +} +impl<'a> ::std::ops::DerefMut for PhysicalDeviceShaderAtomicInt64FeaturesKHRBuilder<'a> { + fn deref_mut(&mut self) -> &mut Self::Target { + &mut self.inner + } +} +impl<'a> PhysicalDeviceShaderAtomicInt64FeaturesKHRBuilder<'a> { + pub fn shader_buffer_int64_atomics( + mut self, + shader_buffer_int64_atomics: bool, + ) -> PhysicalDeviceShaderAtomicInt64FeaturesKHRBuilder<'a> { + self.inner.shader_buffer_int64_atomics = shader_buffer_int64_atomics.into(); + self + } + pub fn shader_shared_int64_atomics( + mut self, + shader_shared_int64_atomics: bool, + ) -> PhysicalDeviceShaderAtomicInt64FeaturesKHRBuilder<'a> { + self.inner.shader_shared_int64_atomics = shader_shared_int64_atomics.into(); + self + } + #[doc = r" Calling build will **discard** all the lifetime information. Only call this if"] + #[doc = r" necessary! 
Builders implement `Deref` targeting their corresponding Vulkan struct,"] + #[doc = r" so references to builders can be passed directly to Vulkan functions."] + pub fn build(self) -> PhysicalDeviceShaderAtomicInt64FeaturesKHR { + self.inner + } +} +#[repr(C)] +#[derive(Copy, Clone, Debug)] +#[doc = ""] +pub struct PhysicalDeviceVertexAttributeDivisorFeaturesEXT { + pub s_type: StructureType, + pub p_next: *mut c_void, + pub vertex_attribute_instance_rate_divisor: Bool32, + pub vertex_attribute_instance_rate_zero_divisor: Bool32, +} +impl ::std::default::Default for PhysicalDeviceVertexAttributeDivisorFeaturesEXT { + fn default() -> PhysicalDeviceVertexAttributeDivisorFeaturesEXT { + PhysicalDeviceVertexAttributeDivisorFeaturesEXT { + s_type: StructureType::PHYSICAL_DEVICE_VERTEX_ATTRIBUTE_DIVISOR_FEATURES_EXT, + p_next: ::std::ptr::null_mut(), + vertex_attribute_instance_rate_divisor: Bool32::default(), + vertex_attribute_instance_rate_zero_divisor: Bool32::default(), + } + } +} +impl PhysicalDeviceVertexAttributeDivisorFeaturesEXT { + pub fn builder<'a>() -> PhysicalDeviceVertexAttributeDivisorFeaturesEXTBuilder<'a> { + PhysicalDeviceVertexAttributeDivisorFeaturesEXTBuilder { + inner: PhysicalDeviceVertexAttributeDivisorFeaturesEXT::default(), + marker: ::std::marker::PhantomData, + } + } +} +#[repr(transparent)] +pub struct PhysicalDeviceVertexAttributeDivisorFeaturesEXTBuilder<'a> { + inner: PhysicalDeviceVertexAttributeDivisorFeaturesEXT, + marker: ::std::marker::PhantomData<&'a ()>, +} +unsafe impl ExtendsDeviceCreateInfo for PhysicalDeviceVertexAttributeDivisorFeaturesEXTBuilder<'_> {} +unsafe impl ExtendsDeviceCreateInfo for PhysicalDeviceVertexAttributeDivisorFeaturesEXT {} +impl<'a> ::std::ops::Deref for PhysicalDeviceVertexAttributeDivisorFeaturesEXTBuilder<'a> { + type Target = PhysicalDeviceVertexAttributeDivisorFeaturesEXT; + fn deref(&self) -> &Self::Target { + &self.inner + } +} +impl<'a> ::std::ops::DerefMut for PhysicalDeviceVertexAttributeDivisorFeaturesEXTBuilder<'a> { + fn deref_mut(&mut self) -> &mut Self::Target { + &mut self.inner + } +} +impl<'a> PhysicalDeviceVertexAttributeDivisorFeaturesEXTBuilder<'a> { + pub fn vertex_attribute_instance_rate_divisor( + mut self, + vertex_attribute_instance_rate_divisor: bool, + ) -> PhysicalDeviceVertexAttributeDivisorFeaturesEXTBuilder<'a> { + self.inner.vertex_attribute_instance_rate_divisor = + vertex_attribute_instance_rate_divisor.into(); + self + } + pub fn vertex_attribute_instance_rate_zero_divisor( + mut self, + vertex_attribute_instance_rate_zero_divisor: bool, + ) -> PhysicalDeviceVertexAttributeDivisorFeaturesEXTBuilder<'a> { + self.inner.vertex_attribute_instance_rate_zero_divisor = + vertex_attribute_instance_rate_zero_divisor.into(); + self + } + #[doc = r" Calling build will **discard** all the lifetime information. Only call this if"] + #[doc = r" necessary! 
Builders implement `Deref` targeting their corresponding Vulkan struct,"] + #[doc = r" so references to builders can be passed directly to Vulkan functions."] + pub fn build(self) -> PhysicalDeviceVertexAttributeDivisorFeaturesEXT { + self.inner + } +} +#[repr(C)] +#[derive(Copy, Clone, Debug)] +#[doc = ""] +pub struct QueueFamilyCheckpointPropertiesNV { + pub s_type: StructureType, + pub p_next: *mut c_void, + pub checkpoint_execution_stage_mask: PipelineStageFlags, +} +impl ::std::default::Default for QueueFamilyCheckpointPropertiesNV { + fn default() -> QueueFamilyCheckpointPropertiesNV { + QueueFamilyCheckpointPropertiesNV { + s_type: StructureType::QUEUE_FAMILY_CHECKPOINT_PROPERTIES_NV, + p_next: ::std::ptr::null_mut(), + checkpoint_execution_stage_mask: PipelineStageFlags::default(), + } + } +} +impl QueueFamilyCheckpointPropertiesNV { + pub fn builder<'a>() -> QueueFamilyCheckpointPropertiesNVBuilder<'a> { + QueueFamilyCheckpointPropertiesNVBuilder { + inner: QueueFamilyCheckpointPropertiesNV::default(), + marker: ::std::marker::PhantomData, + } + } +} +#[repr(transparent)] +pub struct QueueFamilyCheckpointPropertiesNVBuilder<'a> { + inner: QueueFamilyCheckpointPropertiesNV, + marker: ::std::marker::PhantomData<&'a ()>, +} +unsafe impl ExtendsQueueFamilyProperties2 for QueueFamilyCheckpointPropertiesNVBuilder<'_> {} +unsafe impl ExtendsQueueFamilyProperties2 for QueueFamilyCheckpointPropertiesNV {} +impl<'a> ::std::ops::Deref for QueueFamilyCheckpointPropertiesNVBuilder<'a> { + type Target = QueueFamilyCheckpointPropertiesNV; + fn deref(&self) -> &Self::Target { + &self.inner + } +} +impl<'a> ::std::ops::DerefMut for QueueFamilyCheckpointPropertiesNVBuilder<'a> { + fn deref_mut(&mut self) -> &mut Self::Target { + &mut self.inner + } +} +impl<'a> QueueFamilyCheckpointPropertiesNVBuilder<'a> { + pub fn checkpoint_execution_stage_mask( + mut self, + checkpoint_execution_stage_mask: PipelineStageFlags, + ) -> QueueFamilyCheckpointPropertiesNVBuilder<'a> { + self.inner.checkpoint_execution_stage_mask = checkpoint_execution_stage_mask; + self + } + #[doc = r" Calling build will **discard** all the lifetime information. Only call this if"] + #[doc = r" necessary! 
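
`QueueFamilyCheckpointPropertiesNV` is again driver-filled, returned through the `QueueFamilyProperties2` chain. Because that query takes a slice of plain structs, this is one of the cases the repeated doc comment allows `build()` for. A sketch, assuming the Vulkan 1.1 `get_physical_device_queue_family_properties2` wrapper is available:

```rust
use ash::vk;

let mut checkpoint = vk::QueueFamilyCheckpointPropertiesNV::default();
// build() is justified here: the struct must live in a slice, and the
// chained `checkpoint` outlives the query call.
let mut family_props = [vk::QueueFamilyProperties2::builder()
    .push_next(&mut checkpoint)
    .build()];
// unsafe { instance.get_physical_device_queue_family_properties2(pdev, &mut family_props) };
let _stages = checkpoint.checkpoint_execution_stage_mask;
```
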
Builders implement `Deref` targeting their corresponding Vulkan struct,"] + #[doc = r" so references to builders can be passed directly to Vulkan functions."] + pub fn build(self) -> QueueFamilyCheckpointPropertiesNV { + self.inner + } +} +#[repr(C)] +#[derive(Copy, Clone, Debug)] +#[doc = ""] +pub struct CheckpointDataNV { + pub s_type: StructureType, + pub p_next: *mut c_void, + pub stage: PipelineStageFlags, + pub p_checkpoint_marker: *mut c_void, +} +impl ::std::default::Default for CheckpointDataNV { + fn default() -> CheckpointDataNV { + CheckpointDataNV { + s_type: StructureType::CHECKPOINT_DATA_NV, + p_next: ::std::ptr::null_mut(), + stage: PipelineStageFlags::default(), + p_checkpoint_marker: ::std::ptr::null_mut(), + } + } +} +impl CheckpointDataNV { + pub fn builder<'a>() -> CheckpointDataNVBuilder<'a> { + CheckpointDataNVBuilder { + inner: CheckpointDataNV::default(), + marker: ::std::marker::PhantomData, + } + } +} +#[repr(transparent)] +pub struct CheckpointDataNVBuilder<'a> { + inner: CheckpointDataNV, + marker: ::std::marker::PhantomData<&'a ()>, +} +pub unsafe trait ExtendsCheckpointDataNV {} +impl<'a> ::std::ops::Deref for CheckpointDataNVBuilder<'a> { + type Target = CheckpointDataNV; + fn deref(&self) -> &Self::Target { + &self.inner + } +} +impl<'a> ::std::ops::DerefMut for CheckpointDataNVBuilder<'a> { + fn deref_mut(&mut self) -> &mut Self::Target { + &mut self.inner + } +} +impl<'a> CheckpointDataNVBuilder<'a> { + pub fn stage(mut self, stage: PipelineStageFlags) -> CheckpointDataNVBuilder<'a> { + self.inner.stage = stage; + self + } + pub fn checkpoint_marker( + mut self, + checkpoint_marker: *mut c_void, + ) -> CheckpointDataNVBuilder<'a> { + self.inner.p_checkpoint_marker = checkpoint_marker; + self + } + #[doc = r" Prepends the given extension struct between the root and the first pointer. This"] + #[doc = r" method only exists on structs that can be passed to a function directly. Only"] + #[doc = r" valid extension structs can be pushed into the chain."] + #[doc = r" If the chain looks like `A -> B -> C`, and you call `builder.push_next(&mut D)`, then the"] + #[doc = r" chain will look like `A -> D -> B -> C`."] + pub fn push_next<T: ExtendsCheckpointDataNV>( + mut self, + next: &'a mut T, + ) -> CheckpointDataNVBuilder<'a> { + unsafe { + let next_ptr = next as *mut T as *mut BaseOutStructure; + let last_next = ptr_chain_iter(next).last().unwrap(); + (*last_next).p_next = self.inner.p_next as _; + self.inner.p_next = next_ptr as _; + } + self + } + #[doc = r" Calling build will **discard** all the lifetime information. Only call this if"] + #[doc = r" necessary!
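
The `push_next` body above shows the chain discipline used throughout these bindings: the new struct is spliced between the root and the existing head, and `ptr_chain_iter(next).last()` first walks the pushed struct's own `p_next` chain, so an already-chained block can be pushed wholesale without losing its tail. A small sketch of the resulting order, using two device-feature structs from this file:

```rust
use ash::vk;

let mut cond = vk::PhysicalDeviceConditionalRenderingFeaturesEXT::default();
let mut atomics = vk::PhysicalDeviceShaderAtomicInt64FeaturesKHR::default();
// Each push prepends directly behind the root:
//   after push_next(&mut cond):    info -> cond
//   after push_next(&mut atomics): info -> atomics -> cond
let _info = vk::DeviceCreateInfo::builder()
    .push_next(&mut cond)
    .push_next(&mut atomics);
```
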
Builders implement `Deref` targeting their corresponding Vulkan struct,"] + #[doc = r" so references to builders can be passed directly to Vulkan functions."] + pub fn build(self) -> CheckpointDataNV { + self.inner + } +} +#[repr(C)] +#[derive(Copy, Clone, Debug)] +#[doc = ""] +pub struct PhysicalDeviceDepthStencilResolvePropertiesKHR { + pub s_type: StructureType, + pub p_next: *mut c_void, + pub supported_depth_resolve_modes: ResolveModeFlagsKHR, + pub supported_stencil_resolve_modes: ResolveModeFlagsKHR, + pub independent_resolve_none: Bool32, + pub independent_resolve: Bool32, +} +impl ::std::default::Default for PhysicalDeviceDepthStencilResolvePropertiesKHR { + fn default() -> PhysicalDeviceDepthStencilResolvePropertiesKHR { + PhysicalDeviceDepthStencilResolvePropertiesKHR { + s_type: StructureType::PHYSICAL_DEVICE_DEPTH_STENCIL_RESOLVE_PROPERTIES_KHR, + p_next: ::std::ptr::null_mut(), + supported_depth_resolve_modes: ResolveModeFlagsKHR::default(), + supported_stencil_resolve_modes: ResolveModeFlagsKHR::default(), + independent_resolve_none: Bool32::default(), + independent_resolve: Bool32::default(), + } + } +} +impl PhysicalDeviceDepthStencilResolvePropertiesKHR { + pub fn builder<'a>() -> PhysicalDeviceDepthStencilResolvePropertiesKHRBuilder<'a> { + PhysicalDeviceDepthStencilResolvePropertiesKHRBuilder { + inner: PhysicalDeviceDepthStencilResolvePropertiesKHR::default(), + marker: ::std::marker::PhantomData, + } + } +} +#[repr(transparent)] +pub struct PhysicalDeviceDepthStencilResolvePropertiesKHRBuilder<'a> { + inner: PhysicalDeviceDepthStencilResolvePropertiesKHR, + marker: ::std::marker::PhantomData<&'a ()>, +} +unsafe impl ExtendsPhysicalDeviceProperties2 + for PhysicalDeviceDepthStencilResolvePropertiesKHRBuilder<'_> +{ +} +unsafe impl ExtendsPhysicalDeviceProperties2 for PhysicalDeviceDepthStencilResolvePropertiesKHR {} +impl<'a> ::std::ops::Deref for PhysicalDeviceDepthStencilResolvePropertiesKHRBuilder<'a> { + type Target = PhysicalDeviceDepthStencilResolvePropertiesKHR; + fn deref(&self) -> &Self::Target { + &self.inner + } +} +impl<'a> ::std::ops::DerefMut for PhysicalDeviceDepthStencilResolvePropertiesKHRBuilder<'a> { + fn deref_mut(&mut self) -> &mut Self::Target { + &mut self.inner + } +} +impl<'a> PhysicalDeviceDepthStencilResolvePropertiesKHRBuilder<'a> { + pub fn supported_depth_resolve_modes( + mut self, + supported_depth_resolve_modes: ResolveModeFlagsKHR, + ) -> PhysicalDeviceDepthStencilResolvePropertiesKHRBuilder<'a> { + self.inner.supported_depth_resolve_modes = supported_depth_resolve_modes; + self + } + pub fn supported_stencil_resolve_modes( + mut self, + supported_stencil_resolve_modes: ResolveModeFlagsKHR, + ) -> PhysicalDeviceDepthStencilResolvePropertiesKHRBuilder<'a> { + self.inner.supported_stencil_resolve_modes = supported_stencil_resolve_modes; + self + } + pub fn independent_resolve_none( + mut self, + independent_resolve_none: bool, + ) -> PhysicalDeviceDepthStencilResolvePropertiesKHRBuilder<'a> { + self.inner.independent_resolve_none = independent_resolve_none.into(); + self + } + pub fn independent_resolve( + mut self, + independent_resolve: bool, + ) -> PhysicalDeviceDepthStencilResolvePropertiesKHRBuilder<'a> { + self.inner.independent_resolve = independent_resolve.into(); + self + } + #[doc = r" Calling build will **discard** all the lifetime information. Only call this if"] + #[doc = r" necessary! 
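
`PhysicalDeviceDepthStencilResolvePropertiesKHR` is queried through the `PhysicalDeviceProperties2` chain; afterwards its flag fields report which resolve modes the device supports. A hedged sketch, assuming ash's Vulkan 1.1 `get_physical_device_properties2`:

```rust
use ash::vk;

let mut ds_resolve = vk::PhysicalDeviceDepthStencilResolvePropertiesKHR::default();
let mut props2 = vk::PhysicalDeviceProperties2::builder()
    .push_next(&mut ds_resolve);
// unsafe { instance.get_physical_device_properties2(pdev, &mut props2) };
let _supports_min = ds_resolve
    .supported_depth_resolve_modes
    .contains(vk::ResolveModeFlagsKHR::MIN);
```
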
Builders implement `Deref` targeting their corresponding Vulkan struct,"] + #[doc = r" so references to builders can be passed directly to Vulkan functions."] + pub fn build(self) -> PhysicalDeviceDepthStencilResolvePropertiesKHR { + self.inner + } +} +#[repr(C)] +#[derive(Copy, Clone, Debug)] +#[doc = ""] +pub struct SubpassDescriptionDepthStencilResolveKHR { + pub s_type: StructureType, + pub p_next: *const c_void, + pub depth_resolve_mode: ResolveModeFlagsKHR, + pub stencil_resolve_mode: ResolveModeFlagsKHR, + pub p_depth_stencil_resolve_attachment: *const AttachmentReference2KHR, +} +impl ::std::default::Default for SubpassDescriptionDepthStencilResolveKHR { + fn default() -> SubpassDescriptionDepthStencilResolveKHR { + SubpassDescriptionDepthStencilResolveKHR { + s_type: StructureType::SUBPASS_DESCRIPTION_DEPTH_STENCIL_RESOLVE_KHR, + p_next: ::std::ptr::null(), + depth_resolve_mode: ResolveModeFlagsKHR::default(), + stencil_resolve_mode: ResolveModeFlagsKHR::default(), + p_depth_stencil_resolve_attachment: ::std::ptr::null(), + } + } +} +impl SubpassDescriptionDepthStencilResolveKHR { + pub fn builder<'a>() -> SubpassDescriptionDepthStencilResolveKHRBuilder<'a> { + SubpassDescriptionDepthStencilResolveKHRBuilder { + inner: SubpassDescriptionDepthStencilResolveKHR::default(), + marker: ::std::marker::PhantomData, + } + } +} +#[repr(transparent)] +pub struct SubpassDescriptionDepthStencilResolveKHRBuilder<'a> { + inner: SubpassDescriptionDepthStencilResolveKHR, + marker: ::std::marker::PhantomData<&'a ()>, +} +unsafe impl ExtendsSubpassDescription2KHR for SubpassDescriptionDepthStencilResolveKHRBuilder<'_> {} +unsafe impl ExtendsSubpassDescription2KHR for SubpassDescriptionDepthStencilResolveKHR {} +impl<'a> ::std::ops::Deref for SubpassDescriptionDepthStencilResolveKHRBuilder<'a> { + type Target = SubpassDescriptionDepthStencilResolveKHR; + fn deref(&self) -> &Self::Target { + &self.inner + } +} +impl<'a> ::std::ops::DerefMut for SubpassDescriptionDepthStencilResolveKHRBuilder<'a> { + fn deref_mut(&mut self) -> &mut Self::Target { + &mut self.inner + } +} +impl<'a> SubpassDescriptionDepthStencilResolveKHRBuilder<'a> { + pub fn depth_resolve_mode( + mut self, + depth_resolve_mode: ResolveModeFlagsKHR, + ) -> SubpassDescriptionDepthStencilResolveKHRBuilder<'a> { + self.inner.depth_resolve_mode = depth_resolve_mode; + self + } + pub fn stencil_resolve_mode( + mut self, + stencil_resolve_mode: ResolveModeFlagsKHR, + ) -> SubpassDescriptionDepthStencilResolveKHRBuilder<'a> { + self.inner.stencil_resolve_mode = stencil_resolve_mode; + self + } + pub fn depth_stencil_resolve_attachment( + mut self, + depth_stencil_resolve_attachment: &'a AttachmentReference2KHR, + ) -> SubpassDescriptionDepthStencilResolveKHRBuilder<'a> { + self.inner.p_depth_stencil_resolve_attachment = depth_stencil_resolve_attachment; + self + } + #[doc = r" Calling build will **discard** all the lifetime information. Only call this if"] + #[doc = r" necessary! 
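
`depth_stencil_resolve_attachment` stores only a raw pointer to the `&'a AttachmentReference2KHR` it receives; the builder's lifetime parameter tracks that borrow, and calling `build()` is exactly what the doc comment warns discards it. The safe shape is to keep the referenced value in a live binding:

```rust
use ash::vk;

// Must outlive every use of the create info below, because only its
// address is stored in p_depth_stencil_resolve_attachment.
let resolve_ref = vk::AttachmentReference2KHR::builder()
    .attachment(1)
    .layout(vk::ImageLayout::DEPTH_STENCIL_ATTACHMENT_OPTIMAL)
    .build();
let mut ds_resolve = vk::SubpassDescriptionDepthStencilResolveKHR::builder()
    .depth_resolve_mode(vk::ResolveModeFlagsKHR::SAMPLE_ZERO)
    .stencil_resolve_mode(vk::ResolveModeFlagsKHR::SAMPLE_ZERO)
    .depth_stencil_resolve_attachment(&resolve_ref);
let _subpass = vk::SubpassDescription2KHR::builder()
    .push_next(&mut ds_resolve);
```
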
Builders implement `Deref` targeting their corresponding Vulkan struct,"] + #[doc = r" so references to builders can be passed directly to Vulkan functions."] + pub fn build(self) -> SubpassDescriptionDepthStencilResolveKHR { + self.inner + } +} +#[repr(C)] +#[derive(Copy, Clone, Debug)] +#[doc = ""] +pub struct ImageViewASTCDecodeModeEXT { + pub s_type: StructureType, + pub p_next: *const c_void, + pub decode_mode: Format, +} +impl ::std::default::Default for ImageViewASTCDecodeModeEXT { + fn default() -> ImageViewASTCDecodeModeEXT { + ImageViewASTCDecodeModeEXT { + s_type: StructureType::IMAGE_VIEW_ASTC_DECODE_MODE_EXT, + p_next: ::std::ptr::null(), + decode_mode: Format::default(), + } + } +} +impl ImageViewASTCDecodeModeEXT { + pub fn builder<'a>() -> ImageViewASTCDecodeModeEXTBuilder<'a> { + ImageViewASTCDecodeModeEXTBuilder { + inner: ImageViewASTCDecodeModeEXT::default(), + marker: ::std::marker::PhantomData, + } + } +} +#[repr(transparent)] +pub struct ImageViewASTCDecodeModeEXTBuilder<'a> { + inner: ImageViewASTCDecodeModeEXT, + marker: ::std::marker::PhantomData<&'a ()>, +} +unsafe impl ExtendsImageViewCreateInfo for ImageViewASTCDecodeModeEXTBuilder<'_> {} +unsafe impl ExtendsImageViewCreateInfo for ImageViewASTCDecodeModeEXT {} +impl<'a> ::std::ops::Deref for ImageViewASTCDecodeModeEXTBuilder<'a> { + type Target = ImageViewASTCDecodeModeEXT; + fn deref(&self) -> &Self::Target { + &self.inner + } +} +impl<'a> ::std::ops::DerefMut for ImageViewASTCDecodeModeEXTBuilder<'a> { + fn deref_mut(&mut self) -> &mut Self::Target { + &mut self.inner + } +} +impl<'a> ImageViewASTCDecodeModeEXTBuilder<'a> { + pub fn decode_mode(mut self, decode_mode: Format) -> ImageViewASTCDecodeModeEXTBuilder<'a> { + self.inner.decode_mode = decode_mode; + self + } + #[doc = r" Calling build will **discard** all the lifetime information. Only call this if"] + #[doc = r" necessary! 
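
`ImageViewASTCDecodeModeEXT` hangs off `ImageViewCreateInfo` and asks the implementation to decode ASTC blocks at the named precision. A minimal sketch:

```rust
use ash::vk;

let mut astc = vk::ImageViewASTCDecodeModeEXT::builder()
    .decode_mode(vk::Format::R8G8B8A8_UNORM); // lower-precision decode
let _view_info = vk::ImageViewCreateInfo::builder()
    .view_type(vk::ImageViewType::TYPE_2D)
    .format(vk::Format::ASTC_8X8_UNORM_BLOCK)
    .push_next(&mut astc);
```
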
Builders implement `Deref` targeting their corresponding Vulkan struct,"] + #[doc = r" so references to builders can be passed directly to Vulkan functions."] + pub fn build(self) -> ImageViewASTCDecodeModeEXT { + self.inner + } +} +#[repr(C)] +#[derive(Copy, Clone, Debug)] +#[doc = ""] +pub struct PhysicalDeviceASTCDecodeFeaturesEXT { + pub s_type: StructureType, + pub p_next: *mut c_void, + pub decode_mode_shared_exponent: Bool32, +} +impl ::std::default::Default for PhysicalDeviceASTCDecodeFeaturesEXT { + fn default() -> PhysicalDeviceASTCDecodeFeaturesEXT { + PhysicalDeviceASTCDecodeFeaturesEXT { + s_type: StructureType::PHYSICAL_DEVICE_ASTC_DECODE_FEATURES_EXT, + p_next: ::std::ptr::null_mut(), + decode_mode_shared_exponent: Bool32::default(), + } + } +} +impl PhysicalDeviceASTCDecodeFeaturesEXT { + pub fn builder<'a>() -> PhysicalDeviceASTCDecodeFeaturesEXTBuilder<'a> { + PhysicalDeviceASTCDecodeFeaturesEXTBuilder { + inner: PhysicalDeviceASTCDecodeFeaturesEXT::default(), + marker: ::std::marker::PhantomData, + } + } +} +#[repr(transparent)] +pub struct PhysicalDeviceASTCDecodeFeaturesEXTBuilder<'a> { + inner: PhysicalDeviceASTCDecodeFeaturesEXT, + marker: ::std::marker::PhantomData<&'a ()>, +} +unsafe impl ExtendsDeviceCreateInfo for PhysicalDeviceASTCDecodeFeaturesEXTBuilder<'_> {} +unsafe impl ExtendsDeviceCreateInfo for PhysicalDeviceASTCDecodeFeaturesEXT {} +impl<'a> ::std::ops::Deref for PhysicalDeviceASTCDecodeFeaturesEXTBuilder<'a> { + type Target = PhysicalDeviceASTCDecodeFeaturesEXT; + fn deref(&self) -> &Self::Target { + &self.inner + } +} +impl<'a> ::std::ops::DerefMut for PhysicalDeviceASTCDecodeFeaturesEXTBuilder<'a> { + fn deref_mut(&mut self) -> &mut Self::Target { + &mut self.inner + } +} +impl<'a> PhysicalDeviceASTCDecodeFeaturesEXTBuilder<'a> { + pub fn decode_mode_shared_exponent( + mut self, + decode_mode_shared_exponent: bool, + ) -> PhysicalDeviceASTCDecodeFeaturesEXTBuilder<'a> { + self.inner.decode_mode_shared_exponent = decode_mode_shared_exponent.into(); + self + } + #[doc = r" Calling build will **discard** all the lifetime information. Only call this if"] + #[doc = r" necessary! 
Builders implement `Deref` targeting their corresponding Vulkan struct,"] + #[doc = r" so references to builders can be passed directly to Vulkan functions."] + pub fn build(self) -> PhysicalDeviceASTCDecodeFeaturesEXT { + self.inner + } +} +#[repr(C)] +#[derive(Copy, Clone, Debug)] +#[doc = ""] +pub struct PhysicalDeviceTransformFeedbackFeaturesEXT { + pub s_type: StructureType, + pub p_next: *mut c_void, + pub transform_feedback: Bool32, + pub geometry_streams: Bool32, +} +impl ::std::default::Default for PhysicalDeviceTransformFeedbackFeaturesEXT { + fn default() -> PhysicalDeviceTransformFeedbackFeaturesEXT { + PhysicalDeviceTransformFeedbackFeaturesEXT { + s_type: StructureType::PHYSICAL_DEVICE_TRANSFORM_FEEDBACK_FEATURES_EXT, + p_next: ::std::ptr::null_mut(), + transform_feedback: Bool32::default(), + geometry_streams: Bool32::default(), + } + } +} +impl PhysicalDeviceTransformFeedbackFeaturesEXT { + pub fn builder<'a>() -> PhysicalDeviceTransformFeedbackFeaturesEXTBuilder<'a> { + PhysicalDeviceTransformFeedbackFeaturesEXTBuilder { + inner: PhysicalDeviceTransformFeedbackFeaturesEXT::default(), + marker: ::std::marker::PhantomData, + } + } +} +#[repr(transparent)] +pub struct PhysicalDeviceTransformFeedbackFeaturesEXTBuilder<'a> { + inner: PhysicalDeviceTransformFeedbackFeaturesEXT, + marker: ::std::marker::PhantomData<&'a ()>, +} +unsafe impl ExtendsDeviceCreateInfo for PhysicalDeviceTransformFeedbackFeaturesEXTBuilder<'_> {} +unsafe impl ExtendsDeviceCreateInfo for PhysicalDeviceTransformFeedbackFeaturesEXT {} +impl<'a> ::std::ops::Deref for PhysicalDeviceTransformFeedbackFeaturesEXTBuilder<'a> { + type Target = PhysicalDeviceTransformFeedbackFeaturesEXT; + fn deref(&self) -> &Self::Target { + &self.inner + } +} +impl<'a> ::std::ops::DerefMut for PhysicalDeviceTransformFeedbackFeaturesEXTBuilder<'a> { + fn deref_mut(&mut self) -> &mut Self::Target { + &mut self.inner + } +} +impl<'a> PhysicalDeviceTransformFeedbackFeaturesEXTBuilder<'a> { + pub fn transform_feedback( + mut self, + transform_feedback: bool, + ) -> PhysicalDeviceTransformFeedbackFeaturesEXTBuilder<'a> { + self.inner.transform_feedback = transform_feedback.into(); + self + } + pub fn geometry_streams( + mut self, + geometry_streams: bool, + ) -> PhysicalDeviceTransformFeedbackFeaturesEXTBuilder<'a> { + self.inner.geometry_streams = geometry_streams.into(); + self + } + #[doc = r" Calling build will **discard** all the lifetime information. Only call this if"] + #[doc = r" necessary! 
Builders implement `Deref` targeting their corresponding Vulkan struct,"] + #[doc = r" so references to builders can be passed directly to Vulkan functions."] + pub fn build(self) -> PhysicalDeviceTransformFeedbackFeaturesEXT { + self.inner + } +} +#[repr(C)] +#[derive(Copy, Clone, Debug)] +#[doc = ""] +pub struct PhysicalDeviceTransformFeedbackPropertiesEXT { + pub s_type: StructureType, + pub p_next: *mut c_void, + pub max_transform_feedback_streams: u32, + pub max_transform_feedback_buffers: u32, + pub max_transform_feedback_buffer_size: DeviceSize, + pub max_transform_feedback_stream_data_size: u32, + pub max_transform_feedback_buffer_data_size: u32, + pub max_transform_feedback_buffer_data_stride: u32, + pub transform_feedback_queries: Bool32, + pub transform_feedback_streams_lines_triangles: Bool32, + pub transform_feedback_rasterization_stream_select: Bool32, + pub transform_feedback_draw: Bool32, +} +impl ::std::default::Default for PhysicalDeviceTransformFeedbackPropertiesEXT { + fn default() -> PhysicalDeviceTransformFeedbackPropertiesEXT { + PhysicalDeviceTransformFeedbackPropertiesEXT { + s_type: StructureType::PHYSICAL_DEVICE_TRANSFORM_FEEDBACK_PROPERTIES_EXT, + p_next: ::std::ptr::null_mut(), + max_transform_feedback_streams: u32::default(), + max_transform_feedback_buffers: u32::default(), + max_transform_feedback_buffer_size: DeviceSize::default(), + max_transform_feedback_stream_data_size: u32::default(), + max_transform_feedback_buffer_data_size: u32::default(), + max_transform_feedback_buffer_data_stride: u32::default(), + transform_feedback_queries: Bool32::default(), + transform_feedback_streams_lines_triangles: Bool32::default(), + transform_feedback_rasterization_stream_select: Bool32::default(), + transform_feedback_draw: Bool32::default(), + } + } +} +impl PhysicalDeviceTransformFeedbackPropertiesEXT { + pub fn builder<'a>() -> PhysicalDeviceTransformFeedbackPropertiesEXTBuilder<'a> { + PhysicalDeviceTransformFeedbackPropertiesEXTBuilder { + inner: PhysicalDeviceTransformFeedbackPropertiesEXT::default(), + marker: ::std::marker::PhantomData, + } + } +} +#[repr(transparent)] +pub struct PhysicalDeviceTransformFeedbackPropertiesEXTBuilder<'a> { + inner: PhysicalDeviceTransformFeedbackPropertiesEXT, + marker: ::std::marker::PhantomData<&'a ()>, +} +unsafe impl ExtendsPhysicalDeviceProperties2 + for PhysicalDeviceTransformFeedbackPropertiesEXTBuilder<'_> +{ +} +unsafe impl ExtendsPhysicalDeviceProperties2 for PhysicalDeviceTransformFeedbackPropertiesEXT {} +impl<'a> ::std::ops::Deref for PhysicalDeviceTransformFeedbackPropertiesEXTBuilder<'a> { + type Target = PhysicalDeviceTransformFeedbackPropertiesEXT; + fn deref(&self) -> &Self::Target { + &self.inner + } +} +impl<'a> ::std::ops::DerefMut for PhysicalDeviceTransformFeedbackPropertiesEXTBuilder<'a> { + fn deref_mut(&mut self) -> &mut Self::Target { + &mut self.inner + } +} +impl<'a> PhysicalDeviceTransformFeedbackPropertiesEXTBuilder<'a> { + pub fn max_transform_feedback_streams( + mut self, + max_transform_feedback_streams: u32, + ) -> PhysicalDeviceTransformFeedbackPropertiesEXTBuilder<'a> { + self.inner.max_transform_feedback_streams = max_transform_feedback_streams; + self + } + pub fn max_transform_feedback_buffers( + mut self, + max_transform_feedback_buffers: u32, + ) -> PhysicalDeviceTransformFeedbackPropertiesEXTBuilder<'a> { + self.inner.max_transform_feedback_buffers = max_transform_feedback_buffers; + self + } + pub fn max_transform_feedback_buffer_size( + mut self, + max_transform_feedback_buffer_size: 
DeviceSize, + ) -> PhysicalDeviceTransformFeedbackPropertiesEXTBuilder<'a> { + self.inner.max_transform_feedback_buffer_size = max_transform_feedback_buffer_size; + self + } + pub fn max_transform_feedback_stream_data_size( + mut self, + max_transform_feedback_stream_data_size: u32, + ) -> PhysicalDeviceTransformFeedbackPropertiesEXTBuilder<'a> { + self.inner.max_transform_feedback_stream_data_size = + max_transform_feedback_stream_data_size; + self + } + pub fn max_transform_feedback_buffer_data_size( + mut self, + max_transform_feedback_buffer_data_size: u32, + ) -> PhysicalDeviceTransformFeedbackPropertiesEXTBuilder<'a> { + self.inner.max_transform_feedback_buffer_data_size = + max_transform_feedback_buffer_data_size; + self + } + pub fn max_transform_feedback_buffer_data_stride( + mut self, + max_transform_feedback_buffer_data_stride: u32, + ) -> PhysicalDeviceTransformFeedbackPropertiesEXTBuilder<'a> { + self.inner.max_transform_feedback_buffer_data_stride = + max_transform_feedback_buffer_data_stride; + self + } + pub fn transform_feedback_queries( + mut self, + transform_feedback_queries: bool, + ) -> PhysicalDeviceTransformFeedbackPropertiesEXTBuilder<'a> { + self.inner.transform_feedback_queries = transform_feedback_queries.into(); + self + } + pub fn transform_feedback_streams_lines_triangles( + mut self, + transform_feedback_streams_lines_triangles: bool, + ) -> PhysicalDeviceTransformFeedbackPropertiesEXTBuilder<'a> { + self.inner.transform_feedback_streams_lines_triangles = + transform_feedback_streams_lines_triangles.into(); + self + } + pub fn transform_feedback_rasterization_stream_select( + mut self, + transform_feedback_rasterization_stream_select: bool, + ) -> PhysicalDeviceTransformFeedbackPropertiesEXTBuilder<'a> { + self.inner.transform_feedback_rasterization_stream_select = + transform_feedback_rasterization_stream_select.into(); + self + } + pub fn transform_feedback_draw( + mut self, + transform_feedback_draw: bool, + ) -> PhysicalDeviceTransformFeedbackPropertiesEXTBuilder<'a> { + self.inner.transform_feedback_draw = transform_feedback_draw.into(); + self + } + #[doc = r" Calling build will **discard** all the lifetime information. Only call this if"] + #[doc = r" necessary! 
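
Once `PhysicalDeviceTransformFeedbackPropertiesEXT` has been filled in via the `PhysicalDeviceProperties2` chain (same query shape as above), its limits gate what the application may record. Two small helpers illustrating how the raw fields are read; note that `Bool32` is a plain `u32`, so the Vulkan convention is non-zero means true:

```rust
use ash::vk;

// Clamp a requested buffer count to the device limit.
fn clamp_xfb_buffers(
    props: &vk::PhysicalDeviceTransformFeedbackPropertiesEXT,
    wanted: u32,
) -> u32 {
    wanted.min(props.max_transform_feedback_buffers)
}

// Transform feedback queries may only be used when this Bool32 is non-zero.
fn xfb_queries_supported(props: &vk::PhysicalDeviceTransformFeedbackPropertiesEXT) -> bool {
    props.transform_feedback_queries != 0
}
```
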
Builders implement `Deref` targeting their corresponding Vulkan struct,"] + #[doc = r" so references to builders can be passed directly to Vulkan functions."] + pub fn build(self) -> PhysicalDeviceTransformFeedbackPropertiesEXT { + self.inner + } +} +#[repr(C)] +#[derive(Copy, Clone, Debug)] +#[doc = ""] +pub struct PipelineRasterizationStateStreamCreateInfoEXT { + pub s_type: StructureType, + pub p_next: *const c_void, + pub flags: PipelineRasterizationStateStreamCreateFlagsEXT, + pub rasterization_stream: u32, +} +impl ::std::default::Default for PipelineRasterizationStateStreamCreateInfoEXT { + fn default() -> PipelineRasterizationStateStreamCreateInfoEXT { + PipelineRasterizationStateStreamCreateInfoEXT { + s_type: StructureType::PIPELINE_RASTERIZATION_STATE_STREAM_CREATE_INFO_EXT, + p_next: ::std::ptr::null(), + flags: PipelineRasterizationStateStreamCreateFlagsEXT::default(), + rasterization_stream: u32::default(), + } + } +} +impl PipelineRasterizationStateStreamCreateInfoEXT { + pub fn builder<'a>() -> PipelineRasterizationStateStreamCreateInfoEXTBuilder<'a> { + PipelineRasterizationStateStreamCreateInfoEXTBuilder { + inner: PipelineRasterizationStateStreamCreateInfoEXT::default(), + marker: ::std::marker::PhantomData, + } + } +} +#[repr(transparent)] +pub struct PipelineRasterizationStateStreamCreateInfoEXTBuilder<'a> { + inner: PipelineRasterizationStateStreamCreateInfoEXT, + marker: ::std::marker::PhantomData<&'a ()>, +} +unsafe impl ExtendsPipelineRasterizationStateCreateInfo + for PipelineRasterizationStateStreamCreateInfoEXTBuilder<'_> +{ +} +unsafe impl ExtendsPipelineRasterizationStateCreateInfo + for PipelineRasterizationStateStreamCreateInfoEXT +{ +} +impl<'a> ::std::ops::Deref for PipelineRasterizationStateStreamCreateInfoEXTBuilder<'a> { + type Target = PipelineRasterizationStateStreamCreateInfoEXT; + fn deref(&self) -> &Self::Target { + &self.inner + } +} +impl<'a> ::std::ops::DerefMut for PipelineRasterizationStateStreamCreateInfoEXTBuilder<'a> { + fn deref_mut(&mut self) -> &mut Self::Target { + &mut self.inner + } +} +impl<'a> PipelineRasterizationStateStreamCreateInfoEXTBuilder<'a> { + pub fn flags( + mut self, + flags: PipelineRasterizationStateStreamCreateFlagsEXT, + ) -> PipelineRasterizationStateStreamCreateInfoEXTBuilder<'a> { + self.inner.flags = flags; + self + } + pub fn rasterization_stream( + mut self, + rasterization_stream: u32, + ) -> PipelineRasterizationStateStreamCreateInfoEXTBuilder<'a> { + self.inner.rasterization_stream = rasterization_stream; + self + } + #[doc = r" Calling build will **discard** all the lifetime information. Only call this if"] + #[doc = r" necessary! 
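
`PipelineRasterizationStateStreamCreateInfoEXT` selects which vertex stream feeds the rasterizer, which is only legal when the `transform_feedback_rasterization_stream_select` limit above is set. Sketch:

```rust
use ash::vk;

let mut stream_info = vk::PipelineRasterizationStateStreamCreateInfoEXT::builder()
    .rasterization_stream(1); // stream 0 is the default
let _raster_state = vk::PipelineRasterizationStateCreateInfo::builder()
    .polygon_mode(vk::PolygonMode::FILL)
    .line_width(1.0)
    .push_next(&mut stream_info);
```
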
Builders implement `Deref` targeting their corresponding Vulkan struct,"] + #[doc = r" so references to builders can be passed directly to Vulkan functions."] + pub fn build(self) -> PipelineRasterizationStateStreamCreateInfoEXT { + self.inner + } +} +#[repr(C)] +#[derive(Copy, Clone, Debug)] +#[doc = ""] +pub struct PhysicalDeviceRepresentativeFragmentTestFeaturesNV { + pub s_type: StructureType, + pub p_next: *mut c_void, + pub representative_fragment_test: Bool32, +} +impl ::std::default::Default for PhysicalDeviceRepresentativeFragmentTestFeaturesNV { + fn default() -> PhysicalDeviceRepresentativeFragmentTestFeaturesNV { + PhysicalDeviceRepresentativeFragmentTestFeaturesNV { + s_type: StructureType::PHYSICAL_DEVICE_REPRESENTATIVE_FRAGMENT_TEST_FEATURES_NV, + p_next: ::std::ptr::null_mut(), + representative_fragment_test: Bool32::default(), + } + } +} +impl PhysicalDeviceRepresentativeFragmentTestFeaturesNV { + pub fn builder<'a>() -> PhysicalDeviceRepresentativeFragmentTestFeaturesNVBuilder<'a> { + PhysicalDeviceRepresentativeFragmentTestFeaturesNVBuilder { + inner: PhysicalDeviceRepresentativeFragmentTestFeaturesNV::default(), + marker: ::std::marker::PhantomData, + } + } +} +#[repr(transparent)] +pub struct PhysicalDeviceRepresentativeFragmentTestFeaturesNVBuilder<'a> { + inner: PhysicalDeviceRepresentativeFragmentTestFeaturesNV, + marker: ::std::marker::PhantomData<&'a ()>, +} +unsafe impl ExtendsDeviceCreateInfo + for PhysicalDeviceRepresentativeFragmentTestFeaturesNVBuilder<'_> +{ +} +unsafe impl ExtendsDeviceCreateInfo for PhysicalDeviceRepresentativeFragmentTestFeaturesNV {} +impl<'a> ::std::ops::Deref for PhysicalDeviceRepresentativeFragmentTestFeaturesNVBuilder<'a> { + type Target = PhysicalDeviceRepresentativeFragmentTestFeaturesNV; + fn deref(&self) -> &Self::Target { + &self.inner + } +} +impl<'a> ::std::ops::DerefMut for PhysicalDeviceRepresentativeFragmentTestFeaturesNVBuilder<'a> { + fn deref_mut(&mut self) -> &mut Self::Target { + &mut self.inner + } +} +impl<'a> PhysicalDeviceRepresentativeFragmentTestFeaturesNVBuilder<'a> { + pub fn representative_fragment_test( + mut self, + representative_fragment_test: bool, + ) -> PhysicalDeviceRepresentativeFragmentTestFeaturesNVBuilder<'a> { + self.inner.representative_fragment_test = representative_fragment_test.into(); + self + } + #[doc = r" Calling build will **discard** all the lifetime information. Only call this if"] + #[doc = r" necessary! 
Builders implement `Deref` targeting their corresponding Vulkan struct,"] + #[doc = r" so references to builders can be passed directly to Vulkan functions."] + pub fn build(self) -> PhysicalDeviceRepresentativeFragmentTestFeaturesNV { + self.inner + } +} +#[repr(C)] +#[derive(Copy, Clone, Debug)] +#[doc = ""] +pub struct PipelineRepresentativeFragmentTestStateCreateInfoNV { + pub s_type: StructureType, + pub p_next: *const c_void, + pub representative_fragment_test_enable: Bool32, +} +impl ::std::default::Default for PipelineRepresentativeFragmentTestStateCreateInfoNV { + fn default() -> PipelineRepresentativeFragmentTestStateCreateInfoNV { + PipelineRepresentativeFragmentTestStateCreateInfoNV { + s_type: StructureType::PIPELINE_REPRESENTATIVE_FRAGMENT_TEST_STATE_CREATE_INFO_NV, + p_next: ::std::ptr::null(), + representative_fragment_test_enable: Bool32::default(), + } + } +} +impl PipelineRepresentativeFragmentTestStateCreateInfoNV { + pub fn builder<'a>() -> PipelineRepresentativeFragmentTestStateCreateInfoNVBuilder<'a> { + PipelineRepresentativeFragmentTestStateCreateInfoNVBuilder { + inner: PipelineRepresentativeFragmentTestStateCreateInfoNV::default(), + marker: ::std::marker::PhantomData, + } + } +} +#[repr(transparent)] +pub struct PipelineRepresentativeFragmentTestStateCreateInfoNVBuilder<'a> { + inner: PipelineRepresentativeFragmentTestStateCreateInfoNV, + marker: ::std::marker::PhantomData<&'a ()>, +} +unsafe impl ExtendsGraphicsPipelineCreateInfo + for PipelineRepresentativeFragmentTestStateCreateInfoNVBuilder<'_> +{ +} +unsafe impl ExtendsGraphicsPipelineCreateInfo + for PipelineRepresentativeFragmentTestStateCreateInfoNV +{ +} +impl<'a> ::std::ops::Deref for PipelineRepresentativeFragmentTestStateCreateInfoNVBuilder<'a> { + type Target = PipelineRepresentativeFragmentTestStateCreateInfoNV; + fn deref(&self) -> &Self::Target { + &self.inner + } +} +impl<'a> ::std::ops::DerefMut for PipelineRepresentativeFragmentTestStateCreateInfoNVBuilder<'a> { + fn deref_mut(&mut self) -> &mut Self::Target { + &mut self.inner + } +} +impl<'a> PipelineRepresentativeFragmentTestStateCreateInfoNVBuilder<'a> { + pub fn representative_fragment_test_enable( + mut self, + representative_fragment_test_enable: bool, + ) -> PipelineRepresentativeFragmentTestStateCreateInfoNVBuilder<'a> { + self.inner.representative_fragment_test_enable = representative_fragment_test_enable.into(); + self + } + #[doc = r" Calling build will **discard** all the lifetime information. Only call this if"] + #[doc = r" necessary! 
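
`PipelineRepresentativeFragmentTestStateCreateInfoNV` opts a single graphics pipeline into the representative fragment test; the matching feature struct above must have been enabled at device creation first. Sketch:

```rust
use ash::vk;

let mut rep_test = vk::PipelineRepresentativeFragmentTestStateCreateInfoNV::builder()
    .representative_fragment_test_enable(true);
let _pipeline_info = vk::GraphicsPipelineCreateInfo::builder()
    .push_next(&mut rep_test);
// ... the rest of the required pipeline state still has to be filled in ...
```
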
Builders implement `Deref` targeting their corresponding Vulkan struct,"] + #[doc = r" so references to builders can be passed directly to Vulkan functions."] + pub fn build(self) -> PipelineRepresentativeFragmentTestStateCreateInfoNV { + self.inner + } +} +#[repr(C)] +#[derive(Copy, Clone, Debug)] +#[doc = ""] +pub struct PhysicalDeviceExclusiveScissorFeaturesNV { + pub s_type: StructureType, + pub p_next: *mut c_void, + pub exclusive_scissor: Bool32, +} +impl ::std::default::Default for PhysicalDeviceExclusiveScissorFeaturesNV { + fn default() -> PhysicalDeviceExclusiveScissorFeaturesNV { + PhysicalDeviceExclusiveScissorFeaturesNV { + s_type: StructureType::PHYSICAL_DEVICE_EXCLUSIVE_SCISSOR_FEATURES_NV, + p_next: ::std::ptr::null_mut(), + exclusive_scissor: Bool32::default(), + } + } +} +impl PhysicalDeviceExclusiveScissorFeaturesNV { + pub fn builder<'a>() -> PhysicalDeviceExclusiveScissorFeaturesNVBuilder<'a> { + PhysicalDeviceExclusiveScissorFeaturesNVBuilder { + inner: PhysicalDeviceExclusiveScissorFeaturesNV::default(), + marker: ::std::marker::PhantomData, + } + } +} +#[repr(transparent)] +pub struct PhysicalDeviceExclusiveScissorFeaturesNVBuilder<'a> { + inner: PhysicalDeviceExclusiveScissorFeaturesNV, + marker: ::std::marker::PhantomData<&'a ()>, +} +unsafe impl ExtendsDeviceCreateInfo for PhysicalDeviceExclusiveScissorFeaturesNVBuilder<'_> {} +unsafe impl ExtendsDeviceCreateInfo for PhysicalDeviceExclusiveScissorFeaturesNV {} +impl<'a> ::std::ops::Deref for PhysicalDeviceExclusiveScissorFeaturesNVBuilder<'a> { + type Target = PhysicalDeviceExclusiveScissorFeaturesNV; + fn deref(&self) -> &Self::Target { + &self.inner + } +} +impl<'a> ::std::ops::DerefMut for PhysicalDeviceExclusiveScissorFeaturesNVBuilder<'a> { + fn deref_mut(&mut self) -> &mut Self::Target { + &mut self.inner + } +} +impl<'a> PhysicalDeviceExclusiveScissorFeaturesNVBuilder<'a> { + pub fn exclusive_scissor( + mut self, + exclusive_scissor: bool, + ) -> PhysicalDeviceExclusiveScissorFeaturesNVBuilder<'a> { + self.inner.exclusive_scissor = exclusive_scissor.into(); + self + } + #[doc = r" Calling build will **discard** all the lifetime information. Only call this if"] + #[doc = r" necessary! 
Builders implement `Deref` targeting their corresponding Vulkan struct,"] + #[doc = r" so references to builders can be passed directly to Vulkan functions."] + pub fn build(self) -> PhysicalDeviceExclusiveScissorFeaturesNV { + self.inner + } +} +#[repr(C)] +#[derive(Copy, Clone, Debug)] +#[doc = ""] +pub struct PipelineViewportExclusiveScissorStateCreateInfoNV { + pub s_type: StructureType, + pub p_next: *const c_void, + pub exclusive_scissor_count: u32, + pub p_exclusive_scissors: *const Rect2D, +} +impl ::std::default::Default for PipelineViewportExclusiveScissorStateCreateInfoNV { + fn default() -> PipelineViewportExclusiveScissorStateCreateInfoNV { + PipelineViewportExclusiveScissorStateCreateInfoNV { + s_type: StructureType::PIPELINE_VIEWPORT_EXCLUSIVE_SCISSOR_STATE_CREATE_INFO_NV, + p_next: ::std::ptr::null(), + exclusive_scissor_count: u32::default(), + p_exclusive_scissors: ::std::ptr::null(), + } + } +} +impl PipelineViewportExclusiveScissorStateCreateInfoNV { + pub fn builder<'a>() -> PipelineViewportExclusiveScissorStateCreateInfoNVBuilder<'a> { + PipelineViewportExclusiveScissorStateCreateInfoNVBuilder { + inner: PipelineViewportExclusiveScissorStateCreateInfoNV::default(), + marker: ::std::marker::PhantomData, + } + } +} +#[repr(transparent)] +pub struct PipelineViewportExclusiveScissorStateCreateInfoNVBuilder<'a> { + inner: PipelineViewportExclusiveScissorStateCreateInfoNV, + marker: ::std::marker::PhantomData<&'a ()>, +} +unsafe impl ExtendsPipelineViewportStateCreateInfo + for PipelineViewportExclusiveScissorStateCreateInfoNVBuilder<'_> +{ +} +unsafe impl ExtendsPipelineViewportStateCreateInfo + for PipelineViewportExclusiveScissorStateCreateInfoNV +{ +} +impl<'a> ::std::ops::Deref for PipelineViewportExclusiveScissorStateCreateInfoNVBuilder<'a> { + type Target = PipelineViewportExclusiveScissorStateCreateInfoNV; + fn deref(&self) -> &Self::Target { + &self.inner + } +} +impl<'a> ::std::ops::DerefMut for PipelineViewportExclusiveScissorStateCreateInfoNVBuilder<'a> { + fn deref_mut(&mut self) -> &mut Self::Target { + &mut self.inner + } +} +impl<'a> PipelineViewportExclusiveScissorStateCreateInfoNVBuilder<'a> { + pub fn exclusive_scissors( + mut self, + exclusive_scissors: &'a [Rect2D], + ) -> PipelineViewportExclusiveScissorStateCreateInfoNVBuilder<'a> { + self.inner.exclusive_scissor_count = exclusive_scissors.len() as _; + self.inner.p_exclusive_scissors = exclusive_scissors.as_ptr(); + self + } + #[doc = r" Calling build will **discard** all the lifetime information. Only call this if"] + #[doc = r" necessary! 
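
`exclusive_scissors` shows the slice convention used by the generated setters: one call fills both `exclusive_scissor_count` (from `len()`) and `p_exclusive_scissors` (from `as_ptr()`), so the count and pointer can never disagree, though the slice must stay alive for as long as the struct is used. Sketch:

```rust
use ash::vk;

let scissors = [vk::Rect2D {
    offset: vk::Offset2D { x: 0, y: 0 },
    extent: vk::Extent2D { width: 640, height: 480 },
}];
let mut excl = vk::PipelineViewportExclusiveScissorStateCreateInfoNV::builder()
    .exclusive_scissors(&scissors); // sets count and pointer together
let _viewport_state = vk::PipelineViewportStateCreateInfo::builder()
    .push_next(&mut excl);
```
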
Builders implement `Deref` targeting their corresponding Vulkan struct,"] + #[doc = r" so references to builders can be passed directly to Vulkan functions."] + pub fn build(self) -> PipelineViewportExclusiveScissorStateCreateInfoNV { + self.inner + } +} +#[repr(C)] +#[derive(Copy, Clone, Debug)] +#[doc = ""] +pub struct PhysicalDeviceCornerSampledImageFeaturesNV { + pub s_type: StructureType, + pub p_next: *mut c_void, + pub corner_sampled_image: Bool32, +} +impl ::std::default::Default for PhysicalDeviceCornerSampledImageFeaturesNV { + fn default() -> PhysicalDeviceCornerSampledImageFeaturesNV { + PhysicalDeviceCornerSampledImageFeaturesNV { + s_type: StructureType::PHYSICAL_DEVICE_CORNER_SAMPLED_IMAGE_FEATURES_NV, + p_next: ::std::ptr::null_mut(), + corner_sampled_image: Bool32::default(), + } + } +} +impl PhysicalDeviceCornerSampledImageFeaturesNV { + pub fn builder<'a>() -> PhysicalDeviceCornerSampledImageFeaturesNVBuilder<'a> { + PhysicalDeviceCornerSampledImageFeaturesNVBuilder { + inner: PhysicalDeviceCornerSampledImageFeaturesNV::default(), + marker: ::std::marker::PhantomData, + } + } +} +#[repr(transparent)] +pub struct PhysicalDeviceCornerSampledImageFeaturesNVBuilder<'a> { + inner: PhysicalDeviceCornerSampledImageFeaturesNV, + marker: ::std::marker::PhantomData<&'a ()>, +} +unsafe impl ExtendsDeviceCreateInfo for PhysicalDeviceCornerSampledImageFeaturesNVBuilder<'_> {} +unsafe impl ExtendsDeviceCreateInfo for PhysicalDeviceCornerSampledImageFeaturesNV {} +impl<'a> ::std::ops::Deref for PhysicalDeviceCornerSampledImageFeaturesNVBuilder<'a> { + type Target = PhysicalDeviceCornerSampledImageFeaturesNV; + fn deref(&self) -> &Self::Target { + &self.inner + } +} +impl<'a> ::std::ops::DerefMut for PhysicalDeviceCornerSampledImageFeaturesNVBuilder<'a> { + fn deref_mut(&mut self) -> &mut Self::Target { + &mut self.inner + } +} +impl<'a> PhysicalDeviceCornerSampledImageFeaturesNVBuilder<'a> { + pub fn corner_sampled_image( + mut self, + corner_sampled_image: bool, + ) -> PhysicalDeviceCornerSampledImageFeaturesNVBuilder<'a> { + self.inner.corner_sampled_image = corner_sampled_image.into(); + self + } + #[doc = r" Calling build will **discard** all the lifetime information. Only call this if"] + #[doc = r" necessary! 
Builders implement `Deref` targeting their corresponding Vulkan struct,"] + #[doc = r" so references to builders can be passed directly to Vulkan functions."] + pub fn build(self) -> PhysicalDeviceCornerSampledImageFeaturesNV { + self.inner + } +} +#[repr(C)] +#[derive(Copy, Clone, Debug)] +#[doc = ""] +pub struct PhysicalDeviceComputeShaderDerivativesFeaturesNV { + pub s_type: StructureType, + pub p_next: *mut c_void, + pub compute_derivative_group_quads: Bool32, + pub compute_derivative_group_linear: Bool32, +} +impl ::std::default::Default for PhysicalDeviceComputeShaderDerivativesFeaturesNV { + fn default() -> PhysicalDeviceComputeShaderDerivativesFeaturesNV { + PhysicalDeviceComputeShaderDerivativesFeaturesNV { + s_type: StructureType::PHYSICAL_DEVICE_COMPUTE_SHADER_DERIVATIVES_FEATURES_NV, + p_next: ::std::ptr::null_mut(), + compute_derivative_group_quads: Bool32::default(), + compute_derivative_group_linear: Bool32::default(), + } + } +} +impl PhysicalDeviceComputeShaderDerivativesFeaturesNV { + pub fn builder<'a>() -> PhysicalDeviceComputeShaderDerivativesFeaturesNVBuilder<'a> { + PhysicalDeviceComputeShaderDerivativesFeaturesNVBuilder { + inner: PhysicalDeviceComputeShaderDerivativesFeaturesNV::default(), + marker: ::std::marker::PhantomData, + } + } +} +#[repr(transparent)] +pub struct PhysicalDeviceComputeShaderDerivativesFeaturesNVBuilder<'a> { + inner: PhysicalDeviceComputeShaderDerivativesFeaturesNV, + marker: ::std::marker::PhantomData<&'a ()>, +} +unsafe impl ExtendsDeviceCreateInfo + for PhysicalDeviceComputeShaderDerivativesFeaturesNVBuilder<'_> +{ +} +unsafe impl ExtendsDeviceCreateInfo for PhysicalDeviceComputeShaderDerivativesFeaturesNV {} +impl<'a> ::std::ops::Deref for PhysicalDeviceComputeShaderDerivativesFeaturesNVBuilder<'a> { + type Target = PhysicalDeviceComputeShaderDerivativesFeaturesNV; + fn deref(&self) -> &Self::Target { + &self.inner + } +} +impl<'a> ::std::ops::DerefMut for PhysicalDeviceComputeShaderDerivativesFeaturesNVBuilder<'a> { + fn deref_mut(&mut self) -> &mut Self::Target { + &mut self.inner + } +} +impl<'a> PhysicalDeviceComputeShaderDerivativesFeaturesNVBuilder<'a> { + pub fn compute_derivative_group_quads( + mut self, + compute_derivative_group_quads: bool, + ) -> PhysicalDeviceComputeShaderDerivativesFeaturesNVBuilder<'a> { + self.inner.compute_derivative_group_quads = compute_derivative_group_quads.into(); + self + } + pub fn compute_derivative_group_linear( + mut self, + compute_derivative_group_linear: bool, + ) -> PhysicalDeviceComputeShaderDerivativesFeaturesNVBuilder<'a> { + self.inner.compute_derivative_group_linear = compute_derivative_group_linear.into(); + self + } + #[doc = r" Calling build will **discard** all the lifetime information. Only call this if"] + #[doc = r" necessary! 
Builders implement `Deref` targeting their corresponding Vulkan struct,"] + #[doc = r" so references to builders can be passed directly to Vulkan functions."] + pub fn build(self) -> PhysicalDeviceComputeShaderDerivativesFeaturesNV { + self.inner + } +} +#[repr(C)] +#[derive(Copy, Clone, Debug)] +#[doc = ""] +pub struct PhysicalDeviceFragmentShaderBarycentricFeaturesNV { + pub s_type: StructureType, + pub p_next: *mut c_void, + pub fragment_shader_barycentric: Bool32, +} +impl ::std::default::Default for PhysicalDeviceFragmentShaderBarycentricFeaturesNV { + fn default() -> PhysicalDeviceFragmentShaderBarycentricFeaturesNV { + PhysicalDeviceFragmentShaderBarycentricFeaturesNV { + s_type: StructureType::PHYSICAL_DEVICE_FRAGMENT_SHADER_BARYCENTRIC_FEATURES_NV, + p_next: ::std::ptr::null_mut(), + fragment_shader_barycentric: Bool32::default(), + } + } +} +impl PhysicalDeviceFragmentShaderBarycentricFeaturesNV { + pub fn builder<'a>() -> PhysicalDeviceFragmentShaderBarycentricFeaturesNVBuilder<'a> { + PhysicalDeviceFragmentShaderBarycentricFeaturesNVBuilder { + inner: PhysicalDeviceFragmentShaderBarycentricFeaturesNV::default(), + marker: ::std::marker::PhantomData, + } + } +} +#[repr(transparent)] +pub struct PhysicalDeviceFragmentShaderBarycentricFeaturesNVBuilder<'a> { + inner: PhysicalDeviceFragmentShaderBarycentricFeaturesNV, + marker: ::std::marker::PhantomData<&'a ()>, +} +unsafe impl ExtendsDeviceCreateInfo + for PhysicalDeviceFragmentShaderBarycentricFeaturesNVBuilder<'_> +{ +} +unsafe impl ExtendsDeviceCreateInfo for PhysicalDeviceFragmentShaderBarycentricFeaturesNV {} +impl<'a> ::std::ops::Deref for PhysicalDeviceFragmentShaderBarycentricFeaturesNVBuilder<'a> { + type Target = PhysicalDeviceFragmentShaderBarycentricFeaturesNV; + fn deref(&self) -> &Self::Target { + &self.inner + } +} +impl<'a> ::std::ops::DerefMut for PhysicalDeviceFragmentShaderBarycentricFeaturesNVBuilder<'a> { + fn deref_mut(&mut self) -> &mut Self::Target { + &mut self.inner + } +} +impl<'a> PhysicalDeviceFragmentShaderBarycentricFeaturesNVBuilder<'a> { + pub fn fragment_shader_barycentric( + mut self, + fragment_shader_barycentric: bool, + ) -> PhysicalDeviceFragmentShaderBarycentricFeaturesNVBuilder<'a> { + self.inner.fragment_shader_barycentric = fragment_shader_barycentric.into(); + self + } + #[doc = r" Calling build will **discard** all the lifetime information. Only call this if"] + #[doc = r" necessary! 
Builders implement `Deref` targeting their corresponding Vulkan struct,"] + #[doc = r" so references to builders can be passed directly to Vulkan functions."] + pub fn build(self) -> PhysicalDeviceFragmentShaderBarycentricFeaturesNV { + self.inner + } +} +#[repr(C)] +#[derive(Copy, Clone, Debug)] +#[doc = ""] +pub struct PhysicalDeviceShaderImageFootprintFeaturesNV { + pub s_type: StructureType, + pub p_next: *mut c_void, + pub image_footprint: Bool32, +} +impl ::std::default::Default for PhysicalDeviceShaderImageFootprintFeaturesNV { + fn default() -> PhysicalDeviceShaderImageFootprintFeaturesNV { + PhysicalDeviceShaderImageFootprintFeaturesNV { + s_type: StructureType::PHYSICAL_DEVICE_SHADER_IMAGE_FOOTPRINT_FEATURES_NV, + p_next: ::std::ptr::null_mut(), + image_footprint: Bool32::default(), + } + } +} +impl PhysicalDeviceShaderImageFootprintFeaturesNV { + pub fn builder<'a>() -> PhysicalDeviceShaderImageFootprintFeaturesNVBuilder<'a> { + PhysicalDeviceShaderImageFootprintFeaturesNVBuilder { + inner: PhysicalDeviceShaderImageFootprintFeaturesNV::default(), + marker: ::std::marker::PhantomData, + } + } +} +#[repr(transparent)] +pub struct PhysicalDeviceShaderImageFootprintFeaturesNVBuilder<'a> { + inner: PhysicalDeviceShaderImageFootprintFeaturesNV, + marker: ::std::marker::PhantomData<&'a ()>, +} +unsafe impl ExtendsDeviceCreateInfo for PhysicalDeviceShaderImageFootprintFeaturesNVBuilder<'_> {} +unsafe impl ExtendsDeviceCreateInfo for PhysicalDeviceShaderImageFootprintFeaturesNV {} +impl<'a> ::std::ops::Deref for PhysicalDeviceShaderImageFootprintFeaturesNVBuilder<'a> { + type Target = PhysicalDeviceShaderImageFootprintFeaturesNV; + fn deref(&self) -> &Self::Target { + &self.inner + } +} +impl<'a> ::std::ops::DerefMut for PhysicalDeviceShaderImageFootprintFeaturesNVBuilder<'a> { + fn deref_mut(&mut self) -> &mut Self::Target { + &mut self.inner + } +} +impl<'a> PhysicalDeviceShaderImageFootprintFeaturesNVBuilder<'a> { + pub fn image_footprint( + mut self, + image_footprint: bool, + ) -> PhysicalDeviceShaderImageFootprintFeaturesNVBuilder<'a> { + self.inner.image_footprint = image_footprint.into(); + self + } + #[doc = r" Calling build will **discard** all the lifetime information. Only call this if"] + #[doc = r" necessary! 
Builders implement `Deref` targeting their corresponding Vulkan struct,"] + #[doc = r" so references to builders can be passed directly to Vulkan functions."] + pub fn build(self) -> PhysicalDeviceShaderImageFootprintFeaturesNV { + self.inner + } +} +#[repr(C)] +#[derive(Copy, Clone, Debug)] +#[doc = ""] +pub struct ShadingRatePaletteNV { + pub shading_rate_palette_entry_count: u32, + pub p_shading_rate_palette_entries: *const ShadingRatePaletteEntryNV, +} +impl ::std::default::Default for ShadingRatePaletteNV { + fn default() -> ShadingRatePaletteNV { + ShadingRatePaletteNV { + shading_rate_palette_entry_count: u32::default(), + p_shading_rate_palette_entries: ::std::ptr::null(), + } + } +} +impl ShadingRatePaletteNV { + pub fn builder<'a>() -> ShadingRatePaletteNVBuilder<'a> { + ShadingRatePaletteNVBuilder { + inner: ShadingRatePaletteNV::default(), + marker: ::std::marker::PhantomData, + } + } +} +#[repr(transparent)] +pub struct ShadingRatePaletteNVBuilder<'a> { + inner: ShadingRatePaletteNV, + marker: ::std::marker::PhantomData<&'a ()>, +} +impl<'a> ::std::ops::Deref for ShadingRatePaletteNVBuilder<'a> { + type Target = ShadingRatePaletteNV; + fn deref(&self) -> &Self::Target { + &self.inner + } +} +impl<'a> ::std::ops::DerefMut for ShadingRatePaletteNVBuilder<'a> { + fn deref_mut(&mut self) -> &mut Self::Target { + &mut self.inner + } +} +impl<'a> ShadingRatePaletteNVBuilder<'a> { + pub fn shading_rate_palette_entries( + mut self, + shading_rate_palette_entries: &'a [ShadingRatePaletteEntryNV], + ) -> ShadingRatePaletteNVBuilder<'a> { + self.inner.shading_rate_palette_entry_count = shading_rate_palette_entries.len() as _; + self.inner.p_shading_rate_palette_entries = shading_rate_palette_entries.as_ptr(); + self + } + #[doc = r" Calling build will **discard** all the lifetime information. Only call this if"] + #[doc = r" necessary! 
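Each count + pointer pair in these generated structs gets a single slice setter on the builder, and the builder's lifetime parameter keeps the borrowed slice alive for as long as the builder is used. A minimal usage sketch, assuming these generated types are reachable as `vk::*` (the module path is an assumption, not part of this file):

// Hypothetical usage sketch, not part of the generated bindings.
let entries = [vk::ShadingRatePaletteEntryNV::default()];
let palette = vk::ShadingRatePaletteNV::builder()
    .shading_rate_palette_entries(&entries);
// The setter filled in shading_rate_palette_entry_count and
// p_shading_rate_palette_entries together from the slice. Because the
// builder implements Deref, `&*palette` yields a &vk::ShadingRatePaletteNV
// that can be passed to Vulkan without calling .build() and discarding the
// lifetime that ties it to `entries`.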
Builders implement `Deref` targeting their corresponding Vulkan struct,"] + #[doc = r" so references to builders can be passed directly to Vulkan functions."] + pub fn build(self) -> ShadingRatePaletteNV { + self.inner + } +} +#[repr(C)] +#[derive(Copy, Clone, Debug)] +#[doc = ""] +pub struct PipelineViewportShadingRateImageStateCreateInfoNV { + pub s_type: StructureType, + pub p_next: *const c_void, + pub shading_rate_image_enable: Bool32, + pub viewport_count: u32, + pub p_shading_rate_palettes: *const ShadingRatePaletteNV, +} +impl ::std::default::Default for PipelineViewportShadingRateImageStateCreateInfoNV { + fn default() -> PipelineViewportShadingRateImageStateCreateInfoNV { + PipelineViewportShadingRateImageStateCreateInfoNV { + s_type: StructureType::PIPELINE_VIEWPORT_SHADING_RATE_IMAGE_STATE_CREATE_INFO_NV, + p_next: ::std::ptr::null(), + shading_rate_image_enable: Bool32::default(), + viewport_count: u32::default(), + p_shading_rate_palettes: ::std::ptr::null(), + } + } +} +impl PipelineViewportShadingRateImageStateCreateInfoNV { + pub fn builder<'a>() -> PipelineViewportShadingRateImageStateCreateInfoNVBuilder<'a> { + PipelineViewportShadingRateImageStateCreateInfoNVBuilder { + inner: PipelineViewportShadingRateImageStateCreateInfoNV::default(), + marker: ::std::marker::PhantomData, + } + } +} +#[repr(transparent)] +pub struct PipelineViewportShadingRateImageStateCreateInfoNVBuilder<'a> { + inner: PipelineViewportShadingRateImageStateCreateInfoNV, + marker: ::std::marker::PhantomData<&'a ()>, +} +unsafe impl ExtendsPipelineViewportStateCreateInfo + for PipelineViewportShadingRateImageStateCreateInfoNVBuilder<'_> +{ +} +unsafe impl ExtendsPipelineViewportStateCreateInfo + for PipelineViewportShadingRateImageStateCreateInfoNV +{ +} +impl<'a> ::std::ops::Deref for PipelineViewportShadingRateImageStateCreateInfoNVBuilder<'a> { + type Target = PipelineViewportShadingRateImageStateCreateInfoNV; + fn deref(&self) -> &Self::Target { + &self.inner + } +} +impl<'a> ::std::ops::DerefMut for PipelineViewportShadingRateImageStateCreateInfoNVBuilder<'a> { + fn deref_mut(&mut self) -> &mut Self::Target { + &mut self.inner + } +} +impl<'a> PipelineViewportShadingRateImageStateCreateInfoNVBuilder<'a> { + pub fn shading_rate_image_enable( + mut self, + shading_rate_image_enable: bool, + ) -> PipelineViewportShadingRateImageStateCreateInfoNVBuilder<'a> { + self.inner.shading_rate_image_enable = shading_rate_image_enable.into(); + self + } + pub fn shading_rate_palettes( + mut self, + shading_rate_palettes: &'a [ShadingRatePaletteNV], + ) -> PipelineViewportShadingRateImageStateCreateInfoNVBuilder<'a> { + self.inner.viewport_count = shading_rate_palettes.len() as _; + self.inner.p_shading_rate_palettes = shading_rate_palettes.as_ptr(); + self + } + #[doc = r" Calling build will **discard** all the lifetime information. Only call this if"] + #[doc = r" necessary! 
Builders implement `Deref` targeting their corresponding Vulkan struct,"] + #[doc = r" so references to builders can be passed directly to Vulkan functions."] + pub fn build(self) -> PipelineViewportShadingRateImageStateCreateInfoNV { + self.inner + } +} +#[repr(C)] +#[derive(Copy, Clone, Debug)] +#[doc = ""] +pub struct PhysicalDeviceShadingRateImageFeaturesNV { + pub s_type: StructureType, + pub p_next: *mut c_void, + pub shading_rate_image: Bool32, + pub shading_rate_coarse_sample_order: Bool32, +} +impl ::std::default::Default for PhysicalDeviceShadingRateImageFeaturesNV { + fn default() -> PhysicalDeviceShadingRateImageFeaturesNV { + PhysicalDeviceShadingRateImageFeaturesNV { + s_type: StructureType::PHYSICAL_DEVICE_SHADING_RATE_IMAGE_FEATURES_NV, + p_next: ::std::ptr::null_mut(), + shading_rate_image: Bool32::default(), + shading_rate_coarse_sample_order: Bool32::default(), + } + } +} +impl PhysicalDeviceShadingRateImageFeaturesNV { + pub fn builder<'a>() -> PhysicalDeviceShadingRateImageFeaturesNVBuilder<'a> { + PhysicalDeviceShadingRateImageFeaturesNVBuilder { + inner: PhysicalDeviceShadingRateImageFeaturesNV::default(), + marker: ::std::marker::PhantomData, + } + } +} +#[repr(transparent)] +pub struct PhysicalDeviceShadingRateImageFeaturesNVBuilder<'a> { + inner: PhysicalDeviceShadingRateImageFeaturesNV, + marker: ::std::marker::PhantomData<&'a ()>, +} +unsafe impl ExtendsDeviceCreateInfo for PhysicalDeviceShadingRateImageFeaturesNVBuilder<'_> {} +unsafe impl ExtendsDeviceCreateInfo for PhysicalDeviceShadingRateImageFeaturesNV {} +impl<'a> ::std::ops::Deref for PhysicalDeviceShadingRateImageFeaturesNVBuilder<'a> { + type Target = PhysicalDeviceShadingRateImageFeaturesNV; + fn deref(&self) -> &Self::Target { + &self.inner + } +} +impl<'a> ::std::ops::DerefMut for PhysicalDeviceShadingRateImageFeaturesNVBuilder<'a> { + fn deref_mut(&mut self) -> &mut Self::Target { + &mut self.inner + } +} +impl<'a> PhysicalDeviceShadingRateImageFeaturesNVBuilder<'a> { + pub fn shading_rate_image( + mut self, + shading_rate_image: bool, + ) -> PhysicalDeviceShadingRateImageFeaturesNVBuilder<'a> { + self.inner.shading_rate_image = shading_rate_image.into(); + self + } + pub fn shading_rate_coarse_sample_order( + mut self, + shading_rate_coarse_sample_order: bool, + ) -> PhysicalDeviceShadingRateImageFeaturesNVBuilder<'a> { + self.inner.shading_rate_coarse_sample_order = shading_rate_coarse_sample_order.into(); + self + } + #[doc = r" Calling build will **discard** all the lifetime information. Only call this if"] + #[doc = r" necessary! 
Builders implement `Deref` targeting their corresponding Vulkan struct,"] + #[doc = r" so references to builders can be passed directly to Vulkan functions."] + pub fn build(self) -> PhysicalDeviceShadingRateImageFeaturesNV { + self.inner + } +} +#[repr(C)] +#[derive(Copy, Clone, Debug)] +#[doc = ""] +pub struct PhysicalDeviceShadingRateImagePropertiesNV { + pub s_type: StructureType, + pub p_next: *mut c_void, + pub shading_rate_texel_size: Extent2D, + pub shading_rate_palette_size: u32, + pub shading_rate_max_coarse_samples: u32, +} +impl ::std::default::Default for PhysicalDeviceShadingRateImagePropertiesNV { + fn default() -> PhysicalDeviceShadingRateImagePropertiesNV { + PhysicalDeviceShadingRateImagePropertiesNV { + s_type: StructureType::PHYSICAL_DEVICE_SHADING_RATE_IMAGE_PROPERTIES_NV, + p_next: ::std::ptr::null_mut(), + shading_rate_texel_size: Extent2D::default(), + shading_rate_palette_size: u32::default(), + shading_rate_max_coarse_samples: u32::default(), + } + } +} +impl PhysicalDeviceShadingRateImagePropertiesNV { + pub fn builder<'a>() -> PhysicalDeviceShadingRateImagePropertiesNVBuilder<'a> { + PhysicalDeviceShadingRateImagePropertiesNVBuilder { + inner: PhysicalDeviceShadingRateImagePropertiesNV::default(), + marker: ::std::marker::PhantomData, + } + } +} +#[repr(transparent)] +pub struct PhysicalDeviceShadingRateImagePropertiesNVBuilder<'a> { + inner: PhysicalDeviceShadingRateImagePropertiesNV, + marker: ::std::marker::PhantomData<&'a ()>, +} +unsafe impl ExtendsPhysicalDeviceProperties2 + for PhysicalDeviceShadingRateImagePropertiesNVBuilder<'_> +{ +} +unsafe impl ExtendsPhysicalDeviceProperties2 for PhysicalDeviceShadingRateImagePropertiesNV {} +impl<'a> ::std::ops::Deref for PhysicalDeviceShadingRateImagePropertiesNVBuilder<'a> { + type Target = PhysicalDeviceShadingRateImagePropertiesNV; + fn deref(&self) -> &Self::Target { + &self.inner + } +} +impl<'a> ::std::ops::DerefMut for PhysicalDeviceShadingRateImagePropertiesNVBuilder<'a> { + fn deref_mut(&mut self) -> &mut Self::Target { + &mut self.inner + } +} +impl<'a> PhysicalDeviceShadingRateImagePropertiesNVBuilder<'a> { + pub fn shading_rate_texel_size( + mut self, + shading_rate_texel_size: Extent2D, + ) -> PhysicalDeviceShadingRateImagePropertiesNVBuilder<'a> { + self.inner.shading_rate_texel_size = shading_rate_texel_size; + self + } + pub fn shading_rate_palette_size( + mut self, + shading_rate_palette_size: u32, + ) -> PhysicalDeviceShadingRateImagePropertiesNVBuilder<'a> { + self.inner.shading_rate_palette_size = shading_rate_palette_size; + self + } + pub fn shading_rate_max_coarse_samples( + mut self, + shading_rate_max_coarse_samples: u32, + ) -> PhysicalDeviceShadingRateImagePropertiesNVBuilder<'a> { + self.inner.shading_rate_max_coarse_samples = shading_rate_max_coarse_samples; + self + } + #[doc = r" Calling build will **discard** all the lifetime information. Only call this if"] + #[doc = r" necessary! 
Builders implement `Deref` targeting their corresponding Vulkan struct,"] + #[doc = r" so references to builders can be passed directly to Vulkan functions."] + pub fn build(self) -> PhysicalDeviceShadingRateImagePropertiesNV { + self.inner + } +} +#[repr(C)] +#[derive(Copy, Clone, Default, Debug)] +#[doc = ""] +pub struct CoarseSampleLocationNV { + pub pixel_x: u32, + pub pixel_y: u32, + pub sample: u32, +} +impl CoarseSampleLocationNV { + pub fn builder<'a>() -> CoarseSampleLocationNVBuilder<'a> { + CoarseSampleLocationNVBuilder { + inner: CoarseSampleLocationNV::default(), + marker: ::std::marker::PhantomData, + } + } +} +#[repr(transparent)] +pub struct CoarseSampleLocationNVBuilder<'a> { + inner: CoarseSampleLocationNV, + marker: ::std::marker::PhantomData<&'a ()>, +} +impl<'a> ::std::ops::Deref for CoarseSampleLocationNVBuilder<'a> { + type Target = CoarseSampleLocationNV; + fn deref(&self) -> &Self::Target { + &self.inner + } +} +impl<'a> ::std::ops::DerefMut for CoarseSampleLocationNVBuilder<'a> { + fn deref_mut(&mut self) -> &mut Self::Target { + &mut self.inner + } +} +impl<'a> CoarseSampleLocationNVBuilder<'a> { + pub fn pixel_x(mut self, pixel_x: u32) -> CoarseSampleLocationNVBuilder<'a> { + self.inner.pixel_x = pixel_x; + self + } + pub fn pixel_y(mut self, pixel_y: u32) -> CoarseSampleLocationNVBuilder<'a> { + self.inner.pixel_y = pixel_y; + self + } + pub fn sample(mut self, sample: u32) -> CoarseSampleLocationNVBuilder<'a> { + self.inner.sample = sample; + self + } + #[doc = r" Calling build will **discard** all the lifetime information. Only call this if"] + #[doc = r" necessary! Builders implement `Deref` targeting their corresponding Vulkan struct,"] + #[doc = r" so references to builders can be passed directly to Vulkan functions."] + pub fn build(self) -> CoarseSampleLocationNV { + self.inner + } +} +#[repr(C)] +#[derive(Copy, Clone, Debug)] +#[doc = ""] +pub struct CoarseSampleOrderCustomNV { + pub shading_rate: ShadingRatePaletteEntryNV, + pub sample_count: u32, + pub sample_location_count: u32, + pub p_sample_locations: *const CoarseSampleLocationNV, +} +impl ::std::default::Default for CoarseSampleOrderCustomNV { + fn default() -> CoarseSampleOrderCustomNV { + CoarseSampleOrderCustomNV { + shading_rate: ShadingRatePaletteEntryNV::default(), + sample_count: u32::default(), + sample_location_count: u32::default(), + p_sample_locations: ::std::ptr::null(), + } + } +} +impl CoarseSampleOrderCustomNV { + pub fn builder<'a>() -> CoarseSampleOrderCustomNVBuilder<'a> { + CoarseSampleOrderCustomNVBuilder { + inner: CoarseSampleOrderCustomNV::default(), + marker: ::std::marker::PhantomData, + } + } +} +#[repr(transparent)] +pub struct CoarseSampleOrderCustomNVBuilder<'a> { + inner: CoarseSampleOrderCustomNV, + marker: ::std::marker::PhantomData<&'a ()>, +} +impl<'a> ::std::ops::Deref for CoarseSampleOrderCustomNVBuilder<'a> { + type Target = CoarseSampleOrderCustomNV; + fn deref(&self) -> &Self::Target { + &self.inner + } +} +impl<'a> ::std::ops::DerefMut for CoarseSampleOrderCustomNVBuilder<'a> { + fn deref_mut(&mut self) -> &mut Self::Target { + &mut self.inner + } +} +impl<'a> CoarseSampleOrderCustomNVBuilder<'a> { + pub fn shading_rate( + mut self, + shading_rate: ShadingRatePaletteEntryNV, + ) -> CoarseSampleOrderCustomNVBuilder<'a> { + self.inner.shading_rate = shading_rate; + self + } + pub fn sample_count(mut self, sample_count: u32) -> CoarseSampleOrderCustomNVBuilder<'a> { + self.inner.sample_count = sample_count; + self + } + pub fn sample_locations( + mut self, + 
sample_locations: &'a [CoarseSampleLocationNV], + ) -> CoarseSampleOrderCustomNVBuilder<'a> { + self.inner.sample_location_count = sample_locations.len() as _; + self.inner.p_sample_locations = sample_locations.as_ptr(); + self + } + #[doc = r" Calling build will **discard** all the lifetime information. Only call this if"] + #[doc = r" necessary! Builders implement `Deref` targeting their corresponding Vulkan struct,"] + #[doc = r" so references to builders can be passed directly to Vulkan functions."] + pub fn build(self) -> CoarseSampleOrderCustomNV { + self.inner + } +} +#[repr(C)] +#[derive(Copy, Clone, Debug)] +#[doc = ""] +pub struct PipelineViewportCoarseSampleOrderStateCreateInfoNV { + pub s_type: StructureType, + pub p_next: *const c_void, + pub sample_order_type: CoarseSampleOrderTypeNV, + pub custom_sample_order_count: u32, + pub p_custom_sample_orders: *const CoarseSampleOrderCustomNV, +} +impl ::std::default::Default for PipelineViewportCoarseSampleOrderStateCreateInfoNV { + fn default() -> PipelineViewportCoarseSampleOrderStateCreateInfoNV { + PipelineViewportCoarseSampleOrderStateCreateInfoNV { + s_type: StructureType::PIPELINE_VIEWPORT_COARSE_SAMPLE_ORDER_STATE_CREATE_INFO_NV, + p_next: ::std::ptr::null(), + sample_order_type: CoarseSampleOrderTypeNV::default(), + custom_sample_order_count: u32::default(), + p_custom_sample_orders: ::std::ptr::null(), + } + } +} +impl PipelineViewportCoarseSampleOrderStateCreateInfoNV { + pub fn builder<'a>() -> PipelineViewportCoarseSampleOrderStateCreateInfoNVBuilder<'a> { + PipelineViewportCoarseSampleOrderStateCreateInfoNVBuilder { + inner: PipelineViewportCoarseSampleOrderStateCreateInfoNV::default(), + marker: ::std::marker::PhantomData, + } + } +} +#[repr(transparent)] +pub struct PipelineViewportCoarseSampleOrderStateCreateInfoNVBuilder<'a> { + inner: PipelineViewportCoarseSampleOrderStateCreateInfoNV, + marker: ::std::marker::PhantomData<&'a ()>, +} +unsafe impl ExtendsPipelineViewportStateCreateInfo + for PipelineViewportCoarseSampleOrderStateCreateInfoNVBuilder<'_> +{ +} +unsafe impl ExtendsPipelineViewportStateCreateInfo + for PipelineViewportCoarseSampleOrderStateCreateInfoNV +{ +} +impl<'a> ::std::ops::Deref for PipelineViewportCoarseSampleOrderStateCreateInfoNVBuilder<'a> { + type Target = PipelineViewportCoarseSampleOrderStateCreateInfoNV; + fn deref(&self) -> &Self::Target { + &self.inner + } +} +impl<'a> ::std::ops::DerefMut for PipelineViewportCoarseSampleOrderStateCreateInfoNVBuilder<'a> { + fn deref_mut(&mut self) -> &mut Self::Target { + &mut self.inner + } +} +impl<'a> PipelineViewportCoarseSampleOrderStateCreateInfoNVBuilder<'a> { + pub fn sample_order_type( + mut self, + sample_order_type: CoarseSampleOrderTypeNV, + ) -> PipelineViewportCoarseSampleOrderStateCreateInfoNVBuilder<'a> { + self.inner.sample_order_type = sample_order_type; + self + } + pub fn custom_sample_orders( + mut self, + custom_sample_orders: &'a [CoarseSampleOrderCustomNV], + ) -> PipelineViewportCoarseSampleOrderStateCreateInfoNVBuilder<'a> { + self.inner.custom_sample_order_count = custom_sample_orders.len() as _; + self.inner.p_custom_sample_orders = custom_sample_orders.as_ptr(); + self + } + #[doc = r" Calling build will **discard** all the lifetime information. Only call this if"] + #[doc = r" necessary! 
Builders implement `Deref` targeting their corresponding Vulkan struct,"] + #[doc = r" so references to builders can be passed directly to Vulkan functions."] + pub fn build(self) -> PipelineViewportCoarseSampleOrderStateCreateInfoNV { + self.inner + } +} +#[repr(C)] +#[derive(Copy, Clone, Debug)] +#[doc = ""] +pub struct PhysicalDeviceMeshShaderFeaturesNV { + pub s_type: StructureType, + pub p_next: *mut c_void, + pub task_shader: Bool32, + pub mesh_shader: Bool32, +} +impl ::std::default::Default for PhysicalDeviceMeshShaderFeaturesNV { + fn default() -> PhysicalDeviceMeshShaderFeaturesNV { + PhysicalDeviceMeshShaderFeaturesNV { + s_type: StructureType::PHYSICAL_DEVICE_MESH_SHADER_FEATURES_NV, + p_next: ::std::ptr::null_mut(), + task_shader: Bool32::default(), + mesh_shader: Bool32::default(), + } + } +} +impl PhysicalDeviceMeshShaderFeaturesNV { + pub fn builder<'a>() -> PhysicalDeviceMeshShaderFeaturesNVBuilder<'a> { + PhysicalDeviceMeshShaderFeaturesNVBuilder { + inner: PhysicalDeviceMeshShaderFeaturesNV::default(), + marker: ::std::marker::PhantomData, + } + } +} +#[repr(transparent)] +pub struct PhysicalDeviceMeshShaderFeaturesNVBuilder<'a> { + inner: PhysicalDeviceMeshShaderFeaturesNV, + marker: ::std::marker::PhantomData<&'a ()>, +} +unsafe impl ExtendsDeviceCreateInfo for PhysicalDeviceMeshShaderFeaturesNVBuilder<'_> {} +unsafe impl ExtendsDeviceCreateInfo for PhysicalDeviceMeshShaderFeaturesNV {} +impl<'a> ::std::ops::Deref for PhysicalDeviceMeshShaderFeaturesNVBuilder<'a> { + type Target = PhysicalDeviceMeshShaderFeaturesNV; + fn deref(&self) -> &Self::Target { + &self.inner + } +} +impl<'a> ::std::ops::DerefMut for PhysicalDeviceMeshShaderFeaturesNVBuilder<'a> { + fn deref_mut(&mut self) -> &mut Self::Target { + &mut self.inner + } +} +impl<'a> PhysicalDeviceMeshShaderFeaturesNVBuilder<'a> { + pub fn task_shader( + mut self, + task_shader: bool, + ) -> PhysicalDeviceMeshShaderFeaturesNVBuilder<'a> { + self.inner.task_shader = task_shader.into(); + self + } + pub fn mesh_shader( + mut self, + mesh_shader: bool, + ) -> PhysicalDeviceMeshShaderFeaturesNVBuilder<'a> { + self.inner.mesh_shader = mesh_shader.into(); + self + } + #[doc = r" Calling build will **discard** all the lifetime information. Only call this if"] + #[doc = r" necessary! 
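The `unsafe impl ExtendsDeviceCreateInfo` markers above are what let a feature struct like this one be spliced into a `DeviceCreateInfo` p_next chain. A sketch of the intended call site, assuming the `DeviceCreateInfo` builder generated elsewhere in this file and a `vk::*` module path:

// Hypothetical usage sketch, not part of the generated bindings.
let mut mesh = vk::PhysicalDeviceMeshShaderFeaturesNV::builder()
    .task_shader(true)
    .mesh_shader(true);
let device_info = vk::DeviceCreateInfo::builder().push_next(&mut mesh);
// push_next only accepts types that carry an ExtendsDeviceCreateInfo impl
// (the struct and its builder both do, per the impls above), so an illegal
// extension struct fails to compile instead of corrupting the chain.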
Builders implement `Deref` targeting their corresponding Vulkan struct,"] + #[doc = r" so references to builders can be passed directly to Vulkan functions."] + pub fn build(self) -> PhysicalDeviceMeshShaderFeaturesNV { + self.inner + } +} +#[repr(C)] +#[derive(Copy, Clone, Debug)] +#[doc = ""] +pub struct PhysicalDeviceMeshShaderPropertiesNV { + pub s_type: StructureType, + pub p_next: *mut c_void, + pub max_draw_mesh_tasks_count: u32, + pub max_task_work_group_invocations: u32, + pub max_task_work_group_size: [u32; 3], + pub max_task_total_memory_size: u32, + pub max_task_output_count: u32, + pub max_mesh_work_group_invocations: u32, + pub max_mesh_work_group_size: [u32; 3], + pub max_mesh_total_memory_size: u32, + pub max_mesh_output_vertices: u32, + pub max_mesh_output_primitives: u32, + pub max_mesh_multiview_view_count: u32, + pub mesh_output_per_vertex_granularity: u32, + pub mesh_output_per_primitive_granularity: u32, +} +impl ::std::default::Default for PhysicalDeviceMeshShaderPropertiesNV { + fn default() -> PhysicalDeviceMeshShaderPropertiesNV { + PhysicalDeviceMeshShaderPropertiesNV { + s_type: StructureType::PHYSICAL_DEVICE_MESH_SHADER_PROPERTIES_NV, + p_next: ::std::ptr::null_mut(), + max_draw_mesh_tasks_count: u32::default(), + max_task_work_group_invocations: u32::default(), + max_task_work_group_size: unsafe { ::std::mem::zeroed() }, + max_task_total_memory_size: u32::default(), + max_task_output_count: u32::default(), + max_mesh_work_group_invocations: u32::default(), + max_mesh_work_group_size: unsafe { ::std::mem::zeroed() }, + max_mesh_total_memory_size: u32::default(), + max_mesh_output_vertices: u32::default(), + max_mesh_output_primitives: u32::default(), + max_mesh_multiview_view_count: u32::default(), + mesh_output_per_vertex_granularity: u32::default(), + mesh_output_per_primitive_granularity: u32::default(), + } + } +} +impl PhysicalDeviceMeshShaderPropertiesNV { + pub fn builder<'a>() -> PhysicalDeviceMeshShaderPropertiesNVBuilder<'a> { + PhysicalDeviceMeshShaderPropertiesNVBuilder { + inner: PhysicalDeviceMeshShaderPropertiesNV::default(), + marker: ::std::marker::PhantomData, + } + } +} +#[repr(transparent)] +pub struct PhysicalDeviceMeshShaderPropertiesNVBuilder<'a> { + inner: PhysicalDeviceMeshShaderPropertiesNV, + marker: ::std::marker::PhantomData<&'a ()>, +} +unsafe impl ExtendsPhysicalDeviceProperties2 for PhysicalDeviceMeshShaderPropertiesNVBuilder<'_> {} +unsafe impl ExtendsPhysicalDeviceProperties2 for PhysicalDeviceMeshShaderPropertiesNV {} +impl<'a> ::std::ops::Deref for PhysicalDeviceMeshShaderPropertiesNVBuilder<'a> { + type Target = PhysicalDeviceMeshShaderPropertiesNV; + fn deref(&self) -> &Self::Target { + &self.inner + } +} +impl<'a> ::std::ops::DerefMut for PhysicalDeviceMeshShaderPropertiesNVBuilder<'a> { + fn deref_mut(&mut self) -> &mut Self::Target { + &mut self.inner + } +} +impl<'a> PhysicalDeviceMeshShaderPropertiesNVBuilder<'a> { + pub fn max_draw_mesh_tasks_count( + mut self, + max_draw_mesh_tasks_count: u32, + ) -> PhysicalDeviceMeshShaderPropertiesNVBuilder<'a> { + self.inner.max_draw_mesh_tasks_count = max_draw_mesh_tasks_count; + self + } + pub fn max_task_work_group_invocations( + mut self, + max_task_work_group_invocations: u32, + ) -> PhysicalDeviceMeshShaderPropertiesNVBuilder<'a> { + self.inner.max_task_work_group_invocations = max_task_work_group_invocations; + self + } + pub fn max_task_work_group_size( + mut self, + max_task_work_group_size: [u32; 3], + ) -> PhysicalDeviceMeshShaderPropertiesNVBuilder<'a> { + 
self.inner.max_task_work_group_size = max_task_work_group_size; + self + } + pub fn max_task_total_memory_size( + mut self, + max_task_total_memory_size: u32, + ) -> PhysicalDeviceMeshShaderPropertiesNVBuilder<'a> { + self.inner.max_task_total_memory_size = max_task_total_memory_size; + self + } + pub fn max_task_output_count( + mut self, + max_task_output_count: u32, + ) -> PhysicalDeviceMeshShaderPropertiesNVBuilder<'a> { + self.inner.max_task_output_count = max_task_output_count; + self + } + pub fn max_mesh_work_group_invocations( + mut self, + max_mesh_work_group_invocations: u32, + ) -> PhysicalDeviceMeshShaderPropertiesNVBuilder<'a> { + self.inner.max_mesh_work_group_invocations = max_mesh_work_group_invocations; + self + } + pub fn max_mesh_work_group_size( + mut self, + max_mesh_work_group_size: [u32; 3], + ) -> PhysicalDeviceMeshShaderPropertiesNVBuilder<'a> { + self.inner.max_mesh_work_group_size = max_mesh_work_group_size; + self + } + pub fn max_mesh_total_memory_size( + mut self, + max_mesh_total_memory_size: u32, + ) -> PhysicalDeviceMeshShaderPropertiesNVBuilder<'a> { + self.inner.max_mesh_total_memory_size = max_mesh_total_memory_size; + self + } + pub fn max_mesh_output_vertices( + mut self, + max_mesh_output_vertices: u32, + ) -> PhysicalDeviceMeshShaderPropertiesNVBuilder<'a> { + self.inner.max_mesh_output_vertices = max_mesh_output_vertices; + self + } + pub fn max_mesh_output_primitives( + mut self, + max_mesh_output_primitives: u32, + ) -> PhysicalDeviceMeshShaderPropertiesNVBuilder<'a> { + self.inner.max_mesh_output_primitives = max_mesh_output_primitives; + self + } + pub fn max_mesh_multiview_view_count( + mut self, + max_mesh_multiview_view_count: u32, + ) -> PhysicalDeviceMeshShaderPropertiesNVBuilder<'a> { + self.inner.max_mesh_multiview_view_count = max_mesh_multiview_view_count; + self + } + pub fn mesh_output_per_vertex_granularity( + mut self, + mesh_output_per_vertex_granularity: u32, + ) -> PhysicalDeviceMeshShaderPropertiesNVBuilder<'a> { + self.inner.mesh_output_per_vertex_granularity = mesh_output_per_vertex_granularity; + self + } + pub fn mesh_output_per_primitive_granularity( + mut self, + mesh_output_per_primitive_granularity: u32, + ) -> PhysicalDeviceMeshShaderPropertiesNVBuilder<'a> { + self.inner.mesh_output_per_primitive_granularity = mesh_output_per_primitive_granularity; + self + } + #[doc = r" Calling build will **discard** all the lifetime information. Only call this if"] + #[doc = r" necessary! 
Builders implement `Deref` targeting their corresponding Vulkan struct,"] + #[doc = r" so references to builders can be passed directly to Vulkan functions."] + pub fn build(self) -> PhysicalDeviceMeshShaderPropertiesNV { + self.inner + } +} +#[repr(C)] +#[derive(Copy, Clone, Default, Debug)] +#[doc = ""] +pub struct DrawMeshTasksIndirectCommandNV { + pub task_count: u32, + pub first_task: u32, +} +impl DrawMeshTasksIndirectCommandNV { + pub fn builder<'a>() -> DrawMeshTasksIndirectCommandNVBuilder<'a> { + DrawMeshTasksIndirectCommandNVBuilder { + inner: DrawMeshTasksIndirectCommandNV::default(), + marker: ::std::marker::PhantomData, + } + } +} +#[repr(transparent)] +pub struct DrawMeshTasksIndirectCommandNVBuilder<'a> { + inner: DrawMeshTasksIndirectCommandNV, + marker: ::std::marker::PhantomData<&'a ()>, +} +impl<'a> ::std::ops::Deref for DrawMeshTasksIndirectCommandNVBuilder<'a> { + type Target = DrawMeshTasksIndirectCommandNV; + fn deref(&self) -> &Self::Target { + &self.inner + } +} +impl<'a> ::std::ops::DerefMut for DrawMeshTasksIndirectCommandNVBuilder<'a> { + fn deref_mut(&mut self) -> &mut Self::Target { + &mut self.inner + } +} +impl<'a> DrawMeshTasksIndirectCommandNVBuilder<'a> { + pub fn task_count(mut self, task_count: u32) -> DrawMeshTasksIndirectCommandNVBuilder<'a> { + self.inner.task_count = task_count; + self + } + pub fn first_task(mut self, first_task: u32) -> DrawMeshTasksIndirectCommandNVBuilder<'a> { + self.inner.first_task = first_task; + self + } + #[doc = r" Calling build will **discard** all the lifetime information. Only call this if"] + #[doc = r" necessary! Builders implement `Deref` targeting their corresponding Vulkan struct,"] + #[doc = r" so references to builders can be passed directly to Vulkan functions."] + pub fn build(self) -> DrawMeshTasksIndirectCommandNV { + self.inner + } +} +#[repr(C)] +#[derive(Copy, Clone, Debug)] +#[doc = ""] +pub struct RayTracingShaderGroupCreateInfoNV { + pub s_type: StructureType, + pub p_next: *const c_void, + pub ty: RayTracingShaderGroupTypeNV, + pub general_shader: u32, + pub closest_hit_shader: u32, + pub any_hit_shader: u32, + pub intersection_shader: u32, +} +impl ::std::default::Default for RayTracingShaderGroupCreateInfoNV { + fn default() -> RayTracingShaderGroupCreateInfoNV { + RayTracingShaderGroupCreateInfoNV { + s_type: StructureType::RAY_TRACING_SHADER_GROUP_CREATE_INFO_NV, + p_next: ::std::ptr::null(), + ty: RayTracingShaderGroupTypeNV::default(), + general_shader: u32::default(), + closest_hit_shader: u32::default(), + any_hit_shader: u32::default(), + intersection_shader: u32::default(), + } + } +} +impl RayTracingShaderGroupCreateInfoNV { + pub fn builder<'a>() -> RayTracingShaderGroupCreateInfoNVBuilder<'a> { + RayTracingShaderGroupCreateInfoNVBuilder { + inner: RayTracingShaderGroupCreateInfoNV::default(), + marker: ::std::marker::PhantomData, + } + } +} +#[repr(transparent)] +pub struct RayTracingShaderGroupCreateInfoNVBuilder<'a> { + inner: RayTracingShaderGroupCreateInfoNV, + marker: ::std::marker::PhantomData<&'a ()>, +} +pub unsafe trait ExtendsRayTracingShaderGroupCreateInfoNV {} +impl<'a> ::std::ops::Deref for RayTracingShaderGroupCreateInfoNVBuilder<'a> { + type Target = RayTracingShaderGroupCreateInfoNV; + fn deref(&self) -> &Self::Target { + &self.inner + } +} +impl<'a> ::std::ops::DerefMut for RayTracingShaderGroupCreateInfoNVBuilder<'a> { + fn deref_mut(&mut self) -> &mut Self::Target { + &mut self.inner + } +} +impl<'a> RayTracingShaderGroupCreateInfoNVBuilder<'a> { + pub fn ty( + mut 
self, + ty: RayTracingShaderGroupTypeNV, + ) -> RayTracingShaderGroupCreateInfoNVBuilder<'a> { + self.inner.ty = ty; + self + } + pub fn general_shader( + mut self, + general_shader: u32, + ) -> RayTracingShaderGroupCreateInfoNVBuilder<'a> { + self.inner.general_shader = general_shader; + self + } + pub fn closest_hit_shader( + mut self, + closest_hit_shader: u32, + ) -> RayTracingShaderGroupCreateInfoNVBuilder<'a> { + self.inner.closest_hit_shader = closest_hit_shader; + self + } + pub fn any_hit_shader( + mut self, + any_hit_shader: u32, + ) -> RayTracingShaderGroupCreateInfoNVBuilder<'a> { + self.inner.any_hit_shader = any_hit_shader; + self + } + pub fn intersection_shader( + mut self, + intersection_shader: u32, + ) -> RayTracingShaderGroupCreateInfoNVBuilder<'a> { + self.inner.intersection_shader = intersection_shader; + self + } + #[doc = r" Prepends the given extension struct between the root and the first pointer. This"] + #[doc = r" method only exists on structs that can be passed to a function directly. Only"] + #[doc = r" valid extension structs can be pushed into the chain."] + #[doc = r" If the chain looks like `A -> B -> C`, and you call `builder.push_next(&mut D)`, then the"] + #[doc = r" chain will look like `A -> D -> B -> C`."] + pub fn push_next<T: ExtendsRayTracingShaderGroupCreateInfoNV>( + mut self, + next: &'a mut T, + ) -> RayTracingShaderGroupCreateInfoNVBuilder<'a> { + unsafe { + let next_ptr = next as *mut T as *mut BaseOutStructure; + let last_next = ptr_chain_iter(next).last().unwrap(); + (*last_next).p_next = self.inner.p_next as _; + self.inner.p_next = next_ptr as _; + } + self + } + #[doc = r" Calling build will **discard** all the lifetime information. Only call this if"] + #[doc = r" necessary! Builders implement `Deref` targeting their corresponding Vulkan struct,"] + #[doc = r" so references to builders can be passed directly to Vulkan functions."] + pub fn build(self) -> RayTracingShaderGroupCreateInfoNV { + self.inner + } +} +#[repr(C)] +#[derive(Copy, Clone, Debug)] +#[doc = ""] +pub struct RayTracingPipelineCreateInfoNV { + pub s_type: StructureType, + pub p_next: *const c_void, + pub flags: PipelineCreateFlags, + pub stage_count: u32, + pub p_stages: *const PipelineShaderStageCreateInfo, + pub group_count: u32, + pub p_groups: *const RayTracingShaderGroupCreateInfoNV, + pub max_recursion_depth: u32, + pub layout: PipelineLayout, + pub base_pipeline_handle: Pipeline, + pub base_pipeline_index: i32, +} +impl ::std::default::Default for RayTracingPipelineCreateInfoNV { + fn default() -> RayTracingPipelineCreateInfoNV { + RayTracingPipelineCreateInfoNV { + s_type: StructureType::RAY_TRACING_PIPELINE_CREATE_INFO_NV, + p_next: ::std::ptr::null(), + flags: PipelineCreateFlags::default(), + stage_count: u32::default(), + p_stages: ::std::ptr::null(), + group_count: u32::default(), + p_groups: ::std::ptr::null(), + max_recursion_depth: u32::default(), + layout: PipelineLayout::default(), + base_pipeline_handle: Pipeline::default(), + base_pipeline_index: i32::default(), + } + } +} +impl RayTracingPipelineCreateInfoNV { + pub fn builder<'a>() -> RayTracingPipelineCreateInfoNVBuilder<'a> { + RayTracingPipelineCreateInfoNVBuilder { + inner: RayTracingPipelineCreateInfoNV::default(), + marker: ::std::marker::PhantomData, + } + } +} +#[repr(transparent)] +pub struct RayTracingPipelineCreateInfoNVBuilder<'a> { + inner: RayTracingPipelineCreateInfoNV, + marker: ::std::marker::PhantomData<&'a ()>, +} +pub unsafe trait ExtendsRayTracingPipelineCreateInfoNV {} +impl<'a> ::std::ops::Deref for 
RayTracingPipelineCreateInfoNVBuilder<'a> { + type Target = RayTracingPipelineCreateInfoNV; + fn deref(&self) -> &Self::Target { + &self.inner + } +} +impl<'a> ::std::ops::DerefMut for RayTracingPipelineCreateInfoNVBuilder<'a> { + fn deref_mut(&mut self) -> &mut Self::Target { + &mut self.inner + } +} +impl<'a> RayTracingPipelineCreateInfoNVBuilder<'a> { + pub fn flags( + mut self, + flags: PipelineCreateFlags, + ) -> RayTracingPipelineCreateInfoNVBuilder<'a> { + self.inner.flags = flags; + self + } + pub fn stages( + mut self, + stages: &'a [PipelineShaderStageCreateInfo], + ) -> RayTracingPipelineCreateInfoNVBuilder<'a> { + self.inner.stage_count = stages.len() as _; + self.inner.p_stages = stages.as_ptr(); + self + } + pub fn groups( + mut self, + groups: &'a [RayTracingShaderGroupCreateInfoNV], + ) -> RayTracingPipelineCreateInfoNVBuilder<'a> { + self.inner.group_count = groups.len() as _; + self.inner.p_groups = groups.as_ptr(); + self + } + pub fn max_recursion_depth( + mut self, + max_recursion_depth: u32, + ) -> RayTracingPipelineCreateInfoNVBuilder<'a> { + self.inner.max_recursion_depth = max_recursion_depth; + self + } + pub fn layout(mut self, layout: PipelineLayout) -> RayTracingPipelineCreateInfoNVBuilder<'a> { + self.inner.layout = layout; + self + } + pub fn base_pipeline_handle( + mut self, + base_pipeline_handle: Pipeline, + ) -> RayTracingPipelineCreateInfoNVBuilder<'a> { + self.inner.base_pipeline_handle = base_pipeline_handle; + self + } + pub fn base_pipeline_index( + mut self, + base_pipeline_index: i32, + ) -> RayTracingPipelineCreateInfoNVBuilder<'a> { + self.inner.base_pipeline_index = base_pipeline_index; + self + } + #[doc = r" Prepends the given extension struct between the root and the first pointer. This"] + #[doc = r" method only exists on structs that can be passed to a function directly. Only"] + #[doc = r" valid extension structs can be pushed into the chain."] + #[doc = r" If the chain looks like `A -> B -> C`, and you call `builder.push_next(&mut D)`, then the"] + #[doc = r" chain will look like `A -> D -> B -> C`."] + pub fn push_next<T: ExtendsRayTracingPipelineCreateInfoNV>( + mut self, + next: &'a mut T, + ) -> RayTracingPipelineCreateInfoNVBuilder<'a> { + unsafe { + let next_ptr = next as *mut T as *mut BaseOutStructure; + let last_next = ptr_chain_iter(next).last().unwrap(); + (*last_next).p_next = self.inner.p_next as _; + self.inner.p_next = next_ptr as _; + } + self + } + #[doc = r" Calling build will **discard** all the lifetime information. Only call this if"] + #[doc = r" necessary! 
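The slice setters above derive `stage_count` and `group_count` from the slices themselves, so the count/pointer pairs cannot drift apart. A sketch, where `stages`, `groups`, and `layout` are hypothetical locals of the matching generated types:

// Hypothetical usage sketch; stages, groups and layout are assumed locals.
let info = vk::RayTracingPipelineCreateInfoNV::builder()
    .stages(&stages)
    .groups(&groups)
    .max_recursion_depth(1)
    .layout(layout);
// stage_count/p_stages and group_count/p_groups are each written by a
// single setter, which is the safety point of this builder layer over the
// raw #[repr(C)] struct.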
Builders implement `Deref` targeting their corresponding Vulkan struct,"] + #[doc = r" so references to builders can be passed directly to Vulkan functions."] + pub fn build(self) -> RayTracingPipelineCreateInfoNV { + self.inner + } +} +#[repr(C)] +#[derive(Copy, Clone, Debug)] +#[doc = ""] +pub struct GeometryTrianglesNV { + pub s_type: StructureType, + pub p_next: *const c_void, + pub vertex_data: Buffer, + pub vertex_offset: DeviceSize, + pub vertex_count: u32, + pub vertex_stride: DeviceSize, + pub vertex_format: Format, + pub index_data: Buffer, + pub index_offset: DeviceSize, + pub index_count: u32, + pub index_type: IndexType, + pub transform_data: Buffer, + pub transform_offset: DeviceSize, +} +impl ::std::default::Default for GeometryTrianglesNV { + fn default() -> GeometryTrianglesNV { + GeometryTrianglesNV { + s_type: StructureType::GEOMETRY_TRIANGLES_NV, + p_next: ::std::ptr::null(), + vertex_data: Buffer::default(), + vertex_offset: DeviceSize::default(), + vertex_count: u32::default(), + vertex_stride: DeviceSize::default(), + vertex_format: Format::default(), + index_data: Buffer::default(), + index_offset: DeviceSize::default(), + index_count: u32::default(), + index_type: IndexType::default(), + transform_data: Buffer::default(), + transform_offset: DeviceSize::default(), + } + } +} +impl GeometryTrianglesNV { + pub fn builder<'a>() -> GeometryTrianglesNVBuilder<'a> { + GeometryTrianglesNVBuilder { + inner: GeometryTrianglesNV::default(), + marker: ::std::marker::PhantomData, + } + } +} +#[repr(transparent)] +pub struct GeometryTrianglesNVBuilder<'a> { + inner: GeometryTrianglesNV, + marker: ::std::marker::PhantomData<&'a ()>, +} +pub unsafe trait ExtendsGeometryTrianglesNV {} +impl<'a> ::std::ops::Deref for GeometryTrianglesNVBuilder<'a> { + type Target = GeometryTrianglesNV; + fn deref(&self) -> &Self::Target { + &self.inner + } +} +impl<'a> ::std::ops::DerefMut for GeometryTrianglesNVBuilder<'a> { + fn deref_mut(&mut self) -> &mut Self::Target { + &mut self.inner + } +} +impl<'a> GeometryTrianglesNVBuilder<'a> { + pub fn vertex_data(mut self, vertex_data: Buffer) -> GeometryTrianglesNVBuilder<'a> { + self.inner.vertex_data = vertex_data; + self + } + pub fn vertex_offset(mut self, vertex_offset: DeviceSize) -> GeometryTrianglesNVBuilder<'a> { + self.inner.vertex_offset = vertex_offset; + self + } + pub fn vertex_count(mut self, vertex_count: u32) -> GeometryTrianglesNVBuilder<'a> { + self.inner.vertex_count = vertex_count; + self + } + pub fn vertex_stride(mut self, vertex_stride: DeviceSize) -> GeometryTrianglesNVBuilder<'a> { + self.inner.vertex_stride = vertex_stride; + self + } + pub fn vertex_format(mut self, vertex_format: Format) -> GeometryTrianglesNVBuilder<'a> { + self.inner.vertex_format = vertex_format; + self + } + pub fn index_data(mut self, index_data: Buffer) -> GeometryTrianglesNVBuilder<'a> { + self.inner.index_data = index_data; + self + } + pub fn index_offset(mut self, index_offset: DeviceSize) -> GeometryTrianglesNVBuilder<'a> { + self.inner.index_offset = index_offset; + self + } + pub fn index_count(mut self, index_count: u32) -> GeometryTrianglesNVBuilder<'a> { + self.inner.index_count = index_count; + self + } + pub fn index_type(mut self, index_type: IndexType) -> GeometryTrianglesNVBuilder<'a> { + self.inner.index_type = index_type; + self + } + pub fn transform_data(mut self, transform_data: Buffer) -> GeometryTrianglesNVBuilder<'a> { + self.inner.transform_data = transform_data; + self + } + pub fn transform_offset( + mut self, + 
transform_offset: DeviceSize, + ) -> GeometryTrianglesNVBuilder<'a> { + self.inner.transform_offset = transform_offset; + self + } + #[doc = r" Prepends the given extension struct between the root and the first pointer. This"] + #[doc = r" method only exists on structs that can be passed to a function directly. Only"] + #[doc = r" valid extension structs can be pushed into the chain."] + #[doc = r" If the chain looks like `A -> B -> C`, and you call `builder.push_next(&mut D)`, then the"] + #[doc = r" chain will look like `A -> D -> B -> C`."] + pub fn push_next<T: ExtendsGeometryTrianglesNV>( + mut self, + next: &'a mut T, + ) -> GeometryTrianglesNVBuilder<'a> { + unsafe { + let next_ptr = next as *mut T as *mut BaseOutStructure; + let last_next = ptr_chain_iter(next).last().unwrap(); + (*last_next).p_next = self.inner.p_next as _; + self.inner.p_next = next_ptr as _; + } + self + } + #[doc = r" Calling build will **discard** all the lifetime information. Only call this if"] + #[doc = r" necessary! Builders implement `Deref` targeting their corresponding Vulkan struct,"] + #[doc = r" so references to builders can be passed directly to Vulkan functions."] + pub fn build(self) -> GeometryTrianglesNV { + self.inner + } +} +#[repr(C)] +#[derive(Copy, Clone, Debug)] +#[doc = ""] +pub struct GeometryAABBNV { + pub s_type: StructureType, + pub p_next: *const c_void, + pub aabb_data: Buffer, + pub num_aab_bs: u32, + pub stride: u32, + pub offset: DeviceSize, +} +impl ::std::default::Default for GeometryAABBNV { + fn default() -> GeometryAABBNV { + GeometryAABBNV { + s_type: StructureType::GEOMETRY_AABB_NV, + p_next: ::std::ptr::null(), + aabb_data: Buffer::default(), + num_aab_bs: u32::default(), + stride: u32::default(), + offset: DeviceSize::default(), + } + } +} +impl GeometryAABBNV { + pub fn builder<'a>() -> GeometryAABBNVBuilder<'a> { + GeometryAABBNVBuilder { + inner: GeometryAABBNV::default(), + marker: ::std::marker::PhantomData, + } + } +} +#[repr(transparent)] +pub struct GeometryAABBNVBuilder<'a> { + inner: GeometryAABBNV, + marker: ::std::marker::PhantomData<&'a ()>, +} +pub unsafe trait ExtendsGeometryAABBNV {} +impl<'a> ::std::ops::Deref for GeometryAABBNVBuilder<'a> { + type Target = GeometryAABBNV; + fn deref(&self) -> &Self::Target { + &self.inner + } +} +impl<'a> ::std::ops::DerefMut for GeometryAABBNVBuilder<'a> { + fn deref_mut(&mut self) -> &mut Self::Target { + &mut self.inner + } +} +impl<'a> GeometryAABBNVBuilder<'a> { + pub fn aabb_data(mut self, aabb_data: Buffer) -> GeometryAABBNVBuilder<'a> { + self.inner.aabb_data = aabb_data; + self + } + pub fn num_aab_bs(mut self, num_aab_bs: u32) -> GeometryAABBNVBuilder<'a> { + self.inner.num_aab_bs = num_aab_bs; + self + } + pub fn stride(mut self, stride: u32) -> GeometryAABBNVBuilder<'a> { + self.inner.stride = stride; + self + } + pub fn offset(mut self, offset: DeviceSize) -> GeometryAABBNVBuilder<'a> { + self.inner.offset = offset; + self + } + #[doc = r" Prepends the given extension struct between the root and the first pointer. This"] + #[doc = r" method only exists on structs that can be passed to a function directly. 
Only"] + #[doc = r" valid extension structs can be pushed into the chain."] + #[doc = r" If the chain looks like `A -> B -> C`, and you call `builder.push_next(&mut D)`, then the"] + #[doc = r" chain will look like `A -> D -> B -> C`."] + pub fn push_next( + mut self, + next: &'a mut T, + ) -> GeometryAABBNVBuilder<'a> { + unsafe { + let next_ptr = next as *mut T as *mut BaseOutStructure; + let last_next = ptr_chain_iter(next).last().unwrap(); + (*last_next).p_next = self.inner.p_next as _; + self.inner.p_next = next_ptr as _; + } + self + } + #[doc = r" Calling build will **discard** all the lifetime information. Only call this if"] + #[doc = r" necessary! Builders implement `Deref` targeting their corresponding Vulkan struct,"] + #[doc = r" so references to builders can be passed directly to Vulkan functions."] + pub fn build(self) -> GeometryAABBNV { + self.inner + } +} +#[repr(C)] +#[derive(Copy, Clone, Default, Debug)] +#[doc = ""] +pub struct GeometryDataNV { + pub triangles: GeometryTrianglesNV, + pub aabbs: GeometryAABBNV, +} +impl GeometryDataNV { + pub fn builder<'a>() -> GeometryDataNVBuilder<'a> { + GeometryDataNVBuilder { + inner: GeometryDataNV::default(), + marker: ::std::marker::PhantomData, + } + } +} +#[repr(transparent)] +pub struct GeometryDataNVBuilder<'a> { + inner: GeometryDataNV, + marker: ::std::marker::PhantomData<&'a ()>, +} +impl<'a> ::std::ops::Deref for GeometryDataNVBuilder<'a> { + type Target = GeometryDataNV; + fn deref(&self) -> &Self::Target { + &self.inner + } +} +impl<'a> ::std::ops::DerefMut for GeometryDataNVBuilder<'a> { + fn deref_mut(&mut self) -> &mut Self::Target { + &mut self.inner + } +} +impl<'a> GeometryDataNVBuilder<'a> { + pub fn triangles(mut self, triangles: GeometryTrianglesNV) -> GeometryDataNVBuilder<'a> { + self.inner.triangles = triangles; + self + } + pub fn aabbs(mut self, aabbs: GeometryAABBNV) -> GeometryDataNVBuilder<'a> { + self.inner.aabbs = aabbs; + self + } + #[doc = r" Calling build will **discard** all the lifetime information. Only call this if"] + #[doc = r" necessary! 
Builders implement `Deref` targeting their corresponding Vulkan struct,"] + #[doc = r" so references to builders can be passed directly to Vulkan functions."] + pub fn build(self) -> GeometryDataNV { + self.inner + } +} +#[repr(C)] +#[derive(Copy, Clone, Debug)] +#[doc = ""] +pub struct GeometryNV { + pub s_type: StructureType, + pub p_next: *const c_void, + pub geometry_type: GeometryTypeNV, + pub geometry: GeometryDataNV, + pub flags: GeometryFlagsNV, +} +impl ::std::default::Default for GeometryNV { + fn default() -> GeometryNV { + GeometryNV { + s_type: StructureType::GEOMETRY_NV, + p_next: ::std::ptr::null(), + geometry_type: GeometryTypeNV::default(), + geometry: GeometryDataNV::default(), + flags: GeometryFlagsNV::default(), + } + } +} +impl GeometryNV { + pub fn builder<'a>() -> GeometryNVBuilder<'a> { + GeometryNVBuilder { + inner: GeometryNV::default(), + marker: ::std::marker::PhantomData, + } + } +} +#[repr(transparent)] +pub struct GeometryNVBuilder<'a> { + inner: GeometryNV, + marker: ::std::marker::PhantomData<&'a ()>, +} +pub unsafe trait ExtendsGeometryNV {} +impl<'a> ::std::ops::Deref for GeometryNVBuilder<'a> { + type Target = GeometryNV; + fn deref(&self) -> &Self::Target { + &self.inner + } +} +impl<'a> ::std::ops::DerefMut for GeometryNVBuilder<'a> { + fn deref_mut(&mut self) -> &mut Self::Target { + &mut self.inner + } +} +impl<'a> GeometryNVBuilder<'a> { + pub fn geometry_type(mut self, geometry_type: GeometryTypeNV) -> GeometryNVBuilder<'a> { + self.inner.geometry_type = geometry_type; + self + } + pub fn geometry(mut self, geometry: GeometryDataNV) -> GeometryNVBuilder<'a> { + self.inner.geometry = geometry; + self + } + pub fn flags(mut self, flags: GeometryFlagsNV) -> GeometryNVBuilder<'a> { + self.inner.flags = flags; + self + } + #[doc = r" Prepends the given extension struct between the root and the first pointer. This"] + #[doc = r" method only exists on structs that can be passed to a function directly. Only"] + #[doc = r" valid extension structs can be pushed into the chain."] + #[doc = r" If the chain looks like `A -> B -> C`, and you call `builder.push_next(&mut D)`, then the"] + #[doc = r" chain will look like `A -> D -> B -> C`."] + pub fn push_next<T: ExtendsGeometryNV>(mut self, next: &'a mut T) -> GeometryNVBuilder<'a> { + unsafe { + let next_ptr = next as *mut T as *mut BaseOutStructure; + let last_next = ptr_chain_iter(next).last().unwrap(); + (*last_next).p_next = self.inner.p_next as _; + self.inner.p_next = next_ptr as _; + } + self + } + #[doc = r" Calling build will **discard** all the lifetime information. Only call this if"] + #[doc = r" necessary! 
Builders implement `Deref` targeting their corresponding Vulkan struct,"] + #[doc = r" so references to builders can be passed directly to Vulkan functions."] + pub fn build(self) -> GeometryNV { + self.inner + } +} +#[repr(C)] +#[derive(Copy, Clone, Debug)] +#[doc = ""] +pub struct AccelerationStructureInfoNV { + pub s_type: StructureType, + pub p_next: *const c_void, + pub ty: AccelerationStructureTypeNV, + pub flags: BuildAccelerationStructureFlagsNV, + pub instance_count: u32, + pub geometry_count: u32, + pub p_geometries: *const GeometryNV, +} +impl ::std::default::Default for AccelerationStructureInfoNV { + fn default() -> AccelerationStructureInfoNV { + AccelerationStructureInfoNV { + s_type: StructureType::ACCELERATION_STRUCTURE_INFO_NV, + p_next: ::std::ptr::null(), + ty: AccelerationStructureTypeNV::default(), + flags: BuildAccelerationStructureFlagsNV::default(), + instance_count: u32::default(), + geometry_count: u32::default(), + p_geometries: ::std::ptr::null(), + } + } +} +impl AccelerationStructureInfoNV { + pub fn builder<'a>() -> AccelerationStructureInfoNVBuilder<'a> { + AccelerationStructureInfoNVBuilder { + inner: AccelerationStructureInfoNV::default(), + marker: ::std::marker::PhantomData, + } + } +} +#[repr(transparent)] +pub struct AccelerationStructureInfoNVBuilder<'a> { + inner: AccelerationStructureInfoNV, + marker: ::std::marker::PhantomData<&'a ()>, +} +pub unsafe trait ExtendsAccelerationStructureInfoNV {} +impl<'a> ::std::ops::Deref for AccelerationStructureInfoNVBuilder<'a> { + type Target = AccelerationStructureInfoNV; + fn deref(&self) -> &Self::Target { + &self.inner + } +} +impl<'a> ::std::ops::DerefMut for AccelerationStructureInfoNVBuilder<'a> { + fn deref_mut(&mut self) -> &mut Self::Target { + &mut self.inner + } +} +impl<'a> AccelerationStructureInfoNVBuilder<'a> { + pub fn ty(mut self, ty: AccelerationStructureTypeNV) -> AccelerationStructureInfoNVBuilder<'a> { + self.inner.ty = ty; + self + } + pub fn flags( + mut self, + flags: BuildAccelerationStructureFlagsNV, + ) -> AccelerationStructureInfoNVBuilder<'a> { + self.inner.flags = flags; + self + } + pub fn instance_count(mut self, instance_count: u32) -> AccelerationStructureInfoNVBuilder<'a> { + self.inner.instance_count = instance_count; + self + } + pub fn geometries( + mut self, + geometries: &'a [GeometryNV], + ) -> AccelerationStructureInfoNVBuilder<'a> { + self.inner.geometry_count = geometries.len() as _; + self.inner.p_geometries = geometries.as_ptr(); + self + } + #[doc = r" Prepends the given extension struct between the root and the first pointer. This"] + #[doc = r" method only exists on structs that can be passed to a function directly. Only"] + #[doc = r" valid extension structs can be pushed into the chain."] + #[doc = r" If the chain looks like `A -> B -> C`, and you call `builder.push_next(&mut D)`, then the"] + #[doc = r" chain will look like `A -> D -> B -> C`."] + pub fn push_next<T: ExtendsAccelerationStructureInfoNV>( + mut self, + next: &'a mut T, + ) -> AccelerationStructureInfoNVBuilder<'a> { + unsafe { + let next_ptr = next as *mut T as *mut BaseOutStructure; + let last_next = ptr_chain_iter(next).last().unwrap(); + (*last_next).p_next = self.inner.p_next as _; + self.inner.p_next = next_ptr as _; + } + self + } + #[doc = r" Calling build will **discard** all the lifetime information. Only call this if"] + #[doc = r" necessary! 
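The same shape recurs here: `geometries` writes `geometry_count` and `p_geometries` from one slice. A sketch that chains the geometry builders above into an acceleration-structure description, assuming `triangles` is a filled-in `vk::GeometryTrianglesNV` (the `TRIANGLES` and `BOTTOM_LEVEL` constants are assumed to match the ones defined elsewhere in this file):

// Hypothetical usage sketch, not part of the generated bindings.
let geometry = vk::GeometryNV::builder()
    .geometry_type(vk::GeometryTypeNV::TRIANGLES)
    .geometry(vk::GeometryDataNV::builder().triangles(triangles).build())
    .build();
let geometries = [geometry];
let info = vk::AccelerationStructureInfoNV::builder()
    .ty(vk::AccelerationStructureTypeNV::BOTTOM_LEVEL)
    .geometries(&geometries);
// Dropping to .build() is needed where a plain struct (not a builder) is
// stored by value, as GeometryDataNV and the geometries array are here.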
Builders implement `Deref` targeting their corresponding Vulkan struct,"] + #[doc = r" so references to builders can be passed directly to Vulkan functions."] + pub fn build(self) -> AccelerationStructureInfoNV { + self.inner + } +} +#[repr(C)] +#[derive(Copy, Clone, Debug)] +#[doc = ""] +pub struct AccelerationStructureCreateInfoNV { + pub s_type: StructureType, + pub p_next: *const c_void, + pub compacted_size: DeviceSize, + pub info: AccelerationStructureInfoNV, +} +impl ::std::default::Default for AccelerationStructureCreateInfoNV { + fn default() -> AccelerationStructureCreateInfoNV { + AccelerationStructureCreateInfoNV { + s_type: StructureType::ACCELERATION_STRUCTURE_CREATE_INFO_NV, + p_next: ::std::ptr::null(), + compacted_size: DeviceSize::default(), + info: AccelerationStructureInfoNV::default(), + } + } +} +impl AccelerationStructureCreateInfoNV { + pub fn builder<'a>() -> AccelerationStructureCreateInfoNVBuilder<'a> { + AccelerationStructureCreateInfoNVBuilder { + inner: AccelerationStructureCreateInfoNV::default(), + marker: ::std::marker::PhantomData, + } + } +} +#[repr(transparent)] +pub struct AccelerationStructureCreateInfoNVBuilder<'a> { + inner: AccelerationStructureCreateInfoNV, + marker: ::std::marker::PhantomData<&'a ()>, +} +pub unsafe trait ExtendsAccelerationStructureCreateInfoNV {} +impl<'a> ::std::ops::Deref for AccelerationStructureCreateInfoNVBuilder<'a> { + type Target = AccelerationStructureCreateInfoNV; + fn deref(&self) -> &Self::Target { + &self.inner + } +} +impl<'a> ::std::ops::DerefMut for AccelerationStructureCreateInfoNVBuilder<'a> { + fn deref_mut(&mut self) -> &mut Self::Target { + &mut self.inner + } +} +impl<'a> AccelerationStructureCreateInfoNVBuilder<'a> { + pub fn compacted_size( + mut self, + compacted_size: DeviceSize, + ) -> AccelerationStructureCreateInfoNVBuilder<'a> { + self.inner.compacted_size = compacted_size; + self + } + pub fn info( + mut self, + info: AccelerationStructureInfoNV, + ) -> AccelerationStructureCreateInfoNVBuilder<'a> { + self.inner.info = info; + self + } + #[doc = r" Prepends the given extension struct between the root and the first pointer. This"] + #[doc = r" method only exists on structs that can be passed to a function directly. Only"] + #[doc = r" valid extension structs can be pushed into the chain."] + #[doc = r" If the chain looks like `A -> B -> C`, and you call `builder.push_next(&mut D)`, then the"] + #[doc = r" chain will look like `A -> D -> B -> C`."] + pub fn push_next( + mut self, + next: &'a mut T, + ) -> AccelerationStructureCreateInfoNVBuilder<'a> { + unsafe { + let next_ptr = next as *mut T as *mut BaseOutStructure; + let last_next = ptr_chain_iter(next).last().unwrap(); + (*last_next).p_next = self.inner.p_next as _; + self.inner.p_next = next_ptr as _; + } + self + } + #[doc = r" Calling build will **discard** all the lifetime information. Only call this if"] + #[doc = r" necessary! 
Builders implement `Deref` targeting their corresponding Vulkan struct,"] + #[doc = r" so references to builders can be passed directly to Vulkan functions."] + pub fn build(self) -> AccelerationStructureCreateInfoNV { + self.inner + } +} +#[repr(C)] +#[derive(Copy, Clone, Debug)] +#[doc = ""] +pub struct BindAccelerationStructureMemoryInfoNV { + pub s_type: StructureType, + pub p_next: *const c_void, + pub acceleration_structure: AccelerationStructureNV, + pub memory: DeviceMemory, + pub memory_offset: DeviceSize, + pub device_index_count: u32, + pub p_device_indices: *const u32, +} +impl ::std::default::Default for BindAccelerationStructureMemoryInfoNV { + fn default() -> BindAccelerationStructureMemoryInfoNV { + BindAccelerationStructureMemoryInfoNV { + s_type: StructureType::BIND_ACCELERATION_STRUCTURE_MEMORY_INFO_NV, + p_next: ::std::ptr::null(), + acceleration_structure: AccelerationStructureNV::default(), + memory: DeviceMemory::default(), + memory_offset: DeviceSize::default(), + device_index_count: u32::default(), + p_device_indices: ::std::ptr::null(), + } + } +} +impl BindAccelerationStructureMemoryInfoNV { + pub fn builder<'a>() -> BindAccelerationStructureMemoryInfoNVBuilder<'a> { + BindAccelerationStructureMemoryInfoNVBuilder { + inner: BindAccelerationStructureMemoryInfoNV::default(), + marker: ::std::marker::PhantomData, + } + } +} +#[repr(transparent)] +pub struct BindAccelerationStructureMemoryInfoNVBuilder<'a> { + inner: BindAccelerationStructureMemoryInfoNV, + marker: ::std::marker::PhantomData<&'a ()>, +} +pub unsafe trait ExtendsBindAccelerationStructureMemoryInfoNV {} +impl<'a> ::std::ops::Deref for BindAccelerationStructureMemoryInfoNVBuilder<'a> { + type Target = BindAccelerationStructureMemoryInfoNV; + fn deref(&self) -> &Self::Target { + &self.inner + } +} +impl<'a> ::std::ops::DerefMut for BindAccelerationStructureMemoryInfoNVBuilder<'a> { + fn deref_mut(&mut self) -> &mut Self::Target { + &mut self.inner + } +} +impl<'a> BindAccelerationStructureMemoryInfoNVBuilder<'a> { + pub fn acceleration_structure( + mut self, + acceleration_structure: AccelerationStructureNV, + ) -> BindAccelerationStructureMemoryInfoNVBuilder<'a> { + self.inner.acceleration_structure = acceleration_structure; + self + } + pub fn memory( + mut self, + memory: DeviceMemory, + ) -> BindAccelerationStructureMemoryInfoNVBuilder<'a> { + self.inner.memory = memory; + self + } + pub fn memory_offset( + mut self, + memory_offset: DeviceSize, + ) -> BindAccelerationStructureMemoryInfoNVBuilder<'a> { + self.inner.memory_offset = memory_offset; + self + } + pub fn device_indices( + mut self, + device_indices: &'a [u32], + ) -> BindAccelerationStructureMemoryInfoNVBuilder<'a> { + self.inner.device_index_count = device_indices.len() as _; + self.inner.p_device_indices = device_indices.as_ptr(); + self + } + #[doc = r" Prepends the given extension struct between the root and the first pointer. This"] + #[doc = r" method only exists on structs that can be passed to a function directly. 
Only"] + #[doc = r" valid extension structs can be pushed into the chain."] + #[doc = r" If the chain looks like `A -> B -> C`, and you call `builder.push_next(&mut D)`, then the"] + #[doc = r" chain will look like `A -> D -> B -> C`."] + pub fn push_next( + mut self, + next: &'a mut T, + ) -> BindAccelerationStructureMemoryInfoNVBuilder<'a> { + unsafe { + let next_ptr = next as *mut T as *mut BaseOutStructure; + let last_next = ptr_chain_iter(next).last().unwrap(); + (*last_next).p_next = self.inner.p_next as _; + self.inner.p_next = next_ptr as _; + } + self + } + #[doc = r" Calling build will **discard** all the lifetime information. Only call this if"] + #[doc = r" necessary! Builders implement `Deref` targeting their corresponding Vulkan struct,"] + #[doc = r" so references to builders can be passed directly to Vulkan functions."] + pub fn build(self) -> BindAccelerationStructureMemoryInfoNV { + self.inner + } +} +#[repr(C)] +#[derive(Copy, Clone, Debug)] +#[doc = ""] +pub struct WriteDescriptorSetAccelerationStructureNV { + pub s_type: StructureType, + pub p_next: *const c_void, + pub acceleration_structure_count: u32, + pub p_acceleration_structures: *const AccelerationStructureNV, +} +impl ::std::default::Default for WriteDescriptorSetAccelerationStructureNV { + fn default() -> WriteDescriptorSetAccelerationStructureNV { + WriteDescriptorSetAccelerationStructureNV { + s_type: StructureType::WRITE_DESCRIPTOR_SET_ACCELERATION_STRUCTURE_NV, + p_next: ::std::ptr::null(), + acceleration_structure_count: u32::default(), + p_acceleration_structures: ::std::ptr::null(), + } + } +} +impl WriteDescriptorSetAccelerationStructureNV { + pub fn builder<'a>() -> WriteDescriptorSetAccelerationStructureNVBuilder<'a> { + WriteDescriptorSetAccelerationStructureNVBuilder { + inner: WriteDescriptorSetAccelerationStructureNV::default(), + marker: ::std::marker::PhantomData, + } + } +} +#[repr(transparent)] +pub struct WriteDescriptorSetAccelerationStructureNVBuilder<'a> { + inner: WriteDescriptorSetAccelerationStructureNV, + marker: ::std::marker::PhantomData<&'a ()>, +} +unsafe impl ExtendsWriteDescriptorSet for WriteDescriptorSetAccelerationStructureNVBuilder<'_> {} +unsafe impl ExtendsWriteDescriptorSet for WriteDescriptorSetAccelerationStructureNV {} +impl<'a> ::std::ops::Deref for WriteDescriptorSetAccelerationStructureNVBuilder<'a> { + type Target = WriteDescriptorSetAccelerationStructureNV; + fn deref(&self) -> &Self::Target { + &self.inner + } +} +impl<'a> ::std::ops::DerefMut for WriteDescriptorSetAccelerationStructureNVBuilder<'a> { + fn deref_mut(&mut self) -> &mut Self::Target { + &mut self.inner + } +} +impl<'a> WriteDescriptorSetAccelerationStructureNVBuilder<'a> { + pub fn acceleration_structures( + mut self, + acceleration_structures: &'a [AccelerationStructureNV], + ) -> WriteDescriptorSetAccelerationStructureNVBuilder<'a> { + self.inner.acceleration_structure_count = acceleration_structures.len() as _; + self.inner.p_acceleration_structures = acceleration_structures.as_ptr(); + self + } + #[doc = r" Calling build will **discard** all the lifetime information. Only call this if"] + #[doc = r" necessary! 
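// Illustrative usage sketch (editor's note, not part of the vendored patch).
// The `ExtendsWriteDescriptorSet` impls just above are what allow this struct
// to be chained into a `WriteDescriptorSet` (whose builder is generated
// elsewhere in this file). `push_next` prepends: pushing D onto a chain
// A -> B -> C yields A -> D -> B -> C. `accel_structure` here is an assumed
// handle obtained earlier.
//
// let mut accel_write = vk::WriteDescriptorSetAccelerationStructureNV::builder()
//     .acceleration_structures(std::slice::from_ref(&accel_structure));
// let write = vk::WriteDescriptorSet::builder()
//     .descriptor_type(vk::DescriptorType::ACCELERATION_STRUCTURE_NV)
//     .push_next(&mut accel_write);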
Builders implement `Deref` targeting their corresponding Vulkan struct,"] + #[doc = r" so references to builders can be passed directly to Vulkan functions."] + pub fn build(self) -> WriteDescriptorSetAccelerationStructureNV { + self.inner + } +} +#[repr(C)] +#[derive(Copy, Clone, Debug)] +#[doc = ""] +pub struct AccelerationStructureMemoryRequirementsInfoNV { + pub s_type: StructureType, + pub p_next: *const c_void, + pub ty: AccelerationStructureMemoryRequirementsTypeNV, + pub acceleration_structure: AccelerationStructureNV, +} +impl ::std::default::Default for AccelerationStructureMemoryRequirementsInfoNV { + fn default() -> AccelerationStructureMemoryRequirementsInfoNV { + AccelerationStructureMemoryRequirementsInfoNV { + s_type: StructureType::ACCELERATION_STRUCTURE_MEMORY_REQUIREMENTS_INFO_NV, + p_next: ::std::ptr::null(), + ty: AccelerationStructureMemoryRequirementsTypeNV::default(), + acceleration_structure: AccelerationStructureNV::default(), + } + } +} +impl AccelerationStructureMemoryRequirementsInfoNV { + pub fn builder<'a>() -> AccelerationStructureMemoryRequirementsInfoNVBuilder<'a> { + AccelerationStructureMemoryRequirementsInfoNVBuilder { + inner: AccelerationStructureMemoryRequirementsInfoNV::default(), + marker: ::std::marker::PhantomData, + } + } +} +#[repr(transparent)] +pub struct AccelerationStructureMemoryRequirementsInfoNVBuilder<'a> { + inner: AccelerationStructureMemoryRequirementsInfoNV, + marker: ::std::marker::PhantomData<&'a ()>, +} +pub unsafe trait ExtendsAccelerationStructureMemoryRequirementsInfoNV {} +impl<'a> ::std::ops::Deref for AccelerationStructureMemoryRequirementsInfoNVBuilder<'a> { + type Target = AccelerationStructureMemoryRequirementsInfoNV; + fn deref(&self) -> &Self::Target { + &self.inner + } +} +impl<'a> ::std::ops::DerefMut for AccelerationStructureMemoryRequirementsInfoNVBuilder<'a> { + fn deref_mut(&mut self) -> &mut Self::Target { + &mut self.inner + } +} +impl<'a> AccelerationStructureMemoryRequirementsInfoNVBuilder<'a> { + pub fn ty( + mut self, + ty: AccelerationStructureMemoryRequirementsTypeNV, + ) -> AccelerationStructureMemoryRequirementsInfoNVBuilder<'a> { + self.inner.ty = ty; + self + } + pub fn acceleration_structure( + mut self, + acceleration_structure: AccelerationStructureNV, + ) -> AccelerationStructureMemoryRequirementsInfoNVBuilder<'a> { + self.inner.acceleration_structure = acceleration_structure; + self + } + #[doc = r" Prepends the given extension struct between the root and the first pointer. This"] + #[doc = r" method only exists on structs that can be passed to a function directly. Only"] + #[doc = r" valid extension structs can be pushed into the chain."] + #[doc = r" If the chain looks like `A -> B -> C`, and you call `builder.push_next(&mut D)`, then the"] + #[doc = r" chain will look like `A -> D -> B -> C`."] + pub fn push_next( + mut self, + next: &'a mut T, + ) -> AccelerationStructureMemoryRequirementsInfoNVBuilder<'a> { + unsafe { + let next_ptr = next as *mut T as *mut BaseOutStructure; + let last_next = ptr_chain_iter(next).last().unwrap(); + (*last_next).p_next = self.inner.p_next as _; + self.inner.p_next = next_ptr as _; + } + self + } + #[doc = r" Calling build will **discard** all the lifetime information. Only call this if"] + #[doc = r" necessary! 
Builders implement `Deref` targeting their corresponding Vulkan struct,"] + #[doc = r" so references to builders can be passed directly to Vulkan functions."] + pub fn build(self) -> AccelerationStructureMemoryRequirementsInfoNV { + self.inner + } +} +#[repr(C)] +#[derive(Copy, Clone, Debug)] +#[doc = ""] +pub struct PhysicalDeviceRayTracingPropertiesNV { + pub s_type: StructureType, + pub p_next: *mut c_void, + pub shader_group_handle_size: u32, + pub max_recursion_depth: u32, + pub max_shader_group_stride: u32, + pub shader_group_base_alignment: u32, + pub max_geometry_count: u64, + pub max_instance_count: u64, + pub max_triangle_count: u64, + pub max_descriptor_set_acceleration_structures: u32, +} +impl ::std::default::Default for PhysicalDeviceRayTracingPropertiesNV { + fn default() -> PhysicalDeviceRayTracingPropertiesNV { + PhysicalDeviceRayTracingPropertiesNV { + s_type: StructureType::PHYSICAL_DEVICE_RAY_TRACING_PROPERTIES_NV, + p_next: ::std::ptr::null_mut(), + shader_group_handle_size: u32::default(), + max_recursion_depth: u32::default(), + max_shader_group_stride: u32::default(), + shader_group_base_alignment: u32::default(), + max_geometry_count: u64::default(), + max_instance_count: u64::default(), + max_triangle_count: u64::default(), + max_descriptor_set_acceleration_structures: u32::default(), + } + } +} +impl PhysicalDeviceRayTracingPropertiesNV { + pub fn builder<'a>() -> PhysicalDeviceRayTracingPropertiesNVBuilder<'a> { + PhysicalDeviceRayTracingPropertiesNVBuilder { + inner: PhysicalDeviceRayTracingPropertiesNV::default(), + marker: ::std::marker::PhantomData, + } + } +} +#[repr(transparent)] +pub struct PhysicalDeviceRayTracingPropertiesNVBuilder<'a> { + inner: PhysicalDeviceRayTracingPropertiesNV, + marker: ::std::marker::PhantomData<&'a ()>, +} +unsafe impl ExtendsPhysicalDeviceProperties2 for PhysicalDeviceRayTracingPropertiesNVBuilder<'_> {} +unsafe impl ExtendsPhysicalDeviceProperties2 for PhysicalDeviceRayTracingPropertiesNV {} +impl<'a> ::std::ops::Deref for PhysicalDeviceRayTracingPropertiesNVBuilder<'a> { + type Target = PhysicalDeviceRayTracingPropertiesNV; + fn deref(&self) -> &Self::Target { + &self.inner + } +} +impl<'a> ::std::ops::DerefMut for PhysicalDeviceRayTracingPropertiesNVBuilder<'a> { + fn deref_mut(&mut self) -> &mut Self::Target { + &mut self.inner + } +} +impl<'a> PhysicalDeviceRayTracingPropertiesNVBuilder<'a> { + pub fn shader_group_handle_size( + mut self, + shader_group_handle_size: u32, + ) -> PhysicalDeviceRayTracingPropertiesNVBuilder<'a> { + self.inner.shader_group_handle_size = shader_group_handle_size; + self + } + pub fn max_recursion_depth( + mut self, + max_recursion_depth: u32, + ) -> PhysicalDeviceRayTracingPropertiesNVBuilder<'a> { + self.inner.max_recursion_depth = max_recursion_depth; + self + } + pub fn max_shader_group_stride( + mut self, + max_shader_group_stride: u32, + ) -> PhysicalDeviceRayTracingPropertiesNVBuilder<'a> { + self.inner.max_shader_group_stride = max_shader_group_stride; + self + } + pub fn shader_group_base_alignment( + mut self, + shader_group_base_alignment: u32, + ) -> PhysicalDeviceRayTracingPropertiesNVBuilder<'a> { + self.inner.shader_group_base_alignment = shader_group_base_alignment; + self + } + pub fn max_geometry_count( + mut self, + max_geometry_count: u64, + ) -> PhysicalDeviceRayTracingPropertiesNVBuilder<'a> { + self.inner.max_geometry_count = max_geometry_count; + self + } + pub fn max_instance_count( + mut self, + max_instance_count: u64, + ) -> 
PhysicalDeviceRayTracingPropertiesNVBuilder<'a> { + self.inner.max_instance_count = max_instance_count; + self + } + pub fn max_triangle_count( + mut self, + max_triangle_count: u64, + ) -> PhysicalDeviceRayTracingPropertiesNVBuilder<'a> { + self.inner.max_triangle_count = max_triangle_count; + self + } + pub fn max_descriptor_set_acceleration_structures( + mut self, + max_descriptor_set_acceleration_structures: u32, + ) -> PhysicalDeviceRayTracingPropertiesNVBuilder<'a> { + self.inner.max_descriptor_set_acceleration_structures = + max_descriptor_set_acceleration_structures; + self + } + #[doc = r" Calling build will **discard** all the lifetime information. Only call this if"] + #[doc = r" necessary! Builders implement `Deref` targeting their corresponding Vulkan struct,"] + #[doc = r" so references to builders can be passed directly to Vulkan functions."] + pub fn build(self) -> PhysicalDeviceRayTracingPropertiesNV { + self.inner + } +} +#[repr(C)] +#[derive(Copy, Clone, Debug)] +#[doc = ""] +pub struct DrmFormatModifierPropertiesListEXT { + pub s_type: StructureType, + pub p_next: *mut c_void, + pub drm_format_modifier_count: u32, + pub p_drm_format_modifier_properties: *mut DrmFormatModifierPropertiesEXT, +} +impl ::std::default::Default for DrmFormatModifierPropertiesListEXT { + fn default() -> DrmFormatModifierPropertiesListEXT { + DrmFormatModifierPropertiesListEXT { + s_type: StructureType::DRM_FORMAT_MODIFIER_PROPERTIES_LIST_EXT, + p_next: ::std::ptr::null_mut(), + drm_format_modifier_count: u32::default(), + p_drm_format_modifier_properties: ::std::ptr::null_mut(), + } + } +} +impl DrmFormatModifierPropertiesListEXT { + pub fn builder<'a>() -> DrmFormatModifierPropertiesListEXTBuilder<'a> { + DrmFormatModifierPropertiesListEXTBuilder { + inner: DrmFormatModifierPropertiesListEXT::default(), + marker: ::std::marker::PhantomData, + } + } +} +#[repr(transparent)] +pub struct DrmFormatModifierPropertiesListEXTBuilder<'a> { + inner: DrmFormatModifierPropertiesListEXT, + marker: ::std::marker::PhantomData<&'a ()>, +} +unsafe impl ExtendsFormatProperties2 for DrmFormatModifierPropertiesListEXTBuilder<'_> {} +unsafe impl ExtendsFormatProperties2 for DrmFormatModifierPropertiesListEXT {} +impl<'a> ::std::ops::Deref for DrmFormatModifierPropertiesListEXTBuilder<'a> { + type Target = DrmFormatModifierPropertiesListEXT; + fn deref(&self) -> &Self::Target { + &self.inner + } +} +impl<'a> ::std::ops::DerefMut for DrmFormatModifierPropertiesListEXTBuilder<'a> { + fn deref_mut(&mut self) -> &mut Self::Target { + &mut self.inner + } +} +impl<'a> DrmFormatModifierPropertiesListEXTBuilder<'a> { + pub fn drm_format_modifier_properties( + mut self, + drm_format_modifier_properties: &'a mut [DrmFormatModifierPropertiesEXT], + ) -> DrmFormatModifierPropertiesListEXTBuilder<'a> { + self.inner.drm_format_modifier_count = drm_format_modifier_properties.len() as _; + self.inner.p_drm_format_modifier_properties = drm_format_modifier_properties.as_mut_ptr(); + self + } + #[doc = r" Calling build will **discard** all the lifetime information. Only call this if"] + #[doc = r" necessary! 
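// Illustrative usage sketch (editor's note, not part of the vendored patch).
// Property structs whose builders implement `ExtendsPhysicalDeviceProperties2`
// are filled by the driver rather than the application, so the usual pattern
// is to chain a default instance into a `PhysicalDeviceProperties2` query.
// The `instance` and `pdev` values are assumed (ash-style Vulkan 1.1 entry
// point shown).
//
// let mut rt = vk::PhysicalDeviceRayTracingPropertiesNV::builder();
// let mut props2 = vk::PhysicalDeviceProperties2::builder().push_next(&mut rt);
// unsafe { instance.get_physical_device_properties2(pdev, &mut props2) };
// let handle_size = rt.shader_group_handle_size; // read back through Deref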
Builders implement `Deref` targeting their corresponding Vulkan struct,"] + #[doc = r" so references to builders can be passed directly to Vulkan functions."] + pub fn build(self) -> DrmFormatModifierPropertiesListEXT { + self.inner + } +} +#[repr(C)] +#[derive(Copy, Clone, Default, Debug)] +#[doc = ""] +pub struct DrmFormatModifierPropertiesEXT { + pub drm_format_modifier: u64, + pub drm_format_modifier_plane_count: u32, + pub drm_format_modifier_tiling_features: FormatFeatureFlags, +} +impl DrmFormatModifierPropertiesEXT { + pub fn builder<'a>() -> DrmFormatModifierPropertiesEXTBuilder<'a> { + DrmFormatModifierPropertiesEXTBuilder { + inner: DrmFormatModifierPropertiesEXT::default(), + marker: ::std::marker::PhantomData, + } + } +} +#[repr(transparent)] +pub struct DrmFormatModifierPropertiesEXTBuilder<'a> { + inner: DrmFormatModifierPropertiesEXT, + marker: ::std::marker::PhantomData<&'a ()>, +} +impl<'a> ::std::ops::Deref for DrmFormatModifierPropertiesEXTBuilder<'a> { + type Target = DrmFormatModifierPropertiesEXT; + fn deref(&self) -> &Self::Target { + &self.inner + } +} +impl<'a> ::std::ops::DerefMut for DrmFormatModifierPropertiesEXTBuilder<'a> { + fn deref_mut(&mut self) -> &mut Self::Target { + &mut self.inner + } +} +impl<'a> DrmFormatModifierPropertiesEXTBuilder<'a> { + pub fn drm_format_modifier( + mut self, + drm_format_modifier: u64, + ) -> DrmFormatModifierPropertiesEXTBuilder<'a> { + self.inner.drm_format_modifier = drm_format_modifier; + self + } + pub fn drm_format_modifier_plane_count( + mut self, + drm_format_modifier_plane_count: u32, + ) -> DrmFormatModifierPropertiesEXTBuilder<'a> { + self.inner.drm_format_modifier_plane_count = drm_format_modifier_plane_count; + self + } + pub fn drm_format_modifier_tiling_features( + mut self, + drm_format_modifier_tiling_features: FormatFeatureFlags, + ) -> DrmFormatModifierPropertiesEXTBuilder<'a> { + self.inner.drm_format_modifier_tiling_features = drm_format_modifier_tiling_features; + self + } + #[doc = r" Calling build will **discard** all the lifetime information. Only call this if"] + #[doc = r" necessary! 
Builders implement `Deref` targeting their corresponding Vulkan struct,"] + #[doc = r" so references to builders can be passed directly to Vulkan functions."] + pub fn build(self) -> DrmFormatModifierPropertiesEXT { + self.inner + } +} +#[repr(C)] +#[derive(Copy, Clone, Debug)] +#[doc = ""] +pub struct PhysicalDeviceImageDrmFormatModifierInfoEXT { + pub s_type: StructureType, + pub p_next: *const c_void, + pub drm_format_modifier: u64, + pub sharing_mode: SharingMode, + pub queue_family_index_count: u32, + pub p_queue_family_indices: *const u32, +} +impl ::std::default::Default for PhysicalDeviceImageDrmFormatModifierInfoEXT { + fn default() -> PhysicalDeviceImageDrmFormatModifierInfoEXT { + PhysicalDeviceImageDrmFormatModifierInfoEXT { + s_type: StructureType::PHYSICAL_DEVICE_IMAGE_DRM_FORMAT_MODIFIER_INFO_EXT, + p_next: ::std::ptr::null(), + drm_format_modifier: u64::default(), + sharing_mode: SharingMode::default(), + queue_family_index_count: u32::default(), + p_queue_family_indices: ::std::ptr::null(), + } + } +} +impl PhysicalDeviceImageDrmFormatModifierInfoEXT { + pub fn builder<'a>() -> PhysicalDeviceImageDrmFormatModifierInfoEXTBuilder<'a> { + PhysicalDeviceImageDrmFormatModifierInfoEXTBuilder { + inner: PhysicalDeviceImageDrmFormatModifierInfoEXT::default(), + marker: ::std::marker::PhantomData, + } + } +} +#[repr(transparent)] +pub struct PhysicalDeviceImageDrmFormatModifierInfoEXTBuilder<'a> { + inner: PhysicalDeviceImageDrmFormatModifierInfoEXT, + marker: ::std::marker::PhantomData<&'a ()>, +} +unsafe impl ExtendsPhysicalDeviceImageFormatInfo2 + for PhysicalDeviceImageDrmFormatModifierInfoEXTBuilder<'_> +{ +} +unsafe impl ExtendsPhysicalDeviceImageFormatInfo2 for PhysicalDeviceImageDrmFormatModifierInfoEXT {} +impl<'a> ::std::ops::Deref for PhysicalDeviceImageDrmFormatModifierInfoEXTBuilder<'a> { + type Target = PhysicalDeviceImageDrmFormatModifierInfoEXT; + fn deref(&self) -> &Self::Target { + &self.inner + } +} +impl<'a> ::std::ops::DerefMut for PhysicalDeviceImageDrmFormatModifierInfoEXTBuilder<'a> { + fn deref_mut(&mut self) -> &mut Self::Target { + &mut self.inner + } +} +impl<'a> PhysicalDeviceImageDrmFormatModifierInfoEXTBuilder<'a> { + pub fn drm_format_modifier( + mut self, + drm_format_modifier: u64, + ) -> PhysicalDeviceImageDrmFormatModifierInfoEXTBuilder<'a> { + self.inner.drm_format_modifier = drm_format_modifier; + self + } + pub fn sharing_mode( + mut self, + sharing_mode: SharingMode, + ) -> PhysicalDeviceImageDrmFormatModifierInfoEXTBuilder<'a> { + self.inner.sharing_mode = sharing_mode; + self + } + pub fn queue_family_indices( + mut self, + queue_family_indices: &'a [u32], + ) -> PhysicalDeviceImageDrmFormatModifierInfoEXTBuilder<'a> { + self.inner.queue_family_index_count = queue_family_indices.len() as _; + self.inner.p_queue_family_indices = queue_family_indices.as_ptr(); + self + } + #[doc = r" Calling build will **discard** all the lifetime information. Only call this if"] + #[doc = r" necessary! 
Builders implement `Deref` targeting their corresponding Vulkan struct,"] + #[doc = r" so references to builders can be passed directly to Vulkan functions."] + pub fn build(self) -> PhysicalDeviceImageDrmFormatModifierInfoEXT { + self.inner + } +} +#[repr(C)] +#[derive(Copy, Clone, Debug)] +#[doc = ""] +pub struct ImageDrmFormatModifierListCreateInfoEXT { + pub s_type: StructureType, + pub p_next: *const c_void, + pub drm_format_modifier_count: u32, + pub p_drm_format_modifiers: *const u64, +} +impl ::std::default::Default for ImageDrmFormatModifierListCreateInfoEXT { + fn default() -> ImageDrmFormatModifierListCreateInfoEXT { + ImageDrmFormatModifierListCreateInfoEXT { + s_type: StructureType::IMAGE_DRM_FORMAT_MODIFIER_LIST_CREATE_INFO_EXT, + p_next: ::std::ptr::null(), + drm_format_modifier_count: u32::default(), + p_drm_format_modifiers: ::std::ptr::null(), + } + } +} +impl ImageDrmFormatModifierListCreateInfoEXT { + pub fn builder<'a>() -> ImageDrmFormatModifierListCreateInfoEXTBuilder<'a> { + ImageDrmFormatModifierListCreateInfoEXTBuilder { + inner: ImageDrmFormatModifierListCreateInfoEXT::default(), + marker: ::std::marker::PhantomData, + } + } +} +#[repr(transparent)] +pub struct ImageDrmFormatModifierListCreateInfoEXTBuilder<'a> { + inner: ImageDrmFormatModifierListCreateInfoEXT, + marker: ::std::marker::PhantomData<&'a ()>, +} +unsafe impl ExtendsImageCreateInfo for ImageDrmFormatModifierListCreateInfoEXTBuilder<'_> {} +unsafe impl ExtendsImageCreateInfo for ImageDrmFormatModifierListCreateInfoEXT {} +impl<'a> ::std::ops::Deref for ImageDrmFormatModifierListCreateInfoEXTBuilder<'a> { + type Target = ImageDrmFormatModifierListCreateInfoEXT; + fn deref(&self) -> &Self::Target { + &self.inner + } +} +impl<'a> ::std::ops::DerefMut for ImageDrmFormatModifierListCreateInfoEXTBuilder<'a> { + fn deref_mut(&mut self) -> &mut Self::Target { + &mut self.inner + } +} +impl<'a> ImageDrmFormatModifierListCreateInfoEXTBuilder<'a> { + pub fn drm_format_modifiers( + mut self, + drm_format_modifiers: &'a [u64], + ) -> ImageDrmFormatModifierListCreateInfoEXTBuilder<'a> { + self.inner.drm_format_modifier_count = drm_format_modifiers.len() as _; + self.inner.p_drm_format_modifiers = drm_format_modifiers.as_ptr(); + self + } + #[doc = r" Calling build will **discard** all the lifetime information. Only call this if"] + #[doc = r" necessary! 
Builders implement `Deref` targeting their corresponding Vulkan struct,"] + #[doc = r" so references to builders can be passed directly to Vulkan functions."] + pub fn build(self) -> ImageDrmFormatModifierListCreateInfoEXT { + self.inner + } +} +#[repr(C)] +#[derive(Copy, Clone, Debug)] +#[doc = ""] +pub struct ImageDrmFormatModifierExplicitCreateInfoEXT { + pub s_type: StructureType, + pub p_next: *const c_void, + pub drm_format_modifier: u64, + pub drm_format_modifier_plane_count: u32, + pub p_plane_layouts: *const SubresourceLayout, +} +impl ::std::default::Default for ImageDrmFormatModifierExplicitCreateInfoEXT { + fn default() -> ImageDrmFormatModifierExplicitCreateInfoEXT { + ImageDrmFormatModifierExplicitCreateInfoEXT { + s_type: StructureType::IMAGE_DRM_FORMAT_MODIFIER_EXPLICIT_CREATE_INFO_EXT, + p_next: ::std::ptr::null(), + drm_format_modifier: u64::default(), + drm_format_modifier_plane_count: u32::default(), + p_plane_layouts: ::std::ptr::null(), + } + } +} +impl ImageDrmFormatModifierExplicitCreateInfoEXT { + pub fn builder<'a>() -> ImageDrmFormatModifierExplicitCreateInfoEXTBuilder<'a> { + ImageDrmFormatModifierExplicitCreateInfoEXTBuilder { + inner: ImageDrmFormatModifierExplicitCreateInfoEXT::default(), + marker: ::std::marker::PhantomData, + } + } +} +#[repr(transparent)] +pub struct ImageDrmFormatModifierExplicitCreateInfoEXTBuilder<'a> { + inner: ImageDrmFormatModifierExplicitCreateInfoEXT, + marker: ::std::marker::PhantomData<&'a ()>, +} +unsafe impl ExtendsImageCreateInfo for ImageDrmFormatModifierExplicitCreateInfoEXTBuilder<'_> {} +unsafe impl ExtendsImageCreateInfo for ImageDrmFormatModifierExplicitCreateInfoEXT {} +impl<'a> ::std::ops::Deref for ImageDrmFormatModifierExplicitCreateInfoEXTBuilder<'a> { + type Target = ImageDrmFormatModifierExplicitCreateInfoEXT; + fn deref(&self) -> &Self::Target { + &self.inner + } +} +impl<'a> ::std::ops::DerefMut for ImageDrmFormatModifierExplicitCreateInfoEXTBuilder<'a> { + fn deref_mut(&mut self) -> &mut Self::Target { + &mut self.inner + } +} +impl<'a> ImageDrmFormatModifierExplicitCreateInfoEXTBuilder<'a> { + pub fn drm_format_modifier( + mut self, + drm_format_modifier: u64, + ) -> ImageDrmFormatModifierExplicitCreateInfoEXTBuilder<'a> { + self.inner.drm_format_modifier = drm_format_modifier; + self + } + pub fn plane_layouts( + mut self, + plane_layouts: &'a [SubresourceLayout], + ) -> ImageDrmFormatModifierExplicitCreateInfoEXTBuilder<'a> { + self.inner.drm_format_modifier_plane_count = plane_layouts.len() as _; + self.inner.p_plane_layouts = plane_layouts.as_ptr(); + self + } + #[doc = r" Calling build will **discard** all the lifetime information. Only call this if"] + #[doc = r" necessary! 
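// Illustrative usage sketch (editor's note, not part of the vendored patch).
// Both DRM-format-modifier create-info structs extend `ImageCreateInfo`, so
// creating an image with an explicit modifier chains them in rather than
// using a separate call. `ImageCreateInfo`'s builder, the extension tiling
// constant, and the `modifier` value are assumed from elsewhere.
//
// let layouts = [vk::SubresourceLayout::default()];
// let mut explicit = vk::ImageDrmFormatModifierExplicitCreateInfoEXT::builder()
//     .drm_format_modifier(modifier)
//     // sets `drm_format_modifier_plane_count` and `p_plane_layouts` together:
//     .plane_layouts(&layouts);
// let image_info = vk::ImageCreateInfo::builder()
//     .tiling(vk::ImageTiling::DRM_FORMAT_MODIFIER_EXT)
//     .push_next(&mut explicit);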
Builders implement `Deref` targeting their corresponding Vulkan struct,"] + #[doc = r" so references to builders can be passed directly to Vulkan functions."] + pub fn build(self) -> ImageDrmFormatModifierExplicitCreateInfoEXT { + self.inner + } +} +#[repr(C)] +#[derive(Copy, Clone, Debug)] +#[doc = ""] +pub struct ImageDrmFormatModifierPropertiesEXT { + pub s_type: StructureType, + pub p_next: *mut c_void, + pub drm_format_modifier: u64, +} +impl ::std::default::Default for ImageDrmFormatModifierPropertiesEXT { + fn default() -> ImageDrmFormatModifierPropertiesEXT { + ImageDrmFormatModifierPropertiesEXT { + s_type: StructureType::IMAGE_DRM_FORMAT_MODIFIER_PROPERTIES_EXT, + p_next: ::std::ptr::null_mut(), + drm_format_modifier: u64::default(), + } + } +} +impl ImageDrmFormatModifierPropertiesEXT { + pub fn builder<'a>() -> ImageDrmFormatModifierPropertiesEXTBuilder<'a> { + ImageDrmFormatModifierPropertiesEXTBuilder { + inner: ImageDrmFormatModifierPropertiesEXT::default(), + marker: ::std::marker::PhantomData, + } + } +} +#[repr(transparent)] +pub struct ImageDrmFormatModifierPropertiesEXTBuilder<'a> { + inner: ImageDrmFormatModifierPropertiesEXT, + marker: ::std::marker::PhantomData<&'a ()>, +} +pub unsafe trait ExtendsImageDrmFormatModifierPropertiesEXT {} +impl<'a> ::std::ops::Deref for ImageDrmFormatModifierPropertiesEXTBuilder<'a> { + type Target = ImageDrmFormatModifierPropertiesEXT; + fn deref(&self) -> &Self::Target { + &self.inner + } +} +impl<'a> ::std::ops::DerefMut for ImageDrmFormatModifierPropertiesEXTBuilder<'a> { + fn deref_mut(&mut self) -> &mut Self::Target { + &mut self.inner + } +} +impl<'a> ImageDrmFormatModifierPropertiesEXTBuilder<'a> { + pub fn drm_format_modifier( + mut self, + drm_format_modifier: u64, + ) -> ImageDrmFormatModifierPropertiesEXTBuilder<'a> { + self.inner.drm_format_modifier = drm_format_modifier; + self + } + #[doc = r" Prepends the given extension struct between the root and the first pointer. This"] + #[doc = r" method only exists on structs that can be passed to a function directly. Only"] + #[doc = r" valid extension structs can be pushed into the chain."] + #[doc = r" If the chain looks like `A -> B -> C`, and you call `builder.push_next(&mut D)`, then the"] + #[doc = r" chain will look like `A -> D -> B -> C`."] + pub fn push_next( + mut self, + next: &'a mut T, + ) -> ImageDrmFormatModifierPropertiesEXTBuilder<'a> { + unsafe { + let next_ptr = next as *mut T as *mut BaseOutStructure; + let last_next = ptr_chain_iter(next).last().unwrap(); + (*last_next).p_next = self.inner.p_next as _; + self.inner.p_next = next_ptr as _; + } + self + } + #[doc = r" Calling build will **discard** all the lifetime information. Only call this if"] + #[doc = r" necessary! 
Builders implement `Deref` targeting their corresponding Vulkan struct,"] + #[doc = r" so references to builders can be passed directly to Vulkan functions."] + pub fn build(self) -> ImageDrmFormatModifierPropertiesEXT { + self.inner + } +} +#[repr(C)] +#[derive(Copy, Clone, Debug)] +#[doc = ""] +pub struct ImageStencilUsageCreateInfoEXT { + pub s_type: StructureType, + pub p_next: *const c_void, + pub stencil_usage: ImageUsageFlags, +} +impl ::std::default::Default for ImageStencilUsageCreateInfoEXT { + fn default() -> ImageStencilUsageCreateInfoEXT { + ImageStencilUsageCreateInfoEXT { + s_type: StructureType::IMAGE_STENCIL_USAGE_CREATE_INFO_EXT, + p_next: ::std::ptr::null(), + stencil_usage: ImageUsageFlags::default(), + } + } +} +impl ImageStencilUsageCreateInfoEXT { + pub fn builder<'a>() -> ImageStencilUsageCreateInfoEXTBuilder<'a> { + ImageStencilUsageCreateInfoEXTBuilder { + inner: ImageStencilUsageCreateInfoEXT::default(), + marker: ::std::marker::PhantomData, + } + } +} +#[repr(transparent)] +pub struct ImageStencilUsageCreateInfoEXTBuilder<'a> { + inner: ImageStencilUsageCreateInfoEXT, + marker: ::std::marker::PhantomData<&'a ()>, +} +unsafe impl ExtendsImageCreateInfo for ImageStencilUsageCreateInfoEXTBuilder<'_> {} +unsafe impl ExtendsImageCreateInfo for ImageStencilUsageCreateInfoEXT {} +unsafe impl ExtendsPhysicalDeviceImageFormatInfo2 for ImageStencilUsageCreateInfoEXTBuilder<'_> {} +unsafe impl ExtendsPhysicalDeviceImageFormatInfo2 for ImageStencilUsageCreateInfoEXT {} +impl<'a> ::std::ops::Deref for ImageStencilUsageCreateInfoEXTBuilder<'a> { + type Target = ImageStencilUsageCreateInfoEXT; + fn deref(&self) -> &Self::Target { + &self.inner + } +} +impl<'a> ::std::ops::DerefMut for ImageStencilUsageCreateInfoEXTBuilder<'a> { + fn deref_mut(&mut self) -> &mut Self::Target { + &mut self.inner + } +} +impl<'a> ImageStencilUsageCreateInfoEXTBuilder<'a> { + pub fn stencil_usage( + mut self, + stencil_usage: ImageUsageFlags, + ) -> ImageStencilUsageCreateInfoEXTBuilder<'a> { + self.inner.stencil_usage = stencil_usage; + self + } + #[doc = r" Calling build will **discard** all the lifetime information. Only call this if"] + #[doc = r" necessary! 
Builders implement `Deref` targeting their corresponding Vulkan struct,"] + #[doc = r" so references to builders can be passed directly to Vulkan functions."] + pub fn build(self) -> ImageStencilUsageCreateInfoEXT { + self.inner + } +} +#[repr(C)] +#[derive(Copy, Clone, Debug)] +#[doc = ""] +pub struct DeviceMemoryOverallocationCreateInfoAMD { + pub s_type: StructureType, + pub p_next: *const c_void, + pub overallocation_behavior: MemoryOverallocationBehaviorAMD, +} +impl ::std::default::Default for DeviceMemoryOverallocationCreateInfoAMD { + fn default() -> DeviceMemoryOverallocationCreateInfoAMD { + DeviceMemoryOverallocationCreateInfoAMD { + s_type: StructureType::DEVICE_MEMORY_OVERALLOCATION_CREATE_INFO_AMD, + p_next: ::std::ptr::null(), + overallocation_behavior: MemoryOverallocationBehaviorAMD::default(), + } + } +} +impl DeviceMemoryOverallocationCreateInfoAMD { + pub fn builder<'a>() -> DeviceMemoryOverallocationCreateInfoAMDBuilder<'a> { + DeviceMemoryOverallocationCreateInfoAMDBuilder { + inner: DeviceMemoryOverallocationCreateInfoAMD::default(), + marker: ::std::marker::PhantomData, + } + } +} +#[repr(transparent)] +pub struct DeviceMemoryOverallocationCreateInfoAMDBuilder<'a> { + inner: DeviceMemoryOverallocationCreateInfoAMD, + marker: ::std::marker::PhantomData<&'a ()>, +} +unsafe impl ExtendsDeviceCreateInfo for DeviceMemoryOverallocationCreateInfoAMDBuilder<'_> {} +unsafe impl ExtendsDeviceCreateInfo for DeviceMemoryOverallocationCreateInfoAMD {} +impl<'a> ::std::ops::Deref for DeviceMemoryOverallocationCreateInfoAMDBuilder<'a> { + type Target = DeviceMemoryOverallocationCreateInfoAMD; + fn deref(&self) -> &Self::Target { + &self.inner + } +} +impl<'a> ::std::ops::DerefMut for DeviceMemoryOverallocationCreateInfoAMDBuilder<'a> { + fn deref_mut(&mut self) -> &mut Self::Target { + &mut self.inner + } +} +impl<'a> DeviceMemoryOverallocationCreateInfoAMDBuilder<'a> { + pub fn overallocation_behavior( + mut self, + overallocation_behavior: MemoryOverallocationBehaviorAMD, + ) -> DeviceMemoryOverallocationCreateInfoAMDBuilder<'a> { + self.inner.overallocation_behavior = overallocation_behavior; + self + } + #[doc = r" Calling build will **discard** all the lifetime information. Only call this if"] + #[doc = r" necessary! 
Builders implement `Deref` targeting their corresponding Vulkan struct,"] + #[doc = r" so references to builders can be passed directly to Vulkan functions."] + pub fn build(self) -> DeviceMemoryOverallocationCreateInfoAMD { + self.inner + } +} +#[repr(C)] +#[derive(Copy, Clone, Debug)] +#[doc = ""] +pub struct PhysicalDeviceFragmentDensityMapFeaturesEXT { + pub s_type: StructureType, + pub p_next: *mut c_void, + pub fragment_density_map: Bool32, + pub fragment_density_map_dynamic: Bool32, + pub fragment_density_map_non_subsampled_images: Bool32, +} +impl ::std::default::Default for PhysicalDeviceFragmentDensityMapFeaturesEXT { + fn default() -> PhysicalDeviceFragmentDensityMapFeaturesEXT { + PhysicalDeviceFragmentDensityMapFeaturesEXT { + s_type: StructureType::PHYSICAL_DEVICE_FRAGMENT_DENSITY_MAP_FEATURES_EXT, + p_next: ::std::ptr::null_mut(), + fragment_density_map: Bool32::default(), + fragment_density_map_dynamic: Bool32::default(), + fragment_density_map_non_subsampled_images: Bool32::default(), + } + } +} +impl PhysicalDeviceFragmentDensityMapFeaturesEXT { + pub fn builder<'a>() -> PhysicalDeviceFragmentDensityMapFeaturesEXTBuilder<'a> { + PhysicalDeviceFragmentDensityMapFeaturesEXTBuilder { + inner: PhysicalDeviceFragmentDensityMapFeaturesEXT::default(), + marker: ::std::marker::PhantomData, + } + } +} +#[repr(transparent)] +pub struct PhysicalDeviceFragmentDensityMapFeaturesEXTBuilder<'a> { + inner: PhysicalDeviceFragmentDensityMapFeaturesEXT, + marker: ::std::marker::PhantomData<&'a ()>, +} +unsafe impl ExtendsDeviceCreateInfo for PhysicalDeviceFragmentDensityMapFeaturesEXTBuilder<'_> {} +unsafe impl ExtendsDeviceCreateInfo for PhysicalDeviceFragmentDensityMapFeaturesEXT {} +impl<'a> ::std::ops::Deref for PhysicalDeviceFragmentDensityMapFeaturesEXTBuilder<'a> { + type Target = PhysicalDeviceFragmentDensityMapFeaturesEXT; + fn deref(&self) -> &Self::Target { + &self.inner + } +} +impl<'a> ::std::ops::DerefMut for PhysicalDeviceFragmentDensityMapFeaturesEXTBuilder<'a> { + fn deref_mut(&mut self) -> &mut Self::Target { + &mut self.inner + } +} +impl<'a> PhysicalDeviceFragmentDensityMapFeaturesEXTBuilder<'a> { + pub fn fragment_density_map( + mut self, + fragment_density_map: bool, + ) -> PhysicalDeviceFragmentDensityMapFeaturesEXTBuilder<'a> { + self.inner.fragment_density_map = fragment_density_map.into(); + self + } + pub fn fragment_density_map_dynamic( + mut self, + fragment_density_map_dynamic: bool, + ) -> PhysicalDeviceFragmentDensityMapFeaturesEXTBuilder<'a> { + self.inner.fragment_density_map_dynamic = fragment_density_map_dynamic.into(); + self + } + pub fn fragment_density_map_non_subsampled_images( + mut self, + fragment_density_map_non_subsampled_images: bool, + ) -> PhysicalDeviceFragmentDensityMapFeaturesEXTBuilder<'a> { + self.inner.fragment_density_map_non_subsampled_images = + fragment_density_map_non_subsampled_images.into(); + self + } + #[doc = r" Calling build will **discard** all the lifetime information. Only call this if"] + #[doc = r" necessary! 
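// Illustrative usage sketch (editor's note, not part of the vendored patch).
// Feature structs like the one above extend `DeviceCreateInfo`, and their
// generated setters take `bool` and convert to `Bool32` via `.into()`, so
// enabling a feature is a `push_next` at device-creation time. The
// `queue_infos` slice and `DeviceCreateInfo` builder are assumed from
// elsewhere in this file.
//
// let mut fdm = vk::PhysicalDeviceFragmentDensityMapFeaturesEXT::builder()
//     .fragment_density_map(true); // stored as Bool32(1)
// let device_info = vk::DeviceCreateInfo::builder()
//     .queue_create_infos(&queue_infos)
//     .push_next(&mut fdm);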
Builders implement `Deref` targeting their corresponding Vulkan struct,"] + #[doc = r" so references to builders can be passed directly to Vulkan functions."] + pub fn build(self) -> PhysicalDeviceFragmentDensityMapFeaturesEXT { + self.inner + } +} +#[repr(C)] +#[derive(Copy, Clone, Debug)] +#[doc = ""] +pub struct PhysicalDeviceFragmentDensityMapPropertiesEXT { + pub s_type: StructureType, + pub p_next: *mut c_void, + pub min_fragment_density_texel_size: Extent2D, + pub max_fragment_density_texel_size: Extent2D, + pub fragment_density_invocations: Bool32, +} +impl ::std::default::Default for PhysicalDeviceFragmentDensityMapPropertiesEXT { + fn default() -> PhysicalDeviceFragmentDensityMapPropertiesEXT { + PhysicalDeviceFragmentDensityMapPropertiesEXT { + s_type: StructureType::PHYSICAL_DEVICE_FRAGMENT_DENSITY_MAP_PROPERTIES_EXT, + p_next: ::std::ptr::null_mut(), + min_fragment_density_texel_size: Extent2D::default(), + max_fragment_density_texel_size: Extent2D::default(), + fragment_density_invocations: Bool32::default(), + } + } +} +impl PhysicalDeviceFragmentDensityMapPropertiesEXT { + pub fn builder<'a>() -> PhysicalDeviceFragmentDensityMapPropertiesEXTBuilder<'a> { + PhysicalDeviceFragmentDensityMapPropertiesEXTBuilder { + inner: PhysicalDeviceFragmentDensityMapPropertiesEXT::default(), + marker: ::std::marker::PhantomData, + } + } +} +#[repr(transparent)] +pub struct PhysicalDeviceFragmentDensityMapPropertiesEXTBuilder<'a> { + inner: PhysicalDeviceFragmentDensityMapPropertiesEXT, + marker: ::std::marker::PhantomData<&'a ()>, +} +unsafe impl ExtendsPhysicalDeviceProperties2 + for PhysicalDeviceFragmentDensityMapPropertiesEXTBuilder<'_> +{ +} +unsafe impl ExtendsPhysicalDeviceProperties2 for PhysicalDeviceFragmentDensityMapPropertiesEXT {} +impl<'a> ::std::ops::Deref for PhysicalDeviceFragmentDensityMapPropertiesEXTBuilder<'a> { + type Target = PhysicalDeviceFragmentDensityMapPropertiesEXT; + fn deref(&self) -> &Self::Target { + &self.inner + } +} +impl<'a> ::std::ops::DerefMut for PhysicalDeviceFragmentDensityMapPropertiesEXTBuilder<'a> { + fn deref_mut(&mut self) -> &mut Self::Target { + &mut self.inner + } +} +impl<'a> PhysicalDeviceFragmentDensityMapPropertiesEXTBuilder<'a> { + pub fn min_fragment_density_texel_size( + mut self, + min_fragment_density_texel_size: Extent2D, + ) -> PhysicalDeviceFragmentDensityMapPropertiesEXTBuilder<'a> { + self.inner.min_fragment_density_texel_size = min_fragment_density_texel_size; + self + } + pub fn max_fragment_density_texel_size( + mut self, + max_fragment_density_texel_size: Extent2D, + ) -> PhysicalDeviceFragmentDensityMapPropertiesEXTBuilder<'a> { + self.inner.max_fragment_density_texel_size = max_fragment_density_texel_size; + self + } + pub fn fragment_density_invocations( + mut self, + fragment_density_invocations: bool, + ) -> PhysicalDeviceFragmentDensityMapPropertiesEXTBuilder<'a> { + self.inner.fragment_density_invocations = fragment_density_invocations.into(); + self + } + #[doc = r" Calling build will **discard** all the lifetime information. Only call this if"] + #[doc = r" necessary! 
Builders implement `Deref` targeting their corresponding Vulkan struct,"] + #[doc = r" so references to builders can be passed directly to Vulkan functions."] + pub fn build(self) -> PhysicalDeviceFragmentDensityMapPropertiesEXT { + self.inner + } +} +#[repr(C)] +#[derive(Copy, Clone, Debug)] +#[doc = ""] +pub struct RenderPassFragmentDensityMapCreateInfoEXT { + pub s_type: StructureType, + pub p_next: *const c_void, + pub fragment_density_map_attachment: AttachmentReference, +} +impl ::std::default::Default for RenderPassFragmentDensityMapCreateInfoEXT { + fn default() -> RenderPassFragmentDensityMapCreateInfoEXT { + RenderPassFragmentDensityMapCreateInfoEXT { + s_type: StructureType::RENDER_PASS_FRAGMENT_DENSITY_MAP_CREATE_INFO_EXT, + p_next: ::std::ptr::null(), + fragment_density_map_attachment: AttachmentReference::default(), + } + } +} +impl RenderPassFragmentDensityMapCreateInfoEXT { + pub fn builder<'a>() -> RenderPassFragmentDensityMapCreateInfoEXTBuilder<'a> { + RenderPassFragmentDensityMapCreateInfoEXTBuilder { + inner: RenderPassFragmentDensityMapCreateInfoEXT::default(), + marker: ::std::marker::PhantomData, + } + } +} +#[repr(transparent)] +pub struct RenderPassFragmentDensityMapCreateInfoEXTBuilder<'a> { + inner: RenderPassFragmentDensityMapCreateInfoEXT, + marker: ::std::marker::PhantomData<&'a ()>, +} +unsafe impl ExtendsRenderPassCreateInfo for RenderPassFragmentDensityMapCreateInfoEXTBuilder<'_> {} +unsafe impl ExtendsRenderPassCreateInfo for RenderPassFragmentDensityMapCreateInfoEXT {} +impl<'a> ::std::ops::Deref for RenderPassFragmentDensityMapCreateInfoEXTBuilder<'a> { + type Target = RenderPassFragmentDensityMapCreateInfoEXT; + fn deref(&self) -> &Self::Target { + &self.inner + } +} +impl<'a> ::std::ops::DerefMut for RenderPassFragmentDensityMapCreateInfoEXTBuilder<'a> { + fn deref_mut(&mut self) -> &mut Self::Target { + &mut self.inner + } +} +impl<'a> RenderPassFragmentDensityMapCreateInfoEXTBuilder<'a> { + pub fn fragment_density_map_attachment( + mut self, + fragment_density_map_attachment: AttachmentReference, + ) -> RenderPassFragmentDensityMapCreateInfoEXTBuilder<'a> { + self.inner.fragment_density_map_attachment = fragment_density_map_attachment; + self + } + #[doc = r" Calling build will **discard** all the lifetime information. Only call this if"] + #[doc = r" necessary! 
Builders implement `Deref` targeting their corresponding Vulkan struct,"] + #[doc = r" so references to builders can be passed directly to Vulkan functions."] + pub fn build(self) -> RenderPassFragmentDensityMapCreateInfoEXT { + self.inner + } +} +#[repr(C)] +#[derive(Copy, Clone, Debug)] +#[doc = ""] +pub struct PhysicalDeviceScalarBlockLayoutFeaturesEXT { + pub s_type: StructureType, + pub p_next: *mut c_void, + pub scalar_block_layout: Bool32, +} +impl ::std::default::Default for PhysicalDeviceScalarBlockLayoutFeaturesEXT { + fn default() -> PhysicalDeviceScalarBlockLayoutFeaturesEXT { + PhysicalDeviceScalarBlockLayoutFeaturesEXT { + s_type: StructureType::PHYSICAL_DEVICE_SCALAR_BLOCK_LAYOUT_FEATURES_EXT, + p_next: ::std::ptr::null_mut(), + scalar_block_layout: Bool32::default(), + } + } +} +impl PhysicalDeviceScalarBlockLayoutFeaturesEXT { + pub fn builder<'a>() -> PhysicalDeviceScalarBlockLayoutFeaturesEXTBuilder<'a> { + PhysicalDeviceScalarBlockLayoutFeaturesEXTBuilder { + inner: PhysicalDeviceScalarBlockLayoutFeaturesEXT::default(), + marker: ::std::marker::PhantomData, + } + } +} +#[repr(transparent)] +pub struct PhysicalDeviceScalarBlockLayoutFeaturesEXTBuilder<'a> { + inner: PhysicalDeviceScalarBlockLayoutFeaturesEXT, + marker: ::std::marker::PhantomData<&'a ()>, +} +unsafe impl ExtendsDeviceCreateInfo for PhysicalDeviceScalarBlockLayoutFeaturesEXTBuilder<'_> {} +unsafe impl ExtendsDeviceCreateInfo for PhysicalDeviceScalarBlockLayoutFeaturesEXT {} +impl<'a> ::std::ops::Deref for PhysicalDeviceScalarBlockLayoutFeaturesEXTBuilder<'a> { + type Target = PhysicalDeviceScalarBlockLayoutFeaturesEXT; + fn deref(&self) -> &Self::Target { + &self.inner + } +} +impl<'a> ::std::ops::DerefMut for PhysicalDeviceScalarBlockLayoutFeaturesEXTBuilder<'a> { + fn deref_mut(&mut self) -> &mut Self::Target { + &mut self.inner + } +} +impl<'a> PhysicalDeviceScalarBlockLayoutFeaturesEXTBuilder<'a> { + pub fn scalar_block_layout( + mut self, + scalar_block_layout: bool, + ) -> PhysicalDeviceScalarBlockLayoutFeaturesEXTBuilder<'a> { + self.inner.scalar_block_layout = scalar_block_layout.into(); + self + } + #[doc = r" Calling build will **discard** all the lifetime information. Only call this if"] + #[doc = r" necessary! 
Builders implement `Deref` targeting their corresponding Vulkan struct,"] + #[doc = r" so references to builders can be passed directly to Vulkan functions."] + pub fn build(self) -> PhysicalDeviceScalarBlockLayoutFeaturesEXT { + self.inner + } +} +#[repr(C)] +#[derive(Copy, Clone, Debug)] +#[doc = ""] +pub struct PhysicalDeviceMemoryBudgetPropertiesEXT { + pub s_type: StructureType, + pub p_next: *mut c_void, + pub heap_budget: [DeviceSize; MAX_MEMORY_HEAPS], + pub heap_usage: [DeviceSize; MAX_MEMORY_HEAPS], +} +impl ::std::default::Default for PhysicalDeviceMemoryBudgetPropertiesEXT { + fn default() -> PhysicalDeviceMemoryBudgetPropertiesEXT { + PhysicalDeviceMemoryBudgetPropertiesEXT { + s_type: StructureType::PHYSICAL_DEVICE_MEMORY_BUDGET_PROPERTIES_EXT, + p_next: ::std::ptr::null_mut(), + heap_budget: unsafe { ::std::mem::zeroed() }, + heap_usage: unsafe { ::std::mem::zeroed() }, + } + } +} +impl PhysicalDeviceMemoryBudgetPropertiesEXT { + pub fn builder<'a>() -> PhysicalDeviceMemoryBudgetPropertiesEXTBuilder<'a> { + PhysicalDeviceMemoryBudgetPropertiesEXTBuilder { + inner: PhysicalDeviceMemoryBudgetPropertiesEXT::default(), + marker: ::std::marker::PhantomData, + } + } +} +#[repr(transparent)] +pub struct PhysicalDeviceMemoryBudgetPropertiesEXTBuilder<'a> { + inner: PhysicalDeviceMemoryBudgetPropertiesEXT, + marker: ::std::marker::PhantomData<&'a ()>, +} +unsafe impl ExtendsPhysicalDeviceMemoryProperties2 + for PhysicalDeviceMemoryBudgetPropertiesEXTBuilder<'_> +{ +} +unsafe impl ExtendsPhysicalDeviceMemoryProperties2 for PhysicalDeviceMemoryBudgetPropertiesEXT {} +impl<'a> ::std::ops::Deref for PhysicalDeviceMemoryBudgetPropertiesEXTBuilder<'a> { + type Target = PhysicalDeviceMemoryBudgetPropertiesEXT; + fn deref(&self) -> &Self::Target { + &self.inner + } +} +impl<'a> ::std::ops::DerefMut for PhysicalDeviceMemoryBudgetPropertiesEXTBuilder<'a> { + fn deref_mut(&mut self) -> &mut Self::Target { + &mut self.inner + } +} +impl<'a> PhysicalDeviceMemoryBudgetPropertiesEXTBuilder<'a> { + pub fn heap_budget( + mut self, + heap_budget: [DeviceSize; MAX_MEMORY_HEAPS], + ) -> PhysicalDeviceMemoryBudgetPropertiesEXTBuilder<'a> { + self.inner.heap_budget = heap_budget; + self + } + pub fn heap_usage( + mut self, + heap_usage: [DeviceSize; MAX_MEMORY_HEAPS], + ) -> PhysicalDeviceMemoryBudgetPropertiesEXTBuilder<'a> { + self.inner.heap_usage = heap_usage; + self + } + #[doc = r" Calling build will **discard** all the lifetime information. Only call this if"] + #[doc = r" necessary! 
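// Illustrative usage sketch (editor's note, not part of the vendored patch).
// The budget/usage arrays are fixed-size (`MAX_MEMORY_HEAPS`) driver outputs;
// only the first `memory_heap_count` entries of the base properties are
// meaningful. The query mirrors the properties2 pattern above, with an
// assumed ash-style `instance` and `pdev`.
//
// let mut budget = vk::PhysicalDeviceMemoryBudgetPropertiesEXT::builder();
// let mut mem2 = vk::PhysicalDeviceMemoryProperties2::builder().push_next(&mut budget);
// unsafe { instance.get_physical_device_memory_properties2(pdev, &mut mem2) };
// let heap0_budget = budget.heap_budget[0]; // bytes available to heap 0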
Builders implement `Deref` targeting their corresponding Vulkan struct,"] + #[doc = r" so references to builders can be passed directly to Vulkan functions."] + pub fn build(self) -> PhysicalDeviceMemoryBudgetPropertiesEXT { + self.inner + } +} +#[repr(C)] +#[derive(Copy, Clone, Debug)] +#[doc = ""] +pub struct PhysicalDeviceMemoryPriorityFeaturesEXT { + pub s_type: StructureType, + pub p_next: *mut c_void, + pub memory_priority: Bool32, +} +impl ::std::default::Default for PhysicalDeviceMemoryPriorityFeaturesEXT { + fn default() -> PhysicalDeviceMemoryPriorityFeaturesEXT { + PhysicalDeviceMemoryPriorityFeaturesEXT { + s_type: StructureType::PHYSICAL_DEVICE_MEMORY_PRIORITY_FEATURES_EXT, + p_next: ::std::ptr::null_mut(), + memory_priority: Bool32::default(), + } + } +} +impl PhysicalDeviceMemoryPriorityFeaturesEXT { + pub fn builder<'a>() -> PhysicalDeviceMemoryPriorityFeaturesEXTBuilder<'a> { + PhysicalDeviceMemoryPriorityFeaturesEXTBuilder { + inner: PhysicalDeviceMemoryPriorityFeaturesEXT::default(), + marker: ::std::marker::PhantomData, + } + } +} +#[repr(transparent)] +pub struct PhysicalDeviceMemoryPriorityFeaturesEXTBuilder<'a> { + inner: PhysicalDeviceMemoryPriorityFeaturesEXT, + marker: ::std::marker::PhantomData<&'a ()>, +} +unsafe impl ExtendsDeviceCreateInfo for PhysicalDeviceMemoryPriorityFeaturesEXTBuilder<'_> {} +unsafe impl ExtendsDeviceCreateInfo for PhysicalDeviceMemoryPriorityFeaturesEXT {} +impl<'a> ::std::ops::Deref for PhysicalDeviceMemoryPriorityFeaturesEXTBuilder<'a> { + type Target = PhysicalDeviceMemoryPriorityFeaturesEXT; + fn deref(&self) -> &Self::Target { + &self.inner + } +} +impl<'a> ::std::ops::DerefMut for PhysicalDeviceMemoryPriorityFeaturesEXTBuilder<'a> { + fn deref_mut(&mut self) -> &mut Self::Target { + &mut self.inner + } +} +impl<'a> PhysicalDeviceMemoryPriorityFeaturesEXTBuilder<'a> { + pub fn memory_priority( + mut self, + memory_priority: bool, + ) -> PhysicalDeviceMemoryPriorityFeaturesEXTBuilder<'a> { + self.inner.memory_priority = memory_priority.into(); + self + } + #[doc = r" Calling build will **discard** all the lifetime information. Only call this if"] + #[doc = r" necessary! 
Builders implement `Deref` targeting their corresponding Vulkan struct,"] + #[doc = r" so references to builders can be passed directly to Vulkan functions."] + pub fn build(self) -> PhysicalDeviceMemoryPriorityFeaturesEXT { + self.inner + } +} +#[repr(C)] +#[derive(Copy, Clone, Debug)] +#[doc = ""] +pub struct MemoryPriorityAllocateInfoEXT { + pub s_type: StructureType, + pub p_next: *const c_void, + pub priority: f32, +} +impl ::std::default::Default for MemoryPriorityAllocateInfoEXT { + fn default() -> MemoryPriorityAllocateInfoEXT { + MemoryPriorityAllocateInfoEXT { + s_type: StructureType::MEMORY_PRIORITY_ALLOCATE_INFO_EXT, + p_next: ::std::ptr::null(), + priority: f32::default(), + } + } +} +impl MemoryPriorityAllocateInfoEXT { + pub fn builder<'a>() -> MemoryPriorityAllocateInfoEXTBuilder<'a> { + MemoryPriorityAllocateInfoEXTBuilder { + inner: MemoryPriorityAllocateInfoEXT::default(), + marker: ::std::marker::PhantomData, + } + } +} +#[repr(transparent)] +pub struct MemoryPriorityAllocateInfoEXTBuilder<'a> { + inner: MemoryPriorityAllocateInfoEXT, + marker: ::std::marker::PhantomData<&'a ()>, +} +unsafe impl ExtendsMemoryAllocateInfo for MemoryPriorityAllocateInfoEXTBuilder<'_> {} +unsafe impl ExtendsMemoryAllocateInfo for MemoryPriorityAllocateInfoEXT {} +impl<'a> ::std::ops::Deref for MemoryPriorityAllocateInfoEXTBuilder<'a> { + type Target = MemoryPriorityAllocateInfoEXT; + fn deref(&self) -> &Self::Target { + &self.inner + } +} +impl<'a> ::std::ops::DerefMut for MemoryPriorityAllocateInfoEXTBuilder<'a> { + fn deref_mut(&mut self) -> &mut Self::Target { + &mut self.inner + } +} +impl<'a> MemoryPriorityAllocateInfoEXTBuilder<'a> { + pub fn priority(mut self, priority: f32) -> MemoryPriorityAllocateInfoEXTBuilder<'a> { + self.inner.priority = priority; + self + } + #[doc = r" Calling build will **discard** all the lifetime information. Only call this if"] + #[doc = r" necessary! 
Builders implement `Deref` targeting their corresponding Vulkan struct,"] + #[doc = r" so references to builders can be passed directly to Vulkan functions."] + pub fn build(self) -> MemoryPriorityAllocateInfoEXT { + self.inner + } +} +#[repr(C)] +#[derive(Copy, Clone, Debug)] +#[doc = ""] +pub struct PhysicalDeviceBufferAddressFeaturesEXT { + pub s_type: StructureType, + pub p_next: *mut c_void, + pub buffer_device_address: Bool32, + pub buffer_device_address_capture_replay: Bool32, + pub buffer_device_address_multi_device: Bool32, +} +impl ::std::default::Default for PhysicalDeviceBufferAddressFeaturesEXT { + fn default() -> PhysicalDeviceBufferAddressFeaturesEXT { + PhysicalDeviceBufferAddressFeaturesEXT { + s_type: StructureType::PHYSICAL_DEVICE_BUFFER_ADDRESS_FEATURES_EXT, + p_next: ::std::ptr::null_mut(), + buffer_device_address: Bool32::default(), + buffer_device_address_capture_replay: Bool32::default(), + buffer_device_address_multi_device: Bool32::default(), + } + } +} +impl PhysicalDeviceBufferAddressFeaturesEXT { + pub fn builder<'a>() -> PhysicalDeviceBufferAddressFeaturesEXTBuilder<'a> { + PhysicalDeviceBufferAddressFeaturesEXTBuilder { + inner: PhysicalDeviceBufferAddressFeaturesEXT::default(), + marker: ::std::marker::PhantomData, + } + } +} +#[repr(transparent)] +pub struct PhysicalDeviceBufferAddressFeaturesEXTBuilder<'a> { + inner: PhysicalDeviceBufferAddressFeaturesEXT, + marker: ::std::marker::PhantomData<&'a ()>, +} +unsafe impl ExtendsDeviceCreateInfo for PhysicalDeviceBufferAddressFeaturesEXTBuilder<'_> {} +unsafe impl ExtendsDeviceCreateInfo for PhysicalDeviceBufferAddressFeaturesEXT {} +impl<'a> ::std::ops::Deref for PhysicalDeviceBufferAddressFeaturesEXTBuilder<'a> { + type Target = PhysicalDeviceBufferAddressFeaturesEXT; + fn deref(&self) -> &Self::Target { + &self.inner + } +} +impl<'a> ::std::ops::DerefMut for PhysicalDeviceBufferAddressFeaturesEXTBuilder<'a> { + fn deref_mut(&mut self) -> &mut Self::Target { + &mut self.inner + } +} +impl<'a> PhysicalDeviceBufferAddressFeaturesEXTBuilder<'a> { + pub fn buffer_device_address( + mut self, + buffer_device_address: bool, + ) -> PhysicalDeviceBufferAddressFeaturesEXTBuilder<'a> { + self.inner.buffer_device_address = buffer_device_address.into(); + self + } + pub fn buffer_device_address_capture_replay( + mut self, + buffer_device_address_capture_replay: bool, + ) -> PhysicalDeviceBufferAddressFeaturesEXTBuilder<'a> { + self.inner.buffer_device_address_capture_replay = + buffer_device_address_capture_replay.into(); + self + } + pub fn buffer_device_address_multi_device( + mut self, + buffer_device_address_multi_device: bool, + ) -> PhysicalDeviceBufferAddressFeaturesEXTBuilder<'a> { + self.inner.buffer_device_address_multi_device = buffer_device_address_multi_device.into(); + self + } + #[doc = r" Calling build will **discard** all the lifetime information. Only call this if"] + #[doc = r" necessary! 
Builders implement `Deref` targeting their corresponding Vulkan struct,"] + #[doc = r" so references to builders can be passed directly to Vulkan functions."] + pub fn build(self) -> PhysicalDeviceBufferAddressFeaturesEXT { + self.inner + } +} +#[repr(C)] +#[derive(Copy, Clone, Debug)] +#[doc = ""] +pub struct BufferDeviceAddressInfoEXT { + pub s_type: StructureType, + pub p_next: *const c_void, + pub buffer: Buffer, +} +impl ::std::default::Default for BufferDeviceAddressInfoEXT { + fn default() -> BufferDeviceAddressInfoEXT { + BufferDeviceAddressInfoEXT { + s_type: StructureType::BUFFER_DEVICE_ADDRESS_INFO_EXT, + p_next: ::std::ptr::null(), + buffer: Buffer::default(), + } + } +} +impl BufferDeviceAddressInfoEXT { + pub fn builder<'a>() -> BufferDeviceAddressInfoEXTBuilder<'a> { + BufferDeviceAddressInfoEXTBuilder { + inner: BufferDeviceAddressInfoEXT::default(), + marker: ::std::marker::PhantomData, + } + } +} +#[repr(transparent)] +pub struct BufferDeviceAddressInfoEXTBuilder<'a> { + inner: BufferDeviceAddressInfoEXT, + marker: ::std::marker::PhantomData<&'a ()>, +} +pub unsafe trait ExtendsBufferDeviceAddressInfoEXT {} +impl<'a> ::std::ops::Deref for BufferDeviceAddressInfoEXTBuilder<'a> { + type Target = BufferDeviceAddressInfoEXT; + fn deref(&self) -> &Self::Target { + &self.inner + } +} +impl<'a> ::std::ops::DerefMut for BufferDeviceAddressInfoEXTBuilder<'a> { + fn deref_mut(&mut self) -> &mut Self::Target { + &mut self.inner + } +} +impl<'a> BufferDeviceAddressInfoEXTBuilder<'a> { + pub fn buffer(mut self, buffer: Buffer) -> BufferDeviceAddressInfoEXTBuilder<'a> { + self.inner.buffer = buffer; + self + } + #[doc = r" Prepends the given extension struct between the root and the first pointer. This"] + #[doc = r" method only exists on structs that can be passed to a function directly. Only"] + #[doc = r" valid extension structs can be pushed into the chain."] + #[doc = r" If the chain looks like `A -> B -> C`, and you call `builder.push_next(&mut D)`, then the"] + #[doc = r" chain will look like `A -> D -> B -> C`."] + pub fn push_next( + mut self, + next: &'a mut T, + ) -> BufferDeviceAddressInfoEXTBuilder<'a> { + unsafe { + let next_ptr = next as *mut T as *mut BaseOutStructure; + let last_next = ptr_chain_iter(next).last().unwrap(); + (*last_next).p_next = self.inner.p_next as _; + self.inner.p_next = next_ptr as _; + } + self + } + #[doc = r" Calling build will **discard** all the lifetime information. Only call this if"] + #[doc = r" necessary! 
Builders implement `Deref` targeting their corresponding Vulkan struct,"] + #[doc = r" so references to builders can be passed directly to Vulkan functions."] + pub fn build(self) -> BufferDeviceAddressInfoEXT { + self.inner + } +} +#[repr(C)] +#[derive(Copy, Clone, Debug)] +#[doc = ""] +pub struct BufferDeviceAddressCreateInfoEXT { + pub s_type: StructureType, + pub p_next: *const c_void, + pub device_address: DeviceSize, +} +impl ::std::default::Default for BufferDeviceAddressCreateInfoEXT { + fn default() -> BufferDeviceAddressCreateInfoEXT { + BufferDeviceAddressCreateInfoEXT { + s_type: StructureType::BUFFER_DEVICE_ADDRESS_CREATE_INFO_EXT, + p_next: ::std::ptr::null(), + device_address: DeviceSize::default(), + } + } +} +impl BufferDeviceAddressCreateInfoEXT { + pub fn builder<'a>() -> BufferDeviceAddressCreateInfoEXTBuilder<'a> { + BufferDeviceAddressCreateInfoEXTBuilder { + inner: BufferDeviceAddressCreateInfoEXT::default(), + marker: ::std::marker::PhantomData, + } + } +} +#[repr(transparent)] +pub struct BufferDeviceAddressCreateInfoEXTBuilder<'a> { + inner: BufferDeviceAddressCreateInfoEXT, + marker: ::std::marker::PhantomData<&'a ()>, +} +unsafe impl ExtendsBufferCreateInfo for BufferDeviceAddressCreateInfoEXTBuilder<'_> {} +unsafe impl ExtendsBufferCreateInfo for BufferDeviceAddressCreateInfoEXT {} +impl<'a> ::std::ops::Deref for BufferDeviceAddressCreateInfoEXTBuilder<'a> { + type Target = BufferDeviceAddressCreateInfoEXT; + fn deref(&self) -> &Self::Target { + &self.inner + } +} +impl<'a> ::std::ops::DerefMut for BufferDeviceAddressCreateInfoEXTBuilder<'a> { + fn deref_mut(&mut self) -> &mut Self::Target { + &mut self.inner + } +} +impl<'a> BufferDeviceAddressCreateInfoEXTBuilder<'a> { + pub fn device_address( + mut self, + device_address: DeviceSize, + ) -> BufferDeviceAddressCreateInfoEXTBuilder<'a> { + self.inner.device_address = device_address; + self + } + #[doc = r" Calling build will **discard** all the lifetime information. Only call this if"] + #[doc = r" necessary! Builders implement `Deref` targeting their corresponding Vulkan struct,"] + #[doc = r" so references to builders can be passed directly to Vulkan functions."] + pub fn build(self) -> BufferDeviceAddressCreateInfoEXT { + self.inner + } +} +#[derive(Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash, Default)] +#[repr(transparent)] +#[doc = ""] +pub struct ImageLayout(pub(crate) i32); +impl ImageLayout { + pub fn from_raw(x: i32) -> Self { + ImageLayout(x) + } + pub fn as_raw(self) -> i32 { + self.0 + } +} +impl ImageLayout { + #[doc = "Implicit layout an image is when its contents are undefined due to various reasons (e.g. 
right after creation)"] + pub const UNDEFINED: Self = ImageLayout(0); + #[doc = "General layout when image can be used for any kind of access"] + pub const GENERAL: Self = ImageLayout(1); + #[doc = "Optimal layout when image is only used for color attachment read/write"] + pub const COLOR_ATTACHMENT_OPTIMAL: Self = ImageLayout(2); + #[doc = "Optimal layout when image is only used for depth/stencil attachment read/write"] + pub const DEPTH_STENCIL_ATTACHMENT_OPTIMAL: Self = ImageLayout(3); + #[doc = "Optimal layout when image is used for read only depth/stencil attachment and shader access"] + pub const DEPTH_STENCIL_READ_ONLY_OPTIMAL: Self = ImageLayout(4); + #[doc = "Optimal layout when image is used for read only shader access"] + pub const SHADER_READ_ONLY_OPTIMAL: Self = ImageLayout(5); + #[doc = "Optimal layout when image is used only as source of transfer operations"] + pub const TRANSFER_SRC_OPTIMAL: Self = ImageLayout(6); + #[doc = "Optimal layout when image is used only as destination of transfer operations"] + pub const TRANSFER_DST_OPTIMAL: Self = ImageLayout(7); + #[doc = "Initial layout used when the data is populated by the CPU"] + pub const PREINITIALIZED: Self = ImageLayout(8); +} +#[derive(Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash, Default)] +#[repr(transparent)] +#[doc = ""] +pub struct AttachmentLoadOp(pub(crate) i32); +impl AttachmentLoadOp { + pub fn from_raw(x: i32) -> Self { + AttachmentLoadOp(x) + } + pub fn as_raw(self) -> i32 { + self.0 + } +} +impl AttachmentLoadOp { + pub const LOAD: Self = AttachmentLoadOp(0); + pub const CLEAR: Self = AttachmentLoadOp(1); + pub const DONT_CARE: Self = AttachmentLoadOp(2); +} +#[derive(Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash, Default)] +#[repr(transparent)] +#[doc = ""] +pub struct AttachmentStoreOp(pub(crate) i32); +impl AttachmentStoreOp { + pub fn from_raw(x: i32) -> Self { + AttachmentStoreOp(x) + } + pub fn as_raw(self) -> i32 { + self.0 + } +} +impl AttachmentStoreOp { + pub const STORE: Self = AttachmentStoreOp(0); + pub const DONT_CARE: Self = AttachmentStoreOp(1); +} +#[derive(Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash, Default)] +#[repr(transparent)] +#[doc = ""] +pub struct ImageType(pub(crate) i32); +impl ImageType { + pub fn from_raw(x: i32) -> Self { + ImageType(x) + } + pub fn as_raw(self) -> i32 { + self.0 + } +} +impl ImageType { + pub const TYPE_1D: Self = ImageType(0); + pub const TYPE_2D: Self = ImageType(1); + pub const TYPE_3D: Self = ImageType(2); +} +#[derive(Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash, Default)] +#[repr(transparent)] +#[doc = ""] +pub struct ImageTiling(pub(crate) i32); +impl ImageTiling { + pub fn from_raw(x: i32) -> Self { + ImageTiling(x) + } + pub fn as_raw(self) -> i32 { + self.0 + } +} +impl ImageTiling { + pub const OPTIMAL: Self = ImageTiling(0); + pub const LINEAR: Self = ImageTiling(1); +} +#[derive(Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash, Default)] +#[repr(transparent)] +#[doc = ""] +pub struct ImageViewType(pub(crate) i32); +impl ImageViewType { + pub fn from_raw(x: i32) -> Self { + ImageViewType(x) + } + pub fn as_raw(self) -> i32 { + self.0 + } +} +impl ImageViewType { + pub const TYPE_1D: Self = ImageViewType(0); + pub const TYPE_2D: Self = ImageViewType(1); + pub const TYPE_3D: Self = ImageViewType(2); + pub const CUBE: Self = ImageViewType(3); + pub const TYPE_1D_ARRAY: Self = ImageViewType(4); + pub const TYPE_2D_ARRAY: Self = ImageViewType(5); + pub const CUBE_ARRAY: Self = ImageViewType(6); +} +#[derive(Clone, Copy, PartialEq, 
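// Editorial sketch: the `ImageLayout`, `AttachmentLoadOp`, and `AttachmentStoreOp`
// wrappers above are typically combined when describing a render-pass attachment.
// A minimal, hypothetical usage; `AttachmentDescription` and its builder are
// assumed to exist elsewhere in these generated bindings, not in this hunk:
//
//     let color_attachment = AttachmentDescription::builder()
//         .format(Format::B8G8R8A8_UNORM)
//         .load_op(AttachmentLoadOp::CLEAR)       // clear on load
//         .store_op(AttachmentStoreOp::STORE)     // keep the rendered result
//         .initial_layout(ImageLayout::UNDEFINED) // contents undefined right after creation
//         .final_layout(ImageLayout::COLOR_ATTACHMENT_OPTIMAL);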
Eq, PartialOrd, Ord, Hash, Default)] +#[repr(transparent)] +#[doc = ""] +pub struct CommandBufferLevel(pub(crate) i32); +impl CommandBufferLevel { + pub fn from_raw(x: i32) -> Self { + CommandBufferLevel(x) + } + pub fn as_raw(self) -> i32 { + self.0 + } +} +impl CommandBufferLevel { + pub const PRIMARY: Self = CommandBufferLevel(0); + pub const SECONDARY: Self = CommandBufferLevel(1); +} +#[derive(Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash, Default)] +#[repr(transparent)] +#[doc = ""] +pub struct ComponentSwizzle(pub(crate) i32); +impl ComponentSwizzle { + pub fn from_raw(x: i32) -> Self { + ComponentSwizzle(x) + } + pub fn as_raw(self) -> i32 { + self.0 + } +} +impl ComponentSwizzle { + pub const IDENTITY: Self = ComponentSwizzle(0); + pub const ZERO: Self = ComponentSwizzle(1); + pub const ONE: Self = ComponentSwizzle(2); + pub const R: Self = ComponentSwizzle(3); + pub const G: Self = ComponentSwizzle(4); + pub const B: Self = ComponentSwizzle(5); + pub const A: Self = ComponentSwizzle(6); +} +#[derive(Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash, Default)] +#[repr(transparent)] +#[doc = ""] +pub struct DescriptorType(pub(crate) i32); +impl DescriptorType { + pub fn from_raw(x: i32) -> Self { + DescriptorType(x) + } + pub fn as_raw(self) -> i32 { + self.0 + } +} +impl DescriptorType { + pub const SAMPLER: Self = DescriptorType(0); + pub const COMBINED_IMAGE_SAMPLER: Self = DescriptorType(1); + pub const SAMPLED_IMAGE: Self = DescriptorType(2); + pub const STORAGE_IMAGE: Self = DescriptorType(3); + pub const UNIFORM_TEXEL_BUFFER: Self = DescriptorType(4); + pub const STORAGE_TEXEL_BUFFER: Self = DescriptorType(5); + pub const UNIFORM_BUFFER: Self = DescriptorType(6); + pub const STORAGE_BUFFER: Self = DescriptorType(7); + pub const UNIFORM_BUFFER_DYNAMIC: Self = DescriptorType(8); + pub const STORAGE_BUFFER_DYNAMIC: Self = DescriptorType(9); + pub const INPUT_ATTACHMENT: Self = DescriptorType(10); +} +#[derive(Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash, Default)] +#[repr(transparent)] +#[doc = ""] +pub struct QueryType(pub(crate) i32); +impl QueryType { + pub fn from_raw(x: i32) -> Self { + QueryType(x) + } + pub fn as_raw(self) -> i32 { + self.0 + } +} +impl QueryType { + pub const OCCLUSION: Self = QueryType(0); + #[doc = "Optional"] + pub const PIPELINE_STATISTICS: Self = QueryType(1); + pub const TIMESTAMP: Self = QueryType(2); +} +#[derive(Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash, Default)] +#[repr(transparent)] +#[doc = ""] +pub struct BorderColor(pub(crate) i32); +impl BorderColor { + pub fn from_raw(x: i32) -> Self { + BorderColor(x) + } + pub fn as_raw(self) -> i32 { + self.0 + } +} +impl BorderColor { + pub const FLOAT_TRANSPARENT_BLACK: Self = BorderColor(0); + pub const INT_TRANSPARENT_BLACK: Self = BorderColor(1); + pub const FLOAT_OPAQUE_BLACK: Self = BorderColor(2); + pub const INT_OPAQUE_BLACK: Self = BorderColor(3); + pub const FLOAT_OPAQUE_WHITE: Self = BorderColor(4); + pub const INT_OPAQUE_WHITE: Self = BorderColor(5); +} +#[derive(Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash, Default)] +#[repr(transparent)] +#[doc = ""] +pub struct PipelineBindPoint(pub(crate) i32); +impl PipelineBindPoint { + pub fn from_raw(x: i32) -> Self { + PipelineBindPoint(x) + } + pub fn as_raw(self) -> i32 { + self.0 + } +} +impl PipelineBindPoint { + pub const GRAPHICS: Self = PipelineBindPoint(0); + pub const COMPUTE: Self = PipelineBindPoint(1); +} +#[derive(Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash, Default)] +#[repr(transparent)] +#[doc = ""] 
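// Editorial sketch: every generated builder above follows the same shape --
// chained setters take `self` by value, `Deref`/`DerefMut` expose the wrapped
// Vulkan struct, and `build()` discards the lifetime tracking. Combined with
// the `ExtendsDeviceCreateInfo` impls earlier in this hunk, a feature struct
// can be spliced into a `p_next` chain. `DeviceCreateInfo` and its builder are
// assumed from elsewhere in these bindings:
//
//     let mut features = PhysicalDeviceBufferAddressFeaturesEXT::builder()
//         .buffer_device_address(true);
//     let device_info = DeviceCreateInfo::builder()
//         .push_next(&mut features); // inserted right behind the chain root
//     // Via `Deref`, `&*device_info` can be passed wherever a
//     // `&DeviceCreateInfo` is expected; call `.build()` only when the
//     // lifetime information really must be dropped.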
+pub struct PipelineCacheHeaderVersion(pub(crate) i32); +impl PipelineCacheHeaderVersion { + pub fn from_raw(x: i32) -> Self { + PipelineCacheHeaderVersion(x) + } + pub fn as_raw(self) -> i32 { + self.0 + } +} +impl PipelineCacheHeaderVersion { + pub const ONE: Self = PipelineCacheHeaderVersion(1); +} +#[derive(Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash, Default)] +#[repr(transparent)] +#[doc = ""] +pub struct PrimitiveTopology(pub(crate) i32); +impl PrimitiveTopology { + pub fn from_raw(x: i32) -> Self { + PrimitiveTopology(x) + } + pub fn as_raw(self) -> i32 { + self.0 + } +} +impl PrimitiveTopology { + pub const POINT_LIST: Self = PrimitiveTopology(0); + pub const LINE_LIST: Self = PrimitiveTopology(1); + pub const LINE_STRIP: Self = PrimitiveTopology(2); + pub const TRIANGLE_LIST: Self = PrimitiveTopology(3); + pub const TRIANGLE_STRIP: Self = PrimitiveTopology(4); + pub const TRIANGLE_FAN: Self = PrimitiveTopology(5); + pub const LINE_LIST_WITH_ADJACENCY: Self = PrimitiveTopology(6); + pub const LINE_STRIP_WITH_ADJACENCY: Self = PrimitiveTopology(7); + pub const TRIANGLE_LIST_WITH_ADJACENCY: Self = PrimitiveTopology(8); + pub const TRIANGLE_STRIP_WITH_ADJACENCY: Self = PrimitiveTopology(9); + pub const PATCH_LIST: Self = PrimitiveTopology(10); +} +#[derive(Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash, Default)] +#[repr(transparent)] +#[doc = ""] +pub struct SharingMode(pub(crate) i32); +impl SharingMode { + pub fn from_raw(x: i32) -> Self { + SharingMode(x) + } + pub fn as_raw(self) -> i32 { + self.0 + } +} +impl SharingMode { + pub const EXCLUSIVE: Self = SharingMode(0); + pub const CONCURRENT: Self = SharingMode(1); +} +#[derive(Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash, Default)] +#[repr(transparent)] +#[doc = ""] +pub struct IndexType(pub(crate) i32); +impl IndexType { + pub fn from_raw(x: i32) -> Self { + IndexType(x) + } + pub fn as_raw(self) -> i32 { + self.0 + } +} +impl IndexType { + pub const UINT16: Self = IndexType(0); + pub const UINT32: Self = IndexType(1); +} +#[derive(Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash, Default)] +#[repr(transparent)] +#[doc = ""] +pub struct Filter(pub(crate) i32); +impl Filter { + pub fn from_raw(x: i32) -> Self { + Filter(x) + } + pub fn as_raw(self) -> i32 { + self.0 + } +} +impl Filter { + pub const NEAREST: Self = Filter(0); + pub const LINEAR: Self = Filter(1); +} +#[derive(Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash, Default)] +#[repr(transparent)] +#[doc = ""] +pub struct SamplerMipmapMode(pub(crate) i32); +impl SamplerMipmapMode { + pub fn from_raw(x: i32) -> Self { + SamplerMipmapMode(x) + } + pub fn as_raw(self) -> i32 { + self.0 + } +} +impl SamplerMipmapMode { + #[doc = "Choose nearest mip level"] + pub const NEAREST: Self = SamplerMipmapMode(0); + #[doc = "Linear filter between mip levels"] + pub const LINEAR: Self = SamplerMipmapMode(1); +} +#[derive(Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash, Default)] +#[repr(transparent)] +#[doc = ""] +pub struct SamplerAddressMode(pub(crate) i32); +impl SamplerAddressMode { + pub fn from_raw(x: i32) -> Self { + SamplerAddressMode(x) + } + pub fn as_raw(self) -> i32 { + self.0 + } +} +impl SamplerAddressMode { + pub const REPEAT: Self = SamplerAddressMode(0); + pub const MIRRORED_REPEAT: Self = SamplerAddressMode(1); + pub const CLAMP_TO_EDGE: Self = SamplerAddressMode(2); + pub const CLAMP_TO_BORDER: Self = SamplerAddressMode(3); +} +#[derive(Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash, Default)] +#[repr(transparent)] +#[doc = ""] +pub struct 
CompareOp(pub(crate) i32); +impl CompareOp { + pub fn from_raw(x: i32) -> Self { + CompareOp(x) + } + pub fn as_raw(self) -> i32 { + self.0 + } +} +impl CompareOp { + pub const NEVER: Self = CompareOp(0); + pub const LESS: Self = CompareOp(1); + pub const EQUAL: Self = CompareOp(2); + pub const LESS_OR_EQUAL: Self = CompareOp(3); + pub const GREATER: Self = CompareOp(4); + pub const NOT_EQUAL: Self = CompareOp(5); + pub const GREATER_OR_EQUAL: Self = CompareOp(6); + pub const ALWAYS: Self = CompareOp(7); +} +#[derive(Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash, Default)] +#[repr(transparent)] +#[doc = ""] +pub struct PolygonMode(pub(crate) i32); +impl PolygonMode { + pub fn from_raw(x: i32) -> Self { + PolygonMode(x) + } + pub fn as_raw(self) -> i32 { + self.0 + } +} +impl PolygonMode { + pub const FILL: Self = PolygonMode(0); + pub const LINE: Self = PolygonMode(1); + pub const POINT: Self = PolygonMode(2); +} +#[derive(Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash, Default)] +#[repr(transparent)] +#[doc = ""] +pub struct FrontFace(pub(crate) i32); +impl FrontFace { + pub fn from_raw(x: i32) -> Self { + FrontFace(x) + } + pub fn as_raw(self) -> i32 { + self.0 + } +} +impl FrontFace { + pub const COUNTER_CLOCKWISE: Self = FrontFace(0); + pub const CLOCKWISE: Self = FrontFace(1); +} +#[derive(Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash, Default)] +#[repr(transparent)] +#[doc = ""] +pub struct BlendFactor(pub(crate) i32); +impl BlendFactor { + pub fn from_raw(x: i32) -> Self { + BlendFactor(x) + } + pub fn as_raw(self) -> i32 { + self.0 + } +} +impl BlendFactor { + pub const ZERO: Self = BlendFactor(0); + pub const ONE: Self = BlendFactor(1); + pub const SRC_COLOR: Self = BlendFactor(2); + pub const ONE_MINUS_SRC_COLOR: Self = BlendFactor(3); + pub const DST_COLOR: Self = BlendFactor(4); + pub const ONE_MINUS_DST_COLOR: Self = BlendFactor(5); + pub const SRC_ALPHA: Self = BlendFactor(6); + pub const ONE_MINUS_SRC_ALPHA: Self = BlendFactor(7); + pub const DST_ALPHA: Self = BlendFactor(8); + pub const ONE_MINUS_DST_ALPHA: Self = BlendFactor(9); + pub const CONSTANT_COLOR: Self = BlendFactor(10); + pub const ONE_MINUS_CONSTANT_COLOR: Self = BlendFactor(11); + pub const CONSTANT_ALPHA: Self = BlendFactor(12); + pub const ONE_MINUS_CONSTANT_ALPHA: Self = BlendFactor(13); + pub const SRC_ALPHA_SATURATE: Self = BlendFactor(14); + pub const SRC1_COLOR: Self = BlendFactor(15); + pub const ONE_MINUS_SRC1_COLOR: Self = BlendFactor(16); + pub const SRC1_ALPHA: Self = BlendFactor(17); + pub const ONE_MINUS_SRC1_ALPHA: Self = BlendFactor(18); +} +#[derive(Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash, Default)] +#[repr(transparent)] +#[doc = ""] +pub struct BlendOp(pub(crate) i32); +impl BlendOp { + pub fn from_raw(x: i32) -> Self { + BlendOp(x) + } + pub fn as_raw(self) -> i32 { + self.0 + } +} +impl BlendOp { + pub const ADD: Self = BlendOp(0); + pub const SUBTRACT: Self = BlendOp(1); + pub const REVERSE_SUBTRACT: Self = BlendOp(2); + pub const MIN: Self = BlendOp(3); + pub const MAX: Self = BlendOp(4); +} +#[derive(Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash, Default)] +#[repr(transparent)] +#[doc = ""] +pub struct StencilOp(pub(crate) i32); +impl StencilOp { + pub fn from_raw(x: i32) -> Self { + StencilOp(x) + } + pub fn as_raw(self) -> i32 { + self.0 + } +} +impl StencilOp { + pub const KEEP: Self = StencilOp(0); + pub const ZERO: Self = StencilOp(1); + pub const REPLACE: Self = StencilOp(2); + pub const INCREMENT_AND_CLAMP: Self = StencilOp(3); + pub const 
DECREMENT_AND_CLAMP: Self = StencilOp(4); + pub const INVERT: Self = StencilOp(5); + pub const INCREMENT_AND_WRAP: Self = StencilOp(6); + pub const DECREMENT_AND_WRAP: Self = StencilOp(7); +} +#[derive(Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash, Default)] +#[repr(transparent)] +#[doc = ""] +pub struct LogicOp(pub(crate) i32); +impl LogicOp { + pub fn from_raw(x: i32) -> Self { + LogicOp(x) + } + pub fn as_raw(self) -> i32 { + self.0 + } +} +impl LogicOp { + pub const CLEAR: Self = LogicOp(0); + pub const AND: Self = LogicOp(1); + pub const AND_REVERSE: Self = LogicOp(2); + pub const COPY: Self = LogicOp(3); + pub const AND_INVERTED: Self = LogicOp(4); + pub const NO_OP: Self = LogicOp(5); + pub const XOR: Self = LogicOp(6); + pub const OR: Self = LogicOp(7); + pub const NOR: Self = LogicOp(8); + pub const EQUIVALENT: Self = LogicOp(9); + pub const INVERT: Self = LogicOp(10); + pub const OR_REVERSE: Self = LogicOp(11); + pub const COPY_INVERTED: Self = LogicOp(12); + pub const OR_INVERTED: Self = LogicOp(13); + pub const NAND: Self = LogicOp(14); + pub const SET: Self = LogicOp(15); +} +#[derive(Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash, Default)] +#[repr(transparent)] +#[doc = ""] +pub struct InternalAllocationType(pub(crate) i32); +impl InternalAllocationType { + pub fn from_raw(x: i32) -> Self { + InternalAllocationType(x) + } + pub fn as_raw(self) -> i32 { + self.0 + } +} +impl InternalAllocationType { + pub const EXECUTABLE: Self = InternalAllocationType(0); +} +#[derive(Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash, Default)] +#[repr(transparent)] +#[doc = ""] +pub struct SystemAllocationScope(pub(crate) i32); +impl SystemAllocationScope { + pub fn from_raw(x: i32) -> Self { + SystemAllocationScope(x) + } + pub fn as_raw(self) -> i32 { + self.0 + } +} +impl SystemAllocationScope { + pub const COMMAND: Self = SystemAllocationScope(0); + pub const OBJECT: Self = SystemAllocationScope(1); + pub const CACHE: Self = SystemAllocationScope(2); + pub const DEVICE: Self = SystemAllocationScope(3); + pub const INSTANCE: Self = SystemAllocationScope(4); +} +#[derive(Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash, Default)] +#[repr(transparent)] +#[doc = ""] +pub struct PhysicalDeviceType(pub(crate) i32); +impl PhysicalDeviceType { + pub fn from_raw(x: i32) -> Self { + PhysicalDeviceType(x) + } + pub fn as_raw(self) -> i32 { + self.0 + } +} +impl PhysicalDeviceType { + pub const OTHER: Self = PhysicalDeviceType(0); + pub const INTEGRATED_GPU: Self = PhysicalDeviceType(1); + pub const DISCRETE_GPU: Self = PhysicalDeviceType(2); + pub const VIRTUAL_GPU: Self = PhysicalDeviceType(3); + pub const CPU: Self = PhysicalDeviceType(4); +} +#[derive(Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash, Default)] +#[repr(transparent)] +#[doc = ""] +pub struct VertexInputRate(pub(crate) i32); +impl VertexInputRate { + pub fn from_raw(x: i32) -> Self { + VertexInputRate(x) + } + pub fn as_raw(self) -> i32 { + self.0 + } +} +impl VertexInputRate { + pub const VERTEX: Self = VertexInputRate(0); + pub const INSTANCE: Self = VertexInputRate(1); +} +#[derive(Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash, Default)] +#[repr(transparent)] +#[doc = ""] +pub struct Format(pub(crate) i32); +impl Format { + pub fn from_raw(x: i32) -> Self { + Format(x) + } + pub fn as_raw(self) -> i32 { + self.0 + } +} +impl Format { + pub const UNDEFINED: Self = Format(0); + pub const R4G4_UNORM_PACK8: Self = Format(1); + pub const R4G4B4A4_UNORM_PACK16: Self = Format(2); + pub const B4G4R4A4_UNORM_PACK16: Self = 
Format(3); + pub const R5G6B5_UNORM_PACK16: Self = Format(4); + pub const B5G6R5_UNORM_PACK16: Self = Format(5); + pub const R5G5B5A1_UNORM_PACK16: Self = Format(6); + pub const B5G5R5A1_UNORM_PACK16: Self = Format(7); + pub const A1R5G5B5_UNORM_PACK16: Self = Format(8); + pub const R8_UNORM: Self = Format(9); + pub const R8_SNORM: Self = Format(10); + pub const R8_USCALED: Self = Format(11); + pub const R8_SSCALED: Self = Format(12); + pub const R8_UINT: Self = Format(13); + pub const R8_SINT: Self = Format(14); + pub const R8_SRGB: Self = Format(15); + pub const R8G8_UNORM: Self = Format(16); + pub const R8G8_SNORM: Self = Format(17); + pub const R8G8_USCALED: Self = Format(18); + pub const R8G8_SSCALED: Self = Format(19); + pub const R8G8_UINT: Self = Format(20); + pub const R8G8_SINT: Self = Format(21); + pub const R8G8_SRGB: Self = Format(22); + pub const R8G8B8_UNORM: Self = Format(23); + pub const R8G8B8_SNORM: Self = Format(24); + pub const R8G8B8_USCALED: Self = Format(25); + pub const R8G8B8_SSCALED: Self = Format(26); + pub const R8G8B8_UINT: Self = Format(27); + pub const R8G8B8_SINT: Self = Format(28); + pub const R8G8B8_SRGB: Self = Format(29); + pub const B8G8R8_UNORM: Self = Format(30); + pub const B8G8R8_SNORM: Self = Format(31); + pub const B8G8R8_USCALED: Self = Format(32); + pub const B8G8R8_SSCALED: Self = Format(33); + pub const B8G8R8_UINT: Self = Format(34); + pub const B8G8R8_SINT: Self = Format(35); + pub const B8G8R8_SRGB: Self = Format(36); + pub const R8G8B8A8_UNORM: Self = Format(37); + pub const R8G8B8A8_SNORM: Self = Format(38); + pub const R8G8B8A8_USCALED: Self = Format(39); + pub const R8G8B8A8_SSCALED: Self = Format(40); + pub const R8G8B8A8_UINT: Self = Format(41); + pub const R8G8B8A8_SINT: Self = Format(42); + pub const R8G8B8A8_SRGB: Self = Format(43); + pub const B8G8R8A8_UNORM: Self = Format(44); + pub const B8G8R8A8_SNORM: Self = Format(45); + pub const B8G8R8A8_USCALED: Self = Format(46); + pub const B8G8R8A8_SSCALED: Self = Format(47); + pub const B8G8R8A8_UINT: Self = Format(48); + pub const B8G8R8A8_SINT: Self = Format(49); + pub const B8G8R8A8_SRGB: Self = Format(50); + pub const A8B8G8R8_UNORM_PACK32: Self = Format(51); + pub const A8B8G8R8_SNORM_PACK32: Self = Format(52); + pub const A8B8G8R8_USCALED_PACK32: Self = Format(53); + pub const A8B8G8R8_SSCALED_PACK32: Self = Format(54); + pub const A8B8G8R8_UINT_PACK32: Self = Format(55); + pub const A8B8G8R8_SINT_PACK32: Self = Format(56); + pub const A8B8G8R8_SRGB_PACK32: Self = Format(57); + pub const A2R10G10B10_UNORM_PACK32: Self = Format(58); + pub const A2R10G10B10_SNORM_PACK32: Self = Format(59); + pub const A2R10G10B10_USCALED_PACK32: Self = Format(60); + pub const A2R10G10B10_SSCALED_PACK32: Self = Format(61); + pub const A2R10G10B10_UINT_PACK32: Self = Format(62); + pub const A2R10G10B10_SINT_PACK32: Self = Format(63); + pub const A2B10G10R10_UNORM_PACK32: Self = Format(64); + pub const A2B10G10R10_SNORM_PACK32: Self = Format(65); + pub const A2B10G10R10_USCALED_PACK32: Self = Format(66); + pub const A2B10G10R10_SSCALED_PACK32: Self = Format(67); + pub const A2B10G10R10_UINT_PACK32: Self = Format(68); + pub const A2B10G10R10_SINT_PACK32: Self = Format(69); + pub const R16_UNORM: Self = Format(70); + pub const R16_SNORM: Self = Format(71); + pub const R16_USCALED: Self = Format(72); + pub const R16_SSCALED: Self = Format(73); + pub const R16_UINT: Self = Format(74); + pub const R16_SINT: Self = Format(75); + pub const R16_SFLOAT: Self = Format(76); + pub const R16G16_UNORM: Self = 
Format(77); + pub const R16G16_SNORM: Self = Format(78); + pub const R16G16_USCALED: Self = Format(79); + pub const R16G16_SSCALED: Self = Format(80); + pub const R16G16_UINT: Self = Format(81); + pub const R16G16_SINT: Self = Format(82); + pub const R16G16_SFLOAT: Self = Format(83); + pub const R16G16B16_UNORM: Self = Format(84); + pub const R16G16B16_SNORM: Self = Format(85); + pub const R16G16B16_USCALED: Self = Format(86); + pub const R16G16B16_SSCALED: Self = Format(87); + pub const R16G16B16_UINT: Self = Format(88); + pub const R16G16B16_SINT: Self = Format(89); + pub const R16G16B16_SFLOAT: Self = Format(90); + pub const R16G16B16A16_UNORM: Self = Format(91); + pub const R16G16B16A16_SNORM: Self = Format(92); + pub const R16G16B16A16_USCALED: Self = Format(93); + pub const R16G16B16A16_SSCALED: Self = Format(94); + pub const R16G16B16A16_UINT: Self = Format(95); + pub const R16G16B16A16_SINT: Self = Format(96); + pub const R16G16B16A16_SFLOAT: Self = Format(97); + pub const R32_UINT: Self = Format(98); + pub const R32_SINT: Self = Format(99); + pub const R32_SFLOAT: Self = Format(100); + pub const R32G32_UINT: Self = Format(101); + pub const R32G32_SINT: Self = Format(102); + pub const R32G32_SFLOAT: Self = Format(103); + pub const R32G32B32_UINT: Self = Format(104); + pub const R32G32B32_SINT: Self = Format(105); + pub const R32G32B32_SFLOAT: Self = Format(106); + pub const R32G32B32A32_UINT: Self = Format(107); + pub const R32G32B32A32_SINT: Self = Format(108); + pub const R32G32B32A32_SFLOAT: Self = Format(109); + pub const R64_UINT: Self = Format(110); + pub const R64_SINT: Self = Format(111); + pub const R64_SFLOAT: Self = Format(112); + pub const R64G64_UINT: Self = Format(113); + pub const R64G64_SINT: Self = Format(114); + pub const R64G64_SFLOAT: Self = Format(115); + pub const R64G64B64_UINT: Self = Format(116); + pub const R64G64B64_SINT: Self = Format(117); + pub const R64G64B64_SFLOAT: Self = Format(118); + pub const R64G64B64A64_UINT: Self = Format(119); + pub const R64G64B64A64_SINT: Self = Format(120); + pub const R64G64B64A64_SFLOAT: Self = Format(121); + pub const B10G11R11_UFLOAT_PACK32: Self = Format(122); + pub const E5B9G9R9_UFLOAT_PACK32: Self = Format(123); + pub const D16_UNORM: Self = Format(124); + pub const X8_D24_UNORM_PACK32: Self = Format(125); + pub const D32_SFLOAT: Self = Format(126); + pub const S8_UINT: Self = Format(127); + pub const D16_UNORM_S8_UINT: Self = Format(128); + pub const D24_UNORM_S8_UINT: Self = Format(129); + pub const D32_SFLOAT_S8_UINT: Self = Format(130); + pub const BC1_RGB_UNORM_BLOCK: Self = Format(131); + pub const BC1_RGB_SRGB_BLOCK: Self = Format(132); + pub const BC1_RGBA_UNORM_BLOCK: Self = Format(133); + pub const BC1_RGBA_SRGB_BLOCK: Self = Format(134); + pub const BC2_UNORM_BLOCK: Self = Format(135); + pub const BC2_SRGB_BLOCK: Self = Format(136); + pub const BC3_UNORM_BLOCK: Self = Format(137); + pub const BC3_SRGB_BLOCK: Self = Format(138); + pub const BC4_UNORM_BLOCK: Self = Format(139); + pub const BC4_SNORM_BLOCK: Self = Format(140); + pub const BC5_UNORM_BLOCK: Self = Format(141); + pub const BC5_SNORM_BLOCK: Self = Format(142); + pub const BC6H_UFLOAT_BLOCK: Self = Format(143); + pub const BC6H_SFLOAT_BLOCK: Self = Format(144); + pub const BC7_UNORM_BLOCK: Self = Format(145); + pub const BC7_SRGB_BLOCK: Self = Format(146); + pub const ETC2_R8G8B8_UNORM_BLOCK: Self = Format(147); + pub const ETC2_R8G8B8_SRGB_BLOCK: Self = Format(148); + pub const ETC2_R8G8B8A1_UNORM_BLOCK: Self = Format(149); + pub const 
ETC2_R8G8B8A1_SRGB_BLOCK: Self = Format(150); + pub const ETC2_R8G8B8A8_UNORM_BLOCK: Self = Format(151); + pub const ETC2_R8G8B8A8_SRGB_BLOCK: Self = Format(152); + pub const EAC_R11_UNORM_BLOCK: Self = Format(153); + pub const EAC_R11_SNORM_BLOCK: Self = Format(154); + pub const EAC_R11G11_UNORM_BLOCK: Self = Format(155); + pub const EAC_R11G11_SNORM_BLOCK: Self = Format(156); + pub const ASTC_4X4_UNORM_BLOCK: Self = Format(157); + pub const ASTC_4X4_SRGB_BLOCK: Self = Format(158); + pub const ASTC_5X4_UNORM_BLOCK: Self = Format(159); + pub const ASTC_5X4_SRGB_BLOCK: Self = Format(160); + pub const ASTC_5X5_UNORM_BLOCK: Self = Format(161); + pub const ASTC_5X5_SRGB_BLOCK: Self = Format(162); + pub const ASTC_6X5_UNORM_BLOCK: Self = Format(163); + pub const ASTC_6X5_SRGB_BLOCK: Self = Format(164); + pub const ASTC_6X6_UNORM_BLOCK: Self = Format(165); + pub const ASTC_6X6_SRGB_BLOCK: Self = Format(166); + pub const ASTC_8X5_UNORM_BLOCK: Self = Format(167); + pub const ASTC_8X5_SRGB_BLOCK: Self = Format(168); + pub const ASTC_8X6_UNORM_BLOCK: Self = Format(169); + pub const ASTC_8X6_SRGB_BLOCK: Self = Format(170); + pub const ASTC_8X8_UNORM_BLOCK: Self = Format(171); + pub const ASTC_8X8_SRGB_BLOCK: Self = Format(172); + pub const ASTC_10X5_UNORM_BLOCK: Self = Format(173); + pub const ASTC_10X5_SRGB_BLOCK: Self = Format(174); + pub const ASTC_10X6_UNORM_BLOCK: Self = Format(175); + pub const ASTC_10X6_SRGB_BLOCK: Self = Format(176); + pub const ASTC_10X8_UNORM_BLOCK: Self = Format(177); + pub const ASTC_10X8_SRGB_BLOCK: Self = Format(178); + pub const ASTC_10X10_UNORM_BLOCK: Self = Format(179); + pub const ASTC_10X10_SRGB_BLOCK: Self = Format(180); + pub const ASTC_12X10_UNORM_BLOCK: Self = Format(181); + pub const ASTC_12X10_SRGB_BLOCK: Self = Format(182); + pub const ASTC_12X12_UNORM_BLOCK: Self = Format(183); + pub const ASTC_12X12_SRGB_BLOCK: Self = Format(184); +} +#[derive(Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash, Default)] +#[repr(transparent)] +#[doc = ""] +pub struct StructureType(pub(crate) i32); +impl StructureType { + pub fn from_raw(x: i32) -> Self { + StructureType(x) + } + pub fn as_raw(self) -> i32 { + self.0 + } +} +impl StructureType { + pub const APPLICATION_INFO: Self = StructureType(0); + pub const INSTANCE_CREATE_INFO: Self = StructureType(1); + pub const DEVICE_QUEUE_CREATE_INFO: Self = StructureType(2); + pub const DEVICE_CREATE_INFO: Self = StructureType(3); + pub const SUBMIT_INFO: Self = StructureType(4); + pub const MEMORY_ALLOCATE_INFO: Self = StructureType(5); + pub const MAPPED_MEMORY_RANGE: Self = StructureType(6); + pub const BIND_SPARSE_INFO: Self = StructureType(7); + pub const FENCE_CREATE_INFO: Self = StructureType(8); + pub const SEMAPHORE_CREATE_INFO: Self = StructureType(9); + pub const EVENT_CREATE_INFO: Self = StructureType(10); + pub const QUERY_POOL_CREATE_INFO: Self = StructureType(11); + pub const BUFFER_CREATE_INFO: Self = StructureType(12); + pub const BUFFER_VIEW_CREATE_INFO: Self = StructureType(13); + pub const IMAGE_CREATE_INFO: Self = StructureType(14); + pub const IMAGE_VIEW_CREATE_INFO: Self = StructureType(15); + pub const SHADER_MODULE_CREATE_INFO: Self = StructureType(16); + pub const PIPELINE_CACHE_CREATE_INFO: Self = StructureType(17); + pub const PIPELINE_SHADER_STAGE_CREATE_INFO: Self = StructureType(18); + pub const PIPELINE_VERTEX_INPUT_STATE_CREATE_INFO: Self = StructureType(19); + pub const PIPELINE_INPUT_ASSEMBLY_STATE_CREATE_INFO: Self = StructureType(20); + pub const PIPELINE_TESSELLATION_STATE_CREATE_INFO: Self = 
StructureType(21); + pub const PIPELINE_VIEWPORT_STATE_CREATE_INFO: Self = StructureType(22); + pub const PIPELINE_RASTERIZATION_STATE_CREATE_INFO: Self = StructureType(23); + pub const PIPELINE_MULTISAMPLE_STATE_CREATE_INFO: Self = StructureType(24); + pub const PIPELINE_DEPTH_STENCIL_STATE_CREATE_INFO: Self = StructureType(25); + pub const PIPELINE_COLOR_BLEND_STATE_CREATE_INFO: Self = StructureType(26); + pub const PIPELINE_DYNAMIC_STATE_CREATE_INFO: Self = StructureType(27); + pub const GRAPHICS_PIPELINE_CREATE_INFO: Self = StructureType(28); + pub const COMPUTE_PIPELINE_CREATE_INFO: Self = StructureType(29); + pub const PIPELINE_LAYOUT_CREATE_INFO: Self = StructureType(30); + pub const SAMPLER_CREATE_INFO: Self = StructureType(31); + pub const DESCRIPTOR_SET_LAYOUT_CREATE_INFO: Self = StructureType(32); + pub const DESCRIPTOR_POOL_CREATE_INFO: Self = StructureType(33); + pub const DESCRIPTOR_SET_ALLOCATE_INFO: Self = StructureType(34); + pub const WRITE_DESCRIPTOR_SET: Self = StructureType(35); + pub const COPY_DESCRIPTOR_SET: Self = StructureType(36); + pub const FRAMEBUFFER_CREATE_INFO: Self = StructureType(37); + pub const RENDER_PASS_CREATE_INFO: Self = StructureType(38); + pub const COMMAND_POOL_CREATE_INFO: Self = StructureType(39); + pub const COMMAND_BUFFER_ALLOCATE_INFO: Self = StructureType(40); + pub const COMMAND_BUFFER_INHERITANCE_INFO: Self = StructureType(41); + pub const COMMAND_BUFFER_BEGIN_INFO: Self = StructureType(42); + pub const RENDER_PASS_BEGIN_INFO: Self = StructureType(43); + pub const BUFFER_MEMORY_BARRIER: Self = StructureType(44); + pub const IMAGE_MEMORY_BARRIER: Self = StructureType(45); + pub const MEMORY_BARRIER: Self = StructureType(46); + #[doc = "Reserved for internal use by the loader, layers, and ICDs"] + pub const LOADER_INSTANCE_CREATE_INFO: Self = StructureType(47); + #[doc = "Reserved for internal use by the loader, layers, and ICDs"] + pub const LOADER_DEVICE_CREATE_INFO: Self = StructureType(48); +} +#[derive(Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash, Default)] +#[repr(transparent)] +#[doc = ""] +pub struct SubpassContents(pub(crate) i32); +impl SubpassContents { + pub fn from_raw(x: i32) -> Self { + SubpassContents(x) + } + pub fn as_raw(self) -> i32 { + self.0 + } +} +impl SubpassContents { + pub const INLINE: Self = SubpassContents(0); + pub const SECONDARY_COMMAND_BUFFERS: Self = SubpassContents(1); +} +#[derive(Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash, Default)] +#[repr(transparent)] +#[doc = ""] +pub struct Result(pub(crate) i32); +impl Result { + pub fn from_raw(x: i32) -> Self { + Result(x) + } + pub fn as_raw(self) -> i32 { + self.0 + } +} +impl Result { + #[doc = "Command completed successfully"] + pub const SUCCESS: Self = Result(0); + #[doc = "A fence or query has not yet completed"] + pub const NOT_READY: Self = Result(1); + #[doc = "A wait operation has not completed in the specified time"] + pub const TIMEOUT: Self = Result(2); + #[doc = "An event is signaled"] + pub const EVENT_SET: Self = Result(3); + #[doc = "An event is unsignaled"] + pub const EVENT_RESET: Self = Result(4); + #[doc = "A return array was too small for the result"] + pub const INCOMPLETE: Self = Result(5); + #[doc = "A host memory allocation has failed"] + pub const ERROR_OUT_OF_HOST_MEMORY: Self = Result(-1); + #[doc = "A device memory allocation has failed"] + pub const ERROR_OUT_OF_DEVICE_MEMORY: Self = Result(-2); + #[doc = "Initialization of a object has failed"] + pub const ERROR_INITIALIZATION_FAILED: Self = Result(-3); + #[doc = 
"The logical device has been lost. See <>"] + pub const ERROR_DEVICE_LOST: Self = Result(-4); + #[doc = "Mapping of a memory object has failed"] + pub const ERROR_MEMORY_MAP_FAILED: Self = Result(-5); + #[doc = "Layer specified does not exist"] + pub const ERROR_LAYER_NOT_PRESENT: Self = Result(-6); + #[doc = "Extension specified does not exist"] + pub const ERROR_EXTENSION_NOT_PRESENT: Self = Result(-7); + #[doc = "Requested feature is not available on this device"] + pub const ERROR_FEATURE_NOT_PRESENT: Self = Result(-8); + #[doc = "Unable to find a Vulkan driver"] + pub const ERROR_INCOMPATIBLE_DRIVER: Self = Result(-9); + #[doc = "Too many objects of the type have already been created"] + pub const ERROR_TOO_MANY_OBJECTS: Self = Result(-10); + #[doc = "Requested format is not supported on this device"] + pub const ERROR_FORMAT_NOT_SUPPORTED: Self = Result(-11); + #[doc = "A requested pool allocation has failed due to fragmentation of the pool\'s memory"] + pub const ERROR_FRAGMENTED_POOL: Self = Result(-12); +} +impl ::std::error::Error for Result { + fn description(&self) -> &str { + let name = match *self { + Result::SUCCESS => Some("Command completed successfully"), + Result::NOT_READY => Some("A fence or query has not yet completed"), + Result::TIMEOUT => Some("A wait operation has not completed in the specified time"), + Result::EVENT_SET => Some("An event is signaled"), + Result::EVENT_RESET => Some("An event is unsignaled"), + Result::INCOMPLETE => Some("A return array was too small for the result"), + Result::ERROR_OUT_OF_HOST_MEMORY => Some("A host memory allocation has failed"), + Result::ERROR_OUT_OF_DEVICE_MEMORY => Some("A device memory allocation has failed"), + Result::ERROR_INITIALIZATION_FAILED => Some("Initialization of a object has failed"), + Result::ERROR_DEVICE_LOST => { + Some("The logical device has been lost. 
See <>") + } + Result::ERROR_MEMORY_MAP_FAILED => Some("Mapping of a memory object has failed"), + Result::ERROR_LAYER_NOT_PRESENT => Some("Layer specified does not exist"), + Result::ERROR_EXTENSION_NOT_PRESENT => Some("Extension specified does not exist"), + Result::ERROR_FEATURE_NOT_PRESENT => { + Some("Requested feature is not available on this device") + } + Result::ERROR_INCOMPATIBLE_DRIVER => Some("Unable to find a Vulkan driver"), + Result::ERROR_TOO_MANY_OBJECTS => { + Some("Too many objects of the type have already been created") + } + Result::ERROR_FORMAT_NOT_SUPPORTED => { + Some("Requested format is not supported on this device") + } + Result::ERROR_FRAGMENTED_POOL => Some( + "A requested pool allocation has failed due to fragmentation of the pool\'s memory", + ), + _ => None, + }; + name.unwrap_or("unknown error") + } +} +impl fmt::Display for Result { + fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result { + let name = match *self { + Result::SUCCESS => Some("Command completed successfully"), + Result::NOT_READY => Some("A fence or query has not yet completed"), + Result::TIMEOUT => Some("A wait operation has not completed in the specified time"), + Result::EVENT_SET => Some("An event is signaled"), + Result::EVENT_RESET => Some("An event is unsignaled"), + Result::INCOMPLETE => Some("A return array was too small for the result"), + Result::ERROR_OUT_OF_HOST_MEMORY => Some("A host memory allocation has failed"), + Result::ERROR_OUT_OF_DEVICE_MEMORY => Some("A device memory allocation has failed"), + Result::ERROR_INITIALIZATION_FAILED => Some("Initialization of a object has failed"), + Result::ERROR_DEVICE_LOST => { + Some("The logical device has been lost. See <>") + } + Result::ERROR_MEMORY_MAP_FAILED => Some("Mapping of a memory object has failed"), + Result::ERROR_LAYER_NOT_PRESENT => Some("Layer specified does not exist"), + Result::ERROR_EXTENSION_NOT_PRESENT => Some("Extension specified does not exist"), + Result::ERROR_FEATURE_NOT_PRESENT => { + Some("Requested feature is not available on this device") + } + Result::ERROR_INCOMPATIBLE_DRIVER => Some("Unable to find a Vulkan driver"), + Result::ERROR_TOO_MANY_OBJECTS => { + Some("Too many objects of the type have already been created") + } + Result::ERROR_FORMAT_NOT_SUPPORTED => { + Some("Requested format is not supported on this device") + } + Result::ERROR_FRAGMENTED_POOL => Some( + "A requested pool allocation has failed due to fragmentation of the pool\'s memory", + ), + _ => None, + }; + if let Some(x) = name { + fmt.write_str(x) + } else { + self.0.fmt(fmt) + } + } +} +#[derive(Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash, Default)] +#[repr(transparent)] +#[doc = ""] +pub struct DynamicState(pub(crate) i32); +impl DynamicState { + pub fn from_raw(x: i32) -> Self { + DynamicState(x) + } + pub fn as_raw(self) -> i32 { + self.0 + } +} +impl DynamicState { + pub const VIEWPORT: Self = DynamicState(0); + pub const SCISSOR: Self = DynamicState(1); + pub const LINE_WIDTH: Self = DynamicState(2); + pub const DEPTH_BIAS: Self = DynamicState(3); + pub const BLEND_CONSTANTS: Self = DynamicState(4); + pub const DEPTH_BOUNDS: Self = DynamicState(5); + pub const STENCIL_COMPARE_MASK: Self = DynamicState(6); + pub const STENCIL_WRITE_MASK: Self = DynamicState(7); + pub const STENCIL_REFERENCE: Self = DynamicState(8); +} +#[derive(Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash, Default)] +#[repr(transparent)] +#[doc = ""] +pub struct DescriptorUpdateTemplateType(pub(crate) i32); +impl DescriptorUpdateTemplateType { + 
pub fn from_raw(x: i32) -> Self { + DescriptorUpdateTemplateType(x) + } + pub fn as_raw(self) -> i32 { + self.0 + } +} +impl DescriptorUpdateTemplateType { + #[doc = "Create descriptor update template for descriptor set updates"] + pub const DESCRIPTOR_SET: Self = DescriptorUpdateTemplateType(0); +} +#[derive(Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash, Default)] +#[repr(transparent)] +#[doc = ""] +pub struct ObjectType(pub(crate) i32); +impl ObjectType { + pub fn from_raw(x: i32) -> Self { + ObjectType(x) + } + pub fn as_raw(self) -> i32 { + self.0 + } +} +impl ObjectType { + pub const UNKNOWN: Self = ObjectType(0); + #[doc = "VkInstance"] + pub const INSTANCE: Self = ObjectType(1); + #[doc = "VkPhysicalDevice"] + pub const PHYSICAL_DEVICE: Self = ObjectType(2); + #[doc = "VkDevice"] + pub const DEVICE: Self = ObjectType(3); + #[doc = "VkQueue"] + pub const QUEUE: Self = ObjectType(4); + #[doc = "VkSemaphore"] + pub const SEMAPHORE: Self = ObjectType(5); + #[doc = "VkCommandBuffer"] + pub const COMMAND_BUFFER: Self = ObjectType(6); + #[doc = "VkFence"] + pub const FENCE: Self = ObjectType(7); + #[doc = "VkDeviceMemory"] + pub const DEVICE_MEMORY: Self = ObjectType(8); + #[doc = "VkBuffer"] + pub const BUFFER: Self = ObjectType(9); + #[doc = "VkImage"] + pub const IMAGE: Self = ObjectType(10); + #[doc = "VkEvent"] + pub const EVENT: Self = ObjectType(11); + #[doc = "VkQueryPool"] + pub const QUERY_POOL: Self = ObjectType(12); + #[doc = "VkBufferView"] + pub const BUFFER_VIEW: Self = ObjectType(13); + #[doc = "VkImageView"] + pub const IMAGE_VIEW: Self = ObjectType(14); + #[doc = "VkShaderModule"] + pub const SHADER_MODULE: Self = ObjectType(15); + #[doc = "VkPipelineCache"] + pub const PIPELINE_CACHE: Self = ObjectType(16); + #[doc = "VkPipelineLayout"] + pub const PIPELINE_LAYOUT: Self = ObjectType(17); + #[doc = "VkRenderPass"] + pub const RENDER_PASS: Self = ObjectType(18); + #[doc = "VkPipeline"] + pub const PIPELINE: Self = ObjectType(19); + #[doc = "VkDescriptorSetLayout"] + pub const DESCRIPTOR_SET_LAYOUT: Self = ObjectType(20); + #[doc = "VkSampler"] + pub const SAMPLER: Self = ObjectType(21); + #[doc = "VkDescriptorPool"] + pub const DESCRIPTOR_POOL: Self = ObjectType(22); + #[doc = "VkDescriptorSet"] + pub const DESCRIPTOR_SET: Self = ObjectType(23); + #[doc = "VkFramebuffer"] + pub const FRAMEBUFFER: Self = ObjectType(24); + #[doc = "VkCommandPool"] + pub const COMMAND_POOL: Self = ObjectType(25); +} +#[derive(Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash, Default)] +#[repr(transparent)] +#[doc = ""] +pub struct PresentModeKHR(pub(crate) i32); +impl PresentModeKHR { + pub fn from_raw(x: i32) -> Self { + PresentModeKHR(x) + } + pub fn as_raw(self) -> i32 { + self.0 + } +} +impl PresentModeKHR { + pub const IMMEDIATE: Self = PresentModeKHR(0); + pub const MAILBOX: Self = PresentModeKHR(1); + pub const FIFO: Self = PresentModeKHR(2); + pub const FIFO_RELAXED: Self = PresentModeKHR(3); +} +#[derive(Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash, Default)] +#[repr(transparent)] +#[doc = ""] +pub struct ColorSpaceKHR(pub(crate) i32); +impl ColorSpaceKHR { + pub fn from_raw(x: i32) -> Self { + ColorSpaceKHR(x) + } + pub fn as_raw(self) -> i32 { + self.0 + } +} +impl ColorSpaceKHR { + pub const SRGB_NONLINEAR: Self = ColorSpaceKHR(0); +} +#[derive(Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash, Default)] +#[repr(transparent)] +#[doc = ""] +pub struct TimeDomainEXT(pub(crate) i32); +impl TimeDomainEXT { + pub fn from_raw(x: i32) -> Self { + TimeDomainEXT(x) + } + pub fn 
as_raw(self) -> i32 { + self.0 + } +} +impl TimeDomainEXT { + pub const DEVICE: Self = TimeDomainEXT(0); + pub const CLOCK_MONOTONIC: Self = TimeDomainEXT(1); + pub const CLOCK_MONOTONIC_RAW: Self = TimeDomainEXT(2); + pub const QUERY_PERFORMANCE_COUNTER: Self = TimeDomainEXT(3); +} +#[derive(Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash, Default)] +#[repr(transparent)] +#[doc = ""] +pub struct DebugReportObjectTypeEXT(pub(crate) i32); +impl DebugReportObjectTypeEXT { + pub fn from_raw(x: i32) -> Self { + DebugReportObjectTypeEXT(x) + } + pub fn as_raw(self) -> i32 { + self.0 + } +} +impl DebugReportObjectTypeEXT { + pub const UNKNOWN: Self = DebugReportObjectTypeEXT(0); + pub const INSTANCE: Self = DebugReportObjectTypeEXT(1); + pub const PHYSICAL_DEVICE: Self = DebugReportObjectTypeEXT(2); + pub const DEVICE: Self = DebugReportObjectTypeEXT(3); + pub const QUEUE: Self = DebugReportObjectTypeEXT(4); + pub const SEMAPHORE: Self = DebugReportObjectTypeEXT(5); + pub const COMMAND_BUFFER: Self = DebugReportObjectTypeEXT(6); + pub const FENCE: Self = DebugReportObjectTypeEXT(7); + pub const DEVICE_MEMORY: Self = DebugReportObjectTypeEXT(8); + pub const BUFFER: Self = DebugReportObjectTypeEXT(9); + pub const IMAGE: Self = DebugReportObjectTypeEXT(10); + pub const EVENT: Self = DebugReportObjectTypeEXT(11); + pub const QUERY_POOL: Self = DebugReportObjectTypeEXT(12); + pub const BUFFER_VIEW: Self = DebugReportObjectTypeEXT(13); + pub const IMAGE_VIEW: Self = DebugReportObjectTypeEXT(14); + pub const SHADER_MODULE: Self = DebugReportObjectTypeEXT(15); + pub const PIPELINE_CACHE: Self = DebugReportObjectTypeEXT(16); + pub const PIPELINE_LAYOUT: Self = DebugReportObjectTypeEXT(17); + pub const RENDER_PASS: Self = DebugReportObjectTypeEXT(18); + pub const PIPELINE: Self = DebugReportObjectTypeEXT(19); + pub const DESCRIPTOR_SET_LAYOUT: Self = DebugReportObjectTypeEXT(20); + pub const SAMPLER: Self = DebugReportObjectTypeEXT(21); + pub const DESCRIPTOR_POOL: Self = DebugReportObjectTypeEXT(22); + pub const DESCRIPTOR_SET: Self = DebugReportObjectTypeEXT(23); + pub const FRAMEBUFFER: Self = DebugReportObjectTypeEXT(24); + pub const COMMAND_POOL: Self = DebugReportObjectTypeEXT(25); + pub const SURFACE_KHR: Self = DebugReportObjectTypeEXT(26); + pub const SWAPCHAIN_KHR: Self = DebugReportObjectTypeEXT(27); + pub const DEBUG_REPORT_CALLBACK: Self = DebugReportObjectTypeEXT(28); + pub const DISPLAY_KHR: Self = DebugReportObjectTypeEXT(29); + pub const DISPLAY_MODE_KHR: Self = DebugReportObjectTypeEXT(30); + pub const OBJECT_TABLE_NVX: Self = DebugReportObjectTypeEXT(31); + pub const INDIRECT_COMMANDS_LAYOUT_NVX: Self = DebugReportObjectTypeEXT(32); + pub const VALIDATION_CACHE: Self = DebugReportObjectTypeEXT(33); +} +#[derive(Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash, Default)] +#[repr(transparent)] +#[doc = ""] +pub struct RasterizationOrderAMD(pub(crate) i32); +impl RasterizationOrderAMD { + pub fn from_raw(x: i32) -> Self { + RasterizationOrderAMD(x) + } + pub fn as_raw(self) -> i32 { + self.0 + } +} +impl RasterizationOrderAMD { + pub const STRICT: Self = RasterizationOrderAMD(0); + pub const RELAXED: Self = RasterizationOrderAMD(1); +} +#[derive(Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash, Default)] +#[repr(transparent)] +#[doc = ""] +pub struct ValidationCheckEXT(pub(crate) i32); +impl ValidationCheckEXT { + pub fn from_raw(x: i32) -> Self { + ValidationCheckEXT(x) + } + pub fn as_raw(self) -> i32 { + self.0 + } +} +impl ValidationCheckEXT { + pub const ALL: Self = 
ValidationCheckEXT(0); + pub const SHADERS: Self = ValidationCheckEXT(1); +} +#[derive(Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash, Default)] +#[repr(transparent)] +#[doc = ""] +pub struct ValidationFeatureEnableEXT(pub(crate) i32); +impl ValidationFeatureEnableEXT { + pub fn from_raw(x: i32) -> Self { + ValidationFeatureEnableEXT(x) + } + pub fn as_raw(self) -> i32 { + self.0 + } +} +impl ValidationFeatureEnableEXT { + pub const GPU_ASSISTED: Self = ValidationFeatureEnableEXT(0); + pub const GPU_ASSISTED_RESERVE_BINDING_SLOT: Self = ValidationFeatureEnableEXT(1); +} +#[derive(Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash, Default)] +#[repr(transparent)] +#[doc = ""] +pub struct ValidationFeatureDisableEXT(pub(crate) i32); +impl ValidationFeatureDisableEXT { + pub fn from_raw(x: i32) -> Self { + ValidationFeatureDisableEXT(x) + } + pub fn as_raw(self) -> i32 { + self.0 + } +} +impl ValidationFeatureDisableEXT { + pub const ALL: Self = ValidationFeatureDisableEXT(0); + pub const SHADERS: Self = ValidationFeatureDisableEXT(1); + pub const THREAD_SAFETY: Self = ValidationFeatureDisableEXT(2); + pub const API_PARAMETERS: Self = ValidationFeatureDisableEXT(3); + pub const OBJECT_LIFETIMES: Self = ValidationFeatureDisableEXT(4); + pub const CORE_CHECKS: Self = ValidationFeatureDisableEXT(5); + pub const UNIQUE_HANDLES: Self = ValidationFeatureDisableEXT(6); +} +#[derive(Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash, Default)] +#[repr(transparent)] +#[doc = ""] +pub struct IndirectCommandsTokenTypeNVX(pub(crate) i32); +impl IndirectCommandsTokenTypeNVX { + pub fn from_raw(x: i32) -> Self { + IndirectCommandsTokenTypeNVX(x) + } + pub fn as_raw(self) -> i32 { + self.0 + } +} +impl IndirectCommandsTokenTypeNVX { + pub const PIPELINE: Self = IndirectCommandsTokenTypeNVX(0); + pub const DESCRIPTOR_SET: Self = IndirectCommandsTokenTypeNVX(1); + pub const INDEX_BUFFER: Self = IndirectCommandsTokenTypeNVX(2); + pub const VERTEX_BUFFER: Self = IndirectCommandsTokenTypeNVX(3); + pub const PUSH_CONSTANT: Self = IndirectCommandsTokenTypeNVX(4); + pub const DRAW_INDEXED: Self = IndirectCommandsTokenTypeNVX(5); + pub const DRAW: Self = IndirectCommandsTokenTypeNVX(6); + pub const DISPATCH: Self = IndirectCommandsTokenTypeNVX(7); +} +#[derive(Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash, Default)] +#[repr(transparent)] +#[doc = ""] +pub struct ObjectEntryTypeNVX(pub(crate) i32); +impl ObjectEntryTypeNVX { + pub fn from_raw(x: i32) -> Self { + ObjectEntryTypeNVX(x) + } + pub fn as_raw(self) -> i32 { + self.0 + } +} +impl ObjectEntryTypeNVX { + pub const DESCRIPTOR_SET: Self = ObjectEntryTypeNVX(0); + pub const PIPELINE: Self = ObjectEntryTypeNVX(1); + pub const INDEX_BUFFER: Self = ObjectEntryTypeNVX(2); + pub const VERTEX_BUFFER: Self = ObjectEntryTypeNVX(3); + pub const PUSH_CONSTANT: Self = ObjectEntryTypeNVX(4); +} +#[derive(Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash, Default)] +#[repr(transparent)] +#[doc = ""] +pub struct DisplayPowerStateEXT(pub(crate) i32); +impl DisplayPowerStateEXT { + pub fn from_raw(x: i32) -> Self { + DisplayPowerStateEXT(x) + } + pub fn as_raw(self) -> i32 { + self.0 + } +} +impl DisplayPowerStateEXT { + pub const OFF: Self = DisplayPowerStateEXT(0); + pub const SUSPEND: Self = DisplayPowerStateEXT(1); + pub const ON: Self = DisplayPowerStateEXT(2); +} +#[derive(Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash, Default)] +#[repr(transparent)] +#[doc = ""] +pub struct DeviceEventTypeEXT(pub(crate) i32); +impl DeviceEventTypeEXT { + pub fn from_raw(x: i32) 
-> Self { + DeviceEventTypeEXT(x) + } + pub fn as_raw(self) -> i32 { + self.0 + } +} +impl DeviceEventTypeEXT { + pub const DISPLAY_HOTPLUG: Self = DeviceEventTypeEXT(0); +} +#[derive(Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash, Default)] +#[repr(transparent)] +#[doc = ""] +pub struct DisplayEventTypeEXT(pub(crate) i32); +impl DisplayEventTypeEXT { + pub fn from_raw(x: i32) -> Self { + DisplayEventTypeEXT(x) + } + pub fn as_raw(self) -> i32 { + self.0 + } +} +impl DisplayEventTypeEXT { + pub const FIRST_PIXEL_OUT: Self = DisplayEventTypeEXT(0); +} +#[derive(Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash, Default)] +#[repr(transparent)] +#[doc = ""] +pub struct ViewportCoordinateSwizzleNV(pub(crate) i32); +impl ViewportCoordinateSwizzleNV { + pub fn from_raw(x: i32) -> Self { + ViewportCoordinateSwizzleNV(x) + } + pub fn as_raw(self) -> i32 { + self.0 + } +} +impl ViewportCoordinateSwizzleNV { + pub const POSITIVE_X: Self = ViewportCoordinateSwizzleNV(0); + pub const NEGATIVE_X: Self = ViewportCoordinateSwizzleNV(1); + pub const POSITIVE_Y: Self = ViewportCoordinateSwizzleNV(2); + pub const NEGATIVE_Y: Self = ViewportCoordinateSwizzleNV(3); + pub const POSITIVE_Z: Self = ViewportCoordinateSwizzleNV(4); + pub const NEGATIVE_Z: Self = ViewportCoordinateSwizzleNV(5); + pub const POSITIVE_W: Self = ViewportCoordinateSwizzleNV(6); + pub const NEGATIVE_W: Self = ViewportCoordinateSwizzleNV(7); +} +#[derive(Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash, Default)] +#[repr(transparent)] +#[doc = ""] +pub struct DiscardRectangleModeEXT(pub(crate) i32); +impl DiscardRectangleModeEXT { + pub fn from_raw(x: i32) -> Self { + DiscardRectangleModeEXT(x) + } + pub fn as_raw(self) -> i32 { + self.0 + } +} +impl DiscardRectangleModeEXT { + pub const INCLUSIVE: Self = DiscardRectangleModeEXT(0); + pub const EXCLUSIVE: Self = DiscardRectangleModeEXT(1); +} +#[derive(Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash, Default)] +#[repr(transparent)] +#[doc = ""] +pub struct PointClippingBehavior(pub(crate) i32); +impl PointClippingBehavior { + pub fn from_raw(x: i32) -> Self { + PointClippingBehavior(x) + } + pub fn as_raw(self) -> i32 { + self.0 + } +} +impl PointClippingBehavior { + pub const ALL_CLIP_PLANES: Self = PointClippingBehavior(0); + pub const USER_CLIP_PLANES_ONLY: Self = PointClippingBehavior(1); +} +#[derive(Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash, Default)] +#[repr(transparent)] +#[doc = ""] +pub struct SamplerReductionModeEXT(pub(crate) i32); +impl SamplerReductionModeEXT { + pub fn from_raw(x: i32) -> Self { + SamplerReductionModeEXT(x) + } + pub fn as_raw(self) -> i32 { + self.0 + } +} +impl SamplerReductionModeEXT { + pub const WEIGHTED_AVERAGE: Self = SamplerReductionModeEXT(0); + pub const MIN: Self = SamplerReductionModeEXT(1); + pub const MAX: Self = SamplerReductionModeEXT(2); +} +#[derive(Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash, Default)] +#[repr(transparent)] +#[doc = ""] +pub struct TessellationDomainOrigin(pub(crate) i32); +impl TessellationDomainOrigin { + pub fn from_raw(x: i32) -> Self { + TessellationDomainOrigin(x) + } + pub fn as_raw(self) -> i32 { + self.0 + } +} +impl TessellationDomainOrigin { + pub const UPPER_LEFT: Self = TessellationDomainOrigin(0); + pub const LOWER_LEFT: Self = TessellationDomainOrigin(1); +} +#[derive(Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash, Default)] +#[repr(transparent)] +#[doc = ""] +pub struct SamplerYcbcrModelConversion(pub(crate) i32); +impl SamplerYcbcrModelConversion { + pub fn from_raw(x: i32) -> Self 
{ + SamplerYcbcrModelConversion(x) + } + pub fn as_raw(self) -> i32 { + self.0 + } +} +impl SamplerYcbcrModelConversion { + pub const RGB_IDENTITY: Self = SamplerYcbcrModelConversion(0); + #[doc = "just range expansion"] + pub const YCBCR_IDENTITY: Self = SamplerYcbcrModelConversion(1); + #[doc = "aka HD YUV"] + pub const YCBCR_709: Self = SamplerYcbcrModelConversion(2); + #[doc = "aka SD YUV"] + pub const YCBCR_601: Self = SamplerYcbcrModelConversion(3); + #[doc = "aka UHD YUV"] + pub const YCBCR_2020: Self = SamplerYcbcrModelConversion(4); +} +#[derive(Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash, Default)] +#[repr(transparent)] +#[doc = ""] +pub struct SamplerYcbcrRange(pub(crate) i32); +impl SamplerYcbcrRange { + pub fn from_raw(x: i32) -> Self { + SamplerYcbcrRange(x) + } + pub fn as_raw(self) -> i32 { + self.0 + } +} +impl SamplerYcbcrRange { + #[doc = "Luma 0..1 maps to 0..255, chroma -0.5..0.5 to 1..255 (clamped)"] + pub const ITU_FULL: Self = SamplerYcbcrRange(0); + #[doc = "Luma 0..1 maps to 16..235, chroma -0.5..0.5 to 16..240"] + pub const ITU_NARROW: Self = SamplerYcbcrRange(1); +} +#[derive(Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash, Default)] +#[repr(transparent)] +#[doc = ""] +pub struct ChromaLocation(pub(crate) i32); +impl ChromaLocation { + pub fn from_raw(x: i32) -> Self { + ChromaLocation(x) + } + pub fn as_raw(self) -> i32 { + self.0 + } +} +impl ChromaLocation { + pub const COSITED_EVEN: Self = ChromaLocation(0); + pub const MIDPOINT: Self = ChromaLocation(1); +} +#[derive(Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash, Default)] +#[repr(transparent)] +#[doc = ""] +pub struct BlendOverlapEXT(pub(crate) i32); +impl BlendOverlapEXT { + pub fn from_raw(x: i32) -> Self { + BlendOverlapEXT(x) + } + pub fn as_raw(self) -> i32 { + self.0 + } +} +impl BlendOverlapEXT { + pub const UNCORRELATED: Self = BlendOverlapEXT(0); + pub const DISJOINT: Self = BlendOverlapEXT(1); + pub const CONJOINT: Self = BlendOverlapEXT(2); +} +#[derive(Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash, Default)] +#[repr(transparent)] +#[doc = ""] +pub struct CoverageModulationModeNV(pub(crate) i32); +impl CoverageModulationModeNV { + pub fn from_raw(x: i32) -> Self { + CoverageModulationModeNV(x) + } + pub fn as_raw(self) -> i32 { + self.0 + } +} +impl CoverageModulationModeNV { + pub const NONE: Self = CoverageModulationModeNV(0); + pub const RGB: Self = CoverageModulationModeNV(1); + pub const ALPHA: Self = CoverageModulationModeNV(2); + pub const RGBA: Self = CoverageModulationModeNV(3); +} +#[derive(Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash, Default)] +#[repr(transparent)] +#[doc = ""] +pub struct ValidationCacheHeaderVersionEXT(pub(crate) i32); +impl ValidationCacheHeaderVersionEXT { + pub fn from_raw(x: i32) -> Self { + ValidationCacheHeaderVersionEXT(x) + } + pub fn as_raw(self) -> i32 { + self.0 + } +} +impl ValidationCacheHeaderVersionEXT { + pub const ONE: Self = ValidationCacheHeaderVersionEXT(1); +} +#[derive(Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash, Default)] +#[repr(transparent)] +#[doc = ""] +pub struct ShaderInfoTypeAMD(pub(crate) i32); +impl ShaderInfoTypeAMD { + pub fn from_raw(x: i32) -> Self { + ShaderInfoTypeAMD(x) + } + pub fn as_raw(self) -> i32 { + self.0 + } +} +impl ShaderInfoTypeAMD { + pub const STATISTICS: Self = ShaderInfoTypeAMD(0); + pub const BINARY: Self = ShaderInfoTypeAMD(1); + pub const DISASSEMBLY: Self = ShaderInfoTypeAMD(2); +} +#[derive(Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash, Default)] +#[repr(transparent)] +#[doc 
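// Editorial sketch: the `SamplerYcbcrRange` docs above encode the standard ITU
// 8-bit quantization arithmetic; spelled out for `ITU_NARROW`:
//
//     // luma   y in [0.0, 1.0]  -> 16 + y * 219, giving the 16..=235 range
//     // chroma c in [-0.5, 0.5] -> 128 + c * 224, giving the 16..=240 range
//     assert_eq!((16.0_f32 + 1.0 * 219.0) as i32, 235);
//     assert_eq!((128.0_f32 + 0.5 * 224.0) as i32, 240);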
= ""] +pub struct QueueGlobalPriorityEXT(pub(crate) i32); +impl QueueGlobalPriorityEXT { + pub fn from_raw(x: i32) -> Self { + QueueGlobalPriorityEXT(x) + } + pub fn as_raw(self) -> i32 { + self.0 + } +} +impl QueueGlobalPriorityEXT { + pub const LOW: Self = QueueGlobalPriorityEXT(128); + pub const MEDIUM: Self = QueueGlobalPriorityEXT(256); + pub const HIGH: Self = QueueGlobalPriorityEXT(512); + pub const REALTIME: Self = QueueGlobalPriorityEXT(1024); +} +#[derive(Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash, Default)] +#[repr(transparent)] +#[doc = ""] +pub struct ConservativeRasterizationModeEXT(pub(crate) i32); +impl ConservativeRasterizationModeEXT { + pub fn from_raw(x: i32) -> Self { + ConservativeRasterizationModeEXT(x) + } + pub fn as_raw(self) -> i32 { + self.0 + } +} +impl ConservativeRasterizationModeEXT { + pub const DISABLED: Self = ConservativeRasterizationModeEXT(0); + pub const OVERESTIMATE: Self = ConservativeRasterizationModeEXT(1); + pub const UNDERESTIMATE: Self = ConservativeRasterizationModeEXT(2); +} +#[derive(Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash, Default)] +#[repr(transparent)] +#[doc = ""] +pub struct VendorId(pub(crate) i32); +impl VendorId { + pub fn from_raw(x: i32) -> Self { + VendorId(x) + } + pub fn as_raw(self) -> i32 { + self.0 + } +} +impl VendorId { + #[doc = "Vivante vendor ID"] + pub const VIV: Self = VendorId(0x10001); + #[doc = "VeriSilicon vendor ID"] + pub const VSI: Self = VendorId(0x10002); + #[doc = "Kazan Software Renderer"] + pub const KAZAN: Self = VendorId(0x10003); +} +#[derive(Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash, Default)] +#[repr(transparent)] +#[doc = ""] +pub struct DriverIdKHR(pub(crate) i32); +impl DriverIdKHR { + pub fn from_raw(x: i32) -> Self { + DriverIdKHR(x) + } + pub fn as_raw(self) -> i32 { + self.0 + } +} +impl DriverIdKHR { + #[doc = "Advanced Micro Devices, Inc."] + pub const AMD_PROPRIETARY: Self = DriverIdKHR(1); + #[doc = "Advanced Micro Devices, Inc."] + pub const AMD_OPEN_SOURCE: Self = DriverIdKHR(2); + #[doc = "Mesa open source project"] + pub const MESA_RADV: Self = DriverIdKHR(3); + #[doc = "NVIDIA Corporation"] + pub const NVIDIA_PROPRIETARY: Self = DriverIdKHR(4); + #[doc = "Intel Corporation"] + pub const INTEL_PROPRIETARY_WINDOWS: Self = DriverIdKHR(5); + #[doc = "Intel Corporation"] + pub const INTEL_OPEN_SOURCE_MESA: Self = DriverIdKHR(6); + #[doc = "Imagination Technologies"] + pub const IMAGINATION_PROPRIETARY: Self = DriverIdKHR(7); + #[doc = "Qualcomm Technologies, Inc."] + pub const QUALCOMM_PROPRIETARY: Self = DriverIdKHR(8); + #[doc = "Arm Limited"] + pub const ARM_PROPRIETARY: Self = DriverIdKHR(9); + #[doc = "Google LLC"] + pub const GOOGLE_PASTEL: Self = DriverIdKHR(10); +} +#[derive(Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash, Default)] +#[repr(transparent)] +#[doc = ""] +pub struct ShadingRatePaletteEntryNV(pub(crate) i32); +impl ShadingRatePaletteEntryNV { + pub fn from_raw(x: i32) -> Self { + ShadingRatePaletteEntryNV(x) + } + pub fn as_raw(self) -> i32 { + self.0 + } +} +impl ShadingRatePaletteEntryNV { + pub const NO_INVOCATIONS: Self = ShadingRatePaletteEntryNV(0); + pub const TYPE_16_INVOCATIONS_PER_PIXEL: Self = ShadingRatePaletteEntryNV(1); + pub const TYPE_8_INVOCATIONS_PER_PIXEL: Self = ShadingRatePaletteEntryNV(2); + pub const TYPE_4_INVOCATIONS_PER_PIXEL: Self = ShadingRatePaletteEntryNV(3); + pub const TYPE_2_INVOCATIONS_PER_PIXEL: Self = ShadingRatePaletteEntryNV(4); + pub const TYPE_1_INVOCATION_PER_PIXEL: Self = ShadingRatePaletteEntryNV(5); + 
pub const TYPE_1_INVOCATION_PER_2X1_PIXELS: Self = ShadingRatePaletteEntryNV(6); + pub const TYPE_1_INVOCATION_PER_1X2_PIXELS: Self = ShadingRatePaletteEntryNV(7); + pub const TYPE_1_INVOCATION_PER_2X2_PIXELS: Self = ShadingRatePaletteEntryNV(8); + pub const TYPE_1_INVOCATION_PER_4X2_PIXELS: Self = ShadingRatePaletteEntryNV(9); + pub const TYPE_1_INVOCATION_PER_2X4_PIXELS: Self = ShadingRatePaletteEntryNV(10); + pub const TYPE_1_INVOCATION_PER_4X4_PIXELS: Self = ShadingRatePaletteEntryNV(11); +} +#[derive(Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash, Default)] +#[repr(transparent)] +#[doc = ""] +pub struct CoarseSampleOrderTypeNV(pub(crate) i32); +impl CoarseSampleOrderTypeNV { + pub fn from_raw(x: i32) -> Self { + CoarseSampleOrderTypeNV(x) + } + pub fn as_raw(self) -> i32 { + self.0 + } +} +impl CoarseSampleOrderTypeNV { + pub const DEFAULT: Self = CoarseSampleOrderTypeNV(0); + pub const CUSTOM: Self = CoarseSampleOrderTypeNV(1); + pub const PIXEL_MAJOR: Self = CoarseSampleOrderTypeNV(2); + pub const SAMPLE_MAJOR: Self = CoarseSampleOrderTypeNV(3); +} +#[derive(Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash, Default)] +#[repr(transparent)] +#[doc = ""] +pub struct CopyAccelerationStructureModeNV(pub(crate) i32); +impl CopyAccelerationStructureModeNV { + pub fn from_raw(x: i32) -> Self { + CopyAccelerationStructureModeNV(x) + } + pub fn as_raw(self) -> i32 { + self.0 + } +} +impl CopyAccelerationStructureModeNV { + pub const CLONE: Self = CopyAccelerationStructureModeNV(0); + pub const COMPACT: Self = CopyAccelerationStructureModeNV(1); +} +#[derive(Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash, Default)] +#[repr(transparent)] +#[doc = ""] +pub struct AccelerationStructureTypeNV(pub(crate) i32); +impl AccelerationStructureTypeNV { + pub fn from_raw(x: i32) -> Self { + AccelerationStructureTypeNV(x) + } + pub fn as_raw(self) -> i32 { + self.0 + } +} +impl AccelerationStructureTypeNV { + pub const TOP_LEVEL: Self = AccelerationStructureTypeNV(0); + pub const BOTTOM_LEVEL: Self = AccelerationStructureTypeNV(1); +} +#[derive(Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash, Default)] +#[repr(transparent)] +#[doc = ""] +pub struct GeometryTypeNV(pub(crate) i32); +impl GeometryTypeNV { + pub fn from_raw(x: i32) -> Self { + GeometryTypeNV(x) + } + pub fn as_raw(self) -> i32 { + self.0 + } +} +impl GeometryTypeNV { + pub const TRIANGLES: Self = GeometryTypeNV(0); + pub const AABBS: Self = GeometryTypeNV(1); +} +#[derive(Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash, Default)] +#[repr(transparent)] +#[doc = ""] +pub struct AccelerationStructureMemoryRequirementsTypeNV(pub(crate) i32); +impl AccelerationStructureMemoryRequirementsTypeNV { + pub fn from_raw(x: i32) -> Self { + AccelerationStructureMemoryRequirementsTypeNV(x) + } + pub fn as_raw(self) -> i32 { + self.0 + } +} +impl AccelerationStructureMemoryRequirementsTypeNV { + pub const OBJECT: Self = AccelerationStructureMemoryRequirementsTypeNV(0); + pub const BUILD_SCRATCH: Self = AccelerationStructureMemoryRequirementsTypeNV(1); + pub const UPDATE_SCRATCH: Self = AccelerationStructureMemoryRequirementsTypeNV(2); +} +#[derive(Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash, Default)] +#[repr(transparent)] +#[doc = ""] +pub struct RayTracingShaderGroupTypeNV(pub(crate) i32); +impl RayTracingShaderGroupTypeNV { + pub fn from_raw(x: i32) -> Self { + RayTracingShaderGroupTypeNV(x) + } + pub fn as_raw(self) -> i32 { + self.0 + } +} +impl RayTracingShaderGroupTypeNV { + pub const GENERAL: Self = RayTracingShaderGroupTypeNV(0); + 
pub const TRIANGLES_HIT_GROUP: Self = RayTracingShaderGroupTypeNV(1); + pub const PROCEDURAL_HIT_GROUP: Self = RayTracingShaderGroupTypeNV(2); +} +#[derive(Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash, Default)] +#[repr(transparent)] +#[doc = ""] +pub struct MemoryOverallocationBehaviorAMD(pub(crate) i32); +impl MemoryOverallocationBehaviorAMD { + pub fn from_raw(x: i32) -> Self { + MemoryOverallocationBehaviorAMD(x) + } + pub fn as_raw(self) -> i32 { + self.0 + } +} +impl MemoryOverallocationBehaviorAMD { + pub const DEFAULT: Self = MemoryOverallocationBehaviorAMD(0); + pub const ALLOWED: Self = MemoryOverallocationBehaviorAMD(1); + pub const DISALLOWED: Self = MemoryOverallocationBehaviorAMD(2); +} +#[repr(transparent)] +#[derive(Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash)] +#[doc = ""] +pub struct CullModeFlags(pub(crate) Flags); +vk_bitflags_wrapped!(CullModeFlags, 0b11, Flags); +impl CullModeFlags { + pub const NONE: Self = CullModeFlags(0); + pub const FRONT: Self = CullModeFlags(0b1); + pub const BACK: Self = CullModeFlags(0b10); + pub const FRONT_AND_BACK: Self = CullModeFlags(0x00000003); +} +#[repr(transparent)] +#[derive(Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash)] +#[doc = ""] +pub struct QueueFlags(pub(crate) Flags); +vk_bitflags_wrapped!(QueueFlags, 0b1111, Flags); +impl QueueFlags { + #[doc = "Queue supports graphics operations"] + pub const GRAPHICS: Self = QueueFlags(0b1); + #[doc = "Queue supports compute operations"] + pub const COMPUTE: Self = QueueFlags(0b10); + #[doc = "Queue supports transfer operations"] + pub const TRANSFER: Self = QueueFlags(0b100); + #[doc = "Queue supports sparse resource memory management operations"] + pub const SPARSE_BINDING: Self = QueueFlags(0b1000); +} +#[repr(transparent)] +#[derive(Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash)] +#[doc = ""] +pub struct RenderPassCreateFlags(pub(crate) Flags); +vk_bitflags_wrapped!(RenderPassCreateFlags, 0b0, Flags); +impl RenderPassCreateFlags {} +#[repr(transparent)] +#[derive(Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash)] +#[doc = ""] +pub struct DeviceQueueCreateFlags(pub(crate) Flags); +vk_bitflags_wrapped!(DeviceQueueCreateFlags, 0b0, Flags); +impl DeviceQueueCreateFlags {} +#[repr(transparent)] +#[derive(Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash)] +#[doc = ""] +pub struct MemoryPropertyFlags(pub(crate) Flags); +vk_bitflags_wrapped!(MemoryPropertyFlags, 0b11111, Flags); +impl MemoryPropertyFlags { + #[doc = "If otherwise stated, then allocate memory on device"] + pub const DEVICE_LOCAL: Self = MemoryPropertyFlags(0b1); + #[doc = "Memory is mappable by host"] + pub const HOST_VISIBLE: Self = MemoryPropertyFlags(0b10); + #[doc = "Memory will have i/o coherency. 
If not set, application may need to use vkFlushMappedMemoryRanges and vkInvalidateMappedMemoryRanges to flush/invalidate host cache"] + pub const HOST_COHERENT: Self = MemoryPropertyFlags(0b100); + #[doc = "Memory will be cached by the host"] + pub const HOST_CACHED: Self = MemoryPropertyFlags(0b1000); + #[doc = "Memory may be allocated by the driver when it is required"] + pub const LAZILY_ALLOCATED: Self = MemoryPropertyFlags(0b10000); +} +#[repr(transparent)] +#[derive(Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash)] +#[doc = ""] +pub struct MemoryHeapFlags(pub(crate) Flags); +vk_bitflags_wrapped!(MemoryHeapFlags, 0b1, Flags); +impl MemoryHeapFlags { + #[doc = "If set, heap represents device memory"] + pub const DEVICE_LOCAL: Self = MemoryHeapFlags(0b1); +} +#[repr(transparent)] +#[derive(Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash)] +#[doc = ""] +pub struct AccessFlags(pub(crate) Flags); +vk_bitflags_wrapped!(AccessFlags, 0b11111111111111111, Flags); +impl AccessFlags { + #[doc = "Controls coherency of indirect command reads"] + pub const INDIRECT_COMMAND_READ: Self = AccessFlags(0b1); + #[doc = "Controls coherency of index reads"] + pub const INDEX_READ: Self = AccessFlags(0b10); + #[doc = "Controls coherency of vertex attribute reads"] + pub const VERTEX_ATTRIBUTE_READ: Self = AccessFlags(0b100); + #[doc = "Controls coherency of uniform buffer reads"] + pub const UNIFORM_READ: Self = AccessFlags(0b1000); + #[doc = "Controls coherency of input attachment reads"] + pub const INPUT_ATTACHMENT_READ: Self = AccessFlags(0b10000); + #[doc = "Controls coherency of shader reads"] + pub const SHADER_READ: Self = AccessFlags(0b100000); + #[doc = "Controls coherency of shader writes"] + pub const SHADER_WRITE: Self = AccessFlags(0b1000000); + #[doc = "Controls coherency of color attachment reads"] + pub const COLOR_ATTACHMENT_READ: Self = AccessFlags(0b10000000); + #[doc = "Controls coherency of color attachment writes"] + pub const COLOR_ATTACHMENT_WRITE: Self = AccessFlags(0b100000000); + #[doc = "Controls coherency of depth/stencil attachment reads"] + pub const DEPTH_STENCIL_ATTACHMENT_READ: Self = AccessFlags(0b1000000000); + #[doc = "Controls coherency of depth/stencil attachment writes"] + pub const DEPTH_STENCIL_ATTACHMENT_WRITE: Self = AccessFlags(0b10000000000); + #[doc = "Controls coherency of transfer reads"] + pub const TRANSFER_READ: Self = AccessFlags(0b100000000000); + #[doc = "Controls coherency of transfer writes"] + pub const TRANSFER_WRITE: Self = AccessFlags(0b1000000000000); + #[doc = "Controls coherency of host reads"] + pub const HOST_READ: Self = AccessFlags(0b10000000000000); + #[doc = "Controls coherency of host writes"] + pub const HOST_WRITE: Self = AccessFlags(0b100000000000000); + #[doc = "Controls coherency of memory reads"] + pub const MEMORY_READ: Self = AccessFlags(0b1000000000000000); + #[doc = "Controls coherency of memory writes"] + pub const MEMORY_WRITE: Self = AccessFlags(0b10000000000000000); +} +#[repr(transparent)] +#[derive(Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash)] +#[doc = ""] +pub struct BufferUsageFlags(pub(crate) Flags); +vk_bitflags_wrapped!(BufferUsageFlags, 0b111111111, Flags); +impl BufferUsageFlags { + #[doc = "Can be used as a source of transfer operations"] + pub const TRANSFER_SRC: Self = BufferUsageFlags(0b1); + #[doc = "Can be used as a destination of transfer operations"] + pub const TRANSFER_DST: Self = BufferUsageFlags(0b10); + #[doc = "Can be used as TBO"] + pub const UNIFORM_TEXEL_BUFFER: Self = 
BufferUsageFlags(0b100); + #[doc = "Can be used as IBO"] + pub const STORAGE_TEXEL_BUFFER: Self = BufferUsageFlags(0b1000); + #[doc = "Can be used as UBO"] + pub const UNIFORM_BUFFER: Self = BufferUsageFlags(0b10000); + #[doc = "Can be used as SSBO"] + pub const STORAGE_BUFFER: Self = BufferUsageFlags(0b100000); + #[doc = "Can be used as source of fixed-function index fetch (index buffer)"] + pub const INDEX_BUFFER: Self = BufferUsageFlags(0b1000000); + #[doc = "Can be used as source of fixed-function vertex fetch (VBO)"] + pub const VERTEX_BUFFER: Self = BufferUsageFlags(0b10000000); + #[doc = "Can be the source of indirect parameters (e.g. indirect buffer, parameter buffer)"] + pub const INDIRECT_BUFFER: Self = BufferUsageFlags(0b100000000); +} +#[repr(transparent)] +#[derive(Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash)] +#[doc = ""] +pub struct BufferCreateFlags(pub(crate) Flags); +vk_bitflags_wrapped!(BufferCreateFlags, 0b111, Flags); +impl BufferCreateFlags { + #[doc = "Buffer should support sparse backing"] + pub const SPARSE_BINDING: Self = BufferCreateFlags(0b1); + #[doc = "Buffer should support sparse backing with partial residency"] + pub const SPARSE_RESIDENCY: Self = BufferCreateFlags(0b10); + #[doc = "Buffer should support consistent data access to physical memory ranges mapped into multiple locations of sparse buffers"] + pub const SPARSE_ALIASED: Self = BufferCreateFlags(0b100); +} +#[repr(transparent)] +#[derive(Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash)] +#[doc = ""] +pub struct ShaderStageFlags(pub(crate) Flags); +vk_bitflags_wrapped!(ShaderStageFlags, 0b1111111111111111111111111111111, Flags); +impl ShaderStageFlags { + pub const VERTEX: Self = ShaderStageFlags(0b1); + pub const TESSELLATION_CONTROL: Self = ShaderStageFlags(0b10); + pub const TESSELLATION_EVALUATION: Self = ShaderStageFlags(0b100); + pub const GEOMETRY: Self = ShaderStageFlags(0b1000); + pub const FRAGMENT: Self = ShaderStageFlags(0b10000); + pub const COMPUTE: Self = ShaderStageFlags(0b100000); + pub const ALL_GRAPHICS: Self = ShaderStageFlags(0x0000001F); + pub const ALL: Self = ShaderStageFlags(0x7FFFFFFF); +} +#[repr(transparent)] +#[derive(Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash)] +#[doc = ""] +pub struct ImageUsageFlags(pub(crate) Flags); +vk_bitflags_wrapped!(ImageUsageFlags, 0b11111111, Flags); +impl ImageUsageFlags { + #[doc = "Can be used as a source of transfer operations"] + pub const TRANSFER_SRC: Self = ImageUsageFlags(0b1); + #[doc = "Can be used as a destination of transfer operations"] + pub const TRANSFER_DST: Self = ImageUsageFlags(0b10); + #[doc = "Can be sampled from (SAMPLED_IMAGE and COMBINED_IMAGE_SAMPLER descriptor types)"] + pub const SAMPLED: Self = ImageUsageFlags(0b100); + #[doc = "Can be used as storage image (STORAGE_IMAGE descriptor type)"] + pub const STORAGE: Self = ImageUsageFlags(0b1000); + #[doc = "Can be used as framebuffer color attachment"] + pub const COLOR_ATTACHMENT: Self = ImageUsageFlags(0b10000); + #[doc = "Can be used as framebuffer depth/stencil attachment"] + pub const DEPTH_STENCIL_ATTACHMENT: Self = ImageUsageFlags(0b100000); + #[doc = "Image data not needed outside of rendering"] + pub const TRANSIENT_ATTACHMENT: Self = ImageUsageFlags(0b1000000); + #[doc = "Can be used as framebuffer input attachment"] + pub const INPUT_ATTACHMENT: Self = ImageUsageFlags(0b10000000); +} +#[repr(transparent)] +#[derive(Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash)] +#[doc = ""] +pub struct ImageCreateFlags(pub(crate) Flags);
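The flag types above all share one shape: a #[repr(transparent)] newtype over the raw `Flags` integer, one associated constant per bit, and operator plumbing supplied by the `vk_bitflags_wrapped!` macro. A minimal, self-contained sketch of that pattern follows; `ToyUsageFlags` and its helper methods are hypothetical stand-ins for what the macro is assumed to generate, not vendored identifiers:

// Hypothetical stand-in for the generated bitmask pattern; not vendored code.
type Flags = u32;

#[repr(transparent)]
#[derive(Clone, Copy, PartialEq, Eq, Default)]
pub struct ToyUsageFlags(Flags);

impl ToyUsageFlags {
    pub const TRANSFER_SRC: Self = ToyUsageFlags(0b1);
    pub const TRANSFER_DST: Self = ToyUsageFlags(0b10);
    pub const SAMPLED: Self = ToyUsageFlags(0b100);

    // Combine two masks; the real macro is assumed to provide an operator for this.
    pub fn or(self, other: Self) -> Self {
        ToyUsageFlags(self.0 | other.0)
    }

    // True if every bit of `other` is also set in `self`.
    pub fn contains(self, other: Self) -> bool {
        self.0 & other.0 == other.0
    }
}

fn main() {
    let usage = ToyUsageFlags::TRANSFER_DST.or(ToyUsageFlags::SAMPLED);
    assert!(usage.contains(ToyUsageFlags::SAMPLED));
    assert!(!usage.contains(ToyUsageFlags::TRANSFER_SRC));
}

The newtype keeps differently typed masks (for example a BufferUsageFlags versus an ImageUsageFlags value) from being mixed accidentally, while #[repr(transparent)] keeps the ABI identical to the raw integer the C API expects.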
+vk_bitflags_wrapped!(ImageCreateFlags, 0b11111, Flags); +impl ImageCreateFlags { + #[doc = "Image should support sparse backing"] + pub const SPARSE_BINDING: Self = ImageCreateFlags(0b1); + #[doc = "Image should support sparse backing with partial residency"] + pub const SPARSE_RESIDENCY: Self = ImageCreateFlags(0b10); + #[doc = "Image should support consistent data access to physical memory ranges mapped into multiple locations of sparse images"] + pub const SPARSE_ALIASED: Self = ImageCreateFlags(0b100); + #[doc = "Allows image views to have different format than the base image"] + pub const MUTABLE_FORMAT: Self = ImageCreateFlags(0b1000); + #[doc = "Allows creating image views with cube type from the created image"] + pub const CUBE_COMPATIBLE: Self = ImageCreateFlags(0b10000); +} +#[repr(transparent)] +#[derive(Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash)] +#[doc = ""] +pub struct ImageViewCreateFlags(pub(crate) Flags); +vk_bitflags_wrapped!(ImageViewCreateFlags, 0b0, Flags); +impl ImageViewCreateFlags {} +#[repr(transparent)] +#[derive(Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash)] +#[doc = ""] +pub struct SamplerCreateFlags(pub(crate) Flags); +vk_bitflags_wrapped!(SamplerCreateFlags, 0b0, Flags); +impl SamplerCreateFlags {} +#[repr(transparent)] +#[derive(Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash)] +#[doc = ""] +pub struct PipelineCreateFlags(pub(crate) Flags); +vk_bitflags_wrapped!(PipelineCreateFlags, 0b111, Flags); +impl PipelineCreateFlags { + pub const DISABLE_OPTIMIZATION: Self = PipelineCreateFlags(0b1); + pub const ALLOW_DERIVATIVES: Self = PipelineCreateFlags(0b10); + pub const DERIVATIVE: Self = PipelineCreateFlags(0b100); +} +#[repr(transparent)] +#[derive(Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash)] +#[doc = ""] +pub struct ColorComponentFlags(pub(crate) Flags); +vk_bitflags_wrapped!(ColorComponentFlags, 0b1111, Flags); +impl ColorComponentFlags { + pub const R: Self = ColorComponentFlags(0b1); + pub const G: Self = ColorComponentFlags(0b10); + pub const B: Self = ColorComponentFlags(0b100); + pub const A: Self = ColorComponentFlags(0b1000); +} +#[repr(transparent)] +#[derive(Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash)] +#[doc = ""] +pub struct FenceCreateFlags(pub(crate) Flags); +vk_bitflags_wrapped!(FenceCreateFlags, 0b1, Flags); +impl FenceCreateFlags { + pub const SIGNALED: Self = FenceCreateFlags(0b1); +} +#[repr(transparent)] +#[derive(Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash)] +#[doc = ""] +pub struct FormatFeatureFlags(pub(crate) Flags); +vk_bitflags_wrapped!(FormatFeatureFlags, 0b1111111111111, Flags); +impl FormatFeatureFlags { + #[doc = "Format can be used for sampled images (SAMPLED_IMAGE and COMBINED_IMAGE_SAMPLER descriptor types)"] + pub const SAMPLED_IMAGE: Self = FormatFeatureFlags(0b1); + #[doc = "Format can be used for storage images (STORAGE_IMAGE descriptor type)"] + pub const STORAGE_IMAGE: Self = FormatFeatureFlags(0b10); + #[doc = "Format supports atomic operations in case it is used for storage images"] + pub const STORAGE_IMAGE_ATOMIC: Self = FormatFeatureFlags(0b100); + #[doc = "Format can be used for uniform texel buffers (TBOs)"] + pub const UNIFORM_TEXEL_BUFFER: Self = FormatFeatureFlags(0b1000); + #[doc = "Format can be used for storage texel buffers (IBOs)"] + pub const STORAGE_TEXEL_BUFFER: Self = FormatFeatureFlags(0b10000); + #[doc = "Format supports atomic operations in case it is used for storage texel buffers"] + pub const STORAGE_TEXEL_BUFFER_ATOMIC: Self = FormatFeatureFlags(0b100000); +
#[doc = "Format can be used for vertex buffers (VBOs)"] + pub const VERTEX_BUFFER: Self = FormatFeatureFlags(0b1000000); + #[doc = "Format can be used for color attachment images"] + pub const COLOR_ATTACHMENT: Self = FormatFeatureFlags(0b10000000); + #[doc = "Format supports blending in case it is used for color attachment images"] + pub const COLOR_ATTACHMENT_BLEND: Self = FormatFeatureFlags(0b100000000); + #[doc = "Format can be used for depth/stencil attachment images"] + pub const DEPTH_STENCIL_ATTACHMENT: Self = FormatFeatureFlags(0b1000000000); + #[doc = "Format can be used as the source image of blits with vkCmdBlitImage"] + pub const BLIT_SRC: Self = FormatFeatureFlags(0b10000000000); + #[doc = "Format can be used as the destination image of blits with vkCmdBlitImage"] + pub const BLIT_DST: Self = FormatFeatureFlags(0b100000000000); + #[doc = "Format can be filtered with VK_FILTER_LINEAR when being sampled"] + pub const SAMPLED_IMAGE_FILTER_LINEAR: Self = FormatFeatureFlags(0b1000000000000); +} +#[repr(transparent)] +#[derive(Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash)] +#[doc = ""] +pub struct QueryControlFlags(pub(crate) Flags); +vk_bitflags_wrapped!(QueryControlFlags, 0b1, Flags); +impl QueryControlFlags { + #[doc = "Require precise results to be collected by the query"] + pub const PRECISE: Self = QueryControlFlags(0b1); +} +#[repr(transparent)] +#[derive(Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash)] +#[doc = ""] +pub struct QueryResultFlags(pub(crate) Flags); +vk_bitflags_wrapped!(QueryResultFlags, 0b1111, Flags); +impl QueryResultFlags { + #[doc = "Results of the queries are written to the destination buffer as 64-bit values"] + pub const TYPE_64: Self = QueryResultFlags(0b1); + #[doc = "Results of the queries are waited on before proceeding with the result copy"] + pub const WAIT: Self = QueryResultFlags(0b10); + #[doc = "Besides the results of the query, the availability of the results is also written"] + pub const WITH_AVAILABILITY: Self = QueryResultFlags(0b100); + #[doc = "Copy the partial results of the query even if the final results are not available"] + pub const PARTIAL: Self = QueryResultFlags(0b1000); +} +#[repr(transparent)] +#[derive(Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash)] +#[doc = ""] +pub struct CommandBufferUsageFlags(pub(crate) Flags); +vk_bitflags_wrapped!(CommandBufferUsageFlags, 0b111, Flags); +impl CommandBufferUsageFlags { + pub const ONE_TIME_SUBMIT: Self = CommandBufferUsageFlags(0b1); + pub const RENDER_PASS_CONTINUE: Self = CommandBufferUsageFlags(0b10); + #[doc = "Command buffer may be submitted/executed more than once simultaneously"] + pub const SIMULTANEOUS_USE: Self = CommandBufferUsageFlags(0b100); +} +#[repr(transparent)] +#[derive(Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash)] +#[doc = ""] +pub struct QueryPipelineStatisticFlags(pub(crate) Flags); +vk_bitflags_wrapped!(QueryPipelineStatisticFlags, 0b11111111111, Flags); +impl QueryPipelineStatisticFlags { + #[doc = "Optional"] + pub const INPUT_ASSEMBLY_VERTICES: Self = QueryPipelineStatisticFlags(0b1); + #[doc = "Optional"] + pub const INPUT_ASSEMBLY_PRIMITIVES: Self = QueryPipelineStatisticFlags(0b10); + #[doc = "Optional"] + pub const VERTEX_SHADER_INVOCATIONS: Self = QueryPipelineStatisticFlags(0b100); + #[doc = "Optional"] + pub const GEOMETRY_SHADER_INVOCATIONS: Self = QueryPipelineStatisticFlags(0b1000); + #[doc = "Optional"] + pub const GEOMETRY_SHADER_PRIMITIVES: Self = QueryPipelineStatisticFlags(0b10000); + #[doc = "Optional"] + pub const 
CLIPPING_INVOCATIONS: Self = QueryPipelineStatisticFlags(0b100000); + #[doc = "Optional"] + pub const CLIPPING_PRIMITIVES: Self = QueryPipelineStatisticFlags(0b1000000); + #[doc = "Optional"] + pub const FRAGMENT_SHADER_INVOCATIONS: Self = QueryPipelineStatisticFlags(0b10000000); + #[doc = "Optional"] + pub const TESSELLATION_CONTROL_SHADER_PATCHES: Self = QueryPipelineStatisticFlags(0b100000000); + #[doc = "Optional"] + pub const TESSELLATION_EVALUATION_SHADER_INVOCATIONS: Self = + QueryPipelineStatisticFlags(0b1000000000); + #[doc = "Optional"] + pub const COMPUTE_SHADER_INVOCATIONS: Self = QueryPipelineStatisticFlags(0b10000000000); +} +#[repr(transparent)] +#[derive(Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash)] +#[doc = ""] +pub struct ImageAspectFlags(pub(crate) Flags); +vk_bitflags_wrapped!(ImageAspectFlags, 0b1111, Flags); +impl ImageAspectFlags { + pub const COLOR: Self = ImageAspectFlags(0b1); + pub const DEPTH: Self = ImageAspectFlags(0b10); + pub const STENCIL: Self = ImageAspectFlags(0b100); + pub const METADATA: Self = ImageAspectFlags(0b1000); +} +#[repr(transparent)] +#[derive(Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash)] +#[doc = ""] +pub struct SparseImageFormatFlags(pub(crate) Flags); +vk_bitflags_wrapped!(SparseImageFormatFlags, 0b111, Flags); +impl SparseImageFormatFlags { + #[doc = "Image uses a single mip tail region for all array layers"] + pub const SINGLE_MIPTAIL: Self = SparseImageFormatFlags(0b1); + #[doc = "Image requires mip level dimensions to be an integer multiple of the sparse image block dimensions for non-tail mip levels."] + pub const ALIGNED_MIP_SIZE: Self = SparseImageFormatFlags(0b10); + #[doc = "Image uses a non-standard sparse image block dimensions"] + pub const NONSTANDARD_BLOCK_SIZE: Self = SparseImageFormatFlags(0b100); +} +#[repr(transparent)] +#[derive(Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash)] +#[doc = ""] +pub struct SparseMemoryBindFlags(pub(crate) Flags); +vk_bitflags_wrapped!(SparseMemoryBindFlags, 0b1, Flags); +impl SparseMemoryBindFlags { + #[doc = "Operation binds resource metadata to memory"] + pub const METADATA: Self = SparseMemoryBindFlags(0b1); +} +#[repr(transparent)] +#[derive(Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash)] +#[doc = ""] +pub struct PipelineStageFlags(pub(crate) Flags); +vk_bitflags_wrapped!(PipelineStageFlags, 0b11111111111111111, Flags); +impl PipelineStageFlags { + #[doc = "Before subsequent commands are processed"] + pub const TOP_OF_PIPE: Self = PipelineStageFlags(0b1); + #[doc = "Draw/DispatchIndirect command fetch"] + pub const DRAW_INDIRECT: Self = PipelineStageFlags(0b10); + #[doc = "Vertex/index fetch"] + pub const VERTEX_INPUT: Self = PipelineStageFlags(0b100); + #[doc = "Vertex shading"] + pub const VERTEX_SHADER: Self = PipelineStageFlags(0b1000); + #[doc = "Tessellation control shading"] + pub const TESSELLATION_CONTROL_SHADER: Self = PipelineStageFlags(0b10000); + #[doc = "Tessellation evaluation shading"] + pub const TESSELLATION_EVALUATION_SHADER: Self = PipelineStageFlags(0b100000); + #[doc = "Geometry shading"] + pub const GEOMETRY_SHADER: Self = PipelineStageFlags(0b1000000); + #[doc = "Fragment shading"] + pub const FRAGMENT_SHADER: Self = PipelineStageFlags(0b10000000); + #[doc = "Early fragment (depth and stencil) tests"] + pub const EARLY_FRAGMENT_TESTS: Self = PipelineStageFlags(0b100000000); + #[doc = "Late fragment (depth and stencil) tests"] + pub const LATE_FRAGMENT_TESTS: Self = PipelineStageFlags(0b1000000000); + #[doc = "Color attachment writes"] + pub 
const COLOR_ATTACHMENT_OUTPUT: Self = PipelineStageFlags(0b10000000000); + #[doc = "Compute shading"] + pub const COMPUTE_SHADER: Self = PipelineStageFlags(0b100000000000); + #[doc = "Transfer/copy operations"] + pub const TRANSFER: Self = PipelineStageFlags(0b1000000000000); + #[doc = "After previous commands have completed"] + pub const BOTTOM_OF_PIPE: Self = PipelineStageFlags(0b10000000000000); + #[doc = "Indicates host (CPU) is a source/sink of the dependency"] + pub const HOST: Self = PipelineStageFlags(0b100000000000000); + #[doc = "All stages of the graphics pipeline"] + pub const ALL_GRAPHICS: Self = PipelineStageFlags(0b1000000000000000); + #[doc = "All stages supported on the queue"] + pub const ALL_COMMANDS: Self = PipelineStageFlags(0b10000000000000000); +} +#[repr(transparent)] +#[derive(Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash)] +#[doc = ""] +pub struct CommandPoolCreateFlags(pub(crate) Flags); +vk_bitflags_wrapped!(CommandPoolCreateFlags, 0b11, Flags); +impl CommandPoolCreateFlags { + #[doc = "Command buffers have a short lifetime"] + pub const TRANSIENT: Self = CommandPoolCreateFlags(0b1); + #[doc = "Command buffers may release their memory individually"] + pub const RESET_COMMAND_BUFFER: Self = CommandPoolCreateFlags(0b10); +} +#[repr(transparent)] +#[derive(Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash)] +#[doc = ""] +pub struct CommandPoolResetFlags(pub(crate) Flags); +vk_bitflags_wrapped!(CommandPoolResetFlags, 0b1, Flags); +impl CommandPoolResetFlags { + #[doc = "Release resources owned by the pool"] + pub const RELEASE_RESOURCES: Self = CommandPoolResetFlags(0b1); +} +#[repr(transparent)] +#[derive(Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash)] +#[doc = ""] +pub struct CommandBufferResetFlags(pub(crate) Flags); +vk_bitflags_wrapped!(CommandBufferResetFlags, 0b1, Flags); +impl CommandBufferResetFlags { + #[doc = "Release resources owned by the buffer"] + pub const RELEASE_RESOURCES: Self = CommandBufferResetFlags(0b1); +} +#[repr(transparent)] +#[derive(Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash)] +#[doc = ""] +pub struct SampleCountFlags(pub(crate) Flags); +vk_bitflags_wrapped!(SampleCountFlags, 0b1111111, Flags); +impl SampleCountFlags { + #[doc = "Sample count 1 supported"] + pub const TYPE_1: Self = SampleCountFlags(0b1); + #[doc = "Sample count 2 supported"] + pub const TYPE_2: Self = SampleCountFlags(0b10); + #[doc = "Sample count 4 supported"] + pub const TYPE_4: Self = SampleCountFlags(0b100); + #[doc = "Sample count 8 supported"] + pub const TYPE_8: Self = SampleCountFlags(0b1000); + #[doc = "Sample count 16 supported"] + pub const TYPE_16: Self = SampleCountFlags(0b10000); + #[doc = "Sample count 32 supported"] + pub const TYPE_32: Self = SampleCountFlags(0b100000); + #[doc = "Sample count 64 supported"] + pub const TYPE_64: Self = SampleCountFlags(0b1000000); +} +#[repr(transparent)] +#[derive(Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash)] +#[doc = ""] +pub struct AttachmentDescriptionFlags(pub(crate) Flags); +vk_bitflags_wrapped!(AttachmentDescriptionFlags, 0b1, Flags); +impl AttachmentDescriptionFlags { + #[doc = "The attachment may alias physical memory of another attachment in the same render pass"] + pub const MAY_ALIAS: Self = AttachmentDescriptionFlags(0b1); +} +#[repr(transparent)] +#[derive(Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash)] +#[doc = ""] +pub struct StencilFaceFlags(pub(crate) Flags); +vk_bitflags_wrapped!(StencilFaceFlags, 0b11, Flags); +impl StencilFaceFlags { + #[doc = "Front face"] + pub const 
FRONT: Self = StencilFaceFlags(0b1); + #[doc = "Back face"] + pub const BACK: Self = StencilFaceFlags(0b10); + #[doc = "Front and back faces"] + pub const STENCIL_FRONT_AND_BACK: Self = StencilFaceFlags(0x00000003); +} +#[repr(transparent)] +#[derive(Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash)] +#[doc = ""] +pub struct DescriptorPoolCreateFlags(pub(crate) Flags); +vk_bitflags_wrapped!(DescriptorPoolCreateFlags, 0b1, Flags); +impl DescriptorPoolCreateFlags { + #[doc = "Descriptor sets may be freed individually"] + pub const FREE_DESCRIPTOR_SET: Self = DescriptorPoolCreateFlags(0b1); +} +#[repr(transparent)] +#[derive(Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash)] +#[doc = ""] +pub struct DependencyFlags(pub(crate) Flags); +vk_bitflags_wrapped!(DependencyFlags, 0b1, Flags); +impl DependencyFlags { + #[doc = "Dependency is per pixel region "] + pub const BY_REGION: Self = DependencyFlags(0b1); +} +#[repr(transparent)] +#[derive(Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash)] +#[doc = ""] +pub struct DisplayPlaneAlphaFlagsKHR(pub(crate) Flags); +vk_bitflags_wrapped!(DisplayPlaneAlphaFlagsKHR, 0b1111, Flags); +impl DisplayPlaneAlphaFlagsKHR { + pub const OPAQUE: Self = DisplayPlaneAlphaFlagsKHR(0b1); + pub const GLOBAL: Self = DisplayPlaneAlphaFlagsKHR(0b10); + pub const PER_PIXEL: Self = DisplayPlaneAlphaFlagsKHR(0b100); + pub const PER_PIXEL_PREMULTIPLIED: Self = DisplayPlaneAlphaFlagsKHR(0b1000); +} +#[repr(transparent)] +#[derive(Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash)] +#[doc = ""] +pub struct CompositeAlphaFlagsKHR(pub(crate) Flags); +vk_bitflags_wrapped!(CompositeAlphaFlagsKHR, 0b1111, Flags); +impl CompositeAlphaFlagsKHR { + pub const OPAQUE: Self = CompositeAlphaFlagsKHR(0b1); + pub const PRE_MULTIPLIED: Self = CompositeAlphaFlagsKHR(0b10); + pub const POST_MULTIPLIED: Self = CompositeAlphaFlagsKHR(0b100); + pub const INHERIT: Self = CompositeAlphaFlagsKHR(0b1000); +} +#[repr(transparent)] +#[derive(Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash)] +#[doc = ""] +pub struct SurfaceTransformFlagsKHR(pub(crate) Flags); +vk_bitflags_wrapped!(SurfaceTransformFlagsKHR, 0b111111111, Flags); +impl SurfaceTransformFlagsKHR { + pub const IDENTITY: Self = SurfaceTransformFlagsKHR(0b1); + pub const ROTATE_90: Self = SurfaceTransformFlagsKHR(0b10); + pub const ROTATE_180: Self = SurfaceTransformFlagsKHR(0b100); + pub const ROTATE_270: Self = SurfaceTransformFlagsKHR(0b1000); + pub const HORIZONTAL_MIRROR: Self = SurfaceTransformFlagsKHR(0b10000); + pub const HORIZONTAL_MIRROR_ROTATE_90: Self = SurfaceTransformFlagsKHR(0b100000); + pub const HORIZONTAL_MIRROR_ROTATE_180: Self = SurfaceTransformFlagsKHR(0b1000000); + pub const HORIZONTAL_MIRROR_ROTATE_270: Self = SurfaceTransformFlagsKHR(0b10000000); + pub const INHERIT: Self = SurfaceTransformFlagsKHR(0b100000000); +} +#[repr(transparent)] +#[derive(Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash)] +#[doc = ""] +pub struct DebugReportFlagsEXT(pub(crate) Flags); +vk_bitflags_wrapped!(DebugReportFlagsEXT, 0b11111, Flags); +impl DebugReportFlagsEXT { + pub const INFORMATION: Self = DebugReportFlagsEXT(0b1); + pub const WARNING: Self = DebugReportFlagsEXT(0b10); + pub const PERFORMANCE_WARNING: Self = DebugReportFlagsEXT(0b100); + pub const ERROR: Self = DebugReportFlagsEXT(0b1000); + pub const DEBUG: Self = DebugReportFlagsEXT(0b10000); +} +#[repr(transparent)] +#[derive(Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash)] +#[doc = ""] +pub struct ExternalMemoryHandleTypeFlagsNV(pub(crate) Flags); 
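The i32-backed newtypes earlier in this file (DriverIdKHR, ChromaLocation, VendorId, and friends) follow a related pattern: an open enum whose raw value round-trips through `from_raw`/`as_raw` and whose named values are plain associated constants. Because the types derive structural equality, the constants work directly as `match` patterns. A self-contained sketch, with a hypothetical `ToyDriverId` standing in for the generated types:

// Hypothetical stand-in for the generated open-enum pattern; not vendored code.
#[repr(transparent)]
#[derive(Clone, Copy, PartialEq, Eq, Debug)]
pub struct ToyDriverId(i32);

impl ToyDriverId {
    pub fn from_raw(x: i32) -> Self {
        ToyDriverId(x)
    }
    pub fn as_raw(self) -> i32 {
        self.0
    }
    pub const AMD_PROPRIETARY: Self = ToyDriverId(1);
    pub const MESA_RADV: Self = ToyDriverId(3);
}

fn describe(id: ToyDriverId) -> &'static str {
    // Associated constants are usable as patterns here because the type
    // derives PartialEq/Eq; unknown raw values still need a fallback arm.
    match id {
        ToyDriverId::AMD_PROPRIETARY => "AMD proprietary",
        ToyDriverId::MESA_RADV => "Mesa RADV",
        _ => "unrecognized driver id",
    }
}

fn main() {
    assert_eq!(describe(ToyDriverId::from_raw(3)), "Mesa RADV");
    assert_eq!(ToyDriverId::MESA_RADV.as_raw(), 3);
}

Unlike a native Rust enum, the newtype can hold values added by future Vulkan headers without undefined behavior, which is presumably why the generator avoids a real enum here.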
+vk_bitflags_wrapped!(ExternalMemoryHandleTypeFlagsNV, 0b1111, Flags); +impl ExternalMemoryHandleTypeFlagsNV { + pub const EXTERNAL_MEMORY_HANDLE_TYPE_OPAQUE_WIN32_NV: Self = + ExternalMemoryHandleTypeFlagsNV(0b1); + pub const EXTERNAL_MEMORY_HANDLE_TYPE_OPAQUE_WIN32_KMT_NV: Self = + ExternalMemoryHandleTypeFlagsNV(0b10); + pub const EXTERNAL_MEMORY_HANDLE_TYPE_D3D11_IMAGE_NV: Self = + ExternalMemoryHandleTypeFlagsNV(0b100); + pub const EXTERNAL_MEMORY_HANDLE_TYPE_D3D11_IMAGE_KMT_NV: Self = + ExternalMemoryHandleTypeFlagsNV(0b1000); +} +#[repr(transparent)] +#[derive(Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash)] +#[doc = ""] +pub struct ExternalMemoryFeatureFlagsNV(pub(crate) Flags); +vk_bitflags_wrapped!(ExternalMemoryFeatureFlagsNV, 0b111, Flags); +impl ExternalMemoryFeatureFlagsNV { + pub const EXTERNAL_MEMORY_FEATURE_DEDICATED_ONLY_NV: Self = ExternalMemoryFeatureFlagsNV(0b1); + pub const EXTERNAL_MEMORY_FEATURE_EXPORTABLE_NV: Self = ExternalMemoryFeatureFlagsNV(0b10); + pub const EXTERNAL_MEMORY_FEATURE_IMPORTABLE_NV: Self = ExternalMemoryFeatureFlagsNV(0b100); +} +#[repr(transparent)] +#[derive(Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash)] +#[doc = ""] +pub struct SubgroupFeatureFlags(pub(crate) Flags); +vk_bitflags_wrapped!(SubgroupFeatureFlags, 0b11111111, Flags); +impl SubgroupFeatureFlags { + #[doc = "Basic subgroup operations"] + pub const BASIC: Self = SubgroupFeatureFlags(0b1); + #[doc = "Vote subgroup operations"] + pub const VOTE: Self = SubgroupFeatureFlags(0b10); + #[doc = "Arithmetic subgroup operations"] + pub const ARITHMETIC: Self = SubgroupFeatureFlags(0b100); + #[doc = "Ballot subgroup operations"] + pub const BALLOT: Self = SubgroupFeatureFlags(0b1000); + #[doc = "Shuffle subgroup operations"] + pub const SHUFFLE: Self = SubgroupFeatureFlags(0b10000); + #[doc = "Shuffle relative subgroup operations"] + pub const SHUFFLE_RELATIVE: Self = SubgroupFeatureFlags(0b100000); + #[doc = "Clustered subgroup operations"] + pub const CLUSTERED: Self = SubgroupFeatureFlags(0b1000000); + #[doc = "Quad subgroup operations"] + pub const QUAD: Self = SubgroupFeatureFlags(0b10000000); +} +#[repr(transparent)] +#[derive(Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash)] +#[doc = ""] +pub struct IndirectCommandsLayoutUsageFlagsNVX(pub(crate) Flags); +vk_bitflags_wrapped!(IndirectCommandsLayoutUsageFlagsNVX, 0b1111, Flags); +impl IndirectCommandsLayoutUsageFlagsNVX { + pub const UNORDERED_SEQUENCES: Self = IndirectCommandsLayoutUsageFlagsNVX(0b1); + pub const SPARSE_SEQUENCES: Self = IndirectCommandsLayoutUsageFlagsNVX(0b10); + pub const EMPTY_EXECUTIONS: Self = IndirectCommandsLayoutUsageFlagsNVX(0b100); + pub const INDEXED_SEQUENCES: Self = IndirectCommandsLayoutUsageFlagsNVX(0b1000); +} +#[repr(transparent)] +#[derive(Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash)] +#[doc = ""] +pub struct ObjectEntryUsageFlagsNVX(pub(crate) Flags); +vk_bitflags_wrapped!(ObjectEntryUsageFlagsNVX, 0b11, Flags); +impl ObjectEntryUsageFlagsNVX { + pub const GRAPHICS: Self = ObjectEntryUsageFlagsNVX(0b1); + pub const COMPUTE: Self = ObjectEntryUsageFlagsNVX(0b10); +} +#[repr(transparent)] +#[derive(Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash)] +#[doc = ""] +pub struct DescriptorSetLayoutCreateFlags(pub(crate) Flags); +vk_bitflags_wrapped!(DescriptorSetLayoutCreateFlags, 0b0, Flags); +impl DescriptorSetLayoutCreateFlags {} +#[repr(transparent)] +#[derive(Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash)] +#[doc = ""] +pub struct ExternalMemoryHandleTypeFlags(pub(crate) Flags); 
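Note the second argument to each `vk_bitflags_wrapped!` invocation (0b1111 for ExternalMemoryHandleTypeFlagsNV above, 0b0 for reserved types such as DescriptorSetLayoutCreateFlags): it appears to carry the mask of all bits currently defined for the type. Assuming that reading, a short sketch of how such an "all known bits" mask can validate or sanitize raw values crossing the FFI boundary; the names below are illustrative only:

// Illustrative only: TOY_ALL_KNOWN plays the role of the macro's second
// argument, i.e. the mask of every bit defined for a toy flags type.
type Flags = u32;

const TOY_ALL_KNOWN: Flags = 0b1111;

// True if `bits` sets nothing outside the known mask.
fn is_known(bits: Flags) -> bool {
    bits & !TOY_ALL_KNOWN == 0
}

// Strip any unknown bits, e.g. before echoing a mask back to a driver.
fn truncate_to_known(bits: Flags) -> Flags {
    bits & TOY_ALL_KNOWN
}

fn main() {
    assert!(is_known(0b0101));
    assert!(!is_known(0b1_0000)); // bit 4 is not defined for the toy type
    assert_eq!(truncate_to_known(0b1_0101), 0b0101);
}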
+vk_bitflags_wrapped!(ExternalMemoryHandleTypeFlags, 0b1111111, Flags); +impl ExternalMemoryHandleTypeFlags { + pub const EXTERNAL_MEMORY_HANDLE_TYPE_OPAQUE_FD: Self = ExternalMemoryHandleTypeFlags(0b1); + pub const EXTERNAL_MEMORY_HANDLE_TYPE_OPAQUE_WIN32: Self = ExternalMemoryHandleTypeFlags(0b10); + pub const EXTERNAL_MEMORY_HANDLE_TYPE_OPAQUE_WIN32_KMT: Self = + ExternalMemoryHandleTypeFlags(0b100); + pub const EXTERNAL_MEMORY_HANDLE_TYPE_D3D11_TEXTURE: Self = + ExternalMemoryHandleTypeFlags(0b1000); + pub const EXTERNAL_MEMORY_HANDLE_TYPE_D3D11_TEXTURE_KMT: Self = + ExternalMemoryHandleTypeFlags(0b10000); + pub const EXTERNAL_MEMORY_HANDLE_TYPE_D3D12_HEAP: Self = + ExternalMemoryHandleTypeFlags(0b100000); + pub const EXTERNAL_MEMORY_HANDLE_TYPE_D3D12_RESOURCE: Self = + ExternalMemoryHandleTypeFlags(0b1000000); +} +#[repr(transparent)] +#[derive(Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash)] +#[doc = ""] +pub struct ExternalMemoryFeatureFlags(pub(crate) Flags); +vk_bitflags_wrapped!(ExternalMemoryFeatureFlags, 0b111, Flags); +impl ExternalMemoryFeatureFlags { + pub const EXTERNAL_MEMORY_FEATURE_DEDICATED_ONLY: Self = ExternalMemoryFeatureFlags(0b1); + pub const EXTERNAL_MEMORY_FEATURE_EXPORTABLE: Self = ExternalMemoryFeatureFlags(0b10); + pub const EXTERNAL_MEMORY_FEATURE_IMPORTABLE: Self = ExternalMemoryFeatureFlags(0b100); +} +#[repr(transparent)] +#[derive(Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash)] +#[doc = ""] +pub struct ExternalSemaphoreHandleTypeFlags(pub(crate) Flags); +vk_bitflags_wrapped!(ExternalSemaphoreHandleTypeFlags, 0b11111, Flags); +impl ExternalSemaphoreHandleTypeFlags { + pub const EXTERNAL_SEMAPHORE_HANDLE_TYPE_OPAQUE_FD: Self = + ExternalSemaphoreHandleTypeFlags(0b1); + pub const EXTERNAL_SEMAPHORE_HANDLE_TYPE_OPAQUE_WIN32: Self = + ExternalSemaphoreHandleTypeFlags(0b10); + pub const EXTERNAL_SEMAPHORE_HANDLE_TYPE_OPAQUE_WIN32_KMT: Self = + ExternalSemaphoreHandleTypeFlags(0b100); + pub const EXTERNAL_SEMAPHORE_HANDLE_TYPE_D3D12_FENCE: Self = + ExternalSemaphoreHandleTypeFlags(0b1000); + pub const EXTERNAL_SEMAPHORE_HANDLE_TYPE_SYNC_FD: Self = + ExternalSemaphoreHandleTypeFlags(0b10000); +} +#[repr(transparent)] +#[derive(Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash)] +#[doc = ""] +pub struct ExternalSemaphoreFeatureFlags(pub(crate) Flags); +vk_bitflags_wrapped!(ExternalSemaphoreFeatureFlags, 0b11, Flags); +impl ExternalSemaphoreFeatureFlags { + pub const EXTERNAL_SEMAPHORE_FEATURE_EXPORTABLE: Self = ExternalSemaphoreFeatureFlags(0b1); + pub const EXTERNAL_SEMAPHORE_FEATURE_IMPORTABLE: Self = ExternalSemaphoreFeatureFlags(0b10); +} +#[repr(transparent)] +#[derive(Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash)] +#[doc = ""] +pub struct SemaphoreImportFlags(pub(crate) Flags); +vk_bitflags_wrapped!(SemaphoreImportFlags, 0b1, Flags); +impl SemaphoreImportFlags { + pub const TEMPORARY: Self = SemaphoreImportFlags(0b1); +} +#[repr(transparent)] +#[derive(Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash)] +#[doc = ""] +pub struct ExternalFenceHandleTypeFlags(pub(crate) Flags); +vk_bitflags_wrapped!(ExternalFenceHandleTypeFlags, 0b1111, Flags); +impl ExternalFenceHandleTypeFlags { + pub const EXTERNAL_FENCE_HANDLE_TYPE_OPAQUE_FD: Self = ExternalFenceHandleTypeFlags(0b1); + pub const EXTERNAL_FENCE_HANDLE_TYPE_OPAQUE_WIN32: Self = ExternalFenceHandleTypeFlags(0b10); + pub const EXTERNAL_FENCE_HANDLE_TYPE_OPAQUE_WIN32_KMT: Self = + ExternalFenceHandleTypeFlags(0b100); + pub const EXTERNAL_FENCE_HANDLE_TYPE_SYNC_FD: Self = 
ExternalFenceHandleTypeFlags(0b1000); +} +#[repr(transparent)] +#[derive(Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash)] +#[doc = ""] +pub struct ExternalFenceFeatureFlags(pub(crate) Flags); +vk_bitflags_wrapped!(ExternalFenceFeatureFlags, 0b11, Flags); +impl ExternalFenceFeatureFlags { + pub const EXTERNAL_FENCE_FEATURE_EXPORTABLE: Self = ExternalFenceFeatureFlags(0b1); + pub const EXTERNAL_FENCE_FEATURE_IMPORTABLE: Self = ExternalFenceFeatureFlags(0b10); +} +#[repr(transparent)] +#[derive(Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash)] +#[doc = ""] +pub struct FenceImportFlags(pub(crate) Flags); +vk_bitflags_wrapped!(FenceImportFlags, 0b1, Flags); +impl FenceImportFlags { + pub const TEMPORARY: Self = FenceImportFlags(0b1); +} +#[repr(transparent)] +#[derive(Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash)] +#[doc = ""] +pub struct SurfaceCounterFlagsEXT(pub(crate) Flags); +vk_bitflags_wrapped!(SurfaceCounterFlagsEXT, 0b1, Flags); +impl SurfaceCounterFlagsEXT { + pub const VBLANK: Self = SurfaceCounterFlagsEXT(0b1); +} +#[repr(transparent)] +#[derive(Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash)] +#[doc = ""] +pub struct PeerMemoryFeatureFlags(pub(crate) Flags); +vk_bitflags_wrapped!(PeerMemoryFeatureFlags, 0b1111, Flags); +impl PeerMemoryFeatureFlags { + #[doc = "Can read with vkCmdCopy commands"] + pub const COPY_SRC: Self = PeerMemoryFeatureFlags(0b1); + #[doc = "Can write with vkCmdCopy commands"] + pub const COPY_DST: Self = PeerMemoryFeatureFlags(0b10); + #[doc = "Can read with any access type/command"] + pub const GENERIC_SRC: Self = PeerMemoryFeatureFlags(0b100); + #[doc = "Can write with any access type/command"] + pub const GENERIC_DST: Self = PeerMemoryFeatureFlags(0b1000); +} +#[repr(transparent)] +#[derive(Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash)] +#[doc = ""] +pub struct MemoryAllocateFlags(pub(crate) Flags); +vk_bitflags_wrapped!(MemoryAllocateFlags, 0b1, Flags); +impl MemoryAllocateFlags { + #[doc = "Force allocation on specific devices"] + pub const DEVICE_MASK: Self = MemoryAllocateFlags(0b1); +} +#[repr(transparent)] +#[derive(Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash)] +#[doc = ""] +pub struct DeviceGroupPresentModeFlagsKHR(pub(crate) Flags); +vk_bitflags_wrapped!(DeviceGroupPresentModeFlagsKHR, 0b1111, Flags); +impl DeviceGroupPresentModeFlagsKHR { + #[doc = "Present from local memory"] + pub const LOCAL: Self = DeviceGroupPresentModeFlagsKHR(0b1); + #[doc = "Present from remote memory"] + pub const REMOTE: Self = DeviceGroupPresentModeFlagsKHR(0b10); + #[doc = "Present sum of local and/or remote memory"] + pub const SUM: Self = DeviceGroupPresentModeFlagsKHR(0b100); + #[doc = "Each physical device presents from local memory"] + pub const LOCAL_MULTI_DEVICE: Self = DeviceGroupPresentModeFlagsKHR(0b1000); +} +#[repr(transparent)] +#[derive(Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash)] +#[doc = ""] +pub struct SwapchainCreateFlagsKHR(pub(crate) Flags); +vk_bitflags_wrapped!(SwapchainCreateFlagsKHR, 0b0, Flags); +impl SwapchainCreateFlagsKHR {} +#[repr(transparent)] +#[derive(Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash)] +#[doc = ""] +pub struct SubpassDescriptionFlags(pub(crate) Flags); +vk_bitflags_wrapped!(SubpassDescriptionFlags, 0b0, Flags); +impl SubpassDescriptionFlags {} +#[repr(transparent)] +#[derive(Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash)] +#[doc = ""] +pub struct DebugUtilsMessageSeverityFlagsEXT(pub(crate) Flags); +vk_bitflags_wrapped!(DebugUtilsMessageSeverityFlagsEXT, 0b1000100010001, Flags); +impl
DebugUtilsMessageSeverityFlagsEXT { + pub const VERBOSE: Self = DebugUtilsMessageSeverityFlagsEXT(0b1); + pub const INFO: Self = DebugUtilsMessageSeverityFlagsEXT(0b10000); + pub const WARNING: Self = DebugUtilsMessageSeverityFlagsEXT(0b100000000); + pub const ERROR: Self = DebugUtilsMessageSeverityFlagsEXT(0b1000000000000); +} +#[repr(transparent)] +#[derive(Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash)] +#[doc = ""] +pub struct DebugUtilsMessageTypeFlagsEXT(pub(crate) Flags); +vk_bitflags_wrapped!(DebugUtilsMessageTypeFlagsEXT, 0b111, Flags); +impl DebugUtilsMessageTypeFlagsEXT { + pub const GENERAL: Self = DebugUtilsMessageTypeFlagsEXT(0b1); + pub const VALIDATION: Self = DebugUtilsMessageTypeFlagsEXT(0b10); + pub const PERFORMANCE: Self = DebugUtilsMessageTypeFlagsEXT(0b100); +} +#[repr(transparent)] +#[derive(Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash)] +#[doc = ""] +pub struct DescriptorBindingFlagsEXT(pub(crate) Flags); +vk_bitflags_wrapped!(DescriptorBindingFlagsEXT, 0b1111, Flags); +impl DescriptorBindingFlagsEXT { + pub const UPDATE_AFTER_BIND: Self = DescriptorBindingFlagsEXT(0b1); + pub const UPDATE_UNUSED_WHILE_PENDING: Self = DescriptorBindingFlagsEXT(0b10); + pub const PARTIALLY_BOUND: Self = DescriptorBindingFlagsEXT(0b100); + pub const VARIABLE_DESCRIPTOR_COUNT: Self = DescriptorBindingFlagsEXT(0b1000); +} +#[repr(transparent)] +#[derive(Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash)] +#[doc = ""] +pub struct ConditionalRenderingFlagsEXT(pub(crate) Flags); +vk_bitflags_wrapped!(ConditionalRenderingFlagsEXT, 0b1, Flags); +impl ConditionalRenderingFlagsEXT { + pub const INVERTED: Self = ConditionalRenderingFlagsEXT(0b1); +} +#[repr(transparent)] +#[derive(Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash)] +#[doc = ""] +pub struct ResolveModeFlagsKHR(pub(crate) Flags); +vk_bitflags_wrapped!(ResolveModeFlagsKHR, 0b1111, Flags); +impl ResolveModeFlagsKHR { + pub const NONE: Self = ResolveModeFlagsKHR(0); + pub const SAMPLE_ZERO: Self = ResolveModeFlagsKHR(0b1); + pub const AVERAGE: Self = ResolveModeFlagsKHR(0b10); + pub const MIN: Self = ResolveModeFlagsKHR(0b100); + pub const MAX: Self = ResolveModeFlagsKHR(0b1000); +} +#[repr(transparent)] +#[derive(Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash)] +#[doc = ""] +pub struct GeometryInstanceFlagsNV(pub(crate) Flags); +vk_bitflags_wrapped!(GeometryInstanceFlagsNV, 0b1111, Flags); +impl GeometryInstanceFlagsNV { + pub const TRIANGLE_CULL_DISABLE: Self = GeometryInstanceFlagsNV(0b1); + pub const TRIANGLE_FRONT_COUNTERCLOCKWISE: Self = GeometryInstanceFlagsNV(0b10); + pub const FORCE_OPAQUE: Self = GeometryInstanceFlagsNV(0b100); + pub const FORCE_NO_OPAQUE: Self = GeometryInstanceFlagsNV(0b1000); +} +#[repr(transparent)] +#[derive(Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash)] +#[doc = ""] +pub struct GeometryFlagsNV(pub(crate) Flags); +vk_bitflags_wrapped!(GeometryFlagsNV, 0b11, Flags); +impl GeometryFlagsNV { + pub const OPAQUE: Self = GeometryFlagsNV(0b1); + pub const NO_DUPLICATE_ANY_HIT_INVOCATION: Self = GeometryFlagsNV(0b10); +} +#[repr(transparent)] +#[derive(Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash)] +#[doc = ""] +pub struct BuildAccelerationStructureFlagsNV(pub(crate) Flags); +vk_bitflags_wrapped!(BuildAccelerationStructureFlagsNV, 0b11111, Flags); +impl BuildAccelerationStructureFlagsNV { + pub const ALLOW_UPDATE: Self = BuildAccelerationStructureFlagsNV(0b1); + pub const ALLOW_COMPACTION: Self = BuildAccelerationStructureFlagsNV(0b10); + pub const PREFER_FAST_TRACE: Self = 
BuildAccelerationStructureFlagsNV(0b100); + pub const PREFER_FAST_BUILD: Self = BuildAccelerationStructureFlagsNV(0b1000); + pub const LOW_MEMORY: Self = BuildAccelerationStructureFlagsNV(0b10000); +} +pub const MAX_PHYSICAL_DEVICE_NAME_SIZE: usize = 256; +pub const UUID_SIZE: usize = 16; +pub const LUID_SIZE: usize = 8; +pub const MAX_EXTENSION_NAME_SIZE: usize = 256; +pub const MAX_DESCRIPTION_SIZE: usize = 256; +pub const MAX_MEMORY_TYPES: usize = 32; +pub const MAX_MEMORY_HEAPS: usize = 16; +pub const LOD_CLAMP_NONE: f32 = 1000.00; +pub const REMAINING_MIP_LEVELS: u32 = !0; +pub const REMAINING_ARRAY_LAYERS: u32 = !0; +pub const WHOLE_SIZE: u64 = !0; +pub const ATTACHMENT_UNUSED: u32 = !0; +pub const TRUE: Bool32 = 1; +pub const FALSE: Bool32 = 0; +pub const QUEUE_FAMILY_IGNORED: u32 = !0; +pub const QUEUE_FAMILY_EXTERNAL: u32 = !0 - 1; +pub const QUEUE_FAMILY_FOREIGN_EXT: u32 = !0 - 2; +pub const SUBPASS_EXTERNAL: u32 = !0; +pub const MAX_DEVICE_GROUP_SIZE: usize = 32; +pub const MAX_DRIVER_NAME_SIZE_KHR: usize = 256; +pub const MAX_DRIVER_INFO_SIZE_KHR: usize = 256; +pub const SHADER_UNUSED_NV: u32 = !0; +impl KhrSurfaceFn { + pub fn name() -> &'static ::std::ffi::CStr { + ::std::ffi::CStr::from_bytes_with_nul(b"VK_KHR_surface\0").expect("Wrong extension string") + } +} +#[allow(non_camel_case_types)] +pub type PFN_vkDestroySurfaceKHR = extern "system" fn( + instance: Instance, + surface: SurfaceKHR, + p_allocator: *const AllocationCallbacks, +) -> c_void; +#[allow(non_camel_case_types)] +pub type PFN_vkGetPhysicalDeviceSurfaceSupportKHR = extern "system" fn( + physical_device: PhysicalDevice, + queue_family_index: u32, + surface: SurfaceKHR, + p_supported: *mut Bool32, +) -> Result; +#[allow(non_camel_case_types)] +pub type PFN_vkGetPhysicalDeviceSurfaceCapabilitiesKHR = extern "system" fn( + physical_device: PhysicalDevice, + surface: SurfaceKHR, + p_surface_capabilities: *mut SurfaceCapabilitiesKHR, +) -> Result; +#[allow(non_camel_case_types)] +pub type PFN_vkGetPhysicalDeviceSurfaceFormatsKHR = extern "system" fn( + physical_device: PhysicalDevice, + surface: SurfaceKHR, + p_surface_format_count: *mut u32, + p_surface_formats: *mut SurfaceFormatKHR, +) -> Result; +#[allow(non_camel_case_types)] +pub type PFN_vkGetPhysicalDeviceSurfacePresentModesKHR = extern "system" fn( + physical_device: PhysicalDevice, + surface: SurfaceKHR, + p_present_mode_count: *mut u32, + p_present_modes: *mut PresentModeKHR, +) -> Result; +pub struct KhrSurfaceFn { + pub destroy_surface_khr: extern "system" fn( + instance: Instance, + surface: SurfaceKHR, + p_allocator: *const AllocationCallbacks, + ) -> c_void, + pub get_physical_device_surface_support_khr: extern "system" fn( + physical_device: PhysicalDevice, + queue_family_index: u32, + surface: SurfaceKHR, + p_supported: *mut Bool32, + ) -> Result, + pub get_physical_device_surface_capabilities_khr: extern "system" fn( + physical_device: PhysicalDevice, + surface: SurfaceKHR, + p_surface_capabilities: *mut SurfaceCapabilitiesKHR, + ) -> Result, + pub get_physical_device_surface_formats_khr: extern "system" fn( + physical_device: PhysicalDevice, + surface: SurfaceKHR, + p_surface_format_count: *mut u32, + p_surface_formats: *mut SurfaceFormatKHR, + ) -> Result, + pub get_physical_device_surface_present_modes_khr: extern "system" fn( + physical_device: PhysicalDevice, + surface: SurfaceKHR, + p_present_mode_count: *mut u32, + p_present_modes: *mut PresentModeKHR, + ) -> Result, +} +unsafe impl Send for KhrSurfaceFn {} +unsafe impl Sync for 
KhrSurfaceFn {} +impl ::std::clone::Clone for KhrSurfaceFn { + fn clone(&self) -> Self { + KhrSurfaceFn { + destroy_surface_khr: self.destroy_surface_khr, + get_physical_device_surface_support_khr: self.get_physical_device_surface_support_khr, + get_physical_device_surface_capabilities_khr: self + .get_physical_device_surface_capabilities_khr, + get_physical_device_surface_formats_khr: self.get_physical_device_surface_formats_khr, + get_physical_device_surface_present_modes_khr: self + .get_physical_device_surface_present_modes_khr, + } + } +} +impl KhrSurfaceFn { + pub fn load(mut _f: F) -> Self + where + F: FnMut(&::std::ffi::CStr) -> *const c_void, + { + KhrSurfaceFn { + destroy_surface_khr: unsafe { + extern "system" fn destroy_surface_khr( + _instance: Instance, + _surface: SurfaceKHR, + _p_allocator: *const AllocationCallbacks, + ) -> c_void { + panic!(concat!("Unable to load ", stringify!(destroy_surface_khr))) + } + let raw_name = stringify!(vkDestroySurfaceKHR); + let cname = ::std::ffi::CString::new(raw_name).unwrap(); + let val = _f(&cname); + if val.is_null() { + destroy_surface_khr + } else { + ::std::mem::transmute(val) + } + }, + get_physical_device_surface_support_khr: unsafe { + extern "system" fn get_physical_device_surface_support_khr( + _physical_device: PhysicalDevice, + _queue_family_index: u32, + _surface: SurfaceKHR, + _p_supported: *mut Bool32, + ) -> Result { + panic!(concat!( + "Unable to load ", + stringify!(get_physical_device_surface_support_khr) + )) + } + let raw_name = stringify!(vkGetPhysicalDeviceSurfaceSupportKHR); + let cname = ::std::ffi::CString::new(raw_name).unwrap(); + let val = _f(&cname); + if val.is_null() { + get_physical_device_surface_support_khr + } else { + ::std::mem::transmute(val) + } + }, + get_physical_device_surface_capabilities_khr: unsafe { + extern "system" fn get_physical_device_surface_capabilities_khr( + _physical_device: PhysicalDevice, + _surface: SurfaceKHR, + _p_surface_capabilities: *mut SurfaceCapabilitiesKHR, + ) -> Result { + panic!(concat!( + "Unable to load ", + stringify!(get_physical_device_surface_capabilities_khr) + )) + } + let raw_name = stringify!(vkGetPhysicalDeviceSurfaceCapabilitiesKHR); + let cname = ::std::ffi::CString::new(raw_name).unwrap(); + let val = _f(&cname); + if val.is_null() { + get_physical_device_surface_capabilities_khr + } else { + ::std::mem::transmute(val) + } + }, + get_physical_device_surface_formats_khr: unsafe { + extern "system" fn get_physical_device_surface_formats_khr( + _physical_device: PhysicalDevice, + _surface: SurfaceKHR, + _p_surface_format_count: *mut u32, + _p_surface_formats: *mut SurfaceFormatKHR, + ) -> Result { + panic!(concat!( + "Unable to load ", + stringify!(get_physical_device_surface_formats_khr) + )) + } + let raw_name = stringify!(vkGetPhysicalDeviceSurfaceFormatsKHR); + let cname = ::std::ffi::CString::new(raw_name).unwrap(); + let val = _f(&cname); + if val.is_null() { + get_physical_device_surface_formats_khr + } else { + ::std::mem::transmute(val) + } + }, + get_physical_device_surface_present_modes_khr: unsafe { + extern "system" fn get_physical_device_surface_present_modes_khr( + _physical_device: PhysicalDevice, + _surface: SurfaceKHR, + _p_present_mode_count: *mut u32, + _p_present_modes: *mut PresentModeKHR, + ) -> Result { + panic!(concat!( + "Unable to load ", + stringify!(get_physical_device_surface_present_modes_khr) + )) + } + let raw_name = stringify!(vkGetPhysicalDeviceSurfacePresentModesKHR); + let cname = 
::std::ffi::CString::new(raw_name).unwrap(); + let val = _f(&cname); + if val.is_null() { + get_physical_device_surface_present_modes_khr + } else { + ::std::mem::transmute(val) + } + }, + } + } + #[doc = ""] + pub unsafe fn destroy_surface_khr( + &self, + instance: Instance, + surface: SurfaceKHR, + p_allocator: *const AllocationCallbacks, + ) -> c_void { + (self.destroy_surface_khr)(instance, surface, p_allocator) + } + #[doc = ""] + pub unsafe fn get_physical_device_surface_support_khr( + &self, + physical_device: PhysicalDevice, + queue_family_index: u32, + surface: SurfaceKHR, + p_supported: *mut Bool32, + ) -> Result { + (self.get_physical_device_surface_support_khr)( + physical_device, + queue_family_index, + surface, + p_supported, + ) + } + #[doc = ""] + pub unsafe fn get_physical_device_surface_capabilities_khr( + &self, + physical_device: PhysicalDevice, + surface: SurfaceKHR, + p_surface_capabilities: *mut SurfaceCapabilitiesKHR, + ) -> Result { + (self.get_physical_device_surface_capabilities_khr)( + physical_device, + surface, + p_surface_capabilities, + ) + } + #[doc = ""] + pub unsafe fn get_physical_device_surface_formats_khr( + &self, + physical_device: PhysicalDevice, + surface: SurfaceKHR, + p_surface_format_count: *mut u32, + p_surface_formats: *mut SurfaceFormatKHR, + ) -> Result { + (self.get_physical_device_surface_formats_khr)( + physical_device, + surface, + p_surface_format_count, + p_surface_formats, + ) + } + #[doc = ""] + pub unsafe fn get_physical_device_surface_present_modes_khr( + &self, + physical_device: PhysicalDevice, + surface: SurfaceKHR, + p_present_mode_count: *mut u32, + p_present_modes: *mut PresentModeKHR, + ) -> Result { + (self.get_physical_device_surface_present_modes_khr)( + physical_device, + surface, + p_present_mode_count, + p_present_modes, + ) + } +} +#[doc = "Generated from \'VK_KHR_surface\'"] +impl Result { + pub const ERROR_SURFACE_LOST_KHR: Self = Result(-1000000000); +} +#[doc = "Generated from \'VK_KHR_surface\'"] +impl Result { + pub const ERROR_NATIVE_WINDOW_IN_USE_KHR: Self = Result(-1000000001); +} +#[doc = "Generated from \'VK_KHR_surface\'"] +impl ObjectType { + pub const SURFACE_KHR: Self = ObjectType(1000000000); +} +impl KhrSwapchainFn { + pub fn name() -> &'static ::std::ffi::CStr { + ::std::ffi::CStr::from_bytes_with_nul(b"VK_KHR_swapchain\0") + .expect("Wrong extension string") + } +} +#[allow(non_camel_case_types)] +pub type PFN_vkCreateSwapchainKHR = extern "system" fn( + device: Device, + p_create_info: *const SwapchainCreateInfoKHR, + p_allocator: *const AllocationCallbacks, + p_swapchain: *mut SwapchainKHR, +) -> Result; +#[allow(non_camel_case_types)] +pub type PFN_vkDestroySwapchainKHR = extern "system" fn( + device: Device, + swapchain: SwapchainKHR, + p_allocator: *const AllocationCallbacks, +) -> c_void; +#[allow(non_camel_case_types)] +pub type PFN_vkGetSwapchainImagesKHR = extern "system" fn( + device: Device, + swapchain: SwapchainKHR, + p_swapchain_image_count: *mut u32, + p_swapchain_images: *mut Image, +) -> Result; +#[allow(non_camel_case_types)] +pub type PFN_vkAcquireNextImageKHR = extern "system" fn( + device: Device, + swapchain: SwapchainKHR, + timeout: u64, + semaphore: Semaphore, + fence: Fence, + p_image_index: *mut u32, +) -> Result; +#[allow(non_camel_case_types)] +pub type PFN_vkQueuePresentKHR = + extern "system" fn(queue: Queue, p_present_info: *const PresentInfoKHR) -> Result; +#[allow(non_camel_case_types)] +pub type PFN_vkGetDeviceGroupPresentCapabilitiesKHR = extern "system" fn( + 
device: Device, + p_device_group_present_capabilities: *mut DeviceGroupPresentCapabilitiesKHR, +) -> Result; +#[allow(non_camel_case_types)] +pub type PFN_vkGetDeviceGroupSurfacePresentModesKHR = extern "system" fn( + device: Device, + surface: SurfaceKHR, + p_modes: *mut DeviceGroupPresentModeFlagsKHR, +) -> Result; +#[allow(non_camel_case_types)] +pub type PFN_vkGetPhysicalDevicePresentRectanglesKHR = extern "system" fn( + physical_device: PhysicalDevice, + surface: SurfaceKHR, + p_rect_count: *mut u32, + p_rects: *mut Rect2D, +) -> Result; +#[allow(non_camel_case_types)] +pub type PFN_vkAcquireNextImage2KHR = extern "system" fn( + device: Device, + p_acquire_info: *const AcquireNextImageInfoKHR, + p_image_index: *mut u32, +) -> Result; +pub struct KhrSwapchainFn { + pub create_swapchain_khr: extern "system" fn( + device: Device, + p_create_info: *const SwapchainCreateInfoKHR, + p_allocator: *const AllocationCallbacks, + p_swapchain: *mut SwapchainKHR, + ) -> Result, + pub destroy_swapchain_khr: extern "system" fn( + device: Device, + swapchain: SwapchainKHR, + p_allocator: *const AllocationCallbacks, + ) -> c_void, + pub get_swapchain_images_khr: extern "system" fn( + device: Device, + swapchain: SwapchainKHR, + p_swapchain_image_count: *mut u32, + p_swapchain_images: *mut Image, + ) -> Result, + pub acquire_next_image_khr: extern "system" fn( + device: Device, + swapchain: SwapchainKHR, + timeout: u64, + semaphore: Semaphore, + fence: Fence, + p_image_index: *mut u32, + ) -> Result, + pub queue_present_khr: + extern "system" fn(queue: Queue, p_present_info: *const PresentInfoKHR) -> Result, + pub get_device_group_present_capabilities_khr: extern "system" fn( + device: Device, + p_device_group_present_capabilities: *mut DeviceGroupPresentCapabilitiesKHR, + ) -> Result, + pub get_device_group_surface_present_modes_khr: extern "system" fn( + device: Device, + surface: SurfaceKHR, + p_modes: *mut DeviceGroupPresentModeFlagsKHR, + ) -> Result, + pub get_physical_device_present_rectangles_khr: extern "system" fn( + physical_device: PhysicalDevice, + surface: SurfaceKHR, + p_rect_count: *mut u32, + p_rects: *mut Rect2D, + ) -> Result, + pub acquire_next_image2_khr: extern "system" fn( + device: Device, + p_acquire_info: *const AcquireNextImageInfoKHR, + p_image_index: *mut u32, + ) -> Result, +} +unsafe impl Send for KhrSwapchainFn {} +unsafe impl Sync for KhrSwapchainFn {} +impl ::std::clone::Clone for KhrSwapchainFn { + fn clone(&self) -> Self { + KhrSwapchainFn { + create_swapchain_khr: self.create_swapchain_khr, + destroy_swapchain_khr: self.destroy_swapchain_khr, + get_swapchain_images_khr: self.get_swapchain_images_khr, + acquire_next_image_khr: self.acquire_next_image_khr, + queue_present_khr: self.queue_present_khr, + get_device_group_present_capabilities_khr: self + .get_device_group_present_capabilities_khr, + get_device_group_surface_present_modes_khr: self + .get_device_group_surface_present_modes_khr, + get_physical_device_present_rectangles_khr: self + .get_physical_device_present_rectangles_khr, + acquire_next_image2_khr: self.acquire_next_image2_khr, + } + } +} +impl KhrSwapchainFn { + pub fn load(mut _f: F) -> Self + where + F: FnMut(&::std::ffi::CStr) -> *const c_void, + { + KhrSwapchainFn { + create_swapchain_khr: unsafe { + extern "system" fn create_swapchain_khr( + _device: Device, + _p_create_info: *const SwapchainCreateInfoKHR, + _p_allocator: *const AllocationCallbacks, + _p_swapchain: *mut SwapchainKHR, + ) -> Result { + panic!(concat!("Unable to load ", 
stringify!(create_swapchain_khr))) + } + let raw_name = stringify!(vkCreateSwapchainKHR); + let cname = ::std::ffi::CString::new(raw_name).unwrap(); + let val = _f(&cname); + if val.is_null() { + create_swapchain_khr + } else { + ::std::mem::transmute(val) + } + }, + destroy_swapchain_khr: unsafe { + extern "system" fn destroy_swapchain_khr( + _device: Device, + _swapchain: SwapchainKHR, + _p_allocator: *const AllocationCallbacks, + ) -> c_void { + panic!(concat!( + "Unable to load ", + stringify!(destroy_swapchain_khr) + )) + } + let raw_name = stringify!(vkDestroySwapchainKHR); + let cname = ::std::ffi::CString::new(raw_name).unwrap(); + let val = _f(&cname); + if val.is_null() { + destroy_swapchain_khr + } else { + ::std::mem::transmute(val) + } + }, + get_swapchain_images_khr: unsafe { + extern "system" fn get_swapchain_images_khr( + _device: Device, + _swapchain: SwapchainKHR, + _p_swapchain_image_count: *mut u32, + _p_swapchain_images: *mut Image, + ) -> Result { + panic!(concat!( + "Unable to load ", + stringify!(get_swapchain_images_khr) + )) + } + let raw_name = stringify!(vkGetSwapchainImagesKHR); + let cname = ::std::ffi::CString::new(raw_name).unwrap(); + let val = _f(&cname); + if val.is_null() { + get_swapchain_images_khr + } else { + ::std::mem::transmute(val) + } + }, + acquire_next_image_khr: unsafe { + extern "system" fn acquire_next_image_khr( + _device: Device, + _swapchain: SwapchainKHR, + _timeout: u64, + _semaphore: Semaphore, + _fence: Fence, + _p_image_index: *mut u32, + ) -> Result { + panic!(concat!( + "Unable to load ", + stringify!(acquire_next_image_khr) + )) + } + let raw_name = stringify!(vkAcquireNextImageKHR); + let cname = ::std::ffi::CString::new(raw_name).unwrap(); + let val = _f(&cname); + if val.is_null() { + acquire_next_image_khr + } else { + ::std::mem::transmute(val) + } + }, + queue_present_khr: unsafe { + extern "system" fn queue_present_khr( + _queue: Queue, + _p_present_info: *const PresentInfoKHR, + ) -> Result { + panic!(concat!("Unable to load ", stringify!(queue_present_khr))) + } + let raw_name = stringify!(vkQueuePresentKHR); + let cname = ::std::ffi::CString::new(raw_name).unwrap(); + let val = _f(&cname); + if val.is_null() { + queue_present_khr + } else { + ::std::mem::transmute(val) + } + }, + get_device_group_present_capabilities_khr: unsafe { + extern "system" fn get_device_group_present_capabilities_khr( + _device: Device, + _p_device_group_present_capabilities: *mut DeviceGroupPresentCapabilitiesKHR, + ) -> Result { + panic!(concat!( + "Unable to load ", + stringify!(get_device_group_present_capabilities_khr) + )) + } + let raw_name = stringify!(vkGetDeviceGroupPresentCapabilitiesKHR); + let cname = ::std::ffi::CString::new(raw_name).unwrap(); + let val = _f(&cname); + if val.is_null() { + get_device_group_present_capabilities_khr + } else { + ::std::mem::transmute(val) + } + }, + get_device_group_surface_present_modes_khr: unsafe { + extern "system" fn get_device_group_surface_present_modes_khr( + _device: Device, + _surface: SurfaceKHR, + _p_modes: *mut DeviceGroupPresentModeFlagsKHR, + ) -> Result { + panic!(concat!( + "Unable to load ", + stringify!(get_device_group_surface_present_modes_khr) + )) + } + let raw_name = stringify!(vkGetDeviceGroupSurfacePresentModesKHR); + let cname = ::std::ffi::CString::new(raw_name).unwrap(); + let val = _f(&cname); + if val.is_null() { + get_device_group_surface_present_modes_khr + } else { + ::std::mem::transmute(val) + } + }, + get_physical_device_present_rectangles_khr: unsafe { + extern 
"system" fn get_physical_device_present_rectangles_khr( + _physical_device: PhysicalDevice, + _surface: SurfaceKHR, + _p_rect_count: *mut u32, + _p_rects: *mut Rect2D, + ) -> Result { + panic!(concat!( + "Unable to load ", + stringify!(get_physical_device_present_rectangles_khr) + )) + } + let raw_name = stringify!(vkGetPhysicalDevicePresentRectanglesKHR); + let cname = ::std::ffi::CString::new(raw_name).unwrap(); + let val = _f(&cname); + if val.is_null() { + get_physical_device_present_rectangles_khr + } else { + ::std::mem::transmute(val) + } + }, + acquire_next_image2_khr: unsafe { + extern "system" fn acquire_next_image2_khr( + _device: Device, + _p_acquire_info: *const AcquireNextImageInfoKHR, + _p_image_index: *mut u32, + ) -> Result { + panic!(concat!( + "Unable to load ", + stringify!(acquire_next_image2_khr) + )) + } + let raw_name = stringify!(vkAcquireNextImage2KHR); + let cname = ::std::ffi::CString::new(raw_name).unwrap(); + let val = _f(&cname); + if val.is_null() { + acquire_next_image2_khr + } else { + ::std::mem::transmute(val) + } + }, + } + } + #[doc = ""] + pub unsafe fn create_swapchain_khr( + &self, + device: Device, + p_create_info: *const SwapchainCreateInfoKHR, + p_allocator: *const AllocationCallbacks, + p_swapchain: *mut SwapchainKHR, + ) -> Result { + (self.create_swapchain_khr)(device, p_create_info, p_allocator, p_swapchain) + } + #[doc = ""] + pub unsafe fn destroy_swapchain_khr( + &self, + device: Device, + swapchain: SwapchainKHR, + p_allocator: *const AllocationCallbacks, + ) -> c_void { + (self.destroy_swapchain_khr)(device, swapchain, p_allocator) + } + #[doc = ""] + pub unsafe fn get_swapchain_images_khr( + &self, + device: Device, + swapchain: SwapchainKHR, + p_swapchain_image_count: *mut u32, + p_swapchain_images: *mut Image, + ) -> Result { + (self.get_swapchain_images_khr)( + device, + swapchain, + p_swapchain_image_count, + p_swapchain_images, + ) + } + #[doc = ""] + pub unsafe fn acquire_next_image_khr( + &self, + device: Device, + swapchain: SwapchainKHR, + timeout: u64, + semaphore: Semaphore, + fence: Fence, + p_image_index: *mut u32, + ) -> Result { + (self.acquire_next_image_khr)(device, swapchain, timeout, semaphore, fence, p_image_index) + } + #[doc = ""] + pub unsafe fn queue_present_khr( + &self, + queue: Queue, + p_present_info: *const PresentInfoKHR, + ) -> Result { + (self.queue_present_khr)(queue, p_present_info) + } + #[doc = ""] + pub unsafe fn get_device_group_present_capabilities_khr( + &self, + device: Device, + p_device_group_present_capabilities: *mut DeviceGroupPresentCapabilitiesKHR, + ) -> Result { + (self.get_device_group_present_capabilities_khr)( + device, + p_device_group_present_capabilities, + ) + } + #[doc = ""] + pub unsafe fn get_device_group_surface_present_modes_khr( + &self, + device: Device, + surface: SurfaceKHR, + p_modes: *mut DeviceGroupPresentModeFlagsKHR, + ) -> Result { + (self.get_device_group_surface_present_modes_khr)(device, surface, p_modes) + } + #[doc = ""] + pub unsafe fn get_physical_device_present_rectangles_khr( + &self, + physical_device: PhysicalDevice, + surface: SurfaceKHR, + p_rect_count: *mut u32, + p_rects: *mut Rect2D, + ) -> Result { + (self.get_physical_device_present_rectangles_khr)( + physical_device, + surface, + p_rect_count, + p_rects, + ) + } + #[doc = ""] + pub unsafe fn acquire_next_image2_khr( + &self, + device: Device, + p_acquire_info: *const AcquireNextImageInfoKHR, + p_image_index: *mut u32, + ) -> Result { + (self.acquire_next_image2_khr)(device, p_acquire_info, 
p_image_index)
+    }
+}
+#[doc = "Generated from \'VK_KHR_swapchain\'"]
+impl StructureType {
+    pub const SWAPCHAIN_CREATE_INFO_KHR: Self = StructureType(1000001000);
+}
+#[doc = "Generated from \'VK_KHR_swapchain\'"]
+impl StructureType {
+    pub const PRESENT_INFO_KHR: Self = StructureType(1000001001);
+}
+#[doc = "Generated from \'VK_KHR_swapchain\'"]
+impl ImageLayout {
+    pub const PRESENT_SRC_KHR: Self = ImageLayout(1000001002);
+}
+#[doc = "Generated from \'VK_KHR_swapchain\'"]
+impl Result {
+    pub const SUBOPTIMAL_KHR: Self = Result(1000001003);
+}
+#[doc = "Generated from \'VK_KHR_swapchain\'"]
+impl Result {
+    pub const ERROR_OUT_OF_DATE_KHR: Self = Result(-1000001004);
+}
+#[doc = "Generated from \'VK_KHR_swapchain\'"]
+impl ObjectType {
+    pub const SWAPCHAIN_KHR: Self = ObjectType(1000001000);
+}
+#[doc = "Generated from \'VK_KHR_swapchain\'"]
+impl StructureType {
+    pub const DEVICE_GROUP_PRESENT_CAPABILITIES_KHR: Self = StructureType(1000060007);
+}
+#[doc = "Generated from \'VK_KHR_swapchain\'"]
+impl StructureType {
+    pub const IMAGE_SWAPCHAIN_CREATE_INFO_KHR: Self = StructureType(1000060008);
+}
+#[doc = "Generated from \'VK_KHR_swapchain\'"]
+impl StructureType {
+    pub const BIND_IMAGE_MEMORY_SWAPCHAIN_INFO_KHR: Self = StructureType(1000060009);
+}
+#[doc = "Generated from \'VK_KHR_swapchain\'"]
+impl StructureType {
+    pub const ACQUIRE_NEXT_IMAGE_INFO_KHR: Self = StructureType(1000060010);
+}
+#[doc = "Generated from \'VK_KHR_swapchain\'"]
+impl StructureType {
+    pub const DEVICE_GROUP_PRESENT_INFO_KHR: Self = StructureType(1000060011);
+}
+#[doc = "Generated from \'VK_KHR_swapchain\'"]
+impl StructureType {
+    pub const DEVICE_GROUP_SWAPCHAIN_CREATE_INFO_KHR: Self = StructureType(1000060012);
+}
+#[doc = "Generated from \'VK_KHR_swapchain\'"]
+impl SwapchainCreateFlagsKHR {
+    pub const SPLIT_INSTANCE_BIND_REGIONS: Self = SwapchainCreateFlagsKHR(0b1);
+}
+#[doc = "Generated from \'VK_KHR_swapchain\'"]
+impl SwapchainCreateFlagsKHR {
+    pub const PROTECTED: Self = SwapchainCreateFlagsKHR(0b10);
+}
+impl KhrDisplayFn {
+    pub fn name() -> &'static ::std::ffi::CStr {
+        ::std::ffi::CStr::from_bytes_with_nul(b"VK_KHR_display\0").expect("Wrong extension string")
+    }
+}
+#[allow(non_camel_case_types)]
+pub type PFN_vkGetPhysicalDeviceDisplayPropertiesKHR = extern "system" fn(
+    physical_device: PhysicalDevice,
+    p_property_count: *mut u32,
+    p_properties: *mut DisplayPropertiesKHR,
+) -> Result;
+#[allow(non_camel_case_types)]
+pub type PFN_vkGetPhysicalDeviceDisplayPlanePropertiesKHR = extern "system" fn(
+    physical_device: PhysicalDevice,
+    p_property_count: *mut u32,
+    p_properties: *mut DisplayPlanePropertiesKHR,
+) -> Result;
+#[allow(non_camel_case_types)]
+pub type PFN_vkGetDisplayPlaneSupportedDisplaysKHR = extern "system" fn(
+    physical_device: PhysicalDevice,
+    plane_index: u32,
+    p_display_count: *mut u32,
+    p_displays: *mut DisplayKHR,
+) -> Result;
+#[allow(non_camel_case_types)]
+pub type PFN_vkGetDisplayModePropertiesKHR = extern "system" fn(
+    physical_device: PhysicalDevice,
+    display: DisplayKHR,
+    p_property_count: *mut u32,
+    p_properties: *mut DisplayModePropertiesKHR,
+) -> Result;
+#[allow(non_camel_case_types)]
+pub type PFN_vkCreateDisplayModeKHR = extern "system" fn(
+    physical_device: PhysicalDevice,
+    display: DisplayKHR,
+    p_create_info: *const DisplayModeCreateInfoKHR,
+    p_allocator: *const AllocationCallbacks,
+    p_mode: *mut DisplayModeKHR,
+) -> Result;
+#[allow(non_camel_case_types)]
+pub type PFN_vkGetDisplayPlaneCapabilitiesKHR = extern "system" fn(
physical_device: PhysicalDevice, + mode: DisplayModeKHR, + plane_index: u32, + p_capabilities: *mut DisplayPlaneCapabilitiesKHR, +) -> Result; +#[allow(non_camel_case_types)] +pub type PFN_vkCreateDisplayPlaneSurfaceKHR = extern "system" fn( + instance: Instance, + p_create_info: *const DisplaySurfaceCreateInfoKHR, + p_allocator: *const AllocationCallbacks, + p_surface: *mut SurfaceKHR, +) -> Result; +pub struct KhrDisplayFn { + pub get_physical_device_display_properties_khr: extern "system" fn( + physical_device: PhysicalDevice, + p_property_count: *mut u32, + p_properties: *mut DisplayPropertiesKHR, + ) -> Result, + pub get_physical_device_display_plane_properties_khr: extern "system" fn( + physical_device: PhysicalDevice, + p_property_count: *mut u32, + p_properties: *mut DisplayPlanePropertiesKHR, + ) -> Result, + pub get_display_plane_supported_displays_khr: extern "system" fn( + physical_device: PhysicalDevice, + plane_index: u32, + p_display_count: *mut u32, + p_displays: *mut DisplayKHR, + ) -> Result, + pub get_display_mode_properties_khr: extern "system" fn( + physical_device: PhysicalDevice, + display: DisplayKHR, + p_property_count: *mut u32, + p_properties: *mut DisplayModePropertiesKHR, + ) -> Result, + pub create_display_mode_khr: extern "system" fn( + physical_device: PhysicalDevice, + display: DisplayKHR, + p_create_info: *const DisplayModeCreateInfoKHR, + p_allocator: *const AllocationCallbacks, + p_mode: *mut DisplayModeKHR, + ) -> Result, + pub get_display_plane_capabilities_khr: extern "system" fn( + physical_device: PhysicalDevice, + mode: DisplayModeKHR, + plane_index: u32, + p_capabilities: *mut DisplayPlaneCapabilitiesKHR, + ) -> Result, + pub create_display_plane_surface_khr: extern "system" fn( + instance: Instance, + p_create_info: *const DisplaySurfaceCreateInfoKHR, + p_allocator: *const AllocationCallbacks, + p_surface: *mut SurfaceKHR, + ) -> Result, +} +unsafe impl Send for KhrDisplayFn {} +unsafe impl Sync for KhrDisplayFn {} +impl ::std::clone::Clone for KhrDisplayFn { + fn clone(&self) -> Self { + KhrDisplayFn { + get_physical_device_display_properties_khr: self + .get_physical_device_display_properties_khr, + get_physical_device_display_plane_properties_khr: self + .get_physical_device_display_plane_properties_khr, + get_display_plane_supported_displays_khr: self.get_display_plane_supported_displays_khr, + get_display_mode_properties_khr: self.get_display_mode_properties_khr, + create_display_mode_khr: self.create_display_mode_khr, + get_display_plane_capabilities_khr: self.get_display_plane_capabilities_khr, + create_display_plane_surface_khr: self.create_display_plane_surface_khr, + } + } +} +impl KhrDisplayFn { + pub fn load(mut _f: F) -> Self + where + F: FnMut(&::std::ffi::CStr) -> *const c_void, + { + KhrDisplayFn { + get_physical_device_display_properties_khr: unsafe { + extern "system" fn get_physical_device_display_properties_khr( + _physical_device: PhysicalDevice, + _p_property_count: *mut u32, + _p_properties: *mut DisplayPropertiesKHR, + ) -> Result { + panic!(concat!( + "Unable to load ", + stringify!(get_physical_device_display_properties_khr) + )) + } + let raw_name = stringify!(vkGetPhysicalDeviceDisplayPropertiesKHR); + let cname = ::std::ffi::CString::new(raw_name).unwrap(); + let val = _f(&cname); + if val.is_null() { + get_physical_device_display_properties_khr + } else { + ::std::mem::transmute(val) + } + }, + get_physical_device_display_plane_properties_khr: unsafe { + extern "system" fn 
get_physical_device_display_plane_properties_khr( + _physical_device: PhysicalDevice, + _p_property_count: *mut u32, + _p_properties: *mut DisplayPlanePropertiesKHR, + ) -> Result { + panic!(concat!( + "Unable to load ", + stringify!(get_physical_device_display_plane_properties_khr) + )) + } + let raw_name = stringify!(vkGetPhysicalDeviceDisplayPlanePropertiesKHR); + let cname = ::std::ffi::CString::new(raw_name).unwrap(); + let val = _f(&cname); + if val.is_null() { + get_physical_device_display_plane_properties_khr + } else { + ::std::mem::transmute(val) + } + }, + get_display_plane_supported_displays_khr: unsafe { + extern "system" fn get_display_plane_supported_displays_khr( + _physical_device: PhysicalDevice, + _plane_index: u32, + _p_display_count: *mut u32, + _p_displays: *mut DisplayKHR, + ) -> Result { + panic!(concat!( + "Unable to load ", + stringify!(get_display_plane_supported_displays_khr) + )) + } + let raw_name = stringify!(vkGetDisplayPlaneSupportedDisplaysKHR); + let cname = ::std::ffi::CString::new(raw_name).unwrap(); + let val = _f(&cname); + if val.is_null() { + get_display_plane_supported_displays_khr + } else { + ::std::mem::transmute(val) + } + }, + get_display_mode_properties_khr: unsafe { + extern "system" fn get_display_mode_properties_khr( + _physical_device: PhysicalDevice, + _display: DisplayKHR, + _p_property_count: *mut u32, + _p_properties: *mut DisplayModePropertiesKHR, + ) -> Result { + panic!(concat!( + "Unable to load ", + stringify!(get_display_mode_properties_khr) + )) + } + let raw_name = stringify!(vkGetDisplayModePropertiesKHR); + let cname = ::std::ffi::CString::new(raw_name).unwrap(); + let val = _f(&cname); + if val.is_null() { + get_display_mode_properties_khr + } else { + ::std::mem::transmute(val) + } + }, + create_display_mode_khr: unsafe { + extern "system" fn create_display_mode_khr( + _physical_device: PhysicalDevice, + _display: DisplayKHR, + _p_create_info: *const DisplayModeCreateInfoKHR, + _p_allocator: *const AllocationCallbacks, + _p_mode: *mut DisplayModeKHR, + ) -> Result { + panic!(concat!( + "Unable to load ", + stringify!(create_display_mode_khr) + )) + } + let raw_name = stringify!(vkCreateDisplayModeKHR); + let cname = ::std::ffi::CString::new(raw_name).unwrap(); + let val = _f(&cname); + if val.is_null() { + create_display_mode_khr + } else { + ::std::mem::transmute(val) + } + }, + get_display_plane_capabilities_khr: unsafe { + extern "system" fn get_display_plane_capabilities_khr( + _physical_device: PhysicalDevice, + _mode: DisplayModeKHR, + _plane_index: u32, + _p_capabilities: *mut DisplayPlaneCapabilitiesKHR, + ) -> Result { + panic!(concat!( + "Unable to load ", + stringify!(get_display_plane_capabilities_khr) + )) + } + let raw_name = stringify!(vkGetDisplayPlaneCapabilitiesKHR); + let cname = ::std::ffi::CString::new(raw_name).unwrap(); + let val = _f(&cname); + if val.is_null() { + get_display_plane_capabilities_khr + } else { + ::std::mem::transmute(val) + } + }, + create_display_plane_surface_khr: unsafe { + extern "system" fn create_display_plane_surface_khr( + _instance: Instance, + _p_create_info: *const DisplaySurfaceCreateInfoKHR, + _p_allocator: *const AllocationCallbacks, + _p_surface: *mut SurfaceKHR, + ) -> Result { + panic!(concat!( + "Unable to load ", + stringify!(create_display_plane_surface_khr) + )) + } + let raw_name = stringify!(vkCreateDisplayPlaneSurfaceKHR); + let cname = ::std::ffi::CString::new(raw_name).unwrap(); + let val = _f(&cname); + if val.is_null() { + create_display_plane_surface_khr 
+ } else { + ::std::mem::transmute(val) + } + }, + } + } + #[doc = ""] + pub unsafe fn get_physical_device_display_properties_khr( + &self, + physical_device: PhysicalDevice, + p_property_count: *mut u32, + p_properties: *mut DisplayPropertiesKHR, + ) -> Result { + (self.get_physical_device_display_properties_khr)( + physical_device, + p_property_count, + p_properties, + ) + } + #[doc = ""] + pub unsafe fn get_physical_device_display_plane_properties_khr( + &self, + physical_device: PhysicalDevice, + p_property_count: *mut u32, + p_properties: *mut DisplayPlanePropertiesKHR, + ) -> Result { + (self.get_physical_device_display_plane_properties_khr)( + physical_device, + p_property_count, + p_properties, + ) + } + #[doc = ""] + pub unsafe fn get_display_plane_supported_displays_khr( + &self, + physical_device: PhysicalDevice, + plane_index: u32, + p_display_count: *mut u32, + p_displays: *mut DisplayKHR, + ) -> Result { + (self.get_display_plane_supported_displays_khr)( + physical_device, + plane_index, + p_display_count, + p_displays, + ) + } + #[doc = ""] + pub unsafe fn get_display_mode_properties_khr( + &self, + physical_device: PhysicalDevice, + display: DisplayKHR, + p_property_count: *mut u32, + p_properties: *mut DisplayModePropertiesKHR, + ) -> Result { + (self.get_display_mode_properties_khr)( + physical_device, + display, + p_property_count, + p_properties, + ) + } + #[doc = ""] + pub unsafe fn create_display_mode_khr( + &self, + physical_device: PhysicalDevice, + display: DisplayKHR, + p_create_info: *const DisplayModeCreateInfoKHR, + p_allocator: *const AllocationCallbacks, + p_mode: *mut DisplayModeKHR, + ) -> Result { + (self.create_display_mode_khr)(physical_device, display, p_create_info, p_allocator, p_mode) + } + #[doc = ""] + pub unsafe fn get_display_plane_capabilities_khr( + &self, + physical_device: PhysicalDevice, + mode: DisplayModeKHR, + plane_index: u32, + p_capabilities: *mut DisplayPlaneCapabilitiesKHR, + ) -> Result { + (self.get_display_plane_capabilities_khr)( + physical_device, + mode, + plane_index, + p_capabilities, + ) + } + #[doc = ""] + pub unsafe fn create_display_plane_surface_khr( + &self, + instance: Instance, + p_create_info: *const DisplaySurfaceCreateInfoKHR, + p_allocator: *const AllocationCallbacks, + p_surface: *mut SurfaceKHR, + ) -> Result { + (self.create_display_plane_surface_khr)(instance, p_create_info, p_allocator, p_surface) + } +} +#[doc = "Generated from \'VK_KHR_display\'"] +impl StructureType { + pub const DISPLAY_MODE_CREATE_INFO_KHR: Self = StructureType(1000002000); +} +#[doc = "Generated from \'VK_KHR_display\'"] +impl StructureType { + pub const DISPLAY_SURFACE_CREATE_INFO_KHR: Self = StructureType(1000002001); +} +#[doc = "Generated from \'VK_KHR_display\'"] +impl ObjectType { + pub const DISPLAY_KHR: Self = ObjectType(1000002000); +} +#[doc = "Generated from \'VK_KHR_display\'"] +impl ObjectType { + pub const DISPLAY_MODE_KHR: Self = ObjectType(1000002001); +} +impl KhrDisplaySwapchainFn { + pub fn name() -> &'static ::std::ffi::CStr { + ::std::ffi::CStr::from_bytes_with_nul(b"VK_KHR_display_swapchain\0") + .expect("Wrong extension string") + } +} +#[allow(non_camel_case_types)] +pub type PFN_vkCreateSharedSwapchainsKHR = extern "system" fn( + device: Device, + swapchain_count: u32, + p_create_infos: *const SwapchainCreateInfoKHR, + p_allocator: *const AllocationCallbacks, + p_swapchains: *mut SwapchainKHR, +) -> Result; +pub struct KhrDisplaySwapchainFn { + pub create_shared_swapchains_khr: extern "system" fn( + device: 
Device, + swapchain_count: u32, + p_create_infos: *const SwapchainCreateInfoKHR, + p_allocator: *const AllocationCallbacks, + p_swapchains: *mut SwapchainKHR, + ) -> Result, +} +unsafe impl Send for KhrDisplaySwapchainFn {} +unsafe impl Sync for KhrDisplaySwapchainFn {} +impl ::std::clone::Clone for KhrDisplaySwapchainFn { + fn clone(&self) -> Self { + KhrDisplaySwapchainFn { + create_shared_swapchains_khr: self.create_shared_swapchains_khr, + } + } +} +impl KhrDisplaySwapchainFn { + pub fn load(mut _f: F) -> Self + where + F: FnMut(&::std::ffi::CStr) -> *const c_void, + { + KhrDisplaySwapchainFn { + create_shared_swapchains_khr: unsafe { + extern "system" fn create_shared_swapchains_khr( + _device: Device, + _swapchain_count: u32, + _p_create_infos: *const SwapchainCreateInfoKHR, + _p_allocator: *const AllocationCallbacks, + _p_swapchains: *mut SwapchainKHR, + ) -> Result { + panic!(concat!( + "Unable to load ", + stringify!(create_shared_swapchains_khr) + )) + } + let raw_name = stringify!(vkCreateSharedSwapchainsKHR); + let cname = ::std::ffi::CString::new(raw_name).unwrap(); + let val = _f(&cname); + if val.is_null() { + create_shared_swapchains_khr + } else { + ::std::mem::transmute(val) + } + }, + } + } + #[doc = ""] + pub unsafe fn create_shared_swapchains_khr( + &self, + device: Device, + swapchain_count: u32, + p_create_infos: *const SwapchainCreateInfoKHR, + p_allocator: *const AllocationCallbacks, + p_swapchains: *mut SwapchainKHR, + ) -> Result { + (self.create_shared_swapchains_khr)( + device, + swapchain_count, + p_create_infos, + p_allocator, + p_swapchains, + ) + } +} +#[doc = "Generated from \'VK_KHR_display_swapchain\'"] +impl StructureType { + pub const DISPLAY_PRESENT_INFO_KHR: Self = StructureType(1000003000); +} +#[doc = "Generated from \'VK_KHR_display_swapchain\'"] +impl Result { + pub const ERROR_INCOMPATIBLE_DISPLAY_KHR: Self = Result(-1000003001); +} +impl KhrXlibSurfaceFn { + pub fn name() -> &'static ::std::ffi::CStr { + ::std::ffi::CStr::from_bytes_with_nul(b"VK_KHR_xlib_surface\0") + .expect("Wrong extension string") + } +} +#[allow(non_camel_case_types)] +pub type PFN_vkCreateXlibSurfaceKHR = extern "system" fn( + instance: Instance, + p_create_info: *const XlibSurfaceCreateInfoKHR, + p_allocator: *const AllocationCallbacks, + p_surface: *mut SurfaceKHR, +) -> Result; +#[allow(non_camel_case_types)] +pub type PFN_vkGetPhysicalDeviceXlibPresentationSupportKHR = extern "system" fn( + physical_device: PhysicalDevice, + queue_family_index: u32, + dpy: *mut Display, + visual_id: VisualID, +) -> Bool32; +pub struct KhrXlibSurfaceFn { + pub create_xlib_surface_khr: extern "system" fn( + instance: Instance, + p_create_info: *const XlibSurfaceCreateInfoKHR, + p_allocator: *const AllocationCallbacks, + p_surface: *mut SurfaceKHR, + ) -> Result, + pub get_physical_device_xlib_presentation_support_khr: extern "system" fn( + physical_device: PhysicalDevice, + queue_family_index: u32, + dpy: *mut Display, + visual_id: VisualID, + ) -> Bool32, +} +unsafe impl Send for KhrXlibSurfaceFn {} +unsafe impl Sync for KhrXlibSurfaceFn {} +impl ::std::clone::Clone for KhrXlibSurfaceFn { + fn clone(&self) -> Self { + KhrXlibSurfaceFn { + create_xlib_surface_khr: self.create_xlib_surface_khr, + get_physical_device_xlib_presentation_support_khr: self + .get_physical_device_xlib_presentation_support_khr, + } + } +} +impl KhrXlibSurfaceFn { + pub fn load(mut _f: F) -> Self + where + F: FnMut(&::std::ffi::CStr) -> *const c_void, + { + KhrXlibSurfaceFn { + create_xlib_surface_khr: unsafe { 
+ extern "system" fn create_xlib_surface_khr( + _instance: Instance, + _p_create_info: *const XlibSurfaceCreateInfoKHR, + _p_allocator: *const AllocationCallbacks, + _p_surface: *mut SurfaceKHR, + ) -> Result { + panic!(concat!( + "Unable to load ", + stringify!(create_xlib_surface_khr) + )) + } + let raw_name = stringify!(vkCreateXlibSurfaceKHR); + let cname = ::std::ffi::CString::new(raw_name).unwrap(); + let val = _f(&cname); + if val.is_null() { + create_xlib_surface_khr + } else { + ::std::mem::transmute(val) + } + }, + get_physical_device_xlib_presentation_support_khr: unsafe { + extern "system" fn get_physical_device_xlib_presentation_support_khr( + _physical_device: PhysicalDevice, + _queue_family_index: u32, + _dpy: *mut Display, + _visual_id: VisualID, + ) -> Bool32 { + panic!(concat!( + "Unable to load ", + stringify!(get_physical_device_xlib_presentation_support_khr) + )) + } + let raw_name = stringify!(vkGetPhysicalDeviceXlibPresentationSupportKHR); + let cname = ::std::ffi::CString::new(raw_name).unwrap(); + let val = _f(&cname); + if val.is_null() { + get_physical_device_xlib_presentation_support_khr + } else { + ::std::mem::transmute(val) + } + }, + } + } + #[doc = ""] + pub unsafe fn create_xlib_surface_khr( + &self, + instance: Instance, + p_create_info: *const XlibSurfaceCreateInfoKHR, + p_allocator: *const AllocationCallbacks, + p_surface: *mut SurfaceKHR, + ) -> Result { + (self.create_xlib_surface_khr)(instance, p_create_info, p_allocator, p_surface) + } + #[doc = ""] + pub unsafe fn get_physical_device_xlib_presentation_support_khr( + &self, + physical_device: PhysicalDevice, + queue_family_index: u32, + dpy: *mut Display, + visual_id: VisualID, + ) -> Bool32 { + (self.get_physical_device_xlib_presentation_support_khr)( + physical_device, + queue_family_index, + dpy, + visual_id, + ) + } +} +#[doc = "Generated from \'VK_KHR_xlib_surface\'"] +impl StructureType { + pub const XLIB_SURFACE_CREATE_INFO_KHR: Self = StructureType(1000004000); +} +impl KhrXcbSurfaceFn { + pub fn name() -> &'static ::std::ffi::CStr { + ::std::ffi::CStr::from_bytes_with_nul(b"VK_KHR_xcb_surface\0") + .expect("Wrong extension string") + } +} +#[allow(non_camel_case_types)] +pub type PFN_vkCreateXcbSurfaceKHR = extern "system" fn( + instance: Instance, + p_create_info: *const XcbSurfaceCreateInfoKHR, + p_allocator: *const AllocationCallbacks, + p_surface: *mut SurfaceKHR, +) -> Result; +#[allow(non_camel_case_types)] +pub type PFN_vkGetPhysicalDeviceXcbPresentationSupportKHR = extern "system" fn( + physical_device: PhysicalDevice, + queue_family_index: u32, + connection: *mut xcb_connection_t, + visual_id: xcb_visualid_t, +) -> Bool32; +pub struct KhrXcbSurfaceFn { + pub create_xcb_surface_khr: extern "system" fn( + instance: Instance, + p_create_info: *const XcbSurfaceCreateInfoKHR, + p_allocator: *const AllocationCallbacks, + p_surface: *mut SurfaceKHR, + ) -> Result, + pub get_physical_device_xcb_presentation_support_khr: extern "system" fn( + physical_device: PhysicalDevice, + queue_family_index: u32, + connection: *mut xcb_connection_t, + visual_id: xcb_visualid_t, + ) -> Bool32, +} +unsafe impl Send for KhrXcbSurfaceFn {} +unsafe impl Sync for KhrXcbSurfaceFn {} +impl ::std::clone::Clone for KhrXcbSurfaceFn { + fn clone(&self) -> Self { + KhrXcbSurfaceFn { + create_xcb_surface_khr: self.create_xcb_surface_khr, + get_physical_device_xcb_presentation_support_khr: self + .get_physical_device_xcb_presentation_support_khr, + } + } +} +impl KhrXcbSurfaceFn { + pub fn load(mut _f: F) -> Self + 
where + F: FnMut(&::std::ffi::CStr) -> *const c_void, + { + KhrXcbSurfaceFn { + create_xcb_surface_khr: unsafe { + extern "system" fn create_xcb_surface_khr( + _instance: Instance, + _p_create_info: *const XcbSurfaceCreateInfoKHR, + _p_allocator: *const AllocationCallbacks, + _p_surface: *mut SurfaceKHR, + ) -> Result { + panic!(concat!( + "Unable to load ", + stringify!(create_xcb_surface_khr) + )) + } + let raw_name = stringify!(vkCreateXcbSurfaceKHR); + let cname = ::std::ffi::CString::new(raw_name).unwrap(); + let val = _f(&cname); + if val.is_null() { + create_xcb_surface_khr + } else { + ::std::mem::transmute(val) + } + }, + get_physical_device_xcb_presentation_support_khr: unsafe { + extern "system" fn get_physical_device_xcb_presentation_support_khr( + _physical_device: PhysicalDevice, + _queue_family_index: u32, + _connection: *mut xcb_connection_t, + _visual_id: xcb_visualid_t, + ) -> Bool32 { + panic!(concat!( + "Unable to load ", + stringify!(get_physical_device_xcb_presentation_support_khr) + )) + } + let raw_name = stringify!(vkGetPhysicalDeviceXcbPresentationSupportKHR); + let cname = ::std::ffi::CString::new(raw_name).unwrap(); + let val = _f(&cname); + if val.is_null() { + get_physical_device_xcb_presentation_support_khr + } else { + ::std::mem::transmute(val) + } + }, + } + } + #[doc = ""] + pub unsafe fn create_xcb_surface_khr( + &self, + instance: Instance, + p_create_info: *const XcbSurfaceCreateInfoKHR, + p_allocator: *const AllocationCallbacks, + p_surface: *mut SurfaceKHR, + ) -> Result { + (self.create_xcb_surface_khr)(instance, p_create_info, p_allocator, p_surface) + } + #[doc = ""] + pub unsafe fn get_physical_device_xcb_presentation_support_khr( + &self, + physical_device: PhysicalDevice, + queue_family_index: u32, + connection: *mut xcb_connection_t, + visual_id: xcb_visualid_t, + ) -> Bool32 { + (self.get_physical_device_xcb_presentation_support_khr)( + physical_device, + queue_family_index, + connection, + visual_id, + ) + } +} +#[doc = "Generated from \'VK_KHR_xcb_surface\'"] +impl StructureType { + pub const XCB_SURFACE_CREATE_INFO_KHR: Self = StructureType(1000005000); +} +impl KhrWaylandSurfaceFn { + pub fn name() -> &'static ::std::ffi::CStr { + ::std::ffi::CStr::from_bytes_with_nul(b"VK_KHR_wayland_surface\0") + .expect("Wrong extension string") + } +} +#[allow(non_camel_case_types)] +pub type PFN_vkCreateWaylandSurfaceKHR = extern "system" fn( + instance: Instance, + p_create_info: *const WaylandSurfaceCreateInfoKHR, + p_allocator: *const AllocationCallbacks, + p_surface: *mut SurfaceKHR, +) -> Result; +#[allow(non_camel_case_types)] +pub type PFN_vkGetPhysicalDeviceWaylandPresentationSupportKHR = extern "system" fn( + physical_device: PhysicalDevice, + queue_family_index: u32, + display: *mut wl_display, +) -> Bool32; +pub struct KhrWaylandSurfaceFn { + pub create_wayland_surface_khr: extern "system" fn( + instance: Instance, + p_create_info: *const WaylandSurfaceCreateInfoKHR, + p_allocator: *const AllocationCallbacks, + p_surface: *mut SurfaceKHR, + ) -> Result, + pub get_physical_device_wayland_presentation_support_khr: extern "system" fn( + physical_device: PhysicalDevice, + queue_family_index: u32, + display: *mut wl_display, + ) -> Bool32, +} +unsafe impl Send for KhrWaylandSurfaceFn {} +unsafe impl Sync for KhrWaylandSurfaceFn {} +impl ::std::clone::Clone for KhrWaylandSurfaceFn { + fn clone(&self) -> Self { + KhrWaylandSurfaceFn { + create_wayland_surface_khr: self.create_wayland_surface_khr, + 
get_physical_device_wayland_presentation_support_khr: self
+                .get_physical_device_wayland_presentation_support_khr,
+        }
+    }
+}
+impl KhrWaylandSurfaceFn {
+    pub fn load<F>(mut _f: F) -> Self
+    where
+        F: FnMut(&::std::ffi::CStr) -> *const c_void,
+    {
+        KhrWaylandSurfaceFn {
+            create_wayland_surface_khr: unsafe {
+                extern "system" fn create_wayland_surface_khr(
+                    _instance: Instance,
+                    _p_create_info: *const WaylandSurfaceCreateInfoKHR,
+                    _p_allocator: *const AllocationCallbacks,
+                    _p_surface: *mut SurfaceKHR,
+                ) -> Result {
+                    panic!(concat!(
+                        "Unable to load ",
+                        stringify!(create_wayland_surface_khr)
+                    ))
+                }
+                let raw_name = stringify!(vkCreateWaylandSurfaceKHR);
+                let cname = ::std::ffi::CString::new(raw_name).unwrap();
+                let val = _f(&cname);
+                if val.is_null() {
+                    create_wayland_surface_khr
+                } else {
+                    ::std::mem::transmute(val)
+                }
+            },
+            get_physical_device_wayland_presentation_support_khr: unsafe {
+                extern "system" fn get_physical_device_wayland_presentation_support_khr(
+                    _physical_device: PhysicalDevice,
+                    _queue_family_index: u32,
+                    _display: *mut wl_display,
+                ) -> Bool32 {
+                    panic!(concat!(
+                        "Unable to load ",
+                        stringify!(get_physical_device_wayland_presentation_support_khr)
+                    ))
+                }
+                let raw_name = stringify!(vkGetPhysicalDeviceWaylandPresentationSupportKHR);
+                let cname = ::std::ffi::CString::new(raw_name).unwrap();
+                let val = _f(&cname);
+                if val.is_null() {
+                    get_physical_device_wayland_presentation_support_khr
+                } else {
+                    ::std::mem::transmute(val)
+                }
+            },
+        }
+    }
+    #[doc = ""]
+    pub unsafe fn create_wayland_surface_khr(
+        &self,
+        instance: Instance,
+        p_create_info: *const WaylandSurfaceCreateInfoKHR,
+        p_allocator: *const AllocationCallbacks,
+        p_surface: *mut SurfaceKHR,
+    ) -> Result {
+        (self.create_wayland_surface_khr)(instance, p_create_info, p_allocator, p_surface)
+    }
+    #[doc = ""]
+    pub unsafe fn get_physical_device_wayland_presentation_support_khr(
+        &self,
+        physical_device: PhysicalDevice,
+        queue_family_index: u32,
+        display: *mut wl_display,
+    ) -> Bool32 {
+        (self.get_physical_device_wayland_presentation_support_khr)(
+            physical_device,
+            queue_family_index,
+            display,
+        )
+    }
+}
+#[doc = "Generated from \'VK_KHR_wayland_surface\'"]
+impl StructureType {
+    pub const WAYLAND_SURFACE_CREATE_INFO_KHR: Self = StructureType(1000006000);
+}
+impl KhrMirSurfaceFn {
+    pub fn name() -> &'static ::std::ffi::CStr {
+        ::std::ffi::CStr::from_bytes_with_nul(b"VK_KHR_mir_surface\0")
+            .expect("Wrong extension string")
+    }
+}
+pub struct KhrMirSurfaceFn {}
+unsafe impl Send for KhrMirSurfaceFn {}
+unsafe impl Sync for KhrMirSurfaceFn {}
+impl ::std::clone::Clone for KhrMirSurfaceFn {
+    fn clone(&self) -> Self {
+        KhrMirSurfaceFn {}
+    }
+}
+impl KhrMirSurfaceFn {
+    pub fn load<F>(mut _f: F) -> Self
+    where
+        F: FnMut(&::std::ffi::CStr) -> *const c_void,
+    {
+        KhrMirSurfaceFn {}
+    }
+}
+impl KhrAndroidSurfaceFn {
+    pub fn name() -> &'static ::std::ffi::CStr {
+        ::std::ffi::CStr::from_bytes_with_nul(b"VK_KHR_android_surface\0")
+            .expect("Wrong extension string")
+    }
+}
+#[allow(non_camel_case_types)]
+pub type PFN_vkCreateAndroidSurfaceKHR = extern "system" fn(
+    instance: Instance,
+    p_create_info: *const AndroidSurfaceCreateInfoKHR,
+    p_allocator: *const AllocationCallbacks,
+    p_surface: *mut SurfaceKHR,
+) -> Result;
+pub struct KhrAndroidSurfaceFn {
+    pub create_android_surface_khr: extern "system" fn(
+        instance: Instance,
+        p_create_info: *const AndroidSurfaceCreateInfoKHR,
+        p_allocator: *const AllocationCallbacks,
+        p_surface: *mut SurfaceKHR,
+    ) -> Result,
+}
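The vendored bindings repeat one loading pattern for every extension entry point: `load` asks the caller-supplied loader closure for a symbol by its `vk*` name and, when the lookup returns null, installs a stub that panics with the function's name if it is ever called. Below is a minimal, self-contained sketch of that pattern; the `vkGreet` symbol and `GreetFn` table are hypothetical and exist only to illustrate the mechanism, they are not part of the vendored code.

use std::ffi::CStr;
use std::os::raw::c_void;

type Greet = extern "system" fn();

struct GreetFn {
    greet: Greet,
}

impl GreetFn {
    fn load<F>(mut f: F) -> Self
    where
        F: FnMut(&CStr) -> *const c_void,
    {
        GreetFn {
            greet: unsafe {
                // Fallback stub, installed when the loader cannot resolve the symbol.
                extern "system" fn greet_stub() {
                    panic!("Unable to load vkGreet")
                }
                let name = CStr::from_bytes_with_nul(b"vkGreet\0").unwrap();
                let val = f(name);
                if val.is_null() {
                    greet_stub
                } else {
                    // As in the generated tables: reinterpret the raw symbol
                    // address as the typed function pointer.
                    std::mem::transmute::<*const c_void, Greet>(val)
                }
            },
        }
    }
}

fn main() {
    // A loader that finds nothing, so the panicking stub is installed.
    let table = GreetFn::load(|_name| std::ptr::null());
    let _ = table.greet; // invoking it would panic with "Unable to load vkGreet"
}

The point of the fallback is that table construction itself never fails: a missing entry point only surfaces as an error if, and when, something actually calls it.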
+unsafe impl Send for KhrAndroidSurfaceFn {} +unsafe impl Sync for KhrAndroidSurfaceFn {} +impl ::std::clone::Clone for KhrAndroidSurfaceFn { + fn clone(&self) -> Self { + KhrAndroidSurfaceFn { + create_android_surface_khr: self.create_android_surface_khr, + } + } +} +impl KhrAndroidSurfaceFn { + pub fn load(mut _f: F) -> Self + where + F: FnMut(&::std::ffi::CStr) -> *const c_void, + { + KhrAndroidSurfaceFn { + create_android_surface_khr: unsafe { + extern "system" fn create_android_surface_khr( + _instance: Instance, + _p_create_info: *const AndroidSurfaceCreateInfoKHR, + _p_allocator: *const AllocationCallbacks, + _p_surface: *mut SurfaceKHR, + ) -> Result { + panic!(concat!( + "Unable to load ", + stringify!(create_android_surface_khr) + )) + } + let raw_name = stringify!(vkCreateAndroidSurfaceKHR); + let cname = ::std::ffi::CString::new(raw_name).unwrap(); + let val = _f(&cname); + if val.is_null() { + create_android_surface_khr + } else { + ::std::mem::transmute(val) + } + }, + } + } + #[doc = ""] + pub unsafe fn create_android_surface_khr( + &self, + instance: Instance, + p_create_info: *const AndroidSurfaceCreateInfoKHR, + p_allocator: *const AllocationCallbacks, + p_surface: *mut SurfaceKHR, + ) -> Result { + (self.create_android_surface_khr)(instance, p_create_info, p_allocator, p_surface) + } +} +#[doc = "Generated from \'VK_KHR_android_surface\'"] +impl StructureType { + pub const ANDROID_SURFACE_CREATE_INFO_KHR: Self = StructureType(1000008000); +} +impl KhrWin32SurfaceFn { + pub fn name() -> &'static ::std::ffi::CStr { + ::std::ffi::CStr::from_bytes_with_nul(b"VK_KHR_win32_surface\0") + .expect("Wrong extension string") + } +} +#[allow(non_camel_case_types)] +pub type PFN_vkCreateWin32SurfaceKHR = extern "system" fn( + instance: Instance, + p_create_info: *const Win32SurfaceCreateInfoKHR, + p_allocator: *const AllocationCallbacks, + p_surface: *mut SurfaceKHR, +) -> Result; +#[allow(non_camel_case_types)] +pub type PFN_vkGetPhysicalDeviceWin32PresentationSupportKHR = + extern "system" fn(physical_device: PhysicalDevice, queue_family_index: u32) -> Bool32; +pub struct KhrWin32SurfaceFn { + pub create_win32_surface_khr: extern "system" fn( + instance: Instance, + p_create_info: *const Win32SurfaceCreateInfoKHR, + p_allocator: *const AllocationCallbacks, + p_surface: *mut SurfaceKHR, + ) -> Result, + pub get_physical_device_win32_presentation_support_khr: + extern "system" fn(physical_device: PhysicalDevice, queue_family_index: u32) -> Bool32, +} +unsafe impl Send for KhrWin32SurfaceFn {} +unsafe impl Sync for KhrWin32SurfaceFn {} +impl ::std::clone::Clone for KhrWin32SurfaceFn { + fn clone(&self) -> Self { + KhrWin32SurfaceFn { + create_win32_surface_khr: self.create_win32_surface_khr, + get_physical_device_win32_presentation_support_khr: self + .get_physical_device_win32_presentation_support_khr, + } + } +} +impl KhrWin32SurfaceFn { + pub fn load(mut _f: F) -> Self + where + F: FnMut(&::std::ffi::CStr) -> *const c_void, + { + KhrWin32SurfaceFn { + create_win32_surface_khr: unsafe { + extern "system" fn create_win32_surface_khr( + _instance: Instance, + _p_create_info: *const Win32SurfaceCreateInfoKHR, + _p_allocator: *const AllocationCallbacks, + _p_surface: *mut SurfaceKHR, + ) -> Result { + panic!(concat!( + "Unable to load ", + stringify!(create_win32_surface_khr) + )) + } + let raw_name = stringify!(vkCreateWin32SurfaceKHR); + let cname = ::std::ffi::CString::new(raw_name).unwrap(); + let val = _f(&cname); + if val.is_null() { + create_win32_surface_khr + } else { + 
::std::mem::transmute(val) + } + }, + get_physical_device_win32_presentation_support_khr: unsafe { + extern "system" fn get_physical_device_win32_presentation_support_khr( + _physical_device: PhysicalDevice, + _queue_family_index: u32, + ) -> Bool32 { + panic!(concat!( + "Unable to load ", + stringify!(get_physical_device_win32_presentation_support_khr) + )) + } + let raw_name = stringify!(vkGetPhysicalDeviceWin32PresentationSupportKHR); + let cname = ::std::ffi::CString::new(raw_name).unwrap(); + let val = _f(&cname); + if val.is_null() { + get_physical_device_win32_presentation_support_khr + } else { + ::std::mem::transmute(val) + } + }, + } + } + #[doc = ""] + pub unsafe fn create_win32_surface_khr( + &self, + instance: Instance, + p_create_info: *const Win32SurfaceCreateInfoKHR, + p_allocator: *const AllocationCallbacks, + p_surface: *mut SurfaceKHR, + ) -> Result { + (self.create_win32_surface_khr)(instance, p_create_info, p_allocator, p_surface) + } + #[doc = ""] + pub unsafe fn get_physical_device_win32_presentation_support_khr( + &self, + physical_device: PhysicalDevice, + queue_family_index: u32, + ) -> Bool32 { + (self.get_physical_device_win32_presentation_support_khr)( + physical_device, + queue_family_index, + ) + } +} +#[doc = "Generated from \'VK_KHR_win32_surface\'"] +impl StructureType { + pub const WIN32_SURFACE_CREATE_INFO_KHR: Self = StructureType(1000009000); +} +impl AndroidNativeBufferFn { + pub fn name() -> &'static ::std::ffi::CStr { + ::std::ffi::CStr::from_bytes_with_nul(b"VK_ANDROID_native_buffer\0") + .expect("Wrong extension string") + } +} +#[allow(non_camel_case_types)] +pub type PFN_vkGetSwapchainGrallocUsageANDROID = extern "system" fn( + device: Device, + format: Format, + image_usage: ImageUsageFlags, + gralloc_usage: *mut c_int, +) -> Result; +#[allow(non_camel_case_types)] +pub type PFN_vkAcquireImageANDROID = extern "system" fn( + device: Device, + image: Image, + native_fence_fd: c_int, + semaphore: Semaphore, + fence: Fence, +) -> Result; +#[allow(non_camel_case_types)] +pub type PFN_vkQueueSignalReleaseImageANDROID = extern "system" fn( + queue: Queue, + wait_semaphore_count: u32, + p_wait_semaphores: *const Semaphore, + image: Image, + p_native_fence_fd: *mut c_int, +) -> Result; +pub struct AndroidNativeBufferFn { + pub get_swapchain_gralloc_usage_android: extern "system" fn( + device: Device, + format: Format, + image_usage: ImageUsageFlags, + gralloc_usage: *mut c_int, + ) -> Result, + pub acquire_image_android: extern "system" fn( + device: Device, + image: Image, + native_fence_fd: c_int, + semaphore: Semaphore, + fence: Fence, + ) -> Result, + pub queue_signal_release_image_android: extern "system" fn( + queue: Queue, + wait_semaphore_count: u32, + p_wait_semaphores: *const Semaphore, + image: Image, + p_native_fence_fd: *mut c_int, + ) -> Result, +} +unsafe impl Send for AndroidNativeBufferFn {} +unsafe impl Sync for AndroidNativeBufferFn {} +impl ::std::clone::Clone for AndroidNativeBufferFn { + fn clone(&self) -> Self { + AndroidNativeBufferFn { + get_swapchain_gralloc_usage_android: self.get_swapchain_gralloc_usage_android, + acquire_image_android: self.acquire_image_android, + queue_signal_release_image_android: self.queue_signal_release_image_android, + } + } +} +impl AndroidNativeBufferFn { + pub fn load(mut _f: F) -> Self + where + F: FnMut(&::std::ffi::CStr) -> *const c_void, + { + AndroidNativeBufferFn { + get_swapchain_gralloc_usage_android: unsafe { + extern "system" fn get_swapchain_gralloc_usage_android( + _device: Device, + 
_format: Format, + _image_usage: ImageUsageFlags, + _gralloc_usage: *mut c_int, + ) -> Result { + panic!(concat!( + "Unable to load ", + stringify!(get_swapchain_gralloc_usage_android) + )) + } + let raw_name = stringify!(vkGetSwapchainGrallocUsageANDROID); + let cname = ::std::ffi::CString::new(raw_name).unwrap(); + let val = _f(&cname); + if val.is_null() { + get_swapchain_gralloc_usage_android + } else { + ::std::mem::transmute(val) + } + }, + acquire_image_android: unsafe { + extern "system" fn acquire_image_android( + _device: Device, + _image: Image, + _native_fence_fd: c_int, + _semaphore: Semaphore, + _fence: Fence, + ) -> Result { + panic!(concat!( + "Unable to load ", + stringify!(acquire_image_android) + )) + } + let raw_name = stringify!(vkAcquireImageANDROID); + let cname = ::std::ffi::CString::new(raw_name).unwrap(); + let val = _f(&cname); + if val.is_null() { + acquire_image_android + } else { + ::std::mem::transmute(val) + } + }, + queue_signal_release_image_android: unsafe { + extern "system" fn queue_signal_release_image_android( + _queue: Queue, + _wait_semaphore_count: u32, + _p_wait_semaphores: *const Semaphore, + _image: Image, + _p_native_fence_fd: *mut c_int, + ) -> Result { + panic!(concat!( + "Unable to load ", + stringify!(queue_signal_release_image_android) + )) + } + let raw_name = stringify!(vkQueueSignalReleaseImageANDROID); + let cname = ::std::ffi::CString::new(raw_name).unwrap(); + let val = _f(&cname); + if val.is_null() { + queue_signal_release_image_android + } else { + ::std::mem::transmute(val) + } + }, + } + } + #[doc = ""] + pub unsafe fn get_swapchain_gralloc_usage_android( + &self, + device: Device, + format: Format, + image_usage: ImageUsageFlags, + gralloc_usage: *mut c_int, + ) -> Result { + (self.get_swapchain_gralloc_usage_android)(device, format, image_usage, gralloc_usage) + } + #[doc = ""] + pub unsafe fn acquire_image_android( + &self, + device: Device, + image: Image, + native_fence_fd: c_int, + semaphore: Semaphore, + fence: Fence, + ) -> Result { + (self.acquire_image_android)(device, image, native_fence_fd, semaphore, fence) + } + #[doc = ""] + pub unsafe fn queue_signal_release_image_android( + &self, + queue: Queue, + wait_semaphore_count: u32, + p_wait_semaphores: *const Semaphore, + image: Image, + p_native_fence_fd: *mut c_int, + ) -> Result { + (self.queue_signal_release_image_android)( + queue, + wait_semaphore_count, + p_wait_semaphores, + image, + p_native_fence_fd, + ) + } +} +#[doc = "Generated from \'VK_ANDROID_native_buffer\'"] +impl StructureType { + pub const NATIVE_BUFFER_ANDROID: Self = StructureType(1000010000); +} +impl ExtDebugReportFn { + pub fn name() -> &'static ::std::ffi::CStr { + ::std::ffi::CStr::from_bytes_with_nul(b"VK_EXT_debug_report\0") + .expect("Wrong extension string") + } +} +#[allow(non_camel_case_types)] +pub type PFN_vkCreateDebugReportCallbackEXT = extern "system" fn( + instance: Instance, + p_create_info: *const DebugReportCallbackCreateInfoEXT, + p_allocator: *const AllocationCallbacks, + p_callback: *mut DebugReportCallbackEXT, +) -> Result; +#[allow(non_camel_case_types)] +pub type PFN_vkDestroyDebugReportCallbackEXT = extern "system" fn( + instance: Instance, + callback: DebugReportCallbackEXT, + p_allocator: *const AllocationCallbacks, +) -> c_void; +#[allow(non_camel_case_types)] +pub type PFN_vkDebugReportMessageEXT = extern "system" fn( + instance: Instance, + flags: DebugReportFlagsEXT, + object_type: DebugReportObjectTypeEXT, + object: u64, + location: usize, + message_code: i32, + 
p_layer_prefix: *const c_char, + p_message: *const c_char, +) -> c_void; +pub struct ExtDebugReportFn { + pub create_debug_report_callback_ext: extern "system" fn( + instance: Instance, + p_create_info: *const DebugReportCallbackCreateInfoEXT, + p_allocator: *const AllocationCallbacks, + p_callback: *mut DebugReportCallbackEXT, + ) -> Result, + pub destroy_debug_report_callback_ext: extern "system" fn( + instance: Instance, + callback: DebugReportCallbackEXT, + p_allocator: *const AllocationCallbacks, + ) -> c_void, + pub debug_report_message_ext: extern "system" fn( + instance: Instance, + flags: DebugReportFlagsEXT, + object_type: DebugReportObjectTypeEXT, + object: u64, + location: usize, + message_code: i32, + p_layer_prefix: *const c_char, + p_message: *const c_char, + ) -> c_void, +} +unsafe impl Send for ExtDebugReportFn {} +unsafe impl Sync for ExtDebugReportFn {} +impl ::std::clone::Clone for ExtDebugReportFn { + fn clone(&self) -> Self { + ExtDebugReportFn { + create_debug_report_callback_ext: self.create_debug_report_callback_ext, + destroy_debug_report_callback_ext: self.destroy_debug_report_callback_ext, + debug_report_message_ext: self.debug_report_message_ext, + } + } +} +impl ExtDebugReportFn { + pub fn load(mut _f: F) -> Self + where + F: FnMut(&::std::ffi::CStr) -> *const c_void, + { + ExtDebugReportFn { + create_debug_report_callback_ext: unsafe { + extern "system" fn create_debug_report_callback_ext( + _instance: Instance, + _p_create_info: *const DebugReportCallbackCreateInfoEXT, + _p_allocator: *const AllocationCallbacks, + _p_callback: *mut DebugReportCallbackEXT, + ) -> Result { + panic!(concat!( + "Unable to load ", + stringify!(create_debug_report_callback_ext) + )) + } + let raw_name = stringify!(vkCreateDebugReportCallbackEXT); + let cname = ::std::ffi::CString::new(raw_name).unwrap(); + let val = _f(&cname); + if val.is_null() { + create_debug_report_callback_ext + } else { + ::std::mem::transmute(val) + } + }, + destroy_debug_report_callback_ext: unsafe { + extern "system" fn destroy_debug_report_callback_ext( + _instance: Instance, + _callback: DebugReportCallbackEXT, + _p_allocator: *const AllocationCallbacks, + ) -> c_void { + panic!(concat!( + "Unable to load ", + stringify!(destroy_debug_report_callback_ext) + )) + } + let raw_name = stringify!(vkDestroyDebugReportCallbackEXT); + let cname = ::std::ffi::CString::new(raw_name).unwrap(); + let val = _f(&cname); + if val.is_null() { + destroy_debug_report_callback_ext + } else { + ::std::mem::transmute(val) + } + }, + debug_report_message_ext: unsafe { + extern "system" fn debug_report_message_ext( + _instance: Instance, + _flags: DebugReportFlagsEXT, + _object_type: DebugReportObjectTypeEXT, + _object: u64, + _location: usize, + _message_code: i32, + _p_layer_prefix: *const c_char, + _p_message: *const c_char, + ) -> c_void { + panic!(concat!( + "Unable to load ", + stringify!(debug_report_message_ext) + )) + } + let raw_name = stringify!(vkDebugReportMessageEXT); + let cname = ::std::ffi::CString::new(raw_name).unwrap(); + let val = _f(&cname); + if val.is_null() { + debug_report_message_ext + } else { + ::std::mem::transmute(val) + } + }, + } + } + #[doc = ""] + pub unsafe fn create_debug_report_callback_ext( + &self, + instance: Instance, + p_create_info: *const DebugReportCallbackCreateInfoEXT, + p_allocator: *const AllocationCallbacks, + p_callback: *mut DebugReportCallbackEXT, + ) -> Result { + (self.create_debug_report_callback_ext)(instance, p_create_info, p_allocator, p_callback) + } + #[doc = ""] 
+ pub unsafe fn destroy_debug_report_callback_ext( + &self, + instance: Instance, + callback: DebugReportCallbackEXT, + p_allocator: *const AllocationCallbacks, + ) -> c_void { + (self.destroy_debug_report_callback_ext)(instance, callback, p_allocator) + } + #[doc = ""] + pub unsafe fn debug_report_message_ext( + &self, + instance: Instance, + flags: DebugReportFlagsEXT, + object_type: DebugReportObjectTypeEXT, + object: u64, + location: usize, + message_code: i32, + p_layer_prefix: *const c_char, + p_message: *const c_char, + ) -> c_void { + (self.debug_report_message_ext)( + instance, + flags, + object_type, + object, + location, + message_code, + p_layer_prefix, + p_message, + ) + } +} +#[doc = "Generated from \'VK_EXT_debug_report\'"] +impl StructureType { + pub const DEBUG_REPORT_CALLBACK_CREATE_INFO_EXT: Self = StructureType(1000011000); +} +#[doc = "Generated from \'VK_EXT_debug_report\'"] +impl Result { + pub const ERROR_VALIDATION_FAILED_EXT: Self = Result(-1000011001); +} +#[doc = "Generated from \'VK_EXT_debug_report\'"] +impl ObjectType { + pub const DEBUG_REPORT_CALLBACK_EXT: Self = ObjectType(1000011000); +} +#[doc = "Generated from \'VK_EXT_debug_report\'"] +impl DebugReportObjectTypeEXT { + pub const SAMPLER_YCBCR_CONVERSION: Self = DebugReportObjectTypeEXT(1000156000); +} +#[doc = "Generated from \'VK_EXT_debug_report\'"] +impl DebugReportObjectTypeEXT { + pub const DESCRIPTOR_UPDATE_TEMPLATE: Self = DebugReportObjectTypeEXT(1000085000); +} +impl NvGlslShaderFn { + pub fn name() -> &'static ::std::ffi::CStr { + ::std::ffi::CStr::from_bytes_with_nul(b"VK_NV_glsl_shader\0") + .expect("Wrong extension string") + } +} +pub struct NvGlslShaderFn {} +unsafe impl Send for NvGlslShaderFn {} +unsafe impl Sync for NvGlslShaderFn {} +impl ::std::clone::Clone for NvGlslShaderFn { + fn clone(&self) -> Self { + NvGlslShaderFn {} + } +} +impl NvGlslShaderFn { + pub fn load(mut _f: F) -> Self + where + F: FnMut(&::std::ffi::CStr) -> *const c_void, + { + NvGlslShaderFn {} + } +} +#[doc = "Generated from \'VK_NV_glsl_shader\'"] +impl Result { + pub const ERROR_INVALID_SHADER_NV: Self = Result(-1000012000); +} +impl ExtDepthRangeUnrestrictedFn { + pub fn name() -> &'static ::std::ffi::CStr { + ::std::ffi::CStr::from_bytes_with_nul(b"VK_EXT_depth_range_unrestricted\0") + .expect("Wrong extension string") + } +} +pub struct ExtDepthRangeUnrestrictedFn {} +unsafe impl Send for ExtDepthRangeUnrestrictedFn {} +unsafe impl Sync for ExtDepthRangeUnrestrictedFn {} +impl ::std::clone::Clone for ExtDepthRangeUnrestrictedFn { + fn clone(&self) -> Self { + ExtDepthRangeUnrestrictedFn {} + } +} +impl ExtDepthRangeUnrestrictedFn { + pub fn load(mut _f: F) -> Self + where + F: FnMut(&::std::ffi::CStr) -> *const c_void, + { + ExtDepthRangeUnrestrictedFn {} + } +} +impl KhrSamplerMirrorClampToEdgeFn { + pub fn name() -> &'static ::std::ffi::CStr { + ::std::ffi::CStr::from_bytes_with_nul(b"VK_KHR_sampler_mirror_clamp_to_edge\0") + .expect("Wrong extension string") + } +} +pub struct KhrSamplerMirrorClampToEdgeFn {} +unsafe impl Send for KhrSamplerMirrorClampToEdgeFn {} +unsafe impl Sync for KhrSamplerMirrorClampToEdgeFn {} +impl ::std::clone::Clone for KhrSamplerMirrorClampToEdgeFn { + fn clone(&self) -> Self { + KhrSamplerMirrorClampToEdgeFn {} + } +} +impl KhrSamplerMirrorClampToEdgeFn { + pub fn load(mut _f: F) -> Self + where + F: FnMut(&::std::ffi::CStr) -> *const c_void, + { + KhrSamplerMirrorClampToEdgeFn {} + } +} +impl ImgFilterCubicFn { + pub fn name() -> &'static ::std::ffi::CStr { + 
::std::ffi::CStr::from_bytes_with_nul(b"VK_IMG_filter_cubic\0")
+            .expect("Wrong extension string")
+    }
+}
+pub struct ImgFilterCubicFn {}
+unsafe impl Send for ImgFilterCubicFn {}
+unsafe impl Sync for ImgFilterCubicFn {}
+impl ::std::clone::Clone for ImgFilterCubicFn {
+    fn clone(&self) -> Self {
+        ImgFilterCubicFn {}
+    }
+}
+impl ImgFilterCubicFn {
+    pub fn load<F>(mut _f: F) -> Self
+    where
+        F: FnMut(&::std::ffi::CStr) -> *const c_void,
+    {
+        ImgFilterCubicFn {}
+    }
+}
+#[doc = "Generated from \'VK_IMG_filter_cubic\'"]
+impl Filter {
+    pub const CUBIC_IMG: Self = Filter(1000015000);
+}
+#[doc = "Generated from \'VK_IMG_filter_cubic\'"]
+impl FormatFeatureFlags {
+    pub const SAMPLED_IMAGE_FILTER_CUBIC_IMG: Self = FormatFeatureFlags(0b10000000000000);
+}
+impl AmdExtension17Fn {
+    pub fn name() -> &'static ::std::ffi::CStr {
+        ::std::ffi::CStr::from_bytes_with_nul(b"VK_AMD_extension_17\0")
+            .expect("Wrong extension string")
+    }
+}
+pub struct AmdExtension17Fn {}
+unsafe impl Send for AmdExtension17Fn {}
+unsafe impl Sync for AmdExtension17Fn {}
+impl ::std::clone::Clone for AmdExtension17Fn {
+    fn clone(&self) -> Self {
+        AmdExtension17Fn {}
+    }
+}
+impl AmdExtension17Fn {
+    pub fn load<F>(mut _f: F) -> Self
+    where
+        F: FnMut(&::std::ffi::CStr) -> *const c_void,
+    {
+        AmdExtension17Fn {}
+    }
+}
+impl AmdExtension18Fn {
+    pub fn name() -> &'static ::std::ffi::CStr {
+        ::std::ffi::CStr::from_bytes_with_nul(b"VK_AMD_extension_18\0")
+            .expect("Wrong extension string")
+    }
+}
+pub struct AmdExtension18Fn {}
+unsafe impl Send for AmdExtension18Fn {}
+unsafe impl Sync for AmdExtension18Fn {}
+impl ::std::clone::Clone for AmdExtension18Fn {
+    fn clone(&self) -> Self {
+        AmdExtension18Fn {}
+    }
+}
+impl AmdExtension18Fn {
+    pub fn load<F>(mut _f: F) -> Self
+    where
+        F: FnMut(&::std::ffi::CStr) -> *const c_void,
+    {
+        AmdExtension18Fn {}
+    }
+}
+impl AmdRasterizationOrderFn {
+    pub fn name() -> &'static ::std::ffi::CStr {
+        ::std::ffi::CStr::from_bytes_with_nul(b"VK_AMD_rasterization_order\0")
+            .expect("Wrong extension string")
+    }
+}
+pub struct AmdRasterizationOrderFn {}
+unsafe impl Send for AmdRasterizationOrderFn {}
+unsafe impl Sync for AmdRasterizationOrderFn {}
+impl ::std::clone::Clone for AmdRasterizationOrderFn {
+    fn clone(&self) -> Self {
+        AmdRasterizationOrderFn {}
+    }
+}
+impl AmdRasterizationOrderFn {
+    pub fn load<F>(mut _f: F) -> Self
+    where
+        F: FnMut(&::std::ffi::CStr) -> *const c_void,
+    {
+        AmdRasterizationOrderFn {}
+    }
+}
+#[doc = "Generated from \'VK_AMD_rasterization_order\'"]
+impl StructureType {
+    pub const PIPELINE_RASTERIZATION_STATE_RASTERIZATION_ORDER_AMD: Self =
+        StructureType(1000018000);
+}
+impl AmdExtension20Fn {
+    pub fn name() -> &'static ::std::ffi::CStr {
+        ::std::ffi::CStr::from_bytes_with_nul(b"VK_AMD_extension_20\0")
+            .expect("Wrong extension string")
+    }
+}
+pub struct AmdExtension20Fn {}
+unsafe impl Send for AmdExtension20Fn {}
+unsafe impl Sync for AmdExtension20Fn {}
+impl ::std::clone::Clone for AmdExtension20Fn {
+    fn clone(&self) -> Self {
+        AmdExtension20Fn {}
+    }
+}
+impl AmdExtension20Fn {
+    pub fn load<F>(mut _f: F) -> Self
+    where
+        F: FnMut(&::std::ffi::CStr) -> *const c_void,
+    {
+        AmdExtension20Fn {}
+    }
+}
+impl AmdShaderTrinaryMinmaxFn {
+    pub fn name() -> &'static ::std::ffi::CStr {
+        ::std::ffi::CStr::from_bytes_with_nul(b"VK_AMD_shader_trinary_minmax\0")
+            .expect("Wrong extension string")
+    }
+}
+pub struct AmdShaderTrinaryMinmaxFn {}
+unsafe impl Send for AmdShaderTrinaryMinmaxFn {}
+unsafe impl Sync for AmdShaderTrinaryMinmaxFn {}
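Extensions like the VK_AMD_* entries above define enum values or shader semantics but no new commands, so their generated tables are empty structs: `name()` still exposes the extension string for enabling it at instance or device creation, while `load` has nothing to resolve and ignores the loader. A standalone sketch under that assumption; the `HypotheticalExtFn` type and its extension string are illustrative only, not part of the vendored code.

use std::ffi::CStr;
use std::os::raw::c_void;

pub struct HypotheticalExtFn {}

impl HypotheticalExtFn {
    pub fn name() -> &'static CStr {
        // The trailing NUL byte is required; from_bytes_with_nul verifies it.
        CStr::from_bytes_with_nul(b"VK_HYPOTHETICAL_extension\0")
            .expect("Wrong extension string")
    }
    pub fn load<F>(_f: F) -> Self
    where
        F: FnMut(&CStr) -> *const c_void,
    {
        // Nothing to resolve: the extension defines no entry points.
        HypotheticalExtFn {}
    }
}

fn main() {
    println!("{:?}", HypotheticalExtFn::name()); // "VK_HYPOTHETICAL_extension"
    let _table = HypotheticalExtFn::load(|_| std::ptr::null());
}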
+impl ::std::clone::Clone for AmdShaderTrinaryMinmaxFn { + fn clone(&self) -> Self { + AmdShaderTrinaryMinmaxFn {} + } +} +impl AmdShaderTrinaryMinmaxFn { + pub fn load(mut _f: F) -> Self + where + F: FnMut(&::std::ffi::CStr) -> *const c_void, + { + AmdShaderTrinaryMinmaxFn {} + } +} +impl AmdShaderExplicitVertexParameterFn { + pub fn name() -> &'static ::std::ffi::CStr { + ::std::ffi::CStr::from_bytes_with_nul(b"VK_AMD_shader_explicit_vertex_parameter\0") + .expect("Wrong extension string") + } +} +pub struct AmdShaderExplicitVertexParameterFn {} +unsafe impl Send for AmdShaderExplicitVertexParameterFn {} +unsafe impl Sync for AmdShaderExplicitVertexParameterFn {} +impl ::std::clone::Clone for AmdShaderExplicitVertexParameterFn { + fn clone(&self) -> Self { + AmdShaderExplicitVertexParameterFn {} + } +} +impl AmdShaderExplicitVertexParameterFn { + pub fn load(mut _f: F) -> Self + where + F: FnMut(&::std::ffi::CStr) -> *const c_void, + { + AmdShaderExplicitVertexParameterFn {} + } +} +impl ExtDebugMarkerFn { + pub fn name() -> &'static ::std::ffi::CStr { + ::std::ffi::CStr::from_bytes_with_nul(b"VK_EXT_debug_marker\0") + .expect("Wrong extension string") + } +} +#[allow(non_camel_case_types)] +pub type PFN_vkDebugMarkerSetObjectTagEXT = + extern "system" fn(device: Device, p_tag_info: *const DebugMarkerObjectTagInfoEXT) -> Result; +#[allow(non_camel_case_types)] +pub type PFN_vkDebugMarkerSetObjectNameEXT = + extern "system" fn(device: Device, p_name_info: *const DebugMarkerObjectNameInfoEXT) -> Result; +#[allow(non_camel_case_types)] +pub type PFN_vkCmdDebugMarkerBeginEXT = extern "system" fn( + command_buffer: CommandBuffer, + p_marker_info: *const DebugMarkerMarkerInfoEXT, +) -> c_void; +#[allow(non_camel_case_types)] +pub type PFN_vkCmdDebugMarkerEndEXT = extern "system" fn(command_buffer: CommandBuffer) -> c_void; +#[allow(non_camel_case_types)] +pub type PFN_vkCmdDebugMarkerInsertEXT = extern "system" fn( + command_buffer: CommandBuffer, + p_marker_info: *const DebugMarkerMarkerInfoEXT, +) -> c_void; +pub struct ExtDebugMarkerFn { + pub debug_marker_set_object_tag_ext: extern "system" fn( + device: Device, + p_tag_info: *const DebugMarkerObjectTagInfoEXT, + ) -> Result, + pub debug_marker_set_object_name_ext: extern "system" fn( + device: Device, + p_name_info: *const DebugMarkerObjectNameInfoEXT, + ) -> Result, + pub cmd_debug_marker_begin_ext: extern "system" fn( + command_buffer: CommandBuffer, + p_marker_info: *const DebugMarkerMarkerInfoEXT, + ) -> c_void, + pub cmd_debug_marker_end_ext: extern "system" fn(command_buffer: CommandBuffer) -> c_void, + pub cmd_debug_marker_insert_ext: extern "system" fn( + command_buffer: CommandBuffer, + p_marker_info: *const DebugMarkerMarkerInfoEXT, + ) -> c_void, +} +unsafe impl Send for ExtDebugMarkerFn {} +unsafe impl Sync for ExtDebugMarkerFn {} +impl ::std::clone::Clone for ExtDebugMarkerFn { + fn clone(&self) -> Self { + ExtDebugMarkerFn { + debug_marker_set_object_tag_ext: self.debug_marker_set_object_tag_ext, + debug_marker_set_object_name_ext: self.debug_marker_set_object_name_ext, + cmd_debug_marker_begin_ext: self.cmd_debug_marker_begin_ext, + cmd_debug_marker_end_ext: self.cmd_debug_marker_end_ext, + cmd_debug_marker_insert_ext: self.cmd_debug_marker_insert_ext, + } + } +} +impl ExtDebugMarkerFn { + pub fn load(mut _f: F) -> Self + where + F: FnMut(&::std::ffi::CStr) -> *const c_void, + { + ExtDebugMarkerFn { + debug_marker_set_object_tag_ext: unsafe { + extern "system" fn debug_marker_set_object_tag_ext( + _device: Device, + 
_p_tag_info: *const DebugMarkerObjectTagInfoEXT, + ) -> Result { + panic!(concat!( + "Unable to load ", + stringify!(debug_marker_set_object_tag_ext) + )) + } + let raw_name = stringify!(vkDebugMarkerSetObjectTagEXT); + let cname = ::std::ffi::CString::new(raw_name).unwrap(); + let val = _f(&cname); + if val.is_null() { + debug_marker_set_object_tag_ext + } else { + ::std::mem::transmute(val) + } + }, + debug_marker_set_object_name_ext: unsafe { + extern "system" fn debug_marker_set_object_name_ext( + _device: Device, + _p_name_info: *const DebugMarkerObjectNameInfoEXT, + ) -> Result { + panic!(concat!( + "Unable to load ", + stringify!(debug_marker_set_object_name_ext) + )) + } + let raw_name = stringify!(vkDebugMarkerSetObjectNameEXT); + let cname = ::std::ffi::CString::new(raw_name).unwrap(); + let val = _f(&cname); + if val.is_null() { + debug_marker_set_object_name_ext + } else { + ::std::mem::transmute(val) + } + }, + cmd_debug_marker_begin_ext: unsafe { + extern "system" fn cmd_debug_marker_begin_ext( + _command_buffer: CommandBuffer, + _p_marker_info: *const DebugMarkerMarkerInfoEXT, + ) -> c_void { + panic!(concat!( + "Unable to load ", + stringify!(cmd_debug_marker_begin_ext) + )) + } + let raw_name = stringify!(vkCmdDebugMarkerBeginEXT); + let cname = ::std::ffi::CString::new(raw_name).unwrap(); + let val = _f(&cname); + if val.is_null() { + cmd_debug_marker_begin_ext + } else { + ::std::mem::transmute(val) + } + }, + cmd_debug_marker_end_ext: unsafe { + extern "system" fn cmd_debug_marker_end_ext( + _command_buffer: CommandBuffer, + ) -> c_void { + panic!(concat!( + "Unable to load ", + stringify!(cmd_debug_marker_end_ext) + )) + } + let raw_name = stringify!(vkCmdDebugMarkerEndEXT); + let cname = ::std::ffi::CString::new(raw_name).unwrap(); + let val = _f(&cname); + if val.is_null() { + cmd_debug_marker_end_ext + } else { + ::std::mem::transmute(val) + } + }, + cmd_debug_marker_insert_ext: unsafe { + extern "system" fn cmd_debug_marker_insert_ext( + _command_buffer: CommandBuffer, + _p_marker_info: *const DebugMarkerMarkerInfoEXT, + ) -> c_void { + panic!(concat!( + "Unable to load ", + stringify!(cmd_debug_marker_insert_ext) + )) + } + let raw_name = stringify!(vkCmdDebugMarkerInsertEXT); + let cname = ::std::ffi::CString::new(raw_name).unwrap(); + let val = _f(&cname); + if val.is_null() { + cmd_debug_marker_insert_ext + } else { + ::std::mem::transmute(val) + } + }, + } + } + #[doc = ""] + pub unsafe fn debug_marker_set_object_tag_ext( + &self, + device: Device, + p_tag_info: *const DebugMarkerObjectTagInfoEXT, + ) -> Result { + (self.debug_marker_set_object_tag_ext)(device, p_tag_info) + } + #[doc = ""] + pub unsafe fn debug_marker_set_object_name_ext( + &self, + device: Device, + p_name_info: *const DebugMarkerObjectNameInfoEXT, + ) -> Result { + (self.debug_marker_set_object_name_ext)(device, p_name_info) + } + #[doc = ""] + pub unsafe fn cmd_debug_marker_begin_ext( + &self, + command_buffer: CommandBuffer, + p_marker_info: *const DebugMarkerMarkerInfoEXT, + ) -> c_void { + (self.cmd_debug_marker_begin_ext)(command_buffer, p_marker_info) + } + #[doc = ""] + pub unsafe fn cmd_debug_marker_end_ext(&self, command_buffer: CommandBuffer) -> c_void { + (self.cmd_debug_marker_end_ext)(command_buffer) + } + #[doc = ""] + pub unsafe fn cmd_debug_marker_insert_ext( + &self, + command_buffer: CommandBuffer, + p_marker_info: *const DebugMarkerMarkerInfoEXT, + ) -> c_void { + (self.cmd_debug_marker_insert_ext)(command_buffer, p_marker_info) + } +} +#[doc = "Generated from 
\'VK_EXT_debug_marker\'"] +impl StructureType { + pub const DEBUG_MARKER_OBJECT_NAME_INFO_EXT: Self = StructureType(1000022000); +} +#[doc = "Generated from \'VK_EXT_debug_marker\'"] +impl StructureType { + pub const DEBUG_MARKER_OBJECT_TAG_INFO_EXT: Self = StructureType(1000022001); +} +#[doc = "Generated from \'VK_EXT_debug_marker\'"] +impl StructureType { + pub const DEBUG_MARKER_MARKER_INFO_EXT: Self = StructureType(1000022002); +} +impl AmdExtension24Fn { + pub fn name() -> &'static ::std::ffi::CStr { + ::std::ffi::CStr::from_bytes_with_nul(b"VK_AMD_extension_24\0") + .expect("Wrong extension string") + } +} +pub struct AmdExtension24Fn {} +unsafe impl Send for AmdExtension24Fn {} +unsafe impl Sync for AmdExtension24Fn {} +impl ::std::clone::Clone for AmdExtension24Fn { + fn clone(&self) -> Self { + AmdExtension24Fn {} + } +} +impl AmdExtension24Fn { + pub fn load(mut _f: F) -> Self + where + F: FnMut(&::std::ffi::CStr) -> *const c_void, + { + AmdExtension24Fn {} + } +} +#[doc = "Generated from \'VK_AMD_extension_24\'"] +impl QueueFlags { + pub const RESERVED_6_KHR: Self = QueueFlags(0b1000000); +} +#[doc = "Generated from \'VK_AMD_extension_24\'"] +impl PipelineStageFlags { + pub const RESERVED_27_KHR: Self = PipelineStageFlags(0b1000000000000000000000000000); +} +#[doc = "Generated from \'VK_AMD_extension_24\'"] +impl AccessFlags { + pub const RESERVED_30_KHR: Self = AccessFlags(0b1000000000000000000000000000000); +} +#[doc = "Generated from \'VK_AMD_extension_24\'"] +impl AccessFlags { + pub const RESERVED_31_KHR: Self = AccessFlags(0b10000000000000000000000000000000); +} +#[doc = "Generated from \'VK_AMD_extension_24\'"] +impl BufferUsageFlags { + pub const RESERVED_15_KHR: Self = BufferUsageFlags(0b1000000000000000); +} +#[doc = "Generated from \'VK_AMD_extension_24\'"] +impl BufferUsageFlags { + pub const RESERVED_16_KHR: Self = BufferUsageFlags(0b10000000000000000); +} +#[doc = "Generated from \'VK_AMD_extension_24\'"] +impl ImageUsageFlags { + pub const RESERVED_13_KHR: Self = ImageUsageFlags(0b10000000000000); +} +#[doc = "Generated from \'VK_AMD_extension_24\'"] +impl ImageUsageFlags { + pub const RESERVED_14_KHR: Self = ImageUsageFlags(0b100000000000000); +} +#[doc = "Generated from \'VK_AMD_extension_24\'"] +impl ImageUsageFlags { + pub const RESERVED_15_KHR: Self = ImageUsageFlags(0b1000000000000000); +} +#[doc = "Generated from \'VK_AMD_extension_24\'"] +impl FormatFeatureFlags { + pub const RESERVED_27_KHR: Self = FormatFeatureFlags(0b1000000000000000000000000000); +} +#[doc = "Generated from \'VK_AMD_extension_24\'"] +impl FormatFeatureFlags { + pub const RESERVED_28_KHR: Self = FormatFeatureFlags(0b10000000000000000000000000000); +} +#[doc = "Generated from \'VK_AMD_extension_24\'"] +impl QueryType { + pub const RESERVED_8: Self = QueryType(1000023008); +} +impl AmdExtension25Fn { + pub fn name() -> &'static ::std::ffi::CStr { + ::std::ffi::CStr::from_bytes_with_nul(b"VK_AMD_extension_25\0") + .expect("Wrong extension string") + } +} +pub struct AmdExtension25Fn {} +unsafe impl Send for AmdExtension25Fn {} +unsafe impl Sync for AmdExtension25Fn {} +impl ::std::clone::Clone for AmdExtension25Fn { + fn clone(&self) -> Self { + AmdExtension25Fn {} + } +} +impl AmdExtension25Fn { + pub fn load(mut _f: F) -> Self + where + F: FnMut(&::std::ffi::CStr) -> *const c_void, + { + AmdExtension25Fn {} + } +} +#[doc = "Generated from \'VK_AMD_extension_25\'"] +impl QueueFlags { + pub const RESERVED_5_KHR: Self = QueueFlags(0b100000); +} +#[doc = "Generated from 
\'VK_AMD_extension_25\'"] +impl PipelineStageFlags { + pub const RESERVED_26_KHR: Self = PipelineStageFlags(0b100000000000000000000000000); +} +#[doc = "Generated from \'VK_AMD_extension_25\'"] +impl AccessFlags { + pub const RESERVED_28_KHR: Self = AccessFlags(0b10000000000000000000000000000); +} +#[doc = "Generated from \'VK_AMD_extension_25\'"] +impl AccessFlags { + pub const RESERVED_29_KHR: Self = AccessFlags(0b100000000000000000000000000000); +} +#[doc = "Generated from \'VK_AMD_extension_25\'"] +impl BufferUsageFlags { + pub const RESERVED_13_KHR: Self = BufferUsageFlags(0b10000000000000); +} +#[doc = "Generated from \'VK_AMD_extension_25\'"] +impl BufferUsageFlags { + pub const RESERVED_14_KHR: Self = BufferUsageFlags(0b100000000000000); +} +#[doc = "Generated from \'VK_AMD_extension_25\'"] +impl ImageUsageFlags { + pub const RESERVED_10_KHR: Self = ImageUsageFlags(0b10000000000); +} +#[doc = "Generated from \'VK_AMD_extension_25\'"] +impl ImageUsageFlags { + pub const RESERVED_11_KHR: Self = ImageUsageFlags(0b100000000000); +} +#[doc = "Generated from \'VK_AMD_extension_25\'"] +impl ImageUsageFlags { + pub const RESERVED_12_KHR: Self = ImageUsageFlags(0b1000000000000); +} +#[doc = "Generated from \'VK_AMD_extension_25\'"] +impl FormatFeatureFlags { + pub const RESERVED_25_KHR: Self = FormatFeatureFlags(0b10000000000000000000000000); +} +#[doc = "Generated from \'VK_AMD_extension_25\'"] +impl FormatFeatureFlags { + pub const RESERVED_26_KHR: Self = FormatFeatureFlags(0b100000000000000000000000000); +} +#[doc = "Generated from \'VK_AMD_extension_25\'"] +impl QueryType { + pub const RESERVED_4: Self = QueryType(1000024004); +} +impl AmdGcnShaderFn { + pub fn name() -> &'static ::std::ffi::CStr { + ::std::ffi::CStr::from_bytes_with_nul(b"VK_AMD_gcn_shader\0") + .expect("Wrong extension string") + } +} +pub struct AmdGcnShaderFn {} +unsafe impl Send for AmdGcnShaderFn {} +unsafe impl Sync for AmdGcnShaderFn {} +impl ::std::clone::Clone for AmdGcnShaderFn { + fn clone(&self) -> Self { + AmdGcnShaderFn {} + } +} +impl AmdGcnShaderFn { + pub fn load(mut _f: F) -> Self + where + F: FnMut(&::std::ffi::CStr) -> *const c_void, + { + AmdGcnShaderFn {} + } +} +impl NvDedicatedAllocationFn { + pub fn name() -> &'static ::std::ffi::CStr { + ::std::ffi::CStr::from_bytes_with_nul(b"VK_NV_dedicated_allocation\0") + .expect("Wrong extension string") + } +} +pub struct NvDedicatedAllocationFn {} +unsafe impl Send for NvDedicatedAllocationFn {} +unsafe impl Sync for NvDedicatedAllocationFn {} +impl ::std::clone::Clone for NvDedicatedAllocationFn { + fn clone(&self) -> Self { + NvDedicatedAllocationFn {} + } +} +impl NvDedicatedAllocationFn { + pub fn load(mut _f: F) -> Self + where + F: FnMut(&::std::ffi::CStr) -> *const c_void, + { + NvDedicatedAllocationFn {} + } +} +#[doc = "Generated from \'VK_NV_dedicated_allocation\'"] +impl StructureType { + pub const DEDICATED_ALLOCATION_IMAGE_CREATE_INFO_NV: Self = StructureType(1000026000); +} +#[doc = "Generated from \'VK_NV_dedicated_allocation\'"] +impl StructureType { + pub const DEDICATED_ALLOCATION_BUFFER_CREATE_INFO_NV: Self = StructureType(1000026001); +} +#[doc = "Generated from \'VK_NV_dedicated_allocation\'"] +impl StructureType { + pub const DEDICATED_ALLOCATION_MEMORY_ALLOCATE_INFO_NV: Self = StructureType(1000026002); +} +impl ExtExtension28Fn { + pub fn name() -> &'static ::std::ffi::CStr { + ::std::ffi::CStr::from_bytes_with_nul(b"VK_EXT_extension_28\0") + .expect("Wrong extension string") + } +} +pub struct ExtExtension28Fn {} +unsafe impl 
Send for ExtExtension28Fn {} +unsafe impl Sync for ExtExtension28Fn {} +impl ::std::clone::Clone for ExtExtension28Fn { + fn clone(&self) -> Self { + ExtExtension28Fn {} + } +} +impl ExtExtension28Fn { + pub fn load(mut _f: F) -> Self + where + F: FnMut(&::std::ffi::CStr) -> *const c_void, + { + ExtExtension28Fn {} + } +} +impl ExtTransformFeedbackFn { + pub fn name() -> &'static ::std::ffi::CStr { + ::std::ffi::CStr::from_bytes_with_nul(b"VK_EXT_transform_feedback\0") + .expect("Wrong extension string") + } +} +#[allow(non_camel_case_types)] +pub type PFN_vkCmdBindTransformFeedbackBuffersEXT = extern "system" fn( + command_buffer: CommandBuffer, + first_binding: u32, + binding_count: u32, + p_buffers: *const Buffer, + p_offsets: *const DeviceSize, + p_sizes: *const DeviceSize, +) -> c_void; +#[allow(non_camel_case_types)] +pub type PFN_vkCmdBeginTransformFeedbackEXT = extern "system" fn( + command_buffer: CommandBuffer, + first_counter_buffer: u32, + counter_buffer_count: u32, + p_counter_buffers: *const Buffer, + p_counter_buffer_offsets: *const DeviceSize, +) -> c_void; +#[allow(non_camel_case_types)] +pub type PFN_vkCmdEndTransformFeedbackEXT = extern "system" fn( + command_buffer: CommandBuffer, + first_counter_buffer: u32, + counter_buffer_count: u32, + p_counter_buffers: *const Buffer, + p_counter_buffer_offsets: *const DeviceSize, +) -> c_void; +#[allow(non_camel_case_types)] +pub type PFN_vkCmdBeginQueryIndexedEXT = extern "system" fn( + command_buffer: CommandBuffer, + query_pool: QueryPool, + query: u32, + flags: QueryControlFlags, + index: u32, +) -> c_void; +#[allow(non_camel_case_types)] +pub type PFN_vkCmdEndQueryIndexedEXT = extern "system" fn( + command_buffer: CommandBuffer, + query_pool: QueryPool, + query: u32, + index: u32, +) -> c_void; +#[allow(non_camel_case_types)] +pub type PFN_vkCmdDrawIndirectByteCountEXT = extern "system" fn( + command_buffer: CommandBuffer, + instance_count: u32, + first_instance: u32, + counter_buffer: Buffer, + counter_buffer_offset: DeviceSize, + counter_offset: u32, + vertex_stride: u32, +) -> c_void; +pub struct ExtTransformFeedbackFn { + pub cmd_bind_transform_feedback_buffers_ext: extern "system" fn( + command_buffer: CommandBuffer, + first_binding: u32, + binding_count: u32, + p_buffers: *const Buffer, + p_offsets: *const DeviceSize, + p_sizes: *const DeviceSize, + ) -> c_void, + pub cmd_begin_transform_feedback_ext: extern "system" fn( + command_buffer: CommandBuffer, + first_counter_buffer: u32, + counter_buffer_count: u32, + p_counter_buffers: *const Buffer, + p_counter_buffer_offsets: *const DeviceSize, + ) -> c_void, + pub cmd_end_transform_feedback_ext: extern "system" fn( + command_buffer: CommandBuffer, + first_counter_buffer: u32, + counter_buffer_count: u32, + p_counter_buffers: *const Buffer, + p_counter_buffer_offsets: *const DeviceSize, + ) -> c_void, + pub cmd_begin_query_indexed_ext: extern "system" fn( + command_buffer: CommandBuffer, + query_pool: QueryPool, + query: u32, + flags: QueryControlFlags, + index: u32, + ) -> c_void, + pub cmd_end_query_indexed_ext: extern "system" fn( + command_buffer: CommandBuffer, + query_pool: QueryPool, + query: u32, + index: u32, + ) -> c_void, + pub cmd_draw_indirect_byte_count_ext: extern "system" fn( + command_buffer: CommandBuffer, + instance_count: u32, + first_instance: u32, + counter_buffer: Buffer, + counter_buffer_offset: DeviceSize, + counter_offset: u32, + vertex_stride: u32, + ) -> c_void, +} +unsafe impl Send for ExtTransformFeedbackFn {} +unsafe impl Sync for 
ExtTransformFeedbackFn {} +impl ::std::clone::Clone for ExtTransformFeedbackFn { + fn clone(&self) -> Self { + ExtTransformFeedbackFn { + cmd_bind_transform_feedback_buffers_ext: self.cmd_bind_transform_feedback_buffers_ext, + cmd_begin_transform_feedback_ext: self.cmd_begin_transform_feedback_ext, + cmd_end_transform_feedback_ext: self.cmd_end_transform_feedback_ext, + cmd_begin_query_indexed_ext: self.cmd_begin_query_indexed_ext, + cmd_end_query_indexed_ext: self.cmd_end_query_indexed_ext, + cmd_draw_indirect_byte_count_ext: self.cmd_draw_indirect_byte_count_ext, + } + } +} +impl ExtTransformFeedbackFn { + pub fn load(mut _f: F) -> Self + where + F: FnMut(&::std::ffi::CStr) -> *const c_void, + { + ExtTransformFeedbackFn { + cmd_bind_transform_feedback_buffers_ext: unsafe { + extern "system" fn cmd_bind_transform_feedback_buffers_ext( + _command_buffer: CommandBuffer, + _first_binding: u32, + _binding_count: u32, + _p_buffers: *const Buffer, + _p_offsets: *const DeviceSize, + _p_sizes: *const DeviceSize, + ) -> c_void { + panic!(concat!( + "Unable to load ", + stringify!(cmd_bind_transform_feedback_buffers_ext) + )) + } + let raw_name = stringify!(vkCmdBindTransformFeedbackBuffersEXT); + let cname = ::std::ffi::CString::new(raw_name).unwrap(); + let val = _f(&cname); + if val.is_null() { + cmd_bind_transform_feedback_buffers_ext + } else { + ::std::mem::transmute(val) + } + }, + cmd_begin_transform_feedback_ext: unsafe { + extern "system" fn cmd_begin_transform_feedback_ext( + _command_buffer: CommandBuffer, + _first_counter_buffer: u32, + _counter_buffer_count: u32, + _p_counter_buffers: *const Buffer, + _p_counter_buffer_offsets: *const DeviceSize, + ) -> c_void { + panic!(concat!( + "Unable to load ", + stringify!(cmd_begin_transform_feedback_ext) + )) + } + let raw_name = stringify!(vkCmdBeginTransformFeedbackEXT); + let cname = ::std::ffi::CString::new(raw_name).unwrap(); + let val = _f(&cname); + if val.is_null() { + cmd_begin_transform_feedback_ext + } else { + ::std::mem::transmute(val) + } + }, + cmd_end_transform_feedback_ext: unsafe { + extern "system" fn cmd_end_transform_feedback_ext( + _command_buffer: CommandBuffer, + _first_counter_buffer: u32, + _counter_buffer_count: u32, + _p_counter_buffers: *const Buffer, + _p_counter_buffer_offsets: *const DeviceSize, + ) -> c_void { + panic!(concat!( + "Unable to load ", + stringify!(cmd_end_transform_feedback_ext) + )) + } + let raw_name = stringify!(vkCmdEndTransformFeedbackEXT); + let cname = ::std::ffi::CString::new(raw_name).unwrap(); + let val = _f(&cname); + if val.is_null() { + cmd_end_transform_feedback_ext + } else { + ::std::mem::transmute(val) + } + }, + cmd_begin_query_indexed_ext: unsafe { + extern "system" fn cmd_begin_query_indexed_ext( + _command_buffer: CommandBuffer, + _query_pool: QueryPool, + _query: u32, + _flags: QueryControlFlags, + _index: u32, + ) -> c_void { + panic!(concat!( + "Unable to load ", + stringify!(cmd_begin_query_indexed_ext) + )) + } + let raw_name = stringify!(vkCmdBeginQueryIndexedEXT); + let cname = ::std::ffi::CString::new(raw_name).unwrap(); + let val = _f(&cname); + if val.is_null() { + cmd_begin_query_indexed_ext + } else { + ::std::mem::transmute(val) + } + }, + cmd_end_query_indexed_ext: unsafe { + extern "system" fn cmd_end_query_indexed_ext( + _command_buffer: CommandBuffer, + _query_pool: QueryPool, + _query: u32, + _index: u32, + ) -> c_void { + panic!(concat!( + "Unable to load ", + stringify!(cmd_end_query_indexed_ext) + )) + } + let raw_name = 
stringify!(vkCmdEndQueryIndexedEXT); + let cname = ::std::ffi::CString::new(raw_name).unwrap(); + let val = _f(&cname); + if val.is_null() { + cmd_end_query_indexed_ext + } else { + ::std::mem::transmute(val) + } + }, + cmd_draw_indirect_byte_count_ext: unsafe { + extern "system" fn cmd_draw_indirect_byte_count_ext( + _command_buffer: CommandBuffer, + _instance_count: u32, + _first_instance: u32, + _counter_buffer: Buffer, + _counter_buffer_offset: DeviceSize, + _counter_offset: u32, + _vertex_stride: u32, + ) -> c_void { + panic!(concat!( + "Unable to load ", + stringify!(cmd_draw_indirect_byte_count_ext) + )) + } + let raw_name = stringify!(vkCmdDrawIndirectByteCountEXT); + let cname = ::std::ffi::CString::new(raw_name).unwrap(); + let val = _f(&cname); + if val.is_null() { + cmd_draw_indirect_byte_count_ext + } else { + ::std::mem::transmute(val) + } + }, + } + } + #[doc = ""] + pub unsafe fn cmd_bind_transform_feedback_buffers_ext( + &self, + command_buffer: CommandBuffer, + first_binding: u32, + binding_count: u32, + p_buffers: *const Buffer, + p_offsets: *const DeviceSize, + p_sizes: *const DeviceSize, + ) -> c_void { + (self.cmd_bind_transform_feedback_buffers_ext)( + command_buffer, + first_binding, + binding_count, + p_buffers, + p_offsets, + p_sizes, + ) + } + #[doc = ""] + pub unsafe fn cmd_begin_transform_feedback_ext( + &self, + command_buffer: CommandBuffer, + first_counter_buffer: u32, + counter_buffer_count: u32, + p_counter_buffers: *const Buffer, + p_counter_buffer_offsets: *const DeviceSize, + ) -> c_void { + (self.cmd_begin_transform_feedback_ext)( + command_buffer, + first_counter_buffer, + counter_buffer_count, + p_counter_buffers, + p_counter_buffer_offsets, + ) + } + #[doc = ""] + pub unsafe fn cmd_end_transform_feedback_ext( + &self, + command_buffer: CommandBuffer, + first_counter_buffer: u32, + counter_buffer_count: u32, + p_counter_buffers: *const Buffer, + p_counter_buffer_offsets: *const DeviceSize, + ) -> c_void { + (self.cmd_end_transform_feedback_ext)( + command_buffer, + first_counter_buffer, + counter_buffer_count, + p_counter_buffers, + p_counter_buffer_offsets, + ) + } + #[doc = ""] + pub unsafe fn cmd_begin_query_indexed_ext( + &self, + command_buffer: CommandBuffer, + query_pool: QueryPool, + query: u32, + flags: QueryControlFlags, + index: u32, + ) -> c_void { + (self.cmd_begin_query_indexed_ext)(command_buffer, query_pool, query, flags, index) + } + #[doc = ""] + pub unsafe fn cmd_end_query_indexed_ext( + &self, + command_buffer: CommandBuffer, + query_pool: QueryPool, + query: u32, + index: u32, + ) -> c_void { + (self.cmd_end_query_indexed_ext)(command_buffer, query_pool, query, index) + } + #[doc = ""] + pub unsafe fn cmd_draw_indirect_byte_count_ext( + &self, + command_buffer: CommandBuffer, + instance_count: u32, + first_instance: u32, + counter_buffer: Buffer, + counter_buffer_offset: DeviceSize, + counter_offset: u32, + vertex_stride: u32, + ) -> c_void { + (self.cmd_draw_indirect_byte_count_ext)( + command_buffer, + instance_count, + first_instance, + counter_buffer, + counter_buffer_offset, + counter_offset, + vertex_stride, + ) + } +} +#[doc = "Generated from \'VK_EXT_transform_feedback\'"] +impl StructureType { + pub const PHYSICAL_DEVICE_TRANSFORM_FEEDBACK_FEATURES_EXT: Self = StructureType(1000028000); +} +#[doc = "Generated from \'VK_EXT_transform_feedback\'"] +impl StructureType { + pub const PHYSICAL_DEVICE_TRANSFORM_FEEDBACK_PROPERTIES_EXT: Self = StructureType(1000028001); +} +#[doc = "Generated from \'VK_EXT_transform_feedback\'"] 
+impl StructureType { + pub const PIPELINE_RASTERIZATION_STATE_STREAM_CREATE_INFO_EXT: Self = StructureType(1000028002); +} +#[doc = "Generated from \'VK_EXT_transform_feedback\'"] +impl QueryType { + pub const TRANSFORM_FEEDBACK_STREAM_EXT: Self = QueryType(1000028004); +} +#[doc = "Generated from \'VK_EXT_transform_feedback\'"] +impl BufferUsageFlags { + pub const TRANSFORM_FEEDBACK_BUFFER_EXT: Self = BufferUsageFlags(0b100000000000); +} +#[doc = "Generated from \'VK_EXT_transform_feedback\'"] +impl BufferUsageFlags { + pub const TRANSFORM_FEEDBACK_COUNTER_BUFFER_EXT: Self = BufferUsageFlags(0b1000000000000); +} +#[doc = "Generated from \'VK_EXT_transform_feedback\'"] +impl AccessFlags { + pub const TRANSFORM_FEEDBACK_WRITE_EXT: Self = AccessFlags(0b10000000000000000000000000); +} +#[doc = "Generated from \'VK_EXT_transform_feedback\'"] +impl AccessFlags { + pub const TRANSFORM_FEEDBACK_COUNTER_READ_EXT: Self = + AccessFlags(0b100000000000000000000000000); +} +#[doc = "Generated from \'VK_EXT_transform_feedback\'"] +impl AccessFlags { + pub const TRANSFORM_FEEDBACK_COUNTER_WRITE_EXT: Self = + AccessFlags(0b1000000000000000000000000000); +} +#[doc = "Generated from \'VK_EXT_transform_feedback\'"] +impl PipelineStageFlags { + pub const TRANSFORM_FEEDBACK_EXT: Self = PipelineStageFlags(0b1000000000000000000000000); +} +impl NvxExtension30Fn { + pub fn name() -> &'static ::std::ffi::CStr { + ::std::ffi::CStr::from_bytes_with_nul(b"VK_NVX_extension_30\0") + .expect("Wrong extension string") + } +} +pub struct NvxExtension30Fn {} +unsafe impl Send for NvxExtension30Fn {} +unsafe impl Sync for NvxExtension30Fn {} +impl ::std::clone::Clone for NvxExtension30Fn { + fn clone(&self) -> Self { + NvxExtension30Fn {} + } +} +impl NvxExtension30Fn { + pub fn load(mut _f: F) -> Self + where + F: FnMut(&::std::ffi::CStr) -> *const c_void, + { + NvxExtension30Fn {} + } +} +impl NvxExtension31Fn { + pub fn name() -> &'static ::std::ffi::CStr { + ::std::ffi::CStr::from_bytes_with_nul(b"VK_NVX_extension_31\0") + .expect("Wrong extension string") + } +} +pub struct NvxExtension31Fn {} +unsafe impl Send for NvxExtension31Fn {} +unsafe impl Sync for NvxExtension31Fn {} +impl ::std::clone::Clone for NvxExtension31Fn { + fn clone(&self) -> Self { + NvxExtension31Fn {} + } +} +impl NvxExtension31Fn { + pub fn load(mut _f: F) -> Self + where + F: FnMut(&::std::ffi::CStr) -> *const c_void, + { + NvxExtension31Fn {} + } +} +impl AmdExtension32Fn { + pub fn name() -> &'static ::std::ffi::CStr { + ::std::ffi::CStr::from_bytes_with_nul(b"VK_AMD_extension_32\0") + .expect("Wrong extension string") + } +} +pub struct AmdExtension32Fn {} +unsafe impl Send for AmdExtension32Fn {} +unsafe impl Sync for AmdExtension32Fn {} +impl ::std::clone::Clone for AmdExtension32Fn { + fn clone(&self) -> Self { + AmdExtension32Fn {} + } +} +impl AmdExtension32Fn { + pub fn load(mut _f: F) -> Self + where + F: FnMut(&::std::ffi::CStr) -> *const c_void, + { + AmdExtension32Fn {} + } +} +impl AmdExtension33Fn { + pub fn name() -> &'static ::std::ffi::CStr { + ::std::ffi::CStr::from_bytes_with_nul(b"VK_AMD_extension_33\0") + .expect("Wrong extension string") + } +} +pub struct AmdExtension33Fn {} +unsafe impl Send for AmdExtension33Fn {} +unsafe impl Sync for AmdExtension33Fn {} +impl ::std::clone::Clone for AmdExtension33Fn { + fn clone(&self) -> Self { + AmdExtension33Fn {} + } +} +impl AmdExtension33Fn { + pub fn load(mut _f: F) -> Self + where + F: FnMut(&::std::ffi::CStr) -> *const c_void, + { + AmdExtension33Fn {} + } +} +impl 
AmdDrawIndirectCountFn { + pub fn name() -> &'static ::std::ffi::CStr { + ::std::ffi::CStr::from_bytes_with_nul(b"VK_AMD_draw_indirect_count\0") + .expect("Wrong extension string") + } +} +#[allow(non_camel_case_types)] +pub type PFN_vkCmdDrawIndirectCountAMD = extern "system" fn( + command_buffer: CommandBuffer, + buffer: Buffer, + offset: DeviceSize, + count_buffer: Buffer, + count_buffer_offset: DeviceSize, + max_draw_count: u32, + stride: u32, +) -> c_void; +#[allow(non_camel_case_types)] +pub type PFN_vkCmdDrawIndexedIndirectCountAMD = extern "system" fn( + command_buffer: CommandBuffer, + buffer: Buffer, + offset: DeviceSize, + count_buffer: Buffer, + count_buffer_offset: DeviceSize, + max_draw_count: u32, + stride: u32, +) -> c_void; +pub struct AmdDrawIndirectCountFn { + pub cmd_draw_indirect_count_amd: extern "system" fn( + command_buffer: CommandBuffer, + buffer: Buffer, + offset: DeviceSize, + count_buffer: Buffer, + count_buffer_offset: DeviceSize, + max_draw_count: u32, + stride: u32, + ) -> c_void, + pub cmd_draw_indexed_indirect_count_amd: extern "system" fn( + command_buffer: CommandBuffer, + buffer: Buffer, + offset: DeviceSize, + count_buffer: Buffer, + count_buffer_offset: DeviceSize, + max_draw_count: u32, + stride: u32, + ) -> c_void, +} +unsafe impl Send for AmdDrawIndirectCountFn {} +unsafe impl Sync for AmdDrawIndirectCountFn {} +impl ::std::clone::Clone for AmdDrawIndirectCountFn { + fn clone(&self) -> Self { + AmdDrawIndirectCountFn { + cmd_draw_indirect_count_amd: self.cmd_draw_indirect_count_amd, + cmd_draw_indexed_indirect_count_amd: self.cmd_draw_indexed_indirect_count_amd, + } + } +} +impl AmdDrawIndirectCountFn { + pub fn load(mut _f: F) -> Self + where + F: FnMut(&::std::ffi::CStr) -> *const c_void, + { + AmdDrawIndirectCountFn { + cmd_draw_indirect_count_amd: unsafe { + extern "system" fn cmd_draw_indirect_count_amd( + _command_buffer: CommandBuffer, + _buffer: Buffer, + _offset: DeviceSize, + _count_buffer: Buffer, + _count_buffer_offset: DeviceSize, + _max_draw_count: u32, + _stride: u32, + ) -> c_void { + panic!(concat!( + "Unable to load ", + stringify!(cmd_draw_indirect_count_amd) + )) + } + let raw_name = stringify!(vkCmdDrawIndirectCountAMD); + let cname = ::std::ffi::CString::new(raw_name).unwrap(); + let val = _f(&cname); + if val.is_null() { + cmd_draw_indirect_count_amd + } else { + ::std::mem::transmute(val) + } + }, + cmd_draw_indexed_indirect_count_amd: unsafe { + extern "system" fn cmd_draw_indexed_indirect_count_amd( + _command_buffer: CommandBuffer, + _buffer: Buffer, + _offset: DeviceSize, + _count_buffer: Buffer, + _count_buffer_offset: DeviceSize, + _max_draw_count: u32, + _stride: u32, + ) -> c_void { + panic!(concat!( + "Unable to load ", + stringify!(cmd_draw_indexed_indirect_count_amd) + )) + } + let raw_name = stringify!(vkCmdDrawIndexedIndirectCountAMD); + let cname = ::std::ffi::CString::new(raw_name).unwrap(); + let val = _f(&cname); + if val.is_null() { + cmd_draw_indexed_indirect_count_amd + } else { + ::std::mem::transmute(val) + } + }, + } + } + #[doc = ""] + pub unsafe fn cmd_draw_indirect_count_amd( + &self, + command_buffer: CommandBuffer, + buffer: Buffer, + offset: DeviceSize, + count_buffer: Buffer, + count_buffer_offset: DeviceSize, + max_draw_count: u32, + stride: u32, + ) -> c_void { + (self.cmd_draw_indirect_count_amd)( + command_buffer, + buffer, + offset, + count_buffer, + count_buffer_offset, + max_draw_count, + stride, + ) + } + #[doc = ""] + pub unsafe fn cmd_draw_indexed_indirect_count_amd( + &self, + 
command_buffer: CommandBuffer, + buffer: Buffer, + offset: DeviceSize, + count_buffer: Buffer, + count_buffer_offset: DeviceSize, + max_draw_count: u32, + stride: u32, + ) -> c_void { + (self.cmd_draw_indexed_indirect_count_amd)( + command_buffer, + buffer, + offset, + count_buffer, + count_buffer_offset, + max_draw_count, + stride, + ) + } +} +impl AmdExtension35Fn { + pub fn name() -> &'static ::std::ffi::CStr { + ::std::ffi::CStr::from_bytes_with_nul(b"VK_AMD_extension_35\0") + .expect("Wrong extension string") + } +} +pub struct AmdExtension35Fn {} +unsafe impl Send for AmdExtension35Fn {} +unsafe impl Sync for AmdExtension35Fn {} +impl ::std::clone::Clone for AmdExtension35Fn { + fn clone(&self) -> Self { + AmdExtension35Fn {} + } +} +impl AmdExtension35Fn { + pub fn load(mut _f: F) -> Self + where + F: FnMut(&::std::ffi::CStr) -> *const c_void, + { + AmdExtension35Fn {} + } +} +impl AmdNegativeViewportHeightFn { + pub fn name() -> &'static ::std::ffi::CStr { + ::std::ffi::CStr::from_bytes_with_nul(b"VK_AMD_negative_viewport_height\0") + .expect("Wrong extension string") + } +} +pub struct AmdNegativeViewportHeightFn {} +unsafe impl Send for AmdNegativeViewportHeightFn {} +unsafe impl Sync for AmdNegativeViewportHeightFn {} +impl ::std::clone::Clone for AmdNegativeViewportHeightFn { + fn clone(&self) -> Self { + AmdNegativeViewportHeightFn {} + } +} +impl AmdNegativeViewportHeightFn { + pub fn load(mut _f: F) -> Self + where + F: FnMut(&::std::ffi::CStr) -> *const c_void, + { + AmdNegativeViewportHeightFn {} + } +} +impl AmdGpuShaderHalfFloatFn { + pub fn name() -> &'static ::std::ffi::CStr { + ::std::ffi::CStr::from_bytes_with_nul(b"VK_AMD_gpu_shader_half_float\0") + .expect("Wrong extension string") + } +} +pub struct AmdGpuShaderHalfFloatFn {} +unsafe impl Send for AmdGpuShaderHalfFloatFn {} +unsafe impl Sync for AmdGpuShaderHalfFloatFn {} +impl ::std::clone::Clone for AmdGpuShaderHalfFloatFn { + fn clone(&self) -> Self { + AmdGpuShaderHalfFloatFn {} + } +} +impl AmdGpuShaderHalfFloatFn { + pub fn load(mut _f: F) -> Self + where + F: FnMut(&::std::ffi::CStr) -> *const c_void, + { + AmdGpuShaderHalfFloatFn {} + } +} +impl AmdShaderBallotFn { + pub fn name() -> &'static ::std::ffi::CStr { + ::std::ffi::CStr::from_bytes_with_nul(b"VK_AMD_shader_ballot\0") + .expect("Wrong extension string") + } +} +pub struct AmdShaderBallotFn {} +unsafe impl Send for AmdShaderBallotFn {} +unsafe impl Sync for AmdShaderBallotFn {} +impl ::std::clone::Clone for AmdShaderBallotFn { + fn clone(&self) -> Self { + AmdShaderBallotFn {} + } +} +impl AmdShaderBallotFn { + pub fn load(mut _f: F) -> Self + where + F: FnMut(&::std::ffi::CStr) -> *const c_void, + { + AmdShaderBallotFn {} + } +} +impl AmdExtension39Fn { + pub fn name() -> &'static ::std::ffi::CStr { + ::std::ffi::CStr::from_bytes_with_nul(b"VK_AMD_extension_39\0") + .expect("Wrong extension string") + } +} +pub struct AmdExtension39Fn {} +unsafe impl Send for AmdExtension39Fn {} +unsafe impl Sync for AmdExtension39Fn {} +impl ::std::clone::Clone for AmdExtension39Fn { + fn clone(&self) -> Self { + AmdExtension39Fn {} + } +} +impl AmdExtension39Fn { + pub fn load(mut _f: F) -> Self + where + F: FnMut(&::std::ffi::CStr) -> *const c_void, + { + AmdExtension39Fn {} + } +} +impl AmdExtension40Fn { + pub fn name() -> &'static ::std::ffi::CStr { + ::std::ffi::CStr::from_bytes_with_nul(b"VK_AMD_extension_40\0") + .expect("Wrong extension string") + } +} +pub struct AmdExtension40Fn {} +unsafe impl Send for AmdExtension40Fn {} +unsafe impl Sync for 
AmdExtension40Fn {} +impl ::std::clone::Clone for AmdExtension40Fn { + fn clone(&self) -> Self { + AmdExtension40Fn {} + } +} +impl AmdExtension40Fn { + pub fn load(mut _f: F) -> Self + where + F: FnMut(&::std::ffi::CStr) -> *const c_void, + { + AmdExtension40Fn {} + } +} +impl AmdExtension41Fn { + pub fn name() -> &'static ::std::ffi::CStr { + ::std::ffi::CStr::from_bytes_with_nul(b"VK_AMD_extension_41\0") + .expect("Wrong extension string") + } +} +pub struct AmdExtension41Fn {} +unsafe impl Send for AmdExtension41Fn {} +unsafe impl Sync for AmdExtension41Fn {} +impl ::std::clone::Clone for AmdExtension41Fn { + fn clone(&self) -> Self { + AmdExtension41Fn {} + } +} +impl AmdExtension41Fn { + pub fn load(mut _f: F) -> Self + where + F: FnMut(&::std::ffi::CStr) -> *const c_void, + { + AmdExtension41Fn {} + } +} +impl AmdTextureGatherBiasLodFn { + pub fn name() -> &'static ::std::ffi::CStr { + ::std::ffi::CStr::from_bytes_with_nul(b"VK_AMD_texture_gather_bias_lod\0") + .expect("Wrong extension string") + } +} +pub struct AmdTextureGatherBiasLodFn {} +unsafe impl Send for AmdTextureGatherBiasLodFn {} +unsafe impl Sync for AmdTextureGatherBiasLodFn {} +impl ::std::clone::Clone for AmdTextureGatherBiasLodFn { + fn clone(&self) -> Self { + AmdTextureGatherBiasLodFn {} + } +} +impl AmdTextureGatherBiasLodFn { + pub fn load(mut _f: F) -> Self + where + F: FnMut(&::std::ffi::CStr) -> *const c_void, + { + AmdTextureGatherBiasLodFn {} + } +} +#[doc = "Generated from \'VK_AMD_texture_gather_bias_lod\'"] +impl StructureType { + pub const TEXTURE_LOD_GATHER_FORMAT_PROPERTIES_AMD: Self = StructureType(1000041000); +} +impl AmdShaderInfoFn { + pub fn name() -> &'static ::std::ffi::CStr { + ::std::ffi::CStr::from_bytes_with_nul(b"VK_AMD_shader_info\0") + .expect("Wrong extension string") + } +} +#[allow(non_camel_case_types)] +pub type PFN_vkGetShaderInfoAMD = extern "system" fn( + device: Device, + pipeline: Pipeline, + shader_stage: ShaderStageFlags, + info_type: ShaderInfoTypeAMD, + p_info_size: *mut usize, + p_info: *mut c_void, +) -> Result; +pub struct AmdShaderInfoFn { + pub get_shader_info_amd: extern "system" fn( + device: Device, + pipeline: Pipeline, + shader_stage: ShaderStageFlags, + info_type: ShaderInfoTypeAMD, + p_info_size: *mut usize, + p_info: *mut c_void, + ) -> Result, +} +unsafe impl Send for AmdShaderInfoFn {} +unsafe impl Sync for AmdShaderInfoFn {} +impl ::std::clone::Clone for AmdShaderInfoFn { + fn clone(&self) -> Self { + AmdShaderInfoFn { + get_shader_info_amd: self.get_shader_info_amd, + } + } +} +impl AmdShaderInfoFn { + pub fn load(mut _f: F) -> Self + where + F: FnMut(&::std::ffi::CStr) -> *const c_void, + { + AmdShaderInfoFn { + get_shader_info_amd: unsafe { + extern "system" fn get_shader_info_amd( + _device: Device, + _pipeline: Pipeline, + _shader_stage: ShaderStageFlags, + _info_type: ShaderInfoTypeAMD, + _p_info_size: *mut usize, + _p_info: *mut c_void, + ) -> Result { + panic!(concat!("Unable to load ", stringify!(get_shader_info_amd))) + } + let raw_name = stringify!(vkGetShaderInfoAMD); + let cname = ::std::ffi::CString::new(raw_name).unwrap(); + let val = _f(&cname); + if val.is_null() { + get_shader_info_amd + } else { + ::std::mem::transmute(val) + } + }, + } + } + #[doc = ""] + pub unsafe fn get_shader_info_amd( + &self, + device: Device, + pipeline: Pipeline, + shader_stage: ShaderStageFlags, + info_type: ShaderInfoTypeAMD, + p_info_size: *mut usize, + p_info: *mut c_void, + ) -> Result { + (self.get_shader_info_amd)( + device, + pipeline, + shader_stage, + 
info_type,
+            p_info_size,
+            p_info,
+        )
+    }
+}
+impl AmdExtension44Fn {
+    pub fn name() -> &'static ::std::ffi::CStr {
+        ::std::ffi::CStr::from_bytes_with_nul(b"VK_AMD_extension_44\0")
+            .expect("Wrong extension string")
+    }
+}
+pub struct AmdExtension44Fn {}
+unsafe impl Send for AmdExtension44Fn {}
+unsafe impl Sync for AmdExtension44Fn {}
+impl ::std::clone::Clone for AmdExtension44Fn {
+    fn clone(&self) -> Self {
+        AmdExtension44Fn {}
+    }
+}
+impl AmdExtension44Fn {
+    pub fn load<F>(mut _f: F) -> Self
+    where
+        F: FnMut(&::std::ffi::CStr) -> *const c_void,
+    {
+        AmdExtension44Fn {}
+    }
+}
+impl AmdExtension45Fn {
+    pub fn name() -> &'static ::std::ffi::CStr {
+        ::std::ffi::CStr::from_bytes_with_nul(b"VK_AMD_extension_45\0")
+            .expect("Wrong extension string")
+    }
+}
+pub struct AmdExtension45Fn {}
+unsafe impl Send for AmdExtension45Fn {}
+unsafe impl Sync for AmdExtension45Fn {}
+impl ::std::clone::Clone for AmdExtension45Fn {
+    fn clone(&self) -> Self {
+        AmdExtension45Fn {}
+    }
+}
+impl AmdExtension45Fn {
+    pub fn load<F>(mut _f: F) -> Self
+    where
+        F: FnMut(&::std::ffi::CStr) -> *const c_void,
+    {
+        AmdExtension45Fn {}
+    }
+}
+impl AmdExtension46Fn {
+    pub fn name() -> &'static ::std::ffi::CStr {
+        ::std::ffi::CStr::from_bytes_with_nul(b"VK_AMD_extension_46\0")
+            .expect("Wrong extension string")
+    }
+}
+pub struct AmdExtension46Fn {}
+unsafe impl Send for AmdExtension46Fn {}
+unsafe impl Sync for AmdExtension46Fn {}
+impl ::std::clone::Clone for AmdExtension46Fn {
+    fn clone(&self) -> Self {
+        AmdExtension46Fn {}
+    }
+}
+impl AmdExtension46Fn {
+    pub fn load<F>(mut _f: F) -> Self
+    where
+        F: FnMut(&::std::ffi::CStr) -> *const c_void,
+    {
+        AmdExtension46Fn {}
+    }
+}
+impl AmdShaderImageLoadStoreLodFn {
+    pub fn name() -> &'static ::std::ffi::CStr {
+        ::std::ffi::CStr::from_bytes_with_nul(b"VK_AMD_shader_image_load_store_lod\0")
+            .expect("Wrong extension string")
+    }
+}
+pub struct AmdShaderImageLoadStoreLodFn {}
+unsafe impl Send for AmdShaderImageLoadStoreLodFn {}
+unsafe impl Sync for AmdShaderImageLoadStoreLodFn {}
+impl ::std::clone::Clone for AmdShaderImageLoadStoreLodFn {
+    fn clone(&self) -> Self {
+        AmdShaderImageLoadStoreLodFn {}
+    }
+}
+impl AmdShaderImageLoadStoreLodFn {
+    pub fn load<F>(mut _f: F) -> Self
+    where
+        F: FnMut(&::std::ffi::CStr) -> *const c_void,
+    {
+        AmdShaderImageLoadStoreLodFn {}
+    }
+}
+impl NvxExtension48Fn {
+    pub fn name() -> &'static ::std::ffi::CStr {
+        ::std::ffi::CStr::from_bytes_with_nul(b"VK_NVX_extension_48\0")
+            .expect("Wrong extension string")
+    }
+}
+pub struct NvxExtension48Fn {}
+unsafe impl Send for NvxExtension48Fn {}
+unsafe impl Sync for NvxExtension48Fn {}
+impl ::std::clone::Clone for NvxExtension48Fn {
+    fn clone(&self) -> Self {
+        NvxExtension48Fn {}
+    }
+}
+impl NvxExtension48Fn {
+    pub fn load<F>(mut _f: F) -> Self
+    where
+        F: FnMut(&::std::ffi::CStr) -> *const c_void,
+    {
+        NvxExtension48Fn {}
+    }
+}
+impl GoogleExtension49Fn {
+    pub fn name() -> &'static ::std::ffi::CStr {
+        ::std::ffi::CStr::from_bytes_with_nul(b"VK_GOOGLE_extension_49\0")
+            .expect("Wrong extension string")
+    }
+}
+pub struct GoogleExtension49Fn {}
+unsafe impl Send for GoogleExtension49Fn {}
+unsafe impl Sync for GoogleExtension49Fn {}
+impl ::std::clone::Clone for GoogleExtension49Fn {
+    fn clone(&self) -> Self {
+        GoogleExtension49Fn {}
+    }
+}
+impl GoogleExtension49Fn {
+    pub fn load<F>(mut _f: F) -> Self
+    where
+        F: FnMut(&::std::ffi::CStr) -> *const c_void,
+    {
+        GoogleExtension49Fn {}
+    }
+}
+impl GoogleExtension50Fn {
+    pub fn name() -> &'static
::std::ffi::CStr { + ::std::ffi::CStr::from_bytes_with_nul(b"VK_GOOGLE_extension_50\0") + .expect("Wrong extension string") + } +} +pub struct GoogleExtension50Fn {} +unsafe impl Send for GoogleExtension50Fn {} +unsafe impl Sync for GoogleExtension50Fn {} +impl ::std::clone::Clone for GoogleExtension50Fn { + fn clone(&self) -> Self { + GoogleExtension50Fn {} + } +} +impl GoogleExtension50Fn { + pub fn load(mut _f: F) -> Self + where + F: FnMut(&::std::ffi::CStr) -> *const c_void, + { + GoogleExtension50Fn {} + } +} +impl NvCornerSampledImageFn { + pub fn name() -> &'static ::std::ffi::CStr { + ::std::ffi::CStr::from_bytes_with_nul(b"VK_NV_corner_sampled_image\0") + .expect("Wrong extension string") + } +} +pub struct NvCornerSampledImageFn {} +unsafe impl Send for NvCornerSampledImageFn {} +unsafe impl Sync for NvCornerSampledImageFn {} +impl ::std::clone::Clone for NvCornerSampledImageFn { + fn clone(&self) -> Self { + NvCornerSampledImageFn {} + } +} +impl NvCornerSampledImageFn { + pub fn load(mut _f: F) -> Self + where + F: FnMut(&::std::ffi::CStr) -> *const c_void, + { + NvCornerSampledImageFn {} + } +} +#[doc = "Generated from \'VK_NV_corner_sampled_image\'"] +impl ImageCreateFlags { + pub const CORNER_SAMPLED_NV: Self = ImageCreateFlags(0b10000000000000); +} +#[doc = "Generated from \'VK_NV_corner_sampled_image\'"] +impl StructureType { + pub const PHYSICAL_DEVICE_CORNER_SAMPLED_IMAGE_FEATURES_NV: Self = StructureType(1000050000); +} +impl NvxExtension52Fn { + pub fn name() -> &'static ::std::ffi::CStr { + ::std::ffi::CStr::from_bytes_with_nul(b"VK_NVX_extension_52\0") + .expect("Wrong extension string") + } +} +pub struct NvxExtension52Fn {} +unsafe impl Send for NvxExtension52Fn {} +unsafe impl Sync for NvxExtension52Fn {} +impl ::std::clone::Clone for NvxExtension52Fn { + fn clone(&self) -> Self { + NvxExtension52Fn {} + } +} +impl NvxExtension52Fn { + pub fn load(mut _f: F) -> Self + where + F: FnMut(&::std::ffi::CStr) -> *const c_void, + { + NvxExtension52Fn {} + } +} +impl NvExtension53Fn { + pub fn name() -> &'static ::std::ffi::CStr { + ::std::ffi::CStr::from_bytes_with_nul(b"VK_NV_extension_53\0") + .expect("Wrong extension string") + } +} +pub struct NvExtension53Fn {} +unsafe impl Send for NvExtension53Fn {} +unsafe impl Sync for NvExtension53Fn {} +impl ::std::clone::Clone for NvExtension53Fn { + fn clone(&self) -> Self { + NvExtension53Fn {} + } +} +impl NvExtension53Fn { + pub fn load(mut _f: F) -> Self + where + F: FnMut(&::std::ffi::CStr) -> *const c_void, + { + NvExtension53Fn {} + } +} +impl KhrMultiviewFn { + pub fn name() -> &'static ::std::ffi::CStr { + ::std::ffi::CStr::from_bytes_with_nul(b"VK_KHR_multiview\0") + .expect("Wrong extension string") + } +} +pub struct KhrMultiviewFn {} +unsafe impl Send for KhrMultiviewFn {} +unsafe impl Sync for KhrMultiviewFn {} +impl ::std::clone::Clone for KhrMultiviewFn { + fn clone(&self) -> Self { + KhrMultiviewFn {} + } +} +impl KhrMultiviewFn { + pub fn load(mut _f: F) -> Self + where + F: FnMut(&::std::ffi::CStr) -> *const c_void, + { + KhrMultiviewFn {} + } +} +impl ImgFormatPvrtcFn { + pub fn name() -> &'static ::std::ffi::CStr { + ::std::ffi::CStr::from_bytes_with_nul(b"VK_IMG_format_pvrtc\0") + .expect("Wrong extension string") + } +} +pub struct ImgFormatPvrtcFn {} +unsafe impl Send for ImgFormatPvrtcFn {} +unsafe impl Sync for ImgFormatPvrtcFn {} +impl ::std::clone::Clone for ImgFormatPvrtcFn { + fn clone(&self) -> Self { + ImgFormatPvrtcFn {} + } +} +impl ImgFormatPvrtcFn { + pub fn load(mut _f: F) -> Self + 
where + F: FnMut(&::std::ffi::CStr) -> *const c_void, + { + ImgFormatPvrtcFn {} + } +} +#[doc = "Generated from \'VK_IMG_format_pvrtc\'"] +impl Format { + pub const PVRTC1_2BPP_UNORM_BLOCK_IMG: Self = Format(1000054000); +} +#[doc = "Generated from \'VK_IMG_format_pvrtc\'"] +impl Format { + pub const PVRTC1_4BPP_UNORM_BLOCK_IMG: Self = Format(1000054001); +} +#[doc = "Generated from \'VK_IMG_format_pvrtc\'"] +impl Format { + pub const PVRTC2_2BPP_UNORM_BLOCK_IMG: Self = Format(1000054002); +} +#[doc = "Generated from \'VK_IMG_format_pvrtc\'"] +impl Format { + pub const PVRTC2_4BPP_UNORM_BLOCK_IMG: Self = Format(1000054003); +} +#[doc = "Generated from \'VK_IMG_format_pvrtc\'"] +impl Format { + pub const PVRTC1_2BPP_SRGB_BLOCK_IMG: Self = Format(1000054004); +} +#[doc = "Generated from \'VK_IMG_format_pvrtc\'"] +impl Format { + pub const PVRTC1_4BPP_SRGB_BLOCK_IMG: Self = Format(1000054005); +} +#[doc = "Generated from \'VK_IMG_format_pvrtc\'"] +impl Format { + pub const PVRTC2_2BPP_SRGB_BLOCK_IMG: Self = Format(1000054006); +} +#[doc = "Generated from \'VK_IMG_format_pvrtc\'"] +impl Format { + pub const PVRTC2_4BPP_SRGB_BLOCK_IMG: Self = Format(1000054007); +} +impl NvExternalMemoryCapabilitiesFn { + pub fn name() -> &'static ::std::ffi::CStr { + ::std::ffi::CStr::from_bytes_with_nul(b"VK_NV_external_memory_capabilities\0") + .expect("Wrong extension string") + } +} +#[allow(non_camel_case_types)] +pub type PFN_vkGetPhysicalDeviceExternalImageFormatPropertiesNV = extern "system" fn( + physical_device: PhysicalDevice, + format: Format, + ty: ImageType, + tiling: ImageTiling, + usage: ImageUsageFlags, + flags: ImageCreateFlags, + external_handle_type: ExternalMemoryHandleTypeFlagsNV, + p_external_image_format_properties: *mut ExternalImageFormatPropertiesNV, +) -> Result; +pub struct NvExternalMemoryCapabilitiesFn { + pub get_physical_device_external_image_format_properties_nv: extern "system" fn( + physical_device: PhysicalDevice, + format: Format, + ty: ImageType, + tiling: ImageTiling, + usage: ImageUsageFlags, + flags: ImageCreateFlags, + external_handle_type: ExternalMemoryHandleTypeFlagsNV, + p_external_image_format_properties: *mut ExternalImageFormatPropertiesNV, + ) -> Result, +} +unsafe impl Send for NvExternalMemoryCapabilitiesFn {} +unsafe impl Sync for NvExternalMemoryCapabilitiesFn {} +impl ::std::clone::Clone for NvExternalMemoryCapabilitiesFn { + fn clone(&self) -> Self { + NvExternalMemoryCapabilitiesFn { + get_physical_device_external_image_format_properties_nv: self + .get_physical_device_external_image_format_properties_nv, + } + } +} +impl NvExternalMemoryCapabilitiesFn { + pub fn load(mut _f: F) -> Self + where + F: FnMut(&::std::ffi::CStr) -> *const c_void, + { + NvExternalMemoryCapabilitiesFn { + get_physical_device_external_image_format_properties_nv: unsafe { + extern "system" fn get_physical_device_external_image_format_properties_nv( + _physical_device: PhysicalDevice, + _format: Format, + _ty: ImageType, + _tiling: ImageTiling, + _usage: ImageUsageFlags, + _flags: ImageCreateFlags, + _external_handle_type: ExternalMemoryHandleTypeFlagsNV, + _p_external_image_format_properties: *mut ExternalImageFormatPropertiesNV, + ) -> Result { + panic!(concat!( + "Unable to load ", + stringify!(get_physical_device_external_image_format_properties_nv) + )) + } + let raw_name = stringify!(vkGetPhysicalDeviceExternalImageFormatPropertiesNV); + let cname = ::std::ffi::CString::new(raw_name).unwrap(); + let val = _f(&cname); + if val.is_null() { + 
get_physical_device_external_image_format_properties_nv + } else { + ::std::mem::transmute(val) + } + }, + } + } + #[doc = ""] + pub unsafe fn get_physical_device_external_image_format_properties_nv( + &self, + physical_device: PhysicalDevice, + format: Format, + ty: ImageType, + tiling: ImageTiling, + usage: ImageUsageFlags, + flags: ImageCreateFlags, + external_handle_type: ExternalMemoryHandleTypeFlagsNV, + p_external_image_format_properties: *mut ExternalImageFormatPropertiesNV, + ) -> Result { + (self.get_physical_device_external_image_format_properties_nv)( + physical_device, + format, + ty, + tiling, + usage, + flags, + external_handle_type, + p_external_image_format_properties, + ) + } +} +impl NvExternalMemoryFn { + pub fn name() -> &'static ::std::ffi::CStr { + ::std::ffi::CStr::from_bytes_with_nul(b"VK_NV_external_memory\0") + .expect("Wrong extension string") + } +} +pub struct NvExternalMemoryFn {} +unsafe impl Send for NvExternalMemoryFn {} +unsafe impl Sync for NvExternalMemoryFn {} +impl ::std::clone::Clone for NvExternalMemoryFn { + fn clone(&self) -> Self { + NvExternalMemoryFn {} + } +} +impl NvExternalMemoryFn { + pub fn load(mut _f: F) -> Self + where + F: FnMut(&::std::ffi::CStr) -> *const c_void, + { + NvExternalMemoryFn {} + } +} +#[doc = "Generated from \'VK_NV_external_memory\'"] +impl StructureType { + pub const EXTERNAL_MEMORY_IMAGE_CREATE_INFO_NV: Self = StructureType(1000056000); +} +#[doc = "Generated from \'VK_NV_external_memory\'"] +impl StructureType { + pub const EXPORT_MEMORY_ALLOCATE_INFO_NV: Self = StructureType(1000056001); +} +impl NvExternalMemoryWin32Fn { + pub fn name() -> &'static ::std::ffi::CStr { + ::std::ffi::CStr::from_bytes_with_nul(b"VK_NV_external_memory_win32\0") + .expect("Wrong extension string") + } +} +#[allow(non_camel_case_types)] +pub type PFN_vkGetMemoryWin32HandleNV = extern "system" fn( + device: Device, + memory: DeviceMemory, + handle_type: ExternalMemoryHandleTypeFlagsNV, + p_handle: *mut HANDLE, +) -> Result; +pub struct NvExternalMemoryWin32Fn { + pub get_memory_win32_handle_nv: extern "system" fn( + device: Device, + memory: DeviceMemory, + handle_type: ExternalMemoryHandleTypeFlagsNV, + p_handle: *mut HANDLE, + ) -> Result, +} +unsafe impl Send for NvExternalMemoryWin32Fn {} +unsafe impl Sync for NvExternalMemoryWin32Fn {} +impl ::std::clone::Clone for NvExternalMemoryWin32Fn { + fn clone(&self) -> Self { + NvExternalMemoryWin32Fn { + get_memory_win32_handle_nv: self.get_memory_win32_handle_nv, + } + } +} +impl NvExternalMemoryWin32Fn { + pub fn load(mut _f: F) -> Self + where + F: FnMut(&::std::ffi::CStr) -> *const c_void, + { + NvExternalMemoryWin32Fn { + get_memory_win32_handle_nv: unsafe { + extern "system" fn get_memory_win32_handle_nv( + _device: Device, + _memory: DeviceMemory, + _handle_type: ExternalMemoryHandleTypeFlagsNV, + _p_handle: *mut HANDLE, + ) -> Result { + panic!(concat!( + "Unable to load ", + stringify!(get_memory_win32_handle_nv) + )) + } + let raw_name = stringify!(vkGetMemoryWin32HandleNV); + let cname = ::std::ffi::CString::new(raw_name).unwrap(); + let val = _f(&cname); + if val.is_null() { + get_memory_win32_handle_nv + } else { + ::std::mem::transmute(val) + } + }, + } + } + #[doc = ""] + pub unsafe fn get_memory_win32_handle_nv( + &self, + device: Device, + memory: DeviceMemory, + handle_type: ExternalMemoryHandleTypeFlagsNV, + p_handle: *mut HANDLE, + ) -> Result { + (self.get_memory_win32_handle_nv)(device, memory, handle_type, p_handle) + } +} +#[doc = "Generated from 
\'VK_NV_external_memory_win32\'"] +impl StructureType { + pub const IMPORT_MEMORY_WIN32_HANDLE_INFO_NV: Self = StructureType(1000057000); +} +#[doc = "Generated from \'VK_NV_external_memory_win32\'"] +impl StructureType { + pub const EXPORT_MEMORY_WIN32_HANDLE_INFO_NV: Self = StructureType(1000057001); +} +impl NvWin32KeyedMutexFn { + pub fn name() -> &'static ::std::ffi::CStr { + ::std::ffi::CStr::from_bytes_with_nul(b"VK_NV_win32_keyed_mutex\0") + .expect("Wrong extension string") + } +} +pub struct NvWin32KeyedMutexFn {} +unsafe impl Send for NvWin32KeyedMutexFn {} +unsafe impl Sync for NvWin32KeyedMutexFn {} +impl ::std::clone::Clone for NvWin32KeyedMutexFn { + fn clone(&self) -> Self { + NvWin32KeyedMutexFn {} + } +} +impl NvWin32KeyedMutexFn { + pub fn load(mut _f: F) -> Self + where + F: FnMut(&::std::ffi::CStr) -> *const c_void, + { + NvWin32KeyedMutexFn {} + } +} +#[doc = "Generated from \'VK_NV_win32_keyed_mutex\'"] +impl StructureType { + pub const WIN32_KEYED_MUTEX_ACQUIRE_RELEASE_INFO_NV: Self = StructureType(1000058000); +} +impl KhrGetPhysicalDeviceProperties2Fn { + pub fn name() -> &'static ::std::ffi::CStr { + ::std::ffi::CStr::from_bytes_with_nul(b"VK_KHR_get_physical_device_properties2\0") + .expect("Wrong extension string") + } +} +pub struct KhrGetPhysicalDeviceProperties2Fn {} +unsafe impl Send for KhrGetPhysicalDeviceProperties2Fn {} +unsafe impl Sync for KhrGetPhysicalDeviceProperties2Fn {} +impl ::std::clone::Clone for KhrGetPhysicalDeviceProperties2Fn { + fn clone(&self) -> Self { + KhrGetPhysicalDeviceProperties2Fn {} + } +} +impl KhrGetPhysicalDeviceProperties2Fn { + pub fn load(mut _f: F) -> Self + where + F: FnMut(&::std::ffi::CStr) -> *const c_void, + { + KhrGetPhysicalDeviceProperties2Fn {} + } +} +impl KhrDeviceGroupFn { + pub fn name() -> &'static ::std::ffi::CStr { + ::std::ffi::CStr::from_bytes_with_nul(b"VK_KHR_device_group\0") + .expect("Wrong extension string") + } +} +pub struct KhrDeviceGroupFn { + pub get_device_group_present_capabilities_khr: extern "system" fn( + device: Device, + p_device_group_present_capabilities: *mut DeviceGroupPresentCapabilitiesKHR, + ) -> Result, + pub get_device_group_surface_present_modes_khr: extern "system" fn( + device: Device, + surface: SurfaceKHR, + p_modes: *mut DeviceGroupPresentModeFlagsKHR, + ) -> Result, + pub get_physical_device_present_rectangles_khr: extern "system" fn( + physical_device: PhysicalDevice, + surface: SurfaceKHR, + p_rect_count: *mut u32, + p_rects: *mut Rect2D, + ) -> Result, + pub acquire_next_image2_khr: extern "system" fn( + device: Device, + p_acquire_info: *const AcquireNextImageInfoKHR, + p_image_index: *mut u32, + ) -> Result, +} +unsafe impl Send for KhrDeviceGroupFn {} +unsafe impl Sync for KhrDeviceGroupFn {} +impl ::std::clone::Clone for KhrDeviceGroupFn { + fn clone(&self) -> Self { + KhrDeviceGroupFn { + get_device_group_present_capabilities_khr: self + .get_device_group_present_capabilities_khr, + get_device_group_surface_present_modes_khr: self + .get_device_group_surface_present_modes_khr, + get_physical_device_present_rectangles_khr: self + .get_physical_device_present_rectangles_khr, + acquire_next_image2_khr: self.acquire_next_image2_khr, + } + } +} +impl KhrDeviceGroupFn { + pub fn load(mut _f: F) -> Self + where + F: FnMut(&::std::ffi::CStr) -> *const c_void, + { + KhrDeviceGroupFn { + get_device_group_present_capabilities_khr: unsafe { + extern "system" fn get_device_group_present_capabilities_khr( + _device: Device, + _p_device_group_present_capabilities: *mut 
DeviceGroupPresentCapabilitiesKHR, + ) -> Result { + panic!(concat!( + "Unable to load ", + stringify!(get_device_group_present_capabilities_khr) + )) + } + let raw_name = stringify!(vkGetDeviceGroupPresentCapabilitiesKHR); + let cname = ::std::ffi::CString::new(raw_name).unwrap(); + let val = _f(&cname); + if val.is_null() { + get_device_group_present_capabilities_khr + } else { + ::std::mem::transmute(val) + } + }, + get_device_group_surface_present_modes_khr: unsafe { + extern "system" fn get_device_group_surface_present_modes_khr( + _device: Device, + _surface: SurfaceKHR, + _p_modes: *mut DeviceGroupPresentModeFlagsKHR, + ) -> Result { + panic!(concat!( + "Unable to load ", + stringify!(get_device_group_surface_present_modes_khr) + )) + } + let raw_name = stringify!(vkGetDeviceGroupSurfacePresentModesKHR); + let cname = ::std::ffi::CString::new(raw_name).unwrap(); + let val = _f(&cname); + if val.is_null() { + get_device_group_surface_present_modes_khr + } else { + ::std::mem::transmute(val) + } + }, + get_physical_device_present_rectangles_khr: unsafe { + extern "system" fn get_physical_device_present_rectangles_khr( + _physical_device: PhysicalDevice, + _surface: SurfaceKHR, + _p_rect_count: *mut u32, + _p_rects: *mut Rect2D, + ) -> Result { + panic!(concat!( + "Unable to load ", + stringify!(get_physical_device_present_rectangles_khr) + )) + } + let raw_name = stringify!(vkGetPhysicalDevicePresentRectanglesKHR); + let cname = ::std::ffi::CString::new(raw_name).unwrap(); + let val = _f(&cname); + if val.is_null() { + get_physical_device_present_rectangles_khr + } else { + ::std::mem::transmute(val) + } + }, + acquire_next_image2_khr: unsafe { + extern "system" fn acquire_next_image2_khr( + _device: Device, + _p_acquire_info: *const AcquireNextImageInfoKHR, + _p_image_index: *mut u32, + ) -> Result { + panic!(concat!( + "Unable to load ", + stringify!(acquire_next_image2_khr) + )) + } + let raw_name = stringify!(vkAcquireNextImage2KHR); + let cname = ::std::ffi::CString::new(raw_name).unwrap(); + let val = _f(&cname); + if val.is_null() { + acquire_next_image2_khr + } else { + ::std::mem::transmute(val) + } + }, + } + } + #[doc = ""] + pub unsafe fn get_device_group_present_capabilities_khr( + &self, + device: Device, + p_device_group_present_capabilities: *mut DeviceGroupPresentCapabilitiesKHR, + ) -> Result { + (self.get_device_group_present_capabilities_khr)( + device, + p_device_group_present_capabilities, + ) + } + #[doc = ""] + pub unsafe fn get_device_group_surface_present_modes_khr( + &self, + device: Device, + surface: SurfaceKHR, + p_modes: *mut DeviceGroupPresentModeFlagsKHR, + ) -> Result { + (self.get_device_group_surface_present_modes_khr)(device, surface, p_modes) + } + #[doc = ""] + pub unsafe fn get_physical_device_present_rectangles_khr( + &self, + physical_device: PhysicalDevice, + surface: SurfaceKHR, + p_rect_count: *mut u32, + p_rects: *mut Rect2D, + ) -> Result { + (self.get_physical_device_present_rectangles_khr)( + physical_device, + surface, + p_rect_count, + p_rects, + ) + } + #[doc = ""] + pub unsafe fn acquire_next_image2_khr( + &self, + device: Device, + p_acquire_info: *const AcquireNextImageInfoKHR, + p_image_index: *mut u32, + ) -> Result { + (self.acquire_next_image2_khr)(device, p_acquire_info, p_image_index) + } +} +impl ExtValidationFlagsFn { + pub fn name() -> &'static ::std::ffi::CStr { + ::std::ffi::CStr::from_bytes_with_nul(b"VK_EXT_validation_flags\0") + .expect("Wrong extension string") + } +} +pub struct ExtValidationFlagsFn {} +unsafe impl 
+impl ExtValidationFlagsFn {
+    pub fn name() -> &'static ::std::ffi::CStr {
+        ::std::ffi::CStr::from_bytes_with_nul(b"VK_EXT_validation_flags\0")
+            .expect("Wrong extension string")
+    }
+}
+pub struct ExtValidationFlagsFn {}
+unsafe impl Send for ExtValidationFlagsFn {}
+unsafe impl Sync for ExtValidationFlagsFn {}
+impl ::std::clone::Clone for ExtValidationFlagsFn {
+    fn clone(&self) -> Self {
+        ExtValidationFlagsFn {}
+    }
+}
+impl ExtValidationFlagsFn {
+    pub fn load<F>(mut _f: F) -> Self
+    where
+        F: FnMut(&::std::ffi::CStr) -> *const c_void,
+    {
+        ExtValidationFlagsFn {}
+    }
+}
+#[doc = "Generated from \'VK_EXT_validation_flags\'"]
+impl StructureType {
+    pub const VALIDATION_FLAGS_EXT: Self = StructureType(1000061000);
+}
+impl NnViSurfaceFn {
+    pub fn name() -> &'static ::std::ffi::CStr {
+        ::std::ffi::CStr::from_bytes_with_nul(b"VK_NN_vi_surface\0")
+            .expect("Wrong extension string")
+    }
+}
+#[allow(non_camel_case_types)]
+pub type PFN_vkCreateViSurfaceNN = extern "system" fn(
+    instance: Instance,
+    p_create_info: *const ViSurfaceCreateInfoNN,
+    p_allocator: *const AllocationCallbacks,
+    p_surface: *mut SurfaceKHR,
+) -> Result;
+pub struct NnViSurfaceFn {
+    pub create_vi_surface_nn: extern "system" fn(
+        instance: Instance,
+        p_create_info: *const ViSurfaceCreateInfoNN,
+        p_allocator: *const AllocationCallbacks,
+        p_surface: *mut SurfaceKHR,
+    ) -> Result,
+}
+unsafe impl Send for NnViSurfaceFn {}
+unsafe impl Sync for NnViSurfaceFn {}
+impl ::std::clone::Clone for NnViSurfaceFn {
+    fn clone(&self) -> Self {
+        NnViSurfaceFn {
+            create_vi_surface_nn: self.create_vi_surface_nn,
+        }
+    }
+}
+impl NnViSurfaceFn {
+    pub fn load<F>(mut _f: F) -> Self
+    where
+        F: FnMut(&::std::ffi::CStr) -> *const c_void,
+    {
+        NnViSurfaceFn {
+            create_vi_surface_nn: unsafe {
+                extern "system" fn create_vi_surface_nn(
+                    _instance: Instance,
+                    _p_create_info: *const ViSurfaceCreateInfoNN,
+                    _p_allocator: *const AllocationCallbacks,
+                    _p_surface: *mut SurfaceKHR,
+                ) -> Result {
+                    panic!(concat!("Unable to load ", stringify!(create_vi_surface_nn)))
+                }
+                let raw_name = stringify!(vkCreateViSurfaceNN);
+                let cname = ::std::ffi::CString::new(raw_name).unwrap();
+                let val = _f(&cname);
+                if val.is_null() {
+                    create_vi_surface_nn
+                } else {
+                    ::std::mem::transmute(val)
+                }
+            },
+        }
+    }
+    #[doc = ""]
+    pub unsafe fn create_vi_surface_nn(
+        &self,
+        instance: Instance,
+        p_create_info: *const ViSurfaceCreateInfoNN,
+        p_allocator: *const AllocationCallbacks,
+        p_surface: *mut SurfaceKHR,
+    ) -> Result {
+        (self.create_vi_surface_nn)(instance, p_create_info, p_allocator, p_surface)
+    }
+}
+#[doc = "Generated from \'VK_NN_vi_surface\'"]
+impl StructureType {
+    pub const VI_SURFACE_CREATE_INFO_NN: Self = StructureType(1000062000);
+}
+impl KhrShaderDrawParametersFn {
+    pub fn name() -> &'static ::std::ffi::CStr {
+        ::std::ffi::CStr::from_bytes_with_nul(b"VK_KHR_shader_draw_parameters\0")
+            .expect("Wrong extension string")
+    }
+}
+pub struct KhrShaderDrawParametersFn {}
+unsafe impl Send for KhrShaderDrawParametersFn {}
+unsafe impl Sync for KhrShaderDrawParametersFn {}
+impl ::std::clone::Clone for KhrShaderDrawParametersFn {
+    fn clone(&self) -> Self {
+        KhrShaderDrawParametersFn {}
+    }
+}
+impl KhrShaderDrawParametersFn {
+    pub fn load<F>(mut _f: F) -> Self
+    where
+        F: FnMut(&::std::ffi::CStr) -> *const c_void,
+    {
+        KhrShaderDrawParametersFn {}
+    }
+}
+impl ExtShaderSubgroupBallotFn {
+    pub fn name() -> &'static ::std::ffi::CStr {
+        ::std::ffi::CStr::from_bytes_with_nul(b"VK_EXT_shader_subgroup_ballot\0")
+            .expect("Wrong extension string")
+    }
+}
+pub struct ExtShaderSubgroupBallotFn {}
+unsafe impl Send for ExtShaderSubgroupBallotFn {}
+unsafe impl Sync for ExtShaderSubgroupBallotFn {}
+impl ::std::clone::Clone for ExtShaderSubgroupBallotFn {
+    fn clone(&self) -> Self {
+        ExtShaderSubgroupBallotFn {}
+    }
+}
+impl ExtShaderSubgroupBallotFn {
+    pub fn load<F>(mut _f: F) -> Self
+    where
+        F: FnMut(&::std::ffi::CStr) -> *const c_void,
+    {
+        ExtShaderSubgroupBallotFn {}
+    }
+}
+impl ExtShaderSubgroupVoteFn {
+    pub fn name() -> &'static ::std::ffi::CStr {
+        ::std::ffi::CStr::from_bytes_with_nul(b"VK_EXT_shader_subgroup_vote\0")
+            .expect("Wrong extension string")
+    }
+}
+pub struct ExtShaderSubgroupVoteFn {}
+unsafe impl Send for ExtShaderSubgroupVoteFn {}
+unsafe impl Sync for ExtShaderSubgroupVoteFn {}
+impl ::std::clone::Clone for ExtShaderSubgroupVoteFn {
+    fn clone(&self) -> Self {
+        ExtShaderSubgroupVoteFn {}
+    }
+}
+impl ExtShaderSubgroupVoteFn {
+    pub fn load<F>(mut _f: F) -> Self
+    where
+        F: FnMut(&::std::ffi::CStr) -> *const c_void,
+    {
+        ExtShaderSubgroupVoteFn {}
+    }
+}
+impl ArmExtension01Fn {
+    pub fn name() -> &'static ::std::ffi::CStr {
+        ::std::ffi::CStr::from_bytes_with_nul(b"VK_ARM_extension_01\0")
+            .expect("Wrong extension string")
+    }
+}
+pub struct ArmExtension01Fn {}
+unsafe impl Send for ArmExtension01Fn {}
+unsafe impl Sync for ArmExtension01Fn {}
+impl ::std::clone::Clone for ArmExtension01Fn {
+    fn clone(&self) -> Self {
+        ArmExtension01Fn {}
+    }
+}
+impl ArmExtension01Fn {
+    pub fn load<F>(mut _f: F) -> Self
+    where
+        F: FnMut(&::std::ffi::CStr) -> *const c_void,
+    {
+        ArmExtension01Fn {}
+    }
+}
+impl ExtAstcDecodeModeFn {
+    pub fn name() -> &'static ::std::ffi::CStr {
+        ::std::ffi::CStr::from_bytes_with_nul(b"VK_EXT_astc_decode_mode\0")
+            .expect("Wrong extension string")
+    }
+}
+pub struct ExtAstcDecodeModeFn {}
+unsafe impl Send for ExtAstcDecodeModeFn {}
+unsafe impl Sync for ExtAstcDecodeModeFn {}
+impl ::std::clone::Clone for ExtAstcDecodeModeFn {
+    fn clone(&self) -> Self {
+        ExtAstcDecodeModeFn {}
+    }
+}
+impl ExtAstcDecodeModeFn {
+    pub fn load<F>(mut _f: F) -> Self
+    where
+        F: FnMut(&::std::ffi::CStr) -> *const c_void,
+    {
+        ExtAstcDecodeModeFn {}
+    }
+}
+#[doc = "Generated from \'VK_EXT_astc_decode_mode\'"]
+impl StructureType {
+    pub const IMAGE_VIEW_ASTC_DECODE_MODE_EXT: Self = StructureType(1000067000);
+}
+#[doc = "Generated from \'VK_EXT_astc_decode_mode\'"]
+impl StructureType {
+    pub const PHYSICAL_DEVICE_ASTC_DECODE_FEATURES_EXT: Self = StructureType(1000067001);
+}
+impl ImgExtension69Fn {
+    pub fn name() -> &'static ::std::ffi::CStr {
+        ::std::ffi::CStr::from_bytes_with_nul(b"VK_IMG_extension_69\0")
+            .expect("Wrong extension string")
+    }
+}
+pub struct ImgExtension69Fn {}
+unsafe impl Send for ImgExtension69Fn {}
+unsafe impl Sync for ImgExtension69Fn {}
+impl ::std::clone::Clone for ImgExtension69Fn {
+    fn clone(&self) -> Self {
+        ImgExtension69Fn {}
+    }
+}
+impl ImgExtension69Fn {
+    pub fn load<F>(mut _f: F) -> Self
+    where
+        F: FnMut(&::std::ffi::CStr) -> *const c_void,
+    {
+        ImgExtension69Fn {}
+    }
+}
+impl KhrMaintenance1Fn {
+    pub fn name() -> &'static ::std::ffi::CStr {
+        ::std::ffi::CStr::from_bytes_with_nul(b"VK_KHR_maintenance1\0")
+            .expect("Wrong extension string")
+    }
+}
+pub struct KhrMaintenance1Fn {}
+unsafe impl Send for KhrMaintenance1Fn {}
+unsafe impl Sync for KhrMaintenance1Fn {}
+impl ::std::clone::Clone for KhrMaintenance1Fn {
+    fn clone(&self) -> Self {
+        KhrMaintenance1Fn {}
+    }
+}
+impl KhrMaintenance1Fn {
+    pub fn load<F>(mut _f: F) -> Self
+    where
+        F: FnMut(&::std::ffi::CStr) -> *const c_void,
+    {
+        KhrMaintenance1Fn {}
+    }
+}
+impl KhrDeviceGroupCreationFn {
+    pub fn name() -> &'static ::std::ffi::CStr {
+        ::std::ffi::CStr::from_bytes_with_nul(b"VK_KHR_device_group_creation\0")
+            .expect("Wrong extension string")
+    }
+}
+pub struct KhrDeviceGroupCreationFn {}
+unsafe impl Send for KhrDeviceGroupCreationFn {}
+unsafe impl Sync for KhrDeviceGroupCreationFn {}
+impl ::std::clone::Clone for KhrDeviceGroupCreationFn {
+    fn clone(&self) -> Self {
+        KhrDeviceGroupCreationFn {}
+    }
+}
+impl KhrDeviceGroupCreationFn {
+    pub fn load<F>(mut _f: F) -> Self
+    where
+        F: FnMut(&::std::ffi::CStr) -> *const c_void,
+    {
+        KhrDeviceGroupCreationFn {}
+    }
+}
+impl KhrExternalMemoryCapabilitiesFn {
+    pub fn name() -> &'static ::std::ffi::CStr {
+        ::std::ffi::CStr::from_bytes_with_nul(b"VK_KHR_external_memory_capabilities\0")
+            .expect("Wrong extension string")
+    }
+}
+pub struct KhrExternalMemoryCapabilitiesFn {}
+unsafe impl Send for KhrExternalMemoryCapabilitiesFn {}
+unsafe impl Sync for KhrExternalMemoryCapabilitiesFn {}
+impl ::std::clone::Clone for KhrExternalMemoryCapabilitiesFn {
+    fn clone(&self) -> Self {
+        KhrExternalMemoryCapabilitiesFn {}
+    }
+}
+impl KhrExternalMemoryCapabilitiesFn {
+    pub fn load<F>(mut _f: F) -> Self
+    where
+        F: FnMut(&::std::ffi::CStr) -> *const c_void,
+    {
+        KhrExternalMemoryCapabilitiesFn {}
+    }
+}
+impl KhrExternalMemoryFn {
+    pub fn name() -> &'static ::std::ffi::CStr {
+        ::std::ffi::CStr::from_bytes_with_nul(b"VK_KHR_external_memory\0")
+            .expect("Wrong extension string")
+    }
+}
+pub struct KhrExternalMemoryFn {}
+unsafe impl Send for KhrExternalMemoryFn {}
+unsafe impl Sync for KhrExternalMemoryFn {}
+impl ::std::clone::Clone for KhrExternalMemoryFn {
+    fn clone(&self) -> Self {
+        KhrExternalMemoryFn {}
+    }
+}
+impl KhrExternalMemoryFn {
+    pub fn load<F>(mut _f: F) -> Self
+    where
+        F: FnMut(&::std::ffi::CStr) -> *const c_void,
+    {
+        KhrExternalMemoryFn {}
+    }
+}
+impl KhrExternalMemoryWin32Fn {
+    pub fn name() -> &'static ::std::ffi::CStr {
+        ::std::ffi::CStr::from_bytes_with_nul(b"VK_KHR_external_memory_win32\0")
+            .expect("Wrong extension string")
+    }
+}
+#[allow(non_camel_case_types)]
+pub type PFN_vkGetMemoryWin32HandleKHR = extern "system" fn(
+    device: Device,
+    p_get_win32_handle_info: *const MemoryGetWin32HandleInfoKHR,
+    p_handle: *mut HANDLE,
+) -> Result;
+#[allow(non_camel_case_types)]
+pub type PFN_vkGetMemoryWin32HandlePropertiesKHR = extern "system" fn(
+    device: Device,
+    handle_type: ExternalMemoryHandleTypeFlags,
+    handle: HANDLE,
+    p_memory_win32_handle_properties: *mut MemoryWin32HandlePropertiesKHR,
+) -> Result;
+pub struct KhrExternalMemoryWin32Fn {
+    pub get_memory_win32_handle_khr: extern "system" fn(
+        device: Device,
+        p_get_win32_handle_info: *const MemoryGetWin32HandleInfoKHR,
+        p_handle: *mut HANDLE,
+    ) -> Result,
+    pub get_memory_win32_handle_properties_khr: extern "system" fn(
+        device: Device,
+        handle_type: ExternalMemoryHandleTypeFlags,
+        handle: HANDLE,
+        p_memory_win32_handle_properties: *mut MemoryWin32HandlePropertiesKHR,
+    ) -> Result,
+}
+unsafe impl Send for KhrExternalMemoryWin32Fn {}
+unsafe impl Sync for KhrExternalMemoryWin32Fn {}
+impl ::std::clone::Clone for KhrExternalMemoryWin32Fn {
+    fn clone(&self) -> Self {
+        KhrExternalMemoryWin32Fn {
+            get_memory_win32_handle_khr: self.get_memory_win32_handle_khr,
+            get_memory_win32_handle_properties_khr: self.get_memory_win32_handle_properties_khr,
+        }
+    }
+}
+impl KhrExternalMemoryWin32Fn {
+    pub fn load<F>(mut _f: F) -> Self
+    where
+        F: FnMut(&::std::ffi::CStr) -> *const c_void,
+    {
+        KhrExternalMemoryWin32Fn {
+            get_memory_win32_handle_khr: unsafe {
+                extern "system" fn get_memory_win32_handle_khr(
+                    _device: Device,
+                    _p_get_win32_handle_info: *const MemoryGetWin32HandleInfoKHR,
+                    _p_handle: *mut HANDLE,
+                ) -> Result {
+                    panic!(concat!("Unable to load ", stringify!(get_memory_win32_handle_khr)))
+                }
+                let raw_name = stringify!(vkGetMemoryWin32HandleKHR);
+                let cname = ::std::ffi::CString::new(raw_name).unwrap();
+                let val = _f(&cname);
+                if val.is_null() {
+                    get_memory_win32_handle_khr
+                } else {
+                    ::std::mem::transmute(val)
+                }
+            },
+            get_memory_win32_handle_properties_khr: unsafe {
+                extern "system" fn get_memory_win32_handle_properties_khr(
+                    _device: Device,
+                    _handle_type: ExternalMemoryHandleTypeFlags,
+                    _handle: HANDLE,
+                    _p_memory_win32_handle_properties: *mut MemoryWin32HandlePropertiesKHR,
+                ) -> Result {
+                    panic!(concat!("Unable to load ", stringify!(get_memory_win32_handle_properties_khr)))
+                }
+                let raw_name = stringify!(vkGetMemoryWin32HandlePropertiesKHR);
+                let cname = ::std::ffi::CString::new(raw_name).unwrap();
+                let val = _f(&cname);
+                if val.is_null() {
+                    get_memory_win32_handle_properties_khr
+                } else {
+                    ::std::mem::transmute(val)
+                }
+            },
+        }
+    }
+    #[doc = ""]
+    pub unsafe fn get_memory_win32_handle_khr(
+        &self,
+        device: Device,
+        p_get_win32_handle_info: *const MemoryGetWin32HandleInfoKHR,
+        p_handle: *mut HANDLE,
+    ) -> Result {
+        (self.get_memory_win32_handle_khr)(device, p_get_win32_handle_info, p_handle)
+    }
+    #[doc = ""]
+    pub unsafe fn get_memory_win32_handle_properties_khr(
+        &self,
+        device: Device,
+        handle_type: ExternalMemoryHandleTypeFlags,
+        handle: HANDLE,
+        p_memory_win32_handle_properties: *mut MemoryWin32HandlePropertiesKHR,
+    ) -> Result {
+        (self.get_memory_win32_handle_properties_khr)(
+            device,
+            handle_type,
+            handle,
+            p_memory_win32_handle_properties,
+        )
+    }
+}
+#[doc = "Generated from \'VK_KHR_external_memory_win32\'"]
+impl StructureType {
+    pub const IMPORT_MEMORY_WIN32_HANDLE_INFO_KHR: Self = StructureType(1000073000);
+}
+#[doc = "Generated from \'VK_KHR_external_memory_win32\'"]
+impl StructureType {
+    pub const EXPORT_MEMORY_WIN32_HANDLE_INFO_KHR: Self = StructureType(1000073001);
+}
+#[doc = "Generated from \'VK_KHR_external_memory_win32\'"]
+impl StructureType {
+    pub const MEMORY_WIN32_HANDLE_PROPERTIES_KHR: Self = StructureType(1000073002);
+}
+#[doc = "Generated from \'VK_KHR_external_memory_win32\'"]
+impl StructureType {
+    pub const MEMORY_GET_WIN32_HANDLE_INFO_KHR: Self = StructureType(1000073003);
+}
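Each generated `load` takes any `FnMut(&CStr) -> *const c_void` closure, so a table is agnostic about where its pointers come from (an instance loader, a device loader, or a test stub). A minimal sketch of populating the table just defined; `resolve_device_symbol` is a hypothetical stand-in for a `vkGetDeviceProcAddr`-style lookup, not an API from this patch:

    // Hypothetical loading sketch: a null return from the resolver keeps the
    // generated panicking stub in place for that entry point.
    let win32_memory_fns = KhrExternalMemoryWin32Fn::load(|name: &::std::ffi::CStr| unsafe {
        resolve_device_symbol(device, name.as_ptr()) // returns *const c_void, null on failure
    });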
+impl KhrExternalMemoryFdFn {
+    pub fn name() -> &'static ::std::ffi::CStr {
+        ::std::ffi::CStr::from_bytes_with_nul(b"VK_KHR_external_memory_fd\0")
+            .expect("Wrong extension string")
+    }
+}
+#[allow(non_camel_case_types)]
+pub type PFN_vkGetMemoryFdKHR = extern "system" fn(
+    device: Device,
+    p_get_fd_info: *const MemoryGetFdInfoKHR,
+    p_fd: *mut c_int,
+) -> Result;
+#[allow(non_camel_case_types)]
+pub type PFN_vkGetMemoryFdPropertiesKHR = extern "system" fn(
+    device: Device,
+    handle_type: ExternalMemoryHandleTypeFlags,
+    fd: c_int,
+    p_memory_fd_properties: *mut MemoryFdPropertiesKHR,
+) -> Result;
+pub struct KhrExternalMemoryFdFn {
+    pub get_memory_fd_khr: extern "system" fn(
+        device: Device,
+        p_get_fd_info: *const MemoryGetFdInfoKHR,
+        p_fd: *mut c_int,
+    ) -> Result,
+    pub get_memory_fd_properties_khr: extern "system" fn(
+        device: Device,
+        handle_type: ExternalMemoryHandleTypeFlags,
+        fd: c_int,
+        p_memory_fd_properties: *mut MemoryFdPropertiesKHR,
+    ) -> Result,
+}
+unsafe impl Send for KhrExternalMemoryFdFn {}
+unsafe impl Sync for KhrExternalMemoryFdFn {}
+impl ::std::clone::Clone for KhrExternalMemoryFdFn {
+    fn clone(&self) -> Self {
+        KhrExternalMemoryFdFn {
+            get_memory_fd_khr: self.get_memory_fd_khr,
+            get_memory_fd_properties_khr: self.get_memory_fd_properties_khr,
+        }
+    }
+}
+impl KhrExternalMemoryFdFn {
+    pub fn load<F>(mut _f: F) -> Self
+    where
+        F: FnMut(&::std::ffi::CStr) -> *const c_void,
+    {
+        KhrExternalMemoryFdFn {
+            get_memory_fd_khr: unsafe {
+                extern "system" fn get_memory_fd_khr(
+                    _device: Device,
+                    _p_get_fd_info: *const MemoryGetFdInfoKHR,
+                    _p_fd: *mut c_int,
+                ) -> Result {
+                    panic!(concat!("Unable to load ", stringify!(get_memory_fd_khr)))
+                }
+                let raw_name = stringify!(vkGetMemoryFdKHR);
+                let cname = ::std::ffi::CString::new(raw_name).unwrap();
+                let val = _f(&cname);
+                if val.is_null() {
+                    get_memory_fd_khr
+                } else {
+                    ::std::mem::transmute(val)
+                }
+            },
+            get_memory_fd_properties_khr: unsafe {
+                extern "system" fn get_memory_fd_properties_khr(
+                    _device: Device,
+                    _handle_type: ExternalMemoryHandleTypeFlags,
+                    _fd: c_int,
+                    _p_memory_fd_properties: *mut MemoryFdPropertiesKHR,
+                ) -> Result {
+                    panic!(concat!("Unable to load ", stringify!(get_memory_fd_properties_khr)))
+                }
+                let raw_name = stringify!(vkGetMemoryFdPropertiesKHR);
+                let cname = ::std::ffi::CString::new(raw_name).unwrap();
+                let val = _f(&cname);
+                if val.is_null() {
+                    get_memory_fd_properties_khr
+                } else {
+                    ::std::mem::transmute(val)
+                }
+            },
+        }
+    }
+    #[doc = ""]
+    pub unsafe fn get_memory_fd_khr(
+        &self,
+        device: Device,
+        p_get_fd_info: *const MemoryGetFdInfoKHR,
+        p_fd: *mut c_int,
+    ) -> Result {
+        (self.get_memory_fd_khr)(device, p_get_fd_info, p_fd)
+    }
+    #[doc = ""]
+    pub unsafe fn get_memory_fd_properties_khr(
+        &self,
+        device: Device,
+        handle_type: ExternalMemoryHandleTypeFlags,
+        fd: c_int,
+        p_memory_fd_properties: *mut MemoryFdPropertiesKHR,
+    ) -> Result {
+        (self.get_memory_fd_properties_khr)(device, handle_type, fd, p_memory_fd_properties)
+    }
+}
+#[doc = "Generated from \'VK_KHR_external_memory_fd\'"]
+impl StructureType {
+    pub const IMPORT_MEMORY_FD_INFO_KHR: Self = StructureType(1000074000);
+}
+#[doc = "Generated from \'VK_KHR_external_memory_fd\'"]
+impl StructureType {
+    pub const MEMORY_FD_PROPERTIES_KHR: Self = StructureType(1000074001);
+}
+#[doc = "Generated from \'VK_KHR_external_memory_fd\'"]
+impl StructureType {
+    pub const MEMORY_GET_FD_INFO_KHR: Self = StructureType(1000074002);
+}
+impl KhrWin32KeyedMutexFn {
+    pub fn name() -> &'static ::std::ffi::CStr {
+        ::std::ffi::CStr::from_bytes_with_nul(b"VK_KHR_win32_keyed_mutex\0")
+            .expect("Wrong extension string")
+    }
+}
+pub struct KhrWin32KeyedMutexFn {}
+unsafe impl Send for KhrWin32KeyedMutexFn {}
+unsafe impl Sync for KhrWin32KeyedMutexFn {}
+impl ::std::clone::Clone for KhrWin32KeyedMutexFn {
+    fn clone(&self) -> Self {
+        KhrWin32KeyedMutexFn {}
+    }
+}
+impl KhrWin32KeyedMutexFn {
+    pub fn load<F>(mut _f: F) -> Self
+    where
+        F: FnMut(&::std::ffi::CStr) -> *const c_void,
+    {
+        KhrWin32KeyedMutexFn {}
+    }
+}
+#[doc = "Generated from \'VK_KHR_win32_keyed_mutex\'"]
+impl StructureType {
+    pub const WIN32_KEYED_MUTEX_ACQUIRE_RELEASE_INFO_KHR: Self = StructureType(1000075000);
+}
+impl KhrExternalSemaphoreCapabilitiesFn {
+    pub fn name() -> &'static ::std::ffi::CStr {
+        ::std::ffi::CStr::from_bytes_with_nul(b"VK_KHR_external_semaphore_capabilities\0")
+            .expect("Wrong extension string")
+    }
+}
+pub struct KhrExternalSemaphoreCapabilitiesFn {}
+unsafe impl Send for KhrExternalSemaphoreCapabilitiesFn {}
+unsafe impl Sync for KhrExternalSemaphoreCapabilitiesFn {}
+impl ::std::clone::Clone for KhrExternalSemaphoreCapabilitiesFn {
+    fn clone(&self) -> Self {
+        KhrExternalSemaphoreCapabilitiesFn {}
+    }
+}
+impl KhrExternalSemaphoreCapabilitiesFn {
+    pub fn load<F>(mut _f: F) -> Self
+    where
+        F: FnMut(&::std::ffi::CStr) -> *const c_void,
+    {
+        KhrExternalSemaphoreCapabilitiesFn {}
+    }
+}
+impl KhrExternalSemaphoreFn {
+    pub fn name() -> &'static ::std::ffi::CStr {
+        ::std::ffi::CStr::from_bytes_with_nul(b"VK_KHR_external_semaphore\0")
+            .expect("Wrong extension string")
+    }
+}
+pub struct KhrExternalSemaphoreFn {}
+unsafe impl Send for KhrExternalSemaphoreFn {}
+unsafe impl Sync for KhrExternalSemaphoreFn {}
+impl ::std::clone::Clone for KhrExternalSemaphoreFn {
+    fn clone(&self) -> Self {
+        KhrExternalSemaphoreFn {}
+    }
+}
+impl KhrExternalSemaphoreFn {
+    pub fn load<F>(mut _f: F) -> Self
+    where
+        F: FnMut(&::std::ffi::CStr) -> *const c_void,
+    {
+        KhrExternalSemaphoreFn {}
+    }
+}
+impl KhrExternalSemaphoreWin32Fn {
+    pub fn name() -> &'static ::std::ffi::CStr {
+        ::std::ffi::CStr::from_bytes_with_nul(b"VK_KHR_external_semaphore_win32\0")
+            .expect("Wrong extension string")
+    }
+}
+#[allow(non_camel_case_types)]
+pub type PFN_vkImportSemaphoreWin32HandleKHR = extern "system" fn(
+    device: Device,
+    p_import_semaphore_win32_handle_info: *const ImportSemaphoreWin32HandleInfoKHR,
+) -> Result;
+#[allow(non_camel_case_types)]
+pub type PFN_vkGetSemaphoreWin32HandleKHR = extern "system" fn(
+    device: Device,
+    p_get_win32_handle_info: *const SemaphoreGetWin32HandleInfoKHR,
+    p_handle: *mut HANDLE,
+) -> Result;
+pub struct KhrExternalSemaphoreWin32Fn {
+    pub import_semaphore_win32_handle_khr: extern "system" fn(
+        device: Device,
+        p_import_semaphore_win32_handle_info: *const ImportSemaphoreWin32HandleInfoKHR,
+    ) -> Result,
+    pub get_semaphore_win32_handle_khr: extern "system" fn(
+        device: Device,
+        p_get_win32_handle_info: *const SemaphoreGetWin32HandleInfoKHR,
+        p_handle: *mut HANDLE,
+    ) -> Result,
+}
+unsafe impl Send for KhrExternalSemaphoreWin32Fn {}
+unsafe impl Sync for KhrExternalSemaphoreWin32Fn {}
+impl ::std::clone::Clone for KhrExternalSemaphoreWin32Fn {
+    fn clone(&self) -> Self {
+        KhrExternalSemaphoreWin32Fn {
+            import_semaphore_win32_handle_khr: self.import_semaphore_win32_handle_khr,
+            get_semaphore_win32_handle_khr: self.get_semaphore_win32_handle_khr,
+        }
+    }
+}
+impl KhrExternalSemaphoreWin32Fn {
+    pub fn load<F>(mut _f: F) -> Self
+    where
+        F: FnMut(&::std::ffi::CStr) -> *const c_void,
+    {
+        KhrExternalSemaphoreWin32Fn {
+            import_semaphore_win32_handle_khr: unsafe {
+                extern "system" fn import_semaphore_win32_handle_khr(
+                    _device: Device,
+                    _p_import_semaphore_win32_handle_info: *const ImportSemaphoreWin32HandleInfoKHR,
+                ) -> Result {
+                    panic!(concat!("Unable to load ", stringify!(import_semaphore_win32_handle_khr)))
+                }
+                let raw_name = stringify!(vkImportSemaphoreWin32HandleKHR);
+                let cname = ::std::ffi::CString::new(raw_name).unwrap();
+                let val = _f(&cname);
+                if val.is_null() {
+                    import_semaphore_win32_handle_khr
+                } else {
+                    ::std::mem::transmute(val)
+                }
+            },
+            get_semaphore_win32_handle_khr: unsafe {
+                extern "system" fn get_semaphore_win32_handle_khr(
+                    _device: Device,
+                    _p_get_win32_handle_info: *const SemaphoreGetWin32HandleInfoKHR,
+                    _p_handle: *mut HANDLE,
+                ) -> Result {
+                    panic!(concat!("Unable to load ", stringify!(get_semaphore_win32_handle_khr)))
+                }
+                let raw_name = stringify!(vkGetSemaphoreWin32HandleKHR);
+                let cname = ::std::ffi::CString::new(raw_name).unwrap();
+                let val = _f(&cname);
+                if val.is_null() {
+                    get_semaphore_win32_handle_khr
+                } else {
+                    ::std::mem::transmute(val)
+                }
+            },
+        }
+    }
+    #[doc = ""]
+    pub unsafe fn import_semaphore_win32_handle_khr(
+        &self,
+        device: Device,
+        p_import_semaphore_win32_handle_info: *const ImportSemaphoreWin32HandleInfoKHR,
+    ) -> Result {
+        (self.import_semaphore_win32_handle_khr)(device, p_import_semaphore_win32_handle_info)
+    }
+    #[doc = ""]
+    pub unsafe fn get_semaphore_win32_handle_khr(
+        &self,
+        device: Device,
+        p_get_win32_handle_info: *const SemaphoreGetWin32HandleInfoKHR,
+        p_handle: *mut HANDLE,
+    ) -> Result {
+        (self.get_semaphore_win32_handle_khr)(device, p_get_win32_handle_info, p_handle)
+    }
+}
+#[doc = "Generated from \'VK_KHR_external_semaphore_win32\'"]
+impl StructureType {
+    pub const IMPORT_SEMAPHORE_WIN32_HANDLE_INFO_KHR: Self = StructureType(1000078000);
+}
+#[doc = "Generated from \'VK_KHR_external_semaphore_win32\'"]
+impl StructureType {
+    pub const EXPORT_SEMAPHORE_WIN32_HANDLE_INFO_KHR: Self = StructureType(1000078001);
+}
+#[doc = "Generated from \'VK_KHR_external_semaphore_win32\'"]
+impl StructureType {
+    pub const D3D12_FENCE_SUBMIT_INFO_KHR: Self = StructureType(1000078002);
+}
+#[doc = "Generated from \'VK_KHR_external_semaphore_win32\'"]
+impl StructureType {
+    pub const SEMAPHORE_GET_WIN32_HANDLE_INFO_KHR: Self = StructureType(1000078003);
+}
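The `if val.is_null()` / `transmute` tail that every loader above shares is the whole fallback story: a successfully resolved `*const c_void` is reinterpreted as the typed `extern "system" fn` pointer, while a null result leaves the local panicking stub installed, so calling an unavailable entry point fails with a clear message instead of jumping through a null pointer. Reduced to a standalone sketch (all names here are illustrative, not from this patch):

    // Illustrative only: same shape as the generated loaders above.
    unsafe fn resolve_or_stub(val: *const c_void) -> extern "system" fn() {
        extern "system" fn stub() {
            panic!("Unable to load entry point")
        }
        if val.is_null() {
            stub
        } else {
            ::std::mem::transmute(val) // reinterpret the raw pointer as a typed fn pointer
        }
    }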
+impl KhrExternalSemaphoreFdFn {
+    pub fn name() -> &'static ::std::ffi::CStr {
+        ::std::ffi::CStr::from_bytes_with_nul(b"VK_KHR_external_semaphore_fd\0")
+            .expect("Wrong extension string")
+    }
+}
+#[allow(non_camel_case_types)]
+pub type PFN_vkImportSemaphoreFdKHR = extern "system" fn(
+    device: Device,
+    p_import_semaphore_fd_info: *const ImportSemaphoreFdInfoKHR,
+) -> Result;
+#[allow(non_camel_case_types)]
+pub type PFN_vkGetSemaphoreFdKHR = extern "system" fn(
+    device: Device,
+    p_get_fd_info: *const SemaphoreGetFdInfoKHR,
+    p_fd: *mut c_int,
+) -> Result;
+pub struct KhrExternalSemaphoreFdFn {
+    pub import_semaphore_fd_khr: extern "system" fn(
+        device: Device,
+        p_import_semaphore_fd_info: *const ImportSemaphoreFdInfoKHR,
+    ) -> Result,
+    pub get_semaphore_fd_khr: extern "system" fn(
+        device: Device,
+        p_get_fd_info: *const SemaphoreGetFdInfoKHR,
+        p_fd: *mut c_int,
+    ) -> Result,
+}
+unsafe impl Send for KhrExternalSemaphoreFdFn {}
+unsafe impl Sync for KhrExternalSemaphoreFdFn {}
+impl ::std::clone::Clone for KhrExternalSemaphoreFdFn {
+    fn clone(&self) -> Self {
+        KhrExternalSemaphoreFdFn {
+            import_semaphore_fd_khr: self.import_semaphore_fd_khr,
+            get_semaphore_fd_khr: self.get_semaphore_fd_khr,
+        }
+    }
+}
+impl KhrExternalSemaphoreFdFn {
+    pub fn load<F>(mut _f: F) -> Self
+    where
+        F: FnMut(&::std::ffi::CStr) -> *const c_void,
+    {
+        KhrExternalSemaphoreFdFn {
+            import_semaphore_fd_khr: unsafe {
+                extern "system" fn import_semaphore_fd_khr(
+                    _device: Device,
+                    _p_import_semaphore_fd_info: *const ImportSemaphoreFdInfoKHR,
+                ) -> Result {
+                    panic!(concat!("Unable to load ", stringify!(import_semaphore_fd_khr)))
+                }
+                let raw_name = stringify!(vkImportSemaphoreFdKHR);
+                let cname = ::std::ffi::CString::new(raw_name).unwrap();
+                let val = _f(&cname);
+                if val.is_null() {
+                    import_semaphore_fd_khr
+                } else {
+                    ::std::mem::transmute(val)
+                }
+            },
+            get_semaphore_fd_khr: unsafe {
+                extern "system" fn get_semaphore_fd_khr(
+                    _device: Device,
+                    _p_get_fd_info: *const SemaphoreGetFdInfoKHR,
+                    _p_fd: *mut c_int,
+                ) -> Result {
+                    panic!(concat!("Unable to load ", stringify!(get_semaphore_fd_khr)))
+                }
+                let raw_name = stringify!(vkGetSemaphoreFdKHR);
+                let cname = ::std::ffi::CString::new(raw_name).unwrap();
+                let val = _f(&cname);
+                if val.is_null() {
+                    get_semaphore_fd_khr
+                } else {
+                    ::std::mem::transmute(val)
+                }
+            },
+        }
+    }
+    #[doc = ""]
+    pub unsafe fn import_semaphore_fd_khr(
+        &self,
+        device: Device,
+        p_import_semaphore_fd_info: *const ImportSemaphoreFdInfoKHR,
+    ) -> Result {
+        (self.import_semaphore_fd_khr)(device, p_import_semaphore_fd_info)
+    }
+    #[doc = ""]
+    pub unsafe fn get_semaphore_fd_khr(
+        &self,
+        device: Device,
+        p_get_fd_info: *const SemaphoreGetFdInfoKHR,
+        p_fd: *mut c_int,
+    ) -> Result {
+        (self.get_semaphore_fd_khr)(device, p_get_fd_info, p_fd)
+    }
+}
+#[doc = "Generated from \'VK_KHR_external_semaphore_fd\'"]
+impl StructureType {
+    pub const IMPORT_SEMAPHORE_FD_INFO_KHR: Self = StructureType(1000079000);
+}
+#[doc = "Generated from \'VK_KHR_external_semaphore_fd\'"]
+impl StructureType {
+    pub const SEMAPHORE_GET_FD_INFO_KHR: Self = StructureType(1000079001);
+}
+impl KhrPushDescriptorFn {
+    pub fn name() -> &'static ::std::ffi::CStr {
+        ::std::ffi::CStr::from_bytes_with_nul(b"VK_KHR_push_descriptor\0")
+            .expect("Wrong extension string")
+    }
+}
+#[allow(non_camel_case_types)]
+pub type PFN_vkCmdPushDescriptorSetKHR = extern "system" fn(
+    command_buffer: CommandBuffer,
+    pipeline_bind_point: PipelineBindPoint,
+    layout: PipelineLayout,
+    set: u32,
+    descriptor_write_count: u32,
+    p_descriptor_writes: *const WriteDescriptorSet,
+) -> c_void;
+#[allow(non_camel_case_types)]
+pub type PFN_vkCmdPushDescriptorSetWithTemplateKHR = extern "system" fn(
+    command_buffer: CommandBuffer,
+    descriptor_update_template: DescriptorUpdateTemplate,
+    layout: PipelineLayout,
+    set: u32,
+    p_data: *const c_void,
+) -> c_void;
+pub struct KhrPushDescriptorFn {
+    pub cmd_push_descriptor_set_khr: extern "system" fn(
+        command_buffer: CommandBuffer,
+        pipeline_bind_point: PipelineBindPoint,
+        layout: PipelineLayout,
+        set: u32,
+        descriptor_write_count: u32,
+        p_descriptor_writes: *const WriteDescriptorSet,
+    ) -> c_void,
+    pub cmd_push_descriptor_set_with_template_khr: extern "system" fn(
+        command_buffer: CommandBuffer,
+        descriptor_update_template: DescriptorUpdateTemplate,
+        layout: PipelineLayout,
+        set: u32,
+        p_data: *const c_void,
+    ) -> c_void,
+}
+unsafe impl Send for KhrPushDescriptorFn {}
+unsafe impl Sync for KhrPushDescriptorFn {}
+impl ::std::clone::Clone for KhrPushDescriptorFn {
+    fn clone(&self) -> Self {
+        KhrPushDescriptorFn {
+            cmd_push_descriptor_set_khr: self.cmd_push_descriptor_set_khr,
+            cmd_push_descriptor_set_with_template_khr: self
+                .cmd_push_descriptor_set_with_template_khr,
+        }
+    }
+}
+impl KhrPushDescriptorFn {
+    pub fn load<F>(mut _f: F) -> Self
+    where
+        F: FnMut(&::std::ffi::CStr) -> *const c_void,
+    {
+        KhrPushDescriptorFn {
+            cmd_push_descriptor_set_khr: unsafe {
+                extern "system" fn cmd_push_descriptor_set_khr(
+                    _command_buffer: CommandBuffer,
+                    _pipeline_bind_point: PipelineBindPoint,
+                    _layout: PipelineLayout,
+                    _set: u32,
+                    _descriptor_write_count: u32,
+                    _p_descriptor_writes: *const WriteDescriptorSet,
+                ) -> c_void {
+                    panic!(concat!("Unable to load ", stringify!(cmd_push_descriptor_set_khr)))
+                }
+                let raw_name = stringify!(vkCmdPushDescriptorSetKHR);
+                let cname = ::std::ffi::CString::new(raw_name).unwrap();
+                let val = _f(&cname);
+                if val.is_null() {
+                    cmd_push_descriptor_set_khr
+                } else {
+                    ::std::mem::transmute(val)
+                }
+            },
+            cmd_push_descriptor_set_with_template_khr: unsafe {
+                extern "system" fn cmd_push_descriptor_set_with_template_khr(
+                    _command_buffer: CommandBuffer,
+                    _descriptor_update_template: DescriptorUpdateTemplate,
+                    _layout: PipelineLayout,
+                    _set: u32,
+                    _p_data: *const c_void,
+                ) -> c_void {
+                    panic!(concat!("Unable to load ", stringify!(cmd_push_descriptor_set_with_template_khr)))
+                }
+                let raw_name = stringify!(vkCmdPushDescriptorSetWithTemplateKHR);
+                let cname = ::std::ffi::CString::new(raw_name).unwrap();
+                let val = _f(&cname);
+                if val.is_null() {
+                    cmd_push_descriptor_set_with_template_khr
+                } else {
+                    ::std::mem::transmute(val)
+                }
+            },
+        }
+    }
+    #[doc = ""]
+    pub unsafe fn cmd_push_descriptor_set_khr(
+        &self,
+        command_buffer: CommandBuffer,
+        pipeline_bind_point: PipelineBindPoint,
+        layout: PipelineLayout,
+        set: u32,
+        descriptor_write_count: u32,
+        p_descriptor_writes: *const WriteDescriptorSet,
+    ) -> c_void {
+        (self.cmd_push_descriptor_set_khr)(
+            command_buffer,
+            pipeline_bind_point,
+            layout,
+            set,
+            descriptor_write_count,
+            p_descriptor_writes,
+        )
+    }
+    #[doc = ""]
+    pub unsafe fn cmd_push_descriptor_set_with_template_khr(
+        &self,
+        command_buffer: CommandBuffer,
+        descriptor_update_template: DescriptorUpdateTemplate,
+        layout: PipelineLayout,
+        set: u32,
+        p_data: *const c_void,
+    ) -> c_void {
+        (self.cmd_push_descriptor_set_with_template_khr)(
+            command_buffer,
+            descriptor_update_template,
+            layout,
+            set,
+            p_data,
+        )
+    }
+}
+#[doc = "Generated from \'VK_KHR_push_descriptor\'"]
+impl StructureType {
+    pub const PHYSICAL_DEVICE_PUSH_DESCRIPTOR_PROPERTIES_KHR: Self = StructureType(1000080000);
+}
+#[doc = "Generated from \'VK_KHR_push_descriptor\'"]
+impl DescriptorSetLayoutCreateFlags {
+    pub const PUSH_DESCRIPTOR_KHR: Self = DescriptorSetLayoutCreateFlags(0b1);
+}
+impl ExtConditionalRenderingFn {
+    pub fn name() -> &'static ::std::ffi::CStr {
+        ::std::ffi::CStr::from_bytes_with_nul(b"VK_EXT_conditional_rendering\0")
+            .expect("Wrong extension string")
+    }
+}
+#[allow(non_camel_case_types)]
+pub type PFN_vkCmdBeginConditionalRenderingEXT = extern "system" fn(
+    command_buffer: CommandBuffer,
+    p_conditional_rendering_begin: *const ConditionalRenderingBeginInfoEXT,
+) -> c_void;
+#[allow(non_camel_case_types)]
+pub type PFN_vkCmdEndConditionalRenderingEXT =
+    extern "system" fn(command_buffer: CommandBuffer) -> c_void;
+pub struct ExtConditionalRenderingFn {
+    pub cmd_begin_conditional_rendering_ext: extern "system" fn(
+        command_buffer: CommandBuffer,
+        p_conditional_rendering_begin: *const ConditionalRenderingBeginInfoEXT,
+    ) -> c_void,
+    pub cmd_end_conditional_rendering_ext:
+        extern "system" fn(command_buffer: CommandBuffer) -> c_void,
+}
+unsafe impl Send for ExtConditionalRenderingFn {}
+unsafe impl Sync for ExtConditionalRenderingFn {}
+impl ::std::clone::Clone for ExtConditionalRenderingFn {
+    fn clone(&self) -> Self {
+        ExtConditionalRenderingFn {
+            cmd_begin_conditional_rendering_ext: self.cmd_begin_conditional_rendering_ext,
+            cmd_end_conditional_rendering_ext: self.cmd_end_conditional_rendering_ext,
+        }
+    }
+}
+impl ExtConditionalRenderingFn {
+    pub fn load<F>(mut _f: F) -> Self
+    where
+        F: FnMut(&::std::ffi::CStr) -> *const c_void,
+    {
+        ExtConditionalRenderingFn {
+            cmd_begin_conditional_rendering_ext: unsafe {
+                extern "system" fn cmd_begin_conditional_rendering_ext(
+                    _command_buffer: CommandBuffer,
+                    _p_conditional_rendering_begin: *const ConditionalRenderingBeginInfoEXT,
+                ) -> c_void {
+                    panic!(concat!("Unable to load ", stringify!(cmd_begin_conditional_rendering_ext)))
+                }
+                let raw_name = stringify!(vkCmdBeginConditionalRenderingEXT);
+                let cname = ::std::ffi::CString::new(raw_name).unwrap();
+                let val = _f(&cname);
+                if val.is_null() {
+                    cmd_begin_conditional_rendering_ext
+                } else {
+                    ::std::mem::transmute(val)
+                }
+            },
+            cmd_end_conditional_rendering_ext: unsafe {
+                extern "system" fn cmd_end_conditional_rendering_ext(
+                    _command_buffer: CommandBuffer,
+                ) -> c_void {
+                    panic!(concat!("Unable to load ", stringify!(cmd_end_conditional_rendering_ext)))
+                }
+                let raw_name = stringify!(vkCmdEndConditionalRenderingEXT);
+                let cname = ::std::ffi::CString::new(raw_name).unwrap();
+                let val = _f(&cname);
+                if val.is_null() {
+                    cmd_end_conditional_rendering_ext
+                } else {
+                    ::std::mem::transmute(val)
+                }
+            },
+        }
+    }
+    #[doc = ""]
+    pub unsafe fn cmd_begin_conditional_rendering_ext(
+        &self,
+        command_buffer: CommandBuffer,
+        p_conditional_rendering_begin: *const ConditionalRenderingBeginInfoEXT,
+    ) -> c_void {
+        (self.cmd_begin_conditional_rendering_ext)(command_buffer, p_conditional_rendering_begin)
+    }
+    #[doc = ""]
+    pub unsafe fn cmd_end_conditional_rendering_ext(
+        &self,
+        command_buffer: CommandBuffer,
+    ) -> c_void {
+        (self.cmd_end_conditional_rendering_ext)(command_buffer)
+    }
+}
+#[doc = "Generated from \'VK_EXT_conditional_rendering\'"]
+impl StructureType {
+    pub const COMMAND_BUFFER_INHERITANCE_CONDITIONAL_RENDERING_INFO_EXT: Self =
+        StructureType(1000081000);
+}
+#[doc = "Generated from \'VK_EXT_conditional_rendering\'"]
+impl StructureType {
+    pub const PHYSICAL_DEVICE_CONDITIONAL_RENDERING_FEATURES_EXT: Self = StructureType(1000081001);
+}
+#[doc = "Generated from \'VK_EXT_conditional_rendering\'"]
+impl StructureType {
+    pub const CONDITIONAL_RENDERING_BEGIN_INFO_EXT: Self = StructureType(1000081002);
+}
+#[doc = "Generated from \'VK_EXT_conditional_rendering\'"]
+impl AccessFlags {
+    pub const CONDITIONAL_RENDERING_READ_EXT: Self = AccessFlags(0b100000000000000000000);
+}
+#[doc = "Generated from \'VK_EXT_conditional_rendering\'"]
+impl BufferUsageFlags {
+    pub const CONDITIONAL_RENDERING_EXT: Self = BufferUsageFlags(0b1000000000);
+}
+#[doc = "Generated from \'VK_EXT_conditional_rendering\'"]
+impl PipelineStageFlags {
+    pub const CONDITIONAL_RENDERING_EXT: Self = PipelineStageFlags(0b1000000000000000000);
+}
+impl KhrShaderFloat16Int8Fn {
+    pub fn name() -> &'static ::std::ffi::CStr {
+        ::std::ffi::CStr::from_bytes_with_nul(b"VK_KHR_shader_float16_int8\0")
+            .expect("Wrong extension string")
+    }
+}
+pub struct KhrShaderFloat16Int8Fn {}
+unsafe impl Send for KhrShaderFloat16Int8Fn {}
+unsafe impl Sync for KhrShaderFloat16Int8Fn {}
+impl ::std::clone::Clone for KhrShaderFloat16Int8Fn {
+    fn clone(&self) -> Self {
+        KhrShaderFloat16Int8Fn {}
+    }
+}
+impl KhrShaderFloat16Int8Fn {
+    pub fn load<F>(mut _f: F) -> Self
+    where
+        F: FnMut(&::std::ffi::CStr) -> *const c_void,
+    {
+        KhrShaderFloat16Int8Fn {}
+    }
+}
+#[doc = "Generated from \'VK_KHR_shader_float16_int8\'"]
+impl StructureType {
+    pub const PHYSICAL_DEVICE_FLOAT16_INT8_FEATURES_KHR: Self = StructureType(1000082000);
+}
+impl Khr16bitStorageFn {
+    pub fn name() -> &'static ::std::ffi::CStr {
+        ::std::ffi::CStr::from_bytes_with_nul(b"VK_KHR_16bit_storage\0")
+            .expect("Wrong extension string")
+    }
+}
+pub struct Khr16bitStorageFn {}
+unsafe impl Send for Khr16bitStorageFn {}
+unsafe impl Sync for Khr16bitStorageFn {}
+impl ::std::clone::Clone for Khr16bitStorageFn {
+    fn clone(&self) -> Self {
+        Khr16bitStorageFn {}
+    }
+}
+impl Khr16bitStorageFn {
+    pub fn load<F>(mut _f: F) -> Self
+    where
+        F: FnMut(&::std::ffi::CStr) -> *const c_void,
+    {
+        Khr16bitStorageFn {}
+    }
+}
+impl KhrIncrementalPresentFn {
+    pub fn name() -> &'static ::std::ffi::CStr {
+        ::std::ffi::CStr::from_bytes_with_nul(b"VK_KHR_incremental_present\0")
+            .expect("Wrong extension string")
+    }
+}
+pub struct KhrIncrementalPresentFn {}
+unsafe impl Send for KhrIncrementalPresentFn {}
+unsafe impl Sync for KhrIncrementalPresentFn {}
+impl ::std::clone::Clone for KhrIncrementalPresentFn {
+    fn clone(&self) -> Self {
+        KhrIncrementalPresentFn {}
+    }
+}
+impl KhrIncrementalPresentFn {
+    pub fn load<F>(mut _f: F) -> Self
+    where
+        F: FnMut(&::std::ffi::CStr) -> *const c_void,
+    {
+        KhrIncrementalPresentFn {}
+    }
+}
+#[doc = "Generated from \'VK_KHR_incremental_present\'"]
+impl StructureType {
+    pub const PRESENT_REGIONS_KHR: Self = StructureType(1000084000);
+}
+impl KhrDescriptorUpdateTemplateFn {
+    pub fn name() -> &'static ::std::ffi::CStr {
+        ::std::ffi::CStr::from_bytes_with_nul(b"VK_KHR_descriptor_update_template\0")
+            .expect("Wrong extension string")
+    }
+}
+pub struct KhrDescriptorUpdateTemplateFn {
+    pub cmd_push_descriptor_set_with_template_khr: extern "system" fn(
+        command_buffer: CommandBuffer,
+        descriptor_update_template: DescriptorUpdateTemplate,
+        layout: PipelineLayout,
+        set: u32,
+        p_data: *const c_void,
+    ) -> c_void,
+}
+unsafe impl Send for KhrDescriptorUpdateTemplateFn {}
+unsafe impl Sync for KhrDescriptorUpdateTemplateFn {}
+impl ::std::clone::Clone for KhrDescriptorUpdateTemplateFn {
+    fn clone(&self) -> Self {
+        KhrDescriptorUpdateTemplateFn {
+            cmd_push_descriptor_set_with_template_khr: self
+                .cmd_push_descriptor_set_with_template_khr,
+        }
+    }
+}
+impl KhrDescriptorUpdateTemplateFn {
+    pub fn load<F>(mut _f: F) -> Self
+    where
+        F: FnMut(&::std::ffi::CStr) -> *const c_void,
+    {
+        KhrDescriptorUpdateTemplateFn {
+            cmd_push_descriptor_set_with_template_khr: unsafe {
+                extern "system" fn cmd_push_descriptor_set_with_template_khr(
+                    _command_buffer: CommandBuffer,
+                    _descriptor_update_template: DescriptorUpdateTemplate,
+                    _layout: PipelineLayout,
+                    _set: u32,
+                    _p_data: *const c_void,
+                ) -> c_void {
+                    panic!(concat!("Unable to load ", stringify!(cmd_push_descriptor_set_with_template_khr)))
+                }
+                let raw_name = stringify!(vkCmdPushDescriptorSetWithTemplateKHR);
+                let cname = ::std::ffi::CString::new(raw_name).unwrap();
+                let val = _f(&cname);
+                if val.is_null() {
+                    cmd_push_descriptor_set_with_template_khr
+                } else {
+                    ::std::mem::transmute(val)
+                }
+            },
+        }
+    }
+    #[doc = ""]
+    pub unsafe fn cmd_push_descriptor_set_with_template_khr(
+        &self,
+        command_buffer: CommandBuffer,
+        descriptor_update_template: DescriptorUpdateTemplate,
+        layout: PipelineLayout,
+        set: u32,
+        p_data: *const c_void,
+    ) -> c_void {
+        (self.cmd_push_descriptor_set_with_template_khr)(
+            command_buffer,
+            descriptor_update_template,
+            layout,
+            set,
+            p_data,
+        )
+    }
+}
+impl NvxDeviceGeneratedCommandsFn {
+    pub fn name() -> &'static ::std::ffi::CStr {
+        ::std::ffi::CStr::from_bytes_with_nul(b"VK_NVX_device_generated_commands\0")
+            .expect("Wrong extension string")
+    }
+}
+#[allow(non_camel_case_types)]
+pub type PFN_vkCmdProcessCommandsNVX = extern "system" fn(
+    command_buffer: CommandBuffer,
+    p_process_commands_info: *const CmdProcessCommandsInfoNVX,
+) -> c_void;
+#[allow(non_camel_case_types)]
+pub type PFN_vkCmdReserveSpaceForCommandsNVX = extern "system" fn(
+    command_buffer: CommandBuffer,
+    p_reserve_space_info: *const CmdReserveSpaceForCommandsInfoNVX,
+) -> c_void;
+#[allow(non_camel_case_types)]
+pub type PFN_vkCreateIndirectCommandsLayoutNVX = extern "system" fn(
+    device: Device,
+    p_create_info: *const IndirectCommandsLayoutCreateInfoNVX,
+    p_allocator: *const AllocationCallbacks,
+    p_indirect_commands_layout: *mut IndirectCommandsLayoutNVX,
+) -> Result;
+#[allow(non_camel_case_types)]
+pub type PFN_vkDestroyIndirectCommandsLayoutNVX = extern "system" fn(
+    device: Device,
+    indirect_commands_layout: IndirectCommandsLayoutNVX,
+    p_allocator: *const AllocationCallbacks,
+) -> c_void;
+#[allow(non_camel_case_types)]
+pub type PFN_vkCreateObjectTableNVX = extern "system" fn(
+    device: Device,
+    p_create_info: *const ObjectTableCreateInfoNVX,
+    p_allocator: *const AllocationCallbacks,
+    p_object_table: *mut ObjectTableNVX,
+) -> Result;
+#[allow(non_camel_case_types)]
+pub type PFN_vkDestroyObjectTableNVX = extern "system" fn(
+    device: Device,
+    object_table: ObjectTableNVX,
+    p_allocator: *const AllocationCallbacks,
+) -> c_void;
+#[allow(non_camel_case_types)]
+pub type PFN_vkRegisterObjectsNVX = extern "system" fn(
+    device: Device,
+    object_table: ObjectTableNVX,
+    object_count: u32,
+    pp_object_table_entries: *const *const ObjectTableEntryNVX,
+    p_object_indices: *const u32,
+) -> Result;
+#[allow(non_camel_case_types)]
+pub type PFN_vkUnregisterObjectsNVX = extern "system" fn(
+    device: Device,
+    object_table: ObjectTableNVX,
+    object_count: u32,
+    p_object_entry_types: *const ObjectEntryTypeNVX,
+    p_object_indices: *const u32,
+) -> Result;
+#[allow(non_camel_case_types)]
+pub type PFN_vkGetPhysicalDeviceGeneratedCommandsPropertiesNVX = extern "system" fn(
+    physical_device: PhysicalDevice,
+    p_features: *mut DeviceGeneratedCommandsFeaturesNVX,
+    p_limits: *mut DeviceGeneratedCommandsLimitsNVX,
+) -> c_void;
+pub struct NvxDeviceGeneratedCommandsFn {
+    pub cmd_process_commands_nvx: extern "system" fn(
+        command_buffer: CommandBuffer,
+        p_process_commands_info: *const CmdProcessCommandsInfoNVX,
+    ) -> c_void,
+    pub cmd_reserve_space_for_commands_nvx: extern "system" fn(
+        command_buffer: CommandBuffer,
+        p_reserve_space_info: *const CmdReserveSpaceForCommandsInfoNVX,
+    ) -> c_void,
+    pub create_indirect_commands_layout_nvx: extern "system" fn(
+        device: Device,
+        p_create_info: *const IndirectCommandsLayoutCreateInfoNVX,
+        p_allocator: *const AllocationCallbacks,
+        p_indirect_commands_layout: *mut IndirectCommandsLayoutNVX,
+    ) -> Result,
+    pub destroy_indirect_commands_layout_nvx: extern "system" fn(
+        device: Device,
+        indirect_commands_layout: IndirectCommandsLayoutNVX,
+        p_allocator: *const AllocationCallbacks,
+    ) -> c_void,
+    pub create_object_table_nvx: extern "system" fn(
+        device: Device,
+        p_create_info: *const ObjectTableCreateInfoNVX,
+        p_allocator: *const AllocationCallbacks,
+        p_object_table: *mut ObjectTableNVX,
+    ) -> Result,
+    pub destroy_object_table_nvx: extern "system" fn(
+        device: Device,
+        object_table: ObjectTableNVX,
+        p_allocator: *const AllocationCallbacks,
+    ) -> c_void,
+    pub register_objects_nvx: extern "system" fn(
+        device: Device,
+        object_table: ObjectTableNVX,
+        object_count: u32,
+        pp_object_table_entries: *const *const ObjectTableEntryNVX,
+        p_object_indices: *const u32,
+    ) -> Result,
+    pub unregister_objects_nvx: extern "system" fn(
+        device: Device,
+        object_table: ObjectTableNVX,
+        object_count: u32,
+        p_object_entry_types: *const ObjectEntryTypeNVX,
+        p_object_indices: *const u32,
+    ) -> Result,
+    pub get_physical_device_generated_commands_properties_nvx: extern "system" fn(
+        physical_device: PhysicalDevice,
+        p_features: *mut DeviceGeneratedCommandsFeaturesNVX,
+        p_limits: *mut DeviceGeneratedCommandsLimitsNVX,
+    ) -> c_void,
+}
+unsafe impl Send for NvxDeviceGeneratedCommandsFn {}
+unsafe impl Sync for NvxDeviceGeneratedCommandsFn {}
+impl ::std::clone::Clone for NvxDeviceGeneratedCommandsFn {
+    fn clone(&self) -> Self {
+        NvxDeviceGeneratedCommandsFn {
+            cmd_process_commands_nvx: self.cmd_process_commands_nvx,
+            cmd_reserve_space_for_commands_nvx: self.cmd_reserve_space_for_commands_nvx,
+            create_indirect_commands_layout_nvx: self.create_indirect_commands_layout_nvx,
+            destroy_indirect_commands_layout_nvx: self.destroy_indirect_commands_layout_nvx,
+            create_object_table_nvx: self.create_object_table_nvx,
+            destroy_object_table_nvx: self.destroy_object_table_nvx,
+            register_objects_nvx: self.register_objects_nvx,
+            unregister_objects_nvx: self.unregister_objects_nvx,
+            get_physical_device_generated_commands_properties_nvx: self
+                .get_physical_device_generated_commands_properties_nvx,
+        }
+    }
+}
+impl NvxDeviceGeneratedCommandsFn {
+    pub fn load<F>(mut _f: F) -> Self
+    where
+        F: FnMut(&::std::ffi::CStr) -> *const c_void,
+    {
+        NvxDeviceGeneratedCommandsFn {
+            cmd_process_commands_nvx: unsafe {
+                extern "system" fn cmd_process_commands_nvx(
+                    _command_buffer: CommandBuffer,
+                    _p_process_commands_info: *const CmdProcessCommandsInfoNVX,
+                ) -> c_void {
+                    panic!(concat!("Unable to load ", stringify!(cmd_process_commands_nvx)))
+                }
+                let raw_name = stringify!(vkCmdProcessCommandsNVX);
+                let cname = ::std::ffi::CString::new(raw_name).unwrap();
+                let val = _f(&cname);
+                if val.is_null() {
+                    cmd_process_commands_nvx
+                } else {
+                    ::std::mem::transmute(val)
+                }
+            },
+            cmd_reserve_space_for_commands_nvx: unsafe {
+                extern "system" fn cmd_reserve_space_for_commands_nvx(
+                    _command_buffer: CommandBuffer,
+                    _p_reserve_space_info: *const CmdReserveSpaceForCommandsInfoNVX,
+                ) -> c_void {
+                    panic!(concat!("Unable to load ", stringify!(cmd_reserve_space_for_commands_nvx)))
+                }
+                let raw_name = stringify!(vkCmdReserveSpaceForCommandsNVX);
+                let cname = ::std::ffi::CString::new(raw_name).unwrap();
+                let val = _f(&cname);
+                if val.is_null() {
+                    cmd_reserve_space_for_commands_nvx
+                } else {
+                    ::std::mem::transmute(val)
+                }
+            },
+            create_indirect_commands_layout_nvx: unsafe {
+                extern "system" fn create_indirect_commands_layout_nvx(
+                    _device: Device,
+                    _p_create_info: *const IndirectCommandsLayoutCreateInfoNVX,
+                    _p_allocator: *const AllocationCallbacks,
+                    _p_indirect_commands_layout: *mut IndirectCommandsLayoutNVX,
+                ) -> Result {
+                    panic!(concat!("Unable to load ", stringify!(create_indirect_commands_layout_nvx)))
+                }
+                let raw_name = stringify!(vkCreateIndirectCommandsLayoutNVX);
+                let cname = ::std::ffi::CString::new(raw_name).unwrap();
+                let val = _f(&cname);
+                if val.is_null() {
+                    create_indirect_commands_layout_nvx
+                } else {
+                    ::std::mem::transmute(val)
+                }
+            },
+            destroy_indirect_commands_layout_nvx: unsafe {
+                extern "system" fn destroy_indirect_commands_layout_nvx(
+                    _device: Device,
+                    _indirect_commands_layout: IndirectCommandsLayoutNVX,
+                    _p_allocator: *const AllocationCallbacks,
+                ) -> c_void {
+                    panic!(concat!("Unable to load ", stringify!(destroy_indirect_commands_layout_nvx)))
+                }
+                let raw_name = stringify!(vkDestroyIndirectCommandsLayoutNVX);
+                let cname = ::std::ffi::CString::new(raw_name).unwrap();
+                let val = _f(&cname);
+                if val.is_null() {
+                    destroy_indirect_commands_layout_nvx
+                } else {
+                    ::std::mem::transmute(val)
+                }
+            },
+            create_object_table_nvx: unsafe {
+                extern "system" fn create_object_table_nvx(
+                    _device: Device,
+                    _p_create_info: *const ObjectTableCreateInfoNVX,
+                    _p_allocator: *const AllocationCallbacks,
+                    _p_object_table: *mut ObjectTableNVX,
+                ) -> Result {
+                    panic!(concat!("Unable to load ", stringify!(create_object_table_nvx)))
+                }
+                let raw_name = stringify!(vkCreateObjectTableNVX);
+                let cname = ::std::ffi::CString::new(raw_name).unwrap();
+                let val = _f(&cname);
+                if val.is_null() {
+                    create_object_table_nvx
+                } else {
+                    ::std::mem::transmute(val)
+                }
+            },
+            destroy_object_table_nvx: unsafe {
+                extern "system" fn destroy_object_table_nvx(
+                    _device: Device,
+                    _object_table: ObjectTableNVX,
+                    _p_allocator: *const AllocationCallbacks,
+                ) -> c_void {
+                    panic!(concat!("Unable to load ", stringify!(destroy_object_table_nvx)))
+                }
+                let raw_name = stringify!(vkDestroyObjectTableNVX);
+                let cname = ::std::ffi::CString::new(raw_name).unwrap();
+                let val = _f(&cname);
+                if val.is_null() {
+                    destroy_object_table_nvx
+                } else {
+                    ::std::mem::transmute(val)
+                }
+            },
+            register_objects_nvx: unsafe {
+                extern "system" fn register_objects_nvx(
+                    _device: Device,
+                    _object_table: ObjectTableNVX,
+                    _object_count: u32,
+                    _pp_object_table_entries: *const *const ObjectTableEntryNVX,
+                    _p_object_indices: *const u32,
+                ) -> Result {
+                    panic!(concat!("Unable to load ", stringify!(register_objects_nvx)))
+                }
+                let raw_name = stringify!(vkRegisterObjectsNVX);
+                let cname = ::std::ffi::CString::new(raw_name).unwrap();
+                let val = _f(&cname);
+                if val.is_null() {
+                    register_objects_nvx
+                } else {
+                    ::std::mem::transmute(val)
+                }
+            },
+            unregister_objects_nvx: unsafe {
+                extern "system" fn unregister_objects_nvx(
+                    _device: Device,
+                    _object_table: ObjectTableNVX,
+                    _object_count: u32,
+                    _p_object_entry_types: *const ObjectEntryTypeNVX,
+                    _p_object_indices: *const u32,
+                ) -> Result {
+                    panic!(concat!("Unable to load ", stringify!(unregister_objects_nvx)))
+                }
+                let raw_name = stringify!(vkUnregisterObjectsNVX);
+                let cname = ::std::ffi::CString::new(raw_name).unwrap();
+                let val = _f(&cname);
+                if val.is_null() {
+                    unregister_objects_nvx
+                } else {
+                    ::std::mem::transmute(val)
+                }
+            },
+            get_physical_device_generated_commands_properties_nvx: unsafe {
+                extern "system" fn get_physical_device_generated_commands_properties_nvx(
+                    _physical_device: PhysicalDevice,
+                    _p_features: *mut DeviceGeneratedCommandsFeaturesNVX,
+                    _p_limits: *mut DeviceGeneratedCommandsLimitsNVX,
+                ) -> c_void {
+                    panic!(concat!("Unable to load ", stringify!(get_physical_device_generated_commands_properties_nvx)))
+                }
+                let raw_name = stringify!(vkGetPhysicalDeviceGeneratedCommandsPropertiesNVX);
+                let cname = ::std::ffi::CString::new(raw_name).unwrap();
+                let val = _f(&cname);
+                if val.is_null() {
+                    get_physical_device_generated_commands_properties_nvx
+                } else {
+                    ::std::mem::transmute(val)
+                }
+            },
+        }
+    }
+    #[doc = ""]
+    pub unsafe fn cmd_process_commands_nvx(
+        &self,
+        command_buffer: CommandBuffer,
+        p_process_commands_info: *const CmdProcessCommandsInfoNVX,
+    ) -> c_void {
+        (self.cmd_process_commands_nvx)(command_buffer, p_process_commands_info)
+    }
+    #[doc = ""]
+    pub unsafe fn cmd_reserve_space_for_commands_nvx(
+        &self,
+        command_buffer: CommandBuffer,
+        p_reserve_space_info: *const CmdReserveSpaceForCommandsInfoNVX,
+    ) -> c_void {
+        (self.cmd_reserve_space_for_commands_nvx)(command_buffer, p_reserve_space_info)
+    }
+    #[doc = ""]
+    pub unsafe fn create_indirect_commands_layout_nvx(
+        &self,
+        device: Device,
+        p_create_info: *const IndirectCommandsLayoutCreateInfoNVX,
+        p_allocator: *const AllocationCallbacks,
+        p_indirect_commands_layout: *mut IndirectCommandsLayoutNVX,
+    ) -> Result {
+        (self.create_indirect_commands_layout_nvx)(
+            device,
+            p_create_info,
+            p_allocator,
+            p_indirect_commands_layout,
+        )
+    }
+    #[doc = ""]
+    pub unsafe fn destroy_indirect_commands_layout_nvx(
+        &self,
+        device: Device,
+        indirect_commands_layout: IndirectCommandsLayoutNVX,
+        p_allocator: *const AllocationCallbacks,
+    ) -> c_void {
+        (self.destroy_indirect_commands_layout_nvx)(device, indirect_commands_layout, p_allocator)
+    }
+    #[doc = ""]
+    pub unsafe fn create_object_table_nvx(
+        &self,
+        device: Device,
+        p_create_info: *const ObjectTableCreateInfoNVX,
+        p_allocator: *const AllocationCallbacks,
+        p_object_table: *mut ObjectTableNVX,
+    ) -> Result {
+        (self.create_object_table_nvx)(device, p_create_info, p_allocator, p_object_table)
+    }
+    #[doc = ""]
+    pub unsafe fn destroy_object_table_nvx(
+        &self,
+        device: Device,
+        object_table: ObjectTableNVX,
+        p_allocator: *const AllocationCallbacks,
+    ) -> c_void {
+        (self.destroy_object_table_nvx)(device, object_table, p_allocator)
+    }
+    #[doc = ""]
+    pub unsafe fn register_objects_nvx(
+        &self,
+        device: Device,
+        object_table: ObjectTableNVX,
+        object_count: u32,
+        pp_object_table_entries: *const *const ObjectTableEntryNVX,
+        p_object_indices: *const u32,
+    ) -> Result {
+        (self.register_objects_nvx)(
+            device,
+            object_table,
+            object_count,
+            pp_object_table_entries,
+            p_object_indices,
+        )
+    }
+    #[doc = ""]
+    pub unsafe fn unregister_objects_nvx(
+        &self,
+        device: Device,
+        object_table: ObjectTableNVX,
+        object_count: u32,
+        p_object_entry_types: *const ObjectEntryTypeNVX,
+        p_object_indices: *const u32,
+    ) -> Result {
+        (self.unregister_objects_nvx)(
+            device,
+            object_table,
+            object_count,
+            p_object_entry_types,
+            p_object_indices,
+        )
+    }
+    #[doc = ""]
+    pub unsafe fn get_physical_device_generated_commands_properties_nvx(
+        &self,
+        physical_device: PhysicalDevice,
+        p_features: *mut DeviceGeneratedCommandsFeaturesNVX,
+        p_limits: *mut DeviceGeneratedCommandsLimitsNVX,
+    ) -> c_void {
+        (self.get_physical_device_generated_commands_properties_nvx)(
+            physical_device,
+            p_features,
+            p_limits,
+        )
+    }
+}
+#[doc = "Generated from \'VK_NVX_device_generated_commands\'"]
+impl StructureType {
+    pub const OBJECT_TABLE_CREATE_INFO_NVX: Self = StructureType(1000086000);
+}
+#[doc = "Generated from \'VK_NVX_device_generated_commands\'"]
+impl StructureType {
+    pub const INDIRECT_COMMANDS_LAYOUT_CREATE_INFO_NVX: Self = StructureType(1000086001);
+}
+#[doc = "Generated from \'VK_NVX_device_generated_commands\'"]
+impl StructureType {
+    pub const CMD_PROCESS_COMMANDS_INFO_NVX: Self = StructureType(1000086002);
+}
+#[doc = "Generated from \'VK_NVX_device_generated_commands\'"]
+impl StructureType {
+    pub const CMD_RESERVE_SPACE_FOR_COMMANDS_INFO_NVX: Self = StructureType(1000086003);
+}
+#[doc = "Generated from \'VK_NVX_device_generated_commands\'"]
+impl StructureType {
+    pub const DEVICE_GENERATED_COMMANDS_LIMITS_NVX: Self = StructureType(1000086004);
+}
+#[doc = "Generated from \'VK_NVX_device_generated_commands\'"]
+impl StructureType {
+    pub const DEVICE_GENERATED_COMMANDS_FEATURES_NVX: Self = StructureType(1000086005);
+}
+#[doc = "Generated from \'VK_NVX_device_generated_commands\'"]
+impl PipelineStageFlags {
+    pub const COMMAND_PROCESS_NVX: Self = PipelineStageFlags(0b100000000000000000);
+}
+#[doc = "Generated from \'VK_NVX_device_generated_commands\'"]
+impl AccessFlags {
+    pub const COMMAND_PROCESS_READ_NVX: Self = AccessFlags(0b100000000000000000);
+}
+#[doc = "Generated from \'VK_NVX_device_generated_commands\'"]
+impl AccessFlags {
+    pub const COMMAND_PROCESS_WRITE_NVX: Self = AccessFlags(0b1000000000000000000);
+}
+#[doc = "Generated from \'VK_NVX_device_generated_commands\'"]
+impl ObjectType {
+    pub const OBJECT_TABLE_NVX: Self = ObjectType(1000086000);
+}
+#[doc = "Generated from \'VK_NVX_device_generated_commands\'"]
+impl ObjectType {
+    pub const INDIRECT_COMMANDS_LAYOUT_NVX: Self = ObjectType(1000086001);
+}
+impl NvClipSpaceWScalingFn {
+    pub fn name() -> &'static ::std::ffi::CStr {
+        ::std::ffi::CStr::from_bytes_with_nul(b"VK_NV_clip_space_w_scaling\0")
+            .expect("Wrong extension string")
+    }
+}
+#[allow(non_camel_case_types)]
+pub type PFN_vkCmdSetViewportWScalingNV = extern "system" fn(
+    command_buffer: CommandBuffer,
+    first_viewport: u32,
+    viewport_count: u32,
+    p_viewport_w_scalings: *const ViewportWScalingNV,
+) -> c_void;
+pub struct NvClipSpaceWScalingFn {
+    pub cmd_set_viewport_w_scaling_nv: extern "system" fn(
+        command_buffer: CommandBuffer,
+        first_viewport: u32,
+        viewport_count: u32,
+        p_viewport_w_scalings: *const ViewportWScalingNV,
+    ) -> c_void,
+}
+unsafe impl Send for NvClipSpaceWScalingFn {}
+unsafe impl Sync for NvClipSpaceWScalingFn {}
+impl ::std::clone::Clone for NvClipSpaceWScalingFn {
+    fn clone(&self) -> Self {
+        NvClipSpaceWScalingFn {
+            cmd_set_viewport_w_scaling_nv: self.cmd_set_viewport_w_scaling_nv,
+        }
+    }
+}
+impl NvClipSpaceWScalingFn {
+    pub fn load<F>(mut _f: F) -> Self
+    where
+        F: FnMut(&::std::ffi::CStr) -> *const c_void,
+    {
+        NvClipSpaceWScalingFn {
+            cmd_set_viewport_w_scaling_nv: unsafe {
+                extern "system" fn cmd_set_viewport_w_scaling_nv(
+                    _command_buffer: CommandBuffer,
+                    _first_viewport: u32,
+                    _viewport_count: u32,
+                    _p_viewport_w_scalings: *const ViewportWScalingNV,
+                ) -> c_void {
+                    panic!(concat!("Unable to load ", stringify!(cmd_set_viewport_w_scaling_nv)))
+                }
+                let raw_name = stringify!(vkCmdSetViewportWScalingNV);
+                let cname = ::std::ffi::CString::new(raw_name).unwrap();
+                let val = _f(&cname);
+                if val.is_null() {
+                    cmd_set_viewport_w_scaling_nv
+                } else {
+                    ::std::mem::transmute(val)
+                }
+            },
+        }
+    }
+    #[doc = ""]
+    pub unsafe fn cmd_set_viewport_w_scaling_nv(
+        &self,
+        command_buffer: CommandBuffer,
+        first_viewport: u32,
+        viewport_count: u32,
+        p_viewport_w_scalings: *const ViewportWScalingNV,
+    ) -> c_void {
+        (self.cmd_set_viewport_w_scaling_nv)(
+            command_buffer,
+            first_viewport,
+            viewport_count,
+            p_viewport_w_scalings,
+        )
+    }
+}
+#[doc = "Generated from \'VK_NV_clip_space_w_scaling\'"]
+impl StructureType {
+    pub const PIPELINE_VIEWPORT_W_SCALING_STATE_CREATE_INFO_NV: Self = StructureType(1000087000);
+}
+#[doc = "Generated from \'VK_NV_clip_space_w_scaling\'"]
+impl DynamicState {
+    pub const VIEWPORT_W_SCALING_NV: Self = DynamicState(1000087000);
+}
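The values in these `Generated from \'VK_*\'` blocks follow the Vulkan registry rule for extension-defined enums: value = 1000000000 + 1000 * (extension_number - 1) + offset. VK_NV_clip_space_w_scaling is, to the best of my knowledge, extension number 88, which is why both constants above land on 1000087000, and the VK_NVX_device_generated_commands values sit one block lower at 1000086000 through 1000086005. A quick illustrative check of that rule:

    // Illustrative only: the registry rule for extension-defined enum values.
    fn ext_enum_value(ext_number: u32, offset: u32) -> u32 {
        1_000_000_000 + 1_000 * (ext_number - 1) + offset
    }
    assert_eq!(ext_enum_value(88, 0), 1_000_087_000); // VIEWPORT_W_SCALING_NV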
+impl ExtDirectModeDisplayFn {
+    pub fn name() -> &'static ::std::ffi::CStr {
+        ::std::ffi::CStr::from_bytes_with_nul(b"VK_EXT_direct_mode_display\0")
+            .expect("Wrong extension string")
+    }
+}
+#[allow(non_camel_case_types)]
+pub type PFN_vkReleaseDisplayEXT =
+    extern "system" fn(physical_device: PhysicalDevice, display: DisplayKHR) -> Result;
+pub struct ExtDirectModeDisplayFn {
+    pub release_display_ext:
+        extern "system" fn(physical_device: PhysicalDevice, display: DisplayKHR) -> Result,
+}
+unsafe impl Send for ExtDirectModeDisplayFn {}
+unsafe impl Sync for ExtDirectModeDisplayFn {}
+impl ::std::clone::Clone for ExtDirectModeDisplayFn {
+    fn clone(&self) -> Self {
+        ExtDirectModeDisplayFn {
+            release_display_ext: self.release_display_ext,
+        }
+    }
+}
+impl ExtDirectModeDisplayFn {
+    pub fn load<F>(mut _f: F) -> Self
+    where
+        F: FnMut(&::std::ffi::CStr) -> *const c_void,
+    {
+        ExtDirectModeDisplayFn {
+            release_display_ext: unsafe {
+                extern "system" fn release_display_ext(
+                    _physical_device: PhysicalDevice,
+                    _display: DisplayKHR,
+                ) -> Result {
+                    panic!(concat!("Unable to load ", stringify!(release_display_ext)))
+                }
+                let raw_name = stringify!(vkReleaseDisplayEXT);
+                let cname = ::std::ffi::CString::new(raw_name).unwrap();
+                let val = _f(&cname);
+                if val.is_null() {
+                    release_display_ext
+                } else {
+                    ::std::mem::transmute(val)
+                }
+            },
+        }
+    }
+    #[doc = ""]
+    pub unsafe fn release_display_ext(
+        &self,
+        physical_device: PhysicalDevice,
+        display: DisplayKHR,
+    ) -> Result {
+        (self.release_display_ext)(physical_device, display)
+    }
+}
+impl ExtAcquireXlibDisplayFn {
+    pub fn name() -> &'static ::std::ffi::CStr {
+        ::std::ffi::CStr::from_bytes_with_nul(b"VK_EXT_acquire_xlib_display\0")
+            .expect("Wrong extension string")
+    }
+}
+#[allow(non_camel_case_types)]
+pub type PFN_vkAcquireXlibDisplayEXT = extern "system" fn(
+    physical_device: PhysicalDevice,
+    dpy: *mut Display,
+    display: DisplayKHR,
+) -> Result;
+#[allow(non_camel_case_types)]
+pub type PFN_vkGetRandROutputDisplayEXT = extern "system" fn(
+    physical_device: PhysicalDevice,
+    dpy: *mut Display,
+    rr_output: RROutput,
+    p_display: *mut DisplayKHR,
+) -> Result;
+pub struct ExtAcquireXlibDisplayFn {
+    pub acquire_xlib_display_ext: extern "system" fn(
+        physical_device: PhysicalDevice,
+        dpy: *mut Display,
+        display: DisplayKHR,
+    ) -> Result,
+    pub get_rand_r_output_display_ext: extern "system" fn(
+        physical_device: PhysicalDevice,
+        dpy: *mut Display,
+        rr_output: RROutput,
+        p_display: *mut DisplayKHR,
+    ) -> Result,
+}
+unsafe impl Send for ExtAcquireXlibDisplayFn {}
+unsafe impl Sync for ExtAcquireXlibDisplayFn {}
+impl ::std::clone::Clone for ExtAcquireXlibDisplayFn {
+    fn clone(&self) -> Self {
+        ExtAcquireXlibDisplayFn {
+            acquire_xlib_display_ext: self.acquire_xlib_display_ext,
+            get_rand_r_output_display_ext: self.get_rand_r_output_display_ext,
+        }
+    }
+}
+impl ExtAcquireXlibDisplayFn {
+    pub fn load<F>(mut _f: F) -> Self
+    where
+        F: FnMut(&::std::ffi::CStr) -> *const c_void,
+    {
+        ExtAcquireXlibDisplayFn {
+            acquire_xlib_display_ext: unsafe {
+                extern "system" fn acquire_xlib_display_ext(
+                    _physical_device: PhysicalDevice,
+                    _dpy: *mut Display,
+                    _display: DisplayKHR,
+                ) -> Result {
+                    panic!(concat!("Unable to load ", stringify!(acquire_xlib_display_ext)))
+                }
+                let raw_name = stringify!(vkAcquireXlibDisplayEXT);
+                let cname = ::std::ffi::CString::new(raw_name).unwrap();
+                let val = _f(&cname);
+                if val.is_null() {
+                    acquire_xlib_display_ext
+                } else {
+                    ::std::mem::transmute(val)
+                }
+            },
+            get_rand_r_output_display_ext: unsafe {
+                extern "system" fn get_rand_r_output_display_ext(
+                    _physical_device: PhysicalDevice,
+                    _dpy: *mut Display,
+                    _rr_output: RROutput,
+                    _p_display: *mut DisplayKHR,
+                ) -> Result {
+                    panic!(concat!("Unable to load ", stringify!(get_rand_r_output_display_ext)))
+                }
+                let raw_name = stringify!(vkGetRandROutputDisplayEXT);
+                let cname = ::std::ffi::CString::new(raw_name).unwrap();
+                let val = _f(&cname);
+                if val.is_null() {
+                    get_rand_r_output_display_ext
+                } else {
+                    ::std::mem::transmute(val)
+                }
+            },
+        }
+    }
+    #[doc = ""]
+    pub unsafe fn acquire_xlib_display_ext(
+        &self,
+        physical_device: PhysicalDevice,
+        dpy: *mut Display,
+        display: DisplayKHR,
+    ) -> Result {
+        (self.acquire_xlib_display_ext)(physical_device, dpy, display)
+    }
+    #[doc = ""]
+    pub unsafe fn get_rand_r_output_display_ext(
+        &self,
+        physical_device: PhysicalDevice,
+        dpy: *mut Display,
+        rr_output: RROutput,
+        p_display: *mut DisplayKHR,
+    ) -> Result {
+        (self.get_rand_r_output_display_ext)(physical_device, dpy, rr_output, p_display)
+    }
+}
+impl ExtDisplaySurfaceCounterFn {
+    pub fn name() -> &'static ::std::ffi::CStr {
+        ::std::ffi::CStr::from_bytes_with_nul(b"VK_EXT_display_surface_counter\0")
+            .expect("Wrong extension string")
+    }
+}
+#[allow(non_camel_case_types)]
+pub type PFN_vkGetPhysicalDeviceSurfaceCapabilities2EXT = extern "system" fn(
+    physical_device: PhysicalDevice,
+    surface: SurfaceKHR,
+    p_surface_capabilities: *mut SurfaceCapabilities2EXT,
+) -> Result;
+pub struct ExtDisplaySurfaceCounterFn {
+    pub get_physical_device_surface_capabilities2_ext: extern "system" fn(
+        physical_device: PhysicalDevice,
+        surface: SurfaceKHR,
+        p_surface_capabilities: *mut SurfaceCapabilities2EXT,
+    ) -> Result,
+}
+unsafe impl Send for ExtDisplaySurfaceCounterFn {}
+unsafe impl Sync for ExtDisplaySurfaceCounterFn {}
+impl ::std::clone::Clone for ExtDisplaySurfaceCounterFn {
+    fn clone(&self) -> Self {
+        ExtDisplaySurfaceCounterFn {
+            get_physical_device_surface_capabilities2_ext: self
+                .get_physical_device_surface_capabilities2_ext,
+        }
+    }
+}
+impl ExtDisplaySurfaceCounterFn {
+    pub fn load<F>(mut _f: F) -> Self
+    where
+        F: FnMut(&::std::ffi::CStr) -> *const c_void,
+    {
+        ExtDisplaySurfaceCounterFn {
+            get_physical_device_surface_capabilities2_ext: unsafe {
+                extern "system" fn get_physical_device_surface_capabilities2_ext(
+                    _physical_device: PhysicalDevice,
+                    _surface: SurfaceKHR,
+                    _p_surface_capabilities: *mut SurfaceCapabilities2EXT,
+                ) -> Result {
+                    panic!(concat!("Unable to load ", stringify!(get_physical_device_surface_capabilities2_ext)))
+                }
+                let raw_name = stringify!(vkGetPhysicalDeviceSurfaceCapabilities2EXT);
+                let cname = ::std::ffi::CString::new(raw_name).unwrap();
+                let val = _f(&cname);
+                if val.is_null() {
+                    get_physical_device_surface_capabilities2_ext
+                } else {
+                    ::std::mem::transmute(val)
+                }
+            },
+        }
+    }
+    #[doc = ""]
+    pub unsafe fn get_physical_device_surface_capabilities2_ext(
+        &self,
+        physical_device: PhysicalDevice,
+        surface: SurfaceKHR,
+        p_surface_capabilities: *mut SurfaceCapabilities2EXT,
+    ) -> Result {
+        (self.get_physical_device_surface_capabilities2_ext)(
+            physical_device,
+            surface,
+            p_surface_capabilities,
+        )
+    }
+}
+#[doc = "Generated from \'VK_EXT_display_surface_counter\'"]
+impl StructureType {
+    pub const SURFACE_CAPABILITIES_2_EXT: Self = StructureType(1000090000);
+}
+impl ExtDisplayControlFn {
+    pub fn name() -> &'static ::std::ffi::CStr {
+        ::std::ffi::CStr::from_bytes_with_nul(b"VK_EXT_display_control\0")
+            .expect("Wrong extension string")
+    }
+}
+#[allow(non_camel_case_types)]
+pub type PFN_vkDisplayPowerControlEXT = extern "system" fn(
+    device: Device,
+    display: DisplayKHR,
+    p_display_power_info: *const DisplayPowerInfoEXT,
+) -> Result;
+#[allow(non_camel_case_types)]
+pub type PFN_vkRegisterDeviceEventEXT = extern "system" fn(
+    device: Device,
+    p_device_event_info: *const DeviceEventInfoEXT,
+    p_allocator: *const AllocationCallbacks,
+    p_fence: *mut Fence,
+) -> Result;
+#[allow(non_camel_case_types)]
+pub type PFN_vkRegisterDisplayEventEXT = extern "system" fn(
+    device: Device,
+    display: DisplayKHR,
+    p_display_event_info: *const DisplayEventInfoEXT,
+    p_allocator: *const AllocationCallbacks,
+    p_fence: *mut Fence,
+) -> Result;
+#[allow(non_camel_case_types)]
+pub type PFN_vkGetSwapchainCounterEXT = extern "system" fn(
+    device: Device,
+    swapchain: SwapchainKHR,
+    counter: SurfaceCounterFlagsEXT,
+    p_counter_value: *mut u64,
+) -> Result;
+pub struct ExtDisplayControlFn {
+    pub display_power_control_ext: extern "system" fn(
+        device: Device,
+        display: DisplayKHR,
+        p_display_power_info: *const DisplayPowerInfoEXT,
+    ) -> Result,
+    pub register_device_event_ext: extern "system" fn(
+        device: Device,
+        p_device_event_info: *const DeviceEventInfoEXT,
+        p_allocator: *const AllocationCallbacks,
+        p_fence: *mut Fence,
+    ) -> Result,
+    pub register_display_event_ext: extern "system" fn(
+        device: Device,
+        display: DisplayKHR,
+        p_display_event_info: *const DisplayEventInfoEXT,
+        p_allocator: *const AllocationCallbacks,
+        p_fence: *mut Fence,
+    ) -> Result,
+    pub get_swapchain_counter_ext: extern "system" fn(
+        device: Device,
+        swapchain: SwapchainKHR,
+        counter: SurfaceCounterFlagsEXT,
+        p_counter_value: *mut u64,
+    ) -> Result,
+}
+unsafe impl Send for ExtDisplayControlFn {}
+unsafe impl Sync for ExtDisplayControlFn {}
+impl ::std::clone::Clone for ExtDisplayControlFn {
+    fn clone(&self) -> Self {
+        ExtDisplayControlFn {
+            display_power_control_ext: self.display_power_control_ext,
+            register_device_event_ext: self.register_device_event_ext,
+            register_display_event_ext: self.register_display_event_ext,
+            get_swapchain_counter_ext: self.get_swapchain_counter_ext,
+        }
+    }
+}
+impl ExtDisplayControlFn {
+    pub fn load<F>(mut _f: F) -> Self
+    where
+        F: FnMut(&::std::ffi::CStr) -> *const c_void,
+    {
+        ExtDisplayControlFn {
+            display_power_control_ext: unsafe {
+                extern "system" fn display_power_control_ext(
+                    _device: Device,
+                    _display: DisplayKHR,
+                    _p_display_power_info: *const DisplayPowerInfoEXT,
+                ) -> Result {
+                    panic!(concat!("Unable to load ", stringify!(display_power_control_ext)))
+                }
+                let raw_name = stringify!(vkDisplayPowerControlEXT);
+                let cname = ::std::ffi::CString::new(raw_name).unwrap();
+                let val = _f(&cname);
+                if val.is_null() {
+                    display_power_control_ext
+                } else {
+                    ::std::mem::transmute(val)
+                }
+            },
+            register_device_event_ext: unsafe {
+                extern "system" fn register_device_event_ext(
+                    _device: Device,
+                    _p_device_event_info: *const DeviceEventInfoEXT,
+                    _p_allocator: *const AllocationCallbacks,
+                    _p_fence: *mut Fence,
+                ) -> Result {
+                    panic!(concat!("Unable to load ", stringify!(register_device_event_ext)))
+                }
+                let raw_name = stringify!(vkRegisterDeviceEventEXT);
+                let cname = ::std::ffi::CString::new(raw_name).unwrap();
+                let val = _f(&cname);
+                if val.is_null() {
+                    register_device_event_ext
+                } else {
+                    ::std::mem::transmute(val)
+                }
+            },
+            register_display_event_ext: unsafe {
+                extern "system" fn register_display_event_ext(
+                    _device: Device,
+                    _display: DisplayKHR,
+                    _p_display_event_info: *const DisplayEventInfoEXT,
+                    _p_allocator: *const AllocationCallbacks,
+                    _p_fence: *mut Fence,
+                ) -> Result {
+                    panic!(concat!("Unable to load ", stringify!(register_display_event_ext)))
+                }
+                let raw_name = stringify!(vkRegisterDisplayEventEXT);
+                let cname = ::std::ffi::CString::new(raw_name).unwrap();
+                let val = _f(&cname);
+                if val.is_null() {
+                    register_display_event_ext
+                } else {
+                    ::std::mem::transmute(val)
+                }
+            },
+            get_swapchain_counter_ext: unsafe {
+                extern "system" fn get_swapchain_counter_ext(
+                    _device: Device,
+                    _swapchain:
SwapchainKHR, + _counter: SurfaceCounterFlagsEXT, + _p_counter_value: *mut u64, + ) -> Result { + panic!(concat!( + "Unable to load ", + stringify!(get_swapchain_counter_ext) + )) + } + let raw_name = stringify!(vkGetSwapchainCounterEXT); + let cname = ::std::ffi::CString::new(raw_name).unwrap(); + let val = _f(&cname); + if val.is_null() { + get_swapchain_counter_ext + } else { + ::std::mem::transmute(val) + } + }, + } + } + #[doc = ""] + pub unsafe fn display_power_control_ext( + &self, + device: Device, + display: DisplayKHR, + p_display_power_info: *const DisplayPowerInfoEXT, + ) -> Result { + (self.display_power_control_ext)(device, display, p_display_power_info) + } + #[doc = ""] + pub unsafe fn register_device_event_ext( + &self, + device: Device, + p_device_event_info: *const DeviceEventInfoEXT, + p_allocator: *const AllocationCallbacks, + p_fence: *mut Fence, + ) -> Result { + (self.register_device_event_ext)(device, p_device_event_info, p_allocator, p_fence) + } + #[doc = ""] + pub unsafe fn register_display_event_ext( + &self, + device: Device, + display: DisplayKHR, + p_display_event_info: *const DisplayEventInfoEXT, + p_allocator: *const AllocationCallbacks, + p_fence: *mut Fence, + ) -> Result { + (self.register_display_event_ext)( + device, + display, + p_display_event_info, + p_allocator, + p_fence, + ) + } + #[doc = ""] + pub unsafe fn get_swapchain_counter_ext( + &self, + device: Device, + swapchain: SwapchainKHR, + counter: SurfaceCounterFlagsEXT, + p_counter_value: *mut u64, + ) -> Result { + (self.get_swapchain_counter_ext)(device, swapchain, counter, p_counter_value) + } +} +#[doc = "Generated from \'VK_EXT_display_control\'"] +impl StructureType { + pub const DISPLAY_POWER_INFO_EXT: Self = StructureType(1000091000); +} +#[doc = "Generated from \'VK_EXT_display_control\'"] +impl StructureType { + pub const DEVICE_EVENT_INFO_EXT: Self = StructureType(1000091001); +} +#[doc = "Generated from \'VK_EXT_display_control\'"] +impl StructureType { + pub const DISPLAY_EVENT_INFO_EXT: Self = StructureType(1000091002); +} +#[doc = "Generated from \'VK_EXT_display_control\'"] +impl StructureType { + pub const SWAPCHAIN_COUNTER_CREATE_INFO_EXT: Self = StructureType(1000091003); +} +impl GoogleDisplayTimingFn { + pub fn name() -> &'static ::std::ffi::CStr { + ::std::ffi::CStr::from_bytes_with_nul(b"VK_GOOGLE_display_timing\0") + .expect("Wrong extension string") + } +} +#[allow(non_camel_case_types)] +pub type PFN_vkGetRefreshCycleDurationGOOGLE = extern "system" fn( + device: Device, + swapchain: SwapchainKHR, + p_display_timing_properties: *mut RefreshCycleDurationGOOGLE, +) -> Result; +#[allow(non_camel_case_types)] +pub type PFN_vkGetPastPresentationTimingGOOGLE = extern "system" fn( + device: Device, + swapchain: SwapchainKHR, + p_presentation_timing_count: *mut u32, + p_presentation_timings: *mut PastPresentationTimingGOOGLE, +) -> Result; +pub struct GoogleDisplayTimingFn { + pub get_refresh_cycle_duration_google: extern "system" fn( + device: Device, + swapchain: SwapchainKHR, + p_display_timing_properties: *mut RefreshCycleDurationGOOGLE, + ) -> Result, + pub get_past_presentation_timing_google: extern "system" fn( + device: Device, + swapchain: SwapchainKHR, + p_presentation_timing_count: *mut u32, + p_presentation_timings: *mut PastPresentationTimingGOOGLE, + ) -> Result, +} +unsafe impl Send for GoogleDisplayTimingFn {} +unsafe impl Sync for GoogleDisplayTimingFn {} +impl ::std::clone::Clone for GoogleDisplayTimingFn { + fn clone(&self) -> Self { + GoogleDisplayTimingFn 
{ + get_refresh_cycle_duration_google: self.get_refresh_cycle_duration_google, + get_past_presentation_timing_google: self.get_past_presentation_timing_google, + } + } +} +impl GoogleDisplayTimingFn { + pub fn load(mut _f: F) -> Self + where + F: FnMut(&::std::ffi::CStr) -> *const c_void, + { + GoogleDisplayTimingFn { + get_refresh_cycle_duration_google: unsafe { + extern "system" fn get_refresh_cycle_duration_google( + _device: Device, + _swapchain: SwapchainKHR, + _p_display_timing_properties: *mut RefreshCycleDurationGOOGLE, + ) -> Result { + panic!(concat!( + "Unable to load ", + stringify!(get_refresh_cycle_duration_google) + )) + } + let raw_name = stringify!(vkGetRefreshCycleDurationGOOGLE); + let cname = ::std::ffi::CString::new(raw_name).unwrap(); + let val = _f(&cname); + if val.is_null() { + get_refresh_cycle_duration_google + } else { + ::std::mem::transmute(val) + } + }, + get_past_presentation_timing_google: unsafe { + extern "system" fn get_past_presentation_timing_google( + _device: Device, + _swapchain: SwapchainKHR, + _p_presentation_timing_count: *mut u32, + _p_presentation_timings: *mut PastPresentationTimingGOOGLE, + ) -> Result { + panic!(concat!( + "Unable to load ", + stringify!(get_past_presentation_timing_google) + )) + } + let raw_name = stringify!(vkGetPastPresentationTimingGOOGLE); + let cname = ::std::ffi::CString::new(raw_name).unwrap(); + let val = _f(&cname); + if val.is_null() { + get_past_presentation_timing_google + } else { + ::std::mem::transmute(val) + } + }, + } + } + #[doc = ""] + pub unsafe fn get_refresh_cycle_duration_google( + &self, + device: Device, + swapchain: SwapchainKHR, + p_display_timing_properties: *mut RefreshCycleDurationGOOGLE, + ) -> Result { + (self.get_refresh_cycle_duration_google)(device, swapchain, p_display_timing_properties) + } + #[doc = ""] + pub unsafe fn get_past_presentation_timing_google( + &self, + device: Device, + swapchain: SwapchainKHR, + p_presentation_timing_count: *mut u32, + p_presentation_timings: *mut PastPresentationTimingGOOGLE, + ) -> Result { + (self.get_past_presentation_timing_google)( + device, + swapchain, + p_presentation_timing_count, + p_presentation_timings, + ) + } +} +#[doc = "Generated from \'VK_GOOGLE_display_timing\'"] +impl StructureType { + pub const PRESENT_TIMES_INFO_GOOGLE: Self = StructureType(1000092000); +} +impl NvSampleMaskOverrideCoverageFn { + pub fn name() -> &'static ::std::ffi::CStr { + ::std::ffi::CStr::from_bytes_with_nul(b"VK_NV_sample_mask_override_coverage\0") + .expect("Wrong extension string") + } +} +pub struct NvSampleMaskOverrideCoverageFn {} +unsafe impl Send for NvSampleMaskOverrideCoverageFn {} +unsafe impl Sync for NvSampleMaskOverrideCoverageFn {} +impl ::std::clone::Clone for NvSampleMaskOverrideCoverageFn { + fn clone(&self) -> Self { + NvSampleMaskOverrideCoverageFn {} + } +} +impl NvSampleMaskOverrideCoverageFn { + pub fn load(mut _f: F) -> Self + where + F: FnMut(&::std::ffi::CStr) -> *const c_void, + { + NvSampleMaskOverrideCoverageFn {} + } +} +impl NvGeometryShaderPassthroughFn { + pub fn name() -> &'static ::std::ffi::CStr { + ::std::ffi::CStr::from_bytes_with_nul(b"VK_NV_geometry_shader_passthrough\0") + .expect("Wrong extension string") + } +} +pub struct NvGeometryShaderPassthroughFn {} +unsafe impl Send for NvGeometryShaderPassthroughFn {} +unsafe impl Sync for NvGeometryShaderPassthroughFn {} +impl ::std::clone::Clone for NvGeometryShaderPassthroughFn { + fn clone(&self) -> Self { + NvGeometryShaderPassthroughFn {} + } +} +impl 
NvGeometryShaderPassthroughFn { + pub fn load(mut _f: F) -> Self + where + F: FnMut(&::std::ffi::CStr) -> *const c_void, + { + NvGeometryShaderPassthroughFn {} + } +} +impl NvViewportArray2Fn { + pub fn name() -> &'static ::std::ffi::CStr { + ::std::ffi::CStr::from_bytes_with_nul(b"VK_NV_viewport_array2\0") + .expect("Wrong extension string") + } +} +pub struct NvViewportArray2Fn {} +unsafe impl Send for NvViewportArray2Fn {} +unsafe impl Sync for NvViewportArray2Fn {} +impl ::std::clone::Clone for NvViewportArray2Fn { + fn clone(&self) -> Self { + NvViewportArray2Fn {} + } +} +impl NvViewportArray2Fn { + pub fn load(mut _f: F) -> Self + where + F: FnMut(&::std::ffi::CStr) -> *const c_void, + { + NvViewportArray2Fn {} + } +} +impl NvxMultiviewPerViewAttributesFn { + pub fn name() -> &'static ::std::ffi::CStr { + ::std::ffi::CStr::from_bytes_with_nul(b"VK_NVX_multiview_per_view_attributes\0") + .expect("Wrong extension string") + } +} +pub struct NvxMultiviewPerViewAttributesFn {} +unsafe impl Send for NvxMultiviewPerViewAttributesFn {} +unsafe impl Sync for NvxMultiviewPerViewAttributesFn {} +impl ::std::clone::Clone for NvxMultiviewPerViewAttributesFn { + fn clone(&self) -> Self { + NvxMultiviewPerViewAttributesFn {} + } +} +impl NvxMultiviewPerViewAttributesFn { + pub fn load(mut _f: F) -> Self + where + F: FnMut(&::std::ffi::CStr) -> *const c_void, + { + NvxMultiviewPerViewAttributesFn {} + } +} +#[doc = "Generated from \'VK_NVX_multiview_per_view_attributes\'"] +impl StructureType { + pub const PHYSICAL_DEVICE_MULTIVIEW_PER_VIEW_ATTRIBUTES_PROPERTIES_NVX: Self = + StructureType(1000097000); +} +#[doc = "Generated from \'VK_NVX_multiview_per_view_attributes\'"] +impl SubpassDescriptionFlags { + pub const PER_VIEW_ATTRIBUTES_NVX: Self = SubpassDescriptionFlags(0b1); +} +#[doc = "Generated from \'VK_NVX_multiview_per_view_attributes\'"] +impl SubpassDescriptionFlags { + pub const PER_VIEW_POSITION_X_ONLY_NVX: Self = SubpassDescriptionFlags(0b10); +} +impl NvViewportSwizzleFn { + pub fn name() -> &'static ::std::ffi::CStr { + ::std::ffi::CStr::from_bytes_with_nul(b"VK_NV_viewport_swizzle\0") + .expect("Wrong extension string") + } +} +pub struct NvViewportSwizzleFn {} +unsafe impl Send for NvViewportSwizzleFn {} +unsafe impl Sync for NvViewportSwizzleFn {} +impl ::std::clone::Clone for NvViewportSwizzleFn { + fn clone(&self) -> Self { + NvViewportSwizzleFn {} + } +} +impl NvViewportSwizzleFn { + pub fn load(mut _f: F) -> Self + where + F: FnMut(&::std::ffi::CStr) -> *const c_void, + { + NvViewportSwizzleFn {} + } +} +#[doc = "Generated from \'VK_NV_viewport_swizzle\'"] +impl StructureType { + pub const PIPELINE_VIEWPORT_SWIZZLE_STATE_CREATE_INFO_NV: Self = StructureType(1000098000); +} +impl ExtDiscardRectanglesFn { + pub fn name() -> &'static ::std::ffi::CStr { + ::std::ffi::CStr::from_bytes_with_nul(b"VK_EXT_discard_rectangles\0") + .expect("Wrong extension string") + } +} +#[allow(non_camel_case_types)] +pub type PFN_vkCmdSetDiscardRectangleEXT = extern "system" fn( + command_buffer: CommandBuffer, + first_discard_rectangle: u32, + discard_rectangle_count: u32, + p_discard_rectangles: *const Rect2D, +) -> c_void; +pub struct ExtDiscardRectanglesFn { + pub cmd_set_discard_rectangle_ext: extern "system" fn( + command_buffer: CommandBuffer, + first_discard_rectangle: u32, + discard_rectangle_count: u32, + p_discard_rectangles: *const Rect2D, + ) -> c_void, +} +unsafe impl Send for ExtDiscardRectanglesFn {} +unsafe impl Sync for ExtDiscardRectanglesFn {} +impl ::std::clone::Clone for 
ExtDiscardRectanglesFn { + fn clone(&self) -> Self { + ExtDiscardRectanglesFn { + cmd_set_discard_rectangle_ext: self.cmd_set_discard_rectangle_ext, + } + } +} +impl ExtDiscardRectanglesFn { + pub fn load(mut _f: F) -> Self + where + F: FnMut(&::std::ffi::CStr) -> *const c_void, + { + ExtDiscardRectanglesFn { + cmd_set_discard_rectangle_ext: unsafe { + extern "system" fn cmd_set_discard_rectangle_ext( + _command_buffer: CommandBuffer, + _first_discard_rectangle: u32, + _discard_rectangle_count: u32, + _p_discard_rectangles: *const Rect2D, + ) -> c_void { + panic!(concat!( + "Unable to load ", + stringify!(cmd_set_discard_rectangle_ext) + )) + } + let raw_name = stringify!(vkCmdSetDiscardRectangleEXT); + let cname = ::std::ffi::CString::new(raw_name).unwrap(); + let val = _f(&cname); + if val.is_null() { + cmd_set_discard_rectangle_ext + } else { + ::std::mem::transmute(val) + } + }, + } + } + #[doc = ""] + pub unsafe fn cmd_set_discard_rectangle_ext( + &self, + command_buffer: CommandBuffer, + first_discard_rectangle: u32, + discard_rectangle_count: u32, + p_discard_rectangles: *const Rect2D, + ) -> c_void { + (self.cmd_set_discard_rectangle_ext)( + command_buffer, + first_discard_rectangle, + discard_rectangle_count, + p_discard_rectangles, + ) + } +} +#[doc = "Generated from \'VK_EXT_discard_rectangles\'"] +impl StructureType { + pub const PHYSICAL_DEVICE_DISCARD_RECTANGLE_PROPERTIES_EXT: Self = StructureType(1000099000); +} +#[doc = "Generated from \'VK_EXT_discard_rectangles\'"] +impl StructureType { + pub const PIPELINE_DISCARD_RECTANGLE_STATE_CREATE_INFO_EXT: Self = StructureType(1000099001); +} +#[doc = "Generated from \'VK_EXT_discard_rectangles\'"] +impl DynamicState { + pub const DISCARD_RECTANGLE_EXT: Self = DynamicState(1000099000); +} +impl NvExtension101Fn { + pub fn name() -> &'static ::std::ffi::CStr { + ::std::ffi::CStr::from_bytes_with_nul(b"VK_NV_extension_101\0") + .expect("Wrong extension string") + } +} +pub struct NvExtension101Fn {} +unsafe impl Send for NvExtension101Fn {} +unsafe impl Sync for NvExtension101Fn {} +impl ::std::clone::Clone for NvExtension101Fn { + fn clone(&self) -> Self { + NvExtension101Fn {} + } +} +impl NvExtension101Fn { + pub fn load(mut _f: F) -> Self + where + F: FnMut(&::std::ffi::CStr) -> *const c_void, + { + NvExtension101Fn {} + } +} +impl ExtConservativeRasterizationFn { + pub fn name() -> &'static ::std::ffi::CStr { + ::std::ffi::CStr::from_bytes_with_nul(b"VK_EXT_conservative_rasterization\0") + .expect("Wrong extension string") + } +} +pub struct ExtConservativeRasterizationFn {} +unsafe impl Send for ExtConservativeRasterizationFn {} +unsafe impl Sync for ExtConservativeRasterizationFn {} +impl ::std::clone::Clone for ExtConservativeRasterizationFn { + fn clone(&self) -> Self { + ExtConservativeRasterizationFn {} + } +} +impl ExtConservativeRasterizationFn { + pub fn load(mut _f: F) -> Self + where + F: FnMut(&::std::ffi::CStr) -> *const c_void, + { + ExtConservativeRasterizationFn {} + } +} +#[doc = "Generated from \'VK_EXT_conservative_rasterization\'"] +impl StructureType { + pub const PHYSICAL_DEVICE_CONSERVATIVE_RASTERIZATION_PROPERTIES_EXT: Self = + StructureType(1000101000); +} +#[doc = "Generated from \'VK_EXT_conservative_rasterization\'"] +impl StructureType { + pub const PIPELINE_RASTERIZATION_CONSERVATIVE_STATE_CREATE_INFO_EXT: Self = + StructureType(1000101001); +} +impl NvExtension103Fn { + pub fn name() -> &'static ::std::ffi::CStr { + ::std::ffi::CStr::from_bytes_with_nul(b"VK_NV_extension_103\0") + .expect("Wrong 
extension string") + } +} +pub struct NvExtension103Fn {} +unsafe impl Send for NvExtension103Fn {} +unsafe impl Sync for NvExtension103Fn {} +impl ::std::clone::Clone for NvExtension103Fn { + fn clone(&self) -> Self { + NvExtension103Fn {} + } +} +impl NvExtension103Fn { + pub fn load(mut _f: F) -> Self + where + F: FnMut(&::std::ffi::CStr) -> *const c_void, + { + NvExtension103Fn {} + } +} +impl NvExtension104Fn { + pub fn name() -> &'static ::std::ffi::CStr { + ::std::ffi::CStr::from_bytes_with_nul(b"VK_NV_extension_104\0") + .expect("Wrong extension string") + } +} +pub struct NvExtension104Fn {} +unsafe impl Send for NvExtension104Fn {} +unsafe impl Sync for NvExtension104Fn {} +impl ::std::clone::Clone for NvExtension104Fn { + fn clone(&self) -> Self { + NvExtension104Fn {} + } +} +impl NvExtension104Fn { + pub fn load(mut _f: F) -> Self + where + F: FnMut(&::std::ffi::CStr) -> *const c_void, + { + NvExtension104Fn {} + } +} +impl ExtSwapchainColorspaceFn { + pub fn name() -> &'static ::std::ffi::CStr { + ::std::ffi::CStr::from_bytes_with_nul(b"VK_EXT_swapchain_colorspace\0") + .expect("Wrong extension string") + } +} +pub struct ExtSwapchainColorspaceFn {} +unsafe impl Send for ExtSwapchainColorspaceFn {} +unsafe impl Sync for ExtSwapchainColorspaceFn {} +impl ::std::clone::Clone for ExtSwapchainColorspaceFn { + fn clone(&self) -> Self { + ExtSwapchainColorspaceFn {} + } +} +impl ExtSwapchainColorspaceFn { + pub fn load(mut _f: F) -> Self + where + F: FnMut(&::std::ffi::CStr) -> *const c_void, + { + ExtSwapchainColorspaceFn {} + } +} +#[doc = "Generated from \'VK_EXT_swapchain_colorspace\'"] +impl ColorSpaceKHR { + pub const DISPLAY_P3_NONLINEAR_EXT: Self = ColorSpaceKHR(1000104001); +} +#[doc = "Generated from \'VK_EXT_swapchain_colorspace\'"] +impl ColorSpaceKHR { + pub const EXTENDED_SRGB_LINEAR_EXT: Self = ColorSpaceKHR(1000104002); +} +#[doc = "Generated from \'VK_EXT_swapchain_colorspace\'"] +impl ColorSpaceKHR { + pub const DCI_P3_LINEAR_EXT: Self = ColorSpaceKHR(1000104003); +} +#[doc = "Generated from \'VK_EXT_swapchain_colorspace\'"] +impl ColorSpaceKHR { + pub const DCI_P3_NONLINEAR_EXT: Self = ColorSpaceKHR(1000104004); +} +#[doc = "Generated from \'VK_EXT_swapchain_colorspace\'"] +impl ColorSpaceKHR { + pub const BT709_LINEAR_EXT: Self = ColorSpaceKHR(1000104005); +} +#[doc = "Generated from \'VK_EXT_swapchain_colorspace\'"] +impl ColorSpaceKHR { + pub const BT709_NONLINEAR_EXT: Self = ColorSpaceKHR(1000104006); +} +#[doc = "Generated from \'VK_EXT_swapchain_colorspace\'"] +impl ColorSpaceKHR { + pub const BT2020_LINEAR_EXT: Self = ColorSpaceKHR(1000104007); +} +#[doc = "Generated from \'VK_EXT_swapchain_colorspace\'"] +impl ColorSpaceKHR { + pub const HDR10_ST2084_EXT: Self = ColorSpaceKHR(1000104008); +} +#[doc = "Generated from \'VK_EXT_swapchain_colorspace\'"] +impl ColorSpaceKHR { + pub const DOLBYVISION_EXT: Self = ColorSpaceKHR(1000104009); +} +#[doc = "Generated from \'VK_EXT_swapchain_colorspace\'"] +impl ColorSpaceKHR { + pub const HDR10_HLG_EXT: Self = ColorSpaceKHR(1000104010); +} +#[doc = "Generated from \'VK_EXT_swapchain_colorspace\'"] +impl ColorSpaceKHR { + pub const ADOBERGB_LINEAR_EXT: Self = ColorSpaceKHR(1000104011); +} +#[doc = "Generated from \'VK_EXT_swapchain_colorspace\'"] +impl ColorSpaceKHR { + pub const ADOBERGB_NONLINEAR_EXT: Self = ColorSpaceKHR(1000104012); +} +#[doc = "Generated from \'VK_EXT_swapchain_colorspace\'"] +impl ColorSpaceKHR { + pub const PASS_THROUGH_EXT: Self = ColorSpaceKHR(1000104013); +} +#[doc = "Generated from 
\'VK_EXT_swapchain_colorspace\'"] +impl ColorSpaceKHR { + pub const EXTENDED_SRGB_NONLINEAR_EXT: Self = ColorSpaceKHR(1000104014); +} +impl ExtHdrMetadataFn { + pub fn name() -> &'static ::std::ffi::CStr { + ::std::ffi::CStr::from_bytes_with_nul(b"VK_EXT_hdr_metadata\0") + .expect("Wrong extension string") + } +} +#[allow(non_camel_case_types)] +pub type PFN_vkSetHdrMetadataEXT = extern "system" fn( + device: Device, + swapchain_count: u32, + p_swapchains: *const SwapchainKHR, + p_metadata: *const HdrMetadataEXT, +) -> c_void; +pub struct ExtHdrMetadataFn { + pub set_hdr_metadata_ext: extern "system" fn( + device: Device, + swapchain_count: u32, + p_swapchains: *const SwapchainKHR, + p_metadata: *const HdrMetadataEXT, + ) -> c_void, +} +unsafe impl Send for ExtHdrMetadataFn {} +unsafe impl Sync for ExtHdrMetadataFn {} +impl ::std::clone::Clone for ExtHdrMetadataFn { + fn clone(&self) -> Self { + ExtHdrMetadataFn { + set_hdr_metadata_ext: self.set_hdr_metadata_ext, + } + } +} +impl ExtHdrMetadataFn { + pub fn load(mut _f: F) -> Self + where + F: FnMut(&::std::ffi::CStr) -> *const c_void, + { + ExtHdrMetadataFn { + set_hdr_metadata_ext: unsafe { + extern "system" fn set_hdr_metadata_ext( + _device: Device, + _swapchain_count: u32, + _p_swapchains: *const SwapchainKHR, + _p_metadata: *const HdrMetadataEXT, + ) -> c_void { + panic!(concat!("Unable to load ", stringify!(set_hdr_metadata_ext))) + } + let raw_name = stringify!(vkSetHdrMetadataEXT); + let cname = ::std::ffi::CString::new(raw_name).unwrap(); + let val = _f(&cname); + if val.is_null() { + set_hdr_metadata_ext + } else { + ::std::mem::transmute(val) + } + }, + } + } + #[doc = ""] + pub unsafe fn set_hdr_metadata_ext( + &self, + device: Device, + swapchain_count: u32, + p_swapchains: *const SwapchainKHR, + p_metadata: *const HdrMetadataEXT, + ) -> c_void { + (self.set_hdr_metadata_ext)(device, swapchain_count, p_swapchains, p_metadata) + } +} +#[doc = "Generated from \'VK_EXT_hdr_metadata\'"] +impl StructureType { + pub const HDR_METADATA_EXT: Self = StructureType(1000105000); +} +impl ImgExtension107Fn { + pub fn name() -> &'static ::std::ffi::CStr { + ::std::ffi::CStr::from_bytes_with_nul(b"VK_IMG_extension_107\0") + .expect("Wrong extension string") + } +} +pub struct ImgExtension107Fn {} +unsafe impl Send for ImgExtension107Fn {} +unsafe impl Sync for ImgExtension107Fn {} +impl ::std::clone::Clone for ImgExtension107Fn { + fn clone(&self) -> Self { + ImgExtension107Fn {} + } +} +impl ImgExtension107Fn { + pub fn load(mut _f: F) -> Self + where + F: FnMut(&::std::ffi::CStr) -> *const c_void, + { + ImgExtension107Fn {} + } +} +impl ImgExtension108Fn { + pub fn name() -> &'static ::std::ffi::CStr { + ::std::ffi::CStr::from_bytes_with_nul(b"VK_IMG_extension_108\0") + .expect("Wrong extension string") + } +} +pub struct ImgExtension108Fn {} +unsafe impl Send for ImgExtension108Fn {} +unsafe impl Sync for ImgExtension108Fn {} +impl ::std::clone::Clone for ImgExtension108Fn { + fn clone(&self) -> Self { + ImgExtension108Fn {} + } +} +impl ImgExtension108Fn { + pub fn load(mut _f: F) -> Self + where + F: FnMut(&::std::ffi::CStr) -> *const c_void, + { + ImgExtension108Fn {} + } +} +impl ImgExtension109Fn { + pub fn name() -> &'static ::std::ffi::CStr { + ::std::ffi::CStr::from_bytes_with_nul(b"VK_IMG_extension_109\0") + .expect("Wrong extension string") + } +} +pub struct ImgExtension109Fn {} +unsafe impl Send for ImgExtension109Fn {} +unsafe impl Sync for ImgExtension109Fn {} +impl ::std::clone::Clone for ImgExtension109Fn { + fn 
clone(&self) -> Self { + ImgExtension109Fn {} + } +} +impl ImgExtension109Fn { + pub fn load(mut _f: F) -> Self + where + F: FnMut(&::std::ffi::CStr) -> *const c_void, + { + ImgExtension109Fn {} + } +} +impl KhrCreateRenderpass2Fn { + pub fn name() -> &'static ::std::ffi::CStr { + ::std::ffi::CStr::from_bytes_with_nul(b"VK_KHR_create_renderpass2\0") + .expect("Wrong extension string") + } +} +#[allow(non_camel_case_types)] +pub type PFN_vkCreateRenderPass2KHR = extern "system" fn( + device: Device, + p_create_info: *const RenderPassCreateInfo2KHR, + p_allocator: *const AllocationCallbacks, + p_render_pass: *mut RenderPass, +) -> Result; +#[allow(non_camel_case_types)] +pub type PFN_vkCmdBeginRenderPass2KHR = extern "system" fn( + command_buffer: CommandBuffer, + p_render_pass_begin: *const RenderPassBeginInfo, + p_subpass_begin_info: *const SubpassBeginInfoKHR, +) -> c_void; +#[allow(non_camel_case_types)] +pub type PFN_vkCmdNextSubpass2KHR = extern "system" fn( + command_buffer: CommandBuffer, + p_subpass_begin_info: *const SubpassBeginInfoKHR, + p_subpass_end_info: *const SubpassEndInfoKHR, +) -> c_void; +#[allow(non_camel_case_types)] +pub type PFN_vkCmdEndRenderPass2KHR = extern "system" fn( + command_buffer: CommandBuffer, + p_subpass_end_info: *const SubpassEndInfoKHR, +) -> c_void; +pub struct KhrCreateRenderpass2Fn { + pub create_render_pass2_khr: extern "system" fn( + device: Device, + p_create_info: *const RenderPassCreateInfo2KHR, + p_allocator: *const AllocationCallbacks, + p_render_pass: *mut RenderPass, + ) -> Result, + pub cmd_begin_render_pass2_khr: extern "system" fn( + command_buffer: CommandBuffer, + p_render_pass_begin: *const RenderPassBeginInfo, + p_subpass_begin_info: *const SubpassBeginInfoKHR, + ) -> c_void, + pub cmd_next_subpass2_khr: extern "system" fn( + command_buffer: CommandBuffer, + p_subpass_begin_info: *const SubpassBeginInfoKHR, + p_subpass_end_info: *const SubpassEndInfoKHR, + ) -> c_void, + pub cmd_end_render_pass2_khr: extern "system" fn( + command_buffer: CommandBuffer, + p_subpass_end_info: *const SubpassEndInfoKHR, + ) -> c_void, +} +unsafe impl Send for KhrCreateRenderpass2Fn {} +unsafe impl Sync for KhrCreateRenderpass2Fn {} +impl ::std::clone::Clone for KhrCreateRenderpass2Fn { + fn clone(&self) -> Self { + KhrCreateRenderpass2Fn { + create_render_pass2_khr: self.create_render_pass2_khr, + cmd_begin_render_pass2_khr: self.cmd_begin_render_pass2_khr, + cmd_next_subpass2_khr: self.cmd_next_subpass2_khr, + cmd_end_render_pass2_khr: self.cmd_end_render_pass2_khr, + } + } +} +impl KhrCreateRenderpass2Fn { + pub fn load(mut _f: F) -> Self + where + F: FnMut(&::std::ffi::CStr) -> *const c_void, + { + KhrCreateRenderpass2Fn { + create_render_pass2_khr: unsafe { + extern "system" fn create_render_pass2_khr( + _device: Device, + _p_create_info: *const RenderPassCreateInfo2KHR, + _p_allocator: *const AllocationCallbacks, + _p_render_pass: *mut RenderPass, + ) -> Result { + panic!(concat!( + "Unable to load ", + stringify!(create_render_pass2_khr) + )) + } + let raw_name = stringify!(vkCreateRenderPass2KHR); + let cname = ::std::ffi::CString::new(raw_name).unwrap(); + let val = _f(&cname); + if val.is_null() { + create_render_pass2_khr + } else { + ::std::mem::transmute(val) + } + }, + cmd_begin_render_pass2_khr: unsafe { + extern "system" fn cmd_begin_render_pass2_khr( + _command_buffer: CommandBuffer, + _p_render_pass_begin: *const RenderPassBeginInfo, + _p_subpass_begin_info: *const SubpassBeginInfoKHR, + ) -> c_void { + panic!(concat!( + "Unable to load 
", + stringify!(cmd_begin_render_pass2_khr) + )) + } + let raw_name = stringify!(vkCmdBeginRenderPass2KHR); + let cname = ::std::ffi::CString::new(raw_name).unwrap(); + let val = _f(&cname); + if val.is_null() { + cmd_begin_render_pass2_khr + } else { + ::std::mem::transmute(val) + } + }, + cmd_next_subpass2_khr: unsafe { + extern "system" fn cmd_next_subpass2_khr( + _command_buffer: CommandBuffer, + _p_subpass_begin_info: *const SubpassBeginInfoKHR, + _p_subpass_end_info: *const SubpassEndInfoKHR, + ) -> c_void { + panic!(concat!( + "Unable to load ", + stringify!(cmd_next_subpass2_khr) + )) + } + let raw_name = stringify!(vkCmdNextSubpass2KHR); + let cname = ::std::ffi::CString::new(raw_name).unwrap(); + let val = _f(&cname); + if val.is_null() { + cmd_next_subpass2_khr + } else { + ::std::mem::transmute(val) + } + }, + cmd_end_render_pass2_khr: unsafe { + extern "system" fn cmd_end_render_pass2_khr( + _command_buffer: CommandBuffer, + _p_subpass_end_info: *const SubpassEndInfoKHR, + ) -> c_void { + panic!(concat!( + "Unable to load ", + stringify!(cmd_end_render_pass2_khr) + )) + } + let raw_name = stringify!(vkCmdEndRenderPass2KHR); + let cname = ::std::ffi::CString::new(raw_name).unwrap(); + let val = _f(&cname); + if val.is_null() { + cmd_end_render_pass2_khr + } else { + ::std::mem::transmute(val) + } + }, + } + } + #[doc = ""] + pub unsafe fn create_render_pass2_khr( + &self, + device: Device, + p_create_info: *const RenderPassCreateInfo2KHR, + p_allocator: *const AllocationCallbacks, + p_render_pass: *mut RenderPass, + ) -> Result { + (self.create_render_pass2_khr)(device, p_create_info, p_allocator, p_render_pass) + } + #[doc = ""] + pub unsafe fn cmd_begin_render_pass2_khr( + &self, + command_buffer: CommandBuffer, + p_render_pass_begin: *const RenderPassBeginInfo, + p_subpass_begin_info: *const SubpassBeginInfoKHR, + ) -> c_void { + (self.cmd_begin_render_pass2_khr)(command_buffer, p_render_pass_begin, p_subpass_begin_info) + } + #[doc = ""] + pub unsafe fn cmd_next_subpass2_khr( + &self, + command_buffer: CommandBuffer, + p_subpass_begin_info: *const SubpassBeginInfoKHR, + p_subpass_end_info: *const SubpassEndInfoKHR, + ) -> c_void { + (self.cmd_next_subpass2_khr)(command_buffer, p_subpass_begin_info, p_subpass_end_info) + } + #[doc = ""] + pub unsafe fn cmd_end_render_pass2_khr( + &self, + command_buffer: CommandBuffer, + p_subpass_end_info: *const SubpassEndInfoKHR, + ) -> c_void { + (self.cmd_end_render_pass2_khr)(command_buffer, p_subpass_end_info) + } +} +#[doc = "Generated from \'VK_KHR_create_renderpass2\'"] +impl StructureType { + pub const ATTACHMENT_DESCRIPTION_2_KHR: Self = StructureType(1000109000); +} +#[doc = "Generated from \'VK_KHR_create_renderpass2\'"] +impl StructureType { + pub const ATTACHMENT_REFERENCE_2_KHR: Self = StructureType(1000109001); +} +#[doc = "Generated from \'VK_KHR_create_renderpass2\'"] +impl StructureType { + pub const SUBPASS_DESCRIPTION_2_KHR: Self = StructureType(1000109002); +} +#[doc = "Generated from \'VK_KHR_create_renderpass2\'"] +impl StructureType { + pub const SUBPASS_DEPENDENCY_2_KHR: Self = StructureType(1000109003); +} +#[doc = "Generated from \'VK_KHR_create_renderpass2\'"] +impl StructureType { + pub const RENDER_PASS_CREATE_INFO_2_KHR: Self = StructureType(1000109004); +} +#[doc = "Generated from \'VK_KHR_create_renderpass2\'"] +impl StructureType { + pub const SUBPASS_BEGIN_INFO_KHR: Self = StructureType(1000109005); +} +#[doc = "Generated from \'VK_KHR_create_renderpass2\'"] +impl StructureType { + pub const 
SUBPASS_END_INFO_KHR: Self = StructureType(1000109006); +} +impl ImgExtension111Fn { + pub fn name() -> &'static ::std::ffi::CStr { + ::std::ffi::CStr::from_bytes_with_nul(b"VK_IMG_extension_111\0") + .expect("Wrong extension string") + } +} +pub struct ImgExtension111Fn {} +unsafe impl Send for ImgExtension111Fn {} +unsafe impl Sync for ImgExtension111Fn {} +impl ::std::clone::Clone for ImgExtension111Fn { + fn clone(&self) -> Self { + ImgExtension111Fn {} + } +} +impl ImgExtension111Fn { + pub fn load(mut _f: F) -> Self + where + F: FnMut(&::std::ffi::CStr) -> *const c_void, + { + ImgExtension111Fn {} + } +} +impl KhrSharedPresentableImageFn { + pub fn name() -> &'static ::std::ffi::CStr { + ::std::ffi::CStr::from_bytes_with_nul(b"VK_KHR_shared_presentable_image\0") + .expect("Wrong extension string") + } +} +#[allow(non_camel_case_types)] +pub type PFN_vkGetSwapchainStatusKHR = + extern "system" fn(device: Device, swapchain: SwapchainKHR) -> Result; +pub struct KhrSharedPresentableImageFn { + pub get_swapchain_status_khr: + extern "system" fn(device: Device, swapchain: SwapchainKHR) -> Result, +} +unsafe impl Send for KhrSharedPresentableImageFn {} +unsafe impl Sync for KhrSharedPresentableImageFn {} +impl ::std::clone::Clone for KhrSharedPresentableImageFn { + fn clone(&self) -> Self { + KhrSharedPresentableImageFn { + get_swapchain_status_khr: self.get_swapchain_status_khr, + } + } +} +impl KhrSharedPresentableImageFn { + pub fn load(mut _f: F) -> Self + where + F: FnMut(&::std::ffi::CStr) -> *const c_void, + { + KhrSharedPresentableImageFn { + get_swapchain_status_khr: unsafe { + extern "system" fn get_swapchain_status_khr( + _device: Device, + _swapchain: SwapchainKHR, + ) -> Result { + panic!(concat!( + "Unable to load ", + stringify!(get_swapchain_status_khr) + )) + } + let raw_name = stringify!(vkGetSwapchainStatusKHR); + let cname = ::std::ffi::CString::new(raw_name).unwrap(); + let val = _f(&cname); + if val.is_null() { + get_swapchain_status_khr + } else { + ::std::mem::transmute(val) + } + }, + } + } + #[doc = ""] + pub unsafe fn get_swapchain_status_khr( + &self, + device: Device, + swapchain: SwapchainKHR, + ) -> Result { + (self.get_swapchain_status_khr)(device, swapchain) + } +} +#[doc = "Generated from \'VK_KHR_shared_presentable_image\'"] +impl StructureType { + pub const SHARED_PRESENT_SURFACE_CAPABILITIES_KHR: Self = StructureType(1000111000); +} +#[doc = "Generated from \'VK_KHR_shared_presentable_image\'"] +impl PresentModeKHR { + pub const SHARED_DEMAND_REFRESH: Self = PresentModeKHR(1000111000); +} +#[doc = "Generated from \'VK_KHR_shared_presentable_image\'"] +impl PresentModeKHR { + pub const SHARED_CONTINUOUS_REFRESH: Self = PresentModeKHR(1000111001); +} +#[doc = "Generated from \'VK_KHR_shared_presentable_image\'"] +impl ImageLayout { + pub const SHARED_PRESENT_KHR: Self = ImageLayout(1000111000); +} +impl KhrExternalFenceCapabilitiesFn { + pub fn name() -> &'static ::std::ffi::CStr { + ::std::ffi::CStr::from_bytes_with_nul(b"VK_KHR_external_fence_capabilities\0") + .expect("Wrong extension string") + } +} +pub struct KhrExternalFenceCapabilitiesFn {} +unsafe impl Send for KhrExternalFenceCapabilitiesFn {} +unsafe impl Sync for KhrExternalFenceCapabilitiesFn {} +impl ::std::clone::Clone for KhrExternalFenceCapabilitiesFn { + fn clone(&self) -> Self { + KhrExternalFenceCapabilitiesFn {} + } +} +impl KhrExternalFenceCapabilitiesFn { + pub fn load(mut _f: F) -> Self + where + F: FnMut(&::std::ffi::CStr) -> *const c_void, + { + KhrExternalFenceCapabilitiesFn {} + 
} +} +impl KhrExternalFenceFn { + pub fn name() -> &'static ::std::ffi::CStr { + ::std::ffi::CStr::from_bytes_with_nul(b"VK_KHR_external_fence\0") + .expect("Wrong extension string") + } +} +pub struct KhrExternalFenceFn {} +unsafe impl Send for KhrExternalFenceFn {} +unsafe impl Sync for KhrExternalFenceFn {} +impl ::std::clone::Clone for KhrExternalFenceFn { + fn clone(&self) -> Self { + KhrExternalFenceFn {} + } +} +impl KhrExternalFenceFn { + pub fn load(mut _f: F) -> Self + where + F: FnMut(&::std::ffi::CStr) -> *const c_void, + { + KhrExternalFenceFn {} + } +} +impl KhrExternalFenceWin32Fn { + pub fn name() -> &'static ::std::ffi::CStr { + ::std::ffi::CStr::from_bytes_with_nul(b"VK_KHR_external_fence_win32\0") + .expect("Wrong extension string") + } +} +#[allow(non_camel_case_types)] +pub type PFN_vkImportFenceWin32HandleKHR = extern "system" fn( + device: Device, + p_import_fence_win32_handle_info: *const ImportFenceWin32HandleInfoKHR, +) -> Result; +#[allow(non_camel_case_types)] +pub type PFN_vkGetFenceWin32HandleKHR = extern "system" fn( + device: Device, + p_get_win32_handle_info: *const FenceGetWin32HandleInfoKHR, + p_handle: *mut HANDLE, +) -> Result; +pub struct KhrExternalFenceWin32Fn { + pub import_fence_win32_handle_khr: extern "system" fn( + device: Device, + p_import_fence_win32_handle_info: *const ImportFenceWin32HandleInfoKHR, + ) -> Result, + pub get_fence_win32_handle_khr: extern "system" fn( + device: Device, + p_get_win32_handle_info: *const FenceGetWin32HandleInfoKHR, + p_handle: *mut HANDLE, + ) -> Result, +} +unsafe impl Send for KhrExternalFenceWin32Fn {} +unsafe impl Sync for KhrExternalFenceWin32Fn {} +impl ::std::clone::Clone for KhrExternalFenceWin32Fn { + fn clone(&self) -> Self { + KhrExternalFenceWin32Fn { + import_fence_win32_handle_khr: self.import_fence_win32_handle_khr, + get_fence_win32_handle_khr: self.get_fence_win32_handle_khr, + } + } +} +impl KhrExternalFenceWin32Fn { + pub fn load(mut _f: F) -> Self + where + F: FnMut(&::std::ffi::CStr) -> *const c_void, + { + KhrExternalFenceWin32Fn { + import_fence_win32_handle_khr: unsafe { + extern "system" fn import_fence_win32_handle_khr( + _device: Device, + _p_import_fence_win32_handle_info: *const ImportFenceWin32HandleInfoKHR, + ) -> Result { + panic!(concat!( + "Unable to load ", + stringify!(import_fence_win32_handle_khr) + )) + } + let raw_name = stringify!(vkImportFenceWin32HandleKHR); + let cname = ::std::ffi::CString::new(raw_name).unwrap(); + let val = _f(&cname); + if val.is_null() { + import_fence_win32_handle_khr + } else { + ::std::mem::transmute(val) + } + }, + get_fence_win32_handle_khr: unsafe { + extern "system" fn get_fence_win32_handle_khr( + _device: Device, + _p_get_win32_handle_info: *const FenceGetWin32HandleInfoKHR, + _p_handle: *mut HANDLE, + ) -> Result { + panic!(concat!( + "Unable to load ", + stringify!(get_fence_win32_handle_khr) + )) + } + let raw_name = stringify!(vkGetFenceWin32HandleKHR); + let cname = ::std::ffi::CString::new(raw_name).unwrap(); + let val = _f(&cname); + if val.is_null() { + get_fence_win32_handle_khr + } else { + ::std::mem::transmute(val) + } + }, + } + } + #[doc = ""] + pub unsafe fn import_fence_win32_handle_khr( + &self, + device: Device, + p_import_fence_win32_handle_info: *const ImportFenceWin32HandleInfoKHR, + ) -> Result { + (self.import_fence_win32_handle_khr)(device, p_import_fence_win32_handle_info) + } + #[doc = ""] + pub unsafe fn get_fence_win32_handle_khr( + &self, + device: Device, + p_get_win32_handle_info: *const 
FenceGetWin32HandleInfoKHR, + p_handle: *mut HANDLE, + ) -> Result { + (self.get_fence_win32_handle_khr)(device, p_get_win32_handle_info, p_handle) + } +} +#[doc = "Generated from \'VK_KHR_external_fence_win32\'"] +impl StructureType { + pub const IMPORT_FENCE_WIN32_HANDLE_INFO_KHR: Self = StructureType(1000114000); +} +#[doc = "Generated from \'VK_KHR_external_fence_win32\'"] +impl StructureType { + pub const EXPORT_FENCE_WIN32_HANDLE_INFO_KHR: Self = StructureType(1000114001); +} +#[doc = "Generated from \'VK_KHR_external_fence_win32\'"] +impl StructureType { + pub const FENCE_GET_WIN32_HANDLE_INFO_KHR: Self = StructureType(1000114002); +} +impl KhrExternalFenceFdFn { + pub fn name() -> &'static ::std::ffi::CStr { + ::std::ffi::CStr::from_bytes_with_nul(b"VK_KHR_external_fence_fd\0") + .expect("Wrong extension string") + } +} +#[allow(non_camel_case_types)] +pub type PFN_vkImportFenceFdKHR = extern "system" fn( + device: Device, + p_import_fence_fd_info: *const ImportFenceFdInfoKHR, +) -> Result; +#[allow(non_camel_case_types)] +pub type PFN_vkGetFenceFdKHR = extern "system" fn( + device: Device, + p_get_fd_info: *const FenceGetFdInfoKHR, + p_fd: *mut c_int, +) -> Result; +pub struct KhrExternalFenceFdFn { + pub import_fence_fd_khr: extern "system" fn( + device: Device, + p_import_fence_fd_info: *const ImportFenceFdInfoKHR, + ) -> Result, + pub get_fence_fd_khr: extern "system" fn( + device: Device, + p_get_fd_info: *const FenceGetFdInfoKHR, + p_fd: *mut c_int, + ) -> Result, +} +unsafe impl Send for KhrExternalFenceFdFn {} +unsafe impl Sync for KhrExternalFenceFdFn {} +impl ::std::clone::Clone for KhrExternalFenceFdFn { + fn clone(&self) -> Self { + KhrExternalFenceFdFn { + import_fence_fd_khr: self.import_fence_fd_khr, + get_fence_fd_khr: self.get_fence_fd_khr, + } + } +} +impl KhrExternalFenceFdFn { + pub fn load(mut _f: F) -> Self + where + F: FnMut(&::std::ffi::CStr) -> *const c_void, + { + KhrExternalFenceFdFn { + import_fence_fd_khr: unsafe { + extern "system" fn import_fence_fd_khr( + _device: Device, + _p_import_fence_fd_info: *const ImportFenceFdInfoKHR, + ) -> Result { + panic!(concat!("Unable to load ", stringify!(import_fence_fd_khr))) + } + let raw_name = stringify!(vkImportFenceFdKHR); + let cname = ::std::ffi::CString::new(raw_name).unwrap(); + let val = _f(&cname); + if val.is_null() { + import_fence_fd_khr + } else { + ::std::mem::transmute(val) + } + }, + get_fence_fd_khr: unsafe { + extern "system" fn get_fence_fd_khr( + _device: Device, + _p_get_fd_info: *const FenceGetFdInfoKHR, + _p_fd: *mut c_int, + ) -> Result { + panic!(concat!("Unable to load ", stringify!(get_fence_fd_khr))) + } + let raw_name = stringify!(vkGetFenceFdKHR); + let cname = ::std::ffi::CString::new(raw_name).unwrap(); + let val = _f(&cname); + if val.is_null() { + get_fence_fd_khr + } else { + ::std::mem::transmute(val) + } + }, + } + } + #[doc = ""] + pub unsafe fn import_fence_fd_khr( + &self, + device: Device, + p_import_fence_fd_info: *const ImportFenceFdInfoKHR, + ) -> Result { + (self.import_fence_fd_khr)(device, p_import_fence_fd_info) + } + #[doc = ""] + pub unsafe fn get_fence_fd_khr( + &self, + device: Device, + p_get_fd_info: *const FenceGetFdInfoKHR, + p_fd: *mut c_int, + ) -> Result { + (self.get_fence_fd_khr)(device, p_get_fd_info, p_fd) + } +} +#[doc = "Generated from \'VK_KHR_external_fence_fd\'"] +impl StructureType { + pub const IMPORT_FENCE_FD_INFO_KHR: Self = StructureType(1000115000); +} +#[doc = "Generated from \'VK_KHR_external_fence_fd\'"] +impl StructureType { + pub const 
FENCE_GET_FD_INFO_KHR: Self = StructureType(1000115001); +} +impl KhrExtension117Fn { + pub fn name() -> &'static ::std::ffi::CStr { + ::std::ffi::CStr::from_bytes_with_nul(b"VK_KHR_extension_117\0") + .expect("Wrong extension string") + } +} +pub struct KhrExtension117Fn {} +unsafe impl Send for KhrExtension117Fn {} +unsafe impl Sync for KhrExtension117Fn {} +impl ::std::clone::Clone for KhrExtension117Fn { + fn clone(&self) -> Self { + KhrExtension117Fn {} + } +} +impl KhrExtension117Fn { + pub fn load(mut _f: F) -> Self + where + F: FnMut(&::std::ffi::CStr) -> *const c_void, + { + KhrExtension117Fn {} + } +} +impl KhrMaintenance2Fn { + pub fn name() -> &'static ::std::ffi::CStr { + ::std::ffi::CStr::from_bytes_with_nul(b"VK_KHR_maintenance2\0") + .expect("Wrong extension string") + } +} +pub struct KhrMaintenance2Fn {} +unsafe impl Send for KhrMaintenance2Fn {} +unsafe impl Sync for KhrMaintenance2Fn {} +impl ::std::clone::Clone for KhrMaintenance2Fn { + fn clone(&self) -> Self { + KhrMaintenance2Fn {} + } +} +impl KhrMaintenance2Fn { + pub fn load(mut _f: F) -> Self + where + F: FnMut(&::std::ffi::CStr) -> *const c_void, + { + KhrMaintenance2Fn {} + } +} +impl KhrExtension119Fn { + pub fn name() -> &'static ::std::ffi::CStr { + ::std::ffi::CStr::from_bytes_with_nul(b"VK_KHR_extension_119\0") + .expect("Wrong extension string") + } +} +pub struct KhrExtension119Fn {} +unsafe impl Send for KhrExtension119Fn {} +unsafe impl Sync for KhrExtension119Fn {} +impl ::std::clone::Clone for KhrExtension119Fn { + fn clone(&self) -> Self { + KhrExtension119Fn {} + } +} +impl KhrExtension119Fn { + pub fn load(mut _f: F) -> Self + where + F: FnMut(&::std::ffi::CStr) -> *const c_void, + { + KhrExtension119Fn {} + } +} +impl KhrGetSurfaceCapabilities2Fn { + pub fn name() -> &'static ::std::ffi::CStr { + ::std::ffi::CStr::from_bytes_with_nul(b"VK_KHR_get_surface_capabilities2\0") + .expect("Wrong extension string") + } +} +#[allow(non_camel_case_types)] +pub type PFN_vkGetPhysicalDeviceSurfaceCapabilities2KHR = extern "system" fn( + physical_device: PhysicalDevice, + p_surface_info: *const PhysicalDeviceSurfaceInfo2KHR, + p_surface_capabilities: *mut SurfaceCapabilities2KHR, +) -> Result; +#[allow(non_camel_case_types)] +pub type PFN_vkGetPhysicalDeviceSurfaceFormats2KHR = extern "system" fn( + physical_device: PhysicalDevice, + p_surface_info: *const PhysicalDeviceSurfaceInfo2KHR, + p_surface_format_count: *mut u32, + p_surface_formats: *mut SurfaceFormat2KHR, +) -> Result; +pub struct KhrGetSurfaceCapabilities2Fn { + pub get_physical_device_surface_capabilities2_khr: extern "system" fn( + physical_device: PhysicalDevice, + p_surface_info: *const PhysicalDeviceSurfaceInfo2KHR, + p_surface_capabilities: *mut SurfaceCapabilities2KHR, + ) -> Result, + pub get_physical_device_surface_formats2_khr: extern "system" fn( + physical_device: PhysicalDevice, + p_surface_info: *const PhysicalDeviceSurfaceInfo2KHR, + p_surface_format_count: *mut u32, + p_surface_formats: *mut SurfaceFormat2KHR, + ) -> Result, +} +unsafe impl Send for KhrGetSurfaceCapabilities2Fn {} +unsafe impl Sync for KhrGetSurfaceCapabilities2Fn {} +impl ::std::clone::Clone for KhrGetSurfaceCapabilities2Fn { + fn clone(&self) -> Self { + KhrGetSurfaceCapabilities2Fn { + get_physical_device_surface_capabilities2_khr: self + .get_physical_device_surface_capabilities2_khr, + get_physical_device_surface_formats2_khr: self.get_physical_device_surface_formats2_khr, + } + } +} +impl KhrGetSurfaceCapabilities2Fn { + pub fn load(mut _f: F) -> Self + 
where + F: FnMut(&::std::ffi::CStr) -> *const c_void, + { + KhrGetSurfaceCapabilities2Fn { + get_physical_device_surface_capabilities2_khr: unsafe { + extern "system" fn get_physical_device_surface_capabilities2_khr( + _physical_device: PhysicalDevice, + _p_surface_info: *const PhysicalDeviceSurfaceInfo2KHR, + _p_surface_capabilities: *mut SurfaceCapabilities2KHR, + ) -> Result { + panic!(concat!( + "Unable to load ", + stringify!(get_physical_device_surface_capabilities2_khr) + )) + } + let raw_name = stringify!(vkGetPhysicalDeviceSurfaceCapabilities2KHR); + let cname = ::std::ffi::CString::new(raw_name).unwrap(); + let val = _f(&cname); + if val.is_null() { + get_physical_device_surface_capabilities2_khr + } else { + ::std::mem::transmute(val) + } + }, + get_physical_device_surface_formats2_khr: unsafe { + extern "system" fn get_physical_device_surface_formats2_khr( + _physical_device: PhysicalDevice, + _p_surface_info: *const PhysicalDeviceSurfaceInfo2KHR, + _p_surface_format_count: *mut u32, + _p_surface_formats: *mut SurfaceFormat2KHR, + ) -> Result { + panic!(concat!( + "Unable to load ", + stringify!(get_physical_device_surface_formats2_khr) + )) + } + let raw_name = stringify!(vkGetPhysicalDeviceSurfaceFormats2KHR); + let cname = ::std::ffi::CString::new(raw_name).unwrap(); + let val = _f(&cname); + if val.is_null() { + get_physical_device_surface_formats2_khr + } else { + ::std::mem::transmute(val) + } + }, + } + } + #[doc = ""] + pub unsafe fn get_physical_device_surface_capabilities2_khr( + &self, + physical_device: PhysicalDevice, + p_surface_info: *const PhysicalDeviceSurfaceInfo2KHR, + p_surface_capabilities: *mut SurfaceCapabilities2KHR, + ) -> Result { + (self.get_physical_device_surface_capabilities2_khr)( + physical_device, + p_surface_info, + p_surface_capabilities, + ) + } + #[doc = ""] + pub unsafe fn get_physical_device_surface_formats2_khr( + &self, + physical_device: PhysicalDevice, + p_surface_info: *const PhysicalDeviceSurfaceInfo2KHR, + p_surface_format_count: *mut u32, + p_surface_formats: *mut SurfaceFormat2KHR, + ) -> Result { + (self.get_physical_device_surface_formats2_khr)( + physical_device, + p_surface_info, + p_surface_format_count, + p_surface_formats, + ) + } +} +#[doc = "Generated from \'VK_KHR_get_surface_capabilities2\'"] +impl StructureType { + pub const PHYSICAL_DEVICE_SURFACE_INFO_2_KHR: Self = StructureType(1000119000); +} +#[doc = "Generated from \'VK_KHR_get_surface_capabilities2\'"] +impl StructureType { + pub const SURFACE_CAPABILITIES_2_KHR: Self = StructureType(1000119001); +} +#[doc = "Generated from \'VK_KHR_get_surface_capabilities2\'"] +impl StructureType { + pub const SURFACE_FORMAT_2_KHR: Self = StructureType(1000119002); +} +impl KhrVariablePointersFn { + pub fn name() -> &'static ::std::ffi::CStr { + ::std::ffi::CStr::from_bytes_with_nul(b"VK_KHR_variable_pointers\0") + .expect("Wrong extension string") + } +} +pub struct KhrVariablePointersFn {} +unsafe impl Send for KhrVariablePointersFn {} +unsafe impl Sync for KhrVariablePointersFn {} +impl ::std::clone::Clone for KhrVariablePointersFn { + fn clone(&self) -> Self { + KhrVariablePointersFn {} + } +} +impl KhrVariablePointersFn { + pub fn load(mut _f: F) -> Self + where + F: FnMut(&::std::ffi::CStr) -> *const c_void, + { + KhrVariablePointersFn {} + } +} +impl KhrGetDisplayProperties2Fn { + pub fn name() -> &'static ::std::ffi::CStr { + ::std::ffi::CStr::from_bytes_with_nul(b"VK_KHR_get_display_properties2\0") + .expect("Wrong extension string") + } +} 
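The vendored hunks above and below repeat one machine-generated shape per Vulkan extension: a name() accessor returning the extension string, PFN_vk* typedefs, a struct of extern "system" function pointers, Send/Sync/Clone impls, a load constructor that resolves each entry point by name and installs a panicking stub when the loader returns null, and thin unsafe wrapper methods. The following is a minimal, self-contained sketch of that pattern, not the vendored code itself; ExampleExtFn and vkExampleEXT are invented names for illustration.

    use std::ffi::{c_void, CStr, CString};
    use std::mem;

    pub struct ExampleExtFn {
        // One field per entry point, typed exactly like its PFN_ typedef.
        pub example: extern "system" fn(value: u32) -> u32,
    }

    impl ExampleExtFn {
        pub fn load<F>(mut f: F) -> Self
        where
            F: FnMut(&CStr) -> *const c_void,
        {
            ExampleExtFn {
                example: unsafe {
                    // Stub used when the driver does not expose the symbol;
                    // calling it reports which entry point was missing.
                    extern "system" fn example(_value: u32) -> u32 {
                        panic!("Unable to load vkExampleEXT")
                    }
                    let cname = CString::new("vkExampleEXT").unwrap();
                    let val = f(&cname);
                    if val.is_null() {
                        example
                    } else {
                        // Reinterpret the raw loader result as the typed pointer.
                        mem::transmute(val)
                    }
                },
            }
        }
    }

    fn main() {
        // A resolver that knows no symbols leaves every slot on its stub.
        let table = ExampleExtFn::load(|_name| std::ptr::null());
        let _ = table.example; // invoking it would panic with the message above
    }

In the real tables the closure is typically a thin wrapper over vkGetInstanceProcAddr or vkGetDeviceProcAddr. The panicking stubs mean a missing extension fails at the first call into it rather than at load time; callers are expected to have enabled the extension before touching its table, which is one reason the wrapper methods stay unsafe.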
+#[allow(non_camel_case_types)] +pub type PFN_vkGetPhysicalDeviceDisplayProperties2KHR = extern "system" fn( + physical_device: PhysicalDevice, + p_property_count: *mut u32, + p_properties: *mut DisplayProperties2KHR, +) -> Result; +#[allow(non_camel_case_types)] +pub type PFN_vkGetPhysicalDeviceDisplayPlaneProperties2KHR = extern "system" fn( + physical_device: PhysicalDevice, + p_property_count: *mut u32, + p_properties: *mut DisplayPlaneProperties2KHR, +) -> Result; +#[allow(non_camel_case_types)] +pub type PFN_vkGetDisplayModeProperties2KHR = extern "system" fn( + physical_device: PhysicalDevice, + display: DisplayKHR, + p_property_count: *mut u32, + p_properties: *mut DisplayModeProperties2KHR, +) -> Result; +#[allow(non_camel_case_types)] +pub type PFN_vkGetDisplayPlaneCapabilities2KHR = extern "system" fn( + physical_device: PhysicalDevice, + p_display_plane_info: *const DisplayPlaneInfo2KHR, + p_capabilities: *mut DisplayPlaneCapabilities2KHR, +) -> Result; +pub struct KhrGetDisplayProperties2Fn { + pub get_physical_device_display_properties2_khr: extern "system" fn( + physical_device: PhysicalDevice, + p_property_count: *mut u32, + p_properties: *mut DisplayProperties2KHR, + ) -> Result, + pub get_physical_device_display_plane_properties2_khr: extern "system" fn( + physical_device: PhysicalDevice, + p_property_count: *mut u32, + p_properties: *mut DisplayPlaneProperties2KHR, + ) -> Result, + pub get_display_mode_properties2_khr: extern "system" fn( + physical_device: PhysicalDevice, + display: DisplayKHR, + p_property_count: *mut u32, + p_properties: *mut DisplayModeProperties2KHR, + ) -> Result, + pub get_display_plane_capabilities2_khr: extern "system" fn( + physical_device: PhysicalDevice, + p_display_plane_info: *const DisplayPlaneInfo2KHR, + p_capabilities: *mut DisplayPlaneCapabilities2KHR, + ) -> Result, +} +unsafe impl Send for KhrGetDisplayProperties2Fn {} +unsafe impl Sync for KhrGetDisplayProperties2Fn {} +impl ::std::clone::Clone for KhrGetDisplayProperties2Fn { + fn clone(&self) -> Self { + KhrGetDisplayProperties2Fn { + get_physical_device_display_properties2_khr: self + .get_physical_device_display_properties2_khr, + get_physical_device_display_plane_properties2_khr: self + .get_physical_device_display_plane_properties2_khr, + get_display_mode_properties2_khr: self.get_display_mode_properties2_khr, + get_display_plane_capabilities2_khr: self.get_display_plane_capabilities2_khr, + } + } +} +impl KhrGetDisplayProperties2Fn { + pub fn load(mut _f: F) -> Self + where + F: FnMut(&::std::ffi::CStr) -> *const c_void, + { + KhrGetDisplayProperties2Fn { + get_physical_device_display_properties2_khr: unsafe { + extern "system" fn get_physical_device_display_properties2_khr( + _physical_device: PhysicalDevice, + _p_property_count: *mut u32, + _p_properties: *mut DisplayProperties2KHR, + ) -> Result { + panic!(concat!( + "Unable to load ", + stringify!(get_physical_device_display_properties2_khr) + )) + } + let raw_name = stringify!(vkGetPhysicalDeviceDisplayProperties2KHR); + let cname = ::std::ffi::CString::new(raw_name).unwrap(); + let val = _f(&cname); + if val.is_null() { + get_physical_device_display_properties2_khr + } else { + ::std::mem::transmute(val) + } + }, + get_physical_device_display_plane_properties2_khr: unsafe { + extern "system" fn get_physical_device_display_plane_properties2_khr( + _physical_device: PhysicalDevice, + _p_property_count: *mut u32, + _p_properties: *mut DisplayPlaneProperties2KHR, + ) -> Result { + panic!(concat!( + "Unable to load ", + 
stringify!(get_physical_device_display_plane_properties2_khr) + )) + } + let raw_name = stringify!(vkGetPhysicalDeviceDisplayPlaneProperties2KHR); + let cname = ::std::ffi::CString::new(raw_name).unwrap(); + let val = _f(&cname); + if val.is_null() { + get_physical_device_display_plane_properties2_khr + } else { + ::std::mem::transmute(val) + } + }, + get_display_mode_properties2_khr: unsafe { + extern "system" fn get_display_mode_properties2_khr( + _physical_device: PhysicalDevice, + _display: DisplayKHR, + _p_property_count: *mut u32, + _p_properties: *mut DisplayModeProperties2KHR, + ) -> Result { + panic!(concat!( + "Unable to load ", + stringify!(get_display_mode_properties2_khr) + )) + } + let raw_name = stringify!(vkGetDisplayModeProperties2KHR); + let cname = ::std::ffi::CString::new(raw_name).unwrap(); + let val = _f(&cname); + if val.is_null() { + get_display_mode_properties2_khr + } else { + ::std::mem::transmute(val) + } + }, + get_display_plane_capabilities2_khr: unsafe { + extern "system" fn get_display_plane_capabilities2_khr( + _physical_device: PhysicalDevice, + _p_display_plane_info: *const DisplayPlaneInfo2KHR, + _p_capabilities: *mut DisplayPlaneCapabilities2KHR, + ) -> Result { + panic!(concat!( + "Unable to load ", + stringify!(get_display_plane_capabilities2_khr) + )) + } + let raw_name = stringify!(vkGetDisplayPlaneCapabilities2KHR); + let cname = ::std::ffi::CString::new(raw_name).unwrap(); + let val = _f(&cname); + if val.is_null() { + get_display_plane_capabilities2_khr + } else { + ::std::mem::transmute(val) + } + }, + } + } + #[doc = ""] + pub unsafe fn get_physical_device_display_properties2_khr( + &self, + physical_device: PhysicalDevice, + p_property_count: *mut u32, + p_properties: *mut DisplayProperties2KHR, + ) -> Result { + (self.get_physical_device_display_properties2_khr)( + physical_device, + p_property_count, + p_properties, + ) + } + #[doc = ""] + pub unsafe fn get_physical_device_display_plane_properties2_khr( + &self, + physical_device: PhysicalDevice, + p_property_count: *mut u32, + p_properties: *mut DisplayPlaneProperties2KHR, + ) -> Result { + (self.get_physical_device_display_plane_properties2_khr)( + physical_device, + p_property_count, + p_properties, + ) + } + #[doc = ""] + pub unsafe fn get_display_mode_properties2_khr( + &self, + physical_device: PhysicalDevice, + display: DisplayKHR, + p_property_count: *mut u32, + p_properties: *mut DisplayModeProperties2KHR, + ) -> Result { + (self.get_display_mode_properties2_khr)( + physical_device, + display, + p_property_count, + p_properties, + ) + } + #[doc = ""] + pub unsafe fn get_display_plane_capabilities2_khr( + &self, + physical_device: PhysicalDevice, + p_display_plane_info: *const DisplayPlaneInfo2KHR, + p_capabilities: *mut DisplayPlaneCapabilities2KHR, + ) -> Result { + (self.get_display_plane_capabilities2_khr)( + physical_device, + p_display_plane_info, + p_capabilities, + ) + } +} +#[doc = "Generated from \'VK_KHR_get_display_properties2\'"] +impl StructureType { + pub const DISPLAY_PROPERTIES_2_KHR: Self = StructureType(1000121000); +} +#[doc = "Generated from \'VK_KHR_get_display_properties2\'"] +impl StructureType { + pub const DISPLAY_PLANE_PROPERTIES_2_KHR: Self = StructureType(1000121001); +} +#[doc = "Generated from \'VK_KHR_get_display_properties2\'"] +impl StructureType { + pub const DISPLAY_MODE_PROPERTIES_2_KHR: Self = StructureType(1000121002); +} +#[doc = "Generated from \'VK_KHR_get_display_properties2\'"] +impl StructureType { + pub const DISPLAY_PLANE_INFO_2_KHR: 
Self = StructureType(1000121003); +} +#[doc = "Generated from \'VK_KHR_get_display_properties2\'"] +impl StructureType { + pub const DISPLAY_PLANE_CAPABILITIES_2_KHR: Self = StructureType(1000121004); +} +impl MvkIosSurfaceFn { + pub fn name() -> &'static ::std::ffi::CStr { + ::std::ffi::CStr::from_bytes_with_nul(b"VK_MVK_ios_surface\0") + .expect("Wrong extension string") + } +} +#[allow(non_camel_case_types)] +pub type PFN_vkCreateIOSSurfaceMVK = extern "system" fn( + instance: Instance, + p_create_info: *const IOSSurfaceCreateInfoMVK, + p_allocator: *const AllocationCallbacks, + p_surface: *mut SurfaceKHR, +) -> Result; +pub struct MvkIosSurfaceFn { + pub create_ios_surface_mvk: extern "system" fn( + instance: Instance, + p_create_info: *const IOSSurfaceCreateInfoMVK, + p_allocator: *const AllocationCallbacks, + p_surface: *mut SurfaceKHR, + ) -> Result, +} +unsafe impl Send for MvkIosSurfaceFn {} +unsafe impl Sync for MvkIosSurfaceFn {} +impl ::std::clone::Clone for MvkIosSurfaceFn { + fn clone(&self) -> Self { + MvkIosSurfaceFn { + create_ios_surface_mvk: self.create_ios_surface_mvk, + } + } +} +impl MvkIosSurfaceFn { + pub fn load(mut _f: F) -> Self + where + F: FnMut(&::std::ffi::CStr) -> *const c_void, + { + MvkIosSurfaceFn { + create_ios_surface_mvk: unsafe { + extern "system" fn create_ios_surface_mvk( + _instance: Instance, + _p_create_info: *const IOSSurfaceCreateInfoMVK, + _p_allocator: *const AllocationCallbacks, + _p_surface: *mut SurfaceKHR, + ) -> Result { + panic!(concat!( + "Unable to load ", + stringify!(create_ios_surface_mvk) + )) + } + let raw_name = stringify!(vkCreateIOSSurfaceMVK); + let cname = ::std::ffi::CString::new(raw_name).unwrap(); + let val = _f(&cname); + if val.is_null() { + create_ios_surface_mvk + } else { + ::std::mem::transmute(val) + } + }, + } + } + #[doc = ""] + pub unsafe fn create_ios_surface_mvk( + &self, + instance: Instance, + p_create_info: *const IOSSurfaceCreateInfoMVK, + p_allocator: *const AllocationCallbacks, + p_surface: *mut SurfaceKHR, + ) -> Result { + (self.create_ios_surface_mvk)(instance, p_create_info, p_allocator, p_surface) + } +} +#[doc = "Generated from \'VK_MVK_ios_surface\'"] +impl StructureType { + pub const IOS_SURFACE_CREATE_INFO_M: Self = StructureType(1000122000); +} +impl MvkMacosSurfaceFn { + pub fn name() -> &'static ::std::ffi::CStr { + ::std::ffi::CStr::from_bytes_with_nul(b"VK_MVK_macos_surface\0") + .expect("Wrong extension string") + } +} +#[allow(non_camel_case_types)] +pub type PFN_vkCreateMacOSSurfaceMVK = extern "system" fn( + instance: Instance, + p_create_info: *const MacOSSurfaceCreateInfoMVK, + p_allocator: *const AllocationCallbacks, + p_surface: *mut SurfaceKHR, +) -> Result; +pub struct MvkMacosSurfaceFn { + pub create_mac_os_surface_mvk: extern "system" fn( + instance: Instance, + p_create_info: *const MacOSSurfaceCreateInfoMVK, + p_allocator: *const AllocationCallbacks, + p_surface: *mut SurfaceKHR, + ) -> Result, +} +unsafe impl Send for MvkMacosSurfaceFn {} +unsafe impl Sync for MvkMacosSurfaceFn {} +impl ::std::clone::Clone for MvkMacosSurfaceFn { + fn clone(&self) -> Self { + MvkMacosSurfaceFn { + create_mac_os_surface_mvk: self.create_mac_os_surface_mvk, + } + } +} +impl MvkMacosSurfaceFn { + pub fn load(mut _f: F) -> Self + where + F: FnMut(&::std::ffi::CStr) -> *const c_void, + { + MvkMacosSurfaceFn { + create_mac_os_surface_mvk: unsafe { + extern "system" fn create_mac_os_surface_mvk( + _instance: Instance, + _p_create_info: *const MacOSSurfaceCreateInfoMVK, + _p_allocator: *const 
AllocationCallbacks, + _p_surface: *mut SurfaceKHR, + ) -> Result { + panic!(concat!( + "Unable to load ", + stringify!(create_mac_os_surface_mvk) + )) + } + let raw_name = stringify!(vkCreateMacOSSurfaceMVK); + let cname = ::std::ffi::CString::new(raw_name).unwrap(); + let val = _f(&cname); + if val.is_null() { + create_mac_os_surface_mvk + } else { + ::std::mem::transmute(val) + } + }, + } + } + #[doc = ""] + pub unsafe fn create_mac_os_surface_mvk( + &self, + instance: Instance, + p_create_info: *const MacOSSurfaceCreateInfoMVK, + p_allocator: *const AllocationCallbacks, + p_surface: *mut SurfaceKHR, + ) -> Result { + (self.create_mac_os_surface_mvk)(instance, p_create_info, p_allocator, p_surface) + } +} +#[doc = "Generated from \'VK_MVK_macos_surface\'"] +impl StructureType { + pub const MACOS_SURFACE_CREATE_INFO_M: Self = StructureType(1000123000); +} +impl MvkMoltenvkFn { + pub fn name() -> &'static ::std::ffi::CStr { + ::std::ffi::CStr::from_bytes_with_nul(b"VK_MVK_moltenvk\0").expect("Wrong extension string") + } +} +pub struct MvkMoltenvkFn {} +unsafe impl Send for MvkMoltenvkFn {} +unsafe impl Sync for MvkMoltenvkFn {} +impl ::std::clone::Clone for MvkMoltenvkFn { + fn clone(&self) -> Self { + MvkMoltenvkFn {} + } +} +impl MvkMoltenvkFn { + pub fn load(mut _f: F) -> Self + where + F: FnMut(&::std::ffi::CStr) -> *const c_void, + { + MvkMoltenvkFn {} + } +} +impl ExtExternalMemoryDmaBufFn { + pub fn name() -> &'static ::std::ffi::CStr { + ::std::ffi::CStr::from_bytes_with_nul(b"VK_EXT_external_memory_dma_buf\0") + .expect("Wrong extension string") + } +} +pub struct ExtExternalMemoryDmaBufFn {} +unsafe impl Send for ExtExternalMemoryDmaBufFn {} +unsafe impl Sync for ExtExternalMemoryDmaBufFn {} +impl ::std::clone::Clone for ExtExternalMemoryDmaBufFn { + fn clone(&self) -> Self { + ExtExternalMemoryDmaBufFn {} + } +} +impl ExtExternalMemoryDmaBufFn { + pub fn load(mut _f: F) -> Self + where + F: FnMut(&::std::ffi::CStr) -> *const c_void, + { + ExtExternalMemoryDmaBufFn {} + } +} +#[doc = "Generated from \'VK_EXT_external_memory_dma_buf\'"] +impl ExternalMemoryHandleTypeFlags { + pub const EXTERNAL_MEMORY_HANDLE_TYPE_DMA_BUF: Self = + ExternalMemoryHandleTypeFlags(0b1000000000); +} +impl ExtQueueFamilyForeignFn { + pub fn name() -> &'static ::std::ffi::CStr { + ::std::ffi::CStr::from_bytes_with_nul(b"VK_EXT_queue_family_foreign\0") + .expect("Wrong extension string") + } +} +pub struct ExtQueueFamilyForeignFn {} +unsafe impl Send for ExtQueueFamilyForeignFn {} +unsafe impl Sync for ExtQueueFamilyForeignFn {} +impl ::std::clone::Clone for ExtQueueFamilyForeignFn { + fn clone(&self) -> Self { + ExtQueueFamilyForeignFn {} + } +} +impl ExtQueueFamilyForeignFn { + pub fn load(mut _f: F) -> Self + where + F: FnMut(&::std::ffi::CStr) -> *const c_void, + { + ExtQueueFamilyForeignFn {} + } +} +impl KhrDedicatedAllocationFn { + pub fn name() -> &'static ::std::ffi::CStr { + ::std::ffi::CStr::from_bytes_with_nul(b"VK_KHR_dedicated_allocation\0") + .expect("Wrong extension string") + } +} +pub struct KhrDedicatedAllocationFn {} +unsafe impl Send for KhrDedicatedAllocationFn {} +unsafe impl Sync for KhrDedicatedAllocationFn {} +impl ::std::clone::Clone for KhrDedicatedAllocationFn { + fn clone(&self) -> Self { + KhrDedicatedAllocationFn {} + } +} +impl KhrDedicatedAllocationFn { + pub fn load(mut _f: F) -> Self + where + F: FnMut(&::std::ffi::CStr) -> *const c_void, + { + KhrDedicatedAllocationFn {} + } +} +impl ExtDebugUtilsFn { + pub fn name() -> &'static ::std::ffi::CStr { + 
::std::ffi::CStr::from_bytes_with_nul(b"VK_EXT_debug_utils\0") + .expect("Wrong extension string") + } +} +#[allow(non_camel_case_types)] +pub type PFN_vkSetDebugUtilsObjectNameEXT = + extern "system" fn(device: Device, p_name_info: *const DebugUtilsObjectNameInfoEXT) -> Result; +#[allow(non_camel_case_types)] +pub type PFN_vkSetDebugUtilsObjectTagEXT = + extern "system" fn(device: Device, p_tag_info: *const DebugUtilsObjectTagInfoEXT) -> Result; +#[allow(non_camel_case_types)] +pub type PFN_vkQueueBeginDebugUtilsLabelEXT = + extern "system" fn(queue: Queue, p_label_info: *const DebugUtilsLabelEXT) -> c_void; +#[allow(non_camel_case_types)] +pub type PFN_vkQueueEndDebugUtilsLabelEXT = extern "system" fn(queue: Queue) -> c_void; +#[allow(non_camel_case_types)] +pub type PFN_vkQueueInsertDebugUtilsLabelEXT = + extern "system" fn(queue: Queue, p_label_info: *const DebugUtilsLabelEXT) -> c_void; +#[allow(non_camel_case_types)] +pub type PFN_vkCmdBeginDebugUtilsLabelEXT = extern "system" fn( + command_buffer: CommandBuffer, + p_label_info: *const DebugUtilsLabelEXT, +) -> c_void; +#[allow(non_camel_case_types)] +pub type PFN_vkCmdEndDebugUtilsLabelEXT = + extern "system" fn(command_buffer: CommandBuffer) -> c_void; +#[allow(non_camel_case_types)] +pub type PFN_vkCmdInsertDebugUtilsLabelEXT = extern "system" fn( + command_buffer: CommandBuffer, + p_label_info: *const DebugUtilsLabelEXT, +) -> c_void; +#[allow(non_camel_case_types)] +pub type PFN_vkCreateDebugUtilsMessengerEXT = extern "system" fn( + instance: Instance, + p_create_info: *const DebugUtilsMessengerCreateInfoEXT, + p_allocator: *const AllocationCallbacks, + p_messenger: *mut DebugUtilsMessengerEXT, +) -> Result; +#[allow(non_camel_case_types)] +pub type PFN_vkDestroyDebugUtilsMessengerEXT = extern "system" fn( + instance: Instance, + messenger: DebugUtilsMessengerEXT, + p_allocator: *const AllocationCallbacks, +) -> c_void; +#[allow(non_camel_case_types)] +pub type PFN_vkSubmitDebugUtilsMessageEXT = extern "system" fn( + instance: Instance, + message_severity: DebugUtilsMessageSeverityFlagsEXT, + message_types: DebugUtilsMessageTypeFlagsEXT, + p_callback_data: *const DebugUtilsMessengerCallbackDataEXT, +) -> c_void; +pub struct ExtDebugUtilsFn { + pub set_debug_utils_object_name_ext: extern "system" fn( + device: Device, + p_name_info: *const DebugUtilsObjectNameInfoEXT, + ) -> Result, + pub set_debug_utils_object_tag_ext: + extern "system" fn(device: Device, p_tag_info: *const DebugUtilsObjectTagInfoEXT) -> Result, + pub queue_begin_debug_utils_label_ext: + extern "system" fn(queue: Queue, p_label_info: *const DebugUtilsLabelEXT) -> c_void, + pub queue_end_debug_utils_label_ext: extern "system" fn(queue: Queue) -> c_void, + pub queue_insert_debug_utils_label_ext: + extern "system" fn(queue: Queue, p_label_info: *const DebugUtilsLabelEXT) -> c_void, + pub cmd_begin_debug_utils_label_ext: extern "system" fn( + command_buffer: CommandBuffer, + p_label_info: *const DebugUtilsLabelEXT, + ) -> c_void, + pub cmd_end_debug_utils_label_ext: extern "system" fn(command_buffer: CommandBuffer) -> c_void, + pub cmd_insert_debug_utils_label_ext: extern "system" fn( + command_buffer: CommandBuffer, + p_label_info: *const DebugUtilsLabelEXT, + ) -> c_void, + pub create_debug_utils_messenger_ext: extern "system" fn( + instance: Instance, + p_create_info: *const DebugUtilsMessengerCreateInfoEXT, + p_allocator: *const AllocationCallbacks, + p_messenger: *mut DebugUtilsMessengerEXT, + ) -> Result, + pub destroy_debug_utils_messenger_ext: extern 
"system" fn( + instance: Instance, + messenger: DebugUtilsMessengerEXT, + p_allocator: *const AllocationCallbacks, + ) -> c_void, + pub submit_debug_utils_message_ext: extern "system" fn( + instance: Instance, + message_severity: DebugUtilsMessageSeverityFlagsEXT, + message_types: DebugUtilsMessageTypeFlagsEXT, + p_callback_data: *const DebugUtilsMessengerCallbackDataEXT, + ) -> c_void, +} +unsafe impl Send for ExtDebugUtilsFn {} +unsafe impl Sync for ExtDebugUtilsFn {} +impl ::std::clone::Clone for ExtDebugUtilsFn { + fn clone(&self) -> Self { + ExtDebugUtilsFn { + set_debug_utils_object_name_ext: self.set_debug_utils_object_name_ext, + set_debug_utils_object_tag_ext: self.set_debug_utils_object_tag_ext, + queue_begin_debug_utils_label_ext: self.queue_begin_debug_utils_label_ext, + queue_end_debug_utils_label_ext: self.queue_end_debug_utils_label_ext, + queue_insert_debug_utils_label_ext: self.queue_insert_debug_utils_label_ext, + cmd_begin_debug_utils_label_ext: self.cmd_begin_debug_utils_label_ext, + cmd_end_debug_utils_label_ext: self.cmd_end_debug_utils_label_ext, + cmd_insert_debug_utils_label_ext: self.cmd_insert_debug_utils_label_ext, + create_debug_utils_messenger_ext: self.create_debug_utils_messenger_ext, + destroy_debug_utils_messenger_ext: self.destroy_debug_utils_messenger_ext, + submit_debug_utils_message_ext: self.submit_debug_utils_message_ext, + } + } +} +impl ExtDebugUtilsFn { + pub fn load(mut _f: F) -> Self + where + F: FnMut(&::std::ffi::CStr) -> *const c_void, + { + ExtDebugUtilsFn { + set_debug_utils_object_name_ext: unsafe { + extern "system" fn set_debug_utils_object_name_ext( + _device: Device, + _p_name_info: *const DebugUtilsObjectNameInfoEXT, + ) -> Result { + panic!(concat!( + "Unable to load ", + stringify!(set_debug_utils_object_name_ext) + )) + } + let raw_name = stringify!(vkSetDebugUtilsObjectNameEXT); + let cname = ::std::ffi::CString::new(raw_name).unwrap(); + let val = _f(&cname); + if val.is_null() { + set_debug_utils_object_name_ext + } else { + ::std::mem::transmute(val) + } + }, + set_debug_utils_object_tag_ext: unsafe { + extern "system" fn set_debug_utils_object_tag_ext( + _device: Device, + _p_tag_info: *const DebugUtilsObjectTagInfoEXT, + ) -> Result { + panic!(concat!( + "Unable to load ", + stringify!(set_debug_utils_object_tag_ext) + )) + } + let raw_name = stringify!(vkSetDebugUtilsObjectTagEXT); + let cname = ::std::ffi::CString::new(raw_name).unwrap(); + let val = _f(&cname); + if val.is_null() { + set_debug_utils_object_tag_ext + } else { + ::std::mem::transmute(val) + } + }, + queue_begin_debug_utils_label_ext: unsafe { + extern "system" fn queue_begin_debug_utils_label_ext( + _queue: Queue, + _p_label_info: *const DebugUtilsLabelEXT, + ) -> c_void { + panic!(concat!( + "Unable to load ", + stringify!(queue_begin_debug_utils_label_ext) + )) + } + let raw_name = stringify!(vkQueueBeginDebugUtilsLabelEXT); + let cname = ::std::ffi::CString::new(raw_name).unwrap(); + let val = _f(&cname); + if val.is_null() { + queue_begin_debug_utils_label_ext + } else { + ::std::mem::transmute(val) + } + }, + queue_end_debug_utils_label_ext: unsafe { + extern "system" fn queue_end_debug_utils_label_ext(_queue: Queue) -> c_void { + panic!(concat!( + "Unable to load ", + stringify!(queue_end_debug_utils_label_ext) + )) + } + let raw_name = stringify!(vkQueueEndDebugUtilsLabelEXT); + let cname = ::std::ffi::CString::new(raw_name).unwrap(); + let val = _f(&cname); + if val.is_null() { + queue_end_debug_utils_label_ext + } else { + ::std::mem::transmute(val) 
+ } + }, + queue_insert_debug_utils_label_ext: unsafe { + extern "system" fn queue_insert_debug_utils_label_ext( + _queue: Queue, + _p_label_info: *const DebugUtilsLabelEXT, + ) -> c_void { + panic!(concat!( + "Unable to load ", + stringify!(queue_insert_debug_utils_label_ext) + )) + } + let raw_name = stringify!(vkQueueInsertDebugUtilsLabelEXT); + let cname = ::std::ffi::CString::new(raw_name).unwrap(); + let val = _f(&cname); + if val.is_null() { + queue_insert_debug_utils_label_ext + } else { + ::std::mem::transmute(val) + } + }, + cmd_begin_debug_utils_label_ext: unsafe { + extern "system" fn cmd_begin_debug_utils_label_ext( + _command_buffer: CommandBuffer, + _p_label_info: *const DebugUtilsLabelEXT, + ) -> c_void { + panic!(concat!( + "Unable to load ", + stringify!(cmd_begin_debug_utils_label_ext) + )) + } + let raw_name = stringify!(vkCmdBeginDebugUtilsLabelEXT); + let cname = ::std::ffi::CString::new(raw_name).unwrap(); + let val = _f(&cname); + if val.is_null() { + cmd_begin_debug_utils_label_ext + } else { + ::std::mem::transmute(val) + } + }, + cmd_end_debug_utils_label_ext: unsafe { + extern "system" fn cmd_end_debug_utils_label_ext( + _command_buffer: CommandBuffer, + ) -> c_void { + panic!(concat!( + "Unable to load ", + stringify!(cmd_end_debug_utils_label_ext) + )) + } + let raw_name = stringify!(vkCmdEndDebugUtilsLabelEXT); + let cname = ::std::ffi::CString::new(raw_name).unwrap(); + let val = _f(&cname); + if val.is_null() { + cmd_end_debug_utils_label_ext + } else { + ::std::mem::transmute(val) + } + }, + cmd_insert_debug_utils_label_ext: unsafe { + extern "system" fn cmd_insert_debug_utils_label_ext( + _command_buffer: CommandBuffer, + _p_label_info: *const DebugUtilsLabelEXT, + ) -> c_void { + panic!(concat!( + "Unable to load ", + stringify!(cmd_insert_debug_utils_label_ext) + )) + } + let raw_name = stringify!(vkCmdInsertDebugUtilsLabelEXT); + let cname = ::std::ffi::CString::new(raw_name).unwrap(); + let val = _f(&cname); + if val.is_null() { + cmd_insert_debug_utils_label_ext + } else { + ::std::mem::transmute(val) + } + }, + create_debug_utils_messenger_ext: unsafe { + extern "system" fn create_debug_utils_messenger_ext( + _instance: Instance, + _p_create_info: *const DebugUtilsMessengerCreateInfoEXT, + _p_allocator: *const AllocationCallbacks, + _p_messenger: *mut DebugUtilsMessengerEXT, + ) -> Result { + panic!(concat!( + "Unable to load ", + stringify!(create_debug_utils_messenger_ext) + )) + } + let raw_name = stringify!(vkCreateDebugUtilsMessengerEXT); + let cname = ::std::ffi::CString::new(raw_name).unwrap(); + let val = _f(&cname); + if val.is_null() { + create_debug_utils_messenger_ext + } else { + ::std::mem::transmute(val) + } + }, + destroy_debug_utils_messenger_ext: unsafe { + extern "system" fn destroy_debug_utils_messenger_ext( + _instance: Instance, + _messenger: DebugUtilsMessengerEXT, + _p_allocator: *const AllocationCallbacks, + ) -> c_void { + panic!(concat!( + "Unable to load ", + stringify!(destroy_debug_utils_messenger_ext) + )) + } + let raw_name = stringify!(vkDestroyDebugUtilsMessengerEXT); + let cname = ::std::ffi::CString::new(raw_name).unwrap(); + let val = _f(&cname); + if val.is_null() { + destroy_debug_utils_messenger_ext + } else { + ::std::mem::transmute(val) + } + }, + submit_debug_utils_message_ext: unsafe { + extern "system" fn submit_debug_utils_message_ext( + _instance: Instance, + _message_severity: DebugUtilsMessageSeverityFlagsEXT, + _message_types: DebugUtilsMessageTypeFlagsEXT, + _p_callback_data: *const 
DebugUtilsMessengerCallbackDataEXT, + ) -> c_void { + panic!(concat!( + "Unable to load ", + stringify!(submit_debug_utils_message_ext) + )) + } + let raw_name = stringify!(vkSubmitDebugUtilsMessageEXT); + let cname = ::std::ffi::CString::new(raw_name).unwrap(); + let val = _f(&cname); + if val.is_null() { + submit_debug_utils_message_ext + } else { + ::std::mem::transmute(val) + } + }, + } + } + #[doc = ""] + pub unsafe fn set_debug_utils_object_name_ext( + &self, + device: Device, + p_name_info: *const DebugUtilsObjectNameInfoEXT, + ) -> Result { + (self.set_debug_utils_object_name_ext)(device, p_name_info) + } + #[doc = ""] + pub unsafe fn set_debug_utils_object_tag_ext( + &self, + device: Device, + p_tag_info: *const DebugUtilsObjectTagInfoEXT, + ) -> Result { + (self.set_debug_utils_object_tag_ext)(device, p_tag_info) + } + #[doc = ""] + pub unsafe fn queue_begin_debug_utils_label_ext( + &self, + queue: Queue, + p_label_info: *const DebugUtilsLabelEXT, + ) -> c_void { + (self.queue_begin_debug_utils_label_ext)(queue, p_label_info) + } + #[doc = ""] + pub unsafe fn queue_end_debug_utils_label_ext(&self, queue: Queue) -> c_void { + (self.queue_end_debug_utils_label_ext)(queue) + } + #[doc = ""] + pub unsafe fn queue_insert_debug_utils_label_ext( + &self, + queue: Queue, + p_label_info: *const DebugUtilsLabelEXT, + ) -> c_void { + (self.queue_insert_debug_utils_label_ext)(queue, p_label_info) + } + #[doc = ""] + pub unsafe fn cmd_begin_debug_utils_label_ext( + &self, + command_buffer: CommandBuffer, + p_label_info: *const DebugUtilsLabelEXT, + ) -> c_void { + (self.cmd_begin_debug_utils_label_ext)(command_buffer, p_label_info) + } + #[doc = ""] + pub unsafe fn cmd_end_debug_utils_label_ext(&self, command_buffer: CommandBuffer) -> c_void { + (self.cmd_end_debug_utils_label_ext)(command_buffer) + } + #[doc = ""] + pub unsafe fn cmd_insert_debug_utils_label_ext( + &self, + command_buffer: CommandBuffer, + p_label_info: *const DebugUtilsLabelEXT, + ) -> c_void { + (self.cmd_insert_debug_utils_label_ext)(command_buffer, p_label_info) + } + #[doc = ""] + pub unsafe fn create_debug_utils_messenger_ext( + &self, + instance: Instance, + p_create_info: *const DebugUtilsMessengerCreateInfoEXT, + p_allocator: *const AllocationCallbacks, + p_messenger: *mut DebugUtilsMessengerEXT, + ) -> Result { + (self.create_debug_utils_messenger_ext)(instance, p_create_info, p_allocator, p_messenger) + } + #[doc = ""] + pub unsafe fn destroy_debug_utils_messenger_ext( + &self, + instance: Instance, + messenger: DebugUtilsMessengerEXT, + p_allocator: *const AllocationCallbacks, + ) -> c_void { + (self.destroy_debug_utils_messenger_ext)(instance, messenger, p_allocator) + } + #[doc = ""] + pub unsafe fn submit_debug_utils_message_ext( + &self, + instance: Instance, + message_severity: DebugUtilsMessageSeverityFlagsEXT, + message_types: DebugUtilsMessageTypeFlagsEXT, + p_callback_data: *const DebugUtilsMessengerCallbackDataEXT, + ) -> c_void { + (self.submit_debug_utils_message_ext)( + instance, + message_severity, + message_types, + p_callback_data, + ) + } +} +#[doc = "Generated from \'VK_EXT_debug_utils\'"] +impl StructureType { + pub const DEBUG_UTILS_OBJECT_NAME_INFO_EXT: Self = StructureType(1000128000); +} +#[doc = "Generated from \'VK_EXT_debug_utils\'"] +impl StructureType { + pub const DEBUG_UTILS_OBJECT_TAG_INFO_EXT: Self = StructureType(1000128001); +} +#[doc = "Generated from \'VK_EXT_debug_utils\'"] +impl StructureType { + pub const DEBUG_UTILS_LABEL_EXT: Self = StructureType(1000128002); +} +#[doc = 
"Generated from \'VK_EXT_debug_utils\'"] +impl StructureType { + pub const DEBUG_UTILS_MESSENGER_CALLBACK_DATA_EXT: Self = StructureType(1000128003); +} +#[doc = "Generated from \'VK_EXT_debug_utils\'"] +impl StructureType { + pub const DEBUG_UTILS_MESSENGER_CREATE_INFO_EXT: Self = StructureType(1000128004); +} +#[doc = "Generated from \'VK_EXT_debug_utils\'"] +impl ObjectType { + pub const DEBUG_UTILS_MESSENGER_EXT: Self = ObjectType(1000128000); +} +impl AndroidExternalMemoryAndroidHardwareBufferFn { + pub fn name() -> &'static ::std::ffi::CStr { + ::std::ffi::CStr::from_bytes_with_nul( + b"VK_ANDROID_external_memory_android_hardware_buffer\0", + ) + .expect("Wrong extension string") + } +} +#[allow(non_camel_case_types)] +pub type PFN_vkGetAndroidHardwareBufferPropertiesANDROID = extern "system" fn( + device: Device, + buffer: *const AHardwareBuffer, + p_properties: *mut AndroidHardwareBufferPropertiesANDROID, +) -> Result; +#[allow(non_camel_case_types)] +pub type PFN_vkGetMemoryAndroidHardwareBufferANDROID = extern "system" fn( + device: Device, + p_info: *const MemoryGetAndroidHardwareBufferInfoANDROID, + p_buffer: *mut *mut AHardwareBuffer, +) -> Result; +pub struct AndroidExternalMemoryAndroidHardwareBufferFn { + pub get_android_hardware_buffer_properties_android: extern "system" fn( + device: Device, + buffer: *const AHardwareBuffer, + p_properties: *mut AndroidHardwareBufferPropertiesANDROID, + ) -> Result, + pub get_memory_android_hardware_buffer_android: extern "system" fn( + device: Device, + p_info: *const MemoryGetAndroidHardwareBufferInfoANDROID, + p_buffer: *mut *mut AHardwareBuffer, + ) -> Result, +} +unsafe impl Send for AndroidExternalMemoryAndroidHardwareBufferFn {} +unsafe impl Sync for AndroidExternalMemoryAndroidHardwareBufferFn {} +impl ::std::clone::Clone for AndroidExternalMemoryAndroidHardwareBufferFn { + fn clone(&self) -> Self { + AndroidExternalMemoryAndroidHardwareBufferFn { + get_android_hardware_buffer_properties_android: self + .get_android_hardware_buffer_properties_android, + get_memory_android_hardware_buffer_android: self + .get_memory_android_hardware_buffer_android, + } + } +} +impl AndroidExternalMemoryAndroidHardwareBufferFn { + pub fn load(mut _f: F) -> Self + where + F: FnMut(&::std::ffi::CStr) -> *const c_void, + { + AndroidExternalMemoryAndroidHardwareBufferFn { + get_android_hardware_buffer_properties_android: unsafe { + extern "system" fn get_android_hardware_buffer_properties_android( + _device: Device, + _buffer: *const AHardwareBuffer, + _p_properties: *mut AndroidHardwareBufferPropertiesANDROID, + ) -> Result { + panic!(concat!( + "Unable to load ", + stringify!(get_android_hardware_buffer_properties_android) + )) + } + let raw_name = stringify!(vkGetAndroidHardwareBufferPropertiesANDROID); + let cname = ::std::ffi::CString::new(raw_name).unwrap(); + let val = _f(&cname); + if val.is_null() { + get_android_hardware_buffer_properties_android + } else { + ::std::mem::transmute(val) + } + }, + get_memory_android_hardware_buffer_android: unsafe { + extern "system" fn get_memory_android_hardware_buffer_android( + _device: Device, + _p_info: *const MemoryGetAndroidHardwareBufferInfoANDROID, + _p_buffer: *mut *mut AHardwareBuffer, + ) -> Result { + panic!(concat!( + "Unable to load ", + stringify!(get_memory_android_hardware_buffer_android) + )) + } + let raw_name = stringify!(vkGetMemoryAndroidHardwareBufferANDROID); + let cname = ::std::ffi::CString::new(raw_name).unwrap(); + let val = _f(&cname); + if val.is_null() { + 
get_memory_android_hardware_buffer_android + } else { + ::std::mem::transmute(val) + } + }, + } + } + #[doc = ""] + pub unsafe fn get_android_hardware_buffer_properties_android( + &self, + device: Device, + buffer: *const AHardwareBuffer, + p_properties: *mut AndroidHardwareBufferPropertiesANDROID, + ) -> Result { + (self.get_android_hardware_buffer_properties_android)(device, buffer, p_properties) + } + #[doc = ""] + pub unsafe fn get_memory_android_hardware_buffer_android( + &self, + device: Device, + p_info: *const MemoryGetAndroidHardwareBufferInfoANDROID, + p_buffer: *mut *mut AHardwareBuffer, + ) -> Result { + (self.get_memory_android_hardware_buffer_android)(device, p_info, p_buffer) + } +} +#[doc = "Generated from \'VK_ANDROID_external_memory_android_hardware_buffer\'"] +impl ExternalMemoryHandleTypeFlags { + pub const EXTERNAL_MEMORY_HANDLE_TYPE_ANDROID_HARDWARE_BUFFER_ANDROID: Self = + ExternalMemoryHandleTypeFlags(0b10000000000); +} +#[doc = "Generated from \'VK_ANDROID_external_memory_android_hardware_buffer\'"] +impl StructureType { + pub const ANDROID_HARDWARE_BUFFER_USAGE_ANDROID: Self = StructureType(1000129000); +} +#[doc = "Generated from \'VK_ANDROID_external_memory_android_hardware_buffer\'"] +impl StructureType { + pub const ANDROID_HARDWARE_BUFFER_PROPERTIES_ANDROID: Self = StructureType(1000129001); +} +#[doc = "Generated from \'VK_ANDROID_external_memory_android_hardware_buffer\'"] +impl StructureType { + pub const ANDROID_HARDWARE_BUFFER_FORMAT_PROPERTIES_ANDROID: Self = StructureType(1000129002); +} +#[doc = "Generated from \'VK_ANDROID_external_memory_android_hardware_buffer\'"] +impl StructureType { + pub const IMPORT_ANDROID_HARDWARE_BUFFER_INFO_ANDROID: Self = StructureType(1000129003); +} +#[doc = "Generated from \'VK_ANDROID_external_memory_android_hardware_buffer\'"] +impl StructureType { + pub const MEMORY_GET_ANDROID_HARDWARE_BUFFER_INFO_ANDROID: Self = StructureType(1000129004); +} +#[doc = "Generated from \'VK_ANDROID_external_memory_android_hardware_buffer\'"] +impl StructureType { + pub const EXTERNAL_FORMAT_ANDROID: Self = StructureType(1000129005); +} +impl ExtSamplerFilterMinmaxFn { + pub fn name() -> &'static ::std::ffi::CStr { + ::std::ffi::CStr::from_bytes_with_nul(b"VK_EXT_sampler_filter_minmax\0") + .expect("Wrong extension string") + } +} +pub struct ExtSamplerFilterMinmaxFn {} +unsafe impl Send for ExtSamplerFilterMinmaxFn {} +unsafe impl Sync for ExtSamplerFilterMinmaxFn {} +impl ::std::clone::Clone for ExtSamplerFilterMinmaxFn { + fn clone(&self) -> Self { + ExtSamplerFilterMinmaxFn {} + } +} +impl ExtSamplerFilterMinmaxFn { + pub fn load(mut _f: F) -> Self + where + F: FnMut(&::std::ffi::CStr) -> *const c_void, + { + ExtSamplerFilterMinmaxFn {} + } +} +#[doc = "Generated from \'VK_EXT_sampler_filter_minmax\'"] +impl StructureType { + pub const PHYSICAL_DEVICE_SAMPLER_FILTER_MINMAX_PROPERTIES_EXT: Self = + StructureType(1000130000); +} +#[doc = "Generated from \'VK_EXT_sampler_filter_minmax\'"] +impl StructureType { + pub const SAMPLER_REDUCTION_MODE_CREATE_INFO_EXT: Self = StructureType(1000130001); +} +#[doc = "Generated from \'VK_EXT_sampler_filter_minmax\'"] +impl FormatFeatureFlags { + pub const SAMPLED_IMAGE_FILTER_MINMAX_EXT: Self = FormatFeatureFlags(0b10000000000000000); +} +impl KhrStorageBufferStorageClassFn { + pub fn name() -> &'static ::std::ffi::CStr { + ::std::ffi::CStr::from_bytes_with_nul(b"VK_KHR_storage_buffer_storage_class\0") + .expect("Wrong extension string") + } +} +pub struct KhrStorageBufferStorageClassFn {} 
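+// Illustrative sketch, not part of the generated bindings: each `load` above
+// resolves its `vk*` symbol names through a caller-supplied closure and falls
+// back to a stub that panics with "Unable to load ..." whenever the resolver
+// returns null. The null-returning resolver below is an assumption used only
+// to demonstrate that fallback; real resolvers are typically backed by
+// vkGetInstanceProcAddr.
+#[allow(dead_code)]
+fn load_with_missing_symbols_example() -> AndroidExternalMemoryAndroidHardwareBufferFn {
+    // Every entry keeps its panicking stub here, so calling any function on
+    // the returned table panics at runtime instead of jumping through null.
+    AndroidExternalMemoryAndroidHardwareBufferFn::load(|_name: &::std::ffi::CStr| {
+        ::std::ptr::null()
+    })
+}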
+unsafe impl Send for KhrStorageBufferStorageClassFn {} +unsafe impl Sync for KhrStorageBufferStorageClassFn {} +impl ::std::clone::Clone for KhrStorageBufferStorageClassFn { + fn clone(&self) -> Self { + KhrStorageBufferStorageClassFn {} + } +} +impl KhrStorageBufferStorageClassFn { + pub fn load(mut _f: F) -> Self + where + F: FnMut(&::std::ffi::CStr) -> *const c_void, + { + KhrStorageBufferStorageClassFn {} + } +} +impl AmdGpuShaderInt16Fn { + pub fn name() -> &'static ::std::ffi::CStr { + ::std::ffi::CStr::from_bytes_with_nul(b"VK_AMD_gpu_shader_int16\0") + .expect("Wrong extension string") + } +} +pub struct AmdGpuShaderInt16Fn {} +unsafe impl Send for AmdGpuShaderInt16Fn {} +unsafe impl Sync for AmdGpuShaderInt16Fn {} +impl ::std::clone::Clone for AmdGpuShaderInt16Fn { + fn clone(&self) -> Self { + AmdGpuShaderInt16Fn {} + } +} +impl AmdGpuShaderInt16Fn { + pub fn load(mut _f: F) -> Self + where + F: FnMut(&::std::ffi::CStr) -> *const c_void, + { + AmdGpuShaderInt16Fn {} + } +} +impl AmdExtension134Fn { + pub fn name() -> &'static ::std::ffi::CStr { + ::std::ffi::CStr::from_bytes_with_nul(b"VK_AMD_extension_134\0") + .expect("Wrong extension string") + } +} +pub struct AmdExtension134Fn {} +unsafe impl Send for AmdExtension134Fn {} +unsafe impl Sync for AmdExtension134Fn {} +impl ::std::clone::Clone for AmdExtension134Fn { + fn clone(&self) -> Self { + AmdExtension134Fn {} + } +} +impl AmdExtension134Fn { + pub fn load(mut _f: F) -> Self + where + F: FnMut(&::std::ffi::CStr) -> *const c_void, + { + AmdExtension134Fn {} + } +} +impl AmdExtension135Fn { + pub fn name() -> &'static ::std::ffi::CStr { + ::std::ffi::CStr::from_bytes_with_nul(b"VK_AMD_extension_135\0") + .expect("Wrong extension string") + } +} +pub struct AmdExtension135Fn {} +unsafe impl Send for AmdExtension135Fn {} +unsafe impl Sync for AmdExtension135Fn {} +impl ::std::clone::Clone for AmdExtension135Fn { + fn clone(&self) -> Self { + AmdExtension135Fn {} + } +} +impl AmdExtension135Fn { + pub fn load(mut _f: F) -> Self + where + F: FnMut(&::std::ffi::CStr) -> *const c_void, + { + AmdExtension135Fn {} + } +} +impl AmdExtension136Fn { + pub fn name() -> &'static ::std::ffi::CStr { + ::std::ffi::CStr::from_bytes_with_nul(b"VK_AMD_extension_136\0") + .expect("Wrong extension string") + } +} +pub struct AmdExtension136Fn {} +unsafe impl Send for AmdExtension136Fn {} +unsafe impl Sync for AmdExtension136Fn {} +impl ::std::clone::Clone for AmdExtension136Fn { + fn clone(&self) -> Self { + AmdExtension136Fn {} + } +} +impl AmdExtension136Fn { + pub fn load(mut _f: F) -> Self + where + F: FnMut(&::std::ffi::CStr) -> *const c_void, + { + AmdExtension136Fn {} + } +} +impl AmdMixedAttachmentSamplesFn { + pub fn name() -> &'static ::std::ffi::CStr { + ::std::ffi::CStr::from_bytes_with_nul(b"VK_AMD_mixed_attachment_samples\0") + .expect("Wrong extension string") + } +} +pub struct AmdMixedAttachmentSamplesFn {} +unsafe impl Send for AmdMixedAttachmentSamplesFn {} +unsafe impl Sync for AmdMixedAttachmentSamplesFn {} +impl ::std::clone::Clone for AmdMixedAttachmentSamplesFn { + fn clone(&self) -> Self { + AmdMixedAttachmentSamplesFn {} + } +} +impl AmdMixedAttachmentSamplesFn { + pub fn load(mut _f: F) -> Self + where + F: FnMut(&::std::ffi::CStr) -> *const c_void, + { + AmdMixedAttachmentSamplesFn {} + } +} +impl AmdShaderFragmentMaskFn { + pub fn name() -> &'static ::std::ffi::CStr { + ::std::ffi::CStr::from_bytes_with_nul(b"VK_AMD_shader_fragment_mask\0") + .expect("Wrong extension string") + } +} +pub struct 
AmdShaderFragmentMaskFn {} +unsafe impl Send for AmdShaderFragmentMaskFn {} +unsafe impl Sync for AmdShaderFragmentMaskFn {} +impl ::std::clone::Clone for AmdShaderFragmentMaskFn { + fn clone(&self) -> Self { + AmdShaderFragmentMaskFn {} + } +} +impl AmdShaderFragmentMaskFn { + pub fn load(mut _f: F) -> Self + where + F: FnMut(&::std::ffi::CStr) -> *const c_void, + { + AmdShaderFragmentMaskFn {} + } +} +impl ExtInlineUniformBlockFn { + pub fn name() -> &'static ::std::ffi::CStr { + ::std::ffi::CStr::from_bytes_with_nul(b"VK_EXT_inline_uniform_block\0") + .expect("Wrong extension string") + } +} +pub struct ExtInlineUniformBlockFn {} +unsafe impl Send for ExtInlineUniformBlockFn {} +unsafe impl Sync for ExtInlineUniformBlockFn {} +impl ::std::clone::Clone for ExtInlineUniformBlockFn { + fn clone(&self) -> Self { + ExtInlineUniformBlockFn {} + } +} +impl ExtInlineUniformBlockFn { + pub fn load(mut _f: F) -> Self + where + F: FnMut(&::std::ffi::CStr) -> *const c_void, + { + ExtInlineUniformBlockFn {} + } +} +#[doc = "Generated from \'VK_EXT_inline_uniform_block\'"] +impl DescriptorType { + pub const INLINE_UNIFORM_BLOCK_EXT: Self = DescriptorType(1000138000); +} +#[doc = "Generated from \'VK_EXT_inline_uniform_block\'"] +impl StructureType { + pub const PHYSICAL_DEVICE_INLINE_UNIFORM_BLOCK_FEATURES_EXT: Self = StructureType(1000138000); +} +#[doc = "Generated from \'VK_EXT_inline_uniform_block\'"] +impl StructureType { + pub const PHYSICAL_DEVICE_INLINE_UNIFORM_BLOCK_PROPERTIES_EXT: Self = StructureType(1000138001); +} +#[doc = "Generated from \'VK_EXT_inline_uniform_block\'"] +impl StructureType { + pub const WRITE_DESCRIPTOR_SET_INLINE_UNIFORM_BLOCK_EXT: Self = StructureType(1000138002); +} +#[doc = "Generated from \'VK_EXT_inline_uniform_block\'"] +impl StructureType { + pub const DESCRIPTOR_POOL_INLINE_UNIFORM_BLOCK_CREATE_INFO_EXT: Self = + StructureType(1000138003); +} +impl AmdExtension140Fn { + pub fn name() -> &'static ::std::ffi::CStr { + ::std::ffi::CStr::from_bytes_with_nul(b"VK_AMD_extension_140\0") + .expect("Wrong extension string") + } +} +pub struct AmdExtension140Fn {} +unsafe impl Send for AmdExtension140Fn {} +unsafe impl Sync for AmdExtension140Fn {} +impl ::std::clone::Clone for AmdExtension140Fn { + fn clone(&self) -> Self { + AmdExtension140Fn {} + } +} +impl AmdExtension140Fn { + pub fn load(mut _f: F) -> Self + where + F: FnMut(&::std::ffi::CStr) -> *const c_void, + { + AmdExtension140Fn {} + } +} +impl ExtShaderStencilExportFn { + pub fn name() -> &'static ::std::ffi::CStr { + ::std::ffi::CStr::from_bytes_with_nul(b"VK_EXT_shader_stencil_export\0") + .expect("Wrong extension string") + } +} +pub struct ExtShaderStencilExportFn {} +unsafe impl Send for ExtShaderStencilExportFn {} +unsafe impl Sync for ExtShaderStencilExportFn {} +impl ::std::clone::Clone for ExtShaderStencilExportFn { + fn clone(&self) -> Self { + ExtShaderStencilExportFn {} + } +} +impl ExtShaderStencilExportFn { + pub fn load(mut _f: F) -> Self + where + F: FnMut(&::std::ffi::CStr) -> *const c_void, + { + ExtShaderStencilExportFn {} + } +} +impl AmdExtension142Fn { + pub fn name() -> &'static ::std::ffi::CStr { + ::std::ffi::CStr::from_bytes_with_nul(b"VK_AMD_extension_142\0") + .expect("Wrong extension string") + } +} +pub struct AmdExtension142Fn {} +unsafe impl Send for AmdExtension142Fn {} +unsafe impl Sync for AmdExtension142Fn {} +impl ::std::clone::Clone for AmdExtension142Fn { + fn clone(&self) -> Self { + AmdExtension142Fn {} + } +} +impl AmdExtension142Fn { + pub fn load(mut _f: F) -> 
Self + where + F: FnMut(&::std::ffi::CStr) -> *const c_void, + { + AmdExtension142Fn {} + } +} +impl AmdExtension143Fn { + pub fn name() -> &'static ::std::ffi::CStr { + ::std::ffi::CStr::from_bytes_with_nul(b"VK_AMD_extension_143\0") + .expect("Wrong extension string") + } +} +pub struct AmdExtension143Fn {} +unsafe impl Send for AmdExtension143Fn {} +unsafe impl Sync for AmdExtension143Fn {} +impl ::std::clone::Clone for AmdExtension143Fn { + fn clone(&self) -> Self { + AmdExtension143Fn {} + } +} +impl AmdExtension143Fn { + pub fn load(mut _f: F) -> Self + where + F: FnMut(&::std::ffi::CStr) -> *const c_void, + { + AmdExtension143Fn {} + } +} +impl ExtSampleLocationsFn { + pub fn name() -> &'static ::std::ffi::CStr { + ::std::ffi::CStr::from_bytes_with_nul(b"VK_EXT_sample_locations\0") + .expect("Wrong extension string") + } +} +#[allow(non_camel_case_types)] +pub type PFN_vkCmdSetSampleLocationsEXT = extern "system" fn( + command_buffer: CommandBuffer, + p_sample_locations_info: *const SampleLocationsInfoEXT, +) -> c_void; +#[allow(non_camel_case_types)] +pub type PFN_vkGetPhysicalDeviceMultisamplePropertiesEXT = extern "system" fn( + physical_device: PhysicalDevice, + samples: SampleCountFlags, + p_multisample_properties: *mut MultisamplePropertiesEXT, +) -> c_void; +pub struct ExtSampleLocationsFn { + pub cmd_set_sample_locations_ext: extern "system" fn( + command_buffer: CommandBuffer, + p_sample_locations_info: *const SampleLocationsInfoEXT, + ) -> c_void, + pub get_physical_device_multisample_properties_ext: extern "system" fn( + physical_device: PhysicalDevice, + samples: SampleCountFlags, + p_multisample_properties: *mut MultisamplePropertiesEXT, + ) -> c_void, +} +unsafe impl Send for ExtSampleLocationsFn {} +unsafe impl Sync for ExtSampleLocationsFn {} +impl ::std::clone::Clone for ExtSampleLocationsFn { + fn clone(&self) -> Self { + ExtSampleLocationsFn { + cmd_set_sample_locations_ext: self.cmd_set_sample_locations_ext, + get_physical_device_multisample_properties_ext: self + .get_physical_device_multisample_properties_ext, + } + } +} +impl ExtSampleLocationsFn { + pub fn load(mut _f: F) -> Self + where + F: FnMut(&::std::ffi::CStr) -> *const c_void, + { + ExtSampleLocationsFn { + cmd_set_sample_locations_ext: unsafe { + extern "system" fn cmd_set_sample_locations_ext( + _command_buffer: CommandBuffer, + _p_sample_locations_info: *const SampleLocationsInfoEXT, + ) -> c_void { + panic!(concat!( + "Unable to load ", + stringify!(cmd_set_sample_locations_ext) + )) + } + let raw_name = stringify!(vkCmdSetSampleLocationsEXT); + let cname = ::std::ffi::CString::new(raw_name).unwrap(); + let val = _f(&cname); + if val.is_null() { + cmd_set_sample_locations_ext + } else { + ::std::mem::transmute(val) + } + }, + get_physical_device_multisample_properties_ext: unsafe { + extern "system" fn get_physical_device_multisample_properties_ext( + _physical_device: PhysicalDevice, + _samples: SampleCountFlags, + _p_multisample_properties: *mut MultisamplePropertiesEXT, + ) -> c_void { + panic!(concat!( + "Unable to load ", + stringify!(get_physical_device_multisample_properties_ext) + )) + } + let raw_name = stringify!(vkGetPhysicalDeviceMultisamplePropertiesEXT); + let cname = ::std::ffi::CString::new(raw_name).unwrap(); + let val = _f(&cname); + if val.is_null() { + get_physical_device_multisample_properties_ext + } else { + ::std::mem::transmute(val) + } + }, + } + } + #[doc = ""] + pub unsafe fn cmd_set_sample_locations_ext( + &self, + command_buffer: CommandBuffer, + 
p_sample_locations_info: *const SampleLocationsInfoEXT, + ) -> c_void { + (self.cmd_set_sample_locations_ext)(command_buffer, p_sample_locations_info) + } + #[doc = ""] + pub unsafe fn get_physical_device_multisample_properties_ext( + &self, + physical_device: PhysicalDevice, + samples: SampleCountFlags, + p_multisample_properties: *mut MultisamplePropertiesEXT, + ) -> c_void { + (self.get_physical_device_multisample_properties_ext)( + physical_device, + samples, + p_multisample_properties, + ) + } +} +#[doc = "Generated from \'VK_EXT_sample_locations\'"] +impl ImageCreateFlags { + pub const SAMPLE_LOCATIONS_COMPATIBLE_DEPTH_EXT: Self = ImageCreateFlags(0b1000000000000); +} +#[doc = "Generated from \'VK_EXT_sample_locations\'"] +impl StructureType { + pub const SAMPLE_LOCATIONS_INFO_EXT: Self = StructureType(1000143000); +} +#[doc = "Generated from \'VK_EXT_sample_locations\'"] +impl StructureType { + pub const RENDER_PASS_SAMPLE_LOCATIONS_BEGIN_INFO_EXT: Self = StructureType(1000143001); +} +#[doc = "Generated from \'VK_EXT_sample_locations\'"] +impl StructureType { + pub const PIPELINE_SAMPLE_LOCATIONS_STATE_CREATE_INFO_EXT: Self = StructureType(1000143002); +} +#[doc = "Generated from \'VK_EXT_sample_locations\'"] +impl StructureType { + pub const PHYSICAL_DEVICE_SAMPLE_LOCATIONS_PROPERTIES_EXT: Self = StructureType(1000143003); +} +#[doc = "Generated from \'VK_EXT_sample_locations\'"] +impl StructureType { + pub const MULTISAMPLE_PROPERTIES_EXT: Self = StructureType(1000143004); +} +#[doc = "Generated from \'VK_EXT_sample_locations\'"] +impl DynamicState { + pub const SAMPLE_LOCATIONS_EXT: Self = DynamicState(1000143000); +} +impl KhrRelaxedBlockLayoutFn { + pub fn name() -> &'static ::std::ffi::CStr { + ::std::ffi::CStr::from_bytes_with_nul(b"VK_KHR_relaxed_block_layout\0") + .expect("Wrong extension string") + } +} +pub struct KhrRelaxedBlockLayoutFn {} +unsafe impl Send for KhrRelaxedBlockLayoutFn {} +unsafe impl Sync for KhrRelaxedBlockLayoutFn {} +impl ::std::clone::Clone for KhrRelaxedBlockLayoutFn { + fn clone(&self) -> Self { + KhrRelaxedBlockLayoutFn {} + } +} +impl KhrRelaxedBlockLayoutFn { + pub fn load(mut _f: F) -> Self + where + F: FnMut(&::std::ffi::CStr) -> *const c_void, + { + KhrRelaxedBlockLayoutFn {} + } +} +impl KhrGetMemoryRequirements2Fn { + pub fn name() -> &'static ::std::ffi::CStr { + ::std::ffi::CStr::from_bytes_with_nul(b"VK_KHR_get_memory_requirements2\0") + .expect("Wrong extension string") + } +} +pub struct KhrGetMemoryRequirements2Fn {} +unsafe impl Send for KhrGetMemoryRequirements2Fn {} +unsafe impl Sync for KhrGetMemoryRequirements2Fn {} +impl ::std::clone::Clone for KhrGetMemoryRequirements2Fn { + fn clone(&self) -> Self { + KhrGetMemoryRequirements2Fn {} + } +} +impl KhrGetMemoryRequirements2Fn { + pub fn load(mut _f: F) -> Self + where + F: FnMut(&::std::ffi::CStr) -> *const c_void, + { + KhrGetMemoryRequirements2Fn {} + } +} +impl KhrImageFormatListFn { + pub fn name() -> &'static ::std::ffi::CStr { + ::std::ffi::CStr::from_bytes_with_nul(b"VK_KHR_image_format_list\0") + .expect("Wrong extension string") + } +} +pub struct KhrImageFormatListFn {} +unsafe impl Send for KhrImageFormatListFn {} +unsafe impl Sync for KhrImageFormatListFn {} +impl ::std::clone::Clone for KhrImageFormatListFn { + fn clone(&self) -> Self { + KhrImageFormatListFn {} + } +} +impl KhrImageFormatListFn { + pub fn load(mut _f: F) -> Self + where + F: FnMut(&::std::ffi::CStr) -> *const c_void, + { + KhrImageFormatListFn {} + } +} +#[doc = "Generated from 
\'VK_KHR_image_format_list\'"] +impl StructureType { + pub const IMAGE_FORMAT_LIST_CREATE_INFO_KHR: Self = StructureType(1000147000); +} +impl ExtBlendOperationAdvancedFn { + pub fn name() -> &'static ::std::ffi::CStr { + ::std::ffi::CStr::from_bytes_with_nul(b"VK_EXT_blend_operation_advanced\0") + .expect("Wrong extension string") + } +} +pub struct ExtBlendOperationAdvancedFn {} +unsafe impl Send for ExtBlendOperationAdvancedFn {} +unsafe impl Sync for ExtBlendOperationAdvancedFn {} +impl ::std::clone::Clone for ExtBlendOperationAdvancedFn { + fn clone(&self) -> Self { + ExtBlendOperationAdvancedFn {} + } +} +impl ExtBlendOperationAdvancedFn { + pub fn load(mut _f: F) -> Self + where + F: FnMut(&::std::ffi::CStr) -> *const c_void, + { + ExtBlendOperationAdvancedFn {} + } +} +#[doc = "Generated from \'VK_EXT_blend_operation_advanced\'"] +impl StructureType { + pub const PHYSICAL_DEVICE_BLEND_OPERATION_ADVANCED_FEATURES_EXT: Self = + StructureType(1000148000); +} +#[doc = "Generated from \'VK_EXT_blend_operation_advanced\'"] +impl StructureType { + pub const PHYSICAL_DEVICE_BLEND_OPERATION_ADVANCED_PROPERTIES_EXT: Self = + StructureType(1000148001); +} +#[doc = "Generated from \'VK_EXT_blend_operation_advanced\'"] +impl StructureType { + pub const PIPELINE_COLOR_BLEND_ADVANCED_STATE_CREATE_INFO_EXT: Self = StructureType(1000148002); +} +#[doc = "Generated from \'VK_EXT_blend_operation_advanced\'"] +impl BlendOp { + pub const ZERO_EXT: Self = BlendOp(1000148000); +} +#[doc = "Generated from \'VK_EXT_blend_operation_advanced\'"] +impl BlendOp { + pub const SRC_EXT: Self = BlendOp(1000148001); +} +#[doc = "Generated from \'VK_EXT_blend_operation_advanced\'"] +impl BlendOp { + pub const DST_EXT: Self = BlendOp(1000148002); +} +#[doc = "Generated from \'VK_EXT_blend_operation_advanced\'"] +impl BlendOp { + pub const SRC_OVER_EXT: Self = BlendOp(1000148003); +} +#[doc = "Generated from \'VK_EXT_blend_operation_advanced\'"] +impl BlendOp { + pub const DST_OVER_EXT: Self = BlendOp(1000148004); +} +#[doc = "Generated from \'VK_EXT_blend_operation_advanced\'"] +impl BlendOp { + pub const SRC_IN_EXT: Self = BlendOp(1000148005); +} +#[doc = "Generated from \'VK_EXT_blend_operation_advanced\'"] +impl BlendOp { + pub const DST_IN_EXT: Self = BlendOp(1000148006); +} +#[doc = "Generated from \'VK_EXT_blend_operation_advanced\'"] +impl BlendOp { + pub const SRC_OUT_EXT: Self = BlendOp(1000148007); +} +#[doc = "Generated from \'VK_EXT_blend_operation_advanced\'"] +impl BlendOp { + pub const DST_OUT_EXT: Self = BlendOp(1000148008); +} +#[doc = "Generated from \'VK_EXT_blend_operation_advanced\'"] +impl BlendOp { + pub const SRC_ATOP_EXT: Self = BlendOp(1000148009); +} +#[doc = "Generated from \'VK_EXT_blend_operation_advanced\'"] +impl BlendOp { + pub const DST_ATOP_EXT: Self = BlendOp(1000148010); +} +#[doc = "Generated from \'VK_EXT_blend_operation_advanced\'"] +impl BlendOp { + pub const XOR_EXT: Self = BlendOp(1000148011); +} +#[doc = "Generated from \'VK_EXT_blend_operation_advanced\'"] +impl BlendOp { + pub const MULTIPLY_EXT: Self = BlendOp(1000148012); +} +#[doc = "Generated from \'VK_EXT_blend_operation_advanced\'"] +impl BlendOp { + pub const SCREEN_EXT: Self = BlendOp(1000148013); +} +#[doc = "Generated from \'VK_EXT_blend_operation_advanced\'"] +impl BlendOp { + pub const OVERLAY_EXT: Self = BlendOp(1000148014); +} +#[doc = "Generated from \'VK_EXT_blend_operation_advanced\'"] +impl BlendOp { + pub const DARKEN_EXT: Self = BlendOp(1000148015); +} +#[doc = "Generated from 
\'VK_EXT_blend_operation_advanced\'"] +impl BlendOp { + pub const LIGHTEN_EXT: Self = BlendOp(1000148016); +} +#[doc = "Generated from \'VK_EXT_blend_operation_advanced\'"] +impl BlendOp { + pub const COLORDODGE_EXT: Self = BlendOp(1000148017); +} +#[doc = "Generated from \'VK_EXT_blend_operation_advanced\'"] +impl BlendOp { + pub const COLORBURN_EXT: Self = BlendOp(1000148018); +} +#[doc = "Generated from \'VK_EXT_blend_operation_advanced\'"] +impl BlendOp { + pub const HARDLIGHT_EXT: Self = BlendOp(1000148019); +} +#[doc = "Generated from \'VK_EXT_blend_operation_advanced\'"] +impl BlendOp { + pub const SOFTLIGHT_EXT: Self = BlendOp(1000148020); +} +#[doc = "Generated from \'VK_EXT_blend_operation_advanced\'"] +impl BlendOp { + pub const DIFFERENCE_EXT: Self = BlendOp(1000148021); +} +#[doc = "Generated from \'VK_EXT_blend_operation_advanced\'"] +impl BlendOp { + pub const EXCLUSION_EXT: Self = BlendOp(1000148022); +} +#[doc = "Generated from \'VK_EXT_blend_operation_advanced\'"] +impl BlendOp { + pub const INVERT_EXT: Self = BlendOp(1000148023); +} +#[doc = "Generated from \'VK_EXT_blend_operation_advanced\'"] +impl BlendOp { + pub const INVERT_RGB_EXT: Self = BlendOp(1000148024); +} +#[doc = "Generated from \'VK_EXT_blend_operation_advanced\'"] +impl BlendOp { + pub const LINEARDODGE_EXT: Self = BlendOp(1000148025); +} +#[doc = "Generated from \'VK_EXT_blend_operation_advanced\'"] +impl BlendOp { + pub const LINEARBURN_EXT: Self = BlendOp(1000148026); +} +#[doc = "Generated from \'VK_EXT_blend_operation_advanced\'"] +impl BlendOp { + pub const VIVIDLIGHT_EXT: Self = BlendOp(1000148027); +} +#[doc = "Generated from \'VK_EXT_blend_operation_advanced\'"] +impl BlendOp { + pub const LINEARLIGHT_EXT: Self = BlendOp(1000148028); +} +#[doc = "Generated from \'VK_EXT_blend_operation_advanced\'"] +impl BlendOp { + pub const PINLIGHT_EXT: Self = BlendOp(1000148029); +} +#[doc = "Generated from \'VK_EXT_blend_operation_advanced\'"] +impl BlendOp { + pub const HARDMIX_EXT: Self = BlendOp(1000148030); +} +#[doc = "Generated from \'VK_EXT_blend_operation_advanced\'"] +impl BlendOp { + pub const HSL_HUE_EXT: Self = BlendOp(1000148031); +} +#[doc = "Generated from \'VK_EXT_blend_operation_advanced\'"] +impl BlendOp { + pub const HSL_SATURATION_EXT: Self = BlendOp(1000148032); +} +#[doc = "Generated from \'VK_EXT_blend_operation_advanced\'"] +impl BlendOp { + pub const HSL_COLOR_EXT: Self = BlendOp(1000148033); +} +#[doc = "Generated from \'VK_EXT_blend_operation_advanced\'"] +impl BlendOp { + pub const HSL_LUMINOSITY_EXT: Self = BlendOp(1000148034); +} +#[doc = "Generated from \'VK_EXT_blend_operation_advanced\'"] +impl BlendOp { + pub const PLUS_EXT: Self = BlendOp(1000148035); +} +#[doc = "Generated from \'VK_EXT_blend_operation_advanced\'"] +impl BlendOp { + pub const PLUS_CLAMPED_EXT: Self = BlendOp(1000148036); +} +#[doc = "Generated from \'VK_EXT_blend_operation_advanced\'"] +impl BlendOp { + pub const PLUS_CLAMPED_ALPHA_EXT: Self = BlendOp(1000148037); +} +#[doc = "Generated from \'VK_EXT_blend_operation_advanced\'"] +impl BlendOp { + pub const PLUS_DARKER_EXT: Self = BlendOp(1000148038); +} +#[doc = "Generated from \'VK_EXT_blend_operation_advanced\'"] +impl BlendOp { + pub const MINUS_EXT: Self = BlendOp(1000148039); +} +#[doc = "Generated from \'VK_EXT_blend_operation_advanced\'"] +impl BlendOp { + pub const MINUS_CLAMPED_EXT: Self = BlendOp(1000148040); +} +#[doc = "Generated from \'VK_EXT_blend_operation_advanced\'"] +impl BlendOp { + pub const CONTRAST_EXT: Self = BlendOp(1000148041); +} 
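+// Illustrative sketch, not generated code: extension-introduced enum values in
+// Vulkan follow the registry rule
+//     value = 1_000_000_000 + (extension_number - 1) * 1000 + offset,
+// which is why VK_EXT_blend_operation_advanced (extension number 149) yields
+// the BlendOp values in the 1000148000.. range above and below.
+// `ext_enum_value` is a hypothetical helper used only for this example.
+#[allow(dead_code)]
+fn ext_enum_value(extension_number: i32, offset: i32) -> i32 {
+    1_000_000_000 + (extension_number - 1) * 1000 + offset
+}
+// ext_enum_value(149, 12) == 1000148012, matching BlendOp::MULTIPLY_EXT above.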
+#[doc = "Generated from \'VK_EXT_blend_operation_advanced\'"] +impl BlendOp { + pub const INVERT_OVG_EXT: Self = BlendOp(1000148042); +} +#[doc = "Generated from \'VK_EXT_blend_operation_advanced\'"] +impl BlendOp { + pub const RED_EXT: Self = BlendOp(1000148043); +} +#[doc = "Generated from \'VK_EXT_blend_operation_advanced\'"] +impl BlendOp { + pub const GREEN_EXT: Self = BlendOp(1000148044); +} +#[doc = "Generated from \'VK_EXT_blend_operation_advanced\'"] +impl BlendOp { + pub const BLUE_EXT: Self = BlendOp(1000148045); +} +#[doc = "Generated from \'VK_EXT_blend_operation_advanced\'"] +impl AccessFlags { + pub const COLOR_ATTACHMENT_READ_NONCOHERENT_EXT: Self = AccessFlags(0b10000000000000000000); +} +impl NvFragmentCoverageToColorFn { + pub fn name() -> &'static ::std::ffi::CStr { + ::std::ffi::CStr::from_bytes_with_nul(b"VK_NV_fragment_coverage_to_color\0") + .expect("Wrong extension string") + } +} +pub struct NvFragmentCoverageToColorFn {} +unsafe impl Send for NvFragmentCoverageToColorFn {} +unsafe impl Sync for NvFragmentCoverageToColorFn {} +impl ::std::clone::Clone for NvFragmentCoverageToColorFn { + fn clone(&self) -> Self { + NvFragmentCoverageToColorFn {} + } +} +impl NvFragmentCoverageToColorFn { + pub fn load(mut _f: F) -> Self + where + F: FnMut(&::std::ffi::CStr) -> *const c_void, + { + NvFragmentCoverageToColorFn {} + } +} +#[doc = "Generated from \'VK_NV_fragment_coverage_to_color\'"] +impl StructureType { + pub const PIPELINE_COVERAGE_TO_COLOR_STATE_CREATE_INFO_NV: Self = StructureType(1000149000); +} +impl NvExtension151Fn { + pub fn name() -> &'static ::std::ffi::CStr { + ::std::ffi::CStr::from_bytes_with_nul(b"VK_NV_extension_151\0") + .expect("Wrong extension string") + } +} +pub struct NvExtension151Fn {} +unsafe impl Send for NvExtension151Fn {} +unsafe impl Sync for NvExtension151Fn {} +impl ::std::clone::Clone for NvExtension151Fn { + fn clone(&self) -> Self { + NvExtension151Fn {} + } +} +impl NvExtension151Fn { + pub fn load(mut _f: F) -> Self + where + F: FnMut(&::std::ffi::CStr) -> *const c_void, + { + NvExtension151Fn {} + } +} +impl NvExtension152Fn { + pub fn name() -> &'static ::std::ffi::CStr { + ::std::ffi::CStr::from_bytes_with_nul(b"VK_NV_extension_152\0") + .expect("Wrong extension string") + } +} +pub struct NvExtension152Fn {} +unsafe impl Send for NvExtension152Fn {} +unsafe impl Sync for NvExtension152Fn {} +impl ::std::clone::Clone for NvExtension152Fn { + fn clone(&self) -> Self { + NvExtension152Fn {} + } +} +impl NvExtension152Fn { + pub fn load(mut _f: F) -> Self + where + F: FnMut(&::std::ffi::CStr) -> *const c_void, + { + NvExtension152Fn {} + } +} +impl NvFramebufferMixedSamplesFn { + pub fn name() -> &'static ::std::ffi::CStr { + ::std::ffi::CStr::from_bytes_with_nul(b"VK_NV_framebuffer_mixed_samples\0") + .expect("Wrong extension string") + } +} +pub struct NvFramebufferMixedSamplesFn {} +unsafe impl Send for NvFramebufferMixedSamplesFn {} +unsafe impl Sync for NvFramebufferMixedSamplesFn {} +impl ::std::clone::Clone for NvFramebufferMixedSamplesFn { + fn clone(&self) -> Self { + NvFramebufferMixedSamplesFn {} + } +} +impl NvFramebufferMixedSamplesFn { + pub fn load(mut _f: F) -> Self + where + F: FnMut(&::std::ffi::CStr) -> *const c_void, + { + NvFramebufferMixedSamplesFn {} + } +} +#[doc = "Generated from \'VK_NV_framebuffer_mixed_samples\'"] +impl StructureType { + pub const PIPELINE_COVERAGE_MODULATION_STATE_CREATE_INFO_NV: Self = StructureType(1000152000); +} +impl NvFillRectangleFn { + pub fn name() -> &'static 
::std::ffi::CStr { + ::std::ffi::CStr::from_bytes_with_nul(b"VK_NV_fill_rectangle\0") + .expect("Wrong extension string") + } +} +pub struct NvFillRectangleFn {} +unsafe impl Send for NvFillRectangleFn {} +unsafe impl Sync for NvFillRectangleFn {} +impl ::std::clone::Clone for NvFillRectangleFn { + fn clone(&self) -> Self { + NvFillRectangleFn {} + } +} +impl NvFillRectangleFn { + pub fn load(mut _f: F) -> Self + where + F: FnMut(&::std::ffi::CStr) -> *const c_void, + { + NvFillRectangleFn {} + } +} +#[doc = "Generated from \'VK_NV_fill_rectangle\'"] +impl PolygonMode { + pub const FILL_RECTANGLE_NV: Self = PolygonMode(1000153000); +} +impl NvExtension155Fn { + pub fn name() -> &'static ::std::ffi::CStr { + ::std::ffi::CStr::from_bytes_with_nul(b"VK_NV_extension_155\0") + .expect("Wrong extension string") + } +} +pub struct NvExtension155Fn {} +unsafe impl Send for NvExtension155Fn {} +unsafe impl Sync for NvExtension155Fn {} +impl ::std::clone::Clone for NvExtension155Fn { + fn clone(&self) -> Self { + NvExtension155Fn {} + } +} +impl NvExtension155Fn { + pub fn load(mut _f: F) -> Self + where + F: FnMut(&::std::ffi::CStr) -> *const c_void, + { + NvExtension155Fn {} + } +} +impl ExtPostDepthCoverageFn { + pub fn name() -> &'static ::std::ffi::CStr { + ::std::ffi::CStr::from_bytes_with_nul(b"VK_EXT_post_depth_coverage\0") + .expect("Wrong extension string") + } +} +pub struct ExtPostDepthCoverageFn {} +unsafe impl Send for ExtPostDepthCoverageFn {} +unsafe impl Sync for ExtPostDepthCoverageFn {} +impl ::std::clone::Clone for ExtPostDepthCoverageFn { + fn clone(&self) -> Self { + ExtPostDepthCoverageFn {} + } +} +impl ExtPostDepthCoverageFn { + pub fn load(mut _f: F) -> Self + where + F: FnMut(&::std::ffi::CStr) -> *const c_void, + { + ExtPostDepthCoverageFn {} + } +} +impl KhrSamplerYcbcrConversionFn { + pub fn name() -> &'static ::std::ffi::CStr { + ::std::ffi::CStr::from_bytes_with_nul(b"VK_KHR_sampler_ycbcr_conversion\0") + .expect("Wrong extension string") + } +} +pub struct KhrSamplerYcbcrConversionFn {} +unsafe impl Send for KhrSamplerYcbcrConversionFn {} +unsafe impl Sync for KhrSamplerYcbcrConversionFn {} +impl ::std::clone::Clone for KhrSamplerYcbcrConversionFn { + fn clone(&self) -> Self { + KhrSamplerYcbcrConversionFn {} + } +} +impl KhrSamplerYcbcrConversionFn { + pub fn load(mut _f: F) -> Self + where + F: FnMut(&::std::ffi::CStr) -> *const c_void, + { + KhrSamplerYcbcrConversionFn {} + } +} +impl KhrBindMemory2Fn { + pub fn name() -> &'static ::std::ffi::CStr { + ::std::ffi::CStr::from_bytes_with_nul(b"VK_KHR_bind_memory2\0") + .expect("Wrong extension string") + } +} +pub struct KhrBindMemory2Fn {} +unsafe impl Send for KhrBindMemory2Fn {} +unsafe impl Sync for KhrBindMemory2Fn {} +impl ::std::clone::Clone for KhrBindMemory2Fn { + fn clone(&self) -> Self { + KhrBindMemory2Fn {} + } +} +impl KhrBindMemory2Fn { + pub fn load(mut _f: F) -> Self + where + F: FnMut(&::std::ffi::CStr) -> *const c_void, + { + KhrBindMemory2Fn {} + } +} +impl ExtImageDrmFormatModifierFn { + pub fn name() -> &'static ::std::ffi::CStr { + ::std::ffi::CStr::from_bytes_with_nul(b"VK_EXT_image_drm_format_modifier\0") + .expect("Wrong extension string") + } +} +#[allow(non_camel_case_types)] +pub type PFN_vkGetImageDrmFormatModifierPropertiesEXT = extern "system" fn( + device: Device, + image: Image, + p_properties: *mut ImageDrmFormatModifierPropertiesEXT, +) -> Result; +pub struct ExtImageDrmFormatModifierFn { + pub get_image_drm_format_modifier_properties_ext: extern "system" fn( + device: Device, + 
image: Image, + p_properties: *mut ImageDrmFormatModifierPropertiesEXT, + ) -> Result, +} +unsafe impl Send for ExtImageDrmFormatModifierFn {} +unsafe impl Sync for ExtImageDrmFormatModifierFn {} +impl ::std::clone::Clone for ExtImageDrmFormatModifierFn { + fn clone(&self) -> Self { + ExtImageDrmFormatModifierFn { + get_image_drm_format_modifier_properties_ext: self + .get_image_drm_format_modifier_properties_ext, + } + } +} +impl ExtImageDrmFormatModifierFn { + pub fn load(mut _f: F) -> Self + where + F: FnMut(&::std::ffi::CStr) -> *const c_void, + { + ExtImageDrmFormatModifierFn { + get_image_drm_format_modifier_properties_ext: unsafe { + extern "system" fn get_image_drm_format_modifier_properties_ext( + _device: Device, + _image: Image, + _p_properties: *mut ImageDrmFormatModifierPropertiesEXT, + ) -> Result { + panic!(concat!( + "Unable to load ", + stringify!(get_image_drm_format_modifier_properties_ext) + )) + } + let raw_name = stringify!(vkGetImageDrmFormatModifierPropertiesEXT); + let cname = ::std::ffi::CString::new(raw_name).unwrap(); + let val = _f(&cname); + if val.is_null() { + get_image_drm_format_modifier_properties_ext + } else { + ::std::mem::transmute(val) + } + }, + } + } + #[doc = ""] + pub unsafe fn get_image_drm_format_modifier_properties_ext( + &self, + device: Device, + image: Image, + p_properties: *mut ImageDrmFormatModifierPropertiesEXT, + ) -> Result { + (self.get_image_drm_format_modifier_properties_ext)(device, image, p_properties) + } +} +#[doc = "Generated from \'VK_EXT_image_drm_format_modifier\'"] +impl Result { + pub const ERROR_INVALID_DRM_FORMAT_MODIFIER_PLANE_LAYOUT_EXT: Self = Result(-1000158000); +} +#[doc = "Generated from \'VK_EXT_image_drm_format_modifier\'"] +impl StructureType { + pub const DRM_FORMAT_MODIFIER_PROPERTIES_LIST_EXT: Self = StructureType(1000158000); +} +#[doc = "Generated from \'VK_EXT_image_drm_format_modifier\'"] +impl StructureType { + pub const DRM_FORMAT_MODIFIER_PROPERTIES_EXT: Self = StructureType(1000158001); +} +#[doc = "Generated from \'VK_EXT_image_drm_format_modifier\'"] +impl StructureType { + pub const PHYSICAL_DEVICE_IMAGE_DRM_FORMAT_MODIFIER_INFO_EXT: Self = StructureType(1000158002); +} +#[doc = "Generated from \'VK_EXT_image_drm_format_modifier\'"] +impl StructureType { + pub const IMAGE_DRM_FORMAT_MODIFIER_LIST_CREATE_INFO_EXT: Self = StructureType(1000158003); +} +#[doc = "Generated from \'VK_EXT_image_drm_format_modifier\'"] +impl StructureType { + pub const IMAGE_DRM_FORMAT_MODIFIER_EXPLICIT_CREATE_INFO_EXT: Self = StructureType(1000158004); +} +#[doc = "Generated from \'VK_EXT_image_drm_format_modifier\'"] +impl StructureType { + pub const IMAGE_DRM_FORMAT_MODIFIER_PROPERTIES_EXT: Self = StructureType(1000158005); +} +#[doc = "Generated from \'VK_EXT_image_drm_format_modifier\'"] +impl ImageTiling { + pub const DRM_FORMAT_MODIFIER_EXT: Self = ImageTiling(1000158000); +} +#[doc = "Generated from \'VK_EXT_image_drm_format_modifier\'"] +impl ImageAspectFlags { + pub const MEMORY_PLANE_0_EXT: Self = ImageAspectFlags(0b10000000); +} +#[doc = "Generated from \'VK_EXT_image_drm_format_modifier\'"] +impl ImageAspectFlags { + pub const MEMORY_PLANE_1_EXT: Self = ImageAspectFlags(0b100000000); +} +#[doc = "Generated from \'VK_EXT_image_drm_format_modifier\'"] +impl ImageAspectFlags { + pub const MEMORY_PLANE_2_EXT: Self = ImageAspectFlags(0b1000000000); +} +#[doc = "Generated from \'VK_EXT_image_drm_format_modifier\'"] +impl ImageAspectFlags { + pub const MEMORY_PLANE_3_EXT: Self = ImageAspectFlags(0b10000000000); +} 
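+// Illustrative sketch, not generated code: calling through the table defined
+// above. `device`, `image`, and `props` are assumptions supplied by the
+// caller; a real `props` must point at a properly initialized
+// ImageDrmFormatModifierPropertiesEXT with its sType set to
+// StructureType::IMAGE_DRM_FORMAT_MODIFIER_PROPERTIES_EXT.
+#[allow(dead_code)]
+unsafe fn query_drm_modifier_example(
+    fp: &ExtImageDrmFormatModifierFn,
+    device: Device,
+    image: Image,
+    props: *mut ImageDrmFormatModifierPropertiesEXT,
+) -> Result {
+    // The wrapper method simply forwards to the loaded pointer; if the
+    // extension was unavailable at load time, the panicking stub fires.
+    fp.get_image_drm_format_modifier_properties_ext(device, image, props)
+}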
+impl ExtExtension160Fn { + pub fn name() -> &'static ::std::ffi::CStr { + ::std::ffi::CStr::from_bytes_with_nul(b"VK_EXT_extension_160\0") + .expect("Wrong extension string") + } +} +pub struct ExtExtension160Fn {} +unsafe impl Send for ExtExtension160Fn {} +unsafe impl Sync for ExtExtension160Fn {} +impl ::std::clone::Clone for ExtExtension160Fn { + fn clone(&self) -> Self { + ExtExtension160Fn {} + } +} +impl ExtExtension160Fn { + pub fn load(mut _f: F) -> Self + where + F: FnMut(&::std::ffi::CStr) -> *const c_void, + { + ExtExtension160Fn {} + } +} +impl ExtValidationCacheFn { + pub fn name() -> &'static ::std::ffi::CStr { + ::std::ffi::CStr::from_bytes_with_nul(b"VK_EXT_validation_cache\0") + .expect("Wrong extension string") + } +} +#[allow(non_camel_case_types)] +pub type PFN_vkCreateValidationCacheEXT = extern "system" fn( + device: Device, + p_create_info: *const ValidationCacheCreateInfoEXT, + p_allocator: *const AllocationCallbacks, + p_validation_cache: *mut ValidationCacheEXT, +) -> Result; +#[allow(non_camel_case_types)] +pub type PFN_vkDestroyValidationCacheEXT = extern "system" fn( + device: Device, + validation_cache: ValidationCacheEXT, + p_allocator: *const AllocationCallbacks, +) -> c_void; +#[allow(non_camel_case_types)] +pub type PFN_vkMergeValidationCachesEXT = extern "system" fn( + device: Device, + dst_cache: ValidationCacheEXT, + src_cache_count: u32, + p_src_caches: *const ValidationCacheEXT, +) -> Result; +#[allow(non_camel_case_types)] +pub type PFN_vkGetValidationCacheDataEXT = extern "system" fn( + device: Device, + validation_cache: ValidationCacheEXT, + p_data_size: *mut usize, + p_data: *mut c_void, +) -> Result; +pub struct ExtValidationCacheFn { + pub create_validation_cache_ext: extern "system" fn( + device: Device, + p_create_info: *const ValidationCacheCreateInfoEXT, + p_allocator: *const AllocationCallbacks, + p_validation_cache: *mut ValidationCacheEXT, + ) -> Result, + pub destroy_validation_cache_ext: extern "system" fn( + device: Device, + validation_cache: ValidationCacheEXT, + p_allocator: *const AllocationCallbacks, + ) -> c_void, + pub merge_validation_caches_ext: extern "system" fn( + device: Device, + dst_cache: ValidationCacheEXT, + src_cache_count: u32, + p_src_caches: *const ValidationCacheEXT, + ) -> Result, + pub get_validation_cache_data_ext: extern "system" fn( + device: Device, + validation_cache: ValidationCacheEXT, + p_data_size: *mut usize, + p_data: *mut c_void, + ) -> Result, +} +unsafe impl Send for ExtValidationCacheFn {} +unsafe impl Sync for ExtValidationCacheFn {} +impl ::std::clone::Clone for ExtValidationCacheFn { + fn clone(&self) -> Self { + ExtValidationCacheFn { + create_validation_cache_ext: self.create_validation_cache_ext, + destroy_validation_cache_ext: self.destroy_validation_cache_ext, + merge_validation_caches_ext: self.merge_validation_caches_ext, + get_validation_cache_data_ext: self.get_validation_cache_data_ext, + } + } +} +impl ExtValidationCacheFn { + pub fn load(mut _f: F) -> Self + where + F: FnMut(&::std::ffi::CStr) -> *const c_void, + { + ExtValidationCacheFn { + create_validation_cache_ext: unsafe { + extern "system" fn create_validation_cache_ext( + _device: Device, + _p_create_info: *const ValidationCacheCreateInfoEXT, + _p_allocator: *const AllocationCallbacks, + _p_validation_cache: *mut ValidationCacheEXT, + ) -> Result { + panic!(concat!( + "Unable to load ", + stringify!(create_validation_cache_ext) + )) + } + let raw_name = stringify!(vkCreateValidationCacheEXT); + let cname = 
::std::ffi::CString::new(raw_name).unwrap(); + let val = _f(&cname); + if val.is_null() { + create_validation_cache_ext + } else { + ::std::mem::transmute(val) + } + }, + destroy_validation_cache_ext: unsafe { + extern "system" fn destroy_validation_cache_ext( + _device: Device, + _validation_cache: ValidationCacheEXT, + _p_allocator: *const AllocationCallbacks, + ) -> c_void { + panic!(concat!( + "Unable to load ", + stringify!(destroy_validation_cache_ext) + )) + } + let raw_name = stringify!(vkDestroyValidationCacheEXT); + let cname = ::std::ffi::CString::new(raw_name).unwrap(); + let val = _f(&cname); + if val.is_null() { + destroy_validation_cache_ext + } else { + ::std::mem::transmute(val) + } + }, + merge_validation_caches_ext: unsafe { + extern "system" fn merge_validation_caches_ext( + _device: Device, + _dst_cache: ValidationCacheEXT, + _src_cache_count: u32, + _p_src_caches: *const ValidationCacheEXT, + ) -> Result { + panic!(concat!( + "Unable to load ", + stringify!(merge_validation_caches_ext) + )) + } + let raw_name = stringify!(vkMergeValidationCachesEXT); + let cname = ::std::ffi::CString::new(raw_name).unwrap(); + let val = _f(&cname); + if val.is_null() { + merge_validation_caches_ext + } else { + ::std::mem::transmute(val) + } + }, + get_validation_cache_data_ext: unsafe { + extern "system" fn get_validation_cache_data_ext( + _device: Device, + _validation_cache: ValidationCacheEXT, + _p_data_size: *mut usize, + _p_data: *mut c_void, + ) -> Result { + panic!(concat!( + "Unable to load ", + stringify!(get_validation_cache_data_ext) + )) + } + let raw_name = stringify!(vkGetValidationCacheDataEXT); + let cname = ::std::ffi::CString::new(raw_name).unwrap(); + let val = _f(&cname); + if val.is_null() { + get_validation_cache_data_ext + } else { + ::std::mem::transmute(val) + } + }, + } + } + #[doc = ""] + pub unsafe fn create_validation_cache_ext( + &self, + device: Device, + p_create_info: *const ValidationCacheCreateInfoEXT, + p_allocator: *const AllocationCallbacks, + p_validation_cache: *mut ValidationCacheEXT, + ) -> Result { + (self.create_validation_cache_ext)(device, p_create_info, p_allocator, p_validation_cache) + } + #[doc = ""] + pub unsafe fn destroy_validation_cache_ext( + &self, + device: Device, + validation_cache: ValidationCacheEXT, + p_allocator: *const AllocationCallbacks, + ) -> c_void { + (self.destroy_validation_cache_ext)(device, validation_cache, p_allocator) + } + #[doc = ""] + pub unsafe fn merge_validation_caches_ext( + &self, + device: Device, + dst_cache: ValidationCacheEXT, + src_cache_count: u32, + p_src_caches: *const ValidationCacheEXT, + ) -> Result { + (self.merge_validation_caches_ext)(device, dst_cache, src_cache_count, p_src_caches) + } + #[doc = ""] + pub unsafe fn get_validation_cache_data_ext( + &self, + device: Device, + validation_cache: ValidationCacheEXT, + p_data_size: *mut usize, + p_data: *mut c_void, + ) -> Result { + (self.get_validation_cache_data_ext)(device, validation_cache, p_data_size, p_data) + } +} +#[doc = "Generated from \'VK_EXT_validation_cache\'"] +impl StructureType { + pub const VALIDATION_CACHE_CREATE_INFO_EXT: Self = StructureType(1000160000); +} +#[doc = "Generated from \'VK_EXT_validation_cache\'"] +impl StructureType { + pub const SHADER_MODULE_VALIDATION_CACHE_CREATE_INFO_EXT: Self = StructureType(1000160001); +} +#[doc = "Generated from \'VK_EXT_validation_cache\'"] +impl ObjectType { + pub const VALIDATION_CACHE_EXT: Self = ObjectType(1000160000); +} +impl ExtDescriptorIndexingFn { + pub fn name() -> 
&'static ::std::ffi::CStr { + ::std::ffi::CStr::from_bytes_with_nul(b"VK_EXT_descriptor_indexing\0") + .expect("Wrong extension string") + } +} +pub struct ExtDescriptorIndexingFn {} +unsafe impl Send for ExtDescriptorIndexingFn {} +unsafe impl Sync for ExtDescriptorIndexingFn {} +impl ::std::clone::Clone for ExtDescriptorIndexingFn { + fn clone(&self) -> Self { + ExtDescriptorIndexingFn {} + } +} +impl ExtDescriptorIndexingFn { + pub fn load(mut _f: F) -> Self + where + F: FnMut(&::std::ffi::CStr) -> *const c_void, + { + ExtDescriptorIndexingFn {} + } +} +#[doc = "Generated from \'VK_EXT_descriptor_indexing\'"] +impl StructureType { + pub const DESCRIPTOR_SET_LAYOUT_BINDING_FLAGS_CREATE_INFO_EXT: Self = StructureType(1000161000); +} +#[doc = "Generated from \'VK_EXT_descriptor_indexing\'"] +impl StructureType { + pub const PHYSICAL_DEVICE_DESCRIPTOR_INDEXING_FEATURES_EXT: Self = StructureType(1000161001); +} +#[doc = "Generated from \'VK_EXT_descriptor_indexing\'"] +impl StructureType { + pub const PHYSICAL_DEVICE_DESCRIPTOR_INDEXING_PROPERTIES_EXT: Self = StructureType(1000161002); +} +#[doc = "Generated from \'VK_EXT_descriptor_indexing\'"] +impl StructureType { + pub const DESCRIPTOR_SET_VARIABLE_DESCRIPTOR_COUNT_ALLOCATE_INFO_EXT: Self = + StructureType(1000161003); +} +#[doc = "Generated from \'VK_EXT_descriptor_indexing\'"] +impl StructureType { + pub const DESCRIPTOR_SET_VARIABLE_DESCRIPTOR_COUNT_LAYOUT_SUPPORT_EXT: Self = + StructureType(1000161004); +} +#[doc = "Generated from \'VK_EXT_descriptor_indexing\'"] +impl DescriptorPoolCreateFlags { + pub const UPDATE_AFTER_BIND_EXT: Self = DescriptorPoolCreateFlags(0b10); +} +#[doc = "Generated from \'VK_EXT_descriptor_indexing\'"] +impl DescriptorSetLayoutCreateFlags { + pub const UPDATE_AFTER_BIND_POOL_EXT: Self = DescriptorSetLayoutCreateFlags(0b10); +} +#[doc = "Generated from \'VK_EXT_descriptor_indexing\'"] +impl Result { + pub const ERROR_FRAGMENTATION_EXT: Self = Result(-1000161000); +} +impl ExtShaderViewportIndexLayerFn { + pub fn name() -> &'static ::std::ffi::CStr { + ::std::ffi::CStr::from_bytes_with_nul(b"VK_EXT_shader_viewport_index_layer\0") + .expect("Wrong extension string") + } +} +pub struct ExtShaderViewportIndexLayerFn {} +unsafe impl Send for ExtShaderViewportIndexLayerFn {} +unsafe impl Sync for ExtShaderViewportIndexLayerFn {} +impl ::std::clone::Clone for ExtShaderViewportIndexLayerFn { + fn clone(&self) -> Self { + ExtShaderViewportIndexLayerFn {} + } +} +impl ExtShaderViewportIndexLayerFn { + pub fn load(mut _f: F) -> Self + where + F: FnMut(&::std::ffi::CStr) -> *const c_void, + { + ExtShaderViewportIndexLayerFn {} + } +} +impl NvExtension164Fn { + pub fn name() -> &'static ::std::ffi::CStr { + ::std::ffi::CStr::from_bytes_with_nul(b"VK_NV_extension_164\0") + .expect("Wrong extension string") + } +} +pub struct NvExtension164Fn {} +unsafe impl Send for NvExtension164Fn {} +unsafe impl Sync for NvExtension164Fn {} +impl ::std::clone::Clone for NvExtension164Fn { + fn clone(&self) -> Self { + NvExtension164Fn {} + } +} +impl NvExtension164Fn { + pub fn load(mut _f: F) -> Self + where + F: FnMut(&::std::ffi::CStr) -> *const c_void, + { + NvExtension164Fn {} + } +} +impl NvShadingRateImageFn { + pub fn name() -> &'static ::std::ffi::CStr { + ::std::ffi::CStr::from_bytes_with_nul(b"VK_NV_shading_rate_image\0") + .expect("Wrong extension string") + } +} +#[allow(non_camel_case_types)] +pub type PFN_vkCmdBindShadingRateImageNV = extern "system" fn( + command_buffer: CommandBuffer, + image_view: ImageView, + 
image_layout: ImageLayout, +) -> c_void; +#[allow(non_camel_case_types)] +pub type PFN_vkCmdSetViewportShadingRatePaletteNV = extern "system" fn( + command_buffer: CommandBuffer, + first_viewport: u32, + viewport_count: u32, + p_shading_rate_palettes: *const ShadingRatePaletteNV, +) -> c_void; +#[allow(non_camel_case_types)] +pub type PFN_vkCmdSetCoarseSampleOrderNV = extern "system" fn( + command_buffer: CommandBuffer, + sample_order_type: CoarseSampleOrderTypeNV, + custom_sample_order_count: u32, + p_custom_sample_orders: *const CoarseSampleOrderCustomNV, +) -> c_void; +pub struct NvShadingRateImageFn { + pub cmd_bind_shading_rate_image_nv: extern "system" fn( + command_buffer: CommandBuffer, + image_view: ImageView, + image_layout: ImageLayout, + ) -> c_void, + pub cmd_set_viewport_shading_rate_palette_nv: extern "system" fn( + command_buffer: CommandBuffer, + first_viewport: u32, + viewport_count: u32, + p_shading_rate_palettes: *const ShadingRatePaletteNV, + ) -> c_void, + pub cmd_set_coarse_sample_order_nv: extern "system" fn( + command_buffer: CommandBuffer, + sample_order_type: CoarseSampleOrderTypeNV, + custom_sample_order_count: u32, + p_custom_sample_orders: *const CoarseSampleOrderCustomNV, + ) -> c_void, +} +unsafe impl Send for NvShadingRateImageFn {} +unsafe impl Sync for NvShadingRateImageFn {} +impl ::std::clone::Clone for NvShadingRateImageFn { + fn clone(&self) -> Self { + NvShadingRateImageFn { + cmd_bind_shading_rate_image_nv: self.cmd_bind_shading_rate_image_nv, + cmd_set_viewport_shading_rate_palette_nv: self.cmd_set_viewport_shading_rate_palette_nv, + cmd_set_coarse_sample_order_nv: self.cmd_set_coarse_sample_order_nv, + } + } +} +impl NvShadingRateImageFn { + pub fn load(mut _f: F) -> Self + where + F: FnMut(&::std::ffi::CStr) -> *const c_void, + { + NvShadingRateImageFn { + cmd_bind_shading_rate_image_nv: unsafe { + extern "system" fn cmd_bind_shading_rate_image_nv( + _command_buffer: CommandBuffer, + _image_view: ImageView, + _image_layout: ImageLayout, + ) -> c_void { + panic!(concat!( + "Unable to load ", + stringify!(cmd_bind_shading_rate_image_nv) + )) + } + let raw_name = stringify!(vkCmdBindShadingRateImageNV); + let cname = ::std::ffi::CString::new(raw_name).unwrap(); + let val = _f(&cname); + if val.is_null() { + cmd_bind_shading_rate_image_nv + } else { + ::std::mem::transmute(val) + } + }, + cmd_set_viewport_shading_rate_palette_nv: unsafe { + extern "system" fn cmd_set_viewport_shading_rate_palette_nv( + _command_buffer: CommandBuffer, + _first_viewport: u32, + _viewport_count: u32, + _p_shading_rate_palettes: *const ShadingRatePaletteNV, + ) -> c_void { + panic!(concat!( + "Unable to load ", + stringify!(cmd_set_viewport_shading_rate_palette_nv) + )) + } + let raw_name = stringify!(vkCmdSetViewportShadingRatePaletteNV); + let cname = ::std::ffi::CString::new(raw_name).unwrap(); + let val = _f(&cname); + if val.is_null() { + cmd_set_viewport_shading_rate_palette_nv + } else { + ::std::mem::transmute(val) + } + }, + cmd_set_coarse_sample_order_nv: unsafe { + extern "system" fn cmd_set_coarse_sample_order_nv( + _command_buffer: CommandBuffer, + _sample_order_type: CoarseSampleOrderTypeNV, + _custom_sample_order_count: u32, + _p_custom_sample_orders: *const CoarseSampleOrderCustomNV, + ) -> c_void { + panic!(concat!( + "Unable to load ", + stringify!(cmd_set_coarse_sample_order_nv) + )) + } + let raw_name = stringify!(vkCmdSetCoarseSampleOrderNV); + let cname = ::std::ffi::CString::new(raw_name).unwrap(); + let val = _f(&cname); + if val.is_null() { + 
cmd_set_coarse_sample_order_nv + } else { + ::std::mem::transmute(val) + } + }, + } + } + #[doc = ""] + pub unsafe fn cmd_bind_shading_rate_image_nv( + &self, + command_buffer: CommandBuffer, + image_view: ImageView, + image_layout: ImageLayout, + ) -> c_void { + (self.cmd_bind_shading_rate_image_nv)(command_buffer, image_view, image_layout) + } + #[doc = ""] + pub unsafe fn cmd_set_viewport_shading_rate_palette_nv( + &self, + command_buffer: CommandBuffer, + first_viewport: u32, + viewport_count: u32, + p_shading_rate_palettes: *const ShadingRatePaletteNV, + ) -> c_void { + (self.cmd_set_viewport_shading_rate_palette_nv)( + command_buffer, + first_viewport, + viewport_count, + p_shading_rate_palettes, + ) + } + #[doc = ""] + pub unsafe fn cmd_set_coarse_sample_order_nv( + &self, + command_buffer: CommandBuffer, + sample_order_type: CoarseSampleOrderTypeNV, + custom_sample_order_count: u32, + p_custom_sample_orders: *const CoarseSampleOrderCustomNV, + ) -> c_void { + (self.cmd_set_coarse_sample_order_nv)( + command_buffer, + sample_order_type, + custom_sample_order_count, + p_custom_sample_orders, + ) + } +} +#[doc = "Generated from \'VK_NV_shading_rate_image\'"] +impl StructureType { + pub const PIPELINE_VIEWPORT_SHADING_RATE_IMAGE_STATE_CREATE_INFO_NV: Self = + StructureType(1000164000); +} +#[doc = "Generated from \'VK_NV_shading_rate_image\'"] +impl StructureType { + pub const PHYSICAL_DEVICE_SHADING_RATE_IMAGE_FEATURES_NV: Self = StructureType(1000164001); +} +#[doc = "Generated from \'VK_NV_shading_rate_image\'"] +impl StructureType { + pub const PHYSICAL_DEVICE_SHADING_RATE_IMAGE_PROPERTIES_NV: Self = StructureType(1000164002); +} +#[doc = "Generated from \'VK_NV_shading_rate_image\'"] +impl ImageLayout { + pub const SHADING_RATE_OPTIMAL_NV: Self = ImageLayout(1000164003); +} +#[doc = "Generated from \'VK_NV_shading_rate_image\'"] +impl DynamicState { + pub const VIEWPORT_SHADING_RATE_PALETTE_NV: Self = DynamicState(1000164004); +} +#[doc = "Generated from \'VK_NV_shading_rate_image\'"] +impl AccessFlags { + pub const SHADING_RATE_IMAGE_READ_NV: Self = AccessFlags(0b100000000000000000000000); +} +#[doc = "Generated from \'VK_NV_shading_rate_image\'"] +impl ImageUsageFlags { + pub const SHADING_RATE_IMAGE_NV: Self = ImageUsageFlags(0b100000000); +} +#[doc = "Generated from \'VK_NV_shading_rate_image\'"] +impl PipelineStageFlags { + pub const SHADING_RATE_IMAGE_NV: Self = PipelineStageFlags(0b10000000000000000000000); +} +#[doc = "Generated from \'VK_NV_shading_rate_image\'"] +impl StructureType { + pub const PIPELINE_VIEWPORT_COARSE_SAMPLE_ORDER_STATE_CREATE_INFO_NV: Self = + StructureType(1000164005); +} +#[doc = "Generated from \'VK_NV_shading_rate_image\'"] +impl DynamicState { + pub const VIEWPORT_COARSE_SAMPLE_ORDER_NV: Self = DynamicState(1000164006); +} +impl NvRayTracingFn { + pub fn name() -> &'static ::std::ffi::CStr { + ::std::ffi::CStr::from_bytes_with_nul(b"VK_NV_ray_tracing\0") + .expect("Wrong extension string") + } +} +#[allow(non_camel_case_types)] +pub type PFN_vkCreateAccelerationStructureNV = extern "system" fn( + device: Device, + p_create_info: *const AccelerationStructureCreateInfoNV, + p_allocator: *const AllocationCallbacks, + p_acceleration_structure: *mut AccelerationStructureNV, +) -> Result; +#[allow(non_camel_case_types)] +pub type PFN_vkDestroyAccelerationStructureNV = extern "system" fn( + device: Device, + acceleration_structure: AccelerationStructureNV, + p_allocator: *const AllocationCallbacks, +) -> c_void; +#[allow(non_camel_case_types)] +pub type 
PFN_vkGetAccelerationStructureMemoryRequirementsNV = extern "system" fn( + device: Device, + p_info: *const AccelerationStructureMemoryRequirementsInfoNV, + p_memory_requirements: *mut MemoryRequirements2KHR, +) -> c_void; +#[allow(non_camel_case_types)] +pub type PFN_vkBindAccelerationStructureMemoryNV = extern "system" fn( + device: Device, + bind_info_count: u32, + p_bind_infos: *const BindAccelerationStructureMemoryInfoNV, +) -> Result; +#[allow(non_camel_case_types)] +pub type PFN_vkCmdBuildAccelerationStructureNV = extern "system" fn( + command_buffer: CommandBuffer, + p_info: *const AccelerationStructureInfoNV, + instance_data: Buffer, + instance_offset: DeviceSize, + update: Bool32, + dst: AccelerationStructureNV, + src: AccelerationStructureNV, + scratch: Buffer, + scratch_offset: DeviceSize, +) -> c_void; +#[allow(non_camel_case_types)] +pub type PFN_vkCmdCopyAccelerationStructureNV = extern "system" fn( + command_buffer: CommandBuffer, + dst: AccelerationStructureNV, + src: AccelerationStructureNV, + mode: CopyAccelerationStructureModeNV, +) -> c_void; +#[allow(non_camel_case_types)] +pub type PFN_vkCmdTraceRaysNV = extern "system" fn( + command_buffer: CommandBuffer, + raygen_shader_binding_table_buffer: Buffer, + raygen_shader_binding_offset: DeviceSize, + miss_shader_binding_table_buffer: Buffer, + miss_shader_binding_offset: DeviceSize, + miss_shader_binding_stride: DeviceSize, + hit_shader_binding_table_buffer: Buffer, + hit_shader_binding_offset: DeviceSize, + hit_shader_binding_stride: DeviceSize, + callable_shader_binding_table_buffer: Buffer, + callable_shader_binding_offset: DeviceSize, + callable_shader_binding_stride: DeviceSize, + width: u32, + height: u32, + depth: u32, +) -> c_void; +#[allow(non_camel_case_types)] +pub type PFN_vkCreateRayTracingPipelinesNV = extern "system" fn( + device: Device, + pipeline_cache: PipelineCache, + create_info_count: u32, + p_create_infos: *const RayTracingPipelineCreateInfoNV, + p_allocator: *const AllocationCallbacks, + p_pipelines: *mut Pipeline, +) -> Result; +#[allow(non_camel_case_types)] +pub type PFN_vkGetRayTracingShaderGroupHandlesNV = extern "system" fn( + device: Device, + pipeline: Pipeline, + first_group: u32, + group_count: u32, + data_size: usize, + p_data: *mut c_void, +) -> Result; +#[allow(non_camel_case_types)] +pub type PFN_vkGetAccelerationStructureHandleNV = extern "system" fn( + device: Device, + acceleration_structure: AccelerationStructureNV, + data_size: usize, + p_data: *mut c_void, +) -> Result; +#[allow(non_camel_case_types)] +pub type PFN_vkCmdWriteAccelerationStructuresPropertiesNV = extern "system" fn( + command_buffer: CommandBuffer, + acceleration_structure_count: u32, + p_acceleration_structures: *const AccelerationStructureNV, + query_type: QueryType, + query_pool: QueryPool, + first_query: u32, +) -> c_void; +#[allow(non_camel_case_types)] +pub type PFN_vkCompileDeferredNV = + extern "system" fn(device: Device, pipeline: Pipeline, shader: u32) -> Result; +pub struct NvRayTracingFn { + pub create_acceleration_structure_nv: extern "system" fn( + device: Device, + p_create_info: *const AccelerationStructureCreateInfoNV, + p_allocator: *const AllocationCallbacks, + p_acceleration_structure: *mut AccelerationStructureNV, + ) -> Result, + pub destroy_acceleration_structure_nv: extern "system" fn( + device: Device, + acceleration_structure: AccelerationStructureNV, + p_allocator: *const AllocationCallbacks, + ) -> c_void, + pub get_acceleration_structure_memory_requirements_nv: extern "system" fn( + 
device: Device, + p_info: *const AccelerationStructureMemoryRequirementsInfoNV, + p_memory_requirements: *mut MemoryRequirements2KHR, + ) -> c_void, + pub bind_acceleration_structure_memory_nv: extern "system" fn( + device: Device, + bind_info_count: u32, + p_bind_infos: *const BindAccelerationStructureMemoryInfoNV, + ) -> Result, + pub cmd_build_acceleration_structure_nv: extern "system" fn( + command_buffer: CommandBuffer, + p_info: *const AccelerationStructureInfoNV, + instance_data: Buffer, + instance_offset: DeviceSize, + update: Bool32, + dst: AccelerationStructureNV, + src: AccelerationStructureNV, + scratch: Buffer, + scratch_offset: DeviceSize, + ) -> c_void, + pub cmd_copy_acceleration_structure_nv: extern "system" fn( + command_buffer: CommandBuffer, + dst: AccelerationStructureNV, + src: AccelerationStructureNV, + mode: CopyAccelerationStructureModeNV, + ) -> c_void, + pub cmd_trace_rays_nv: extern "system" fn( + command_buffer: CommandBuffer, + raygen_shader_binding_table_buffer: Buffer, + raygen_shader_binding_offset: DeviceSize, + miss_shader_binding_table_buffer: Buffer, + miss_shader_binding_offset: DeviceSize, + miss_shader_binding_stride: DeviceSize, + hit_shader_binding_table_buffer: Buffer, + hit_shader_binding_offset: DeviceSize, + hit_shader_binding_stride: DeviceSize, + callable_shader_binding_table_buffer: Buffer, + callable_shader_binding_offset: DeviceSize, + callable_shader_binding_stride: DeviceSize, + width: u32, + height: u32, + depth: u32, + ) -> c_void, + pub create_ray_tracing_pipelines_nv: extern "system" fn( + device: Device, + pipeline_cache: PipelineCache, + create_info_count: u32, + p_create_infos: *const RayTracingPipelineCreateInfoNV, + p_allocator: *const AllocationCallbacks, + p_pipelines: *mut Pipeline, + ) -> Result, + pub get_ray_tracing_shader_group_handles_nv: extern "system" fn( + device: Device, + pipeline: Pipeline, + first_group: u32, + group_count: u32, + data_size: usize, + p_data: *mut c_void, + ) -> Result, + pub get_acceleration_structure_handle_nv: extern "system" fn( + device: Device, + acceleration_structure: AccelerationStructureNV, + data_size: usize, + p_data: *mut c_void, + ) -> Result, + pub cmd_write_acceleration_structures_properties_nv: extern "system" fn( + command_buffer: CommandBuffer, + acceleration_structure_count: u32, + p_acceleration_structures: *const AccelerationStructureNV, + query_type: QueryType, + query_pool: QueryPool, + first_query: u32, + ) -> c_void, + pub compile_deferred_nv: + extern "system" fn(device: Device, pipeline: Pipeline, shader: u32) -> Result, +} +unsafe impl Send for NvRayTracingFn {} +unsafe impl Sync for NvRayTracingFn {} +impl ::std::clone::Clone for NvRayTracingFn { + fn clone(&self) -> Self { + NvRayTracingFn { + create_acceleration_structure_nv: self.create_acceleration_structure_nv, + destroy_acceleration_structure_nv: self.destroy_acceleration_structure_nv, + get_acceleration_structure_memory_requirements_nv: self + .get_acceleration_structure_memory_requirements_nv, + bind_acceleration_structure_memory_nv: self.bind_acceleration_structure_memory_nv, + cmd_build_acceleration_structure_nv: self.cmd_build_acceleration_structure_nv, + cmd_copy_acceleration_structure_nv: self.cmd_copy_acceleration_structure_nv, + cmd_trace_rays_nv: self.cmd_trace_rays_nv, + create_ray_tracing_pipelines_nv: self.create_ray_tracing_pipelines_nv, + get_ray_tracing_shader_group_handles_nv: self.get_ray_tracing_shader_group_handles_nv, + get_acceleration_structure_handle_nv: 
self.get_acceleration_structure_handle_nv, + cmd_write_acceleration_structures_properties_nv: self + .cmd_write_acceleration_structures_properties_nv, + compile_deferred_nv: self.compile_deferred_nv, + } + } +} +impl NvRayTracingFn { + pub fn load(mut _f: F) -> Self + where + F: FnMut(&::std::ffi::CStr) -> *const c_void, + { + NvRayTracingFn { + create_acceleration_structure_nv: unsafe { + extern "system" fn create_acceleration_structure_nv( + _device: Device, + _p_create_info: *const AccelerationStructureCreateInfoNV, + _p_allocator: *const AllocationCallbacks, + _p_acceleration_structure: *mut AccelerationStructureNV, + ) -> Result { + panic!(concat!( + "Unable to load ", + stringify!(create_acceleration_structure_nv) + )) + } + let raw_name = stringify!(vkCreateAccelerationStructureNV); + let cname = ::std::ffi::CString::new(raw_name).unwrap(); + let val = _f(&cname); + if val.is_null() { + create_acceleration_structure_nv + } else { + ::std::mem::transmute(val) + } + }, + destroy_acceleration_structure_nv: unsafe { + extern "system" fn destroy_acceleration_structure_nv( + _device: Device, + _acceleration_structure: AccelerationStructureNV, + _p_allocator: *const AllocationCallbacks, + ) -> c_void { + panic!(concat!( + "Unable to load ", + stringify!(destroy_acceleration_structure_nv) + )) + } + let raw_name = stringify!(vkDestroyAccelerationStructureNV); + let cname = ::std::ffi::CString::new(raw_name).unwrap(); + let val = _f(&cname); + if val.is_null() { + destroy_acceleration_structure_nv + } else { + ::std::mem::transmute(val) + } + }, + get_acceleration_structure_memory_requirements_nv: unsafe { + extern "system" fn get_acceleration_structure_memory_requirements_nv( + _device: Device, + _p_info: *const AccelerationStructureMemoryRequirementsInfoNV, + _p_memory_requirements: *mut MemoryRequirements2KHR, + ) -> c_void { + panic!(concat!( + "Unable to load ", + stringify!(get_acceleration_structure_memory_requirements_nv) + )) + } + let raw_name = stringify!(vkGetAccelerationStructureMemoryRequirementsNV); + let cname = ::std::ffi::CString::new(raw_name).unwrap(); + let val = _f(&cname); + if val.is_null() { + get_acceleration_structure_memory_requirements_nv + } else { + ::std::mem::transmute(val) + } + }, + bind_acceleration_structure_memory_nv: unsafe { + extern "system" fn bind_acceleration_structure_memory_nv( + _device: Device, + _bind_info_count: u32, + _p_bind_infos: *const BindAccelerationStructureMemoryInfoNV, + ) -> Result { + panic!(concat!( + "Unable to load ", + stringify!(bind_acceleration_structure_memory_nv) + )) + } + let raw_name = stringify!(vkBindAccelerationStructureMemoryNV); + let cname = ::std::ffi::CString::new(raw_name).unwrap(); + let val = _f(&cname); + if val.is_null() { + bind_acceleration_structure_memory_nv + } else { + ::std::mem::transmute(val) + } + }, + cmd_build_acceleration_structure_nv: unsafe { + extern "system" fn cmd_build_acceleration_structure_nv( + _command_buffer: CommandBuffer, + _p_info: *const AccelerationStructureInfoNV, + _instance_data: Buffer, + _instance_offset: DeviceSize, + _update: Bool32, + _dst: AccelerationStructureNV, + _src: AccelerationStructureNV, + _scratch: Buffer, + _scratch_offset: DeviceSize, + ) -> c_void { + panic!(concat!( + "Unable to load ", + stringify!(cmd_build_acceleration_structure_nv) + )) + } + let raw_name = stringify!(vkCmdBuildAccelerationStructureNV); + let cname = ::std::ffi::CString::new(raw_name).unwrap(); + let val = _f(&cname); + if val.is_null() { + cmd_build_acceleration_structure_nv + } else 
{ + ::std::mem::transmute(val) + } + }, + cmd_copy_acceleration_structure_nv: unsafe { + extern "system" fn cmd_copy_acceleration_structure_nv( + _command_buffer: CommandBuffer, + _dst: AccelerationStructureNV, + _src: AccelerationStructureNV, + _mode: CopyAccelerationStructureModeNV, + ) -> c_void { + panic!(concat!( + "Unable to load ", + stringify!(cmd_copy_acceleration_structure_nv) + )) + } + let raw_name = stringify!(vkCmdCopyAccelerationStructureNV); + let cname = ::std::ffi::CString::new(raw_name).unwrap(); + let val = _f(&cname); + if val.is_null() { + cmd_copy_acceleration_structure_nv + } else { + ::std::mem::transmute(val) + } + }, + cmd_trace_rays_nv: unsafe { + extern "system" fn cmd_trace_rays_nv( + _command_buffer: CommandBuffer, + _raygen_shader_binding_table_buffer: Buffer, + _raygen_shader_binding_offset: DeviceSize, + _miss_shader_binding_table_buffer: Buffer, + _miss_shader_binding_offset: DeviceSize, + _miss_shader_binding_stride: DeviceSize, + _hit_shader_binding_table_buffer: Buffer, + _hit_shader_binding_offset: DeviceSize, + _hit_shader_binding_stride: DeviceSize, + _callable_shader_binding_table_buffer: Buffer, + _callable_shader_binding_offset: DeviceSize, + _callable_shader_binding_stride: DeviceSize, + _width: u32, + _height: u32, + _depth: u32, + ) -> c_void { + panic!(concat!("Unable to load ", stringify!(cmd_trace_rays_nv))) + } + let raw_name = stringify!(vkCmdTraceRaysNV); + let cname = ::std::ffi::CString::new(raw_name).unwrap(); + let val = _f(&cname); + if val.is_null() { + cmd_trace_rays_nv + } else { + ::std::mem::transmute(val) + } + }, + create_ray_tracing_pipelines_nv: unsafe { + extern "system" fn create_ray_tracing_pipelines_nv( + _device: Device, + _pipeline_cache: PipelineCache, + _create_info_count: u32, + _p_create_infos: *const RayTracingPipelineCreateInfoNV, + _p_allocator: *const AllocationCallbacks, + _p_pipelines: *mut Pipeline, + ) -> Result { + panic!(concat!( + "Unable to load ", + stringify!(create_ray_tracing_pipelines_nv) + )) + } + let raw_name = stringify!(vkCreateRayTracingPipelinesNV); + let cname = ::std::ffi::CString::new(raw_name).unwrap(); + let val = _f(&cname); + if val.is_null() { + create_ray_tracing_pipelines_nv + } else { + ::std::mem::transmute(val) + } + }, + get_ray_tracing_shader_group_handles_nv: unsafe { + extern "system" fn get_ray_tracing_shader_group_handles_nv( + _device: Device, + _pipeline: Pipeline, + _first_group: u32, + _group_count: u32, + _data_size: usize, + _p_data: *mut c_void, + ) -> Result { + panic!(concat!( + "Unable to load ", + stringify!(get_ray_tracing_shader_group_handles_nv) + )) + } + let raw_name = stringify!(vkGetRayTracingShaderGroupHandlesNV); + let cname = ::std::ffi::CString::new(raw_name).unwrap(); + let val = _f(&cname); + if val.is_null() { + get_ray_tracing_shader_group_handles_nv + } else { + ::std::mem::transmute(val) + } + }, + get_acceleration_structure_handle_nv: unsafe { + extern "system" fn get_acceleration_structure_handle_nv( + _device: Device, + _acceleration_structure: AccelerationStructureNV, + _data_size: usize, + _p_data: *mut c_void, + ) -> Result { + panic!(concat!( + "Unable to load ", + stringify!(get_acceleration_structure_handle_nv) + )) + } + let raw_name = stringify!(vkGetAccelerationStructureHandleNV); + let cname = ::std::ffi::CString::new(raw_name).unwrap(); + let val = _f(&cname); + if val.is_null() { + get_acceleration_structure_handle_nv + } else { + ::std::mem::transmute(val) + } + }, + cmd_write_acceleration_structures_properties_nv: unsafe { + 
extern "system" fn cmd_write_acceleration_structures_properties_nv( + _command_buffer: CommandBuffer, + _acceleration_structure_count: u32, + _p_acceleration_structures: *const AccelerationStructureNV, + _query_type: QueryType, + _query_pool: QueryPool, + _first_query: u32, + ) -> c_void { + panic!(concat!( + "Unable to load ", + stringify!(cmd_write_acceleration_structures_properties_nv) + )) + } + let raw_name = stringify!(vkCmdWriteAccelerationStructuresPropertiesNV); + let cname = ::std::ffi::CString::new(raw_name).unwrap(); + let val = _f(&cname); + if val.is_null() { + cmd_write_acceleration_structures_properties_nv + } else { + ::std::mem::transmute(val) + } + }, + compile_deferred_nv: unsafe { + extern "system" fn compile_deferred_nv( + _device: Device, + _pipeline: Pipeline, + _shader: u32, + ) -> Result { + panic!(concat!("Unable to load ", stringify!(compile_deferred_nv))) + } + let raw_name = stringify!(vkCompileDeferredNV); + let cname = ::std::ffi::CString::new(raw_name).unwrap(); + let val = _f(&cname); + if val.is_null() { + compile_deferred_nv + } else { + ::std::mem::transmute(val) + } + }, + } + } + #[doc = ""] + pub unsafe fn create_acceleration_structure_nv( + &self, + device: Device, + p_create_info: *const AccelerationStructureCreateInfoNV, + p_allocator: *const AllocationCallbacks, + p_acceleration_structure: *mut AccelerationStructureNV, + ) -> Result { + (self.create_acceleration_structure_nv)( + device, + p_create_info, + p_allocator, + p_acceleration_structure, + ) + } + #[doc = ""] + pub unsafe fn destroy_acceleration_structure_nv( + &self, + device: Device, + acceleration_structure: AccelerationStructureNV, + p_allocator: *const AllocationCallbacks, + ) -> c_void { + (self.destroy_acceleration_structure_nv)(device, acceleration_structure, p_allocator) + } + #[doc = ""] + pub unsafe fn get_acceleration_structure_memory_requirements_nv( + &self, + device: Device, + p_info: *const AccelerationStructureMemoryRequirementsInfoNV, + p_memory_requirements: *mut MemoryRequirements2KHR, + ) -> c_void { + (self.get_acceleration_structure_memory_requirements_nv)( + device, + p_info, + p_memory_requirements, + ) + } + #[doc = ""] + pub unsafe fn bind_acceleration_structure_memory_nv( + &self, + device: Device, + bind_info_count: u32, + p_bind_infos: *const BindAccelerationStructureMemoryInfoNV, + ) -> Result { + (self.bind_acceleration_structure_memory_nv)(device, bind_info_count, p_bind_infos) + } + #[doc = ""] + pub unsafe fn cmd_build_acceleration_structure_nv( + &self, + command_buffer: CommandBuffer, + p_info: *const AccelerationStructureInfoNV, + instance_data: Buffer, + instance_offset: DeviceSize, + update: Bool32, + dst: AccelerationStructureNV, + src: AccelerationStructureNV, + scratch: Buffer, + scratch_offset: DeviceSize, + ) -> c_void { + (self.cmd_build_acceleration_structure_nv)( + command_buffer, + p_info, + instance_data, + instance_offset, + update, + dst, + src, + scratch, + scratch_offset, + ) + } + #[doc = ""] + pub unsafe fn cmd_copy_acceleration_structure_nv( + &self, + command_buffer: CommandBuffer, + dst: AccelerationStructureNV, + src: AccelerationStructureNV, + mode: CopyAccelerationStructureModeNV, + ) -> c_void { + (self.cmd_copy_acceleration_structure_nv)(command_buffer, dst, src, mode) + } + #[doc = ""] + pub unsafe fn cmd_trace_rays_nv( + &self, + command_buffer: CommandBuffer, + raygen_shader_binding_table_buffer: Buffer, + raygen_shader_binding_offset: DeviceSize, + miss_shader_binding_table_buffer: Buffer, + miss_shader_binding_offset: 
DeviceSize, + miss_shader_binding_stride: DeviceSize, + hit_shader_binding_table_buffer: Buffer, + hit_shader_binding_offset: DeviceSize, + hit_shader_binding_stride: DeviceSize, + callable_shader_binding_table_buffer: Buffer, + callable_shader_binding_offset: DeviceSize, + callable_shader_binding_stride: DeviceSize, + width: u32, + height: u32, + depth: u32, + ) -> c_void { + (self.cmd_trace_rays_nv)( + command_buffer, + raygen_shader_binding_table_buffer, + raygen_shader_binding_offset, + miss_shader_binding_table_buffer, + miss_shader_binding_offset, + miss_shader_binding_stride, + hit_shader_binding_table_buffer, + hit_shader_binding_offset, + hit_shader_binding_stride, + callable_shader_binding_table_buffer, + callable_shader_binding_offset, + callable_shader_binding_stride, + width, + height, + depth, + ) + } + #[doc = ""] + pub unsafe fn create_ray_tracing_pipelines_nv( + &self, + device: Device, + pipeline_cache: PipelineCache, + create_info_count: u32, + p_create_infos: *const RayTracingPipelineCreateInfoNV, + p_allocator: *const AllocationCallbacks, + p_pipelines: *mut Pipeline, + ) -> Result { + (self.create_ray_tracing_pipelines_nv)( + device, + pipeline_cache, + create_info_count, + p_create_infos, + p_allocator, + p_pipelines, + ) + } + #[doc = ""] + pub unsafe fn get_ray_tracing_shader_group_handles_nv( + &self, + device: Device, + pipeline: Pipeline, + first_group: u32, + group_count: u32, + data_size: usize, + p_data: *mut c_void, + ) -> Result { + (self.get_ray_tracing_shader_group_handles_nv)( + device, + pipeline, + first_group, + group_count, + data_size, + p_data, + ) + } + #[doc = ""] + pub unsafe fn get_acceleration_structure_handle_nv( + &self, + device: Device, + acceleration_structure: AccelerationStructureNV, + data_size: usize, + p_data: *mut c_void, + ) -> Result { + (self.get_acceleration_structure_handle_nv)( + device, + acceleration_structure, + data_size, + p_data, + ) + } + #[doc = ""] + pub unsafe fn cmd_write_acceleration_structures_properties_nv( + &self, + command_buffer: CommandBuffer, + acceleration_structure_count: u32, + p_acceleration_structures: *const AccelerationStructureNV, + query_type: QueryType, + query_pool: QueryPool, + first_query: u32, + ) -> c_void { + (self.cmd_write_acceleration_structures_properties_nv)( + command_buffer, + acceleration_structure_count, + p_acceleration_structures, + query_type, + query_pool, + first_query, + ) + } + #[doc = ""] + pub unsafe fn compile_deferred_nv( + &self, + device: Device, + pipeline: Pipeline, + shader: u32, + ) -> Result { + (self.compile_deferred_nv)(device, pipeline, shader) + } +} +#[doc = "Generated from \'VK_NV_ray_tracing\'"] +impl StructureType { + pub const RAY_TRACING_PIPELINE_CREATE_INFO_NV: Self = StructureType(1000165000); +} +#[doc = "Generated from \'VK_NV_ray_tracing\'"] +impl StructureType { + pub const ACCELERATION_STRUCTURE_CREATE_INFO_NV: Self = StructureType(1000165001); +} +#[doc = "Generated from \'VK_NV_ray_tracing\'"] +impl StructureType { + pub const GEOMETRY_NV: Self = StructureType(1000165003); +} +#[doc = "Generated from \'VK_NV_ray_tracing\'"] +impl StructureType { + pub const GEOMETRY_TRIANGLES_NV: Self = StructureType(1000165004); +} +#[doc = "Generated from \'VK_NV_ray_tracing\'"] +impl StructureType { + pub const GEOMETRY_AABB_NV: Self = StructureType(1000165005); +} +#[doc = "Generated from \'VK_NV_ray_tracing\'"] +impl StructureType { + pub const BIND_ACCELERATION_STRUCTURE_MEMORY_INFO_NV: Self = StructureType(1000165006); +} +#[doc = "Generated from 
\'VK_NV_ray_tracing\'"] +impl StructureType { + pub const WRITE_DESCRIPTOR_SET_ACCELERATION_STRUCTURE_NV: Self = StructureType(1000165007); +} +#[doc = "Generated from \'VK_NV_ray_tracing\'"] +impl StructureType { + pub const ACCELERATION_STRUCTURE_MEMORY_REQUIREMENTS_INFO_NV: Self = StructureType(1000165008); +} +#[doc = "Generated from \'VK_NV_ray_tracing\'"] +impl StructureType { + pub const PHYSICAL_DEVICE_RAY_TRACING_PROPERTIES_NV: Self = StructureType(1000165009); +} +#[doc = "Generated from \'VK_NV_ray_tracing\'"] +impl StructureType { + pub const RAY_TRACING_SHADER_GROUP_CREATE_INFO_NV: Self = StructureType(1000165011); +} +#[doc = "Generated from \'VK_NV_ray_tracing\'"] +impl StructureType { + pub const ACCELERATION_STRUCTURE_INFO_NV: Self = StructureType(1000165012); +} +#[doc = "Generated from \'VK_NV_ray_tracing\'"] +impl ShaderStageFlags { + pub const RAYGEN_NV: Self = ShaderStageFlags(0b100000000); +} +#[doc = "Generated from \'VK_NV_ray_tracing\'"] +impl ShaderStageFlags { + pub const ANY_HIT_NV: Self = ShaderStageFlags(0b1000000000); +} +#[doc = "Generated from \'VK_NV_ray_tracing\'"] +impl ShaderStageFlags { + pub const CLOSEST_HIT_NV: Self = ShaderStageFlags(0b10000000000); +} +#[doc = "Generated from \'VK_NV_ray_tracing\'"] +impl ShaderStageFlags { + pub const MISS_NV: Self = ShaderStageFlags(0b100000000000); +} +#[doc = "Generated from \'VK_NV_ray_tracing\'"] +impl ShaderStageFlags { + pub const INTERSECTION_NV: Self = ShaderStageFlags(0b1000000000000); +} +#[doc = "Generated from \'VK_NV_ray_tracing\'"] +impl ShaderStageFlags { + pub const CALLABLE_NV: Self = ShaderStageFlags(0b10000000000000); +} +#[doc = "Generated from \'VK_NV_ray_tracing\'"] +impl PipelineStageFlags { + pub const RAY_TRACING_SHADER_NV: Self = PipelineStageFlags(0b1000000000000000000000); +} +#[doc = "Generated from \'VK_NV_ray_tracing\'"] +impl PipelineStageFlags { + pub const ACCELERATION_STRUCTURE_BUILD_NV: Self = + PipelineStageFlags(0b10000000000000000000000000); +} +#[doc = "Generated from \'VK_NV_ray_tracing\'"] +impl BufferUsageFlags { + pub const RAY_TRACING_NV: Self = BufferUsageFlags(0b10000000000); +} +#[doc = "Generated from \'VK_NV_ray_tracing\'"] +impl PipelineBindPoint { + pub const RAY_TRACING_NV: Self = PipelineBindPoint(1000165000); +} +#[doc = "Generated from \'VK_NV_ray_tracing\'"] +impl DescriptorType { + pub const ACCELERATION_STRUCTURE_NV: Self = DescriptorType(1000165000); +} +#[doc = "Generated from \'VK_NV_ray_tracing\'"] +impl AccessFlags { + pub const ACCELERATION_STRUCTURE_READ_NV: Self = AccessFlags(0b1000000000000000000000); +} +#[doc = "Generated from \'VK_NV_ray_tracing\'"] +impl AccessFlags { + pub const ACCELERATION_STRUCTURE_WRITE_NV: Self = AccessFlags(0b10000000000000000000000); +} +#[doc = "Generated from \'VK_NV_ray_tracing\'"] +impl QueryType { + pub const ACCELERATION_STRUCTURE_COMPACTED_SIZE_NV: Self = QueryType(1000165000); +} +#[doc = "Generated from \'VK_NV_ray_tracing\'"] +impl PipelineCreateFlags { + pub const DEFER_COMPILE_NV: Self = PipelineCreateFlags(0b100000); +} +#[doc = "Generated from \'VK_NV_ray_tracing\'"] +impl ObjectType { + pub const ACCELERATION_STRUCTURE_NV: Self = ObjectType(1000165000); +} +#[doc = "Generated from \'VK_NV_ray_tracing\'"] +impl DebugReportObjectTypeEXT { + pub const ACCELERATION_STRUCTURE_NV: Self = DebugReportObjectTypeEXT(1000165000); +} +#[doc = "Generated from \'VK_NV_ray_tracing\'"] +impl IndexType { + pub const NONE_NV: Self = IndexType(1000165000); +} +impl NvRepresentativeFragmentTestFn { + pub fn name() -> 
&'static ::std::ffi::CStr { + ::std::ffi::CStr::from_bytes_with_nul(b"VK_NV_representative_fragment_test\0") + .expect("Wrong extension string") + } +} +pub struct NvRepresentativeFragmentTestFn {} +unsafe impl Send for NvRepresentativeFragmentTestFn {} +unsafe impl Sync for NvRepresentativeFragmentTestFn {} +impl ::std::clone::Clone for NvRepresentativeFragmentTestFn { + fn clone(&self) -> Self { + NvRepresentativeFragmentTestFn {} + } +} +impl NvRepresentativeFragmentTestFn { + pub fn load(mut _f: F) -> Self + where + F: FnMut(&::std::ffi::CStr) -> *const c_void, + { + NvRepresentativeFragmentTestFn {} + } +} +#[doc = "Generated from \'VK_NV_representative_fragment_test\'"] +impl StructureType { + pub const PHYSICAL_DEVICE_REPRESENTATIVE_FRAGMENT_TEST_FEATURES_NV: Self = + StructureType(1000166000); +} +#[doc = "Generated from \'VK_NV_representative_fragment_test\'"] +impl StructureType { + pub const PIPELINE_REPRESENTATIVE_FRAGMENT_TEST_STATE_CREATE_INFO_NV: Self = + StructureType(1000166001); +} +impl NvExtension168Fn { + pub fn name() -> &'static ::std::ffi::CStr { + ::std::ffi::CStr::from_bytes_with_nul(b"VK_NV_extension_168\0") + .expect("Wrong extension string") + } +} +pub struct NvExtension168Fn {} +unsafe impl Send for NvExtension168Fn {} +unsafe impl Sync for NvExtension168Fn {} +impl ::std::clone::Clone for NvExtension168Fn { + fn clone(&self) -> Self { + NvExtension168Fn {} + } +} +impl NvExtension168Fn { + pub fn load(mut _f: F) -> Self + where + F: FnMut(&::std::ffi::CStr) -> *const c_void, + { + NvExtension168Fn {} + } +} +impl KhrMaintenance3Fn { + pub fn name() -> &'static ::std::ffi::CStr { + ::std::ffi::CStr::from_bytes_with_nul(b"VK_KHR_maintenance3\0") + .expect("Wrong extension string") + } +} +pub struct KhrMaintenance3Fn {} +unsafe impl Send for KhrMaintenance3Fn {} +unsafe impl Sync for KhrMaintenance3Fn {} +impl ::std::clone::Clone for KhrMaintenance3Fn { + fn clone(&self) -> Self { + KhrMaintenance3Fn {} + } +} +impl KhrMaintenance3Fn { + pub fn load(mut _f: F) -> Self + where + F: FnMut(&::std::ffi::CStr) -> *const c_void, + { + KhrMaintenance3Fn {} + } +} +impl KhrDrawIndirectCountFn { + pub fn name() -> &'static ::std::ffi::CStr { + ::std::ffi::CStr::from_bytes_with_nul(b"VK_KHR_draw_indirect_count\0") + .expect("Wrong extension string") + } +} +#[allow(non_camel_case_types)] +pub type PFN_vkCmdDrawIndirectCountKHR = extern "system" fn( + command_buffer: CommandBuffer, + buffer: Buffer, + offset: DeviceSize, + count_buffer: Buffer, + count_buffer_offset: DeviceSize, + max_draw_count: u32, + stride: u32, +) -> c_void; +#[allow(non_camel_case_types)] +pub type PFN_vkCmdDrawIndexedIndirectCountKHR = extern "system" fn( + command_buffer: CommandBuffer, + buffer: Buffer, + offset: DeviceSize, + count_buffer: Buffer, + count_buffer_offset: DeviceSize, + max_draw_count: u32, + stride: u32, +) -> c_void; +pub struct KhrDrawIndirectCountFn { + pub cmd_draw_indirect_count_khr: extern "system" fn( + command_buffer: CommandBuffer, + buffer: Buffer, + offset: DeviceSize, + count_buffer: Buffer, + count_buffer_offset: DeviceSize, + max_draw_count: u32, + stride: u32, + ) -> c_void, + pub cmd_draw_indexed_indirect_count_khr: extern "system" fn( + command_buffer: CommandBuffer, + buffer: Buffer, + offset: DeviceSize, + count_buffer: Buffer, + count_buffer_offset: DeviceSize, + max_draw_count: u32, + stride: u32, + ) -> c_void, +} +unsafe impl Send for KhrDrawIndirectCountFn {} +unsafe impl Sync for KhrDrawIndirectCountFn {} +impl ::std::clone::Clone for 
KhrDrawIndirectCountFn { + fn clone(&self) -> Self { + KhrDrawIndirectCountFn { + cmd_draw_indirect_count_khr: self.cmd_draw_indirect_count_khr, + cmd_draw_indexed_indirect_count_khr: self.cmd_draw_indexed_indirect_count_khr, + } + } +} +impl KhrDrawIndirectCountFn { + pub fn load(mut _f: F) -> Self + where + F: FnMut(&::std::ffi::CStr) -> *const c_void, + { + KhrDrawIndirectCountFn { + cmd_draw_indirect_count_khr: unsafe { + extern "system" fn cmd_draw_indirect_count_khr( + _command_buffer: CommandBuffer, + _buffer: Buffer, + _offset: DeviceSize, + _count_buffer: Buffer, + _count_buffer_offset: DeviceSize, + _max_draw_count: u32, + _stride: u32, + ) -> c_void { + panic!(concat!( + "Unable to load ", + stringify!(cmd_draw_indirect_count_khr) + )) + } + let raw_name = stringify!(vkCmdDrawIndirectCountKHR); + let cname = ::std::ffi::CString::new(raw_name).unwrap(); + let val = _f(&cname); + if val.is_null() { + cmd_draw_indirect_count_khr + } else { + ::std::mem::transmute(val) + } + }, + cmd_draw_indexed_indirect_count_khr: unsafe { + extern "system" fn cmd_draw_indexed_indirect_count_khr( + _command_buffer: CommandBuffer, + _buffer: Buffer, + _offset: DeviceSize, + _count_buffer: Buffer, + _count_buffer_offset: DeviceSize, + _max_draw_count: u32, + _stride: u32, + ) -> c_void { + panic!(concat!( + "Unable to load ", + stringify!(cmd_draw_indexed_indirect_count_khr) + )) + } + let raw_name = stringify!(vkCmdDrawIndexedIndirectCountKHR); + let cname = ::std::ffi::CString::new(raw_name).unwrap(); + let val = _f(&cname); + if val.is_null() { + cmd_draw_indexed_indirect_count_khr + } else { + ::std::mem::transmute(val) + } + }, + } + } + #[doc = ""] + pub unsafe fn cmd_draw_indirect_count_khr( + &self, + command_buffer: CommandBuffer, + buffer: Buffer, + offset: DeviceSize, + count_buffer: Buffer, + count_buffer_offset: DeviceSize, + max_draw_count: u32, + stride: u32, + ) -> c_void { + (self.cmd_draw_indirect_count_khr)( + command_buffer, + buffer, + offset, + count_buffer, + count_buffer_offset, + max_draw_count, + stride, + ) + } + #[doc = ""] + pub unsafe fn cmd_draw_indexed_indirect_count_khr( + &self, + command_buffer: CommandBuffer, + buffer: Buffer, + offset: DeviceSize, + count_buffer: Buffer, + count_buffer_offset: DeviceSize, + max_draw_count: u32, + stride: u32, + ) -> c_void { + (self.cmd_draw_indexed_indirect_count_khr)( + command_buffer, + buffer, + offset, + count_buffer, + count_buffer_offset, + max_draw_count, + stride, + ) + } +} +impl QcomExtension171Fn { + pub fn name() -> &'static ::std::ffi::CStr { + ::std::ffi::CStr::from_bytes_with_nul(b"VK_QCOM_extension_171\0") + .expect("Wrong extension string") + } +} +pub struct QcomExtension171Fn {} +unsafe impl Send for QcomExtension171Fn {} +unsafe impl Sync for QcomExtension171Fn {} +impl ::std::clone::Clone for QcomExtension171Fn { + fn clone(&self) -> Self { + QcomExtension171Fn {} + } +} +impl QcomExtension171Fn { + pub fn load(mut _f: F) -> Self + where + F: FnMut(&::std::ffi::CStr) -> *const c_void, + { + QcomExtension171Fn {} + } +} +impl QcomExtension172Fn { + pub fn name() -> &'static ::std::ffi::CStr { + ::std::ffi::CStr::from_bytes_with_nul(b"VK_QCOM_extension_172\0") + .expect("Wrong extension string") + } +} +pub struct QcomExtension172Fn {} +unsafe impl Send for QcomExtension172Fn {} +unsafe impl Sync for QcomExtension172Fn {} +impl ::std::clone::Clone for QcomExtension172Fn { + fn clone(&self) -> Self { + QcomExtension172Fn {} + } +} +impl QcomExtension172Fn { + pub fn load(mut _f: F) -> Self + where + F: 
FnMut(&::std::ffi::CStr) -> *const c_void, + { + QcomExtension172Fn {} + } +} +impl QcomExtension173Fn { + pub fn name() -> &'static ::std::ffi::CStr { + ::std::ffi::CStr::from_bytes_with_nul(b"VK_QCOM_extension_173\0") + .expect("Wrong extension string") + } +} +pub struct QcomExtension173Fn {} +unsafe impl Send for QcomExtension173Fn {} +unsafe impl Sync for QcomExtension173Fn {} +impl ::std::clone::Clone for QcomExtension173Fn { + fn clone(&self) -> Self { + QcomExtension173Fn {} + } +} +impl QcomExtension173Fn { + pub fn load(mut _f: F) -> Self + where + F: FnMut(&::std::ffi::CStr) -> *const c_void, + { + QcomExtension173Fn {} + } +} +impl QcomExtension174Fn { + pub fn name() -> &'static ::std::ffi::CStr { + ::std::ffi::CStr::from_bytes_with_nul(b"VK_QCOM_extension_174\0") + .expect("Wrong extension string") + } +} +pub struct QcomExtension174Fn {} +unsafe impl Send for QcomExtension174Fn {} +unsafe impl Sync for QcomExtension174Fn {} +impl ::std::clone::Clone for QcomExtension174Fn { + fn clone(&self) -> Self { + QcomExtension174Fn {} + } +} +impl QcomExtension174Fn { + pub fn load(mut _f: F) -> Self + where + F: FnMut(&::std::ffi::CStr) -> *const c_void, + { + QcomExtension174Fn {} + } +} +impl ExtGlobalPriorityFn { + pub fn name() -> &'static ::std::ffi::CStr { + ::std::ffi::CStr::from_bytes_with_nul(b"VK_EXT_global_priority\0") + .expect("Wrong extension string") + } +} +pub struct ExtGlobalPriorityFn {} +unsafe impl Send for ExtGlobalPriorityFn {} +unsafe impl Sync for ExtGlobalPriorityFn {} +impl ::std::clone::Clone for ExtGlobalPriorityFn { + fn clone(&self) -> Self { + ExtGlobalPriorityFn {} + } +} +impl ExtGlobalPriorityFn { + pub fn load(mut _f: F) -> Self + where + F: FnMut(&::std::ffi::CStr) -> *const c_void, + { + ExtGlobalPriorityFn {} + } +} +#[doc = "Generated from \'VK_EXT_global_priority\'"] +impl StructureType { + pub const DEVICE_QUEUE_GLOBAL_PRIORITY_CREATE_INFO_EXT: Self = StructureType(1000174000); +} +#[doc = "Generated from \'VK_EXT_global_priority\'"] +impl Result { + pub const ERROR_NOT_PERMITTED_EXT: Self = Result(-1000174001); +} +impl ExtExtension176Fn { + pub fn name() -> &'static ::std::ffi::CStr { + ::std::ffi::CStr::from_bytes_with_nul(b"VK_EXT_extension_176\0") + .expect("Wrong extension string") + } +} +pub struct ExtExtension176Fn {} +unsafe impl Send for ExtExtension176Fn {} +unsafe impl Sync for ExtExtension176Fn {} +impl ::std::clone::Clone for ExtExtension176Fn { + fn clone(&self) -> Self { + ExtExtension176Fn {} + } +} +impl ExtExtension176Fn { + pub fn load(mut _f: F) -> Self + where + F: FnMut(&::std::ffi::CStr) -> *const c_void, + { + ExtExtension176Fn {} + } +} +impl ExtExtension177Fn { + pub fn name() -> &'static ::std::ffi::CStr { + ::std::ffi::CStr::from_bytes_with_nul(b"VK_EXT_extension_177\0") + .expect("Wrong extension string") + } +} +pub struct ExtExtension177Fn {} +unsafe impl Send for ExtExtension177Fn {} +unsafe impl Sync for ExtExtension177Fn {} +impl ::std::clone::Clone for ExtExtension177Fn { + fn clone(&self) -> Self { + ExtExtension177Fn {} + } +} +impl ExtExtension177Fn { + pub fn load(mut _f: F) -> Self + where + F: FnMut(&::std::ffi::CStr) -> *const c_void, + { + ExtExtension177Fn {} + } +} +impl Khr8bitStorageFn { + pub fn name() -> &'static ::std::ffi::CStr { + ::std::ffi::CStr::from_bytes_with_nul(b"VK_KHR_8bit_storage\0") + .expect("Wrong extension string") + } +} +pub struct Khr8bitStorageFn {} +unsafe impl Send for Khr8bitStorageFn {} +unsafe impl Sync for Khr8bitStorageFn {} +impl ::std::clone::Clone for 
Khr8bitStorageFn {
+    fn clone(&self) -> Self {
+        Khr8bitStorageFn {}
+    }
+}
+impl Khr8bitStorageFn {
+    pub fn load<F>(mut _f: F) -> Self
+    where
+        F: FnMut(&::std::ffi::CStr) -> *const c_void,
+    {
+        Khr8bitStorageFn {}
+    }
+}
+#[doc = "Generated from \'VK_KHR_8bit_storage\'"]
+impl StructureType {
+    pub const PHYSICAL_DEVICE_8BIT_STORAGE_FEATURES_KHR: Self = StructureType(1000177000);
+}
+impl ExtExternalMemoryHostFn {
+    pub fn name() -> &'static ::std::ffi::CStr {
+        ::std::ffi::CStr::from_bytes_with_nul(b"VK_EXT_external_memory_host\0")
+            .expect("Wrong extension string")
+    }
+}
+#[allow(non_camel_case_types)]
+pub type PFN_vkGetMemoryHostPointerPropertiesEXT = extern "system" fn(
+    device: Device,
+    handle_type: ExternalMemoryHandleTypeFlags,
+    p_host_pointer: *const c_void,
+    p_memory_host_pointer_properties: *mut MemoryHostPointerPropertiesEXT,
+) -> Result;
+pub struct ExtExternalMemoryHostFn {
+    pub get_memory_host_pointer_properties_ext: extern "system" fn(
+        device: Device,
+        handle_type: ExternalMemoryHandleTypeFlags,
+        p_host_pointer: *const c_void,
+        p_memory_host_pointer_properties: *mut MemoryHostPointerPropertiesEXT,
+    ) -> Result,
+}
+unsafe impl Send for ExtExternalMemoryHostFn {}
+unsafe impl Sync for ExtExternalMemoryHostFn {}
+impl ::std::clone::Clone for ExtExternalMemoryHostFn {
+    fn clone(&self) -> Self {
+        ExtExternalMemoryHostFn {
+            get_memory_host_pointer_properties_ext: self.get_memory_host_pointer_properties_ext,
+        }
+    }
+}
+impl ExtExternalMemoryHostFn {
+    pub fn load<F>(mut _f: F) -> Self
+    where
+        F: FnMut(&::std::ffi::CStr) -> *const c_void,
+    {
+        ExtExternalMemoryHostFn {
+            get_memory_host_pointer_properties_ext: unsafe {
+                extern "system" fn get_memory_host_pointer_properties_ext(
+                    _device: Device,
+                    _handle_type: ExternalMemoryHandleTypeFlags,
+                    _p_host_pointer: *const c_void,
+                    _p_memory_host_pointer_properties: *mut MemoryHostPointerPropertiesEXT,
+                ) -> Result {
+                    panic!(concat!(
+                        "Unable to load ",
+                        stringify!(get_memory_host_pointer_properties_ext)
+                    ))
+                }
+                let raw_name = stringify!(vkGetMemoryHostPointerPropertiesEXT);
+                let cname = ::std::ffi::CString::new(raw_name).unwrap();
+                let val = _f(&cname);
+                if val.is_null() {
+                    get_memory_host_pointer_properties_ext
+                } else {
+                    ::std::mem::transmute(val)
+                }
+            },
+        }
+    }
+    #[doc = ""]
+    pub unsafe fn get_memory_host_pointer_properties_ext(
+        &self,
+        device: Device,
+        handle_type: ExternalMemoryHandleTypeFlags,
+        p_host_pointer: *const c_void,
+        p_memory_host_pointer_properties: *mut MemoryHostPointerPropertiesEXT,
+    ) -> Result {
+        (self.get_memory_host_pointer_properties_ext)(
+            device,
+            handle_type,
+            p_host_pointer,
+            p_memory_host_pointer_properties,
+        )
+    }
+}
+#[doc = "Generated from \'VK_EXT_external_memory_host\'"]
+impl StructureType {
+    pub const IMPORT_MEMORY_HOST_POINTER_INFO_EXT: Self = StructureType(1000178000);
+}
+#[doc = "Generated from \'VK_EXT_external_memory_host\'"]
+impl StructureType {
+    pub const MEMORY_HOST_POINTER_PROPERTIES_EXT: Self = StructureType(1000178001);
+}
+#[doc = "Generated from \'VK_EXT_external_memory_host\'"]
+impl StructureType {
+    pub const PHYSICAL_DEVICE_EXTERNAL_MEMORY_HOST_PROPERTIES_EXT: Self = StructureType(1000178002);
+}
+#[doc = "Generated from \'VK_EXT_external_memory_host\'"]
+impl ExternalMemoryHandleTypeFlags {
+    pub const EXTERNAL_MEMORY_HANDLE_TYPE_HOST_ALLOCATION: Self =
+        ExternalMemoryHandleTypeFlags(0b10000000);
+}
+#[doc = "Generated from \'VK_EXT_external_memory_host\'"]
+impl ExternalMemoryHandleTypeFlags {
+    pub const EXTERNAL_MEMORY_HANDLE_TYPE_HOST_MAPPED_FOREIGN_MEMORY: Self =
+        ExternalMemoryHandleTypeFlags(0b100000000);
+}
+impl AmdBufferMarkerFn {
+    pub fn name() -> &'static ::std::ffi::CStr {
+        ::std::ffi::CStr::from_bytes_with_nul(b"VK_AMD_buffer_marker\0")
+            .expect("Wrong extension string")
+    }
+}
+#[allow(non_camel_case_types)]
+pub type PFN_vkCmdWriteBufferMarkerAMD = extern "system" fn(
+    command_buffer: CommandBuffer,
+    pipeline_stage: PipelineStageFlags,
+    dst_buffer: Buffer,
+    dst_offset: DeviceSize,
+    marker: u32,
+) -> c_void;
+pub struct AmdBufferMarkerFn {
+    pub cmd_write_buffer_marker_amd: extern "system" fn(
+        command_buffer: CommandBuffer,
+        pipeline_stage: PipelineStageFlags,
+        dst_buffer: Buffer,
+        dst_offset: DeviceSize,
+        marker: u32,
+    ) -> c_void,
+}
+unsafe impl Send for AmdBufferMarkerFn {}
+unsafe impl Sync for AmdBufferMarkerFn {}
+impl ::std::clone::Clone for AmdBufferMarkerFn {
+    fn clone(&self) -> Self {
+        AmdBufferMarkerFn {
+            cmd_write_buffer_marker_amd: self.cmd_write_buffer_marker_amd,
+        }
+    }
+}
+impl AmdBufferMarkerFn {
+    pub fn load<F>(mut _f: F) -> Self
+    where
+        F: FnMut(&::std::ffi::CStr) -> *const c_void,
+    {
+        AmdBufferMarkerFn {
+            cmd_write_buffer_marker_amd: unsafe {
+                extern "system" fn cmd_write_buffer_marker_amd(
+                    _command_buffer: CommandBuffer,
+                    _pipeline_stage: PipelineStageFlags,
+                    _dst_buffer: Buffer,
+                    _dst_offset: DeviceSize,
+                    _marker: u32,
+                ) -> c_void {
+                    panic!(concat!(
+                        "Unable to load ",
+                        stringify!(cmd_write_buffer_marker_amd)
+                    ))
+                }
+                let raw_name = stringify!(vkCmdWriteBufferMarkerAMD);
+                let cname = ::std::ffi::CString::new(raw_name).unwrap();
+                let val = _f(&cname);
+                if val.is_null() {
+                    cmd_write_buffer_marker_amd
+                } else {
+                    ::std::mem::transmute(val)
+                }
+            },
+        }
+    }
+    #[doc = ""]
+    pub unsafe fn cmd_write_buffer_marker_amd(
+        &self,
+        command_buffer: CommandBuffer,
+        pipeline_stage: PipelineStageFlags,
+        dst_buffer: Buffer,
+        dst_offset: DeviceSize,
+        marker: u32,
+    ) -> c_void {
+        (self.cmd_write_buffer_marker_amd)(
+            command_buffer,
+            pipeline_stage,
+            dst_buffer,
+            dst_offset,
+            marker,
+        )
+    }
+}
+impl KhrShaderAtomicInt64Fn {
+    pub fn name() -> &'static ::std::ffi::CStr {
+        ::std::ffi::CStr::from_bytes_with_nul(b"VK_KHR_shader_atomic_int64\0")
+            .expect("Wrong extension string")
+    }
+}
+pub struct KhrShaderAtomicInt64Fn {}
+unsafe impl Send for KhrShaderAtomicInt64Fn {}
+unsafe impl Sync for KhrShaderAtomicInt64Fn {}
+impl ::std::clone::Clone for KhrShaderAtomicInt64Fn {
+    fn clone(&self) -> Self {
+        KhrShaderAtomicInt64Fn {}
+    }
+}
+impl KhrShaderAtomicInt64Fn {
+    pub fn load<F>(mut _f: F) -> Self
+    where
+        F: FnMut(&::std::ffi::CStr) -> *const c_void,
+    {
+        KhrShaderAtomicInt64Fn {}
+    }
+}
+#[doc = "Generated from \'VK_KHR_shader_atomic_int64\'"]
+impl StructureType {
+    pub const PHYSICAL_DEVICE_SHADER_ATOMIC_INT64_FEATURES_KHR: Self = StructureType(1000180000);
+}
+impl AmdExtension182Fn {
+    pub fn name() -> &'static ::std::ffi::CStr {
+        ::std::ffi::CStr::from_bytes_with_nul(b"VK_AMD_extension_182\0")
+            .expect("Wrong extension string")
+    }
+}
+pub struct AmdExtension182Fn {}
+unsafe impl Send for AmdExtension182Fn {}
+unsafe impl Sync for AmdExtension182Fn {}
+impl ::std::clone::Clone for AmdExtension182Fn {
+    fn clone(&self) -> Self {
+        AmdExtension182Fn {}
+    }
+}
+impl AmdExtension182Fn {
+    pub fn load<F>(mut _f: F) -> Self
+    where
+        F: FnMut(&::std::ffi::CStr) -> *const c_void,
+    {
+        AmdExtension182Fn {}
+    }
+}
+impl AmdExtension183Fn {
+    pub fn name() -> &'static ::std::ffi::CStr {
+        ::std::ffi::CStr::from_bytes_with_nul(b"VK_AMD_extension_183\0")
+            .expect("Wrong extension string")
+    }
+}
+pub struct AmdExtension183Fn {}
+unsafe impl Send for AmdExtension183Fn {}
+unsafe impl Sync for AmdExtension183Fn {}
+impl ::std::clone::Clone for AmdExtension183Fn {
+    fn clone(&self) -> Self {
+        AmdExtension183Fn {}
+    }
+}
+impl AmdExtension183Fn {
+    pub fn load<F>(mut _f: F) -> Self
+    where
+        F: FnMut(&::std::ffi::CStr) -> *const c_void,
+    {
+        AmdExtension183Fn {}
+    }
+}
+impl AmdExtension184Fn {
+    pub fn name() -> &'static ::std::ffi::CStr {
+        ::std::ffi::CStr::from_bytes_with_nul(b"VK_AMD_extension_184\0")
+            .expect("Wrong extension string")
+    }
+}
+pub struct AmdExtension184Fn {}
+unsafe impl Send for AmdExtension184Fn {}
+unsafe impl Sync for AmdExtension184Fn {}
+impl ::std::clone::Clone for AmdExtension184Fn {
+    fn clone(&self) -> Self {
+        AmdExtension184Fn {}
+    }
+}
+impl AmdExtension184Fn {
+    pub fn load<F>(mut _f: F) -> Self
+    where
+        F: FnMut(&::std::ffi::CStr) -> *const c_void,
+    {
+        AmdExtension184Fn {}
+    }
+}
+impl ExtCalibratedTimestampsFn {
+    pub fn name() -> &'static ::std::ffi::CStr {
+        ::std::ffi::CStr::from_bytes_with_nul(b"VK_EXT_calibrated_timestamps\0")
+            .expect("Wrong extension string")
+    }
+}
+#[allow(non_camel_case_types)]
+pub type PFN_vkGetPhysicalDeviceCalibrateableTimeDomainsEXT = extern "system" fn(
+    physical_device: PhysicalDevice,
+    p_time_domain_count: *mut u32,
+    p_time_domains: *mut TimeDomainEXT,
+) -> Result;
+#[allow(non_camel_case_types)]
+pub type PFN_vkGetCalibratedTimestampsEXT = extern "system" fn(
+    device: Device,
+    timestamp_count: u32,
+    p_timestamp_infos: *const CalibratedTimestampInfoEXT,
+    p_timestamps: *mut u64,
+    p_max_deviation: *mut u64,
+) -> Result;
+pub struct ExtCalibratedTimestampsFn {
+    pub get_physical_device_calibrateable_time_domains_ext: extern "system" fn(
+        physical_device: PhysicalDevice,
+        p_time_domain_count: *mut u32,
+        p_time_domains: *mut TimeDomainEXT,
+    ) -> Result,
+    pub get_calibrated_timestamps_ext: extern "system" fn(
+        device: Device,
+        timestamp_count: u32,
+        p_timestamp_infos: *const CalibratedTimestampInfoEXT,
+        p_timestamps: *mut u64,
+        p_max_deviation: *mut u64,
+    ) -> Result,
+}
+unsafe impl Send for ExtCalibratedTimestampsFn {}
+unsafe impl Sync for ExtCalibratedTimestampsFn {}
+impl ::std::clone::Clone for ExtCalibratedTimestampsFn {
+    fn clone(&self) -> Self {
+        ExtCalibratedTimestampsFn {
+            get_physical_device_calibrateable_time_domains_ext: self
+                .get_physical_device_calibrateable_time_domains_ext,
+            get_calibrated_timestamps_ext: self.get_calibrated_timestamps_ext,
+        }
+    }
+}
+impl ExtCalibratedTimestampsFn {
+    pub fn load<F>(mut _f: F) -> Self
+    where
+        F: FnMut(&::std::ffi::CStr) -> *const c_void,
+    {
+        ExtCalibratedTimestampsFn {
+            get_physical_device_calibrateable_time_domains_ext: unsafe {
+                extern "system" fn get_physical_device_calibrateable_time_domains_ext(
+                    _physical_device: PhysicalDevice,
+                    _p_time_domain_count: *mut u32,
+                    _p_time_domains: *mut TimeDomainEXT,
+                ) -> Result {
+                    panic!(concat!(
+                        "Unable to load ",
+                        stringify!(get_physical_device_calibrateable_time_domains_ext)
+                    ))
+                }
+                let raw_name = stringify!(vkGetPhysicalDeviceCalibrateableTimeDomainsEXT);
+                let cname = ::std::ffi::CString::new(raw_name).unwrap();
+                let val = _f(&cname);
+                if val.is_null() {
+                    get_physical_device_calibrateable_time_domains_ext
+                } else {
+                    ::std::mem::transmute(val)
+                }
+            },
+            get_calibrated_timestamps_ext: unsafe {
+                extern "system" fn get_calibrated_timestamps_ext(
+                    _device: Device,
+                    _timestamp_count: u32,
+                    _p_timestamp_infos: *const CalibratedTimestampInfoEXT,
+                    _p_timestamps: *mut u64,
+                    _p_max_deviation: *mut u64,
+                ) -> Result {
+                    panic!(concat!(
+                        "Unable to load ",
+                        stringify!(get_calibrated_timestamps_ext)
+                    ))
+                }
+                let raw_name = stringify!(vkGetCalibratedTimestampsEXT);
+                let cname = ::std::ffi::CString::new(raw_name).unwrap();
+                let val = _f(&cname);
+                if val.is_null() {
+                    get_calibrated_timestamps_ext
+                } else {
+                    ::std::mem::transmute(val)
+                }
+            },
+        }
+    }
+    #[doc = ""]
+    pub unsafe fn get_physical_device_calibrateable_time_domains_ext(
+        &self,
+        physical_device: PhysicalDevice,
+        p_time_domain_count: *mut u32,
+        p_time_domains: *mut TimeDomainEXT,
+    ) -> Result {
+        (self.get_physical_device_calibrateable_time_domains_ext)(
+            physical_device,
+            p_time_domain_count,
+            p_time_domains,
+        )
+    }
+    #[doc = ""]
+    pub unsafe fn get_calibrated_timestamps_ext(
+        &self,
+        device: Device,
+        timestamp_count: u32,
+        p_timestamp_infos: *const CalibratedTimestampInfoEXT,
+        p_timestamps: *mut u64,
+        p_max_deviation: *mut u64,
+    ) -> Result {
+        (self.get_calibrated_timestamps_ext)(
+            device,
+            timestamp_count,
+            p_timestamp_infos,
+            p_timestamps,
+            p_max_deviation,
+        )
+    }
+}
+#[doc = "Generated from \'VK_EXT_calibrated_timestamps\'"]
+impl StructureType {
+    pub const CALIBRATED_TIMESTAMP_INFO_EXT: Self = StructureType(1000184000);
+}
+impl AmdShaderCorePropertiesFn {
+    pub fn name() -> &'static ::std::ffi::CStr {
+        ::std::ffi::CStr::from_bytes_with_nul(b"VK_AMD_shader_core_properties\0")
+            .expect("Wrong extension string")
+    }
+}
+pub struct AmdShaderCorePropertiesFn {}
+unsafe impl Send for AmdShaderCorePropertiesFn {}
+unsafe impl Sync for AmdShaderCorePropertiesFn {}
+impl ::std::clone::Clone for AmdShaderCorePropertiesFn {
+    fn clone(&self) -> Self {
+        AmdShaderCorePropertiesFn {}
+    }
+}
+impl AmdShaderCorePropertiesFn {
+    pub fn load<F>(mut _f: F) -> Self
+    where
+        F: FnMut(&::std::ffi::CStr) -> *const c_void,
+    {
+        AmdShaderCorePropertiesFn {}
+    }
+}
+#[doc = "Generated from \'VK_AMD_shader_core_properties\'"]
+impl StructureType {
+    pub const PHYSICAL_DEVICE_SHADER_CORE_PROPERTIES_AMD: Self = StructureType(1000185000);
+}
+impl AmdExtension187Fn {
+    pub fn name() -> &'static ::std::ffi::CStr {
+        ::std::ffi::CStr::from_bytes_with_nul(b"VK_AMD_extension_187\0")
+            .expect("Wrong extension string")
+    }
+}
+pub struct AmdExtension187Fn {}
+unsafe impl Send for AmdExtension187Fn {}
+unsafe impl Sync for AmdExtension187Fn {}
+impl ::std::clone::Clone for AmdExtension187Fn {
+    fn clone(&self) -> Self {
+        AmdExtension187Fn {}
+    }
+}
+impl AmdExtension187Fn {
+    pub fn load<F>(mut _f: F) -> Self
+    where
+        F: FnMut(&::std::ffi::CStr) -> *const c_void,
+    {
+        AmdExtension187Fn {}
+    }
+}
+impl AmdExtension188Fn {
+    pub fn name() -> &'static ::std::ffi::CStr {
+        ::std::ffi::CStr::from_bytes_with_nul(b"VK_AMD_extension_188\0")
+            .expect("Wrong extension string")
+    }
+}
+pub struct AmdExtension188Fn {}
+unsafe impl Send for AmdExtension188Fn {}
+unsafe impl Sync for AmdExtension188Fn {}
+impl ::std::clone::Clone for AmdExtension188Fn {
+    fn clone(&self) -> Self {
+        AmdExtension188Fn {}
+    }
+}
+impl AmdExtension188Fn {
+    pub fn load<F>(mut _f: F) -> Self
+    where
+        F: FnMut(&::std::ffi::CStr) -> *const c_void,
+    {
+        AmdExtension188Fn {}
+    }
+}
+impl AmdExtension189Fn {
+    pub fn name() -> &'static ::std::ffi::CStr {
+        ::std::ffi::CStr::from_bytes_with_nul(b"VK_AMD_extension_189\0")
+            .expect("Wrong extension string")
+    }
+}
+pub struct AmdExtension189Fn {}
+unsafe impl Send for AmdExtension189Fn {}
+unsafe impl Sync for AmdExtension189Fn {}
+impl ::std::clone::Clone for AmdExtension189Fn {
+    fn clone(&self) -> Self {
+        AmdExtension189Fn {}
+    }
+}
+impl AmdExtension189Fn {
+    pub fn load<F>(mut _f: F) -> Self
+    where
+        F: FnMut(&::std::ffi::CStr) -> *const c_void,
+    {
+        AmdExtension189Fn {}
+    }
+}
+impl AmdMemoryOverallocationBehaviorFn {
+    pub fn name() -> &'static ::std::ffi::CStr {
+        ::std::ffi::CStr::from_bytes_with_nul(b"VK_AMD_memory_overallocation_behavior\0")
+            .expect("Wrong extension string")
+    }
+}
+pub struct AmdMemoryOverallocationBehaviorFn {}
+unsafe impl Send for AmdMemoryOverallocationBehaviorFn {}
+unsafe impl Sync for AmdMemoryOverallocationBehaviorFn {}
+impl ::std::clone::Clone for AmdMemoryOverallocationBehaviorFn {
+    fn clone(&self) -> Self {
+        AmdMemoryOverallocationBehaviorFn {}
+    }
+}
+impl AmdMemoryOverallocationBehaviorFn {
+    pub fn load<F>(mut _f: F) -> Self
+    where
+        F: FnMut(&::std::ffi::CStr) -> *const c_void,
+    {
+        AmdMemoryOverallocationBehaviorFn {}
+    }
+}
+#[doc = "Generated from \'VK_AMD_memory_overallocation_behavior\'"]
+impl StructureType {
+    pub const DEVICE_MEMORY_OVERALLOCATION_CREATE_INFO_AMD: Self = StructureType(1000189000);
+}
+impl ExtVertexAttributeDivisorFn {
+    pub fn name() -> &'static ::std::ffi::CStr {
+        ::std::ffi::CStr::from_bytes_with_nul(b"VK_EXT_vertex_attribute_divisor\0")
+            .expect("Wrong extension string")
+    }
+}
+pub struct ExtVertexAttributeDivisorFn {}
+unsafe impl Send for ExtVertexAttributeDivisorFn {}
+unsafe impl Sync for ExtVertexAttributeDivisorFn {}
+impl ::std::clone::Clone for ExtVertexAttributeDivisorFn {
+    fn clone(&self) -> Self {
+        ExtVertexAttributeDivisorFn {}
+    }
+}
+impl ExtVertexAttributeDivisorFn {
+    pub fn load<F>(mut _f: F) -> Self
+    where
+        F: FnMut(&::std::ffi::CStr) -> *const c_void,
+    {
+        ExtVertexAttributeDivisorFn {}
+    }
+}
+#[doc = "Generated from \'VK_EXT_vertex_attribute_divisor\'"]
+impl StructureType {
+    pub const PHYSICAL_DEVICE_VERTEX_ATTRIBUTE_DIVISOR_PROPERTIES_EXT: Self =
+        StructureType(1000190000);
+}
+#[doc = "Generated from \'VK_EXT_vertex_attribute_divisor\'"]
+impl StructureType {
+    pub const PIPELINE_VERTEX_INPUT_DIVISOR_STATE_CREATE_INFO_EXT: Self = StructureType(1000190001);
+}
+#[doc = "Generated from \'VK_EXT_vertex_attribute_divisor\'"]
+impl StructureType {
+    pub const PHYSICAL_DEVICE_VERTEX_ATTRIBUTE_DIVISOR_FEATURES_EXT: Self =
+        StructureType(1000190002);
+}
+impl GoogleExtension192Fn {
+    pub fn name() -> &'static ::std::ffi::CStr {
+        ::std::ffi::CStr::from_bytes_with_nul(b"VK_GOOGLE_extension_192\0")
+            .expect("Wrong extension string")
+    }
+}
+pub struct GoogleExtension192Fn {}
+unsafe impl Send for GoogleExtension192Fn {}
+unsafe impl Sync for GoogleExtension192Fn {}
+impl ::std::clone::Clone for GoogleExtension192Fn {
+    fn clone(&self) -> Self {
+        GoogleExtension192Fn {}
+    }
+}
+impl GoogleExtension192Fn {
+    pub fn load<F>(mut _f: F) -> Self
+    where
+        F: FnMut(&::std::ffi::CStr) -> *const c_void,
+    {
+        GoogleExtension192Fn {}
+    }
+}
+impl GoogleExtension193Fn {
+    pub fn name() -> &'static ::std::ffi::CStr {
+        ::std::ffi::CStr::from_bytes_with_nul(b"VK_GOOGLE_extension_193\0")
+            .expect("Wrong extension string")
+    }
+}
+pub struct GoogleExtension193Fn {}
+unsafe impl Send for GoogleExtension193Fn {}
+unsafe impl Sync for GoogleExtension193Fn {}
+impl ::std::clone::Clone for GoogleExtension193Fn {
+    fn clone(&self) -> Self {
+        GoogleExtension193Fn {}
+    }
+}
+impl GoogleExtension193Fn {
+    pub fn load<F>(mut _f: F) -> Self
+    where
+        F: FnMut(&::std::ffi::CStr) -> *const c_void,
+    {
+        GoogleExtension193Fn {}
+    }
+}
+impl GoogleExtension194Fn {
+    pub fn name() -> &'static ::std::ffi::CStr {
+        ::std::ffi::CStr::from_bytes_with_nul(b"VK_GOOGLE_extension_194\0")
+            .expect("Wrong extension string")
+    }
+}
+pub struct GoogleExtension194Fn {}
+unsafe impl Send for GoogleExtension194Fn {}
+unsafe impl Sync for GoogleExtension194Fn {}
+impl ::std::clone::Clone for GoogleExtension194Fn {
+    fn clone(&self) -> Self {
+        GoogleExtension194Fn {}
+    }
+}
+impl GoogleExtension194Fn {
+    pub fn load<F>(mut _f: F) -> Self
+    where
+        F: FnMut(&::std::ffi::CStr) -> *const c_void,
+    {
+        GoogleExtension194Fn {}
+    }
+}
+impl GoogleExtension195Fn {
+    pub fn name() -> &'static ::std::ffi::CStr {
+        ::std::ffi::CStr::from_bytes_with_nul(b"VK_GOOGLE_extension_195\0")
+            .expect("Wrong extension string")
+    }
+}
+pub struct GoogleExtension195Fn {}
+unsafe impl Send for GoogleExtension195Fn {}
+unsafe impl Sync for GoogleExtension195Fn {}
+impl ::std::clone::Clone for GoogleExtension195Fn {
+    fn clone(&self) -> Self {
+        GoogleExtension195Fn {}
+    }
+}
+impl GoogleExtension195Fn {
+    pub fn load<F>(mut _f: F) -> Self
+    where
+        F: FnMut(&::std::ffi::CStr) -> *const c_void,
+    {
+        GoogleExtension195Fn {}
+    }
+}
+impl GoogleExtension196Fn {
+    pub fn name() -> &'static ::std::ffi::CStr {
+        ::std::ffi::CStr::from_bytes_with_nul(b"VK_GOOGLE_extension_196\0")
+            .expect("Wrong extension string")
+    }
+}
+pub struct GoogleExtension196Fn {}
+unsafe impl Send for GoogleExtension196Fn {}
+unsafe impl Sync for GoogleExtension196Fn {}
+impl ::std::clone::Clone for GoogleExtension196Fn {
+    fn clone(&self) -> Self {
+        GoogleExtension196Fn {}
+    }
+}
+impl GoogleExtension196Fn {
+    pub fn load<F>(mut _f: F) -> Self
+    where
+        F: FnMut(&::std::ffi::CStr) -> *const c_void,
+    {
+        GoogleExtension196Fn {}
+    }
+}
+impl KhrDriverPropertiesFn {
+    pub fn name() -> &'static ::std::ffi::CStr {
+        ::std::ffi::CStr::from_bytes_with_nul(b"VK_KHR_driver_properties\0")
+            .expect("Wrong extension string")
+    }
+}
+pub struct KhrDriverPropertiesFn {}
+unsafe impl Send for KhrDriverPropertiesFn {}
+unsafe impl Sync for KhrDriverPropertiesFn {}
+impl ::std::clone::Clone for KhrDriverPropertiesFn {
+    fn clone(&self) -> Self {
+        KhrDriverPropertiesFn {}
+    }
+}
+impl KhrDriverPropertiesFn {
+    pub fn load<F>(mut _f: F) -> Self
+    where
+        F: FnMut(&::std::ffi::CStr) -> *const c_void,
+    {
+        KhrDriverPropertiesFn {}
+    }
+}
+#[doc = "Generated from \'VK_KHR_driver_properties\'"]
+impl StructureType {
+    pub const PHYSICAL_DEVICE_DRIVER_PROPERTIES_KHR: Self = StructureType(1000196000);
+}
+impl KhrShaderFloatControlsFn {
+    pub fn name() -> &'static ::std::ffi::CStr {
+        ::std::ffi::CStr::from_bytes_with_nul(b"VK_KHR_shader_float_controls\0")
+            .expect("Wrong extension string")
+    }
+}
+pub struct KhrShaderFloatControlsFn {}
+unsafe impl Send for KhrShaderFloatControlsFn {}
+unsafe impl Sync for KhrShaderFloatControlsFn {}
+impl ::std::clone::Clone for KhrShaderFloatControlsFn {
+    fn clone(&self) -> Self {
+        KhrShaderFloatControlsFn {}
+    }
+}
+impl KhrShaderFloatControlsFn {
+    pub fn load<F>(mut _f: F) -> Self
+    where
+        F: FnMut(&::std::ffi::CStr) -> *const c_void,
+    {
+        KhrShaderFloatControlsFn {}
+    }
+}
+#[doc = "Generated from \'VK_KHR_shader_float_controls\'"]
+impl StructureType {
+    pub const PHYSICAL_DEVICE_FLOAT_CONTROLS_PROPERTIES_KHR: Self = StructureType(1000197000);
+}
+impl NvShaderSubgroupPartitionedFn {
+    pub fn name() -> &'static ::std::ffi::CStr {
+        ::std::ffi::CStr::from_bytes_with_nul(b"VK_NV_shader_subgroup_partitioned\0")
+            .expect("Wrong extension string")
+    }
+}
+pub struct NvShaderSubgroupPartitionedFn {}
+unsafe impl Send for NvShaderSubgroupPartitionedFn {}
+unsafe impl Sync for NvShaderSubgroupPartitionedFn {}
+impl ::std::clone::Clone for NvShaderSubgroupPartitionedFn {
+    fn clone(&self) -> Self {
+        NvShaderSubgroupPartitionedFn {}
+    }
+}
+impl NvShaderSubgroupPartitionedFn {
+    pub fn load<F>(mut _f: F) -> Self
+    where
+        F: FnMut(&::std::ffi::CStr) -> *const c_void,
+    {
+        NvShaderSubgroupPartitionedFn {}
+    }
+}
+#[doc = "Generated from \'VK_NV_shader_subgroup_partitioned\'"]
+impl SubgroupFeatureFlags {
+    pub const PARTITIONED_NV: Self = SubgroupFeatureFlags(0b100000000);
+}
+impl KhrDepthStencilResolveFn {
+    pub fn name() -> &'static ::std::ffi::CStr {
+        ::std::ffi::CStr::from_bytes_with_nul(b"VK_KHR_depth_stencil_resolve\0")
+            .expect("Wrong extension string")
+    }
+}
+pub struct KhrDepthStencilResolveFn {}
+unsafe impl Send for KhrDepthStencilResolveFn {}
+unsafe impl Sync for KhrDepthStencilResolveFn {}
+impl ::std::clone::Clone for KhrDepthStencilResolveFn {
+    fn clone(&self) -> Self {
+        KhrDepthStencilResolveFn {}
+    }
+}
+impl KhrDepthStencilResolveFn {
+    pub fn load<F>(mut _f: F) -> Self
+    where
+        F: FnMut(&::std::ffi::CStr) -> *const c_void,
+    {
+        KhrDepthStencilResolveFn {}
+    }
+}
+#[doc = "Generated from \'VK_KHR_depth_stencil_resolve\'"]
+impl StructureType {
+    pub const PHYSICAL_DEVICE_DEPTH_STENCIL_RESOLVE_PROPERTIES_KHR: Self =
+        StructureType(1000199000);
+}
+#[doc = "Generated from \'VK_KHR_depth_stencil_resolve\'"]
+impl StructureType {
+    pub const SUBPASS_DESCRIPTION_DEPTH_STENCIL_RESOLVE_KHR: Self = StructureType(1000199001);
+}
+impl KhrSwapchainMutableFormatFn {
+    pub fn name() -> &'static ::std::ffi::CStr {
+        ::std::ffi::CStr::from_bytes_with_nul(b"VK_KHR_swapchain_mutable_format\0")
+            .expect("Wrong extension string")
+    }
+}
+pub struct KhrSwapchainMutableFormatFn {}
+unsafe impl Send for KhrSwapchainMutableFormatFn {}
+unsafe impl Sync for KhrSwapchainMutableFormatFn {}
+impl ::std::clone::Clone for KhrSwapchainMutableFormatFn {
+    fn clone(&self) -> Self {
+        KhrSwapchainMutableFormatFn {}
+    }
+}
+impl KhrSwapchainMutableFormatFn {
+    pub fn load<F>(mut _f: F) -> Self
+    where
+        F: FnMut(&::std::ffi::CStr) -> *const c_void,
+    {
+        KhrSwapchainMutableFormatFn {}
+    }
+}
+#[doc = "Generated from \'VK_KHR_swapchain_mutable_format\'"]
+impl SwapchainCreateFlagsKHR {
+    pub const MUTABLE_FORMAT: Self = SwapchainCreateFlagsKHR(0b100);
+}
+impl NvComputeShaderDerivativesFn {
+    pub fn name() -> &'static ::std::ffi::CStr {
+        ::std::ffi::CStr::from_bytes_with_nul(b"VK_NV_compute_shader_derivatives\0")
+            .expect("Wrong extension string")
+    }
+}
+pub struct NvComputeShaderDerivativesFn {}
+unsafe impl Send for NvComputeShaderDerivativesFn {}
+unsafe impl Sync for NvComputeShaderDerivativesFn {}
+impl ::std::clone::Clone for NvComputeShaderDerivativesFn {
+    fn clone(&self) -> Self {
+        NvComputeShaderDerivativesFn {}
+    }
+}
+impl NvComputeShaderDerivativesFn {
+    pub fn load<F>(mut _f: F) -> Self
+    where
+        F: FnMut(&::std::ffi::CStr) -> *const c_void,
+    {
+        NvComputeShaderDerivativesFn {}
+    }
+}
+#[doc = "Generated from \'VK_NV_compute_shader_derivatives\'"]
+impl StructureType {
+    pub const PHYSICAL_DEVICE_COMPUTE_SHADER_DERIVATIVES_FEATURES_NV: Self =
+        StructureType(1000201000);
+}
+impl NvMeshShaderFn {
+    pub fn name() -> &'static ::std::ffi::CStr {
+        ::std::ffi::CStr::from_bytes_with_nul(b"VK_NV_mesh_shader\0")
+            .expect("Wrong extension string")
+    }
+}
+#[allow(non_camel_case_types)]
+pub type PFN_vkCmdDrawMeshTasksNV =
+    extern "system" fn(command_buffer: CommandBuffer, task_count: u32, first_task: u32) -> c_void;
+#[allow(non_camel_case_types)]
+pub type PFN_vkCmdDrawMeshTasksIndirectNV = extern "system" fn(
+    command_buffer: CommandBuffer,
+    buffer: Buffer,
+    offset: DeviceSize,
+    draw_count: u32,
+    stride: u32,
+) -> c_void;
+#[allow(non_camel_case_types)]
+pub type PFN_vkCmdDrawMeshTasksIndirectCountNV = extern "system" fn(
+    command_buffer: CommandBuffer,
+    buffer: Buffer,
+    offset: DeviceSize,
+    count_buffer: Buffer,
+    count_buffer_offset: DeviceSize,
+    max_draw_count: u32,
+    stride: u32,
+) -> c_void;
+pub struct NvMeshShaderFn {
+    pub cmd_draw_mesh_tasks_nv: extern "system" fn(
+        command_buffer: CommandBuffer,
+        task_count: u32,
+        first_task: u32,
+    ) -> c_void,
+    pub cmd_draw_mesh_tasks_indirect_nv: extern "system" fn(
+        command_buffer: CommandBuffer,
+        buffer: Buffer,
+        offset: DeviceSize,
+        draw_count: u32,
+        stride: u32,
+    ) -> c_void,
+    pub cmd_draw_mesh_tasks_indirect_count_nv: extern "system" fn(
+        command_buffer: CommandBuffer,
+        buffer: Buffer,
+        offset: DeviceSize,
+        count_buffer: Buffer,
+        count_buffer_offset: DeviceSize,
+        max_draw_count: u32,
+        stride: u32,
+    ) -> c_void,
+}
+unsafe impl Send for NvMeshShaderFn {}
+unsafe impl Sync for NvMeshShaderFn {}
+impl ::std::clone::Clone for NvMeshShaderFn {
+    fn clone(&self) -> Self {
+        NvMeshShaderFn {
+            cmd_draw_mesh_tasks_nv: self.cmd_draw_mesh_tasks_nv,
+            cmd_draw_mesh_tasks_indirect_nv: self.cmd_draw_mesh_tasks_indirect_nv,
+            cmd_draw_mesh_tasks_indirect_count_nv: self.cmd_draw_mesh_tasks_indirect_count_nv,
+        }
+    }
+}
+impl NvMeshShaderFn {
+    pub fn load<F>(mut _f: F) -> Self
+    where
+        F: FnMut(&::std::ffi::CStr) -> *const c_void,
+    {
+        NvMeshShaderFn {
+            cmd_draw_mesh_tasks_nv: unsafe {
+                extern "system" fn cmd_draw_mesh_tasks_nv(
+                    _command_buffer: CommandBuffer,
+                    _task_count: u32,
+                    _first_task: u32,
+                ) -> c_void {
+                    panic!(concat!(
+                        "Unable to load ",
+                        stringify!(cmd_draw_mesh_tasks_nv)
+                    ))
+                }
+                let raw_name = stringify!(vkCmdDrawMeshTasksNV);
+                let cname = ::std::ffi::CString::new(raw_name).unwrap();
+                let val = _f(&cname);
+                if val.is_null() {
+                    cmd_draw_mesh_tasks_nv
+                } else {
+                    ::std::mem::transmute(val)
+                }
+            },
+            cmd_draw_mesh_tasks_indirect_nv: unsafe {
+                extern "system" fn cmd_draw_mesh_tasks_indirect_nv(
+                    _command_buffer: CommandBuffer,
+                    _buffer: Buffer,
+                    _offset: DeviceSize,
+                    _draw_count: u32,
+                    _stride: u32,
+                ) -> c_void {
+                    panic!(concat!(
+                        "Unable to load ",
+                        stringify!(cmd_draw_mesh_tasks_indirect_nv)
+                    ))
+                }
+                let raw_name = stringify!(vkCmdDrawMeshTasksIndirectNV);
+                let cname = ::std::ffi::CString::new(raw_name).unwrap();
+                let val = _f(&cname);
+                if val.is_null() {
+                    cmd_draw_mesh_tasks_indirect_nv
+                } else {
+                    ::std::mem::transmute(val)
+                }
+            },
+            cmd_draw_mesh_tasks_indirect_count_nv: unsafe {
+                extern "system" fn cmd_draw_mesh_tasks_indirect_count_nv(
+                    _command_buffer: CommandBuffer,
+                    _buffer: Buffer,
+                    _offset: DeviceSize,
+                    _count_buffer: Buffer,
+                    _count_buffer_offset: DeviceSize,
+                    _max_draw_count: u32,
+                    _stride: u32,
+                ) -> c_void {
+                    panic!(concat!(
+                        "Unable to load ",
+                        stringify!(cmd_draw_mesh_tasks_indirect_count_nv)
+                    ))
+                }
+                let raw_name = stringify!(vkCmdDrawMeshTasksIndirectCountNV);
+                let cname = ::std::ffi::CString::new(raw_name).unwrap();
+                let val = _f(&cname);
+                if val.is_null() {
+                    cmd_draw_mesh_tasks_indirect_count_nv
+                } else {
+                    ::std::mem::transmute(val)
+                }
+            },
+        }
+    }
+    #[doc = ""]
+    pub unsafe fn cmd_draw_mesh_tasks_nv(
+        &self,
+        command_buffer: CommandBuffer,
+        task_count: u32,
+        first_task: u32,
+    ) -> c_void {
+        (self.cmd_draw_mesh_tasks_nv)(command_buffer, task_count, first_task)
+    }
+    #[doc = ""]
+    pub unsafe fn cmd_draw_mesh_tasks_indirect_nv(
+        &self,
+        command_buffer: CommandBuffer,
+        buffer: Buffer,
+        offset: DeviceSize,
+        draw_count: u32,
+        stride: u32,
+    ) -> c_void {
+        (self.cmd_draw_mesh_tasks_indirect_nv)(command_buffer, buffer, offset, draw_count, stride)
+    }
+    #[doc = ""]
+    pub unsafe fn cmd_draw_mesh_tasks_indirect_count_nv(
+        &self,
+        command_buffer: CommandBuffer,
+        buffer: Buffer,
+        offset: DeviceSize,
+        count_buffer: Buffer,
+        count_buffer_offset: DeviceSize,
+        max_draw_count: u32,
+        stride: u32,
+    ) -> c_void {
+        (self.cmd_draw_mesh_tasks_indirect_count_nv)(
+            command_buffer,
+            buffer,
+            offset,
+            count_buffer,
+            count_buffer_offset,
+            max_draw_count,
+            stride,
+        )
+    }
+}
+#[doc = "Generated from \'VK_NV_mesh_shader\'"]
+impl StructureType {
+    pub const PHYSICAL_DEVICE_MESH_SHADER_FEATURES_NV: Self = StructureType(1000202000);
+}
+#[doc = "Generated from \'VK_NV_mesh_shader\'"]
+impl StructureType {
+    pub const PHYSICAL_DEVICE_MESH_SHADER_PROPERTIES_NV: Self = StructureType(1000202001);
+}
+#[doc = "Generated from \'VK_NV_mesh_shader\'"]
+impl ShaderStageFlags {
+    pub const TASK_NV: Self = ShaderStageFlags(0b1000000);
+}
+#[doc = "Generated from \'VK_NV_mesh_shader\'"]
+impl ShaderStageFlags {
+    pub const MESH_NV: Self = ShaderStageFlags(0b10000000);
+}
+#[doc = "Generated from \'VK_NV_mesh_shader\'"]
+impl PipelineStageFlags {
+    pub const TASK_SHADER_NV: Self = PipelineStageFlags(0b10000000000000000000);
+}
+#[doc = "Generated from \'VK_NV_mesh_shader\'"]
+impl PipelineStageFlags {
+    pub const MESH_SHADER_NV: Self = PipelineStageFlags(0b100000000000000000000);
+}
+impl NvFragmentShaderBarycentricFn {
+    pub fn name() -> &'static ::std::ffi::CStr {
+        ::std::ffi::CStr::from_bytes_with_nul(b"VK_NV_fragment_shader_barycentric\0")
+            .expect("Wrong extension string")
+    }
+}
+pub struct NvFragmentShaderBarycentricFn {}
+unsafe impl Send for NvFragmentShaderBarycentricFn {}
+unsafe impl Sync for NvFragmentShaderBarycentricFn {}
+impl ::std::clone::Clone for NvFragmentShaderBarycentricFn {
+    fn clone(&self) -> Self {
+        NvFragmentShaderBarycentricFn {}
+    }
+}
+impl NvFragmentShaderBarycentricFn {
+    pub fn load<F>(mut _f: F) -> Self
+    where
+        F: FnMut(&::std::ffi::CStr) -> *const c_void,
+    {
+        NvFragmentShaderBarycentricFn {}
+    }
+}
+#[doc = "Generated from \'VK_NV_fragment_shader_barycentric\'"]
+impl StructureType {
+    pub const PHYSICAL_DEVICE_FRAGMENT_SHADER_BARYCENTRIC_FEATURES_NV: Self =
+        StructureType(1000203000);
+}
+impl NvShaderImageFootprintFn {
+    pub fn name() -> &'static ::std::ffi::CStr {
+        ::std::ffi::CStr::from_bytes_with_nul(b"VK_NV_shader_image_footprint\0")
+            .expect("Wrong extension string")
+    }
+}
+pub struct NvShaderImageFootprintFn {}
+unsafe impl Send for NvShaderImageFootprintFn {}
+unsafe impl Sync for NvShaderImageFootprintFn {}
+impl ::std::clone::Clone for NvShaderImageFootprintFn {
+    fn clone(&self) -> Self {
+        NvShaderImageFootprintFn {}
+    }
+}
+impl NvShaderImageFootprintFn {
+    pub fn load<F>(mut _f: F) -> Self
+    where
+        F: FnMut(&::std::ffi::CStr) -> *const c_void,
+    {
+        NvShaderImageFootprintFn {}
+    }
+}
+#[doc = "Generated from \'VK_NV_shader_image_footprint\'"]
+impl StructureType {
+    pub const PHYSICAL_DEVICE_SHADER_IMAGE_FOOTPRINT_FEATURES_NV: Self = StructureType(1000204000);
+}
+impl NvScissorExclusiveFn {
+    pub fn name() -> &'static ::std::ffi::CStr {
::std::ffi::CStr::from_bytes_with_nul(b"VK_NV_scissor_exclusive\0") + .expect("Wrong extension string") + } +} +#[allow(non_camel_case_types)] +pub type PFN_vkCmdSetExclusiveScissorNV = extern "system" fn( + command_buffer: CommandBuffer, + first_exclusive_scissor: u32, + exclusive_scissor_count: u32, + p_exclusive_scissors: *const Rect2D, +) -> c_void; +pub struct NvScissorExclusiveFn { + pub cmd_set_exclusive_scissor_nv: extern "system" fn( + command_buffer: CommandBuffer, + first_exclusive_scissor: u32, + exclusive_scissor_count: u32, + p_exclusive_scissors: *const Rect2D, + ) -> c_void, +} +unsafe impl Send for NvScissorExclusiveFn {} +unsafe impl Sync for NvScissorExclusiveFn {} +impl ::std::clone::Clone for NvScissorExclusiveFn { + fn clone(&self) -> Self { + NvScissorExclusiveFn { + cmd_set_exclusive_scissor_nv: self.cmd_set_exclusive_scissor_nv, + } + } +} +impl NvScissorExclusiveFn { + pub fn load(mut _f: F) -> Self + where + F: FnMut(&::std::ffi::CStr) -> *const c_void, + { + NvScissorExclusiveFn { + cmd_set_exclusive_scissor_nv: unsafe { + extern "system" fn cmd_set_exclusive_scissor_nv( + _command_buffer: CommandBuffer, + _first_exclusive_scissor: u32, + _exclusive_scissor_count: u32, + _p_exclusive_scissors: *const Rect2D, + ) -> c_void { + panic!(concat!( + "Unable to load ", + stringify!(cmd_set_exclusive_scissor_nv) + )) + } + let raw_name = stringify!(vkCmdSetExclusiveScissorNV); + let cname = ::std::ffi::CString::new(raw_name).unwrap(); + let val = _f(&cname); + if val.is_null() { + cmd_set_exclusive_scissor_nv + } else { + ::std::mem::transmute(val) + } + }, + } + } + #[doc = ""] + pub unsafe fn cmd_set_exclusive_scissor_nv( + &self, + command_buffer: CommandBuffer, + first_exclusive_scissor: u32, + exclusive_scissor_count: u32, + p_exclusive_scissors: *const Rect2D, + ) -> c_void { + (self.cmd_set_exclusive_scissor_nv)( + command_buffer, + first_exclusive_scissor, + exclusive_scissor_count, + p_exclusive_scissors, + ) + } +} +#[doc = "Generated from \'VK_NV_scissor_exclusive\'"] +impl StructureType { + pub const PIPELINE_VIEWPORT_EXCLUSIVE_SCISSOR_STATE_CREATE_INFO_NV: Self = + StructureType(1000205000); +} +#[doc = "Generated from \'VK_NV_scissor_exclusive\'"] +impl DynamicState { + pub const EXCLUSIVE_SCISSOR_NV: Self = DynamicState(1000205001); +} +#[doc = "Generated from \'VK_NV_scissor_exclusive\'"] +impl StructureType { + pub const PHYSICAL_DEVICE_EXCLUSIVE_SCISSOR_FEATURES_NV: Self = StructureType(1000205002); +} +impl NvDeviceDiagnosticCheckpointsFn { + pub fn name() -> &'static ::std::ffi::CStr { + ::std::ffi::CStr::from_bytes_with_nul(b"VK_NV_device_diagnostic_checkpoints\0") + .expect("Wrong extension string") + } +} +#[allow(non_camel_case_types)] +pub type PFN_vkCmdSetCheckpointNV = + extern "system" fn(command_buffer: CommandBuffer, p_checkpoint_marker: *const c_void) -> c_void; +#[allow(non_camel_case_types)] +pub type PFN_vkGetQueueCheckpointDataNV = extern "system" fn( + queue: Queue, + p_checkpoint_data_count: *mut u32, + p_checkpoint_data: *mut CheckpointDataNV, +) -> c_void; +pub struct NvDeviceDiagnosticCheckpointsFn { + pub cmd_set_checkpoint_nv: extern "system" fn( + command_buffer: CommandBuffer, + p_checkpoint_marker: *const c_void, + ) -> c_void, + pub get_queue_checkpoint_data_nv: extern "system" fn( + queue: Queue, + p_checkpoint_data_count: *mut u32, + p_checkpoint_data: *mut CheckpointDataNV, + ) -> c_void, +} +unsafe impl Send for NvDeviceDiagnosticCheckpointsFn {} +unsafe impl Sync for NvDeviceDiagnosticCheckpointsFn {} +impl 
::std::clone::Clone for NvDeviceDiagnosticCheckpointsFn { + fn clone(&self) -> Self { + NvDeviceDiagnosticCheckpointsFn { + cmd_set_checkpoint_nv: self.cmd_set_checkpoint_nv, + get_queue_checkpoint_data_nv: self.get_queue_checkpoint_data_nv, + } + } +} +impl NvDeviceDiagnosticCheckpointsFn { + pub fn load(mut _f: F) -> Self + where + F: FnMut(&::std::ffi::CStr) -> *const c_void, + { + NvDeviceDiagnosticCheckpointsFn { + cmd_set_checkpoint_nv: unsafe { + extern "system" fn cmd_set_checkpoint_nv( + _command_buffer: CommandBuffer, + _p_checkpoint_marker: *const c_void, + ) -> c_void { + panic!(concat!( + "Unable to load ", + stringify!(cmd_set_checkpoint_nv) + )) + } + let raw_name = stringify!(vkCmdSetCheckpointNV); + let cname = ::std::ffi::CString::new(raw_name).unwrap(); + let val = _f(&cname); + if val.is_null() { + cmd_set_checkpoint_nv + } else { + ::std::mem::transmute(val) + } + }, + get_queue_checkpoint_data_nv: unsafe { + extern "system" fn get_queue_checkpoint_data_nv( + _queue: Queue, + _p_checkpoint_data_count: *mut u32, + _p_checkpoint_data: *mut CheckpointDataNV, + ) -> c_void { + panic!(concat!( + "Unable to load ", + stringify!(get_queue_checkpoint_data_nv) + )) + } + let raw_name = stringify!(vkGetQueueCheckpointDataNV); + let cname = ::std::ffi::CString::new(raw_name).unwrap(); + let val = _f(&cname); + if val.is_null() { + get_queue_checkpoint_data_nv + } else { + ::std::mem::transmute(val) + } + }, + } + } + #[doc = ""] + pub unsafe fn cmd_set_checkpoint_nv( + &self, + command_buffer: CommandBuffer, + p_checkpoint_marker: *const c_void, + ) -> c_void { + (self.cmd_set_checkpoint_nv)(command_buffer, p_checkpoint_marker) + } + #[doc = ""] + pub unsafe fn get_queue_checkpoint_data_nv( + &self, + queue: Queue, + p_checkpoint_data_count: *mut u32, + p_checkpoint_data: *mut CheckpointDataNV, + ) -> c_void { + (self.get_queue_checkpoint_data_nv)(queue, p_checkpoint_data_count, p_checkpoint_data) + } +} +#[doc = "Generated from \'VK_NV_device_diagnostic_checkpoints\'"] +impl StructureType { + pub const CHECKPOINT_DATA_NV: Self = StructureType(1000206000); +} +#[doc = "Generated from \'VK_NV_device_diagnostic_checkpoints\'"] +impl StructureType { + pub const QUEUE_FAMILY_CHECKPOINT_PROPERTIES_NV: Self = StructureType(1000206001); +} +impl KhrExtension208Fn { + pub fn name() -> &'static ::std::ffi::CStr { + ::std::ffi::CStr::from_bytes_with_nul(b"VK_KHR_extension_208\0") + .expect("Wrong extension string") + } +} +pub struct KhrExtension208Fn {} +unsafe impl Send for KhrExtension208Fn {} +unsafe impl Sync for KhrExtension208Fn {} +impl ::std::clone::Clone for KhrExtension208Fn { + fn clone(&self) -> Self { + KhrExtension208Fn {} + } +} +impl KhrExtension208Fn { + pub fn load(mut _f: F) -> Self + where + F: FnMut(&::std::ffi::CStr) -> *const c_void, + { + KhrExtension208Fn {} + } +} +impl KhrExtension209Fn { + pub fn name() -> &'static ::std::ffi::CStr { + ::std::ffi::CStr::from_bytes_with_nul(b"VK_KHR_extension_209\0") + .expect("Wrong extension string") + } +} +pub struct KhrExtension209Fn {} +unsafe impl Send for KhrExtension209Fn {} +unsafe impl Sync for KhrExtension209Fn {} +impl ::std::clone::Clone for KhrExtension209Fn { + fn clone(&self) -> Self { + KhrExtension209Fn {} + } +} +impl KhrExtension209Fn { + pub fn load(mut _f: F) -> Self + where + F: FnMut(&::std::ffi::CStr) -> *const c_void, + { + KhrExtension209Fn {} + } +} +impl IntelExtension210Fn { + pub fn name() -> &'static ::std::ffi::CStr { + ::std::ffi::CStr::from_bytes_with_nul(b"VK_INTEL_extension_210\0") + 
.expect("Wrong extension string") + } +} +pub struct IntelExtension210Fn {} +unsafe impl Send for IntelExtension210Fn {} +unsafe impl Sync for IntelExtension210Fn {} +impl ::std::clone::Clone for IntelExtension210Fn { + fn clone(&self) -> Self { + IntelExtension210Fn {} + } +} +impl IntelExtension210Fn { + pub fn load(mut _f: F) -> Self + where + F: FnMut(&::std::ffi::CStr) -> *const c_void, + { + IntelExtension210Fn {} + } +} +impl IntelExtension211Fn { + pub fn name() -> &'static ::std::ffi::CStr { + ::std::ffi::CStr::from_bytes_with_nul(b"VK_INTEL_extension_211\0") + .expect("Wrong extension string") + } +} +pub struct IntelExtension211Fn {} +unsafe impl Send for IntelExtension211Fn {} +unsafe impl Sync for IntelExtension211Fn {} +impl ::std::clone::Clone for IntelExtension211Fn { + fn clone(&self) -> Self { + IntelExtension211Fn {} + } +} +impl IntelExtension211Fn { + pub fn load(mut _f: F) -> Self + where + F: FnMut(&::std::ffi::CStr) -> *const c_void, + { + IntelExtension211Fn {} + } +} +impl KhrVulkanMemoryModelFn { + pub fn name() -> &'static ::std::ffi::CStr { + ::std::ffi::CStr::from_bytes_with_nul(b"VK_KHR_vulkan_memory_model\0") + .expect("Wrong extension string") + } +} +pub struct KhrVulkanMemoryModelFn {} +unsafe impl Send for KhrVulkanMemoryModelFn {} +unsafe impl Sync for KhrVulkanMemoryModelFn {} +impl ::std::clone::Clone for KhrVulkanMemoryModelFn { + fn clone(&self) -> Self { + KhrVulkanMemoryModelFn {} + } +} +impl KhrVulkanMemoryModelFn { + pub fn load(mut _f: F) -> Self + where + F: FnMut(&::std::ffi::CStr) -> *const c_void, + { + KhrVulkanMemoryModelFn {} + } +} +#[doc = "Generated from \'VK_KHR_vulkan_memory_model\'"] +impl StructureType { + pub const PHYSICAL_DEVICE_VULKAN_MEMORY_MODEL_FEATURES_KHR: Self = StructureType(1000211000); +} +impl ExtPciBusInfoFn { + pub fn name() -> &'static ::std::ffi::CStr { + ::std::ffi::CStr::from_bytes_with_nul(b"VK_EXT_pci_bus_info\0") + .expect("Wrong extension string") + } +} +pub struct ExtPciBusInfoFn {} +unsafe impl Send for ExtPciBusInfoFn {} +unsafe impl Sync for ExtPciBusInfoFn {} +impl ::std::clone::Clone for ExtPciBusInfoFn { + fn clone(&self) -> Self { + ExtPciBusInfoFn {} + } +} +impl ExtPciBusInfoFn { + pub fn load(mut _f: F) -> Self + where + F: FnMut(&::std::ffi::CStr) -> *const c_void, + { + ExtPciBusInfoFn {} + } +} +#[doc = "Generated from \'VK_EXT_pci_bus_info\'"] +impl StructureType { + pub const PHYSICAL_DEVICE_PCI_BUS_INFO_PROPERTIES_EXT: Self = StructureType(1000212000); +} +impl AmdExtension214Fn { + pub fn name() -> &'static ::std::ffi::CStr { + ::std::ffi::CStr::from_bytes_with_nul(b"VK_AMD_extension_214\0") + .expect("Wrong extension string") + } +} +pub struct AmdExtension214Fn {} +unsafe impl Send for AmdExtension214Fn {} +unsafe impl Sync for AmdExtension214Fn {} +impl ::std::clone::Clone for AmdExtension214Fn { + fn clone(&self) -> Self { + AmdExtension214Fn {} + } +} +impl AmdExtension214Fn { + pub fn load(mut _f: F) -> Self + where + F: FnMut(&::std::ffi::CStr) -> *const c_void, + { + AmdExtension214Fn {} + } +} +impl FuchsiaImagepipeSurfaceFn { + pub fn name() -> &'static ::std::ffi::CStr { + ::std::ffi::CStr::from_bytes_with_nul(b"VK_FUCHSIA_imagepipe_surface\0") + .expect("Wrong extension string") + } +} +#[allow(non_camel_case_types)] +pub type PFN_vkCreateImagePipeSurfaceFUCHSIA = extern "system" fn( + instance: Instance, + p_create_info: *const ImagePipeSurfaceCreateInfoFUCHSIA, + p_allocator: *const AllocationCallbacks, + p_surface: *mut SurfaceKHR, +) -> Result; +pub struct 
FuchsiaImagepipeSurfaceFn { + pub create_image_pipe_surface_fuchsia: extern "system" fn( + instance: Instance, + p_create_info: *const ImagePipeSurfaceCreateInfoFUCHSIA, + p_allocator: *const AllocationCallbacks, + p_surface: *mut SurfaceKHR, + ) -> Result, +} +unsafe impl Send for FuchsiaImagepipeSurfaceFn {} +unsafe impl Sync for FuchsiaImagepipeSurfaceFn {} +impl ::std::clone::Clone for FuchsiaImagepipeSurfaceFn { + fn clone(&self) -> Self { + FuchsiaImagepipeSurfaceFn { + create_image_pipe_surface_fuchsia: self.create_image_pipe_surface_fuchsia, + } + } +} +impl FuchsiaImagepipeSurfaceFn { + pub fn load(mut _f: F) -> Self + where + F: FnMut(&::std::ffi::CStr) -> *const c_void, + { + FuchsiaImagepipeSurfaceFn { + create_image_pipe_surface_fuchsia: unsafe { + extern "system" fn create_image_pipe_surface_fuchsia( + _instance: Instance, + _p_create_info: *const ImagePipeSurfaceCreateInfoFUCHSIA, + _p_allocator: *const AllocationCallbacks, + _p_surface: *mut SurfaceKHR, + ) -> Result { + panic!(concat!( + "Unable to load ", + stringify!(create_image_pipe_surface_fuchsia) + )) + } + let raw_name = stringify!(vkCreateImagePipeSurfaceFUCHSIA); + let cname = ::std::ffi::CString::new(raw_name).unwrap(); + let val = _f(&cname); + if val.is_null() { + create_image_pipe_surface_fuchsia + } else { + ::std::mem::transmute(val) + } + }, + } + } + #[doc = ""] + pub unsafe fn create_image_pipe_surface_fuchsia( + &self, + instance: Instance, + p_create_info: *const ImagePipeSurfaceCreateInfoFUCHSIA, + p_allocator: *const AllocationCallbacks, + p_surface: *mut SurfaceKHR, + ) -> Result { + (self.create_image_pipe_surface_fuchsia)(instance, p_create_info, p_allocator, p_surface) + } +} +#[doc = "Generated from \'VK_FUCHSIA_imagepipe_surface\'"] +impl StructureType { + pub const IMAGEPIPE_SURFACE_CREATE_INFO_FUCHSIA: Self = StructureType(1000214000); +} +impl GoogleExtension216Fn { + pub fn name() -> &'static ::std::ffi::CStr { + ::std::ffi::CStr::from_bytes_with_nul(b"VK_GOOGLE_extension_216\0") + .expect("Wrong extension string") + } +} +pub struct GoogleExtension216Fn {} +unsafe impl Send for GoogleExtension216Fn {} +unsafe impl Sync for GoogleExtension216Fn {} +impl ::std::clone::Clone for GoogleExtension216Fn { + fn clone(&self) -> Self { + GoogleExtension216Fn {} + } +} +impl GoogleExtension216Fn { + pub fn load(mut _f: F) -> Self + where + F: FnMut(&::std::ffi::CStr) -> *const c_void, + { + GoogleExtension216Fn {} + } +} +impl GoogleExtension217Fn { + pub fn name() -> &'static ::std::ffi::CStr { + ::std::ffi::CStr::from_bytes_with_nul(b"VK_GOOGLE_extension_217\0") + .expect("Wrong extension string") + } +} +pub struct GoogleExtension217Fn {} +unsafe impl Send for GoogleExtension217Fn {} +unsafe impl Sync for GoogleExtension217Fn {} +impl ::std::clone::Clone for GoogleExtension217Fn { + fn clone(&self) -> Self { + GoogleExtension217Fn {} + } +} +impl GoogleExtension217Fn { + pub fn load(mut _f: F) -> Self + where + F: FnMut(&::std::ffi::CStr) -> *const c_void, + { + GoogleExtension217Fn {} + } +} +impl ExtMacosIosWindowFn { + pub fn name() -> &'static ::std::ffi::CStr { + ::std::ffi::CStr::from_bytes_with_nul(b"VK_EXT_macos_ios_window\0") + .expect("Wrong extension string") + } +} +pub struct ExtMacosIosWindowFn {} +unsafe impl Send for ExtMacosIosWindowFn {} +unsafe impl Sync for ExtMacosIosWindowFn {} +impl ::std::clone::Clone for ExtMacosIosWindowFn { + fn clone(&self) -> Self { + ExtMacosIosWindowFn {} + } +} +impl ExtMacosIosWindowFn { + pub fn load(mut _f: F) -> Self + where + F: 
FnMut(&::std::ffi::CStr) -> *const c_void, + { + ExtMacosIosWindowFn {} + } +} +impl ExtFragmentDensityMapFn { + pub fn name() -> &'static ::std::ffi::CStr { + ::std::ffi::CStr::from_bytes_with_nul(b"VK_EXT_fragment_density_map\0") + .expect("Wrong extension string") + } +} +pub struct ExtFragmentDensityMapFn {} +unsafe impl Send for ExtFragmentDensityMapFn {} +unsafe impl Sync for ExtFragmentDensityMapFn {} +impl ::std::clone::Clone for ExtFragmentDensityMapFn { + fn clone(&self) -> Self { + ExtFragmentDensityMapFn {} + } +} +impl ExtFragmentDensityMapFn { + pub fn load(mut _f: F) -> Self + where + F: FnMut(&::std::ffi::CStr) -> *const c_void, + { + ExtFragmentDensityMapFn {} + } +} +#[doc = "Generated from \'VK_EXT_fragment_density_map\'"] +impl StructureType { + pub const PHYSICAL_DEVICE_FRAGMENT_DENSITY_MAP_FEATURES_EXT: Self = StructureType(1000218000); +} +#[doc = "Generated from \'VK_EXT_fragment_density_map\'"] +impl StructureType { + pub const PHYSICAL_DEVICE_FRAGMENT_DENSITY_MAP_PROPERTIES_EXT: Self = StructureType(1000218001); +} +#[doc = "Generated from \'VK_EXT_fragment_density_map\'"] +impl StructureType { + pub const RENDER_PASS_FRAGMENT_DENSITY_MAP_CREATE_INFO_EXT: Self = StructureType(1000218002); +} +#[doc = "Generated from \'VK_EXT_fragment_density_map\'"] +impl ImageCreateFlags { + pub const SUBSAMPLED_EXT: Self = ImageCreateFlags(0b100000000000000); +} +#[doc = "Generated from \'VK_EXT_fragment_density_map\'"] +impl ImageLayout { + pub const FRAGMENT_DENSITY_MAP_OPTIMAL_EXT: Self = ImageLayout(1000218000); +} +#[doc = "Generated from \'VK_EXT_fragment_density_map\'"] +impl AccessFlags { + pub const FRAGMENT_DENSITY_MAP_READ_EXT: Self = AccessFlags(0b1000000000000000000000000); +} +#[doc = "Generated from \'VK_EXT_fragment_density_map\'"] +impl FormatFeatureFlags { + pub const FRAGMENT_DENSITY_MAP_EXT: Self = FormatFeatureFlags(0b1000000000000000000000000); +} +#[doc = "Generated from \'VK_EXT_fragment_density_map\'"] +impl ImageUsageFlags { + pub const FRAGMENT_DENSITY_MAP_EXT: Self = ImageUsageFlags(0b1000000000); +} +#[doc = "Generated from \'VK_EXT_fragment_density_map\'"] +impl ImageViewCreateFlags { + pub const FRAGMENT_DENSITY_MAP_DYNAMIC_EXT: Self = ImageViewCreateFlags(0b1); +} +#[doc = "Generated from \'VK_EXT_fragment_density_map\'"] +impl PipelineStageFlags { + pub const FRAGMENT_DENSITY_PROCESS_EXT: Self = PipelineStageFlags(0b100000000000000000000000); +} +#[doc = "Generated from \'VK_EXT_fragment_density_map\'"] +impl SamplerCreateFlags { + pub const SUBSAMPLED_EXT: Self = SamplerCreateFlags(0b1); +} +#[doc = "Generated from \'VK_EXT_fragment_density_map\'"] +impl SamplerCreateFlags { + pub const SUBSAMPLED_COARSE_RECONSTRUCTION_EXT: Self = SamplerCreateFlags(0b10); +} +impl ExtExtension220Fn { + pub fn name() -> &'static ::std::ffi::CStr { + ::std::ffi::CStr::from_bytes_with_nul(b"VK_EXT_extension_220\0") + .expect("Wrong extension string") + } +} +pub struct ExtExtension220Fn {} +unsafe impl Send for ExtExtension220Fn {} +unsafe impl Sync for ExtExtension220Fn {} +impl ::std::clone::Clone for ExtExtension220Fn { + fn clone(&self) -> Self { + ExtExtension220Fn {} + } +} +impl ExtExtension220Fn { + pub fn load(mut _f: F) -> Self + where + F: FnMut(&::std::ffi::CStr) -> *const c_void, + { + ExtExtension220Fn {} + } +} +impl KhrExtension221Fn { + pub fn name() -> &'static ::std::ffi::CStr { + ::std::ffi::CStr::from_bytes_with_nul(b"VK_KHR_extension_221\0") + .expect("Wrong extension string") + } +} +pub struct KhrExtension221Fn {} +unsafe impl Send for 
KhrExtension221Fn {} +unsafe impl Sync for KhrExtension221Fn {} +impl ::std::clone::Clone for KhrExtension221Fn { + fn clone(&self) -> Self { + KhrExtension221Fn {} + } +} +impl KhrExtension221Fn { + pub fn load(mut _f: F) -> Self + where + F: FnMut(&::std::ffi::CStr) -> *const c_void, + { + KhrExtension221Fn {} + } +} +#[doc = "Generated from \'VK_KHR_extension_221\'"] +impl RenderPassCreateFlags { + pub const RESERVED_0_KHR: Self = RenderPassCreateFlags(0b1); +} +impl ExtScalarBlockLayoutFn { + pub fn name() -> &'static ::std::ffi::CStr { + ::std::ffi::CStr::from_bytes_with_nul(b"VK_EXT_scalar_block_layout\0") + .expect("Wrong extension string") + } +} +pub struct ExtScalarBlockLayoutFn {} +unsafe impl Send for ExtScalarBlockLayoutFn {} +unsafe impl Sync for ExtScalarBlockLayoutFn {} +impl ::std::clone::Clone for ExtScalarBlockLayoutFn { + fn clone(&self) -> Self { + ExtScalarBlockLayoutFn {} + } +} +impl ExtScalarBlockLayoutFn { + pub fn load(mut _f: F) -> Self + where + F: FnMut(&::std::ffi::CStr) -> *const c_void, + { + ExtScalarBlockLayoutFn {} + } +} +#[doc = "Generated from \'VK_EXT_scalar_block_layout\'"] +impl StructureType { + pub const PHYSICAL_DEVICE_SCALAR_BLOCK_LAYOUT_FEATURES_EXT: Self = StructureType(1000221000); +} +impl ExtExtension223Fn { + pub fn name() -> &'static ::std::ffi::CStr { + ::std::ffi::CStr::from_bytes_with_nul(b"VK_EXT_extension_223\0") + .expect("Wrong extension string") + } +} +pub struct ExtExtension223Fn {} +unsafe impl Send for ExtExtension223Fn {} +unsafe impl Sync for ExtExtension223Fn {} +impl ::std::clone::Clone for ExtExtension223Fn { + fn clone(&self) -> Self { + ExtExtension223Fn {} + } +} +impl ExtExtension223Fn { + pub fn load(mut _f: F) -> Self + where + F: FnMut(&::std::ffi::CStr) -> *const c_void, + { + ExtExtension223Fn {} + } +} +impl GoogleHlslFunctionality1Fn { + pub fn name() -> &'static ::std::ffi::CStr { + ::std::ffi::CStr::from_bytes_with_nul(b"VK_GOOGLE_hlsl_functionality1\0") + .expect("Wrong extension string") + } +} +pub struct GoogleHlslFunctionality1Fn {} +unsafe impl Send for GoogleHlslFunctionality1Fn {} +unsafe impl Sync for GoogleHlslFunctionality1Fn {} +impl ::std::clone::Clone for GoogleHlslFunctionality1Fn { + fn clone(&self) -> Self { + GoogleHlslFunctionality1Fn {} + } +} +impl GoogleHlslFunctionality1Fn { + pub fn load(mut _f: F) -> Self + where + F: FnMut(&::std::ffi::CStr) -> *const c_void, + { + GoogleHlslFunctionality1Fn {} + } +} +impl GoogleDecorateStringFn { + pub fn name() -> &'static ::std::ffi::CStr { + ::std::ffi::CStr::from_bytes_with_nul(b"VK_GOOGLE_decorate_string\0") + .expect("Wrong extension string") + } +} +pub struct GoogleDecorateStringFn {} +unsafe impl Send for GoogleDecorateStringFn {} +unsafe impl Sync for GoogleDecorateStringFn {} +impl ::std::clone::Clone for GoogleDecorateStringFn { + fn clone(&self) -> Self { + GoogleDecorateStringFn {} + } +} +impl GoogleDecorateStringFn { + pub fn load(mut _f: F) -> Self + where + F: FnMut(&::std::ffi::CStr) -> *const c_void, + { + GoogleDecorateStringFn {} + } +} +impl AmdExtension226Fn { + pub fn name() -> &'static ::std::ffi::CStr { + ::std::ffi::CStr::from_bytes_with_nul(b"VK_AMD_extension_226\0") + .expect("Wrong extension string") + } +} +pub struct AmdExtension226Fn {} +unsafe impl Send for AmdExtension226Fn {} +unsafe impl Sync for AmdExtension226Fn {} +impl ::std::clone::Clone for AmdExtension226Fn { + fn clone(&self) -> Self { + AmdExtension226Fn {} + } +} +impl AmdExtension226Fn { + pub fn load(mut _f: F) -> Self + where + F: 
FnMut(&::std::ffi::CStr) -> *const c_void, + { + AmdExtension226Fn {} + } +} +impl AmdExtension227Fn { + pub fn name() -> &'static ::std::ffi::CStr { + ::std::ffi::CStr::from_bytes_with_nul(b"VK_AMD_extension_227\0") + .expect("Wrong extension string") + } +} +pub struct AmdExtension227Fn {} +unsafe impl Send for AmdExtension227Fn {} +unsafe impl Sync for AmdExtension227Fn {} +impl ::std::clone::Clone for AmdExtension227Fn { + fn clone(&self) -> Self { + AmdExtension227Fn {} + } +} +impl AmdExtension227Fn { + pub fn load(mut _f: F) -> Self + where + F: FnMut(&::std::ffi::CStr) -> *const c_void, + { + AmdExtension227Fn {} + } +} +impl AmdExtension228Fn { + pub fn name() -> &'static ::std::ffi::CStr { + ::std::ffi::CStr::from_bytes_with_nul(b"VK_AMD_extension_228\0") + .expect("Wrong extension string") + } +} +pub struct AmdExtension228Fn {} +unsafe impl Send for AmdExtension228Fn {} +unsafe impl Sync for AmdExtension228Fn {} +impl ::std::clone::Clone for AmdExtension228Fn { + fn clone(&self) -> Self { + AmdExtension228Fn {} + } +} +impl AmdExtension228Fn { + pub fn load(mut _f: F) -> Self + where + F: FnMut(&::std::ffi::CStr) -> *const c_void, + { + AmdExtension228Fn {} + } +} +impl AmdExtension229Fn { + pub fn name() -> &'static ::std::ffi::CStr { + ::std::ffi::CStr::from_bytes_with_nul(b"VK_AMD_extension_229\0") + .expect("Wrong extension string") + } +} +pub struct AmdExtension229Fn {} +unsafe impl Send for AmdExtension229Fn {} +unsafe impl Sync for AmdExtension229Fn {} +impl ::std::clone::Clone for AmdExtension229Fn { + fn clone(&self) -> Self { + AmdExtension229Fn {} + } +} +impl AmdExtension229Fn { + pub fn load(mut _f: F) -> Self + where + F: FnMut(&::std::ffi::CStr) -> *const c_void, + { + AmdExtension229Fn {} + } +} +impl AmdExtension230Fn { + pub fn name() -> &'static ::std::ffi::CStr { + ::std::ffi::CStr::from_bytes_with_nul(b"VK_AMD_extension_230\0") + .expect("Wrong extension string") + } +} +pub struct AmdExtension230Fn {} +unsafe impl Send for AmdExtension230Fn {} +unsafe impl Sync for AmdExtension230Fn {} +impl ::std::clone::Clone for AmdExtension230Fn { + fn clone(&self) -> Self { + AmdExtension230Fn {} + } +} +impl AmdExtension230Fn { + pub fn load(mut _f: F) -> Self + where + F: FnMut(&::std::ffi::CStr) -> *const c_void, + { + AmdExtension230Fn {} + } +} +impl AmdExtension231Fn { + pub fn name() -> &'static ::std::ffi::CStr { + ::std::ffi::CStr::from_bytes_with_nul(b"VK_AMD_extension_231\0") + .expect("Wrong extension string") + } +} +pub struct AmdExtension231Fn {} +unsafe impl Send for AmdExtension231Fn {} +unsafe impl Sync for AmdExtension231Fn {} +impl ::std::clone::Clone for AmdExtension231Fn { + fn clone(&self) -> Self { + AmdExtension231Fn {} + } +} +impl AmdExtension231Fn { + pub fn load(mut _f: F) -> Self + where + F: FnMut(&::std::ffi::CStr) -> *const c_void, + { + AmdExtension231Fn {} + } +} +impl AmdExtension232Fn { + pub fn name() -> &'static ::std::ffi::CStr { + ::std::ffi::CStr::from_bytes_with_nul(b"VK_AMD_extension_232\0") + .expect("Wrong extension string") + } +} +pub struct AmdExtension232Fn {} +unsafe impl Send for AmdExtension232Fn {} +unsafe impl Sync for AmdExtension232Fn {} +impl ::std::clone::Clone for AmdExtension232Fn { + fn clone(&self) -> Self { + AmdExtension232Fn {} + } +} +impl AmdExtension232Fn { + pub fn load(mut _f: F) -> Self + where + F: FnMut(&::std::ffi::CStr) -> *const c_void, + { + AmdExtension232Fn {} + } +} +impl AmdExtension233Fn { + pub fn name() -> &'static ::std::ffi::CStr { + 
::std::ffi::CStr::from_bytes_with_nul(b"VK_AMD_extension_233\0") + .expect("Wrong extension string") + } +} +pub struct AmdExtension233Fn {} +unsafe impl Send for AmdExtension233Fn {} +unsafe impl Sync for AmdExtension233Fn {} +impl ::std::clone::Clone for AmdExtension233Fn { + fn clone(&self) -> Self { + AmdExtension233Fn {} + } +} +impl AmdExtension233Fn { + pub fn load(mut _f: F) -> Self + where + F: FnMut(&::std::ffi::CStr) -> *const c_void, + { + AmdExtension233Fn {} + } +} +impl AmdExtension234Fn { + pub fn name() -> &'static ::std::ffi::CStr { + ::std::ffi::CStr::from_bytes_with_nul(b"VK_AMD_extension_234\0") + .expect("Wrong extension string") + } +} +pub struct AmdExtension234Fn {} +unsafe impl Send for AmdExtension234Fn {} +unsafe impl Sync for AmdExtension234Fn {} +impl ::std::clone::Clone for AmdExtension234Fn { + fn clone(&self) -> Self { + AmdExtension234Fn {} + } +} +impl AmdExtension234Fn { + pub fn load(mut _f: F) -> Self + where + F: FnMut(&::std::ffi::CStr) -> *const c_void, + { + AmdExtension234Fn {} + } +} +impl AmdExtension235Fn { + pub fn name() -> &'static ::std::ffi::CStr { + ::std::ffi::CStr::from_bytes_with_nul(b"VK_AMD_extension_235\0") + .expect("Wrong extension string") + } +} +pub struct AmdExtension235Fn {} +unsafe impl Send for AmdExtension235Fn {} +unsafe impl Sync for AmdExtension235Fn {} +impl ::std::clone::Clone for AmdExtension235Fn { + fn clone(&self) -> Self { + AmdExtension235Fn {} + } +} +impl AmdExtension235Fn { + pub fn load(mut _f: F) -> Self + where + F: FnMut(&::std::ffi::CStr) -> *const c_void, + { + AmdExtension235Fn {} + } +} +impl AmdExtension236Fn { + pub fn name() -> &'static ::std::ffi::CStr { + ::std::ffi::CStr::from_bytes_with_nul(b"VK_AMD_extension_236\0") + .expect("Wrong extension string") + } +} +pub struct AmdExtension236Fn {} +unsafe impl Send for AmdExtension236Fn {} +unsafe impl Sync for AmdExtension236Fn {} +impl ::std::clone::Clone for AmdExtension236Fn { + fn clone(&self) -> Self { + AmdExtension236Fn {} + } +} +impl AmdExtension236Fn { + pub fn load(mut _f: F) -> Self + where + F: FnMut(&::std::ffi::CStr) -> *const c_void, + { + AmdExtension236Fn {} + } +} +impl KhrExtension237Fn { + pub fn name() -> &'static ::std::ffi::CStr { + ::std::ffi::CStr::from_bytes_with_nul(b"VK_KHR_extension_237\0") + .expect("Wrong extension string") + } +} +pub struct KhrExtension237Fn {} +unsafe impl Send for KhrExtension237Fn {} +unsafe impl Sync for KhrExtension237Fn {} +impl ::std::clone::Clone for KhrExtension237Fn { + fn clone(&self) -> Self { + KhrExtension237Fn {} + } +} +impl KhrExtension237Fn { + pub fn load(mut _f: F) -> Self + where + F: FnMut(&::std::ffi::CStr) -> *const c_void, + { + KhrExtension237Fn {} + } +} +impl ExtMemoryBudgetFn { + pub fn name() -> &'static ::std::ffi::CStr { + ::std::ffi::CStr::from_bytes_with_nul(b"VK_EXT_memory_budget\0") + .expect("Wrong extension string") + } +} +pub struct ExtMemoryBudgetFn {} +unsafe impl Send for ExtMemoryBudgetFn {} +unsafe impl Sync for ExtMemoryBudgetFn {} +impl ::std::clone::Clone for ExtMemoryBudgetFn { + fn clone(&self) -> Self { + ExtMemoryBudgetFn {} + } +} +impl ExtMemoryBudgetFn { + pub fn load(mut _f: F) -> Self + where + F: FnMut(&::std::ffi::CStr) -> *const c_void, + { + ExtMemoryBudgetFn {} + } +} +#[doc = "Generated from \'VK_EXT_memory_budget\'"] +impl StructureType { + pub const PHYSICAL_DEVICE_MEMORY_BUDGET_PROPERTIES_EXT: Self = StructureType(1000237000); +} +impl ExtMemoryPriorityFn { + pub fn name() -> &'static ::std::ffi::CStr { + 
::std::ffi::CStr::from_bytes_with_nul(b"VK_EXT_memory_priority\0") + .expect("Wrong extension string") + } +} +pub struct ExtMemoryPriorityFn {} +unsafe impl Send for ExtMemoryPriorityFn {} +unsafe impl Sync for ExtMemoryPriorityFn {} +impl ::std::clone::Clone for ExtMemoryPriorityFn { + fn clone(&self) -> Self { + ExtMemoryPriorityFn {} + } +} +impl ExtMemoryPriorityFn { + pub fn load(mut _f: F) -> Self + where + F: FnMut(&::std::ffi::CStr) -> *const c_void, + { + ExtMemoryPriorityFn {} + } +} +#[doc = "Generated from \'VK_EXT_memory_priority\'"] +impl StructureType { + pub const PHYSICAL_DEVICE_MEMORY_PRIORITY_FEATURES_EXT: Self = StructureType(1000238000); +} +#[doc = "Generated from \'VK_EXT_memory_priority\'"] +impl StructureType { + pub const MEMORY_PRIORITY_ALLOCATE_INFO_EXT: Self = StructureType(1000238001); +} +impl KhrExtension240Fn { + pub fn name() -> &'static ::std::ffi::CStr { + ::std::ffi::CStr::from_bytes_with_nul(b"VK_KHR_extension_240\0") + .expect("Wrong extension string") + } +} +pub struct KhrExtension240Fn {} +unsafe impl Send for KhrExtension240Fn {} +unsafe impl Sync for KhrExtension240Fn {} +impl ::std::clone::Clone for KhrExtension240Fn { + fn clone(&self) -> Self { + KhrExtension240Fn {} + } +} +impl KhrExtension240Fn { + pub fn load(mut _f: F) -> Self + where + F: FnMut(&::std::ffi::CStr) -> *const c_void, + { + KhrExtension240Fn {} + } +} +impl NvExtension241Fn { + pub fn name() -> &'static ::std::ffi::CStr { + ::std::ffi::CStr::from_bytes_with_nul(b"VK_NV_extension_241\0") + .expect("Wrong extension string") + } +} +pub struct NvExtension241Fn {} +unsafe impl Send for NvExtension241Fn {} +unsafe impl Sync for NvExtension241Fn {} +impl ::std::clone::Clone for NvExtension241Fn { + fn clone(&self) -> Self { + NvExtension241Fn {} + } +} +impl NvExtension241Fn { + pub fn load(mut _f: F) -> Self + where + F: FnMut(&::std::ffi::CStr) -> *const c_void, + { + NvExtension241Fn {} + } +} +impl NvExtension242Fn { + pub fn name() -> &'static ::std::ffi::CStr { + ::std::ffi::CStr::from_bytes_with_nul(b"VK_NV_extension_242\0") + .expect("Wrong extension string") + } +} +pub struct NvExtension242Fn {} +unsafe impl Send for NvExtension242Fn {} +unsafe impl Sync for NvExtension242Fn {} +impl ::std::clone::Clone for NvExtension242Fn { + fn clone(&self) -> Self { + NvExtension242Fn {} + } +} +impl NvExtension242Fn { + pub fn load(mut _f: F) -> Self + where + F: FnMut(&::std::ffi::CStr) -> *const c_void, + { + NvExtension242Fn {} + } +} +impl IntelExtension243Fn { + pub fn name() -> &'static ::std::ffi::CStr { + ::std::ffi::CStr::from_bytes_with_nul(b"VK_INTEL_extension_243\0") + .expect("Wrong extension string") + } +} +pub struct IntelExtension243Fn {} +unsafe impl Send for IntelExtension243Fn {} +unsafe impl Sync for IntelExtension243Fn {} +impl ::std::clone::Clone for IntelExtension243Fn { + fn clone(&self) -> Self { + IntelExtension243Fn {} + } +} +impl IntelExtension243Fn { + pub fn load(mut _f: F) -> Self + where + F: FnMut(&::std::ffi::CStr) -> *const c_void, + { + IntelExtension243Fn {} + } +} +impl MesaExtension244Fn { + pub fn name() -> &'static ::std::ffi::CStr { + ::std::ffi::CStr::from_bytes_with_nul(b"VK_MESA_extension_244\0") + .expect("Wrong extension string") + } +} +pub struct MesaExtension244Fn {} +unsafe impl Send for MesaExtension244Fn {} +unsafe impl Sync for MesaExtension244Fn {} +impl ::std::clone::Clone for MesaExtension244Fn { + fn clone(&self) -> Self { + MesaExtension244Fn {} + } +} +impl MesaExtension244Fn { + pub fn load(mut _f: F) -> Self + where + 
F: FnMut(&::std::ffi::CStr) -> *const c_void, + { + MesaExtension244Fn {} + } +} +impl ExtBufferDeviceAddressFn { + pub fn name() -> &'static ::std::ffi::CStr { + ::std::ffi::CStr::from_bytes_with_nul(b"VK_EXT_buffer_device_address\0") + .expect("Wrong extension string") + } +} +#[allow(non_camel_case_types)] +pub type PFN_vkGetBufferDeviceAddressEXT = + extern "system" fn(device: Device, p_info: *const BufferDeviceAddressInfoEXT) -> DeviceAddress; +pub struct ExtBufferDeviceAddressFn { + pub get_buffer_device_address_ext: extern "system" fn( + device: Device, + p_info: *const BufferDeviceAddressInfoEXT, + ) -> DeviceAddress, +} +unsafe impl Send for ExtBufferDeviceAddressFn {} +unsafe impl Sync for ExtBufferDeviceAddressFn {} +impl ::std::clone::Clone for ExtBufferDeviceAddressFn { + fn clone(&self) -> Self { + ExtBufferDeviceAddressFn { + get_buffer_device_address_ext: self.get_buffer_device_address_ext, + } + } +} +impl ExtBufferDeviceAddressFn { + pub fn load(mut _f: F) -> Self + where + F: FnMut(&::std::ffi::CStr) -> *const c_void, + { + ExtBufferDeviceAddressFn { + get_buffer_device_address_ext: unsafe { + extern "system" fn get_buffer_device_address_ext( + _device: Device, + _p_info: *const BufferDeviceAddressInfoEXT, + ) -> DeviceAddress { + panic!(concat!( + "Unable to load ", + stringify!(get_buffer_device_address_ext) + )) + } + let raw_name = stringify!(vkGetBufferDeviceAddressEXT); + let cname = ::std::ffi::CString::new(raw_name).unwrap(); + let val = _f(&cname); + if val.is_null() { + get_buffer_device_address_ext + } else { + ::std::mem::transmute(val) + } + }, + } + } + #[doc = ""] + pub unsafe fn get_buffer_device_address_ext( + &self, + device: Device, + p_info: *const BufferDeviceAddressInfoEXT, + ) -> DeviceAddress { + (self.get_buffer_device_address_ext)(device, p_info) + } +} +#[doc = "Generated from \'VK_EXT_buffer_device_address\'"] +impl StructureType { + pub const PHYSICAL_DEVICE_BUFFER_ADDRESS_FEATURES_EXT: Self = StructureType(1000244000); +} +#[doc = "Generated from \'VK_EXT_buffer_device_address\'"] +impl StructureType { + pub const BUFFER_DEVICE_ADDRESS_INFO_EXT: Self = StructureType(1000244001); +} +#[doc = "Generated from \'VK_EXT_buffer_device_address\'"] +impl StructureType { + pub const BUFFER_DEVICE_ADDRESS_CREATE_INFO_EXT: Self = StructureType(1000244002); +} +#[doc = "Generated from \'VK_EXT_buffer_device_address\'"] +impl BufferUsageFlags { + pub const SHADER_DEVICE_ADDRESS_EXT: Self = BufferUsageFlags(0b100000000000000000); +} +#[doc = "Generated from \'VK_EXT_buffer_device_address\'"] +impl BufferCreateFlags { + pub const DEVICE_ADDRESS_CAPTURE_REPLAY_EXT: Self = BufferCreateFlags(0b10000); +} +#[doc = "Generated from \'VK_EXT_buffer_device_address\'"] +impl Result { + pub const ERROR_INVALID_DEVICE_ADDRESS_EXT: Self = Result(-1000244000); +} +impl ExtExtension246Fn { + pub fn name() -> &'static ::std::ffi::CStr { + ::std::ffi::CStr::from_bytes_with_nul(b"VK_EXT_extension_246\0") + .expect("Wrong extension string") + } +} +pub struct ExtExtension246Fn {} +unsafe impl Send for ExtExtension246Fn {} +unsafe impl Sync for ExtExtension246Fn {} +impl ::std::clone::Clone for ExtExtension246Fn { + fn clone(&self) -> Self { + ExtExtension246Fn {} + } +} +impl ExtExtension246Fn { + pub fn load(mut _f: F) -> Self + where + F: FnMut(&::std::ffi::CStr) -> *const c_void, + { + ExtExtension246Fn {} + } +} +impl ExtSeparateStencilUsageFn { + pub fn name() -> &'static ::std::ffi::CStr { + ::std::ffi::CStr::from_bytes_with_nul(b"VK_EXT_separate_stencil_usage\0") + 
.expect("Wrong extension string") + } +} +pub struct ExtSeparateStencilUsageFn {} +unsafe impl Send for ExtSeparateStencilUsageFn {} +unsafe impl Sync for ExtSeparateStencilUsageFn {} +impl ::std::clone::Clone for ExtSeparateStencilUsageFn { + fn clone(&self) -> Self { + ExtSeparateStencilUsageFn {} + } +} +impl ExtSeparateStencilUsageFn { + pub fn load(mut _f: F) -> Self + where + F: FnMut(&::std::ffi::CStr) -> *const c_void, + { + ExtSeparateStencilUsageFn {} + } +} +#[doc = "Generated from \'VK_EXT_separate_stencil_usage\'"] +impl StructureType { + pub const IMAGE_STENCIL_USAGE_CREATE_INFO_EXT: Self = StructureType(1000246000); +} +impl ExtValidationFeaturesFn { + pub fn name() -> &'static ::std::ffi::CStr { + ::std::ffi::CStr::from_bytes_with_nul(b"VK_EXT_validation_features\0") + .expect("Wrong extension string") + } +} +pub struct ExtValidationFeaturesFn {} +unsafe impl Send for ExtValidationFeaturesFn {} +unsafe impl Sync for ExtValidationFeaturesFn {} +impl ::std::clone::Clone for ExtValidationFeaturesFn { + fn clone(&self) -> Self { + ExtValidationFeaturesFn {} + } +} +impl ExtValidationFeaturesFn { + pub fn load(mut _f: F) -> Self + where + F: FnMut(&::std::ffi::CStr) -> *const c_void, + { + ExtValidationFeaturesFn {} + } +} +#[doc = "Generated from \'VK_EXT_validation_features\'"] +impl StructureType { + pub const VALIDATION_FEATURES_EXT: Self = StructureType(1000247000); +} +impl KhrExtension249Fn { + pub fn name() -> &'static ::std::ffi::CStr { + ::std::ffi::CStr::from_bytes_with_nul(b"VK_KHR_extension_249\0") + .expect("Wrong extension string") + } +} +pub struct KhrExtension249Fn {} +unsafe impl Send for KhrExtension249Fn {} +unsafe impl Sync for KhrExtension249Fn {} +impl ::std::clone::Clone for KhrExtension249Fn { + fn clone(&self) -> Self { + KhrExtension249Fn {} + } +} +impl KhrExtension249Fn { + pub fn load(mut _f: F) -> Self + where + F: FnMut(&::std::ffi::CStr) -> *const c_void, + { + KhrExtension249Fn {} + } +} +impl NvExtension250Fn { + pub fn name() -> &'static ::std::ffi::CStr { + ::std::ffi::CStr::from_bytes_with_nul(b"VK_NV_extension_250\0") + .expect("Wrong extension string") + } +} +pub struct NvExtension250Fn {} +unsafe impl Send for NvExtension250Fn {} +unsafe impl Sync for NvExtension250Fn {} +impl ::std::clone::Clone for NvExtension250Fn { + fn clone(&self) -> Self { + NvExtension250Fn {} + } +} +impl NvExtension250Fn { + pub fn load(mut _f: F) -> Self + where + F: FnMut(&::std::ffi::CStr) -> *const c_void, + { + NvExtension250Fn {} + } +} +#[doc = "Generated from \'VK_VERSION_1_1\'"] +impl StructureType { + pub const PHYSICAL_DEVICE_SUBGROUP_PROPERTIES: Self = StructureType(1000094000); +} +#[doc = "Generated from \'VK_VERSION_1_1\'"] +impl StructureType { + pub const BIND_BUFFER_MEMORY_INFO: Self = StructureType(1000157000); +} +#[doc = "Generated from \'VK_VERSION_1_1\'"] +impl StructureType { + pub const BIND_IMAGE_MEMORY_INFO: Self = StructureType(1000157001); +} +#[doc = "Generated from \'VK_VERSION_1_1\'"] +impl ImageCreateFlags { + pub const ALIAS: Self = ImageCreateFlags(0b10000000000); +} +#[doc = "Generated from \'VK_VERSION_1_1\'"] +impl StructureType { + pub const PHYSICAL_DEVICE_16BIT_STORAGE_FEATURES: Self = StructureType(1000083000); +} +#[doc = "Generated from \'VK_VERSION_1_1\'"] +impl StructureType { + pub const MEMORY_DEDICATED_REQUIREMENTS: Self = StructureType(1000127000); +} +#[doc = "Generated from \'VK_VERSION_1_1\'"] +impl StructureType { + pub const MEMORY_DEDICATED_ALLOCATE_INFO: Self = StructureType(1000127001); +} +#[doc = 
"Generated from \'VK_VERSION_1_1\'"] +impl StructureType { + pub const MEMORY_ALLOCATE_FLAGS_INFO: Self = StructureType(1000060000); +} +#[doc = "Generated from \'VK_VERSION_1_1\'"] +impl StructureType { + pub const DEVICE_GROUP_RENDER_PASS_BEGIN_INFO: Self = StructureType(1000060003); +} +#[doc = "Generated from \'VK_VERSION_1_1\'"] +impl StructureType { + pub const DEVICE_GROUP_COMMAND_BUFFER_BEGIN_INFO: Self = StructureType(1000060004); +} +#[doc = "Generated from \'VK_VERSION_1_1\'"] +impl StructureType { + pub const DEVICE_GROUP_SUBMIT_INFO: Self = StructureType(1000060005); +} +#[doc = "Generated from \'VK_VERSION_1_1\'"] +impl StructureType { + pub const DEVICE_GROUP_BIND_SPARSE_INFO: Self = StructureType(1000060006); +} +#[doc = "Generated from \'VK_VERSION_1_1\'"] +impl PipelineCreateFlags { + pub const VIEW_INDEX_FROM_DEVICE_INDEX: Self = PipelineCreateFlags(0b1000); +} +#[doc = "Generated from \'VK_VERSION_1_1\'"] +impl PipelineCreateFlags { + pub const DISPATCH_BASE: Self = PipelineCreateFlags(0b10000); +} +#[doc = "Generated from \'VK_VERSION_1_1\'"] +impl DependencyFlags { + pub const DEVICE_GROUP: Self = DependencyFlags(0b100); +} +#[doc = "Generated from \'VK_VERSION_1_1\'"] +impl StructureType { + pub const BIND_BUFFER_MEMORY_DEVICE_GROUP_INFO: Self = StructureType(1000060013); +} +#[doc = "Generated from \'VK_VERSION_1_1\'"] +impl StructureType { + pub const BIND_IMAGE_MEMORY_DEVICE_GROUP_INFO: Self = StructureType(1000060014); +} +#[doc = "Generated from \'VK_VERSION_1_1\'"] +impl ImageCreateFlags { + pub const SPLIT_INSTANCE_BIND_REGIONS: Self = ImageCreateFlags(0b1000000); +} +#[doc = "Generated from \'VK_VERSION_1_1\'"] +impl StructureType { + pub const PHYSICAL_DEVICE_GROUP_PROPERTIES: Self = StructureType(1000070000); +} +#[doc = "Generated from \'VK_VERSION_1_1\'"] +impl StructureType { + pub const DEVICE_GROUP_DEVICE_CREATE_INFO: Self = StructureType(1000070001); +} +#[doc = "Generated from \'VK_VERSION_1_1\'"] +impl MemoryHeapFlags { + pub const MULTI_INSTANCE: Self = MemoryHeapFlags(0b10); +} +#[doc = "Generated from \'VK_VERSION_1_1\'"] +impl StructureType { + pub const BUFFER_MEMORY_REQUIREMENTS_INFO_2: Self = StructureType(1000146000); +} +#[doc = "Generated from \'VK_VERSION_1_1\'"] +impl StructureType { + pub const IMAGE_MEMORY_REQUIREMENTS_INFO_2: Self = StructureType(1000146001); +} +#[doc = "Generated from \'VK_VERSION_1_1\'"] +impl StructureType { + pub const IMAGE_SPARSE_MEMORY_REQUIREMENTS_INFO_2: Self = StructureType(1000146002); +} +#[doc = "Generated from \'VK_VERSION_1_1\'"] +impl StructureType { + pub const MEMORY_REQUIREMENTS_2: Self = StructureType(1000146003); +} +#[doc = "Generated from \'VK_VERSION_1_1\'"] +impl StructureType { + pub const SPARSE_IMAGE_MEMORY_REQUIREMENTS_2: Self = StructureType(1000146004); +} +#[doc = "Generated from \'VK_VERSION_1_1\'"] +impl StructureType { + pub const PHYSICAL_DEVICE_FEATURES_2: Self = StructureType(1000059000); +} +#[doc = "Generated from \'VK_VERSION_1_1\'"] +impl StructureType { + pub const PHYSICAL_DEVICE_PROPERTIES_2: Self = StructureType(1000059001); +} +#[doc = "Generated from \'VK_VERSION_1_1\'"] +impl StructureType { + pub const FORMAT_PROPERTIES_2: Self = StructureType(1000059002); +} +#[doc = "Generated from \'VK_VERSION_1_1\'"] +impl StructureType { + pub const IMAGE_FORMAT_PROPERTIES_2: Self = StructureType(1000059003); +} +#[doc = "Generated from \'VK_VERSION_1_1\'"] +impl StructureType { + pub const PHYSICAL_DEVICE_IMAGE_FORMAT_INFO_2: Self = StructureType(1000059004); +} +#[doc = 
"Generated from \'VK_VERSION_1_1\'"] +impl StructureType { + pub const QUEUE_FAMILY_PROPERTIES_2: Self = StructureType(1000059005); +} +#[doc = "Generated from \'VK_VERSION_1_1\'"] +impl StructureType { + pub const PHYSICAL_DEVICE_MEMORY_PROPERTIES_2: Self = StructureType(1000059006); +} +#[doc = "Generated from \'VK_VERSION_1_1\'"] +impl StructureType { + pub const SPARSE_IMAGE_FORMAT_PROPERTIES_2: Self = StructureType(1000059007); +} +#[doc = "Generated from \'VK_VERSION_1_1\'"] +impl StructureType { + pub const PHYSICAL_DEVICE_SPARSE_IMAGE_FORMAT_INFO_2: Self = StructureType(1000059008); +} +#[doc = "Generated from \'VK_VERSION_1_1\'"] +impl Result { + pub const ERROR_OUT_OF_POOL_MEMORY: Self = Result(-1000069000); +} +#[doc = "Generated from \'VK_VERSION_1_1\'"] +impl FormatFeatureFlags { + pub const TRANSFER_SRC: Self = FormatFeatureFlags(0b100000000000000); +} +#[doc = "Generated from \'VK_VERSION_1_1\'"] +impl FormatFeatureFlags { + pub const TRANSFER_DST: Self = FormatFeatureFlags(0b1000000000000000); +} +#[doc = "Generated from \'VK_VERSION_1_1\'"] +impl ImageCreateFlags { + pub const TYPE_2D_ARRAY_COMPATIBLE: Self = ImageCreateFlags(0b100000); +} +#[doc = "Generated from \'VK_VERSION_1_1\'"] +impl ImageCreateFlags { + pub const BLOCK_TEXEL_VIEW_COMPATIBLE: Self = ImageCreateFlags(0b10000000); +} +#[doc = "Generated from \'VK_VERSION_1_1\'"] +impl ImageCreateFlags { + pub const EXTENDED_USAGE: Self = ImageCreateFlags(0b100000000); +} +#[doc = "Generated from \'VK_VERSION_1_1\'"] +impl StructureType { + pub const PHYSICAL_DEVICE_POINT_CLIPPING_PROPERTIES: Self = StructureType(1000117000); +} +#[doc = "Generated from \'VK_VERSION_1_1\'"] +impl StructureType { + pub const RENDER_PASS_INPUT_ATTACHMENT_ASPECT_CREATE_INFO: Self = StructureType(1000117001); +} +#[doc = "Generated from \'VK_VERSION_1_1\'"] +impl StructureType { + pub const IMAGE_VIEW_USAGE_CREATE_INFO: Self = StructureType(1000117002); +} +#[doc = "Generated from \'VK_VERSION_1_1\'"] +impl StructureType { + pub const PIPELINE_TESSELLATION_DOMAIN_ORIGIN_STATE_CREATE_INFO: Self = + StructureType(1000117003); +} +#[doc = "Generated from \'VK_VERSION_1_1\'"] +impl ImageLayout { + pub const DEPTH_READ_ONLY_STENCIL_ATTACHMENT_OPTIMAL: Self = ImageLayout(1000117000); +} +#[doc = "Generated from \'VK_VERSION_1_1\'"] +impl ImageLayout { + pub const DEPTH_ATTACHMENT_STENCIL_READ_ONLY_OPTIMAL: Self = ImageLayout(1000117001); +} +#[doc = "Generated from \'VK_VERSION_1_1\'"] +impl StructureType { + pub const RENDER_PASS_MULTIVIEW_CREATE_INFO: Self = StructureType(1000053000); +} +#[doc = "Generated from \'VK_VERSION_1_1\'"] +impl StructureType { + pub const PHYSICAL_DEVICE_MULTIVIEW_FEATURES: Self = StructureType(1000053001); +} +#[doc = "Generated from \'VK_VERSION_1_1\'"] +impl StructureType { + pub const PHYSICAL_DEVICE_MULTIVIEW_PROPERTIES: Self = StructureType(1000053002); +} +#[doc = "Generated from \'VK_VERSION_1_1\'"] +impl DependencyFlags { + pub const VIEW_LOCAL: Self = DependencyFlags(0b10); +} +#[doc = "Generated from \'VK_VERSION_1_1\'"] +impl StructureType { + pub const PHYSICAL_DEVICE_VARIABLE_POINTER_FEATURES: Self = StructureType(1000120000); +} +#[doc = "Generated from \'VK_VERSION_1_1\'"] +impl StructureType { + pub const PROTECTED_SUBMIT_INFO: Self = StructureType(1000145000); +} +#[doc = "Generated from \'VK_VERSION_1_1\'"] +impl StructureType { + pub const PHYSICAL_DEVICE_PROTECTED_MEMORY_FEATURES: Self = StructureType(1000145001); +} +#[doc = "Generated from \'VK_VERSION_1_1\'"] +impl StructureType { + pub const 
PHYSICAL_DEVICE_PROTECTED_MEMORY_PROPERTIES: Self = StructureType(1000145002); +} +#[doc = "Generated from \'VK_VERSION_1_1\'"] +impl StructureType { + pub const DEVICE_QUEUE_INFO_2: Self = StructureType(1000145003); +} +#[doc = "Generated from \'VK_VERSION_1_1\'"] +impl QueueFlags { + pub const PROTECTED: Self = QueueFlags(0b10000); +} +#[doc = "Generated from \'VK_VERSION_1_1\'"] +impl DeviceQueueCreateFlags { + pub const PROTECTED: Self = DeviceQueueCreateFlags(0b1); +} +#[doc = "Generated from \'VK_VERSION_1_1\'"] +impl MemoryPropertyFlags { + pub const PROTECTED: Self = MemoryPropertyFlags(0b100000); +} +#[doc = "Generated from \'VK_VERSION_1_1\'"] +impl BufferCreateFlags { + pub const PROTECTED: Self = BufferCreateFlags(0b1000); +} +#[doc = "Generated from \'VK_VERSION_1_1\'"] +impl ImageCreateFlags { + pub const PROTECTED: Self = ImageCreateFlags(0b100000000000); +} +#[doc = "Generated from \'VK_VERSION_1_1\'"] +impl CommandPoolCreateFlags { + pub const PROTECTED: Self = CommandPoolCreateFlags(0b100); +} +#[doc = "Generated from \'VK_VERSION_1_1\'"] +impl StructureType { + pub const SAMPLER_YCBCR_CONVERSION_CREATE_INFO: Self = StructureType(1000156000); +} +#[doc = "Generated from \'VK_VERSION_1_1\'"] +impl StructureType { + pub const SAMPLER_YCBCR_CONVERSION_INFO: Self = StructureType(1000156001); +} +#[doc = "Generated from \'VK_VERSION_1_1\'"] +impl StructureType { + pub const BIND_IMAGE_PLANE_MEMORY_INFO: Self = StructureType(1000156002); +} +#[doc = "Generated from \'VK_VERSION_1_1\'"] +impl StructureType { + pub const IMAGE_PLANE_MEMORY_REQUIREMENTS_INFO: Self = StructureType(1000156003); +} +#[doc = "Generated from \'VK_VERSION_1_1\'"] +impl StructureType { + pub const PHYSICAL_DEVICE_SAMPLER_YCBCR_CONVERSION_FEATURES: Self = StructureType(1000156004); +} +#[doc = "Generated from \'VK_VERSION_1_1\'"] +impl StructureType { + pub const SAMPLER_YCBCR_CONVERSION_IMAGE_FORMAT_PROPERTIES: Self = StructureType(1000156005); +} +#[doc = "Generated from \'VK_VERSION_1_1\'"] +impl ObjectType { + pub const SAMPLER_YCBCR_CONVERSION: Self = ObjectType(1000156000); +} +#[doc = "Generated from \'VK_VERSION_1_1\'"] +impl Format { + pub const G8B8G8R8_422_UNORM: Self = Format(1000156000); +} +#[doc = "Generated from \'VK_VERSION_1_1\'"] +impl Format { + pub const B8G8R8G8_422_UNORM: Self = Format(1000156001); +} +#[doc = "Generated from \'VK_VERSION_1_1\'"] +impl Format { + pub const G8_B8_R8_3PLANE_420_UNORM: Self = Format(1000156002); +} +#[doc = "Generated from \'VK_VERSION_1_1\'"] +impl Format { + pub const G8_B8R8_2PLANE_420_UNORM: Self = Format(1000156003); +} +#[doc = "Generated from \'VK_VERSION_1_1\'"] +impl Format { + pub const G8_B8_R8_3PLANE_422_UNORM: Self = Format(1000156004); +} +#[doc = "Generated from \'VK_VERSION_1_1\'"] +impl Format { + pub const G8_B8R8_2PLANE_422_UNORM: Self = Format(1000156005); +} +#[doc = "Generated from \'VK_VERSION_1_1\'"] +impl Format { + pub const G8_B8_R8_3PLANE_444_UNORM: Self = Format(1000156006); +} +#[doc = "Generated from \'VK_VERSION_1_1\'"] +impl Format { + pub const R10X6_UNORM_PACK16: Self = Format(1000156007); +} +#[doc = "Generated from \'VK_VERSION_1_1\'"] +impl Format { + pub const R10X6G10X6_UNORM_2PACK16: Self = Format(1000156008); +} +#[doc = "Generated from \'VK_VERSION_1_1\'"] +impl Format { + pub const R10X6G10X6B10X6A10X6_UNORM_4PACK16: Self = Format(1000156009); +} +#[doc = "Generated from \'VK_VERSION_1_1\'"] +impl Format { + pub const G10X6B10X6G10X6R10X6_422_UNORM_4PACK16: Self = Format(1000156010); +} +#[doc = "Generated 
from \'VK_VERSION_1_1\'"] +impl Format { + pub const B10X6G10X6R10X6G10X6_422_UNORM_4PACK16: Self = Format(1000156011); +} +#[doc = "Generated from \'VK_VERSION_1_1\'"] +impl Format { + pub const G10X6_B10X6_R10X6_3PLANE_420_UNORM_3PACK16: Self = Format(1000156012); +} +#[doc = "Generated from \'VK_VERSION_1_1\'"] +impl Format { + pub const G10X6_B10X6R10X6_2PLANE_420_UNORM_3PACK16: Self = Format(1000156013); +} +#[doc = "Generated from \'VK_VERSION_1_1\'"] +impl Format { + pub const G10X6_B10X6_R10X6_3PLANE_422_UNORM_3PACK16: Self = Format(1000156014); +} +#[doc = "Generated from \'VK_VERSION_1_1\'"] +impl Format { + pub const G10X6_B10X6R10X6_2PLANE_422_UNORM_3PACK16: Self = Format(1000156015); +} +#[doc = "Generated from \'VK_VERSION_1_1\'"] +impl Format { + pub const G10X6_B10X6_R10X6_3PLANE_444_UNORM_3PACK16: Self = Format(1000156016); +} +#[doc = "Generated from \'VK_VERSION_1_1\'"] +impl Format { + pub const R12X4_UNORM_PACK16: Self = Format(1000156017); +} +#[doc = "Generated from \'VK_VERSION_1_1\'"] +impl Format { + pub const R12X4G12X4_UNORM_2PACK16: Self = Format(1000156018); +} +#[doc = "Generated from \'VK_VERSION_1_1\'"] +impl Format { + pub const R12X4G12X4B12X4A12X4_UNORM_4PACK16: Self = Format(1000156019); +} +#[doc = "Generated from \'VK_VERSION_1_1\'"] +impl Format { + pub const G12X4B12X4G12X4R12X4_422_UNORM_4PACK16: Self = Format(1000156020); +} +#[doc = "Generated from \'VK_VERSION_1_1\'"] +impl Format { + pub const B12X4G12X4R12X4G12X4_422_UNORM_4PACK16: Self = Format(1000156021); +} +#[doc = "Generated from \'VK_VERSION_1_1\'"] +impl Format { + pub const G12X4_B12X4_R12X4_3PLANE_420_UNORM_3PACK16: Self = Format(1000156022); +} +#[doc = "Generated from \'VK_VERSION_1_1\'"] +impl Format { + pub const G12X4_B12X4R12X4_2PLANE_420_UNORM_3PACK16: Self = Format(1000156023); +} +#[doc = "Generated from \'VK_VERSION_1_1\'"] +impl Format { + pub const G12X4_B12X4_R12X4_3PLANE_422_UNORM_3PACK16: Self = Format(1000156024); +} +#[doc = "Generated from \'VK_VERSION_1_1\'"] +impl Format { + pub const G12X4_B12X4R12X4_2PLANE_422_UNORM_3PACK16: Self = Format(1000156025); +} +#[doc = "Generated from \'VK_VERSION_1_1\'"] +impl Format { + pub const G12X4_B12X4_R12X4_3PLANE_444_UNORM_3PACK16: Self = Format(1000156026); +} +#[doc = "Generated from \'VK_VERSION_1_1\'"] +impl Format { + pub const G16B16G16R16_422_UNORM: Self = Format(1000156027); +} +#[doc = "Generated from \'VK_VERSION_1_1\'"] +impl Format { + pub const B16G16R16G16_422_UNORM: Self = Format(1000156028); +} +#[doc = "Generated from \'VK_VERSION_1_1\'"] +impl Format { + pub const G16_B16_R16_3PLANE_420_UNORM: Self = Format(1000156029); +} +#[doc = "Generated from \'VK_VERSION_1_1\'"] +impl Format { + pub const G16_B16R16_2PLANE_420_UNORM: Self = Format(1000156030); +} +#[doc = "Generated from \'VK_VERSION_1_1\'"] +impl Format { + pub const G16_B16_R16_3PLANE_422_UNORM: Self = Format(1000156031); +} +#[doc = "Generated from \'VK_VERSION_1_1\'"] +impl Format { + pub const G16_B16R16_2PLANE_422_UNORM: Self = Format(1000156032); +} +#[doc = "Generated from \'VK_VERSION_1_1\'"] +impl Format { + pub const G16_B16_R16_3PLANE_444_UNORM: Self = Format(1000156033); +} +#[doc = "Generated from \'VK_VERSION_1_1\'"] +impl ImageAspectFlags { + pub const PLANE_0: Self = ImageAspectFlags(0b10000); +} +#[doc = "Generated from \'VK_VERSION_1_1\'"] +impl ImageAspectFlags { + pub const PLANE_1: Self = ImageAspectFlags(0b100000); +} +#[doc = "Generated from \'VK_VERSION_1_1\'"] +impl ImageAspectFlags { + pub const PLANE_2: Self = 
ImageAspectFlags(0b1000000); +} +#[doc = "Generated from \'VK_VERSION_1_1\'"] +impl ImageCreateFlags { + pub const DISJOINT: Self = ImageCreateFlags(0b1000000000); +} +#[doc = "Generated from \'VK_VERSION_1_1\'"] +impl FormatFeatureFlags { + pub const MIDPOINT_CHROMA_SAMPLES: Self = FormatFeatureFlags(0b100000000000000000); +} +#[doc = "Generated from \'VK_VERSION_1_1\'"] +impl FormatFeatureFlags { + pub const SAMPLED_IMAGE_YCBCR_CONVERSION_LINEAR_FILTER: Self = + FormatFeatureFlags(0b1000000000000000000); +} +#[doc = "Generated from \'VK_VERSION_1_1\'"] +impl FormatFeatureFlags { + pub const SAMPLED_IMAGE_YCBCR_CONVERSION_SEPARATE_RECONSTRUCTION_FILTER: Self = + FormatFeatureFlags(0b10000000000000000000); +} +#[doc = "Generated from \'VK_VERSION_1_1\'"] +impl FormatFeatureFlags { + pub const SAMPLED_IMAGE_YCBCR_CONVERSION_CHROMA_RECONSTRUCTION_EXPLICIT: Self = + FormatFeatureFlags(0b100000000000000000000); +} +#[doc = "Generated from \'VK_VERSION_1_1\'"] +impl FormatFeatureFlags { + pub const SAMPLED_IMAGE_YCBCR_CONVERSION_CHROMA_RECONSTRUCTION_EXPLICIT_FORCEABLE: Self = + FormatFeatureFlags(0b1000000000000000000000); +} +#[doc = "Generated from \'VK_VERSION_1_1\'"] +impl FormatFeatureFlags { + pub const DISJOINT: Self = FormatFeatureFlags(0b10000000000000000000000); +} +#[doc = "Generated from \'VK_VERSION_1_1\'"] +impl FormatFeatureFlags { + pub const COSITED_CHROMA_SAMPLES: Self = FormatFeatureFlags(0b100000000000000000000000); +} +#[doc = "Generated from \'VK_VERSION_1_1\'"] +impl StructureType { + pub const DESCRIPTOR_UPDATE_TEMPLATE_CREATE_INFO: Self = StructureType(1000085000); +} +#[doc = "Generated from \'VK_VERSION_1_1\'"] +impl ObjectType { + pub const DESCRIPTOR_UPDATE_TEMPLATE: Self = ObjectType(1000085000); +} +#[doc = "Generated from \'VK_VERSION_1_1\'"] +impl StructureType { + pub const PHYSICAL_DEVICE_EXTERNAL_IMAGE_FORMAT_INFO: Self = StructureType(1000071000); +} +#[doc = "Generated from \'VK_VERSION_1_1\'"] +impl StructureType { + pub const EXTERNAL_IMAGE_FORMAT_PROPERTIES: Self = StructureType(1000071001); +} +#[doc = "Generated from \'VK_VERSION_1_1\'"] +impl StructureType { + pub const PHYSICAL_DEVICE_EXTERNAL_BUFFER_INFO: Self = StructureType(1000071002); +} +#[doc = "Generated from \'VK_VERSION_1_1\'"] +impl StructureType { + pub const EXTERNAL_BUFFER_PROPERTIES: Self = StructureType(1000071003); +} +#[doc = "Generated from \'VK_VERSION_1_1\'"] +impl StructureType { + pub const PHYSICAL_DEVICE_ID_PROPERTIES: Self = StructureType(1000071004); +} +#[doc = "Generated from \'VK_VERSION_1_1\'"] +impl StructureType { + pub const EXTERNAL_MEMORY_BUFFER_CREATE_INFO: Self = StructureType(1000072000); +} +#[doc = "Generated from \'VK_VERSION_1_1\'"] +impl StructureType { + pub const EXTERNAL_MEMORY_IMAGE_CREATE_INFO: Self = StructureType(1000072001); +} +#[doc = "Generated from \'VK_VERSION_1_1\'"] +impl StructureType { + pub const EXPORT_MEMORY_ALLOCATE_INFO: Self = StructureType(1000072002); +} +#[doc = "Generated from \'VK_VERSION_1_1\'"] +impl Result { + pub const ERROR_INVALID_EXTERNAL_HANDLE: Self = Result(-1000072003); +} +#[doc = "Generated from \'VK_VERSION_1_1\'"] +impl StructureType { + pub const PHYSICAL_DEVICE_EXTERNAL_FENCE_INFO: Self = StructureType(1000112000); +} +#[doc = "Generated from \'VK_VERSION_1_1\'"] +impl StructureType { + pub const EXTERNAL_FENCE_PROPERTIES: Self = StructureType(1000112001); +} +#[doc = "Generated from \'VK_VERSION_1_1\'"] +impl StructureType { + pub const EXPORT_FENCE_CREATE_INFO: Self = StructureType(1000113000); +} +#[doc = 
"Generated from \'VK_VERSION_1_1\'"] +impl StructureType { + pub const EXPORT_SEMAPHORE_CREATE_INFO: Self = StructureType(1000077000); +} +#[doc = "Generated from \'VK_VERSION_1_1\'"] +impl StructureType { + pub const PHYSICAL_DEVICE_EXTERNAL_SEMAPHORE_INFO: Self = StructureType(1000076000); +} +#[doc = "Generated from \'VK_VERSION_1_1\'"] +impl StructureType { + pub const EXTERNAL_SEMAPHORE_PROPERTIES: Self = StructureType(1000076001); +} +#[doc = "Generated from \'VK_VERSION_1_1\'"] +impl StructureType { + pub const PHYSICAL_DEVICE_MAINTENANCE_3_PROPERTIES: Self = StructureType(1000168000); +} +#[doc = "Generated from \'VK_VERSION_1_1\'"] +impl StructureType { + pub const DESCRIPTOR_SET_LAYOUT_SUPPORT: Self = StructureType(1000168001); +} +#[doc = "Generated from \'VK_VERSION_1_1\'"] +impl StructureType { + pub const PHYSICAL_DEVICE_SHADER_DRAW_PARAMETER_FEATURES: Self = StructureType(1000063000); +} +pub(crate) fn debug_flags( + f: &mut fmt::Formatter, + known: &[(Flags, &'static str)], + value: Flags, +) -> fmt::Result { + let mut first = true; + let mut accum = value; + for (bit, name) in known { + if *bit != 0 && accum & *bit == *bit { + if !first { + f.write_str(" | ")?; + } + f.write_str(name)?; + first = false; + accum &= !bit; + } + } + if accum != 0 { + if !first { + f.write_str(" | ")?; + } + write!(f, "{:b}", accum)?; + } + Ok(()) +} +impl fmt::Debug for AccelerationStructureMemoryRequirementsTypeNV { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + let name = match *self { + Self::OBJECT => Some("OBJECT"), + Self::BUILD_SCRATCH => Some("BUILD_SCRATCH"), + Self::UPDATE_SCRATCH => Some("UPDATE_SCRATCH"), + _ => None, + }; + if let Some(x) = name { + f.write_str(x) + } else { + self.0.fmt(f) + } + } +} +impl fmt::Debug for AccelerationStructureTypeNV { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + let name = match *self { + Self::TOP_LEVEL => Some("TOP_LEVEL"), + Self::BOTTOM_LEVEL => Some("BOTTOM_LEVEL"), + _ => None, + }; + if let Some(x) = name { + f.write_str(x) + } else { + self.0.fmt(f) + } + } +} +impl fmt::Debug for AccessFlags { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + const KNOWN: &[(Flags, &str)] = &[ + ( + AccessFlags::INDIRECT_COMMAND_READ.0, + "INDIRECT_COMMAND_READ", + ), + (AccessFlags::INDEX_READ.0, "INDEX_READ"), + ( + AccessFlags::VERTEX_ATTRIBUTE_READ.0, + "VERTEX_ATTRIBUTE_READ", + ), + (AccessFlags::UNIFORM_READ.0, "UNIFORM_READ"), + ( + AccessFlags::INPUT_ATTACHMENT_READ.0, + "INPUT_ATTACHMENT_READ", + ), + (AccessFlags::SHADER_READ.0, "SHADER_READ"), + (AccessFlags::SHADER_WRITE.0, "SHADER_WRITE"), + ( + AccessFlags::COLOR_ATTACHMENT_READ.0, + "COLOR_ATTACHMENT_READ", + ), + ( + AccessFlags::COLOR_ATTACHMENT_WRITE.0, + "COLOR_ATTACHMENT_WRITE", + ), + ( + AccessFlags::DEPTH_STENCIL_ATTACHMENT_READ.0, + "DEPTH_STENCIL_ATTACHMENT_READ", + ), + ( + AccessFlags::DEPTH_STENCIL_ATTACHMENT_WRITE.0, + "DEPTH_STENCIL_ATTACHMENT_WRITE", + ), + (AccessFlags::TRANSFER_READ.0, "TRANSFER_READ"), + (AccessFlags::TRANSFER_WRITE.0, "TRANSFER_WRITE"), + (AccessFlags::HOST_READ.0, "HOST_READ"), + (AccessFlags::HOST_WRITE.0, "HOST_WRITE"), + (AccessFlags::MEMORY_READ.0, "MEMORY_READ"), + (AccessFlags::MEMORY_WRITE.0, "MEMORY_WRITE"), + (AccessFlags::RESERVED_30_KHR.0, "RESERVED_30_KHR"), + (AccessFlags::RESERVED_31_KHR.0, "RESERVED_31_KHR"), + (AccessFlags::RESERVED_28_KHR.0, "RESERVED_28_KHR"), + (AccessFlags::RESERVED_29_KHR.0, "RESERVED_29_KHR"), + ( + AccessFlags::TRANSFORM_FEEDBACK_WRITE_EXT.0, + "TRANSFORM_FEEDBACK_WRITE_EXT", + 
), + ( + AccessFlags::TRANSFORM_FEEDBACK_COUNTER_READ_EXT.0, + "TRANSFORM_FEEDBACK_COUNTER_READ_EXT", + ), + ( + AccessFlags::TRANSFORM_FEEDBACK_COUNTER_WRITE_EXT.0, + "TRANSFORM_FEEDBACK_COUNTER_WRITE_EXT", + ), + ( + AccessFlags::CONDITIONAL_RENDERING_READ_EXT.0, + "CONDITIONAL_RENDERING_READ_EXT", + ), + ( + AccessFlags::COMMAND_PROCESS_READ_NVX.0, + "COMMAND_PROCESS_READ_NVX", + ), + ( + AccessFlags::COMMAND_PROCESS_WRITE_NVX.0, + "COMMAND_PROCESS_WRITE_NVX", + ), + ( + AccessFlags::COLOR_ATTACHMENT_READ_NONCOHERENT_EXT.0, + "COLOR_ATTACHMENT_READ_NONCOHERENT_EXT", + ), + ( + AccessFlags::SHADING_RATE_IMAGE_READ_NV.0, + "SHADING_RATE_IMAGE_READ_NV", + ), + ( + AccessFlags::ACCELERATION_STRUCTURE_READ_NV.0, + "ACCELERATION_STRUCTURE_READ_NV", + ), + ( + AccessFlags::ACCELERATION_STRUCTURE_WRITE_NV.0, + "ACCELERATION_STRUCTURE_WRITE_NV", + ), + ( + AccessFlags::FRAGMENT_DENSITY_MAP_READ_EXT.0, + "FRAGMENT_DENSITY_MAP_READ_EXT", + ), + ]; + debug_flags(f, KNOWN, self.0) + } +} +impl fmt::Debug for AndroidSurfaceCreateFlagsKHR { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + const KNOWN: &[(Flags, &str)] = &[]; + debug_flags(f, KNOWN, self.0) + } +} +impl fmt::Debug for AttachmentDescriptionFlags { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + const KNOWN: &[(Flags, &str)] = &[(AttachmentDescriptionFlags::MAY_ALIAS.0, "MAY_ALIAS")]; + debug_flags(f, KNOWN, self.0) + } +} +impl fmt::Debug for AttachmentLoadOp { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + let name = match *self { + Self::LOAD => Some("LOAD"), + Self::CLEAR => Some("CLEAR"), + Self::DONT_CARE => Some("DONT_CARE"), + _ => None, + }; + if let Some(x) = name { + f.write_str(x) + } else { + self.0.fmt(f) + } + } +} +impl fmt::Debug for AttachmentStoreOp { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + let name = match *self { + Self::STORE => Some("STORE"), + Self::DONT_CARE => Some("DONT_CARE"), + _ => None, + }; + if let Some(x) = name { + f.write_str(x) + } else { + self.0.fmt(f) + } + } +} +impl fmt::Debug for BlendFactor { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + let name = match *self { + Self::ZERO => Some("ZERO"), + Self::ONE => Some("ONE"), + Self::SRC_COLOR => Some("SRC_COLOR"), + Self::ONE_MINUS_SRC_COLOR => Some("ONE_MINUS_SRC_COLOR"), + Self::DST_COLOR => Some("DST_COLOR"), + Self::ONE_MINUS_DST_COLOR => Some("ONE_MINUS_DST_COLOR"), + Self::SRC_ALPHA => Some("SRC_ALPHA"), + Self::ONE_MINUS_SRC_ALPHA => Some("ONE_MINUS_SRC_ALPHA"), + Self::DST_ALPHA => Some("DST_ALPHA"), + Self::ONE_MINUS_DST_ALPHA => Some("ONE_MINUS_DST_ALPHA"), + Self::CONSTANT_COLOR => Some("CONSTANT_COLOR"), + Self::ONE_MINUS_CONSTANT_COLOR => Some("ONE_MINUS_CONSTANT_COLOR"), + Self::CONSTANT_ALPHA => Some("CONSTANT_ALPHA"), + Self::ONE_MINUS_CONSTANT_ALPHA => Some("ONE_MINUS_CONSTANT_ALPHA"), + Self::SRC_ALPHA_SATURATE => Some("SRC_ALPHA_SATURATE"), + Self::SRC1_COLOR => Some("SRC1_COLOR"), + Self::ONE_MINUS_SRC1_COLOR => Some("ONE_MINUS_SRC1_COLOR"), + Self::SRC1_ALPHA => Some("SRC1_ALPHA"), + Self::ONE_MINUS_SRC1_ALPHA => Some("ONE_MINUS_SRC1_ALPHA"), + _ => None, + }; + if let Some(x) = name { + f.write_str(x) + } else { + self.0.fmt(f) + } + } +} +impl fmt::Debug for BlendOp { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + let name = match *self { + Self::ADD => Some("ADD"), + Self::SUBTRACT => Some("SUBTRACT"), + Self::REVERSE_SUBTRACT => Some("REVERSE_SUBTRACT"), + Self::MIN => Some("MIN"), + Self::MAX => Some("MAX"), + Self::ZERO_EXT => Some("ZERO_EXT"), + 
Self::SRC_EXT => Some("SRC_EXT"), + Self::DST_EXT => Some("DST_EXT"), + Self::SRC_OVER_EXT => Some("SRC_OVER_EXT"), + Self::DST_OVER_EXT => Some("DST_OVER_EXT"), + Self::SRC_IN_EXT => Some("SRC_IN_EXT"), + Self::DST_IN_EXT => Some("DST_IN_EXT"), + Self::SRC_OUT_EXT => Some("SRC_OUT_EXT"), + Self::DST_OUT_EXT => Some("DST_OUT_EXT"), + Self::SRC_ATOP_EXT => Some("SRC_ATOP_EXT"), + Self::DST_ATOP_EXT => Some("DST_ATOP_EXT"), + Self::XOR_EXT => Some("XOR_EXT"), + Self::MULTIPLY_EXT => Some("MULTIPLY_EXT"), + Self::SCREEN_EXT => Some("SCREEN_EXT"), + Self::OVERLAY_EXT => Some("OVERLAY_EXT"), + Self::DARKEN_EXT => Some("DARKEN_EXT"), + Self::LIGHTEN_EXT => Some("LIGHTEN_EXT"), + Self::COLORDODGE_EXT => Some("COLORDODGE_EXT"), + Self::COLORBURN_EXT => Some("COLORBURN_EXT"), + Self::HARDLIGHT_EXT => Some("HARDLIGHT_EXT"), + Self::SOFTLIGHT_EXT => Some("SOFTLIGHT_EXT"), + Self::DIFFERENCE_EXT => Some("DIFFERENCE_EXT"), + Self::EXCLUSION_EXT => Some("EXCLUSION_EXT"), + Self::INVERT_EXT => Some("INVERT_EXT"), + Self::INVERT_RGB_EXT => Some("INVERT_RGB_EXT"), + Self::LINEARDODGE_EXT => Some("LINEARDODGE_EXT"), + Self::LINEARBURN_EXT => Some("LINEARBURN_EXT"), + Self::VIVIDLIGHT_EXT => Some("VIVIDLIGHT_EXT"), + Self::LINEARLIGHT_EXT => Some("LINEARLIGHT_EXT"), + Self::PINLIGHT_EXT => Some("PINLIGHT_EXT"), + Self::HARDMIX_EXT => Some("HARDMIX_EXT"), + Self::HSL_HUE_EXT => Some("HSL_HUE_EXT"), + Self::HSL_SATURATION_EXT => Some("HSL_SATURATION_EXT"), + Self::HSL_COLOR_EXT => Some("HSL_COLOR_EXT"), + Self::HSL_LUMINOSITY_EXT => Some("HSL_LUMINOSITY_EXT"), + Self::PLUS_EXT => Some("PLUS_EXT"), + Self::PLUS_CLAMPED_EXT => Some("PLUS_CLAMPED_EXT"), + Self::PLUS_CLAMPED_ALPHA_EXT => Some("PLUS_CLAMPED_ALPHA_EXT"), + Self::PLUS_DARKER_EXT => Some("PLUS_DARKER_EXT"), + Self::MINUS_EXT => Some("MINUS_EXT"), + Self::MINUS_CLAMPED_EXT => Some("MINUS_CLAMPED_EXT"), + Self::CONTRAST_EXT => Some("CONTRAST_EXT"), + Self::INVERT_OVG_EXT => Some("INVERT_OVG_EXT"), + Self::RED_EXT => Some("RED_EXT"), + Self::GREEN_EXT => Some("GREEN_EXT"), + Self::BLUE_EXT => Some("BLUE_EXT"), + _ => None, + }; + if let Some(x) = name { + f.write_str(x) + } else { + self.0.fmt(f) + } + } +} +impl fmt::Debug for BlendOverlapEXT { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + let name = match *self { + Self::UNCORRELATED => Some("UNCORRELATED"), + Self::DISJOINT => Some("DISJOINT"), + Self::CONJOINT => Some("CONJOINT"), + _ => None, + }; + if let Some(x) = name { + f.write_str(x) + } else { + self.0.fmt(f) + } + } +} +impl fmt::Debug for BorderColor { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + let name = match *self { + Self::FLOAT_TRANSPARENT_BLACK => Some("FLOAT_TRANSPARENT_BLACK"), + Self::INT_TRANSPARENT_BLACK => Some("INT_TRANSPARENT_BLACK"), + Self::FLOAT_OPAQUE_BLACK => Some("FLOAT_OPAQUE_BLACK"), + Self::INT_OPAQUE_BLACK => Some("INT_OPAQUE_BLACK"), + Self::FLOAT_OPAQUE_WHITE => Some("FLOAT_OPAQUE_WHITE"), + Self::INT_OPAQUE_WHITE => Some("INT_OPAQUE_WHITE"), + _ => None, + }; + if let Some(x) = name { + f.write_str(x) + } else { + self.0.fmt(f) + } + } +} +impl fmt::Debug for BufferCreateFlags { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + const KNOWN: &[(Flags, &str)] = &[ + (BufferCreateFlags::SPARSE_BINDING.0, "SPARSE_BINDING"), + (BufferCreateFlags::SPARSE_RESIDENCY.0, "SPARSE_RESIDENCY"), + (BufferCreateFlags::SPARSE_ALIASED.0, "SPARSE_ALIASED"), + ( + BufferCreateFlags::DEVICE_ADDRESS_CAPTURE_REPLAY_EXT.0, + "DEVICE_ADDRESS_CAPTURE_REPLAY_EXT", + ), + 
(BufferCreateFlags::PROTECTED.0, "PROTECTED"), + ]; + debug_flags(f, KNOWN, self.0) + } +} +impl fmt::Debug for BufferUsageFlags { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + const KNOWN: &[(Flags, &str)] = &[ + (BufferUsageFlags::TRANSFER_SRC.0, "TRANSFER_SRC"), + (BufferUsageFlags::TRANSFER_DST.0, "TRANSFER_DST"), + ( + BufferUsageFlags::UNIFORM_TEXEL_BUFFER.0, + "UNIFORM_TEXEL_BUFFER", + ), + ( + BufferUsageFlags::STORAGE_TEXEL_BUFFER.0, + "STORAGE_TEXEL_BUFFER", + ), + (BufferUsageFlags::UNIFORM_BUFFER.0, "UNIFORM_BUFFER"), + (BufferUsageFlags::STORAGE_BUFFER.0, "STORAGE_BUFFER"), + (BufferUsageFlags::INDEX_BUFFER.0, "INDEX_BUFFER"), + (BufferUsageFlags::VERTEX_BUFFER.0, "VERTEX_BUFFER"), + (BufferUsageFlags::INDIRECT_BUFFER.0, "INDIRECT_BUFFER"), + (BufferUsageFlags::RESERVED_15_KHR.0, "RESERVED_15_KHR"), + (BufferUsageFlags::RESERVED_16_KHR.0, "RESERVED_16_KHR"), + (BufferUsageFlags::RESERVED_13_KHR.0, "RESERVED_13_KHR"), + (BufferUsageFlags::RESERVED_14_KHR.0, "RESERVED_14_KHR"), + ( + BufferUsageFlags::TRANSFORM_FEEDBACK_BUFFER_EXT.0, + "TRANSFORM_FEEDBACK_BUFFER_EXT", + ), + ( + BufferUsageFlags::TRANSFORM_FEEDBACK_COUNTER_BUFFER_EXT.0, + "TRANSFORM_FEEDBACK_COUNTER_BUFFER_EXT", + ), + ( + BufferUsageFlags::CONDITIONAL_RENDERING_EXT.0, + "CONDITIONAL_RENDERING_EXT", + ), + (BufferUsageFlags::RAY_TRACING_NV.0, "RAY_TRACING_NV"), + ( + BufferUsageFlags::SHADER_DEVICE_ADDRESS_EXT.0, + "SHADER_DEVICE_ADDRESS_EXT", + ), + ]; + debug_flags(f, KNOWN, self.0) + } +} +impl fmt::Debug for BufferViewCreateFlags { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + const KNOWN: &[(Flags, &str)] = &[]; + debug_flags(f, KNOWN, self.0) + } +} +impl fmt::Debug for BuildAccelerationStructureFlagsNV { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + const KNOWN: &[(Flags, &str)] = &[ + ( + BuildAccelerationStructureFlagsNV::ALLOW_UPDATE.0, + "ALLOW_UPDATE", + ), + ( + BuildAccelerationStructureFlagsNV::ALLOW_COMPACTION.0, + "ALLOW_COMPACTION", + ), + ( + BuildAccelerationStructureFlagsNV::PREFER_FAST_TRACE.0, + "PREFER_FAST_TRACE", + ), + ( + BuildAccelerationStructureFlagsNV::PREFER_FAST_BUILD.0, + "PREFER_FAST_BUILD", + ), + ( + BuildAccelerationStructureFlagsNV::LOW_MEMORY.0, + "LOW_MEMORY", + ), + ]; + debug_flags(f, KNOWN, self.0) + } +} +impl fmt::Debug for ChromaLocation { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + let name = match *self { + Self::COSITED_EVEN => Some("COSITED_EVEN"), + Self::MIDPOINT => Some("MIDPOINT"), + _ => None, + }; + if let Some(x) = name { + f.write_str(x) + } else { + self.0.fmt(f) + } + } +} +impl fmt::Debug for CoarseSampleOrderTypeNV { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + let name = match *self { + Self::DEFAULT => Some("DEFAULT"), + Self::CUSTOM => Some("CUSTOM"), + Self::PIXEL_MAJOR => Some("PIXEL_MAJOR"), + Self::SAMPLE_MAJOR => Some("SAMPLE_MAJOR"), + _ => None, + }; + if let Some(x) = name { + f.write_str(x) + } else { + self.0.fmt(f) + } + } +} +impl fmt::Debug for ColorComponentFlags { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + const KNOWN: &[(Flags, &str)] = &[ + (ColorComponentFlags::R.0, "R"), + (ColorComponentFlags::G.0, "G"), + (ColorComponentFlags::B.0, "B"), + (ColorComponentFlags::A.0, "A"), + ]; + debug_flags(f, KNOWN, self.0) + } +} +impl fmt::Debug for ColorSpaceKHR { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + let name = match *self { + Self::SRGB_NONLINEAR => Some("SRGB_NONLINEAR"), + Self::DISPLAY_P3_NONLINEAR_EXT => 
Some("DISPLAY_P3_NONLINEAR_EXT"), + Self::EXTENDED_SRGB_LINEAR_EXT => Some("EXTENDED_SRGB_LINEAR_EXT"), + Self::DCI_P3_LINEAR_EXT => Some("DCI_P3_LINEAR_EXT"), + Self::DCI_P3_NONLINEAR_EXT => Some("DCI_P3_NONLINEAR_EXT"), + Self::BT709_LINEAR_EXT => Some("BT709_LINEAR_EXT"), + Self::BT709_NONLINEAR_EXT => Some("BT709_NONLINEAR_EXT"), + Self::BT2020_LINEAR_EXT => Some("BT2020_LINEAR_EXT"), + Self::HDR10_ST2084_EXT => Some("HDR10_ST2084_EXT"), + Self::DOLBYVISION_EXT => Some("DOLBYVISION_EXT"), + Self::HDR10_HLG_EXT => Some("HDR10_HLG_EXT"), + Self::ADOBERGB_LINEAR_EXT => Some("ADOBERGB_LINEAR_EXT"), + Self::ADOBERGB_NONLINEAR_EXT => Some("ADOBERGB_NONLINEAR_EXT"), + Self::PASS_THROUGH_EXT => Some("PASS_THROUGH_EXT"), + Self::EXTENDED_SRGB_NONLINEAR_EXT => Some("EXTENDED_SRGB_NONLINEAR_EXT"), + _ => None, + }; + if let Some(x) = name { + f.write_str(x) + } else { + self.0.fmt(f) + } + } +} +impl fmt::Debug for CommandBufferLevel { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + let name = match *self { + Self::PRIMARY => Some("PRIMARY"), + Self::SECONDARY => Some("SECONDARY"), + _ => None, + }; + if let Some(x) = name { + f.write_str(x) + } else { + self.0.fmt(f) + } + } +} +impl fmt::Debug for CommandBufferResetFlags { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + const KNOWN: &[(Flags, &str)] = &[( + CommandBufferResetFlags::RELEASE_RESOURCES.0, + "RELEASE_RESOURCES", + )]; + debug_flags(f, KNOWN, self.0) + } +} +impl fmt::Debug for CommandBufferUsageFlags { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + const KNOWN: &[(Flags, &str)] = &[ + ( + CommandBufferUsageFlags::ONE_TIME_SUBMIT.0, + "ONE_TIME_SUBMIT", + ), + ( + CommandBufferUsageFlags::RENDER_PASS_CONTINUE.0, + "RENDER_PASS_CONTINUE", + ), + ( + CommandBufferUsageFlags::SIMULTANEOUS_USE.0, + "SIMULTANEOUS_USE", + ), + ]; + debug_flags(f, KNOWN, self.0) + } +} +impl fmt::Debug for CommandPoolCreateFlags { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + const KNOWN: &[(Flags, &str)] = &[ + (CommandPoolCreateFlags::TRANSIENT.0, "TRANSIENT"), + ( + CommandPoolCreateFlags::RESET_COMMAND_BUFFER.0, + "RESET_COMMAND_BUFFER", + ), + (CommandPoolCreateFlags::PROTECTED.0, "PROTECTED"), + ]; + debug_flags(f, KNOWN, self.0) + } +} +impl fmt::Debug for CommandPoolResetFlags { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + const KNOWN: &[(Flags, &str)] = &[( + CommandPoolResetFlags::RELEASE_RESOURCES.0, + "RELEASE_RESOURCES", + )]; + debug_flags(f, KNOWN, self.0) + } +} +impl fmt::Debug for CommandPoolTrimFlags { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + const KNOWN: &[(Flags, &str)] = &[]; + debug_flags(f, KNOWN, self.0) + } +} +impl fmt::Debug for CompareOp { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + let name = match *self { + Self::NEVER => Some("NEVER"), + Self::LESS => Some("LESS"), + Self::EQUAL => Some("EQUAL"), + Self::LESS_OR_EQUAL => Some("LESS_OR_EQUAL"), + Self::GREATER => Some("GREATER"), + Self::NOT_EQUAL => Some("NOT_EQUAL"), + Self::GREATER_OR_EQUAL => Some("GREATER_OR_EQUAL"), + Self::ALWAYS => Some("ALWAYS"), + _ => None, + }; + if let Some(x) = name { + f.write_str(x) + } else { + self.0.fmt(f) + } + } +} +impl fmt::Debug for ComponentSwizzle { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + let name = match *self { + Self::IDENTITY => Some("IDENTITY"), + Self::ZERO => Some("ZERO"), + Self::ONE => Some("ONE"), + Self::R => Some("R"), + Self::G => Some("G"), + Self::B => Some("B"), + Self::A => Some("A"), + _ => None, + }; + 
if let Some(x) = name { + f.write_str(x) + } else { + self.0.fmt(f) + } + } +} +impl fmt::Debug for CompositeAlphaFlagsKHR { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + const KNOWN: &[(Flags, &str)] = &[ + (CompositeAlphaFlagsKHR::OPAQUE.0, "OPAQUE"), + (CompositeAlphaFlagsKHR::PRE_MULTIPLIED.0, "PRE_MULTIPLIED"), + (CompositeAlphaFlagsKHR::POST_MULTIPLIED.0, "POST_MULTIPLIED"), + (CompositeAlphaFlagsKHR::INHERIT.0, "INHERIT"), + ]; + debug_flags(f, KNOWN, self.0) + } +} +impl fmt::Debug for ConditionalRenderingFlagsEXT { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + const KNOWN: &[(Flags, &str)] = &[(ConditionalRenderingFlagsEXT::INVERTED.0, "INVERTED")]; + debug_flags(f, KNOWN, self.0) + } +} +impl fmt::Debug for ConservativeRasterizationModeEXT { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + let name = match *self { + Self::DISABLED => Some("DISABLED"), + Self::OVERESTIMATE => Some("OVERESTIMATE"), + Self::UNDERESTIMATE => Some("UNDERESTIMATE"), + _ => None, + }; + if let Some(x) = name { + f.write_str(x) + } else { + self.0.fmt(f) + } + } +} +impl fmt::Debug for CopyAccelerationStructureModeNV { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + let name = match *self { + Self::CLONE => Some("CLONE"), + Self::COMPACT => Some("COMPACT"), + _ => None, + }; + if let Some(x) = name { + f.write_str(x) + } else { + self.0.fmt(f) + } + } +} +impl fmt::Debug for CoverageModulationModeNV { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + let name = match *self { + Self::NONE => Some("NONE"), + Self::RGB => Some("RGB"), + Self::ALPHA => Some("ALPHA"), + Self::RGBA => Some("RGBA"), + _ => None, + }; + if let Some(x) = name { + f.write_str(x) + } else { + self.0.fmt(f) + } + } +} +impl fmt::Debug for CullModeFlags { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + const KNOWN: &[(Flags, &str)] = &[ + (CullModeFlags::NONE.0, "NONE"), + (CullModeFlags::FRONT.0, "FRONT"), + (CullModeFlags::BACK.0, "BACK"), + (CullModeFlags::FRONT_AND_BACK.0, "FRONT_AND_BACK"), + ]; + debug_flags(f, KNOWN, self.0) + } +} +impl fmt::Debug for DebugReportFlagsEXT { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + const KNOWN: &[(Flags, &str)] = &[ + (DebugReportFlagsEXT::INFORMATION.0, "INFORMATION"), + (DebugReportFlagsEXT::WARNING.0, "WARNING"), + ( + DebugReportFlagsEXT::PERFORMANCE_WARNING.0, + "PERFORMANCE_WARNING", + ), + (DebugReportFlagsEXT::ERROR.0, "ERROR"), + (DebugReportFlagsEXT::DEBUG.0, "DEBUG"), + ]; + debug_flags(f, KNOWN, self.0) + } +} +impl fmt::Debug for DebugReportObjectTypeEXT { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + let name = match *self { + Self::UNKNOWN => Some("UNKNOWN"), + Self::INSTANCE => Some("INSTANCE"), + Self::PHYSICAL_DEVICE => Some("PHYSICAL_DEVICE"), + Self::DEVICE => Some("DEVICE"), + Self::QUEUE => Some("QUEUE"), + Self::SEMAPHORE => Some("SEMAPHORE"), + Self::COMMAND_BUFFER => Some("COMMAND_BUFFER"), + Self::FENCE => Some("FENCE"), + Self::DEVICE_MEMORY => Some("DEVICE_MEMORY"), + Self::BUFFER => Some("BUFFER"), + Self::IMAGE => Some("IMAGE"), + Self::EVENT => Some("EVENT"), + Self::QUERY_POOL => Some("QUERY_POOL"), + Self::BUFFER_VIEW => Some("BUFFER_VIEW"), + Self::IMAGE_VIEW => Some("IMAGE_VIEW"), + Self::SHADER_MODULE => Some("SHADER_MODULE"), + Self::PIPELINE_CACHE => Some("PIPELINE_CACHE"), + Self::PIPELINE_LAYOUT => Some("PIPELINE_LAYOUT"), + Self::RENDER_PASS => Some("RENDER_PASS"), + Self::PIPELINE => Some("PIPELINE"), + Self::DESCRIPTOR_SET_LAYOUT => 
Some("DESCRIPTOR_SET_LAYOUT"), + Self::SAMPLER => Some("SAMPLER"), + Self::DESCRIPTOR_POOL => Some("DESCRIPTOR_POOL"), + Self::DESCRIPTOR_SET => Some("DESCRIPTOR_SET"), + Self::FRAMEBUFFER => Some("FRAMEBUFFER"), + Self::COMMAND_POOL => Some("COMMAND_POOL"), + Self::SURFACE_KHR => Some("SURFACE_KHR"), + Self::SWAPCHAIN_KHR => Some("SWAPCHAIN_KHR"), + Self::DEBUG_REPORT_CALLBACK => Some("DEBUG_REPORT_CALLBACK"), + Self::DISPLAY_KHR => Some("DISPLAY_KHR"), + Self::DISPLAY_MODE_KHR => Some("DISPLAY_MODE_KHR"), + Self::OBJECT_TABLE_NVX => Some("OBJECT_TABLE_NVX"), + Self::INDIRECT_COMMANDS_LAYOUT_NVX => Some("INDIRECT_COMMANDS_LAYOUT_NVX"), + Self::VALIDATION_CACHE => Some("VALIDATION_CACHE"), + Self::SAMPLER_YCBCR_CONVERSION => Some("SAMPLER_YCBCR_CONVERSION"), + Self::DESCRIPTOR_UPDATE_TEMPLATE => Some("DESCRIPTOR_UPDATE_TEMPLATE"), + Self::ACCELERATION_STRUCTURE_NV => Some("ACCELERATION_STRUCTURE_NV"), + _ => None, + }; + if let Some(x) = name { + f.write_str(x) + } else { + self.0.fmt(f) + } + } +} +impl fmt::Debug for DebugUtilsMessageSeverityFlagsEXT { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + const KNOWN: &[(Flags, &str)] = &[ + (DebugUtilsMessageSeverityFlagsEXT::VERBOSE.0, "VERBOSE"), + (DebugUtilsMessageSeverityFlagsEXT::INFO.0, "INFO"), + (DebugUtilsMessageSeverityFlagsEXT::WARNING.0, "WARNING"), + (DebugUtilsMessageSeverityFlagsEXT::ERROR.0, "ERROR"), + ]; + debug_flags(f, KNOWN, self.0) + } +} +impl fmt::Debug for DebugUtilsMessageTypeFlagsEXT { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + const KNOWN: &[(Flags, &str)] = &[ + (DebugUtilsMessageTypeFlagsEXT::GENERAL.0, "GENERAL"), + (DebugUtilsMessageTypeFlagsEXT::VALIDATION.0, "VALIDATION"), + (DebugUtilsMessageTypeFlagsEXT::PERFORMANCE.0, "PERFORMANCE"), + ]; + debug_flags(f, KNOWN, self.0) + } +} +impl fmt::Debug for DebugUtilsMessengerCallbackDataFlagsEXT { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + const KNOWN: &[(Flags, &str)] = &[]; + debug_flags(f, KNOWN, self.0) + } +} +impl fmt::Debug for DebugUtilsMessengerCreateFlagsEXT { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + const KNOWN: &[(Flags, &str)] = &[]; + debug_flags(f, KNOWN, self.0) + } +} +impl fmt::Debug for DependencyFlags { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + const KNOWN: &[(Flags, &str)] = &[ + (DependencyFlags::BY_REGION.0, "BY_REGION"), + (DependencyFlags::DEVICE_GROUP.0, "DEVICE_GROUP"), + (DependencyFlags::VIEW_LOCAL.0, "VIEW_LOCAL"), + ]; + debug_flags(f, KNOWN, self.0) + } +} +impl fmt::Debug for DescriptorBindingFlagsEXT { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + const KNOWN: &[(Flags, &str)] = &[ + ( + DescriptorBindingFlagsEXT::UPDATE_AFTER_BIND.0, + "UPDATE_AFTER_BIND", + ), + ( + DescriptorBindingFlagsEXT::UPDATE_UNUSED_WHILE_PENDING.0, + "UPDATE_UNUSED_WHILE_PENDING", + ), + ( + DescriptorBindingFlagsEXT::PARTIALLY_BOUND.0, + "PARTIALLY_BOUND", + ), + ( + DescriptorBindingFlagsEXT::VARIABLE_DESCRIPTOR_COUNT.0, + "VARIABLE_DESCRIPTOR_COUNT", + ), + ]; + debug_flags(f, KNOWN, self.0) + } +} +impl fmt::Debug for DescriptorPoolCreateFlags { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + const KNOWN: &[(Flags, &str)] = &[ + ( + DescriptorPoolCreateFlags::FREE_DESCRIPTOR_SET.0, + "FREE_DESCRIPTOR_SET", + ), + ( + DescriptorPoolCreateFlags::UPDATE_AFTER_BIND_EXT.0, + "UPDATE_AFTER_BIND_EXT", + ), + ]; + debug_flags(f, KNOWN, self.0) + } +} +impl fmt::Debug for DescriptorPoolResetFlags { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + 
const KNOWN: &[(Flags, &str)] = &[]; + debug_flags(f, KNOWN, self.0) + } +} +impl fmt::Debug for DescriptorSetLayoutCreateFlags { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + const KNOWN: &[(Flags, &str)] = &[ + ( + DescriptorSetLayoutCreateFlags::PUSH_DESCRIPTOR_KHR.0, + "PUSH_DESCRIPTOR_KHR", + ), + ( + DescriptorSetLayoutCreateFlags::UPDATE_AFTER_BIND_POOL_EXT.0, + "UPDATE_AFTER_BIND_POOL_EXT", + ), + ]; + debug_flags(f, KNOWN, self.0) + } +} +impl fmt::Debug for DescriptorType { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + let name = match *self { + Self::SAMPLER => Some("SAMPLER"), + Self::COMBINED_IMAGE_SAMPLER => Some("COMBINED_IMAGE_SAMPLER"), + Self::SAMPLED_IMAGE => Some("SAMPLED_IMAGE"), + Self::STORAGE_IMAGE => Some("STORAGE_IMAGE"), + Self::UNIFORM_TEXEL_BUFFER => Some("UNIFORM_TEXEL_BUFFER"), + Self::STORAGE_TEXEL_BUFFER => Some("STORAGE_TEXEL_BUFFER"), + Self::UNIFORM_BUFFER => Some("UNIFORM_BUFFER"), + Self::STORAGE_BUFFER => Some("STORAGE_BUFFER"), + Self::UNIFORM_BUFFER_DYNAMIC => Some("UNIFORM_BUFFER_DYNAMIC"), + Self::STORAGE_BUFFER_DYNAMIC => Some("STORAGE_BUFFER_DYNAMIC"), + Self::INPUT_ATTACHMENT => Some("INPUT_ATTACHMENT"), + Self::INLINE_UNIFORM_BLOCK_EXT => Some("INLINE_UNIFORM_BLOCK_EXT"), + Self::ACCELERATION_STRUCTURE_NV => Some("ACCELERATION_STRUCTURE_NV"), + _ => None, + }; + if let Some(x) = name { + f.write_str(x) + } else { + self.0.fmt(f) + } + } +} +impl fmt::Debug for DescriptorUpdateTemplateCreateFlags { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + const KNOWN: &[(Flags, &str)] = &[]; + debug_flags(f, KNOWN, self.0) + } +} +impl fmt::Debug for DescriptorUpdateTemplateType { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + let name = match *self { + Self::DESCRIPTOR_SET => Some("DESCRIPTOR_SET"), + _ => None, + }; + if let Some(x) = name { + f.write_str(x) + } else { + self.0.fmt(f) + } + } +} +impl fmt::Debug for DeviceCreateFlags { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + const KNOWN: &[(Flags, &str)] = &[]; + debug_flags(f, KNOWN, self.0) + } +} +impl fmt::Debug for DeviceEventTypeEXT { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + let name = match *self { + Self::DISPLAY_HOTPLUG => Some("DISPLAY_HOTPLUG"), + _ => None, + }; + if let Some(x) = name { + f.write_str(x) + } else { + self.0.fmt(f) + } + } +} +impl fmt::Debug for DeviceGroupPresentModeFlagsKHR { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + const KNOWN: &[(Flags, &str)] = &[ + (DeviceGroupPresentModeFlagsKHR::LOCAL.0, "LOCAL"), + (DeviceGroupPresentModeFlagsKHR::REMOTE.0, "REMOTE"), + (DeviceGroupPresentModeFlagsKHR::SUM.0, "SUM"), + ( + DeviceGroupPresentModeFlagsKHR::LOCAL_MULTI_DEVICE.0, + "LOCAL_MULTI_DEVICE", + ), + ]; + debug_flags(f, KNOWN, self.0) + } +} +impl fmt::Debug for DeviceQueueCreateFlags { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + const KNOWN: &[(Flags, &str)] = &[(DeviceQueueCreateFlags::PROTECTED.0, "PROTECTED")]; + debug_flags(f, KNOWN, self.0) + } +} +impl fmt::Debug for DiscardRectangleModeEXT { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + let name = match *self { + Self::INCLUSIVE => Some("INCLUSIVE"), + Self::EXCLUSIVE => Some("EXCLUSIVE"), + _ => None, + }; + if let Some(x) = name { + f.write_str(x) + } else { + self.0.fmt(f) + } + } +} +impl fmt::Debug for DisplayEventTypeEXT { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + let name = match *self { + Self::FIRST_PIXEL_OUT => Some("FIRST_PIXEL_OUT"), + _ => None, + }; + if let 
Some(x) = name { + f.write_str(x) + } else { + self.0.fmt(f) + } + } +} +impl fmt::Debug for DisplayModeCreateFlagsKHR { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + const KNOWN: &[(Flags, &str)] = &[]; + debug_flags(f, KNOWN, self.0) + } +} +impl fmt::Debug for DisplayPlaneAlphaFlagsKHR { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + const KNOWN: &[(Flags, &str)] = &[ + (DisplayPlaneAlphaFlagsKHR::OPAQUE.0, "OPAQUE"), + (DisplayPlaneAlphaFlagsKHR::GLOBAL.0, "GLOBAL"), + (DisplayPlaneAlphaFlagsKHR::PER_PIXEL.0, "PER_PIXEL"), + ( + DisplayPlaneAlphaFlagsKHR::PER_PIXEL_PREMULTIPLIED.0, + "PER_PIXEL_PREMULTIPLIED", + ), + ]; + debug_flags(f, KNOWN, self.0) + } +} +impl fmt::Debug for DisplayPowerStateEXT { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + let name = match *self { + Self::OFF => Some("OFF"), + Self::SUSPEND => Some("SUSPEND"), + Self::ON => Some("ON"), + _ => None, + }; + if let Some(x) = name { + f.write_str(x) + } else { + self.0.fmt(f) + } + } +} +impl fmt::Debug for DisplaySurfaceCreateFlagsKHR { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + const KNOWN: &[(Flags, &str)] = &[]; + debug_flags(f, KNOWN, self.0) + } +} +impl fmt::Debug for DriverIdKHR { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + let name = match *self { + Self::AMD_PROPRIETARY => Some("AMD_PROPRIETARY"), + Self::AMD_OPEN_SOURCE => Some("AMD_OPEN_SOURCE"), + Self::MESA_RADV => Some("MESA_RADV"), + Self::NVIDIA_PROPRIETARY => Some("NVIDIA_PROPRIETARY"), + Self::INTEL_PROPRIETARY_WINDOWS => Some("INTEL_PROPRIETARY_WINDOWS"), + Self::INTEL_OPEN_SOURCE_MESA => Some("INTEL_OPEN_SOURCE_MESA"), + Self::IMAGINATION_PROPRIETARY => Some("IMAGINATION_PROPRIETARY"), + Self::QUALCOMM_PROPRIETARY => Some("QUALCOMM_PROPRIETARY"), + Self::ARM_PROPRIETARY => Some("ARM_PROPRIETARY"), + Self::GOOGLE_PASTEL => Some("GOOGLE_PASTEL"), + _ => None, + }; + if let Some(x) = name { + f.write_str(x) + } else { + self.0.fmt(f) + } + } +} +impl fmt::Debug for DynamicState { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + let name = match *self { + Self::VIEWPORT => Some("VIEWPORT"), + Self::SCISSOR => Some("SCISSOR"), + Self::LINE_WIDTH => Some("LINE_WIDTH"), + Self::DEPTH_BIAS => Some("DEPTH_BIAS"), + Self::BLEND_CONSTANTS => Some("BLEND_CONSTANTS"), + Self::DEPTH_BOUNDS => Some("DEPTH_BOUNDS"), + Self::STENCIL_COMPARE_MASK => Some("STENCIL_COMPARE_MASK"), + Self::STENCIL_WRITE_MASK => Some("STENCIL_WRITE_MASK"), + Self::STENCIL_REFERENCE => Some("STENCIL_REFERENCE"), + Self::VIEWPORT_W_SCALING_NV => Some("VIEWPORT_W_SCALING_NV"), + Self::DISCARD_RECTANGLE_EXT => Some("DISCARD_RECTANGLE_EXT"), + Self::SAMPLE_LOCATIONS_EXT => Some("SAMPLE_LOCATIONS_EXT"), + Self::VIEWPORT_SHADING_RATE_PALETTE_NV => Some("VIEWPORT_SHADING_RATE_PALETTE_NV"), + Self::VIEWPORT_COARSE_SAMPLE_ORDER_NV => Some("VIEWPORT_COARSE_SAMPLE_ORDER_NV"), + Self::EXCLUSIVE_SCISSOR_NV => Some("EXCLUSIVE_SCISSOR_NV"), + _ => None, + }; + if let Some(x) = name { + f.write_str(x) + } else { + self.0.fmt(f) + } + } +} +impl fmt::Debug for EventCreateFlags { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + const KNOWN: &[(Flags, &str)] = &[]; + debug_flags(f, KNOWN, self.0) + } +} +impl fmt::Debug for ExternalFenceFeatureFlags { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + const KNOWN: &[(Flags, &str)] = &[ + ( + ExternalFenceFeatureFlags::EXTERNAL_FENCE_FEATURE_EXPORTABLE.0, + "EXTERNAL_FENCE_FEATURE_EXPORTABLE", + ), + ( + 
ExternalFenceFeatureFlags::EXTERNAL_FENCE_FEATURE_IMPORTABLE.0, + "EXTERNAL_FENCE_FEATURE_IMPORTABLE", + ), + ]; + debug_flags(f, KNOWN, self.0) + } +} +impl fmt::Debug for ExternalFenceHandleTypeFlags { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + const KNOWN: &[(Flags, &str)] = &[ + ( + ExternalFenceHandleTypeFlags::EXTERNAL_FENCE_HANDLE_TYPE_OPAQUE_FD.0, + "EXTERNAL_FENCE_HANDLE_TYPE_OPAQUE_FD", + ), + ( + ExternalFenceHandleTypeFlags::EXTERNAL_FENCE_HANDLE_TYPE_OPAQUE_WIN32.0, + "EXTERNAL_FENCE_HANDLE_TYPE_OPAQUE_WIN32", + ), + ( + ExternalFenceHandleTypeFlags::EXTERNAL_FENCE_HANDLE_TYPE_OPAQUE_WIN32_KMT.0, + "EXTERNAL_FENCE_HANDLE_TYPE_OPAQUE_WIN32_KMT", + ), + ( + ExternalFenceHandleTypeFlags::EXTERNAL_FENCE_HANDLE_TYPE_SYNC_FD.0, + "EXTERNAL_FENCE_HANDLE_TYPE_SYNC_FD", + ), + ]; + debug_flags(f, KNOWN, self.0) + } +} +impl fmt::Debug for ExternalMemoryFeatureFlags { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + const KNOWN: &[(Flags, &str)] = &[ + ( + ExternalMemoryFeatureFlags::EXTERNAL_MEMORY_FEATURE_DEDICATED_ONLY.0, + "EXTERNAL_MEMORY_FEATURE_DEDICATED_ONLY", + ), + ( + ExternalMemoryFeatureFlags::EXTERNAL_MEMORY_FEATURE_EXPORTABLE.0, + "EXTERNAL_MEMORY_FEATURE_EXPORTABLE", + ), + ( + ExternalMemoryFeatureFlags::EXTERNAL_MEMORY_FEATURE_IMPORTABLE.0, + "EXTERNAL_MEMORY_FEATURE_IMPORTABLE", + ), + ]; + debug_flags(f, KNOWN, self.0) + } +} +impl fmt::Debug for ExternalMemoryFeatureFlagsNV { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + const KNOWN: &[(Flags, &str)] = &[ + ( + ExternalMemoryFeatureFlagsNV::EXTERNAL_MEMORY_FEATURE_DEDICATED_ONLY_NV.0, + "EXTERNAL_MEMORY_FEATURE_DEDICATED_ONLY_NV", + ), + ( + ExternalMemoryFeatureFlagsNV::EXTERNAL_MEMORY_FEATURE_EXPORTABLE_NV.0, + "EXTERNAL_MEMORY_FEATURE_EXPORTABLE_NV", + ), + ( + ExternalMemoryFeatureFlagsNV::EXTERNAL_MEMORY_FEATURE_IMPORTABLE_NV.0, + "EXTERNAL_MEMORY_FEATURE_IMPORTABLE_NV", + ), + ]; + debug_flags(f, KNOWN, self.0) + } +} +impl fmt::Debug for ExternalMemoryHandleTypeFlags { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + const KNOWN: &[(Flags, &str)] = &[ + ( + ExternalMemoryHandleTypeFlags::EXTERNAL_MEMORY_HANDLE_TYPE_OPAQUE_FD.0, + "EXTERNAL_MEMORY_HANDLE_TYPE_OPAQUE_FD", + ), + ( + ExternalMemoryHandleTypeFlags::EXTERNAL_MEMORY_HANDLE_TYPE_OPAQUE_WIN32.0, + "EXTERNAL_MEMORY_HANDLE_TYPE_OPAQUE_WIN32", + ), + ( + ExternalMemoryHandleTypeFlags::EXTERNAL_MEMORY_HANDLE_TYPE_OPAQUE_WIN32_KMT.0, + "EXTERNAL_MEMORY_HANDLE_TYPE_OPAQUE_WIN32_KMT", + ), + ( + ExternalMemoryHandleTypeFlags::EXTERNAL_MEMORY_HANDLE_TYPE_D3D11_TEXTURE.0, + "EXTERNAL_MEMORY_HANDLE_TYPE_D3D11_TEXTURE", + ), + ( + ExternalMemoryHandleTypeFlags::EXTERNAL_MEMORY_HANDLE_TYPE_D3D11_TEXTURE_KMT.0, + "EXTERNAL_MEMORY_HANDLE_TYPE_D3D11_TEXTURE_KMT", + ), + ( + ExternalMemoryHandleTypeFlags::EXTERNAL_MEMORY_HANDLE_TYPE_D3D12_HEAP.0, + "EXTERNAL_MEMORY_HANDLE_TYPE_D3D12_HEAP", + ), + ( + ExternalMemoryHandleTypeFlags::EXTERNAL_MEMORY_HANDLE_TYPE_D3D12_RESOURCE.0, + "EXTERNAL_MEMORY_HANDLE_TYPE_D3D12_RESOURCE", + ), + ( + ExternalMemoryHandleTypeFlags::EXTERNAL_MEMORY_HANDLE_TYPE_DMA_BUF.0, + "EXTERNAL_MEMORY_HANDLE_TYPE_DMA_BUF", + ), + ( + ExternalMemoryHandleTypeFlags::EXTERNAL_MEMORY_HANDLE_TYPE_ANDROID_HARDWARE_BUFFER_ANDROID.0, + "EXTERNAL_MEMORY_HANDLE_TYPE_ANDROID_HARDWARE_BUFFER_ANDROID", + ), + ( + ExternalMemoryHandleTypeFlags::EXTERNAL_MEMORY_HANDLE_TYPE_HOST_ALLOCATION.0, + "EXTERNAL_MEMORY_HANDLE_TYPE_HOST_ALLOCATION", + ), + ( + ExternalMemoryHandleTypeFlags::EXTERNAL_MEMORY_HANDLE_TYPE_HOST_MAPPED_FOREIGN_MEMORY.0, + "EXTERNAL_MEMORY_HANDLE_TYPE_HOST_MAPPED_FOREIGN_MEMORY", + ), + ]; + debug_flags(f, KNOWN, self.0) + } +}
+impl fmt::Debug for ExternalMemoryHandleTypeFlagsNV { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + const KNOWN: &[(Flags, &str)] = &[ + ( + ExternalMemoryHandleTypeFlagsNV::EXTERNAL_MEMORY_HANDLE_TYPE_OPAQUE_WIN32_NV.0, + "EXTERNAL_MEMORY_HANDLE_TYPE_OPAQUE_WIN32_NV", + ), + ( + ExternalMemoryHandleTypeFlagsNV::EXTERNAL_MEMORY_HANDLE_TYPE_OPAQUE_WIN32_KMT_NV.0, + "EXTERNAL_MEMORY_HANDLE_TYPE_OPAQUE_WIN32_KMT_NV", + ), + ( + ExternalMemoryHandleTypeFlagsNV::EXTERNAL_MEMORY_HANDLE_TYPE_D3D11_IMAGE_NV.0, + "EXTERNAL_MEMORY_HANDLE_TYPE_D3D11_IMAGE_NV", + ), + ( + ExternalMemoryHandleTypeFlagsNV::EXTERNAL_MEMORY_HANDLE_TYPE_D3D11_IMAGE_KMT_NV.0, + "EXTERNAL_MEMORY_HANDLE_TYPE_D3D11_IMAGE_KMT_NV", + ), + ]; + debug_flags(f, KNOWN, self.0) + } +} +impl fmt::Debug for ExternalSemaphoreFeatureFlags { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + const KNOWN: &[(Flags, &str)] = &[ + ( + ExternalSemaphoreFeatureFlags::EXTERNAL_SEMAPHORE_FEATURE_EXPORTABLE.0, + "EXTERNAL_SEMAPHORE_FEATURE_EXPORTABLE", + ), + ( + ExternalSemaphoreFeatureFlags::EXTERNAL_SEMAPHORE_FEATURE_IMPORTABLE.0, + "EXTERNAL_SEMAPHORE_FEATURE_IMPORTABLE", + ), + ]; + debug_flags(f, KNOWN, self.0) + } +} +impl fmt::Debug for ExternalSemaphoreHandleTypeFlags { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + const KNOWN: &[(Flags, &str)] = &[ + ( + ExternalSemaphoreHandleTypeFlags::EXTERNAL_SEMAPHORE_HANDLE_TYPE_OPAQUE_FD.0, + "EXTERNAL_SEMAPHORE_HANDLE_TYPE_OPAQUE_FD", + ), + ( + ExternalSemaphoreHandleTypeFlags::EXTERNAL_SEMAPHORE_HANDLE_TYPE_OPAQUE_WIN32.0, + "EXTERNAL_SEMAPHORE_HANDLE_TYPE_OPAQUE_WIN32", + ), + ( + ExternalSemaphoreHandleTypeFlags::EXTERNAL_SEMAPHORE_HANDLE_TYPE_OPAQUE_WIN32_KMT.0, + "EXTERNAL_SEMAPHORE_HANDLE_TYPE_OPAQUE_WIN32_KMT", + ), + ( + ExternalSemaphoreHandleTypeFlags::EXTERNAL_SEMAPHORE_HANDLE_TYPE_D3D12_FENCE.0, + "EXTERNAL_SEMAPHORE_HANDLE_TYPE_D3D12_FENCE", + ), + ( + ExternalSemaphoreHandleTypeFlags::EXTERNAL_SEMAPHORE_HANDLE_TYPE_SYNC_FD.0, + "EXTERNAL_SEMAPHORE_HANDLE_TYPE_SYNC_FD", + ), + ]; + debug_flags(f, KNOWN, self.0) + } +} +impl fmt::Debug for FenceCreateFlags { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + const KNOWN: &[(Flags, &str)] = &[(FenceCreateFlags::SIGNALED.0, "SIGNALED")]; + debug_flags(f, KNOWN, self.0) + } +} +impl fmt::Debug for FenceImportFlags { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + const KNOWN: &[(Flags, &str)] = &[(FenceImportFlags::TEMPORARY.0, "TEMPORARY")]; + debug_flags(f, KNOWN, self.0) + } +} +impl fmt::Debug for Filter { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + let name = match *self { + Self::NEAREST => Some("NEAREST"), + Self::LINEAR => Some("LINEAR"), + Self::CUBIC_IMG => Some("CUBIC_IMG"), + _ => None, + }; + if let Some(x) = name { + f.write_str(x) + } else { + self.0.fmt(f) + } + } +} +impl fmt::Debug for Format { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + let name = match *self { + Self::UNDEFINED => Some("UNDEFINED"), + Self::R4G4_UNORM_PACK8 => Some("R4G4_UNORM_PACK8"), + Self::R4G4B4A4_UNORM_PACK16 => Some("R4G4B4A4_UNORM_PACK16"), + Self::B4G4R4A4_UNORM_PACK16 => Some("B4G4R4A4_UNORM_PACK16"), + Self::R5G6B5_UNORM_PACK16 => Some("R5G6B5_UNORM_PACK16"), + Self::B5G6R5_UNORM_PACK16 => 
Some("B5G6R5_UNORM_PACK16"), + Self::R5G5B5A1_UNORM_PACK16 => Some("R5G5B5A1_UNORM_PACK16"), + Self::B5G5R5A1_UNORM_PACK16 => Some("B5G5R5A1_UNORM_PACK16"), + Self::A1R5G5B5_UNORM_PACK16 => Some("A1R5G5B5_UNORM_PACK16"), + Self::R8_UNORM => Some("R8_UNORM"), + Self::R8_SNORM => Some("R8_SNORM"), + Self::R8_USCALED => Some("R8_USCALED"), + Self::R8_SSCALED => Some("R8_SSCALED"), + Self::R8_UINT => Some("R8_UINT"), + Self::R8_SINT => Some("R8_SINT"), + Self::R8_SRGB => Some("R8_SRGB"), + Self::R8G8_UNORM => Some("R8G8_UNORM"), + Self::R8G8_SNORM => Some("R8G8_SNORM"), + Self::R8G8_USCALED => Some("R8G8_USCALED"), + Self::R8G8_SSCALED => Some("R8G8_SSCALED"), + Self::R8G8_UINT => Some("R8G8_UINT"), + Self::R8G8_SINT => Some("R8G8_SINT"), + Self::R8G8_SRGB => Some("R8G8_SRGB"), + Self::R8G8B8_UNORM => Some("R8G8B8_UNORM"), + Self::R8G8B8_SNORM => Some("R8G8B8_SNORM"), + Self::R8G8B8_USCALED => Some("R8G8B8_USCALED"), + Self::R8G8B8_SSCALED => Some("R8G8B8_SSCALED"), + Self::R8G8B8_UINT => Some("R8G8B8_UINT"), + Self::R8G8B8_SINT => Some("R8G8B8_SINT"), + Self::R8G8B8_SRGB => Some("R8G8B8_SRGB"), + Self::B8G8R8_UNORM => Some("B8G8R8_UNORM"), + Self::B8G8R8_SNORM => Some("B8G8R8_SNORM"), + Self::B8G8R8_USCALED => Some("B8G8R8_USCALED"), + Self::B8G8R8_SSCALED => Some("B8G8R8_SSCALED"), + Self::B8G8R8_UINT => Some("B8G8R8_UINT"), + Self::B8G8R8_SINT => Some("B8G8R8_SINT"), + Self::B8G8R8_SRGB => Some("B8G8R8_SRGB"), + Self::R8G8B8A8_UNORM => Some("R8G8B8A8_UNORM"), + Self::R8G8B8A8_SNORM => Some("R8G8B8A8_SNORM"), + Self::R8G8B8A8_USCALED => Some("R8G8B8A8_USCALED"), + Self::R8G8B8A8_SSCALED => Some("R8G8B8A8_SSCALED"), + Self::R8G8B8A8_UINT => Some("R8G8B8A8_UINT"), + Self::R8G8B8A8_SINT => Some("R8G8B8A8_SINT"), + Self::R8G8B8A8_SRGB => Some("R8G8B8A8_SRGB"), + Self::B8G8R8A8_UNORM => Some("B8G8R8A8_UNORM"), + Self::B8G8R8A8_SNORM => Some("B8G8R8A8_SNORM"), + Self::B8G8R8A8_USCALED => Some("B8G8R8A8_USCALED"), + Self::B8G8R8A8_SSCALED => Some("B8G8R8A8_SSCALED"), + Self::B8G8R8A8_UINT => Some("B8G8R8A8_UINT"), + Self::B8G8R8A8_SINT => Some("B8G8R8A8_SINT"), + Self::B8G8R8A8_SRGB => Some("B8G8R8A8_SRGB"), + Self::A8B8G8R8_UNORM_PACK32 => Some("A8B8G8R8_UNORM_PACK32"), + Self::A8B8G8R8_SNORM_PACK32 => Some("A8B8G8R8_SNORM_PACK32"), + Self::A8B8G8R8_USCALED_PACK32 => Some("A8B8G8R8_USCALED_PACK32"), + Self::A8B8G8R8_SSCALED_PACK32 => Some("A8B8G8R8_SSCALED_PACK32"), + Self::A8B8G8R8_UINT_PACK32 => Some("A8B8G8R8_UINT_PACK32"), + Self::A8B8G8R8_SINT_PACK32 => Some("A8B8G8R8_SINT_PACK32"), + Self::A8B8G8R8_SRGB_PACK32 => Some("A8B8G8R8_SRGB_PACK32"), + Self::A2R10G10B10_UNORM_PACK32 => Some("A2R10G10B10_UNORM_PACK32"), + Self::A2R10G10B10_SNORM_PACK32 => Some("A2R10G10B10_SNORM_PACK32"), + Self::A2R10G10B10_USCALED_PACK32 => Some("A2R10G10B10_USCALED_PACK32"), + Self::A2R10G10B10_SSCALED_PACK32 => Some("A2R10G10B10_SSCALED_PACK32"), + Self::A2R10G10B10_UINT_PACK32 => Some("A2R10G10B10_UINT_PACK32"), + Self::A2R10G10B10_SINT_PACK32 => Some("A2R10G10B10_SINT_PACK32"), + Self::A2B10G10R10_UNORM_PACK32 => Some("A2B10G10R10_UNORM_PACK32"), + Self::A2B10G10R10_SNORM_PACK32 => Some("A2B10G10R10_SNORM_PACK32"), + Self::A2B10G10R10_USCALED_PACK32 => Some("A2B10G10R10_USCALED_PACK32"), + Self::A2B10G10R10_SSCALED_PACK32 => Some("A2B10G10R10_SSCALED_PACK32"), + Self::A2B10G10R10_UINT_PACK32 => Some("A2B10G10R10_UINT_PACK32"), + Self::A2B10G10R10_SINT_PACK32 => Some("A2B10G10R10_SINT_PACK32"), + Self::R16_UNORM => Some("R16_UNORM"), + Self::R16_SNORM => Some("R16_SNORM"), + Self::R16_USCALED => 
Some("R16_USCALED"), + Self::R16_SSCALED => Some("R16_SSCALED"), + Self::R16_UINT => Some("R16_UINT"), + Self::R16_SINT => Some("R16_SINT"), + Self::R16_SFLOAT => Some("R16_SFLOAT"), + Self::R16G16_UNORM => Some("R16G16_UNORM"), + Self::R16G16_SNORM => Some("R16G16_SNORM"), + Self::R16G16_USCALED => Some("R16G16_USCALED"), + Self::R16G16_SSCALED => Some("R16G16_SSCALED"), + Self::R16G16_UINT => Some("R16G16_UINT"), + Self::R16G16_SINT => Some("R16G16_SINT"), + Self::R16G16_SFLOAT => Some("R16G16_SFLOAT"), + Self::R16G16B16_UNORM => Some("R16G16B16_UNORM"), + Self::R16G16B16_SNORM => Some("R16G16B16_SNORM"), + Self::R16G16B16_USCALED => Some("R16G16B16_USCALED"), + Self::R16G16B16_SSCALED => Some("R16G16B16_SSCALED"), + Self::R16G16B16_UINT => Some("R16G16B16_UINT"), + Self::R16G16B16_SINT => Some("R16G16B16_SINT"), + Self::R16G16B16_SFLOAT => Some("R16G16B16_SFLOAT"), + Self::R16G16B16A16_UNORM => Some("R16G16B16A16_UNORM"), + Self::R16G16B16A16_SNORM => Some("R16G16B16A16_SNORM"), + Self::R16G16B16A16_USCALED => Some("R16G16B16A16_USCALED"), + Self::R16G16B16A16_SSCALED => Some("R16G16B16A16_SSCALED"), + Self::R16G16B16A16_UINT => Some("R16G16B16A16_UINT"), + Self::R16G16B16A16_SINT => Some("R16G16B16A16_SINT"), + Self::R16G16B16A16_SFLOAT => Some("R16G16B16A16_SFLOAT"), + Self::R32_UINT => Some("R32_UINT"), + Self::R32_SINT => Some("R32_SINT"), + Self::R32_SFLOAT => Some("R32_SFLOAT"), + Self::R32G32_UINT => Some("R32G32_UINT"), + Self::R32G32_SINT => Some("R32G32_SINT"), + Self::R32G32_SFLOAT => Some("R32G32_SFLOAT"), + Self::R32G32B32_UINT => Some("R32G32B32_UINT"), + Self::R32G32B32_SINT => Some("R32G32B32_SINT"), + Self::R32G32B32_SFLOAT => Some("R32G32B32_SFLOAT"), + Self::R32G32B32A32_UINT => Some("R32G32B32A32_UINT"), + Self::R32G32B32A32_SINT => Some("R32G32B32A32_SINT"), + Self::R32G32B32A32_SFLOAT => Some("R32G32B32A32_SFLOAT"), + Self::R64_UINT => Some("R64_UINT"), + Self::R64_SINT => Some("R64_SINT"), + Self::R64_SFLOAT => Some("R64_SFLOAT"), + Self::R64G64_UINT => Some("R64G64_UINT"), + Self::R64G64_SINT => Some("R64G64_SINT"), + Self::R64G64_SFLOAT => Some("R64G64_SFLOAT"), + Self::R64G64B64_UINT => Some("R64G64B64_UINT"), + Self::R64G64B64_SINT => Some("R64G64B64_SINT"), + Self::R64G64B64_SFLOAT => Some("R64G64B64_SFLOAT"), + Self::R64G64B64A64_UINT => Some("R64G64B64A64_UINT"), + Self::R64G64B64A64_SINT => Some("R64G64B64A64_SINT"), + Self::R64G64B64A64_SFLOAT => Some("R64G64B64A64_SFLOAT"), + Self::B10G11R11_UFLOAT_PACK32 => Some("B10G11R11_UFLOAT_PACK32"), + Self::E5B9G9R9_UFLOAT_PACK32 => Some("E5B9G9R9_UFLOAT_PACK32"), + Self::D16_UNORM => Some("D16_UNORM"), + Self::X8_D24_UNORM_PACK32 => Some("X8_D24_UNORM_PACK32"), + Self::D32_SFLOAT => Some("D32_SFLOAT"), + Self::S8_UINT => Some("S8_UINT"), + Self::D16_UNORM_S8_UINT => Some("D16_UNORM_S8_UINT"), + Self::D24_UNORM_S8_UINT => Some("D24_UNORM_S8_UINT"), + Self::D32_SFLOAT_S8_UINT => Some("D32_SFLOAT_S8_UINT"), + Self::BC1_RGB_UNORM_BLOCK => Some("BC1_RGB_UNORM_BLOCK"), + Self::BC1_RGB_SRGB_BLOCK => Some("BC1_RGB_SRGB_BLOCK"), + Self::BC1_RGBA_UNORM_BLOCK => Some("BC1_RGBA_UNORM_BLOCK"), + Self::BC1_RGBA_SRGB_BLOCK => Some("BC1_RGBA_SRGB_BLOCK"), + Self::BC2_UNORM_BLOCK => Some("BC2_UNORM_BLOCK"), + Self::BC2_SRGB_BLOCK => Some("BC2_SRGB_BLOCK"), + Self::BC3_UNORM_BLOCK => Some("BC3_UNORM_BLOCK"), + Self::BC3_SRGB_BLOCK => Some("BC3_SRGB_BLOCK"), + Self::BC4_UNORM_BLOCK => Some("BC4_UNORM_BLOCK"), + Self::BC4_SNORM_BLOCK => Some("BC4_SNORM_BLOCK"), + Self::BC5_UNORM_BLOCK => Some("BC5_UNORM_BLOCK"), + 
Self::BC5_SNORM_BLOCK => Some("BC5_SNORM_BLOCK"), + Self::BC6H_UFLOAT_BLOCK => Some("BC6H_UFLOAT_BLOCK"), + Self::BC6H_SFLOAT_BLOCK => Some("BC6H_SFLOAT_BLOCK"), + Self::BC7_UNORM_BLOCK => Some("BC7_UNORM_BLOCK"), + Self::BC7_SRGB_BLOCK => Some("BC7_SRGB_BLOCK"), + Self::ETC2_R8G8B8_UNORM_BLOCK => Some("ETC2_R8G8B8_UNORM_BLOCK"), + Self::ETC2_R8G8B8_SRGB_BLOCK => Some("ETC2_R8G8B8_SRGB_BLOCK"), + Self::ETC2_R8G8B8A1_UNORM_BLOCK => Some("ETC2_R8G8B8A1_UNORM_BLOCK"), + Self::ETC2_R8G8B8A1_SRGB_BLOCK => Some("ETC2_R8G8B8A1_SRGB_BLOCK"), + Self::ETC2_R8G8B8A8_UNORM_BLOCK => Some("ETC2_R8G8B8A8_UNORM_BLOCK"), + Self::ETC2_R8G8B8A8_SRGB_BLOCK => Some("ETC2_R8G8B8A8_SRGB_BLOCK"), + Self::EAC_R11_UNORM_BLOCK => Some("EAC_R11_UNORM_BLOCK"), + Self::EAC_R11_SNORM_BLOCK => Some("EAC_R11_SNORM_BLOCK"), + Self::EAC_R11G11_UNORM_BLOCK => Some("EAC_R11G11_UNORM_BLOCK"), + Self::EAC_R11G11_SNORM_BLOCK => Some("EAC_R11G11_SNORM_BLOCK"), + Self::ASTC_4X4_UNORM_BLOCK => Some("ASTC_4X4_UNORM_BLOCK"), + Self::ASTC_4X4_SRGB_BLOCK => Some("ASTC_4X4_SRGB_BLOCK"), + Self::ASTC_5X4_UNORM_BLOCK => Some("ASTC_5X4_UNORM_BLOCK"), + Self::ASTC_5X4_SRGB_BLOCK => Some("ASTC_5X4_SRGB_BLOCK"), + Self::ASTC_5X5_UNORM_BLOCK => Some("ASTC_5X5_UNORM_BLOCK"), + Self::ASTC_5X5_SRGB_BLOCK => Some("ASTC_5X5_SRGB_BLOCK"), + Self::ASTC_6X5_UNORM_BLOCK => Some("ASTC_6X5_UNORM_BLOCK"), + Self::ASTC_6X5_SRGB_BLOCK => Some("ASTC_6X5_SRGB_BLOCK"), + Self::ASTC_6X6_UNORM_BLOCK => Some("ASTC_6X6_UNORM_BLOCK"), + Self::ASTC_6X6_SRGB_BLOCK => Some("ASTC_6X6_SRGB_BLOCK"), + Self::ASTC_8X5_UNORM_BLOCK => Some("ASTC_8X5_UNORM_BLOCK"), + Self::ASTC_8X5_SRGB_BLOCK => Some("ASTC_8X5_SRGB_BLOCK"), + Self::ASTC_8X6_UNORM_BLOCK => Some("ASTC_8X6_UNORM_BLOCK"), + Self::ASTC_8X6_SRGB_BLOCK => Some("ASTC_8X6_SRGB_BLOCK"), + Self::ASTC_8X8_UNORM_BLOCK => Some("ASTC_8X8_UNORM_BLOCK"), + Self::ASTC_8X8_SRGB_BLOCK => Some("ASTC_8X8_SRGB_BLOCK"), + Self::ASTC_10X5_UNORM_BLOCK => Some("ASTC_10X5_UNORM_BLOCK"), + Self::ASTC_10X5_SRGB_BLOCK => Some("ASTC_10X5_SRGB_BLOCK"), + Self::ASTC_10X6_UNORM_BLOCK => Some("ASTC_10X6_UNORM_BLOCK"), + Self::ASTC_10X6_SRGB_BLOCK => Some("ASTC_10X6_SRGB_BLOCK"), + Self::ASTC_10X8_UNORM_BLOCK => Some("ASTC_10X8_UNORM_BLOCK"), + Self::ASTC_10X8_SRGB_BLOCK => Some("ASTC_10X8_SRGB_BLOCK"), + Self::ASTC_10X10_UNORM_BLOCK => Some("ASTC_10X10_UNORM_BLOCK"), + Self::ASTC_10X10_SRGB_BLOCK => Some("ASTC_10X10_SRGB_BLOCK"), + Self::ASTC_12X10_UNORM_BLOCK => Some("ASTC_12X10_UNORM_BLOCK"), + Self::ASTC_12X10_SRGB_BLOCK => Some("ASTC_12X10_SRGB_BLOCK"), + Self::ASTC_12X12_UNORM_BLOCK => Some("ASTC_12X12_UNORM_BLOCK"), + Self::ASTC_12X12_SRGB_BLOCK => Some("ASTC_12X12_SRGB_BLOCK"), + Self::PVRTC1_2BPP_UNORM_BLOCK_IMG => Some("PVRTC1_2BPP_UNORM_BLOCK_IMG"), + Self::PVRTC1_4BPP_UNORM_BLOCK_IMG => Some("PVRTC1_4BPP_UNORM_BLOCK_IMG"), + Self::PVRTC2_2BPP_UNORM_BLOCK_IMG => Some("PVRTC2_2BPP_UNORM_BLOCK_IMG"), + Self::PVRTC2_4BPP_UNORM_BLOCK_IMG => Some("PVRTC2_4BPP_UNORM_BLOCK_IMG"), + Self::PVRTC1_2BPP_SRGB_BLOCK_IMG => Some("PVRTC1_2BPP_SRGB_BLOCK_IMG"), + Self::PVRTC1_4BPP_SRGB_BLOCK_IMG => Some("PVRTC1_4BPP_SRGB_BLOCK_IMG"), + Self::PVRTC2_2BPP_SRGB_BLOCK_IMG => Some("PVRTC2_2BPP_SRGB_BLOCK_IMG"), + Self::PVRTC2_4BPP_SRGB_BLOCK_IMG => Some("PVRTC2_4BPP_SRGB_BLOCK_IMG"), + Self::G8B8G8R8_422_UNORM => Some("G8B8G8R8_422_UNORM"), + Self::B8G8R8G8_422_UNORM => Some("B8G8R8G8_422_UNORM"), + Self::G8_B8_R8_3PLANE_420_UNORM => Some("G8_B8_R8_3PLANE_420_UNORM"), + Self::G8_B8R8_2PLANE_420_UNORM => Some("G8_B8R8_2PLANE_420_UNORM"), + 
Self::G8_B8_R8_3PLANE_422_UNORM => Some("G8_B8_R8_3PLANE_422_UNORM"), + Self::G8_B8R8_2PLANE_422_UNORM => Some("G8_B8R8_2PLANE_422_UNORM"), + Self::G8_B8_R8_3PLANE_444_UNORM => Some("G8_B8_R8_3PLANE_444_UNORM"), + Self::R10X6_UNORM_PACK16 => Some("R10X6_UNORM_PACK16"), + Self::R10X6G10X6_UNORM_2PACK16 => Some("R10X6G10X6_UNORM_2PACK16"), + Self::R10X6G10X6B10X6A10X6_UNORM_4PACK16 => Some("R10X6G10X6B10X6A10X6_UNORM_4PACK16"), + Self::G10X6B10X6G10X6R10X6_422_UNORM_4PACK16 => { + Some("G10X6B10X6G10X6R10X6_422_UNORM_4PACK16") + } + Self::B10X6G10X6R10X6G10X6_422_UNORM_4PACK16 => { + Some("B10X6G10X6R10X6G10X6_422_UNORM_4PACK16") + } + Self::G10X6_B10X6_R10X6_3PLANE_420_UNORM_3PACK16 => { + Some("G10X6_B10X6_R10X6_3PLANE_420_UNORM_3PACK16") + } + Self::G10X6_B10X6R10X6_2PLANE_420_UNORM_3PACK16 => { + Some("G10X6_B10X6R10X6_2PLANE_420_UNORM_3PACK16") + } + Self::G10X6_B10X6_R10X6_3PLANE_422_UNORM_3PACK16 => { + Some("G10X6_B10X6_R10X6_3PLANE_422_UNORM_3PACK16") + } + Self::G10X6_B10X6R10X6_2PLANE_422_UNORM_3PACK16 => { + Some("G10X6_B10X6R10X6_2PLANE_422_UNORM_3PACK16") + } + Self::G10X6_B10X6_R10X6_3PLANE_444_UNORM_3PACK16 => { + Some("G10X6_B10X6_R10X6_3PLANE_444_UNORM_3PACK16") + } + Self::R12X4_UNORM_PACK16 => Some("R12X4_UNORM_PACK16"), + Self::R12X4G12X4_UNORM_2PACK16 => Some("R12X4G12X4_UNORM_2PACK16"), + Self::R12X4G12X4B12X4A12X4_UNORM_4PACK16 => Some("R12X4G12X4B12X4A12X4_UNORM_4PACK16"), + Self::G12X4B12X4G12X4R12X4_422_UNORM_4PACK16 => { + Some("G12X4B12X4G12X4R12X4_422_UNORM_4PACK16") + } + Self::B12X4G12X4R12X4G12X4_422_UNORM_4PACK16 => { + Some("B12X4G12X4R12X4G12X4_422_UNORM_4PACK16") + } + Self::G12X4_B12X4_R12X4_3PLANE_420_UNORM_3PACK16 => { + Some("G12X4_B12X4_R12X4_3PLANE_420_UNORM_3PACK16") + } + Self::G12X4_B12X4R12X4_2PLANE_420_UNORM_3PACK16 => { + Some("G12X4_B12X4R12X4_2PLANE_420_UNORM_3PACK16") + } + Self::G12X4_B12X4_R12X4_3PLANE_422_UNORM_3PACK16 => { + Some("G12X4_B12X4_R12X4_3PLANE_422_UNORM_3PACK16") + } + Self::G12X4_B12X4R12X4_2PLANE_422_UNORM_3PACK16 => { + Some("G12X4_B12X4R12X4_2PLANE_422_UNORM_3PACK16") + } + Self::G12X4_B12X4_R12X4_3PLANE_444_UNORM_3PACK16 => { + Some("G12X4_B12X4_R12X4_3PLANE_444_UNORM_3PACK16") + } + Self::G16B16G16R16_422_UNORM => Some("G16B16G16R16_422_UNORM"), + Self::B16G16R16G16_422_UNORM => Some("B16G16R16G16_422_UNORM"), + Self::G16_B16_R16_3PLANE_420_UNORM => Some("G16_B16_R16_3PLANE_420_UNORM"), + Self::G16_B16R16_2PLANE_420_UNORM => Some("G16_B16R16_2PLANE_420_UNORM"), + Self::G16_B16_R16_3PLANE_422_UNORM => Some("G16_B16_R16_3PLANE_422_UNORM"), + Self::G16_B16R16_2PLANE_422_UNORM => Some("G16_B16R16_2PLANE_422_UNORM"), + Self::G16_B16_R16_3PLANE_444_UNORM => Some("G16_B16_R16_3PLANE_444_UNORM"), + _ => None, + }; + if let Some(x) = name { + f.write_str(x) + } else { + self.0.fmt(f) + } + } +} +impl fmt::Debug for FormatFeatureFlags { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + const KNOWN: &[(Flags, &str)] = &[ + (FormatFeatureFlags::SAMPLED_IMAGE.0, "SAMPLED_IMAGE"), + (FormatFeatureFlags::STORAGE_IMAGE.0, "STORAGE_IMAGE"), + ( + FormatFeatureFlags::STORAGE_IMAGE_ATOMIC.0, + "STORAGE_IMAGE_ATOMIC", + ), + ( + FormatFeatureFlags::UNIFORM_TEXEL_BUFFER.0, + "UNIFORM_TEXEL_BUFFER", + ), + ( + FormatFeatureFlags::STORAGE_TEXEL_BUFFER.0, + "STORAGE_TEXEL_BUFFER", + ), + ( + FormatFeatureFlags::STORAGE_TEXEL_BUFFER_ATOMIC.0, + "STORAGE_TEXEL_BUFFER_ATOMIC", + ), + (FormatFeatureFlags::VERTEX_BUFFER.0, "VERTEX_BUFFER"), + (FormatFeatureFlags::COLOR_ATTACHMENT.0, "COLOR_ATTACHMENT"), + ( + FormatFeatureFlags::COLOR_ATTACHMENT_BLEND.0, + "COLOR_ATTACHMENT_BLEND", + ), + ( + FormatFeatureFlags::DEPTH_STENCIL_ATTACHMENT.0, + "DEPTH_STENCIL_ATTACHMENT", + ), + (FormatFeatureFlags::BLIT_SRC.0, "BLIT_SRC"), + (FormatFeatureFlags::BLIT_DST.0, "BLIT_DST"), + ( + FormatFeatureFlags::SAMPLED_IMAGE_FILTER_LINEAR.0, + "SAMPLED_IMAGE_FILTER_LINEAR", + ), + ( + FormatFeatureFlags::SAMPLED_IMAGE_FILTER_CUBIC_IMG.0, + "SAMPLED_IMAGE_FILTER_CUBIC_IMG", + ), + (FormatFeatureFlags::RESERVED_27_KHR.0, "RESERVED_27_KHR"), + (FormatFeatureFlags::RESERVED_28_KHR.0, "RESERVED_28_KHR"), + (FormatFeatureFlags::RESERVED_25_KHR.0, "RESERVED_25_KHR"), + (FormatFeatureFlags::RESERVED_26_KHR.0, "RESERVED_26_KHR"), + ( + FormatFeatureFlags::SAMPLED_IMAGE_FILTER_MINMAX_EXT.0, + "SAMPLED_IMAGE_FILTER_MINMAX_EXT", + ), + ( + FormatFeatureFlags::FRAGMENT_DENSITY_MAP_EXT.0, + "FRAGMENT_DENSITY_MAP_EXT", + ), + (FormatFeatureFlags::TRANSFER_SRC.0, "TRANSFER_SRC"), + (FormatFeatureFlags::TRANSFER_DST.0, "TRANSFER_DST"), + ( + FormatFeatureFlags::MIDPOINT_CHROMA_SAMPLES.0, + "MIDPOINT_CHROMA_SAMPLES", + ), + ( + FormatFeatureFlags::SAMPLED_IMAGE_YCBCR_CONVERSION_LINEAR_FILTER.0, + "SAMPLED_IMAGE_YCBCR_CONVERSION_LINEAR_FILTER", + ), + ( + FormatFeatureFlags::SAMPLED_IMAGE_YCBCR_CONVERSION_SEPARATE_RECONSTRUCTION_FILTER.0, + "SAMPLED_IMAGE_YCBCR_CONVERSION_SEPARATE_RECONSTRUCTION_FILTER", + ), + ( + FormatFeatureFlags::SAMPLED_IMAGE_YCBCR_CONVERSION_CHROMA_RECONSTRUCTION_EXPLICIT.0, + "SAMPLED_IMAGE_YCBCR_CONVERSION_CHROMA_RECONSTRUCTION_EXPLICIT", + ), + ( + FormatFeatureFlags::SAMPLED_IMAGE_YCBCR_CONVERSION_CHROMA_RECONSTRUCTION_EXPLICIT_FORCEABLE.0, + "SAMPLED_IMAGE_YCBCR_CONVERSION_CHROMA_RECONSTRUCTION_EXPLICIT_FORCEABLE", + ), + (FormatFeatureFlags::DISJOINT.0, "DISJOINT"), + ( + FormatFeatureFlags::COSITED_CHROMA_SAMPLES.0,
+ "COSITED_CHROMA_SAMPLES", + ), + ]; + debug_flags(f, KNOWN, self.0) + } +} +impl fmt::Debug for FramebufferCreateFlags { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + const KNOWN: &[(Flags, &str)] = &[]; + debug_flags(f, KNOWN, self.0) + } +} +impl fmt::Debug for FrontFace { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + let name = match *self { + Self::COUNTER_CLOCKWISE => Some("COUNTER_CLOCKWISE"), + Self::CLOCKWISE => Some("CLOCKWISE"), + _ => None, + }; + if let Some(x) = name { + f.write_str(x) + } else { + self.0.fmt(f) + } + } +} +impl fmt::Debug for GeometryFlagsNV { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + const KNOWN: &[(Flags, &str)] = &[ + (GeometryFlagsNV::OPAQUE.0, "OPAQUE"), + ( + GeometryFlagsNV::NO_DUPLICATE_ANY_HIT_INVOCATION.0, + "NO_DUPLICATE_ANY_HIT_INVOCATION", + ), + ]; + debug_flags(f, KNOWN, self.0) + } +} +impl fmt::Debug for GeometryInstanceFlagsNV { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + const KNOWN: &[(Flags, &str)] = &[ + ( + GeometryInstanceFlagsNV::TRIANGLE_CULL_DISABLE.0, + "TRIANGLE_CULL_DISABLE", + ), + ( + GeometryInstanceFlagsNV::TRIANGLE_FRONT_COUNTERCLOCKWISE.0, + "TRIANGLE_FRONT_COUNTERCLOCKWISE", + ), + (GeometryInstanceFlagsNV::FORCE_OPAQUE.0, "FORCE_OPAQUE"), + ( + GeometryInstanceFlagsNV::FORCE_NO_OPAQUE.0, + "FORCE_NO_OPAQUE", + ), + ]; + debug_flags(f, KNOWN, self.0) + } +} +impl fmt::Debug for GeometryTypeNV { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + let name = match *self { + Self::TRIANGLES => Some("TRIANGLES"), + Self::AABBS => Some("AABBS"), + _ => None, + }; + if let Some(x) = name { + f.write_str(x) + } else { + self.0.fmt(f) + } + } +} +impl fmt::Debug for IOSSurfaceCreateFlagsMVK { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + const KNOWN: &[(Flags, &str)] = &[]; + debug_flags(f, KNOWN, self.0) + } +} +impl fmt::Debug for ImageAspectFlags { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + const KNOWN: &[(Flags, &str)] = &[ + (ImageAspectFlags::COLOR.0, "COLOR"), + (ImageAspectFlags::DEPTH.0, "DEPTH"), + (ImageAspectFlags::STENCIL.0, "STENCIL"), + (ImageAspectFlags::METADATA.0, "METADATA"), + (ImageAspectFlags::MEMORY_PLANE_0_EXT.0, "MEMORY_PLANE_0_EXT"), + (ImageAspectFlags::MEMORY_PLANE_1_EXT.0, "MEMORY_PLANE_1_EXT"), + (ImageAspectFlags::MEMORY_PLANE_2_EXT.0, "MEMORY_PLANE_2_EXT"), + (ImageAspectFlags::MEMORY_PLANE_3_EXT.0, "MEMORY_PLANE_3_EXT"), + (ImageAspectFlags::PLANE_0.0, "PLANE_0"), + (ImageAspectFlags::PLANE_1.0, "PLANE_1"), + (ImageAspectFlags::PLANE_2.0, "PLANE_2"), + ]; + debug_flags(f, KNOWN, self.0) + } +} +impl fmt::Debug for ImageCreateFlags { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + const KNOWN: &[(Flags, &str)] = &[ + (ImageCreateFlags::SPARSE_BINDING.0, "SPARSE_BINDING"), + (ImageCreateFlags::SPARSE_RESIDENCY.0, "SPARSE_RESIDENCY"), + (ImageCreateFlags::SPARSE_ALIASED.0, "SPARSE_ALIASED"), + (ImageCreateFlags::MUTABLE_FORMAT.0, "MUTABLE_FORMAT"), + (ImageCreateFlags::CUBE_COMPATIBLE.0, "CUBE_COMPATIBLE"), + (ImageCreateFlags::CORNER_SAMPLED_NV.0, "CORNER_SAMPLED_NV"), + ( + ImageCreateFlags::SAMPLE_LOCATIONS_COMPATIBLE_DEPTH_EXT.0, + "SAMPLE_LOCATIONS_COMPATIBLE_DEPTH_EXT", + ), + (ImageCreateFlags::SUBSAMPLED_EXT.0, "SUBSAMPLED_EXT"), + (ImageCreateFlags::ALIAS.0, "ALIAS"), + ( + ImageCreateFlags::SPLIT_INSTANCE_BIND_REGIONS.0, + "SPLIT_INSTANCE_BIND_REGIONS", + ), + ( + ImageCreateFlags::TYPE_2D_ARRAY_COMPATIBLE.0, + "TYPE_2D_ARRAY_COMPATIBLE", + ), + ( +
ImageCreateFlags::BLOCK_TEXEL_VIEW_COMPATIBLE.0, + "BLOCK_TEXEL_VIEW_COMPATIBLE", + ), + (ImageCreateFlags::EXTENDED_USAGE.0, "EXTENDED_USAGE"), + (ImageCreateFlags::PROTECTED.0, "PROTECTED"), + (ImageCreateFlags::DISJOINT.0, "DISJOINT"), + ]; + debug_flags(f, KNOWN, self.0) + } +} +impl fmt::Debug for ImageLayout { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + let name = match *self { + Self::UNDEFINED => Some("UNDEFINED"), + Self::GENERAL => Some("GENERAL"), + Self::COLOR_ATTACHMENT_OPTIMAL => Some("COLOR_ATTACHMENT_OPTIMAL"), + Self::DEPTH_STENCIL_ATTACHMENT_OPTIMAL => Some("DEPTH_STENCIL_ATTACHMENT_OPTIMAL"), + Self::DEPTH_STENCIL_READ_ONLY_OPTIMAL => Some("DEPTH_STENCIL_READ_ONLY_OPTIMAL"), + Self::SHADER_READ_ONLY_OPTIMAL => Some("SHADER_READ_ONLY_OPTIMAL"), + Self::TRANSFER_SRC_OPTIMAL => Some("TRANSFER_SRC_OPTIMAL"), + Self::TRANSFER_DST_OPTIMAL => Some("TRANSFER_DST_OPTIMAL"), + Self::PREINITIALIZED => Some("PREINITIALIZED"), + Self::PRESENT_SRC_KHR => Some("PRESENT_SRC_KHR"), + Self::SHARED_PRESENT_KHR => Some("SHARED_PRESENT_KHR"), + Self::SHADING_RATE_OPTIMAL_NV => Some("SHADING_RATE_OPTIMAL_NV"), + Self::FRAGMENT_DENSITY_MAP_OPTIMAL_EXT => Some("FRAGMENT_DENSITY_MAP_OPTIMAL_EXT"), + Self::DEPTH_READ_ONLY_STENCIL_ATTACHMENT_OPTIMAL => { + Some("DEPTH_READ_ONLY_STENCIL_ATTACHMENT_OPTIMAL") + } + Self::DEPTH_ATTACHMENT_STENCIL_READ_ONLY_OPTIMAL => { + Some("DEPTH_ATTACHMENT_STENCIL_READ_ONLY_OPTIMAL") + } + _ => None, + }; + if let Some(x) = name { + f.write_str(x) + } else { + self.0.fmt(f) + } + } +} +impl fmt::Debug for ImagePipeSurfaceCreateFlagsFUCHSIA { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + const KNOWN: &[(Flags, &str)] = &[]; + debug_flags(f, KNOWN, self.0) + } +} +impl fmt::Debug for ImageTiling { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + let name = match *self { + Self::OPTIMAL => Some("OPTIMAL"), + Self::LINEAR => Some("LINEAR"), + Self::DRM_FORMAT_MODIFIER_EXT => Some("DRM_FORMAT_MODIFIER_EXT"), + _ => None, + }; + if let Some(x) = name { + f.write_str(x) + } else { + self.0.fmt(f) + } + } +} +impl fmt::Debug for ImageType { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + let name = match *self { + Self::TYPE_1D => Some("TYPE_1D"), + Self::TYPE_2D => Some("TYPE_2D"), + Self::TYPE_3D => Some("TYPE_3D"), + _ => None, + }; + if let Some(x) = name { + f.write_str(x) + } else { + self.0.fmt(f) + } + } +} +impl fmt::Debug for ImageUsageFlags { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + const KNOWN: &[(Flags, &str)] = &[ + (ImageUsageFlags::TRANSFER_SRC.0, "TRANSFER_SRC"), + (ImageUsageFlags::TRANSFER_DST.0, "TRANSFER_DST"), + (ImageUsageFlags::SAMPLED.0, "SAMPLED"), + (ImageUsageFlags::STORAGE.0, "STORAGE"), + (ImageUsageFlags::COLOR_ATTACHMENT.0, "COLOR_ATTACHMENT"), + ( + ImageUsageFlags::DEPTH_STENCIL_ATTACHMENT.0, + "DEPTH_STENCIL_ATTACHMENT", + ), + ( + ImageUsageFlags::TRANSIENT_ATTACHMENT.0, + "TRANSIENT_ATTACHMENT", + ), + (ImageUsageFlags::INPUT_ATTACHMENT.0, "INPUT_ATTACHMENT"), + (ImageUsageFlags::RESERVED_13_KHR.0, "RESERVED_13_KHR"), + (ImageUsageFlags::RESERVED_14_KHR.0, "RESERVED_14_KHR"), + (ImageUsageFlags::RESERVED_15_KHR.0, "RESERVED_15_KHR"), + (ImageUsageFlags::RESERVED_10_KHR.0, "RESERVED_10_KHR"), + (ImageUsageFlags::RESERVED_11_KHR.0, "RESERVED_11_KHR"), + (ImageUsageFlags::RESERVED_12_KHR.0, "RESERVED_12_KHR"), + ( + ImageUsageFlags::SHADING_RATE_IMAGE_NV.0, + "SHADING_RATE_IMAGE_NV", + ), + ( + ImageUsageFlags::FRAGMENT_DENSITY_MAP_EXT.0, + "FRAGMENT_DENSITY_MAP_EXT", + ), 
+ ]; + debug_flags(f, KNOWN, self.0) + } +} +impl fmt::Debug for ImageViewCreateFlags { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + const KNOWN: &[(Flags, &str)] = &[( + ImageViewCreateFlags::FRAGMENT_DENSITY_MAP_DYNAMIC_EXT.0, + "FRAGMENT_DENSITY_MAP_DYNAMIC_EXT", + )]; + debug_flags(f, KNOWN, self.0) + } +} +impl fmt::Debug for ImageViewType { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + let name = match *self { + Self::TYPE_1D => Some("TYPE_1D"), + Self::TYPE_2D => Some("TYPE_2D"), + Self::TYPE_3D => Some("TYPE_3D"), + Self::CUBE => Some("CUBE"), + Self::TYPE_1D_ARRAY => Some("TYPE_1D_ARRAY"), + Self::TYPE_2D_ARRAY => Some("TYPE_2D_ARRAY"), + Self::CUBE_ARRAY => Some("CUBE_ARRAY"), + _ => None, + }; + if let Some(x) = name { + f.write_str(x) + } else { + self.0.fmt(f) + } + } +} +impl fmt::Debug for IndexType { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + let name = match *self { + Self::UINT16 => Some("UINT16"), + Self::UINT32 => Some("UINT32"), + Self::NONE_NV => Some("NONE_NV"), + _ => None, + }; + if let Some(x) = name { + f.write_str(x) + } else { + self.0.fmt(f) + } + } +} +impl fmt::Debug for IndirectCommandsLayoutUsageFlagsNVX { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + const KNOWN: &[(Flags, &str)] = &[ + ( + IndirectCommandsLayoutUsageFlagsNVX::UNORDERED_SEQUENCES.0, + "UNORDERED_SEQUENCES", + ), + ( + IndirectCommandsLayoutUsageFlagsNVX::SPARSE_SEQUENCES.0, + "SPARSE_SEQUENCES", + ), + ( + IndirectCommandsLayoutUsageFlagsNVX::EMPTY_EXECUTIONS.0, + "EMPTY_EXECUTIONS", + ), + ( + IndirectCommandsLayoutUsageFlagsNVX::INDEXED_SEQUENCES.0, + "INDEXED_SEQUENCES", + ), + ]; + debug_flags(f, KNOWN, self.0) + } +} +impl fmt::Debug for IndirectCommandsTokenTypeNVX { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + let name = match *self { + Self::PIPELINE => Some("PIPELINE"), + Self::DESCRIPTOR_SET => Some("DESCRIPTOR_SET"), + Self::INDEX_BUFFER => Some("INDEX_BUFFER"), + Self::VERTEX_BUFFER => Some("VERTEX_BUFFER"), + Self::PUSH_CONSTANT => Some("PUSH_CONSTANT"), + Self::DRAW_INDEXED => Some("DRAW_INDEXED"), + Self::DRAW => Some("DRAW"), + Self::DISPATCH => Some("DISPATCH"), + _ => None, + }; + if let Some(x) = name { + f.write_str(x) + } else { + self.0.fmt(f) + } + } +} +impl fmt::Debug for InstanceCreateFlags { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + const KNOWN: &[(Flags, &str)] = &[]; + debug_flags(f, KNOWN, self.0) + } +} +impl fmt::Debug for InternalAllocationType { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + let name = match *self { + Self::EXECUTABLE => Some("EXECUTABLE"), + _ => None, + }; + if let Some(x) = name { + f.write_str(x) + } else { + self.0.fmt(f) + } + } +} +impl fmt::Debug for LogicOp { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + let name = match *self { + Self::CLEAR => Some("CLEAR"), + Self::AND => Some("AND"), + Self::AND_REVERSE => Some("AND_REVERSE"), + Self::COPY => Some("COPY"), + Self::AND_INVERTED => Some("AND_INVERTED"), + Self::NO_OP => Some("NO_OP"), + Self::XOR => Some("XOR"), + Self::OR => Some("OR"), + Self::NOR => Some("NOR"), + Self::EQUIVALENT => Some("EQUIVALENT"), + Self::INVERT => Some("INVERT"), + Self::OR_REVERSE => Some("OR_REVERSE"), + Self::COPY_INVERTED => Some("COPY_INVERTED"), + Self::OR_INVERTED => Some("OR_INVERTED"), + Self::NAND => Some("NAND"), + Self::SET => Some("SET"), + _ => None, + }; + if let Some(x) = name { + f.write_str(x) + } else { + self.0.fmt(f) + } + } +} +impl fmt::Debug for 
MacOSSurfaceCreateFlagsMVK { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + const KNOWN: &[(Flags, &str)] = &[]; + debug_flags(f, KNOWN, self.0) + } +} +impl fmt::Debug for MemoryAllocateFlags { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + const KNOWN: &[(Flags, &str)] = &[(MemoryAllocateFlags::DEVICE_MASK.0, "DEVICE_MASK")]; + debug_flags(f, KNOWN, self.0) + } +} +impl fmt::Debug for MemoryHeapFlags { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + const KNOWN: &[(Flags, &str)] = &[ + (MemoryHeapFlags::DEVICE_LOCAL.0, "DEVICE_LOCAL"), + (MemoryHeapFlags::MULTI_INSTANCE.0, "MULTI_INSTANCE"), + ]; + debug_flags(f, KNOWN, self.0) + } +} +impl fmt::Debug for MemoryMapFlags { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + const KNOWN: &[(Flags, &str)] = &[]; + debug_flags(f, KNOWN, self.0) + } +} +impl fmt::Debug for MemoryOverallocationBehaviorAMD { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + let name = match *self { + Self::DEFAULT => Some("DEFAULT"), + Self::ALLOWED => Some("ALLOWED"), + Self::DISALLOWED => Some("DISALLOWED"), + _ => None, + }; + if let Some(x) = name { + f.write_str(x) + } else { + self.0.fmt(f) + } + } +} +impl fmt::Debug for MemoryPropertyFlags { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + const KNOWN: &[(Flags, &str)] = &[ + (MemoryPropertyFlags::DEVICE_LOCAL.0, "DEVICE_LOCAL"), + (MemoryPropertyFlags::HOST_VISIBLE.0, "HOST_VISIBLE"), + (MemoryPropertyFlags::HOST_COHERENT.0, "HOST_COHERENT"), + (MemoryPropertyFlags::HOST_CACHED.0, "HOST_CACHED"), + (MemoryPropertyFlags::LAZILY_ALLOCATED.0, "LAZILY_ALLOCATED"), + (MemoryPropertyFlags::PROTECTED.0, "PROTECTED"), + ]; + debug_flags(f, KNOWN, self.0) + } +} +impl fmt::Debug for ObjectEntryTypeNVX { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + let name = match *self { + Self::DESCRIPTOR_SET => Some("DESCRIPTOR_SET"), + Self::PIPELINE => Some("PIPELINE"), + Self::INDEX_BUFFER => Some("INDEX_BUFFER"), + Self::VERTEX_BUFFER => Some("VERTEX_BUFFER"), + Self::PUSH_CONSTANT => Some("PUSH_CONSTANT"), + _ => None, + }; + if let Some(x) = name { + f.write_str(x) + } else { + self.0.fmt(f) + } + } +} +impl fmt::Debug for ObjectEntryUsageFlagsNVX { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + const KNOWN: &[(Flags, &str)] = &[ + (ObjectEntryUsageFlagsNVX::GRAPHICS.0, "GRAPHICS"), + (ObjectEntryUsageFlagsNVX::COMPUTE.0, "COMPUTE"), + ]; + debug_flags(f, KNOWN, self.0) + } +} +impl fmt::Debug for ObjectType { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + let name = match *self { + Self::UNKNOWN => Some("UNKNOWN"), + Self::INSTANCE => Some("INSTANCE"), + Self::PHYSICAL_DEVICE => Some("PHYSICAL_DEVICE"), + Self::DEVICE => Some("DEVICE"), + Self::QUEUE => Some("QUEUE"), + Self::SEMAPHORE => Some("SEMAPHORE"), + Self::COMMAND_BUFFER => Some("COMMAND_BUFFER"), + Self::FENCE => Some("FENCE"), + Self::DEVICE_MEMORY => Some("DEVICE_MEMORY"), + Self::BUFFER => Some("BUFFER"), + Self::IMAGE => Some("IMAGE"), + Self::EVENT => Some("EVENT"), + Self::QUERY_POOL => Some("QUERY_POOL"), + Self::BUFFER_VIEW => Some("BUFFER_VIEW"), + Self::IMAGE_VIEW => Some("IMAGE_VIEW"), + Self::SHADER_MODULE => Some("SHADER_MODULE"), + Self::PIPELINE_CACHE => Some("PIPELINE_CACHE"), + Self::PIPELINE_LAYOUT => Some("PIPELINE_LAYOUT"), + Self::RENDER_PASS => Some("RENDER_PASS"), + Self::PIPELINE => Some("PIPELINE"), + Self::DESCRIPTOR_SET_LAYOUT => Some("DESCRIPTOR_SET_LAYOUT"), + Self::SAMPLER => Some("SAMPLER"), + Self::DESCRIPTOR_POOL => 
Some("DESCRIPTOR_POOL"), + Self::DESCRIPTOR_SET => Some("DESCRIPTOR_SET"), + Self::FRAMEBUFFER => Some("FRAMEBUFFER"), + Self::COMMAND_POOL => Some("COMMAND_POOL"), + Self::SURFACE_KHR => Some("SURFACE_KHR"), + Self::SWAPCHAIN_KHR => Some("SWAPCHAIN_KHR"), + Self::DISPLAY_KHR => Some("DISPLAY_KHR"), + Self::DISPLAY_MODE_KHR => Some("DISPLAY_MODE_KHR"), + Self::DEBUG_REPORT_CALLBACK_EXT => Some("DEBUG_REPORT_CALLBACK_EXT"), + Self::OBJECT_TABLE_NVX => Some("OBJECT_TABLE_NVX"), + Self::INDIRECT_COMMANDS_LAYOUT_NVX => Some("INDIRECT_COMMANDS_LAYOUT_NVX"), + Self::DEBUG_UTILS_MESSENGER_EXT => Some("DEBUG_UTILS_MESSENGER_EXT"), + Self::VALIDATION_CACHE_EXT => Some("VALIDATION_CACHE_EXT"), + Self::ACCELERATION_STRUCTURE_NV => Some("ACCELERATION_STRUCTURE_NV"), + Self::SAMPLER_YCBCR_CONVERSION => Some("SAMPLER_YCBCR_CONVERSION"), + Self::DESCRIPTOR_UPDATE_TEMPLATE => Some("DESCRIPTOR_UPDATE_TEMPLATE"), + _ => None, + }; + if let Some(x) = name { + f.write_str(x) + } else { + self.0.fmt(f) + } + } +} +impl fmt::Debug for PeerMemoryFeatureFlags { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + const KNOWN: &[(Flags, &str)] = &[ + (PeerMemoryFeatureFlags::COPY_SRC.0, "COPY_SRC"), + (PeerMemoryFeatureFlags::COPY_DST.0, "COPY_DST"), + (PeerMemoryFeatureFlags::GENERIC_SRC.0, "GENERIC_SRC"), + (PeerMemoryFeatureFlags::GENERIC_DST.0, "GENERIC_DST"), + ]; + debug_flags(f, KNOWN, self.0) + } +} +impl fmt::Debug for PhysicalDeviceType { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + let name = match *self { + Self::OTHER => Some("OTHER"), + Self::INTEGRATED_GPU => Some("INTEGRATED_GPU"), + Self::DISCRETE_GPU => Some("DISCRETE_GPU"), + Self::VIRTUAL_GPU => Some("VIRTUAL_GPU"), + Self::CPU => Some("CPU"), + _ => None, + }; + if let Some(x) = name { + f.write_str(x) + } else { + self.0.fmt(f) + } + } +} +impl fmt::Debug for PipelineBindPoint { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + let name = match *self { + Self::GRAPHICS => Some("GRAPHICS"), + Self::COMPUTE => Some("COMPUTE"), + Self::RAY_TRACING_NV => Some("RAY_TRACING_NV"), + _ => None, + }; + if let Some(x) = name { + f.write_str(x) + } else { + self.0.fmt(f) + } + } +} +impl fmt::Debug for PipelineCacheCreateFlags { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + const KNOWN: &[(Flags, &str)] = &[]; + debug_flags(f, KNOWN, self.0) + } +} +impl fmt::Debug for PipelineCacheHeaderVersion { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + let name = match *self { + Self::ONE => Some("ONE"), + _ => None, + }; + if let Some(x) = name { + f.write_str(x) + } else { + self.0.fmt(f) + } + } +} +impl fmt::Debug for PipelineColorBlendStateCreateFlags { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + const KNOWN: &[(Flags, &str)] = &[]; + debug_flags(f, KNOWN, self.0) + } +} +impl fmt::Debug for PipelineCoverageModulationStateCreateFlagsNV { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + const KNOWN: &[(Flags, &str)] = &[]; + debug_flags(f, KNOWN, self.0) + } +} +impl fmt::Debug for PipelineCoverageToColorStateCreateFlagsNV { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + const KNOWN: &[(Flags, &str)] = &[]; + debug_flags(f, KNOWN, self.0) + } +} +impl fmt::Debug for PipelineCreateFlags { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + const KNOWN: &[(Flags, &str)] = &[ + ( + PipelineCreateFlags::DISABLE_OPTIMIZATION.0, + "DISABLE_OPTIMIZATION", + ), + ( + PipelineCreateFlags::ALLOW_DERIVATIVES.0, + "ALLOW_DERIVATIVES", + ), + 
(PipelineCreateFlags::DERIVATIVE.0, "DERIVATIVE"), + (PipelineCreateFlags::DEFER_COMPILE_NV.0, "DEFER_COMPILE_NV"), + ( + PipelineCreateFlags::VIEW_INDEX_FROM_DEVICE_INDEX.0, + "VIEW_INDEX_FROM_DEVICE_INDEX", + ), + (PipelineCreateFlags::DISPATCH_BASE.0, "DISPATCH_BASE"), + ]; + debug_flags(f, KNOWN, self.0) + } +} +impl fmt::Debug for PipelineDepthStencilStateCreateFlags { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + const KNOWN: &[(Flags, &str)] = &[]; + debug_flags(f, KNOWN, self.0) + } +} +impl fmt::Debug for PipelineDiscardRectangleStateCreateFlagsEXT { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + const KNOWN: &[(Flags, &str)] = &[]; + debug_flags(f, KNOWN, self.0) + } +} +impl fmt::Debug for PipelineDynamicStateCreateFlags { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + const KNOWN: &[(Flags, &str)] = &[]; + debug_flags(f, KNOWN, self.0) + } +} +impl fmt::Debug for PipelineInputAssemblyStateCreateFlags { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + const KNOWN: &[(Flags, &str)] = &[]; + debug_flags(f, KNOWN, self.0) + } +} +impl fmt::Debug for PipelineLayoutCreateFlags { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + const KNOWN: &[(Flags, &str)] = &[]; + debug_flags(f, KNOWN, self.0) + } +} +impl fmt::Debug for PipelineMultisampleStateCreateFlags { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + const KNOWN: &[(Flags, &str)] = &[]; + debug_flags(f, KNOWN, self.0) + } +} +impl fmt::Debug for PipelineRasterizationConservativeStateCreateFlagsEXT { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + const KNOWN: &[(Flags, &str)] = &[]; + debug_flags(f, KNOWN, self.0) + } +} +impl fmt::Debug for PipelineRasterizationStateCreateFlags { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + const KNOWN: &[(Flags, &str)] = &[]; + debug_flags(f, KNOWN, self.0) + } +} +impl fmt::Debug for PipelineRasterizationStateStreamCreateFlagsEXT { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + const KNOWN: &[(Flags, &str)] = &[]; + debug_flags(f, KNOWN, self.0) + } +} +impl fmt::Debug for PipelineShaderStageCreateFlags { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + const KNOWN: &[(Flags, &str)] = &[]; + debug_flags(f, KNOWN, self.0) + } +} +impl fmt::Debug for PipelineStageFlags { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + const KNOWN: &[(Flags, &str)] = &[ + (PipelineStageFlags::TOP_OF_PIPE.0, "TOP_OF_PIPE"), + (PipelineStageFlags::DRAW_INDIRECT.0, "DRAW_INDIRECT"), + (PipelineStageFlags::VERTEX_INPUT.0, "VERTEX_INPUT"), + (PipelineStageFlags::VERTEX_SHADER.0, "VERTEX_SHADER"), + ( + PipelineStageFlags::TESSELLATION_CONTROL_SHADER.0, + "TESSELLATION_CONTROL_SHADER", + ), + ( + PipelineStageFlags::TESSELLATION_EVALUATION_SHADER.0, + "TESSELLATION_EVALUATION_SHADER", + ), + (PipelineStageFlags::GEOMETRY_SHADER.0, "GEOMETRY_SHADER"), + (PipelineStageFlags::FRAGMENT_SHADER.0, "FRAGMENT_SHADER"), + ( + PipelineStageFlags::EARLY_FRAGMENT_TESTS.0, + "EARLY_FRAGMENT_TESTS", + ), + ( + PipelineStageFlags::LATE_FRAGMENT_TESTS.0, + "LATE_FRAGMENT_TESTS", + ), + ( + PipelineStageFlags::COLOR_ATTACHMENT_OUTPUT.0, + "COLOR_ATTACHMENT_OUTPUT", + ), + (PipelineStageFlags::COMPUTE_SHADER.0, "COMPUTE_SHADER"), + (PipelineStageFlags::TRANSFER.0, "TRANSFER"), + (PipelineStageFlags::BOTTOM_OF_PIPE.0, "BOTTOM_OF_PIPE"), + (PipelineStageFlags::HOST.0, "HOST"), + (PipelineStageFlags::ALL_GRAPHICS.0, "ALL_GRAPHICS"), + (PipelineStageFlags::ALL_COMMANDS.0, "ALL_COMMANDS"), + 
(PipelineStageFlags::RESERVED_27_KHR.0, "RESERVED_27_KHR"), + (PipelineStageFlags::RESERVED_26_KHR.0, "RESERVED_26_KHR"), + ( + PipelineStageFlags::TRANSFORM_FEEDBACK_EXT.0, + "TRANSFORM_FEEDBACK_EXT", + ), + ( + PipelineStageFlags::CONDITIONAL_RENDERING_EXT.0, + "CONDITIONAL_RENDERING_EXT", + ), + ( + PipelineStageFlags::COMMAND_PROCESS_NVX.0, + "COMMAND_PROCESS_NVX", + ), + ( + PipelineStageFlags::SHADING_RATE_IMAGE_NV.0, + "SHADING_RATE_IMAGE_NV", + ), + ( + PipelineStageFlags::RAY_TRACING_SHADER_NV.0, + "RAY_TRACING_SHADER_NV", + ), + ( + PipelineStageFlags::ACCELERATION_STRUCTURE_BUILD_NV.0, + "ACCELERATION_STRUCTURE_BUILD_NV", + ), + (PipelineStageFlags::TASK_SHADER_NV.0, "TASK_SHADER_NV"), + (PipelineStageFlags::MESH_SHADER_NV.0, "MESH_SHADER_NV"), + ( + PipelineStageFlags::FRAGMENT_DENSITY_PROCESS_EXT.0, + "FRAGMENT_DENSITY_PROCESS_EXT", + ), + ]; + debug_flags(f, KNOWN, self.0) + } +} +impl fmt::Debug for PipelineTessellationStateCreateFlags { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + const KNOWN: &[(Flags, &str)] = &[]; + debug_flags(f, KNOWN, self.0) + } +} +impl fmt::Debug for PipelineVertexInputStateCreateFlags { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + const KNOWN: &[(Flags, &str)] = &[]; + debug_flags(f, KNOWN, self.0) + } +} +impl fmt::Debug for PipelineViewportStateCreateFlags { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + const KNOWN: &[(Flags, &str)] = &[]; + debug_flags(f, KNOWN, self.0) + } +} +impl fmt::Debug for PipelineViewportSwizzleStateCreateFlagsNV { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + const KNOWN: &[(Flags, &str)] = &[]; + debug_flags(f, KNOWN, self.0) + } +} +impl fmt::Debug for PointClippingBehavior { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + let name = match *self { + Self::ALL_CLIP_PLANES => Some("ALL_CLIP_PLANES"), + Self::USER_CLIP_PLANES_ONLY => Some("USER_CLIP_PLANES_ONLY"), + _ => None, + }; + if let Some(x) = name { + f.write_str(x) + } else { + self.0.fmt(f) + } + } +} +impl fmt::Debug for PolygonMode { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + let name = match *self { + Self::FILL => Some("FILL"), + Self::LINE => Some("LINE"), + Self::POINT => Some("POINT"), + Self::FILL_RECTANGLE_NV => Some("FILL_RECTANGLE_NV"), + _ => None, + }; + if let Some(x) = name { + f.write_str(x) + } else { + self.0.fmt(f) + } + } +} +impl fmt::Debug for PresentModeKHR { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + let name = match *self { + Self::IMMEDIATE => Some("IMMEDIATE"), + Self::MAILBOX => Some("MAILBOX"), + Self::FIFO => Some("FIFO"), + Self::FIFO_RELAXED => Some("FIFO_RELAXED"), + Self::SHARED_DEMAND_REFRESH => Some("SHARED_DEMAND_REFRESH"), + Self::SHARED_CONTINUOUS_REFRESH => Some("SHARED_CONTINUOUS_REFRESH"), + _ => None, + }; + if let Some(x) = name { + f.write_str(x) + } else { + self.0.fmt(f) + } + } +} +impl fmt::Debug for PrimitiveTopology { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + let name = match *self { + Self::POINT_LIST => Some("POINT_LIST"), + Self::LINE_LIST => Some("LINE_LIST"), + Self::LINE_STRIP => Some("LINE_STRIP"), + Self::TRIANGLE_LIST => Some("TRIANGLE_LIST"), + Self::TRIANGLE_STRIP => Some("TRIANGLE_STRIP"), + Self::TRIANGLE_FAN => Some("TRIANGLE_FAN"), + Self::LINE_LIST_WITH_ADJACENCY => Some("LINE_LIST_WITH_ADJACENCY"), + Self::LINE_STRIP_WITH_ADJACENCY => Some("LINE_STRIP_WITH_ADJACENCY"), + Self::TRIANGLE_LIST_WITH_ADJACENCY => Some("TRIANGLE_LIST_WITH_ADJACENCY"), + 
Self::TRIANGLE_STRIP_WITH_ADJACENCY => Some("TRIANGLE_STRIP_WITH_ADJACENCY"), + Self::PATCH_LIST => Some("PATCH_LIST"), + _ => None, + }; + if let Some(x) = name { + f.write_str(x) + } else { + self.0.fmt(f) + } + } +} +impl fmt::Debug for QueryControlFlags { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + const KNOWN: &[(Flags, &str)] = &[(QueryControlFlags::PRECISE.0, "PRECISE")]; + debug_flags(f, KNOWN, self.0) + } +} +impl fmt::Debug for QueryPipelineStatisticFlags { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + const KNOWN: &[(Flags, &str)] = &[ + ( + QueryPipelineStatisticFlags::INPUT_ASSEMBLY_VERTICES.0, + "INPUT_ASSEMBLY_VERTICES", + ), + ( + QueryPipelineStatisticFlags::INPUT_ASSEMBLY_PRIMITIVES.0, + "INPUT_ASSEMBLY_PRIMITIVES", + ), + ( + QueryPipelineStatisticFlags::VERTEX_SHADER_INVOCATIONS.0, + "VERTEX_SHADER_INVOCATIONS", + ), + ( + QueryPipelineStatisticFlags::GEOMETRY_SHADER_INVOCATIONS.0, + "GEOMETRY_SHADER_INVOCATIONS", + ), + ( + QueryPipelineStatisticFlags::GEOMETRY_SHADER_PRIMITIVES.0, + "GEOMETRY_SHADER_PRIMITIVES", + ), + ( + QueryPipelineStatisticFlags::CLIPPING_INVOCATIONS.0, + "CLIPPING_INVOCATIONS", + ), + ( + QueryPipelineStatisticFlags::CLIPPING_PRIMITIVES.0, + "CLIPPING_PRIMITIVES", + ), + ( + QueryPipelineStatisticFlags::FRAGMENT_SHADER_INVOCATIONS.0, + "FRAGMENT_SHADER_INVOCATIONS", + ), + ( + QueryPipelineStatisticFlags::TESSELLATION_CONTROL_SHADER_PATCHES.0, + "TESSELLATION_CONTROL_SHADER_PATCHES", + ), + ( + QueryPipelineStatisticFlags::TESSELLATION_EVALUATION_SHADER_INVOCATIONS.0, + "TESSELLATION_EVALUATION_SHADER_INVOCATIONS", + ), + ( + QueryPipelineStatisticFlags::COMPUTE_SHADER_INVOCATIONS.0, + "COMPUTE_SHADER_INVOCATIONS", + ), + ]; + debug_flags(f, KNOWN, self.0) + } +} +impl fmt::Debug for QueryPoolCreateFlags { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + const KNOWN: &[(Flags, &str)] = &[]; + debug_flags(f, KNOWN, self.0) + } +} +impl fmt::Debug for QueryResultFlags { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + const KNOWN: &[(Flags, &str)] = &[ + (QueryResultFlags::TYPE_64.0, "TYPE_64"), + (QueryResultFlags::WAIT.0, "WAIT"), + (QueryResultFlags::WITH_AVAILABILITY.0, "WITH_AVAILABILITY"), + (QueryResultFlags::PARTIAL.0, "PARTIAL"), + ]; + debug_flags(f, KNOWN, self.0) + } +} +impl fmt::Debug for QueryType { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + let name = match *self { + Self::OCCLUSION => Some("OCCLUSION"), + Self::PIPELINE_STATISTICS => Some("PIPELINE_STATISTICS"), + Self::TIMESTAMP => Some("TIMESTAMP"), + Self::RESERVED_8 => Some("RESERVED_8"), + Self::RESERVED_4 => Some("RESERVED_4"), + Self::TRANSFORM_FEEDBACK_STREAM_EXT => Some("TRANSFORM_FEEDBACK_STREAM_EXT"), + Self::ACCELERATION_STRUCTURE_COMPACTED_SIZE_NV => { + Some("ACCELERATION_STRUCTURE_COMPACTED_SIZE_NV") + } + _ => None, + }; + if let Some(x) = name { + f.write_str(x) + } else { + self.0.fmt(f) + } + } +} +impl fmt::Debug for QueueFlags { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + const KNOWN: &[(Flags, &str)] = &[ + (QueueFlags::GRAPHICS.0, "GRAPHICS"), + (QueueFlags::COMPUTE.0, "COMPUTE"), + (QueueFlags::TRANSFER.0, "TRANSFER"), + (QueueFlags::SPARSE_BINDING.0, "SPARSE_BINDING"), + (QueueFlags::RESERVED_6_KHR.0, "RESERVED_6_KHR"), + (QueueFlags::RESERVED_5_KHR.0, "RESERVED_5_KHR"), + (QueueFlags::PROTECTED.0, "PROTECTED"), + ]; + debug_flags(f, KNOWN, self.0) + } +} +impl fmt::Debug for QueueGlobalPriorityEXT { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + let name = match 
*self { + Self::LOW => Some("LOW"), + Self::MEDIUM => Some("MEDIUM"), + Self::HIGH => Some("HIGH"), + Self::REALTIME => Some("REALTIME"), + _ => None, + }; + if let Some(x) = name { + f.write_str(x) + } else { + self.0.fmt(f) + } + } +} +impl fmt::Debug for RasterizationOrderAMD { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + let name = match *self { + Self::STRICT => Some("STRICT"), + Self::RELAXED => Some("RELAXED"), + _ => None, + }; + if let Some(x) = name { + f.write_str(x) + } else { + self.0.fmt(f) + } + } +} +impl fmt::Debug for RayTracingShaderGroupTypeNV { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + let name = match *self { + Self::GENERAL => Some("GENERAL"), + Self::TRIANGLES_HIT_GROUP => Some("TRIANGLES_HIT_GROUP"), + Self::PROCEDURAL_HIT_GROUP => Some("PROCEDURAL_HIT_GROUP"), + _ => None, + }; + if let Some(x) = name { + f.write_str(x) + } else { + self.0.fmt(f) + } + } +} +impl fmt::Debug for RenderPassCreateFlags { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + const KNOWN: &[(Flags, &str)] = + &[(RenderPassCreateFlags::RESERVED_0_KHR.0, "RESERVED_0_KHR")]; + debug_flags(f, KNOWN, self.0) + } +} +impl fmt::Debug for ResolveModeFlagsKHR { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + const KNOWN: &[(Flags, &str)] = &[ + (ResolveModeFlagsKHR::NONE.0, "NONE"), + (ResolveModeFlagsKHR::SAMPLE_ZERO.0, "SAMPLE_ZERO"), + (ResolveModeFlagsKHR::AVERAGE.0, "AVERAGE"), + (ResolveModeFlagsKHR::MIN.0, "MIN"), + (ResolveModeFlagsKHR::MAX.0, "MAX"), + ]; + debug_flags(f, KNOWN, self.0) + } +} +impl fmt::Debug for Result { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + let name = match *self { + Self::SUCCESS => Some("SUCCESS"), + Self::NOT_READY => Some("NOT_READY"), + Self::TIMEOUT => Some("TIMEOUT"), + Self::EVENT_SET => Some("EVENT_SET"), + Self::EVENT_RESET => Some("EVENT_RESET"), + Self::INCOMPLETE => Some("INCOMPLETE"), + Self::ERROR_OUT_OF_HOST_MEMORY => Some("ERROR_OUT_OF_HOST_MEMORY"), + Self::ERROR_OUT_OF_DEVICE_MEMORY => Some("ERROR_OUT_OF_DEVICE_MEMORY"), + Self::ERROR_INITIALIZATION_FAILED => Some("ERROR_INITIALIZATION_FAILED"), + Self::ERROR_DEVICE_LOST => Some("ERROR_DEVICE_LOST"), + Self::ERROR_MEMORY_MAP_FAILED => Some("ERROR_MEMORY_MAP_FAILED"), + Self::ERROR_LAYER_NOT_PRESENT => Some("ERROR_LAYER_NOT_PRESENT"), + Self::ERROR_EXTENSION_NOT_PRESENT => Some("ERROR_EXTENSION_NOT_PRESENT"), + Self::ERROR_FEATURE_NOT_PRESENT => Some("ERROR_FEATURE_NOT_PRESENT"), + Self::ERROR_INCOMPATIBLE_DRIVER => Some("ERROR_INCOMPATIBLE_DRIVER"), + Self::ERROR_TOO_MANY_OBJECTS => Some("ERROR_TOO_MANY_OBJECTS"), + Self::ERROR_FORMAT_NOT_SUPPORTED => Some("ERROR_FORMAT_NOT_SUPPORTED"), + Self::ERROR_FRAGMENTED_POOL => Some("ERROR_FRAGMENTED_POOL"), + Self::ERROR_SURFACE_LOST_KHR => Some("ERROR_SURFACE_LOST_KHR"), + Self::ERROR_NATIVE_WINDOW_IN_USE_KHR => Some("ERROR_NATIVE_WINDOW_IN_USE_KHR"), + Self::SUBOPTIMAL_KHR => Some("SUBOPTIMAL_KHR"), + Self::ERROR_OUT_OF_DATE_KHR => Some("ERROR_OUT_OF_DATE_KHR"), + Self::ERROR_INCOMPATIBLE_DISPLAY_KHR => Some("ERROR_INCOMPATIBLE_DISPLAY_KHR"), + Self::ERROR_VALIDATION_FAILED_EXT => Some("ERROR_VALIDATION_FAILED_EXT"), + Self::ERROR_INVALID_SHADER_NV => Some("ERROR_INVALID_SHADER_NV"), + Self::ERROR_INVALID_DRM_FORMAT_MODIFIER_PLANE_LAYOUT_EXT => { + Some("ERROR_INVALID_DRM_FORMAT_MODIFIER_PLANE_LAYOUT_EXT") + } + Self::ERROR_FRAGMENTATION_EXT => Some("ERROR_FRAGMENTATION_EXT"), + Self::ERROR_NOT_PERMITTED_EXT => Some("ERROR_NOT_PERMITTED_EXT"), + Self::ERROR_INVALID_DEVICE_ADDRESS_EXT => 
Some("ERROR_INVALID_DEVICE_ADDRESS_EXT"), + Self::ERROR_OUT_OF_POOL_MEMORY => Some("ERROR_OUT_OF_POOL_MEMORY"), + Self::ERROR_INVALID_EXTERNAL_HANDLE => Some("ERROR_INVALID_EXTERNAL_HANDLE"), + _ => None, + }; + if let Some(x) = name { + f.write_str(x) + } else { + self.0.fmt(f) + } + } +} +impl fmt::Debug for SampleCountFlags { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + const KNOWN: &[(Flags, &str)] = &[ + (SampleCountFlags::TYPE_1.0, "TYPE_1"), + (SampleCountFlags::TYPE_2.0, "TYPE_2"), + (SampleCountFlags::TYPE_4.0, "TYPE_4"), + (SampleCountFlags::TYPE_8.0, "TYPE_8"), + (SampleCountFlags::TYPE_16.0, "TYPE_16"), + (SampleCountFlags::TYPE_32.0, "TYPE_32"), + (SampleCountFlags::TYPE_64.0, "TYPE_64"), + ]; + debug_flags(f, KNOWN, self.0) + } +} +impl fmt::Debug for SamplerAddressMode { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + let name = match *self { + Self::REPEAT => Some("REPEAT"), + Self::MIRRORED_REPEAT => Some("MIRRORED_REPEAT"), + Self::CLAMP_TO_EDGE => Some("CLAMP_TO_EDGE"), + Self::CLAMP_TO_BORDER => Some("CLAMP_TO_BORDER"), + _ => None, + }; + if let Some(x) = name { + f.write_str(x) + } else { + self.0.fmt(f) + } + } +} +impl fmt::Debug for SamplerCreateFlags { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + const KNOWN: &[(Flags, &str)] = &[ + (SamplerCreateFlags::SUBSAMPLED_EXT.0, "SUBSAMPLED_EXT"), + ( + SamplerCreateFlags::SUBSAMPLED_COARSE_RECONSTRUCTION_EXT.0, + "SUBSAMPLED_COARSE_RECONSTRUCTION_EXT", + ), + ]; + debug_flags(f, KNOWN, self.0) + } +} +impl fmt::Debug for SamplerMipmapMode { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + let name = match *self { + Self::NEAREST => Some("NEAREST"), + Self::LINEAR => Some("LINEAR"), + _ => None, + }; + if let Some(x) = name { + f.write_str(x) + } else { + self.0.fmt(f) + } + } +} +impl fmt::Debug for SamplerReductionModeEXT { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + let name = match *self { + Self::WEIGHTED_AVERAGE => Some("WEIGHTED_AVERAGE"), + Self::MIN => Some("MIN"), + Self::MAX => Some("MAX"), + _ => None, + }; + if let Some(x) = name { + f.write_str(x) + } else { + self.0.fmt(f) + } + } +} +impl fmt::Debug for SamplerYcbcrModelConversion { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + let name = match *self { + Self::RGB_IDENTITY => Some("RGB_IDENTITY"), + Self::YCBCR_IDENTITY => Some("YCBCR_IDENTITY"), + Self::YCBCR_709 => Some("YCBCR_709"), + Self::YCBCR_601 => Some("YCBCR_601"), + Self::YCBCR_2020 => Some("YCBCR_2020"), + _ => None, + }; + if let Some(x) = name { + f.write_str(x) + } else { + self.0.fmt(f) + } + } +} +impl fmt::Debug for SamplerYcbcrRange { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + let name = match *self { + Self::ITU_FULL => Some("ITU_FULL"), + Self::ITU_NARROW => Some("ITU_NARROW"), + _ => None, + }; + if let Some(x) = name { + f.write_str(x) + } else { + self.0.fmt(f) + } + } +} +impl fmt::Debug for SemaphoreCreateFlags { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + const KNOWN: &[(Flags, &str)] = &[]; + debug_flags(f, KNOWN, self.0) + } +} +impl fmt::Debug for SemaphoreImportFlags { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + const KNOWN: &[(Flags, &str)] = &[(SemaphoreImportFlags::TEMPORARY.0, "TEMPORARY")]; + debug_flags(f, KNOWN, self.0) + } +} +impl fmt::Debug for ShaderInfoTypeAMD { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + let name = match *self { + Self::STATISTICS => Some("STATISTICS"), + Self::BINARY => Some("BINARY"), + Self::DISASSEMBLY => 
Some("DISASSEMBLY"), + _ => None, + }; + if let Some(x) = name { + f.write_str(x) + } else { + self.0.fmt(f) + } + } +} +impl fmt::Debug for ShaderModuleCreateFlags { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + const KNOWN: &[(Flags, &str)] = &[]; + debug_flags(f, KNOWN, self.0) + } +} +impl fmt::Debug for ShaderStageFlags { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + const KNOWN: &[(Flags, &str)] = &[ + (ShaderStageFlags::VERTEX.0, "VERTEX"), + ( + ShaderStageFlags::TESSELLATION_CONTROL.0, + "TESSELLATION_CONTROL", + ), + ( + ShaderStageFlags::TESSELLATION_EVALUATION.0, + "TESSELLATION_EVALUATION", + ), + (ShaderStageFlags::GEOMETRY.0, "GEOMETRY"), + (ShaderStageFlags::FRAGMENT.0, "FRAGMENT"), + (ShaderStageFlags::COMPUTE.0, "COMPUTE"), + (ShaderStageFlags::ALL_GRAPHICS.0, "ALL_GRAPHICS"), + (ShaderStageFlags::ALL.0, "ALL"), + (ShaderStageFlags::RAYGEN_NV.0, "RAYGEN_NV"), + (ShaderStageFlags::ANY_HIT_NV.0, "ANY_HIT_NV"), + (ShaderStageFlags::CLOSEST_HIT_NV.0, "CLOSEST_HIT_NV"), + (ShaderStageFlags::MISS_NV.0, "MISS_NV"), + (ShaderStageFlags::INTERSECTION_NV.0, "INTERSECTION_NV"), + (ShaderStageFlags::CALLABLE_NV.0, "CALLABLE_NV"), + (ShaderStageFlags::TASK_NV.0, "TASK_NV"), + (ShaderStageFlags::MESH_NV.0, "MESH_NV"), + ]; + debug_flags(f, KNOWN, self.0) + } +} +impl fmt::Debug for ShadingRatePaletteEntryNV { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + let name = match *self { + Self::NO_INVOCATIONS => Some("NO_INVOCATIONS"), + Self::TYPE_16_INVOCATIONS_PER_PIXEL => Some("TYPE_16_INVOCATIONS_PER_PIXEL"), + Self::TYPE_8_INVOCATIONS_PER_PIXEL => Some("TYPE_8_INVOCATIONS_PER_PIXEL"), + Self::TYPE_4_INVOCATIONS_PER_PIXEL => Some("TYPE_4_INVOCATIONS_PER_PIXEL"), + Self::TYPE_2_INVOCATIONS_PER_PIXEL => Some("TYPE_2_INVOCATIONS_PER_PIXEL"), + Self::TYPE_1_INVOCATION_PER_PIXEL => Some("TYPE_1_INVOCATION_PER_PIXEL"), + Self::TYPE_1_INVOCATION_PER_2X1_PIXELS => Some("TYPE_1_INVOCATION_PER_2X1_PIXELS"), + Self::TYPE_1_INVOCATION_PER_1X2_PIXELS => Some("TYPE_1_INVOCATION_PER_1X2_PIXELS"), + Self::TYPE_1_INVOCATION_PER_2X2_PIXELS => Some("TYPE_1_INVOCATION_PER_2X2_PIXELS"), + Self::TYPE_1_INVOCATION_PER_4X2_PIXELS => Some("TYPE_1_INVOCATION_PER_4X2_PIXELS"), + Self::TYPE_1_INVOCATION_PER_2X4_PIXELS => Some("TYPE_1_INVOCATION_PER_2X4_PIXELS"), + Self::TYPE_1_INVOCATION_PER_4X4_PIXELS => Some("TYPE_1_INVOCATION_PER_4X4_PIXELS"), + _ => None, + }; + if let Some(x) = name { + f.write_str(x) + } else { + self.0.fmt(f) + } + } +} +impl fmt::Debug for SharingMode { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + let name = match *self { + Self::EXCLUSIVE => Some("EXCLUSIVE"), + Self::CONCURRENT => Some("CONCURRENT"), + _ => None, + }; + if let Some(x) = name { + f.write_str(x) + } else { + self.0.fmt(f) + } + } +} +impl fmt::Debug for SparseImageFormatFlags { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + const KNOWN: &[(Flags, &str)] = &[ + (SparseImageFormatFlags::SINGLE_MIPTAIL.0, "SINGLE_MIPTAIL"), + ( + SparseImageFormatFlags::ALIGNED_MIP_SIZE.0, + "ALIGNED_MIP_SIZE", + ), + ( + SparseImageFormatFlags::NONSTANDARD_BLOCK_SIZE.0, + "NONSTANDARD_BLOCK_SIZE", + ), + ]; + debug_flags(f, KNOWN, self.0) + } +} +impl fmt::Debug for SparseMemoryBindFlags { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + const KNOWN: &[(Flags, &str)] = &[(SparseMemoryBindFlags::METADATA.0, "METADATA")]; + debug_flags(f, KNOWN, self.0) + } +} +impl fmt::Debug for StencilFaceFlags { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + const KNOWN: 
&[(Flags, &str)] = &[ + (StencilFaceFlags::FRONT.0, "FRONT"), + (StencilFaceFlags::BACK.0, "BACK"), + ( + StencilFaceFlags::STENCIL_FRONT_AND_BACK.0, + "STENCIL_FRONT_AND_BACK", + ), + ]; + debug_flags(f, KNOWN, self.0) + } +} +impl fmt::Debug for StencilOp { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + let name = match *self { + Self::KEEP => Some("KEEP"), + Self::ZERO => Some("ZERO"), + Self::REPLACE => Some("REPLACE"), + Self::INCREMENT_AND_CLAMP => Some("INCREMENT_AND_CLAMP"), + Self::DECREMENT_AND_CLAMP => Some("DECREMENT_AND_CLAMP"), + Self::INVERT => Some("INVERT"), + Self::INCREMENT_AND_WRAP => Some("INCREMENT_AND_WRAP"), + Self::DECREMENT_AND_WRAP => Some("DECREMENT_AND_WRAP"), + _ => None, + }; + if let Some(x) = name { + f.write_str(x) + } else { + self.0.fmt(f) + } + } +} +impl fmt::Debug for StructureType { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + let name = match *self { + Self::APPLICATION_INFO => Some("APPLICATION_INFO"), + Self::INSTANCE_CREATE_INFO => Some("INSTANCE_CREATE_INFO"), + Self::DEVICE_QUEUE_CREATE_INFO => Some("DEVICE_QUEUE_CREATE_INFO"), + Self::DEVICE_CREATE_INFO => Some("DEVICE_CREATE_INFO"), + Self::SUBMIT_INFO => Some("SUBMIT_INFO"), + Self::MEMORY_ALLOCATE_INFO => Some("MEMORY_ALLOCATE_INFO"), + Self::MAPPED_MEMORY_RANGE => Some("MAPPED_MEMORY_RANGE"), + Self::BIND_SPARSE_INFO => Some("BIND_SPARSE_INFO"), + Self::FENCE_CREATE_INFO => Some("FENCE_CREATE_INFO"), + Self::SEMAPHORE_CREATE_INFO => Some("SEMAPHORE_CREATE_INFO"), + Self::EVENT_CREATE_INFO => Some("EVENT_CREATE_INFO"), + Self::QUERY_POOL_CREATE_INFO => Some("QUERY_POOL_CREATE_INFO"), + Self::BUFFER_CREATE_INFO => Some("BUFFER_CREATE_INFO"), + Self::BUFFER_VIEW_CREATE_INFO => Some("BUFFER_VIEW_CREATE_INFO"), + Self::IMAGE_CREATE_INFO => Some("IMAGE_CREATE_INFO"), + Self::IMAGE_VIEW_CREATE_INFO => Some("IMAGE_VIEW_CREATE_INFO"), + Self::SHADER_MODULE_CREATE_INFO => Some("SHADER_MODULE_CREATE_INFO"), + Self::PIPELINE_CACHE_CREATE_INFO => Some("PIPELINE_CACHE_CREATE_INFO"), + Self::PIPELINE_SHADER_STAGE_CREATE_INFO => Some("PIPELINE_SHADER_STAGE_CREATE_INFO"), + Self::PIPELINE_VERTEX_INPUT_STATE_CREATE_INFO => { + Some("PIPELINE_VERTEX_INPUT_STATE_CREATE_INFO") + } + Self::PIPELINE_INPUT_ASSEMBLY_STATE_CREATE_INFO => { + Some("PIPELINE_INPUT_ASSEMBLY_STATE_CREATE_INFO") + } + Self::PIPELINE_TESSELLATION_STATE_CREATE_INFO => { + Some("PIPELINE_TESSELLATION_STATE_CREATE_INFO") + } + Self::PIPELINE_VIEWPORT_STATE_CREATE_INFO => { + Some("PIPELINE_VIEWPORT_STATE_CREATE_INFO") + } + Self::PIPELINE_RASTERIZATION_STATE_CREATE_INFO => { + Some("PIPELINE_RASTERIZATION_STATE_CREATE_INFO") + } + Self::PIPELINE_MULTISAMPLE_STATE_CREATE_INFO => { + Some("PIPELINE_MULTISAMPLE_STATE_CREATE_INFO") + } + Self::PIPELINE_DEPTH_STENCIL_STATE_CREATE_INFO => { + Some("PIPELINE_DEPTH_STENCIL_STATE_CREATE_INFO") + } + Self::PIPELINE_COLOR_BLEND_STATE_CREATE_INFO => { + Some("PIPELINE_COLOR_BLEND_STATE_CREATE_INFO") + } + Self::PIPELINE_DYNAMIC_STATE_CREATE_INFO => Some("PIPELINE_DYNAMIC_STATE_CREATE_INFO"), + Self::GRAPHICS_PIPELINE_CREATE_INFO => Some("GRAPHICS_PIPELINE_CREATE_INFO"), + Self::COMPUTE_PIPELINE_CREATE_INFO => Some("COMPUTE_PIPELINE_CREATE_INFO"), + Self::PIPELINE_LAYOUT_CREATE_INFO => Some("PIPELINE_LAYOUT_CREATE_INFO"), + Self::SAMPLER_CREATE_INFO => Some("SAMPLER_CREATE_INFO"), + Self::DESCRIPTOR_SET_LAYOUT_CREATE_INFO => Some("DESCRIPTOR_SET_LAYOUT_CREATE_INFO"), + Self::DESCRIPTOR_POOL_CREATE_INFO => Some("DESCRIPTOR_POOL_CREATE_INFO"), + Self::DESCRIPTOR_SET_ALLOCATE_INFO 
=> Some("DESCRIPTOR_SET_ALLOCATE_INFO"), + Self::WRITE_DESCRIPTOR_SET => Some("WRITE_DESCRIPTOR_SET"), + Self::COPY_DESCRIPTOR_SET => Some("COPY_DESCRIPTOR_SET"), + Self::FRAMEBUFFER_CREATE_INFO => Some("FRAMEBUFFER_CREATE_INFO"), + Self::RENDER_PASS_CREATE_INFO => Some("RENDER_PASS_CREATE_INFO"), + Self::COMMAND_POOL_CREATE_INFO => Some("COMMAND_POOL_CREATE_INFO"), + Self::COMMAND_BUFFER_ALLOCATE_INFO => Some("COMMAND_BUFFER_ALLOCATE_INFO"), + Self::COMMAND_BUFFER_INHERITANCE_INFO => Some("COMMAND_BUFFER_INHERITANCE_INFO"), + Self::COMMAND_BUFFER_BEGIN_INFO => Some("COMMAND_BUFFER_BEGIN_INFO"), + Self::RENDER_PASS_BEGIN_INFO => Some("RENDER_PASS_BEGIN_INFO"), + Self::BUFFER_MEMORY_BARRIER => Some("BUFFER_MEMORY_BARRIER"), + Self::IMAGE_MEMORY_BARRIER => Some("IMAGE_MEMORY_BARRIER"), + Self::MEMORY_BARRIER => Some("MEMORY_BARRIER"), + Self::LOADER_INSTANCE_CREATE_INFO => Some("LOADER_INSTANCE_CREATE_INFO"), + Self::LOADER_DEVICE_CREATE_INFO => Some("LOADER_DEVICE_CREATE_INFO"), + Self::SWAPCHAIN_CREATE_INFO_KHR => Some("SWAPCHAIN_CREATE_INFO_KHR"), + Self::PRESENT_INFO_KHR => Some("PRESENT_INFO_KHR"), + Self::DEVICE_GROUP_PRESENT_CAPABILITIES_KHR => { + Some("DEVICE_GROUP_PRESENT_CAPABILITIES_KHR") + } + Self::IMAGE_SWAPCHAIN_CREATE_INFO_KHR => Some("IMAGE_SWAPCHAIN_CREATE_INFO_KHR"), + Self::BIND_IMAGE_MEMORY_SWAPCHAIN_INFO_KHR => { + Some("BIND_IMAGE_MEMORY_SWAPCHAIN_INFO_KHR") + } + Self::ACQUIRE_NEXT_IMAGE_INFO_KHR => Some("ACQUIRE_NEXT_IMAGE_INFO_KHR"), + Self::DEVICE_GROUP_PRESENT_INFO_KHR => Some("DEVICE_GROUP_PRESENT_INFO_KHR"), + Self::DEVICE_GROUP_SWAPCHAIN_CREATE_INFO_KHR => { + Some("DEVICE_GROUP_SWAPCHAIN_CREATE_INFO_KHR") + } + Self::DISPLAY_MODE_CREATE_INFO_KHR => Some("DISPLAY_MODE_CREATE_INFO_KHR"), + Self::DISPLAY_SURFACE_CREATE_INFO_KHR => Some("DISPLAY_SURFACE_CREATE_INFO_KHR"), + Self::DISPLAY_PRESENT_INFO_KHR => Some("DISPLAY_PRESENT_INFO_KHR"), + Self::XLIB_SURFACE_CREATE_INFO_KHR => Some("XLIB_SURFACE_CREATE_INFO_KHR"), + Self::XCB_SURFACE_CREATE_INFO_KHR => Some("XCB_SURFACE_CREATE_INFO_KHR"), + Self::WAYLAND_SURFACE_CREATE_INFO_KHR => Some("WAYLAND_SURFACE_CREATE_INFO_KHR"), + Self::ANDROID_SURFACE_CREATE_INFO_KHR => Some("ANDROID_SURFACE_CREATE_INFO_KHR"), + Self::WIN32_SURFACE_CREATE_INFO_KHR => Some("WIN32_SURFACE_CREATE_INFO_KHR"), + Self::NATIVE_BUFFER_ANDROID => Some("NATIVE_BUFFER_ANDROID"), + Self::DEBUG_REPORT_CALLBACK_CREATE_INFO_EXT => { + Some("DEBUG_REPORT_CALLBACK_CREATE_INFO_EXT") + } + Self::PIPELINE_RASTERIZATION_STATE_RASTERIZATION_ORDER_AMD => { + Some("PIPELINE_RASTERIZATION_STATE_RASTERIZATION_ORDER_AMD") + } + Self::DEBUG_MARKER_OBJECT_NAME_INFO_EXT => Some("DEBUG_MARKER_OBJECT_NAME_INFO_EXT"), + Self::DEBUG_MARKER_OBJECT_TAG_INFO_EXT => Some("DEBUG_MARKER_OBJECT_TAG_INFO_EXT"), + Self::DEBUG_MARKER_MARKER_INFO_EXT => Some("DEBUG_MARKER_MARKER_INFO_EXT"), + Self::DEDICATED_ALLOCATION_IMAGE_CREATE_INFO_NV => { + Some("DEDICATED_ALLOCATION_IMAGE_CREATE_INFO_NV") + } + Self::DEDICATED_ALLOCATION_BUFFER_CREATE_INFO_NV => { + Some("DEDICATED_ALLOCATION_BUFFER_CREATE_INFO_NV") + } + Self::DEDICATED_ALLOCATION_MEMORY_ALLOCATE_INFO_NV => { + Some("DEDICATED_ALLOCATION_MEMORY_ALLOCATE_INFO_NV") + } + Self::PHYSICAL_DEVICE_TRANSFORM_FEEDBACK_FEATURES_EXT => { + Some("PHYSICAL_DEVICE_TRANSFORM_FEEDBACK_FEATURES_EXT") + } + Self::PHYSICAL_DEVICE_TRANSFORM_FEEDBACK_PROPERTIES_EXT => { + Some("PHYSICAL_DEVICE_TRANSFORM_FEEDBACK_PROPERTIES_EXT") + } + Self::PIPELINE_RASTERIZATION_STATE_STREAM_CREATE_INFO_EXT => { + 
Some("PIPELINE_RASTERIZATION_STATE_STREAM_CREATE_INFO_EXT") + } + Self::TEXTURE_LOD_GATHER_FORMAT_PROPERTIES_AMD => { + Some("TEXTURE_LOD_GATHER_FORMAT_PROPERTIES_AMD") + } + Self::PHYSICAL_DEVICE_CORNER_SAMPLED_IMAGE_FEATURES_NV => { + Some("PHYSICAL_DEVICE_CORNER_SAMPLED_IMAGE_FEATURES_NV") + } + Self::EXTERNAL_MEMORY_IMAGE_CREATE_INFO_NV => { + Some("EXTERNAL_MEMORY_IMAGE_CREATE_INFO_NV") + } + Self::EXPORT_MEMORY_ALLOCATE_INFO_NV => Some("EXPORT_MEMORY_ALLOCATE_INFO_NV"), + Self::IMPORT_MEMORY_WIN32_HANDLE_INFO_NV => Some("IMPORT_MEMORY_WIN32_HANDLE_INFO_NV"), + Self::EXPORT_MEMORY_WIN32_HANDLE_INFO_NV => Some("EXPORT_MEMORY_WIN32_HANDLE_INFO_NV"), + Self::WIN32_KEYED_MUTEX_ACQUIRE_RELEASE_INFO_NV => { + Some("WIN32_KEYED_MUTEX_ACQUIRE_RELEASE_INFO_NV") + } + Self::VALIDATION_FLAGS_EXT => Some("VALIDATION_FLAGS_EXT"), + Self::VI_SURFACE_CREATE_INFO_NN => Some("VI_SURFACE_CREATE_INFO_NN"), + Self::IMAGE_VIEW_ASTC_DECODE_MODE_EXT => Some("IMAGE_VIEW_ASTC_DECODE_MODE_EXT"), + Self::PHYSICAL_DEVICE_ASTC_DECODE_FEATURES_EXT => { + Some("PHYSICAL_DEVICE_ASTC_DECODE_FEATURES_EXT") + } + Self::IMPORT_MEMORY_WIN32_HANDLE_INFO_KHR => { + Some("IMPORT_MEMORY_WIN32_HANDLE_INFO_KHR") + } + Self::EXPORT_MEMORY_WIN32_HANDLE_INFO_KHR => { + Some("EXPORT_MEMORY_WIN32_HANDLE_INFO_KHR") + } + Self::MEMORY_WIN32_HANDLE_PROPERTIES_KHR => Some("MEMORY_WIN32_HANDLE_PROPERTIES_KHR"), + Self::MEMORY_GET_WIN32_HANDLE_INFO_KHR => Some("MEMORY_GET_WIN32_HANDLE_INFO_KHR"), + Self::IMPORT_MEMORY_FD_INFO_KHR => Some("IMPORT_MEMORY_FD_INFO_KHR"), + Self::MEMORY_FD_PROPERTIES_KHR => Some("MEMORY_FD_PROPERTIES_KHR"), + Self::MEMORY_GET_FD_INFO_KHR => Some("MEMORY_GET_FD_INFO_KHR"), + Self::WIN32_KEYED_MUTEX_ACQUIRE_RELEASE_INFO_KHR => { + Some("WIN32_KEYED_MUTEX_ACQUIRE_RELEASE_INFO_KHR") + } + Self::IMPORT_SEMAPHORE_WIN32_HANDLE_INFO_KHR => { + Some("IMPORT_SEMAPHORE_WIN32_HANDLE_INFO_KHR") + } + Self::EXPORT_SEMAPHORE_WIN32_HANDLE_INFO_KHR => { + Some("EXPORT_SEMAPHORE_WIN32_HANDLE_INFO_KHR") + } + Self::D3D12_FENCE_SUBMIT_INFO_KHR => Some("D3D12_FENCE_SUBMIT_INFO_KHR"), + Self::SEMAPHORE_GET_WIN32_HANDLE_INFO_KHR => { + Some("SEMAPHORE_GET_WIN32_HANDLE_INFO_KHR") + } + Self::IMPORT_SEMAPHORE_FD_INFO_KHR => Some("IMPORT_SEMAPHORE_FD_INFO_KHR"), + Self::SEMAPHORE_GET_FD_INFO_KHR => Some("SEMAPHORE_GET_FD_INFO_KHR"), + Self::PHYSICAL_DEVICE_PUSH_DESCRIPTOR_PROPERTIES_KHR => { + Some("PHYSICAL_DEVICE_PUSH_DESCRIPTOR_PROPERTIES_KHR") + } + Self::COMMAND_BUFFER_INHERITANCE_CONDITIONAL_RENDERING_INFO_EXT => { + Some("COMMAND_BUFFER_INHERITANCE_CONDITIONAL_RENDERING_INFO_EXT") + } + Self::PHYSICAL_DEVICE_CONDITIONAL_RENDERING_FEATURES_EXT => { + Some("PHYSICAL_DEVICE_CONDITIONAL_RENDERING_FEATURES_EXT") + } + Self::CONDITIONAL_RENDERING_BEGIN_INFO_EXT => { + Some("CONDITIONAL_RENDERING_BEGIN_INFO_EXT") + } + Self::PHYSICAL_DEVICE_FLOAT16_INT8_FEATURES_KHR => { + Some("PHYSICAL_DEVICE_FLOAT16_INT8_FEATURES_KHR") + } + Self::PRESENT_REGIONS_KHR => Some("PRESENT_REGIONS_KHR"), + Self::OBJECT_TABLE_CREATE_INFO_NVX => Some("OBJECT_TABLE_CREATE_INFO_NVX"), + Self::INDIRECT_COMMANDS_LAYOUT_CREATE_INFO_NVX => { + Some("INDIRECT_COMMANDS_LAYOUT_CREATE_INFO_NVX") + } + Self::CMD_PROCESS_COMMANDS_INFO_NVX => Some("CMD_PROCESS_COMMANDS_INFO_NVX"), + Self::CMD_RESERVE_SPACE_FOR_COMMANDS_INFO_NVX => { + Some("CMD_RESERVE_SPACE_FOR_COMMANDS_INFO_NVX") + } + Self::DEVICE_GENERATED_COMMANDS_LIMITS_NVX => { + Some("DEVICE_GENERATED_COMMANDS_LIMITS_NVX") + } + Self::DEVICE_GENERATED_COMMANDS_FEATURES_NVX => { + 
Some("DEVICE_GENERATED_COMMANDS_FEATURES_NVX") + } + Self::PIPELINE_VIEWPORT_W_SCALING_STATE_CREATE_INFO_NV => { + Some("PIPELINE_VIEWPORT_W_SCALING_STATE_CREATE_INFO_NV") + } + Self::SURFACE_CAPABILITIES_2_EXT => Some("SURFACE_CAPABILITIES_2_EXT"), + Self::DISPLAY_POWER_INFO_EXT => Some("DISPLAY_POWER_INFO_EXT"), + Self::DEVICE_EVENT_INFO_EXT => Some("DEVICE_EVENT_INFO_EXT"), + Self::DISPLAY_EVENT_INFO_EXT => Some("DISPLAY_EVENT_INFO_EXT"), + Self::SWAPCHAIN_COUNTER_CREATE_INFO_EXT => Some("SWAPCHAIN_COUNTER_CREATE_INFO_EXT"), + Self::PRESENT_TIMES_INFO_GOOGLE => Some("PRESENT_TIMES_INFO_GOOGLE"), + Self::PHYSICAL_DEVICE_MULTIVIEW_PER_VIEW_ATTRIBUTES_PROPERTIES_NVX => { + Some("PHYSICAL_DEVICE_MULTIVIEW_PER_VIEW_ATTRIBUTES_PROPERTIES_NVX") + } + Self::PIPELINE_VIEWPORT_SWIZZLE_STATE_CREATE_INFO_NV => { + Some("PIPELINE_VIEWPORT_SWIZZLE_STATE_CREATE_INFO_NV") + } + Self::PHYSICAL_DEVICE_DISCARD_RECTANGLE_PROPERTIES_EXT => { + Some("PHYSICAL_DEVICE_DISCARD_RECTANGLE_PROPERTIES_EXT") + } + Self::PIPELINE_DISCARD_RECTANGLE_STATE_CREATE_INFO_EXT => { + Some("PIPELINE_DISCARD_RECTANGLE_STATE_CREATE_INFO_EXT") + } + Self::PHYSICAL_DEVICE_CONSERVATIVE_RASTERIZATION_PROPERTIES_EXT => { + Some("PHYSICAL_DEVICE_CONSERVATIVE_RASTERIZATION_PROPERTIES_EXT") + } + Self::PIPELINE_RASTERIZATION_CONSERVATIVE_STATE_CREATE_INFO_EXT => { + Some("PIPELINE_RASTERIZATION_CONSERVATIVE_STATE_CREATE_INFO_EXT") + } + Self::HDR_METADATA_EXT => Some("HDR_METADATA_EXT"), + Self::ATTACHMENT_DESCRIPTION_2_KHR => Some("ATTACHMENT_DESCRIPTION_2_KHR"), + Self::ATTACHMENT_REFERENCE_2_KHR => Some("ATTACHMENT_REFERENCE_2_KHR"), + Self::SUBPASS_DESCRIPTION_2_KHR => Some("SUBPASS_DESCRIPTION_2_KHR"), + Self::SUBPASS_DEPENDENCY_2_KHR => Some("SUBPASS_DEPENDENCY_2_KHR"), + Self::RENDER_PASS_CREATE_INFO_2_KHR => Some("RENDER_PASS_CREATE_INFO_2_KHR"), + Self::SUBPASS_BEGIN_INFO_KHR => Some("SUBPASS_BEGIN_INFO_KHR"), + Self::SUBPASS_END_INFO_KHR => Some("SUBPASS_END_INFO_KHR"), + Self::SHARED_PRESENT_SURFACE_CAPABILITIES_KHR => { + Some("SHARED_PRESENT_SURFACE_CAPABILITIES_KHR") + } + Self::IMPORT_FENCE_WIN32_HANDLE_INFO_KHR => Some("IMPORT_FENCE_WIN32_HANDLE_INFO_KHR"), + Self::EXPORT_FENCE_WIN32_HANDLE_INFO_KHR => Some("EXPORT_FENCE_WIN32_HANDLE_INFO_KHR"), + Self::FENCE_GET_WIN32_HANDLE_INFO_KHR => Some("FENCE_GET_WIN32_HANDLE_INFO_KHR"), + Self::IMPORT_FENCE_FD_INFO_KHR => Some("IMPORT_FENCE_FD_INFO_KHR"), + Self::FENCE_GET_FD_INFO_KHR => Some("FENCE_GET_FD_INFO_KHR"), + Self::PHYSICAL_DEVICE_SURFACE_INFO_2_KHR => Some("PHYSICAL_DEVICE_SURFACE_INFO_2_KHR"), + Self::SURFACE_CAPABILITIES_2_KHR => Some("SURFACE_CAPABILITIES_2_KHR"), + Self::SURFACE_FORMAT_2_KHR => Some("SURFACE_FORMAT_2_KHR"), + Self::DISPLAY_PROPERTIES_2_KHR => Some("DISPLAY_PROPERTIES_2_KHR"), + Self::DISPLAY_PLANE_PROPERTIES_2_KHR => Some("DISPLAY_PLANE_PROPERTIES_2_KHR"), + Self::DISPLAY_MODE_PROPERTIES_2_KHR => Some("DISPLAY_MODE_PROPERTIES_2_KHR"), + Self::DISPLAY_PLANE_INFO_2_KHR => Some("DISPLAY_PLANE_INFO_2_KHR"), + Self::DISPLAY_PLANE_CAPABILITIES_2_KHR => Some("DISPLAY_PLANE_CAPABILITIES_2_KHR"), + Self::IOS_SURFACE_CREATE_INFO_M => Some("IOS_SURFACE_CREATE_INFO_M"), + Self::MACOS_SURFACE_CREATE_INFO_M => Some("MACOS_SURFACE_CREATE_INFO_M"), + Self::DEBUG_UTILS_OBJECT_NAME_INFO_EXT => Some("DEBUG_UTILS_OBJECT_NAME_INFO_EXT"), + Self::DEBUG_UTILS_OBJECT_TAG_INFO_EXT => Some("DEBUG_UTILS_OBJECT_TAG_INFO_EXT"), + Self::DEBUG_UTILS_LABEL_EXT => Some("DEBUG_UTILS_LABEL_EXT"), + Self::DEBUG_UTILS_MESSENGER_CALLBACK_DATA_EXT => { + 
Some("DEBUG_UTILS_MESSENGER_CALLBACK_DATA_EXT") + } + Self::DEBUG_UTILS_MESSENGER_CREATE_INFO_EXT => { + Some("DEBUG_UTILS_MESSENGER_CREATE_INFO_EXT") + } + Self::ANDROID_HARDWARE_BUFFER_USAGE_ANDROID => { + Some("ANDROID_HARDWARE_BUFFER_USAGE_ANDROID") + } + Self::ANDROID_HARDWARE_BUFFER_PROPERTIES_ANDROID => { + Some("ANDROID_HARDWARE_BUFFER_PROPERTIES_ANDROID") + } + Self::ANDROID_HARDWARE_BUFFER_FORMAT_PROPERTIES_ANDROID => { + Some("ANDROID_HARDWARE_BUFFER_FORMAT_PROPERTIES_ANDROID") + } + Self::IMPORT_ANDROID_HARDWARE_BUFFER_INFO_ANDROID => { + Some("IMPORT_ANDROID_HARDWARE_BUFFER_INFO_ANDROID") + } + Self::MEMORY_GET_ANDROID_HARDWARE_BUFFER_INFO_ANDROID => { + Some("MEMORY_GET_ANDROID_HARDWARE_BUFFER_INFO_ANDROID") + } + Self::EXTERNAL_FORMAT_ANDROID => Some("EXTERNAL_FORMAT_ANDROID"), + Self::PHYSICAL_DEVICE_SAMPLER_FILTER_MINMAX_PROPERTIES_EXT => { + Some("PHYSICAL_DEVICE_SAMPLER_FILTER_MINMAX_PROPERTIES_EXT") + } + Self::SAMPLER_REDUCTION_MODE_CREATE_INFO_EXT => { + Some("SAMPLER_REDUCTION_MODE_CREATE_INFO_EXT") + } + Self::PHYSICAL_DEVICE_INLINE_UNIFORM_BLOCK_FEATURES_EXT => { + Some("PHYSICAL_DEVICE_INLINE_UNIFORM_BLOCK_FEATURES_EXT") + } + Self::PHYSICAL_DEVICE_INLINE_UNIFORM_BLOCK_PROPERTIES_EXT => { + Some("PHYSICAL_DEVICE_INLINE_UNIFORM_BLOCK_PROPERTIES_EXT") + } + Self::WRITE_DESCRIPTOR_SET_INLINE_UNIFORM_BLOCK_EXT => { + Some("WRITE_DESCRIPTOR_SET_INLINE_UNIFORM_BLOCK_EXT") + } + Self::DESCRIPTOR_POOL_INLINE_UNIFORM_BLOCK_CREATE_INFO_EXT => { + Some("DESCRIPTOR_POOL_INLINE_UNIFORM_BLOCK_CREATE_INFO_EXT") + } + Self::SAMPLE_LOCATIONS_INFO_EXT => Some("SAMPLE_LOCATIONS_INFO_EXT"), + Self::RENDER_PASS_SAMPLE_LOCATIONS_BEGIN_INFO_EXT => { + Some("RENDER_PASS_SAMPLE_LOCATIONS_BEGIN_INFO_EXT") + } + Self::PIPELINE_SAMPLE_LOCATIONS_STATE_CREATE_INFO_EXT => { + Some("PIPELINE_SAMPLE_LOCATIONS_STATE_CREATE_INFO_EXT") + } + Self::PHYSICAL_DEVICE_SAMPLE_LOCATIONS_PROPERTIES_EXT => { + Some("PHYSICAL_DEVICE_SAMPLE_LOCATIONS_PROPERTIES_EXT") + } + Self::MULTISAMPLE_PROPERTIES_EXT => Some("MULTISAMPLE_PROPERTIES_EXT"), + Self::IMAGE_FORMAT_LIST_CREATE_INFO_KHR => Some("IMAGE_FORMAT_LIST_CREATE_INFO_KHR"), + Self::PHYSICAL_DEVICE_BLEND_OPERATION_ADVANCED_FEATURES_EXT => { + Some("PHYSICAL_DEVICE_BLEND_OPERATION_ADVANCED_FEATURES_EXT") + } + Self::PHYSICAL_DEVICE_BLEND_OPERATION_ADVANCED_PROPERTIES_EXT => { + Some("PHYSICAL_DEVICE_BLEND_OPERATION_ADVANCED_PROPERTIES_EXT") + } + Self::PIPELINE_COLOR_BLEND_ADVANCED_STATE_CREATE_INFO_EXT => { + Some("PIPELINE_COLOR_BLEND_ADVANCED_STATE_CREATE_INFO_EXT") + } + Self::PIPELINE_COVERAGE_TO_COLOR_STATE_CREATE_INFO_NV => { + Some("PIPELINE_COVERAGE_TO_COLOR_STATE_CREATE_INFO_NV") + } + Self::PIPELINE_COVERAGE_MODULATION_STATE_CREATE_INFO_NV => { + Some("PIPELINE_COVERAGE_MODULATION_STATE_CREATE_INFO_NV") + } + Self::DRM_FORMAT_MODIFIER_PROPERTIES_LIST_EXT => { + Some("DRM_FORMAT_MODIFIER_PROPERTIES_LIST_EXT") + } + Self::DRM_FORMAT_MODIFIER_PROPERTIES_EXT => Some("DRM_FORMAT_MODIFIER_PROPERTIES_EXT"), + Self::PHYSICAL_DEVICE_IMAGE_DRM_FORMAT_MODIFIER_INFO_EXT => { + Some("PHYSICAL_DEVICE_IMAGE_DRM_FORMAT_MODIFIER_INFO_EXT") + } + Self::IMAGE_DRM_FORMAT_MODIFIER_LIST_CREATE_INFO_EXT => { + Some("IMAGE_DRM_FORMAT_MODIFIER_LIST_CREATE_INFO_EXT") + } + Self::IMAGE_DRM_FORMAT_MODIFIER_EXPLICIT_CREATE_INFO_EXT => { + Some("IMAGE_DRM_FORMAT_MODIFIER_EXPLICIT_CREATE_INFO_EXT") + } + Self::IMAGE_DRM_FORMAT_MODIFIER_PROPERTIES_EXT => { + Some("IMAGE_DRM_FORMAT_MODIFIER_PROPERTIES_EXT") + } + Self::VALIDATION_CACHE_CREATE_INFO_EXT => 
Some("VALIDATION_CACHE_CREATE_INFO_EXT"), + Self::SHADER_MODULE_VALIDATION_CACHE_CREATE_INFO_EXT => { + Some("SHADER_MODULE_VALIDATION_CACHE_CREATE_INFO_EXT") + } + Self::DESCRIPTOR_SET_LAYOUT_BINDING_FLAGS_CREATE_INFO_EXT => { + Some("DESCRIPTOR_SET_LAYOUT_BINDING_FLAGS_CREATE_INFO_EXT") + } + Self::PHYSICAL_DEVICE_DESCRIPTOR_INDEXING_FEATURES_EXT => { + Some("PHYSICAL_DEVICE_DESCRIPTOR_INDEXING_FEATURES_EXT") + } + Self::PHYSICAL_DEVICE_DESCRIPTOR_INDEXING_PROPERTIES_EXT => { + Some("PHYSICAL_DEVICE_DESCRIPTOR_INDEXING_PROPERTIES_EXT") + } + Self::DESCRIPTOR_SET_VARIABLE_DESCRIPTOR_COUNT_ALLOCATE_INFO_EXT => { + Some("DESCRIPTOR_SET_VARIABLE_DESCRIPTOR_COUNT_ALLOCATE_INFO_EXT") + } + Self::DESCRIPTOR_SET_VARIABLE_DESCRIPTOR_COUNT_LAYOUT_SUPPORT_EXT => { + Some("DESCRIPTOR_SET_VARIABLE_DESCRIPTOR_COUNT_LAYOUT_SUPPORT_EXT") + } + Self::PIPELINE_VIEWPORT_SHADING_RATE_IMAGE_STATE_CREATE_INFO_NV => { + Some("PIPELINE_VIEWPORT_SHADING_RATE_IMAGE_STATE_CREATE_INFO_NV") + } + Self::PHYSICAL_DEVICE_SHADING_RATE_IMAGE_FEATURES_NV => { + Some("PHYSICAL_DEVICE_SHADING_RATE_IMAGE_FEATURES_NV") + } + Self::PHYSICAL_DEVICE_SHADING_RATE_IMAGE_PROPERTIES_NV => { + Some("PHYSICAL_DEVICE_SHADING_RATE_IMAGE_PROPERTIES_NV") + } + Self::PIPELINE_VIEWPORT_COARSE_SAMPLE_ORDER_STATE_CREATE_INFO_NV => { + Some("PIPELINE_VIEWPORT_COARSE_SAMPLE_ORDER_STATE_CREATE_INFO_NV") + } + Self::RAY_TRACING_PIPELINE_CREATE_INFO_NV => { + Some("RAY_TRACING_PIPELINE_CREATE_INFO_NV") + } + Self::ACCELERATION_STRUCTURE_CREATE_INFO_NV => { + Some("ACCELERATION_STRUCTURE_CREATE_INFO_NV") + } + Self::GEOMETRY_NV => Some("GEOMETRY_NV"), + Self::GEOMETRY_TRIANGLES_NV => Some("GEOMETRY_TRIANGLES_NV"), + Self::GEOMETRY_AABB_NV => Some("GEOMETRY_AABB_NV"), + Self::BIND_ACCELERATION_STRUCTURE_MEMORY_INFO_NV => { + Some("BIND_ACCELERATION_STRUCTURE_MEMORY_INFO_NV") + } + Self::WRITE_DESCRIPTOR_SET_ACCELERATION_STRUCTURE_NV => { + Some("WRITE_DESCRIPTOR_SET_ACCELERATION_STRUCTURE_NV") + } + Self::ACCELERATION_STRUCTURE_MEMORY_REQUIREMENTS_INFO_NV => { + Some("ACCELERATION_STRUCTURE_MEMORY_REQUIREMENTS_INFO_NV") + } + Self::PHYSICAL_DEVICE_RAY_TRACING_PROPERTIES_NV => { + Some("PHYSICAL_DEVICE_RAY_TRACING_PROPERTIES_NV") + } + Self::RAY_TRACING_SHADER_GROUP_CREATE_INFO_NV => { + Some("RAY_TRACING_SHADER_GROUP_CREATE_INFO_NV") + } + Self::ACCELERATION_STRUCTURE_INFO_NV => Some("ACCELERATION_STRUCTURE_INFO_NV"), + Self::PHYSICAL_DEVICE_REPRESENTATIVE_FRAGMENT_TEST_FEATURES_NV => { + Some("PHYSICAL_DEVICE_REPRESENTATIVE_FRAGMENT_TEST_FEATURES_NV") + } + Self::PIPELINE_REPRESENTATIVE_FRAGMENT_TEST_STATE_CREATE_INFO_NV => { + Some("PIPELINE_REPRESENTATIVE_FRAGMENT_TEST_STATE_CREATE_INFO_NV") + } + Self::DEVICE_QUEUE_GLOBAL_PRIORITY_CREATE_INFO_EXT => { + Some("DEVICE_QUEUE_GLOBAL_PRIORITY_CREATE_INFO_EXT") + } + Self::PHYSICAL_DEVICE_8BIT_STORAGE_FEATURES_KHR => { + Some("PHYSICAL_DEVICE_8BIT_STORAGE_FEATURES_KHR") + } + Self::IMPORT_MEMORY_HOST_POINTER_INFO_EXT => { + Some("IMPORT_MEMORY_HOST_POINTER_INFO_EXT") + } + Self::MEMORY_HOST_POINTER_PROPERTIES_EXT => Some("MEMORY_HOST_POINTER_PROPERTIES_EXT"), + Self::PHYSICAL_DEVICE_EXTERNAL_MEMORY_HOST_PROPERTIES_EXT => { + Some("PHYSICAL_DEVICE_EXTERNAL_MEMORY_HOST_PROPERTIES_EXT") + } + Self::PHYSICAL_DEVICE_SHADER_ATOMIC_INT64_FEATURES_KHR => { + Some("PHYSICAL_DEVICE_SHADER_ATOMIC_INT64_FEATURES_KHR") + } + Self::CALIBRATED_TIMESTAMP_INFO_EXT => Some("CALIBRATED_TIMESTAMP_INFO_EXT"), + Self::PHYSICAL_DEVICE_SHADER_CORE_PROPERTIES_AMD => { + Some("PHYSICAL_DEVICE_SHADER_CORE_PROPERTIES_AMD") + } + 
Self::DEVICE_MEMORY_OVERALLOCATION_CREATE_INFO_AMD => { + Some("DEVICE_MEMORY_OVERALLOCATION_CREATE_INFO_AMD") + } + Self::PHYSICAL_DEVICE_VERTEX_ATTRIBUTE_DIVISOR_PROPERTIES_EXT => { + Some("PHYSICAL_DEVICE_VERTEX_ATTRIBUTE_DIVISOR_PROPERTIES_EXT") + } + Self::PIPELINE_VERTEX_INPUT_DIVISOR_STATE_CREATE_INFO_EXT => { + Some("PIPELINE_VERTEX_INPUT_DIVISOR_STATE_CREATE_INFO_EXT") + } + Self::PHYSICAL_DEVICE_VERTEX_ATTRIBUTE_DIVISOR_FEATURES_EXT => { + Some("PHYSICAL_DEVICE_VERTEX_ATTRIBUTE_DIVISOR_FEATURES_EXT") + } + Self::PHYSICAL_DEVICE_DRIVER_PROPERTIES_KHR => { + Some("PHYSICAL_DEVICE_DRIVER_PROPERTIES_KHR") + } + Self::PHYSICAL_DEVICE_FLOAT_CONTROLS_PROPERTIES_KHR => { + Some("PHYSICAL_DEVICE_FLOAT_CONTROLS_PROPERTIES_KHR") + } + Self::PHYSICAL_DEVICE_DEPTH_STENCIL_RESOLVE_PROPERTIES_KHR => { + Some("PHYSICAL_DEVICE_DEPTH_STENCIL_RESOLVE_PROPERTIES_KHR") + } + Self::SUBPASS_DESCRIPTION_DEPTH_STENCIL_RESOLVE_KHR => { + Some("SUBPASS_DESCRIPTION_DEPTH_STENCIL_RESOLVE_KHR") + } + Self::PHYSICAL_DEVICE_COMPUTE_SHADER_DERIVATIVES_FEATURES_NV => { + Some("PHYSICAL_DEVICE_COMPUTE_SHADER_DERIVATIVES_FEATURES_NV") + } + Self::PHYSICAL_DEVICE_MESH_SHADER_FEATURES_NV => { + Some("PHYSICAL_DEVICE_MESH_SHADER_FEATURES_NV") + } + Self::PHYSICAL_DEVICE_MESH_SHADER_PROPERTIES_NV => { + Some("PHYSICAL_DEVICE_MESH_SHADER_PROPERTIES_NV") + } + Self::PHYSICAL_DEVICE_FRAGMENT_SHADER_BARYCENTRIC_FEATURES_NV => { + Some("PHYSICAL_DEVICE_FRAGMENT_SHADER_BARYCENTRIC_FEATURES_NV") + } + Self::PHYSICAL_DEVICE_SHADER_IMAGE_FOOTPRINT_FEATURES_NV => { + Some("PHYSICAL_DEVICE_SHADER_IMAGE_FOOTPRINT_FEATURES_NV") + } + Self::PIPELINE_VIEWPORT_EXCLUSIVE_SCISSOR_STATE_CREATE_INFO_NV => { + Some("PIPELINE_VIEWPORT_EXCLUSIVE_SCISSOR_STATE_CREATE_INFO_NV") + } + Self::PHYSICAL_DEVICE_EXCLUSIVE_SCISSOR_FEATURES_NV => { + Some("PHYSICAL_DEVICE_EXCLUSIVE_SCISSOR_FEATURES_NV") + } + Self::CHECKPOINT_DATA_NV => Some("CHECKPOINT_DATA_NV"), + Self::QUEUE_FAMILY_CHECKPOINT_PROPERTIES_NV => { + Some("QUEUE_FAMILY_CHECKPOINT_PROPERTIES_NV") + } + Self::PHYSICAL_DEVICE_VULKAN_MEMORY_MODEL_FEATURES_KHR => { + Some("PHYSICAL_DEVICE_VULKAN_MEMORY_MODEL_FEATURES_KHR") + } + Self::PHYSICAL_DEVICE_PCI_BUS_INFO_PROPERTIES_EXT => { + Some("PHYSICAL_DEVICE_PCI_BUS_INFO_PROPERTIES_EXT") + } + Self::IMAGEPIPE_SURFACE_CREATE_INFO_FUCHSIA => { + Some("IMAGEPIPE_SURFACE_CREATE_INFO_FUCHSIA") + } + Self::PHYSICAL_DEVICE_FRAGMENT_DENSITY_MAP_FEATURES_EXT => { + Some("PHYSICAL_DEVICE_FRAGMENT_DENSITY_MAP_FEATURES_EXT") + } + Self::PHYSICAL_DEVICE_FRAGMENT_DENSITY_MAP_PROPERTIES_EXT => { + Some("PHYSICAL_DEVICE_FRAGMENT_DENSITY_MAP_PROPERTIES_EXT") + } + Self::RENDER_PASS_FRAGMENT_DENSITY_MAP_CREATE_INFO_EXT => { + Some("RENDER_PASS_FRAGMENT_DENSITY_MAP_CREATE_INFO_EXT") + } + Self::PHYSICAL_DEVICE_SCALAR_BLOCK_LAYOUT_FEATURES_EXT => { + Some("PHYSICAL_DEVICE_SCALAR_BLOCK_LAYOUT_FEATURES_EXT") + } + Self::PHYSICAL_DEVICE_MEMORY_BUDGET_PROPERTIES_EXT => { + Some("PHYSICAL_DEVICE_MEMORY_BUDGET_PROPERTIES_EXT") + } + Self::PHYSICAL_DEVICE_MEMORY_PRIORITY_FEATURES_EXT => { + Some("PHYSICAL_DEVICE_MEMORY_PRIORITY_FEATURES_EXT") + } + Self::MEMORY_PRIORITY_ALLOCATE_INFO_EXT => Some("MEMORY_PRIORITY_ALLOCATE_INFO_EXT"), + Self::PHYSICAL_DEVICE_BUFFER_ADDRESS_FEATURES_EXT => { + Some("PHYSICAL_DEVICE_BUFFER_ADDRESS_FEATURES_EXT") + } + Self::BUFFER_DEVICE_ADDRESS_INFO_EXT => Some("BUFFER_DEVICE_ADDRESS_INFO_EXT"), + Self::BUFFER_DEVICE_ADDRESS_CREATE_INFO_EXT => { + Some("BUFFER_DEVICE_ADDRESS_CREATE_INFO_EXT") + } + Self::IMAGE_STENCIL_USAGE_CREATE_INFO_EXT 
=> { + Some("IMAGE_STENCIL_USAGE_CREATE_INFO_EXT") + } + Self::VALIDATION_FEATURES_EXT => Some("VALIDATION_FEATURES_EXT"), + Self::PHYSICAL_DEVICE_SUBGROUP_PROPERTIES => { + Some("PHYSICAL_DEVICE_SUBGROUP_PROPERTIES") + } + Self::BIND_BUFFER_MEMORY_INFO => Some("BIND_BUFFER_MEMORY_INFO"), + Self::BIND_IMAGE_MEMORY_INFO => Some("BIND_IMAGE_MEMORY_INFO"), + Self::PHYSICAL_DEVICE_16BIT_STORAGE_FEATURES => { + Some("PHYSICAL_DEVICE_16BIT_STORAGE_FEATURES") + } + Self::MEMORY_DEDICATED_REQUIREMENTS => Some("MEMORY_DEDICATED_REQUIREMENTS"), + Self::MEMORY_DEDICATED_ALLOCATE_INFO => Some("MEMORY_DEDICATED_ALLOCATE_INFO"), + Self::MEMORY_ALLOCATE_FLAGS_INFO => Some("MEMORY_ALLOCATE_FLAGS_INFO"), + Self::DEVICE_GROUP_RENDER_PASS_BEGIN_INFO => { + Some("DEVICE_GROUP_RENDER_PASS_BEGIN_INFO") + } + Self::DEVICE_GROUP_COMMAND_BUFFER_BEGIN_INFO => { + Some("DEVICE_GROUP_COMMAND_BUFFER_BEGIN_INFO") + } + Self::DEVICE_GROUP_SUBMIT_INFO => Some("DEVICE_GROUP_SUBMIT_INFO"), + Self::DEVICE_GROUP_BIND_SPARSE_INFO => Some("DEVICE_GROUP_BIND_SPARSE_INFO"), + Self::BIND_BUFFER_MEMORY_DEVICE_GROUP_INFO => { + Some("BIND_BUFFER_MEMORY_DEVICE_GROUP_INFO") + } + Self::BIND_IMAGE_MEMORY_DEVICE_GROUP_INFO => { + Some("BIND_IMAGE_MEMORY_DEVICE_GROUP_INFO") + } + Self::PHYSICAL_DEVICE_GROUP_PROPERTIES => Some("PHYSICAL_DEVICE_GROUP_PROPERTIES"), + Self::DEVICE_GROUP_DEVICE_CREATE_INFO => Some("DEVICE_GROUP_DEVICE_CREATE_INFO"), + Self::BUFFER_MEMORY_REQUIREMENTS_INFO_2 => Some("BUFFER_MEMORY_REQUIREMENTS_INFO_2"), + Self::IMAGE_MEMORY_REQUIREMENTS_INFO_2 => Some("IMAGE_MEMORY_REQUIREMENTS_INFO_2"), + Self::IMAGE_SPARSE_MEMORY_REQUIREMENTS_INFO_2 => { + Some("IMAGE_SPARSE_MEMORY_REQUIREMENTS_INFO_2") + } + Self::MEMORY_REQUIREMENTS_2 => Some("MEMORY_REQUIREMENTS_2"), + Self::SPARSE_IMAGE_MEMORY_REQUIREMENTS_2 => Some("SPARSE_IMAGE_MEMORY_REQUIREMENTS_2"), + Self::PHYSICAL_DEVICE_FEATURES_2 => Some("PHYSICAL_DEVICE_FEATURES_2"), + Self::PHYSICAL_DEVICE_PROPERTIES_2 => Some("PHYSICAL_DEVICE_PROPERTIES_2"), + Self::FORMAT_PROPERTIES_2 => Some("FORMAT_PROPERTIES_2"), + Self::IMAGE_FORMAT_PROPERTIES_2 => Some("IMAGE_FORMAT_PROPERTIES_2"), + Self::PHYSICAL_DEVICE_IMAGE_FORMAT_INFO_2 => { + Some("PHYSICAL_DEVICE_IMAGE_FORMAT_INFO_2") + } + Self::QUEUE_FAMILY_PROPERTIES_2 => Some("QUEUE_FAMILY_PROPERTIES_2"), + Self::PHYSICAL_DEVICE_MEMORY_PROPERTIES_2 => { + Some("PHYSICAL_DEVICE_MEMORY_PROPERTIES_2") + } + Self::SPARSE_IMAGE_FORMAT_PROPERTIES_2 => Some("SPARSE_IMAGE_FORMAT_PROPERTIES_2"), + Self::PHYSICAL_DEVICE_SPARSE_IMAGE_FORMAT_INFO_2 => { + Some("PHYSICAL_DEVICE_SPARSE_IMAGE_FORMAT_INFO_2") + } + Self::PHYSICAL_DEVICE_POINT_CLIPPING_PROPERTIES => { + Some("PHYSICAL_DEVICE_POINT_CLIPPING_PROPERTIES") + } + Self::RENDER_PASS_INPUT_ATTACHMENT_ASPECT_CREATE_INFO => { + Some("RENDER_PASS_INPUT_ATTACHMENT_ASPECT_CREATE_INFO") + } + Self::IMAGE_VIEW_USAGE_CREATE_INFO => Some("IMAGE_VIEW_USAGE_CREATE_INFO"), + Self::PIPELINE_TESSELLATION_DOMAIN_ORIGIN_STATE_CREATE_INFO => { + Some("PIPELINE_TESSELLATION_DOMAIN_ORIGIN_STATE_CREATE_INFO") + } + Self::RENDER_PASS_MULTIVIEW_CREATE_INFO => Some("RENDER_PASS_MULTIVIEW_CREATE_INFO"), + Self::PHYSICAL_DEVICE_MULTIVIEW_FEATURES => Some("PHYSICAL_DEVICE_MULTIVIEW_FEATURES"), + Self::PHYSICAL_DEVICE_MULTIVIEW_PROPERTIES => { + Some("PHYSICAL_DEVICE_MULTIVIEW_PROPERTIES") + } + Self::PHYSICAL_DEVICE_VARIABLE_POINTER_FEATURES => { + Some("PHYSICAL_DEVICE_VARIABLE_POINTER_FEATURES") + } + Self::PROTECTED_SUBMIT_INFO => Some("PROTECTED_SUBMIT_INFO"), + 
Self::PHYSICAL_DEVICE_PROTECTED_MEMORY_FEATURES => { + Some("PHYSICAL_DEVICE_PROTECTED_MEMORY_FEATURES") + } + Self::PHYSICAL_DEVICE_PROTECTED_MEMORY_PROPERTIES => { + Some("PHYSICAL_DEVICE_PROTECTED_MEMORY_PROPERTIES") + } + Self::DEVICE_QUEUE_INFO_2 => Some("DEVICE_QUEUE_INFO_2"), + Self::SAMPLER_YCBCR_CONVERSION_CREATE_INFO => { + Some("SAMPLER_YCBCR_CONVERSION_CREATE_INFO") + } + Self::SAMPLER_YCBCR_CONVERSION_INFO => Some("SAMPLER_YCBCR_CONVERSION_INFO"), + Self::BIND_IMAGE_PLANE_MEMORY_INFO => Some("BIND_IMAGE_PLANE_MEMORY_INFO"), + Self::IMAGE_PLANE_MEMORY_REQUIREMENTS_INFO => { + Some("IMAGE_PLANE_MEMORY_REQUIREMENTS_INFO") + } + Self::PHYSICAL_DEVICE_SAMPLER_YCBCR_CONVERSION_FEATURES => { + Some("PHYSICAL_DEVICE_SAMPLER_YCBCR_CONVERSION_FEATURES") + } + Self::SAMPLER_YCBCR_CONVERSION_IMAGE_FORMAT_PROPERTIES => { + Some("SAMPLER_YCBCR_CONVERSION_IMAGE_FORMAT_PROPERTIES") + } + Self::DESCRIPTOR_UPDATE_TEMPLATE_CREATE_INFO => { + Some("DESCRIPTOR_UPDATE_TEMPLATE_CREATE_INFO") + } + Self::PHYSICAL_DEVICE_EXTERNAL_IMAGE_FORMAT_INFO => { + Some("PHYSICAL_DEVICE_EXTERNAL_IMAGE_FORMAT_INFO") + } + Self::EXTERNAL_IMAGE_FORMAT_PROPERTIES => Some("EXTERNAL_IMAGE_FORMAT_PROPERTIES"), + Self::PHYSICAL_DEVICE_EXTERNAL_BUFFER_INFO => { + Some("PHYSICAL_DEVICE_EXTERNAL_BUFFER_INFO") + } + Self::EXTERNAL_BUFFER_PROPERTIES => Some("EXTERNAL_BUFFER_PROPERTIES"), + Self::PHYSICAL_DEVICE_ID_PROPERTIES => Some("PHYSICAL_DEVICE_ID_PROPERTIES"), + Self::EXTERNAL_MEMORY_BUFFER_CREATE_INFO => Some("EXTERNAL_MEMORY_BUFFER_CREATE_INFO"), + Self::EXTERNAL_MEMORY_IMAGE_CREATE_INFO => Some("EXTERNAL_MEMORY_IMAGE_CREATE_INFO"), + Self::EXPORT_MEMORY_ALLOCATE_INFO => Some("EXPORT_MEMORY_ALLOCATE_INFO"), + Self::PHYSICAL_DEVICE_EXTERNAL_FENCE_INFO => { + Some("PHYSICAL_DEVICE_EXTERNAL_FENCE_INFO") + } + Self::EXTERNAL_FENCE_PROPERTIES => Some("EXTERNAL_FENCE_PROPERTIES"), + Self::EXPORT_FENCE_CREATE_INFO => Some("EXPORT_FENCE_CREATE_INFO"), + Self::EXPORT_SEMAPHORE_CREATE_INFO => Some("EXPORT_SEMAPHORE_CREATE_INFO"), + Self::PHYSICAL_DEVICE_EXTERNAL_SEMAPHORE_INFO => { + Some("PHYSICAL_DEVICE_EXTERNAL_SEMAPHORE_INFO") + } + Self::EXTERNAL_SEMAPHORE_PROPERTIES => Some("EXTERNAL_SEMAPHORE_PROPERTIES"), + Self::PHYSICAL_DEVICE_MAINTENANCE_3_PROPERTIES => { + Some("PHYSICAL_DEVICE_MAINTENANCE_3_PROPERTIES") + } + Self::DESCRIPTOR_SET_LAYOUT_SUPPORT => Some("DESCRIPTOR_SET_LAYOUT_SUPPORT"), + Self::PHYSICAL_DEVICE_SHADER_DRAW_PARAMETER_FEATURES => { + Some("PHYSICAL_DEVICE_SHADER_DRAW_PARAMETER_FEATURES") + } + _ => None, + }; + if let Some(x) = name { + f.write_str(x) + } else { + self.0.fmt(f) + } + } +} +impl fmt::Debug for SubgroupFeatureFlags { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + const KNOWN: &[(Flags, &str)] = &[ + (SubgroupFeatureFlags::BASIC.0, "BASIC"), + (SubgroupFeatureFlags::VOTE.0, "VOTE"), + (SubgroupFeatureFlags::ARITHMETIC.0, "ARITHMETIC"), + (SubgroupFeatureFlags::BALLOT.0, "BALLOT"), + (SubgroupFeatureFlags::SHUFFLE.0, "SHUFFLE"), + (SubgroupFeatureFlags::SHUFFLE_RELATIVE.0, "SHUFFLE_RELATIVE"), + (SubgroupFeatureFlags::CLUSTERED.0, "CLUSTERED"), + (SubgroupFeatureFlags::QUAD.0, "QUAD"), + (SubgroupFeatureFlags::PARTITIONED_NV.0, "PARTITIONED_NV"), + ]; + debug_flags(f, KNOWN, self.0) + } +} +impl fmt::Debug for SubpassContents { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + let name = match *self { + Self::INLINE => Some("INLINE"), + Self::SECONDARY_COMMAND_BUFFERS => Some("SECONDARY_COMMAND_BUFFERS"), + _ => None, + }; + if let Some(x) = name { + f.write_str(x) + 
} else { + self.0.fmt(f) + } + } +} +impl fmt::Debug for SubpassDescriptionFlags { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + const KNOWN: &[(Flags, &str)] = &[ + ( + SubpassDescriptionFlags::PER_VIEW_ATTRIBUTES_NVX.0, + "PER_VIEW_ATTRIBUTES_NVX", + ), + ( + SubpassDescriptionFlags::PER_VIEW_POSITION_X_ONLY_NVX.0, + "PER_VIEW_POSITION_X_ONLY_NVX", + ), + ]; + debug_flags(f, KNOWN, self.0) + } +} +impl fmt::Debug for SurfaceCounterFlagsEXT { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + const KNOWN: &[(Flags, &str)] = &[(SurfaceCounterFlagsEXT::VBLANK.0, "VBLANK")]; + debug_flags(f, KNOWN, self.0) + } +} +impl fmt::Debug for SurfaceTransformFlagsKHR { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + const KNOWN: &[(Flags, &str)] = &[ + (SurfaceTransformFlagsKHR::IDENTITY.0, "IDENTITY"), + (SurfaceTransformFlagsKHR::ROTATE_90.0, "ROTATE_90"), + (SurfaceTransformFlagsKHR::ROTATE_180.0, "ROTATE_180"), + (SurfaceTransformFlagsKHR::ROTATE_270.0, "ROTATE_270"), + ( + SurfaceTransformFlagsKHR::HORIZONTAL_MIRROR.0, + "HORIZONTAL_MIRROR", + ), + ( + SurfaceTransformFlagsKHR::HORIZONTAL_MIRROR_ROTATE_90.0, + "HORIZONTAL_MIRROR_ROTATE_90", + ), + ( + SurfaceTransformFlagsKHR::HORIZONTAL_MIRROR_ROTATE_180.0, + "HORIZONTAL_MIRROR_ROTATE_180", + ), + ( + SurfaceTransformFlagsKHR::HORIZONTAL_MIRROR_ROTATE_270.0, + "HORIZONTAL_MIRROR_ROTATE_270", + ), + (SurfaceTransformFlagsKHR::INHERIT.0, "INHERIT"), + ]; + debug_flags(f, KNOWN, self.0) + } +} +impl fmt::Debug for SwapchainCreateFlagsKHR { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + const KNOWN: &[(Flags, &str)] = &[ + ( + SwapchainCreateFlagsKHR::SPLIT_INSTANCE_BIND_REGIONS.0, + "SPLIT_INSTANCE_BIND_REGIONS", + ), + (SwapchainCreateFlagsKHR::PROTECTED.0, "PROTECTED"), + (SwapchainCreateFlagsKHR::MUTABLE_FORMAT.0, "MUTABLE_FORMAT"), + ]; + debug_flags(f, KNOWN, self.0) + } +} +impl fmt::Debug for SystemAllocationScope { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + let name = match *self { + Self::COMMAND => Some("COMMAND"), + Self::OBJECT => Some("OBJECT"), + Self::CACHE => Some("CACHE"), + Self::DEVICE => Some("DEVICE"), + Self::INSTANCE => Some("INSTANCE"), + _ => None, + }; + if let Some(x) = name { + f.write_str(x) + } else { + self.0.fmt(f) + } + } +} +impl fmt::Debug for TessellationDomainOrigin { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + let name = match *self { + Self::UPPER_LEFT => Some("UPPER_LEFT"), + Self::LOWER_LEFT => Some("LOWER_LEFT"), + _ => None, + }; + if let Some(x) = name { + f.write_str(x) + } else { + self.0.fmt(f) + } + } +} +impl fmt::Debug for TimeDomainEXT { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + let name = match *self { + Self::DEVICE => Some("DEVICE"), + Self::CLOCK_MONOTONIC => Some("CLOCK_MONOTONIC"), + Self::CLOCK_MONOTONIC_RAW => Some("CLOCK_MONOTONIC_RAW"), + Self::QUERY_PERFORMANCE_COUNTER => Some("QUERY_PERFORMANCE_COUNTER"), + _ => None, + }; + if let Some(x) = name { + f.write_str(x) + } else { + self.0.fmt(f) + } + } +} +impl fmt::Debug for ValidationCacheCreateFlagsEXT { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + const KNOWN: &[(Flags, &str)] = &[]; + debug_flags(f, KNOWN, self.0) + } +} +impl fmt::Debug for ValidationCacheHeaderVersionEXT { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + let name = match *self { + Self::ONE => Some("ONE"), + _ => None, + }; + if let Some(x) = name { + f.write_str(x) + } else { + self.0.fmt(f) + } + } +} +impl fmt::Debug for ValidationCheckEXT { + fn 
fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + let name = match *self { + Self::ALL => Some("ALL"), + Self::SHADERS => Some("SHADERS"), + _ => None, + }; + if let Some(x) = name { + f.write_str(x) + } else { + self.0.fmt(f) + } + } +} +impl fmt::Debug for ValidationFeatureDisableEXT { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + let name = match *self { + Self::ALL => Some("ALL"), + Self::SHADERS => Some("SHADERS"), + Self::THREAD_SAFETY => Some("THREAD_SAFETY"), + Self::API_PARAMETERS => Some("API_PARAMETERS"), + Self::OBJECT_LIFETIMES => Some("OBJECT_LIFETIMES"), + Self::CORE_CHECKS => Some("CORE_CHECKS"), + Self::UNIQUE_HANDLES => Some("UNIQUE_HANDLES"), + _ => None, + }; + if let Some(x) = name { + f.write_str(x) + } else { + self.0.fmt(f) + } + } +} +impl fmt::Debug for ValidationFeatureEnableEXT { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + let name = match *self { + Self::GPU_ASSISTED => Some("GPU_ASSISTED"), + Self::GPU_ASSISTED_RESERVE_BINDING_SLOT => Some("GPU_ASSISTED_RESERVE_BINDING_SLOT"), + _ => None, + }; + if let Some(x) = name { + f.write_str(x) + } else { + self.0.fmt(f) + } + } +} +impl fmt::Debug for VendorId { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + let name = match *self { + Self::VIV => Some("VIV"), + Self::VSI => Some("VSI"), + Self::KAZAN => Some("KAZAN"), + _ => None, + }; + if let Some(x) = name { + f.write_str(x) + } else { + self.0.fmt(f) + } + } +} +impl fmt::Debug for VertexInputRate { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + let name = match *self { + Self::VERTEX => Some("VERTEX"), + Self::INSTANCE => Some("INSTANCE"), + _ => None, + }; + if let Some(x) = name { + f.write_str(x) + } else { + self.0.fmt(f) + } + } +} +impl fmt::Debug for ViSurfaceCreateFlagsNN { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + const KNOWN: &[(Flags, &str)] = &[]; + debug_flags(f, KNOWN, self.0) + } +} +impl fmt::Debug for ViewportCoordinateSwizzleNV { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + let name = match *self { + Self::POSITIVE_X => Some("POSITIVE_X"), + Self::NEGATIVE_X => Some("NEGATIVE_X"), + Self::POSITIVE_Y => Some("POSITIVE_Y"), + Self::NEGATIVE_Y => Some("NEGATIVE_Y"), + Self::POSITIVE_Z => Some("POSITIVE_Z"), + Self::NEGATIVE_Z => Some("NEGATIVE_Z"), + Self::POSITIVE_W => Some("POSITIVE_W"), + Self::NEGATIVE_W => Some("NEGATIVE_W"), + _ => None, + }; + if let Some(x) = name { + f.write_str(x) + } else { + self.0.fmt(f) + } + } +} +impl fmt::Debug for WaylandSurfaceCreateFlagsKHR { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + const KNOWN: &[(Flags, &str)] = &[]; + debug_flags(f, KNOWN, self.0) + } +} +impl fmt::Debug for Win32SurfaceCreateFlagsKHR { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + const KNOWN: &[(Flags, &str)] = &[]; + debug_flags(f, KNOWN, self.0) + } +} +impl fmt::Debug for XcbSurfaceCreateFlagsKHR { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + const KNOWN: &[(Flags, &str)] = &[]; + debug_flags(f, KNOWN, self.0) + } +} +impl fmt::Debug for XlibSurfaceCreateFlagsKHR { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + const KNOWN: &[(Flags, &str)] = &[]; + debug_flags(f, KNOWN, self.0) + } +} +pub type DescriptorUpdateTemplateCreateFlagsKHR = DescriptorUpdateTemplateCreateFlags; +pub type PeerMemoryFeatureFlagsKHR = PeerMemoryFeatureFlags; +pub type MemoryAllocateFlagsKHR = MemoryAllocateFlags; +pub type CommandPoolTrimFlagsKHR = CommandPoolTrimFlags; +pub type ExternalMemoryHandleTypeFlagsKHR = 
ExternalMemoryHandleTypeFlags; +pub type ExternalMemoryFeatureFlagsKHR = ExternalMemoryFeatureFlags; +pub type ExternalSemaphoreHandleTypeFlagsKHR = ExternalSemaphoreHandleTypeFlags; +pub type ExternalSemaphoreFeatureFlagsKHR = ExternalSemaphoreFeatureFlags; +pub type SemaphoreImportFlagsKHR = SemaphoreImportFlags; +pub type ExternalFenceHandleTypeFlagsKHR = ExternalFenceHandleTypeFlags; +pub type ExternalFenceFeatureFlagsKHR = ExternalFenceFeatureFlags; +pub type FenceImportFlagsKHR = FenceImportFlags; +pub type DescriptorUpdateTemplateKHR = DescriptorUpdateTemplate; +pub type SamplerYcbcrConversionKHR = SamplerYcbcrConversion; +pub type DescriptorUpdateTemplateTypeKHR = DescriptorUpdateTemplateType; +pub type PointClippingBehaviorKHR = PointClippingBehavior; +pub type TessellationDomainOriginKHR = TessellationDomainOrigin; +pub type SamplerYcbcrModelConversionKHR = SamplerYcbcrModelConversion; +pub type SamplerYcbcrRangeKHR = SamplerYcbcrRange; +pub type ChromaLocationKHR = ChromaLocation; +pub type PhysicalDeviceFeatures2KHR = PhysicalDeviceFeatures2; +pub type PhysicalDeviceProperties2KHR = PhysicalDeviceProperties2; +pub type FormatProperties2KHR = FormatProperties2; +pub type ImageFormatProperties2KHR = ImageFormatProperties2; +pub type PhysicalDeviceImageFormatInfo2KHR = PhysicalDeviceImageFormatInfo2; +pub type QueueFamilyProperties2KHR = QueueFamilyProperties2; +pub type PhysicalDeviceMemoryProperties2KHR = PhysicalDeviceMemoryProperties2; +pub type SparseImageFormatProperties2KHR = SparseImageFormatProperties2; +pub type PhysicalDeviceSparseImageFormatInfo2KHR = PhysicalDeviceSparseImageFormatInfo2; +pub type PhysicalDeviceVariablePointerFeaturesKHR = PhysicalDeviceVariablePointerFeatures; +pub type ExternalMemoryPropertiesKHR = ExternalMemoryProperties; +pub type PhysicalDeviceExternalImageFormatInfoKHR = PhysicalDeviceExternalImageFormatInfo; +pub type ExternalImageFormatPropertiesKHR = ExternalImageFormatProperties; +pub type PhysicalDeviceExternalBufferInfoKHR = PhysicalDeviceExternalBufferInfo; +pub type ExternalBufferPropertiesKHR = ExternalBufferProperties; +pub type PhysicalDeviceIDPropertiesKHR = PhysicalDeviceIDProperties; +pub type ExternalMemoryImageCreateInfoKHR = ExternalMemoryImageCreateInfo; +pub type ExternalMemoryBufferCreateInfoKHR = ExternalMemoryBufferCreateInfo; +pub type ExportMemoryAllocateInfoKHR = ExportMemoryAllocateInfo; +pub type PhysicalDeviceExternalSemaphoreInfoKHR = PhysicalDeviceExternalSemaphoreInfo; +pub type ExternalSemaphorePropertiesKHR = ExternalSemaphoreProperties; +pub type ExportSemaphoreCreateInfoKHR = ExportSemaphoreCreateInfo; +pub type PhysicalDeviceExternalFenceInfoKHR = PhysicalDeviceExternalFenceInfo; +pub type ExternalFencePropertiesKHR = ExternalFenceProperties; +pub type ExportFenceCreateInfoKHR = ExportFenceCreateInfo; +pub type PhysicalDeviceMultiviewFeaturesKHR = PhysicalDeviceMultiviewFeatures; +pub type PhysicalDeviceMultiviewPropertiesKHR = PhysicalDeviceMultiviewProperties; +pub type RenderPassMultiviewCreateInfoKHR = RenderPassMultiviewCreateInfo; +pub type PhysicalDeviceGroupPropertiesKHR = PhysicalDeviceGroupProperties; +pub type MemoryAllocateFlagsInfoKHR = MemoryAllocateFlagsInfo; +pub type BindBufferMemoryInfoKHR = BindBufferMemoryInfo; +pub type BindBufferMemoryDeviceGroupInfoKHR = BindBufferMemoryDeviceGroupInfo; +pub type BindImageMemoryInfoKHR = BindImageMemoryInfo; +pub type BindImageMemoryDeviceGroupInfoKHR = BindImageMemoryDeviceGroupInfo; +pub type DeviceGroupRenderPassBeginInfoKHR = 
DeviceGroupRenderPassBeginInfo; +pub type DeviceGroupCommandBufferBeginInfoKHR = DeviceGroupCommandBufferBeginInfo; +pub type DeviceGroupSubmitInfoKHR = DeviceGroupSubmitInfo; +pub type DeviceGroupBindSparseInfoKHR = DeviceGroupBindSparseInfo; +pub type DeviceGroupDeviceCreateInfoKHR = DeviceGroupDeviceCreateInfo; +pub type DescriptorUpdateTemplateEntryKHR = DescriptorUpdateTemplateEntry; +pub type DescriptorUpdateTemplateCreateInfoKHR = DescriptorUpdateTemplateCreateInfo; +pub type InputAttachmentAspectReferenceKHR = InputAttachmentAspectReference; +pub type RenderPassInputAttachmentAspectCreateInfoKHR = RenderPassInputAttachmentAspectCreateInfo; +pub type PhysicalDevice16BitStorageFeaturesKHR = PhysicalDevice16BitStorageFeatures; +pub type BufferMemoryRequirementsInfo2KHR = BufferMemoryRequirementsInfo2; +pub type ImageMemoryRequirementsInfo2KHR = ImageMemoryRequirementsInfo2; +pub type ImageSparseMemoryRequirementsInfo2KHR = ImageSparseMemoryRequirementsInfo2; +pub type MemoryRequirements2KHR = MemoryRequirements2; +pub type SparseImageMemoryRequirements2KHR = SparseImageMemoryRequirements2; +pub type PhysicalDevicePointClippingPropertiesKHR = PhysicalDevicePointClippingProperties; +pub type MemoryDedicatedRequirementsKHR = MemoryDedicatedRequirements; +pub type MemoryDedicatedAllocateInfoKHR = MemoryDedicatedAllocateInfo; +pub type ImageViewUsageCreateInfoKHR = ImageViewUsageCreateInfo; +pub type PipelineTessellationDomainOriginStateCreateInfoKHR = + PipelineTessellationDomainOriginStateCreateInfo; +pub type SamplerYcbcrConversionInfoKHR = SamplerYcbcrConversionInfo; +pub type SamplerYcbcrConversionCreateInfoKHR = SamplerYcbcrConversionCreateInfo; +pub type BindImagePlaneMemoryInfoKHR = BindImagePlaneMemoryInfo; +pub type ImagePlaneMemoryRequirementsInfoKHR = ImagePlaneMemoryRequirementsInfo; +pub type PhysicalDeviceSamplerYcbcrConversionFeaturesKHR = + PhysicalDeviceSamplerYcbcrConversionFeatures; +pub type SamplerYcbcrConversionImageFormatPropertiesKHR = + SamplerYcbcrConversionImageFormatProperties; +pub type PhysicalDeviceMaintenance3PropertiesKHR = PhysicalDeviceMaintenance3Properties; +pub type DescriptorSetLayoutSupportKHR = DescriptorSetLayoutSupport; diff --git a/third_party/rust/ash/tests/constant_size_arrays.rs b/third_party/rust/ash/tests/constant_size_arrays.rs new file mode 100644 index 000000000000..096f3436525f --- /dev/null +++ b/third_party/rust/ash/tests/constant_size_arrays.rs @@ -0,0 +1,41 @@ +extern crate ash; + +use ash::vk::{PhysicalDeviceProperties, PipelineColorBlendStateCreateInfo}; + +#[test] +fn assert_struct_field_is_array() { + let pipeline_cache_uuid: [u8; 16] = [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]; + + let _ = PhysicalDeviceProperties::builder().pipeline_cache_uuid(pipeline_cache_uuid); + + let _ = PhysicalDeviceProperties { + pipeline_cache_uuid, + ..Default::default() + }; + + let blend_constants: [f32; 4] = [0.0, 0.0, 0.0, 0.0]; + + let _ = PipelineColorBlendStateCreateInfo::builder().blend_constants(blend_constants); + + let _ = PipelineColorBlendStateCreateInfo { + blend_constants, + ..Default::default() + }; +} + +#[test] +#[allow(dead_code)] +fn assert_ffi_array_param_is_pointer() { + use ash::version::DeviceV1_0; + unsafe { + + if false { + let device: ash::Device = std::mem::uninitialized(); + let cmd_buffer = std::mem::uninitialized(); + + let blend_constants: [f32; 4] = [0.0, 0.0, 0.0, 0.0]; + + device.cmd_set_blend_constants(cmd_buffer, &blend_constants); + } + } +} diff --git a/third_party/rust/ash/tests/display.rs 
b/third_party/rust/ash/tests/display.rs new file mode 100644 index 000000000000..87460226de54 --- /dev/null +++ b/third_party/rust/ash/tests/display.rs @@ -0,0 +1,18 @@ +extern crate ash; +use ash::vk; + +#[test] +fn debug_flags() { + assert_eq!( + format!( + "{:?}", + vk::AccessFlags::INDIRECT_COMMAND_READ | vk::AccessFlags::VERTEX_ATTRIBUTE_READ + ), + "INDIRECT_COMMAND_READ | VERTEX_ATTRIBUTE_READ" + ); +} + +#[test] +fn debug_enum() { + assert_eq!(format!("{:?}", vk::ChromaLocation::MIDPOINT), "MIDPOINT"); +} diff --git a/third_party/rust/atom/.cargo-checksum.json b/third_party/rust/atom/.cargo-checksum.json new file mode 100644 index 000000000000..805de5b92a04 --- /dev/null +++ b/third_party/rust/atom/.cargo-checksum.json @@ -0,0 +1 @@ +{"files":{"Cargo.toml":"e002ed3dd38dc0551a851c4266742ed6d51b7173556c370b57d1b156b59c7350","LICENSE":"09e8a9bcec8067104652c168685ab0931e7868f9c8284b66f5ae6edae5f1130b","examples/fifo.rs":"f6a1091ecc3061c8c51a5906a93abb2f43853f23fbe56b3b36430ab0bece2e10","examples/simple.rs":"5590003f2775307d0d00ef6bcd2c009a011f71850033fca4ed7d2105e9a88b1c","readme.md":"a91b178c0b0fab0af36854d760e354808c36bbeda1bf11e77a8e02a5e4ad1a9d","src/lib.rs":"7a682b15762ad81e2cbc87add0d7538bc9627ddee5eb60af4d34b7276df0b974","tests/atom.rs":"d94cdd5a1bb9626b21642a4b2345927991e822b2623f1971f053c48e99979db8"},"package":"3c86699c3f02778ec07158376991c8f783dd1f2f95c579ffaf0738dc984b2fe2"} \ No newline at end of file diff --git a/third_party/rust/atom/Cargo.toml b/third_party/rust/atom/Cargo.toml new file mode 100644 index 000000000000..f769a765cc61 --- /dev/null +++ b/third_party/rust/atom/Cargo.toml @@ -0,0 +1,19 @@ +# THIS FILE IS AUTOMATICALLY GENERATED BY CARGO +# +# When uploading crates to the registry Cargo will automatically +# "normalize" Cargo.toml files for maximal compatibility +# with all versions of Cargo and also rewrite `path` dependencies +# to registry (e.g. crates.io) dependencies +# +# If you believe there's an error in this file please file an +# issue against the rust-lang/cargo repository. If you're +# editing this file be aware that the upstream Cargo.toml +# will likely look very different (and much more reasonable) + +[package] +name = "atom" +version = "0.3.5" +authors = ["Colin Sherratt "] +description = "A safe abstraction around AtomicPtr" +homepage = "https://github.com/slide-rs/atom" +license = "Apache-2.0" diff --git a/third_party/rust/atom/LICENSE b/third_party/rust/atom/LICENSE new file mode 100644 index 000000000000..67db8588217f --- /dev/null +++ b/third_party/rust/atom/LICENSE @@ -0,0 +1,175 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. 
+ + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. 
If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. 
Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. 
diff --git a/third_party/rust/atom/examples/fifo.rs b/third_party/rust/atom/examples/fifo.rs
new file mode 100644
index 000000000000..3063eac3f082
--- /dev/null
+++ b/third_party/rust/atom/examples/fifo.rs
@@ -0,0 +1,83 @@
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+extern crate atom;
+
+use atom::*;
+use std::mem;
+use std::sync::{Arc, Barrier};
+use std::thread;
+
+const THREADS: usize = 100;
+
+#[derive(Debug)]
+struct Link {
+    next: AtomSetOnce<Box<Link>>,
+}
+
+impl Drop for Link {
+    fn drop(&mut self) {
+
+        while let Some(mut h) = self.next.atom().take() {
+            self.next = mem::replace(&mut h.next, AtomSetOnce::empty());
+        }
+    }
+}
+
+fn main() {
+    let b = Arc::new(Barrier::new(THREADS + 1));
+
+    let head = Arc::new(Link {
+        next: AtomSetOnce::empty(),
+    });
+
+    for _ in 0..THREADS {
+        let b = b.clone();
+        let head = head.clone();
+        thread::spawn(move || {
+            let mut hptr = &*head;
+
+            for _ in 0..10_000 {
+                let mut my_awesome_node = Box::new(Link {
+                    next: AtomSetOnce::empty(),
+                });
+
+                loop {
+                    while let Some(h) = hptr.next.get() {
+                        hptr = h;
+                    }
+
+                    my_awesome_node = match hptr.next.set_if_none(my_awesome_node) {
+                        Some(v) => v,
+                        None => break,
+                    };
+                }
+            }
+            b.wait();
+        });
+    }
+
+    b.wait();
+
+    let mut hptr = &*head;
+    let mut count = 0;
+    while let Some(h) = hptr.next.get() {
+        hptr = h;
+        count += 1;
+    }
+    println!(
+        "Using {} threads we wrote {} links at the same time!",
+        THREADS, count
+    );
+}
diff --git a/third_party/rust/atom/examples/simple.rs b/third_party/rust/atom/examples/simple.rs
new file mode 100644
index 000000000000..f09e05785259
--- /dev/null
+++ b/third_party/rust/atom/examples/simple.rs
@@ -0,0 +1,33 @@
+extern crate atom;
+
+use atom::*;
+use std::sync::Arc;
+use std::thread;
+
+fn main() {
+
+    let shared_atom = Arc::new(Atom::empty());
+
+
+    shared_atom.swap(Box::new(75));
+
+
+    let threads: Vec<thread::JoinHandle<()>> = (0..8)
+        .map(|_| {
+            let shared_atom = shared_atom.clone();
+            thread::spawn(move || {
+
+                if let Some(v) = shared_atom.take() {
+                    println!("I got it: {:?} :D", v);
+                } else {
+                    println!("I did not get it :(");
+                }
+            })
+        })
+        .collect();
+
+
+    for t in threads {
+        t.join().unwrap();
+    }
+}
diff --git a/third_party/rust/atom/readme.md b/third_party/rust/atom/readme.md
new file mode 100644
index 000000000000..a655bf470169
--- /dev/null
+++ b/third_party/rust/atom/readme.md
@@ -0,0 +1,101 @@
+Atom
+====
+
+[![Build Status](https://travis-ci.org/slide-rs/atom.svg?branch=master)](https://travis-ci.org/csherratt/atom)
+[![Atom](http://meritbadge.herokuapp.com/atom)](https://crates.io/crates/atom)
+
+`Atom` is a simple abstraction around Rust's `AtomicPtr`. It provides a simple, wait-free way to exchange
+data between threads safely. `Atom` is built around the principle that an atomic swap can be used to
+safely emulate Rust's ownership.
+
+![store](https://raw.githubusercontent.com/csherratt/atom/master/.store.png)
+
+Using [`store`](https://doc.rust-lang.org/std/sync/atomic/struct.AtomicPtr.html#method.store) to set a shared
+atomic pointer is unsafe in Rust (or any language), because the contents of the pointer can be overwritten at
+any point in time, causing the contents of the pointer to be lost. This can cause your system to leak memory, and
+if you are expecting that memory to do something useful (like wake a sleeping thread), you are in trouble.
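+
+To make the hazard concrete, here is a minimal sketch (an illustration for this readme, not
+part of the crate) that uses the standard library's `AtomicPtr` directly. It shows why a bare
+`store` would strand the previous allocation, while `swap` hands it back so it can be dropped:
+
+```rust
+use std::sync::atomic::{AtomicPtr, Ordering};
+
+fn main() {
+    let slot = AtomicPtr::new(Box::into_raw(Box::new(1u32)));
+
+    // slot.store(new_ptr, Ordering::SeqCst) would overwrite the old pointer
+    // without returning it, so the first Box could never be freed: a leak.
+
+    // `swap` returns the old pointer, so ownership moves back to the caller.
+    let old = slot.swap(Box::into_raw(Box::new(2u32)), Ordering::SeqCst);
+    // Safety: `old` came from Box::into_raw and no other thread holds it.
+    let old = unsafe { Box::from_raw(old) };
+    assert_eq!(*old, 1);
+
+    // Reclaim whatever is left in the slot before exiting.
+    let last = slot.swap(std::ptr::null_mut(), Ordering::SeqCst);
+    let last = unsafe { Box::from_raw(last) };
+    assert_eq!(*last, 2);
+}
+```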
+ +![load](https://raw.githubusercontent.com/csherratt/atom/master/.load.png) + +Similarly, [`load`](https://doc.rust-lang.org/std/sync/atomic/struct.AtomicPtr.html#method.store) +is unsafe since there is no guarantee that that pointer will live for even a cycle after you have read it. Another +thread may modify the pointer, or free it. For `load` to be safe you need to have some outside contract to preserve +the correct ownership semantics. + +![swap](https://raw.githubusercontent.com/csherratt/atom/master/.swap.png) + +A [`swap`](https://doc.rust-lang.org/std/sync/atomic/struct.AtomicPtr.html#method.swap) is special as it allows +a reference to be exchanged without the risk of that pointer being freed, or stomped on. When a thread +swaps an `AtomicPtr` the old pointer ownership is moved to the caller, and the `AtomicPtr` takes ownership of the new +pointer. + + +Using `Atom` +------------ + +Add atom to your `Cargo.toml` +``` +[dependencies] +atom="*" +``` + +A short example: +```rust +extern crate atom; + +use std::sync::Arc; +use std::thread; +use atom::*; + +fn main() { + // Create an empty atom + let shared_atom = Arc::new(Atom::empty()); + + // set the value 75 + shared_atom.swap(Box::new(75)); + + // Spawn a bunch of thread that will try and take the value + let threads: Vec<thread::JoinHandle<()>> = (0..8).map(|_| { + let shared_atom = shared_atom.clone(); + thread::spawn(move || { + // Take the contents of the atom, only one will win the race + if let Some(v) = shared_atom.take() { + println!("I got it: {:?} :D", v); + } else { + println!("I did not get it :("); + } + }) + }).collect(); + + // join the threads + for t in threads { t.join().unwrap(); } +} +``` + +The result will look something like this: +``` +I did not get it :( +I got it: 75 :D +I did not get it :( +I did not get it :( +I did not get it :( +I did not get it :( +I did not get it :( +I did not get it :( +``` + +Using an `Atom` has some advantages over using a raw `AtomicPtr`. First, you don't need any +unsafe code in order to convert the `Box<T>` to and from a `Box<T>`, the library handles that for +you. Secondly, `Atom` implements `drop` so you won't accidentally leak a pointer when dropping +your data structure. + +AtomSetOnce +----------- + +This is an additional bit of abstraction around an Atom. Recall that I said `load` was unsafe +unless you have additional restrictions. `AtomSetOnce` as the name indicates may only be +set once, and then it may never be unset. We know that if the `Atom` is set the pointer will be +valid for the lifetime of the `Atom`. This means we can implement `Deref` in a safe way. + +Take a look at the `fifo` example to see how this can be used to write a lock-free linked list. +
diff --git a/third_party/rust/atom/src/lib.rs b/third_party/rust/atom/src/lib.rs new file mode 100644 index 000000000000..37521216b0f5 --- /dev/null +++ b/third_party/rust/atom/src/lib.rs @@ -0,0 +1,340 @@ + + + + + + + + + + + + + + +use std::fmt::{self, Debug, Formatter}; +use std::marker::PhantomData; +use std::mem; +use std::ops::Deref; +use std::ptr; +use std::sync::atomic::AtomicPtr; +use std::sync::atomic::Ordering; +use std::sync::Arc; + + + +pub struct Atom<P> +where + P: IntoRawPtr + FromRawPtr, +{ + inner: AtomicPtr<()>, + data: PhantomData<P>, +} + +impl<P> Debug for Atom<P> +where + P: IntoRawPtr + FromRawPtr, +{ + fn fmt(&self, f: &mut Formatter) -> Result<(), fmt::Error> { + write!(f, "atom({:?})", self.inner.load(Ordering::Relaxed)) + } +} + +impl<P> Atom<P> +where + P: IntoRawPtr + FromRawPtr, +{ + + pub fn empty() -> Atom<P> { + Atom { + inner: AtomicPtr::new(ptr::null_mut()), + data: PhantomData, + } + } + + + pub fn new(value: P) -> Atom<P> { + Atom { + inner: AtomicPtr::new(unsafe { value.into_raw() }), + data: PhantomData, + } + } + + + + pub fn swap(&self, v: P) -> Option<P> { + let new = unsafe { v.into_raw() }; + let old = self.inner.swap(new, Ordering::AcqRel); + if !old.is_null() { + Some(unsafe { FromRawPtr::from_raw(old) }) + } else { + None + } + } + + + + + pub fn take(&self) -> Option<P> { + let old = self.inner.swap(ptr::null_mut(), Ordering::Acquire); + if !old.is_null() { + Some(unsafe { FromRawPtr::from_raw(old) }) + } else { + None + } + } + + + + + + pub fn set_if_none(&self, v: P) -> Option<P> { + let new = unsafe { v.into_raw() }; + let old = self.inner + .compare_and_swap(ptr::null_mut(), new, Ordering::Release); + if !old.is_null() { + Some(unsafe { FromRawPtr::from_raw(new) }) + } else { + None + } + } + + + + + + pub fn replace_and_set_next(&self, mut value: P) -> bool + where + P: GetNextMut<NextPtr = Option<P>>, + { + unsafe { + let next = value.get_next() as *mut Option<P>; + let raw = value.into_raw(); + + + drop(ptr::read(next)); + loop { + let pcurrent = self.inner.load(Ordering::Relaxed); + let current = if pcurrent.is_null() { + None + } else { + Some(FromRawPtr::from_raw(pcurrent)) + }; + ptr::write(next, current); + let last = self.inner.compare_and_swap(pcurrent, raw, Ordering::AcqRel); + if last == pcurrent { + return last.is_null(); + } + } + } + } + + + + + pub fn is_none(&self) -> bool { + self.inner.load(Ordering::Relaxed).is_null() + } +} + +impl<P> Drop for Atom<P> +where + P: IntoRawPtr + FromRawPtr, +{ + fn drop(&mut self) { + unsafe { + let ptr = self.inner.load(Ordering::Relaxed); + if !ptr.is_null() { + let _: P = FromRawPtr::from_raw(ptr); + } + } + } +} + +unsafe impl<P> Send for Atom<P> +where + P: IntoRawPtr + FromRawPtr, +{ +} +unsafe impl<P> Sync for Atom<P> +where + P: IntoRawPtr + FromRawPtr, +{ +} + + +pub trait IntoRawPtr { + unsafe fn into_raw(self) -> *mut (); +} + + +pub trait FromRawPtr { + unsafe fn from_raw(ptr: *mut ()) -> Self; +} + +impl<T> IntoRawPtr for Box<T> { + #[inline] + unsafe fn into_raw(self) -> *mut () { + Box::into_raw(self) as *mut () + } +} + +impl<T> FromRawPtr for Box<T> { + #[inline] + unsafe fn from_raw(ptr: *mut ()) -> Box<T> { + Box::from_raw(ptr as *mut T) + } +} + +impl<T> IntoRawPtr for Arc<T> { + #[inline] + unsafe fn into_raw(self) -> *mut () { + Arc::into_raw(self) as *mut T as *mut () + } +} + +impl<T> FromRawPtr for Arc<T> { + #[inline] + unsafe fn from_raw(ptr: *mut ()) -> Arc<T> { + Arc::from_raw(ptr as *const () as *const T) + } +} + + +#[inline] +unsafe fn copy_lifetime<'a, S: ?Sized, T: ?Sized + 'a>(_ptr: &'a S, ptr: &T) -> &'a T { + mem::transmute(ptr) +} + + +#[inline] +unsafe fn copy_mut_lifetime<'a, S: ?Sized, T: ?Sized + 'a>(_ptr: &'a S, ptr: &mut T) -> &'a mut T { + mem::transmute(ptr) +} + + + + + + +#[derive(Debug)] +pub struct AtomSetOnce<P> +where + P: IntoRawPtr + FromRawPtr, +{ + inner: Atom<P>, +} + +impl<P> AtomSetOnce<P> +where + P: IntoRawPtr + FromRawPtr, +{ + + pub fn empty() -> AtomSetOnce<P> { + AtomSetOnce { + inner: Atom::empty(), + } + } + + + pub fn new(value: P) -> AtomSetOnce<P> { + AtomSetOnce { + inner: Atom::new(value), + } + } + + + + + + pub fn set_if_none(&self, v: P) -> Option<P> { + self.inner.set_if_none(v) + } + + + pub fn into_atom(self) -> Atom<P> { + self.inner + } + + + pub fn atom(&mut self) -> &mut Atom<P> { + &mut self.inner + } + + + + + pub fn is_none(&self) -> bool { + self.inner.is_none() + } +} + +impl<T, P> AtomSetOnce<P> +where + P: IntoRawPtr + FromRawPtr + Deref<Target = T>, +{ + + pub fn get<'a>(&'a self) -> Option<&'a T> { + let ptr = self.inner.inner.load(Ordering::Acquire); + if ptr.is_null() { + None + } else { + unsafe { + + + let v: P = FromRawPtr::from_raw(ptr); + let out = copy_lifetime(self, &*v); + mem::forget(v); + Some(out) + } + } + } +} + +impl<T> AtomSetOnce<Box<T>> { + + pub fn get_mut<'a>(&'a mut self) -> Option<&'a mut T> { + let ptr = self.inner.inner.load(Ordering::Acquire); + if ptr.is_null() { + None + } else { + unsafe { + + + let mut v: Box<T> = FromRawPtr::from_raw(ptr); + let out = copy_mut_lifetime(self, &mut *v); + mem::forget(v); + Some(out) + } + } + } +} + +impl<T> AtomSetOnce<T> +where + T: Clone + IntoRawPtr + FromRawPtr, +{ + + pub fn dup<'a>(&self) -> Option<T> { + let ptr = self.inner.inner.load(Ordering::Acquire); + if ptr.is_null() { + None + } else { + unsafe { + + + let v: T = FromRawPtr::from_raw(ptr); + let out = v.clone(); + mem::forget(v); + Some(out) + } + } + } +} + + + +pub trait GetNextMut { + type NextPtr; + fn get_next(&mut self) -> &mut Self::NextPtr; +}
diff --git a/third_party/rust/atom/tests/atom.rs b/third_party/rust/atom/tests/atom.rs new file mode 100644 index 000000000000..cb3ca70b37b0 --- /dev/null +++ b/third_party/rust/atom/tests/atom.rs @@ -0,0 +1,189 @@ + + + + + + + + + + + + + + +extern crate atom; + +use atom::*; +use std::sync::atomic::AtomicUsize; +use std::sync::atomic::Ordering; +use std::sync::*; +use std::thread; + +#[test] +fn swap() { + let a = Atom::empty(); + assert_eq!(a.swap(Box::new(1u8)), None); + assert_eq!(a.swap(Box::new(2u8)), Some(Box::new(1u8))); + assert_eq!(a.swap(Box::new(3u8)), Some(Box::new(2u8))); +} + +#[test] +fn take() { + let a = Atom::new(Box::new(7u8)); + assert_eq!(a.take(), Some(Box::new(7))); + assert_eq!(a.take(), None); +} + +#[test] +fn set_if_none() { + let a = Atom::empty(); + assert_eq!(a.set_if_none(Box::new(7u8)), None); + assert_eq!(a.set_if_none(Box::new(8u8)), Some(Box::new(8u8))); +} + +#[derive(Clone)] +struct Canary(Arc<AtomicUsize>); + +impl Drop for Canary { + fn drop(&mut self) { + self.0.fetch_add(1, Ordering::SeqCst); + } +} + +#[test] +fn ensure_drop() { + let v = Arc::new(AtomicUsize::new(0)); + let a = Box::new(Canary(v.clone())); + let a = Atom::new(a); + assert_eq!(v.load(Ordering::SeqCst), 0); + drop(a); + assert_eq!(v.load(Ordering::SeqCst), 1); +} + +#[test] +fn ensure_drop_arc() { + let v = Arc::new(AtomicUsize::new(0)); + let a = Arc::new(Canary(v.clone())); + let a = Atom::new(a); + assert_eq!(v.load(Ordering::SeqCst), 0); + drop(a); + assert_eq!(v.load(Ordering::SeqCst), 1); +} + +#[test] +fn ensure_send() { + let atom = Arc::new(Atom::empty()); + let wait = Arc::new(Barrier::new(2)); + + let w = wait.clone(); + let a = atom.clone(); + thread::spawn(move || { + a.swap(Box::new(7u8)); + w.wait(); + }); + + wait.wait(); + assert_eq!(atom.take(), Some(Box::new(7u8))); +} + +#[test] +fn get() { + let atom = Arc::new(AtomSetOnce::empty()); + assert_eq!(atom.get(), None); + assert_eq!(atom.set_if_none(Box::new(8u8)), None); + assert_eq!(atom.get(), Some(&8u8)); +} + +#[test] +fn get_arc() { + let atom = Arc::new(AtomSetOnce::empty()); + assert_eq!(atom.get(), None); + assert_eq!(atom.set_if_none(Arc::new(8u8)), None); + assert_eq!(atom.get(), Some(&8u8)); + + let v = Arc::new(AtomicUsize::new(0)); + let atom = Arc::new(AtomSetOnce::empty()); + atom.get(); + atom.set_if_none(Arc::new(Canary(v.clone()))); + atom.get(); + drop(atom); + + assert_eq!(v.load(Ordering::SeqCst), 1); +} + +#[derive(Debug)] +struct Link { + next: 
Option<Box<Link>>, + value: u32, +} + +impl Link { + fn new(v: u32) -> Box<Link> { + Box::new(Link { + next: None, + value: v, + }) + } +} + +impl GetNextMut for Box<Link> { + type NextPtr = Option<Box<Link>>; + fn get_next(&mut self) -> &mut Option<Box<Link>> { + &mut self.next + } +} + +#[test] +fn lifo() { + let atom = Atom::empty(); + for i in 0..100 { + let x = atom.replace_and_set_next(Link::new(99 - i)); + assert_eq!(x, i == 0); + } + + let expected: Vec<u32> = (0..100).collect(); + let mut found = Vec::new(); + let mut chain = atom.take(); + while let Some(v) = chain { + found.push(v.value); + chain = v.next; + } + assert_eq!(expected, found); +} + +#[allow(dead_code)] +struct LinkCanary { + next: Option<Box<LinkCanary>>, + value: Canary, +} + +impl LinkCanary { + fn new(v: Canary) -> Box<LinkCanary> { + Box::new(LinkCanary { + next: None, + value: v, + }) + } +} + +impl GetNextMut for Box<LinkCanary> { + type NextPtr = Option<Box<LinkCanary>>; + fn get_next(&mut self) -> &mut Option<Box<LinkCanary>> { + &mut self.next + } +} + +#[test] +fn lifo_drop() { + let v = Arc::new(AtomicUsize::new(0)); + let canary = Canary(v.clone()); + let mut link = LinkCanary::new(canary.clone()); + link.next = Some(LinkCanary::new(canary.clone())); + + let atom = Atom::empty(); + atom.replace_and_set_next(link); + assert_eq!(1, v.load(Ordering::SeqCst)); + drop(atom); + assert_eq!(2, v.load(Ordering::SeqCst)); +}
diff --git a/third_party/rust/backtrace/.cargo-checksum.json b/third_party/rust/backtrace/.cargo-checksum.json index aa671f00d2c6..b9a8a3284ac1 100644 --- a/third_party/rust/backtrace/.cargo-checksum.json +++ b/third_party/rust/backtrace/.cargo-checksum.json @@ -1 +1 @@ -{"files":{"Cargo.toml":"8f2c15cc33e55c532bef00c06823eb9d06676b0e674f330cf78c6bbdf957ab21","LICENSE-APACHE":"a60eea817514531668d7e00765731449fe14d059d3249e0bc93b36de45f759f2","LICENSE-MIT":"378f5840b258e2779c39418f3f2d7b2ba96f1c7917dd6be0713f88305dbda397","README.md":"914767b814ee5c5d794468902d1863649a8cfec36072df81884d396580e9748a","appveyor.yml":"568f76b9e68b862e3a21c64ce34894ff5e753c6019f45de27df0335809420030","ci/android-ndk.sh":"89fafa41d08ff477f949bfc163d04d1eb34fdee370f7a695cfba4ef34c164a55","ci/docker/aarch64-linux-android/Dockerfile":"c97f23fe2892f406d3deb7479c89e1c1dbbdbd0db456ac699f9399852300348d","ci/docker/aarch64-unknown-linux-gnu/Dockerfile":"97fa8f20c6899ee36b47371d485b64a2e96b626a2746b5f434c01eae9168b2a1","ci/docker/arm-linux-androideabi/Dockerfile":"11f6963365de062cf0ac81debec00aee29932df2714d8505f9f6383722d211a8","ci/docker/arm-unknown-linux-gnueabihf/Dockerfile":"41133d712ef13f05e67796857db86476b3ed9c6355d5eb56115575b06d739e04","ci/docker/armv7-linux-androideabi/Dockerfile":"39038d17a423683e0af27a050b34dea610729fb0085814ec6c81726a7f52556f","ci/docker/armv7-unknown-linux-gnueabihf/Dockerfile":"2a216244baad705491f249278083994bf68744a2759f51e768b2f92e0da7c360","ci/docker/i586-unknown-linux-gnu/Dockerfile":"ef898c914590d721488ca11e62f3d7c26852346e1612deb0f3e12ab011187109","ci/docker/i686-linux-android/Dockerfile":"9181e5fe5503077652659bc3353c3c21fcf4fc6b03730430fb40d6adc3096079","ci/docker/i686-unknown-linux-gnu/Dockerfile":"ef898c914590d721488ca11e62f3d7c26852346e1612deb0f3e12ab011187109","ci/docker/powerpc-unknown-linux-gnu/Dockerfile":"83e0e3adbb2d6f2398e70d2c8f71ee301fe99e24554f902602c2f2bb067c2f2c","ci/docker/powerpc64-unknown-linux-gnu/Dockerfile":"828b657e1748bcd8d331794624d5fc1cd07087a051e507eb9206757985194bf1","ci/docker/x86_64-linux-android/Dockerfile":"074bb2906ba587466490ab9d802eb817b9f23eb54aa095ee53e1a33be5569328","ci/docker/x86_64-pc-windows-gnu/Dockerfile":"0822e270108ec39a6b93721598156031a0469ed680e62ce4acd13bbb
1a952b9d","ci/docker/x86_64-unknown-linux-gnu/Dockerfile":"e1c202a6831b17e017b4737e80d5b992905895b086bbc06285fc9c337cadbc23","ci/docker/x86_64-unknown-linux-musl/Dockerfile":"2efbba08cc8fff8d2431dde92517df7d122dc754c778820c668f1ac29a885290","ci/run-docker.sh":"517db62fa790712734a1410b27995134ec88c613a0cae548382fb0d3f0b55080","ci/run.sh":"30a3807c195cd86d8b8884e1228cd061aa112b26c54277beebf5108777a36fe9","examples/backtrace.rs":"fd6e1cc6c3378ec7d41cd03b2bef187051298dceb01147e71f207dbb8a0c4867","examples/raw.rs":"f07be26d1f97cd7ac79290ac99d19c4eec5d27031fe270ab5364c25d9c2ad9e0","src/backtrace/dbghelp.rs":"d052fa4bcb4f3c012e0066d01c18d89a9c0003a6e022ebdca5a03bf09ab7a973","src/backtrace/libunwind.rs":"cc9cdc1d389571cdedf43dfc2d39b8c3af85531a3965ed700c724f436afb213e","src/backtrace/mod.rs":"91a544bd9e89da6b580e2580ab15ead354f13243bca50516ff5cefe68a8cd199","src/backtrace/noop.rs":"dc4a6602e9852b945c382194402314d3d68c8ca90199af9a8159419fb91a3c99","src/backtrace/unix_backtrace.rs":"31204989a8852428792a1c99d36717559aad14d93526e8a37744214adf188268","src/capture.rs":"a6f379300f6a578c52fce5927461fb0d084b2eb080113561a2e0cc11aa1f5c73","src/dylib.rs":"09f3d7f32849cf0daa4de9df48f8e4a4d5ba62e20723c79578201bd271dc4777","src/lib.rs":"e0176033b10579b02228f8860a4beb684fa4c246dc6225425ebe8897c662b589","src/symbolize/coresymbolication.rs":"95c7dab3e65dd7217de5dd22cd550192c1505dfada56040197675ea3b9b380f1","src/symbolize/dbghelp.rs":"6bf7c3cc9542e4084aca417b67af25da0d0caa7df83787e92046f5918d32e9d8","src/symbolize/dladdr.rs":"8287cbca440a9e92e74d88c5a7b920f6b4cf6d8f50bc8b0f61aca5ba42d5b5ec","src/symbolize/gimli.rs":"c385d4ac9a2c87c1eddf5a999bb17d46ff400026766e8c8b1fef7afc747a19e5","src/symbolize/libbacktrace.rs":"0cdad7de2501baef9da193ee6aab21c453d26348a2071c805a133efe1209eaa1","src/symbolize/mod.rs":"2fcf4a6c8319d886e03f7a45fbb25d7e35c4c6021ae3d49d243ce901f213e5c9","src/symbolize/noop.rs":"b622fcecb4e22b42c3d3e2ef5dc5a6ab14601fec83c7797ee1fbbacc12fe6ca1","tests/long_fn_name.rs":"a59eebef3e9403a566b2cdcb7c76e3237675883fa018baca6fe55801f5d11b80","tests/smoke.rs":"f3c03fc5d31281f6a08232814a7b1ca74f514014f0f8098cb014d6e7d7eb6541"},"package":"89a47830402e9981c5c41223151efcced65a0510c13097c769cede7efb34782a"} \ No newline at end of file 
+{"files":{"Cargo.lock":"6996b0d930b1487b87297a3e7b09e51ba63767a259429b63d3f7e1ea27c07e0f","Cargo.toml":"e53541a24f26ba6f7fc4339b1b467adb3b012f12d1196f0f73add7958b4838f0","LICENSE-APACHE":"a60eea817514531668d7e00765731449fe14d059d3249e0bc93b36de45f759f2","LICENSE-MIT":"378f5840b258e2779c39418f3f2d7b2ba96f1c7917dd6be0713f88305dbda397","README.md":"5038903e047ac0287b2a16218109171c99a8b67242bdeaff52db51bd60b68fb0","benches/benchmarks.rs":"ce5763576fa8aeb722d3b8bcdb294d8a31e80e3c5272da25525fdcc392ccc6cc","ci/android-ndk.sh":"89fafa41d08ff477f949bfc163d04d1eb34fdee370f7a695cfba4ef34c164a55","ci/android-sdk.sh":"3269ef90675e8d4c2031e542cd3a55d9e060b2a094105cde982b282185936dda","ci/docker/aarch64-linux-android/Dockerfile":"c97f23fe2892f406d3deb7479c89e1c1dbbdbd0db456ac699f9399852300348d","ci/docker/aarch64-unknown-linux-gnu/Dockerfile":"97fa8f20c6899ee36b47371d485b64a2e96b626a2746b5f434c01eae9168b2a1","ci/docker/arm-linux-androideabi/Dockerfile":"3fe712b0927fd8fb05c1f733f0bf0d8a139bfc5a0108ba5515bd6b499976f1ab","ci/docker/arm-unknown-linux-gnueabihf/Dockerfile":"41133d712ef13f05e67796857db86476b3ed9c6355d5eb56115575b06d739e04","ci/docker/armv7-linux-androideabi/Dockerfile":"39038d17a423683e0af27a050b34dea610729fb0085814ec6c81726a7f52556f","ci/docker/armv7-unknown-linux-gnueabihf/Dockerfile":"2a216244baad705491f249278083994bf68744a2759f51e768b2f92e0da7c360","ci/docker/i586-unknown-linux-gnu/Dockerfile":"ef898c914590d721488ca11e62f3d7c26852346e1612deb0f3e12ab011187109","ci/docker/i686-linux-android/Dockerfile":"9181e5fe5503077652659bc3353c3c21fcf4fc6b03730430fb40d6adc3096079","ci/docker/i686-unknown-linux-gnu/Dockerfile":"ef898c914590d721488ca11e62f3d7c26852346e1612deb0f3e12ab011187109","ci/docker/powerpc64-unknown-linux-gnu/Dockerfile":"828b657e1748bcd8d331794624d5fc1cd07087a051e507eb9206757985194bf1","ci/docker/x86_64-linux-android/Dockerfile":"074bb2906ba587466490ab9d802eb817b9f23eb54aa095ee53e1a33be5569328","ci/docker/x86_64-pc-windows-gnu/Dockerfile":"0822e270108ec39a6b93721598156031a0469ed680e62ce4acd13bbb1a952b9d","ci/docker/x86_64-unknown-linux-gnu/Dockerfile":"e1c202a6831b17e017b4737e80d5b992905895b086bbc06285fc9c337cadbc23","ci/docker/x86_64-unknown-linux-musl/Dockerfile":"2efbba08cc8fff8d2431dde92517df7d122dc754c778820c668f1ac29a885290","ci/run-docker.sh":"c4efad39c65a38c89e90d61a9ef15136903dd4cf01a6ae8079aa005b882d751e","ci/run.sh":"30a3807c195cd86d8b8884e1228cd061aa112b26c54277beebf5108777a36fe9","ci/runtest-android.rs":"be2e49bb296b92a8e3643a6c0a70917fe7b130fa43b71a29c4e7514f45c00e7e","examples/backtrace.rs":"fd6e1cc6c3378ec7d41cd03b2bef187051298dceb01147e71f207dbb8a0c4867","examples/raw.rs":"81fb66bd0f71010f9a954f667d92bf792cfd1ac90fa3a09c362767d866dc8374","src/backtrace/dbghelp.rs":"e1389870f4376538da2aa71264cee00ad1c34b424dba892a04f76bf267997d4a","src/backtrace/libunwind.rs":"8bf3183c251fabafd2c6d78be60c32345c5a819e0c5a13738220449547fc95c7","src/backtrace/mod.rs":"68a864fa0a76803df67c9e0af6213d445b28f675982fa41a5bdbe9bcc2c267c9","src/backtrace/noop.rs":"9d9a93a890cef988b691232d9d1e7d114768ada58e9591345826135cb380f01a","src/backtrace/unix_backtrace.rs":"293c6ac31e9eb8e32dc7d219f4cf5dbeb9b66bdc42dcc949d1de63233dea2666","src/capture.rs":"c7c9f0a55312c9687d43118b69d4416d9c8eac1f7eaeb542807bc5e23bc18ce0","src/dbghelp.rs":"c33788eeef29bb78a1cdd7cf7f3e034b82d3686ba87d76a920692db19b26212b","src/lib.rs":"efcdd67622dd654906ae74b249f7ce785582b29fde2f0da8dea1fb398fd80a48","src/print.rs":"4eb27182e12ad02fb1dba6ee4052c29a1a61b156d0c40b48375a5bab001f2097","src/print/fuchsia.rs":"fcb177101037302536
b590f5f14ec9ff72aca06199dedcd1091baa07a1638c10","src/symbolize/coresymbolication.rs":"f5442333d8c58188fe3c7749783336bd2e096ed9940d77c19ce4295dd592fb0b","src/symbolize/dbghelp.rs":"649d5ff1656e8fd84ab1088bb48054f6ea7cd0c50a09af09f72d562c17e52b6b","src/symbolize/dladdr.rs":"4b420b07f345f7c4e545c43f68d1b2d1f419a9228712097f35eae875b182b349","src/symbolize/dladdr_resolve.rs":"ec3bc5c8462cce48e97136e4867dd08a5f1b0c133e45d511745c1c3197e78405","src/symbolize/gimli.rs":"eaf81e6e0b3394a052219dbce8ec406a0065c989da2c6e379864112b529c92c0","src/symbolize/libbacktrace.rs":"4bd53245666f7651d45dbf6c9b44280c4254d00b808a2caa2ec318bb314ba5e5","src/symbolize/mod.rs":"aec85aee8ff3e00a532f7d1ab8739802d6e2df369aecfe045fe6d8b6a4004c11","src/symbolize/noop.rs":"a8581b117e1065236c12f0a4a4c49f4c1929c4e6a3283f2253bb1fd0c77e4f7a","src/types.rs":"85dac420ee8791281f4cdec78e86be056c84ffecc0e5e5821d91862fc0e23444","src/windows.rs":"d6ae52faf9005251bd0d977fdd363e38f0d56e4c034a5fea3982538f743c6dab","tests/accuracy/auxiliary.rs":"8bcf4a3a15ebed82f878c440910a07866efbc5eacea8b74f482dae297c779226","tests/accuracy/main.rs":"db2870e7fa51df01a5176e59bd87d205d83cad1599f374f273e11baf5efd41bb","tests/concurrent-panics.rs":"7c37df054c57c0c6694265bcaa596c1418a90cce65a569699e1b0c56bcd4bdfa","tests/long_fn_name.rs":"ca2480a72e8d2c02c678f047c202cdec7efafeb2fb32d94a327c669b7c81807a","tests/skip_inner_frames.rs":"b5c9137723c8648da46396c4c631e4b7d9933cac33397ae3da588a4692322212","tests/smoke.rs":"0330093fa76bd83851f2b8efb37330ce1c1c48d2be99313074d9c727491377f7"},"package":"690a62be8920ccf773ee00ef0968649b0e724cda8bd5b12286302b4ae955fdf5"} \ No newline at end of file diff --git a/third_party/rust/backtrace/Cargo.lock b/third_party/rust/backtrace/Cargo.lock new file mode 100644 index 000000000000..307efa9e1572 --- /dev/null +++ b/third_party/rust/backtrace/Cargo.lock @@ -0,0 +1,392 @@ +# This file is automatically @generated by Cargo. +# It is not intended for manual editing. 
+[[package]] +name = "addr2line" +version = "0.10.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +dependencies = [ + "fallible-iterator 0.2.0 (registry+https://github.com/rust-lang/crates.io-index)", + "gimli 0.19.0 (registry+https://github.com/rust-lang/crates.io-index)", + "intervaltree 0.2.4 (registry+https://github.com/rust-lang/crates.io-index)", + "lazycell 1.2.1 (registry+https://github.com/rust-lang/crates.io-index)", + "smallvec 0.6.10 (registry+https://github.com/rust-lang/crates.io-index)", +] + +[[package]] +name = "arrayvec" +version = "0.4.11" +source = "registry+https://github.com/rust-lang/crates.io-index" +dependencies = [ + "nodrop 0.1.13 (registry+https://github.com/rust-lang/crates.io-index)", +] + +[[package]] +name = "backtrace" +version = "0.3.38" +dependencies = [ + "addr2line 0.10.0 (registry+https://github.com/rust-lang/crates.io-index)", + "backtrace-sys 0.1.31 (registry+https://github.com/rust-lang/crates.io-index)", + "cfg-if 0.1.8 (registry+https://github.com/rust-lang/crates.io-index)", + "compiler_builtins 0.1.19 (registry+https://github.com/rust-lang/crates.io-index)", + "cpp_demangle 0.2.12 (registry+https://github.com/rust-lang/crates.io-index)", + "findshlibs 0.5.0 (registry+https://github.com/rust-lang/crates.io-index)", + "goblin 0.0.24 (registry+https://github.com/rust-lang/crates.io-index)", + "libc 0.2.62 (registry+https://github.com/rust-lang/crates.io-index)", + "memmap 0.7.0 (registry+https://github.com/rust-lang/crates.io-index)", + "rustc-demangle 0.1.16 (registry+https://github.com/rust-lang/crates.io-index)", + "rustc-serialize 0.3.24 (registry+https://github.com/rust-lang/crates.io-index)", + "rustc-std-workspace-core 1.0.0 (registry+https://github.com/rust-lang/crates.io-index)", + "serde 1.0.101 (registry+https://github.com/rust-lang/crates.io-index)", + "winapi 0.3.8 (registry+https://github.com/rust-lang/crates.io-index)", +] + +[[package]] +name = "backtrace-sys" +version = "0.1.31" +source = "registry+https://github.com/rust-lang/crates.io-index" +dependencies = [ + "cc 1.0.45 (registry+https://github.com/rust-lang/crates.io-index)", + "compiler_builtins 0.1.19 (registry+https://github.com/rust-lang/crates.io-index)", + "libc 0.2.62 (registry+https://github.com/rust-lang/crates.io-index)", + "rustc-std-workspace-core 1.0.0 (registry+https://github.com/rust-lang/crates.io-index)", +] + +[[package]] +name = "byteorder" +version = "1.3.2" +source = "registry+https://github.com/rust-lang/crates.io-index" + +[[package]] +name = "cc" +version = "1.0.45" +source = "registry+https://github.com/rust-lang/crates.io-index" + +[[package]] +name = "cfg-if" +version = "0.1.8" +source = "registry+https://github.com/rust-lang/crates.io-index" +dependencies = [ + "compiler_builtins 0.1.19 (registry+https://github.com/rust-lang/crates.io-index)", + "rustc-std-workspace-core 1.0.0 (registry+https://github.com/rust-lang/crates.io-index)", +] + +[[package]] +name = "compiler_builtins" +version = "0.1.19" +source = "registry+https://github.com/rust-lang/crates.io-index" + +[[package]] +name = "cpp_demangle" +version = "0.2.12" +source = "registry+https://github.com/rust-lang/crates.io-index" +dependencies = [ + "cfg-if 0.1.8 (registry+https://github.com/rust-lang/crates.io-index)", + "glob 0.2.11 (registry+https://github.com/rust-lang/crates.io-index)", +] + +[[package]] +name = "fallible-iterator" +version = "0.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" + +[[package]] +name = "findshlibs" +version = "0.5.0" 
+source = "registry+https://github.com/rust-lang/crates.io-index" +dependencies = [ + "lazy_static 1.4.0 (registry+https://github.com/rust-lang/crates.io-index)", + "libc 0.2.62 (registry+https://github.com/rust-lang/crates.io-index)", +] + +[[package]] +name = "gimli" +version = "0.19.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +dependencies = [ + "arrayvec 0.4.11 (registry+https://github.com/rust-lang/crates.io-index)", + "byteorder 1.3.2 (registry+https://github.com/rust-lang/crates.io-index)", + "fallible-iterator 0.2.0 (registry+https://github.com/rust-lang/crates.io-index)", + "stable_deref_trait 1.1.1 (registry+https://github.com/rust-lang/crates.io-index)", +] + +[[package]] +name = "glob" +version = "0.2.11" +source = "registry+https://github.com/rust-lang/crates.io-index" + +[[package]] +name = "goblin" +version = "0.0.24" +source = "registry+https://github.com/rust-lang/crates.io-index" +dependencies = [ + "log 0.4.8 (registry+https://github.com/rust-lang/crates.io-index)", + "plain 0.2.3 (registry+https://github.com/rust-lang/crates.io-index)", + "scroll 0.9.2 (registry+https://github.com/rust-lang/crates.io-index)", +] + +[[package]] +name = "intervaltree" +version = "0.2.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +dependencies = [ + "smallvec 0.6.10 (registry+https://github.com/rust-lang/crates.io-index)", +] + +[[package]] +name = "lazy_static" +version = "1.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" + +[[package]] +name = "lazycell" +version = "1.2.1" +source = "registry+https://github.com/rust-lang/crates.io-index" + +[[package]] +name = "libc" +version = "0.2.62" +source = "registry+https://github.com/rust-lang/crates.io-index" +dependencies = [ + "rustc-std-workspace-core 1.0.0 (registry+https://github.com/rust-lang/crates.io-index)", +] + +[[package]] +name = "log" +version = "0.4.8" +source = "registry+https://github.com/rust-lang/crates.io-index" +dependencies = [ + "cfg-if 0.1.8 (registry+https://github.com/rust-lang/crates.io-index)", +] + +[[package]] +name = "memmap" +version = "0.7.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +dependencies = [ + "libc 0.2.62 (registry+https://github.com/rust-lang/crates.io-index)", + "winapi 0.3.8 (registry+https://github.com/rust-lang/crates.io-index)", +] + +[[package]] +name = "nodrop" +version = "0.1.13" +source = "registry+https://github.com/rust-lang/crates.io-index" + +[[package]] +name = "plain" +version = "0.2.3" +source = "registry+https://github.com/rust-lang/crates.io-index" + +[[package]] +name = "proc-macro2" +version = "0.4.30" +source = "registry+https://github.com/rust-lang/crates.io-index" +dependencies = [ + "unicode-xid 0.1.0 (registry+https://github.com/rust-lang/crates.io-index)", +] + +[[package]] +name = "proc-macro2" +version = "1.0.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +dependencies = [ + "unicode-xid 0.2.0 (registry+https://github.com/rust-lang/crates.io-index)", +] + +[[package]] +name = "quote" +version = "0.6.13" +source = "registry+https://github.com/rust-lang/crates.io-index" +dependencies = [ + "proc-macro2 0.4.30 (registry+https://github.com/rust-lang/crates.io-index)", +] + +[[package]] +name = "quote" +version = "1.0.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +dependencies = [ + "proc-macro2 1.0.4 (registry+https://github.com/rust-lang/crates.io-index)", +] + +[[package]] +name = "rustc-demangle" +version = "0.1.16" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +dependencies = [ + "compiler_builtins 0.1.19 (registry+https://github.com/rust-lang/crates.io-index)", + "rustc-std-workspace-core 1.0.0 (registry+https://github.com/rust-lang/crates.io-index)", +] + +[[package]] +name = "rustc-serialize" +version = "0.3.24" +source = "registry+https://github.com/rust-lang/crates.io-index" + +[[package]] +name = "rustc-std-workspace-core" +version = "1.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" + +[[package]] +name = "rustc_version" +version = "0.2.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +dependencies = [ + "semver 0.9.0 (registry+https://github.com/rust-lang/crates.io-index)", +] + +[[package]] +name = "scroll" +version = "0.9.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +dependencies = [ + "rustc_version 0.2.3 (registry+https://github.com/rust-lang/crates.io-index)", + "scroll_derive 0.9.5 (registry+https://github.com/rust-lang/crates.io-index)", +] + +[[package]] +name = "scroll_derive" +version = "0.9.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +dependencies = [ + "proc-macro2 0.4.30 (registry+https://github.com/rust-lang/crates.io-index)", + "quote 0.6.13 (registry+https://github.com/rust-lang/crates.io-index)", + "syn 0.15.44 (registry+https://github.com/rust-lang/crates.io-index)", +] + +[[package]] +name = "semver" +version = "0.9.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +dependencies = [ + "semver-parser 0.7.0 (registry+https://github.com/rust-lang/crates.io-index)", +] + +[[package]] +name = "semver-parser" +version = "0.7.0" +source = "registry+https://github.com/rust-lang/crates.io-index" + +[[package]] +name = "serde" +version = "1.0.101" +source = "registry+https://github.com/rust-lang/crates.io-index" +dependencies = [ + "serde_derive 1.0.101 (registry+https://github.com/rust-lang/crates.io-index)", +] + +[[package]] +name = "serde_derive" +version = "1.0.101" +source = "registry+https://github.com/rust-lang/crates.io-index" +dependencies = [ + "proc-macro2 1.0.4 (registry+https://github.com/rust-lang/crates.io-index)", + "quote 1.0.2 (registry+https://github.com/rust-lang/crates.io-index)", + "syn 1.0.5 (registry+https://github.com/rust-lang/crates.io-index)", +] + +[[package]] +name = "smallvec" +version = "0.6.10" +source = "registry+https://github.com/rust-lang/crates.io-index" + +[[package]] +name = "stable_deref_trait" +version = "1.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" + +[[package]] +name = "syn" +version = "0.15.44" +source = "registry+https://github.com/rust-lang/crates.io-index" +dependencies = [ + "proc-macro2 0.4.30 (registry+https://github.com/rust-lang/crates.io-index)", + "quote 0.6.13 (registry+https://github.com/rust-lang/crates.io-index)", + "unicode-xid 0.1.0 (registry+https://github.com/rust-lang/crates.io-index)", +] + +[[package]] +name = "syn" +version = "1.0.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +dependencies = [ + "proc-macro2 1.0.4 (registry+https://github.com/rust-lang/crates.io-index)", + "quote 1.0.2 (registry+https://github.com/rust-lang/crates.io-index)", + "unicode-xid 0.2.0 (registry+https://github.com/rust-lang/crates.io-index)", +] + +[[package]] +name = "unicode-xid" +version = "0.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" + +[[package]] +name = "unicode-xid" +version = "0.2.0" +source = 
"registry+https://github.com/rust-lang/crates.io-index" + +[[package]] +name = "winapi" +version = "0.3.8" +source = "registry+https://github.com/rust-lang/crates.io-index" +dependencies = [ + "winapi-i686-pc-windows-gnu 0.4.0 (registry+https://github.com/rust-lang/crates.io-index)", + "winapi-x86_64-pc-windows-gnu 0.4.0 (registry+https://github.com/rust-lang/crates.io-index)", +] + +[[package]] +name = "winapi-i686-pc-windows-gnu" +version = "0.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" + +[[package]] +name = "winapi-x86_64-pc-windows-gnu" +version = "0.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" + +[metadata] +"checksum addr2line 0.10.0 (registry+https://github.com/rust-lang/crates.io-index)" = "95b06ae5a8a3bae54910c9029a52f83203ce2001c71b10b1faae3a337fee4ab5" +"checksum arrayvec 0.4.11 (registry+https://github.com/rust-lang/crates.io-index)" = "b8d73f9beda665eaa98ab9e4f7442bd4e7de6652587de55b2525e52e29c1b0ba" +"checksum backtrace-sys 0.1.31 (registry+https://github.com/rust-lang/crates.io-index)" = "82a830b4ef2d1124a711c71d263c5abdc710ef8e907bd508c88be475cebc422b" +"checksum byteorder 1.3.2 (registry+https://github.com/rust-lang/crates.io-index)" = "a7c3dd8985a7111efc5c80b44e23ecdd8c007de8ade3b96595387e812b957cf5" +"checksum cc 1.0.45 (registry+https://github.com/rust-lang/crates.io-index)" = "4fc9a35e1f4290eb9e5fc54ba6cf40671ed2a2514c3eeb2b2a908dda2ea5a1be" +"checksum cfg-if 0.1.8 (registry+https://github.com/rust-lang/crates.io-index)" = "89431bba4e6b7092fb5fcd00a6f6ca596c55cc26b2f1e6dcdd08a1f4933f66b2" +"checksum compiler_builtins 0.1.19 (registry+https://github.com/rust-lang/crates.io-index)" = "4e32b9fc11fdb3aefbd0a4761a8d3a2b7419608b759fa14a26525df4ea5deaba" +"checksum cpp_demangle 0.2.12 (registry+https://github.com/rust-lang/crates.io-index)" = "d30c0c4ba59fc4951d15cce1b9ba3b448a2b5d601964768fe7cd69c09cc69028" +"checksum fallible-iterator 0.2.0 (registry+https://github.com/rust-lang/crates.io-index)" = "4443176a9f2c162692bd3d352d745ef9413eec5782a80d8fd6f8a1ac692a07f7" +"checksum findshlibs 0.5.0 (registry+https://github.com/rust-lang/crates.io-index)" = "b1260d61e4fe2a6ab845ffdc426a0bd68ffb240b91cf0ec5a8d1170cec535bd8" +"checksum gimli 0.19.0 (registry+https://github.com/rust-lang/crates.io-index)" = "162d18ae5f2e3b90a993d202f1ba17a5633c2484426f8bcae201f86194bacd00" +"checksum glob 0.2.11 (registry+https://github.com/rust-lang/crates.io-index)" = "8be18de09a56b60ed0edf84bc9df007e30040691af7acd1c41874faac5895bfb" +"checksum goblin 0.0.24 (registry+https://github.com/rust-lang/crates.io-index)" = "e3fa261d919c1ae9d1e4533c4a2f99e10938603c4208d56c05bec7a872b661b0" +"checksum intervaltree 0.2.4 (registry+https://github.com/rust-lang/crates.io-index)" = "af39074dd8d5eff756ddea3d8f34c7ae287d4dadb6f29fb1b67ca6b3f5036482" +"checksum lazy_static 1.4.0 (registry+https://github.com/rust-lang/crates.io-index)" = "e2abad23fbc42b3700f2f279844dc832adb2b2eb069b2df918f455c4e18cc646" +"checksum lazycell 1.2.1 (registry+https://github.com/rust-lang/crates.io-index)" = "b294d6fa9ee409a054354afc4352b0b9ef7ca222c69b8812cbea9e7d2bf3783f" +"checksum libc 0.2.62 (registry+https://github.com/rust-lang/crates.io-index)" = "34fcd2c08d2f832f376f4173a231990fa5aef4e99fb569867318a227ef4c06ba" +"checksum log 0.4.8 (registry+https://github.com/rust-lang/crates.io-index)" = "14b6052be84e6b71ab17edffc2eeabf5c2c3ae1fdb464aae35ac50c67a44e1f7" +"checksum memmap 0.7.0 (registry+https://github.com/rust-lang/crates.io-index)" = 
"6585fd95e7bb50d6cc31e20d4cf9afb4e2ba16c5846fc76793f11218da9c475b" +"checksum nodrop 0.1.13 (registry+https://github.com/rust-lang/crates.io-index)" = "2f9667ddcc6cc8a43afc9b7917599d7216aa09c463919ea32c59ed6cac8bc945" +"checksum plain 0.2.3 (registry+https://github.com/rust-lang/crates.io-index)" = "b4596b6d070b27117e987119b4dac604f3c58cfb0b191112e24771b2faeac1a6" +"checksum proc-macro2 0.4.30 (registry+https://github.com/rust-lang/crates.io-index)" = "cf3d2011ab5c909338f7887f4fc896d35932e29146c12c8d01da6b22a80ba759" +"checksum proc-macro2 1.0.4 (registry+https://github.com/rust-lang/crates.io-index)" = "afdc77cc74ec70ed262262942ebb7dac3d479e9e5cfa2da1841c0806f6cdabcc" +"checksum quote 0.6.13 (registry+https://github.com/rust-lang/crates.io-index)" = "6ce23b6b870e8f94f81fb0a363d65d86675884b34a09043c81e5562f11c1f8e1" +"checksum quote 1.0.2 (registry+https://github.com/rust-lang/crates.io-index)" = "053a8c8bcc71fcce321828dc897a98ab9760bef03a4fc36693c231e5b3216cfe" +"checksum rustc-demangle 0.1.16 (registry+https://github.com/rust-lang/crates.io-index)" = "4c691c0e608126e00913e33f0ccf3727d5fc84573623b8d65b2df340b5201783" +"checksum rustc-serialize 0.3.24 (registry+https://github.com/rust-lang/crates.io-index)" = "dcf128d1287d2ea9d80910b5f1120d0b8eede3fbf1abe91c40d39ea7d51e6fda" +"checksum rustc-std-workspace-core 1.0.0 (registry+https://github.com/rust-lang/crates.io-index)" = "1956f5517128a2b6f23ab2dadf1a976f4f5b27962e7724c2bf3d45e539ec098c" +"checksum rustc_version 0.2.3 (registry+https://github.com/rust-lang/crates.io-index)" = "138e3e0acb6c9fb258b19b67cb8abd63c00679d2851805ea151465464fe9030a" +"checksum scroll 0.9.2 (registry+https://github.com/rust-lang/crates.io-index)" = "2f84d114ef17fd144153d608fba7c446b0145d038985e7a8cc5d08bb0ce20383" +"checksum scroll_derive 0.9.5 (registry+https://github.com/rust-lang/crates.io-index)" = "8f1aa96c45e7f5a91cb7fabe7b279f02fea7126239fc40b732316e8b6a2d0fcb" +"checksum semver 0.9.0 (registry+https://github.com/rust-lang/crates.io-index)" = "1d7eb9ef2c18661902cc47e535f9bc51b78acd254da71d375c2f6720d9a40403" +"checksum semver-parser 0.7.0 (registry+https://github.com/rust-lang/crates.io-index)" = "388a1df253eca08550bef6c72392cfe7c30914bf41df5269b68cbd6ff8f570a3" +"checksum serde 1.0.101 (registry+https://github.com/rust-lang/crates.io-index)" = "9796c9b7ba2ffe7a9ce53c2287dfc48080f4b2b362fcc245a259b3a7201119dd" +"checksum serde_derive 1.0.101 (registry+https://github.com/rust-lang/crates.io-index)" = "4b133a43a1ecd55d4086bd5b4dc6c1751c68b1bfbeba7a5040442022c7e7c02e" +"checksum smallvec 0.6.10 (registry+https://github.com/rust-lang/crates.io-index)" = "ab606a9c5e214920bb66c458cd7be8ef094f813f20fe77a54cc7dbfff220d4b7" +"checksum stable_deref_trait 1.1.1 (registry+https://github.com/rust-lang/crates.io-index)" = "dba1a27d3efae4351c8051072d619e3ade2820635c3958d826bfea39d59b54c8" +"checksum syn 0.15.44 (registry+https://github.com/rust-lang/crates.io-index)" = "9ca4b3b69a77cbe1ffc9e198781b7acb0c7365a883670e8f1c1bc66fba79a5c5" +"checksum syn 1.0.5 (registry+https://github.com/rust-lang/crates.io-index)" = "66850e97125af79138385e9b88339cbcd037e3f28ceab8c5ad98e64f0f1f80bf" +"checksum unicode-xid 0.1.0 (registry+https://github.com/rust-lang/crates.io-index)" = "fc72304796d0818e357ead4e000d19c9c174ab23dc11093ac919054d20a6a7fc" +"checksum unicode-xid 0.2.0 (registry+https://github.com/rust-lang/crates.io-index)" = "826e7639553986605ec5979c7dd957c7895e93eabed50ab2ffa7f6128a75097c" +"checksum winapi 0.3.8 (registry+https://github.com/rust-lang/crates.io-index)" = 
"8093091eeb260906a183e6ae1abdba2ef5ef2257a21801128899c3fc699229c6" +"checksum winapi-i686-pc-windows-gnu 0.4.0 (registry+https://github.com/rust-lang/crates.io-index)" = "ac3b87c63620426dd9b991e5ce0329eff545bccbbb34f3be09ff6fb6ab51b7b6" +"checksum winapi-x86_64-pc-windows-gnu 0.4.0 (registry+https://github.com/rust-lang/crates.io-index)" = "712e227841d057c1ee1cd2fb22fa7e5a5461ae8e48fa2ca79ec42cfc1931183f" diff --git a/third_party/rust/backtrace/Cargo.toml b/third_party/rust/backtrace/Cargo.toml index f229dd5b48a2..27dc19620733 100644 --- a/third_party/rust/backtrace/Cargo.toml +++ b/third_party/rust/backtrace/Cargo.toml @@ -3,7 +3,7 @@ # When uploading crates to the registry Cargo will automatically # "normalize" Cargo.toml files for maximal compatibility # with all versions of Cargo and also rewrite `path` dependencies -# to registry (e.g. crates.io) dependencies +# to registry (e.g., crates.io) dependencies # # If you believe there's an error in this file please file an # issue against the rust-lang/cargo repository. If you're @@ -11,21 +11,70 @@ # will likely look very different (and much more reasonable) [package] +edition = "2018" name = "backtrace" -version = "0.3.9" -authors = ["Alex Crichton ", "The Rust Project Developers"] +version = "0.3.38" +authors = ["The Rust Project Developers"] +autoexamples = true +autotests = true description = "A library to acquire a stack trace (backtrace) at runtime in a Rust program.\n" -homepage = "https://github.com/alexcrichton/backtrace-rs" +homepage = "https://github.com/rust-lang/backtrace-rs" documentation = "https://docs.rs/backtrace" readme = "README.md" license = "MIT/Apache-2.0" -repository = "https://github.com/alexcrichton/backtrace-rs" +repository = "https://github.com/rust-lang/backtrace-rs" + +[[example]] +name = "backtrace" +required-features = ["std"] + +[[example]] +name = "raw" +required-features = ["std"] + +[[test]] +name = "skip_inner_frames" +required-features = ["std"] + +[[test]] +name = "long_fn_name" +required-features = ["std"] + +[[test]] +name = "smoke" +required-features = ["std"] +edition = "2018" + +[[test]] +name = "accuracy" +required-features = ["std", "dbghelp", "libbacktrace", "libunwind"] +edition = "2018" + +[[test]] +name = "concurrent-panics" +harness = false +required-features = ["std"] [dependencies.addr2line] -version = "0.6.0" +version = "0.10.0" +features = ["std"] +optional = true +default-features = false + +[dependencies.backtrace-sys] +version = "0.1.17" optional = true [dependencies.cfg-if] -version = "0.1" +version = "0.1.6" + +[dependencies.compiler_builtins] +version = "0.1.2" +optional = true + +[dependencies.core] +version = "1.0.0" +optional = true +package = "rustc-std-workspace-core" [dependencies.cpp_demangle] version = "0.2.3" @@ -33,18 +82,20 @@ optional = true default-features = false [dependencies.findshlibs] -version = "0.3.3" +version = "0.5.0" optional = true -[dependencies.gimli] -version = "0.15.0" +[dependencies.goblin] +version = "0.0.24" +features = ["elf32", "elf64", "mach32", "mach64", "pe32", "pe64", "std"] optional = true +default-features = false -[dependencies.memmap] -version = "0.6.2" -optional = true +[dependencies.libc] +version = "0.2.45" +default-features = false -[dependencies.object] +[dependencies.memmap] version = "0.7.0" optional = true @@ -57,30 +108,24 @@ optional = true [dependencies.serde] version = "1.0" -optional = true - -[dependencies.serde_derive] -version = "1.0" +features = ["derive"] optional = true [features] coresymbolication = [] -dbghelp = 
["winapi"] -default = ["libunwind", "libbacktrace", "coresymbolication", "dladdr", "dbghelp"] +dbghelp = [] +default = ["std", "libunwind", "libbacktrace", "dladdr", "dbghelp"] dladdr = [] -gimli-symbolize = ["addr2line", "findshlibs", "gimli", "memmap", "object"] +gimli-symbolize = ["addr2line", "findshlibs", "memmap", "goblin"] kernel32 = [] libbacktrace = ["backtrace-sys"] libunwind = [] +rustc-dep-of-std = ["backtrace-sys/rustc-dep-of-std", "cfg-if/rustc-dep-of-std", "core", "compiler_builtins", "libc/rustc-dep-of-std", "rustc-demangle/rustc-dep-of-std"] serialize-rustc = ["rustc-serialize"] -serialize-serde = ["serde", "serde_derive"] +serialize-serde = ["serde"] +std = [] unix-backtrace = [] -[target."cfg(all(unix, not(target_os = \"fuchsia\"), not(target_os = \"emscripten\"), not(target_os = \"macos\"), not(target_os = \"ios\")))".dependencies.backtrace-sys] -version = "0.1.17" -optional = true -[target."cfg(unix)".dependencies.libc] -version = "0.2" +verify-winapi = ["winapi/dbghelp", "winapi/handleapi", "winapi/libloaderapi", "winapi/minwindef", "winapi/processthreadsapi", "winapi/synchapi", "winapi/winbase", "winapi/winnt"] [target."cfg(windows)".dependencies.winapi] version = "0.3.3" -features = ["std", "dbghelp", "processthreadsapi", "winnt", "minwindef"] optional = true diff --git a/third_party/rust/backtrace/README.md b/third_party/rust/backtrace/README.md index e25d66ef780b..9713f0c3c21f 100644 --- a/third_party/rust/backtrace/README.md +++ b/third_party/rust/backtrace/README.md @@ -1,13 +1,11 @@ # backtrace-rs -[![Build Status](https://travis-ci.org/alexcrichton/backtrace-rs.svg?branch=master)](https://travis-ci.org/alexcrichton/backtrace-rs) -[![Build status](https://ci.appveyor.com/api/projects/status/v4l9oj4aqbbgyx44?svg=true)](https://ci.appveyor.com/project/alexcrichton/backtrace-rs) - [Documentation](https://docs.rs/backtrace) A library for acquiring backtraces at runtime for Rust. This library aims to -enhance the support given by the standard library at `std::rt` by providing a -more stable and programmatic interface. +enhance the support of the standard library by providing a programmatic +interface to work with, but it also supports simply easily printing the current +backtrace like libstd's panics. ## Install @@ -16,12 +14,9 @@ more stable and programmatic interface. backtrace = "0.3" ``` -```rust -extern crate backtrace; -``` - -Note that this crate requires `make`, `objcopy`, and `ar` to be present on Linux -systems. +Note that this crate requires `cc` and `ar` to be present on Unix systems when +`libbacktrace` is used (which is the default). For configuring C compilers see +the [`cc` crate documentation](https://github.com/alexcrichton/cc-rs). ## Usage @@ -54,7 +49,7 @@ fn main() { let symbol_address = frame.symbol_address(); // Resolve this instruction pointer to a symbol name - backtrace::resolve(ip, |symbol| { + backtrace::resolve_frame(frame, |symbol| { if let Some(name) = symbol.name() { // ... } @@ -68,11 +63,6 @@ fn main() { } ``` -## Platform Support - -This library currently supports OSX, Linux, and Windows. Support for other -platforms is always welcome! 
- # License This project is licensed under either of diff --git a/third_party/rust/backtrace/appveyor.yml b/third_party/rust/backtrace/appveyor.yml deleted file mode 100644 index a1b2bc18b596..000000000000 --- a/third_party/rust/backtrace/appveyor.yml +++ /dev/null @@ -1,20 +0,0 @@ -environment: - matrix: - - TARGET: x86_64-pc-windows-gnu - MSYS_BITS: 64 - - TARGET: i686-pc-windows-gnu - MSYS_BITS: 32 - - TARGET: x86_64-pc-windows-msvc - - TARGET: i686-pc-windows-msvc -install: - - ps: Start-FileDownload "https://static.rust-lang.org/dist/rust-nightly-${env:TARGET}.exe" - - rust-nightly-%TARGET%.exe /VERYSILENT /NORESTART /DIR="C:\Program Files (x86)\Rust" - - set PATH=%PATH%;C:\Program Files (x86)\Rust\bin - - if defined MSYS_BITS set PATH=%PATH%;C:\msys64\mingw%MSYS_BITS%\bin - - rustc -V - - cargo -V - -build: false - -test_script: - - cargo test --target %TARGET% diff --git a/third_party/rust/backtrace/benches/benchmarks.rs b/third_party/rust/backtrace/benches/benchmarks.rs new file mode 100644 index 000000000000..ad55788c27a2 --- /dev/null +++ b/third_party/rust/backtrace/benches/benchmarks.rs @@ -0,0 +1,94 @@ +#![feature(test)] + +extern crate test; + +extern crate backtrace; + +#[cfg(feature = "std")] +use backtrace::Backtrace; + +#[bench] +#[cfg(feature = "std")] +fn trace(b: &mut test::Bencher) { + #[inline(never)] + fn the_function() { + backtrace::trace(|frame| { + let ip = frame.ip(); + test::black_box(ip); + true + }); + } + b.iter(the_function); +} + +#[bench] +#[cfg(feature = "std")] +fn trace_and_resolve_callback(b: &mut test::Bencher) { + #[inline(never)] + fn the_function() { + backtrace::trace(|frame| { + backtrace::resolve(frame.ip(), |symbol| { + let addr = symbol.addr(); + test::black_box(addr); + }); + true + }); + } + b.iter(the_function); +} + +#[bench] +#[cfg(feature = "std")] +fn trace_and_resolve_separate(b: &mut test::Bencher) { + #[inline(never)] + fn the_function(frames: &mut Vec<*mut std::ffi::c_void>) { + backtrace::trace(|frame| { + frames.push(frame.ip()); + true + }); + frames.iter().for_each(|frame_ip| { + backtrace::resolve(*frame_ip, |symbol| { + test::black_box(symbol); + }); + }); + } + let mut frames = Vec::with_capacity(1024); + b.iter(|| { + the_function(&mut frames); + frames.clear(); + }); +} + +#[bench] +#[cfg(feature = "std")] +fn new_unresolved(b: &mut test::Bencher) { + #[inline(never)] + fn the_function() { + let bt = Backtrace::new_unresolved(); + test::black_box(bt); + } + b.iter(the_function); +} + +#[bench] +#[cfg(feature = "std")] +fn new(b: &mut test::Bencher) { + #[inline(never)] + fn the_function() { + let bt = Backtrace::new(); + test::black_box(bt); + } + b.iter(the_function); +} + +#[bench] +#[cfg(feature = "std")] +fn new_unresolved_and_resolve_separate(b: &mut test::Bencher) { + #[inline(never)] + fn the_function() { + let mut bt = Backtrace::new_unresolved(); + bt.resolve(); + test::black_box(bt); + } + b.iter(the_function); +} diff --git a/third_party/rust/backtrace/ci/android-sdk.sh b/third_party/rust/backtrace/ci/android-sdk.sh new file mode 100644 index 000000000000..aee133e3a098 --- /dev/null +++ b/third_party/rust/backtrace/ci/android-sdk.sh @@ -0,0 +1,74 @@ +#!/usr/bin/env sh +# Copyright 2016 The Rust Project Developers. See the COPYRIGHT +# file at the top-level directory of this distribution and at +# http://rust-lang.org/COPYRIGHT. +# +# Licensed under the Apache License, Version 2.0 or the MIT license +# , at your +# option. This file may not be copied, modified, or distributed +# except according to those terms. 
+ +set -ex + +# Prep the SDK and emulator +# +# Note that the update process requires that we accept a bunch of licenses, and +# we can't just pipe `yes` into it for some reason, so we take the same strategy +# located in https://github.com/appunite/docker by just wrapping it in a script +# which apparently magically accepts the licenses. + +SDK=4333796 +mkdir sdk +curl --retry 20 https://dl.google.com/android/repository/sdk-tools-linux-${SDK}.zip -O +unzip -q -d sdk sdk-tools-linux-${SDK}.zip + +case "$1" in + arm | armv7) + api=24 + image="system-images;android-${api};google_apis;armeabi-v7a" + ;; + aarch64) + api=24 + image="system-images;android-${api};google_apis;arm64-v8a" + ;; + i686) + api=28 + image="system-images;android-${api};default;x86" + ;; + x86_64) + api=28 + image="system-images;android-${api};default;x86_64" + ;; + *) + echo "invalid arch: $1" + exit 1 + ;; +esac; + +# Try to fix warning about missing file. +# See https://askubuntu.com/a/1078784 +mkdir -p /root/.android/ +echo '### User Sources for Android SDK Manager' >> /root/.android/repositories.cfg +echo '#Fri Nov 03 10:11:27 CET 2017 count=0' >> /root/.android/repositories.cfg + +# Print all available packages +# yes | ./sdk/tools/bin/sdkmanager --list --verbose + +# --no_https avoids +# javax.net.ssl.SSLHandshakeException: sun.security.validator.ValidatorException: No trusted certificate found +# +# | grep -v = || true removes the progress bar output from the sdkmanager +# which produces an insane amount of output. +yes | ./sdk/tools/bin/sdkmanager --licenses --no_https | grep -v = || true +yes | ./sdk/tools/bin/sdkmanager --no_https \ + "emulator" \ + "platform-tools" \ + "platforms;android-${api}" \ + "${image}" | grep -v = || true + +echo "no" | + ./sdk/tools/bin/avdmanager create avd \ + --name "${1}" \ + --package "${image}" | grep -v = || true + diff --git a/third_party/rust/backtrace/ci/docker/arm-linux-androideabi/Dockerfile b/third_party/rust/backtrace/ci/docker/arm-linux-androideabi/Dockerfile index 10799974e191..7cfdae639e1b 100644 --- a/third_party/rust/backtrace/ci/docker/arm-linux-androideabi/Dockerfile +++ b/third_party/rust/backtrace/ci/docker/arm-linux-androideabi/Dockerfile @@ -11,8 +11,27 @@ RUN apt-get update && apt-get install -y --no-install-recommends \ COPY android-ndk.sh / RUN /android-ndk.sh arm -ENV PATH=$PATH:/android-toolchain/bin +WORKDIR /android +COPY android-sdk.sh /android/sdk.sh +RUN ./sdk.sh arm +RUN mv /root/.android /tmp +RUN chmod 777 -R /tmp/.android +RUN chmod 755 /android/sdk/tools/* /android/sdk/emulator/qemu/linux-x86_64/* +ENV PATH=$PATH:/android-toolchain/bin:/android/sdk/platform-tools # TODO: run tests in an emulator eventually ENV CARGO_TARGET_ARM_LINUX_ANDROIDEABI_LINKER=arm-linux-androideabi-gcc \ - CARGO_TARGET_ARM_LINUX_ANDROIDEABI_RUNNER="true" + CARGO_TARGET_ARM_LINUX_ANDROIDEABI_RUNNER=/tmp/runtest \ + HOME=/tmp + +ADD runtest-android.rs /tmp/runtest.rs +ENTRYPOINT [ \ + "bash", \ + "-c", \ + # set SHELL so android can detect a 64bits system, see + # http://stackoverflow.com/a/41789144 + "SHELL=/bin/dash /android/sdk/emulator/emulator @arm -no-window & \ + /rust/bin/rustc /tmp/runtest.rs -o /tmp/runtest && \ + exec \"$@\"", \ + "--" \ +] diff --git a/third_party/rust/backtrace/ci/docker/powerpc-unknown-linux-gnu/Dockerfile b/third_party/rust/backtrace/ci/docker/powerpc-unknown-linux-gnu/Dockerfile deleted file mode 100644 index 7323e8591167..000000000000 --- a/third_party/rust/backtrace/ci/docker/powerpc-unknown-linux-gnu/Dockerfile +++ /dev/null @@ -1,9 +0,0 
@@ -FROM ubuntu:18.04 - -RUN apt-get update && apt-get install -y --no-install-recommends \ - gcc libc6-dev qemu-user ca-certificates \ - gcc-powerpc-linux-gnu libc6-dev-powerpc-cross \ - qemu-system-ppc - -ENV CARGO_TARGET_POWERPC_UNKNOWN_LINUX_GNU_LINKER=powerpc-linux-gnu-gcc \ - CARGO_TARGET_POWERPC_UNKNOWN_LINUX_GNU_RUNNER="qemu-ppc -cpu Vger -L /usr/powerpc-linux-gnu"
diff --git a/third_party/rust/backtrace/ci/run-docker.sh b/third_party/rust/backtrace/ci/run-docker.sh index abce99e70133..5a9934b41a9e 100644 --- a/third_party/rust/backtrace/ci/run-docker.sh +++ b/third_party/rust/backtrace/ci/run-docker.sh @@ -10,7 +10,7 @@ run() { --user `id -u`:`id -g` \ --rm \ --init \ - --volume $HOME/.cargo:/cargo \ + --volume $(dirname $(dirname `which cargo`)):/cargo \ --env CARGO_HOME=/cargo \ --volume `rustc --print sysroot`:/rust:ro \ --env TARGET=$1 \
diff --git a/third_party/rust/backtrace/ci/runtest-android.rs b/third_party/rust/backtrace/ci/runtest-android.rs new file mode 100644 index 000000000000..dc70121dc82e --- /dev/null +++ b/third_party/rust/backtrace/ci/runtest-android.rs @@ -0,0 +1,50 @@ +use std::env; +use std::process::Command; +use std::path::{Path, PathBuf}; + +fn main() { + let args = env::args_os() + .skip(1) + .filter(|arg| arg != "--quiet") + .collect::<Vec<_>>(); + assert_eq!(args.len(), 1); + let test = PathBuf::from(&args[0]); + let dst = Path::new("/data/local/tmp").join(test.file_name().unwrap()); + + println!("waiting for device to come online..."); + let status = Command::new("adb") + .arg("wait-for-device") + .status() + .expect("failed to run: adb wait-for-device"); + assert!(status.success()); + + println!("pushing executable..."); + let status = Command::new("adb") + .arg("push") + .arg(&test) + .arg(&dst) + .status() + .expect("failed to run: adb pushr"); + assert!(status.success()); + + println!("executing tests..."); + let output = Command::new("adb") + .arg("shell") + .arg(&dst) + .output() + .expect("failed to run: adb shell"); + assert!(status.success()); + + println!("status: {}\nstdout ---\n{}\nstderr ---\n{}", + output.status, + String::from_utf8_lossy(&output.stdout), + String::from_utf8_lossy(&output.stderr)); + + let stdout = String::from_utf8_lossy(&output.stdout); + stdout.lines().find(|l| + (l.starts_with("PASSED ") && l.contains(" tests")) || + l.starts_with("test result: ok") + ).unwrap_or_else(|| { + panic!("failed to find successful test run"); + }); +}
diff --git a/third_party/rust/backtrace/examples/raw.rs b/third_party/rust/backtrace/examples/raw.rs index ec8c552d43f7..21abe86a5874 100644 --- a/third_party/rust/backtrace/examples/raw.rs +++ b/third_party/rust/backtrace/examples/raw.rs @@ -4,12 +4,20 @@ fn main() { foo(); } -fn foo() { bar() } -fn bar() { baz() } -fn baz() { print() } +fn foo() { + bar() +} +fn bar() { + baz() +} +fn baz() { + print() +} -#[cfg(target_pointer_width = "32")] const HEX_WIDTH: usize = 10; -#[cfg(target_pointer_width = "64")] const HEX_WIDTH: usize = 20; +#[cfg(target_pointer_width = "32")] +const HEX_WIDTH: usize = 10; +#[cfg(target_pointer_width = "64")] +const HEX_WIDTH: usize = 20; fn print() { let mut cnt = 0; @@ -33,12 +41,10 @@ fn print() { } if let Some(file) = symbol.filename() { if let Some(l) = symbol.lineno() { - print!("\n{:13}{:4$}@ {}:{}", "", "", file.display(), l, - HEX_WIDTH); + print!("\n{:13}{:4$}@ {}:{}", "", "", file.display(), l, HEX_WIDTH); } } println!(""); - }); if !resolved { println!(" - "); diff --git a/third_party/rust/backtrace/src/backtrace/dbghelp.rs 
b/third_party/rust/backtrace/src/backtrace/dbghelp.rs index 12fb8e88e8ed..a9e537a7b64e 100644 --- a/third_party/rust/backtrace/src/backtrace/dbghelp.rs +++ b/third_party/rust/backtrace/src/backtrace/dbghelp.rs @@ -7,97 +7,199 @@ + + + + + + + + + + + #![allow(bad_style)] -use std::mem; -use winapi::ctypes::*; -use winapi::shared::minwindef::*; -use winapi::um::processthreadsapi; -use winapi::um::winnt::{self, CONTEXT}; -use winapi::um::dbghelp; -use winapi::um::dbghelp::*; +use core::ffi::c_void; +use core::mem; +use crate::dbghelp; +use crate::windows::*; -pub struct Frame { - inner: STACKFRAME64, +#[derive(Clone, Copy)] +pub enum Frame { + New(STACKFRAME_EX), + Old(STACKFRAME64), } + + +unsafe impl Send for Frame {} +unsafe impl Sync for Frame {} + impl Frame { pub fn ip(&self) -> *mut c_void { - self.inner.AddrPC.Offset as *mut _ + self.addr_pc().Offset as *mut _ } pub fn symbol_address(&self) -> *mut c_void { self.ip() } + + fn addr_pc(&self) -> &ADDRESS64 { + match self { + Frame::New(new) => &new.AddrPC, + Frame::Old(old) => &old.AddrPC, + } + } + + fn addr_pc_mut(&mut self) -> &mut ADDRESS64 { + match self { + Frame::New(new) => &mut new.AddrPC, + Frame::Old(old) => &mut old.AddrPC, + } + } + + fn addr_frame_mut(&mut self) -> &mut ADDRESS64 { + match self { + Frame::New(new) => &mut new.AddrFrame, + Frame::Old(old) => &mut old.AddrFrame, + } + } + + fn addr_stack_mut(&mut self) -> &mut ADDRESS64 { + match self { + Frame::New(new) => &mut new.AddrStack, + Frame::Old(old) => &mut old.AddrStack, + } + } } +#[repr(C, align(16))] +struct MyContext(CONTEXT); + #[inline(always)] -pub fn trace(cb: &mut FnMut(&super::Frame) -> bool) { +pub unsafe fn trace(cb: &mut FnMut(&super::Frame) -> bool) { + let process = GetCurrentProcess(); + let thread = GetCurrentThread(); + + let mut context = mem::zeroed::(); + RtlCaptureContext(&mut context.0); + - let _g = ::lock::lock(); + let dbghelp = match dbghelp::init() { + Ok(dbghelp) => dbghelp, + Err(()) => return, + }; - unsafe { - - let process = processthreadsapi::GetCurrentProcess(); - let thread = processthreadsapi::GetCurrentThread(); - - - - - - let mut context = Box::new(mem::zeroed::()); - winnt::RtlCaptureContext(&mut *context); - let mut frame = super::Frame { - inner: Frame { inner: mem::zeroed() }, - }; - let image = init_frame(&mut frame.inner.inner, &context); - - - let _c = ::dbghelp_init(); - - - while dbghelp::StackWalk64(image as DWORD, - process, - thread, - &mut frame.inner.inner, - &mut *context as *mut _ as *mut _, - None, - Some(dbghelp::SymFunctionTableAccess64), - Some(dbghelp::SymGetModuleBase64), - None) == TRUE { - if frame.inner.inner.AddrPC.Offset == frame.inner.inner.AddrReturn.Offset || - frame.inner.inner.AddrPC.Offset == 0 || - frame.inner.inner.AddrReturn.Offset == 0 { - break + + + match (*dbghelp.dbghelp()).StackWalkEx() { + Some(StackWalkEx) => { + let mut frame = super::Frame { + inner: Frame::New(mem::zeroed()), + }; + let image = init_frame(&mut frame.inner, &context.0); + let frame_ptr = match &mut frame.inner { + Frame::New(ptr) => ptr as *mut STACKFRAME_EX, + _ => unreachable!(), + }; + + while StackWalkEx( + image as DWORD, + process, + thread, + frame_ptr, + &mut context.0 as *mut CONTEXT as *mut _, + None, + Some(dbghelp.SymFunctionTableAccess64()), + Some(dbghelp.SymGetModuleBase64()), + None, + 0, + ) == TRUE + { + if !cb(&frame) { + break; + } } - - if !cb(&frame) { - break + } + None => { + let mut frame = super::Frame { + inner: Frame::Old(mem::zeroed()), + }; + let image = init_frame(&mut 
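Editor's aside: the #[repr(C, align(16))] MyContext wrapper introduced above exists because RtlCaptureContext requires a 16-byte-aligned CONTEXT on x86_64, and a plain stack local only gets the alignment winapi declares for the type. A minimal, runnable sketch of the same trick (the Aligned16 name is illustrative, not from the patch):

    #[repr(C, align(16))]
    struct Aligned16<T>(T);

    fn main() {
        // Stand-in payload for CONTEXT: any T gains 16-byte alignment this way.
        let ctx = Aligned16([0u64; 4]);
        assert_eq!(&ctx as *const _ as usize % 16, 0);
    }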
frame.inner, &context.0); + let frame_ptr = match &mut frame.inner { + Frame::Old(ptr) => ptr as *mut STACKFRAME64, + _ => unreachable!(), + }; + + while dbghelp.StackWalk64()( + image as DWORD, + process, + thread, + frame_ptr, + &mut context.0 as *mut CONTEXT as *mut _, + None, + Some(dbghelp.SymFunctionTableAccess64()), + Some(dbghelp.SymGetModuleBase64()), + None, + ) == TRUE + { + if !cb(&frame) { + break; + } } } } } #[cfg(target_arch = "x86_64")] -fn init_frame(frame: &mut STACKFRAME64, ctx: &CONTEXT) -> WORD { - frame.AddrPC.Offset = ctx.Rip as u64; - frame.AddrPC.Mode = AddrModeFlat; - frame.AddrStack.Offset = ctx.Rsp as u64; - frame.AddrStack.Mode = AddrModeFlat; - frame.AddrFrame.Offset = ctx.Rbp as u64; - frame.AddrFrame.Mode = AddrModeFlat; - winnt::IMAGE_FILE_MACHINE_AMD64 +fn init_frame(frame: &mut Frame, ctx: &CONTEXT) -> WORD { + frame.addr_pc_mut().Offset = ctx.Rip as u64; + frame.addr_pc_mut().Mode = AddrModeFlat; + frame.addr_stack_mut().Offset = ctx.Rsp as u64; + frame.addr_stack_mut().Mode = AddrModeFlat; + frame.addr_frame_mut().Offset = ctx.Rbp as u64; + frame.addr_frame_mut().Mode = AddrModeFlat; + + IMAGE_FILE_MACHINE_AMD64 } #[cfg(target_arch = "x86")] -fn init_frame(frame: &mut STACKFRAME64, ctx: &CONTEXT) -> WORD { - frame.AddrPC.Offset = ctx.Eip as u64; - frame.AddrPC.Mode = AddrModeFlat; - frame.AddrStack.Offset = ctx.Esp as u64; - frame.AddrStack.Mode = AddrModeFlat; - frame.AddrFrame.Offset = ctx.Ebp as u64; - frame.AddrFrame.Mode = AddrModeFlat; - winnt::IMAGE_FILE_MACHINE_I386 +fn init_frame(frame: &mut Frame, ctx: &CONTEXT) -> WORD { + frame.addr_pc_mut().Offset = ctx.Eip as u64; + frame.addr_pc_mut().Mode = AddrModeFlat; + frame.addr_stack_mut().Offset = ctx.Esp as u64; + frame.addr_stack_mut().Mode = AddrModeFlat; + frame.addr_frame_mut().Offset = ctx.Ebp as u64; + frame.addr_frame_mut().Mode = AddrModeFlat; + + IMAGE_FILE_MACHINE_I386 +} + +#[cfg(target_arch = "aarch64")] +fn init_frame(frame: &mut Frame, ctx: &CONTEXT) -> WORD { + frame.addr_pc_mut().Offset = ctx.Pc as u64; + frame.addr_pc_mut().Mode = AddrModeFlat; + frame.addr_stack_mut().Offset = ctx.Sp as u64; + frame.addr_stack_mut().Mode = AddrModeFlat; + unsafe { + frame.addr_frame_mut().Offset = ctx.u.s().Fp as u64; + } + frame.addr_frame_mut().Mode = AddrModeFlat; + IMAGE_FILE_MACHINE_ARM64 +} + +#[cfg(target_arch = "arm")] +fn init_frame(frame: &mut Frame, ctx: &CONTEXT) -> WORD { + frame.addr_pc_mut().Offset = ctx.Pc as u64; + frame.addr_pc_mut().Mode = AddrModeFlat; + frame.addr_stack_mut().Offset = ctx.Sp as u64; + frame.addr_stack_mut().Mode = AddrModeFlat; + unsafe { + frame.addr_frame_mut().Offset = ctx.R11 as u64; + } + frame.addr_frame_mut().Mode = AddrModeFlat; + IMAGE_FILE_MACHINE_ARMNT } diff --git a/third_party/rust/backtrace/src/backtrace/libunwind.rs b/third_party/rust/backtrace/src/backtrace/libunwind.rs index e9d871ef2630..ed9e838c1609 100644 --- a/third_party/rust/backtrace/src/backtrace/libunwind.rs +++ b/third_party/rust/backtrace/src/backtrace/libunwind.rs @@ -8,29 +8,56 @@ -use std::os::raw::c_void; -pub struct Frame { - ctx: *mut uw::_Unwind_Context, + + + + + + + + + + + + + + + + +use core::ffi::c_void; + +pub enum Frame { + Raw(*mut uw::_Unwind_Context), + Cloned { + ip: *mut c_void, + symbol_address: *mut c_void, + }, } + + + + +unsafe impl Send for Frame {} +unsafe impl Sync for Frame {} + impl Frame { pub fn ip(&self) -> *mut c_void { - let mut ip_before_insn = 0; - let mut ip = unsafe { - uw::_Unwind_GetIPInfo(self.ctx, &mut ip_before_insn) as *mut c_void + let 
ctx = match *self { + Frame::Raw(ctx) => ctx, + Frame::Cloned { ip, .. } => return ip, }; - if !ip.is_null() && ip_before_insn == 0 { - - - ip = (ip as usize - 1) as *mut _; + unsafe { + uw::_Unwind_GetIP(ctx) as *mut c_void } - return ip } pub fn symbol_address(&self) -> *mut c_void { - - + if let Frame::Cloned { symbol_address, .. } = *self { + return symbol_address; + } + @@ -47,20 +74,27 @@ impl Frame { } } -#[inline(always)] -pub fn trace(mut cb: &mut FnMut(&super::Frame) -> bool) { - unsafe { - uw::_Unwind_Backtrace(trace_fn, &mut cb as *mut _ as *mut _); +impl Clone for Frame { + fn clone(&self) -> Frame { + Frame::Cloned { + ip: self.ip(), + symbol_address: self.symbol_address(), + } } +} + +#[inline(always)] +pub unsafe fn trace(mut cb: &mut FnMut(&super::Frame) -> bool) { + uw::_Unwind_Backtrace(trace_fn, &mut cb as *mut _ as *mut _); extern fn trace_fn(ctx: *mut uw::_Unwind_Context, arg: *mut c_void) -> uw::_Unwind_Reason_Code { let cb = unsafe { &mut *(arg as *mut &mut FnMut(&super::Frame) -> bool) }; let cx = super::Frame { - inner: Frame { ctx: ctx }, + inner: Frame::Raw(ctx), }; - let mut bomb = ::Bomb { enabled: true }; + let mut bomb = crate::Bomb { enabled: true }; let keep_going = cb(&cx); bomb.enabled = false; @@ -83,8 +117,7 @@ pub fn trace(mut cb: &mut FnMut(&super::Frame) -> bool) { mod uw { pub use self::_Unwind_Reason_Code::*; - use libc; - use std::os::raw::{c_int, c_void}; + use core::ffi::c_void; #[repr(C)] pub enum _Unwind_Reason_Code { @@ -115,12 +148,13 @@ mod uw { #[cfg(all(not(all(target_os = "android", target_arch = "arm")), + not(all(target_os = "freebsd", target_arch = "arm")), not(all(target_os = "linux", target_arch = "arm"))))] - pub fn _Unwind_GetIPInfo(ctx: *mut _Unwind_Context, - ip_before_insn: *mut c_int) + pub fn _Unwind_GetIP(ctx: *mut _Unwind_Context) -> libc::uintptr_t; #[cfg(all(not(target_os = "android"), + not(all(target_os = "freebsd", target_arch = "arm")), not(all(target_os = "linux", target_arch = "arm"))))] pub fn _Unwind_FindEnclosingFunction(pc: *mut c_void) -> *mut c_void; @@ -130,6 +164,7 @@ mod uw { #[cfg(any(all(target_os = "android", target_arch = "arm"), + all(target_os = "freebsd", target_arch = "arm"), all(target_os = "linux", target_arch = "arm")))] pub unsafe fn _Unwind_GetIP(ctx: *mut _Unwind_Context) -> libc::uintptr_t { #[repr(C)] @@ -176,19 +211,8 @@ mod uw { - #[cfg(any(all(target_os = "android", target_arch = "arm"), - all(target_os = "linux", target_arch = "arm")))] - pub unsafe fn _Unwind_GetIPInfo(ctx: *mut _Unwind_Context, - ip_before_insn: *mut c_int) - -> libc::uintptr_t - { - *ip_before_insn = 0; - _Unwind_GetIP(ctx) - } - - - #[cfg(any(target_os = "android", + all(target_os = "freebsd", target_arch = "arm"), all(target_os = "linux", target_arch = "arm")))] pub unsafe fn _Unwind_FindEnclosingFunction(pc: *mut c_void) -> *mut c_void diff --git a/third_party/rust/backtrace/src/backtrace/mod.rs b/third_party/rust/backtrace/src/backtrace/mod.rs index 3af90a2f3d8f..15b98ec718e5 100644 --- a/third_party/rust/backtrace/src/backtrace/mod.rs +++ b/third_party/rust/backtrace/src/backtrace/mod.rs @@ -1,6 +1,6 @@ -use std::fmt; +use core::ffi::c_void; +use core::fmt; -use std::os::raw::c_void; @@ -36,9 +36,33 @@ use std::os::raw::c_void; -#[inline(never)] - -pub fn trace<F: FnMut(&Frame) -> bool>(mut cb: F) { + + + + + + + + + + + +#[cfg(feature = "std")] +pub fn trace<F: FnMut(&Frame) -> bool>(cb: F) { + let _guard = crate::lock::lock(); + unsafe { trace_unsynchronized(cb) } +} + + + + + + + + + + +pub unsafe fn trace_unsynchronized<F: FnMut(&Frame) -> bool>(mut cb: F) {
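Editor's aside: for orientation, this is how the trace/trace_unsynchronized split above reads from the caller's side, as a sketch assuming the backtrace crate (with its default std feature) as a dependency. The safe trace takes the crate-wide lock; the unsafe variant is for callers that must avoid it, e.g. in signal handlers:

    fn main() {
        let mut depth = 0;
        backtrace::trace(|frame| {
            println!("frame #{}: ip = {:?}", depth, frame.ip());
            depth += 1;
            depth < 8 // returning false stops the stack walk early
        });
    }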
trace_imp(&mut cb) } @@ -48,8 +72,9 @@ pub fn trace bool>(mut cb: F) { +#[derive(Clone)] pub struct Frame { - inner: FrameImp, + pub(crate) inner: FrameImp, } impl Frame { @@ -87,27 +112,38 @@ impl fmt::Debug for Frame { } } -cfg_if! { - if #[cfg(all(unix, - not(target_os = "emscripten"), - not(all(target_os = "ios", target_arch = "arm")), - feature = "libunwind"))] { +cfg_if::cfg_if! { + if #[cfg( + any( + all( + unix, + not(target_os = "emscripten"), + not(all(target_os = "ios", target_arch = "arm")), + feature = "libunwind", + ), + target_env = "sgx", + ) + )] { mod libunwind; use self::libunwind::trace as trace_imp; - use self::libunwind::Frame as FrameImp; - } else if #[cfg(all(unix, - not(target_os = "emscripten"), - feature = "unix-backtrace"))] { + pub(crate) use self::libunwind::Frame as FrameImp; + } else if #[cfg( + all( + unix, + not(target_os = "emscripten"), + feature = "unix-backtrace", + ) + )] { mod unix_backtrace; use self::unix_backtrace::trace as trace_imp; - use self::unix_backtrace::Frame as FrameImp; - } else if #[cfg(all(windows, feature = "dbghelp"))] { + pub(crate) use self::unix_backtrace::Frame as FrameImp; + } else if #[cfg(all(windows, feature = "dbghelp", not(target_vendor = "uwp")))] { mod dbghelp; use self::dbghelp::trace as trace_imp; - use self::dbghelp::Frame as FrameImp; + pub(crate) use self::dbghelp::Frame as FrameImp; } else { mod noop; use self::noop::trace as trace_imp; - use self::noop::Frame as FrameImp; + pub(crate) use self::noop::Frame as FrameImp; } } diff --git a/third_party/rust/backtrace/src/backtrace/noop.rs b/third_party/rust/backtrace/src/backtrace/noop.rs index 8b8f8766ed41..c17cd2b99929 100644 --- a/third_party/rust/backtrace/src/backtrace/noop.rs +++ b/third_party/rust/backtrace/src/backtrace/noop.rs @@ -1,8 +1,12 @@ -use std::os::raw::c_void; + + + +use core::ffi::c_void; #[inline(always)] pub fn trace(_cb: &mut FnMut(&super::Frame) -> bool) {} +#[derive(Clone)] pub struct Frame; impl Frame { diff --git a/third_party/rust/backtrace/src/backtrace/unix_backtrace.rs b/third_party/rust/backtrace/src/backtrace/unix_backtrace.rs index d4ae1c33b31c..585a47709e5a 100644 --- a/third_party/rust/backtrace/src/backtrace/unix_backtrace.rs +++ b/third_party/rust/backtrace/src/backtrace/unix_backtrace.rs @@ -8,39 +8,54 @@ -use std::mem; -use std::os::raw::{c_void, c_int}; + + + + + + + +use core::ffi::c_void; +use core::mem; +use libc::c_int; + +#[derive(Clone)] pub struct Frame { - addr: *mut c_void, + addr: usize, } impl Frame { - pub fn ip(&self) -> *mut c_void { self.addr } - pub fn symbol_address(&self) -> *mut c_void { self.addr } + pub fn ip(&self) -> *mut c_void { + self.addr as *mut c_void + } + pub fn symbol_address(&self) -> *mut c_void { + self.ip() + } } -extern { +extern "C" { fn backtrace(buf: *mut *mut c_void, sz: c_int) -> c_int; } #[inline(always)] -pub fn trace(cb: &mut FnMut(&super::Frame) -> bool) { +pub unsafe fn trace(cb: &mut FnMut(&super::Frame) -> bool) { const SIZE: usize = 100; let mut buf: [*mut c_void; SIZE]; let cnt; - unsafe { - buf = mem::zeroed(); - cnt = backtrace(buf.as_mut_ptr(), SIZE as c_int); - } + + buf = mem::zeroed(); + cnt = backtrace(buf.as_mut_ptr(), SIZE as c_int); for addr in buf[..cnt as usize].iter() { let cx = super::Frame { - inner: Frame { addr: *addr }, + inner: Frame { + addr: *addr as usize, + }, }; if !cb(&cx) { - return + return; } } } diff --git a/third_party/rust/backtrace/src/capture.rs b/third_party/rust/backtrace/src/capture.rs index 06b6c4afb470..bc3b8dbf2ae3 100644 --- 
a/third_party/rust/backtrace/src/capture.rs +++ b/third_party/rust/backtrace/src/capture.rs @@ -1,9 +1,20 @@ +use crate::PrintFmt; +use crate::{resolve, resolve_frame, trace, BacktraceFmt, Symbol, SymbolName}; +use std::ffi::c_void; use std::fmt; -use std::mem; -use std::os::raw::c_void; use std::path::{Path, PathBuf}; +use std::prelude::v1::*; + +#[cfg(feature = "serde")] +use serde::{Deserialize, Serialize}; + + + + + + + -use {trace, resolve, SymbolName}; @@ -11,31 +22,73 @@ use {trace, resolve, SymbolName}; #[derive(Clone)] #[cfg_attr(feature = "serialize-rustc", derive(RustcDecodable, RustcEncodable))] -#[cfg_attr(feature = "serialize-serde", derive(Deserialize, Serialize))] +#[cfg_attr(feature = "serde", derive(Deserialize, Serialize))] pub struct Backtrace { + frames: Vec<BacktraceFrame>, + + + actual_start_index: usize, +} + +fn _assert_send_sync() { + fn _assert<T: Send + Sync>() {} + _assert::<Backtrace>(); } + + + + + #[derive(Clone)] -#[cfg_attr(feature = "serialize-rustc", derive(RustcDecodable, RustcEncodable))] -#[cfg_attr(feature = "serialize-serde", derive(Deserialize, Serialize))] pub struct BacktraceFrame { - ip: usize, - symbol_address: usize, + frame: Frame, symbols: Option<Vec<BacktraceSymbol>>, } +#[derive(Clone)] +enum Frame { + Raw(crate::Frame), + #[allow(dead_code)] + Deserialized { + ip: usize, + symbol_address: usize, + }, +} + +impl Frame { + fn ip(&self) -> *mut c_void { + match *self { + Frame::Raw(ref f) => f.ip(), + Frame::Deserialized { ip, .. } => ip as *mut c_void, + } + } + + fn symbol_address(&self) -> *mut c_void { + match *self { + Frame::Raw(ref f) => f.symbol_address(), + Frame::Deserialized { symbol_address, .. } => symbol_address as *mut c_void, + } + } +} + + + + + + #[derive(Clone)] #[cfg_attr(feature = "serialize-rustc", derive(RustcDecodable, RustcEncodable))] -#[cfg_attr(feature = "serialize-serde", derive(Deserialize, Serialize))] +#[cfg_attr(feature = "serde", derive(Deserialize, Serialize))] pub struct BacktraceSymbol { name: Option<Vec<u8>>, addr: Option<usize>, @@ -59,10 +112,22 @@ impl Backtrace { + + + + + + + + + + + + #[inline(never)] pub fn new() -> Backtrace { - let mut bt = Backtrace::new_unresolved(); + let mut bt = Self::create(Self::new as usize); bt.resolve(); - return bt + bt } @@ -83,18 +148,35 @@ impl Backtrace { + + + + + + #[inline(never)] pub fn new_unresolved() -> Backtrace { + Self::create(Self::new_unresolved as usize) + } + + fn create(ip: usize) -> Backtrace { let mut frames = Vec::new(); + let mut actual_start_index = None; trace(|frame| { frames.push(BacktraceFrame { - ip: frame.ip() as usize, - symbol_address: frame.symbol_address() as usize, + frame: Frame::Raw(frame.clone()), symbols: None, }); + + if frame.symbol_address() as usize == ip && actual_start_index.is_none() { + actual_start_index = Some(frames.len()); + } true }); - Backtrace { frames: frames } + Backtrace { + frames, + actual_start_index: actual_start_index.unwrap_or(0), + } } @@ -102,8 +184,13 @@ impl Backtrace { + + + + + pub fn frames(&self) -> &[BacktraceFrame] { - &self.frames + &self.frames[self.actual_start_index..]
} @@ -111,17 +198,30 @@ impl Backtrace { + + + + + pub fn resolve(&mut self) { for frame in self.frames.iter_mut().filter(|f| f.symbols.is_none()) { let mut symbols = Vec::new(); - resolve(frame.ip as *mut _, |symbol| { - symbols.push(BacktraceSymbol { - name: symbol.name().map(|m| m.as_bytes().to_vec()), - addr: symbol.addr().map(|a| a as usize), - filename: symbol.filename().map(|m| m.to_path_buf()), - lineno: symbol.lineno(), - }); - }); + { + let sym = |symbol: &Symbol| { + symbols.push(BacktraceSymbol { + name: symbol.name().map(|m| m.as_bytes().to_vec()), + addr: symbol.addr().map(|a| a as usize), + filename: symbol.filename().map(|m| m.to_owned()), + lineno: symbol.lineno(), + }); + }; + match frame.frame { + Frame::Raw(ref f) => resolve_frame(f, sym), + Frame::Deserialized { ip, .. } => { + resolve(ip as *mut c_void, sym); + } + } + } frame.symbols = Some(symbols); } } @@ -130,7 +230,8 @@ impl Backtrace { impl From<Vec<BacktraceFrame>> for Backtrace { fn from(frames: Vec<BacktraceFrame>) -> Self { Backtrace { - frames: frames + frames, + actual_start_index: 0, } } } @@ -143,13 +244,23 @@ impl Into<Vec<BacktraceFrame>> for Backtrace { impl BacktraceFrame { + + + + + pub fn ip(&self) -> *mut c_void { - self.ip as *mut c_void + self.frame.ip() as *mut c_void } + + + + + pub fn symbol_address(&self) -> *mut c_void { - self.symbol_address as *mut c_void + self.frame.symbol_address() as *mut c_void } @@ -161,6 +272,11 @@ impl BacktraceFrame { + + + + + pub fn symbols(&self) -> &[BacktraceSymbol] { self.symbols.as_ref().map(|s| &s[..]).unwrap_or(&[]) } @@ -168,21 +284,41 @@ impl BacktraceFrame { impl BacktraceSymbol { + + + + + pub fn name(&self) -> Option<SymbolName> { self.name.as_ref().map(|s| SymbolName::new(s)) } + + + + + pub fn addr(&self) -> Option<*mut c_void> { self.addr.map(|s| s as *mut c_void) } + + + + + pub fn filename(&self) -> Option<&Path> { self.filename.as_ref().map(|p| &**p) } + + + + + pub fn lineno(&self) -> Option<u32> { self.lineno } @@ -190,42 +326,36 @@ impl BacktraceSymbol { impl fmt::Debug for Backtrace { fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result { - let hex_width = mem::size_of::<usize>() * 2 + 2; - - try!(write!(fmt, "stack backtrace:")); - - for (idx, frame) in self.frames().iter().enumerate() { - let ip = frame.ip(); - try!(write!(fmt, "\n{:4}: {:2$?}", idx, ip, hex_width)); - - let symbols = match frame.symbols { - Some(ref s) => s, - None => { - try!(write!(fmt, " - <unresolved>")); - continue + let full = fmt.alternate(); + let (frames, style) = if full { + (&self.frames[..], PrintFmt::Full) + } else { + (&self.frames[self.actual_start_index..], PrintFmt::Short) + }; + + + + + + let cwd = std::env::current_dir(); + let mut print_path = move |fmt: &mut fmt::Formatter, path: crate::BytesOrWideString| { + let path = path.into_path_buf(); + if !full { + if let Ok(cwd) = &cwd { + if let Ok(suffix) = path.strip_prefix(cwd) { + return fmt::Display::fmt(&suffix.display(), fmt); + } } - }; - if symbols.len() == 0 { - try!(write!(fmt, " - <no info>")); } + fmt::Display::fmt(&path.display(), fmt) + }; - for (idx, symbol) in symbols.iter().enumerate() { - if idx != 0 { - try!(write!(fmt, "\n {:1$}", "", hex_width)); - } - - if let Some(name) = symbol.name() { - try!(write!(fmt, " - {}", name)); - } else { - try!(write!(fmt, " - <unknown>")); - } - - if let (Some(file), Some(line)) = (symbol.filename(), symbol.lineno()) { - try!(write!(fmt, "\n {:3$}at {}:{}", "", file.display(), line, hex_width)); - } - } + let mut f = BacktraceFmt::new(fmt, style, &mut print_path); + f.add_context()?; + for frame in frames { + f.frame().backtrace_frame(frame)?; } - +
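Editor's aside: a usage sketch of the two-phase capture implemented above (again assuming the backtrace crate as a dependency). new_unresolved only walks the stack; the expensive symbolication happens in resolve(), and only if the trace is actually displayed:

    fn main() {
        // Cheap: raw frames plus the actual_start_index bookkeeping.
        let mut bt = backtrace::Backtrace::new_unresolved();

        // Expensive: symbolicate lazily, e.g. only on an error path.
        if std::env::var_os("SHOW_BACKTRACE").is_some() {
            bt.resolve();
            println!("{:?}", bt);
        }
    }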
f.finish()?; Ok(()) } } @@ -235,3 +365,115 @@ impl Default for Backtrace { Backtrace::new() } } + +impl fmt::Debug for BacktraceFrame { + fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result { + fmt.debug_struct("BacktraceFrame") + .field("ip", &self.ip()) + .field("symbol_address", &self.symbol_address()) + .finish() + } +} + +impl fmt::Debug for BacktraceSymbol { + fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result { + fmt.debug_struct("BacktraceSymbol") + .field("name", &self.name()) + .field("addr", &self.addr()) + .field("filename", &self.filename()) + .field("lineno", &self.lineno()) + .finish() + } +} + +#[cfg(feature = "serialize-rustc")] +mod rustc_serialize_impls { + use super::*; + use rustc_serialize::{Decodable, Decoder, Encodable, Encoder}; + + #[derive(RustcEncodable, RustcDecodable)] + struct SerializedFrame { + ip: usize, + symbol_address: usize, + symbols: Option>, + } + + impl Decodable for BacktraceFrame { + fn decode(d: &mut D) -> Result + where + D: Decoder, + { + let frame: SerializedFrame = SerializedFrame::decode(d)?; + Ok(BacktraceFrame { + frame: Frame::Deserialized { + ip: frame.ip, + symbol_address: frame.symbol_address, + }, + symbols: frame.symbols, + }) + } + } + + impl Encodable for BacktraceFrame { + fn encode(&self, e: &mut E) -> Result<(), E::Error> + where + E: Encoder, + { + let BacktraceFrame { frame, symbols } = self; + SerializedFrame { + ip: frame.ip() as usize, + symbol_address: frame.symbol_address() as usize, + symbols: symbols.clone(), + } + .encode(e) + } + } +} + +#[cfg(feature = "serde")] +mod serde_impls { + extern crate serde; + + use self::serde::de::Deserializer; + use self::serde::ser::Serializer; + use self::serde::{Deserialize, Serialize}; + use super::*; + + #[derive(Serialize, Deserialize)] + struct SerializedFrame { + ip: usize, + symbol_address: usize, + symbols: Option>, + } + + impl Serialize for BacktraceFrame { + fn serialize(&self, s: S) -> Result + where + S: Serializer, + { + let BacktraceFrame { frame, symbols } = self; + SerializedFrame { + ip: frame.ip() as usize, + symbol_address: frame.symbol_address() as usize, + symbols: symbols.clone(), + } + .serialize(s) + } + } + + impl<'a> Deserialize<'a> for BacktraceFrame { + fn deserialize(d: D) -> Result + where + D: Deserializer<'a>, + { + let frame: SerializedFrame = SerializedFrame::deserialize(d)?; + Ok(BacktraceFrame { + frame: Frame::Deserialized { + ip: frame.ip, + symbol_address: frame.symbol_address, + }, + symbols: frame.symbols, + }) + } + } +} diff --git a/third_party/rust/backtrace/src/dbghelp.rs b/third_party/rust/backtrace/src/dbghelp.rs new file mode 100644 index 000000000000..87eb24b2fd00 --- /dev/null +++ b/third_party/rust/backtrace/src/dbghelp.rs @@ -0,0 +1,370 @@ + + + + + + + + + + + + + + + + + + + + + + + +#![allow(non_snake_case)] + +use crate::windows::*; +use core::mem; +use core::ptr; + + + + +#[cfg(feature = "verify-winapi")] +mod dbghelp { + use crate::windows::*; + pub use winapi::um::dbghelp::{ + StackWalk64, SymCleanup, SymFromAddrW, SymFunctionTableAccess64, SymGetLineFromAddrW64, + SymGetModuleBase64, SymInitializeW, + }; + + extern "system" { + + pub fn SymGetOptions() -> u32; + pub fn SymSetOptions(_: u32); + + + pub fn StackWalkEx( + MachineType: DWORD, + hProcess: HANDLE, + hThread: HANDLE, + StackFrame: LPSTACKFRAME_EX, + ContextRecord: PVOID, + ReadMemoryRoutine: PREAD_PROCESS_MEMORY_ROUTINE64, + FunctionTableAccessRoutine: PFUNCTION_TABLE_ACCESS_ROUTINE64, + GetModuleBaseRoutine: PGET_MODULE_BASE_ROUTINE64, + TranslateAddress: 
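Editor's aside: stepping back to the SerializedFrame shims above, they let a Backtrace cross a serialization boundary; raw frames come back as Frame::Deserialized ip/symbol-address pairs. A round-trip sketch, assuming the crate is built with its "serde" feature and that serde_json is available as an extra dependency:

    fn main() {
        let bt = backtrace::Backtrace::new();
        let json = serde_json::to_string(&bt).expect("serialize");
        let back: backtrace::Backtrace = serde_json::from_str(&json).expect("deserialize");
        println!("{} frames survived the round trip", back.frames().len());
    }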
PTRANSLATE_ADDRESS_ROUTINE64, + Flags: DWORD, + ) -> BOOL; + + + pub fn SymFromInlineContextW( + hProcess: HANDLE, + Address: DWORD64, + InlineContext: ULONG, + Displacement: PDWORD64, + Symbol: PSYMBOL_INFOW, + ) -> BOOL; + pub fn SymGetLineFromInlineContextW( + hProcess: HANDLE, + dwAddr: DWORD64, + InlineContext: ULONG, + qwModuleBaseAddress: DWORD64, + pdwDisplacement: PDWORD, + Line: PIMAGEHLP_LINEW64, + ) -> BOOL; + } + + pub fn assert_equal_types<T>(a: T, _b: T) -> T { + a + } +} + + + +macro_rules! dbghelp { + (extern "system" { + $(fn $name:ident($($arg:ident: $argty:ty),*) -> $ret: ty;)* + }) => ( + pub struct Dbghelp { + /// The loaded DLL for `dbghelp.dll` + dll: HMODULE, + + // Each function pointer for each function we might use + $($name: usize,)* + } + + static mut DBGHELP: Dbghelp = Dbghelp { + // Initially we haven't loaded the DLL + dll: 0 as *mut _, + // Initially all functions are set to zero to say they need to be + // dynamically loaded. + $($name: 0,)* + }; + + // Convenience typedef for each function type. + $(pub type $name = unsafe extern "system" fn($($argty),*) -> $ret;)* + + impl Dbghelp { + /// Attempts to open `dbghelp.dll`. Returns success if it works or + /// error if `LoadLibraryA` fails. + /// + /// No-op if the library is already loaded. + fn ensure_open(&mut self) -> Result<(), ()> { + if !self.dll.is_null() { + return Ok(()) + } + let lib = b"dbghelp.dll\0"; + unsafe { + self.dll = LoadLibraryA(lib.as_ptr() as *const i8); + if self.dll.is_null() { + Err(()) + } else { + Ok(()) + } + } + } + + // Function for each method we'd like to use. When called it will + // either read the cached function pointer or load it and return the + // loaded value. Loads are asserted to succeed. + $(pub fn $name(&mut self) -> Option<$name> { + unsafe { + if self.$name == 0 { + let name = concat!(stringify!($name), "\0"); + self.$name = self.symbol(name.as_bytes())?; + } + let ret = mem::transmute::<usize, $name>(self.$name); + #[cfg(feature = "verify-winapi")] + dbghelp::assert_equal_types(ret, dbghelp::$name); + Some(ret) + } + })* + + fn symbol(&self, symbol: &[u8]) -> Option<usize> { + unsafe { + match GetProcAddress(self.dll, symbol.as_ptr() as *const _) as usize { + 0 => None, + n => Some(n), + } + } + } + } + + // Convenience proxy to use the cleanup locks to reference dbghelp + // functions. + #[allow(dead_code)] + impl Init { + $(pub fn $name(&self) -> $name { + unsafe { + DBGHELP.$name().unwrap() + } + })* + + pub fn dbghelp(&self) -> *mut Dbghelp { + unsafe { + &mut DBGHELP + } + } + } + ) + +} + +const SYMOPT_DEFERRED_LOADS: DWORD = 0x00000004; + +dbghelp!
{ + extern "system" { + fn SymGetOptions() -> DWORD; + fn SymSetOptions(options: DWORD) -> (); + fn SymInitializeW( + handle: HANDLE, + path: PCWSTR, + invade: BOOL + ) -> BOOL; + fn SymCleanup(handle: HANDLE) -> BOOL; + fn StackWalk64( + MachineType: DWORD, + hProcess: HANDLE, + hThread: HANDLE, + StackFrame: LPSTACKFRAME64, + ContextRecord: PVOID, + ReadMemoryRoutine: PREAD_PROCESS_MEMORY_ROUTINE64, + FunctionTableAccessRoutine: PFUNCTION_TABLE_ACCESS_ROUTINE64, + GetModuleBaseRoutine: PGET_MODULE_BASE_ROUTINE64, + TranslateAddress: PTRANSLATE_ADDRESS_ROUTINE64 + ) -> BOOL; + fn SymFunctionTableAccess64( + hProcess: HANDLE, + AddrBase: DWORD64 + ) -> PVOID; + fn SymGetModuleBase64( + hProcess: HANDLE, + AddrBase: DWORD64 + ) -> DWORD64; + fn SymFromAddrW( + hProcess: HANDLE, + Address: DWORD64, + Displacement: PDWORD64, + Symbol: PSYMBOL_INFOW + ) -> BOOL; + fn SymGetLineFromAddrW64( + hProcess: HANDLE, + dwAddr: DWORD64, + pdwDisplacement: PDWORD, + Line: PIMAGEHLP_LINEW64 + ) -> BOOL; + fn StackWalkEx( + MachineType: DWORD, + hProcess: HANDLE, + hThread: HANDLE, + StackFrame: LPSTACKFRAME_EX, + ContextRecord: PVOID, + ReadMemoryRoutine: PREAD_PROCESS_MEMORY_ROUTINE64, + FunctionTableAccessRoutine: PFUNCTION_TABLE_ACCESS_ROUTINE64, + GetModuleBaseRoutine: PGET_MODULE_BASE_ROUTINE64, + TranslateAddress: PTRANSLATE_ADDRESS_ROUTINE64, + Flags: DWORD + ) -> BOOL; + fn SymFromInlineContextW( + hProcess: HANDLE, + Address: DWORD64, + InlineContext: ULONG, + Displacement: PDWORD64, + Symbol: PSYMBOL_INFOW + ) -> BOOL; + fn SymGetLineFromInlineContextW( + hProcess: HANDLE, + dwAddr: DWORD64, + InlineContext: ULONG, + qwModuleBaseAddress: DWORD64, + pdwDisplacement: PDWORD, + Line: PIMAGEHLP_LINEW64 + ) -> BOOL; + } +} + +pub struct Init { + lock: HANDLE, +} + + + + + + + +#[cfg(all(windows, feature = "dbghelp"))] +pub fn init() -> Result { + use core::sync::atomic::{AtomicUsize, Ordering::SeqCst}; + + unsafe { + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + static LOCK: AtomicUsize = AtomicUsize::new(0); + let mut lock = LOCK.load(SeqCst); + if lock == 0 { + lock = CreateMutexA( + ptr::null_mut(), + 0, + "Local\\RustBacktraceMutex\0".as_ptr() as _, + ) as usize; + if lock == 0 { + return Err(()); + } + if let Err(other) = LOCK.compare_exchange(0, lock, SeqCst, SeqCst) { + debug_assert!(other != 0); + CloseHandle(lock as HANDLE); + lock = other; + } + } + debug_assert!(lock != 0); + let lock = lock as HANDLE; + let r = WaitForSingleObjectEx(lock, INFINITE, FALSE); + debug_assert_eq!(r, 0); + let ret = Init { lock }; + + + + + + + + + + + + + + DBGHELP.ensure_open()?; + + static mut INITIALIZED: bool = false; + if INITIALIZED { + return Ok(ret); + } + + let orig = DBGHELP.SymGetOptions().unwrap()(); + + + + + DBGHELP.SymSetOptions().unwrap()(orig | SYMOPT_DEFERRED_LOADS); + + + + + + + + + + + + + + DBGHELP.SymInitializeW().unwrap()(GetCurrentProcess(), ptr::null_mut(), TRUE); + INITIALIZED = true; + Ok(ret) + } +} + +impl Drop for Init { + fn drop(&mut self) { + unsafe { + let r = ReleaseMutex(self.lock); + debug_assert!(r != 0); + } + } +} diff --git a/third_party/rust/backtrace/src/dylib.rs b/third_party/rust/backtrace/src/dylib.rs deleted file mode 100644 index 34fb0bf16ee6..000000000000 --- a/third_party/rust/backtrace/src/dylib.rs +++ /dev/null @@ -1,70 +0,0 @@ -use std::ffi::CString; -use std::marker; -use std::mem; -use std::sync::atomic::{AtomicUsize, Ordering}; - -use libc::{self, c_char, c_void}; - -pub struct Dylib { - pub init: AtomicUsize, -} - -pub struct 
Symbol { - pub name: &'static str, - pub addr: AtomicUsize, - pub _marker: marker::PhantomData, -} - -impl Dylib { - pub unsafe fn get<'a, T>(&self, sym: &'a Symbol) -> Option<&'a T> { - self.load().and_then(|handle| { - sym.get(handle) - }) - } - - pub unsafe fn init(&self, path: &str) -> bool { - if self.init.load(Ordering::SeqCst) != 0 { - return true - } - let name = CString::new(path).unwrap(); - let ptr = libc::dlopen(name.as_ptr() as *const c_char, libc::RTLD_LAZY); - if ptr.is_null() { - return false - } - match self.init.compare_and_swap(0, ptr as usize, Ordering::SeqCst) { - 0 => {} - _ => { libc::dlclose(ptr); } - } - return true - } - - unsafe fn load(&self) -> Option<*mut c_void> { - match self.init.load(Ordering::SeqCst) { - 0 => None, - n => Some(n as *mut c_void), - } - } -} - -impl Symbol { - unsafe fn get(&self, handle: *mut c_void) -> Option<&T> { - assert_eq!(mem::size_of::(), mem::size_of_val(&self.addr)); - if self.addr.load(Ordering::SeqCst) == 0 { - self.addr.store(fetch(handle, self.name.as_ptr()), Ordering::SeqCst) - } - if self.addr.load(Ordering::SeqCst) == 1 { - None - } else { - mem::transmute::<&AtomicUsize, Option<&T>>(&self.addr) - } - } -} - -unsafe fn fetch(handle: *mut c_void, name: *const u8) -> usize { - let ptr = libc::dlsym(handle, name as *const _); - if ptr.is_null() { - 1 - } else { - ptr as usize - } -} diff --git a/third_party/rust/backtrace/src/lib.rs b/third_party/rust/backtrace/src/lib.rs index e230bb8c379d..0fc79f8ad03e 100644 --- a/third_party/rust/backtrace/src/lib.rs +++ b/third_party/rust/backtrace/src/lib.rs @@ -58,60 +58,46 @@ - - - #![doc(html_root_url = "https://docs.rs/backtrace")] #![deny(missing_docs)] -#![deny(warnings)] +#![no_std] +#![cfg_attr(all(feature = "std", target_env = "sgx"), feature(sgx_platform))] +#![allow(bare_trait_objects)] +#![allow(rust_2018_idioms)] -#[cfg(unix)] -extern crate libc; -#[cfg(all(windows, feature = "winapi"))] extern crate winapi; +#[cfg(feature = "std")] +#[macro_use] +extern crate std; -#[cfg(feature = "serde_derive")] -#[cfg_attr(feature = "serde_derive", macro_use)] -extern crate serde_derive; +pub use crate::backtrace::{trace_unsynchronized, Frame}; +mod backtrace; -#[cfg(feature = "rustc-serialize")] -extern crate rustc_serialize; +pub use crate::symbolize::resolve_frame_unsynchronized; +pub use crate::symbolize::{resolve_unsynchronized, Symbol, SymbolName}; +mod symbolize; -#[macro_use] -extern crate cfg_if; +pub use crate::types::BytesOrWideString; +mod types; -extern crate rustc_demangle; +#[cfg(feature = "std")] +pub use crate::symbolize::clear_symbol_cache; -#[cfg(feature = "cpp_demangle")] -extern crate cpp_demangle; +mod print; +pub use print::{BacktraceFmt, BacktraceFrameFmt, PrintFmt}; -cfg_if! { - if #[cfg(all(feature = "gimli-symbolize", unix, target_os = "linux"))] { - extern crate addr2line; - extern crate findshlibs; - extern crate gimli; - extern crate memmap; - extern crate object; +cfg_if::cfg_if! 
{ + if #[cfg(feature = "std")] { + pub use crate::backtrace::trace; + pub use crate::symbolize::{resolve, resolve_frame}; + pub use crate::capture::{Backtrace, BacktraceFrame, BacktraceSymbol}; + mod capture; } } -#[allow(dead_code)] -#[cfg(unix)] -#[macro_use] -mod dylib; - -pub use backtrace::{trace, Frame}; -mod backtrace; - -pub use symbolize::{resolve, Symbol, SymbolName}; -mod symbolize; - -pub use capture::{Backtrace, BacktraceFrame, BacktraceSymbol}; -mod capture; - #[allow(dead_code)] struct Bomb { enabled: bool, @@ -127,52 +113,44 @@ impl Drop for Bomb { } #[allow(dead_code)] +#[cfg(feature = "std")] mod lock { + use std::boxed::Box; use std::cell::Cell; - use std::mem; - use std::sync::{Once, Mutex, MutexGuard, ONCE_INIT}; + use std::sync::{Mutex, MutexGuard, Once}; - pub struct LockGuard(MutexGuard<'static, ()>); + pub struct LockGuard(Option<MutexGuard<'static, ()>>); static mut LOCK: *mut Mutex<()> = 0 as *mut _; - static INIT: Once = ONCE_INIT; + static INIT: Once = Once::new(); thread_local!(static LOCK_HELD: Cell<bool> = Cell::new(false)); impl Drop for LockGuard { fn drop(&mut self) { - LOCK_HELD.with(|slot| { - assert!(slot.get()); - slot.set(false); - }); + if self.0.is_some() { + LOCK_HELD.with(|slot| { + assert!(slot.get()); + slot.set(false); + }); + } } } - pub fn lock() -> Option<LockGuard> { + pub fn lock() -> LockGuard { if LOCK_HELD.with(|l| l.get()) { - return None + return LockGuard(None); } LOCK_HELD.with(|s| s.set(true)); unsafe { INIT.call_once(|| { - LOCK = mem::transmute(Box::new(Mutex::new(()))); + LOCK = Box::into_raw(Box::new(Mutex::new(()))); }); - Some(LockGuard((*LOCK).lock().unwrap())) + LockGuard(Some((*LOCK).lock().unwrap())) } } } - -#[cfg(all(windows, feature = "dbghelp"))] -unsafe fn dbghelp_init() { - use winapi::shared::minwindef; - use winapi::um::{dbghelp, processthreadsapi}; - - static mut INITIALIZED: bool = false; - - if !INITIALIZED { - dbghelp::SymInitializeW(processthreadsapi::GetCurrentProcess(), - 0 as *mut _, - minwindef::TRUE); - INITIALIZED = true; - } -} +#[cfg(all(windows, feature = "dbghelp", not(target_vendor = "uwp")))] +mod dbghelp; +#[cfg(windows)] +mod windows; diff --git a/third_party/rust/backtrace/src/print.rs b/third_party/rust/backtrace/src/print.rs new file mode 100644 index 000000000000..daa6e196712a --- /dev/null +++ b/third_party/rust/backtrace/src/print.rs @@ -0,0 +1,268 @@ +use crate::BytesOrWideString; +use core::ffi::c_void; +use core::fmt; + +const HEX_WIDTH: usize = 2 + 2 * core::mem::size_of::<usize>(); + +#[cfg(target_os = "fuchsia")] +mod fuchsia; + + + + + + +pub struct BacktraceFmt<'a, 'b> { + fmt: &'a mut fmt::Formatter<'b>, + frame_index: usize, + format: PrintFmt, + print_path: &'a mut (FnMut(&mut fmt::Formatter, BytesOrWideString) -> fmt::Result + 'b), +} + + +#[derive(Copy, Clone, Eq, PartialEq)] +pub enum PrintFmt { + + Short, + + Full, + #[doc(hidden)] + __Nonexhaustive, +} + +impl<'a, 'b> BacktraceFmt<'a, 'b> { + + + + + + + + pub fn new( + fmt: &'a mut fmt::Formatter<'b>, + format: PrintFmt, + print_path: &'a mut (FnMut(&mut fmt::Formatter, BytesOrWideString) -> fmt::Result + 'b), + ) -> Self { + BacktraceFmt { + fmt, + frame_index: 0, + format, + print_path, + } + } + + + + + + + pub fn add_context(&mut self) -> fmt::Result { + self.fmt.write_str("stack backtrace:\n")?; + #[cfg(target_os = "fuchsia")] + fuchsia::print_dso_context(self.fmt)?; + Ok(()) + } + + + + + + + pub fn frame(&mut self) -> BacktraceFrameFmt<'_, 'a, 'b> { + BacktraceFrameFmt { + fmt: self, + symbol_index: 0, + } + } + + + + + + pub fn finish(&mut self) -> fmt::Result
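Editor's aside: the lock() rewrite above moves the Option inside the guard, so re-entrant acquisition on one thread silently degrades to a no-op guard instead of forcing every caller to handle None. The idea in miniature, using the const Mutex::new of modern std in place of the lazy Box::into_raw dance:

    use std::cell::Cell;
    use std::sync::{Mutex, MutexGuard};

    static LOCK: Mutex<()> = Mutex::new(());
    thread_local!(static HELD: Cell<bool> = Cell::new(false));

    struct LockGuard(Option<MutexGuard<'static, ()>>);

    impl Drop for LockGuard {
        fn drop(&mut self) {
            if self.0.is_some() {
                HELD.with(|h| h.set(false));
            }
        }
    }

    fn lock() -> LockGuard {
        if HELD.with(|h| h.get()) {
            return LockGuard(None); // this thread already holds it: no-op guard
        }
        HELD.with(|h| h.set(true));
        LockGuard(Some(LOCK.lock().unwrap()))
    }

    fn main() {
        let _outer = lock();
        let _inner = lock(); // same thread: no deadlock
    }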
{ + + Ok(()) + } +} + + + + +pub struct BacktraceFrameFmt<'fmt, 'a, 'b> { + fmt: &'fmt mut BacktraceFmt<'a, 'b>, + symbol_index: usize, +} + +impl BacktraceFrameFmt<'_, '_, '_> { + + + + + + + + + + #[cfg(feature = "std")] + pub fn backtrace_frame(&mut self, frame: &crate::BacktraceFrame) -> fmt::Result { + let symbols = frame.symbols(); + for symbol in symbols { + self.backtrace_symbol(frame, symbol)?; + } + if symbols.is_empty() { + self.print_raw(frame.ip(), None, None, None)?; + } + Ok(()) + } + + + + + + + + #[cfg(feature = "std")] + pub fn backtrace_symbol( + &mut self, + frame: &crate::BacktraceFrame, + symbol: &crate::BacktraceSymbol, + ) -> fmt::Result { + self.print_raw( + frame.ip(), + symbol.name(), + + + + symbol + .filename() + .and_then(|p| Some(BytesOrWideString::Bytes(p.to_str()?.as_bytes()))), + symbol.lineno(), + )?; + Ok(()) + } + + + + pub fn symbol(&mut self, frame: &crate::Frame, symbol: &crate::Symbol) -> fmt::Result { + self.print_raw( + frame.ip(), + symbol.name(), + symbol.filename_raw(), + symbol.lineno(), + )?; + Ok(()) + } + + + + + + + pub fn print_raw( + &mut self, + frame_ip: *mut c_void, + symbol_name: Option<SymbolName>, + filename: Option<BytesOrWideString>, + lineno: Option<u32>, + ) -> fmt::Result { + + + + if cfg!(target_os = "fuchsia") { + self.print_raw_fuchsia(frame_ip)?; + } else { + self.print_raw_generic(frame_ip, symbol_name, filename, lineno)?; + } + self.symbol_index += 1; + Ok(()) + } + + #[allow(unused_mut)] + fn print_raw_generic( + &mut self, + mut frame_ip: *mut c_void, + symbol_name: Option<SymbolName>, + filename: Option<BytesOrWideString>, + lineno: Option<u32>, + ) -> fmt::Result { + + + if let PrintFmt::Short = self.fmt.format { + if frame_ip.is_null() { + return Ok(()); + } + } + + + + + #[cfg(all(feature = "std", target_env = "sgx"))] + { + let image_base = std::os::fortanix_sgx::mem::image_base(); + frame_ip = usize::wrapping_sub(frame_ip as usize, image_base as _) as _; + } + + + + + if self.symbol_index == 0 { + write!(self.fmt.fmt, "{:4}: ", self.fmt.frame_index)?; + if let PrintFmt::Full = self.fmt.format { + write!(self.fmt.fmt, "{:1$?} - ", frame_ip, HEX_WIDTH)?; + } + } else { + write!(self.fmt.fmt, "      ")?; + if let PrintFmt::Full = self.fmt.format { + write!(self.fmt.fmt, "{:1$}", "", HEX_WIDTH + 3)?; + } + } + + + + + match (symbol_name, &self.fmt.format) { + (Some(name), PrintFmt::Short) => write!(self.fmt.fmt, "{:#}", name)?, + (Some(name), PrintFmt::Full) => write!(self.fmt.fmt, "{}", name)?, + (None, _) | (_, PrintFmt::__Nonexhaustive) => write!(self.fmt.fmt, "<unknown>")?, + } + self.fmt.fmt.write_str("\n")?; + + + if let (Some(file), Some(line)) = (filename, lineno) { + self.print_fileline(file, line)?; + } + + Ok(()) + } + + fn print_fileline(&mut self, file: BytesOrWideString, line: u32) -> fmt::Result { + + + if let PrintFmt::Full = self.fmt.format { + write!(self.fmt.fmt, "{:1$}", "", HEX_WIDTH)?; + } + write!(self.fmt.fmt, "             at ")?; + + + + (self.fmt.print_path)(self.fmt.fmt, file)?; + write!(self.fmt.fmt, ":{}\n", line)?; + Ok(()) + } + + fn print_raw_fuchsia(&mut self, frame_ip: *mut c_void) -> fmt::Result { + + if self.symbol_index == 0 { + self.fmt.fmt.write_str("{{{bt:")?; + write!(self.fmt.fmt, "{}:{:?}", self.fmt.frame_index, frame_ip)?; + self.fmt.fmt.write_str("}}}\n")?; + } + Ok(()) + } +} + +impl Drop for BacktraceFrameFmt<'_, '_, '_> { + fn drop(&mut self) { + self.fmt.frame_index += 1; + } +} diff --git a/third_party/rust/backtrace/src/print/fuchsia.rs b/third_party/rust/backtrace/src/print/fuchsia.rs new file mode 100644 index 000000000000..76a176ab27e0 --- /dev/null +++
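Editor's aside: BacktraceFmt and BacktraceFrameFmt are public, so the printing machinery above can also be driven from outside the crate. A sketch assuming the backtrace crate with its std feature (into_path_buf is the same call the Debug impl earlier uses):

    use backtrace::{BacktraceFmt, BytesOrWideString, PrintFmt};
    use std::fmt;

    struct Styled(backtrace::Backtrace);

    impl fmt::Display for Styled {
        fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
            let mut print_path = |f: &mut fmt::Formatter, p: BytesOrWideString| {
                write!(f, "{}", p.into_path_buf().display())
            };
            let mut bf = BacktraceFmt::new(f, PrintFmt::Short, &mut print_path);
            bf.add_context()?;
            for frame in self.0.frames() {
                bf.frame().backtrace_frame(frame)?; // same call the Debug impl uses
            }
            bf.finish()
        }
    }

    fn main() {
        let mut bt = backtrace::Backtrace::new_unresolved();
        bt.resolve();
        println!("{}", Styled(bt));
    }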
b/third_party/rust/backtrace/src/print/fuchsia.rs @@ -0,0 +1,432 @@ +use core::fmt::{self, Write}; +use core::mem::{size_of, transmute}; +use core::slice::from_raw_parts; +use libc::c_char; + +extern "C" { + + + + + + + #[allow(improper_ctypes)] + fn dl_iterate_phdr( + f: extern "C" fn(info: &dl_phdr_info, size: usize, data: &mut DsoPrinter) -> i32, + data: &mut DsoPrinter, + ) -> i32; +} + + + + +const PT_LOAD: u32 = 1; +const PT_NOTE: u32 = 4; + + + + + + + + +#[allow(non_camel_case_types)] +#[repr(C)] +struct dl_phdr_info { + addr: *const u8, + name: *const c_char, + phdr: *const Elf_Phdr, + phnum: u16, + adds: u64, + subs: u64, + tls_modid: usize, + tls_data: *const u8, +} + +impl dl_phdr_info { + fn program_headers(&self) -> PhdrIter<'_> { + PhdrIter { + phdrs: self.phdr_slice(), + base: self.addr, + } + } + + + fn phdr_slice(&self) -> &[Elf_Phdr] { + unsafe { from_raw_parts(self.phdr, self.phnum as usize) } + } +} + +struct PhdrIter<'a> { + phdrs: &'a [Elf_Phdr], + base: *const u8, +} + +impl<'a> Iterator for PhdrIter<'a> { + type Item = Phdr<'a>; + fn next(&mut self) -> Option { + self.phdrs.split_first().map(|(phdr, new_phdrs)| { + self.phdrs = new_phdrs; + Phdr { + phdr, + base: self.base, + } + }) + } +} + + + +#[allow(non_camel_case_types)] +#[derive(Clone, Debug)] +#[repr(C)] +struct Elf_Phdr { + p_type: u32, + p_flags: u32, + p_offset: u64, + p_vaddr: u64, + p_paddr: u64, + p_filesz: u64, + p_memsz: u64, + p_align: u64, +} + + +struct Phdr<'a> { + phdr: &'a Elf_Phdr, + base: *const u8, +} + +impl<'a> Phdr<'a> { + + + + + + fn notes(&self) -> NoteIter<'a> { + unsafe { + NoteIter::new( + self.base.add(self.phdr.p_offset as usize), + self.phdr.p_memsz as usize, + ) + } + } +} + + +const NT_GNU_BUILD_ID: u32 = 3; + + +#[allow(non_camel_case_types)] +#[repr(C)] +struct Elf_Nhdr { + n_namesz: u32, + n_descsz: u32, + n_type: u32, +} + + + + +struct Note<'a> { + name: &'a [u8], + desc: &'a [u8], + tipe: u32, +} + + + + +struct NoteIter<'a> { + base: &'a [u8], + error: bool, +} + +impl<'a> NoteIter<'a> { + + + + unsafe fn new(base: *const u8, size: usize) -> Self { + NoteIter { + base: from_raw_parts(base, size), + error: false, + } + } +} + + + + + +fn align_to(x: usize, to: usize) -> usize { + (x + to - 1) & (!to + 1) +} + + + + + + +fn take_bytes_align4<'a>(num: usize, bytes: &mut &'a [u8]) -> Option<&'a [u8]> { + if bytes.len() < align_to(num, 4) { + return None; + } + let (out, bytes_new) = bytes.split_at(num); + *bytes = &bytes_new[align_to(num, 4) - num..]; + Some(out) +} + + + + + +fn take_nhdr<'a>(bytes: &mut &'a [u8]) -> Option<&'a Elf_Nhdr> { + if size_of::() > bytes.len() { + return None; + } + + + let out = unsafe { transmute::<*const u8, &'a Elf_Nhdr>(bytes.as_ptr()) }; + + *bytes = &bytes[size_of::()..]; + Some(out) +} + +impl<'a> Iterator for NoteIter<'a> { + type Item = Note<'a>; + fn next(&mut self) -> Option { + + if self.base.len() == 0 || self.error { + return None; + } + + + + + let nhdr = take_nhdr(&mut self.base)?; + let name = take_bytes_align4(nhdr.n_namesz as usize, &mut self.base)?; + let desc = take_bytes_align4(nhdr.n_descsz as usize, &mut self.base)?; + Some(Note { + name: name, + desc: desc, + tipe: nhdr.n_type, + }) + } +} + +struct Perm(u32); + + +const PERM_X: u32 = 0b00000001; + +const PERM_W: u32 = 0b00000010; + +const PERM_R: u32 = 0b00000100; + +impl core::fmt::Display for Perm { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + let v = self.0; + if v & PERM_R != 0 { + f.write_char('r')? + } + if v & PERM_W != 0 { + f.write_char('w')? 
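Editor's aside: align_to above leans on !to + 1 being two's-complement negation, so for a power-of-two `to` it is the usual round-up-then-mask. A self-contained check:

    fn align_to(x: usize, to: usize) -> usize {
        (x + to - 1) & (!to + 1) // same as (x + to - 1) & to.wrapping_neg()
    }

    fn main() {
        assert_eq!(align_to(5, 4), 8);
        assert_eq!(align_to(8, 4), 8);
        assert_eq!(align_to(0, 4), 0);
    }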
+ } + if v & PERM_X != 0 { + f.write_char('x')? + } + Ok(()) + } +} + + +struct Segment { + + addr: usize, + + size: usize, + + mod_rel_addr: usize, + + + flags: Perm, +} + + +struct SegmentIter<'a> { + phdrs: &'a [Elf_Phdr], + base: usize, +} + +impl Iterator for SegmentIter<'_> { + type Item = Segment; + + fn next(&mut self) -> Option { + self.phdrs.split_first().and_then(|(phdr, new_phdrs)| { + self.phdrs = new_phdrs; + if phdr.p_type != PT_LOAD { + self.next() + } else { + Some(Segment { + addr: phdr.p_vaddr as usize + self.base, + size: phdr.p_memsz as usize, + mod_rel_addr: phdr.p_vaddr as usize, + flags: Perm(phdr.p_flags), + }) + } + }) + } +} + + + +struct Dso<'a> { + + + + name: &'a str, + + + + + build_id: &'a [u8], + + base: usize, + phdrs: &'a [Elf_Phdr], +} + +impl Dso<'_> { + + fn segments(&self) -> SegmentIter<'_> { + SegmentIter { + phdrs: self.phdrs.as_ref(), + base: self.base, + } + } +} + +struct HexSlice<'a> { + bytes: &'a [u8], +} + +impl fmt::Display for HexSlice<'_> { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + for byte in self.bytes { + write!(f, "{:x}", byte)?; + } + Ok(()) + } +} + +fn get_build_id<'a>(info: &'a dl_phdr_info) -> Option<&'a [u8]> { + for phdr in info.program_headers() { + if phdr.phdr.p_type == PT_NOTE { + for note in phdr.notes() { + if note.tipe == NT_GNU_BUILD_ID && (note.name == b"GNU\0" || note.name == b"GNU") { + return Some(note.desc); + } + } + } + } + None +} + + + +enum Error { + + + NameError(core::str::Utf8Error), + + + + BuildIDError, +} + + + + + + + +fn for_each_dso(mut visitor: &mut DsoPrinter) { + extern "C" fn callback(info: &dl_phdr_info, _size: usize, visitor: &mut DsoPrinter) -> i32 { + + + let name_len = unsafe { libc::strlen(info.name) }; + let name_slice: &[u8] = + unsafe { core::slice::from_raw_parts(info.name as *const u8, name_len) }; + let name = match core::str::from_utf8(name_slice) { + Ok(name) => name, + Err(err) => { + return visitor.error(Error::NameError(err)) as i32; + } + }; + let build_id = match get_build_id(info) { + Some(build_id) => build_id, + None => { + return visitor.error(Error::BuildIDError) as i32; + } + }; + visitor.dso(Dso { + name: name, + build_id: build_id, + phdrs: info.phdr_slice(), + base: info.addr as usize, + }) as i32 + } + unsafe { dl_iterate_phdr(callback, &mut visitor) }; +} + +struct DsoPrinter<'a, 'b> { + writer: &'a mut core::fmt::Formatter<'b>, + module_count: usize, + error: core::fmt::Result, +} + +impl DsoPrinter<'_, '_> { + fn dso(&mut self, dso: Dso<'_>) -> bool { + let mut write = || { + write!( + self.writer, + "{{{{{{module:{:#x}:{}:elf:{}}}}}}}\n", + self.module_count, + dso.name, + HexSlice { + bytes: dso.build_id.as_ref() + } + )?; + for seg in dso.segments() { + write!( + self.writer, + "{{{{{{mmap:{:#x}:{:#x}:load:{:#x}:{}:{:#x}}}}}}}\n", + seg.addr, seg.size, self.module_count, seg.flags, seg.mod_rel_addr + )?; + } + self.module_count += 1; + Ok(()) + }; + match write() { + Ok(()) => false, + Err(err) => { + self.error = Err(err); + true + } + } + } + fn error(&mut self, _error: Error) -> bool { + false + } +} + + +pub fn print_dso_context(out: &mut core::fmt::Formatter<'_>) -> core::fmt::Result { + out.write_str("{{{reset}}}\n")?; + let mut visitor = DsoPrinter { + writer: out, + module_count: 0, + error: Ok(()), + }; + for_each_dso(&mut visitor); + visitor.error +} diff --git a/third_party/rust/backtrace/src/symbolize/coresymbolication.rs b/third_party/rust/backtrace/src/symbolize/coresymbolication.rs index e90bbed18f62..7f0af9de10a2 100644 --- 
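Editor's aside: the DsoPrinter above does no in-process symbolication; it emits Fuchsia's offline symbolizer markup. The brace escaping in its format strings is easy to misread, so this sketch (with made-up module and frame values) shows the literal line shapes produced:

    fn main() {
        println!("{{{{{{reset}}}}}}");
        println!("{{{{{{module:{:#x}:{}:elf:{}}}}}}}", 0, "libdemo.so", "deadbeef");
        println!("{{{{{{bt:{}:{:#x}}}}}}}", 0, 0x1042);
        // prints:
        //   {{{reset}}}
        //   {{{module:0x0:libdemo.so:elf:deadbeef}}}
        //   {{{bt:0:0x1042}}}
    }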
a/third_party/rust/backtrace/src/symbolize/coresymbolication.rs +++ b/third_party/rust/backtrace/src/symbolize/coresymbolication.rs @@ -8,27 +8,38 @@ -#![allow(bad_style)] -use std::ffi::{CStr, OsStr}; -use std::mem; -use std::os::raw::{c_void, c_char, c_int}; -use std::os::unix::prelude::*; -use std::path::Path; -use std::ptr; -use std::sync::atomic::ATOMIC_USIZE_INIT; -use libc::{self, Dl_info}; -use SymbolName; -use dylib::Dylib; -use dylib::Symbol as DylibSymbol; + + + + + + + + + + + + +#![allow(bad_style)] + +use crate::symbolize::dladdr; +use crate::symbolize::ResolveWhat; +use crate::types::BytesOrWideString; +use crate::SymbolName; +use core::ffi::c_void; +use core::mem; +use core::ptr; +use core::slice; +use libc::{c_char, c_int}; #[repr(C)] #[derive(Copy, Clone, PartialEq)] pub struct CSTypeRef { cpp_data: *const c_void, - cpp_obj: *const c_void + cpp_obj: *const c_void, } const CS_NOW: u64 = 0x80000000; @@ -37,27 +48,28 @@ const CSREF_NULL: CSTypeRef = CSTypeRef { cpp_obj: 0 as *const c_void, }; -pub enum Symbol { +pub enum Symbol<'a> { Core { path: *const c_char, lineno: u32, name: *const c_char, addr: *mut c_void, }, - Dladdr(Dl_info), + Dladdr(dladdr::Symbol<'a>), } -impl Symbol { +impl Symbol<'_> { pub fn name(&self) -> Option { let name = match *self { Symbol::Core { name, .. } => name, - Symbol::Dladdr(ref info) => info.dli_sname, + Symbol::Dladdr(ref info) => return info.name(), }; if name.is_null() { None } else { Some(SymbolName::new(unsafe { - CStr::from_ptr(name).to_bytes() + let len = libc::strlen(name); + slice::from_raw_parts(name as *const u8, len) })) } } @@ -65,25 +77,39 @@ impl Symbol { pub fn addr(&self) -> Option<*mut c_void> { match *self { Symbol::Core { addr, .. } => Some(addr), - Symbol::Dladdr(ref info) => Some(info.dli_saddr as *mut _), + Symbol::Dladdr(ref info) => info.addr(), } } - pub fn filename(&self) -> Option<&Path> { + fn filename_bytes(&self) -> Option<&[u8]> { match *self { Symbol::Core { path, .. } => { if path.is_null() { None } else { - Some(Path::new(OsStr::from_bytes(unsafe { - CStr::from_ptr(path).to_bytes() - }))) + Some(unsafe { + let len = libc::strlen(path); + slice::from_raw_parts(path as *const u8, len) + }) } } Symbol::Dladdr(_) => None, } } + pub fn filename_raw(&self) -> Option { + self.filename_bytes().map(BytesOrWideString::Bytes) + } + + #[cfg(feature = "std")] + pub fn filename(&self) -> Option<&::std::path::Path> { + use std::ffi::OsStr; + use std::os::unix::prelude::*; + use std::path::Path; + + self.filename_bytes().map(OsStr::from_bytes).map(Path::new) + } + pub fn lineno(&self) -> Option { match *self { Symbol::Core { lineno: 0, .. } => None, @@ -93,25 +119,75 @@ impl Symbol { } } -static CORESYMBOLICATION: Dylib = Dylib { init: ATOMIC_USIZE_INIT }; - -macro_rules! dlsym { - (extern { - $(fn $name:ident($($arg:ident: $t:ty),*) -> $ret:ty;)* - }) => ($( - static $name: ::dylib::Symbol $ret> = - ::dylib::Symbol { - name: concat!(stringify!($name), "\0"), - addr: ::std::sync::atomic::ATOMIC_USIZE_INIT, - _marker: ::std::marker::PhantomData, - }; - )*) +macro_rules! 
coresymbolication { + (#[load_path = $path:tt] extern "C" { + $(fn $name:ident($($arg:ident: $argty:ty),*) -> $ret: ty;)* + }) => ( + pub struct CoreSymbolication { + // The loaded dynamic library + dll: *mut c_void, + + // Each function pointer for each function we might use + $($name: usize,)* + } + + static mut CORESYMBOLICATION: CoreSymbolication = CoreSymbolication { + // Initially we haven't loaded the dynamic library + dll: 0 as *mut _, + // Initiall all functions are set to zero to say they need to be + // dynamically loaded. + $($name: 0,)* + }; + + // Convenience typedef for each function type. + $(pub type $name = unsafe extern "C" fn($($argty),*) -> $ret;)* + + impl CoreSymbolication { + /// Attempts to open `dbghelp.dll`. Returns `true` if it works or + /// `false` if `dlopen` fails. + fn open(&mut self) -> bool { + if !self.dll.is_null() { + return true; + } + let lib = concat!($path, "\0").as_bytes(); + unsafe { + self.dll = libc::dlopen(lib.as_ptr() as *const _, libc::RTLD_LAZY); + !self.dll.is_null() + } + } + + // Function for each method we'd like to use. When called it will + // either read the cached function pointer or load it and return the + // loaded value. Loads are asserted to succeed. + $(pub fn $name(&mut self) -> $name { + unsafe { + if self.$name == 0 { + let name = concat!(stringify!($name), "\0"); + self.$name = self.symbol(name.as_bytes()) + .expect(concat!("symbol ", stringify!($name), " is missing")); + } + mem::transmute::(self.$name) + } + })* + + fn symbol(&self, symbol: &[u8]) -> Option { + unsafe { + match libc::dlsym(self.dll, symbol.as_ptr() as *const _) as usize { + 0 => None, + n => Some(n), + } + } + } + } + ) } -dlsym! { - extern { +coresymbolication! { + #[load_path = "/System/Library/PrivateFrameworks/CoreSymbolication.framework\ + /Versions/A/CoreSymbolication"] + extern "C" { fn CSSymbolicatorCreateWithPid(pid: c_int) -> CSTypeRef; - fn CSRelease(rf: CSTypeRef) -> c_void; + fn CSRelease(rf: CSTypeRef) -> (); fn CSSymbolicatorGetSymbolWithAddressAtTime( cs: CSTypeRef, addr: *const c_void, time: u64) -> CSTypeRef; fn CSSymbolicatorGetSourceInfoWithAddressAtTime( @@ -125,68 +201,77 @@ dlsym! 
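Editor's aside: the coresymbolication! macro above mirrors the Windows dbghelp! loader, but over dlopen/dlsym. Stripped of the function-pointer caching, the core is just this sketch (assuming the libc crate; libm is used as a stand-in library so the demo runs on Linux, not CoreSymbolication itself):

    use std::os::raw::c_void;

    unsafe fn load(lib: &[u8], sym: &[u8]) -> Option<*mut c_void> {
        // Both byte strings must be NUL-terminated, exactly as in the macro above.
        let dll = libc::dlopen(lib.as_ptr() as *const _, libc::RTLD_LAZY);
        if dll.is_null() {
            return None;
        }
        let p = libc::dlsym(dll, sym.as_ptr() as *const _);
        if p.is_null() { None } else { Some(p) }
    }

    fn main() {
        let cos = unsafe { load(b"libm.so.6\0", b"cos\0") };
        println!("resolved cos: {}", cos.is_some());
    }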
{ } } -unsafe fn get(sym: &DylibSymbol) -> &T { - CORESYMBOLICATION.get(sym).unwrap() -} - unsafe fn try_resolve(addr: *mut c_void, cb: &mut FnMut(&super::Symbol)) -> bool { - let path = "/System/Library/PrivateFrameworks/CoreSymbolication.framework\ - /Versions/A/CoreSymbolication"; - if !CORESYMBOLICATION.init(path) { + + + let lib = &mut CORESYMBOLICATION; + if !lib.open() { return false; } - let cs = get(&CSSymbolicatorCreateWithPid)(libc::getpid()); + let cs = lib.CSSymbolicatorCreateWithPid()(libc::getpid()); if cs == CSREF_NULL { - return false + return false; } + let _dtor = OwnedCSTypeRef { + ptr: cs, + CSRelease: lib.CSRelease(), + }; - let info = get(&CSSymbolicatorGetSourceInfoWithAddressAtTime)( - cs, addr, CS_NOW); + let info = lib.CSSymbolicatorGetSourceInfoWithAddressAtTime()(cs, addr, CS_NOW); let sym = if info == CSREF_NULL { - get(&CSSymbolicatorGetSymbolWithAddressAtTime)(cs, addr, CS_NOW) + lib.CSSymbolicatorGetSymbolWithAddressAtTime()(cs, addr, CS_NOW) } else { - get(&CSSourceInfoGetSymbol)(info) + lib.CSSourceInfoGetSymbol()(info) }; - - let mut rv = false; - if sym != CSREF_NULL { - let owner = get(&CSSymbolGetSymbolOwner)(sym); - if owner != CSREF_NULL { - cb(&super::Symbol { - inner: Symbol::Core { - path: if info != CSREF_NULL { - get(&CSSourceInfoGetPath)(info) - } else { - ptr::null() - }, - lineno: if info != CSREF_NULL { - get(&CSSourceInfoGetLineNumber)(info) as u32 - } else { - 0 - }, - name: get(&CSSymbolGetMangledName)(sym), - addr: get(&CSSymbolOwnerGetBaseAddress)(owner), - }, - }); - rv = true; - } + if sym == CSREF_NULL { + return false; + } + let owner = lib.CSSymbolGetSymbolOwner()(sym); + if owner == CSREF_NULL { + return false; } - get(&CSRelease)(cs); - rv + cb(&super::Symbol { + inner: Symbol::Core { + path: if info != CSREF_NULL { + lib.CSSourceInfoGetPath()(info) + } else { + ptr::null() + }, + lineno: if info != CSREF_NULL { + lib.CSSourceInfoGetLineNumber()(info) as u32 + } else { + 0 + }, + name: lib.CSSymbolGetMangledName()(sym), + addr: lib.CSSymbolOwnerGetBaseAddress()(owner), + }, + }); + true } -pub fn resolve(addr: *mut c_void, cb: &mut FnMut(&super::Symbol)) { - unsafe { - if try_resolve(addr, cb) { - return - } - let mut info: Dl_info = mem::zeroed(); - if libc::dladdr(addr as *mut _, &mut info) != 0 { - cb(&super::Symbol { - inner: Symbol::Dladdr(info), - }); +struct OwnedCSTypeRef { + ptr: CSTypeRef, + CSRelease: unsafe extern "C" fn(CSTypeRef), +} + +impl Drop for OwnedCSTypeRef { + fn drop(&mut self) { + unsafe { + (self.CSRelease)(self.ptr); } } } + +pub unsafe fn resolve(what: ResolveWhat, cb: &mut FnMut(&super::Symbol)) { + let addr = what.address_or_ip(); + if try_resolve(addr, cb) { + return; + } + dladdr::resolve(addr, &mut |sym| { + cb(&super::Symbol { + inner: Symbol::Dladdr(sym), + }) + }) +} diff --git a/third_party/rust/backtrace/src/symbolize/dbghelp.rs b/third_party/rust/backtrace/src/symbolize/dbghelp.rs index 86b56699fd24..cb81d6767ad5 100644 --- a/third_party/rust/backtrace/src/symbolize/dbghelp.rs +++ b/third_party/rust/backtrace/src/symbolize/dbghelp.rs @@ -4,115 +4,223 @@ + + + + + + + + + + + + + + + + + #![allow(bad_style)] -use std::ffi::OsString; -use std::mem; -use std::path::Path; -use std::os::windows::prelude::*; -use std::slice; -use winapi::ctypes::*; -use winapi::shared::basetsd::*; -use winapi::shared::minwindef::*; -use winapi::um::processthreadsapi; -use winapi::um::dbghelp; -use winapi::um::dbghelp::*; - -use SymbolName; - -pub struct Symbol { - name: OsString, +use crate::backtrace::FrameImp 
as Frame; +use crate::dbghelp; +use crate::symbolize::ResolveWhat; +use crate::types::BytesOrWideString; +use crate::windows::*; +use crate::SymbolName; +use core::char; +use core::ffi::c_void; +use core::marker; +use core::mem; +use core::slice; + + +pub struct Symbol<'a> { + name: *const [u8], addr: *mut c_void, line: Option, - filename: Option, + filename: Option<*const [u16]>, + #[cfg(feature = "std")] + _filename_cache: Option<::std::ffi::OsString>, + #[cfg(not(feature = "std"))] + _filename_cache: (), + _marker: marker::PhantomData<&'a i32>, } -impl Symbol { +impl Symbol<'_> { pub fn name(&self) -> Option { - self.name.to_str().map(|s| SymbolName::new(s.as_bytes())) + Some(SymbolName::new(unsafe { &*self.name })) } pub fn addr(&self) -> Option<*mut c_void> { Some(self.addr as *mut _) } - pub fn filename(&self) -> Option<&Path> { - self.filename.as_ref().map(Path::new) + pub fn filename_raw(&self) -> Option { + self.filename + .map(|slice| unsafe { BytesOrWideString::Wide(&*slice) }) } pub fn lineno(&self) -> Option { self.line } + + #[cfg(feature = "std")] + pub fn filename(&self) -> Option<&::std::path::Path> { + use std::path::Path; + + self._filename_cache.as_ref().map(Path::new) + } } -pub fn resolve(addr: *mut c_void, cb: &mut FnMut(&super::Symbol)) { - +#[repr(C, align(8))] +struct Aligned8(T); + +pub unsafe fn resolve(what: ResolveWhat, cb: &mut FnMut(&super::Symbol)) { - let _g = ::lock::lock(); - - unsafe { - let size = 2 * MAX_SYM_NAME + mem::size_of::(); - let mut data = vec![0u8; size]; - let info = &mut *(data.as_mut_ptr() as *mut SYMBOL_INFOW); - info.MaxNameLen = MAX_SYM_NAME as ULONG; - - - - info.SizeOfStruct = 88; - - let _c = ::dbghelp_init(); - - let mut displacement = 0u64; - let ret = dbghelp::SymFromAddrW(processthreadsapi::GetCurrentProcess(), - addr as DWORD64, - &mut displacement, - info); - if ret != TRUE { - return - } + let dbghelp = match dbghelp::init() { + Ok(dbghelp) => dbghelp, + Err(()) => return, + }; + + match what { + ResolveWhat::Address(_) => resolve_without_inline(&dbghelp, what.address_or_ip(), cb), + ResolveWhat::Frame(frame) => match &frame.inner { + Frame::New(frame) => resolve_with_inline(&dbghelp, frame, cb), + Frame::Old(_) => resolve_without_inline(&dbghelp, frame.ip(), cb), + }, + } +} + +unsafe fn resolve_with_inline( + dbghelp: &dbghelp::Init, + frame: &STACKFRAME_EX, + cb: &mut FnMut(&super::Symbol), +) { + do_resolve( + |info| { + dbghelp.SymFromInlineContextW()( + GetCurrentProcess(), + super::adjust_ip(frame.AddrPC.Offset as *mut _) as u64, + frame.InlineFrameContext, + &mut 0, + info, + ) + }, + |line| { + dbghelp.SymGetLineFromInlineContextW()( + GetCurrentProcess(), + super::adjust_ip(frame.AddrPC.Offset as *mut _) as u64, + frame.InlineFrameContext, + 0, + &mut 0, + line, + ) + }, + cb, + ) +} + +unsafe fn resolve_without_inline( + dbghelp: &dbghelp::Init, + addr: *mut c_void, + cb: &mut FnMut(&super::Symbol), +) { + do_resolve( + |info| dbghelp.SymFromAddrW()(GetCurrentProcess(), addr as DWORD64, &mut 0, info), + |line| dbghelp.SymGetLineFromAddrW64()(GetCurrentProcess(), addr as DWORD64, &mut 0, line), + cb, + ) +} + +unsafe fn do_resolve( + sym_from_addr: impl FnOnce(*mut SYMBOL_INFOW) -> BOOL, + get_line_from_addr: impl FnOnce(&mut IMAGEHLP_LINEW64) -> BOOL, + cb: &mut FnMut(&super::Symbol), +) { + const SIZE: usize = 2 * MAX_SYM_NAME + mem::size_of::(); + let mut data = Aligned8([0u8; SIZE]); + let data = &mut data.0; + let info = &mut *(data.as_mut_ptr() as *mut SYMBOL_INFOW); + info.MaxNameLen = MAX_SYM_NAME as ULONG; 
+ // the struct size in C. the value is different to + // `size_of::() - MAX_SYM_NAME + 1` (== 81) + // due to struct alignment. + info.SizeOfStruct = 88; + + if sym_from_addr(info) != TRUE { + return; + } - - - - let name_len = ::std::cmp::min(info.NameLen as usize, - info.MaxNameLen as usize - 1); - - let name = slice::from_raw_parts(info.Name.as_ptr() as *const u16, - name_len); - let name = OsString::from_wide(name); - - let mut line = mem::zeroed::(); - line.SizeOfStruct = mem::size_of::() as DWORD; - let mut displacement = 0; - let ret = dbghelp::SymGetLineFromAddrW64(processthreadsapi::GetCurrentProcess(), - addr as DWORD64, - &mut displacement, - &mut line); - let mut filename = None; - let mut lineno = None; - if ret == TRUE { - lineno = Some(line.LineNumber as u32); - - let base = line.FileName; - let mut len = 0; - while *base.offset(len) != 0 { - len += 1; + // If the symbol name is greater than MaxNameLen, SymFromAddrW will + // give a buffer of (MaxNameLen - 1) characters and set NameLen to + // the real value. + let name_len = ::core::cmp::min(info.NameLen as usize, info.MaxNameLen as usize - 1); + let name_ptr = info.Name.as_ptr() as *const u16; + let name = slice::from_raw_parts(name_ptr, name_len); + + // Reencode the utf-16 symbol to utf-8 so we can use `SymbolName::new` like + // all other platforms + let mut name_len = 0; + let mut name_buffer = [0; 256]; + { + let mut remaining = &mut name_buffer[..]; + for c in char::decode_utf16(name.iter().cloned()) { + let c = c.unwrap_or(char::REPLACEMENT_CHARACTER); + let len = c.len_utf8(); + if len < remaining.len() { + c.encode_utf8(remaining); + let tmp = remaining; + remaining = &mut tmp[len..]; + name_len += len; + } else { + break; } - let name = slice::from_raw_parts(base, len as usize); - filename = Some(OsString::from_wide(name)); + } + } + let name = &name_buffer[..name_len] as *const [u8]; + + let mut line = mem::zeroed::(); + line.SizeOfStruct = mem::size_of::() as DWORD; + + let mut filename = None; + let mut lineno = None; + if get_line_from_addr(&mut line) == TRUE { + lineno = Some(line.LineNumber as u32); + + let base = line.FileName; + let mut len = 0; + while *base.offset(len) != 0 { + len += 1; } - cb(&super::Symbol { - inner: Symbol { - name: name, - addr: info.Address as *mut _, - line: lineno, - filename: filename, - }, - }) + let len = len as usize; + + filename = Some(slice::from_raw_parts(base, len) as *const [u16]); } + + cb(&super::Symbol { + inner: Symbol { + name, + addr: info.Address as *mut _, + line: lineno, + filename, + _filename_cache: cache(filename), + _marker: marker::PhantomData, + }, + }) } + +#[cfg(feature = "std")] +unsafe fn cache(filename: Option<*const [u16]>) -> Option<::std::ffi::OsString> { + use std::os::windows::ffi::OsStringExt; + filename.map(|f| ::std::ffi::OsString::from_wide(&*f)) +} + +#[cfg(not(feature = "std"))] +unsafe fn cache(_filename: Option<*const [u16]>) {} diff --git a/third_party/rust/backtrace/src/symbolize/dladdr.rs b/third_party/rust/backtrace/src/symbolize/dladdr.rs index 5bd3ab815a2f..1794382de842 100644 --- a/third_party/rust/backtrace/src/symbolize/dladdr.rs +++ b/third_party/rust/backtrace/src/symbolize/dladdr.rs @@ -8,52 +8,105 @@ -use std::ffi::CStr; -use std::mem; -use std::os::raw::c_void; -use std::path::Path; -use libc::{self, Dl_info}; -use SymbolName; -pub struct Symbol { - inner: Dl_info, -} +#![allow(dead_code)] + +cfg_if::cfg_if! 
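Back in `do_resolve` above, the name loop re-encodes the UTF-16 symbol into a fixed 256-byte UTF-8 buffer so that `SymbolName::new` can be shared with every other platform without allocating. A safe-Rust sketch of that bounded re-encode (function name hypothetical):

```rust
use std::char;

// Bounded UTF-16 -> UTF-8 re-encode: invalid units become U+FFFD, and
// encoding stops once the fixed buffer cannot hold the next character
// in full. Returns the number of bytes written.
fn reencode(wide: &[u16], out: &mut [u8]) -> usize {
    let mut written = 0;
    for c in char::decode_utf16(wide.iter().copied()) {
        let c = c.unwrap_or(char::REPLACEMENT_CHARACTER);
        let len = c.len_utf8();
        if written + len > out.len() {
            break;
        }
        c.encode_utf8(&mut out[written..]);
        written += len;
    }
    written
}

fn main() {
    let wide: Vec<u16> = "Déjà_vu".encode_utf16().collect();
    let mut buf = [0u8; 256];
    let n = reencode(&wide, &mut buf);
    assert_eq!(std::str::from_utf8(&buf[..n]).unwrap(), "Déjà_vu");
}
```

Truncation is deliberate: a final character that does not fit is dropped whole rather than split mid-sequence.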
{ + if #[cfg(all(unix, not(target_os = "emscripten"), feature = "dladdr"))] { + use core::ffi::c_void; + use core::marker; + use core::{mem, slice}; + use crate::SymbolName; + use crate::types::BytesOrWideString; + use libc::{self, Dl_info}; -impl Symbol { - pub fn name(&self) -> Option { - if self.inner.dli_sname.is_null() { - None - } else { - Some(SymbolName::new(unsafe { - CStr::from_ptr(self.inner.dli_sname).to_bytes() - })) + pub struct Symbol<'a> { + inner: Dl_info, + _marker: marker::PhantomData<&'a i32>, } - } - pub fn addr(&self) -> Option<*mut c_void> { - Some(self.inner.dli_saddr as *mut _) - } + impl Symbol<'_> { + pub fn name(&self) -> Option { + if self.inner.dli_sname.is_null() { + None + } else { + let ptr = self.inner.dli_sname as *const u8; + unsafe { + let len = libc::strlen(self.inner.dli_sname); + Some(SymbolName::new(slice::from_raw_parts(ptr, len))) + } + } + } + + pub fn addr(&self) -> Option<*mut c_void> { + Some(self.inner.dli_saddr as *mut _) + } + + pub fn filename_raw(&self) -> Option { + None + } + + #[cfg(feature = "std")] + pub fn filename(&self) -> Option<&::std::path::Path> { + None + } + + pub fn lineno(&self) -> Option { + None + } + } - pub fn filename(&self) -> Option<&Path> { - None - } + pub unsafe fn resolve(addr: *mut c_void, cb: &mut FnMut(Symbol<'static>)) { + let mut info = Symbol { + inner: mem::zeroed(), + _marker: marker::PhantomData, + }; + // Skip null addresses to avoid calling into libc and having it do + // things with the dynamic symbol table for no reason. + if !addr.is_null() && libc::dladdr(addr as *mut _, &mut info.inner) != 0 { + cb(info) + } + } + } else { + use core::ffi::c_void; + use core::marker; + use crate::symbolize::SymbolName; + use crate::types::BytesOrWideString; + + pub struct Symbol<'a> { + a: Void, + _b: marker::PhantomData<&'a i32>, + } - pub fn lineno(&self) -> Option { - None - } -} + enum Void {} -pub fn resolve(addr: *mut c_void, cb: &mut FnMut(&super::Symbol)) { - unsafe { - let mut info: super::Symbol = super::Symbol { - inner: Symbol { - inner: mem::zeroed(), - }, - }; - if libc::dladdr(addr as *mut _, &mut info.inner.inner) != 0 { - cb(&info) + impl Symbol<'_> { + pub fn name(&self) -> Option { + match self.a {} + } + + pub fn addr(&self) -> Option<*mut c_void> { + match self.a {} + } + + pub fn filename_raw(&self) -> Option { + match self.a {} + } + + #[cfg(feature = "std")] + pub fn filename(&self) -> Option<&::std::path::Path> { + match self.a {} + } + + pub fn lineno(&self) -> Option { + match self.a {} + } + } + + pub unsafe fn resolve(addr: *mut c_void, cb: &mut FnMut(Symbol<'static>)) { + drop((addr, cb)); } } } diff --git a/third_party/rust/backtrace/src/symbolize/dladdr_resolve.rs b/third_party/rust/backtrace/src/symbolize/dladdr_resolve.rs new file mode 100644 index 000000000000..d8e4d262c431 --- /dev/null +++ b/third_party/rust/backtrace/src/symbolize/dladdr_resolve.rs @@ -0,0 +1,50 @@ + + + + + + + + + + + + + + + + +use crate::symbolize::{dladdr, ResolveWhat, SymbolName}; +use crate::types::BytesOrWideString; +use core::ffi::c_void; + +pub struct Symbol<'a>(dladdr::Symbol<'a>); + +impl Symbol<'_> { + pub fn name(&self) -> Option { + self.0.name() + } + + pub fn addr(&self) -> Option<*mut c_void> { + self.0.addr() + } + + pub fn filename_raw(&self) -> Option { + self.0.filename_raw() + } + + #[cfg(feature = "std")] + pub fn filename(&self) -> Option<&::std::path::Path> { + self.0.filename() + } + + pub fn lineno(&self) -> Option { + self.0.lineno() + } +} + +pub unsafe fn resolve(what: 
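Everything above ultimately leans on a single libc call. A minimal unix-only sketch of `libc::dladdr` itself, assuming the `libc` crate is available:

```rust
use std::ffi::CStr;
use std::mem;

fn main() {
    // Symbolize our own entry point. dladdr only consults the dynamic
    // symbol table, so statically linked symbols may come back null.
    let addr = main as fn() as *const libc::c_void;
    let mut info: libc::Dl_info = unsafe { mem::zeroed() };
    if unsafe { libc::dladdr(addr, &mut info) } != 0 && !info.dli_sname.is_null() {
        let name = unsafe { CStr::from_ptr(info.dli_sname) };
        println!("symbol: {}", name.to_string_lossy());
    }
}
```

As the stub branch above suggests, on platforms without `dladdr` the same API surface is kept alive by an uninhabited type, so callers compile unchanged and every accessor is statically unreachable.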
ResolveWhat, cb: &mut FnMut(&super::Symbol)) { + dladdr::resolve(what.address_or_ip(), &mut |sym| { + cb(&super::Symbol { inner: Symbol(sym) }) + }); +} diff --git a/third_party/rust/backtrace/src/symbolize/gimli.rs b/third_party/rust/backtrace/src/symbolize/gimli.rs index 4afa71e23fd2..d4761bfcce18 100644 --- a/third_party/rust/backtrace/src/symbolize/gimli.rs +++ b/third_party/rust/backtrace/src/symbolize/gimli.rs @@ -1,112 +1,374 @@ -use addr2line; + + + + + + +use self::gimli::read::EndianSlice; +use self::gimli::LittleEndian as Endian; +use crate::symbolize::dladdr; +use crate::symbolize::ResolveWhat; +use crate::types::BytesOrWideString; +use crate::SymbolName; +use addr2line::gimli; +use core::convert::TryFrom; +use core::mem; +use core::u32; use findshlibs::{self, Segment, SharedLibrary}; -use gimli; +use libc::c_void; use memmap::Mmap; -use object::{self, Object}; -use std::cell::RefCell; use std::env; +use std::ffi::OsString; use std::fs::File; -use std::mem; -use std::os::raw::c_void; -use std::path::{Path, PathBuf}; -use std::u32; - -use SymbolName; +use std::path::Path; +use std::prelude::v1::*; const MAPPINGS_CACHE_SIZE: usize = 4; -type Dwarf<'map> = addr2line::Context>; -type Symbols<'map> = object::SymbolMap<'map>; +struct Context<'a> { + dwarf: addr2line::Context>, + object: Object<'a>, +} struct Mapping { - dwarf: Dwarf<'static>, - symbols: Symbols<'static>, + cx: Context<'static>, _map: Mmap, } -impl Mapping { - fn new(path: &PathBuf) -> Option { - let file = File::open(path).ok()?; - - let map = unsafe { Mmap::map(&file).ok()? }; - let (dwarf, symbols) = { - let object = object::File::parse(&*map).ok()?; - let dwarf = addr2line::Context::new(&object).ok()?; - let symbols = object.symbol_map(); - - unsafe { (mem::transmute(dwarf), mem::transmute(symbols)) } - }; - Some(Mapping { - dwarf, - symbols, - _map: map, - }) - } - - - fn rent(&self, mut f: F) +fn cx<'data>(object: Object<'data>) -> Option> { + fn load_section<'data, S>(obj: &Object<'data>) -> S where - F: FnMut(&Dwarf, &Symbols), + S: gimli::Section>, { - f(&self.dwarf, &self.symbols) + let data = obj.section(S::section_name()).unwrap_or(&[]); + S::from(EndianSlice::new(data, Endian)) } + + let dwarf = addr2line::Context::from_sections( + load_section(&object), + load_section(&object), + load_section(&object), + load_section(&object), + load_section(&object), + load_section(&object), + load_section(&object), + load_section(&object), + load_section(&object), + gimli::EndianSlice::new(&[], Endian), + ) + .ok()?; + Some(Context { dwarf, object }) } -thread_local! { - // A very small, very simple LRU cache for debug info mappings. - // - // The hit rate should be very high, since the typical stack doesn't cross - // between many shared libraries. - // - // The `addr2line::Context` structures are pretty expensive to create. Its - // cost is expected to be amortized by subsequent `locate` queries, which - // leverage the structures built when constructing `addr2line::Context`s to - // get nice speedups. If we didn't have this cache, that amortization would - // never happen, and symbolicating backtraces would be ssssllllooooowwww. - static MAPPINGS_CACHE: RefCell> - = RefCell::new(Vec::with_capacity(MAPPINGS_CACHE_SIZE)); +fn assert_lifetimes<'a>(_: &'a Mmap, _: &Context<'a>) {} + +macro_rules! mk { + (Mapping { $map:expr, $inner:expr }) => {{ + assert_lifetimes(&$map, &$inner); + Mapping { + // Convert to 'static lifetimes since the symbols should + // only borrow `map` and we're preserving `map` below. 
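That comment sets up the transmute on the next line, which is the heart of the new `Mapping`: the `addr2line::Context` borrows the `Mmap`, and both are bundled into one struct by laundering the borrow to `'static`. This is only sound because the map is never moved out or dropped early, and the `'static` view never escapes by value. A reduced sketch of the pattern:

```rust
use std::mem;

// A borrower: stands in for Context<'a>, which borrows the mmap'd bytes.
struct View<'a> {
    bytes: &'a [u8],
}

// Owner and borrower bundled together, as in the `Mapping` struct above.
struct Owned {
    view: View<'static>,
    _storage: Vec<u8>,
}

impl Owned {
    fn new(storage: Vec<u8>) -> Owned {
        let view = View { bytes: &storage[..] };
        // SAFETY: the heap allocation behind `storage` never moves or
        // dies while `Owned` is alive, and `view` is only ever lent out
        // by reference, so the extended lifetime cannot be observed.
        let view = unsafe { mem::transmute::<View<'_>, View<'static>>(view) };
        Owned { view, _storage: storage }
    }

    fn len(&self) -> usize {
        self.view.bytes.len()
    }
}

fn main() {
    let owned = Owned::new(vec![1, 2, 3]);
    assert_eq!(owned.len(), 3);
}
```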
+ cx: unsafe { mem::transmute::, Context<'static>>($inner) }, + _map: $map, + } + }}; } -fn with_mapping_for_path(path: PathBuf, f: F) -where - F: FnMut(&Dwarf, &Symbols), -{ - MAPPINGS_CACHE.with(|cache| { - let mut cache = cache.borrow_mut(); +fn mmap(path: &Path) -> Option { + let file = File::open(path).ok()?; + + unsafe { Mmap::map(&file).ok() } +} - let idx = cache.iter().position(|&(ref p, _)| p == &path); +cfg_if::cfg_if! { + if #[cfg(windows)] { + use std::cmp; + use goblin::pe::{self, PE}; + use goblin::strtab::Strtab; - - + struct Object<'a> { + pe: PE<'a>, + data: &'a [u8], + symbols: Vec<(usize, pe::symbol::Symbol)>, + strtab: Strtab<'a>, + } - if let Some(idx) = idx { - - if idx != 0 { - let entry = cache.remove(idx); - cache.insert(0, entry); + impl<'a> Object<'a> { + fn parse(data: &'a [u8]) -> Option> { + let pe = PE::parse(data).ok()?; + let syms = pe.header.coff_header.symbols(data).ok()?; + let strtab = pe.header.coff_header.strings(data).ok()?; + + // Collect all the symbols into a local vector which is sorted + // by address and contains enough data to learn about the symbol + // name. Note that we only look at function symbols and also + // note that the sections are 1-indexed because the zero section + // is special (apparently). + let mut symbols = Vec::new(); + for (_, _, sym) in syms.iter() { + if sym.derived_type() != pe::symbol::IMAGE_SYM_DTYPE_FUNCTION + || sym.section_number == 0 + { + continue; + } + let addr = usize::try_from(sym.value).ok()?; + let section = pe.sections.get(usize::try_from(sym.section_number).ok()? - 1)?; + let va = usize::try_from(section.virtual_address).ok()?; + symbols.push((addr + va + pe.image_base, sym)); + } + symbols.sort_unstable_by_key(|x| x.0); + Some(Object { pe, data, symbols, strtab }) } - } else { - - - - let mapping = match Mapping::new(&path) { - None => return, - Some(m) => m, + + fn section(&self, name: &str) -> Option<&'a [u8]> { + let section = self.pe + .sections + .iter() + .find(|section| section.name().ok() == Some(name)); + section + .and_then(|section| { + let offset = section.pointer_to_raw_data as usize; + let size = cmp::min(section.virtual_size, section.size_of_raw_data) as usize; + self.data.get(offset..).and_then(|data| data.get(..size)) + }) + } + + fn search_symtab<'b>(&'b self, addr: u64) -> Option<&'b [u8]> { + // Note that unlike other formats COFF doesn't embed the size of + // each symbol. As a last ditch effort search for the *closest* + // symbol to a particular address and return that one. This gets + // really wonky once symbols start getting removed because the + // symbols returned here can be totally incorrect, but we have + // no idea of knowing how to detect that. 
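That caveat is worth isolating. Because COFF records no symbol sizes, the implementation that follows resolves a query to the greatest symbol address that does not exceed it. A standalone sketch with hypothetical symbols and a usage check:

```rust
// "Closest preceding symbol" lookup over a list sorted by address.
fn closest<'a>(symbols: &[(usize, &'a str)], addr: usize) -> Option<&'a str> {
    let i = match symbols.binary_search_by_key(&addr, |&(a, _)| a) {
        Ok(i) => i,
        // `addr` itself is absent and `i` is its insertion point, so
        // the previous entry is the greatest address below `addr`.
        Err(i) => i.checked_sub(1)?,
    };
    Some(symbols[i].1)
}

fn main() {
    let syms = [(0x1000, "foo"), (0x2000, "bar"), (0x3000, "baz")];
    assert_eq!(closest(&syms, 0x2010), Some("bar"));
    assert_eq!(closest(&syms, 0x3000), Some("baz"));
    assert_eq!(closest(&syms, 0x0500), None);
}
```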
+ let addr = usize::try_from(addr).ok()?; + let i = match self.symbols.binary_search_by_key(&addr, |p| p.0) { + Ok(i) => i, + // typically `addr` isn't in the array, but `i` is where + // we'd insert it, so the previous position must be the + // greatest less than `addr` + Err(i) => i.checked_sub(1)?, + }; + Some(self.symbols[i].1.name(&self.strtab).ok()?.as_bytes()) + } + } + } else if #[cfg(target_os = "macos")] { + use goblin::mach::MachO; + + struct Object<'a> { + macho: MachO<'a>, + dwarf: Option, + } + + impl<'a> Object<'a> { + fn parse(macho: MachO<'a>) -> Option> { + if !macho.little_endian { + return None; + } + let dwarf = macho + .segments + .iter() + .enumerate() + .find(|(_, segment)| segment.name().ok() == Some("__DWARF")) + .map(|p| p.0); + Some(Object { macho, dwarf }) + } + + fn section(&self, name: &str) -> Option<&'a [u8]> { + let dwarf = self.dwarf?; + let dwarf = &self.macho.segments[dwarf]; + dwarf + .into_iter() + .filter_map(|s| s.ok()) + .find(|(section, _data)| { + let section_name = match section.name() { + Ok(s) => s, + Err(_) => return false, + }; + §ion_name[..] == name || { + section_name.starts_with("__") + && name.starts_with(".") + && §ion_name[2..] == &name[1..] + } + }) + .map(|p| p.1) + } + + fn search_symtab<'b>(&'b self, _addr: u64) -> Option<&'b [u8]> { + // So far it seems that we don't need to implement this. Maybe + // `dladdr` on OSX has us covered? Maybe there's not much in the + // symbol table? In any case our relevant tests are passing + // without this being implemented, so let's skip it for now. + None + } + } + } else { + use goblin::elf::Elf; + + struct Object<'a> { + elf: Elf<'a>, + data: &'a [u8], + // List of pre-parsed and sorted symbols by base address. The + // boolean indicates whether it comes from the dynamic symbol table + // or the normal symbol table, affecting where it's symbolicated. + syms: Vec<(goblin::elf::Sym, bool)>, + } + + impl<'a> Object<'a> { + fn parse(data: &'a [u8]) -> Option> { + let elf = Elf::parse(data).ok()?; + if !elf.little_endian { + return None; + } + let mut syms = elf + .syms + .iter() + .map(|s| (s, false)) + .chain(elf.dynsyms.iter().map(|s| (s, true))) + // Only look at function/object symbols. This mirrors what + // libbacktrace does and in general we're only symbolicating + // function addresses in theory. Object symbols correspond + // to data, and maybe someone's crazy enough to have a + // function go into static data? + .filter(|(s, _)| { + s.is_function() || s.st_type() == goblin::elf::sym::STT_OBJECT + }) + // skip anything that's in an undefined section header, + // since it means it's an imported function and we're only + // symbolicating with locally defined functions. + .filter(|(s, _)| { + s.st_shndx != goblin::elf::section_header::SHN_UNDEF as usize + }) + .collect::>(); + syms.sort_unstable_by_key(|s| s.0.st_value); + Some(Object { + syms, + elf, + data, + }) + } + + fn section(&self, name: &str) -> Option<&'a [u8]> { + let section = self.elf.section_headers.iter().find(|section| { + match self.elf.shdr_strtab.get(section.sh_name) { + Some(Ok(section_name)) => section_name == name, + _ => false, + } + }); + section + .and_then(|section| { + self.data.get(section.sh_offset as usize..) 
+ .and_then(|data| data.get(..section.sh_size as usize)) + }) + } + + fn search_symtab<'b>(&'b self, addr: u64) -> Option<&'b [u8]> { + // Same sort of binary search as Windows above + let i = match self.syms.binary_search_by_key(&addr, |s| s.0.st_value) { + Ok(i) => i, + Err(i) => i.checked_sub(1)?, + }; + let (sym, dynamic) = self.syms.get(i)?; + if sym.st_value <= addr && addr <= sym.st_value + sym.st_size { + let strtab = if *dynamic { + &self.elf.dynstrtab + } else { + &self.elf.strtab + }; + Some(strtab.get(sym.st_name)?.ok()?.as_bytes()) + } else { + None + } + } + } + } +} + +impl Mapping { + #[cfg(not(target_os = "macos"))] + fn new(path: &Path) -> Option { + let map = mmap(path)?; + let cx = cx(Object::parse(&map)?)?; + Some(mk!(Mapping { map, cx })) + } + + // The loading path for OSX is is so different we just have a completely + // different implementation of the function here. On OSX we need to go + // probing the filesystem for a bunch of files. + #[cfg(target_os = "macos")] + fn new(path: &Path) -> Option { + // First up we need to load the unique UUID which is stored in the macho + // header of the file we're reading, specified at `path`. + let map = mmap(path)?; + let macho = MachO::parse(&map, 0).ok()?; + let uuid = find_uuid(&macho)?; + + // Next we need to look for a `*.dSYM` file. For now we just probe the + // containing directory and look around for something that matches + // `*.dSYM`. Once it's found we root through the dwarf resources that it + // contains and try to find a macho file which has a matching UUID as + // the one of our own file. If we find a match that's the dwarf file we + // want to return. + let parent = path.parent()?; + for entry in parent.read_dir().ok()? { + let entry = entry.ok()?; + let filename = match entry.file_name().into_string() { + Ok(name) => name, + Err(_) => continue, }; + if !filename.ends_with(".dSYM") { + continue; + } + let candidates = entry.path().join("Contents/Resources/DWARF"); + if let Some(mapping) = load_dsym(&candidates, &uuid) { + return Some(mapping); + } + } + + + + + let inner = cx(Object::parse(macho)?)?; + return Some(mk!(Mapping { map, inner })); - if cache.len() == MAPPINGS_CACHE_SIZE { - cache.pop(); + fn load_dsym(dir: &Path, uuid: &[u8; 16]) -> Option { + for entry in dir.read_dir().ok()? 
{ + let entry = entry.ok()?; + let map = mmap(&entry.path())?; + let macho = MachO::parse(&map, 0).ok()?; + let entry_uuid = find_uuid(&macho)?; + if entry_uuid != uuid { + continue; + } + if let Some(cx) = Object::parse(macho).and_then(cx) { + return Some(mk!(Mapping { map, cx })); + } } - cache.insert(0, (path, mapping)); + None } - cache[0].1.rent(f); - }); + fn find_uuid<'a>(object: &'a MachO) -> Option<&'a [u8; 16]> { + use goblin::mach::load_command::CommandVariant; + + object + .load_commands + .iter() + .filter_map(|cmd| match &cmd.command { + CommandVariant::Uuid(u) => Some(&u.uuid), + _ => None, + }) + .next() + } + } } -pub fn resolve(addr: *mut c_void, cb: &mut FnMut(&super::Symbol)) { +#[derive(Default)] +struct Cache { + + libraries: Vec, + + @@ -115,110 +377,297 @@ pub fn resolve(addr: *mut c_void, cb: &mut FnMut(&super::Symbol)) { - let addr = findshlibs::Avma(addr as *mut u8 as *const u8); - let mut so_info = None; - findshlibs::TargetSharedLibrary::each(|so| { - use findshlibs::IterationControl::*; + mappings: Vec<(usize, Mapping)>, +} - for segment in so.segments() { - if segment.contains_avma(so, addr) { - let addr = so.avma_to_svma(addr); - let path = so.name().to_string_lossy(); - so_info = Some((addr, path.to_string())); - return Break; - } - } +struct Library { + name: OsString, + segments: Vec, + bias: findshlibs::Bias, +} - Continue - }); - let (addr, path) = match so_info { - None => return, - Some((a, p)) => (a, p), - }; +struct LibrarySegment { + len: usize, + stated_virtual_memory_address: findshlibs::Svma, +} - - - let path = if path.is_empty() { - match env::current_exe() { - Err(_) => return, - Ok(p) => p, + +pub unsafe fn clear_symbol_cache() { + Cache::with_global(|cache| cache.mappings.clear()); +} + +impl Cache { + fn new() -> Cache { + let mut libraries = Vec::new(); + + + + findshlibs::TargetSharedLibrary::each(|so| { + use findshlibs::IterationControl::*; + libraries.push(Library { + name: so.name().to_owned(), + segments: so + .segments() + .map(|s| LibrarySegment { + len: s.len(), + stated_virtual_memory_address: s.stated_virtual_memory_address(), + }) + .collect(), + bias: so.virtual_memory_bias(), + }); + Continue + }); + + Cache { + mappings: Vec::with_capacity(MAPPINGS_CACHE_SIZE), + libraries, } - } else { - PathBuf::from(path) - }; + } - - with_mapping_for_path(path, |dwarf, symbols| { - let mut found_sym = false; - if let Ok(mut frames) = dwarf.find_frames(addr.0 as u64) { - while let Ok(Some(frame)) = frames.next() { - let (file, line) = frame - .location - .map(|l| (l.file, l.line)) - .unwrap_or((None, None)); - let name = frame - .function - .and_then(|f| f.raw_name().ok().map(|f| f.to_string())); - let sym = super::Symbol { - inner: Symbol::new(addr.0 as usize, file, line, name), - }; - cb(&sym); - found_sym = true; + unsafe fn with_global(f: impl FnOnce(&mut Self)) { + + + + + + + + + + + static mut MAPPINGS_CACHE: Option = None; + + f(MAPPINGS_CACHE.get_or_insert_with(|| Cache::new())) + } + + fn avma_to_svma(&self, addr: *const u8) -> Option<(usize, findshlibs::Svma)> { + + + + + + if cfg!(windows) { + let addr = findshlibs::Svma(addr); + return Some((usize::max_value(), addr)); + } + + self.libraries + .iter() + .enumerate() + .filter_map(|(i, lib)| { + + + + + + + if !lib.segments.iter().any(|s| { + let svma = s.stated_virtual_memory_address; + let start = unsafe { svma.0.offset(lib.bias.0) as usize }; + let end = start + s.len; + let address = addr as usize; + start <= address && address < end + }) { + return None; + } + + + + let 
reverse_bias = -lib.bias.0; + let svma = findshlibs::Svma(unsafe { addr.offset(reverse_bias) }); + Some((i, svma)) + }) + .next() + } + + fn mapping_for_lib<'a>(&'a mut self, lib: usize) -> Option<&'a Context<'a>> { + let idx = self.mappings.iter().position(|(idx, _)| *idx == lib); + + + + + if let Some(idx) = idx { + + if idx != 0 { + let entry = self.mappings.remove(idx); + self.mappings.insert(0, entry); } + } else { + + + + let storage; + let path = match self.libraries.get(lib) { + Some(lib) => &lib.name, + None => { + storage = env::current_exe().ok()?.into(); + &storage + } + }; + let mapping = Mapping::new(path.as_ref())?; + + if self.mappings.len() == MAPPINGS_CACHE_SIZE { + self.mappings.pop(); + } + + self.mappings.insert(0, (lib, mapping)); } + let cx: &'a Context<'static> = &self.mappings[0].1.cx; - if !found_sym { - if let Some(name) = symbols.get(addr.0 as u64).and_then(|x| x.name()) { - let sym = super::Symbol { - inner: Symbol::new(addr.0 as usize, None, None, Some(name.to_string())), - }; - cb(&sym); + + Some(unsafe { mem::transmute::<&'a Context<'static>, &'a Context<'a>>(cx) }) + } +} + +pub unsafe fn resolve(what: ResolveWhat, cb: &mut FnMut(&super::Symbol)) { + let addr = what.address_or_ip(); + let mut cb = DladdrFallback { + cb, + addr, + called: false, + }; + + Cache::with_global(|cache| { + let (lib, addr) = match cache.avma_to_svma(addr as *const u8) { + Some(pair) => pair, + None => return, + }; + + + + let cx = match cache.mapping_for_lib(lib) { + Some(cx) => cx, + None => return, + }; + if let Ok(mut frames) = cx.dwarf.find_frames(addr.0 as u64) { + while let Ok(Some(mut frame)) = frames.next() { + let function = frame.function.take(); + let name = function.as_ref().and_then(|f| f.raw_name().ok()); + let name = name.as_ref().map(|n| n.as_bytes()); + cb.call(Symbol::Frame { + addr: addr.0 as *mut c_void, + frame, + name, + }); + } + } + + if !cb.called { + if let Some(name) = cx.object.search_symtab(addr.0 as u64) { + cb.call(Symbol::Symtab { + addr: addr.0 as *mut c_void, + name, + }); } } }); + + drop(cb); +} + +struct DladdrFallback<'a, 'b> { + addr: *mut c_void, + called: bool, + cb: &'a mut (FnMut(&super::Symbol) + 'b), } -pub struct Symbol { - addr: usize, - file: Option, - line: Option, - name: Option, +impl DladdrFallback<'_, '_> { + fn call(&mut self, sym: Symbol) { + self.called = true; + + + + + let sym = unsafe { mem::transmute::>(sym) }; + (self.cb)(&super::Symbol { inner: sym }); + } } -impl Symbol { - fn new(addr: usize, - file: Option, - line: Option, - name: Option) - -> Symbol { - Symbol { - addr, - file, - line, - name, +impl Drop for DladdrFallback<'_, '_> { + fn drop(&mut self) { + if self.called { + return; + } + unsafe { + dladdr::resolve(self.addr, &mut |sym| { + (self.cb)(&super::Symbol { + inner: Symbol::Dladdr(sym), + }) + }); } } +} + +pub enum Symbol<'a> { + + + Frame { + addr: *mut c_void, + frame: addr2line::Frame>, + name: Option<&'a [u8]>, + }, + + + Symtab { addr: *mut c_void, name: &'a [u8] }, + + + Dladdr(dladdr::Symbol<'a>), +} +impl Symbol<'_> { pub fn name(&self) -> Option { - self.name.as_ref().map(|s| SymbolName::new(s.as_bytes())) + match self { + Symbol::Dladdr(s) => s.name(), + Symbol::Frame { name, .. } => { + let name = name.as_ref()?; + Some(SymbolName::new(name)) + } + Symbol::Symtab { name, .. } => Some(SymbolName::new(name)), + } } pub fn addr(&self) -> Option<*mut c_void> { - Some(self.addr as *mut c_void) + match self { + Symbol::Dladdr(s) => s.addr(), + Symbol::Frame { addr, .. 
} => Some(*addr), + Symbol::Symtab { .. } => None, + } + } + + pub fn filename_raw(&self) -> Option { + match self { + Symbol::Dladdr(s) => return s.filename_raw(), + Symbol::Frame { frame, .. } => { + let location = frame.location.as_ref()?; + let file = location.file.as_ref()?; + Some(BytesOrWideString::Bytes(file.as_bytes())) + } + Symbol::Symtab { .. } => None, + } } pub fn filename(&self) -> Option<&Path> { - self.file.as_ref().map(|f| f.as_ref()) + match self { + Symbol::Dladdr(s) => return s.filename(), + Symbol::Frame { frame, .. } => { + let location = frame.location.as_ref()?; + let file = location.file.as_ref()?; + Some(Path::new(file)) + } + Symbol::Symtab { .. } => None, + } } pub fn lineno(&self) -> Option { - self.line - .and_then(|l| if l > (u32::MAX as u64) { - None - } else { - Some(l as u32) - }) + match self { + Symbol::Dladdr(s) => return s.lineno(), + Symbol::Frame { frame, .. } => { + let location = frame.location.as_ref()?; + location.line.and_then(|l| u32::try_from(l).ok()) + } + Symbol::Symtab { .. } => None, + } } } diff --git a/third_party/rust/backtrace/src/symbolize/libbacktrace.rs b/third_party/rust/backtrace/src/symbolize/libbacktrace.rs index 83e9fdb7127b..7068cf63a349 100644 --- a/third_party/rust/backtrace/src/symbolize/libbacktrace.rs +++ b/third_party/rust/backtrace/src/symbolize/libbacktrace.rs @@ -8,21 +8,41 @@ + + + + + + + + + + + + + + + + + + + + + + + #![allow(bad_style)] extern crate backtrace_sys as bt; -use libc::uintptr_t; -use std::ffi::{CStr, OsStr}; -use std::os::raw::{c_void, c_char, c_int}; -use std::os::unix::prelude::*; -use std::path::Path; -use std::ptr; -use std::sync::{ONCE_INIT, Once}; +use core::{ptr, slice}; +use libc::{self, c_char, c_int, c_void, uintptr_t}; -use SymbolName; +use crate::symbolize::{ResolveWhat, SymbolName}; +use crate::symbolize::dladdr; +use crate::types::BytesOrWideString; -pub enum Symbol { +pub enum Symbol<'a> { Syminfo { pc: uintptr_t, symname: *const c_char, @@ -32,19 +52,44 @@ pub enum Symbol { filename: *const c_char, lineno: c_int, function: *const c_char, + symname: *const c_char, }, + Dladdr(dladdr::Symbol<'a>), } -impl Symbol { +impl Symbol<'_> { pub fn name(&self) -> Option { - let ptr = match *self { - Symbol::Syminfo { symname, .. } => symname, - Symbol::Pcinfo { function, .. } => function, + let symbol = |ptr: *const c_char| { + unsafe { + if ptr.is_null() { + None + } else { + let len = libc::strlen(ptr); + Some(SymbolName::new(slice::from_raw_parts( + ptr as *const u8, + len, + ))) + } + } }; - if ptr.is_null() { - None - } else { - Some(SymbolName::new(unsafe { CStr::from_ptr(ptr).to_bytes() })) + match *self { + Symbol::Syminfo { symname, .. } => symbol(symname), + Symbol::Pcinfo { function, symname, .. } => { + + + + + + + + + + if let Some(sym) = symbol(function) { + return Some(sym) + } + symbol(symname) + } + Symbol::Dladdr(ref s) => s.name(), } } @@ -52,75 +97,150 @@ impl Symbol { let pc = match *self { Symbol::Syminfo { pc, .. } => pc, Symbol::Pcinfo { pc, .. } => pc, + Symbol::Dladdr(ref s) => return s.addr(), }; - if pc == 0 {None} else {Some(pc as *mut _)} + if pc == 0 { + None + } else { + Some(pc as *mut _) + } } - pub fn filename(&self) -> Option<&Path> { + fn filename_bytes(&self) -> Option<&[u8]> { match *self { Symbol::Syminfo { .. } => None, Symbol::Pcinfo { filename, .. 
} => { - Some(Path::new(OsStr::from_bytes(unsafe { - CStr::from_ptr(filename).to_bytes() - }))) + let ptr = filename as *const u8; + unsafe { + let len = libc::strlen(filename); + Some(slice::from_raw_parts(ptr, len)) + } } + Symbol::Dladdr(_) => None, } } + pub fn filename_raw(&self) -> Option { + self.filename_bytes().map(BytesOrWideString::Bytes) + } + + #[cfg(feature = "std")] + pub fn filename(&self) -> Option<&::std::path::Path> { + use std::path::Path; + + #[cfg(unix)] + fn bytes2path(bytes: &[u8]) -> Option<&Path> { + use std::ffi::OsStr; + use std::os::unix::prelude::*; + Some(Path::new(OsStr::from_bytes(bytes))) + } + + #[cfg(windows)] + fn bytes2path(bytes: &[u8]) -> Option<&Path> { + use std::str; + str::from_utf8(bytes).ok().map(Path::new) + } + + self.filename_bytes().and_then(bytes2path) + } + pub fn lineno(&self) -> Option { match *self { Symbol::Syminfo { .. } => None, Symbol::Pcinfo { lineno, .. } => Some(lineno as u32), + Symbol::Dladdr(ref s) => s.lineno(), } } } -extern fn error_cb(_data: *mut c_void, _msg: *const c_char, - _errnum: c_int) { +extern "C" fn error_cb(_data: *mut c_void, _msg: *const c_char, _errnum: c_int) { } -extern fn syminfo_cb(data: *mut c_void, - pc: uintptr_t, - symname: *const c_char, - _symval: uintptr_t, - _symsize: uintptr_t) { + +struct SyminfoState<'a> { + cb: &'a mut (FnMut(&super::Symbol) + 'a), + pc: usize, +} + +extern "C" fn syminfo_cb( + data: *mut c_void, + pc: uintptr_t, + symname: *const c_char, + _symval: uintptr_t, + _symsize: uintptr_t, +) { + let mut bomb = crate::Bomb { enabled: true }; + + + + + + + + unsafe { - call(data, &super::Symbol { - inner: Symbol::Syminfo { - pc: pc, - symname: symname, - }, - }); + let syminfo_state = &mut *(data as *mut SyminfoState); + let mut pcinfo_state = PcinfoState { + symname, + called: false, + cb: syminfo_state.cb, + }; + bt::backtrace_pcinfo( + init_state(), + syminfo_state.pc as uintptr_t, + pcinfo_cb, + error_cb, + &mut pcinfo_state as *mut _ as *mut _, + ); + if !pcinfo_state.called { + (pcinfo_state.cb)(&super::Symbol { + inner: Symbol::Syminfo { + pc: pc, + symname: symname, + }, + }); + } } + + bomb.enabled = false; +} + + +struct PcinfoState<'a> { + cb: &'a mut (FnMut(&super::Symbol) + 'a), + symname: *const c_char, + called: bool, } -extern fn pcinfo_cb(data: *mut c_void, - pc: uintptr_t, - filename: *const c_char, - lineno: c_int, - function: *const c_char) -> c_int { +extern "C" fn pcinfo_cb( + data: *mut c_void, + pc: uintptr_t, + filename: *const c_char, + lineno: c_int, + function: *const c_char, +) -> c_int { + if filename.is_null() || function.is_null() { + return -1; + } + let mut bomb = crate::Bomb { enabled: true }; + unsafe { - if filename.is_null() || function.is_null() { - return -1 - } - call(data, &super::Symbol { + let state = &mut *(data as *mut PcinfoState); + state.called = true; + (state.cb)(&super::Symbol { inner: Symbol::Pcinfo { pc: pc, filename: filename, lineno: lineno, - function: function, + symname: state.symname, + function, }, }); - return 0 } -} -unsafe fn call(data: *mut c_void, sym: &super::Symbol) { - let cb = data as *mut &mut FnMut(&super::Symbol); - let mut bomb = ::Bomb { enabled: true }; - (*cb)(sym); bomb.enabled = false; + return 0; } @@ -136,45 +256,216 @@ unsafe fn call(data: *mut c_void, sym: &super::Symbol) { +unsafe fn init_state() -> *mut bt::backtrace_state { + static mut STATE: *mut bt::backtrace_state = 0 as *mut _; + if !STATE.is_null() { + return STATE; + } + STATE = bt::backtrace_create_state( + load_filename(), + + + 0, + 
error_cb, + ptr::null_mut(), + ); + return STATE; + + + + + + + + + + + + + + + + + + + + + + + + + cfg_if::cfg_if! { + if #[cfg(any(target_os = "macos", target_os = "ios"))] { + // Note that ideally we'd use `std::env::current_exe`, but we can't + // require `std` here. + // + // Use `_NSGetExecutablePath` to load the current executable path + // into a static area (which if it's too small just give up). + // + // Note that we're seriously trusting libbacktrace here to not die + // on corrupt executables, but it surely does... + unsafe fn load_filename() -> *const libc::c_char { + const N: usize = 256; + static mut BUF: [u8; N] = [0; N]; + extern { + fn _NSGetExecutablePath( + buf: *mut libc::c_char, + bufsize: *mut u32, + ) -> libc::c_int; + } + let mut sz: u32 = BUF.len() as u32; + let ptr = BUF.as_mut_ptr() as *mut libc::c_char; + if _NSGetExecutablePath(ptr, &mut sz) == 0 { + ptr + } else { + ptr::null() + } + } + } else if #[cfg(windows)] { + use crate::windows::*; + + // Windows has a mode of opening files where after it's opened it + // can't be deleted. That's in general what we want here because we + // want to ensure that our executable isn't changing out from under + // us after we hand it off to libbacktrace, hopefully mitigating the + // ability to pass in arbitrary data into libbacktrace (which may be + // mishandled). + // + // Given that we do a bit of a dance here to attempt to get a sort + // of lock on our own image: + // + // * Get a handle to the current process, load its filename. + // * Open a file to that filename with the right flags. + // * Reload the current process's filename, making sure it's the same + // + // If that all passes we in theory have indeed opened our process's + // file and we're guaranteed it won't change. FWIW a bunch of this + // is copied from libstd historically, so this is my best + // interpretation of what was happening. + unsafe fn load_filename() -> *const libc::c_char { + load_filename_opt().unwrap_or(ptr::null()) + } + unsafe fn load_filename_opt() -> Result<*const libc::c_char, ()> { + const N: usize = 256; + // This lives in static memory so we can return it.. + static mut BUF: [i8; N] = [0; N]; + // ... and this lives on the stack since it's temporary + let mut stack_buf = [0; N]; + let name1 = query_full_name(&mut BUF)?; + + let handle = CreateFileA( + name1.as_ptr(), + GENERIC_READ, + FILE_SHARE_READ | FILE_SHARE_WRITE, + ptr::null_mut(), + OPEN_EXISTING, + 0, + ptr::null_mut(), + ); + if handle.is_null() { + return Err(()); + } + + let name2 = query_full_name(&mut stack_buf)?; + if name1 != name2 { + CloseHandle(handle); + return Err(()) + } + // intentionally leak `handle` here because having that open + // should preserve our lock on this file name. + Ok(name1.as_ptr()) + } - - - -unsafe fn init_state() -> *mut bt::backtrace_state { - static mut STATE: *mut bt::backtrace_state = 0 as *mut _; - static INIT: Once = ONCE_INIT; - INIT.call_once(|| { - - - STATE = bt::backtrace_create_state(ptr::null(), 0, error_cb, - ptr::null_mut()); - }); - - STATE + unsafe fn query_full_name(buf: &mut [i8]) -> Result<&[i8], ()> { + let p1 = OpenProcess(PROCESS_QUERY_INFORMATION, FALSE, GetCurrentProcessId()); + let mut len = buf.len() as u32; + let rc = QueryFullProcessImageNameA(p1, 0, buf.as_mut_ptr(), &mut len); + CloseHandle(p1); + + // We want to return a slice that is nul-terminated, so if + // everything was filled in and it equals the total length + // then equate that to failure. 
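The `syminfo_cb`/`pcinfo_cb` machinery above uses the standard trick for threading a Rust closure through a C callback: erase it behind `*mut c_void`, then recover it inside an `extern "C"` trampoline. A self-contained sketch, with `c_style_iterate` as a hypothetical stand-in for the `bt::backtrace_*` entry points:

```rust
use std::ffi::c_void;

// Hypothetical C-style API that invokes a callback with opaque user
// data, playing the role of bt::backtrace_pcinfo above.
unsafe fn c_style_iterate(cb: unsafe extern "C" fn(*mut c_void, i32), data: *mut c_void) {
    for v in 0..3 {
        cb(data, v);
    }
}

unsafe extern "C" fn trampoline(data: *mut c_void, value: i32) {
    // Recover the concrete closure from the erased pointer. The pointer
    // targets the wide `&mut dyn FnMut` reference, which is itself Sized.
    let f = &mut *(data as *mut &mut dyn FnMut(i32));
    f(value);
}

fn main() {
    let mut sum = 0;
    {
        let mut closure = |v: i32| sum += v;
        let mut cb: &mut dyn FnMut(i32) = &mut closure;
        unsafe { c_style_iterate(trampoline, &mut cb as *mut _ as *mut c_void) };
    }
    assert_eq!(sum, 3); // 0 + 1 + 2
}
```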
+ // + // Otherwise when returning success make sure the nul byte is + // included in the slice. + if rc == 0 || len == buf.len() as u32 { + Err(()) + } else { + assert_eq!(buf[len as usize], 0); + Ok(&buf[..(len + 1) as usize]) + } + } + } else if #[cfg(target_os = "vxworks")] { + unsafe fn load_filename() -> *const libc::c_char { + use libc; + use core::mem; + + const N: usize = libc::VX_RTP_NAME_LENGTH + 1; + static mut BUF: [libc::c_char; N] = [0; N]; + + let mut rtp_desc : libc::RTP_DESC = mem::zeroed(); + if (libc::rtpInfoGet(0, &mut rtp_desc as *mut libc::RTP_DESC) == 0) { + BUF.copy_from_slice(&rtp_desc.pathName); + BUF.as_ptr() + } else { + ptr::null() + } + } + } else { + unsafe fn load_filename() -> *const libc::c_char { + ptr::null() + } + } + } } -pub fn resolve(symaddr: *mut c_void, mut cb: &mut FnMut(&super::Symbol)) { - let _guard = ::lock::lock(); +pub unsafe fn resolve(what: ResolveWhat, cb: &mut FnMut(&super::Symbol)) { + let symaddr = what.address_or_ip() as usize; - unsafe { - let state = init_state(); - if state.is_null() { - return - } + let state = init_state(); + if state.is_null() { + return dladdr_fallback(what.address_or_ip(), cb); + } - let ret = bt::backtrace_pcinfo(state, symaddr as uintptr_t, - pcinfo_cb, error_cb, - &mut cb as *mut _ as *mut _); - if ret != 0 { - bt::backtrace_syminfo(state, symaddr as uintptr_t, - syminfo_cb, error_cb, - &mut cb as *mut _ as *mut _); - } + + + + + + + let mut called = false; + { + let mut syminfo_state = SyminfoState { + pc: symaddr, + cb: &mut |sym| { + called = true; + cb(sym); + }, + }; + bt::backtrace_syminfo( + state, + symaddr as uintptr_t, + syminfo_cb, + error_cb, + &mut syminfo_state as *mut _ as *mut _, + ); + } + + if !called { + dladdr_fallback(what.address_or_ip(), cb); } } + +unsafe fn dladdr_fallback(addr: *mut c_void, cb: &mut FnMut(&super::Symbol)) { + dladdr::resolve(addr, &mut |sym| { + cb(&super::Symbol { + inner: Symbol::Dladdr(sym), + }) + }); +} diff --git a/third_party/rust/backtrace/src/symbolize/mod.rs b/third_party/rust/backtrace/src/symbolize/mod.rs index dc14b5165968..7f8919bc8631 100644 --- a/third_party/rust/backtrace/src/symbolize/mod.rs +++ b/third_party/rust/backtrace/src/symbolize/mod.rs @@ -1,94 +1,248 @@ -use std::fmt; -#[cfg(not(feature = "cpp_demangle"))] -use std::marker::PhantomData; -use std::os::raw::c_void; -use std::path::Path; -use std::str; -use rustc_demangle::{try_demangle, Demangle}; - - - - - - - - - - - - - - - - - - - - - +use core::{fmt, str}; - - - - - - - - - -pub fn resolve(addr: *mut c_void, mut cb: F) { - resolve_imp(addr, &mut cb) +cfg_if::cfg_if! { + if #[cfg(feature = "std")] { + use std::path::Path; + use std::prelude::v1::*; + } } +use crate::backtrace::Frame; +use crate::types::BytesOrWideString; +use core::ffi::c_void; +use rustc_demangle::{try_demangle, Demangle}; +/// Resolve an address to a symbol, passing the symbol to the specified +/// closure. +/// +/// This function will look up the given address in areas such as the local +/// symbol table, dynamic symbol table, or DWARF debug info (depending on the +/// activated implementation) to find symbols to yield. +/// +/// The closure may not be called if resolution could not be performed, and it +/// also may be called more than once in the case of inlined functions. +/// +/// Symbols yielded represent the execution at the specified `addr`, returning +/// file/line pairs for that address (if available). 
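The documentation being built up here belongs to the std-only `resolve`, whose body just below is nothing more than a lock acquisition around `resolve_unsynchronized`, letting the `no_std` build expose the unsynchronized entry points directly. A sketch of that split, with a `std::sync::Mutex` standing in for the crate's internal lock:

```rust
use std::sync::Mutex;

// Stand-in for the crate-internal lock guarding non-reentrant backends.
static LOCK: Mutex<()> = Mutex::new(());

// The platform backend: available everywhere, but not thread-safe.
unsafe fn resolve_unsynchronized(addr: usize, cb: &mut dyn FnMut(usize)) {
    cb(addr);
}

// The std-only convenience wrapper: serialize callers, then delegate.
fn resolve(addr: usize, mut cb: impl FnMut(usize)) {
    let _guard = LOCK.lock().unwrap();
    unsafe { resolve_unsynchronized(addr, &mut cb) }
}

fn main() {
    resolve(0x1234, |a| println!("resolved {:#x}", a));
}
```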
+/// +/// Note that if you have a `Frame` then it's recommended to use the +/// `resolve_frame` function instead of this one. +/// +/// # Required features +/// +/// This function requires the `std` feature of the `backtrace` crate to be +/// enabled, and the `std` feature is enabled by default. +/// +/// # Panics +/// +/// This function strives to never panic, but if the `cb` provided panics then +/// some platforms will force a double panic to abort the process. Some +/// platforms use a C library which internally uses callbacks which cannot be +/// unwound through, so panicking from `cb` may trigger a process abort. +/// +/// # Example +/// +/// ``` +/// extern crate backtrace; +/// +/// fn main() { +/// backtrace::trace(|frame| { +/// let ip = frame.ip(); +/// +/// backtrace::resolve(ip, |symbol| { +/// // ... +/// }); +/// +/// false // only look at the top frame +/// }); +/// } +/// ``` +#[cfg(feature = "std")] +pub fn resolve(addr: *mut c_void, cb: F) { + let _guard = crate::lock::lock(); + unsafe { resolve_unsynchronized(addr, cb) } +} +/// Resolve a previously capture frame to a symbol, passing the symbol to the +/// specified closure. +/// +/// This functin performs the same function as `resolve` except that it takes a +/// `Frame` as an argument instead of an address. This can allow some platform +/// implementations of backtracing to provide more accurate symbol information +/// or information about inline frames for example. It's recommended to use this +/// if you can. +/// +/// # Required features +/// +/// This function requires the `std` feature of the `backtrace` crate to be +/// enabled, and the `std` feature is enabled by default. +/// +/// # Panics +/// +/// This function strives to never panic, but if the `cb` provided panics then +/// some platforms will force a double panic to abort the process. Some +/// platforms use a C library which internally uses callbacks which cannot be +/// unwound through, so panicking from `cb` may trigger a process abort. +/// +/// # Example +/// +/// ``` +/// extern crate backtrace; +/// +/// fn main() { +/// backtrace::trace(|frame| { +/// backtrace::resolve_frame(frame, |symbol| { +/// // ... +/// }); +/// +/// false // only look at the top frame +/// }); +/// } +/// ``` +#[cfg(feature = "std")] +pub fn resolve_frame(frame: &Frame, cb: F) { + let _guard = crate::lock::lock(); + unsafe { resolve_frame_unsynchronized(frame, cb) } +} +pub enum ResolveWhat<'a> { + Address(*mut c_void), + Frame(&'a Frame), +} +impl<'a> ResolveWhat<'a> { + #[allow(dead_code)] + fn address_or_ip(&self) -> *mut c_void { + match self { + ResolveWhat::Address(a) => adjust_ip(*a), + ResolveWhat::Frame(f) => adjust_ip(f.ip()), + } + } +} +// IP values from stack frames are typically (always?) the instruction +// *after* the call that's the actual stack trace. Symbolizing this on +// causes the filename/line number to be one ahead and perhaps into +// the void if it's near the end of the function. +// +// This appears to basically always be the case on all platforms, so we always +// subtract one from a resolved ip to resolve it to the previous call +// instruction instead of the instruction being returned to. +// +// Ideally we would not do this. Ideally we would require callers of the +// `resolve` APIs here to manually do the -1 and account that they want location +// information for the *previous* instruction, not the current. Ideally we'd +// also expose on `Frame` if we are indeed the address of the next instruction +// or the current. 
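The return-address adjustment this comment block motivates is easiest to see against a toy line table: the unwinder reports the instruction after the call, which can fall past the end of the calling function's range. A worked illustration with hypothetical addresses:

```rust
// Toy line table: address ranges mapped to source lines.
fn line_for(addr: usize) -> &'static str {
    match addr {
        0x100..=0x10f => "foo.rs:10 (the call site)",
        _ => "bar.rs:1 (whatever follows foo)",
    }
}

fn main() {
    // A call instruction ends at 0x110, so 0x110 is the return address
    // the unwinder reports for the frame.
    let return_addr = 0x110;
    assert_eq!(line_for(return_addr), "bar.rs:1 (whatever follows foo)");
    // Stepping back one byte lands inside the call itself.
    assert_eq!(line_for(return_addr - 1), "foo.rs:10 (the call site)");
}
```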
+// +// For now though this is a pretty niche concern so we just internally always +// subtract one. Consumers should keep working and getting pretty good results, +// so we should be good enough. +fn adjust_ip(a: *mut c_void) -> *mut c_void { + if a.is_null() { + a + } else { + (a as usize - 1) as *mut c_void + } +} +/// Same as `resolve`, only unsafe as it's unsynchronized. +/// +/// This function does not have synchronization guarentees but is available when +/// the `std` feature of this crate isn't compiled in. See the `resolve` +/// function for more documentation and examples. +/// +/// # Panics +/// +/// See information on `resolve` for caveats on `cb` panicking. +pub unsafe fn resolve_unsynchronized(addr: *mut c_void, mut cb: F) +where + F: FnMut(&Symbol), +{ + resolve_imp(ResolveWhat::Address(addr), &mut cb) +} +/// Same as `resolve_frame`, only unsafe as it's unsynchronized. +/// +/// This function does not have synchronization guarentees but is available +/// when the `std` feature of this crate isn't compiled in. See the +/// `resolve_frame` function for more documentation and examples. +/// +/// # Panics +/// +/// See information on `resolve_frame` for caveats on `cb` panicking. +pub unsafe fn resolve_frame_unsynchronized(frame: &Frame, mut cb: F) +where + F: FnMut(&Symbol), +{ + resolve_imp(ResolveWhat::Frame(frame), &mut cb) +} - +/// A trait representing the resolution of a symbol in a file. +/// +/// This trait is yielded as a trait object to the closure given to the +/// `backtrace::resolve` function, and it is virtually dispatched as it's +/// unknown which implementation is behind it. +/// +/// A symbol can give contextual information about a function, for example the +/// name, filename, line number, precise address, etc. Not all information is +/// always available in a symbol, however, so all methods return an `Option`. pub struct Symbol { - inner: SymbolImp, + // TODO: this lifetime bound needs to be persisted eventually to `Symbol`, + // but that's currently a breaking change. For now this is safe since + // `Symbol` is only ever handed out by reference and can't be cloned. + inner: SymbolImp<'static>, } impl Symbol { - - - - - - - - - + /// Returns the name of this function. + /// + /// The returned structure can be used to query various properties about the + /// symbol name: + /// + /// * The `Display` implementation will print out the demangled symbol. + /// * The raw `str` value of the symbol can be accessed (if it's valid + /// utf-8). + /// * The raw bytes for the symbol name can be accessed. pub fn name(&self) -> Option { self.inner.name() } - + /// Returns the starting address of this function. pub fn addr(&self) -> Option<*mut c_void> { - self.inner.addr() + self.inner.addr().map(|p| p as *mut _) } - - - - - - - pub fn filename(&self) -> Option<&Path> { - self.inner.filename() + /// Returns the raw filename as a slice. This is mainly useful for `no_std` + /// environments. + pub fn filename_raw(&self) -> Option { + self.inner.filename_raw() } - - - - + /// Returns the line number for where this symbol is currently executing. + /// + /// This return value is typically `Some` if `filename` returns `Some`, and + /// is consequently subject to similar caveats. pub fn lineno(&self) -> Option { self.inner.lineno() } + + /// Returns the file name where this function was defined. + /// + /// This is currently only available when libbacktrace is being used (e.g. + /// unix platforms other than OSX) and when a binary is compiled with + /// debuginfo. 
If neither of these conditions is met then this will likely + /// return `None`. + /// + /// # Required features + /// + /// This function requires the `std` feature of the `backtrace` crate to be + /// enabled, and the `std` feature is enabled by default. + #[cfg(feature = "std")] + #[allow(unreachable_code)] + pub fn filename(&self) -> Option<&Path> { + self.inner.filename() + } } impl fmt::Debug for Symbol { @@ -100,9 +254,14 @@ impl fmt::Debug for Symbol { if let Some(addr) = self.addr() { d.field("addr", &addr); } - if let Some(filename) = self.filename() { - d.field("filename", &filename); + + #[cfg(feature = "std")] + { + if let Some(filename) = self.filename() { + d.field("filename", &filename); + } } + if let Some(lineno) = self.lineno() { d.field("lineno", &lineno); } @@ -110,8 +269,7 @@ impl fmt::Debug for Symbol { } } - -cfg_if! { +cfg_if::cfg_if! { if #[cfg(feature = "cpp_demangle")] { // Maybe a parsed C++ symbol, if parsing the mangled symbol as Rust // failed. @@ -127,6 +285,8 @@ cfg_if! { } } } else { + use core::marker::PhantomData; + // Make sure to keep this zero-sized, so that the `cpp_demangle` feature // has no cost when disabled. struct OptionCppSymbol<'a>(PhantomData<&'a ()>); @@ -143,9 +303,9 @@ cfg_if! { } } - - - +/// A wrapper around a symbol name to provide ergonomic accessors to the +/// demangled name, the raw bytes, the raw string, etc. +// Allow dead code for when the `cpp_demangle` feature is not enabled. #[allow(dead_code)] pub struct SymbolName<'a> { bytes: &'a [u8], @@ -154,7 +314,7 @@ pub struct SymbolName<'a> { } impl<'a> SymbolName<'a> { - + /// Creates a new symbol name from the raw underlying bytes. pub fn new(bytes: &'a [u8]) -> SymbolName<'a> { let str_bytes = str::from_utf8(bytes).ok(); let demangled = str_bytes.and_then(|s| try_demangle(s).ok()); @@ -172,23 +332,47 @@ impl<'a> SymbolName<'a> { } } - + /// Returns the raw (mangled) symbol name as a `str` if the symbol is valid utf-8. + /// + /// Use the `Display` implementation if you want the demangled version. pub fn as_str(&self) -> Option<&'a str> { self.demangled .as_ref() .map(|s| s.as_str()) - .or_else(|| { - str::from_utf8(self.bytes).ok() - }) + .or_else(|| str::from_utf8(self.bytes).ok()) } - + /// Returns the raw symbol name as a list of bytes pub fn as_bytes(&self) -> &'a [u8] { self.bytes } } -cfg_if! { +fn format_symbol_name( + fmt: fn(&str, &mut fmt::Formatter) -> fmt::Result, + mut bytes: &[u8], + f: &mut fmt::Formatter, +) -> fmt::Result { + while bytes.len() > 0 { + match str::from_utf8(bytes) { + Ok(name) => { + fmt(name, f)?; + break; + } + Err(err) => { + fmt("\u{FFFD}", f)?; + + match err.error_len() { + Some(len) => bytes = &bytes[err.valid_up_to() + len..], + None => break, + } + } + } + } + Ok(()) +} + +cfg_if::cfg_if! { if #[cfg(feature = "cpp_demangle")] { impl<'a> fmt::Display for SymbolName<'a> { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { @@ -197,7 +381,7 @@ cfg_if! { } else if let Some(ref cpp) = self.cpp_demangled.0 { cpp.fmt(f) } else { - String::from_utf8_lossy(self.bytes).fmt(f) + format_symbol_name(fmt::Display::fmt, self.bytes, f) } } } @@ -207,15 +391,15 @@ cfg_if! { if let Some(ref s) = self.demangled { s.fmt(f) } else { - String::from_utf8_lossy(self.bytes).fmt(f) + format_symbol_name(fmt::Display::fmt, self.bytes, f) } } } } } -cfg_if! { - if #[cfg(feature = "cpp_demangle")] { +cfg_if::cfg_if! 
{ + if #[cfg(all(feature = "std", feature = "cpp_demangle"))] { impl<'a> fmt::Debug for SymbolName<'a> { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { use std::fmt::Write; @@ -234,7 +418,7 @@ cfg_if! { } } - String::from_utf8_lossy(self.bytes).fmt(f) + format_symbol_name(fmt::Debug::fmt, self.bytes, f) } } } else { @@ -243,34 +427,55 @@ cfg_if! { if let Some(ref s) = self.demangled { s.fmt(f) } else { - String::from_utf8_lossy(self.bytes).fmt(f) + format_symbol_name(fmt::Debug::fmt, self.bytes, f) } } } } } -cfg_if! { - if #[cfg(all(windows, feature = "dbghelp"))] { +/// Attempt to reclaim that cached memory used to symbolicate addresses. +/// +/// This method will attempt to release any global data structures that have +/// otherwise been cached globally or in the thread which typically represent +/// parsed DWARF information or similar. +/// +/// # Caveats +/// +/// While this function is always available it doesn't actually do anything on +/// most implementations. Libraries like dbghelp or libbacktrace do not provide +/// facilities to deallocate state and manage the allocated memory. For now the +/// `gimli-symbolize` feature of this crate is the only feature where this +/// function has any effect. +#[cfg(feature = "std")] +pub fn clear_symbol_cache() { + let _guard = crate::lock::lock(); + unsafe { + clear_symbol_cache_imp(); + } +} + +mod dladdr; + +cfg_if::cfg_if! { + if #[cfg(all(windows, target_env = "msvc", feature = "dbghelp", not(target_vendor = "uwp")))] { mod dbghelp; use self::dbghelp::resolve as resolve_imp; use self::dbghelp::Symbol as SymbolImp; - } else if #[cfg(all(feature = "gimli-symbolize", - unix, - target_os = "linux"))] { + unsafe fn clear_symbol_cache_imp() {} + } else if #[cfg(all( + feature = "std", + feature = "gimli-symbolize", + any( + target_os = "linux", + target_os = "macos", + windows, + ), + ))] { mod gimli; use self::gimli::resolve as resolve_imp; use self::gimli::Symbol as SymbolImp; - } else if #[cfg(all(feature = "libbacktrace", - unix, - not(target_os = "fuchsia"), - not(target_os = "emscripten"), - not(target_os = "macos"), - not(target_os = "ios")))] { - mod libbacktrace; - use self::libbacktrace::resolve as resolve_imp; - use self::libbacktrace::Symbol as SymbolImp; - + use self::gimli::clear_symbol_cache as clear_symbol_cache_imp; // Note that we only enable coresymbolication on iOS when debug assertions // are enabled because it's helpful in debug mode but it looks like apps get // rejected from the app store if they use this API, see #92 for more info @@ -280,15 +485,28 @@ cfg_if! 
{ mod coresymbolication; use self::coresymbolication::resolve as resolve_imp; use self::coresymbolication::Symbol as SymbolImp; + unsafe fn clear_symbol_cache_imp() {} + } else if #[cfg(all(feature = "libbacktrace", + any(unix, all(windows, not(target_vendor = "uwp"), target_env = "gnu")), + not(target_os = "fuchsia"), + not(target_os = "emscripten")))] { + mod libbacktrace; + use self::libbacktrace::resolve as resolve_imp; + use self::libbacktrace::Symbol as SymbolImp; + unsafe fn clear_symbol_cache_imp() {} } else if #[cfg(all(unix, not(target_os = "emscripten"), + not(target_os = "fuchsia"), feature = "dladdr"))] { - mod dladdr; - use self::dladdr::resolve as resolve_imp; - use self::dladdr::Symbol as SymbolImp; + mod dladdr_resolve; + use self::dladdr_resolve::resolve as resolve_imp; + use self::dladdr_resolve::Symbol as SymbolImp; + unsafe fn clear_symbol_cache_imp() {} } else { mod noop; use self::noop::resolve as resolve_imp; use self::noop::Symbol as SymbolImp; + #[allow(unused)] + unsafe fn clear_symbol_cache_imp() {} } } diff --git a/third_party/rust/backtrace/src/symbolize/noop.rs b/third_party/rust/backtrace/src/symbolize/noop.rs index 78b2a63f851d..98ef381fc223 100644 --- a/third_party/rust/backtrace/src/symbolize/noop.rs +++ b/third_party/rust/backtrace/src/symbolize/noop.rs @@ -1,14 +1,19 @@ -use std::path::Path; -use std::os::raw::c_void; -use SymbolName; -pub fn resolve(_addr: *mut c_void, _cb: &mut FnMut(&super::Symbol)) { -} -pub struct Symbol; +use crate::symbolize::ResolveWhat; +use crate::types::BytesOrWideString; +use crate::SymbolName; +use core::ffi::c_void; +use core::marker; + +pub unsafe fn resolve(_addr: ResolveWhat, _cb: &mut FnMut(&super::Symbol)) {} + +pub struct Symbol<'a> { + _marker: marker::PhantomData<&'a i32>, +} -impl Symbol { +impl Symbol<'_> { pub fn name(&self) -> Option { None } @@ -17,7 +22,12 @@ impl Symbol { None } - pub fn filename(&self) -> Option<&Path> { + pub fn filename_raw(&self) -> Option { + None + } + + #[cfg(feature = "std")] + pub fn filename(&self) -> Option<&::std::path::Path> { None } diff --git a/third_party/rust/backtrace/src/types.rs b/third_party/rust/backtrace/src/types.rs new file mode 100644 index 000000000000..e8594eac499f --- /dev/null +++ b/third_party/rust/backtrace/src/types.rs @@ -0,0 +1,83 @@ + + +cfg_if::cfg_if! { + if #[cfg(feature = "std")] { + use std::borrow::Cow; + use std::fmt; + use std::path::PathBuf; + use std::prelude::v1::*; + use std::str; + } +} + +/// A platform independent representation of a string. When working with `std` +/// enabled it is recommended to the convenience methods for providing +/// conversions to `std` types. +#[derive(Debug)] +pub enum BytesOrWideString<'a> { + /// A slice, typically provided on Unix platforms. + Bytes(&'a [u8]), + /// Wide strings typically from Windows. + Wide(&'a [u16]), +} + +#[cfg(feature = "std")] +impl<'a> BytesOrWideString<'a> { + /// Lossy converts to a `Cow`, will allocate if `Bytes` is not valid + /// UTF-8 or if `BytesOrWideString` is `Wide`. + /// + /// # Required features + /// + /// This function requires the `std` feature of the `backtrace` crate to be + /// enabled, and the `std` feature is enabled by default. + pub fn to_str_lossy(&self) -> Cow<'a, str> { + use self::BytesOrWideString::*; + + match self { + &Bytes(slice) => String::from_utf8_lossy(slice), + &Wide(wide) => Cow::Owned(String::from_utf16_lossy(wide)), + } + } + + /// Provides a `Path` representation of `BytesOrWideString`. 
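The conversion being documented here rests on two standard-library escape hatches, sketched in isolation below: on unix an `OsStr` is an arbitrary byte sequence, and on Windows an `OsString` can hold arbitrary `u16` units, unpaired surrogates included. Only the unix arm is exercised in `main`:

```rust
use std::path::PathBuf;

#[cfg(unix)]
fn bytes_to_path(bytes: &[u8]) -> PathBuf {
    use std::ffi::OsStr;
    use std::os::unix::ffi::OsStrExt;
    // Unix paths are raw bytes; no UTF-8 validation is required.
    PathBuf::from(OsStr::from_bytes(bytes))
}

#[cfg(windows)]
fn wide_to_path(wide: &[u16]) -> PathBuf {
    use std::ffi::OsString;
    use std::os::windows::ffi::OsStringExt;
    // Windows paths are raw UTF-16 units, preserved losslessly.
    PathBuf::from(OsString::from_wide(wide))
}

fn main() {
    #[cfg(unix)]
    assert_eq!(bytes_to_path(b"/tmp/trace.rs").to_str(), Some("/tmp/trace.rs"));
}
```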
+ /// + /// # Required features + /// + /// This function requires the `std` feature of the `backtrace` crate to be + /// enabled, and the `std` feature is enabled by default. + pub fn into_path_buf(self) -> PathBuf { + #[cfg(unix)] + { + use std::ffi::OsStr; + use std::os::unix::ffi::OsStrExt; + + if let BytesOrWideString::Bytes(slice) = self { + return PathBuf::from(OsStr::from_bytes(slice)); + } + } + + #[cfg(windows)] + { + use std::ffi::OsString; + use std::os::windows::ffi::OsStringExt; + + if let BytesOrWideString::Wide(slice) = self { + return PathBuf::from(OsString::from_wide(slice)); + } + } + + if let BytesOrWideString::Bytes(b) = self { + if let Ok(s) = str::from_utf8(b) { + return PathBuf::from(s); + } + } + unreachable!() + } +} + +#[cfg(feature = "std")] +impl<'a> fmt::Display for BytesOrWideString<'a> { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + self.to_str_lossy().fmt(f) + } +} diff --git a/third_party/rust/backtrace/src/windows.rs b/third_party/rust/backtrace/src/windows.rs new file mode 100644 index 000000000000..e8012cdc2c88 --- /dev/null +++ b/third_party/rust/backtrace/src/windows.rs @@ -0,0 +1,623 @@ + + + + + + + + + +#![allow(bad_style, dead_code)] + +cfg_if::cfg_if! { + if #[cfg(feature = "verify-winapi")] { + pub use self::winapi::c_void; + pub use self::winapi::HINSTANCE; + pub use self::winapi::FARPROC; + pub use self::winapi::LPSECURITY_ATTRIBUTES; + + mod winapi { + pub use winapi::ctypes::*; + pub use winapi::shared::basetsd::*; + pub use winapi::shared::minwindef::*; + pub use winapi::um::dbghelp::*; + pub use winapi::um::handleapi::*; + pub use winapi::um::libloaderapi::*; + pub use winapi::um::processthreadsapi::*; + pub use winapi::um::winbase::*; + pub use winapi::um::winnt::*; + pub use winapi::um::fileapi::*; + pub use winapi::um::minwinbase::*; + pub use winapi::um::synchapi::*; + } + } else { + pub use core::ffi::c_void; + pub type HINSTANCE = *mut c_void; + pub type FARPROC = *mut c_void; + pub type LPSECURITY_ATTRIBUTES = *mut c_void; + } +} + +macro_rules! 
ffi { + () => (); + + (#[repr($($r:tt)*)] pub struct $name:ident { $(pub $field:ident: $ty:ty,)* } $($rest:tt)*) => ( + #[repr($($r)*)] + #[cfg(not(feature = "verify-winapi"))] + #[derive(Copy, Clone)] + pub struct $name { + $(pub $field: $ty,)* + } + + #[cfg(feature = "verify-winapi")] + pub use self::winapi::$name; + + #[test] + #[cfg(feature = "verify-winapi")] + fn $name() { + use core::mem; + + #[repr($($r)*)] + pub struct $name { + $(pub $field: $ty,)* + } + + assert_eq!( + mem::size_of::<$name>(), + mem::size_of::(), + concat!("size of ", stringify!($name), " is wrong"), + ); + assert_eq!( + mem::align_of::<$name>(), + mem::align_of::(), + concat!("align of ", stringify!($name), " is wrong"), + ); + + type Winapi = winapi::$name; + + fn assert_same(_: T, _: T) {} + + unsafe { + let a = &*(mem::align_of::<$name>() as *const $name); + let b = &*(mem::align_of::() as *const Winapi); + + $( + ffi!(@test_fields a b $field $ty); + )* + } + } + + ffi!($($rest)*); + ); + + // Handling verification against unions in winapi requires some special care + (@test_fields $a:ident $b:ident FltSave $ty:ty) => ( + // Skip this field on x86_64 `CONTEXT` since it's a union and a bit funny + ); + (@test_fields $a:ident $b:ident D $ty:ty) => ({ + let a = &$a.D; + let b = $b.D(); + assert_same(a, b); + assert_eq!(a as *const $ty, b as *const $ty, "misplaced field D"); + }); + (@test_fields $a:ident $b:ident s $ty:ty) => ({ + let a = &$a.s; + let b = $b.s(); + assert_same(a, b); + assert_eq!(a as *const $ty, b as *const $ty, "misplaced field s"); + }); + + // Otherwise test all fields normally. + (@test_fields $a:ident $b:ident $field:ident $ty:ty) => ({ + let a = &$a.$field; + let b = &$b.$field; + assert_same(a, b); + assert_eq!(a as *const $ty, b as *const $ty, + concat!("misplaced field ", stringify!($field))); + }); + + (pub type $name:ident = $ty:ty; $($rest:tt)*) => ( + pub type $name = $ty; + + #[cfg(feature = "verify-winapi")] + #[allow(dead_code)] + const $name: () = { + fn _foo() { + trait SameType {} + impl SameType for (T, T) {} + fn assert_same() {} + + assert_same::<($name, winapi::$name)>(); + } + }; + + ffi!($($rest)*); + ); + + (pub const $name:ident: $ty:ty = $val:expr; $($rest:tt)*) => ( + pub const $name: $ty = $val; + + #[cfg(feature = "verify-winapi")] + #[allow(unused_imports)] + mod $name { + use super::*; + #[test] + fn assert_valid() { + let x: $ty = winapi::$name; + assert_eq!(x, $val); + } + } + + + ffi!($($rest)*); + ); + + (extern "system" { $(pub fn $name:ident($($args:tt)*) -> $ret:ty;)* } $($rest:tt)*) => ( + extern "system" { + $(pub fn $name($($args)*) -> $ret;)* + } + + $( + #[cfg(feature = "verify-winapi")] + mod $name { + #[test] + fn assert_same() { + use super::*; + + assert_eq!($name as usize, winapi::$name as usize); + let mut x: unsafe extern "system" fn($($args)*) -> $ret; + x = $name; + drop(x); + x = winapi::$name; + drop(x); + } + } + )* + + ffi!($($rest)*); + ); + + (impl $name:ident { $($i:tt)* } $($rest:tt)*) => ( + #[cfg(not(feature = "verify-winapi"))] + impl $name { + $($i)* + } + + ffi!($($rest)*); + ); +} + +ffi! 
{ + #[repr(C)] + pub struct STACKFRAME64 { + pub AddrPC: ADDRESS64, + pub AddrReturn: ADDRESS64, + pub AddrFrame: ADDRESS64, + pub AddrStack: ADDRESS64, + pub AddrBStore: ADDRESS64, + pub FuncTableEntry: PVOID, + pub Params: [DWORD64; 4], + pub Far: BOOL, + pub Virtual: BOOL, + pub Reserved: [DWORD64; 3], + pub KdHelp: KDHELP64, + } + + pub type LPSTACKFRAME64 = *mut STACKFRAME64; + + #[repr(C)] + pub struct STACKFRAME_EX { + pub AddrPC: ADDRESS64, + pub AddrReturn: ADDRESS64, + pub AddrFrame: ADDRESS64, + pub AddrStack: ADDRESS64, + pub AddrBStore: ADDRESS64, + pub FuncTableEntry: PVOID, + pub Params: [DWORD64; 4], + pub Far: BOOL, + pub Virtual: BOOL, + pub Reserved: [DWORD64; 3], + pub KdHelp: KDHELP64, + pub StackFrameSize: DWORD, + pub InlineFrameContext: DWORD, + } + + pub type LPSTACKFRAME_EX = *mut STACKFRAME_EX; + + #[repr(C)] + pub struct IMAGEHLP_LINEW64 { + pub SizeOfStruct: DWORD, + pub Key: PVOID, + pub LineNumber: DWORD, + pub FileName: PWSTR, + pub Address: DWORD64, + } + + pub type PIMAGEHLP_LINEW64 = *mut IMAGEHLP_LINEW64; + + #[repr(C)] + pub struct SYMBOL_INFOW { + pub SizeOfStruct: ULONG, + pub TypeIndex: ULONG, + pub Reserved: [ULONG64; 2], + pub Index: ULONG, + pub Size: ULONG, + pub ModBase: ULONG64, + pub Flags: ULONG, + pub Value: ULONG64, + pub Address: ULONG64, + pub Register: ULONG, + pub Scope: ULONG, + pub Tag: ULONG, + pub NameLen: ULONG, + pub MaxNameLen: ULONG, + pub Name: [WCHAR; 1], + } + + pub type PSYMBOL_INFOW = *mut SYMBOL_INFOW; + + pub type PTRANSLATE_ADDRESS_ROUTINE64 = Option< + unsafe extern "system" fn(hProcess: HANDLE, hThread: HANDLE, lpaddr: LPADDRESS64) -> DWORD64, + >; + pub type PGET_MODULE_BASE_ROUTINE64 = + Option<unsafe extern "system" fn(hProcess: HANDLE, Address: DWORD64) -> DWORD64>; + pub type PFUNCTION_TABLE_ACCESS_ROUTINE64 = + Option<unsafe extern "system" fn(ahProcess: HANDLE, AddrBase: DWORD64) -> PVOID>; + pub type PREAD_PROCESS_MEMORY_ROUTINE64 = Option< + unsafe extern "system" fn( + hProcess: HANDLE, + qwBaseAddress: DWORD64, + lpBuffer: PVOID, + nSize: DWORD, + lpNumberOfBytesRead: LPDWORD, + ) -> BOOL, + >; + + #[repr(C)] + pub struct ADDRESS64 { + pub Offset: DWORD64, + pub Segment: WORD, + pub Mode: ADDRESS_MODE, + } + + pub type LPADDRESS64 = *mut ADDRESS64; + + pub type ADDRESS_MODE = u32; + + #[repr(C)] + pub struct KDHELP64 { + pub Thread: DWORD64, + pub ThCallbackStack: DWORD, + pub ThCallbackBStore: DWORD, + pub NextCallback: DWORD, + pub FramePointer: DWORD, + pub KiCallUserMode: DWORD64, + pub KeUserCallbackDispatcher: DWORD64, + pub SystemRangeStart: DWORD64, + pub KiUserExceptionDispatcher: DWORD64, + pub StackBase: DWORD64, + pub StackLimit: DWORD64, + pub BuildVersion: DWORD, + pub Reserved0: DWORD, + pub Reserved1: [DWORD64; 4], + } + + pub const MAX_SYM_NAME: usize = 2000; + pub const AddrModeFlat: ADDRESS_MODE = 3; + pub const TRUE: BOOL = 1; + pub const FALSE: BOOL = 0; + pub const PROCESS_QUERY_INFORMATION: DWORD = 0x400; + pub const IMAGE_FILE_MACHINE_ARM64: u16 = 43620; + pub const IMAGE_FILE_MACHINE_AMD64: u16 = 34404; + pub const IMAGE_FILE_MACHINE_I386: u16 = 332; + pub const IMAGE_FILE_MACHINE_ARMNT: u16 = 452; + pub const FILE_SHARE_READ: DWORD = 0x1; + pub const FILE_SHARE_WRITE: DWORD = 0x2; + pub const OPEN_EXISTING: DWORD = 0x3; + pub const GENERIC_READ: DWORD = 0x80000000; + pub const INFINITE: DWORD = !0; + + pub type DWORD = u32; + pub type PDWORD = *mut u32; + pub type BOOL = i32; + pub type DWORD64 = u64; + pub type PDWORD64 = *mut u64; + pub type HANDLE = *mut c_void; + pub type PVOID = HANDLE; + pub type PCWSTR = *const u16; + pub type LPSTR = *mut i8; + pub type LPCSTR = *const i8; + pub type PWSTR = *mut u16; +
pub type WORD = u16; + pub type ULONG = u32; + pub type ULONG64 = u64; + pub type WCHAR = u16; + pub type PCONTEXT = *mut CONTEXT; + pub type LPDWORD = *mut DWORD; + pub type DWORDLONG = u64; + pub type HMODULE = HINSTANCE; + + extern "system" { + pub fn GetCurrentProcess() -> HANDLE; + pub fn GetCurrentThread() -> HANDLE; + pub fn RtlCaptureContext(ContextRecord: PCONTEXT) -> (); + pub fn LoadLibraryA(a: *const i8) -> HMODULE; + pub fn GetProcAddress(h: HMODULE, name: *const i8) -> FARPROC; + pub fn OpenProcess( + dwDesiredAccess: DWORD, + bInheitHandle: BOOL, + dwProcessId: DWORD, + ) -> HANDLE; + pub fn GetCurrentProcessId() -> DWORD; + pub fn CloseHandle(h: HANDLE) -> BOOL; + pub fn QueryFullProcessImageNameA( + hProcess: HANDLE, + dwFlags: DWORD, + lpExeName: LPSTR, + lpdwSize: PDWORD, + ) -> BOOL; + pub fn CreateFileA( + lpFileName: LPCSTR, + dwDesiredAccess: DWORD, + dwShareMode: DWORD, + lpSecurityAttributes: LPSECURITY_ATTRIBUTES, + dwCreationDisposition: DWORD, + dwFlagsAndAttributes: DWORD, + hTemplateFile: HANDLE, + ) -> HANDLE; + pub fn CreateMutexA( + attrs: LPSECURITY_ATTRIBUTES, + initial: BOOL, + name: LPCSTR, + ) -> HANDLE; + pub fn ReleaseMutex(hMutex: HANDLE) -> BOOL; + pub fn WaitForSingleObjectEx( + hHandle: HANDLE, + dwMilliseconds: DWORD, + bAlertable: BOOL, + ) -> DWORD; + } +} + +#[cfg(target_arch = "aarch64")] +ffi! { + #[repr(C, align(16))] + pub struct CONTEXT { + pub ContextFlags: DWORD, + pub Cpsr: DWORD, + pub u: CONTEXT_u, + pub Sp: u64, + pub Pc: u64, + pub V: [ARM64_NT_NEON128; 32], + pub Fpcr: DWORD, + pub Fpsr: DWORD, + pub Bcr: [DWORD; ARM64_MAX_BREAKPOINTS], + pub Bvr: [DWORD64; ARM64_MAX_BREAKPOINTS], + pub Wcr: [DWORD; ARM64_MAX_WATCHPOINTS], + pub Wvr: [DWORD64; ARM64_MAX_WATCHPOINTS], + } + + #[repr(C)] + pub struct CONTEXT_u { + pub s: CONTEXT_u_s, + } + + impl CONTEXT_u { + pub unsafe fn s(&self) -> &CONTEXT_u_s { + &self.s + } + } + + #[repr(C)] + pub struct CONTEXT_u_s { + pub X0: u64, + pub X1: u64, + pub X2: u64, + pub X3: u64, + pub X4: u64, + pub X5: u64, + pub X6: u64, + pub X7: u64, + pub X8: u64, + pub X9: u64, + pub X10: u64, + pub X11: u64, + pub X12: u64, + pub X13: u64, + pub X14: u64, + pub X15: u64, + pub X16: u64, + pub X17: u64, + pub X18: u64, + pub X19: u64, + pub X20: u64, + pub X21: u64, + pub X22: u64, + pub X23: u64, + pub X24: u64, + pub X25: u64, + pub X26: u64, + pub X27: u64, + pub X28: u64, + pub Fp: u64, + pub Lr: u64, + } + + pub const ARM64_MAX_BREAKPOINTS: usize = 8; + pub const ARM64_MAX_WATCHPOINTS: usize = 2; + + #[repr(C)] + pub struct ARM64_NT_NEON128 { + pub D: [f64; 2], + } +} + +#[cfg(target_arch = "x86")] +ffi! { + #[repr(C)] + pub struct CONTEXT { + pub ContextFlags: DWORD, + pub Dr0: DWORD, + pub Dr1: DWORD, + pub Dr2: DWORD, + pub Dr3: DWORD, + pub Dr6: DWORD, + pub Dr7: DWORD, + pub FloatSave: FLOATING_SAVE_AREA, + pub SegGs: DWORD, + pub SegFs: DWORD, + pub SegEs: DWORD, + pub SegDs: DWORD, + pub Edi: DWORD, + pub Esi: DWORD, + pub Ebx: DWORD, + pub Edx: DWORD, + pub Ecx: DWORD, + pub Eax: DWORD, + pub Ebp: DWORD, + pub Eip: DWORD, + pub SegCs: DWORD, + pub EFlags: DWORD, + pub Esp: DWORD, + pub SegSs: DWORD, + pub ExtendedRegisters: [u8; 512], + } + + #[repr(C)] + pub struct FLOATING_SAVE_AREA { + pub ControlWord: DWORD, + pub StatusWord: DWORD, + pub TagWord: DWORD, + pub ErrorOffset: DWORD, + pub ErrorSelector: DWORD, + pub DataOffset: DWORD, + pub DataSelector: DWORD, + pub RegisterArea: [u8; 80], + pub Spare0: DWORD, + } +} + +#[cfg(target_arch = "x86_64")] +ffi! 
{ + #[repr(C, align(8))] + pub struct CONTEXT { + pub P1Home: DWORDLONG, + pub P2Home: DWORDLONG, + pub P3Home: DWORDLONG, + pub P4Home: DWORDLONG, + pub P5Home: DWORDLONG, + pub P6Home: DWORDLONG, + + pub ContextFlags: DWORD, + pub MxCsr: DWORD, + + pub SegCs: WORD, + pub SegDs: WORD, + pub SegEs: WORD, + pub SegFs: WORD, + pub SegGs: WORD, + pub SegSs: WORD, + pub EFlags: DWORD, + + pub Dr0: DWORDLONG, + pub Dr1: DWORDLONG, + pub Dr2: DWORDLONG, + pub Dr3: DWORDLONG, + pub Dr6: DWORDLONG, + pub Dr7: DWORDLONG, + + pub Rax: DWORDLONG, + pub Rcx: DWORDLONG, + pub Rdx: DWORDLONG, + pub Rbx: DWORDLONG, + pub Rsp: DWORDLONG, + pub Rbp: DWORDLONG, + pub Rsi: DWORDLONG, + pub Rdi: DWORDLONG, + pub R8: DWORDLONG, + pub R9: DWORDLONG, + pub R10: DWORDLONG, + pub R11: DWORDLONG, + pub R12: DWORDLONG, + pub R13: DWORDLONG, + pub R14: DWORDLONG, + pub R15: DWORDLONG, + + pub Rip: DWORDLONG, + + pub FltSave: FLOATING_SAVE_AREA, + + pub VectorRegister: [M128A; 26], + pub VectorControl: DWORDLONG, + + pub DebugControl: DWORDLONG, + pub LastBranchToRip: DWORDLONG, + pub LastBranchFromRip: DWORDLONG, + pub LastExceptionToRip: DWORDLONG, + pub LastExceptionFromRip: DWORDLONG, + } + + #[repr(C)] + pub struct M128A { + pub Low: u64, + pub High: i64, + } +} + +#[repr(C)] +#[cfg(target_arch = "x86_64")] +#[derive(Copy, Clone)] +pub struct FLOATING_SAVE_AREA { + _Dummy: [u8; 512], +} + +#[cfg(target_arch = "arm")] +ffi! { + + + + + + + + + #[repr(C)] + pub struct CONTEXT_u { + + pub D: [ULONG64; 32], + + } + + pub const ARM_MAX_BREAKPOINTS: usize = 8; + pub const ARM_MAX_WATCHPOINTS: usize = 1; + + #[repr(C)] + pub struct CONTEXT { + pub ContextFlags: DWORD, + pub R0: DWORD, + pub R1: DWORD, + pub R2: DWORD, + pub R3: DWORD, + pub R4: DWORD, + pub R5: DWORD, + pub R6: DWORD, + pub R7: DWORD, + pub R8: DWORD, + pub R9: DWORD, + pub R10: DWORD, + pub R11: DWORD, + pub R12: DWORD, + pub Sp: DWORD, + pub Lr: DWORD, + pub Pc: DWORD, + pub Cpsr: DWORD, + pub Fpsrc: DWORD, + pub Padding: DWORD, + pub u: CONTEXT_u, + pub Bvr: [DWORD; ARM_MAX_BREAKPOINTS], + pub Bcr: [DWORD; ARM_MAX_BREAKPOINTS], + pub Wvr: [DWORD; ARM_MAX_WATCHPOINTS], + pub Wcr: [DWORD; ARM_MAX_WATCHPOINTS], + pub Padding2: [DWORD; 2], + } +} diff --git a/third_party/rust/backtrace/tests/accuracy/auxiliary.rs b/third_party/rust/backtrace/tests/accuracy/auxiliary.rs new file mode 100644 index 000000000000..7d0457083e65 --- /dev/null +++ b/third_party/rust/backtrace/tests/accuracy/auxiliary.rs @@ -0,0 +1,16 @@ +#[inline(never)] +pub fn callback(f: F) +where + F: FnOnce((&'static str, u32)), +{ + f((file!(), line!())) +} + +#[inline(always)] +#[cfg_attr(feature = "coresymbolication", inline(never))] +pub fn callback_inlined(f: F) +where + F: FnOnce((&'static str, u32)), +{ + f((file!(), line!())) +} diff --git a/third_party/rust/backtrace/tests/accuracy/main.rs b/third_party/rust/backtrace/tests/accuracy/main.rs new file mode 100644 index 000000000000..782b1d61d871 --- /dev/null +++ b/third_party/rust/backtrace/tests/accuracy/main.rs @@ -0,0 +1,92 @@ +mod auxiliary; + +macro_rules! pos { + () => { + (file!(), line!()) + }; +} + +macro_rules! 
check { + ($($pos:expr),*) => ({ + verify(&[$($pos,)* pos!()]); + }) +} + +type Pos = (&'static str, u32); + +#[test] +fn doit() { + outer(pos!()); +} + +#[inline(never)] +fn outer(main_pos: Pos) { + inner(main_pos, pos!()); + inner_inlined(main_pos, pos!()); +} + +#[inline(never)] +#[rustfmt::skip] +fn inner(main_pos: Pos, outer_pos: Pos) { + check!(main_pos, outer_pos); + check!(main_pos, outer_pos); + let inner_pos = pos!(); auxiliary::callback(|aux_pos| { + check!(main_pos, outer_pos, inner_pos, aux_pos); + }); + let inner_pos = pos!(); auxiliary::callback_inlined(|aux_pos| { + check!(main_pos, outer_pos, inner_pos, aux_pos); + }); +} + +#[inline(always)] +#[cfg_attr(feature = "coresymbolication", inline(never))] +#[rustfmt::skip] +fn inner_inlined(main_pos: Pos, outer_pos: Pos) { + check!(main_pos, outer_pos); + check!(main_pos, outer_pos); + + #[inline(always)] + #[cfg_attr(feature = "coresymbolication", inline(never))] + fn inner_further_inlined(main_pos: Pos, outer_pos: Pos, inner_pos: Pos) { + check!(main_pos, outer_pos, inner_pos); + } + inner_further_inlined(main_pos, outer_pos, pos!()); + + let inner_pos = pos!(); auxiliary::callback(|aux_pos| { + check!(main_pos, outer_pos, inner_pos, aux_pos); + }); + let inner_pos = pos!(); auxiliary::callback_inlined(|aux_pos| { + check!(main_pos, outer_pos, inner_pos, aux_pos); + }); + + + + inner_further_inlined(main_pos, outer_pos, pos!()); +} + +fn verify(filelines: &[Pos]) { + let trace = backtrace::Backtrace::new(); + println!("-----------------------------------"); + println!("looking for:"); + for (file, line) in filelines.iter().rev() { + println!("\t{}:{}", file, line); + } + println!("found:\n{:?}", trace); + let mut symbols = trace.frames().iter().flat_map(|frame| frame.symbols()); + let mut iter = filelines.iter().rev(); + while let Some((file, line)) = iter.next() { + loop { + let sym = match symbols.next() { + Some(sym) => sym, + None => panic!("failed to find {}:{}", file, line), + }; + if let Some(filename) = sym.filename() { + if let Some(lineno) = sym.lineno() { + if filename.ends_with(file) && lineno == *line { + break; + } + } + } + } + } +} diff --git a/third_party/rust/backtrace/tests/concurrent-panics.rs b/third_party/rust/backtrace/tests/concurrent-panics.rs new file mode 100644 index 000000000000..e8b69eed6ad0 --- /dev/null +++ b/third_party/rust/backtrace/tests/concurrent-panics.rs @@ -0,0 +1,72 @@ +use std::env; +use std::panic; +use std::process::Command; +use std::sync::atomic::{AtomicBool, Ordering::SeqCst}; +use std::sync::Arc; +use std::thread; + +const PANICS: usize = 100; +const THREADS: usize = 8; +const VAR: &str = "__THE_TEST_YOU_ARE_LUKE"; + +fn main() { + + + + if cfg!(unix) && (cfg!(target_arch = "arm") || cfg!(target_arch = "aarch64")) { + println!("test result: ok"); + return; + } + + if env::var(VAR).is_err() { + parent(); + } else { + child(); + } +} + +fn parent() { + let me = env::current_exe().unwrap(); + let result = Command::new(&me) + .env("RUST_BACKTRACE", "1") + .env(VAR, "1") + .output() + .unwrap(); + if result.status.success() { + println!("test result: ok"); + return; + } + println!("stdout:\n{}", String::from_utf8_lossy(&result.stdout)); + println!("stderr:\n{}", String::from_utf8_lossy(&result.stderr)); + println!("code: {}", result.status); + panic!(); +} + +fn child() { + let done = Arc::new(AtomicBool::new(false)); + let done2 = done.clone(); + let a = thread::spawn(move || { + while !done2.load(SeqCst) { + format!("{:?}", backtrace::Backtrace::new()); + } + }); + + let threads = 
(0..THREADS) + .map(|_| { + thread::spawn(|| { + for _ in 0..PANICS { + assert!(panic::catch_unwind(|| { + panic!(); + }) + .is_err()); + } + }) + }) + .collect::<Vec<_>>(); + for thread in threads { + thread.join().unwrap(); + } + + done.store(true, SeqCst); + a.join().unwrap(); +} diff --git a/third_party/rust/backtrace/tests/long_fn_name.rs b/third_party/rust/backtrace/tests/long_fn_name.rs index 96ab6c782ec8..494eafb1e664 100644 --- a/third_party/rust/backtrace/tests/long_fn_name.rs +++ b/third_party/rust/backtrace/tests/long_fn_name.rs @@ -1,8 +1,5 @@ extern crate backtrace; -#[cfg(all(windows, feature = "dbghelp"))] -extern crate winapi; - use backtrace::Backtrace; @@ -12,8 +9,8 @@ mod _234567890_234567890_234567890_234567890_234567890 { pub struct _234567890_234567890_234567890_234567890_234567890<T>(T); impl<T> _234567890_234567890_234567890_234567890_234567890<T> { #[allow(dead_code)] - pub fn new() -> ::Backtrace { - ::Backtrace::new() + pub fn new() -> crate::Backtrace { + crate::Backtrace::new() } } } @@ -23,9 +20,7 @@ mod _234567890_234567890_234567890_234567890_234567890 { #[test] #[cfg(all(windows, feature = "dbghelp", target_env = "msvc"))] fn test_long_fn_name() { - use winapi::um::dbghelp; - use _234567890_234567890_234567890_234567890_234567890:: - _234567890_234567890_234567890_234567890_234567890 as S; + use _234567890_234567890_234567890_234567890_234567890::_234567890_234567890_234567890_234567890_234567890 as S; @@ -44,11 +39,9 @@ fn test_long_fn_name() { if let Some(function_name) = symbols[0].name() { let function_name = function_name.as_str().unwrap(); - if function_name.contains( - "::_234567890_234567890_234567890_234567890_234567890") - { + if function_name.contains("::_234567890_234567890_234567890_234567890_234567890") { found_long_name_frame = true; - assert_eq!(function_name.len(), dbghelp::MAX_SYM_NAME - 1); + assert!(function_name.len() > 200); } } } diff --git a/third_party/rust/backtrace/tests/skip_inner_frames.rs b/third_party/rust/backtrace/tests/skip_inner_frames.rs new file mode 100644 index 000000000000..b84a703ba21a --- /dev/null +++ b/third_party/rust/backtrace/tests/skip_inner_frames.rs @@ -0,0 +1,49 @@ +extern crate backtrace; + +use backtrace::Backtrace; + + + + +const ENABLED: bool = cfg!(all( + // Windows hasn't really been tested, and OSX doesn't support actually + // finding an enclosing frame, so disable this + target_os = "linux", + // This is the only method currently that supports accurate enough + // backtraces for this test to work. + feature = "libunwind", + // On ARM finding the enclosing function is simply returning the ip itself.
+ not(target_arch = "arm"), +)); + +#[test] +fn backtrace_new_unresolved_should_start_with_call_site_trace() { + if !ENABLED { + return; + } + let mut b = Backtrace::new_unresolved(); + b.resolve(); + println!("{:?}", b); + + assert!(!b.frames().is_empty()); + + let this_ip = backtrace_new_unresolved_should_start_with_call_site_trace as usize; + println!("this_ip: {:?}", this_ip as *const usize); + let frame_ip = b.frames().first().unwrap().symbol_address() as usize; + assert_eq!(this_ip, frame_ip); +} + +#[test] +fn backtrace_new_should_start_with_call_site_trace() { + if !ENABLED { + return; + } + let b = Backtrace::new(); + println!("{:?}", b); + + assert!(!b.frames().is_empty()); + + let this_ip = backtrace_new_should_start_with_call_site_trace as usize; + let frame_ip = b.frames().first().unwrap().symbol_address() as usize; + assert_eq!(this_ip, frame_ip); +} diff --git a/third_party/rust/backtrace/tests/smoke.rs b/third_party/rust/backtrace/tests/smoke.rs index 198d2b2b9e7c..7d5ab43ce7d5 100644 --- a/third_party/rust/backtrace/tests/smoke.rs +++ b/third_party/rust/backtrace/tests/smoke.rs @@ -1,25 +1,24 @@ extern crate backtrace; -use std::os::raw::c_void; +use backtrace::Frame; use std::thread; static LIBUNWIND: bool = cfg!(all(unix, feature = "libunwind")); static UNIX_BACKTRACE: bool = cfg!(all(unix, feature = "unix-backtrace")); -static LIBBACKTRACE: bool = cfg!(all(unix, feature = "libbacktrace")) && - !cfg!(target_os = "fuchsia") && !cfg!(target_os = "macos") && - !cfg!(target_os = "ios"); -static CORESYMBOLICATION: bool = cfg!(all(any(target_os = "macos", target_os = "ios"), - feature = "coresymbolication")); +static LIBBACKTRACE: bool = cfg!(feature = "libbacktrace") && !cfg!(target_os = "fuchsia"); +static CORESYMBOLICATION: bool = cfg!(all( + any(target_os = "macos", target_os = "ios"), + feature = "coresymbolication" +)); static DLADDR: bool = cfg!(all(unix, feature = "dladdr")) && !cfg!(target_os = "fuchsia"); static DBGHELP: bool = cfg!(all(windows, feature = "dbghelp")); static MSVC: bool = cfg!(target_env = "msvc"); -static GIMLI_SYMBOLIZE: bool = cfg!(all(feature = "gimli-symbolize", - unix, - target_os = "linux")); +static GIMLI_SYMBOLIZE: bool = cfg!(all(feature = "gimli-symbolize", unix, target_os = "linux")); #[test] #[cfg_attr(all(target_arch = "x86", target_env = "msvc"), ignore)] +#[rustfmt::skip] fn smoke_test_frames() { frame_1(line!()); #[inline(never)] fn frame_1(start_line: u32) { frame_2(start_line) } @@ -28,7 +27,7 @@ fn smoke_test_frames() { #[inline(never)] fn frame_4(start_line: u32) { let mut v = Vec::new(); backtrace::trace(|cx| { - v.push((cx.ip(), cx.symbol_address())); + v.push(cx.clone()); true }); @@ -36,43 +35,105 @@ fn smoke_test_frames() { assert!(!LIBUNWIND); assert!(!UNIX_BACKTRACE); assert!(!DBGHELP); - return + return; } - let o = if cfg!(all(windows, target_pointer_width = "32")) {1} else {0}; - - assert_frame(&v, o, 1, frame_4 as usize, "frame_4", - "tests/smoke.rs", start_line + 6); - assert_frame(&v, o, 2, frame_3 as usize, "frame_3", "tests/smoke.rs", - start_line + 3); - assert_frame(&v, o, 3, frame_2 as usize, "frame_2", "tests/smoke.rs", - start_line + 2); - assert_frame(&v, o, 4, frame_1 as usize, "frame_1", "tests/smoke.rs", - start_line + 1); - assert_frame(&v, o, 5, smoke_test_frames as usize, - "smoke_test_frames", "", 0); + let target = frame_4 as usize; + let offset = v + .iter() + .map(|frame| frame.symbol_address() as usize) + .enumerate() + .filter_map(|(i, sym)| { + if sym >= target { + Some((sym, i)) + } else { + None 
+ } + }) + .min() + .unwrap() + .1; + let mut frames = v[offset..].iter(); + + assert_frame( + frames.next().unwrap(), + frame_4 as usize, + "frame_4", + "tests/smoke.rs", + start_line + 6, + ); + assert_frame( + frames.next().unwrap(), + frame_3 as usize, + "frame_3", + "tests/smoke.rs", + start_line + 3, + ); + assert_frame( + frames.next().unwrap(), + frame_2 as usize, + "frame_2", + "tests/smoke.rs", + start_line + 2, + ); + assert_frame( + frames.next().unwrap(), + frame_1 as usize, + "frame_1", + "tests/smoke.rs", + start_line + 1, + ); + assert_frame( + frames.next().unwrap(), + smoke_test_frames as usize, + "smoke_test_frames", + "", + 0, + ); } - fn assert_frame(syms: &[(*mut c_void, *mut c_void)], - offset: usize, - idx: usize, - actual_fn_pointer: usize, - expected_name: &str, - expected_file: &str, - expected_line: u32) { - if offset > idx { return } - let (ip, sym) = syms[idx - offset]; - let ip = ip as usize; - let sym = sym as usize; + fn assert_frame( + frame: &Frame, + actual_fn_pointer: usize, + expected_name: &str, + expected_file: &str, + expected_line: u32, + ) { + backtrace::resolve_frame(frame, |sym| { + print!("symbol ip:{:?} address:{:?} ", frame.ip(), frame.symbol_address()); + if let Some(name) = sym.name() { + print!("name:{} ", name); + } + if let Some(file) = sym.filename() { + print!("file:{} ", file.display()); + } + if let Some(lineno) = sym.lineno() { + print!("lineno:{} ", lineno); + } + println!(); + }); + + let ip = frame.ip() as usize; + let sym = frame.symbol_address() as usize; assert!(ip >= sym); - assert!(sym >= actual_fn_pointer); + assert!( + sym >= actual_fn_pointer, + "{:?} < {:?} ({} {}:{})", + sym as *const usize, + actual_fn_pointer as *const usize, + expected_name, + expected_file, + expected_line, + ); - if !DBGHELP { + + + if !DBGHELP && cfg!(debug_assertions) { assert!(sym - actual_fn_pointer < 1024); } @@ -83,7 +144,7 @@ fn smoke_test_frames() { let mut addr = None; let mut line = None; let mut file = None; - backtrace::resolve(ip as *mut c_void, |sym| { + backtrace::resolve_frame(frame, |sym| { resolved += 1; name = sym.name().map(|v| v.to_string()); addr = sym.addr(); @@ -99,13 +160,19 @@ fn smoke_test_frames() { - if can_resolve && - !(cfg!(target_os = "linux") && DLADDR) && - !(DBGHELP && !MSVC) - { + if can_resolve && !(cfg!(target_os = "linux") && DLADDR) && !(DBGHELP && !MSVC) { let name = name.expect("didn't find a name"); - assert!(name.contains(expected_name), - "didn't find `{}` in `{}`", expected_name, name); + + + + if cfg!(debug_assertions) { + assert!( + name.contains(expected_name), + "didn't find `{}` in `{}`", + expected_name, + name + ); + } } if can_resolve { @@ -116,13 +183,21 @@ fn smoke_test_frames() { let line = line.expect("didn't find a line number"); let file = file.expect("didn't find a line number"); if !expected_file.is_empty() { - assert!(file.ends_with(expected_file), - "{:?} didn't end with {:?}", file, expected_file); + assert!( + file.ends_with(expected_file), + "{:?} didn't end with {:?}", + file, + expected_file + ); } if expected_line != 0 { - assert!(line == expected_line, - "bad line number on frame for `{}`: {} != {}", - expected_name, line, expected_line); + assert!( + line == expected_line, + "bad line number on frame for `{}`: {} != {}", + expected_name, + line, + expected_line + ); } } } @@ -130,18 +205,20 @@ fn smoke_test_frames() { #[test] fn many_threads() { - let threads = (0..16).map(|_| { - thread::spawn(|| { - for _ in 0..16 { - backtrace::trace(|frame| { - 
backtrace::resolve(frame.ip(), |symbol| { - let _s = symbol.name().map(|s| s.to_string()); + let threads = (0..16) + .map(|_| { + thread::spawn(|| { + for _ in 0..16 { + backtrace::trace(|frame| { + backtrace::resolve(frame.ip(), |symbol| { + let _s = symbol.name().map(|s| s.to_string()); + }); + true }); - true - }); - } + } + }) }) - }).collect::>(); + .collect::>(); for t in threads { t.join().unwrap() diff --git a/third_party/rust/blake2b_simd/.cargo-checksum.json b/third_party/rust/blake2b_simd/.cargo-checksum.json index 40880c801400..bbeeada8d69f 100644 --- a/third_party/rust/blake2b_simd/.cargo-checksum.json +++ b/third_party/rust/blake2b_simd/.cargo-checksum.json @@ -1 +1 @@ -{"files":{"Cargo.toml":"a249ac610321c5c22f5381acb98fb6c0dacf8d70c62ecfca51beb945a718d6b7","README.md":"2253eba78d5af06642073c5dfd41253fb8be73d3a0e823bc3d7642c9d0ad0c6c","src/avx2.rs":"a97ec761e4e7f70ff6311f4c1e67cb5136ac66cfc51bc49525b81f9e23814d81","src/blake2bp.rs":"83577d4a22db3b92030d9bd4563aa9ad440f23c64a6ad5f10a9d709f22d50589","src/guts.rs":"1189cab87b18eaaf2abd5bcb3d7d799c75401a312cee6f1f65fdaad30203eb6f","src/lib.rs":"67723a3abc30dc7f3d488f434ced884b5ce962a807991c8f1cc9940df869c342","src/many.rs":"60d07e4d7ad63949fb5432ad05f7c6a525a3eee39d325f7d4e65e901b466be95","src/portable.rs":"c47baa15b311bc95d49f3d189111fe45756fb7d623a1f48f0050ae591817aedf","src/sse41.rs":"7a644b1056b804ada9ddc7586552a4a5c769e576d610ffe7ec74065f7eaff491","src/test.rs":"1685eec6fedc30fca1332cbb78c85e6c9b56eca962b6c6343c91ba69eefac754"},"package":"5850aeee1552f495dd0250014cf64b82b7c8879a89d83b33bbdace2cc4f63182"} \ No newline at end of file +{"files":{"Cargo.toml":"648c10063fa1a16a961df45f194f50982bdf3d41d04586a48d2cc6d69e0252c1","README.md":"2253eba78d5af06642073c5dfd41253fb8be73d3a0e823bc3d7642c9d0ad0c6c","src/avx2.rs":"a97ec761e4e7f70ff6311f4c1e67cb5136ac66cfc51bc49525b81f9e23814d81","src/blake2bp.rs":"83577d4a22db3b92030d9bd4563aa9ad440f23c64a6ad5f10a9d709f22d50589","src/guts.rs":"1189cab87b18eaaf2abd5bcb3d7d799c75401a312cee6f1f65fdaad30203eb6f","src/lib.rs":"67723a3abc30dc7f3d488f434ced884b5ce962a807991c8f1cc9940df869c342","src/many.rs":"60d07e4d7ad63949fb5432ad05f7c6a525a3eee39d325f7d4e65e901b466be95","src/portable.rs":"c47baa15b311bc95d49f3d189111fe45756fb7d623a1f48f0050ae591817aedf","src/sse41.rs":"7a644b1056b804ada9ddc7586552a4a5c769e576d610ffe7ec74065f7eaff491","src/test.rs":"1685eec6fedc30fca1332cbb78c85e6c9b56eca962b6c6343c91ba69eefac754"},"package":"b83b7baab1e671718d78204225800d6b170e648188ac7dc992e9d6bddf87d0c0"} \ No newline at end of file diff --git a/third_party/rust/blake2b_simd/Cargo.toml b/third_party/rust/blake2b_simd/Cargo.toml index f4192373c112..ca92a4cc2dd7 100644 --- a/third_party/rust/blake2b_simd/Cargo.toml +++ b/third_party/rust/blake2b_simd/Cargo.toml @@ -13,7 +13,7 @@ [package] edition = "2018" name = "blake2b_simd" -version = "0.5.8" +version = "0.5.9" authors = ["Jack O'Connor"] description = "a pure Rust BLAKE2b implementation with dynamic SIMD" documentation = "https://docs.rs/blake2b_simd" @@ -24,7 +24,7 @@ repository = "https://github.com/oconnor663/blake2_simd" version = "0.3.5" [dependencies.arrayvec] -version = "0.4.10" +version = "0.5.0" default-features = false [dependencies.constant_time_eq] diff --git a/third_party/rust/block/.cargo-checksum.json b/third_party/rust/block/.cargo-checksum.json new file mode 100644 index 000000000000..91c6a11917e1 --- /dev/null +++ b/third_party/rust/block/.cargo-checksum.json @@ -0,0 +1 @@ 
+{"files":{"Cargo.toml":"24df2f33b48c9e756a4f864f882c1ab07a159b6c2b790f45af2e8162499b6719","README.md":"01c56329fa14f48ea1c9c7986c3660c41beeff2477f61c2d36bd8e666b9fc562","src/lib.rs":"eb31678adf63b53109d9b94eba23699fd5f9ebfdb950f6e1a57ad51bb6a146fa","src/test_utils.rs":"271bd579b67e2440134227cf8ee52c1b8c22854eb0c7923b46306b51810c2cf1"},"package":"0d8c1fef690941d3e7788d328517591fecc684c084084702d6ff1641e993699a"} \ No newline at end of file diff --git a/third_party/rust/block/Cargo.toml b/third_party/rust/block/Cargo.toml new file mode 100644 index 000000000000..f414e7ef2458 --- /dev/null +++ b/third_party/rust/block/Cargo.toml @@ -0,0 +1,23 @@ +[package] +name = "block" +version = "0.1.6" +authors = ["Steven Sheldon"] + +description = "Rust interface for Apple's C language extension of blocks." +keywords = ["blocks", "osx", "ios", "objective-c"] +readme = "README.md" +repository = "http://github.com/SSheldon/rust-block" +documentation = "http://ssheldon.github.io/rust-objc/block/" +license = "MIT" + +exclude = [ + ".gitignore", + ".travis.yml", + "travis_install.sh", + "travis_test.sh", + "tests-ios/**", +] + +[dev-dependencies.objc_test_utils] +version = "0.0" +path = "test_utils" diff --git a/third_party/rust/block/README.md b/third_party/rust/block/README.md new file mode 100644 index 000000000000..78c65d087e26 --- /dev/null +++ b/third_party/rust/block/README.md @@ -0,0 +1,42 @@ +Rust interface for Apple's C language extension of blocks. + +For more information on the specifics of the block implementation, see +Clang's documentation: http://clang.llvm.org/docs/Block-ABI-Apple.html + +## Invoking blocks + +The `Block` struct is used for invoking blocks from Objective-C. For example, +consider this Objective-C function: + +``` objc +int32_t sum(int32_t (^block)(int32_t, int32_t)) { + return block(5, 8); +} +``` + +We could write it in Rust as the following: + +``` rust +unsafe fn sum(block: &Block<(i32, i32), i32>) -> i32 { + block.call((5, 8)) +} +``` + +Note the extra parentheses in the `call` method, since the arguments must be +passed as a tuple. + +## Creating blocks + +Creating a block to pass to Objective-C can be done with the `ConcreteBlock` +struct. For example, to create a block that adds two `i32`s, we could write: + +``` rust +let block = ConcreteBlock::new(|a: i32, b: i32| a + b); +let block = block.copy(); +assert!(unsafe { block.call((5, 8)) } == 13); +``` + +It is important to copy your block to the heap (with the `copy` method) before +passing it to Objective-C; this is because our `ConcreteBlock` is only meant +to be copied once, and we can enforce this in Rust, but if Objective-C code +were to copy it twice we could have a double free. 
diff --git a/third_party/rust/block/src/lib.rs b/third_party/rust/block/src/lib.rs new file mode 100644 index 000000000000..0f261a5ea49c --- /dev/null +++ b/third_party/rust/block/src/lib.rs @@ -0,0 +1,396 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +#[cfg(test)] +mod test_utils; + +use std::marker::PhantomData; +use std::mem; +use std::ops::{Deref, DerefMut}; +use std::os::raw::{c_int, c_ulong, c_void}; +use std::ptr; + +enum Class { } + +#[cfg_attr(any(target_os = "macos", target_os = "ios"), + link(name = "System", kind = "dylib"))] +#[cfg_attr(not(any(target_os = "macos", target_os = "ios")), + link(name = "BlocksRuntime", kind = "dylib"))] +extern { + static _NSConcreteStackBlock: Class; + + fn _Block_copy(block: *const c_void) -> *mut c_void; + fn _Block_release(block: *const c_void); +} + + +pub trait BlockArguments: Sized { + + + + + unsafe fn call_block(self, block: *mut Block) -> R; +} + +macro_rules! block_args_impl { + ($($a:ident : $t:ident),*) => ( + impl<$($t),*> BlockArguments for ($($t,)*) { + unsafe fn call_block(self, block: *mut Block) -> R { + let invoke: unsafe extern fn(*mut Block $(, $t)*) -> R = { + let base = block as *mut BlockBase; + mem::transmute((*base).invoke) + }; + let ($($a,)*) = self; + invoke(block $(, $a)*) + } + } + ); +} + +block_args_impl!(); +block_args_impl!(a: A); +block_args_impl!(a: A, b: B); +block_args_impl!(a: A, b: B, c: C); +block_args_impl!(a: A, b: B, c: C, d: D); +block_args_impl!(a: A, b: B, c: C, d: D, e: E); +block_args_impl!(a: A, b: B, c: C, d: D, e: E, f: F); +block_args_impl!(a: A, b: B, c: C, d: D, e: E, f: F, g: G); +block_args_impl!(a: A, b: B, c: C, d: D, e: E, f: F, g: G, h: H); +block_args_impl!(a: A, b: B, c: C, d: D, e: E, f: F, g: G, h: H, i: I); +block_args_impl!(a: A, b: B, c: C, d: D, e: E, f: F, g: G, h: H, i: I, j: J); +block_args_impl!(a: A, b: B, c: C, d: D, e: E, f: F, g: G, h: H, i: I, j: J, k: K); +block_args_impl!(a: A, b: B, c: C, d: D, e: E, f: F, g: G, h: H, i: I, j: J, k: K, l: L); + +#[repr(C)] +struct BlockBase { + isa: *const Class, + flags: c_int, + _reserved: c_int, + invoke: unsafe extern fn(*mut Block, ...) -> R, +} + + + +#[repr(C)] +pub struct Block { + _base: PhantomData>, +} + +impl Block where A: BlockArguments { + + + + + + + pub unsafe fn call(&self, args: A) -> R { + args.call_block(self as *const _ as *mut _) + } +} + + +pub struct RcBlock { + ptr: *mut Block, +} + +impl RcBlock { + + + + + + + pub unsafe fn new(ptr: *mut Block) -> Self { + RcBlock { ptr: ptr } + } + + + + + pub unsafe fn copy(ptr: *mut Block) -> Self { + let ptr = _Block_copy(ptr as *const c_void) as *mut Block; + RcBlock { ptr: ptr } + } +} + +impl Clone for RcBlock { + fn clone(&self) -> RcBlock { + unsafe { + RcBlock::copy(self.ptr) + } + } +} + +impl Deref for RcBlock { + type Target = Block; + + fn deref(&self) -> &Block { + unsafe { &*self.ptr } + } +} + +impl Drop for RcBlock { + fn drop(&mut self) { + unsafe { + _Block_release(self.ptr as *const c_void); + } + } +} + + +pub trait IntoConcreteBlock: Sized where A: BlockArguments { + + type Ret; + + + fn into_concrete_block(self) -> ConcreteBlock; +} + +macro_rules! 
concrete_block_impl { + ($f:ident) => ( + concrete_block_impl!($f,); + ); + ($f:ident, $($a:ident : $t:ident),*) => ( + impl<$($t,)* R, X> IntoConcreteBlock<($($t,)*)> for X + where X: Fn($($t,)*) -> R { + type Ret = R; + + fn into_concrete_block(self) -> ConcreteBlock<($($t,)*), R, X> { + unsafe extern fn $f<$($t,)* R, X>( + block_ptr: *mut ConcreteBlock<($($t,)*), R, X> + $(, $a: $t)*) -> R + where X: Fn($($t,)*) -> R { + let block = &*block_ptr; + (block.closure)($($a),*) + } + + let f: unsafe extern fn(*mut ConcreteBlock<($($t,)*), R, X> $(, $a: $t)*) -> R = $f; + unsafe { + ConcreteBlock::with_invoke(mem::transmute(f), self) + } + } + } + ); +} + +concrete_block_impl!(concrete_block_invoke_args0); +concrete_block_impl!(concrete_block_invoke_args1, a: A); +concrete_block_impl!(concrete_block_invoke_args2, a: A, b: B); +concrete_block_impl!(concrete_block_invoke_args3, a: A, b: B, c: C); +concrete_block_impl!(concrete_block_invoke_args4, a: A, b: B, c: C, d: D); +concrete_block_impl!(concrete_block_invoke_args5, a: A, b: B, c: C, d: D, e: E); +concrete_block_impl!(concrete_block_invoke_args6, a: A, b: B, c: C, d: D, e: E, f: F); +concrete_block_impl!(concrete_block_invoke_args7, a: A, b: B, c: C, d: D, e: E, f: F, g: G); +concrete_block_impl!(concrete_block_invoke_args8, a: A, b: B, c: C, d: D, e: E, f: F, g: G, h: H); +concrete_block_impl!(concrete_block_invoke_args9, a: A, b: B, c: C, d: D, e: E, f: F, g: G, h: H, i: I); +concrete_block_impl!(concrete_block_invoke_args10, a: A, b: B, c: C, d: D, e: E, f: F, g: G, h: H, i: I, j: J); +concrete_block_impl!(concrete_block_invoke_args11, a: A, b: B, c: C, d: D, e: E, f: F, g: G, h: H, i: I, j: J, k: K); +concrete_block_impl!(concrete_block_invoke_args12, a: A, b: B, c: C, d: D, e: E, f: F, g: G, h: H, i: I, j: J, k: K, l: L); + + + +#[repr(C)] +pub struct ConcreteBlock { + base: BlockBase, + descriptor: Box>>, + closure: F, +} + +impl ConcreteBlock + where A: BlockArguments, F: IntoConcreteBlock { + + + + pub fn new(closure: F) -> Self { + closure.into_concrete_block() + } +} + +impl ConcreteBlock { + + + + unsafe fn with_invoke(invoke: unsafe extern fn(*mut Self, ...) 
-> R, + closure: F) -> Self { + ConcreteBlock { + base: BlockBase { + isa: &_NSConcreteStackBlock, + + flags: 1 << 25, + _reserved: 0, + invoke: mem::transmute(invoke), + }, + descriptor: Box::new(BlockDescriptor::new()), + closure: closure, + } + } +} + +impl ConcreteBlock where F: 'static { + + pub fn copy(self) -> RcBlock { + unsafe { + let mut block = self; + let copied = RcBlock::copy(&mut *block); + + + + mem::forget(block); + copied + } + } +} + +impl Clone for ConcreteBlock where F: Clone { + fn clone(&self) -> Self { + unsafe { + ConcreteBlock::with_invoke(mem::transmute(self.base.invoke), + self.closure.clone()) + } + } +} + +impl Deref for ConcreteBlock { + type Target = Block; + + fn deref(&self) -> &Block { + unsafe { &*(&self.base as *const _ as *const Block) } + } +} + +impl DerefMut for ConcreteBlock { + fn deref_mut(&mut self) -> &mut Block { + unsafe { &mut *(&mut self.base as *mut _ as *mut Block) } + } +} + +unsafe extern fn block_context_dispose(block: &mut B) { + + ptr::read(block); +} + +unsafe extern fn block_context_copy(_dst: &mut B, _src: &B) { + +} + +#[repr(C)] +struct BlockDescriptor { + _reserved: c_ulong, + block_size: c_ulong, + copy_helper: unsafe extern fn(&mut B, &B), + dispose_helper: unsafe extern fn(&mut B), +} + +impl BlockDescriptor { + fn new() -> BlockDescriptor { + BlockDescriptor { + _reserved: 0, + block_size: mem::size_of::() as c_ulong, + copy_helper: block_context_copy::, + dispose_helper: block_context_dispose::, + } + } +} + +#[cfg(test)] +mod tests { + use test_utils::*; + use super::{ConcreteBlock, RcBlock}; + + #[test] + fn test_call_block() { + let block = get_int_block_with(13); + unsafe { + assert!(block.call(()) == 13); + } + } + + #[test] + fn test_call_block_args() { + let block = get_add_block_with(13); + unsafe { + assert!(block.call((2,)) == 15); + } + } + + #[test] + fn test_create_block() { + let block = ConcreteBlock::new(|| 13); + let result = invoke_int_block(&block); + assert!(result == 13); + } + + #[test] + fn test_create_block_args() { + let block = ConcreteBlock::new(|a: i32| a + 5); + let result = invoke_add_block(&block, 6); + assert!(result == 11); + } + + #[test] + fn test_concrete_block_copy() { + let s = "Hello!".to_string(); + let expected_len = s.len() as i32; + let block = ConcreteBlock::new(move || s.len() as i32); + assert!(invoke_int_block(&block) == expected_len); + + let copied = block.copy(); + assert!(invoke_int_block(&copied) == expected_len); + } + + #[test] + fn test_concrete_block_stack_copy() { + fn make_block() -> RcBlock<(), i32> { + let x = 7; + let block = ConcreteBlock::new(move || x); + block.copy() + } + + let block = make_block(); + assert!(invoke_int_block(&block) == 7); + } +} diff --git a/third_party/rust/block/src/test_utils.rs b/third_party/rust/block/src/test_utils.rs new file mode 100644 index 000000000000..940a3d227206 --- /dev/null +++ b/third_party/rust/block/src/test_utils.rs @@ -0,0 +1,31 @@ +extern crate objc_test_utils; + +use {Block, RcBlock}; + +pub fn get_int_block_with(i: i32) -> RcBlock<(), i32> { + unsafe { + let ptr = objc_test_utils::get_int_block_with(i); + RcBlock::new(ptr as *mut _) + } +} + +pub fn get_add_block_with(i: i32) -> RcBlock<(i32,), i32> { + unsafe { + let ptr = objc_test_utils::get_add_block_with(i); + RcBlock::new(ptr as *mut _) + } +} + +pub fn invoke_int_block(block: &Block<(), i32>) -> i32 { + let ptr = block as *const _; + unsafe { + objc_test_utils::invoke_int_block(ptr as *mut _) + } +} + +pub fn invoke_add_block(block: &Block<(i32,), i32>, a: 
i32) -> i32 { + let ptr = block as *const _; + unsafe { + objc_test_utils::invoke_add_block(ptr as *mut _, a) + } +} diff --git a/third_party/rust/cc/.cargo-checksum.json b/third_party/rust/cc/.cargo-checksum.json index ec6b48d2510f..417fde7953fd 100644 --- a/third_party/rust/cc/.cargo-checksum.json +++ b/third_party/rust/cc/.cargo-checksum.json @@ -1 +1 @@ -{"files":{"Cargo.toml":"13b4fcd3d3f5fa9fa28c6714fe82f142f366c8f2a30a218ef6c1128ec20c31bc","LICENSE-APACHE":"a60eea817514531668d7e00765731449fe14d059d3249e0bc93b36de45f759f2","LICENSE-MIT":"378f5840b258e2779c39418f3f2d7b2ba96f1c7917dd6be0713f88305dbda397","README.md":"190336dc7613f3b991580c39170b72967cafbd4c2d96ca2681249a8d5df2b1af","azure-pipelines.yml":"bb1c0c4147eeff07ddf230ca7535cd15a8acd05f8d431bf90988cd0647bb0775","ci/azure-install-rust.yml":"358198fb3cb23dd145434f777edde39c652ae390a5d64caf634a00dd78007286","ci/azure-steps.yml":"34b8c5dfab1c0a298b0a71e14daf15b82af07e2fb8d85684c4ccbf9f7f95ffb8","src/bin/gcc-shim.rs":"d6be9137cb48b86891e7b263adbf492e1193ffe682db9ba4a88eb1079b874b58","src/com.rs":"8b9a54af5400c259f877126cc68ea63ada4fe66e84c9b840711c95e570b15774","src/lib.rs":"6217a06cc91af81df4affa92d78619fbaa3f57ffe5b7780d19373a9353a60415","src/registry.rs":"3cc1b5a50879fa751572878ae1d0afbfc960c11665258492754b2c8bccb0ff5d","src/setup_config.rs":"f5c45afc99ad3b7c1311242bc4baf37e861d740ab81bf6ca90e2aa283672e65a","src/winapi.rs":"d7929b36130e33f1caa6bd444b378b83023b2b82d589c6e0ab38c4ff6c950da8","src/windows_registry.rs":"8671c26da1e0f206fc1ef48d42a4ffe95dc93575093ee2b23e112be996a02b66","tests/cc_env.rs":"bf7b14aa52af04294f648b2934f0f1830c5a0bdac1676310b8aa1f61458e7782","tests/support/mod.rs":"80dc87e54025197104cfb62d1af7a3400a3a0ddf0f2d98ea4ef4111cb1f0c890","tests/test.rs":"d839b2bcdb367180e537c0d26cb2d918d5ddfc587801c7051f4955d25688ea7d"},"package":"30f813bf45048a18eda9190fd3c6b78644146056740c43172a5a3699118588fd"} \ No newline at end of file +{"files":{"Cargo.lock":"3aff5f8b0a7f4d72852b11b0526f0002e6bf55f19f1ebd6470d7f97fbd540e60","Cargo.toml":"6ab10d9b6a9c6f0909074e6698c90c6b6a7223661ec2e83174d2593117cbe7f2","LICENSE-APACHE":"a60eea817514531668d7e00765731449fe14d059d3249e0bc93b36de45f759f2","LICENSE-MIT":"378f5840b258e2779c39418f3f2d7b2ba96f1c7917dd6be0713f88305dbda397","README.md":"7184fbdf375a057e673257348f6d7584c0dd11b66318d98f3647f69eb610b097","src/bin/gcc-shim.rs":"b77907875029494b6288841c3aed2e4939ed40708c7f597fca5c9e2570490ca6","src/com.rs":"bcdaf1c28b71e6ef889c6b08d1ce9d7c0761344a677f523bc4c3cd297957f804","src/lib.rs":"4753929dbb7b676c19d7cfa06d0a47e37003554b80c536cbf2b892d591ef61c2","src/registry.rs":"3cc1b5a50879fa751572878ae1d0afbfc960c11665258492754b2c8bccb0ff5d","src/setup_config.rs":"7014103587d3382eac599cb76f016e2609b8140970861b2237982d1db24af265","src/winapi.rs":"ea8b7edbb9ff87957254f465c2334e714c5d6b3b19a8d757c48ea7ca0881c50c","src/windows_registry.rs":"388e79dcf3e84078ae0b086c6cdee9cf9eb7e3ffafdcbf3e2df26163661f5856","tests/cc_env.rs":"e02b3b0824ad039b47e4462c5ef6dbe6c824c28e7953af94a0f28f7b5158042e","tests/cflags.rs":"57f06eb5ce1557e5b4a032d0c4673e18fbe6f8d26c1deb153126e368b96b41b3","tests/cxxflags.rs":"c2c6c6d8a0d7146616fa1caed26876ee7bc9fcfffd525eb4743593cade5f3371","tests/support/mod.rs":"71620b178583b6e6e5e0d4cac14e2cef6afc62fb6841e0c72ed1784543abf8ac","tests/test.rs":"1605640c9b94a77f48fc92e1dc0485bdf1960da5626e2e00279e4703691656bc"},"package":"aa87058dce70a3ff5621797f1506cb837edd02ac4c0ae642b4542dce802908b8"} \ No newline at end of file diff --git a/third_party/rust/cc/Cargo.lock 
b/third_party/rust/cc/Cargo.lock new file mode 100644 index 000000000000..ee899568d125 --- /dev/null +++ b/third_party/rust/cc/Cargo.lock @@ -0,0 +1,154 @@ +# This file is automatically @generated by Cargo. +# It is not intended for manual editing. +[[package]] +name = "cc" +version = "1.0.47" +dependencies = [ + "jobserver 0.1.17 (registry+https://github.com/rust-lang/crates.io-index)", + "num_cpus 1.10.1 (registry+https://github.com/rust-lang/crates.io-index)", + "tempdir 0.3.7 (registry+https://github.com/rust-lang/crates.io-index)", +] + +[[package]] +name = "cfg-if" +version = "0.1.10" +source = "registry+https://github.com/rust-lang/crates.io-index" + +[[package]] +name = "fuchsia-cprng" +version = "0.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" + +[[package]] +name = "getrandom" +version = "0.1.12" +source = "registry+https://github.com/rust-lang/crates.io-index" +dependencies = [ + "cfg-if 0.1.10 (registry+https://github.com/rust-lang/crates.io-index)", + "libc 0.2.64 (registry+https://github.com/rust-lang/crates.io-index)", + "wasi 0.7.0 (registry+https://github.com/rust-lang/crates.io-index)", +] + +[[package]] +name = "jobserver" +version = "0.1.17" +source = "registry+https://github.com/rust-lang/crates.io-index" +dependencies = [ + "getrandom 0.1.12 (registry+https://github.com/rust-lang/crates.io-index)", + "libc 0.2.64 (registry+https://github.com/rust-lang/crates.io-index)", + "log 0.4.8 (registry+https://github.com/rust-lang/crates.io-index)", +] + +[[package]] +name = "libc" +version = "0.2.64" +source = "registry+https://github.com/rust-lang/crates.io-index" + +[[package]] +name = "log" +version = "0.4.8" +source = "registry+https://github.com/rust-lang/crates.io-index" +dependencies = [ + "cfg-if 0.1.10 (registry+https://github.com/rust-lang/crates.io-index)", +] + +[[package]] +name = "num_cpus" +version = "1.10.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +dependencies = [ + "libc 0.2.64 (registry+https://github.com/rust-lang/crates.io-index)", +] + +[[package]] +name = "rand" +version = "0.4.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +dependencies = [ + "fuchsia-cprng 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)", + "libc 0.2.64 (registry+https://github.com/rust-lang/crates.io-index)", + "rand_core 0.3.1 (registry+https://github.com/rust-lang/crates.io-index)", + "rdrand 0.4.0 (registry+https://github.com/rust-lang/crates.io-index)", + "winapi 0.3.8 (registry+https://github.com/rust-lang/crates.io-index)", +] + +[[package]] +name = "rand_core" +version = "0.3.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +dependencies = [ + "rand_core 0.4.2 (registry+https://github.com/rust-lang/crates.io-index)", +] + +[[package]] +name = "rand_core" +version = "0.4.2" +source = "registry+https://github.com/rust-lang/crates.io-index" + +[[package]] +name = "rdrand" +version = "0.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +dependencies = [ + "rand_core 0.3.1 (registry+https://github.com/rust-lang/crates.io-index)", +] + +[[package]] +name = "remove_dir_all" +version = "0.5.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +dependencies = [ + "winapi 0.3.8 (registry+https://github.com/rust-lang/crates.io-index)", +] + +[[package]] +name = "tempdir" +version = "0.3.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +dependencies = [ + "rand 0.4.6 (registry+https://github.com/rust-lang/crates.io-index)", + 
"remove_dir_all 0.5.2 (registry+https://github.com/rust-lang/crates.io-index)", +] + +[[package]] +name = "wasi" +version = "0.7.0" +source = "registry+https://github.com/rust-lang/crates.io-index" + +[[package]] +name = "winapi" +version = "0.3.8" +source = "registry+https://github.com/rust-lang/crates.io-index" +dependencies = [ + "winapi-i686-pc-windows-gnu 0.4.0 (registry+https://github.com/rust-lang/crates.io-index)", + "winapi-x86_64-pc-windows-gnu 0.4.0 (registry+https://github.com/rust-lang/crates.io-index)", +] + +[[package]] +name = "winapi-i686-pc-windows-gnu" +version = "0.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" + +[[package]] +name = "winapi-x86_64-pc-windows-gnu" +version = "0.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" + +[metadata] +"checksum cfg-if 0.1.10 (registry+https://github.com/rust-lang/crates.io-index)" = "4785bdd1c96b2a846b2bd7cc02e86b6b3dbf14e7e53446c4f54c92a361040822" +"checksum fuchsia-cprng 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)" = "a06f77d526c1a601b7c4cdd98f54b5eaabffc14d5f2f0296febdc7f357c6d3ba" +"checksum getrandom 0.1.12 (registry+https://github.com/rust-lang/crates.io-index)" = "473a1265acc8ff1e808cd0a1af8cee3c2ee5200916058a2ca113c29f2d903571" +"checksum jobserver 0.1.17 (registry+https://github.com/rust-lang/crates.io-index)" = "f2b1d42ef453b30b7387e113da1c83ab1605d90c5b4e0eb8e96d016ed3b8c160" +"checksum libc 0.2.64 (registry+https://github.com/rust-lang/crates.io-index)" = "74dfca3d9957906e8d1e6a0b641dc9a59848e793f1da2165889fd4f62d10d79c" +"checksum log 0.4.8 (registry+https://github.com/rust-lang/crates.io-index)" = "14b6052be84e6b71ab17edffc2eeabf5c2c3ae1fdb464aae35ac50c67a44e1f7" +"checksum num_cpus 1.10.1 (registry+https://github.com/rust-lang/crates.io-index)" = "bcef43580c035376c0705c42792c294b66974abbfd2789b511784023f71f3273" +"checksum rand 0.4.6 (registry+https://github.com/rust-lang/crates.io-index)" = "552840b97013b1a26992c11eac34bdd778e464601a4c2054b5f0bff7c6761293" +"checksum rand_core 0.3.1 (registry+https://github.com/rust-lang/crates.io-index)" = "7a6fdeb83b075e8266dcc8762c22776f6877a63111121f5f8c7411e5be7eed4b" +"checksum rand_core 0.4.2 (registry+https://github.com/rust-lang/crates.io-index)" = "9c33a3c44ca05fa6f1807d8e6743f3824e8509beca625669633be0acbdf509dc" +"checksum rdrand 0.4.0 (registry+https://github.com/rust-lang/crates.io-index)" = "678054eb77286b51581ba43620cc911abf02758c91f93f479767aed0f90458b2" +"checksum remove_dir_all 0.5.2 (registry+https://github.com/rust-lang/crates.io-index)" = "4a83fa3702a688b9359eccba92d153ac33fd2e8462f9e0e3fdf155239ea7792e" +"checksum tempdir 0.3.7 (registry+https://github.com/rust-lang/crates.io-index)" = "15f2b5fb00ccdf689e0149d1b1b3c03fead81c2b37735d812fa8bddbbf41b6d8" +"checksum wasi 0.7.0 (registry+https://github.com/rust-lang/crates.io-index)" = "b89c3ce4ce14bdc6fb6beaf9ec7928ca331de5df7e5ea278375642a2f478570d" +"checksum winapi 0.3.8 (registry+https://github.com/rust-lang/crates.io-index)" = "8093091eeb260906a183e6ae1abdba2ef5ef2257a21801128899c3fc699229c6" +"checksum winapi-i686-pc-windows-gnu 0.4.0 (registry+https://github.com/rust-lang/crates.io-index)" = "ac3b87c63620426dd9b991e5ce0329eff545bccbbb34f3be09ff6fb6ab51b7b6" +"checksum winapi-x86_64-pc-windows-gnu 0.4.0 (registry+https://github.com/rust-lang/crates.io-index)" = "712e227841d057c1ee1cd2fb22fa7e5a5461ae8e48fa2ca79ec42cfc1931183f" diff --git a/third_party/rust/cc/Cargo.toml b/third_party/rust/cc/Cargo.toml index 764ef82ca1aa..8efe26374c99 100644 --- 
a/third_party/rust/cc/Cargo.toml +++ b/third_party/rust/cc/Cargo.toml @@ -3,7 +3,7 @@ # When uploading crates to the registry Cargo will automatically # "normalize" Cargo.toml files for maximal compatibility # with all versions of Cargo and also rewrite `path` dependencies -# to registry (e.g. crates.io) dependencies +# to registry (e.g., crates.io) dependencies # # If you believe there's an error in this file please file an # issue against the rust-lang/cargo repository. If you're @@ -11,8 +11,9 @@ # will likely look very different (and much more reasonable) [package] +edition = "2018" name = "cc" -version = "1.0.34" +version = "1.0.47" authors = ["Alex Crichton "] exclude = ["/.travis.yml", "/appveyor.yml"] description = "A build-time dependency for Cargo build scripts to assist in invoking the native\nC compiler to compile native C code into a static archive to be linked into Rust\ncode.\n" @@ -23,11 +24,15 @@ keywords = ["build-dependencies"] categories = ["development-tools::build-utils"] license = "MIT/Apache-2.0" repository = "https://github.com/alexcrichton/cc-rs" -[dependencies.rayon] -version = "1.0" +[dependencies.jobserver] +version = "0.1.16" +optional = true + +[dependencies.num_cpus] +version = "1.10" optional = true [dev-dependencies.tempdir] version = "0.3" [features] -parallel = ["rayon"] +parallel = ["num_cpus", "jobserver"] diff --git a/third_party/rust/cc/README.md b/third_party/rust/cc/README.md index e3c6ad006ccc..68448acecae0 100644 --- a/third_party/rust/cc/README.md +++ b/third_party/rust/cc/README.md @@ -2,8 +2,6 @@ A library to compile C/C++/assembly into a Rust library/application. -[![Build Status](https://dev.azure.com/alexcrichton/cc-rs/_apis/build/status/alexcrichton.cc-rs?branchName=master)](https://dev.azure.com/alexcrichton/cc-rs/_build/latest?definitionId=5&branchName=master) - [Documentation](https://docs.rs/cc) A simple library meant to be used as a build dependency with Cargo packages in @@ -28,8 +26,6 @@ Next up, you'll want to write a build script like so: ```rust,no_run // build.rs -extern crate cc; - fn main() { cc::Build::new() .file("foo.c") @@ -143,8 +139,6 @@ required varies per platform, but there are three broad categories: `Build`: ```rust,no_run -extern crate cc; - fn main() { cc::Build::new() .cpp(true) // Switch to C++ library compilation. @@ -163,8 +157,6 @@ linked to the crate target. on `Build` (currently for GNU/Clang toolchains only): ```rust,no_run -extern crate cc; - fn main() { cc::Build::new() // Switch to CUDA C++ library compilation using NVCC. 
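// A minimal sketch (not part of this patch) of how a typical consumer
// drives this crate from a Cargo build script; the C file name and the
// flag below are placeholders, not files in this tree.
fn main() {
    cc::Build::new()
        .file("src/native/helper.c")  // hypothetical C source
        .flag_if_supported("-Wall")   // recorded in `flags_supported`, added only if the probe compiles
        .compile("helper");           // produces libhelper.a and emits cargo link directives
}
// With the `parallel` feature enabled, the same script compiles multiple
// sources concurrently; as of this version bump that is coordinated through
// Cargo's jobserver (the new `jobserver`/`num_cpus` dependencies) rather
// than rayon, as the Cargo.toml hunk above shows.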
diff --git a/third_party/rust/cc/azure-pipelines.yml b/third_party/rust/cc/azure-pipelines.yml deleted file mode 100644 index f20e1a7a8acf..000000000000 --- a/third_party/rust/cc/azure-pipelines.yml +++ /dev/null @@ -1,87 +0,0 @@ -trigger: - - master - -jobs: - - job: min - displayName: Minimum Rust - steps: - - template: ci/azure-install-rust.yml - parameters: - toolchain: 1.16.0 - - script: cargo build - - - job: Linux - pool: - vmImage: ubuntu-16.04 - strategy: - matrix: - x86_64: - TARGET: x86_64-unknown-linux-gnu - i686: - TARGET: i686-unknown-linux-gnu - x86_64-beta: - TARGET: x86_64-unknown-linux-gnu - TOOLCHAIN: beta - x86_64-nightly: - TARGET: x86_64-unknown-linux-gnu - TOOLCHAIN: nightly - - - job: macOS - pool: - vmImage: macos-10.13 - steps: - - template: ci/azure-steps.yml - strategy: - matrix: - x86_64: - TARGET: x86_64-apple-darwin - aarch64-ios: - TARGET: aarch64-apple-ios - NO_RUN: --no-run - - - job: Windows_vs2019 - pool: - vmImage: windows-2019 - steps: - - template: ci/azure-steps.yml - strategy: - matrix: - x86_64-msvc: - TARGET: x86_64-pc-windows-msvc - - - job: Windows_vs2017 - pool: - vmImage: vs2017-win2016 - steps: - - template: ci/azure-steps.yml - strategy: - matrix: - x86_64-msvc: - TARGET: x86_64-pc-windows-msvc - i686-msvc: - TARGET: i686-pc-windows-msvc - x86_64-gnu: - TARGET: x86_64-pc-windows-gnu - i686-gnu: - TARGET: i686-pc-windows-gnu - - - job: Windows_vs2015 - pool: - vmImage: vs2015-win2012r2 - steps: - - template: ci/azure-steps.yml - strategy: - matrix: - x86_64-msvc: - TARGET: x86_64-pc-windows-msvc - i686-msvc: - TARGET: i686-pc-windows-msvc - - - job: docs - steps: - - template: ci/azure-install-rust.yml - - script: cargo doc --no-deps --all-features - - script: curl -LsSf https://git.io/fhJ8n | rustc - && (cd target/doc && ../../rust_out) - condition: and(succeeded(), eq(variables['Build.SourceBranch'], 'refs/heads/master')) - env: - GITHUB_DEPLOY_KEY: $(GITHUB_DEPLOY_KEY) diff --git a/third_party/rust/cc/ci/azure-install-rust.yml b/third_party/rust/cc/ci/azure-install-rust.yml deleted file mode 100644 index fa7eae459b79..000000000000 --- a/third_party/rust/cc/ci/azure-install-rust.yml +++ /dev/null @@ -1,23 +0,0 @@ -steps: - - bash: | - set -e - toolchain=$TOOLCHAIN - if [ "$toolchain" = "" ]; then - toolchain=stable - fi - curl https://sh.rustup.rs -sSf | sh -s -- -y --default-toolchain $toolchain - echo "##vso[task.prependpath]$HOME/.cargo/bin" - displayName: Install rust (unix) - condition: ne( variables['Agent.OS'], 'Windows_NT' ) - - - script: | - curl -sSf -o rustup-init.exe https://win.rustup.rs - rustup-init.exe -y --default-toolchain stable-%TARGET% - echo ##vso[task.prependpath]%USERPROFILE%\.cargo\bin - displayName: Install rust (windows) - condition: eq( variables['Agent.OS'], 'Windows_NT' ) - - - script: | - rustc -Vv - cargo -V - displayName: Query rust and cargo versions diff --git a/third_party/rust/cc/ci/azure-steps.yml b/third_party/rust/cc/ci/azure-steps.yml deleted file mode 100644 index 3e8cf5026f58..000000000000 --- a/third_party/rust/cc/ci/azure-steps.yml +++ /dev/null @@ -1,17 +0,0 @@ -steps: - - template: azure-install-rust.yml - - bash: rustup target add $TARGET - displayName: Install Rust target - - - script: cargo build - displayName: "Normal build" - - bash: cargo test $NO_RUN -- --test-threads 1 - displayName: "Crate tests" - - bash: cargo test $NO_RUN --features parallel -- --test-threads 1 - displayName: "Crate tests (parallel)" - - bash: cargo test $NO_RUN --manifest-path cc-test/Cargo.toml --target $TARGET - 
displayName: "cc-test tests" - - bash: cargo test $NO_RUN --manifest-path cc-test/Cargo.toml --target $TARGET --features parallel - displayName: "cc-test tests (parallel)" - - bash: cargo test $NO_RUN --manifest-path cc-test/Cargo.toml --target $TARGET --release - displayName: "cc-test tests (release)" diff --git a/third_party/rust/cc/src/bin/gcc-shim.rs b/third_party/rust/cc/src/bin/gcc-shim.rs index 7fd0ea8fa84b..0375ba8cc5e5 100644 --- a/third_party/rust/cc/src/bin/gcc-shim.rs +++ b/third_party/rust/cc/src/bin/gcc-shim.rs @@ -6,18 +6,43 @@ use std::io::prelude::*; use std::path::PathBuf; fn main() { - let out_dir = PathBuf::from(env::var_os("GCCTEST_OUT_DIR").unwrap()); + let mut args = env::args(); + let program = args.next().expect("Unexpected empty args"); + + let out_dir = PathBuf::from( + env::var_os("GCCTEST_OUT_DIR").expect(&format!("{}: GCCTEST_OUT_DIR not found", program)), + ); + + for i in 0.. { - let candidate = out_dir.join(format!("out{}", i)); + let candidate = &out_dir.join(format!("out{}", i)); + + if candidate.exists() { continue; } - let mut f = File::create(candidate).unwrap(); - for arg in env::args().skip(1) { - writeln!(f, "{}", arg).unwrap(); - } - File::create(out_dir.join("libfoo.a")).unwrap(); + + let mut f = File::create(candidate).expect(&format!( + "{}: can't create candidate: {}", + program, + candidate.to_string_lossy() + )); + for arg in args { + writeln!(f, "{}", arg).expect(&format!( + "{}: can't write to candidate: {}", + program, + candidate.to_string_lossy() + )); + } break; } + + + let path = &out_dir.join("libfoo.a"); + File::create(path).expect(&format!( + "{}: can't create libfoo.a: {}", + program, + path.to_string_lossy() + )); } diff --git a/third_party/rust/cc/src/com.rs b/third_party/rust/cc/src/com.rs index 58d5983fbb79..c10e1400730c 100644 --- a/third_party/rust/cc/src/com.rs +++ b/third_party/rust/cc/src/com.rs @@ -7,19 +7,19 @@ #![allow(unused)] +use crate::winapi::CoInitializeEx; +use crate::winapi::IUnknown; +use crate::winapi::Interface; +use crate::winapi::BSTR; +use crate::winapi::COINIT_MULTITHREADED; +use crate::winapi::{SysFreeString, SysStringLen}; +use crate::winapi::{HRESULT, S_FALSE, S_OK}; use std::ffi::{OsStr, OsString}; use std::mem::forget; use std::ops::Deref; use std::os::windows::ffi::{OsStrExt, OsStringExt}; use std::ptr::null_mut; use std::slice::from_raw_parts; -use winapi::Interface; -use winapi::BSTR; -use winapi::CoInitializeEx; -use winapi::COINIT_MULTITHREADED; -use winapi::{SysFreeString, SysStringLen}; -use winapi::IUnknown; -use winapi::{HRESULT, S_FALSE, S_OK}; pub fn initialize() -> Result<(), HRESULT> { let err = unsafe { CoInitializeEx(null_mut(), COINIT_MULTITHREADED) }; diff --git a/third_party/rust/cc/src/lib.rs b/third_party/rust/cc/src/lib.rs index e5074218f17c..04a8aa75605e 100644 --- a/third_party/rust/cc/src/lib.rs +++ b/third_party/rust/cc/src/lib.rs @@ -48,8 +48,6 @@ - - @@ -58,18 +56,16 @@ #![allow(deprecated)] #![deny(missing_docs)] -#[cfg(feature = "parallel")] -extern crate rayon; - +use std::collections::HashMap; use std::env; use std::ffi::{OsStr, OsString}; +use std::fmt::{self, Display}; use std::fs; +use std::io::{self, BufRead, BufReader, Read, Write}; use std::path::{Path, PathBuf}; use std::process::{Child, Command, Stdio}; -use std::io::{self, BufRead, BufReader, Read, Write}; -use std::thread::{self, JoinHandle}; -use std::collections::HashMap; use std::sync::{Arc, Mutex}; +use std::thread::{self, JoinHandle}; @@ -98,6 +94,8 @@ pub struct Build { flags: Vec, flags_supported: Vec, 
known_flag_support_status: Arc>>, + ar_flags: Vec, + no_default_flags: bool, files: Vec, cpp: bool, cpp_link_stdlib: Option>, @@ -108,6 +106,7 @@ pub struct Build { out_dir: Option, opt_level: Option, debug: Option, + force_frame_pointer: Option, env: Vec<(OsString, OsString)>, compiler: Option, archiver: Option, @@ -162,6 +161,12 @@ impl From for Error { } } +impl Display for Error { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + write!(f, "{:?}: {}", self.kind, self.message) + } +} + @@ -202,35 +207,28 @@ impl ToolFamily { fn add_debug_flags(&self, cmd: &mut Tool) { match *self { ToolFamily::Msvc { .. } => { - cmd.push_cc_arg("/Z7".into()); + cmd.push_cc_arg("-Z7".into()); } ToolFamily::Gnu | ToolFamily::Clang => { cmd.push_cc_arg("-g".into()); - cmd.push_cc_arg("-fno-omit-frame-pointer".into()); } } } - fn include_flag(&self) -> &'static str { - match *self { - ToolFamily::Msvc { .. } => "/I", - ToolFamily::Gnu | ToolFamily::Clang => "-I", - } - } - - - fn expand_flag(&self) -> &'static str { + fn add_force_frame_pointer(&self, cmd: &mut Tool) { match *self { - ToolFamily::Msvc { .. } => "/E", - ToolFamily::Gnu | ToolFamily::Clang => "-E", + ToolFamily::Gnu | ToolFamily::Clang => { + cmd.push_cc_arg("-fno-omit-frame-pointer".into()); + } + _ => (), } } fn warnings_flags(&self) -> &'static str { match *self { - ToolFamily::Msvc { .. } => "/W4", + ToolFamily::Msvc { .. } => "-W4", ToolFamily::Gnu | ToolFamily::Clang => "-Wall", } } @@ -246,29 +244,11 @@ impl ToolFamily { fn warnings_to_errors_flag(&self) -> &'static str { match *self { - ToolFamily::Msvc { .. } => "/WX", + ToolFamily::Msvc { .. } => "-WX", ToolFamily::Gnu | ToolFamily::Clang => "-Werror", } } - - - fn nvcc_debug_flag(&self) -> &'static str { - match *self { - ToolFamily::Msvc { .. } => unimplemented!(), - ToolFamily::Gnu | ToolFamily::Clang => "-G", - } - } - - - - fn nvcc_redirect_flag(&self) -> &'static str { - match *self { - ToolFamily::Msvc { .. 
} => unimplemented!(), - ToolFamily::Gnu | ToolFamily::Clang => "-Xcompiler", - } - } - fn verbose_stderr(&self) -> bool { *self == ToolFamily::Clang } @@ -304,6 +284,8 @@ impl Build { flags: Vec::new(), flags_supported: Vec::new(), known_flag_support_status: Arc::new(Mutex::new(HashMap::new())), + ar_flags: Vec::new(), + no_default_flags: false, files: Vec::new(), shared_flag: None, static_flag: None, @@ -316,6 +298,7 @@ impl Build { out_dir: None, opt_level: None, debug: None, + force_frame_pointer: None, env: Vec::new(), compiler: None, archiver: None, @@ -388,6 +371,23 @@ impl Build { self } + + + + + + + + + + + + + pub fn ar_flag(&mut self, flag: &str) -> &mut Build { + self.ar_flags.push(flag.to_string()); + self + } + fn ensure_check_file(&self) -> Result { let out_dir = self.get_out_dir()?; let src = if self.cuda { @@ -447,12 +447,19 @@ impl Build { let mut cmd = compiler.to_command(); let is_arm = target.contains("aarch64") || target.contains("arm"); - command_add_output_file(&mut cmd, &obj, target.contains("msvc"), false, is_arm); + command_add_output_file( + &mut cmd, + &obj, + self.cuda, + target.contains("msvc"), + false, + is_arm, + ); - if target.contains("msvc") { - cmd.arg("/c"); + if target.contains("msvc") && !self.cuda { + cmd.arg("-c"); } cmd.arg(&src); @@ -493,7 +500,6 @@ impl Build { - pub fn shared_flag(&mut self, shared_flag: bool) -> &mut Build { self.shared_flag = Some(shared_flag); self @@ -519,6 +525,17 @@ impl Build { } + + + + + + pub fn no_default_flags(&mut self, no_default_flags: bool) -> &mut Build { + self.no_default_flags = no_default_flags; + self + } + + pub fn file>(&mut self, p: P) -> &mut Build { self.files.push(p.as_ref().to_path_buf()); self @@ -766,7 +783,6 @@ impl Build { - pub fn debug(&mut self, debug: bool) -> &mut Build { self.debug = Some(debug); self @@ -777,6 +793,17 @@ impl Build { + + pub fn force_frame_pointer(&mut self, force: bool) -> &mut Build { + self.force_frame_pointer = Some(force); + self + } + + + + + + pub fn out_dir>(&mut self, out_dir: P) -> &mut Build { self.out_dir = Some(out_dir.as_ref().to_owned()); self @@ -891,7 +918,7 @@ impl Build { return Err(Error::new( ErrorKind::IOError, "Getting object file details failed.", - )) + )); } }; @@ -953,22 +980,150 @@ impl Build { } #[cfg(feature = "parallel")] - fn compile_objects(&self, objs: &[Object]) -> Result<(), Error> { - use self::rayon::prelude::*; + fn compile_objects<'me>(&'me self, objs: &[Object]) -> Result<(), Error> { + use std::sync::atomic::{AtomicBool, Ordering::SeqCst}; + use std::sync::Once; + + + + + + + let server = jobserver(); + let reacquire = server.release_raw().is_ok(); + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + let error = AtomicBool::new(false); + let mut threads = Vec::new(); + for obj in objs { + if error.load(SeqCst) { + break; + } + let token = server.acquire()?; + let state = State { + build: self, + obj, + error: &error, + }; + let state = unsafe { std::mem::transmute::>(state) }; + let thread = thread::spawn(|| { + let state: State<'me> = state; + let result = state.build.compile_object(state.obj); + if result.is_err() { + state.error.store(true, SeqCst); + } + drop(token); + return result; + }); + threads.push(JoinOnDrop(Some(thread))); + } - if let Some(amt) = self.getenv("NUM_JOBS") { - if let Ok(amt) = amt.parse() { - let _ = rayon::ThreadPoolBuilder::new() - .num_threads(amt) - .build_global(); + for mut thread in threads { + if let Some(thread) = thread.0.take() { + thread.join().expect("thread should not 
panic")?; } } - objs.par_iter() - .with_max_len(1) - .map(|obj| self.compile_object(obj)) - .collect() + + if reacquire { + server.acquire_raw()?; + } + + return Ok(()); + + + + + + struct State<'a> { + build: &'a Build, + obj: &'a Object, + error: &'a AtomicBool, + } + + + + fn jobserver() -> &'static jobserver::Client { + static INIT: Once = Once::new(); + static mut JOBSERVER: Option = None; + + fn _assert_sync() {} + _assert_sync::(); + + unsafe { + INIT.call_once(|| { + let server = default_jobserver(); + JOBSERVER = Some(server); + }); + JOBSERVER.as_ref().unwrap() + } + } + + unsafe fn default_jobserver() -> jobserver::Client { + + + if let Some(client) = jobserver::Client::from_env() { + return client; + } + + + + let mut parallelism = num_cpus::get(); + if let Ok(amt) = env::var("NUM_JOBS") { + if let Ok(amt) = amt.parse() { + parallelism = amt; + } + } + + + + let client = jobserver::Client::new(parallelism).expect("failed to create jobserver"); + client.acquire_raw().expect("failed to acquire initial"); + return client; + } + + struct JoinOnDrop(Option>>); + + impl Drop for JoinOnDrop { + fn drop(&mut self) { + if let Some(thread) = self.0.take() { + drop(thread.join()); + } + } + } } #[cfg(not(feature = "parallel"))] @@ -1002,10 +1157,10 @@ impl Build { ) }; let is_arm = target.contains("aarch64") || target.contains("arm"); - command_add_output_file(&mut cmd, &obj.dst, msvc, is_asm, is_arm); + command_add_output_file(&mut cmd, &obj.dst, self.cuda, msvc, is_asm, is_arm); if !msvc || !is_asm || !is_arm { - cmd.arg(if msvc { "/c" } else { "-c" }); + cmd.arg("-c"); } cmd.arg(&obj.src); @@ -1020,7 +1175,7 @@ impl Build { for &(ref a, ref b) in self.env.iter() { cmd.env(a, b); } - cmd.arg(compiler.family.expand_flag()); + cmd.arg("-E"); assert!( self.files.len() <= 1, @@ -1096,10 +1251,9 @@ impl Build { let envflags = self.envflags(if self.cpp { "CXXFLAGS" } else { "CFLAGS" }); - - let use_defaults = self.getenv("CRATE_CC_NO_DEFAULTS").is_none(); + let no_defaults = self.no_default_flags || self.getenv("CRATE_CC_NO_DEFAULTS").is_some(); - if use_defaults { + if !no_defaults { self.add_default_flags(&mut cmd, &target, &opt_level)?; } else { println!("Info: default compiler flags are disabled"); @@ -1110,7 +1264,7 @@ impl Build { } for directory in self.include_directories.iter() { - cmd.args.push(cmd.family.include_flag().into()); + cmd.args.push("-I".into()); cmd.args.push(directory.into()); } @@ -1119,12 +1273,18 @@ impl Build { - if self.warnings.unwrap_or(if self.has_flags() { false } else { true }) { + if self + .warnings + .unwrap_or(if self.has_flags() { false } else { true }) + { let wflags = cmd.family.warnings_flags().into(); cmd.push_cc_arg(wflags); } - if self.extra_warnings.unwrap_or(if self.has_flags() { false } else { true }) { + if self + .extra_warnings + .unwrap_or(if self.has_flags() { false } else { true }) + { if let Some(wflags) = cmd.family.extra_warnings_flags() { cmd.push_cc_arg(wflags.into()); } @@ -1141,15 +1301,10 @@ impl Build { } for &(ref key, ref value) in self.definitions.iter() { - let lead = if let ToolFamily::Msvc { .. 
} = cmd.family { - "/" - } else { - "-" - }; if let Some(ref value) = *value { - cmd.args.push(format!("{}D{}={}", lead, key, value).into()); + cmd.args.push(format!("-D{}={}", key, value).into()); } else { - cmd.args.push(format!("{}D{}", lead, key).into()); + cmd.args.push(format!("-D{}", key).into()); } } @@ -1161,36 +1316,39 @@ impl Build { Ok(cmd) } - fn add_default_flags(&self, cmd: &mut Tool, target: &str, opt_level: &str) -> Result<(), Error> { + fn add_default_flags( + &self, + cmd: &mut Tool, + target: &str, + opt_level: &str, + ) -> Result<(), Error> { match cmd.family { ToolFamily::Msvc { .. } => { - assert!(!self.cuda, - "CUDA C++ compilation not supported for MSVC, yet... but you are welcome to implement it :)"); - - cmd.args.push("/nologo".into()); + cmd.push_cc_arg("-nologo".into()); let crt_flag = match self.static_crt { - Some(true) => "/MT", - Some(false) => "/MD", + Some(true) => "-MT", + Some(false) => "-MD", None => { - let features = - self.getenv("CARGO_CFG_TARGET_FEATURE").unwrap_or(String::new()); + let features = self + .getenv("CARGO_CFG_TARGET_FEATURE") + .unwrap_or(String::new()); if features.contains("crt-static") { - "/MT" + "-MT" } else { - "/MD" + "-MD" } } }; - cmd.args.push(crt_flag.into()); + cmd.push_cc_arg(crt_flag.into()); match &opt_level[..] { - "z" | "s" | "1" => cmd.push_opt_unless_duplicate("/O1".into()), + "z" | "s" | "1" => cmd.push_opt_unless_duplicate("-O1".into()), - "2" | "3" => cmd.push_opt_unless_duplicate("/O2".into()), + "2" | "3" => cmd.push_opt_unless_duplicate("-O2".into()), _ => {} } } @@ -1207,7 +1365,11 @@ impl Build { cmd.push_cc_arg("-ffunction-sections".into()); cmd.push_cc_arg("-fdata-sections".into()); } - if self.pic.unwrap_or(!target.contains("windows-gnu")) { + + if self + .pic + .unwrap_or(!target.contains("windows-gnu") && !target.contains("riscv")) + { cmd.push_cc_arg("-fPIC".into()); @@ -1220,31 +1382,41 @@ impl Build { if self.get_debug() { if self.cuda { - let nvcc_debug_flag = cmd.family.nvcc_debug_flag().into(); - cmd.args.push(nvcc_debug_flag); + + cmd.args.push("-G".into()); } let family = cmd.family; family.add_debug_flags(cmd); } + if self.get_force_frame_pointer() { + let family = cmd.family; + family.add_force_frame_pointer(cmd); + } + match cmd.family { ToolFamily::Clang => { cmd.args.push(format!("--target={}", target).into()); } ToolFamily::Msvc { clang_cl } => { + + + + cmd.args.push("-Brepro".into()); + if clang_cl { if target.contains("x86_64") { cmd.args.push("-m64".into()); } else if target.contains("86") { cmd.args.push("-m32".into()); - cmd.args.push("/arch:IA32".into()); + cmd.push_cc_arg("-arch:IA32".into()); } else { - cmd.args.push(format!("--target={}", target).into()); + cmd.push_cc_arg(format!("--target={}", target).into()); } } else { if target.contains("i586") { - cmd.args.push("/ARCH:IA32".into()); + cmd.push_cc_arg("-arch:IA32".into()); } } @@ -1258,7 +1430,8 @@ impl Build { if target.contains("arm") || target.contains("thumb") { - cmd.args.push("/D_ARM_WINAPI_PARTITION_DESKTOP_SDK_AVAILABLE=1".into()); + cmd.args + .push("-D_ARM_WINAPI_PARTITION_DESKTOP_SDK_AVAILABLE=1".into()); } } ToolFamily::Gnu => { @@ -1271,14 +1444,18 @@ impl Build { } if self.static_flag.is_none() { - let features = self.getenv("CARGO_CFG_TARGET_FEATURE").unwrap_or(String::new()); + let features = self + .getenv("CARGO_CFG_TARGET_FEATURE") + .unwrap_or(String::new()); if features.contains("crt-static") { cmd.args.push("-static".into()); } } - if (target.starts_with("armv7") || target.starts_with("thumbv7")) && 
target.contains("-linux-") { + if (target.starts_with("armv7") || target.starts_with("thumbv7")) + && target.contains("-linux-") + { cmd.args.push("-march=armv7-a".into()); } @@ -1405,6 +1582,21 @@ impl Build { cmd.args.push("-mfloat-abi=soft".into()); } } + if target.starts_with("riscv32") || target.starts_with("riscv64") { + + let mut parts = target.split('-'); + if let Some(arch) = parts.next() { + let arch = &arch[5..]; + cmd.args.push(("-march=rv".to_owned() + arch).into()); + + + if arch.starts_with("64") { + cmd.args.push("-mabi=lp64".into()); + } else { + cmd.args.push("-mabi=ilp32".into()); + } + } + } } } @@ -1443,7 +1635,11 @@ impl Build { fn has_flags(&self) -> bool { let flags_env_var_name = if self.cpp { "CXXFLAGS" } else { "CFLAGS" }; let flags_env_var_value = self.get_var(flags_env_var_name); - if let Ok(_) = flags_env_var_value { true } else { false } + if let Ok(_) = flags_env_var_value { + true + } else { + false + } } fn msvc_macro_assembler(&self) -> Result<(Command, String), Error> { @@ -1459,18 +1655,18 @@ impl Build { }; let mut cmd = windows_registry::find(&target, tool).unwrap_or_else(|| self.cmd(tool)); for directory in self.include_directories.iter() { - cmd.arg("/I").arg(directory); + cmd.arg("-I").arg(directory); } for &(ref key, ref value) in self.definitions.iter() { if let Some(ref value) = *value { - cmd.arg(&format!("/D{}={}", key, value)); + cmd.arg(&format!("-D{}={}", key, value)); } else { - cmd.arg(&format!("/D{}", key)); + cmd.arg(&format!("-D{}", key)); } } if target.contains("i686") || target.contains("i586") { - cmd.arg("/safeseh"); + cmd.arg("-safeseh"); } for flag in self.flags.iter() { cmd.arg(flag); @@ -1488,9 +1684,12 @@ impl Build { let target = self.get_target()?; if target.contains("msvc") { let (mut cmd, program) = self.get_ar()?; - let mut out = OsString::from("/OUT:"); + let mut out = OsString::from("-out:"); out.push(dst); - cmd.arg(out).arg("/nologo"); + cmd.arg(out).arg("-nologo"); + for flag in self.ar_flags.iter() { + cmd.arg(flag); + } @@ -1548,11 +1747,38 @@ impl Build { return Err(Error::new( ErrorKind::IOError, "Could not copy or create a hard-link to the generated lib file.", - )) + )); } }; } else { let (mut ar, cmd) = self.get_ar()?; + + + + + + + + + + + + + + + + + + + + + + + + ar.env("ZERO_AR_DATE", "1"); + for flag in self.ar_flags.iter() { + ar.arg(flag); + } run( ar.arg("crs").arg(dst).args(&objects).args(&self.objects), &cmd, @@ -1585,26 +1811,32 @@ impl Build { return Err(Error::new( ErrorKind::ArchitectureInvalid, "Unknown architecture for iOS target.", - )) + )); } }; + let min_version = + std::env::var("IPHONEOS_DEPLOYMENT_TARGET").unwrap_or_else(|_| "7.0".into()); + let sdk = match arch { ArchSpec::Device(arch) => { cmd.args.push("-arch".into()); cmd.args.push(arch.into()); - cmd.args.push("-miphoneos-version-min=7.0".into()); + cmd.args + .push(format!("-miphoneos-version-min={}", min_version).into()); "iphoneos" } ArchSpec::Simulator(arch) => { cmd.args.push(arch.into()); - cmd.args.push("-mios-simulator-version-min=7.0".into()); + cmd.args + .push(format!("-mios-simulator-version-min={}", min_version).into()); "iphonesimulator" } }; self.print(&format!("Detecting iOS SDK path for {}", sdk)); - let sdk_path = self.cmd("xcrun") + let sdk_path = self + .cmd("xcrun") .arg("--show-sdk-path") .arg("--sdk") .arg(sdk) @@ -1618,7 +1850,7 @@ impl Build { return Err(Error::new( ErrorKind::IOError, "Unable to determine iOS SDK path.", - )) + )); } }; @@ -1669,14 +1901,21 @@ impl Build { let cl_exe = 
windows_registry::find_tool(&target, "cl.exe"); - let tool_opt: Option = self.env_tool(env) - .map(|(tool, cc, args)| { + let tool_opt: Option = self + .env_tool(env) + .map(|(tool, wrapper, args)| { + + const DRIVER_MODE: &str = "--driver-mode="; + let driver_mode = args + .iter() + .find(|a| a.starts_with(DRIVER_MODE)) + .map(|a| &a[DRIVER_MODE.len()..]); - let mut t = Tool::new(PathBuf::from(tool.trim())); - if let Some(cc) = cc { + let mut t = Tool::with_clang_driver(PathBuf::from(tool.trim()), driver_mode); + if let Some(cc) = wrapper { t.cc_wrapper_path = Some(PathBuf::from(cc)); } for arg in args { @@ -1728,8 +1967,13 @@ impl Build { } } else if target.contains("cloudabi") { format!("{}-{}", target, traditional) - } else if target == "wasm32-unknown-wasi" || target == "wasm32-unknown-unknown" { + } else if target == "wasm32-wasi" + || target == "wasm32-unknown-wasi" + || target == "wasm32-unknown-unknown" + { "clang".to_string() + } else if target.contains("vxworks") { + "wr-c++".to_string() } else if self.get_host()? != target { let cc_env = self.getenv("CROSS_COMPILE"); @@ -1758,17 +2002,29 @@ impl Build { "armv7-unknown-netbsd-eabihf" => Some("armv7--netbsdelf-eabihf"), "i586-unknown-linux-musl" => Some("musl"), "i686-pc-windows-gnu" => Some("i686-w64-mingw32"), + "i686-uwp-windows-gnu" => Some("i686-w64-mingw32"), "i686-unknown-linux-musl" => Some("musl"), "i686-unknown-netbsd" => Some("i486--netbsdelf"), "mips-unknown-linux-gnu" => Some("mips-linux-gnu"), "mipsel-unknown-linux-gnu" => Some("mipsel-linux-gnu"), "mips64-unknown-linux-gnuabi64" => Some("mips64-linux-gnuabi64"), "mips64el-unknown-linux-gnuabi64" => Some("mips64el-linux-gnuabi64"), + "mipsisa32r6-unknown-linux-gnu" => Some("mipsisa32r6-linux-gnu"), + "mipsisa32r6el-unknown-linux-gnu" => Some("mipsisa32r6el-linux-gnu"), + "mipsisa64r6-unknown-linux-gnuabi64" => Some("mipsisa64r6-linux-gnuabi64"), + "mipsisa64r6el-unknown-linux-gnuabi64" => { + Some("mipsisa64r6el-linux-gnuabi64") + } "powerpc-unknown-linux-gnu" => Some("powerpc-linux-gnu"), "powerpc-unknown-linux-gnuspe" => Some("powerpc-linux-gnuspe"), "powerpc-unknown-netbsd" => Some("powerpc--netbsd"), "powerpc64-unknown-linux-gnu" => Some("powerpc-linux-gnu"), "powerpc64le-unknown-linux-gnu" => Some("powerpc64le-linux-gnu"), + "riscv32i-unknown-none-elf" => Some("riscv32-unknown-elf"), + "riscv32imac-unknown-none-elf" => Some("riscv32-unknown-elf"), + "riscv32imc-unknown-none-elf" => Some("riscv32-unknown-elf"), + "riscv64gc-unknown-none-elf" => Some("riscv64-unknown-elf"), + "riscv64imac-unknown-none-elf" => Some("riscv64-unknown-elf"), "s390x-unknown-linux-gnu" => Some("s390x-linux-gnu"), "sparc-unknown-linux-gnu" => Some("sparc-linux-gnu"), "sparc64-unknown-linux-gnu" => Some("sparc64-linux-gnu"), @@ -1786,6 +2042,7 @@ impl Build { "thumbv8m.main-none-eabi" => Some("arm-none-eabi"), "thumbv8m.main-none-eabihf" => Some("arm-none-eabi"), "x86_64-pc-windows-gnu" => Some("x86_64-w64-mingw32"), + "x86_64-uwp-windows-gnu" => Some("x86_64-w64-mingw32"), "x86_64-rumprun-netbsd" => Some("x86_64-rumprun-netbsd"), "x86_64-unknown-linux-musl" => Some("musl"), "x86_64-unknown-netbsd" => Some("x86_64--netbsd"), @@ -1811,10 +2068,11 @@ impl Build { Err(_) => "nvcc".into(), Ok(nvcc) => nvcc, }; - let mut nvcc_tool = Tool::with_features(PathBuf::from(nvcc), self.cuda); + let mut nvcc_tool = Tool::with_features(PathBuf::from(nvcc), None, self.cuda); nvcc_tool .args .push(format!("-ccbin={}", tool.path.display()).into()); + nvcc_tool.family = tool.family; nvcc_tool } else { tool 
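The compile_objects rewrite above drops rayon in favor of a GNU-make-style jobserver: one token is acquired before each compile thread is spawned and released when the thread finishes, so parallelism stays bounded even when the tokens come from Cargo rather than from cc itself. A minimal sketch of that acquire/drop pattern using the jobserver crate (the slot count and the task body are illustrative; a real build would prefer an inherited Client::from_env and fall back to creating its own, as default_jobserver does above):

    use std::thread;

    fn main() {
        // Stand-alone jobserver with 4 slots; cc first tries to inherit one
        // from the environment and only creates its own as a fallback.
        let client = jobserver::Client::new(4).expect("failed to create jobserver");
        let mut handles = Vec::new();
        for i in 0..8 {
            // Blocks until one of the 4 tokens is free.
            let token = client.acquire().expect("failed to acquire token");
            handles.push(thread::spawn(move || {
                println!("compiling object {} while holding a token", i);
                drop(token); // dropping the token frees the slot for the next task
            }));
        }
        for handle in handles {
            handle.join().unwrap();
        }
    }
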
@@ -1831,9 +2089,9 @@ impl Build { if let Some(cl_exe) = cl_exe { - if tool.family == (ToolFamily::Msvc { clang_cl: true }) && - tool.env.len() == 0 && - target.contains("msvc") + if tool.family == (ToolFamily::Msvc { clang_cl: true }) + && tool.env.len() == 0 + && target.contains("msvc") { for &(ref k, ref v) in cl_exe.env.iter() { tool.env.push((k.to_owned(), v.to_owned())); @@ -1849,7 +2107,8 @@ impl Build { let host = self.get_host()?; let kind = if host == target { "HOST" } else { "TARGET" }; let target_u = target.replace("-", "_"); - let res = self.getenv(&format!("{}_{}", var_base, target)) + let res = self + .getenv(&format!("{}_{}", var_base, target)) .or_else(|| self.getenv(&format!("{}_{}", var_base, target_u))) .or_else(|| self.getenv(&format!("{}_{}", kind, var_base))) .or_else(|| self.getenv(var_base)); @@ -2025,6 +2284,10 @@ impl Build { }) } + fn get_force_frame_pointer(&self) -> bool { + self.force_frame_pointer.unwrap_or_else(|| self.get_debug()) + } + fn get_out_dir(&self) -> Result { match self.out_dir.clone() { Some(p) => Ok(p), @@ -2040,7 +2303,7 @@ impl Build { fn getenv(&self, v: &str) -> Option { let mut cache = self.env_cache.lock().unwrap(); if let Some(val) = cache.get(v) { - return val.clone() + return val.clone(); } let r = env::var(v).ok(); self.print(&format!("{} = {:?}", v, r)); @@ -2072,28 +2335,37 @@ impl Default for Build { } impl Tool { - fn new(path: PathBuf) -> Tool { - Tool::with_features(path, false) + fn new(path: PathBuf) -> Self { + Tool::with_features(path, None, false) + } + + fn with_clang_driver(path: PathBuf, clang_driver: Option<&str>) -> Self { + Self::with_features(path, clang_driver, false) } - fn with_features(path: PathBuf, cuda: bool) -> Tool { + fn with_features(path: PathBuf, clang_driver: Option<&str>, cuda: bool) -> Self { let family = if let Some(fname) = path.file_name().and_then(|p| p.to_str()) { if fname.contains("clang-cl") { ToolFamily::Msvc { clang_cl: true } - } else if fname.contains("cl") && - !fname.contains("cloudabi") && - !fname.contains("uclibc") && - !fname.contains("clang") { + } else if fname.contains("cl") + && !fname.contains("cloudabi") + && !fname.contains("uclibc") + && !fname.contains("clang") + { ToolFamily::Msvc { clang_cl: false } } else if fname.contains("clang") { - ToolFamily::Clang + match clang_driver { + Some("cl") => ToolFamily::Msvc { clang_cl: true }, + _ => ToolFamily::Clang, + } } else { ToolFamily::Gnu } } else { ToolFamily::Gnu }; + Tool { path: path, cc_wrapper_path: None, @@ -2118,7 +2390,7 @@ impl Tool { fn push_cc_arg(&mut self, flag: OsString) { if self.cuda { - self.args.push(self.family.nvcc_redirect_flag().into()); + self.args.push("-Xcompiler".into()); } self.args.push(flag); } @@ -2140,9 +2412,10 @@ impl Tool { if chars.next() == Some('O') { - return self.args().iter().any(|ref a| - a.to_str().unwrap_or("").chars().nth(1) == Some('O') - ); + return self + .args() + .iter() + .any(|ref a| a.to_str().unwrap_or("").chars().nth(1) == Some('O')); } @@ -2174,7 +2447,11 @@ impl Tool { }; cmd.args(&self.cc_wrapper_args); - let value = self.args.iter().filter(|a| !self.removed_args.contains(a)).collect::>(); + let value = self + .args + .iter() + .filter(|a| !self.removed_args.contains(a)) + .collect::>(); cmd.args(&value); for &(ref k, ref v) in self.env.iter() { @@ -2269,7 +2546,7 @@ fn run(cmd: &mut Command, program: &str) -> Result<(), Error> { "Failed to wait on spawned child process, command {:?} with args {:?}.", cmd, program ), - )) + )); } }; print.join().unwrap(); @@ -2307,7 
+2584,7 @@ fn run_output(cmd: &mut Command, program: &str) -> Result, Error> { "Failed to wait on spawned child process, command {:?} with args {:?}.", cmd, program ), - )) + )); } }; print.join().unwrap(); @@ -2365,16 +2642,20 @@ fn spawn(cmd: &mut Command, program: &str) -> Result<(Child, JoinHandle<()>), Er } fn fail(s: &str) -> ! { - panic!("\n\nInternal error occurred: {}\n\n", s) + let _ = writeln!(io::stderr(), "\n\nerror occurred: {}\n\n", s); + std::process::exit(1); } -fn command_add_output_file(cmd: &mut Command, dst: &Path, msvc: bool, is_asm: bool, is_arm: bool) { - if msvc && is_asm && is_arm { - cmd.arg("-o").arg(&dst); - } else if msvc && is_asm { - cmd.arg("/Fo").arg(dst); - } else if msvc { - let mut s = OsString::from("/Fo"); +fn command_add_output_file( + cmd: &mut Command, + dst: &Path, + cuda: bool, + msvc: bool, + is_asm: bool, + is_arm: bool, +) { + if msvc && !cuda && !(is_asm && is_arm) { + let mut s = OsString::from("-Fo"); s.push(&dst); cmd.arg(s); } else { diff --git a/third_party/rust/cc/src/setup_config.rs b/third_party/rust/cc/src/setup_config.rs index 5e58d8d4af5a..0cc45aa0de53 100644 --- a/third_party/rust/cc/src/setup_config.rs +++ b/third_party/rust/cc/src/setup_config.rs @@ -8,19 +8,19 @@ #![allow(bad_style)] #![allow(unused)] +use crate::winapi::Interface; +use crate::winapi::BSTR; +use crate::winapi::LPCOLESTR; +use crate::winapi::LPSAFEARRAY; +use crate::winapi::S_FALSE; +use crate::winapi::{CoCreateInstance, CLSCTX_ALL}; +use crate::winapi::{IUnknown, IUnknownVtbl}; +use crate::winapi::{HRESULT, LCID, LPCWSTR, PULONGLONG}; +use crate::winapi::{LPFILETIME, ULONG}; use std::ffi::OsString; use std::ptr::null_mut; -use winapi::Interface; -use winapi::{LPFILETIME, ULONG}; -use winapi::S_FALSE; -use winapi::BSTR; -use winapi::LPCOLESTR; -use winapi::{CoCreateInstance, CLSCTX_ALL}; -use winapi::LPSAFEARRAY; -use winapi::{IUnknown, IUnknownVtbl}; -use winapi::{HRESULT, LCID, LPCWSTR, PULONGLONG}; -use com::{BStr, ComPtr}; +use crate::com::{BStr, ComPtr}; pub type InstanceState = u32; @@ -31,7 +31,7 @@ pub const eRegistered: InstanceState = 2; pub const eNoRebootRequired: InstanceState = 4; pub const eComplete: InstanceState = -1i32 as u32; -RIDL!{#[uuid(0xb41463c3, 0x8866, 0x43b5, 0xbc, 0x33, 0x2b, 0x06, 0x76, 0xf7, 0xf4, 0x2e)] +RIDL! {#[uuid(0xb41463c3, 0x8866, 0x43b5, 0xbc, 0x33, 0x2b, 0x06, 0x76, 0xf7, 0xf4, 0x2e)] interface ISetupInstance(ISetupInstanceVtbl): IUnknown(IUnknownVtbl) { fn GetInstanceId( pbstrInstanceId: *mut BSTR, @@ -62,7 +62,7 @@ interface ISetupInstance(ISetupInstanceVtbl): IUnknown(IUnknownVtbl) { ) -> HRESULT, }} -RIDL!{#[uuid(0x89143c9a, 0x05af, 0x49b0, 0xb7, 0x17, 0x72, 0xe2, 0x18, 0xa2, 0x18, 0x5c)] +RIDL! {#[uuid(0x89143c9a, 0x05af, 0x49b0, 0xb7, 0x17, 0x72, 0xe2, 0x18, 0xa2, 0x18, 0x5c)] interface ISetupInstance2(ISetupInstance2Vtbl): ISetupInstance(ISetupInstanceVtbl) { fn GetState( pState: *mut InstanceState, @@ -78,7 +78,7 @@ interface ISetupInstance2(ISetupInstance2Vtbl): ISetupInstance(ISetupInstanceVtb ) -> HRESULT, }} -RIDL!{#[uuid(0x6380bcff, 0x41d3, 0x4b2e, 0x8b, 0x2e, 0xbf, 0x8a, 0x68, 0x10, 0xc8, 0x48)] +RIDL! {#[uuid(0x6380bcff, 0x41d3, 0x4b2e, 0x8b, 0x2e, 0xbf, 0x8a, 0x68, 0x10, 0xc8, 0x48)] interface IEnumSetupInstances(IEnumSetupInstancesVtbl): IUnknown(IUnknownVtbl) { fn Next( celt: ULONG, @@ -94,7 +94,7 @@ interface IEnumSetupInstances(IEnumSetupInstancesVtbl): IUnknown(IUnknownVtbl) { ) -> HRESULT, }} -RIDL!{#[uuid(0x42843719, 0xdb4c, 0x46c2, 0x8e, 0x7c, 0x64, 0xf1, 0x81, 0x6e, 0xfd, 0x5b)] +RIDL! 
{#[uuid(0x42843719, 0xdb4c, 0x46c2, 0x8e, 0x7c, 0x64, 0xf1, 0x81, 0x6e, 0xfd, 0x5b)] interface ISetupConfiguration(ISetupConfigurationVtbl): IUnknown(IUnknownVtbl) { fn EnumInstances( ppEnumInstances: *mut *mut IEnumSetupInstances, @@ -108,7 +108,7 @@ interface ISetupConfiguration(ISetupConfigurationVtbl): IUnknown(IUnknownVtbl) { ) -> HRESULT, }} -RIDL!{#[uuid(0x26aab78c, 0x4a60, 0x49d6, 0xaf, 0x3b, 0x3c, 0x35, 0xbc, 0x93, 0x36, 0x5d)] +RIDL! {#[uuid(0x26aab78c, 0x4a60, 0x49d6, 0xaf, 0x3b, 0x3c, 0x35, 0xbc, 0x93, 0x36, 0x5d)] interface ISetupConfiguration2(ISetupConfiguration2Vtbl): ISetupConfiguration(ISetupConfigurationVtbl) { fn EnumAllInstances( @@ -116,7 +116,7 @@ interface ISetupConfiguration2(ISetupConfiguration2Vtbl): ) -> HRESULT, }} -RIDL!{#[uuid(0xda8d8a16, 0xb2b6, 0x4487, 0xa2, 0xf1, 0x59, 0x4c, 0xcc, 0xcd, 0x6b, 0xf5)] +RIDL! {#[uuid(0xda8d8a16, 0xb2b6, 0x4487, 0xa2, 0xf1, 0x59, 0x4c, 0xcc, 0xcd, 0x6b, 0xf5)] interface ISetupPackageReference(ISetupPackageReferenceVtbl): IUnknown(IUnknownVtbl) { fn GetId( pbstrId: *mut BSTR, @@ -141,7 +141,7 @@ interface ISetupPackageReference(ISetupPackageReferenceVtbl): IUnknown(IUnknownV ) -> HRESULT, }} -RIDL!{#[uuid(0x42b21b78, 0x6192, 0x463e, 0x87, 0xbf, 0xd5, 0x77, 0x83, 0x8f, 0x1d, 0x5c)] +RIDL! {#[uuid(0x42b21b78, 0x6192, 0x463e, 0x87, 0xbf, 0xd5, 0x77, 0x83, 0x8f, 0x1d, 0x5c)] interface ISetupHelper(ISetupHelperVtbl): IUnknown(IUnknownVtbl) { fn ParseVersion( pwszVersion: LPCOLESTR, @@ -154,7 +154,7 @@ interface ISetupHelper(ISetupHelperVtbl): IUnknown(IUnknownVtbl) { ) -> HRESULT, }} -DEFINE_GUID!{CLSID_SetupConfiguration, +DEFINE_GUID! {CLSID_SetupConfiguration, 0x177f0c4a, 0x1cd3, 0x4de7, 0xa3, 0x2c, 0x71, 0xdb, 0xbb, 0x9f, 0xa3, 0x6d} @@ -196,7 +196,7 @@ impl SetupConfiguration { } pub fn enum_all_instances(&self) -> Result { let mut obj = null_mut(); - let this = try!(self.0.cast::()); + let this = self.0.cast::()?; let err = unsafe { this.EnumAllInstances(&mut obj) }; if err < 0 { return Err(err); @@ -249,7 +249,7 @@ impl SetupInstance { } pub fn product_path(&self) -> Result { let mut s = null_mut(); - let this = try!(self.0.cast::()); + let this = self.0.cast::()?; let err = unsafe { this.GetProductPath(&mut s) }; let bstr = unsafe { BStr::from_raw(s) }; if err < 0 { diff --git a/third_party/rust/cc/src/winapi.rs b/third_party/rust/cc/src/winapi.rs index 837ab1af3057..7045c953e26e 100644 --- a/third_party/rust/cc/src/winapi.rs +++ b/third_party/rust/cc/src/winapi.rs @@ -115,7 +115,7 @@ macro_rules! DEFINE_GUID { Data3: $w2, Data4: [$b1, $b2, $b3, $b4, $b5, $b6, $b7, $b8], }; - } + }; } macro_rules! RIDL { @@ -207,7 +207,7 @@ macro_rules! RIDL { ); } -RIDL!{#[uuid(0x00000000, 0x0000, 0x0000, 0xc0, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x46)] +RIDL! {#[uuid(0x00000000, 0x0000, 0x0000, 0xc0, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x46)] interface IUnknown(IUnknownVtbl) { fn QueryInterface( riid: REFIID, diff --git a/third_party/rust/cc/src/windows_registry.rs b/third_party/rust/cc/src/windows_registry.rs index 3ef91ee40dd4..d8d7088aed48 100644 --- a/third_party/rust/cc/src/windows_registry.rs +++ b/third_party/rust/cc/src/windows_registry.rs @@ -13,15 +13,7 @@ use std::process::Command; -use Tool; - -#[cfg(windows)] -macro_rules! 
otry { - ($expr:expr) => (match $expr { - Some(val) => val, - None => return None, - }) -} +use crate::Tool; @@ -171,17 +163,18 @@ pub fn find_vs_version() -> Result { #[cfg(windows)] mod impl_ { + use crate::com; + use crate::registry::{RegistryKey, LOCAL_MACHINE}; + use crate::setup_config::{EnumSetupInstances, SetupConfiguration, SetupInstance}; use std::env; use std::ffi::OsString; - use std::mem; - use std::path::{Path, PathBuf}; use std::fs::File; use std::io::Read; - use registry::{RegistryKey, LOCAL_MACHINE}; - use com; - use setup_config::{EnumSetupInstances, SetupConfiguration, SetupInstance}; + use std::iter; + use std::mem; + use std::path::{Path, PathBuf}; - use Tool; + use crate::Tool; struct MsvcTool { tool: PathBuf, @@ -215,6 +208,44 @@ mod impl_ { } } + #[allow(bare_trait_objects)] + fn vs16_instances() -> Box> { + let instances = if let Some(instances) = vs15_instances() { + instances + } else { + return Box::new(iter::empty()); + }; + Box::new(instances.filter_map(|instance| { + let instance = instance.ok()?; + let installation_name = instance.installation_name().ok()?; + if installation_name.to_str()?.starts_with("VisualStudio/16.") { + Some(PathBuf::from(instance.installation_path().ok()?)) + } else { + None + } + })) + } + + fn find_tool_in_vs16_path(tool: &str, target: &str) -> Option { + vs16_instances() + .filter_map(|path| { + let path = path.join(tool); + if !path.is_file() { + return None; + } + let mut tool = Tool::new(path); + if target.contains("x86_64") { + tool.env.push(("Platform".into(), "X64".into())); + } + Some(tool) + }) + .next() + } + + fn find_msbuild_vs16(target: &str) -> Option { + find_tool_in_vs16_path(r"MSBuild\Current\Bin\MSBuild.exe", target) + } + @@ -223,16 +254,16 @@ mod impl_ { fn vs15_instances() -> Option { - otry!(com::initialize().ok()); + com::initialize().ok()?; - let config = otry!(SetupConfiguration::new().ok()); + let config = SetupConfiguration::new().ok()?; config.enum_all_instances().ok() } pub fn find_msvc_15(tool: &str, target: &str) -> Option { - let iter = otry!(vs15_instances()); + let iter = vs15_instances()?; for instance in iter { - let instance = otry!(instance.ok()); + let instance = instance.ok()?; let tool = tool_from_vs15_instance(tool, target, &instance); if tool.is_some() { return tool; @@ -256,7 +287,8 @@ mod impl_ { instance .ok() .and_then(|instance| instance.installation_path().ok()) - }).map(|path| PathBuf::from(path).join(tool)) + }) + .map(|path| PathBuf::from(path).join(tool)) .find(|ref path| path.is_file()), None => None, }; @@ -268,7 +300,7 @@ mod impl_ { .ok() .and_then(|key| key.query_str("15.0").ok()) .map(|path| PathBuf::from(path).join(tool)) - .filter(|ref path| path.is_file()); + .and_then(|path| if path.is_file() { Some(path) } else { None }); } path.map(|path| { @@ -281,8 +313,7 @@ mod impl_ { } fn tool_from_vs15_instance(tool: &str, target: &str, instance: &SetupInstance) -> Option { - let (bin_path, host_dylib_path, lib_path, include_path) = - otry!(vs15_vc_paths(target, instance)); + let (bin_path, host_dylib_path, lib_path, include_path) = vs15_vc_paths(target, instance)?; let tool_path = bin_path.join(tool); if !tool_path.exists() { return None; @@ -298,7 +329,7 @@ mod impl_ { tool.include.push(atl_include_path); } - otry!(add_sdks(&mut tool, target)); + add_sdks(&mut tool, target)?; Some(tool.into_tool()) } @@ -307,30 +338,32 @@ mod impl_ { target: &str, instance: &SetupInstance, ) -> Option<(PathBuf, PathBuf, PathBuf, PathBuf)> { - let instance_path: PathBuf = 
otry!(instance.installation_path().ok()).into(); + let instance_path: PathBuf = instance.installation_path().ok()?.into(); let version_path = instance_path.join(r"VC\Auxiliary\Build\Microsoft.VCToolsVersion.default.txt"); - let mut version_file = otry!(File::open(version_path).ok()); + let mut version_file = File::open(version_path).ok()?; let mut version = String::new(); - otry!(version_file.read_to_string(&mut version).ok()); + version_file.read_to_string(&mut version).ok()?; let version = version.trim(); let host = match host_arch() { X86 => "X86", X86_64 => "X64", _ => return None, }; - let target = otry!(lib_subdir(target)); + let target = lib_subdir(target)?; let path = instance_path.join(r"VC\Tools\MSVC").join(version); - let bin_path = path.join("bin") + let bin_path = path + .join("bin") .join(&format!("Host{}", host)) .join(&target); - let host_dylib_path = path.join("bin") + let host_dylib_path = path + .join("bin") .join(&format!("Host{}", host)) .join(&host.to_lowercase()); let lib_path = path.join("lib").join(&target); @@ -340,7 +373,7 @@ mod impl_ { fn atl_paths(target: &str, path: &Path) -> Option<(PathBuf, PathBuf)> { let atl_path = path.join("atlfmc"); - let sub = otry!(lib_subdir(target)); + let sub = lib_subdir(target)?; if atl_path.exists() { Some((atl_path.join("lib").join(sub), atl_path.join("include"))) } else { @@ -351,15 +384,15 @@ mod impl_ { pub fn find_msvc_14(tool: &str, target: &str) -> Option { - let vcdir = otry!(get_vc_dir("14.0")); - let mut tool = otry!(get_tool(tool, &vcdir, target)); - otry!(add_sdks(&mut tool, target)); + let vcdir = get_vc_dir("14.0")?; + let mut tool = get_tool(tool, &vcdir, target)?; + add_sdks(&mut tool, target)?; Some(tool.into_tool()) } fn add_sdks(tool: &mut MsvcTool, target: &str) -> Option<()> { - let sub = otry!(lib_subdir(target)); - let (ucrt, ucrt_version) = otry!(get_ucrt_dir()); + let sub = lib_subdir(target)?; + let (ucrt, ucrt_version) = get_ucrt_dir()?; tool.path .push(ucrt.join("bin").join(&ucrt_version).join(sub)); @@ -394,10 +427,10 @@ mod impl_ { pub fn find_msvc_12(tool: &str, target: &str) -> Option { - let vcdir = otry!(get_vc_dir("12.0")); - let mut tool = otry!(get_tool(tool, &vcdir, target)); - let sub = otry!(lib_subdir(target)); - let sdk81 = otry!(get_sdk81_dir()); + let vcdir = get_vc_dir("12.0")?; + let mut tool = get_tool(tool, &vcdir, target)?; + let sub = lib_subdir(target)?; + let sdk81 = get_sdk81_dir()?; tool.path.push(sdk81.join("bin").join(sub)); let sdk_lib = sdk81.join("lib").join("winv6.3"); tool.libs.push(sdk_lib.join("um").join(sub)); @@ -410,10 +443,10 @@ mod impl_ { pub fn find_msvc_11(tool: &str, target: &str) -> Option { - let vcdir = otry!(get_vc_dir("11.0")); - let mut tool = otry!(get_tool(tool, &vcdir, target)); - let sub = otry!(lib_subdir(target)); - let sdk8 = otry!(get_sdk8_dir()); + let vcdir = get_vc_dir("11.0")?; + let mut tool = get_tool(tool, &vcdir, target)?; + let sub = lib_subdir(target)?; + let sdk8 = get_sdk8_dir()?; tool.path.push(sdk8.join("bin").join(sub)); let sdk_lib = sdk8.join("lib").join("win8"); tool.libs.push(sdk_lib.join("um").join(sub)); @@ -450,7 +483,7 @@ mod impl_ { tool }) .filter_map(|mut tool| { - let sub = otry!(vc_lib_subdir(target)); + let sub = vc_lib_subdir(target)?; tool.libs.push(path.join("lib").join(sub)); tool.include.push(path.join("include")); let atlmfc_path = path.join("atlmfc"); @@ -467,8 +500,8 @@ mod impl_ { fn get_vc_dir(ver: &str) -> Option { let key = r"SOFTWARE\Microsoft\VisualStudio\SxS\VC7"; - let key = 
otry!(LOCAL_MACHINE.open(key.as_ref()).ok()); - let path = otry!(key.query_str(ver).ok()); + let key = LOCAL_MACHINE.open(key.as_ref()).ok()?; + let path = key.query_str(ver).ok()?; Some(path.into()) } @@ -480,20 +513,20 @@ mod impl_ { fn get_ucrt_dir() -> Option<(PathBuf, String)> { let key = r"SOFTWARE\Microsoft\Windows Kits\Installed Roots"; - let key = otry!(LOCAL_MACHINE.open(key.as_ref()).ok()); - let root = otry!(key.query_str("KitsRoot10").ok()); - let readdir = otry!(Path::new(&root).join("lib").read_dir().ok()); - let max_libdir = otry!( - readdir - .filter_map(|dir| dir.ok()) - .map(|dir| dir.path()) - .filter(|dir| dir.components() + let key = LOCAL_MACHINE.open(key.as_ref()).ok()?; + let root = key.query_str("KitsRoot10").ok()?; + let readdir = Path::new(&root).join("lib").read_dir().ok()?; + let max_libdir = readdir + .filter_map(|dir| dir.ok()) + .map(|dir| dir.path()) + .filter(|dir| { + dir.components() .last() .and_then(|c| c.as_os_str().to_str()) .map(|c| c.starts_with("10.") && dir.join("ucrt").is_dir()) - .unwrap_or(false)) - .max() - ); + .unwrap_or(false) + }) + .max()?; let version = max_libdir.components().last().unwrap(); let version = version.as_os_str().to_str().unwrap().to_string(); Some((root.into(), version)) @@ -509,20 +542,19 @@ mod impl_ { fn get_sdk10_dir() -> Option<(PathBuf, String)> { let key = r"SOFTWARE\Microsoft\Microsoft SDKs\Windows\v10.0"; - let key = otry!(LOCAL_MACHINE.open(key.as_ref()).ok()); - let root = otry!(key.query_str("InstallationFolder").ok()); - let readdir = otry!(Path::new(&root).join("lib").read_dir().ok()); + let key = LOCAL_MACHINE.open(key.as_ref()).ok()?; + let root = key.query_str("InstallationFolder").ok()?; + let readdir = Path::new(&root).join("lib").read_dir().ok()?; let mut dirs = readdir .filter_map(|dir| dir.ok()) .map(|dir| dir.path()) .collect::>(); dirs.sort(); - let dir = otry!( - dirs.into_iter() - .rev() - .filter(|dir| dir.join("um").join("x64").join("kernel32.lib").is_file()) - .next() - ); + let dir = dirs + .into_iter() + .rev() + .filter(|dir| dir.join("um").join("x64").join("kernel32.lib").is_file()) + .next()?; let version = dir.components().last().unwrap(); let version = version.as_os_str().to_str().unwrap().to_string(); Some((root.into(), version)) @@ -534,15 +566,15 @@ mod impl_ { fn get_sdk81_dir() -> Option { let key = r"SOFTWARE\Microsoft\Microsoft SDKs\Windows\v8.1"; - let key = otry!(LOCAL_MACHINE.open(key.as_ref()).ok()); - let root = otry!(key.query_str("InstallationFolder").ok()); + let key = LOCAL_MACHINE.open(key.as_ref()).ok()?; + let root = key.query_str("InstallationFolder").ok()?; Some(root.into()) } fn get_sdk8_dir() -> Option { let key = r"SOFTWARE\Microsoft\Microsoft SDKs\Windows\v8.0"; - let key = otry!(LOCAL_MACHINE.open(key.as_ref()).ok()); - let root = otry!(key.query_str("InstallationFolder").ok()); + let key = LOCAL_MACHINE.open(key.as_ref()).ok()?; + let root = key.query_str("InstallationFolder").ok()?; Some(root.into()) } @@ -642,7 +674,7 @@ mod impl_ { for subkey in key.iter().filter_map(|k| k.ok()) { let val = subkey .to_str() - .and_then(|s| s.trim_start_matches("v").replace(".", "").parse().ok()); + .and_then(|s| s.trim_left_matches("v").replace(".", "").parse().ok()); let val = match val { Some(s) => s, None => continue, @@ -659,6 +691,10 @@ mod impl_ { pub fn has_msbuild_version(version: &str) -> bool { match version { + "16.0" => { + find_msbuild_vs16("x86_64-pc-windows-msvc").is_some() + || find_msbuild_vs16("i686-pc-windows-msvc").is_some() + } "15.0" => { 
find_msbuild_vs15("x86_64-pc-windows-msvc").is_some() || find_msbuild_vs15("i686-pc-windows-msvc").is_some() @@ -684,7 +720,9 @@ mod impl_ { pub fn find_msbuild(target: &str) -> Option { - if let Some(r) = find_msbuild_vs15(target) { + if let Some(r) = find_msbuild_vs16(target) { + return Some(r); + } else if let Some(r) = find_msbuild_vs15(target) { return Some(r); } else { find_old_msbuild(target) diff --git a/third_party/rust/cc/tests/cc_env.rs b/third_party/rust/cc/tests/cc_env.rs index f9386d7f59b5..43eb689f0fbf 100644 --- a/third_party/rust/cc/tests/cc_env.rs +++ b/third_party/rust/cc/tests/cc_env.rs @@ -1,12 +1,9 @@ -extern crate cc; -extern crate tempdir; - use std::env; -use std::path::Path; use std::ffi::OsString; +use std::path::Path; mod support; -use support::Test; +use crate::support::Test; #[test] fn main() { @@ -63,14 +60,16 @@ fn ccache_env_flags() { .cflags_env() .into_string() .unwrap() - .contains("ccache") == false + .contains("ccache") + == false ); assert!( compiler .cflags_env() .into_string() .unwrap() - .contains(" lol-this-is-not-a-compiler") == false + .contains(" lol-this-is-not-a-compiler") + == false ); env::set_var("CC", ""); diff --git a/third_party/rust/cc/tests/cflags.rs b/third_party/rust/cc/tests/cflags.rs new file mode 100644 index 000000000000..2afee3f18976 --- /dev/null +++ b/third_party/rust/cc/tests/cflags.rs @@ -0,0 +1,15 @@ +mod support; + +use crate::support::Test; +use std::env; + + + +#[test] +fn gnu_no_warnings_if_cflags() { + env::set_var("CFLAGS", "-arbitrary"); + let test = Test::gnu(); + test.gcc().file("foo.c").compile("foo"); + + test.cmd(0).must_not_have("-Wall").must_not_have("-Wextra"); +} diff --git a/third_party/rust/cc/tests/cxxflags.rs b/third_party/rust/cc/tests/cxxflags.rs new file mode 100644 index 000000000000..4e7d1354a8db --- /dev/null +++ b/third_party/rust/cc/tests/cxxflags.rs @@ -0,0 +1,15 @@ +mod support; + +use crate::support::Test; +use std::env; + + + +#[test] +fn gnu_no_warnings_if_cxxflags() { + env::set_var("CXXFLAGS", "-arbitrary"); + let test = Test::gnu(); + test.gcc().file("foo.cpp").cpp(true).compile("foo"); + + test.cmd(0).must_not_have("-Wall").must_not_have("-Wextra"); +} diff --git a/third_party/rust/cc/tests/support/mod.rs b/third_party/rust/cc/tests/support/mod.rs index cae81513b13b..8475220db749 100644 --- a/third_party/rust/cc/tests/support/mod.rs +++ b/third_party/rust/cc/tests/support/mod.rs @@ -1,10 +1,11 @@ #![allow(dead_code)] use std::env; -use std::ffi::OsStr; +use std::ffi::{OsStr, OsString}; use std::fs::{self, File}; +use std::io; use std::io::prelude::*; -use std::path::PathBuf; +use std::path::{Path, PathBuf}; use cc; use tempdir::TempDir; @@ -26,9 +27,10 @@ impl Test { if gcc.ends_with("deps") { gcc.pop(); } + let td = TempDir::new_in(&gcc, "gcc-test").unwrap(); gcc.push(format!("gcc-shim{}", env::consts::EXE_SUFFIX)); Test { - td: TempDir::new("gcc-test").unwrap(), + td: td, gcc: gcc, msvc: false, } @@ -48,17 +50,18 @@ impl Test { } pub fn shim(&self, name: &str) -> &Test { - let fname = format!("{}{}", name, env::consts::EXE_SUFFIX); - fs::hard_link(&self.gcc, self.td.path().join(&fname)) - .or_else(|_| fs::copy(&self.gcc, self.td.path().join(&fname)).map(|_| ())) - .unwrap(); + link_or_copy( + &self.gcc, + self.td + .path() + .join(&format!("{}{}", name, env::consts::EXE_SUFFIX)), + ) + .unwrap(); self } pub fn gcc(&self) -> cc::Build { let mut cfg = cc::Build::new(); - let mut path = env::split_paths(&env::var_os("PATH").unwrap()).collect::>(); - path.insert(0, 
self.td.path().to_owned()); let target = if self.msvc { "x86_64-pc-windows-msvc" } else { @@ -70,7 +73,7 @@ impl Test { .opt_level(2) .debug(false) .out_dir(self.td.path()) - .__set_env("PATH", env::join_paths(path).unwrap()) + .__set_env("PATH", self.path()) .__set_env("GCCTEST_OUT_DIR", self.td.path()); if self.msvc { cfg.compiler(self.td.path().join("cl")); @@ -79,6 +82,12 @@ impl Test { cfg } + fn path(&self) -> OsString { + let mut path = env::split_paths(&env::var_os("PATH").unwrap()).collect::>(); + path.insert(0, self.td.path().to_owned()); + env::join_paths(path).unwrap() + } + pub fn cmd(&self, i: u32) -> Execution { let mut s = String::new(); File::open(self.td.path().join(format!("out{}", i))) @@ -113,10 +122,12 @@ impl Execution { } pub fn must_have_in_order(&self, before: &str, after: &str) -> &Execution { - let before_position = self.args + let before_position = self + .args .iter() .rposition(|x| OsStr::new(x) == OsStr::new(before)); - let after_position = self.args + let after_position = self + .args .iter() .rposition(|x| OsStr::new(x) == OsStr::new(after)); match (before_position, after_position) { @@ -129,3 +140,22 @@ impl Execution { self } } + + + + + +#[cfg(not(target_os = "macos"))] +fn link_or_copy, Q: AsRef>(from: P, to: Q) -> io::Result<()> { + let from = from.as_ref(); + let to = to.as_ref(); + fs::hard_link(from, to).or_else(|_| fs::copy(from, to).map(|_| ())) +} + + + + +#[cfg(target_os = "macos")] +fn link_or_copy, Q: AsRef>(from: P, to: Q) -> io::Result<()> { + fs::copy(from, to).map(|_| ()) +} diff --git a/third_party/rust/cc/tests/test.rs b/third_party/rust/cc/tests/test.rs index 573a99a7b8ff..def11f025969 100644 --- a/third_party/rust/cc/tests/test.rs +++ b/third_party/rust/cc/tests/test.rs @@ -1,8 +1,4 @@ -extern crate cc; -extern crate tempdir; - -use std::env; -use support::Test; +use crate::support::Test; mod support; @@ -43,10 +39,40 @@ fn gnu_opt_level_s() { } #[test] -fn gnu_debug() { +fn gnu_debug_fp_auto() { let test = Test::gnu(); test.gcc().debug(true).file("foo.c").compile("foo"); test.cmd(0).must_have("-g"); + test.cmd(0).must_have("-fno-omit-frame-pointer"); +} + +#[test] +fn gnu_debug_fp() { + let test = Test::gnu(); + test.gcc().debug(true).file("foo.c").compile("foo"); + test.cmd(0).must_have("-g"); + test.cmd(0).must_have("-fno-omit-frame-pointer"); +} + +#[test] +fn gnu_debug_nofp() { + let test = Test::gnu(); + test.gcc() + .debug(true) + .force_frame_pointer(false) + .file("foo.c") + .compile("foo"); + test.cmd(0).must_have("-g"); + test.cmd(0).must_not_have("-fno-omit-frame-pointer"); + + let test = Test::gnu(); + test.gcc() + .force_frame_pointer(false) + .debug(true) + .file("foo.c") + .compile("foo"); + test.cmd(0).must_have("-g"); + test.cmd(0).must_not_have("-fno-omit-frame-pointer"); } #[test] @@ -111,30 +137,6 @@ fn gnu_warnings_overridable() { .must_have_in_order("-Wall", "-Wno-missing-field-initializers"); } -#[test] -fn gnu_no_warnings_if_cflags() { - env::set_var("CFLAGS", "-Wflag-does-not-exist"); - let test = Test::gnu(); - test.gcc() - .file("foo.c") - .compile("foo"); - - test.cmd(0).must_not_have("-Wall").must_not_have("-Wextra"); - env::set_var("CFLAGS", ""); -} - -#[test] -fn gnu_no_warnings_if_cxxflags() { - env::set_var("CXXFLAGS", "-Wflag-does-not-exist"); - let test = Test::gnu(); - test.gcc() - .file("foo.c") - .compile("foo"); - - test.cmd(0).must_not_have("-Wall").must_not_have("-Wextra"); - env::set_var("CXXFLAGS", ""); -} - #[test] fn gnu_x86_64() { for vendor in &["unknown-linux-gnu", "apple-darwin"] { @@ 
-315,11 +317,11 @@ fn msvc_smoke() { test.gcc().file("foo.c").compile("foo"); test.cmd(0) - .must_have("/O2") + .must_have("-O2") .must_have("foo.c") - .must_not_have("/Z7") - .must_have("/c") - .must_have("/MD"); + .must_not_have("-Z7") + .must_have("-c") + .must_have("-MD"); test.cmd(1).must_have(test.td.path().join("foo.o")); } @@ -328,14 +330,14 @@ fn msvc_opt_level_0() { let test = Test::msvc(); test.gcc().opt_level(0).file("foo.c").compile("foo"); - test.cmd(0).must_not_have("/O2"); + test.cmd(0).must_not_have("-O2"); } #[test] fn msvc_debug() { let test = Test::msvc(); test.gcc().debug(true).file("foo.c").compile("foo"); - test.cmd(0).must_have("/Z7"); + test.cmd(0).must_have("-Z7"); } #[test] @@ -343,7 +345,7 @@ fn msvc_include() { let test = Test::msvc(); test.gcc().include("foo/bar").file("foo.c").compile("foo"); - test.cmd(0).must_have("/I").must_have("foo/bar"); + test.cmd(0).must_have("-I").must_have("foo/bar"); } #[test] @@ -355,7 +357,7 @@ fn msvc_define() { .file("foo.c") .compile("foo"); - test.cmd(0).must_have("/DFOO=bar").must_have("/DBAR"); + test.cmd(0).must_have("-DFOO=bar").must_have("-DBAR"); } #[test] @@ -363,7 +365,7 @@ fn msvc_static_crt() { let test = Test::msvc(); test.gcc().static_crt(true).file("foo.c").compile("foo"); - test.cmd(0).must_have("/MT"); + test.cmd(0).must_have("-MT"); } #[test] @@ -371,5 +373,5 @@ fn msvc_no_static_crt() { let test = Test::msvc(); test.gcc().static_crt(false).file("foo.c").compile("foo"); - test.cmd(0).must_have("/MD"); + test.cmd(0).must_have("-MD"); } diff --git a/third_party/rust/cocoa/.cargo-checksum.json b/third_party/rust/cocoa/.cargo-checksum.json new file mode 100644 index 000000000000..895ba3885ecd --- /dev/null +++ b/third_party/rust/cocoa/.cargo-checksum.json @@ -0,0 +1 @@ +{"files":{"COPYRIGHT":"ec82b96487e9e778ee610c7ab245162464782cfa1f555c2299333f8dbe5c036a","Cargo.lock":"13b2e7b7a41925418cff959dc7bfc94e516797f4da57fd50fee058013af0175f","Cargo.toml":"28b9c7045e43195c6c91d6fb8923c3a74143fe4ecf3514ae5ebcf61d7c95299a","LICENSE-APACHE":"a60eea817514531668d7e00765731449fe14d059d3249e0bc93b36de45f759f2","LICENSE-MIT":"62065228e42caebca7e7d7db1204cbb867033de5982ca4009928915e4095f3a3","README.md":"29542cb34adb016506822ef78bceacf1774dbe8c27818a4d47b7d7b71ed4e1ac","examples/color.rs":"5211a85ab9ec4cbb98f8f677de50bb2f1d300400f50226ebfd714d40b9d32485","examples/fullscreen.rs":"f5ad7a0f9c44944c5ee84413ab9a106abd4d4e53534867c602651db7be722165","examples/hello_world.rs":"7efe2a29de4d4dfc1d8821f469e13b1456f72de478bab55f813d08d4e5aafd5c","examples/tab_view.rs":"e6a3187eeac2f46210994293c2db7756757f4cad07e519902fa994c6d5c1a7d6","src/appkit.rs":"097a75a94a50de737d757bcd84fd0ce4283aab8f6f197dc9889dad2387c279c9","src/base.rs":"6c56d1758a9b0a7f8927771fe8b0bb43c6f19e4531bf9accecc786028eaad845","src/foundation.rs":"0b8809b5c2d11120ed4e90797961cb97c814ae62166d9d19182c04da7a04d9a6","src/lib.rs":"ed6164b3e0fe68579218185267d79229a2989c86efce690d7273f779c5239ec3","src/macros.rs":"0de0a8ea9a23f03cad94266a92051c3be8ff3f8f7d7d60f95dafe6c663204d48","src/quartzcore.rs":"a3aaf7461020a7b03dbdd55e3dcf6c6dbcf693343def7e121cc1d9257d4cc26f","tests/foundation.rs":"16e35ab33eefde5f7f2f52ee796e3b825427c2665ef9b146d40362f81fdd503a"},"package":"8cd20045e880893b4a8286d5639e9ade85fb1f6a14c291f882cf8cf2149d37d9"} \ No newline at end of file diff --git a/third_party/rust/cocoa/COPYRIGHT b/third_party/rust/cocoa/COPYRIGHT new file mode 100644 index 000000000000..8b7291ad281c --- /dev/null +++ b/third_party/rust/cocoa/COPYRIGHT @@ -0,0 +1,5 @@ +Licensed under the 
Apache License, Version 2.0 <LICENSE-APACHE or
+http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+<LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+option. All files in the project carrying such notice may not be
+copied, modified, or distributed except according to those terms.
diff --git a/third_party/rust/cocoa/Cargo.lock b/third_party/rust/cocoa/Cargo.lock
new file mode 100644
index 000000000000..493bc65b8962
--- /dev/null
+++ b/third_party/rust/cocoa/Cargo.lock
@@ -0,0 +1,95 @@
+# This file is automatically @generated by Cargo.
+# It is not intended for manual editing.
+[[package]]
+name = "bitflags"
+version = "1.1.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+
+[[package]]
+name = "block"
+version = "0.1.6"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+
+[[package]]
+name = "cocoa"
+version = "0.19.0"
+dependencies = [
+ "bitflags 1.1.0 (registry+https://github.com/rust-lang/crates.io-index)",
+ "block 0.1.6 (registry+https://github.com/rust-lang/crates.io-index)",
+ "core-foundation 0.6.4 (registry+https://github.com/rust-lang/crates.io-index)",
+ "core-graphics 0.17.3 (registry+https://github.com/rust-lang/crates.io-index)",
+ "foreign-types 0.3.2 (registry+https://github.com/rust-lang/crates.io-index)",
+ "libc 0.2.60 (registry+https://github.com/rust-lang/crates.io-index)",
+ "objc 0.2.6 (registry+https://github.com/rust-lang/crates.io-index)",
+]
+
+[[package]]
+name = "core-foundation"
+version = "0.6.4"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+dependencies = [
+ "core-foundation-sys 0.6.2 (registry+https://github.com/rust-lang/crates.io-index)",
+ "libc 0.2.60 (registry+https://github.com/rust-lang/crates.io-index)",
+]
+
+[[package]]
+name = "core-foundation-sys"
+version = "0.6.2"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+
+[[package]]
+name = "core-graphics"
+version = "0.17.3"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+dependencies = [
+ "bitflags 1.1.0 (registry+https://github.com/rust-lang/crates.io-index)",
+ "core-foundation 0.6.4 (registry+https://github.com/rust-lang/crates.io-index)",
+ "foreign-types 0.3.2 (registry+https://github.com/rust-lang/crates.io-index)",
+ "libc 0.2.60 (registry+https://github.com/rust-lang/crates.io-index)",
+]
+
+[[package]]
+name = "foreign-types"
+version = "0.3.2"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+dependencies = [
+ "foreign-types-shared 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)",
+]
+
+[[package]]
+name = "foreign-types-shared"
+version = "0.1.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+
+[[package]]
+name = "libc"
+version = "0.2.60"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+
+[[package]]
+name = "malloc_buf"
+version = "0.0.6"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+dependencies = [
+ "libc 0.2.60 (registry+https://github.com/rust-lang/crates.io-index)",
+]
+
+[[package]]
+name = "objc"
+version = "0.2.6"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+dependencies = [
+ "malloc_buf 0.0.6 (registry+https://github.com/rust-lang/crates.io-index)",
+]
+
+[metadata]
+"checksum bitflags 1.1.0 (registry+https://github.com/rust-lang/crates.io-index)" = "3d155346769a6855b86399e9bc3814ab343cd3d62c7e985113d46a0ec3c281fd"
+"checksum block 0.1.6 (registry+https://github.com/rust-lang/crates.io-index)" = "0d8c1fef690941d3e7788d328517591fecc684c084084702d6ff1641e993699a"
+"checksum core-foundation 0.6.4 (registry+https://github.com/rust-lang/crates.io-index)" = "25b9e03f145fd4f2bf705e07b900cd41fc636598fe5dc452fd0db1441c3f496d"
+"checksum core-foundation-sys 0.6.2 (registry+https://github.com/rust-lang/crates.io-index)" = "e7ca8a5221364ef15ce201e8ed2f609fc312682a8f4e0e3d4aa5879764e0fa3b"
+"checksum core-graphics 0.17.3 (registry+https://github.com/rust-lang/crates.io-index)" = "56790968ab1c8a1202a102e6de05fc6e1ec87da99e4e93e9a7d13efbfc1e95a9"
+"checksum foreign-types 0.3.2 (registry+https://github.com/rust-lang/crates.io-index)" = "f6f339eb8adc052cd2ca78910fda869aefa38d22d5cb648e6485e4d3fc06f3b1"
+"checksum foreign-types-shared 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)" = "00b0228411908ca8685dba7fc2cdd70ec9990a6e753e89b6ac91a84c40fbaf4b"
+"checksum libc 0.2.60 (registry+https://github.com/rust-lang/crates.io-index)" = "d44e80633f007889c7eff624b709ab43c92d708caad982295768a7b13ca3b5eb"
+"checksum malloc_buf 0.0.6 (registry+https://github.com/rust-lang/crates.io-index)" = "62bb907fe88d54d8d9ce32a3cceab4218ed2f6b7d35617cafe9adf84e43919cb"
+"checksum objc 0.2.6 (registry+https://github.com/rust-lang/crates.io-index)" = "31d20fd2b37e07cf5125be68357b588672e8cefe9a96f8c17a9d46053b3e590d"
diff --git a/third_party/rust/cocoa/Cargo.toml b/third_party/rust/cocoa/Cargo.toml
new file mode 100644
index 000000000000..37efcdddc636
--- /dev/null
+++ b/third_party/rust/cocoa/Cargo.toml
@@ -0,0 +1,44 @@
+# THIS FILE IS AUTOMATICALLY GENERATED BY CARGO
+#
+# When uploading crates to the registry Cargo will automatically
+# "normalize" Cargo.toml files for maximal compatibility
+# with all versions of Cargo and also rewrite `path` dependencies
+# to registry (e.g., crates.io) dependencies
+#
+# If you believe there's an error in this file please file an
+# issue against the rust-lang/cargo repository. If you're
+# editing this file be aware that the upstream Cargo.toml
+# will likely look very different (and much more reasonable)
+
+[package]
+name = "cocoa"
+version = "0.19.0"
+authors = ["The Servo Project Developers"]
+description = "Bindings to Cocoa for macOS"
+homepage = "https://github.com/servo/core-foundation-rs"
+license = "MIT / Apache-2.0"
+repository = "https://github.com/servo/core-foundation-rs"
+
+[lib]
+name = "cocoa"
+crate-type = ["rlib"]
+[dependencies.bitflags]
+version = "1.0"
+
+[dependencies.block]
+version = "0.1"
+
+[dependencies.core-foundation]
+version = "0.6"
+
+[dependencies.core-graphics]
+version = "0.17"
+
+[dependencies.foreign-types]
+version = "0.3"
+
+[dependencies.libc]
+version = "0.2"
+
+[dependencies.objc]
+version = "0.2.3"
diff --git a/third_party/rust/cocoa/LICENSE-APACHE b/third_party/rust/cocoa/LICENSE-APACHE
new file mode 100644
index 000000000000..16fe87b06e80
--- /dev/null
+++ b/third_party/rust/cocoa/LICENSE-APACHE
@@ -0,0 +1,201 @@
+                              Apache License
+                        Version 2.0, January 2004
+                     http://www.apache.org/licenses/
+
+TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+1. Definitions.
+
+   "License" shall mean the terms and conditions for use, reproduction,
+   and distribution as defined by Sections 1 through 9 of this document.
+
+   "Licensor" shall mean the copyright owner or entity authorized by
+   the copyright owner that is granting the License.
+
+   "Legal Entity" shall mean the union of the acting entity and all
+   other entities that control, are controlled by, or are under common
+   control with that entity.
+   For the purposes of this definition,
+   "control" means (i) the power, direct or indirect, to cause the
+   direction or management of such entity, whether by contract or
+   otherwise, or (ii) ownership of fifty percent (50%) or more of the
+   outstanding shares, or (iii) beneficial ownership of such entity.
+
+   "You" (or "Your") shall mean an individual or Legal Entity
+   exercising permissions granted by this License.
+
+   "Source" form shall mean the preferred form for making modifications,
+   including but not limited to software source code, documentation
+   source, and configuration files.
+
+   "Object" form shall mean any form resulting from mechanical
+   transformation or translation of a Source form, including but
+   not limited to compiled object code, generated documentation,
+   and conversions to other media types.
+
+   "Work" shall mean the work of authorship, whether in Source or
+   Object form, made available under the License, as indicated by a
+   copyright notice that is included in or attached to the work
+   (an example is provided in the Appendix below).
+
+   "Derivative Works" shall mean any work, whether in Source or Object
+   form, that is based on (or derived from) the Work and for which the
+   editorial revisions, annotations, elaborations, or other modifications
+   represent, as a whole, an original work of authorship. For the purposes
+   of this License, Derivative Works shall not include works that remain
+   separable from, or merely link (or bind by name) to the interfaces of,
+   the Work and Derivative Works thereof.
+
+   "Contribution" shall mean any work of authorship, including
+   the original version of the Work and any modifications or additions
+   to that Work or Derivative Works thereof, that is intentionally
+   submitted to Licensor for inclusion in the Work by the copyright owner
+   or by an individual or Legal Entity authorized to submit on behalf of
+   the copyright owner. For the purposes of this definition, "submitted"
+   means any form of electronic, verbal, or written communication sent
+   to the Licensor or its representatives, including but not limited to
+   communication on electronic mailing lists, source code control systems,
+   and issue tracking systems that are managed by, or on behalf of, the
+   Licensor for the purpose of discussing and improving the Work, but
+   excluding communication that is conspicuously marked or otherwise
+   designated in writing by the copyright owner as "Not a Contribution."
+
+   "Contributor" shall mean Licensor and any individual or Legal Entity
+   on behalf of whom a Contribution has been received by Licensor and
+   subsequently incorporated within the Work.
+
+2. Grant of Copyright License. Subject to the terms and conditions of
+   this License, each Contributor hereby grants to You a perpetual,
+   worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+   copyright license to reproduce, prepare Derivative Works of,
+   publicly display, publicly perform, sublicense, and distribute the
+   Work and such Derivative Works in Source or Object form.
+
+3. Grant of Patent License. Subject to the terms and conditions of
+   this License, each Contributor hereby grants to You a perpetual,
+   worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+   (except as stated in this section) patent license to make, have made,
+   use, offer to sell, sell, import, and otherwise transfer the Work,
+   where such license applies only to those patent claims licensable
+   by such Contributor that are necessarily infringed by their
+   Contribution(s) alone or by combination of their Contribution(s)
+   with the Work to which such Contribution(s) was submitted. If You
+   institute patent litigation against any entity (including a
+   cross-claim or counterclaim in a lawsuit) alleging that the Work
+   or a Contribution incorporated within the Work constitutes direct
+   or contributory patent infringement, then any patent licenses
+   granted to You under this License for that Work shall terminate
+   as of the date such litigation is filed.
+
+4. Redistribution. You may reproduce and distribute copies of the
+   Work or Derivative Works thereof in any medium, with or without
+   modifications, and in Source or Object form, provided that You
+   meet the following conditions:
+
+   (a) You must give any other recipients of the Work or
+       Derivative Works a copy of this License; and
+
+   (b) You must cause any modified files to carry prominent notices
+       stating that You changed the files; and
+
+   (c) You must retain, in the Source form of any Derivative Works
+       that You distribute, all copyright, patent, trademark, and
+       attribution notices from the Source form of the Work,
+       excluding those notices that do not pertain to any part of
+       the Derivative Works; and
+
+   (d) If the Work includes a "NOTICE" text file as part of its
+       distribution, then any Derivative Works that You distribute must
+       include a readable copy of the attribution notices contained
+       within such NOTICE file, excluding those notices that do not
+       pertain to any part of the Derivative Works, in at least one
+       of the following places: within a NOTICE text file distributed
+       as part of the Derivative Works; within the Source form or
+       documentation, if provided along with the Derivative Works; or,
+       within a display generated by the Derivative Works, if and
+       wherever such third-party notices normally appear. The contents
+       of the NOTICE file are for informational purposes only and
+       do not modify the License. You may add Your own attribution
+       notices within Derivative Works that You distribute, alongside
+       or as an addendum to the NOTICE text from the Work, provided
+       that such additional attribution notices cannot be construed
+       as modifying the License.
+
+   You may add Your own copyright statement to Your modifications and
+   may provide additional or different license terms and conditions
+   for use, reproduction, or distribution of Your modifications, or
+   for any such Derivative Works as a whole, provided Your use,
+   reproduction, and distribution of the Work otherwise complies with
+   the conditions stated in this License.
+
+5. Submission of Contributions. Unless You explicitly state otherwise,
+   any Contribution intentionally submitted for inclusion in the Work
+   by You to the Licensor shall be under the terms and conditions of
+   this License, without any additional terms or conditions.
+   Notwithstanding the above, nothing herein shall supersede or modify
+   the terms of any separate license agreement you may have executed
+   with Licensor regarding such Contributions.
+
+6. Trademarks. This License does not grant permission to use the trade
+   names, trademarks, service marks, or product names of the Licensor,
+   except as required for reasonable and customary use in describing the
+   origin of the Work and reproducing the content of the NOTICE file.
+
+7. Disclaimer of Warranty. Unless required by applicable law or
+   agreed to in writing, Licensor provides the Work (and each
+   Contributor provides its Contributions) on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+   implied, including, without limitation, any warranties or conditions
+   of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+   PARTICULAR PURPOSE. You are solely responsible for determining the
+   appropriateness of using or redistributing the Work and assume any
+   risks associated with Your exercise of permissions under this License.
+
+8. Limitation of Liability. In no event and under no legal theory,
+   whether in tort (including negligence), contract, or otherwise,
+   unless required by applicable law (such as deliberate and grossly
+   negligent acts) or agreed to in writing, shall any Contributor be
+   liable to You for damages, including any direct, indirect, special,
+   incidental, or consequential damages of any character arising as a
+   result of this License or out of the use or inability to use the
+   Work (including but not limited to damages for loss of goodwill,
+   work stoppage, computer failure or malfunction, or any and all
+   other commercial damages or losses), even if such Contributor
+   has been advised of the possibility of such damages.
+
+9. Accepting Warranty or Additional Liability. While redistributing
+   the Work or Derivative Works thereof, You may choose to offer,
+   and charge a fee for, acceptance of support, warranty, indemnity,
+   or other liability obligations and/or rights consistent with this
+   License. However, in accepting such obligations, You may act only
+   on Your own behalf and on Your sole responsibility, not on behalf
+   of any other Contributor, and only if You agree to indemnify,
+   defend, and hold each Contributor harmless for any liability
+   incurred by, or claims asserted against, such Contributor by reason
+   of your accepting any such warranty or additional liability.
+
+END OF TERMS AND CONDITIONS
+
+APPENDIX: How to apply the Apache License to your work.
+
+   To apply the Apache License to your work, attach the following
+   boilerplate notice, with the fields enclosed by brackets "[]"
+   replaced with your own identifying information. (Don't include
+   the brackets!) The text should be enclosed in the appropriate
+   comment syntax for the file format. We also recommend that a
+   file or class name and description of purpose be included on the
+   same "printed page" as the copyright notice for easier
+   identification within third-party archives.
+
+Copyright [yyyy] [name of copyright owner]
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
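
A note on the cc test hunks at the top of this section: cl.exe accepts each of its options with either a `/` or a `-` prefix, so switching the assertions from `/O2`, `/Z7`, `/c`, `/I`, `/DFOO=bar`, `/MT` and `/MD` to the `-` spellings changes nothing about what the compiler does; it only standardizes on the form that Unix-style shells and cross-toolchains (for example the win32 MinGW builds called out in the commit message) cannot misread as a path. The sketch below drives the same `cc::Build` methods those tests exercise; it is illustrative only, and both the use of the `cc` crate in a build script and the `src/foo.c` path are assumptions, not part of this patch.

    // build.rs: a minimal sketch of the builder API the msvc_* tests assert on.
    // On an MSVC target, the comments show the flag each call now produces.
    fn main() {
        cc::Build::new()
            .file("src/foo.c")     // hypothetical source path
            .include("foo/bar")    // -I foo/bar   (previously /I)
            .define("FOO", "bar")  // -DFOO=bar    (previously /DFOO=bar)
            .opt_level(2)          // -O2          (previously /O2)
            .debug(true)           // -Z7          (previously /Z7)
            .static_crt(false)     // -MD; static_crt(true) gives -MT
            .compile("foo");       // produces foo.lib / libfoo.a
    }
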
diff --git a/third_party/rust/cocoa/LICENSE-MIT b/third_party/rust/cocoa/LICENSE-MIT new file mode 100644 index 000000000000..807526f57f3a --- /dev/null +++ b/third_party/rust/cocoa/LICENSE-MIT @@ -0,0 +1,25 @@ +Copyright (c) 2012-2013 Mozilla Foundation + +Permission is hereby granted, free of charge, to any +person obtaining a copy of this software and associated +documentation files (the "Software"), to deal in the +Software without restriction, including without +limitation the rights to use, copy, modify, merge, +publish, distribute, sublicense, and/or sell copies of +the Software, and to permit persons to whom the Software +is furnished to do so, subject to the following +conditions: + +The above copyright notice and this permission notice +shall be included in all copies or substantial portions +of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF +ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED +TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A +PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY +CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION +OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR +IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER +DEALINGS IN THE SOFTWARE. diff --git a/third_party/rust/cocoa/README.md b/third_party/rust/cocoa/README.md new file mode 100644 index 000000000000..d1818d11c48f --- /dev/null +++ b/third_party/rust/cocoa/README.md @@ -0,0 +1,6 @@ +Cocoa-rs +-------- + +This crate provides Rust bindings to Cocoa for macOS. It's dual-licensed MIT / +Apache 2.0. If you'd like to help improve cocoa-rs, check out [the Servo +contributing guide](https://github.com/servo/servo/blob/master/CONTRIBUTING.md)! diff --git a/third_party/rust/cocoa/examples/color.rs b/third_party/rust/cocoa/examples/color.rs new file mode 100644 index 000000000000..005f61e11507 --- /dev/null +++ b/third_party/rust/cocoa/examples/color.rs @@ -0,0 +1,120 @@ +extern crate cocoa; + +use cocoa::base::{selector, id, nil, NO}; + +use cocoa::foundation::{NSRect, NSPoint, NSSize, NSAutoreleasePool, NSProcessInfo, + NSString}; +use cocoa::appkit::{NSApp, NSColor, NSColorSpace, NSApplication, NSApplicationActivationPolicyRegular, + NSMenu, NSMenuItem, NSWindowStyleMask, NSBackingStoreType, NSWindow, + NSRunningApplication, NSApplicationActivateIgnoringOtherApps}; + + +fn main() { + unsafe { + + let app = create_app(); + + + let clear = NSColor::clearColor(nil); + let black = NSColor::colorWithRed_green_blue_alpha_(nil, 0.0, 0.0, 0.0, 1.0); + let srgb_red = NSColor::colorWithSRGBRed_green_blue_alpha_(nil, 1.0, 0.0, 0.0, 1.0); + let device_green = NSColor::colorWithDeviceRed_green_blue_alpha_(nil, 0.0, 1.0, 0.0, 1.0); + let display_p3_blue = NSColor::colorWithDisplayP3Red_green_blue_alpha_(nil, 0.0, 0.0, 1.0, 1.0); + let calibrated_cyan = NSColor::colorWithCalibratedRed_green_blue_alpha_(nil, 0.0, 1.0, 1.0, 1.0); + + + let _win_clear = create_window(NSString::alloc(nil).init_str("clear"), clear); + let _win_black = create_window(NSString::alloc(nil).init_str("black"), black); + let _win_srgb_red = create_window(NSString::alloc(nil).init_str("srgb_red"), srgb_red); + let _win_device_green = create_window(NSString::alloc(nil).init_str("device_green"), device_green); + let _win_display_p3_blue = create_window(NSString::alloc(nil).init_str("display_p3_blue"), display_p3_blue); + let _win_calibrated_cyan = create_window(NSString::alloc(nil).init_str("calibrated_cyan"), calibrated_cyan); + + + + + + let 
my_color = NSColor::colorWithRed_green_blue_alpha_(nil, 0.25, 0.75, 0.5, 0.25); + println!("alphaComponent: {:?}", my_color.alphaComponent()); + println!("redComponent: {:?}", my_color.redComponent()); + println!("greenComponent: {:?}", my_color.greenComponent()); + println!("blueComponent: {:?}", my_color.blueComponent()); + println!("hueComponent: {:?}", my_color.hueComponent()); + println!("saturationComponent: {:?}", my_color.saturationComponent()); + println!("brightnessComponent: {:?}", my_color.brightnessComponent()); + + + let my_color_cmyk_cs = my_color.colorUsingColorSpace_(NSColorSpace::deviceCMYKColorSpace(nil)); + println!("blackComponent: {:?}", my_color_cmyk_cs.blackComponent()); + println!("cyanComponent: {:?}", my_color_cmyk_cs.cyanComponent()); + println!("magentaComponent: {:?}", my_color_cmyk_cs.magentaComponent()); + println!("yellowComponent: {:?}", my_color_cmyk_cs.yellowComponent()); + + + let cs = NSColorSpace::genericGamma22GrayColorSpace(nil); + let cs_name = cs.localizedName(); + let cs_name_bytes = cs_name.UTF8String() as *const u8; + let cs_name_string = std::str::from_utf8(std::slice::from_raw_parts(cs_name_bytes, cs_name.len())).unwrap(); + println!("NSColorSpace: {:?}", cs_name_string); + + + let cg_cs = cs.CGColorSpace(); + let cs = NSColorSpace::alloc(nil).initWithCGColorSpace_(cg_cs); + let cs_name = cs.localizedName(); + let cs_name_bytes = cs_name.UTF8String() as *const u8; + let cs_name_string = std::str::from_utf8(std::slice::from_raw_parts(cs_name_bytes, cs_name.len())).unwrap(); + println!("initWithCGColorSpace_: {:?}", cs_name_string); + + app.run(); + } +} + +unsafe fn create_window(title: id, color: id) -> id { + let window = NSWindow::alloc(nil).initWithContentRect_styleMask_backing_defer_( + NSRect::new(NSPoint::new(0., 0.), NSSize::new(200., 200.)), + NSWindowStyleMask::NSTitledWindowMask | + NSWindowStyleMask::NSClosableWindowMask | + NSWindowStyleMask::NSResizableWindowMask | + NSWindowStyleMask::NSMiniaturizableWindowMask | + NSWindowStyleMask::NSUnifiedTitleAndToolbarWindowMask, + NSBackingStoreType::NSBackingStoreBuffered, + NO + ).autorelease(); + + window.cascadeTopLeftFromPoint_(NSPoint::new(20., 20.)); + window.setTitle_(title); + window.setBackgroundColor_(color); + window.makeKeyAndOrderFront_(nil); + window +} + +unsafe fn create_app() -> id { + let _pool = NSAutoreleasePool::new(nil); + + let app = NSApp(); + app.setActivationPolicy_(NSApplicationActivationPolicyRegular); + + + let menubar = NSMenu::new(nil).autorelease(); + let app_menu_item = NSMenuItem::new(nil).autorelease(); + menubar.addItem_(app_menu_item); + app.setMainMenu_(menubar); + + + let app_menu = NSMenu::new(nil).autorelease(); + let quit_prefix = NSString::alloc(nil).init_str("Quit "); + let quit_title = + quit_prefix.stringByAppendingString_(NSProcessInfo::processInfo(nil).processName()); + let quit_action = selector("terminate:"); + let quit_key = NSString::alloc(nil).init_str("q"); + let quit_item = NSMenuItem::alloc(nil) + .initWithTitle_action_keyEquivalent_(quit_title, quit_action, quit_key) + .autorelease(); + app_menu.addItem_(quit_item); + app_menu_item.setSubmenu_(app_menu); + + let current_app = NSRunningApplication::currentApplication(nil); + current_app.activateWithOptions_(NSApplicationActivateIgnoringOtherApps); + + return app; +} diff --git a/third_party/rust/cocoa/examples/fullscreen.rs b/third_party/rust/cocoa/examples/fullscreen.rs new file mode 100644 index 000000000000..1d2d30ecd811 --- /dev/null +++ 
b/third_party/rust/cocoa/examples/fullscreen.rs @@ -0,0 +1,97 @@ +extern crate cocoa; +extern crate core_graphics; + +#[macro_use] +extern crate objc; + +use cocoa::base::{selector, nil, NO, id}; +use cocoa::foundation::{NSRect, NSPoint, NSSize, NSAutoreleasePool, NSProcessInfo, + NSString, NSUInteger}; +use cocoa::appkit::{NSApp, NSApplication, NSApplicationActivationPolicyRegular, NSWindow, + NSBackingStoreBuffered, NSMenu, NSMenuItem, NSWindowStyleMask, + NSRunningApplication, NSApplicationActivateIgnoringOtherApps, + NSWindowCollectionBehavior, NSApplicationPresentationOptions}; + +use core_graphics::display::CGDisplay; + +use objc::runtime::{Object, Sel}; +use objc::declare::ClassDecl; + +fn main() { + unsafe { + let _pool = NSAutoreleasePool::new(nil); + + let app = NSApp(); + app.setActivationPolicy_(NSApplicationActivationPolicyRegular); + + + let menubar = NSMenu::new(nil).autorelease(); + let app_menu_item = NSMenuItem::new(nil).autorelease(); + menubar.addItem_(app_menu_item); + app.setMainMenu_(menubar); + + + let app_menu = NSMenu::new(nil).autorelease(); + let quit_prefix = NSString::alloc(nil).init_str("Quit "); + let quit_title = + quit_prefix.stringByAppendingString_(NSProcessInfo::processInfo(nil).processName()); + let quit_action = selector("terminate:"); + let quit_key = NSString::alloc(nil).init_str("q"); + let quit_item = NSMenuItem::alloc(nil) + .initWithTitle_action_keyEquivalent_(quit_title, quit_action, quit_key) + .autorelease(); + app_menu.addItem_(quit_item); + app_menu_item.setSubmenu_(app_menu); + + + let superclass = class!(NSObject); + let mut decl = ClassDecl::new("MyWindowDelegate", superclass).unwrap(); + + extern fn will_use_fillscreen_presentation_options(_: &Object, _: Sel, _: id, _: NSUInteger) -> NSUInteger { + + let options = NSApplicationPresentationOptions::NSApplicationPresentationFullScreen + | NSApplicationPresentationOptions::NSApplicationPresentationHideDock + | NSApplicationPresentationOptions::NSApplicationPresentationHideMenuBar + | NSApplicationPresentationOptions::NSApplicationPresentationDisableProcessSwitching; + options.bits() + } + + extern fn window_entering_fullscreen(_: &Object, _: Sel, _: id) { + + let options = NSApplicationPresentationOptions::NSApplicationPresentationHideDock | NSApplicationPresentationOptions::NSApplicationPresentationHideMenuBar; + unsafe { + NSApp().setPresentationOptions_(options); + } + } + + decl.add_method(sel!(window:willUseFullScreenPresentationOptions:), + will_use_fillscreen_presentation_options as extern fn(&Object, Sel, id, NSUInteger) -> NSUInteger); + decl.add_method(sel!(windowWillEnterFullScreen:), + window_entering_fullscreen as extern fn(&Object, Sel, id)); + decl.add_method(sel!(windowDidEnterFullScreen:), + window_entering_fullscreen as extern fn(&Object, Sel, id)); + + let delegate_class = decl.register(); + let delegate_object = msg_send![delegate_class, new]; + + + let display = CGDisplay::main(); + let size = NSSize::new(display.pixels_wide() as f64, display.pixels_high() as f64); + let window = NSWindow::alloc(nil) + .initWithContentRect_styleMask_backing_defer_(NSRect::new(NSPoint::new(0., 0.), size), + NSWindowStyleMask::NSTitledWindowMask, + NSBackingStoreBuffered, + NO) + .autorelease(); + window.setDelegate_(delegate_object); + let title = NSString::alloc(nil).init_str("Fullscreen!"); + window.setTitle_(title); + window.makeKeyAndOrderFront_(nil); + + let current_app = NSRunningApplication::currentApplication(nil); + 
current_app.activateWithOptions_(NSApplicationActivateIgnoringOtherApps); + window.setCollectionBehavior_(NSWindowCollectionBehavior::NSWindowCollectionBehaviorFullScreenPrimary); + window.toggleFullScreen_(nil); + app.run(); + } +} diff --git a/third_party/rust/cocoa/examples/hello_world.rs b/third_party/rust/cocoa/examples/hello_world.rs new file mode 100644 index 000000000000..0f388c778f53 --- /dev/null +++ b/third_party/rust/cocoa/examples/hello_world.rs @@ -0,0 +1,53 @@ +extern crate cocoa; + +use cocoa::base::{selector, nil, NO}; +use cocoa::foundation::{NSRect, NSPoint, NSSize, NSAutoreleasePool, NSProcessInfo, + NSString}; +use cocoa::appkit::{NSApp, NSApplication, NSApplicationActivationPolicyRegular, NSWindow, + NSBackingStoreBuffered, NSMenu, NSMenuItem, NSWindowStyleMask, + NSRunningApplication, NSApplicationActivateIgnoringOtherApps}; + +fn main() { + unsafe { + let _pool = NSAutoreleasePool::new(nil); + + let app = NSApp(); + app.setActivationPolicy_(NSApplicationActivationPolicyRegular); + + + let menubar = NSMenu::new(nil).autorelease(); + let app_menu_item = NSMenuItem::new(nil).autorelease(); + menubar.addItem_(app_menu_item); + app.setMainMenu_(menubar); + + + let app_menu = NSMenu::new(nil).autorelease(); + let quit_prefix = NSString::alloc(nil).init_str("Quit "); + let quit_title = + quit_prefix.stringByAppendingString_(NSProcessInfo::processInfo(nil).processName()); + let quit_action = selector("terminate:"); + let quit_key = NSString::alloc(nil).init_str("q"); + let quit_item = NSMenuItem::alloc(nil) + .initWithTitle_action_keyEquivalent_(quit_title, quit_action, quit_key) + .autorelease(); + app_menu.addItem_(quit_item); + app_menu_item.setSubmenu_(app_menu); + + + let window = NSWindow::alloc(nil) + .initWithContentRect_styleMask_backing_defer_(NSRect::new(NSPoint::new(0., 0.), + NSSize::new(200., 200.)), + NSWindowStyleMask::NSTitledWindowMask, + NSBackingStoreBuffered, + NO) + .autorelease(); + window.cascadeTopLeftFromPoint_(NSPoint::new(20., 20.)); + window.center(); + let title = NSString::alloc(nil).init_str("Hello World!"); + window.setTitle_(title); + window.makeKeyAndOrderFront_(nil); + let current_app = NSRunningApplication::currentApplication(nil); + current_app.activateWithOptions_(NSApplicationActivateIgnoringOtherApps); + app.run(); + } +} diff --git a/third_party/rust/cocoa/examples/tab_view.rs b/third_party/rust/cocoa/examples/tab_view.rs new file mode 100644 index 000000000000..eb90b5b4eb56 --- /dev/null +++ b/third_party/rust/cocoa/examples/tab_view.rs @@ -0,0 +1,87 @@ +extern crate cocoa; + +use cocoa::base::{selector, id, nil, NO}; + + +use cocoa::foundation::{NSRect, NSPoint, NSSize, NSAutoreleasePool, NSProcessInfo, + NSString}; +use cocoa::appkit::{NSApp, NSApplication, NSApplicationActivationPolicyRegular, NSWindow, + NSMenu, NSMenuItem, NSTabView, NSWindowStyleMask, NSBackingStoreType, + NSTabViewItem, NSRunningApplication, NSApplicationActivateIgnoringOtherApps}; + + +fn main() { + unsafe { + + + let tab_view = NSTabView::new(nil) + .initWithFrame_(NSRect::new(NSPoint::new(0., 0.), NSSize::new(200., 200.))); + + + let tab_view_item = NSTabViewItem::new(nil) + .initWithIdentifier_(NSString::alloc(nil).init_str("TabView1")); + + tab_view_item.setLabel_(NSString::alloc(nil).init_str("Tab view item 1")); + tab_view.addTabViewItem_(tab_view_item); + + + let tab_view_item2 = NSTabViewItem::new(nil) + .initWithIdentifier_(NSString::alloc(nil).init_str("TabView2")); + + tab_view_item2.setLabel_(NSString::alloc(nil).init_str("Tab view item 2")); + 
tab_view.addTabViewItem_(tab_view_item2); + + + let app = create_app(NSString::alloc(nil).init_str("Tab View"), tab_view); + app.run(); + } +} + +unsafe fn create_app(title: id, content: id) -> id { + let _pool = NSAutoreleasePool::new(nil); + + let app = NSApp(); + app.setActivationPolicy_(NSApplicationActivationPolicyRegular); + + + let menubar = NSMenu::new(nil).autorelease(); + let app_menu_item = NSMenuItem::new(nil).autorelease(); + menubar.addItem_(app_menu_item); + app.setMainMenu_(menubar); + + + let app_menu = NSMenu::new(nil).autorelease(); + let quit_prefix = NSString::alloc(nil).init_str("Quit "); + let quit_title = + quit_prefix.stringByAppendingString_(NSProcessInfo::processInfo(nil).processName()); + let quit_action = selector("terminate:"); + let quit_key = NSString::alloc(nil).init_str("q"); + let quit_item = NSMenuItem::alloc(nil) + .initWithTitle_action_keyEquivalent_(quit_title, quit_action, quit_key) + .autorelease(); + app_menu.addItem_(quit_item); + app_menu_item.setSubmenu_(app_menu); + + + let window = NSWindow::alloc(nil).initWithContentRect_styleMask_backing_defer_( + NSRect::new(NSPoint::new(0., 0.), NSSize::new(200., 200.)), + NSWindowStyleMask::NSTitledWindowMask | + NSWindowStyleMask::NSClosableWindowMask | + NSWindowStyleMask::NSResizableWindowMask | + NSWindowStyleMask::NSMiniaturizableWindowMask | + NSWindowStyleMask::NSUnifiedTitleAndToolbarWindowMask, + NSBackingStoreType::NSBackingStoreBuffered, + NO + ).autorelease(); + window.cascadeTopLeftFromPoint_(NSPoint::new(20., 20.)); + window.center(); + + window.setTitle_(title); + window.makeKeyAndOrderFront_(nil); + + window.setContentView_(content); + let current_app = NSRunningApplication::currentApplication(nil); + current_app.activateWithOptions_(NSApplicationActivateIgnoringOtherApps); + + return app; +} diff --git a/third_party/rust/cocoa/src/appkit.rs b/third_party/rust/cocoa/src/appkit.rs new file mode 100644 index 000000000000..a330835db5e6 --- /dev/null +++ b/third_party/rust/cocoa/src/appkit.rs @@ -0,0 +1,4155 @@ + + + + + + + + + +#![allow(non_upper_case_globals)] + +use base::{id, BOOL, SEL}; +use block::Block; +use foundation::{NSInteger, NSUInteger, NSTimeInterval, + NSPoint, NSSize, NSRect, NSRange, NSRectEdge}; +use libc; + +pub use core_graphics::base::CGFloat; +pub use core_graphics::geometry::CGPoint; + +pub use self::NSApplicationActivationPolicy::*; +pub use self::NSApplicationActivationOptions::*; +pub use self::NSBackingStoreType::*; +pub use self::NSOpenGLPixelFormatAttribute::*; +pub use self::NSOpenGLPFAOpenGLProfiles::*; +pub use self::NSEventType::*; +use std::os::raw::c_void; + +pub type CGLContextObj = *mut c_void; + +pub type GLint = i32; + +#[link(name = "AppKit", kind = "framework")] +extern { + pub static NSAppKitVersionNumber: f64; + + + pub static NSPasteboardTypeString: id; + pub static NSPasteboardTypePDF: id; + pub static NSPasteboardTypeTIFF: id; + pub static NSPasteboardTypePNG: id; + pub static NSPasteboardTypeRTF: id; + pub static NSPasteboardTypeRTFD: id; + pub static NSPasteboardTypeHTML: id; + pub static NSPasteboardTypeTabularText: id; + pub static NSPasteboardTypeFont: id; + pub static NSPasteboardTypeRuler: id; + pub static NSPasteboardTypeColor: id; + pub static NSPasteboardTypeSound: id; + pub static NSPasteboardTypeMultipleTextSelection: id; + pub static NSPasteboardTypeFindPanelSearchOptions: id; + + + pub static NSStringPboardType: id; + pub static NSFilenamesPboardType: id; + pub static NSPostScriptPboardType: id; + pub static NSTIFFPboardType: id; + 
pub static NSRTFPboardType: id; + pub static NSTabularTextPboardType: id; + pub static NSFontPboardType: id; + pub static NSRulerPboardType: id; + pub static NSFileContentsPboardType: id; + pub static NSColorPboardType: id; + pub static NSRTFDPboardType: id; + pub static NSHTMLPboardType: id; + pub static NSPICTPboardType: id; + pub static NSURLPboardType: id; + pub static NSPDFPboardType: id; + pub static NSVCardPboardType: id; + pub static NSFilesPromisePboardType: id; + pub static NSMultipleTextSelectionPboardType: id; + pub static NSSoundPboardType: id; + + + pub static NSGeneralPboard: id; + pub static NSFontPboard: id; + pub static NSRulerPboard: id; + pub static NSFindPboard: id; + pub static NSDragPboard: id; + + + pub static NSPasteboardURLReadingFileURLsOnlyKey: id; + pub static NSPasteboardURLReadingContentsConformToTypesKey: id; + + + pub static NSAppearanceNameVibrantDark: id; + pub static NSAppearanceNameVibrantLight: id; + + + pub static NSFullScreenModeAllScreens: id; + pub static NSFullScreenModeSetting: id; + pub static NSFullScreenModeWindowLevel: id; + pub static NSFullScreenModeApplicationPresentationOptions: id; +} + +pub const NSAppKitVersionNumber10_0: f64 = 577.0; +pub const NSAppKitVersionNumber10_1: f64 = 620.0; +pub const NSAppKitVersionNumber10_2: f64 = 663.0; +pub const NSAppKitVersionNumber10_2_3: f64 = 663.6; +pub const NSAppKitVersionNumber10_3: f64 = 743.0; +pub const NSAppKitVersionNumber10_3_2: f64 = 743.14; +pub const NSAppKitVersionNumber10_3_3: f64 = 743.2; +pub const NSAppKitVersionNumber10_3_5: f64 = 743.24; +pub const NSAppKitVersionNumber10_3_7: f64 = 743.33; +pub const NSAppKitVersionNumber10_3_9: f64 = 743.36; +pub const NSAppKitVersionNumber10_4: f64 = 824.0; +pub const NSAppKitVersionNumber10_4_1: f64 = 824.1; +pub const NSAppKitVersionNumber10_4_3: f64 = 824.23; +pub const NSAppKitVersionNumber10_4_4: f64 = 824.33; +pub const NSAppKitVersionNumber10_4_7: f64 = 824.41; +pub const NSAppKitVersionNumber10_5: f64 = 949.0; +pub const NSAppKitVersionNumber10_5_2: f64 = 949.27; +pub const NSAppKitVersionNumber10_5_3: f64 = 949.33; +pub const NSAppKitVersionNumber10_6: f64 = 1038.0; +pub const NSAppKitVersionNumber10_7: f64 = 1138.0; +pub const NSAppKitVersionNumber10_7_2: f64 = 1138.23; +pub const NSAppKitVersionNumber10_7_3: f64 = 1138.32; +pub const NSAppKitVersionNumber10_7_4: f64 = 1138.47; +pub const NSAppKitVersionNumber10_8: f64 = 1187.0; +pub const NSAppKitVersionNumber10_9: f64 = 1265.0; +pub const NSAppKitVersionNumber10_10: f64 = 1343.0; +pub const NSAppKitVersionNumber10_10_2: f64 = 1344.0; +pub const NSAppKitVersionNumber10_10_3: f64 = 1347.0; +pub const NSAppKitVersionNumber10_10_4: f64 = 1348.0; +pub const NSAppKitVersionNumber10_10_5: f64 = 1348.0; +pub const NSAppKitVersionNumber10_10_Max: f64 = 1349.0; +pub const NSAppKitVersionNumber10_11: f64 = 1404.0; +pub const NSAppKitVersionNumber10_11_1: f64 = 1404.13; +pub const NSAppKitVersionNumber10_11_2: f64 = 1404.34; +pub const NSAppKitVersionNumber10_11_3: f64 = 1404.34; +pub const NSAppKitVersionNumber10_12: f64 = 1504.0; +pub const NSAppKitVersionNumber10_12_1: f64 = 1504.60; +pub const NSAppKitVersionNumber10_12_2: f64 = 1504.76; +pub const NSAppKitVersionNumber10_13: f64 = 1561.0; +pub const NSAppKitVersionNumber10_13_1: f64 = 1561.1; +pub const NSAppKitVersionNumber10_13_2: f64 = 1561.2; +pub const NSAppKitVersionNumber10_13_4: f64 = 1561.4; + +pub unsafe fn NSApp() -> id { + msg_send![class!(NSApplication), sharedApplication] +} + +#[repr(i64)] +#[derive(Clone, Copy, Debug, 
PartialEq)] +pub enum NSApplicationActivationPolicy { + NSApplicationActivationPolicyRegular = 0, + NSApplicationActivationPolicyAccessory = 1, + NSApplicationActivationPolicyProhibited = 2, + NSApplicationActivationPolicyERROR = -1 +} + +#[repr(u64)] +#[derive(Clone, Copy, Debug, PartialEq)] +pub enum NSApplicationActivationOptions { + NSApplicationActivateAllWindows = 1 << 0, + NSApplicationActivateIgnoringOtherApps = 1 << 1 +} + +#[repr(u64)] +#[derive(Clone, Copy, Debug, PartialEq)] +pub enum NSApplicationTerminateReply { + NSTerminateCancel = 0, + NSTerminateNow = 1, + NSTerminateLater = 2, +} + +bitflags! { + pub struct NSApplicationPresentationOptions : NSUInteger { + const NSApplicationPresentationDefault = 0; + const NSApplicationPresentationAutoHideDock = 1 << 0; + const NSApplicationPresentationHideDock = 1 << 1; + const NSApplicationPresentationAutoHideMenuBar = 1 << 2; + const NSApplicationPresentationHideMenuBar = 1 << 3; + const NSApplicationPresentationDisableAppleMenu = 1 << 4; + const NSApplicationPresentationDisableProcessSwitching = 1 << 5; + const NSApplicationPresentationDisableForceQuit = 1 << 6; + const NSApplicationPresentationDisableSessionTermination = 1 << 7; + const NSApplicationPresentationDisableHideApplication = 1 << 8; + const NSApplicationPresentationDisableMenuBarTransparency = 1 << 9; + const NSApplicationPresentationFullScreen = 1 << 10; + const NSApplicationPresentationAutoHideToolbar = 1 << 11; + } +} + +bitflags! { + pub struct NSWindowStyleMask: NSUInteger { + const NSBorderlessWindowMask = 0; + const NSTitledWindowMask = 1 << 0; + const NSClosableWindowMask = 1 << 1; + const NSMiniaturizableWindowMask = 1 << 2; + const NSResizableWindowMask = 1 << 3; + + const NSTexturedBackgroundWindowMask = 1 << 8; + + const NSUnifiedTitleAndToolbarWindowMask = 1 << 12; + + const NSFullScreenWindowMask = 1 << 14; + + const NSFullSizeContentViewWindowMask = 1 << 15; + } +} + +#[repr(u64)] +#[derive(Clone, Copy, Debug, PartialEq)] +pub enum NSWindowTitleVisibility { + NSWindowTitleVisible = 0, + NSWindowTitleHidden = 1 +} + +#[repr(i64)] +#[derive(Clone, Copy, Debug, PartialEq)] +pub enum NSWindowTabbingMode { + NSWindowTabbingModeAutomatic = 0, + NSWindowTabbingModeDisallowed = 1, + NSWindowTabbingModePreferred = 2 +} + +#[repr(u64)] +#[derive(Clone, Copy, Debug, PartialEq)] +pub enum NSBackingStoreType { + NSBackingStoreRetained = 0, + NSBackingStoreNonretained = 1, + NSBackingStoreBuffered = 2 +} + +bitflags! { + pub struct NSWindowOrderingMode: NSInteger { + const NSWindowAbove = 1; + const NSWindowBelow = -1; + const NSWindowOut = 0; + } +} + +bitflags! 
{ + pub struct NSAlignmentOptions: libc::c_ulonglong { + const NSAlignMinXInward = 1 << 0; + const NSAlignMinYInward = 1 << 1; + const NSAlignMaxXInward = 1 << 2; + const NSAlignMaxYInward = 1 << 3; + const NSAlignWidthInward = 1 << 4; + const NSAlignHeightInward = 1 << 5; + const NSAlignMinXOutward = 1 << 8; + const NSAlignMinYOutward = 1 << 9; + const NSAlignMaxXOutward = 1 << 10; + const NSAlignMaxYOutward = 1 << 11; + const NSAlignWidthOutward = 1 << 12; + const NSAlignHeightOutward = 1 << 13; + const NSAlignMinXNearest = 1 << 16; + const NSAlignMinYNearest = 1 << 17; + const NSAlignMaxXNearest = 1 << 18; + const NSAlignMaxYNearest = 1 << 19; + const NSAlignWidthNearest = 1 << 20; + const NSAlignHeightNearest = 1 << 21; + const NSAlignRectFlipped = 1 << 63; + const NSAlignAllEdgesInward = NSAlignmentOptions::NSAlignMinXInward.bits + | NSAlignmentOptions::NSAlignMaxXInward.bits + | NSAlignmentOptions::NSAlignMinYInward.bits + | NSAlignmentOptions::NSAlignMaxYInward.bits; + const NSAlignAllEdgesOutward = NSAlignmentOptions::NSAlignMinXOutward.bits + | NSAlignmentOptions::NSAlignMaxXOutward.bits + | NSAlignmentOptions::NSAlignMinYOutward.bits + | NSAlignmentOptions::NSAlignMaxYOutward.bits; + const NSAlignAllEdgesNearest = NSAlignmentOptions::NSAlignMinXNearest.bits + | NSAlignmentOptions::NSAlignMaxXNearest.bits + | NSAlignmentOptions::NSAlignMinYNearest.bits + | NSAlignmentOptions::NSAlignMaxYNearest.bits; + } +} + +#[repr(u64)] +#[derive(Clone, Copy, Debug, PartialEq)] +pub enum NSOpenGLPixelFormatAttribute { + NSOpenGLPFAAllRenderers = 1, + NSOpenGLPFATripleBuffer = 3, + NSOpenGLPFADoubleBuffer = 5, + NSOpenGLPFAStereo = 6, + NSOpenGLPFAAuxBuffers = 7, + NSOpenGLPFAColorSize = 8, + NSOpenGLPFAAlphaSize = 11, + NSOpenGLPFADepthSize = 12, + NSOpenGLPFAStencilSize = 13, + NSOpenGLPFAAccumSize = 14, + NSOpenGLPFAMinimumPolicy = 51, + NSOpenGLPFAMaximumPolicy = 52, + NSOpenGLPFAOffScreen = 53, + NSOpenGLPFAFullScreen = 54, + NSOpenGLPFASampleBuffers = 55, + NSOpenGLPFASamples = 56, + NSOpenGLPFAAuxDepthStencil = 57, + NSOpenGLPFAColorFloat = 58, + NSOpenGLPFAMultisample = 59, + NSOpenGLPFASupersample = 60, + NSOpenGLPFASampleAlpha = 61, + NSOpenGLPFARendererID = 70, + NSOpenGLPFASingleRenderer = 71, + NSOpenGLPFANoRecovery = 72, + NSOpenGLPFAAccelerated = 73, + NSOpenGLPFAClosestPolicy = 74, + NSOpenGLPFARobust = 75, + NSOpenGLPFABackingStore = 76, + NSOpenGLPFAMPSafe = 78, + NSOpenGLPFAWindow = 80, + NSOpenGLPFAMultiScreen = 81, + NSOpenGLPFACompliant = 83, + NSOpenGLPFAScreenMask = 84, + NSOpenGLPFAPixelBuffer = 90, + NSOpenGLPFARemotePixelBuffer = 91, + NSOpenGLPFAAllowOfflineRenderers = 96, + NSOpenGLPFAAcceleratedCompute = 97, + NSOpenGLPFAOpenGLProfile = 99, + NSOpenGLPFAVirtualScreenCount = 128, +} + +#[repr(u64)] +#[allow(non_camel_case_types)] +#[derive(Clone, Copy, Debug, PartialEq)] +pub enum NSOpenGLPFAOpenGLProfiles { + NSOpenGLProfileVersionLegacy = 0x1000, + NSOpenGLProfileVersion3_2Core = 0x3200, + NSOpenGLProfileVersion4_1Core = 0x4100, +} + +#[repr(u64)] +#[derive(Clone, Copy, Debug, PartialEq)] +pub enum NSOpenGLContextParameter { + NSOpenGLCPSwapInterval = 222, + NSOpenGLCPSurfaceOrder = 235, + NSOpenGLCPSurfaceOpacity = 236, + NSOpenGLCPSurfaceBackingSize = 304, + NSOpenGLCPReclaimResources = 308, + NSOpenGLCPCurrentRendererID = 309, + NSOpenGLCPGPUVertexProcessing = 310, + NSOpenGLCPGPUFragmentProcessing = 311, + NSOpenGLCPHasDrawable = 314, + NSOpenGLCPMPSwapsInFlight = 315, +} + +#[repr(u64)] +#[derive(Clone, Copy, Debug, PartialEq)] +pub enum NSWindowButton { + 
NSWindowCloseButton = 0, + NSWindowMiniaturizeButton = 1, + NSWindowZoomButton = 2, + NSWindowToolbarButton = 3, + NSWindowDocumentIconButton = 4, + NSWindowDocumentVersionsButton = 6, + NSWindowFullScreenButton = 7, +} + +#[repr(u64)] +#[derive(Clone, Copy, Debug, PartialEq)] +pub enum NSBezelStyle { + NSRoundedBezelStyle = 1, + NSRegularSquareBezelStyle = 2, + NSDisclosureBezelStyle = 5, + NSShadowlessSquareBezelStyle = 6, + NSCircularBezelStyle = 7, + NSTexturedSquareBezelStyle = 8, + NSHelpButtonBezelStyle = 9, + NSSmallSquareBezelStyle = 10, + NSTexturedRoundedBezelStyle = 11, + NSRoundRectBezelStyle = 12, + NSRecessedBezelStyle = 13, + NSRoundedDisclosureBezelStyle = 14, +} + +#[repr(u64)] +#[derive(Clone, Copy, Debug, PartialEq)] +pub enum NSRequestUserAttentionType { + NSCriticalRequest = 0, + NSInformationalRequest = 10, +} + +pub static NSMainMenuWindowLevel: i32 = 24; + +pub trait NSApplication: Sized { + unsafe fn sharedApplication(_: Self) -> id { + msg_send![class!(NSApplication), sharedApplication] + } + + unsafe fn mainMenu(self) -> id; + unsafe fn setActivationPolicy_(self, policy: NSApplicationActivationPolicy) -> BOOL; + unsafe fn setPresentationOptions_(self, options: NSApplicationPresentationOptions) -> BOOL; + unsafe fn presentationOptions_(self) -> NSApplicationPresentationOptions; + unsafe fn setMainMenu_(self, menu: id); + unsafe fn setServicesMenu_(self, menu: id); + unsafe fn setWindowsMenu_(self, menu: id); + unsafe fn activateIgnoringOtherApps_(self, ignore: BOOL); + unsafe fn run(self); + unsafe fn finishLaunching(self); + unsafe fn nextEventMatchingMask_untilDate_inMode_dequeue_(self, + mask: NSUInteger, + expiration: id, + in_mode: id, + dequeue: BOOL) -> id; + unsafe fn sendEvent_(self, an_event: id); + unsafe fn postEvent_atStart_(self, anEvent: id, flag: BOOL); + unsafe fn stop_(self, sender: id); + unsafe fn setApplicationIconImage_(self, image: id); + unsafe fn requestUserAttention_(self, requestType: NSRequestUserAttentionType); +} + +impl NSApplication for id { + unsafe fn mainMenu(self) -> id { + msg_send![self, mainMenu] + } + + unsafe fn setActivationPolicy_(self, policy: NSApplicationActivationPolicy) -> BOOL { + msg_send![self, setActivationPolicy:policy as NSInteger] + } + + unsafe fn setPresentationOptions_(self, options: NSApplicationPresentationOptions) -> BOOL { + msg_send![self, setPresentationOptions:options.bits] + } + + unsafe fn presentationOptions_(self) -> NSApplicationPresentationOptions { + let options = msg_send![self, presentationOptions]; + return NSApplicationPresentationOptions::from_bits(options).unwrap(); + } + + unsafe fn setMainMenu_(self, menu: id) { + msg_send![self, setMainMenu:menu] + } + + unsafe fn setServicesMenu_(self, menu: id) { + msg_send![self, setServicesMenu:menu] + } + + unsafe fn setWindowsMenu_(self, menu: id) { + msg_send![self, setWindowsMenu:menu] + } + + unsafe fn activateIgnoringOtherApps_(self, ignore: BOOL) { + msg_send![self, activateIgnoringOtherApps:ignore] + } + + unsafe fn run(self) { + msg_send![self, run] + } + + unsafe fn finishLaunching(self) { + msg_send![self, finishLaunching] + } + + unsafe fn nextEventMatchingMask_untilDate_inMode_dequeue_(self, + mask: NSUInteger, + expiration: id, + in_mode: id, + dequeue: BOOL) -> id { + msg_send![self, nextEventMatchingMask:mask + untilDate:expiration + inMode:in_mode + dequeue:dequeue] + } + + unsafe fn sendEvent_(self, an_event: id) { + msg_send![self, sendEvent:an_event] + } + + unsafe fn postEvent_atStart_(self, anEvent: id, flag: BOOL) { + 
msg_send![self, postEvent:anEvent atStart:flag] + } + + unsafe fn stop_(self, sender: id) { + msg_send![self, stop:sender] + } + + unsafe fn setApplicationIconImage_(self, icon: id) { + msg_send![self, setApplicationIconImage:icon] + } + + unsafe fn requestUserAttention_(self, requestType: NSRequestUserAttentionType) { + msg_send![self, requestUserAttention:requestType] + } +} + +pub trait NSRunningApplication: Sized { + unsafe fn currentApplication(_: Self) -> id { + msg_send![class!(NSRunningApplication), currentApplication] + } + unsafe fn activateWithOptions_(self, options: NSApplicationActivationOptions) -> BOOL; +} + +impl NSRunningApplication for id { + unsafe fn activateWithOptions_(self, options: NSApplicationActivationOptions) -> BOOL { + msg_send![self, activateWithOptions:options as NSUInteger] + } +} + +pub trait NSPasteboard: Sized { + unsafe fn generalPasteboard(_: Self) -> id { + msg_send![class!(NSPasteboard), generalPasteboard] + } + + unsafe fn pasteboardByFilteringData_ofType(_: Self, data: id, _type: id) -> id { + msg_send![class!(NSPasteboard), pasteboardByFilteringData:data ofType:_type] + } + + unsafe fn pasteboardByFilteringFile(_: Self, file: id) -> id { + msg_send![class!(NSPasteboard), pasteboardByFilteringFile:file] + } + + unsafe fn pasteboardByFilteringTypesInPasteboard(_: Self, pboard: id) -> id { + msg_send![class!(NSPasteboard), pasteboardByFilteringTypesInPasteboard:pboard] + } + + unsafe fn pasteboardWithName(_: Self, name: id) -> id { + msg_send![class!(NSPasteboard), pasteboardWithName:name] + } + + unsafe fn pasteboardWithUniqueName(_: Self) -> id { + msg_send![class!(NSPasteboard), pasteboardWithUniqueName] + } + + unsafe fn releaseGlobally(self); + + unsafe fn clearContents(self) -> NSInteger; + unsafe fn writeObjects(self, objects: id) -> BOOL; + unsafe fn setData_forType(self, data: id, dataType: id) -> BOOL; + unsafe fn setPropertyList_forType(self, plist: id, dataType: id) -> BOOL; + unsafe fn setString_forType(self, string: id, dataType: id) -> BOOL; + + unsafe fn readObjectsForClasses_options(self, classArray: id, options: id) -> id; + unsafe fn pasteboardItems(self) -> id; + unsafe fn indexOfPasteboardItem(self, pasteboardItem: id) -> NSInteger; + unsafe fn dataForType(self, dataType: id) -> id; + unsafe fn propertyListForType(self, dataType: id) -> id; + unsafe fn stringForType(self, dataType: id) -> id; + + unsafe fn availableTypeFromArray(self, types: id) -> id; + unsafe fn canReadItemWithDataConformingToTypes(self, types: id) -> BOOL; + unsafe fn canReadObjectForClasses_options(self, classArray: id, options: id) -> BOOL; + unsafe fn types(self) -> id; + unsafe fn typesFilterableTo(_: Self, _type: id) -> id { + msg_send![class!(NSPasteboard), typesFilterableTo:_type] + } + + unsafe fn name(self) -> id; + unsafe fn changeCount(self) -> NSInteger; + + unsafe fn declareTypes_owner(self, newTypes: id, newOwner: id) -> NSInteger; + unsafe fn addTypes_owner(self, newTypes: id, newOwner: id) -> NSInteger; + unsafe fn writeFileContents(self, filename: id) -> BOOL; + unsafe fn writeFileWrapper(self, wrapper: id) -> BOOL; + + unsafe fn readFileContentsType_toFile(self, _type: id, filename: id) -> id; + unsafe fn readFileWrapper(self) -> id; +} + +impl NSPasteboard for id { + unsafe fn releaseGlobally(self) { + msg_send![self, releaseGlobally]; + } + + unsafe fn clearContents(self) -> NSInteger { + msg_send![self, clearContents] + } + + unsafe fn writeObjects(self, objects: id) -> BOOL { + msg_send![self, writeObjects:objects] + } + + unsafe fn 
setData_forType(self, data: id, dataType: id) -> BOOL { + msg_send![self, setData:data forType:dataType] + } + + unsafe fn setPropertyList_forType(self, plist: id, dataType: id) -> BOOL { + msg_send![self, setPropertyList:plist forType:dataType] + } + + unsafe fn setString_forType(self, string: id, dataType: id) -> BOOL { + msg_send![self, setString:string forType:dataType] + } + + unsafe fn readObjectsForClasses_options(self, classArray: id, options: id) -> id { + msg_send![self, readObjectsForClasses:classArray options:options] + } + + unsafe fn pasteboardItems(self) -> id { + msg_send![self, pasteboardItems] + } + + unsafe fn indexOfPasteboardItem(self, pasteboardItem: id) -> NSInteger { + msg_send![self, indexOfPasteboardItem:pasteboardItem] + } + + unsafe fn dataForType(self, dataType: id) -> id { + msg_send![self, dataForType:dataType] + } + + unsafe fn propertyListForType(self, dataType: id) -> id { + msg_send![self, propertyListForType:dataType] + } + + unsafe fn stringForType(self, dataType: id) -> id { + msg_send![self, stringForType:dataType] + } + + unsafe fn availableTypeFromArray(self, types: id) -> id { + msg_send![self, availableTypeFromArray:types] + } + + unsafe fn canReadItemWithDataConformingToTypes(self, types: id) -> BOOL { + msg_send![self, canReadItemWithDataConformingToTypes:types] + } + + unsafe fn canReadObjectForClasses_options(self, classArray: id, options: id) -> BOOL { + msg_send![self, canReadObjectForClasses:classArray options:options] + } + + unsafe fn types(self) -> id { + msg_send![self, types] + } + + unsafe fn name(self) -> id { + msg_send![self, name] + } + + unsafe fn changeCount(self) -> NSInteger { + msg_send![self, changeCount] + } + + unsafe fn declareTypes_owner(self, newTypes: id, newOwner: id) -> NSInteger { + msg_send![self, declareTypes:newTypes owner:newOwner] + } + + unsafe fn addTypes_owner(self, newTypes: id, newOwner: id) -> NSInteger { + msg_send![self, addTypes:newTypes owner:newOwner] + } + + unsafe fn writeFileContents(self, filename: id) -> BOOL { + msg_send![self, writeFileContents:filename] + } + + unsafe fn writeFileWrapper(self, wrapper: id) -> BOOL { + msg_send![self, writeFileWrapper:wrapper] + } + + unsafe fn readFileContentsType_toFile(self, _type: id, filename: id) -> id { + msg_send![self, readFileContentsType:_type toFile:filename] + } + + unsafe fn readFileWrapper(self) -> id { + msg_send![self, readFileWrapper] + } + +} + +pub trait NSPasteboardItem: Sized { + unsafe fn types(self) -> id; + + unsafe fn setDataProvider_forTypes(self, dataProvider: id, types: id) -> BOOL; + unsafe fn setData_forType(self, data: id, _type: id) -> BOOL; + unsafe fn setString_forType(self, string: id, _type: id) -> BOOL; + unsafe fn setPropertyList_forType(self, propertyList: id, _type: id) -> BOOL; + + unsafe fn dataForType(self, _type: id) -> id; + unsafe fn stringForType(self, _type: id) -> id; + unsafe fn propertyListForType(self, _type: id) -> id; +} + +impl NSPasteboardItem for id { + unsafe fn types(self) -> id { + msg_send![self, types] + } + + unsafe fn setDataProvider_forTypes(self, dataProvider: id, types: id) -> BOOL { + msg_send![self, setDataProvider:dataProvider forTypes:types] + } + + unsafe fn setData_forType(self, data: id, _type: id) -> BOOL { + msg_send![self, setData:data forType:_type] + } + + unsafe fn setString_forType(self, string: id, _type: id) -> BOOL { + msg_send![self, setString:string forType:_type] + } + + unsafe fn setPropertyList_forType(self, propertyList: id, _type: id) -> BOOL { + msg_send![self, 
setPropertyList:propertyList forType:_type] + } + + unsafe fn dataForType(self, _type: id) -> id { + msg_send![self, dataForType:_type] + } + + unsafe fn stringForType(self, _type: id) -> id { + msg_send![self, stringForType:_type] + } + + unsafe fn propertyListForType(self, _type: id) -> id { + msg_send![self, propertyListForType:_type] + } +} + +pub trait NSPasteboardItemDataProvider: Sized { + unsafe fn pasteboard_item_provideDataForType(self, pasteboard: id, item: id, _type: id); + unsafe fn pasteboardFinishedWithDataProvider(self, pasteboard: id); +} + +impl NSPasteboardItemDataProvider for id { + unsafe fn pasteboard_item_provideDataForType(self, pasteboard: id, item: id, _type: id) { + msg_send![self, pasteboard:pasteboard item:item provideDataForType:_type] + } + + unsafe fn pasteboardFinishedWithDataProvider(self, pasteboard: id) { + msg_send![self, pasteboardFinishedWithDataProvider:pasteboard] + } +} + +pub trait NSPasteboardWriting: Sized { + unsafe fn writableTypesForPasteboard(self, pasteboard: id) -> id; + unsafe fn writingOptionsForType_pasteboard(self, _type: id, pasteboard: id) -> NSPasteboardWritingOptions; + + unsafe fn pasteboardPropertyListForType(self, _type: id) -> id; +} + +impl NSPasteboardWriting for id { + unsafe fn writableTypesForPasteboard(self, pasteboard: id) -> id { + msg_send![self, writableTypesForPasteboard:pasteboard] + } + + unsafe fn writingOptionsForType_pasteboard(self, _type: id, pasteboard: id) -> NSPasteboardWritingOptions { + msg_send![self, writingOptionsForType:_type pasteboard:pasteboard] + } + + unsafe fn pasteboardPropertyListForType(self, _type: id) -> id { + msg_send![self, pasteboardPropertyListForType:_type] + } +} + +pub trait NSPasteboardReading: Sized { + unsafe fn initWithPasteboardPropertyList_ofType(self, propertyList: id, _type: id) -> id; + + unsafe fn readableTypesForPasteboard(self, pasteboard: id) -> id; + unsafe fn readingOptionsForType_pasteboard(self, _type: id, pasteboard: id) -> NSPasteboardReadingOptions; +} + +impl NSPasteboardReading for id { + unsafe fn initWithPasteboardPropertyList_ofType(self, propertyList: id, _type: id) -> id { + msg_send![self, initWithPasteboardPropertyList:propertyList ofType:_type] + } + + unsafe fn readableTypesForPasteboard(self, pasteboard: id) -> id { + let class: id = msg_send![self, class]; + msg_send![class, readableTypesForPasteboard:pasteboard] + } + unsafe fn readingOptionsForType_pasteboard(self, _type: id, pasteboard: id) -> NSPasteboardReadingOptions { + let class: id = msg_send![self, class]; + msg_send![class, readingOptionsForType:_type pasteboard:pasteboard] + } +} + +#[repr(u64)] +#[derive(Clone, Copy, Debug, PartialEq)] +pub enum NSPasteboardReadingOptions { + NSPasteboardReadingAsData = 0, + NSPasteboardReadingAsString = 1 << 0, + NSPasteboardReadingAsPropertyList = 1 << 1, + NSPasteboardReadingAsKeyedArchive = 1 << 2 +} + +#[repr(u64)] +#[derive(Clone, Copy, Debug, PartialEq)] +pub enum NSPasteboardWritingOptions { + NSPasteboardWritingPromised = 1 << 9, +} + +pub trait NSMenu: Sized { + unsafe fn alloc(_: Self) -> id { + msg_send![class!(NSMenu), alloc] + } + + unsafe fn new(_: Self) -> id { + msg_send![class!(NSMenu), new] + } + + unsafe fn initWithTitle_(self, title: id ) -> id; + unsafe fn setAutoenablesItems(self, state: BOOL); + + unsafe fn addItem_(self, menu_item: id); + unsafe fn addItemWithTitle_action_keyEquivalent(self, title: id, action: SEL, key: id) -> id; + unsafe fn itemAtIndex_(self, index: NSInteger) -> id; +} + +impl NSMenu for id { + unsafe fn 
initWithTitle_(self, title: id ) -> id { + msg_send![self, initWithTitle:title] + } + + unsafe fn setAutoenablesItems(self, state: BOOL) { + msg_send![self, setAutoenablesItems: state] + } + + unsafe fn addItem_(self, menu_item: id) { + msg_send![self, addItem:menu_item] + } + + unsafe fn addItemWithTitle_action_keyEquivalent(self, title: id, action: SEL, key: id) -> id { + msg_send![self, addItemWithTitle:title action:action keyEquivalent:key] + } + + unsafe fn itemAtIndex_(self, index: NSInteger) -> id { + msg_send![self, itemAtIndex:index] + } +} + +pub trait NSMenuItem: Sized { + unsafe fn alloc(_: Self) -> id { + msg_send![class!(NSMenuItem), alloc] + } + + unsafe fn new(_: Self) -> id { + msg_send![class!(NSMenuItem), new] + } + + unsafe fn separatorItem(_: Self) -> id { + msg_send![class!(NSMenuItem), separatorItem] + } + + unsafe fn initWithTitle_action_keyEquivalent_(self, title: id, action: SEL, key: id) -> id; + unsafe fn setKeyEquivalentModifierMask_(self, mask: NSEventModifierFlags); + unsafe fn setSubmenu_(self, submenu: id); +} + +impl NSMenuItem for id { + unsafe fn initWithTitle_action_keyEquivalent_(self, title: id, action: SEL, key: id) -> id { + msg_send![self, initWithTitle:title action:action keyEquivalent:key] + } + + unsafe fn setKeyEquivalentModifierMask_(self, mask: NSEventModifierFlags) { + msg_send![self, setKeyEquivalentModifierMask:mask] + } + + unsafe fn setSubmenu_(self, submenu: id) { + msg_send![self, setSubmenu:submenu] + } +} + +pub type NSWindowDepth = libc::c_int; + +bitflags! { + pub struct NSWindowCollectionBehavior: NSUInteger { + const NSWindowCollectionBehaviorDefault = 0; + const NSWindowCollectionBehaviorCanJoinAllSpaces = 1 << 0; + const NSWindowCollectionBehaviorMoveToActiveSpace = 1 << 1; + + const NSWindowCollectionBehaviorManaged = 1 << 2; + const NSWindowCollectionBehaviorTransient = 1 << 3; + const NSWindowCollectionBehaviorStationary = 1 << 4; + + const NSWindowCollectionBehaviorParticipatesInCycle = 1 << 5; + const NSWindowCollectionBehaviorIgnoresCycle = 1 << 6; + + const NSWindowCollectionBehaviorFullScreenPrimary = 1 << 7; + const NSWindowCollectionBehaviorFullScreenAuxiliary = 1 << 8; + } +} + +bitflags! 
{ + pub struct NSWindowOcclusionState: NSUInteger { + const NSWindowOcclusionStateVisible = 1 << 1; + } +} + +pub trait NSWindow: Sized { + unsafe fn alloc(_: Self) -> id { + msg_send![class!(NSWindow), alloc] + } + + + unsafe fn initWithContentRect_styleMask_backing_defer_(self, + rect: NSRect, + style: NSWindowStyleMask, + backing: NSBackingStoreType, + defer: BOOL) -> id; + unsafe fn initWithContentRect_styleMask_backing_defer_screen_(self, + rect: NSRect, + style: NSWindowStyleMask, + backing: NSBackingStoreType, + defer: BOOL, + screen: id) -> id; + + + unsafe fn styleMask(self) -> NSWindowStyleMask; + unsafe fn setStyleMask_(self, styleMask: NSWindowStyleMask); + unsafe fn toggleFullScreen_(self, sender: id); + unsafe fn worksWhenModal(self) -> BOOL; + unsafe fn alphaValue(self) -> CGFloat; + unsafe fn setAlphaValue_(self, windowAlpha: CGFloat); + unsafe fn backgroundColor(self) -> id; + unsafe fn setBackgroundColor_(self, color: id); + unsafe fn colorSpace(self) -> id; + unsafe fn setColorSpace_(self, colorSpace: id); + unsafe fn contentView(self) -> id; + unsafe fn setContentView_(self, view: id); + unsafe fn canHide(self) -> BOOL; + unsafe fn setCanHide_(self, canHide: BOOL); + unsafe fn hidesOnDeactivate(self) -> BOOL; + unsafe fn setHidesOnDeactivate_(self, hideOnDeactivate: BOOL); + unsafe fn collectionBehavior(self) -> NSWindowCollectionBehavior; + unsafe fn setCollectionBehavior_(self, collectionBehavior: NSWindowCollectionBehavior); + unsafe fn setOpaque_(self, opaque: BOOL); + unsafe fn hasShadow(self) -> BOOL; + unsafe fn setHasShadow_(self, hasShadow: BOOL); + unsafe fn invalidateShadow(self); + unsafe fn autorecalculatesContentBorderThicknessForEdge_(self, edge: NSRectEdge) -> BOOL; + unsafe fn setAutorecalculatesContentBorderThickness_forEdge_(self, + autorecalculateContentBorderThickness: BOOL, + edge: NSRectEdge) -> BOOL; + unsafe fn contentBorderThicknessForEdge_(self, edge: NSRectEdge) -> CGFloat; + unsafe fn setContentBorderThickness_forEdge_(self, borderThickness: CGFloat, edge: NSRectEdge); + unsafe fn delegate(self) -> id; + unsafe fn setDelegate_(self, delegate: id); + unsafe fn preventsApplicationTerminationWhenModal(self) -> BOOL; + unsafe fn setPreventsApplicationTerminationWhenModal_(self, flag: BOOL); + + + + + unsafe fn contentRectForFrameRect_styleMask_(self, windowFrame: NSRect, windowStyle: NSWindowStyleMask) -> NSRect; + unsafe fn frameRectForContentRect_styleMask_(self, windowContentRect: NSRect, windowStyle: NSWindowStyleMask) -> NSRect; + unsafe fn minFrameWidthWithTitle_styleMask_(self, windowTitle: id, windowStyle: NSWindowStyleMask) -> CGFloat; + unsafe fn contentRectForFrameRect_(self, windowFrame: NSRect) -> NSRect; + unsafe fn frameRectForContentRect_(self, windowContent: NSRect) -> NSRect; + + + unsafe fn drawers(self) -> id; + unsafe fn windowController(self) -> id; + unsafe fn setWindowController_(self, windowController: id); + + + + + unsafe fn frame(self) -> NSRect; + unsafe fn setFrameOrigin_(self, point: NSPoint); + unsafe fn setFrameTopLeftPoint_(self, point: NSPoint); + unsafe fn constrainFrameRect_toScreen_(self, frameRect: NSRect, screen: id); + unsafe fn cascadeTopLeftFromPoint_(self, topLeft: NSPoint) -> NSPoint; + unsafe fn setFrame_display_(self, windowFrame: NSRect, display: BOOL); + unsafe fn setFrame_displayViews_(self, windowFrame: NSRect, display: BOOL); + unsafe fn aspectRatio(self) -> NSSize; + unsafe fn setAspectRatio_(self, aspectRatio: NSSize); + unsafe fn minSize(self) -> NSSize; + unsafe fn setMinSize_(self, 
minSize: NSSize); + unsafe fn maxSize(self) -> NSSize; + unsafe fn setMaxSize_(self, maxSize: NSSize); + unsafe fn performZoom_(self, sender: id); + unsafe fn zoom_(self, sender: id); + unsafe fn resizeFlags(self) -> NSInteger; + unsafe fn showsResizeIndicator(self) -> BOOL; + unsafe fn setShowsResizeIndicator_(self, showsResizeIndicator: BOOL); + unsafe fn resizeIncrements(self) -> NSSize; + unsafe fn setResizeIncrements_(self, resizeIncrements: NSSize); + unsafe fn preservesContentDuringLiveResize(self) -> BOOL; + unsafe fn setPreservesContentDuringLiveResize_(self, preservesContentDuringLiveResize: BOOL); + unsafe fn inLiveResize(self) -> BOOL; + + + unsafe fn contentAspectRatio(self) -> NSSize; + unsafe fn setContentAspectRatio_(self, contentAspectRatio: NSSize); + unsafe fn contentMinSize(self) -> NSSize; + unsafe fn setContentMinSize_(self, contentMinSize: NSSize); + unsafe fn contentSize(self) -> NSSize; + unsafe fn setContentSize_(self, contentSize: NSSize); + unsafe fn contentMaxSize(self) -> NSSize; + unsafe fn setContentMaxSize_(self, contentMaxSize: NSSize); + unsafe fn contentResizeIncrements(self) -> NSSize; + unsafe fn setContentResizeIncrements_(self, contentResizeIncrements: NSSize); + + + unsafe fn isVisible(self) -> BOOL; + unsafe fn occlusionState(self) -> NSWindowOcclusionState; + + + unsafe fn orderOut_(self, sender: id); + unsafe fn orderBack_(self, sender: id); + unsafe fn orderFront_(self, sender: id); + unsafe fn orderFrontRegardless(self); + unsafe fn orderFrontWindow_relativeTo_(self, orderingMode: NSWindowOrderingMode, otherWindowNumber: NSInteger); + unsafe fn level(self) -> NSInteger; + unsafe fn setLevel_(self, level: NSInteger); + + + unsafe fn canBecomeKeyWindow(self) -> BOOL; + unsafe fn makeKeyWindow(self); + unsafe fn makeKeyAndOrderFront_(self, sender: id); + + + + + unsafe fn canBecomeMainWindow(self) -> BOOL; + unsafe fn makeMainWindow(self); + + + + + unsafe fn toolbar(self) -> id ; + unsafe fn setToolbar_(self, toolbar: id ); + unsafe fn runToolbarCustomizationPalette(self, sender: id); + + + + + + + + + + unsafe fn standardWindowButton_(self, windowButtonKind: NSWindowButton) -> id; + + + unsafe fn allowsAutomaticWindowTabbing(_: Self) -> BOOL; + unsafe fn setAllowsAutomaticWindowTabbing_(_: Self, allowsAutomaticWindowTabbing: BOOL); + unsafe fn tabbingIdentifier(self) -> id; + unsafe fn tabbingMode(self) -> NSWindowTabbingMode; + unsafe fn setTabbingMode_(self, tabbingMode: NSWindowTabbingMode); + unsafe fn addTabbedWindow_ordered_(self, window: id, ordering_mode: NSWindowOrderingMode); + unsafe fn toggleTabBar_(self, sender: id); + + + + + + unsafe fn initialFirstResponder(self) -> id; + unsafe fn firstResponder(self) -> id; + unsafe fn setInitialFirstResponder_(self, responder: id); + unsafe fn makeFirstResponder_(self, responder: id) -> BOOL; + + + + + unsafe fn keyDown_(self, event: id); + + + unsafe fn acceptsMouseMovedEvents(self) -> BOOL; + unsafe fn ignoresMouseEvents(self) -> BOOL; + unsafe fn setIgnoresMouseEvents_(self, ignoreMouseEvents: BOOL); + unsafe fn mouseLocationOutsideOfEventStream(self) -> NSPoint; + unsafe fn setAcceptsMouseMovedEvents_(self, acceptMouseMovedEvents: BOOL); + unsafe fn windowNumberAtPoint_belowWindowWithWindowNumber_(self, + point: NSPoint, + windowNumber: NSInteger) -> NSInteger; + + + + + + + + + + unsafe fn backingScaleFactor(self) -> CGFloat; + unsafe fn backingAlignedRect_options_(self, rect: NSRect, options: NSAlignmentOptions) -> NSRect; + unsafe fn convertRectFromBacking_(self, rect: NSRect) -> 
NSRect; + unsafe fn convertRectToBacking_(self, rect: NSRect) -> NSRect; + unsafe fn convertRectToScreen_(self, rect: NSRect) -> NSRect; + unsafe fn convertRectFromScreen_(self, rect: NSRect) -> NSRect; + + + unsafe fn setDocumentEdited_(self, documentEdited: BOOL); + + + unsafe fn title(self) -> id; + unsafe fn setTitle_(self, title: id); + unsafe fn setTitleWithRepresentedFilename_(self, filePath: id); + unsafe fn setTitleVisibility_(self, visibility: NSWindowTitleVisibility); + unsafe fn setTitlebarAppearsTransparent_(self, transparent: BOOL); + unsafe fn representedFilename(self) -> id; + unsafe fn setRepresentedFilename_(self, filePath: id); + unsafe fn representedURL(self) -> id; + unsafe fn setRepresentedURL_(self, representedURL: id); + + + unsafe fn screen(self) -> id; + unsafe fn deepestScreen(self) -> id; + unsafe fn displaysWhenScreenProfileChanges(self) -> BOOL; + unsafe fn setDisplaysWhenScreenProfileChanges_(self, displaysWhenScreenProfileChanges: BOOL); + + + unsafe fn setMovableByWindowBackground_(self, movableByWindowBackground: BOOL); + unsafe fn setMovable_(self, movable: BOOL); + unsafe fn center(self); + + + unsafe fn performClose_(self, sender: id); + unsafe fn close(self); + unsafe fn setReleasedWhenClosed_(self, releasedWhenClosed: BOOL); + + + unsafe fn performMiniaturize_(self, sender: id); + unsafe fn miniaturize_(self, sender: id); + unsafe fn deminiaturize_(self, sender: id); + unsafe fn miniwindowImage(self) -> id; + unsafe fn setMiniwindowImage_(self, miniwindowImage: id); + unsafe fn miniwindowTitle(self) -> id; + unsafe fn setMiniwindowTitle_(self, miniwindowTitle: id); + + + + + + + + +} + +impl NSWindow for id { + + + unsafe fn initWithContentRect_styleMask_backing_defer_(self, + rect: NSRect, + style: NSWindowStyleMask, + backing: NSBackingStoreType, + defer: BOOL) -> id { + msg_send![self, initWithContentRect:rect + styleMask:style.bits + backing:backing as NSUInteger + defer:defer] + } + + unsafe fn initWithContentRect_styleMask_backing_defer_screen_(self, + rect: NSRect, + style: NSWindowStyleMask, + backing: NSBackingStoreType, + defer: BOOL, + screen: id) -> id { + msg_send![self, initWithContentRect:rect + styleMask:style.bits + backing:backing as NSUInteger + defer:defer + screen:screen] + } + + + + unsafe fn styleMask(self) -> NSWindowStyleMask { + NSWindowStyleMask::from_bits_truncate(msg_send![self, styleMask]) + } + + unsafe fn setStyleMask_(self, styleMask: NSWindowStyleMask) { + msg_send![self, setStyleMask:styleMask.bits] + } + + unsafe fn toggleFullScreen_(self, sender: id) { + msg_send![self, toggleFullScreen:sender] + } + + unsafe fn worksWhenModal(self) -> BOOL { + msg_send![self, worksWhenModal] + } + + unsafe fn alphaValue(self) -> CGFloat { + msg_send![self, alphaValue] + } + + unsafe fn setAlphaValue_(self, windowAlpha: CGFloat) { + msg_send![self, setAlphaValue:windowAlpha] + } + + unsafe fn backgroundColor(self) -> id { + msg_send![self, backgroundColor] + } + + unsafe fn setBackgroundColor_(self, color: id) { + msg_send![self, setBackgroundColor:color] + } + + unsafe fn colorSpace(self) -> id { + msg_send![self, colorSpace] + } + + unsafe fn setColorSpace_(self, colorSpace: id) { + msg_send![self, setColorSpace:colorSpace] + } + + unsafe fn contentView(self) -> id { + msg_send![self, contentView] + } + + unsafe fn setContentView_(self, view: id) { + msg_send![self, setContentView:view] + } + + unsafe fn canHide(self) -> BOOL { + msg_send![self, canHide] + } + + unsafe fn setCanHide_(self, canHide: BOOL) { + msg_send![self, 
setCanHide:canHide] + } + + unsafe fn hidesOnDeactivate(self) -> BOOL { + msg_send![self, hidesOnDeactivate] + } + + unsafe fn setHidesOnDeactivate_(self, hideOnDeactivate: BOOL) { + msg_send![self, setHidesOnDeactivate:hideOnDeactivate] + } + + unsafe fn collectionBehavior(self) -> NSWindowCollectionBehavior { + msg_send![self, collectionBehavior] + } + + unsafe fn setCollectionBehavior_(self, collectionBehavior: NSWindowCollectionBehavior) { + msg_send![self, setCollectionBehavior:collectionBehavior] + } + + unsafe fn setOpaque_(self, opaque: BOOL) { + msg_send![self, setOpaque:opaque] + } + + unsafe fn hasShadow(self) -> BOOL { + msg_send![self, hasShadow] + } + + unsafe fn setHasShadow_(self, hasShadow: BOOL) { + msg_send![self, setHasShadow:hasShadow] + } + + unsafe fn invalidateShadow(self) { + msg_send![self, invalidateShadow] + } + + unsafe fn autorecalculatesContentBorderThicknessForEdge_(self, edge: NSRectEdge) -> BOOL { + msg_send![self, autorecalculatesContentBorderThicknessForEdge:edge] + } + + unsafe fn setAutorecalculatesContentBorderThickness_forEdge_(self, + autorecalculateContentBorderThickness: BOOL, + edge: NSRectEdge) -> BOOL { + msg_send![self, setAutorecalculatesContentBorderThickness: + autorecalculateContentBorderThickness forEdge:edge] + } + + unsafe fn contentBorderThicknessForEdge_(self, edge: NSRectEdge) -> CGFloat { + msg_send![self, contentBorderThicknessForEdge:edge] + } + + unsafe fn setContentBorderThickness_forEdge_(self, borderThickness: CGFloat, edge: NSRectEdge) { + msg_send![self, setContentBorderThickness:borderThickness forEdge:edge] + } + + unsafe fn delegate(self) -> id { + msg_send![self, delegate] + } + + unsafe fn setDelegate_(self, delegate: id) { + msg_send![self, setDelegate:delegate] + } + + unsafe fn preventsApplicationTerminationWhenModal(self) -> BOOL { + msg_send![self, preventsApplicationTerminationWhenModal] + } + + unsafe fn setPreventsApplicationTerminationWhenModal_(self, flag: BOOL) { + msg_send![self, setPreventsApplicationTerminationWhenModal:flag] + } + + + + + + unsafe fn contentRectForFrameRect_styleMask_(self, windowFrame: NSRect, windowStyle: NSWindowStyleMask) -> NSRect { + msg_send![self, contentRectForFrameRect:windowFrame styleMask:windowStyle.bits] + } + + unsafe fn frameRectForContentRect_styleMask_(self, windowContentRect: NSRect, windowStyle: NSWindowStyleMask) -> NSRect { + msg_send![self, frameRectForContentRect:windowContentRect styleMask:windowStyle.bits] + } + + unsafe fn minFrameWidthWithTitle_styleMask_(self, windowTitle: id, windowStyle: NSWindowStyleMask) -> CGFloat { + msg_send![self, minFrameWidthWithTitle:windowTitle styleMask:windowStyle.bits] + } + + unsafe fn contentRectForFrameRect_(self, windowFrame: NSRect) -> NSRect { + msg_send![self, contentRectForFrameRect:windowFrame] + } + + unsafe fn frameRectForContentRect_(self, windowContent: NSRect) -> NSRect { + msg_send![self, frameRectForContentRect:windowContent] + } + + + + unsafe fn drawers(self) -> id { + msg_send![self, drawers] + } + + unsafe fn windowController(self) -> id { + msg_send![self, windowController] + } + + unsafe fn setWindowController_(self, windowController: id) { + msg_send![self, setWindowController:windowController] + } + + + + + + unsafe fn frame(self) -> NSRect { + msg_send![self, frame] + } + + unsafe fn setFrameOrigin_(self, point: NSPoint) { + msg_send![self, setFrameOrigin:point] + } + + unsafe fn setFrameTopLeftPoint_(self, point: NSPoint) { + msg_send![self, setFrameTopLeftPoint:point] + } + + unsafe fn 
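The two initializers at the top of this impl are the usual entry point for window creation; the style mask and backing-store enums they take are defined elsewhere in this module. A minimal sketch, again assuming the module is consumed as `cocoa::appkit` (`open_window` is a hypothetical helper):

use cocoa::appkit::{NSBackingStoreType, NSWindow, NSWindowStyleMask};
use cocoa::base::{id, nil, NO};
use cocoa::foundation::{NSPoint, NSRect, NSSize};

// Hypothetical helper: creates, centers, and shows a titled resizable window.
unsafe fn open_window() -> id {
    let style = NSWindowStyleMask::NSTitledWindowMask
        | NSWindowStyleMask::NSClosableWindowMask
        | NSWindowStyleMask::NSResizableWindowMask;
    let frame = NSRect::new(NSPoint::new(0.0, 0.0), NSSize::new(800.0, 600.0));
    let window = NSWindow::alloc(nil).initWithContentRect_styleMask_backing_defer_(
        frame,
        style,
        NSBackingStoreType::NSBackingStoreBuffered,
        NO, // create the backing store up front rather than deferring it
    );
    window.center();
    window.makeKeyAndOrderFront_(nil);
    window
}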
constrainFrameRect_toScreen_(self, frameRect: NSRect, screen: id) { + msg_send![self, constrainFrameRect:frameRect toScreen:screen] + } + + unsafe fn cascadeTopLeftFromPoint_(self, topLeft: NSPoint) -> NSPoint { + msg_send![self, cascadeTopLeftFromPoint:topLeft] + } + + unsafe fn setFrame_display_(self, windowFrame: NSRect, display: BOOL) { + msg_send![self, setFrame:windowFrame display:display] + } + + unsafe fn setFrame_displayViews_(self, windowFrame: NSRect, display: BOOL) { + msg_send![self, setFrame:windowFrame displayViews:display] + } + + unsafe fn aspectRatio(self) -> NSSize { + msg_send![self, aspectRatio] + } + + unsafe fn setAspectRatio_(self, aspectRatio: NSSize) { + msg_send![self, setAspectRatio:aspectRatio] + } + + unsafe fn minSize(self) -> NSSize { + msg_send![self, minSize] + } + + unsafe fn setMinSize_(self, minSize: NSSize) { + msg_send![self, setMinSize:minSize] + } + + unsafe fn maxSize(self) -> NSSize { + msg_send![self, maxSize] + } + + unsafe fn setMaxSize_(self, maxSize: NSSize) { + msg_send![self, setMaxSize:maxSize] + } + + unsafe fn performZoom_(self, sender: id) { + msg_send![self, performZoom:sender] + } + + unsafe fn zoom_(self, sender: id) { + msg_send![self, zoom:sender] + } + + unsafe fn resizeFlags(self) -> NSInteger { + msg_send![self, resizeFlags] + } + + unsafe fn showsResizeIndicator(self) -> BOOL { + msg_send![self, showsResizeIndicator] + } + + unsafe fn setShowsResizeIndicator_(self, showsResizeIndicator: BOOL) { + msg_send![self, setShowsResizeIndicator:showsResizeIndicator] + } + + unsafe fn resizeIncrements(self) -> NSSize { + msg_send![self, resizeIncrements] + } + + unsafe fn setResizeIncrements_(self, resizeIncrements: NSSize) { + msg_send![self, setResizeIncrements:resizeIncrements] + } + + unsafe fn preservesContentDuringLiveResize(self) -> BOOL { + msg_send![self, preservesContentDuringLiveResize] + } + + unsafe fn setPreservesContentDuringLiveResize_(self, preservesContentDuringLiveResize: BOOL) { + msg_send![self, setPreservesContentDuringLiveResize:preservesContentDuringLiveResize] + } + + unsafe fn inLiveResize(self) -> BOOL { + msg_send![self, inLiveResize] + } + + + + unsafe fn contentAspectRatio(self) -> NSSize { + msg_send![self, contentAspectRatio] + } + + unsafe fn setContentAspectRatio_(self, contentAspectRatio: NSSize) { + msg_send![self, setContentAspectRatio:contentAspectRatio] + } + + unsafe fn contentMinSize(self) -> NSSize { + msg_send![self, contentMinSize] + } + + unsafe fn setContentMinSize_(self, contentMinSize: NSSize) { + msg_send![self, setContentMinSize:contentMinSize] + } + + unsafe fn contentSize(self) -> NSSize { + msg_send![self, contentSize] + } + + unsafe fn setContentSize_(self, contentSize: NSSize) { + msg_send![self, setContentSize:contentSize] + } + + unsafe fn contentMaxSize(self) -> NSSize { + msg_send![self, contentMaxSize] + } + + unsafe fn setContentMaxSize_(self, contentMaxSize: NSSize) { + msg_send![self, setContentMaxSize:contentMaxSize] + } + + unsafe fn contentResizeIncrements(self) -> NSSize { + msg_send![self, contentResizeIncrements] + } + + unsafe fn setContentResizeIncrements_(self, contentResizeIncrements: NSSize) { + msg_send![self, setContentResizeIncrements:contentResizeIncrements] + } + + + + unsafe fn isVisible(self) -> BOOL { + msg_send![self, isVisible] + } + + unsafe fn occlusionState(self) -> NSWindowOcclusionState { + msg_send![self, occlusionState] + } + + + + unsafe fn orderOut_(self, sender: id) { + msg_send![self, orderOut:sender] + } + + unsafe fn orderBack_(self, sender: 
id) { + msg_send![self, orderBack:sender] + } + + unsafe fn orderFront_(self, sender: id) { + msg_send![self, orderFront:sender] + } + + unsafe fn orderFrontRegardless(self) { + msg_send![self, orderFrontRegardless] + } + + unsafe fn orderFrontWindow_relativeTo_(self, orderingMode: NSWindowOrderingMode, otherWindowNumber: NSInteger) { + msg_send![self, orderWindow:orderingMode relativeTo:otherWindowNumber] + } + + unsafe fn level(self) -> NSInteger { + msg_send![self, level] + } + + unsafe fn setLevel_(self, level: NSInteger) { + msg_send![self, setLevel:level] + } + + + + unsafe fn canBecomeKeyWindow(self) -> BOOL { + msg_send![self, canBecomeKeyWindow] + } + + unsafe fn makeKeyWindow(self) { + msg_send![self, makeKeyWindow] + } + + unsafe fn makeKeyAndOrderFront_(self, sender: id) { + msg_send![self, makeKeyAndOrderFront:sender] + } + + + + unsafe fn canBecomeMainWindow(self) -> BOOL { + msg_send![self, canBecomeMainWindow] + } + + unsafe fn makeMainWindow(self) { + msg_send![self, makeMainWindow] + } + + + + unsafe fn toolbar(self) -> id { + msg_send![self, toolbar] + } + + unsafe fn setToolbar_(self, toolbar: id ) { + msg_send![self, setToolbar:toolbar] + } + + unsafe fn runToolbarCustomizationPalette(self, sender: id) { + msg_send![self, runToolbarCustomizationPalette:sender] + } + + + + + + + + + + + unsafe fn standardWindowButton_(self, windowButtonKind: NSWindowButton) -> id { + msg_send![self, standardWindowButton:windowButtonKind] + } + + + unsafe fn allowsAutomaticWindowTabbing(_: Self) -> BOOL { + msg_send![class!(NSWindow), allowsAutomaticWindowTabbing] + } + + unsafe fn setAllowsAutomaticWindowTabbing_(_: Self, allowsAutomaticWindowTabbing: BOOL) { + msg_send![class!(NSWindow), setAllowsAutomaticWindowTabbing:allowsAutomaticWindowTabbing] + } + + unsafe fn tabbingIdentifier(self) -> id { + msg_send![self, tabbingIdentifier] + } + + unsafe fn tabbingMode(self) -> NSWindowTabbingMode { + msg_send![self, tabbingMode] + } + + unsafe fn setTabbingMode_(self, tabbingMode: NSWindowTabbingMode) { + msg_send![self, setTabbingMode:tabbingMode] + } + + unsafe fn addTabbedWindow_ordered_(self, window: id, ordering_mode: NSWindowOrderingMode) { + msg_send![self, addTabbedWindow:window ordered:ordering_mode] + } + + unsafe fn toggleTabBar_(self, sender: id) { + msg_send![self, toggleTabBar:sender] + } + + + + + + unsafe fn initialFirstResponder(self) -> id { + msg_send![self, initialFirstResponder] + } + + unsafe fn firstResponder(self) -> id { + msg_send![self, firstResponder] + } + + unsafe fn setInitialFirstResponder_(self, responder: id) { + msg_send![self, setInitialFirstResponder:responder] + } + + unsafe fn makeFirstResponder_(self, responder: id) -> BOOL { + msg_send![self, makeFirstResponder:responder] + } + + + + + + unsafe fn keyDown_(self, event: id) { + msg_send![self, keyDown:event] + } + + + + unsafe fn acceptsMouseMovedEvents(self) -> BOOL { + msg_send![self, acceptsMouseMovedEvents] + } + + unsafe fn ignoresMouseEvents(self) -> BOOL { + msg_send![self, ignoresMouseEvents] + } + + unsafe fn setIgnoresMouseEvents_(self, ignoreMouseEvents: BOOL) { + msg_send![self, setIgnoresMouseEvents:ignoreMouseEvents] + } + + unsafe fn mouseLocationOutsideOfEventStream(self) -> NSPoint { + msg_send![self, mouseLocationOutsideOfEventStream] + } + + unsafe fn setAcceptsMouseMovedEvents_(self, acceptMouseMovedEvents: BOOL) { + msg_send![self, setAcceptsMouseMovedEvents:acceptMouseMovedEvents] + } + + unsafe fn windowNumberAtPoint_belowWindowWithWindowNumber_(self, + point: NSPoint,
+ windowNumber: NSInteger) -> NSInteger { + msg_send![self, windowNumberAtPoint:point belowWindowWithWindowNumber:windowNumber] + } + + + + unsafe fn backingScaleFactor(self) -> CGFloat { + msg_send![self, backingScaleFactor] + } + + unsafe fn backingAlignedRect_options_(self, rect: NSRect, options: NSAlignmentOptions) -> NSRect { + msg_send![self, backingAlignedRect:rect options:options] + } + + unsafe fn convertRectFromBacking_(self, rect: NSRect) -> NSRect { + msg_send![self, convertRectFromBacking:rect] + } + + unsafe fn convertRectToBacking_(self, rect: NSRect) -> NSRect { + msg_send![self, convertRectToBacking:rect] + } + + unsafe fn convertRectToScreen_(self, rect: NSRect) -> NSRect { + msg_send![self, convertRectToScreen:rect] + } + + unsafe fn convertRectFromScreen_(self, rect: NSRect) -> NSRect { + msg_send![self, convertRectFromScreen:rect] + } + + + + unsafe fn setDocumentEdited_(self, documentEdited: BOOL) { + msg_send![self, setDocumentEdited:documentEdited] + } + + + + unsafe fn title(self) -> id { + msg_send![self, title] + } + + unsafe fn setTitle_(self, title: id) { + msg_send![self, setTitle:title] + } + + unsafe fn setTitleWithRepresentedFilename_(self, filePath: id) { + msg_send![self, setTitleWithRepresentedFilename:filePath] + } + + unsafe fn setTitleVisibility_(self, visibility: NSWindowTitleVisibility) { + msg_send![self, setTitleVisibility:visibility] + } + + unsafe fn setTitlebarAppearsTransparent_(self, transparent: BOOL) { + msg_send![self, setTitlebarAppearsTransparent:transparent] + } + + unsafe fn representedFilename(self) -> id { + msg_send![self, representedFilename] + } + + unsafe fn setRepresentedFilename_(self, filePath: id) { + msg_send![self, setRepresentedFilename:filePath] + } + + unsafe fn representedURL(self) -> id { + msg_send![self, representedURL] + } + + unsafe fn setRepresentedURL_(self, representedURL: id) { + msg_send![self, setRepresentedURL:representedURL] + } + + + + unsafe fn screen(self) -> id { + msg_send![self, screen] + } + + unsafe fn deepestScreen(self) -> id { + msg_send![self, deepestScreen] + } + + unsafe fn displaysWhenScreenProfileChanges(self) -> BOOL { + msg_send![self, displaysWhenScreenProfileChanges] + } + + unsafe fn setDisplaysWhenScreenProfileChanges_(self, displaysWhenScreenProfileChanges: BOOL) { + msg_send![self, setDisplaysWhenScreenProfileChanges:displaysWhenScreenProfileChanges] + } + + + + unsafe fn setMovableByWindowBackground_(self, movableByWindowBackground: BOOL) { + msg_send![self, setMovableByWindowBackground:movableByWindowBackground] + } + + unsafe fn setMovable_(self, movable: BOOL) { + msg_send![self, setMovable:movable] + } + + unsafe fn center(self) { + msg_send![self, center] + } + + + + unsafe fn performClose_(self, sender: id) { + msg_send![self, performClose:sender] + } + + unsafe fn close(self) { + msg_send![self, close] + } + + unsafe fn setReleasedWhenClosed_(self, releasedWhenClosed: BOOL) { + msg_send![self, setReleasedWhenClosed:releasedWhenClosed] + } + + + + unsafe fn performMiniaturize_(self, sender: id) { + msg_send![self, performMiniaturize:sender] + } + + unsafe fn miniaturize_(self, sender: id) { + msg_send![self, miniaturize:sender] + } + + unsafe fn deminiaturize_(self, sender: id) { + msg_send![self, deminiaturize:sender] + } + + unsafe fn miniwindowImage(self) -> id { + msg_send![self, miniwindowImage] + } + + unsafe fn setMiniwindowImage_(self, miniwindowImage: id) { + msg_send![self, setMiniwindowImage:miniwindowImage] + } + + unsafe fn miniwindowTitle(self) -> id { + 
msg_send![self, miniwindowTitle] + } + + unsafe fn setMiniwindowTitle_(self, miniwindowTitle: id) { + msg_send![self, setMiniwindowTitle:miniwindowTitle] + } + + + + + + + + +} + +pub trait NSView: Sized { + unsafe fn alloc(_: Self) -> id { + msg_send![class!(NSView), alloc] + } + + unsafe fn init(self) -> id; + unsafe fn initWithFrame_(self, frameRect: NSRect) -> id; + unsafe fn bounds(self) -> NSRect; + unsafe fn frame(self) -> NSRect; + unsafe fn display_(self); + unsafe fn setWantsBestResolutionOpenGLSurface_(self, flag: BOOL); + unsafe fn convertPoint_fromView_(self, point: NSPoint, view: id) -> NSPoint; + unsafe fn addSubview_(self, view: id); + unsafe fn superview(self) -> id; + unsafe fn removeFromSuperview(self); + unsafe fn setAutoresizingMask_(self, autoresizingMask: NSAutoresizingMaskOptions); + + unsafe fn wantsLayer(self) -> BOOL; + unsafe fn setWantsLayer(self, wantsLayer: BOOL); + unsafe fn layer(self) -> id; + unsafe fn setLayer(self, layer: id); + + unsafe fn widthAnchor(self) -> id; + unsafe fn heightAnchor(self) -> id; + unsafe fn convertRectToBacking(self, rect: NSRect) -> NSRect; +} + +impl NSView for id { + unsafe fn init(self) -> id { + msg_send![self, init] + } + + unsafe fn initWithFrame_(self, frameRect: NSRect) -> id { + msg_send![self, initWithFrame:frameRect] + } + + unsafe fn bounds(self) -> NSRect { + msg_send![self, bounds] + } + + unsafe fn frame(self) -> NSRect { + msg_send![self, frame] + } + + unsafe fn display_(self) { + msg_send![self, display] + } + + unsafe fn setWantsBestResolutionOpenGLSurface_(self, flag: BOOL) { + msg_send![self, setWantsBestResolutionOpenGLSurface:flag] + } + + unsafe fn convertPoint_fromView_(self, point: NSPoint, view: id) -> NSPoint { + msg_send![self, convertPoint:point fromView:view] + } + + unsafe fn addSubview_(self, view: id) { + msg_send![self, addSubview:view] + } + + unsafe fn superview(self) -> id { + msg_send![self, superview] + } + + unsafe fn removeFromSuperview(self) { + msg_send![self, removeFromSuperview] + } + + unsafe fn setAutoresizingMask_(self, autoresizingMask: NSAutoresizingMaskOptions) { + msg_send![self, setAutoresizingMask:autoresizingMask] + } + + unsafe fn wantsLayer(self) -> BOOL { + msg_send![self, wantsLayer] + } + + unsafe fn setWantsLayer(self, wantsLayer: BOOL) { + msg_send![self, setWantsLayer:wantsLayer] + } + + unsafe fn layer(self) -> id { + msg_send![self, layer] + } + + unsafe fn setLayer(self, layer: id) { + msg_send![self, setLayer:layer] + } + + unsafe fn widthAnchor(self) -> id { + msg_send![self, widthAnchor] + } + + unsafe fn heightAnchor(self) -> id { + msg_send![self, heightAnchor] + } + + unsafe fn convertRectToBacking(self, rect: NSRect) -> NSRect { + msg_send![self, convertRectToBacking:rect] + } +} + +pub type NSAutoresizingMaskOptions = u64; + +pub const NSViewNotSizable: u64 = 0; +pub const NSViewMinXMargin: u64 = 1; +pub const NSViewWidthSizable: u64 = 2; +pub const NSViewMaxXMargin: u64 = 4; +pub const NSViewMinYMargin: u64 = 8; +pub const NSViewHeightSizable: u64 = 16; +pub const NSViewMaxYMargin: u64 = 32; + +pub trait NSOpenGLView: Sized { + unsafe fn alloc(_: Self) -> id { + msg_send![class!(NSOpenGLView), alloc] + } + + unsafe fn initWithFrame_pixelFormat_(self, frameRect: NSRect, format: id) -> id; + unsafe fn display_(self); + unsafe fn setOpenGLContext_(self, context: id); + unsafe fn setPixelFormat_(self, pixelformat: id); +} + +impl NSOpenGLView for id { + unsafe fn initWithFrame_pixelFormat_(self, frameRect: NSRect, format: id) -> id { + msg_send![self, 
initWithFrame:frameRect pixelFormat:format] + } + + unsafe fn display_(self) { + msg_send![self, display] + } + + unsafe fn setOpenGLContext_(self, context: id) { + msg_send![self, setOpenGLContext:context] + } + + unsafe fn setPixelFormat_(self, pixelformat: id) { + msg_send![self, setPixelFormat:pixelformat] + } +} + +pub trait NSOpenGLPixelFormat: Sized { + unsafe fn alloc(_: Self) -> id { + msg_send![class!(NSOpenGLPixelFormat), alloc] + } + + + + unsafe fn initWithAttributes_(self, attributes: &[u32]) -> id; + + + + unsafe fn getValues_forAttribute_forVirtualScreen_(self, val: *mut GLint, attrib: NSOpenGLPixelFormatAttribute, screen: GLint); + unsafe fn numberOfVirtualScreens(self) -> GLint; + +} + +impl NSOpenGLPixelFormat for id { + + + unsafe fn initWithAttributes_(self, attributes: &[u32]) -> id { + msg_send![self, initWithAttributes:attributes] + } + + + + unsafe fn getValues_forAttribute_forVirtualScreen_(self, val: *mut GLint, attrib: NSOpenGLPixelFormatAttribute, screen: GLint) { + msg_send![self, getValues:val forAttribute:attrib forVirtualScreen:screen] + } + + unsafe fn numberOfVirtualScreens(self) -> GLint { + msg_send![self, numberOfVirtualScreens] + } +} + +pub trait NSOpenGLContext: Sized { + unsafe fn alloc(_: Self) -> id { + msg_send![class!(NSOpenGLContext), alloc] + } + + + unsafe fn initWithFormat_shareContext_(self, format: id , shareContext: id ) -> id ; + unsafe fn initWithCGLContextObj_(self, context: CGLContextObj) -> id ; + + + unsafe fn clearCurrentContext(_: Self); + unsafe fn currentContext(_: Self) -> id ; + unsafe fn makeCurrentContext(self); + + + unsafe fn setView_(self, view: id ); + unsafe fn view(self) -> id ; + unsafe fn clearDrawable(self); + unsafe fn update(self); + + + unsafe fn flushBuffer(self); + + + unsafe fn setValues_forParameter_(self, vals: *const GLint, param: NSOpenGLContextParameter); + unsafe fn getValues_forParameter_(self, vals: *mut GLint, param: NSOpenGLContextParameter); + + + unsafe fn setCurrentVirtualScreen_(self, screen: GLint); + unsafe fn currentVirtualScreen(self) -> GLint; + + + unsafe fn CGLContextObj(self) -> CGLContextObj; +} + +impl NSOpenGLContext for id { + + + unsafe fn initWithFormat_shareContext_(self, format: id , shareContext: id ) -> id { + msg_send![self, initWithFormat:format shareContext:shareContext] + } + + unsafe fn initWithCGLContextObj_(self, context: CGLContextObj) -> id { + msg_send![self, initWithCGLContextObj:context] + } + + + + unsafe fn clearCurrentContext(_: Self) { + msg_send![class!(NSOpenGLContext), clearCurrentContext] + } + + unsafe fn currentContext(_: Self) -> id { + msg_send![class!(NSOpenGLContext), currentContext] + } + + unsafe fn makeCurrentContext(self) { + msg_send![self, makeCurrentContext] + } + + + + unsafe fn setView_(self, view: id ) { + msg_send![self, setView:view] + } + + unsafe fn view(self) -> id { + msg_send![self, view] + } + + unsafe fn clearDrawable(self) { + msg_send![self, clearDrawable] + } + + unsafe fn update(self) { + msg_send![self, update] + } + + + + unsafe fn flushBuffer(self) { + msg_send![self, flushBuffer] + } + + + + unsafe fn setValues_forParameter_(self, vals: *const GLint, param: NSOpenGLContextParameter) { + msg_send![self, setValues:vals forParameter:param] + } + + unsafe fn getValues_forParameter_(self, vals: *mut GLint, param: NSOpenGLContextParameter) { + msg_send![self, getValues:vals forParameter:param] + } + + + + unsafe fn setCurrentVirtualScreen_(self, screen: GLint) { + msg_send![self, setCurrentVirtualScreen:screen] + } + + unsafe fn 
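A sketch of how the NSOpenGLPixelFormat and NSOpenGLContext traits fit together: build a zero-terminated attribute list, derive a pixel format, then a context bound to a view. It assumes the `NSOpenGLPixelFormatAttribute` variants named below are the ones this module defines elsewhere (`make_gl_context` is hypothetical):

use cocoa::appkit::{NSOpenGLContext, NSOpenGLPixelFormat, NSOpenGLPixelFormatAttribute};
use cocoa::base::{id, nil};

// Hypothetical helper: double-buffered 24-bit context attached to `view`.
unsafe fn make_gl_context(view: id) -> id {
    let attrs = [
        NSOpenGLPixelFormatAttribute::NSOpenGLPFADoubleBuffer as u32,
        NSOpenGLPixelFormatAttribute::NSOpenGLPFAColorSize as u32, 24,
        0, // the attribute list is zero-terminated
    ];
    let format = NSOpenGLPixelFormat::alloc(nil).initWithAttributes_(&attrs);
    let context = NSOpenGLContext::alloc(nil).initWithFormat_shareContext_(format, nil);
    context.setView_(view);
    context.makeCurrentContext();
    context
}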
currentVirtualScreen(self) -> GLint { + msg_send![self, currentVirtualScreen] + } + + + + unsafe fn CGLContextObj(self) -> CGLContextObj { + msg_send![self, CGLContextObj] + } +} + +bitflags! { + pub struct NSEventSwipeTrackingOptions: NSUInteger { + const NSEventSwipeTrackingLockDirection = 0x1 << 0; + const NSEventSwipeTrackingClampGestureAmount = 0x1 << 1; + } +} + +#[repr(i64)] +pub enum NSEventGestureAxis { + NSEventGestureAxisNone = 0, + NSEventGestureAxisHorizontal, + NSEventGestureAxisVertical, +} + +bitflags! { + pub struct NSEventPhase: NSUInteger { + const NSEventPhaseNone = 0; + const NSEventPhaseBegan = 0x1 << 0; + const NSEventPhaseStationary = 0x1 << 1; + const NSEventPhaseChanged = 0x1 << 2; + const NSEventPhaseEnded = 0x1 << 3; + const NSEventPhaseCancelled = 0x1 << 4; + const NSEventPhaseMayBegin = 0x1 << 5; + } +} + +bitflags! { + pub struct NSTouchPhase: NSUInteger { + const NSTouchPhaseBegan = 1 << 0; + const NSTouchPhaseMoved = 1 << 1; + const NSTouchPhaseStationary = 1 << 2; + const NSTouchPhaseEnded = 1 << 3; + const NSTouchPhaseCancelled = 1 << 4; + const NSTouchPhaseTouching = NSTouchPhase::NSTouchPhaseBegan.bits + | NSTouchPhase::NSTouchPhaseMoved.bits + | NSTouchPhase::NSTouchPhaseStationary.bits; + const NSTouchPhaseAny = !0; // NSUIntegerMax + } +} + +#[derive(Clone, Copy, Debug, PartialEq)] +#[repr(u64)] +pub enum NSEventType { + NSLeftMouseDown = 1, + NSLeftMouseUp = 2, + NSRightMouseDown = 3, + NSRightMouseUp = 4, + NSMouseMoved = 5, + NSLeftMouseDragged = 6, + NSRightMouseDragged = 7, + NSMouseEntered = 8, + NSMouseExited = 9, + NSKeyDown = 10, + NSKeyUp = 11, + NSFlagsChanged = 12, + NSAppKitDefined = 13, + NSSystemDefined = 14, + NSApplicationDefined = 15, + NSPeriodic = 16, + NSCursorUpdate = 17, + NSScrollWheel = 22, + NSTabletPoint = 23, + NSTabletProximity = 24, + NSOtherMouseDown = 25, + NSOtherMouseUp = 26, + NSOtherMouseDragged = 27, + NSEventTypeGesture = 29, + NSEventTypeMagnify = 30, + NSEventTypeSwipe = 31, + NSEventTypeRotate = 18, + NSEventTypeBeginGesture = 19, + NSEventTypeEndGesture = 20, + NSEventTypePressure = 34, +} + +bitflags! 
{ + pub struct NSEventMask: libc::c_ulonglong { + const NSLeftMouseDownMask = 1 << NSLeftMouseDown as libc::c_ulonglong; + const NSLeftMouseUpMask = 1 << NSLeftMouseUp as libc::c_ulonglong; + const NSRightMouseDownMask = 1 << NSRightMouseDown as libc::c_ulonglong; + const NSRightMouseUpMask = 1 << NSRightMouseUp as libc::c_ulonglong; + const NSMouseMovedMask = 1 << NSMouseMoved as libc::c_ulonglong; + const NSLeftMouseDraggedMask = 1 << NSLeftMouseDragged as libc::c_ulonglong; + const NSRightMouseDraggedMask = 1 << NSRightMouseDragged as libc::c_ulonglong; + const NSMouseEnteredMask = 1 << NSMouseEntered as libc::c_ulonglong; + const NSMouseExitedMask = 1 << NSMouseExited as libc::c_ulonglong; + const NSKeyDownMask = 1 << NSKeyDown as libc::c_ulonglong; + const NSKeyUpMask = 1 << NSKeyUp as libc::c_ulonglong; + const NSFlagsChangedMask = 1 << NSFlagsChanged as libc::c_ulonglong; + const NSAppKitDefinedMask = 1 << NSAppKitDefined as libc::c_ulonglong; + const NSSystemDefinedMask = 1 << NSSystemDefined as libc::c_ulonglong; + const NSApplicationDefinedMask = 1 << NSApplicationDefined as libc::c_ulonglong; + const NSPeriodicMask = 1 << NSPeriodic as libc::c_ulonglong; + const NSCursorUpdateMask = 1 << NSCursorUpdate as libc::c_ulonglong; + const NSScrollWheelMask = 1 << NSScrollWheel as libc::c_ulonglong; + const NSTabletPointMask = 1 << NSTabletPoint as libc::c_ulonglong; + const NSTabletProximityMask = 1 << NSTabletProximity as libc::c_ulonglong; + const NSOtherMouseDownMask = 1 << NSOtherMouseDown as libc::c_ulonglong; + const NSOtherMouseUpMask = 1 << NSOtherMouseUp as libc::c_ulonglong; + const NSOtherMouseDraggedMask = 1 << NSOtherMouseDragged as libc::c_ulonglong; + const NSEventMaskGesture = 1 << NSEventTypeGesture as libc::c_ulonglong; + const NSEventMaskSwipe = 1 << NSEventTypeSwipe as libc::c_ulonglong; + const NSEventMaskRotate = 1 << NSEventTypeRotate as libc::c_ulonglong; + const NSEventMaskBeginGesture = 1 << NSEventTypeBeginGesture as libc::c_ulonglong; + const NSEventMaskEndGesture = 1 << NSEventTypeEndGesture as libc::c_ulonglong; + const NSEventMaskPressure = 1 << NSEventTypePressure as libc::c_ulonglong; + const NSAnyEventMask = 0xffffffffffffffff; + } +} + +impl NSEventMask { + pub fn from_type(ty: NSEventType) -> NSEventMask { + NSEventMask { bits: 1 << ty as libc::c_ulonglong } + } +} + +bitflags! 
{ + pub struct NSEventModifierFlags: NSUInteger { + const NSAlphaShiftKeyMask = 1 << 16; + const NSShiftKeyMask = 1 << 17; + const NSControlKeyMask = 1 << 18; + const NSAlternateKeyMask = 1 << 19; + const NSCommandKeyMask = 1 << 20; + const NSNumericPadKeyMask = 1 << 21; + const NSHelpKeyMask = 1 << 22; + const NSFunctionKeyMask = 1 << 23; + const NSDeviceIndependentModifierFlagsMask = 0xffff0000; + } +} + + +pub enum NSPointingDeviceType { + + + + + +} + + +pub enum NSEventButtonMask { + + + + +} + +#[repr(i16)] +pub enum NSEventSubtype { + + + + + + NSWindowExposedEventType = 0, + NSApplicationActivatedEventType = 1, + NSApplicationDeactivatedEventType = 2, + NSWindowMovedEventType = 4, + NSScreenChangedEventType = 8, + NSAWTEventType = 16, +} + +pub const NSUpArrowFunctionKey: libc::c_ushort = 0xF700; +pub const NSDownArrowFunctionKey: libc::c_ushort = 0xF701; +pub const NSLeftArrowFunctionKey: libc::c_ushort = 0xF702; +pub const NSRightArrowFunctionKey: libc::c_ushort = 0xF703; +pub const NSF1FunctionKey: libc::c_ushort = 0xF704; +pub const NSF2FunctionKey: libc::c_ushort = 0xF705; +pub const NSF3FunctionKey: libc::c_ushort = 0xF706; +pub const NSF4FunctionKey: libc::c_ushort = 0xF707; +pub const NSF5FunctionKey: libc::c_ushort = 0xF708; +pub const NSF6FunctionKey: libc::c_ushort = 0xF709; +pub const NSF7FunctionKey: libc::c_ushort = 0xF70A; +pub const NSF8FunctionKey: libc::c_ushort = 0xF70B; +pub const NSF9FunctionKey: libc::c_ushort = 0xF70C; +pub const NSF10FunctionKey: libc::c_ushort = 0xF70D; +pub const NSF11FunctionKey: libc::c_ushort = 0xF70E; +pub const NSF12FunctionKey: libc::c_ushort = 0xF70F; +pub const NSF13FunctionKey: libc::c_ushort = 0xF710; +pub const NSF14FunctionKey: libc::c_ushort = 0xF711; +pub const NSF15FunctionKey: libc::c_ushort = 0xF712; +pub const NSF16FunctionKey: libc::c_ushort = 0xF713; +pub const NSF17FunctionKey: libc::c_ushort = 0xF714; +pub const NSF18FunctionKey: libc::c_ushort = 0xF715; +pub const NSF19FunctionKey: libc::c_ushort = 0xF716; +pub const NSF20FunctionKey: libc::c_ushort = 0xF717; +pub const NSF21FunctionKey: libc::c_ushort = 0xF718; +pub const NSF22FunctionKey: libc::c_ushort = 0xF719; +pub const NSF23FunctionKey: libc::c_ushort = 0xF71A; +pub const NSF24FunctionKey: libc::c_ushort = 0xF71B; +pub const NSF25FunctionKey: libc::c_ushort = 0xF71C; +pub const NSF26FunctionKey: libc::c_ushort = 0xF71D; +pub const NSF27FunctionKey: libc::c_ushort = 0xF71E; +pub const NSF28FunctionKey: libc::c_ushort = 0xF71F; +pub const NSF29FunctionKey: libc::c_ushort = 0xF720; +pub const NSF30FunctionKey: libc::c_ushort = 0xF721; +pub const NSF31FunctionKey: libc::c_ushort = 0xF722; +pub const NSF32FunctionKey: libc::c_ushort = 0xF723; +pub const NSF33FunctionKey: libc::c_ushort = 0xF724; +pub const NSF34FunctionKey: libc::c_ushort = 0xF725; +pub const NSF35FunctionKey: libc::c_ushort = 0xF726; +pub const NSInsertFunctionKey: libc::c_ushort = 0xF727; +pub const NSDeleteFunctionKey: libc::c_ushort = 0xF728; +pub const NSHomeFunctionKey: libc::c_ushort = 0xF729; +pub const NSBeginFunctionKey: libc::c_ushort = 0xF72A; +pub const NSEndFunctionKey: libc::c_ushort = 0xF72B; +pub const NSPageUpFunctionKey: libc::c_ushort = 0xF72C; +pub const NSPageDownFunctionKey: libc::c_ushort = 0xF72D; +pub const NSPrintScreenFunctionKey: libc::c_ushort = 0xF72E; +pub const NSScrollLockFunctionKey: libc::c_ushort = 0xF72F; +pub const NSPauseFunctionKey: libc::c_ushort = 0xF730; +pub const NSSysReqFunctionKey: libc::c_ushort = 0xF731; +pub const NSBreakFunctionKey: libc::c_ushort 
= 0xF732; +pub const NSResetFunctionKey: libc::c_ushort = 0xF733; +pub const NSStopFunctionKey: libc::c_ushort = 0xF734; +pub const NSMenuFunctionKey: libc::c_ushort = 0xF735; +pub const NSUserFunctionKey: libc::c_ushort = 0xF736; +pub const NSSystemFunctionKey: libc::c_ushort = 0xF737; +pub const NSPrintFunctionKey: libc::c_ushort = 0xF738; +pub const NSClearLineFunctionKey: libc::c_ushort = 0xF739; +pub const NSClearDisplayFunctionKey: libc::c_ushort = 0xF73A; +pub const NSInsertLineFunctionKey: libc::c_ushort = 0xF73B; +pub const NSDeleteLineFunctionKey: libc::c_ushort = 0xF73C; +pub const NSInsertCharFunctionKey: libc::c_ushort = 0xF73D; +pub const NSDeleteCharFunctionKey: libc::c_ushort = 0xF73E; +pub const NSPrevFunctionKey: libc::c_ushort = 0xF73F; +pub const NSNextFunctionKey: libc::c_ushort = 0xF740; +pub const NSSelectFunctionKey: libc::c_ushort = 0xF741; +pub const NSExecuteFunctionKey: libc::c_ushort = 0xF742; +pub const NSUndoFunctionKey: libc::c_ushort = 0xF743; +pub const NSRedoFunctionKey: libc::c_ushort = 0xF744; +pub const NSFindFunctionKey: libc::c_ushort = 0xF745; +pub const NSHelpFunctionKey: libc::c_ushort = 0xF746; +pub const NSModeSwitchFunctionKey: libc::c_ushort = 0xF747; + +pub trait NSEvent: Sized { + + unsafe fn keyEventWithType_location_modifierFlags_timestamp_windowNumber_context_characters_charactersIgnoringModifiers_isARepeat_keyCode_( + _: Self, + eventType: NSEventType, + location: NSPoint, + modifierFlags: NSEventModifierFlags, + timestamp: NSTimeInterval, + windowNumber: NSInteger, + context: id , + characters: id , + unmodCharacters: id , + repeatKey: BOOL, + code: libc::c_ushort) -> id ; + unsafe fn mouseEventWithType_location_modifierFlags_timestamp_windowNumber_context_eventNumber_clickCount_pressure_( + _: Self, + eventType: NSEventType, + location: NSPoint, + modifierFlags: NSEventModifierFlags, + timestamp: NSTimeInterval, + windowNumber: NSInteger, + context: id , + eventNumber: NSInteger, + clickCount: NSInteger, + pressure: libc::c_float) -> id ; + unsafe fn enterExitEventWithType_location_modifierFlags_timestamp_windowNumber_context_eventNumber_trackingNumber_userData_( + _: Self, + eventType: NSEventType, + location: NSPoint, + modifierFlags: NSEventModifierFlags, + timestamp: NSTimeInterval, + windowNumber: NSInteger, + context: id , + eventNumber: NSInteger, + trackingNumber: NSInteger, + userData: *mut c_void) -> id ; + unsafe fn otherEventWithType_location_modifierFlags_timestamp_windowNumber_context_subtype_data1_data2_( + _: Self, + eventType: NSEventType, + location: NSPoint, + modifierFlags: NSEventModifierFlags, + timestamp: NSTimeInterval, + windowNumber: NSInteger, + context: id , + subtype: NSEventSubtype, + data1: NSInteger, + data2: NSInteger) -> id ; + unsafe fn eventWithEventRef_(_: Self, eventRef: *const c_void) -> id; + unsafe fn eventWithCGEvent_(_: Self, cgEvent: *mut c_void ) -> id; + + + unsafe fn context(self) -> id ; + unsafe fn locationInWindow(self) -> NSPoint; + unsafe fn modifierFlags(self) -> NSEventModifierFlags; + unsafe fn timestamp(self) -> NSTimeInterval; + + unsafe fn eventType(self) -> NSEventType; + unsafe fn window(self) -> id ; + unsafe fn windowNumber(self) -> NSInteger; + unsafe fn eventRef(self) -> *const c_void; + unsafe fn CGEvent(self) -> *mut c_void ; + + + + unsafe fn currentModifierFlags(_: Self) -> NSEventModifierFlags; + unsafe fn keyRepeatDelay(_: Self) -> NSTimeInterval; + unsafe fn keyRepeatInterval(_: Self) -> NSTimeInterval; + unsafe fn characters(self) -> id ; + unsafe fn 
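Methods declared with a `_: Self` receiver in the NSEvent trait below bind Objective-C class methods; they are invoked on the type itself, with `nil` passed as the placeholder receiver. A short sketch under that convention (`log_pointer_state` is hypothetical):

use cocoa::appkit::NSEvent;
use cocoa::base::nil;

// Hypothetical helper: polls global input state via NSEvent class methods.
unsafe fn log_pointer_state() {
    let buttons = NSEvent::pressedMouseButtons(nil); // bitmask of held buttons
    let location = NSEvent::mouseLocation(nil);      // screen coordinates
    println!("buttons={:b} at ({}, {})", buttons, location.x, location.y);
}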
charactersIgnoringModifiers(self) -> id ; + unsafe fn keyCode(self) -> libc::c_ushort; + unsafe fn isARepeat(self) -> BOOL; + + + unsafe fn pressedMouseButtons(_: Self) -> NSUInteger; + unsafe fn doubleClickInterval(_: Self) -> NSTimeInterval; + unsafe fn mouseLocation(_: Self) -> NSPoint; + unsafe fn buttonNumber(self) -> NSInteger; + unsafe fn clickCount(self) -> NSInteger; + unsafe fn pressure(self) -> libc::c_float; + unsafe fn stage(self) -> NSInteger; + unsafe fn setMouseCoalescingEnabled_(_: Self, flag: BOOL); + unsafe fn isMouseCoalescingEnabled(_: Self) -> BOOL; + + + unsafe fn eventNumber(self) -> NSInteger; + unsafe fn trackingNumber(self) -> NSInteger; + unsafe fn trackingArea(self) -> id ; + unsafe fn userData(self) -> *const c_void; + + + unsafe fn data1(self) -> NSInteger; + unsafe fn data2(self) -> NSInteger; + unsafe fn subtype(self) -> NSEventSubtype; + + + unsafe fn deltaX(self) -> CGFloat; + unsafe fn deltaY(self) -> CGFloat; + unsafe fn deltaZ(self) -> CGFloat; + + + unsafe fn capabilityMask(self) -> NSUInteger; + unsafe fn deviceID(self) -> NSUInteger; + unsafe fn pointingDeviceID(self) -> NSUInteger; + unsafe fn pointingDeviceSerialNumber(self) -> NSUInteger; + unsafe fn pointingDeviceType(self) -> NSPointingDeviceType; + unsafe fn systemTabletID(self) -> NSUInteger; + unsafe fn tabletID(self) -> NSUInteger; + unsafe fn uniqueID(self) -> libc::c_ulonglong; + unsafe fn vendorID(self) -> NSUInteger; + unsafe fn vendorPointingDeviceType(self) -> NSUInteger; + + + unsafe fn absoluteX(self) -> NSInteger; + unsafe fn absoluteY(self) -> NSInteger; + unsafe fn absoluteZ(self) -> NSInteger; + unsafe fn buttonMask(self) -> NSEventButtonMask; + unsafe fn rotation(self) -> libc::c_float; + unsafe fn tangentialPressure(self) -> libc::c_float; + unsafe fn tilt(self) -> NSPoint; + unsafe fn vendorDefined(self) -> id; + + + unsafe fn startPeriodicEventsAfterDelay_withPeriod_(_: Self, delaySeconds: NSTimeInterval, periodSeconds: NSTimeInterval); + unsafe fn stopPeriodicEvents(_: Self); + + + unsafe fn magnification(self) -> CGFloat; + unsafe fn touchesMatchingPhase_inView_(self, phase: NSTouchPhase, view: id ) -> id ; + unsafe fn isSwipeTrackingFromScrollEventsEnabled(_: Self) -> BOOL; + + + + + unsafe fn removeMonitor_(_: Self, eventMonitor: id); + + + unsafe fn hasPreciseScrollingDeltas(self) -> BOOL; + unsafe fn scrollingDeltaX(self) -> CGFloat; + unsafe fn scrollingDeltaY(self) -> CGFloat; + unsafe fn momentumPhase(self) -> NSEventPhase; + unsafe fn phase(self) -> NSEventPhase; + + + + unsafe fn locationInNode_(self, node: id ) -> CGPoint; +} + +impl NSEvent for id { + + + unsafe fn keyEventWithType_location_modifierFlags_timestamp_windowNumber_context_characters_charactersIgnoringModifiers_isARepeat_keyCode_( + _: Self, + eventType: NSEventType, + location: NSPoint, + modifierFlags: NSEventModifierFlags, + timestamp: NSTimeInterval, + windowNumber: NSInteger, + context: id , + characters: id , + unmodCharacters: id , + repeatKey: BOOL, + code: libc::c_ushort) -> id + { + msg_send![class!(NSEvent), keyEventWithType:eventType + location:location + modifierFlags:modifierFlags + timestamp:timestamp + windowNumber:windowNumber + context:context + characters:characters + charactersIgnoringModifiers:unmodCharacters + isARepeat:repeatKey + keyCode:code] + } + + unsafe fn mouseEventWithType_location_modifierFlags_timestamp_windowNumber_context_eventNumber_clickCount_pressure_( + _: Self, + eventType: NSEventType, + location: NSPoint, + modifierFlags: NSEventModifierFlags, + timestamp: 
NSTimeInterval, + windowNumber: NSInteger, + context: id , + eventNumber: NSInteger, + clickCount: NSInteger, + pressure: libc::c_float) -> id + { + msg_send![class!(NSEvent), mouseEventWithType:eventType + location:location + modifierFlags:modifierFlags + timestamp:timestamp + windowNumber:windowNumber + context:context + eventNumber:eventNumber + clickCount:clickCount + pressure:pressure] + } + + unsafe fn enterExitEventWithType_location_modifierFlags_timestamp_windowNumber_context_eventNumber_trackingNumber_userData_( + _: Self, + eventType: NSEventType, + location: NSPoint, + modifierFlags: NSEventModifierFlags, + timestamp: NSTimeInterval, + windowNumber: NSInteger, + context: id , + eventNumber: NSInteger, + trackingNumber: NSInteger, + userData: *mut c_void) -> id + { + msg_send![class!(NSEvent), enterExitEventWithType:eventType + location:location + modifierFlags:modifierFlags + timestamp:timestamp + windowNumber:windowNumber + context:context + eventNumber:eventNumber + trackingNumber:trackingNumber + userData:userData] + } + + unsafe fn otherEventWithType_location_modifierFlags_timestamp_windowNumber_context_subtype_data1_data2_( + _: Self, + eventType: NSEventType, + location: NSPoint, + modifierFlags: NSEventModifierFlags, + timestamp: NSTimeInterval, + windowNumber: NSInteger, + context: id , + subtype: NSEventSubtype, + data1: NSInteger, + data2: NSInteger) -> id + { + msg_send![class!(NSEvent), otherEventWithType:eventType + location:location + modifierFlags:modifierFlags + timestamp:timestamp + windowNumber:windowNumber + context:context + subtype:subtype + data1:data1 + data2:data2] + } + + unsafe fn eventWithEventRef_(_: Self, eventRef: *const c_void) -> id { + msg_send![class!(NSEvent), eventWithEventRef:eventRef] + } + + unsafe fn eventWithCGEvent_(_: Self, cgEvent: *mut c_void ) -> id { + msg_send![class!(NSEvent), eventWithCGEvent:cgEvent] + } + + + + unsafe fn context(self) -> id { + msg_send![self, context] + } + + unsafe fn locationInWindow(self) -> NSPoint { + msg_send![self, locationInWindow] + } + + unsafe fn modifierFlags(self) -> NSEventModifierFlags { + msg_send![self, modifierFlags] + } + + unsafe fn timestamp(self) -> NSTimeInterval { + msg_send![self, timestamp] + } + + + unsafe fn eventType(self) -> NSEventType { + msg_send![self, type] + } + + unsafe fn window(self) -> id { + msg_send![self, window] + } + + unsafe fn windowNumber(self) -> NSInteger { + msg_send![self, windowNumber] + } + + unsafe fn eventRef(self) -> *const c_void { + msg_send![self, eventRef] + } + + unsafe fn CGEvent(self) -> *mut c_void { + msg_send![self, CGEvent] + } + + + + + + unsafe fn currentModifierFlags(_: Self) -> NSEventModifierFlags { + msg_send![class!(NSEvent), currentModifierFlags] + } + + unsafe fn keyRepeatDelay(_: Self) -> NSTimeInterval { + msg_send![class!(NSEvent), keyRepeatDelay] + } + + unsafe fn keyRepeatInterval(_: Self) -> NSTimeInterval { + msg_send![class!(NSEvent), keyRepeatInterval] + } + + unsafe fn characters(self) -> id { + msg_send![self, characters] + } + + unsafe fn charactersIgnoringModifiers(self) -> id { + msg_send![self, charactersIgnoringModifiers] + } + + unsafe fn keyCode(self) -> libc::c_ushort { + msg_send![self, keyCode] + } + + unsafe fn isARepeat(self) -> BOOL { + msg_send![self, isARepeat] + } + + + + unsafe fn pressedMouseButtons(_: Self) -> NSUInteger { + msg_send![class!(NSEvent), pressedMouseButtons] + } + + unsafe fn doubleClickInterval(_: Self) -> NSTimeInterval { + msg_send![class!(NSEvent), doubleClickInterval] + } + + unsafe fn 
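The `otherEventWithType_...` binding implemented above is commonly used to post an application-defined event, e.g. to wake an AppKit run loop. A hedged sketch (`wake_event` is hypothetical; the subtype and data values are arbitrary for NSApplicationDefined events):

use cocoa::appkit::{NSEvent, NSEventModifierFlags, NSEventSubtype, NSEventType};
use cocoa::base::{id, nil};
use cocoa::foundation::NSPoint;

// Hypothetical helper: builds a synthetic application-defined event.
unsafe fn wake_event() -> id {
    NSEvent::otherEventWithType_location_modifierFlags_timestamp_windowNumber_context_subtype_data1_data2_(
        nil,
        NSEventType::NSApplicationDefined,
        NSPoint::new(0.0, 0.0),
        NSEventModifierFlags::empty(),
        0.0, // timestamp
        0,   // window number
        nil, // graphics context
        NSEventSubtype::NSApplicationActivatedEventType,
        0,   // data1
        0,   // data2
    )
}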
mouseLocation(_: Self) -> NSPoint { + msg_send![class!(NSEvent), mouseLocation] + } + + unsafe fn buttonNumber(self) -> NSInteger { + msg_send![self, buttonNumber] + } + + unsafe fn clickCount(self) -> NSInteger { + msg_send![self, clickCount] + } + + unsafe fn pressure(self) -> libc::c_float { + msg_send![self, pressure] + } + + unsafe fn stage(self) -> NSInteger { + msg_send![self, stage] + } + + unsafe fn setMouseCoalescingEnabled_(_: Self, flag: BOOL) { + msg_send![class!(NSEvent), setMouseCoalescingEnabled:flag] + } + + unsafe fn isMouseCoalescingEnabled(_: Self) -> BOOL { + msg_send![class!(NSEvent), isMouseCoalescingEnabled] + } + + + + unsafe fn eventNumber(self) -> NSInteger { + msg_send![self, eventNumber] + } + + unsafe fn trackingNumber(self) -> NSInteger { + msg_send![self, trackingNumber] + } + + unsafe fn trackingArea(self) -> id { + msg_send![self, trackingArea] + } + + unsafe fn userData(self) -> *const c_void { + msg_send![self, userData] + } + + + + unsafe fn data1(self) -> NSInteger { + msg_send![self, data1] + } + + unsafe fn data2(self) -> NSInteger { + msg_send![self, data2] + } + + unsafe fn subtype(self) -> NSEventSubtype { + msg_send![self, subtype] + } + + + + unsafe fn deltaX(self) -> CGFloat { + msg_send![self, deltaX] + } + + unsafe fn deltaY(self) -> CGFloat { + msg_send![self, deltaY] + } + + unsafe fn deltaZ(self) -> CGFloat { + msg_send![self, deltaZ] + } + + + + unsafe fn capabilityMask(self) -> NSUInteger { + msg_send![self, capabilityMask] + } + + unsafe fn deviceID(self) -> NSUInteger { + msg_send![self, deviceID] + } + + unsafe fn pointingDeviceID(self) -> NSUInteger { + msg_send![self, pointingDeviceID] + } + + unsafe fn pointingDeviceSerialNumber(self) -> NSUInteger { + msg_send![self, pointingDeviceSerialNumber] + } + + unsafe fn pointingDeviceType(self) -> NSPointingDeviceType { + msg_send![self, pointingDeviceType] + } + + unsafe fn systemTabletID(self) -> NSUInteger { + msg_send![self, systemTabletID] + } + + unsafe fn tabletID(self) -> NSUInteger { + msg_send![self, tabletID] + } + + unsafe fn uniqueID(self) -> libc::c_ulonglong { + msg_send![self, uniqueID] + } + + unsafe fn vendorID(self) -> NSUInteger { + msg_send![self, vendorID] + } + + unsafe fn vendorPointingDeviceType(self) -> NSUInteger { + msg_send![self, vendorPointingDeviceType] + } + + + + unsafe fn absoluteX(self) -> NSInteger { + msg_send![self, absoluteX] + } + + unsafe fn absoluteY(self) -> NSInteger { + msg_send![self, absoluteY] + } + + unsafe fn absoluteZ(self) -> NSInteger { + msg_send![self, absoluteZ] + } + + unsafe fn buttonMask(self) -> NSEventButtonMask { + msg_send![self, buttonMask] + } + + unsafe fn rotation(self) -> libc::c_float { + msg_send![self, rotation] + } + + unsafe fn tangentialPressure(self) -> libc::c_float { + msg_send![self, tangentialPressure] + } + + unsafe fn tilt(self) -> NSPoint { + msg_send![self, tilt] + } + + unsafe fn vendorDefined(self) -> id { + msg_send![self, vendorDefined] + } + + + + unsafe fn startPeriodicEventsAfterDelay_withPeriod_(_: Self, delaySeconds: NSTimeInterval, periodSeconds: NSTimeInterval) { + msg_send![class!(NSEvent), startPeriodicEventsAfterDelay:delaySeconds withPeriod:periodSeconds] + } + + unsafe fn stopPeriodicEvents(_: Self) { + msg_send![class!(NSEvent), stopPeriodicEvents] + } + + + + unsafe fn magnification(self) -> CGFloat { + msg_send![self, magnification] + } + + unsafe fn touchesMatchingPhase_inView_(self, phase: NSTouchPhase, view: id ) -> id { + msg_send![self, touchesMatchingPhase:phase inView:view] + } + +
unsafe fn isSwipeTrackingFromScrollEventsEnabled(_: Self) -> BOOL { + msg_send![class!(NSEvent), isSwipeTrackingFromScrollEventsEnabled] + } + + + + + + + unsafe fn removeMonitor_(_: Self, eventMonitor: id) { + msg_send![class!(NSEvent), removeMonitor:eventMonitor] + } + + + + unsafe fn hasPreciseScrollingDeltas(self) -> BOOL { + msg_send![self, hasPreciseScrollingDeltas] + } + + unsafe fn scrollingDeltaX(self) -> CGFloat { + msg_send![self, scrollingDeltaX] + } + + unsafe fn scrollingDeltaY(self) -> CGFloat { + msg_send![self, scrollingDeltaY] + } + + unsafe fn momentumPhase(self) -> NSEventPhase { + msg_send![self, momentumPhase] + } + + unsafe fn phase(self) -> NSEventPhase { + msg_send![self, phase] + } + + + + + unsafe fn locationInNode_(self, node: id ) -> CGPoint { + msg_send![self, locationInNode:node] + } +} + +pub trait NSScreen: Sized { + + unsafe fn mainScreen(_: Self) -> id ; + unsafe fn deepestScreen(_: Self) -> id ; + unsafe fn screens(_: Self) -> id ; + + + unsafe fn depth(self) -> NSWindowDepth; + unsafe fn frame(self) -> NSRect; + unsafe fn supportedWindowDepths(self) -> *const NSWindowDepth; + unsafe fn deviceDescription(self) -> id ; + unsafe fn visibleFrame(self) -> NSRect; + unsafe fn colorSpace(self) -> id ; + unsafe fn screensHaveSeparateSpaces(_: Self) -> BOOL; + + + unsafe fn backingAlignedRect_options_(self, aRect: NSRect, options: NSAlignmentOptions) -> NSRect; + unsafe fn backingScaleFactor(self) -> CGFloat; + unsafe fn convertRectFromBacking_(self, aRect: NSRect) -> NSRect; + unsafe fn convertRectToBacking_(self, aRect: NSRect) -> NSRect; +} + +impl NSScreen for id { + + + unsafe fn mainScreen(_: Self) -> id { + msg_send![class!(NSScreen), mainScreen] + } + + unsafe fn deepestScreen(_: Self) -> id { + msg_send![class!(NSScreen), deepestScreen] + } + + unsafe fn screens(_: Self) -> id { + msg_send![class!(NSScreen), screens] + } + + + + unsafe fn depth(self) -> NSWindowDepth { + msg_send![self, depth] + } + + unsafe fn frame(self) -> NSRect { + msg_send![self, frame] + } + + unsafe fn supportedWindowDepths(self) -> *const NSWindowDepth { + msg_send![self, supportedWindowDepths] + } + + unsafe fn deviceDescription(self) -> id { + msg_send![self, deviceDescription] + } + + unsafe fn visibleFrame(self) -> NSRect { + msg_send![self, visibleFrame] + } + + unsafe fn colorSpace(self) -> id { + msg_send![self, colorSpace] + } + + unsafe fn screensHaveSeparateSpaces(_: Self) -> BOOL { + msg_send![class!(NSScreen), screensHaveSeparateSpaces] + } + + + + unsafe fn backingAlignedRect_options_(self, aRect: NSRect, options: NSAlignmentOptions) -> NSRect { + msg_send![self, backingAlignedRect:aRect options:options] + } + + unsafe fn backingScaleFactor(self) -> CGFloat { + msg_send![self, backingScaleFactor] + } + + unsafe fn convertRectFromBacking_(self, aRect: NSRect) -> NSRect { + msg_send![self, convertRectFromBacking:aRect] + } + + unsafe fn convertRectToBacking_(self, aRect: NSRect) -> NSRect { + msg_send![self, convertRectToBacking:aRect] + } +} + + +pub trait NSControl: Sized { + unsafe fn alloc(_: Self) -> id { + msg_send![class!(NSControl), alloc] + } + unsafe fn initWithFrame_(self, frameRect: NSRect) -> id; + unsafe fn isEnabled_(self) -> BOOL; + unsafe fn setEnabled_(self, enabled: BOOL) -> BOOL; +} + +impl NSControl for id { + unsafe fn initWithFrame_(self, frameRect: NSRect) -> id { + msg_send![self, initWithFrame:frameRect] + } + unsafe fn isEnabled_(self) -> BOOL { + msg_send![self, isEnabled] + } + unsafe fn setEnabled_(self, enabled: BOOL) -> BOOL { + 
msg_send![self, setEnabled:enabled] + } +} + +pub trait NSButton: Sized { + unsafe fn setImage_(self, img: id ); + unsafe fn setBezelStyle_(self, style: NSBezelStyle); + unsafe fn setTitle_(self, title: id ); + unsafe fn alloc(_: Self) -> id { + msg_send![class!(NSButton), alloc] + } + unsafe fn initWithFrame_(self, frameRect: NSRect) -> id; +} + +impl NSButton for id { + unsafe fn initWithFrame_(self, frameRect: NSRect) -> id { + msg_send![self, initWithFrame:frameRect] + } + unsafe fn setBezelStyle_(self, style: NSBezelStyle) { + msg_send![self, setBezelStyle:style]; + } + unsafe fn setTitle_(self, title: id ) { + msg_send![self, setTitle:title] + } + unsafe fn setImage_(self, img: id ) { + msg_send![self, setImage:img] + } +} + +pub trait NSImage: Sized { + unsafe fn alloc(_: Self) -> id { + msg_send![class!(NSImage), alloc] + } + + unsafe fn initByReferencingFile_(self, file_name: id ) -> id; + unsafe fn initWithContentsOfFile_(self, file_name: id ) -> id; + unsafe fn initWithData_(self, data: id ) -> id; + unsafe fn initWithDataIgnoringOrientation_(self, data: id ) -> id; + unsafe fn initWithPasteboard_(self, pasteboard: id ) -> id; + unsafe fn initWithSize_flipped_drawingHandler_(self, size: NSSize, + drawingHandlerShouldBeCalledWithFlippedContext: BOOL, + drawingHandler: *mut Block<(NSRect,), BOOL>); + unsafe fn initWithSize_(self, aSize: NSSize) -> id; + + unsafe fn imageNamed_(_: Self, name: id ) -> id { + msg_send![class!(NSImage), imageNamed:name] + } + + unsafe fn name(self) -> id ; + unsafe fn setName_(self, name: id ) -> BOOL; + + unsafe fn size(self) -> NSSize; + unsafe fn template(self) -> BOOL; + + unsafe fn canInitWithPasteboard_(self, pasteboard: id ) -> BOOL; + unsafe fn imageTypes(self) -> id ; + unsafe fn imageUnfilteredTypes(self) -> id ; + + unsafe fn addRepresentation_(self, imageRep: id ); + unsafe fn addRepresentations_(self, imageReps: id ); + unsafe fn representations(self) -> id ; + unsafe fn removeRepresentation_(self, imageRep: id ); + unsafe fn bestRepresentationForRect_context_hints_(self, rect: NSRect, + referenceContext: id , + hints: id ) + -> id ; + unsafe fn prefersColorMatch(self) -> BOOL; + unsafe fn usesEPSOnResolutionMismatch(self) -> BOOL; + unsafe fn matchesOnMultipleResolution(self) -> BOOL; + + unsafe fn drawInRect_(self, rect: NSRect); + unsafe fn drawAtPoint_fromRect_operation_fraction_(self, point: NSPoint, srcRect: NSRect, + op: NSCompositingOperation, delta: CGFloat); + unsafe fn drawInRect_fromRect_operation_fraction_(self, dstRect: NSRect, srcRect: NSRect, + op: NSCompositingOperation, delta: CGFloat); + unsafe fn drawInRect_fromRect_operation_fraction_respectFlipped_hints_(self, dstSpacePortionRect: NSRect, + srcSpacePortionRect: NSRect, op: NSCompositingOperation, delta: CGFloat, respectContextIsFlipped: BOOL, + hints: id ); + unsafe fn drawRepresentation_inRect_(self, imageRep: id , dstRect: NSRect); + + unsafe fn isValid(self) -> BOOL; + unsafe fn backgroundColor(self) -> id ; + + unsafe fn lockFocus(self); + unsafe fn lockFocusFlipped_(self, flipped: BOOL); + unsafe fn unlockFocus(self); + + unsafe fn alignmentRect(self) -> NSRect; + + unsafe fn cacheMode(self) -> NSImageCacheMode; + unsafe fn recache(self); + + unsafe fn delegate(self) -> id ; + + unsafe fn TIFFRepresentation(self) -> id ; + unsafe fn TIFFRepresentationUsingCompression_factor_(self, comp: NSTIFFCompression, aFloat: f32) + -> id ; + + unsafe fn cancelIncrementalLoad(self); + + unsafe fn hitTestRect_withImageDestinationRect_context_hints_flipped_(self, 
testRectDestSpace: NSRect, + imageRectDestSpace: NSRect, referenceContext: id , + hints: id , flipped: BOOL) -> BOOL; + + unsafe fn accessibilityDescription(self) -> id ; + + unsafe fn layerContentsForContentsScale_(self, layerContentsScale: CGFloat) -> id ; + unsafe fn recommendedLayerContentsScale_(self, preferredContentsScale: CGFloat) -> CGFloat; + + unsafe fn matchesOnlyOnBestFittingAxis(self) -> BOOL; +} + +impl NSImage for id { + unsafe fn initByReferencingFile_(self, file_name: id ) -> id { + msg_send![self, initByReferencingFile:file_name] + } + + unsafe fn initWithContentsOfFile_(self, file_name: id ) -> id { + msg_send![self, initWithContentsOfFile:file_name] + } + + unsafe fn initWithData_(self, data: id ) -> id { + msg_send![self, initWithData:data] + } + + unsafe fn initWithDataIgnoringOrientation_(self, data: id ) -> id { + msg_send![self, initWithDataIgnoringOrientation:data] + } + + unsafe fn initWithPasteboard_(self, pasteboard: id ) -> id { + msg_send![self, initWithPasteboard:pasteboard] + } + + unsafe fn initWithSize_flipped_drawingHandler_(self, size: NSSize, + drawingHandlerShouldBeCalledWithFlippedContext: BOOL, + drawingHandler: *mut Block<(NSRect,), BOOL>) { + msg_send![self, initWithSize:size + flipped:drawingHandlerShouldBeCalledWithFlippedContext + drawingHandler:drawingHandler] + } + + unsafe fn initWithSize_(self, aSize: NSSize) -> id { + msg_send![self, initWithSize:aSize] + } + + unsafe fn name(self) -> id { + msg_send![self, name] + } + + unsafe fn setName_(self, name: id ) -> BOOL { + msg_send![self, setName:name] + } + + unsafe fn size(self) -> NSSize { + msg_send![self, size] + } + + unsafe fn template(self) -> BOOL { + msg_send![self, template] + } + + unsafe fn canInitWithPasteboard_(self, pasteboard: id ) -> BOOL { + msg_send![self, canInitWithPasteboard:pasteboard] + } + + unsafe fn imageTypes(self) -> id { + msg_send![self, imageTypes] + } + + unsafe fn imageUnfilteredTypes(self) -> id { + msg_send![self, imageUnfilteredTypes] + } + + unsafe fn addRepresentation_(self, imageRep: id ) { + msg_send![self, addRepresentation:imageRep] + } + + unsafe fn addRepresentations_(self, imageReps: id ) { + msg_send![self, addRepresentations:imageReps] + } + + unsafe fn representations(self) -> id { + msg_send![self, representations] + } + + unsafe fn removeRepresentation_(self, imageRep: id ) { + msg_send![self, removeRepresentation:imageRep] + } + + unsafe fn bestRepresentationForRect_context_hints_(self, rect: NSRect, + referenceContext: id , + hints: id ) + -> id { + msg_send![self, bestRepresentationForRect:rect context:referenceContext hints:hints] + } + + unsafe fn prefersColorMatch(self) -> BOOL { + msg_send![self, prefersColorMatch] + } + + unsafe fn usesEPSOnResolutionMismatch(self) -> BOOL { + msg_send![self, usesEPSOnResolutionMismatch] + } + + unsafe fn matchesOnMultipleResolution(self) -> BOOL { + msg_send![self, matchesOnMultipleResolution] + } + + unsafe fn drawInRect_(self, rect: NSRect) { + msg_send![self, drawInRect:rect] + } + + unsafe fn drawAtPoint_fromRect_operation_fraction_(self, point: NSPoint, srcRect: NSRect, + op: NSCompositingOperation, delta: CGFloat) { + msg_send![self, drawAtPoint:point fromRect:srcRect operation:op fraction:delta] + } + + unsafe fn drawInRect_fromRect_operation_fraction_(self, dstRect: NSRect, srcRect: NSRect, + op: NSCompositingOperation, delta: CGFloat) { + msg_send![self, drawInRect:dstRect fromRect:srcRect operation:op fraction:delta] + } + + unsafe fn 
drawInRect_fromRect_operation_fraction_respectFlipped_hints_(self, dstSpacePortionRect: NSRect, + srcSpacePortionRect: NSRect, op: NSCompositingOperation, delta: CGFloat, respectContextIsFlipped: BOOL, + hints: id ) { + msg_send![self, drawInRect:dstSpacePortionRect + fromRect:srcSpacePortionRect + operation:op + fraction:delta + respectFlipped:respectContextIsFlipped + hints:hints] + } + + unsafe fn drawRepresentation_inRect_(self, imageRep: id , dstRect: NSRect) { + msg_send![self, drawRepresentation:imageRep inRect:dstRect] + } + + unsafe fn isValid(self) -> BOOL { + msg_send![self, isValid] + } + + unsafe fn backgroundColor(self) -> id { + msg_send![self, backgroundColor] + } + + unsafe fn lockFocus(self) { + msg_send![self, lockFocus] + } + + unsafe fn lockFocusFlipped_(self, flipped: BOOL) { + msg_send![self, lockFocusFlipped:flipped] + } + + unsafe fn unlockFocus(self) { + msg_send![self, unlockFocus] + } + + unsafe fn alignmentRect(self) -> NSRect { + msg_send![self, alignmentRect] + } + + unsafe fn cacheMode(self) -> NSImageCacheMode { + msg_send![self, cacheMode] + } + + unsafe fn recache(self) { + msg_send![self, recache] + } + + unsafe fn delegate(self) -> id { + msg_send![self, delegate] + } + + unsafe fn TIFFRepresentation(self) -> id { + msg_send![self, TIFFRepresentation] + } + + unsafe fn TIFFRepresentationUsingCompression_factor_(self, comp: NSTIFFCompression, aFloat: f32) + -> id { + msg_send![self, TIFFRepresentationUsingCompression:comp factor:aFloat] + } + + unsafe fn cancelIncrementalLoad(self) { + msg_send![self, cancelIncrementalLoad] + } + + unsafe fn hitTestRect_withImageDestinationRect_context_hints_flipped_(self, testRectDestSpace: NSRect, + imageRectDestSpace: NSRect, referenceContext: id , + hints: id , flipped: BOOL) -> BOOL { + msg_send![self, hitTestRect:testRectDestSpace + withImageDestinationRect:imageRectDestSpace + context:referenceContext + hints:hints + flipped:flipped] + } + + unsafe fn accessibilityDescription(self) -> id { + msg_send![self, accessibilityDescription] + } + + unsafe fn layerContentsForContentsScale_(self, layerContentsScale: CGFloat) -> id { + msg_send![self, layerContentsForContentsScale:layerContentsScale] + } + + unsafe fn recommendedLayerContentsScale_(self, preferredContentsScale: CGFloat) -> CGFloat { + msg_send![self, recommendedLayerContentsScale:preferredContentsScale] + } + + unsafe fn matchesOnlyOnBestFittingAxis(self) -> BOOL { + msg_send![self, matchesOnlyOnBestFittingAxis] + } +} + +#[link(name = "AppKit", kind = "framework")] +extern { + + pub static NSImageHintCTM: id; + pub static NSImageHintInterpolation: id; + + + pub static NSImageNameQuickLookTemplate: id; + pub static NSImageNameBluetoothTemplate: id; + pub static NSImageNameIChatTheaterTemplate: id; + pub static NSImageNameSlideshowTemplate: id; + pub static NSImageNameActionTemplate: id; + pub static NSImageNameSmartBadgeTemplate: id; + pub static NSImageNamePathTemplate: id; + pub static NSImageNameInvalidDataFreestandingTemplate: id; + pub static NSImageNameLockLockedTemplate: id; + pub static NSImageNameLockUnlockedTemplate: id; + pub static NSImageNameGoRightTemplate: id; + pub static NSImageNameGoLeftTemplate: id; + pub static NSImageNameRightFacingTriangleTemplate: id; + pub static NSImageNameLeftFacingTriangleTemplate: id; + pub static NSImageNameAddTemplate: id; + pub static NSImageNameRemoveTemplate: id; + pub static NSImageNameRevealFreestandingTemplate: id; + pub static NSImageNameFollowLinkFreestandingTemplate: id; + pub static 
NSImageNameEnterFullScreenTemplate: id; + pub static NSImageNameExitFullScreenTemplate: id; + pub static NSImageNameStopProgressTemplate: id; + pub static NSImageNameStopProgressFreestandingTemplate: id; + pub static NSImageNameRefreshTemplate: id; + pub static NSImageNameRefreshFreestandingTemplate: id; + + pub static NSImageNameMultipleDocuments: id; + + pub static NSImageNameUser: id; + pub static NSImageNameUserGroup: id; + pub static NSImageNameEveryone: id; + pub static NSImageNameUserGuest: id; + + pub static NSImageNameBonjour: id; + pub static NSImageNameDotMac: id; + pub static NSImageNameComputer: id; + pub static NSImageNameFolderBurnable: id; + pub static NSImageNameFolderSmart: id; + pub static NSImageNameNetwork: id; + + pub static NSImageNameUserAccounts: id; + pub static NSImageNamePreferencesGeneral: id; + pub static NSImageNameAdvanced: id; + pub static NSImageNameInfo: id; + pub static NSImageNameFontPanel: id; + pub static NSImageNameColorPanel: id; + pub static NSImageNameFolder: id; + pub static NSImageNameTrashEmpty: id; + pub static NSImageNameTrashFull: id; + pub static NSImageNameHomeTemplate: id; + pub static NSImageNameBookmarksTemplate: id; + pub static NSImageNameCaution: id; + pub static NSImageNameStatusAvailable: id; + pub static NSImageNameStatusPartiallyAvailable: id; + pub static NSImageNameStatusUnavailable: id; + pub static NSImageNameStatusNone: id; + pub static NSImageNameApplicationIcon: id; + pub static NSImageNameMenuOnStateTemplate: id; + pub static NSImageNameMenuMixedStateTemplate: id; + pub static NSImageNameMobileMe: id; + + pub static NSImageNameIconViewTemplate: id; + pub static NSImageNameListViewTemplate: id; + pub static NSImageNameColumnViewTemplate: id; + pub static NSImageNameFlowViewTemplate: id; + pub static NSImageNameShareTemplate: id; +} + +#[repr(usize)] +#[derive(Clone, Copy, Debug, Eq, PartialEq)] +pub enum NSCompositingOperation { + NSCompositeClear = 0, + NSCompositeCopy = 1, + NSCompositeSourceOver = 2, + NSCompositeSourceIn = 3, + NSCompositeSourceOut = 4, + NSCompositeSourceAtop = 5, + NSCompositeDestinationOver = 6, + NSCompositeDestinationIn = 7, + NSCompositeDestinationOut = 8, + NSCompositeDestinationAtop = 9, + NSCompositeXOR = 10, + NSCompositePlusDarker = 11, + NSCompositeHighlight = 12, + NSCompositePlusLighter = 13 +} + +#[repr(usize)] +#[derive(Clone, Copy, Debug, Eq, PartialEq)] +pub enum NSImageCacheMode { + NSImageCacheDefault, + NSImageCacheAlways, + NSImageCacheBySize, + NSImageCacheNever +} + +#[repr(usize)] +#[derive(Clone, Copy, Debug, Eq, PartialEq)] +pub enum NSTIFFCompression { + NSTIFFCompressionNone = 1, + NSTIFFCompressionCCITTFAX3 = 3, + NSTIFFCompressionCCITTFAX4 = 4, + NSTIFFCompressionLZW = 5, + NSTIFFCompressionJPEG = 6, + NSTIFFCompressionNEXT = 32766, + NSTIFFCompressionPackBits = 32773, + NSTIFFCompressionOldJPEG = 32865 +} + +#[repr(usize)] +#[derive(Clone, Copy, Debug, Eq, PartialEq)] +pub enum NSImageLoadStatus { + NSImageLoadStatusCompleted, + NSImageLoadStatusCancelled, + NSImageLoadStatusInvalidData, + NSImageLoadStatusUnexpectedEOF, + NSImageLoadStatusReadError +} + +pub trait NSSound: Sized { + unsafe fn canInitWithPasteboard_(_: Self, pasteboard: id) -> BOOL { + msg_send![class!(NSSound), canInitWithPasteboard:pasteboard] + } + + unsafe fn initWithContentsOfFile_withReference_(self, filepath: id, byRef: BOOL) -> id; + unsafe fn initWithContentsOfURL_withReference_(self, fileUrl: id, byRef: BOOL) -> id; + unsafe fn initWithData_(self, audioData: id) -> id; + unsafe fn 
initWithPasteboard_(self, pasteboard: id) -> id; + + unsafe fn name(self) -> id; + unsafe fn volume(self) -> f32; + unsafe fn currentTime(self) -> NSTimeInterval; + unsafe fn loops(self) -> BOOL; + unsafe fn playbackDeviceIdentifier(self) -> id; + unsafe fn delegate(self) -> id; + + unsafe fn soundUnfilteredTypes(_: Self) -> id { + msg_send![class!(NSSound), soundUnfilteredTypes] + } + + unsafe fn soundNamed_(_: Self, soundName: id) -> id { + msg_send![class!(NSSound), soundNamed:soundName] + } + + unsafe fn duration(self) -> NSTimeInterval; + + unsafe fn playing(self) -> BOOL; + unsafe fn pause(self) -> BOOL; + unsafe fn play(self) -> BOOL; + unsafe fn resume(self) -> BOOL; + unsafe fn stop(self) -> BOOL; + + unsafe fn writeToPasteboard_(self, pasteboard: id); +} + +impl NSSound for id { + unsafe fn initWithContentsOfFile_withReference_(self, filepath: id, byRef: BOOL) -> id { + msg_send![self, initWithContentsOfFile:filepath withReference:byRef] + } + + unsafe fn initWithContentsOfURL_withReference_(self, fileUrl: id, byRef: BOOL) -> id { + msg_send![self, initWithContentsOfURL:fileUrl withReference:byRef] + } + + unsafe fn initWithData_(self, audioData: id) -> id { + msg_send![self, initWithData:audioData] + } + + unsafe fn initWithPasteboard_(self, pasteboard: id) -> id { + msg_send![self, initWithPasteboard:pasteboard] + } + + unsafe fn name(self) -> id { + msg_send![self, name] + } + + unsafe fn volume(self) -> f32 { + msg_send![self, volume] + } + + unsafe fn currentTime(self) -> NSTimeInterval { + msg_send![self, currentTime] + } + + unsafe fn loops(self) -> BOOL { + msg_send![self, loops] + } + + unsafe fn playbackDeviceIdentifier(self) -> id { + msg_send![self, playbackDeviceIdentifier] + } + + unsafe fn delegate(self) -> id { + msg_send![self, delegate] + } + + unsafe fn duration(self) -> NSTimeInterval { + msg_send![self, duration] + } + + unsafe fn playing(self) -> BOOL { + msg_send![self, playing] + } + + unsafe fn pause(self) -> BOOL { + msg_send![self, pause] + } + + unsafe fn play(self) -> BOOL { + msg_send![self, play] + } + + unsafe fn resume(self) -> BOOL { + msg_send![self, resume] + } + + unsafe fn stop(self) -> BOOL { + msg_send![self, stop] + } + + unsafe fn writeToPasteboard_(self, pasteboard: id) { + msg_send![self, writeToPasteboard:pasteboard] + } +} + +pub const NSVariableStatusItemLength: CGFloat = -1.0; +pub const NSSquareStatusItemLength: CGFloat = -2.0; + +pub trait NSStatusItem: Sized { + unsafe fn statusBar(self) -> id ; + unsafe fn button(self) -> id ; + unsafe fn menu(self) -> id; + unsafe fn setMenu_(self, menu: id); + unsafe fn length(self) -> CGFloat; + unsafe fn setLength_(self, length: CGFloat); +} + +impl NSStatusItem for id { + unsafe fn statusBar(self) -> id { + msg_send![self, statusBar] + } + + unsafe fn button(self) -> id { + msg_send![self, button] + } + + unsafe fn menu(self) -> id { + msg_send![self, menu] + } + + unsafe fn setMenu_(self, menu: id) { + msg_send![self, setMenu:menu] + } + + unsafe fn length(self) -> CGFloat { + msg_send![self, length] + } + + unsafe fn setLength_(self, length: CGFloat) { + msg_send![self, setLength: length] + } +} + +pub trait NSStatusBar: Sized { + unsafe fn systemStatusBar(_: Self) -> id { + msg_send![class!(NSStatusBar), systemStatusBar] + } + + unsafe fn statusItemWithLength_(self, length: CGFloat) -> id ; + unsafe fn removeStatusItem_(self, item: id ); + unsafe fn isVertical(self) -> BOOL; +} + +impl NSStatusBar for id { + unsafe fn statusItemWithLength_(self, length: CGFloat) -> id { + msg_send![self, 
statusItemWithLength:length] + } + + unsafe fn removeStatusItem_(self, item: id ) { + msg_send![self, removeStatusItem:item] + } + + unsafe fn isVertical(self) -> BOOL { + msg_send![self, isVertical] + } +} + +extern { + pub fn NSRectFill(rect: NSRect); +} + +pub trait NSTextField: Sized { + unsafe fn alloc(_: Self) -> id { + msg_send![class!(NSTextField), alloc] + } + unsafe fn initWithFrame_(self, frameRect: NSRect) -> id; + unsafe fn setEditable_(self, editable: BOOL); + unsafe fn setStringValue_(self, label: id ); +} + +impl NSTextField for id { + unsafe fn initWithFrame_(self, frameRect: NSRect) -> id { + msg_send![self, initWithFrame:frameRect] + } + unsafe fn setEditable_(self, editable: BOOL) { + msg_send![self, setEditable:editable]; + } + unsafe fn setStringValue_(self, label: id) { + msg_send![self, setStringValue:label]; + } +} + +#[repr(u64)] +pub enum NSTabViewType { + NSTopTabsBezelBorder = 0, + NSLeftTabsBezelBorder = 1, + NSBottomTabsBezelBorder = 2, + NSRightTabsBezelBorder = 3, + NSNoTabsBezelBorder = 4, + NSNoTabsLineBorder = 5, + NSNoTabsNoBorder = 6 +} + +pub trait NSTabView: Sized { + unsafe fn new(_: Self) -> id { + msg_send![class!(NSTabView), new] + } + + unsafe fn initWithFrame_(self, frameRect: NSRect) -> id; + unsafe fn addTabViewItem_(self, tabViewItem: id); + unsafe fn insertTabViewItem_atIndex_(self,tabViewItem:id, index:NSInteger); + unsafe fn removeTabViewItem_(self,tabViewItem:id); + unsafe fn indexOfTabViewItem_(self, tabViewItem:id) -> id; + unsafe fn indexOfTabViewItemWithIdentifier_(self,identifier:id) -> id; + unsafe fn numberOfTabViewItems(self) -> id; + unsafe fn tabViewItemAtIndex_(self,index:id) -> id; + unsafe fn tabViewItems(self) -> id; + unsafe fn selectFirstTabViewItem_(self,sender:id); + unsafe fn selectLastTabViewItem_(self,sender:id); + unsafe fn selectNextTabViewItem_(self, sender:id); + unsafe fn selectPreviousTabViewItem_(self,sender:id); + unsafe fn selectTabViewItem_(self,tabViewItem:id); + unsafe fn selectTabViewItemAtIndex_(self,index:id); + unsafe fn selectTabViewItemWithIdentifier_(self,identifier:id); + unsafe fn selectedTabViewItem(self) -> id; + unsafe fn takeSelectedTabViewItemFromSender_(self,sender:id); + unsafe fn font(self) -> id; + unsafe fn setFont_(self, font:id); + unsafe fn tabViewType(self) -> NSTabViewType; + unsafe fn setTabViewType_(self,tabViewType: NSTabViewType); + unsafe fn controlTint(self) -> id; + unsafe fn setControlTint_(self,controlTint:id); + unsafe fn drawsBackground(self) -> BOOL; + unsafe fn setDrawsBackground_(self,drawsBackground:BOOL); + unsafe fn minimumSize(self) -> id; + unsafe fn contentRect(self) -> id; + unsafe fn controlSize(self) -> id; + unsafe fn setControlSize_(self,controlSize:id); + unsafe fn allowsTruncatedLabels(self) -> BOOL; + unsafe fn setAllowsTruncatedLabels_(self, allowTruncatedLabels:BOOL); + unsafe fn setDelegate_(self, delegate:id); + unsafe fn delegate(self) -> id ; + unsafe fn tabViewAtPoint_(self, point:id) -> id; +} + +impl NSTabView for id { + unsafe fn initWithFrame_(self, frameRect: NSRect) -> id { + msg_send![self, initWithFrame:frameRect] + } + + unsafe fn addTabViewItem_(self, tabViewItem: id) { + msg_send![self, addTabViewItem:tabViewItem] + } + unsafe fn insertTabViewItem_atIndex_(self, tabViewItem: id,index:NSInteger) { + msg_send![self, insertTabViewItem:tabViewItem atIndex:index] + } + unsafe fn removeTabViewItem_(self,tabViewItem:id){ + msg_send![self, removeTabViewItem:tabViewItem] + } + + unsafe fn indexOfTabViewItem_(self, tabViewItem:id) -> id{ + 
msg_send![self, indexOfTabViewItem:tabViewItem] + } + + unsafe fn indexOfTabViewItemWithIdentifier_(self,identifier:id) -> id{ + msg_send![self, indexOfTabViewItemWithIdentifier:identifier] + } + unsafe fn numberOfTabViewItems(self) -> id{ + msg_send![self, numberOfTabViewItems] + } + + unsafe fn tabViewItemAtIndex_(self,index:id)->id{ + msg_send![self, tabViewItemAtIndex:index] + } + + unsafe fn tabViewItems(self)->id{ + msg_send![self, tabViewItems] + } + + unsafe fn selectFirstTabViewItem_(self,sender:id){ + msg_send![self, selectFirstTabViewItem:sender] + } + + unsafe fn selectLastTabViewItem_(self,sender:id){ + msg_send![self, selectLastTabViewItem:sender] + } + unsafe fn selectNextTabViewItem_(self, sender:id){ + msg_send![self, selectNextTabViewItem:sender] + } + unsafe fn selectPreviousTabViewItem_(self,sender:id){ + msg_send![self, selectPreviousTabViewItem:sender] + } + + unsafe fn selectTabViewItem_(self,tabViewItem:id){ + msg_send![self, selectTabViewItem:tabViewItem] + } + + unsafe fn selectTabViewItemAtIndex_(self,index:id){ + msg_send![self, selectTabViewItemAtIndex:index] + } + unsafe fn selectTabViewItemWithIdentifier_(self,identifier:id){ + msg_send![self, selectTabViewItemWithIdentifier:identifier] + } + unsafe fn selectedTabViewItem(self) -> id{ + msg_send![self, selectedTabViewItem] + } + unsafe fn takeSelectedTabViewItemFromSender_(self,sender:id){ + msg_send![self, takeSelectedTabViewItemFromSender:sender] + } + + unsafe fn font(self)->id{ + msg_send![self, font] + } + + unsafe fn setFont_(self, font:id){ + msg_send![self, setFont:font] + } + + unsafe fn tabViewType(self)->NSTabViewType{ + msg_send![self, tabViewType] + } + unsafe fn setTabViewType_(self,tabViewType: NSTabViewType){ + msg_send![self, setTabViewType:tabViewType] + } + + unsafe fn controlTint(self) -> id{ + msg_send![self, controlTint] + } + unsafe fn setControlTint_(self,controlTint:id){ + msg_send![self, setControlTint:controlTint] + } + + unsafe fn drawsBackground(self) -> BOOL{ + msg_send![self, drawsBackground] + } + unsafe fn setDrawsBackground_(self,drawsBackground:BOOL){ + msg_send![self, setDrawsBackground:drawsBackground as libc::c_int] + } + + unsafe fn minimumSize(self) -> id{ + msg_send![self, minimumSize] + } + unsafe fn contentRect(self) -> id{ + msg_send![self, contentRect] + } + unsafe fn controlSize(self) -> id{ + msg_send![self, controlSize] + } + unsafe fn setControlSize_(self,controlSize:id){ + msg_send![self, setControlSize:controlSize] + } + + unsafe fn allowsTruncatedLabels(self) -> BOOL{ + msg_send![self, allowsTruncatedLabels] + } + unsafe fn setAllowsTruncatedLabels_(self, allowTruncatedLabels:BOOL){ + msg_send![self, setAllowsTruncatedLabels:allowTruncatedLabels as libc::c_int] + } + + unsafe fn setDelegate_(self, delegate:id){ + msg_send![self, setDelegate:delegate] + } + unsafe fn delegate(self) -> id { + msg_send![self, delegate] + } + + unsafe fn tabViewAtPoint_(self, point:id) -> id{ + msg_send![self, tabViewAtPoint:point] + } +} + +#[repr(u64)] +pub enum NSTabState { + NSSelectedTab = 0, + NSBackgroundTab = 1, + NSPressedTab = 2 +} + +pub trait NSTabViewItem: Sized { + unsafe fn alloc(_: Self) -> id { + msg_send![class!(NSTabViewItem), alloc] + } + unsafe fn new(_: Self) -> id { + msg_send![class!(NSTabViewItem), new] + } + + unsafe fn initWithIdentifier_(self, identifier:id) -> id; + unsafe fn drawLabel_inRect_(self,shouldTruncateLabel:BOOL,labelRect:NSRect); + unsafe fn label(self) -> id; + unsafe fn setLabel_(self,label:id); + unsafe fn sizeOfLabel_(self, 
computeMin:BOOL); + unsafe fn tabState(self) -> NSTabState; + unsafe fn identifier(self)-> id; + unsafe fn setIdentifier_(self,identifier:id); + unsafe fn color(self)-> id; + unsafe fn setColor_(self,color:id); + unsafe fn view(self) -> id; + unsafe fn setView_(self, view:id); + unsafe fn initialFirstResponder(self)->id; + unsafe fn setInitialFirstResponder_(self,initialFirstResponder:id); + unsafe fn tabView(self) -> id; + unsafe fn tooltip(self) -> id; + unsafe fn setTooltip_(self,toolTip:id); +} + +impl NSTabViewItem for id { + unsafe fn initWithIdentifier_(self, identifier: id) -> id { + msg_send![self, initWithIdentifier:identifier] + } + + unsafe fn drawLabel_inRect_(self, shouldTruncateLabel:BOOL,labelRect:NSRect){ + msg_send![self, drawLabel:shouldTruncateLabel as libc::c_int inRect:labelRect] + } + + unsafe fn label(self)->id{ + msg_send![self, label] + } + unsafe fn setLabel_(self,label : id){ + msg_send![self, setLabel:label] + } + + unsafe fn sizeOfLabel_(self,computeMin:BOOL){ + msg_send![self, sizeOfLabel:computeMin as libc::c_int] + } + + unsafe fn tabState(self) -> NSTabState{ + msg_send![self, tabState] + } + + unsafe fn identifier(self)-> id { + msg_send![self, identifier] + } + + unsafe fn setIdentifier_(self,identifier:id){ + msg_send![self, setIdentifier:identifier] + } + + unsafe fn color(self)-> id{ + msg_send![self, color] + } + + unsafe fn setColor_(self,color:id){ + msg_send![self, setColor:color] + } + + unsafe fn view(self) -> id { + msg_send![self, view] + } + + unsafe fn setView_(self, view:id){ + msg_send![self, setView:view] + } + + unsafe fn initialFirstResponder(self)->id{ + msg_send![self, initialFirstResponder] + } + + unsafe fn setInitialFirstResponder_(self,initialFirstResponder:id){ + msg_send![self, setInitialFirstResponder:initialFirstResponder] + } + + unsafe fn tabView(self) -> id{ + msg_send![self, tabView] + } + + unsafe fn tooltip(self) -> id{ + msg_send![self, toolTip] + } + + unsafe fn setTooltip_(self,toolTip:id){ + msg_send![self, setToolTip:toolTip] + } +} + +pub trait NSLayoutConstraint: Sized { + unsafe fn activateConstraints(_: Self, constraints: id) -> id; +} + +impl NSLayoutConstraint for id { + unsafe fn activateConstraints(_: Self, constraints: id) -> id { + msg_send![class!(NSLayoutConstraint), activateConstraints:constraints] + } +} + +pub trait NSLayoutDimension: Sized { + unsafe fn constraintEqualToConstant(self, c: CGFloat) -> id; + unsafe fn constraintLessThanOrEqualToConstant(self, c: CGFloat) -> id; + unsafe fn constraintGreaterThanOrEqualToConstant(self, c: CGFloat) -> id; +} + +impl NSLayoutDimension for id { + unsafe fn constraintEqualToConstant(self, c: CGFloat) -> id { + msg_send![self, constraintEqualToConstant:c] + } + + unsafe fn constraintLessThanOrEqualToConstant(self, c: CGFloat) -> id { + msg_send![self, constraintLessThanOrEqualToConstant:c] + } + + unsafe fn constraintGreaterThanOrEqualToConstant(self, c: CGFloat) -> id { + msg_send![self, constraintGreaterThanOrEqualToConstant:c] + } +} + +pub trait NSColorSpace: Sized { + unsafe fn deviceRGBColorSpace(_:Self) -> id; + unsafe fn genericRGBColorSpace(_:Self) -> id; + unsafe fn deviceCMYKColorSpace(_:Self) -> id; + unsafe fn genericCMYKColorSpace(_:Self) -> id; + unsafe fn deviceGrayColorSpace(_:Self) -> id; + unsafe fn genericGrayColorSpace(_:Self) -> id; + unsafe fn sRGBColorSpace(_:Self) -> id; + unsafe fn extendedSRGBColorSpace(_:Self) -> id; + unsafe fn displayP3ColorSpace(_:Self) -> id; + unsafe fn genericGamma22GrayColorSpace(_:Self) -> id; + unsafe fn 
extendedGenericGamma22GrayColorSpace(_:Self) -> id; + unsafe fn adobeRGB1998ColorSpace(_:Self) -> id; + + unsafe fn alloc(_: Self) -> id; + + unsafe fn initWithCGColorSpace_(self, cg_color_space: *const c_void ) -> id; + unsafe fn CGColorSpace(self) -> *const c_void ; + unsafe fn localizedName(self) -> id; +} + +impl NSColorSpace for id { + unsafe fn deviceRGBColorSpace(_:Self) -> id { + msg_send![class!(NSColorSpace), deviceRGBColorSpace] + } + unsafe fn genericRGBColorSpace(_:Self) -> id { + msg_send![class!(NSColorSpace), genericRGBColorSpace] + } + unsafe fn deviceCMYKColorSpace(_:Self) -> id { + msg_send![class!(NSColorSpace), deviceCMYKColorSpace] + } + unsafe fn genericCMYKColorSpace(_:Self) -> id { + msg_send![class!(NSColorSpace), genericCMYKColorSpace] + } + unsafe fn deviceGrayColorSpace(_:Self) -> id { + msg_send![class!(NSColorSpace), deviceGrayColorSpace] + } + unsafe fn genericGrayColorSpace(_:Self) -> id { + msg_send![class!(NSColorSpace), genericGrayColorSpace] + } + unsafe fn sRGBColorSpace(_:Self) -> id { + msg_send![class!(NSColorSpace), sRGBColorSpace] + } + unsafe fn extendedSRGBColorSpace(_:Self) -> id { + msg_send![class!(NSColorSpace), extendedSRGBColorSpace] + } + unsafe fn displayP3ColorSpace(_:Self) -> id { + msg_send![class!(NSColorSpace), displayP3ColorSpace] + } + unsafe fn genericGamma22GrayColorSpace(_:Self) -> id { + msg_send![class!(NSColorSpace), genericGamma22GrayColorSpace] + } + unsafe fn extendedGenericGamma22GrayColorSpace(_:Self) -> id { + msg_send![class!(NSColorSpace), extendedGenericGamma22GrayColorSpace] + } + unsafe fn adobeRGB1998ColorSpace(_:Self) -> id { + msg_send![class!(NSColorSpace), adobeRGB1998ColorSpace] + } + + unsafe fn alloc(_: Self) -> id { + msg_send![class!(NSColorSpace), alloc] + } + + unsafe fn initWithCGColorSpace_(self, cg_color_space: *const c_void ) -> id { + msg_send![self, initWithCGColorSpace:cg_color_space] + } + unsafe fn CGColorSpace(self) -> *const c_void { + msg_send![self, CGColorSpace] + } + unsafe fn localizedName(self) -> id { + msg_send![self, localizedName] + } +} + +pub trait NSColor: Sized { + unsafe fn clearColor(_: Self) -> id; + unsafe fn colorWithRed_green_blue_alpha_(_:Self, r: CGFloat, g: CGFloat, b: CGFloat, a: CGFloat) -> id; + unsafe fn colorWithSRGBRed_green_blue_alpha_(_:Self, r: CGFloat, g: CGFloat, b: CGFloat, a: CGFloat) -> id; + unsafe fn colorWithDeviceRed_green_blue_alpha_(_:Self, r: CGFloat, g: CGFloat, b: CGFloat, a: CGFloat) -> id; + unsafe fn colorWithDisplayP3Red_green_blue_alpha_(_:Self, r: CGFloat, g: CGFloat, b: CGFloat, a: CGFloat) -> id; + unsafe fn colorWithCalibratedRed_green_blue_alpha_(_:Self, r: CGFloat, g: CGFloat, b: CGFloat, a: CGFloat) -> id; + + unsafe fn colorUsingColorSpace_(self, color_space: id) -> id; + + unsafe fn alphaComponent(self) -> CGFloat; + unsafe fn whiteComponent(self) -> CGFloat; + unsafe fn redComponent(self) -> CGFloat; + unsafe fn greenComponent(self) -> CGFloat; + unsafe fn blueComponent(self) -> CGFloat; + unsafe fn cyanComponent(self) -> CGFloat; + unsafe fn magentaComponent(self) -> CGFloat; + unsafe fn yellowComponent(self) -> CGFloat; + unsafe fn blackComponent(self) -> CGFloat; + unsafe fn hueComponent(self) -> CGFloat; + unsafe fn saturationComponent(self) -> CGFloat; + unsafe fn brightnessComponent(self) -> CGFloat; +} + +impl NSColor for id { + unsafe fn clearColor(_: Self) -> id { + msg_send![class!(NSColor), clearColor] + } + unsafe fn colorWithRed_green_blue_alpha_(_:Self, r: CGFloat, g: CGFloat, b: CGFloat, a: CGFloat) -> id { + 
msg_send![class!(NSColor), colorWithRed:r green:g blue:b alpha:a] + } + unsafe fn colorWithSRGBRed_green_blue_alpha_(_:Self, r: CGFloat, g: CGFloat, b: CGFloat, a: CGFloat) -> id { + msg_send![class!(NSColor), colorWithSRGBRed:r green:g blue:b alpha:a] + } + unsafe fn colorWithDeviceRed_green_blue_alpha_(_:Self, r: CGFloat, g: CGFloat, b: CGFloat, a: CGFloat) -> id { + msg_send![class!(NSColor), colorWithDeviceRed:r green:g blue:b alpha:a] + } + unsafe fn colorWithDisplayP3Red_green_blue_alpha_(_:Self, r: CGFloat, g: CGFloat, b: CGFloat, a: CGFloat) -> id { + msg_send![class!(NSColor), colorWithDisplayP3Red:r green:g blue:b alpha:a] + } + unsafe fn colorWithCalibratedRed_green_blue_alpha_(_:Self, r: CGFloat, g: CGFloat, b: CGFloat, a: CGFloat) -> id { + msg_send![class!(NSColor), colorWithCalibratedRed:r green:g blue:b alpha:a] + } + + unsafe fn colorUsingColorSpace_(self, color_space: id) -> id { + msg_send![self, colorUsingColorSpace:color_space] + } + + unsafe fn alphaComponent(self) -> CGFloat { + msg_send![self, alphaComponent] + } + unsafe fn whiteComponent(self) -> CGFloat { + msg_send![self, whiteComponent] + } + unsafe fn redComponent(self) -> CGFloat { + msg_send![self, redComponent] + } + unsafe fn greenComponent(self) -> CGFloat { + msg_send![self, greenComponent] + } + unsafe fn blueComponent(self) -> CGFloat { + msg_send![self, blueComponent] + } + unsafe fn cyanComponent(self) -> CGFloat { + msg_send![self, cyanComponent] + } + unsafe fn magentaComponent(self) -> CGFloat { + msg_send![self, magentaComponent] + } + unsafe fn yellowComponent(self) -> CGFloat { + msg_send![self, yellowComponent] + } + unsafe fn blackComponent(self) -> CGFloat { + msg_send![self, blackComponent] + } + unsafe fn hueComponent(self) -> CGFloat { + msg_send![self, hueComponent] + } + unsafe fn saturationComponent(self) -> CGFloat { + msg_send![self, saturationComponent] + } + unsafe fn brightnessComponent(self) -> CGFloat { + msg_send![self, brightnessComponent] + } +} + +pub trait NSToolbar: Sized { + unsafe fn alloc(_: Self) -> id { + msg_send![class!(NSToolbar), alloc] + } + + unsafe fn init_(self) -> id ; + unsafe fn initWithIdentifier_(self, identifier: id) -> id ; + + unsafe fn showsBaselineSeparator(self) -> BOOL; + unsafe fn setShowsBaselineSeparator_(self, value: BOOL); +} + +impl NSToolbar for id { + unsafe fn init_(self) -> id { + msg_send![self, init] + } + + unsafe fn initWithIdentifier_(self, identifier: id) -> id { + msg_send![self, initWithIdentifier:identifier] + } + + unsafe fn showsBaselineSeparator(self) -> BOOL { + msg_send![self, showsBaselineSeparator] + } + + unsafe fn setShowsBaselineSeparator_(self, value: BOOL) { + msg_send![self, setShowsBaselineSeparator:value] + } +} + +pub trait NSSpellChecker : Sized { + unsafe fn sharedSpellChecker(_: Self) -> id; + unsafe fn checkSpellingOfString_startingAt(self, + stringToCheck: id, + startingOffset: NSInteger) -> NSRange; + unsafe fn checkSpellingOfString_startingAt_language_wrap_inSpellDocumentWithTag_wordCount( + self, + stringToCheck: id, + startingOffset: NSInteger, + language: id, + wrapFlag: BOOL, + tag: NSInteger) -> (NSRange, NSInteger); + unsafe fn uniqueSpellDocumentTag(_: Self) -> NSInteger; + unsafe fn closeSpellDocumentWithTag(self, tag: NSInteger); + unsafe fn ignoreWord_inSpellDocumentWithTag(self, wordToIgnore: id, tag: NSInteger); +} + +impl NSSpellChecker for id { + unsafe fn sharedSpellChecker(_: Self) -> id { + msg_send![class!(NSSpellChecker), sharedSpellChecker] + } + + unsafe fn 
checkSpellingOfString_startingAt(self, + stringToCheck: id, + startingOffset: NSInteger) -> NSRange { + msg_send![self, checkSpellingOfString:stringToCheck startingAt:startingOffset] + } + + unsafe fn checkSpellingOfString_startingAt_language_wrap_inSpellDocumentWithTag_wordCount( + self, + stringToCheck: id, + startingOffset: NSInteger, + language: id, + wrapFlag: BOOL, + tag: NSInteger) -> (NSRange, NSInteger) { + let mut wordCount = 0; + let range = msg_send![self, + checkSpellingOfString:stringToCheck + startingAt:startingOffset + language:language + wrap:wrapFlag + inSpellDocumentWithTag:tag + wordCount:&mut wordCount + ]; + (range, wordCount) + } + + unsafe fn uniqueSpellDocumentTag(_: Self) -> NSInteger { + msg_send![class!(NSSpellChecker), uniqueSpellDocumentTag] + } + + unsafe fn closeSpellDocumentWithTag(self, tag: NSInteger) { + msg_send![self, closeSpellDocumentWithTag:tag] + } + + unsafe fn ignoreWord_inSpellDocumentWithTag(self, wordToIgnore: id, tag: NSInteger) { + msg_send![self, ignoreWord:wordToIgnore inSpellDocumentWithTag:tag] + } +} + +pub trait NSNib: Sized { + unsafe fn alloc(_: Self) -> id { + msg_send![class!(NSNib), alloc] + } + + unsafe fn initWithNibNamed_bundle_(self, name: id, bundle: id) -> id; +} + +impl NSNib for id { + unsafe fn initWithNibNamed_bundle_(self, name: id, bundle: id) -> id { + msg_send![self, initWithNibNamed:name bundle:bundle] + } +} + + +#[cfg(test)] +mod test { + use super::*; + + #[test] + pub fn test_nsapp() { + unsafe { + let _nsApp = NSApp(); + } + } +} diff --git a/third_party/rust/cocoa/src/base.rs b/third_party/rust/cocoa/src/base.rs new file mode 100644 index 000000000000..e8f357f1683b --- /dev/null +++ b/third_party/rust/cocoa/src/base.rs @@ -0,0 +1,28 @@ + + + + + + + + + +use objc::runtime; + +pub use objc::runtime::{BOOL, NO, YES}; + +pub type Class = *mut runtime::Class; +#[allow(non_camel_case_types)] +pub type id = *mut runtime::Object; +pub type SEL = runtime::Sel; + +#[allow(non_upper_case_globals)] +pub const nil: id = 0 as id; +#[allow(non_upper_case_globals)] +pub const Nil: Class = 0 as Class; + + +#[inline] +pub fn selector(name: &str) -> SEL { + runtime::Sel::register(name) +} diff --git a/third_party/rust/cocoa/src/foundation.rs b/third_party/rust/cocoa/src/foundation.rs new file mode 100644 index 000000000000..29c9f0b1746d --- /dev/null +++ b/third_party/rust/cocoa/src/foundation.rs @@ -0,0 +1,1318 @@ + + + + + + + + + +#![allow(non_upper_case_globals)] + +use std::ptr; +use std::os::raw::c_void; +use base::{id, BOOL, NO, SEL, nil}; +use block::Block; +use libc; + + +#[cfg(target_pointer_width = "32")] +pub type NSInteger = libc::c_int; +#[cfg(target_pointer_width = "32")] +pub type NSUInteger = libc::c_uint; + +#[cfg(target_pointer_width = "64")] +pub type NSInteger = libc::c_long; +#[cfg(target_pointer_width = "64")] +pub type NSUInteger = libc::c_ulong; + +pub const NSIntegerMax: NSInteger = NSInteger::max_value(); +pub const NSNotFound: NSInteger = NSIntegerMax; + +const UTF8_ENCODING: usize = 4; + +#[cfg(target_os = "macos")] +mod macos { + use std::mem; + use base::id; + use core_graphics::base::CGFloat; + use core_graphics::geometry::CGRect; + use objc; + + #[repr(C)] + #[derive(Copy, Clone)] + pub struct NSPoint { + pub x: f64, + pub y: f64, + } + + impl NSPoint { + #[inline] + pub fn new(x: f64, y: f64) -> NSPoint { + NSPoint { + x: x, + y: y, + } + } + } + + unsafe impl objc::Encode for NSPoint { + fn encode() -> objc::Encoding { + let encoding = format!("{{CGPoint={}{}}}", + f64::encode().as_str(), + 
f64::encode().as_str()); + unsafe { objc::Encoding::from_str(&encoding) } + } + } + + #[repr(C)] + #[derive(Copy, Clone)] + pub struct NSSize { + pub width: f64, + pub height: f64, + } + + impl NSSize { + #[inline] + pub fn new(width: f64, height: f64) -> NSSize { + NSSize { + width: width, + height: height, + } + } + } + + unsafe impl objc::Encode for NSSize { + fn encode() -> objc::Encoding { + let encoding = format!("{{CGSize={}{}}}", + f64::encode().as_str(), + f64::encode().as_str()); + unsafe { objc::Encoding::from_str(&encoding) } + } + } + + #[repr(C)] + #[derive(Copy, Clone)] + pub struct NSRect { + pub origin: NSPoint, + pub size: NSSize, + } + + impl NSRect { + #[inline] + pub fn new(origin: NSPoint, size: NSSize) -> NSRect { + NSRect { + origin: origin, + size: size + } + } + + #[inline] + pub fn as_CGRect(&self) -> &CGRect { + unsafe { + mem::transmute::<&NSRect, &CGRect>(self) + } + } + + #[inline] + pub fn inset(&self, x: CGFloat, y: CGFloat) -> NSRect { + unsafe { + NSInsetRect(*self, x, y) + } + } + } + + unsafe impl objc::Encode for NSRect { + fn encode() -> objc::Encoding { + let encoding = format!("{{CGRect={}{}}}", + NSPoint::encode().as_str(), + NSSize::encode().as_str()); + unsafe { objc::Encoding::from_str(&encoding) } + } + } + + + #[repr(u32)] + pub enum NSRectEdge { + NSRectMinXEdge, + NSRectMinYEdge, + NSRectMaxXEdge, + NSRectMaxYEdge, + } + + #[link(name = "Foundation", kind = "framework")] + extern { + fn NSInsetRect(rect: NSRect, x: CGFloat, y: CGFloat) -> NSRect; + } + + pub trait NSValue: Sized { + unsafe fn valueWithPoint(_: Self, point: NSPoint) -> id { + msg_send![class!(NSValue), valueWithPoint:point] + } + + unsafe fn valueWithSize(_: Self, size: NSSize) -> id { + msg_send![class!(NSValue), valueWithSize:size] + } + } + + impl NSValue for id { + } +} + +#[cfg(target_os = "macos")] +pub use self::macos::*; + +#[repr(C)] +#[derive(Copy, Clone)] +pub struct NSRange { + pub location: NSUInteger, + pub length: NSUInteger, +} + +impl NSRange { + #[inline] + pub fn new(location: NSUInteger, length: NSUInteger) -> NSRange { + NSRange { + location: location, + length: length + } + } +} + +#[link(name = "Foundation", kind = "framework")] +extern { + pub static NSDefaultRunLoopMode: id; +} + +pub trait NSAutoreleasePool: Sized { + unsafe fn new(_: Self) -> id { + msg_send![class!(NSAutoreleasePool), new] + } + + unsafe fn autorelease(self) -> Self; + unsafe fn drain(self); +} + +impl NSAutoreleasePool for id { + unsafe fn autorelease(self) -> id { + msg_send![self, autorelease] + } + + unsafe fn drain(self) { + msg_send![self, drain] + } +} + +pub trait NSProcessInfo: Sized { + unsafe fn processInfo(_: Self) -> id { + msg_send![class!(NSProcessInfo), processInfo] + } + + unsafe fn processName(self) -> id; +} + +impl NSProcessInfo for id { + unsafe fn processName(self) -> id { + msg_send![self, processName] + } +} + +pub type NSTimeInterval = libc::c_double; + +pub trait NSArray: Sized { + unsafe fn array(_: Self) -> id { + msg_send![class!(NSArray), array] + } + + unsafe fn arrayWithObjects(_: Self, objects: &[id]) -> id { + msg_send![class!(NSArray), arrayWithObjects:objects.as_ptr() + count:objects.len()] + } + + unsafe fn arrayWithObject(_: Self, object: id) -> id { + msg_send![class!(NSArray), arrayWithObject:object] + } + + unsafe fn init(self) -> id; + + unsafe fn count(self) -> NSUInteger; + + unsafe fn arrayByAddingObjectFromArray(self, object: id) -> id; + unsafe fn arrayByAddingObjectsFromArray(self, objects: id) -> id; + unsafe fn objectAtIndex(self, 
index: NSUInteger) -> id; +} + +impl NSArray for id { + unsafe fn init(self) -> id { + msg_send![self, init] + } + + unsafe fn count(self) -> NSUInteger { + msg_send![self, count] + } + + unsafe fn arrayByAddingObjectFromArray(self, object: id) -> id { + msg_send![self, arrayByAddingObject:object] + } + + unsafe fn arrayByAddingObjectsFromArray(self, objects: id) -> id { + msg_send![self, arrayByAddingObjectsFromArray:objects] + } + + unsafe fn objectAtIndex(self, index: NSUInteger) -> id { + msg_send![self, objectAtIndex:index] + } +} + +pub trait NSDictionary: Sized { + unsafe fn dictionary(_: Self) -> id { + msg_send![class!(NSDictionary), dictionary] + } + + unsafe fn dictionaryWithContentsOfFile_(_: Self, path: id) -> id { + msg_send![class!(NSDictionary), dictionaryWithContentsOfFile:path] + } + + unsafe fn dictionaryWithContentsOfURL_(_: Self, aURL: id) -> id { + msg_send![class!(NSDictionary), dictionaryWithContentsOfURL:aURL] + } + + unsafe fn dictionaryWithDictionary_(_: Self, otherDictionary: id) -> id { + msg_send![class!(NSDictionary), dictionaryWithDictionary:otherDictionary] + } + + unsafe fn dictionaryWithObject_forKey_(_: Self, anObject: id, aKey: id) -> id { + msg_send![class!(NSDictionary), dictionaryWithObject:anObject forKey:aKey] + } + + unsafe fn dictionaryWithObjects_forKeys_(_: Self, objects: id, keys: id) -> id { + msg_send![class!(NSDictionary), dictionaryWithObjects:objects forKeys:keys] + } + + unsafe fn dictionaryWithObjects_forKeys_count_(_: Self, objects: *const id, keys: *const id, count: NSUInteger) -> id { + msg_send![class!(NSDictionary), dictionaryWithObjects:objects forKeys:keys count:count] + } + + unsafe fn dictionaryWithObjectsAndKeys_(_: Self, firstObject: id) -> id { + msg_send![class!(NSDictionary), dictionaryWithObjectsAndKeys:firstObject] + } + + unsafe fn init(self) -> id; + unsafe fn initWithContentsOfFile_(self, path: id) -> id; + unsafe fn initWithContentsOfURL_(self, aURL: id) -> id; + unsafe fn initWithDictionary_(self, otherDictionary: id) -> id; + unsafe fn initWithDictionary_copyItems_(self, otherDictionary: id, flag: BOOL) -> id; + unsafe fn initWithObjects_forKeys_(self, objects: id, keys: id) -> id; + unsafe fn initWithObjects_forKeys_count_(self, objects: id, keys: id, count: NSUInteger) -> id; + unsafe fn initWithObjectsAndKeys_(self, firstObject: id) -> id; + + unsafe fn sharedKeySetForKeys_(_: Self, keys: id) -> id { + msg_send![class!(NSDictionary), sharedKeySetForKeys:keys] + } + + unsafe fn count(self) -> NSUInteger; + + unsafe fn isEqualToDictionary_(self, otherDictionary: id) -> BOOL; + + unsafe fn allKeys(self) -> id; + unsafe fn allKeysForObject_(self, anObject: id) -> id; + unsafe fn allValues(self) -> id; + unsafe fn objectForKey_(self, aKey: id) -> id; + unsafe fn objectForKeyedSubscript_(self, key: id) -> id; + unsafe fn objectsForKeys_notFoundMarker_(self, keys: id, anObject: id) -> id; + unsafe fn valueForKey_(self, key: id) -> id; + + unsafe fn keyEnumerator(self) -> id; + unsafe fn objectEnumerator(self) -> id; + unsafe fn enumerateKeysAndObjectsUsingBlock_(self, block: *mut Block<(id, id, *mut BOOL), ()>); + unsafe fn enumerateKeysAndObjectsWithOptions_usingBlock_(self, opts: NSEnumerationOptions, + block: *mut Block<(id, id, *mut BOOL), ()>); + + unsafe fn keysSortedByValueUsingSelector_(self, comparator: SEL) -> id; + unsafe fn keysSortedByValueUsingComparator_(self, cmptr: NSComparator) -> id; + unsafe fn keysSortedByValueWithOptions_usingComparator_(self, opts: NSEnumerationOptions, cmptr: 
NSComparator) -> id; + + unsafe fn keysOfEntriesPassingTest_(self, predicate: *mut Block<(id, id, *mut BOOL), BOOL>) -> id; + unsafe fn keysOfEntriesWithOptions_PassingTest_(self, opts: NSEnumerationOptions, + predicate: *mut Block<(id, id, *mut BOOL), BOOL>) -> id; + + unsafe fn writeToFile_atomically_(self, path: id, flag: BOOL) -> BOOL; + unsafe fn writeToURL_atomically_(self, aURL: id, flag: BOOL) -> BOOL; + + unsafe fn fileCreationDate(self) -> id; + unsafe fn fileExtensionHidden(self) -> BOOL; + unsafe fn fileGroupOwnerAccountID(self) -> id; + unsafe fn fileGroupOwnerAccountName(self) -> id; + unsafe fn fileIsAppendOnly(self) -> BOOL; + unsafe fn fileIsImmutable(self) -> BOOL; + unsafe fn fileModificationDate(self) -> id; + unsafe fn fileOwnerAccountID(self) -> id; + unsafe fn fileOwnerAccountName(self) -> id; + unsafe fn filePosixPermissions(self) -> NSUInteger; + unsafe fn fileSize(self) -> libc::c_ulonglong; + unsafe fn fileSystemFileNumber(self) -> NSUInteger; + unsafe fn fileSystemNumber(self) -> NSInteger; + unsafe fn fileType(self) -> id; + + unsafe fn description(self) -> id; + unsafe fn descriptionInStringsFileFormat(self) -> id; + unsafe fn descriptionWithLocale_(self, locale: id) -> id; + unsafe fn descriptionWithLocale_indent_(self, locale: id, indent: NSUInteger) -> id; +} + +impl NSDictionary for id { + unsafe fn init(self) -> id { + msg_send![self, init] + } + + unsafe fn initWithContentsOfFile_(self, path: id) -> id { + msg_send![self, initWithContentsOfFile:path] + } + + unsafe fn initWithContentsOfURL_(self, aURL: id) -> id { + msg_send![self, initWithContentsOfURL:aURL] + } + + unsafe fn initWithDictionary_(self, otherDictionary: id) -> id { + msg_send![self, initWithDictionary:otherDictionary] + } + + unsafe fn initWithDictionary_copyItems_(self, otherDictionary: id, flag: BOOL) -> id { + msg_send![self, initWithDictionary:otherDictionary copyItems:flag] + } + + unsafe fn initWithObjects_forKeys_(self, objects: id, keys: id) -> id { + msg_send![self, initWithObjects:objects forKeys:keys] + } + + unsafe fn initWithObjects_forKeys_count_(self, objects: id, keys: id, count: NSUInteger) -> id { + msg_send![self, initWithObjects:objects forKeys:keys count:count] + } + + unsafe fn initWithObjectsAndKeys_(self, firstObject: id) -> id { + msg_send![self, initWithObjectsAndKeys:firstObject] + } + + unsafe fn count(self) -> NSUInteger { + msg_send![self, count] + } + + unsafe fn isEqualToDictionary_(self, otherDictionary: id) -> BOOL { + msg_send![self, isEqualToDictionary:otherDictionary] + } + + unsafe fn allKeys(self) -> id { + msg_send![self, allKeys] + } + + unsafe fn allKeysForObject_(self, anObject: id) -> id { + msg_send![self, allKeysForObject:anObject] + } + + unsafe fn allValues(self) -> id { + msg_send![self, allValues] + } + + unsafe fn objectForKey_(self, aKey: id) -> id { + msg_send![self, objectForKey:aKey] + } + + unsafe fn objectForKeyedSubscript_(self, key: id) -> id { + msg_send![self, objectForKeyedSubscript:key] + } + + unsafe fn objectsForKeys_notFoundMarker_(self, keys: id, anObject: id) -> id { + msg_send![self, objectsForKeys:keys notFoundMarker:anObject] + } + + unsafe fn valueForKey_(self, key: id) -> id { + msg_send![self, valueForKey:key] + } + + unsafe fn keyEnumerator(self) -> id { + msg_send![self, keyEnumerator] + } + + unsafe fn objectEnumerator(self) -> id { + msg_send![self, objectEnumerator] + } + + unsafe fn enumerateKeysAndObjectsUsingBlock_(self, block: *mut Block<(id, id, *mut BOOL), ()>) { + msg_send![self, 
enumerateKeysAndObjectsUsingBlock:block] + } + + unsafe fn enumerateKeysAndObjectsWithOptions_usingBlock_(self, opts: NSEnumerationOptions, + block: *mut Block<(id, id, *mut BOOL), ()>) { + msg_send![self, enumerateKeysAndObjectsWithOptions:opts usingBlock:block] + } + + unsafe fn keysSortedByValueUsingSelector_(self, comparator: SEL) -> id { + msg_send![self, keysSortedByValueUsingSelector:comparator] + } + + unsafe fn keysSortedByValueUsingComparator_(self, cmptr: NSComparator) -> id { + msg_send![self, keysSortedByValueUsingComparator:cmptr] + } + + unsafe fn keysSortedByValueWithOptions_usingComparator_(self, opts: NSEnumerationOptions, cmptr: NSComparator) -> id { + let rv: id = msg_send![self, keysSortedByValueWithOptions:opts usingComparator:cmptr]; + rv + } + + unsafe fn keysOfEntriesPassingTest_(self, predicate: *mut Block<(id, id, *mut BOOL), BOOL>) -> id { + msg_send![self, keysOfEntriesPassingTest:predicate] + } + + unsafe fn keysOfEntriesWithOptions_PassingTest_(self, opts: NSEnumerationOptions, + predicate: *mut Block<(id, id, *mut BOOL), BOOL>) -> id { + msg_send![self, keysOfEntriesWithOptions:opts passingTest:predicate] + } + + unsafe fn writeToFile_atomically_(self, path: id, flag: BOOL) -> BOOL { + msg_send![self, writeToFile:path atomically:flag] + } + + unsafe fn writeToURL_atomically_(self, aURL: id, flag: BOOL) -> BOOL { + msg_send![self, writeToURL:aURL atomically:flag] + } + + unsafe fn fileCreationDate(self) -> id { + msg_send![self, fileCreationDate] + } + + unsafe fn fileExtensionHidden(self) -> BOOL { + msg_send![self, fileExtensionHidden] + } + + unsafe fn fileGroupOwnerAccountID(self) -> id { + msg_send![self, fileGroupOwnerAccountID] + } + + unsafe fn fileGroupOwnerAccountName(self) -> id { + msg_send![self, fileGroupOwnerAccountName] + } + + unsafe fn fileIsAppendOnly(self) -> BOOL { + msg_send![self, fileIsAppendOnly] + } + + unsafe fn fileIsImmutable(self) -> BOOL { + msg_send![self, fileIsImmutable] + } + + unsafe fn fileModificationDate(self) -> id { + msg_send![self, fileModificationDate] + } + + unsafe fn fileOwnerAccountID(self) -> id { + msg_send![self, fileOwnerAccountID] + } + + unsafe fn fileOwnerAccountName(self) -> id { + msg_send![self, fileOwnerAccountName] + } + + unsafe fn filePosixPermissions(self) -> NSUInteger { + msg_send![self, filePosixPermissions] + } + + unsafe fn fileSize(self) -> libc::c_ulonglong { + msg_send![self, fileSize] + } + + unsafe fn fileSystemFileNumber(self) -> NSUInteger { + msg_send![self, fileSystemFileNumber] + } + + unsafe fn fileSystemNumber(self) -> NSInteger { + msg_send![self, fileSystemNumber] + } + + unsafe fn fileType(self) -> id { + msg_send![self, fileType] + } + + unsafe fn description(self) -> id { + msg_send![self, description] + } + + unsafe fn descriptionInStringsFileFormat(self) -> id { + msg_send![self, descriptionInStringsFileFormat] + } + + unsafe fn descriptionWithLocale_(self, locale: id) -> id { + msg_send![self, descriptionWithLocale:locale] + } + + unsafe fn descriptionWithLocale_indent_(self, locale: id, indent: NSUInteger) -> id { + msg_send![self, descriptionWithLocale:locale indent:indent] + } +} + +bitflags! 
{ + pub struct NSEnumerationOptions: libc::c_ulonglong { + const NSEnumerationConcurrent = 1 << 0; + const NSEnumerationReverse = 1 << 1; + } +} + +pub type NSComparator = *mut Block<(id, id), NSComparisonResult>; + +#[repr(isize)] +#[derive(Clone, Copy, Debug, Eq, PartialEq)] +pub enum NSComparisonResult { + NSOrderedAscending = -1, + NSOrderedSame = 0, + NSOrderedDescending = 1 +} + +pub trait NSString: Sized { + unsafe fn alloc(_: Self) -> id { + msg_send![class!(NSString), alloc] + } + + unsafe fn stringByAppendingString_(self, other: id) -> id; + unsafe fn init_str(self, string: &str) -> Self; + unsafe fn UTF8String(self) -> *const libc::c_char; + unsafe fn len(self) -> usize; + unsafe fn isEqualToString(self, other: &str) -> bool; + unsafe fn substringWithRange(self, range: NSRange) -> id; +} + +impl NSString for id { + unsafe fn isEqualToString(self, other: &str) -> bool { + let other = NSString::alloc(nil).init_str(other); + let rv: BOOL = msg_send![self, isEqualToString:other]; + rv != NO + } + + unsafe fn stringByAppendingString_(self, other: id) -> id { + msg_send![self, stringByAppendingString:other] + } + + unsafe fn init_str(self, string: &str) -> id { + return msg_send![self, + initWithBytes:string.as_ptr() + length:string.len() + encoding:UTF8_ENCODING as id]; + } + + unsafe fn len(self) -> usize { + msg_send![self, lengthOfBytesUsingEncoding:UTF8_ENCODING] + } + + unsafe fn UTF8String(self) -> *const libc::c_char { + msg_send![self, UTF8String] + } + + unsafe fn substringWithRange(self, range: NSRange) -> id { + msg_send![self, substringWithRange:range] + } +} + +pub trait NSDate: Sized { + unsafe fn distantPast(_: Self) -> id { + msg_send![class!(NSDate), distantPast] + } + + unsafe fn distantFuture(_: Self) -> id { + msg_send![class!(NSDate), distantFuture] + } +} + +impl NSDate for id { + +} + +#[repr(C)] +struct NSFastEnumerationState { + pub state: libc::c_ulong, + pub items_ptr: *mut id, + pub mutations_ptr: *mut libc::c_ulong, + pub extra: [libc::c_ulong; 5] +} + +const NS_FAST_ENUM_BUF_SIZE: usize = 16; + +pub struct NSFastIterator { + state: NSFastEnumerationState, + buffer: [id; NS_FAST_ENUM_BUF_SIZE], + mut_val: Option<libc::c_ulong>, + len: usize, + idx: usize, + object: id +} + +impl Iterator for NSFastIterator { + type Item = id; + + fn next(&mut self) -> Option<id> { + if self.idx >= self.len { + self.len = unsafe { + msg_send![self.object, countByEnumeratingWithState:&mut self.state objects:self.buffer.as_mut_ptr() count:NS_FAST_ENUM_BUF_SIZE] + }; + self.idx = 0; + } + + let new_mut = unsafe { + *self.state.mutations_ptr + }; + + if let Some(old_mut) = self.mut_val { + assert!(old_mut == new_mut, "The collection was mutated while being enumerated"); + } + + if self.idx < self.len { + let object = unsafe { + *self.state.items_ptr.offset(self.idx as isize) + }; + self.mut_val = Some(new_mut); + self.idx += 1; + Some(object) + } else { + None + } + } +} + +pub trait NSFastEnumeration: Sized { + unsafe fn iter(self) -> NSFastIterator; +} + +impl NSFastEnumeration for id { + unsafe fn iter(self) -> NSFastIterator { + NSFastIterator { + state: NSFastEnumerationState { + state: 0, + items_ptr: ptr::null_mut(), + mutations_ptr: ptr::null_mut(), + extra: [0; 5] + }, + buffer: [nil; NS_FAST_ENUM_BUF_SIZE], + mut_val: None, + len: 0, + idx: 0, + object: self + } + } +} + +pub trait NSRunLoop: Sized { + unsafe fn currentRunLoop() -> Self; + + unsafe fn performSelector_target_argument_order_modes_(self, + aSelector: SEL, + target: id, + anArgument: id, + order: NSUInteger, + modes: id); +} + 
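+// Editor's note: an illustrative sketch, not part of upstream `cocoa`. It
+// shows how the NSString and NSAutoreleasePool bindings above are typically
+// driven; the module and test names are hypothetical and exist only for this
+// example, and it assumes an Objective-C runtime is available at test time.
+#[cfg(test)]
+mod foundation_usage_example {
+    use super::*;
+
+    #[test]
+    fn nsstring_roundtrip() {
+        unsafe {
+            // Objects returned by these initializers are Objective-C objects;
+            // keep them inside an autorelease pool so the test does not leak.
+            let pool = NSAutoreleasePool::new(nil);
+            let s = NSString::alloc(nil).init_str("hello");
+            assert_eq!(s.len(), 5); // UTF-8 byte length via -lengthOfBytesUsingEncoding:
+            assert!(s.isEqualToString("hello")); // wraps -isEqualToString:
+            pool.drain();
+        }
+    }
+}
+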
+impl NSRunLoop for id { + unsafe fn currentRunLoop() -> id { + msg_send![class!(NSRunLoop), currentRunLoop] + } + + unsafe fn performSelector_target_argument_order_modes_(self, + aSelector: SEL, + target: id, + anArgument: id, + order: NSUInteger, + modes: id) { + msg_send![self, performSelector:aSelector + target:target + argument:anArgument + order:order + modes:modes] + } +} + +bitflags! { + pub struct NSURLBookmarkCreationOptions: NSUInteger { + const NSURLBookmarkCreationPreferFileIDResolution = 1 << 8; + const NSURLBookmarkCreationMinimalBookmark = 1 << 9; + const NSURLBookmarkCreationSuitableForBookmarkFile = 1 << 10; + const NSURLBookmarkCreationWithSecurityScope = 1 << 11; + const NSURLBookmarkCreationSecurityScopeAllowOnlyReadAccess = 1 << 12; + } +} + +pub type NSURLBookmarkFileCreationOptions = NSURLBookmarkCreationOptions; + +bitflags! { + pub struct NSURLBookmarkResolutionOptions: NSUInteger { + const NSURLBookmarkResolutionWithoutUI = 1 << 8; + const NSURLBookmarkResolutionWithoutMounting = 1 << 9; + const NSURLBookmarkResolutionWithSecurityScope = 1 << 10; + } +} + + +pub trait NSURL: Sized { + unsafe fn alloc(_: Self) -> id; + + unsafe fn URLWithString_(_:Self, string: id) -> id; + unsafe fn initWithString_(self, string: id) -> id; + unsafe fn URLWithString_relativeToURL_(_:Self, string: id, url: id) -> id; + unsafe fn initWithString_relativeToURL_(self, string: id, url: id) -> id; + unsafe fn fileURLWithPath_isDirectory_(_:Self, path: id, is_dir: BOOL) -> id; + unsafe fn initFileURLWithPath_isDirectory_(self, path: id, is_dir: BOOL) -> id; + unsafe fn fileURLWithPath_relativeToURL_(_:Self, path: id, url: id) -> id; + unsafe fn initFileURLWithPath_relativeToURL_(self, path: id, url: id) -> id; + unsafe fn fileURLWithPath_isDirectory_relativeToURL_(_:Self, path: id, is_dir: BOOL, url: id) -> id; + unsafe fn initFileURLWithPath_isDirectory_relativeToURL_(self, path: id, is_dir: BOOL, url: id) -> id; + unsafe fn fileURLWithPath_(_:Self, path: id) -> id; + unsafe fn initFileURLWithPath_(self, path: id) -> id; + unsafe fn fileURLWithPathComponents_(_:Self, path_components: id ) -> id; + unsafe fn URLByResolvingAliasFileAtURL_options_error_(_:Self, url: id, options: NSURLBookmarkResolutionOptions, error: *mut id ) -> id; + unsafe fn URLByResolvingBookmarkData_options_relativeToURL_bookmarkDataIsStale_error_(_:Self, data: id , options: NSURLBookmarkResolutionOptions, relative_to_url: id, is_stale: *mut BOOL, error: *mut id ) -> id; + unsafe fn initByResolvingBookmarkData_options_relativeToURL_bookmarkDataIsStale_error_(self, data: id , options: NSURLBookmarkResolutionOptions, relative_to_url: id, is_stale: *mut BOOL, error: *mut id ) -> id; + + + + unsafe fn absoluteURLWithDataRepresentation_relativeToURL_(_:Self, data: id , url: id) -> id; + unsafe fn initAbsoluteURLWithDataRepresentation_relativeToURL_(self, data: id , url: id) -> id; + unsafe fn URLWithDataRepresentation_relativeToURL_(_:Self, data: id , url: id) -> id; + unsafe fn initWithDataRepresentation_relativeToURL_(self, data: id , url: id) -> id; + unsafe fn dataRepresentation(self) -> id ; + + unsafe fn isEqual_(self, id: id) -> BOOL; + + unsafe fn checkResourceIsReachableAndReturnError_(self, error: id ) -> BOOL; + unsafe fn isFileReferenceURL(self) -> BOOL; + unsafe fn isFileURL(self) -> BOOL; + + unsafe fn absoluteString(self) -> id ; + unsafe fn absoluteURL(self) -> id ; + unsafe fn baseURL(self) -> id ; + + unsafe fn fragment(self) -> id ; + unsafe fn host(self) -> id ; + unsafe fn lastPathComponent(self) -> id 
; + unsafe fn parameterString(self) -> id ; + unsafe fn password(self) -> id ; + unsafe fn path(self) -> id ; + unsafe fn pathComponents(self) -> id ; + unsafe fn pathExtension(self) -> id ; + unsafe fn port(self) -> id ; + unsafe fn query(self) -> id ; + unsafe fn relativePath(self) -> id ; + unsafe fn relativeString(self) -> id ; + unsafe fn resourceSpecifier(self) -> id ; + unsafe fn scheme(self) -> id ; + unsafe fn standardizedURL(self) -> id ; + unsafe fn user(self) -> id ; + + + + + + + + + unsafe fn NSURLResourceKey(self) -> id ; + + unsafe fn filePathURL(self) -> id; + unsafe fn fileReferenceURL(self) -> id; + unsafe fn URLByAppendingPathComponent_(self, path_component: id ) -> id; + unsafe fn URLByAppendingPathComponent_isDirectory_(self, path_component: id , is_dir: BOOL) -> id; + unsafe fn URLByAppendingPathExtension_(self, extension: id ) -> id; + unsafe fn URLByDeletingLastPathComponent(self) -> id; + unsafe fn URLByDeletingPathExtension(self) -> id; + unsafe fn URLByResolvingSymlinksInPath(self) -> id; + unsafe fn URLByStandardizingPath(self) -> id; + unsafe fn hasDirectoryPath(self) -> BOOL; + + unsafe fn bookmarkDataWithContentsOfURL_error_(_:Self, url: id, error: id ) -> id ; + unsafe fn bookmarkDataWithOptions_includingResourceValuesForKeys_relativeToURL_error_(self, options: NSURLBookmarkCreationOptions, resource_value_for_keys: id , relative_to_url: id, error: id ) -> id ; + + unsafe fn writeBookmarkData_toURL_options_error_(_:Self, data: id , to_url: id, options: NSURLBookmarkFileCreationOptions, error: id ) -> id; + unsafe fn startAccessingSecurityScopedResource(self) -> BOOL; + unsafe fn stopAccessingSecurityScopedResource(self); + unsafe fn NSURLBookmarkFileCreationOptions(self) -> NSURLBookmarkFileCreationOptions; + unsafe fn NSURLBookmarkCreationOptions(self) -> NSURLBookmarkCreationOptions; + unsafe fn NSURLBookmarkResolutionOptions(self) -> NSURLBookmarkResolutionOptions; + + + + + + + +} + +impl NSURL for id { + unsafe fn alloc(_: Self) -> id { + msg_send![class!(NSURL), alloc] + } + + unsafe fn URLWithString_(_:Self, string: id) -> id { + msg_send![class!(NSURL), URLWithString:string] + } + unsafe fn initWithString_(self, string: id) -> id { + msg_send![self, initWithString:string] + } + unsafe fn URLWithString_relativeToURL_(_:Self, string: id, url: id) -> id { + msg_send![class!(NSURL), URLWithString: string relativeToURL:url] + } + unsafe fn initWithString_relativeToURL_(self, string: id, url: id) -> id { + msg_send![self, initWithString:string relativeToURL:url] + } + unsafe fn fileURLWithPath_isDirectory_(_:Self, path: id, is_dir: BOOL) -> id { + msg_send![class!(NSURL), fileURLWithPath:path isDirectory:is_dir] + } + unsafe fn initFileURLWithPath_isDirectory_(self, path: id, is_dir: BOOL) -> id { + msg_send![self, initFileURLWithPath:path isDirectory:is_dir] + } + unsafe fn fileURLWithPath_relativeToURL_(_:Self, path: id, url: id) -> id { + msg_send![class!(NSURL), fileURLWithPath:path relativeToURL:url] + } + unsafe fn initFileURLWithPath_relativeToURL_(self, path: id, url: id) -> id { + msg_send![self, initFileURLWithPath:path relativeToURL:url] + } + unsafe fn fileURLWithPath_isDirectory_relativeToURL_(_:Self, path: id, is_dir: BOOL, url: id) -> id { + msg_send![class!(NSURL), fileURLWithPath:path isDirectory:is_dir relativeToURL:url] + } + unsafe fn initFileURLWithPath_isDirectory_relativeToURL_(self, path: id, is_dir: BOOL, url: id) -> id { + msg_send![self, initFileURLWithPath:path isDirectory:is_dir relativeToURL:url] + } + unsafe fn 
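
A quick usage sketch for the trait above (illustrative only; it needs a macOS Objective-C runtime, and every call is unsafe message-sending):

    use cocoa::base::{nil, YES};
    use cocoa::foundation::{NSString, NSURL};

    unsafe {
        let path = NSString::alloc(nil).init_str("/tmp");
        // Class method: the first argument is only a type anchor, nil is conventional.
        let url = NSURL::fileURLWithPath_isDirectory_(nil, path, YES);
        assert!(url.isFileURL() == YES);
        let _abs = url.absoluteString(); // an NSString such as "file:///tmp/"
    }
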
fileURLWithPath_(_:Self, path: id) -> id { + msg_send![class!(NSURL), fileURLWithPath:path] + } + unsafe fn initFileURLWithPath_(self, path: id) -> id { + msg_send![self, initFileURLWithPath:path] + } + unsafe fn fileURLWithPathComponents_(_:Self, path_components: id ) -> id { + msg_send![class!(NSURL), fileURLWithPathComponents:path_components] + } + unsafe fn URLByResolvingAliasFileAtURL_options_error_(_:Self, url: id, options: NSURLBookmarkResolutionOptions, error: *mut id ) -> id { + msg_send![class!(NSURL), URLByResolvingAliasFileAtURL:url options:options error:error] + } + unsafe fn URLByResolvingBookmarkData_options_relativeToURL_bookmarkDataIsStale_error_(_:Self, data: id , options: NSURLBookmarkResolutionOptions, relative_to_url: id, is_stale: *mut BOOL, error: *mut id ) -> id { + msg_send![class!(NSURL), URLByResolvingBookmarkData:data options:options relativeToURL:relative_to_url bookmarkDataIsStale:is_stale error:error] + } + unsafe fn initByResolvingBookmarkData_options_relativeToURL_bookmarkDataIsStale_error_(self, data: id , options: NSURLBookmarkResolutionOptions, relative_to_url: id, is_stale: *mut BOOL, error: *mut id ) -> id { + msg_send![self, initByResolvingBookmarkData:data options:options relativeToURL:relative_to_url bookmarkDataIsStale:is_stale error:error] + } + + + + unsafe fn absoluteURLWithDataRepresentation_relativeToURL_(_:Self, data: id , url: id) -> id { + msg_send![class!(NSURL), absoluteURLWithDataRepresentation:data relativeToURL:url] + } + unsafe fn initAbsoluteURLWithDataRepresentation_relativeToURL_(self, data: id , url: id) -> id { + msg_send![self, initAbsoluteURLWithDataRepresentation:data relativeToURL:url] + } + unsafe fn URLWithDataRepresentation_relativeToURL_(_:Self, data: id , url: id) -> id { + msg_send![class!(NSURL), URLWithDataRepresentation:data relativeToURL:url] + } + unsafe fn initWithDataRepresentation_relativeToURL_(self, data: id , url: id) -> id { + msg_send![self, initWithDataRepresentation:data relativeToURL:url] + } + unsafe fn dataRepresentation(self) -> id { + msg_send![self, dataRepresentation] + } + + unsafe fn isEqual_(self, id: id) -> BOOL { + msg_send![self, isEqual:id] + } + + unsafe fn checkResourceIsReachableAndReturnError_(self, error: id ) -> BOOL { + msg_send![self, checkResourceIsReachableAndReturnError:error] + } + unsafe fn isFileReferenceURL(self) -> BOOL { + msg_send![self, isFileReferenceURL] + } + unsafe fn isFileURL(self) -> BOOL { + msg_send![self, isFileURL] + } + + unsafe fn absoluteString(self) -> id { + msg_send![self, absoluteString] + } + unsafe fn absoluteURL(self) -> id { + msg_send![self, absoluteURL] + } + unsafe fn baseURL(self) -> id { + msg_send![self, baseURL] + } + + unsafe fn fragment(self) -> id { + msg_send![self, fragment] + } + unsafe fn host(self) -> id { + msg_send![self, host] + } + unsafe fn lastPathComponent(self) -> id { + msg_send![self, lastPathComponent] + } + unsafe fn parameterString(self) -> id { + msg_send![self, parameterString] + } + unsafe fn password(self) -> id { + msg_send![self, password] + } + unsafe fn path(self) -> id { + msg_send![self, path] + } + unsafe fn pathComponents(self) -> id { + msg_send![self, pathComponents] + } + unsafe fn pathExtension(self) -> id { + msg_send![self, pathExtension] + } + unsafe fn port(self) -> id { + msg_send![self, port] + } + unsafe fn query(self) -> id { + msg_send![self, query] + } + unsafe fn relativePath(self) -> id { + msg_send![self, relativePath] + } + unsafe fn relativeString(self) -> id { + msg_send![self, 
relativeString] + } + unsafe fn resourceSpecifier(self) -> id { + msg_send![self, resourceSpecifier] + } + unsafe fn scheme(self) -> id { + msg_send![self, scheme] + } + unsafe fn standardizedURL(self) -> id { + msg_send![self, standardizedURL] + } + unsafe fn user(self) -> id { + msg_send![self, user] + } + + + + + + + + + unsafe fn NSURLResourceKey(self) -> id { + msg_send![self, NSURLResourceKey] + } + + unsafe fn filePathURL(self) -> id { + msg_send![self, filePathURL] + } + unsafe fn fileReferenceURL(self) -> id { + msg_send![self, fileReferenceURL] + } + unsafe fn URLByAppendingPathComponent_(self, path_component: id ) -> id { + msg_send![self, URLByAppendingPathComponent:path_component] + } + unsafe fn URLByAppendingPathComponent_isDirectory_(self, path_component: id , is_dir: BOOL) -> id { + msg_send![self, URLByAppendingPathComponent:path_component isDirectory:is_dir] + } + unsafe fn URLByAppendingPathExtension_(self, extension: id ) -> id { + msg_send![self, URLByAppendingPathExtension:extension] + } + unsafe fn URLByDeletingLastPathComponent(self) -> id { + msg_send![self, URLByDeletingLastPathComponent] + } + unsafe fn URLByDeletingPathExtension(self) -> id { + msg_send![self, URLByDeletingPathExtension] + } + unsafe fn URLByResolvingSymlinksInPath(self) -> id { + msg_send![self, URLByResolvingSymlinksInPath] + } + unsafe fn URLByStandardizingPath(self) -> id { + msg_send![self, URLByStandardizingPath] + } + unsafe fn hasDirectoryPath(self) -> BOOL { + msg_send![self, hasDirectoryPath] + } + + unsafe fn bookmarkDataWithContentsOfURL_error_(_:Self, url: id, error: id ) -> id { + msg_send![class!(NSURL), bookmarkDataWithContentsOfURL:url error:error] + } + unsafe fn bookmarkDataWithOptions_includingResourceValuesForKeys_relativeToURL_error_(self, options: NSURLBookmarkCreationOptions, resource_value_for_keys: id , relative_to_url: id, error: id ) -> id { + msg_send![self, bookmarkDataWithOptions:options includingResourceValuesForKeys:resource_value_for_keys relativeToURL:relative_to_url error:error] + } + + unsafe fn writeBookmarkData_toURL_options_error_(_:Self, data: id , to_url: id, options: NSURLBookmarkFileCreationOptions, error: id ) -> id { + msg_send![class!(NSURL), writeBookmarkData:data toURL:to_url options:options error:error] + } + unsafe fn startAccessingSecurityScopedResource(self) -> BOOL { + msg_send![self, startAccessingSecurityScopedResource] + } + unsafe fn stopAccessingSecurityScopedResource(self) { + msg_send![self, stopAccessingSecurityScopedResource] + } + unsafe fn NSURLBookmarkFileCreationOptions(self) -> NSURLBookmarkFileCreationOptions { + msg_send![self, NSURLBookmarkFileCreationOptions] + } + unsafe fn NSURLBookmarkCreationOptions(self) -> NSURLBookmarkCreationOptions { + msg_send![self, NSURLBookmarkCreationOptions] + } + unsafe fn NSURLBookmarkResolutionOptions(self) -> NSURLBookmarkResolutionOptions { + msg_send![self, NSURLBookmarkResolutionOptions] + } + + + + + + + +} + +pub trait NSBundle: Sized { + unsafe fn mainBundle() -> Self; + + unsafe fn loadNibNamed_owner_topLevelObjects_(self, + name: id , + owner: id, + topLevelObjects: *mut id ) -> BOOL; +} + +impl NSBundle for id { + unsafe fn mainBundle() -> id { + msg_send![class!(NSBundle), mainBundle] + } + + unsafe fn loadNibNamed_owner_topLevelObjects_(self, + name: id , + owner: id, + topLevelObjects: *mut id ) -> BOOL { + msg_send![self, loadNibNamed:name + owner:owner + topLevelObjects:topLevelObjects] + } +} + +pub trait NSData: Sized { + unsafe fn data(_: Self) -> id { + 
msg_send![class!(NSData), data] + } + + unsafe fn dataWithBytes_length_(_: Self, bytes: *const c_void, length: NSUInteger) -> id { + msg_send![class!(NSData), dataWithBytes:bytes length:length] + } + + unsafe fn dataWithBytesNoCopy_length_(_: Self, bytes: *const c_void, length: NSUInteger) -> id { + msg_send![class!(NSData), dataWithBytesNoCopy:bytes length:length] + } + + unsafe fn dataWithBytesNoCopy_length_freeWhenDone_(_: Self, bytes: *const c_void, + length: NSUInteger, freeWhenDone: BOOL) -> id { + msg_send![class!(NSData), dataWithBytesNoCopy:bytes length:length freeWhenDone:freeWhenDone] + } + + unsafe fn dataWithContentsOfFile_(_: Self, path: id) -> id { + msg_send![class!(NSData), dataWithContentsOfFile:path] + } + + unsafe fn dataWithContentsOfFile_options_error_(_: Self, path: id, mask: NSDataReadingOptions, + errorPtr: *mut id) -> id { + msg_send![class!(NSData), dataWithContentsOfFile:path options:mask error:errorPtr] + } + + unsafe fn dataWithContentsOfURL_(_: Self, aURL: id) -> id { + msg_send![class!(NSData), dataWithContentsOfURL:aURL] + } + + unsafe fn dataWithContentsOfURL_options_error_(_: Self, aURL: id, mask: NSDataReadingOptions, + errorPtr: *mut id) -> id { + msg_send![class!(NSData), dataWithContentsOfURL:aURL options:mask error:errorPtr] + } + + unsafe fn dataWithData_(_: Self, aData: id) -> id { + msg_send![class!(NSData), dataWithData:aData] + } + + unsafe fn initWithBase64EncodedData_options_(self, base64Data: id, options: NSDataBase64DecodingOptions) + -> id; + unsafe fn initWithBase64EncodedString_options_(self, base64String: id, options: NSDataBase64DecodingOptions) + -> id; + unsafe fn initWithBytes_length_(self, bytes: *const c_void, length: NSUInteger) -> id; + unsafe fn initWithBytesNoCopy_length_(self, bytes: *const c_void, length: NSUInteger) -> id; + unsafe fn initWithBytesNoCopy_length_deallocator_(self, bytes: *const c_void, length: NSUInteger, + deallocator: *mut Block<(*const c_void, NSUInteger), ()>) + -> id; + unsafe fn initWithBytesNoCopy_length_freeWhenDone_(self, bytes: *const c_void, + length: NSUInteger, freeWhenDone: BOOL) -> id; + unsafe fn initWithContentsOfFile_(self, path: id) -> id; + unsafe fn initWithContentsOfFile_options_error(self, path: id, mask: NSDataReadingOptions, errorPtr: *mut id) + -> id; + unsafe fn initWithContentsOfURL_(self, aURL: id) -> id; + unsafe fn initWithContentsOfURL_options_error_(self, aURL: id, mask: NSDataReadingOptions, errorPtr: *mut id) + -> id; + unsafe fn initWithData_(self, data: id) -> id; + + unsafe fn bytes(self) -> *const c_void; + unsafe fn description(self) -> id; + unsafe fn enumerateByteRangesUsingBlock_(self, block: *mut Block<(*const c_void, NSRange, *mut BOOL), ()>); + unsafe fn getBytes_length_(self, buffer: *mut c_void, length: NSUInteger); + unsafe fn getBytes_range_(self, buffer: *mut c_void, range: NSRange); + unsafe fn subdataWithRange_(self, range: NSRange) -> id; + unsafe fn rangeOfData_options_range_(self, dataToFind: id, options: NSDataSearchOptions, searchRange: NSRange) + -> NSRange; + + unsafe fn base64EncodedDataWithOptions_(self, options: NSDataBase64EncodingOptions) -> id; + unsafe fn base64EncodedStringWithOptions_(self, options: NSDataBase64EncodingOptions) -> id; + + unsafe fn isEqualToData_(self, otherData: id) -> id; + unsafe fn length(self) -> NSUInteger; + + unsafe fn writeToFile_atomically_(self, path: id, atomically: BOOL) -> BOOL; + unsafe fn writeToFile_options_error_(self, path: id, mask: NSDataWritingOptions, errorPtr: *mut id) -> BOOL; + unsafe fn 
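
The same pattern applies to NSData; a short sketch (illustrative only, macOS) wrapping a byte buffer and base64-encoding it with the options bitflags defined further down in this file:

    use cocoa::base::nil;
    use cocoa::foundation::{NSData, NSDataBase64EncodingOptions, NSUInteger};
    use libc::c_void;

    unsafe {
        let bytes = b"hello";
        let data = NSData::dataWithBytes_length_(nil,
                                                 bytes.as_ptr() as *const c_void,
                                                 bytes.len() as NSUInteger);
        assert_eq!(data.length(), 5);
        // empty() requests plain single-line base64 output.
        let _b64 = data.base64EncodedStringWithOptions_(NSDataBase64EncodingOptions::empty());
    }
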
writeToURL_atomically_(self, aURL: id, atomically: BOOL) -> BOOL; + unsafe fn writeToURL_options_error_(self, aURL: id, mask: NSDataWritingOptions, errorPtr: *mut id) -> BOOL; +} + +impl NSData for id { + unsafe fn initWithBase64EncodedData_options_(self, base64Data: id, options: NSDataBase64DecodingOptions) + -> id { + msg_send![self, initWithBase64EncodedData:base64Data options:options] + } + + unsafe fn initWithBase64EncodedString_options_(self, base64String: id, options: NSDataBase64DecodingOptions) + -> id { + msg_send![self, initWithBase64EncodedString:base64String options:options] + } + + unsafe fn initWithBytes_length_(self, bytes: *const c_void, length: NSUInteger) -> id { + msg_send![self,initWithBytes:bytes length:length] + } + + unsafe fn initWithBytesNoCopy_length_(self, bytes: *const c_void, length: NSUInteger) -> id { + msg_send![self, initWithBytesNoCopy:bytes length:length] + } + + unsafe fn initWithBytesNoCopy_length_deallocator_(self, bytes: *const c_void, length: NSUInteger, + deallocator: *mut Block<(*const c_void, NSUInteger), ()>) + -> id { + msg_send![self, initWithBytesNoCopy:bytes length:length deallocator:deallocator] + } + + unsafe fn initWithBytesNoCopy_length_freeWhenDone_(self, bytes: *const c_void, + length: NSUInteger, freeWhenDone: BOOL) -> id { + msg_send![self, initWithBytesNoCopy:bytes length:length freeWhenDone:freeWhenDone] + } + + unsafe fn initWithContentsOfFile_(self, path: id) -> id { + msg_send![self, initWithContentsOfFile:path] + } + + unsafe fn initWithContentsOfFile_options_error(self, path: id, mask: NSDataReadingOptions, errorPtr: *mut id) + -> id { + msg_send![self, initWithContentsOfFile:path options:mask error:errorPtr] + } + + unsafe fn initWithContentsOfURL_(self, aURL: id) -> id { + msg_send![self, initWithContentsOfURL:aURL] + } + + unsafe fn initWithContentsOfURL_options_error_(self, aURL: id, mask: NSDataReadingOptions, errorPtr: *mut id) + -> id { + msg_send![self, initWithContentsOfURL:aURL options:mask error:errorPtr] + } + + unsafe fn initWithData_(self, data: id) -> id { + msg_send![self, initWithData:data] + } + + unsafe fn bytes(self) -> *const c_void { + msg_send![self, bytes] + } + + unsafe fn description(self) -> id { + msg_send![self, description] + } + + unsafe fn enumerateByteRangesUsingBlock_(self, block: *mut Block<(*const c_void, NSRange, *mut BOOL), ()>) { + msg_send![self, enumerateByteRangesUsingBlock:block] + } + + unsafe fn getBytes_length_(self, buffer: *mut c_void, length: NSUInteger) { + msg_send![self, getBytes:buffer length:length] + } + + unsafe fn getBytes_range_(self, buffer: *mut c_void, range: NSRange) { + msg_send![self, getBytes:buffer range:range] + } + + unsafe fn subdataWithRange_(self, range: NSRange) -> id { + msg_send![self, subdataWithRange:range] + } + + unsafe fn rangeOfData_options_range_(self, dataToFind: id, options: NSDataSearchOptions, searchRange: NSRange) + -> NSRange { + msg_send![self, rangeOfData:dataToFind options:options range:searchRange] + } + + unsafe fn base64EncodedDataWithOptions_(self, options: NSDataBase64EncodingOptions) -> id { + msg_send![self, base64EncodedDataWithOptions:options] + } + + unsafe fn base64EncodedStringWithOptions_(self, options: NSDataBase64EncodingOptions) -> id { + msg_send![self, base64EncodedStringWithOptions:options] + } + + unsafe fn isEqualToData_(self, otherData: id) -> id { + msg_send![self, isEqualToData:otherData] + } + + unsafe fn length(self) -> NSUInteger { + msg_send![self, length] + } + + unsafe fn writeToFile_atomically_(self, path: 
id, atomically: BOOL) -> BOOL { + msg_send![self, writeToFile:path atomically:atomically] + } + + unsafe fn writeToFile_options_error_(self, path: id, mask: NSDataWritingOptions, errorPtr: *mut id) -> BOOL { + msg_send![self, writeToFile:path options:mask error:errorPtr] + } + + unsafe fn writeToURL_atomically_(self, aURL: id, atomically: BOOL) -> BOOL { + msg_send![self, writeToURL:aURL atomically:atomically] + } + + unsafe fn writeToURL_options_error_(self, aURL: id, mask: NSDataWritingOptions, errorPtr: *mut id) -> BOOL { + msg_send![self, writeToURL:aURL options:mask error:errorPtr] + } +} + +bitflags! { + pub struct NSDataReadingOptions: libc::c_ulonglong { + const NSDataReadingMappedIfSafe = 1 << 0; + const NSDataReadingUncached = 1 << 1; + const NSDataReadingMappedAlways = 1 << 3; + } +} + +bitflags! { + pub struct NSDataBase64EncodingOptions: libc::c_ulonglong { + const NSDataBase64Encoding64CharacterLineLength = 1 << 0; + const NSDataBase64Encoding76CharacterLineLength = 1 << 1; + const NSDataBase64EncodingEndLineWithCarriageReturn = 1 << 4; + const NSDataBase64EncodingEndLineWithLineFeed = 1 << 5; + } +} + +bitflags! { + pub struct NSDataBase64DecodingOptions: libc::c_ulonglong { + const NSDataBase64DecodingIgnoreUnknownCharacters = 1 << 0; + } +} + +bitflags! { + pub struct NSDataWritingOptions: libc::c_ulonglong { + const NSDataWritingAtomic = 1 << 0; + const NSDataWritingWithoutOverwriting = 1 << 1; + } +} + +bitflags! { + pub struct NSDataSearchOptions: libc::c_ulonglong { + const NSDataSearchBackwards = 1 << 0; + const NSDataSearchAnchored = 1 << 1; + } +} diff --git a/third_party/rust/cocoa/src/lib.rs b/third_party/rust/cocoa/src/lib.rs new file mode 100644 index 000000000000..7c2ca3dba4fd --- /dev/null +++ b/third_party/rust/cocoa/src/lib.rs @@ -0,0 +1,32 @@ + + + + + + + + + +#![crate_name = "cocoa"] +#![crate_type = "rlib"] + +#![allow(non_snake_case)] + +extern crate block; +#[macro_use] +extern crate bitflags; +extern crate core_foundation; +extern crate core_graphics; +extern crate foreign_types; +extern crate libc; +#[macro_use] +extern crate objc; + +#[cfg(target_os = "macos")] +pub mod appkit; +pub mod base; +pub mod foundation; +#[cfg(target_os = "macos")] +pub mod quartzcore; +#[macro_use] +mod macros; diff --git a/third_party/rust/cocoa/src/macros.rs b/third_party/rust/cocoa/src/macros.rs new file mode 100644 index 000000000000..ca37f6ae3680 --- /dev/null +++ b/third_party/rust/cocoa/src/macros.rs @@ -0,0 +1,79 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +#[macro_export] +macro_rules! 
delegate { + ( + $name:expr, { + $( ($($sel:ident :)+) => $func:expr),* + } + ) => ( + delegate!($name, { + , + $( ($($sel :)+) => $func),* + }) + ); + + ( + $name:expr, { + $($var:ident : $var_type:ty = $value:expr),* , + $( ($($sel:ident :)+) => $func:expr),* + } + ) => ({ + let mut decl = objc::declare::ClassDecl::new($name, class!(NSObject)).unwrap(); + + $( + decl.add_ivar::<$var_type>(stringify!($var)); + )* + + $( + decl.add_method(sel!($($sel :)+), $func); + )* + + let cl = decl.register(); + let delegate: id = msg_send![cl, alloc]; + + $( + (*delegate).set_ivar(stringify!($var), $value); + )* + + delegate + }); +} diff --git a/third_party/rust/cocoa/src/quartzcore.rs b/third_party/rust/cocoa/src/quartzcore.rs new file mode 100644 index 000000000000..4bf992ea1f4a --- /dev/null +++ b/third_party/rust/cocoa/src/quartzcore.rs @@ -0,0 +1,1859 @@ + + + + + + + + + +#![allow(non_upper_case_globals)] + +use core_foundation::array::{CFArray, CFArrayRef}; +use core_foundation::base::{CFType, CFTypeRef, TCFType}; +use core_foundation::date::CFTimeInterval; +use core_foundation::dictionary::{CFDictionary, CFDictionaryRef}; +use core_foundation::string::{CFString, CFStringRef}; +use core_graphics::base::CGFloat; +use core_graphics::color::{CGColor, SysCGColorRef}; +use core_graphics::color_space::CGColorSpace; +use core_graphics::context::CGContext; +use core_graphics::geometry::{CGAffineTransform, CGPoint, CGRect, CGSize}; +use core_graphics::path::{CGPath, SysCGPathRef}; +use foreign_types::ForeignType; +use std::ops::Mul; +use std::ptr; + +use appkit::CGLContextObj; +use base::{BOOL, id, nil, YES}; +use foundation::NSUInteger; + + + +pub fn current_media_time() -> CFTimeInterval { + unsafe { + CACurrentMediaTime() + } +} + + + +pub struct CALayer(id); + +unsafe impl Send for CALayer {} +unsafe impl Sync for CALayer {} + +impl Clone for CALayer { + #[inline] + fn clone(&self) -> CALayer { + unsafe { + CALayer(msg_send![self.id(), retain]) + } + } +} + +impl Drop for CALayer { + #[inline] + fn drop(&mut self) { + unsafe { + msg_send![self.id(), release] + } + } +} + +impl CALayer { + #[inline] + pub fn id(&self) -> id { + self.0 + } + + #[inline] + pub fn new() -> CALayer { + unsafe { + CALayer(msg_send![class!(CALayer), layer]) + } + } + + #[inline] + pub fn from_layer(other: &CALayer) -> CALayer { + unsafe { + let layer: id = msg_send![class!(CALayer), alloc]; + CALayer(msg_send![layer, initWithLayer:other.id()]) + } + } + + #[inline] + pub fn presentation_layer(&self) -> CALayer { + unsafe { + CALayer(msg_send![self.id(), presentationLayer]) + } + } + + #[inline] + pub fn model_layer(&self) -> CALayer { + unsafe { + CALayer(msg_send![self.id(), modelLayer]) + } + } + + #[inline] + pub fn default_value_for_key(key: &CFString) -> id { + unsafe { + msg_send![class!(CALayer), defaultValueForKey:(key.as_CFTypeRef())] + } + } + + #[inline] + pub fn needs_display_for_key(key: &CFString) -> bool { + unsafe { + let flag: BOOL = msg_send![class!(CALayer), needsDisplayForKey:(key.as_CFTypeRef())]; + flag == YES + } + } + + #[inline] + pub fn should_archive_value_for_key(key: &CFString) -> bool { + unsafe { + let flag: BOOL = msg_send![class!(CALayer), shouldArchiveValueForKey:(key.as_CFTypeRef())]; + flag == YES + } + } + + #[inline] + pub fn bounds(&self) -> CGRect { + unsafe { + msg_send![self.id(), bounds] + } + } + + #[inline] + pub fn set_bounds(&self, bounds: &CGRect) { + unsafe { + msg_send![self.id(), setBounds:*bounds] + } + } + + #[inline] + pub fn position(&self) -> CGPoint { + unsafe { + 
msg_send![self.id(), position] + } + } + + #[inline] + pub fn set_position(&self, position: &CGPoint) { + unsafe { + msg_send![self.id(), setPosition:*position] + } + } + + #[inline] + pub fn z_position(&self) -> CGFloat { + unsafe { + msg_send![self.id(), zPosition] + } + } + + #[inline] + pub fn set_z_position(&self, z_position: CGFloat) { + unsafe { + msg_send![self.id(), setZPosition:z_position] + } + } + + #[inline] + pub fn anchor_point(&self) -> CGPoint { + unsafe { + msg_send![self.id(), anchorPoint] + } + } + + #[inline] + pub fn set_anchor_point(&self, anchor_point: &CGPoint) { + unsafe { + msg_send![self.id(), setAnchorPoint:*anchor_point] + } + } + + #[inline] + pub fn anchor_point_z(&self) -> CGFloat { + unsafe { + msg_send![self.id(), anchorPointZ] + } + } + + #[inline] + pub fn set_anchor_point_z(&self, anchor_point_z: CGFloat) { + unsafe { + msg_send![self.id(), setAnchorPointZ:anchor_point_z] + } + } + + #[inline] + pub fn transform(&self) -> CATransform3D { + unsafe { + msg_send![self.id(), transform] + } + } + + #[inline] + pub fn set_transform(&self, transform: &CATransform3D) { + unsafe { + msg_send![self.id(), setTransform:*transform] + } + } + + #[inline] + pub fn affine_transform(&self) -> CGAffineTransform { + unsafe { + msg_send![self.id(), affineTransform] + } + } + + #[inline] + pub fn set_affine_transform(&self, affine_transform: &CGAffineTransform) { + unsafe { + msg_send![self.id(), setAffineTransform:*affine_transform] + } + } + + #[inline] + pub fn frame(&self) -> CGRect { + unsafe { + msg_send![self.id(), frame] + } + } + + #[inline] + pub fn set_frame(&self, frame: &CGRect) { + unsafe { + msg_send![self.id(), setFrame:*frame] + } + } + + #[inline] + pub fn is_hidden(&self) -> bool { + unsafe { + let flag: BOOL = msg_send![self.id(), isHidden]; + flag == YES + } + } + + #[inline] + pub fn set_hidden(&self, hidden: bool) { + unsafe { + msg_send![self.id(), setHidden:hidden as BOOL] + } + } + + #[inline] + pub fn is_double_sided(&self) -> bool { + unsafe { + let flag: BOOL = msg_send![self.id(), isDoubleSided]; + flag == YES + } + } + + #[inline] + pub fn set_double_sided(&self, double_sided: bool) { + unsafe { + msg_send![self.id(), setDoubleSided:double_sided as BOOL] + } + } + + #[inline] + pub fn is_geometry_flipped(&self) -> bool { + unsafe { + let flag: BOOL = msg_send![self.id(), isGeometryFlipped]; + flag == YES + } + } + + #[inline] + pub fn set_geometry_flipped(&self, geometry_flipped: bool) { + unsafe { + msg_send![self.id(), setGeometryFlipped:geometry_flipped as BOOL] + } + } + + #[inline] + pub fn contents_are_flipped(&self) -> bool { + unsafe { + let flag: BOOL = msg_send![self.id(), contentsAreFlipped]; + flag == YES + } + } + + #[inline] + pub fn superlayer(&self) -> Option { + unsafe { + let superlayer: id = msg_send![self.id(), superlayer]; + if superlayer.is_null() { + None + } else { + Some(CALayer(superlayer)) + } + } + } + + #[inline] + pub fn remove_from_superlayer(&self) { + unsafe { + msg_send![self.id(), removeFromSuperlayer] + } + } + + #[inline] + pub fn sublayers(&self) -> CFArray { + unsafe { + let sublayers: CFArrayRef = msg_send![self.id(), sublayers]; + TCFType::wrap_under_create_rule(sublayers) + } + } + + #[inline] + pub fn add_sublayer(&self, sublayer: &CALayer) { + unsafe { + msg_send![self.id(), addSublayer:sublayer.id()] + } + } + + #[inline] + pub fn insert_sublayer_at_index(&self, sublayer: &CALayer, index: u32) { + unsafe { + msg_send![self.id(), insertSublayer:sublayer.id() atIndex:index] + } + } + + #[inline] + 
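
At this point the wrapper is already enough for basic layer-tree work; Clone retains and Drop releases the underlying Objective-C object. A sketch (macOS only):

    use cocoa::quartzcore::CALayer;
    use core_graphics::geometry::{CGPoint, CGRect, CGSize};

    let root = CALayer::new();
    root.set_bounds(&CGRect::new(&CGPoint::new(0.0, 0.0), &CGSize::new(800.0, 600.0)));

    let child = CALayer::new();
    child.set_position(&CGPoint::new(400.0, 300.0));
    root.add_sublayer(&child);
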
pub fn insert_sublayer_below(&self, sublayer: &CALayer, sibling: &CALayer) { + unsafe { + msg_send![self.id(), insertSublayer:sublayer.id() below:sibling.id()] + } + } + + #[inline] + pub fn insert_sublayer_above(&self, sublayer: &CALayer, sibling: &CALayer) { + unsafe { + msg_send![self.id(), insertSublayer:sublayer.id() above:sibling.id()] + } + } + + #[inline] + pub fn replace_sublayer_with(&self, old_layer: &CALayer, new_layer: &CALayer) { + unsafe { + msg_send![self.id(), replaceSublayer:old_layer.id() with:new_layer.id()] + } + } + + #[inline] + pub fn sublayer_transform(&self) -> CATransform3D { + unsafe { + msg_send![self.id(), sublayerTransform] + } + } + + #[inline] + pub fn set_sublayer_transform(&self, sublayer_transform: CATransform3D) { + unsafe { + msg_send![self.id(), setSublayerTransform:sublayer_transform] + } + } + + #[inline] + pub fn mask(&self) -> Option<CALayer> { + unsafe { + let mask: id = msg_send![self.id(), mask]; + if mask.is_null() { + None + } else { + Some(CALayer(mask)) + } + } + } + + #[inline] + pub fn set_mask(&self, mask: Option<CALayer>) { + unsafe { + match mask { + None => msg_send![self.id(), setMask:nil], + Some(mask) => msg_send![self.id(), setMask:(mask.id())], + } + } + } + + #[inline] + pub fn masks_to_bounds(&self) -> bool { + unsafe { + let flag: BOOL = msg_send![self.id(), masksToBounds]; + flag == YES + } + } + + #[inline] + pub fn set_masks_to_bounds(&self, flag: bool) { + unsafe { + msg_send![self.id(), setMasksToBounds:flag as BOOL] + } + } + + #[inline] + pub fn convert_point_from_layer(&self, point: &CGPoint, layer: Option<CALayer>) -> CGPoint { + unsafe { + let layer = match layer { + None => nil, + Some(ref layer) => layer.id(), + }; + msg_send![self.id(), convertPoint:*point fromLayer:layer] + } + } + + #[inline] + pub fn convert_point_to_layer(&self, point: &CGPoint, layer: Option<CALayer>) -> CGPoint { + unsafe { + let layer = match layer { + None => nil, + Some(ref layer) => layer.id(), + }; + msg_send![self.id(), convertPoint:*point toLayer:layer] + } + } + + #[inline] + pub fn convert_rect_from_layer(&self, rect: &CGRect, layer: Option<CALayer>) -> CGRect { + unsafe { + let layer = match layer { + None => nil, + Some(ref layer) => layer.id(), + }; + msg_send![self.id(), convertRect:*rect fromLayer:layer] + } + } + + #[inline] + pub fn convert_rect_to_layer(&self, rect: &CGRect, layer: Option<CALayer>) -> CGRect { + unsafe { + let layer = match layer { + None => nil, + Some(ref layer) => layer.id(), + }; + msg_send![self.id(), convertRect:*rect toLayer:layer] + } + } + + #[inline] + pub fn convert_time_from_layer(&self, time: CFTimeInterval, layer: Option<CALayer>) + -> CFTimeInterval { + unsafe { + let layer = match layer { + None => nil, + Some(ref layer) => layer.id(), + }; + msg_send![self.id(), convertTime:time fromLayer:layer] + } + } + + #[inline] + pub fn convert_time_to_layer(&self, time: CFTimeInterval, layer: Option<CALayer>) + -> CFTimeInterval { + unsafe { + let layer = match layer { + None => nil, + Some(ref layer) => layer.id(), + }; + msg_send![self.id(), convertTime:time toLayer:layer] + } + } + + #[inline] + pub fn hit_test(&self, point: &CGPoint) -> Option<CALayer> { + unsafe { + let layer: id = msg_send![self.id(), hitTest:*point]; + if layer == nil { + None + } else { + Some(CALayer(layer)) + } + } + } + + #[inline] + pub fn contains_point(&self, point: &CGPoint) -> bool { + unsafe { + let result: BOOL = msg_send![self.id(), containsPoint:*point]; + result == YES + } + } + + #[inline] + pub fn contents(&self) -> id { + unsafe { + msg_send![self.id(), contents] + } + } + + #[inline] + pub 
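
The conversion and hit-testing helpers keep the CGGeometry types from core-graphics; passing None for the peer layer sends nil, i.e. the layer's base coordinate space. Sketch (macOS only):

    use cocoa::quartzcore::CALayer;
    use core_graphics::geometry::CGPoint;

    let parent = CALayer::new();
    let child = CALayer::new();
    parent.add_sublayer(&child);
    parent.set_masks_to_bounds(true); // clip sublayers to the parent's bounds

    // Convert a point expressed in the parent's space into the child's space.
    let p_parent = CGPoint::new(10.0, 10.0);
    let _p_child = child.convert_point_from_layer(&p_parent, Some(parent.clone()));
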
unsafe fn set_contents(&self, contents: id) { + msg_send![self.id(), setContents:contents] + } + + #[inline] + pub fn contents_rect(&self) -> CGRect { + unsafe { + msg_send![self.id(), contentsRect] + } + } + + #[inline] + pub fn set_contents_rect(&self, contents_rect: &CGRect) { + unsafe { + msg_send![self.id(), setContentsRect:*contents_rect] + } + } + + #[inline] + pub fn contents_gravity(&self) -> ContentsGravity { + unsafe { + let string: CFStringRef = msg_send![self.id(), contentsGravity]; + ContentsGravity::from_CFString(TCFType::wrap_under_create_rule(string)) + } + } + + #[inline] + pub fn set_contents_gravity(&self, new_contents_gravity: ContentsGravity) { + unsafe { + let contents_gravity: CFString = new_contents_gravity.into_CFString(); + msg_send![self.id(), setContentsGravity:contents_gravity.as_CFTypeRef()] + } + } + + #[inline] + pub fn contents_scale(&self) -> CGFloat { + unsafe { + msg_send![self.id(), contentsScale] + } + } + + #[inline] + pub fn set_contents_scale(&self, new_contents_scale: CGFloat) { + unsafe { + msg_send![self.id(), setContentsScale:new_contents_scale] + } + } + + #[inline] + pub fn contents_center(&self) -> CGRect { + unsafe { + msg_send![self.id(), contentsCenter] + } + } + + #[inline] + pub fn set_contents_center(&self, new_rect: &CGRect) { + unsafe { + msg_send![self.id(), setContentsCenter:*new_rect] + } + } + + #[inline] + pub fn contents_format(&self) -> ContentsFormat { + unsafe { + let string: CFStringRef = msg_send![self.id(), contentsFormat]; + ContentsFormat::from_CFString(TCFType::wrap_under_create_rule(string)) + } + } + + #[inline] + pub fn set_contents_format(&self, new_contents_format: ContentsFormat) { + unsafe { + let contents_format: CFString = new_contents_format.into_CFString(); + msg_send![self.id(), setContentsFormat:contents_format.as_CFTypeRef()] + } + } + + #[inline] + pub fn minification_filter(&self) -> Filter { + unsafe { + let string: CFStringRef = msg_send![self.id(), minificationFilter]; + Filter::from_CFString(TCFType::wrap_under_create_rule(string)) + } + } + + #[inline] + pub fn set_minification_filter(&self, new_filter: Filter) { + unsafe { + let filter: CFString = new_filter.into_CFString(); + msg_send![self.id(), setMinificationFilter:filter.as_CFTypeRef()] + } + } + + #[inline] + pub fn magnification_filter(&self) -> Filter { + unsafe { + let string: CFStringRef = msg_send![self.id(), magnificationFilter]; + Filter::from_CFString(TCFType::wrap_under_create_rule(string)) + } + } + + #[inline] + pub fn set_magnification_filter(&self, new_filter: Filter) { + unsafe { + let filter: CFString = new_filter.into_CFString(); + msg_send![self.id(), setMagnificationFilter:filter.as_CFTypeRef()] + } + } + + #[inline] + pub fn minification_filter_bias(&self) -> f32 { + unsafe { + msg_send![self.id(), minificationFilterBias] + } + } + + #[inline] + pub fn set_minification_filter_bias(&self, new_filter_bias: f32) { + unsafe { + msg_send![self.id(), setMinificationFilterBias:new_filter_bias] + } + } + + #[inline] + pub fn is_opaque(&self) -> bool { + unsafe { + let flag: BOOL = msg_send![self.id(), isOpaque]; + flag == YES + } + } + + #[inline] + pub fn set_opaque(&self, opaque: bool) { + unsafe { + msg_send![self.id(), setOpaque:opaque as BOOL] + } + } + + #[inline] + pub fn display(&self) { + unsafe { + msg_send![self.id(), display] + } + } + + #[inline] + pub fn set_needs_display(&self) { + unsafe { + msg_send![self.id(), setNeedsDisplay] + } + } + + #[inline] + pub fn set_needs_display_in_rect(&self, rect: &CGRect) { + 
unsafe { + msg_send![self.id(), setNeedsDisplayInRect:*rect] + } + } + + #[inline] + pub fn needs_display(&self) -> bool { + unsafe { + let flag: BOOL = msg_send![self.id(), needsDisplay]; + flag == YES + } + } + + #[inline] + pub fn display_if_needed(&self) { + unsafe { + msg_send![self.id(), displayIfNeeded] + } + } + + #[inline] + pub fn needs_display_on_bounds_change(&self) -> bool { + unsafe { + let flag: BOOL = msg_send![self.id(), needsDisplayOnBoundsChange]; + flag == YES + } + } + + #[inline] + pub fn set_needs_display_on_bounds_change(&self, flag: bool) { + unsafe { + msg_send![self.id(), setNeedsDisplayOnBoundsChange:flag as BOOL] + } + } + + #[inline] + pub fn draws_asynchronously(&self) -> bool { + unsafe { + let flag: BOOL = msg_send![self.id(), drawsAsynchronously]; + flag == YES + } + } + + #[inline] + pub fn set_draws_asynchronously(&self, flag: bool) { + unsafe { + msg_send![self.id(), setDrawsAsynchronously:flag as BOOL] + } + } + + #[inline] + pub fn draw_in_context(&self, context: &CGContext) { + unsafe { + msg_send![self.id(), drawInContext:(*context).as_ptr()] + } + } + + #[inline] + pub fn render_in_context(&self, context: &CGContext) { + unsafe { + msg_send![self.id(), renderInContext:(*context).as_ptr()] + } + } + + #[inline] + pub fn edge_antialiasing_mask(&self) -> EdgeAntialiasingMask { + unsafe { + EdgeAntialiasingMask::from_bits_truncate(msg_send![self.id(), edgeAntialiasingMask]) + } + } + + #[inline] + pub fn set_edge_antialiasing_mask(&self, mask: EdgeAntialiasingMask) { + unsafe { + msg_send![self.id(), setEdgeAntialiasingMask:mask.bits()] + } + } + + #[inline] + pub fn background_color(&self) -> Option<CGColor> { + unsafe { + let color: SysCGColorRef = msg_send![self.id(), backgroundColor]; + if color.is_null() { + None + } else { + Some(CGColor::wrap_under_get_rule(color)) + } + } + } + + #[inline] + pub fn set_background_color(&self, color: Option<CGColor>) { + unsafe { + let color = match color { + None => ptr::null(), + Some(color) => color.as_CFTypeRef(), + }; + msg_send![self.id(), setBackgroundColor:color] + } + } + + #[inline] + pub fn corner_radius(&self) -> CGFloat { + unsafe { + msg_send![self.id(), cornerRadius] + } + } + + #[inline] + pub fn set_corner_radius(&self, radius: CGFloat) { + unsafe { + msg_send![self.id(), setCornerRadius:radius] + } + } + + #[inline] + pub fn masked_corners(&self) -> CornerMask { + unsafe { + CornerMask::from_bits_truncate(msg_send![self.id(), maskedCorners]) + } + } + + #[inline] + pub fn set_masked_corners(&self, mask: CornerMask) { + unsafe { + msg_send![self.id(), setCornerMask:mask.bits()] + } + } + + #[inline] + pub fn border_width(&self) -> CGFloat { + unsafe { + msg_send![self.id(), borderWidth] + } + } + + #[inline] + pub fn set_border_width(&self, border_width: CGFloat) { + unsafe { + msg_send![self.id(), setBorderWidth:border_width] + } + } + + #[inline] + pub fn border_color(&self) -> Option<CGColor> { + unsafe { + let color: SysCGColorRef = msg_send![self.id(), borderColor]; + if color.is_null() { + None + } else { + Some(CGColor::wrap_under_get_rule(color)) + } + } + } + + #[inline] + pub fn set_border_color(&self, color: Option<CGColor>) { + unsafe { + let color = match color { + None => ptr::null(), + Some(color) => color.as_CFTypeRef(), + }; + msg_send![self.id(), setBorderColor:color] + } + } + + #[inline] + pub fn opacity(&self) -> f32 { + unsafe { + msg_send![self.id(), opacity] + } + } + + #[inline] + pub fn set_opacity(&self, opacity: f32) { + unsafe { + msg_send![self.id(), setOpacity:opacity] + } + } + + #[inline] + pub fn 
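
Appearance properties map one-to-one onto the CALayer API; colors travel as CGColor and None clears the property to NULL. Sketch (macOS only; assumes core-graphics' CGColor::rgb constructor):

    use cocoa::quartzcore::CALayer;
    use core_graphics::color::CGColor;

    let layer = CALayer::new();
    layer.set_background_color(Some(CGColor::rgb(0.0, 0.5, 1.0, 1.0)));
    layer.set_border_color(Some(CGColor::rgb(0.0, 0.0, 0.0, 1.0)));
    layer.set_border_width(1.0);
    layer.set_corner_radius(8.0);
    layer.set_opacity(0.9);
    layer.set_needs_display();
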
compositing_filter(&self) -> id { + unsafe { + msg_send![self.id(), compositingFilter] + } + } + + #[inline] + pub unsafe fn set_compositing_filter(&self, filter: id) { + msg_send![self.id(), setCompositingFilter:filter] + } + + #[inline] + pub unsafe fn filters(&self) -> Option<CFArray> { + let filters: CFArrayRef = msg_send![self.id(), filters]; + if filters.is_null() { + None + } else { + Some(CFArray::wrap_under_get_rule(filters)) + } + } + + #[inline] + pub unsafe fn set_filters(&self, filters: Option<CFArray>) { + let filters: CFTypeRef = match filters { + Some(ref filters) => filters.as_CFTypeRef(), + None => ptr::null(), + }; + msg_send![self.id(), setFilters:filters] + } + + #[inline] + pub unsafe fn background_filters(&self) -> Option<CFArray> { + let filters: CFArrayRef = msg_send![self.id(), backgroundFilters]; + if filters.is_null() { + None + } else { + Some(CFArray::wrap_under_get_rule(filters)) + } + } + + #[inline] + pub unsafe fn set_background_filters(&self, filters: Option<CFArray>) { + let filters: CFTypeRef = match filters { + Some(ref filters) => filters.as_CFTypeRef(), + None => ptr::null(), + }; + msg_send![self.id(), setBackgroundFilters:filters] + } + + #[inline] + pub fn should_rasterize(&self) -> bool { + unsafe { + let flag: BOOL = msg_send![self.id(), shouldRasterize]; + flag == YES + } + } + + #[inline] + pub fn set_should_rasterize(&self, flag: bool) { + unsafe { + msg_send![self.id(), setShouldRasterize:(flag as BOOL)]; + } + } + + #[inline] + pub fn rasterization_scale(&self) -> CGFloat { + unsafe { + msg_send![self.id(), rasterizationScale] + } + } + + #[inline] + pub fn set_rasterization_scale(&self, scale: CGFloat) { + unsafe { + msg_send![self.id(), setRasterizationScale:scale] + } + } + + + + #[inline] + pub fn shadow_color(&self) -> Option<CGColor> { + unsafe { + let color: SysCGColorRef = msg_send![self.id(), shadowColor]; + if color.is_null() { + None + } else { + Some(CGColor::wrap_under_get_rule(color)) + } + } + } + + #[inline] + pub fn set_shadow_color(&self, color: Option<CGColor>) { + unsafe { + let color = match color { + None => ptr::null(), + Some(color) => color.as_CFTypeRef(), + }; + msg_send![self.id(), setShadowColor:color] + } + } + + #[inline] + pub fn shadow_opacity(&self) -> f32 { + unsafe { + msg_send![self.id(), shadowOpacity] + } + } + + #[inline] + pub fn set_shadow_opacity(&self, opacity: f32) { + unsafe { + msg_send![self.id(), setShadowOpacity:opacity] + } + } + + #[inline] + pub fn shadow_offset(&self) -> CGSize { + unsafe { + msg_send![self.id(), shadowOffset] + } + } + + #[inline] + pub fn set_shadow_offset(&self, offset: &CGSize) { + unsafe { + msg_send![self.id(), setShadowOffset:*offset] + } + } + + #[inline] + pub fn shadow_radius(&self) -> CGFloat { + unsafe { + msg_send![self.id(), shadowRadius] + } + } + + #[inline] + pub fn set_shadow_radius(&self, radius: CGFloat) { + unsafe { + msg_send![self.id(), setShadowRadius:radius] + } + } + + #[inline] + pub fn shadow_path(&self) -> Option<CGPath> { + unsafe { + let path: SysCGPathRef = msg_send![self.id(), shadowPath]; + if path.is_null() { + None + } else { + Some(CGPath::from_ptr(path)) + } + } + } + + #[inline] + pub fn set_shadow_path(&self, path: Option<CGPath>) { + unsafe { + let sys_path_ref = match path { + None => ptr::null(), + Some(path) => path.as_ptr(), + }; + msg_send![self.id(), setShadowPath:sys_path_ref] + } + } + + + + #[inline] + pub fn autoresizing_mask(&self) -> AutoresizingMask { + unsafe { + AutoresizingMask::from_bits_truncate(msg_send![self.id(), autoresizingMask]) + } + } + + #[inline] + pub fn 
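
Shadows follow the same pattern; rasterization caches the composited layer, shadow included, into a bitmap. Sketch (macOS only):

    use cocoa::quartzcore::CALayer;
    use core_graphics::geometry::CGSize;

    let layer = CALayer::new();
    layer.set_shadow_opacity(0.35);
    layer.set_shadow_radius(6.0);
    layer.set_shadow_offset(&CGSize::new(0.0, -2.0));
    layer.set_shadow_path(None); // None passes a null CGPathRef
    layer.set_should_rasterize(true);
    layer.set_rasterization_scale(2.0); // match a HiDPI backing scale
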
set_autoresizing_mask(&self, mask: AutoresizingMask) { + unsafe { + msg_send![self.id(), setAutoresizingMask:mask.bits()] + } + } + + #[inline] + pub fn layout_manager(&self) -> id { + unsafe { + msg_send![self.id(), layoutManager] + } + } + + #[inline] + pub unsafe fn set_layout_manager(&self, manager: id) { + msg_send![self.id(), setLayoutManager:manager] + } + + #[inline] + pub fn preferred_frame_size(&self) -> CGSize { + unsafe { + msg_send![self.id(), preferredFrameSize] + } + } + + #[inline] + pub fn set_needs_layout(&self) { + unsafe { + msg_send![self.id(), setNeedsLayout] + } + } + + #[inline] + pub fn needs_layout(&self) -> bool { + unsafe { + let flag: BOOL = msg_send![self.id(), needsLayout]; + flag == YES + } + } + + #[inline] + pub fn layout_if_needed(&self) { + unsafe { + msg_send![self.id(), layoutIfNeeded] + } + } + + #[inline] + pub fn layout_sublayers(&self) { + unsafe { + msg_send![self.id(), layoutSublayers] + } + } + + #[inline] + pub fn resize_sublayers_with_old_size(&self, size: &CGSize) { + unsafe { + msg_send![self.id(), resizeSublayersWithOldSize:*size] + } + } + + #[inline] + pub fn resize_with_old_superlayer_size(&self, size: &CGSize) { + unsafe { + msg_send![self.id(), resizeWithOldSuperlayerSize:*size] + } + } + + + + #[inline] + pub fn default_action_for_key(event: &str) -> id { + unsafe { + let event: CFString = CFString::from(event); + msg_send![class!(CALayer), defaultActionForKey:event.as_CFTypeRef()] + } + } + + #[inline] + pub fn action_for_key(&self, event: &str) -> id { + unsafe { + let event: CFString = CFString::from(event); + msg_send![self.id(), actionForKey:event.as_CFTypeRef()] + } + } + + #[inline] + pub fn actions(&self) -> CFDictionary { + unsafe { + msg_send![self.id(), actions] + } + } + + #[inline] + pub unsafe fn set_actions(&self, actions: CFDictionary) { + msg_send![self.id(), setActions:actions] + } + + + #[inline] + pub unsafe fn add_animation_for_key(&self, animation: id, for_key: Option<&str>) { + let for_key: Option = for_key.map(CFString::from); + let for_key: CFTypeRef = match for_key { + Some(ref for_key) => for_key.as_CFTypeRef(), + None => ptr::null(), + }; + msg_send![self.id(), addAnimation:animation forKey:for_key] + } + + #[inline] + pub fn remove_all_animation(&self) { + unsafe { + msg_send![self.id(), removeAllAnimations] + } + } + + #[inline] + pub fn remove_animation_for_key(&self, key: &str) { + unsafe { + let key = CFString::from(key); + msg_send![self.id(), removeAnimationForKey:key.as_CFTypeRef()] + } + } + + #[inline] + pub fn animation_keys(&self) -> Vec { + unsafe { + let keys: CFArrayRef = msg_send![self.id(), animationKeys]; + let keys: CFArray = TCFType::wrap_under_create_rule(keys); + keys.into_iter().map(|string| { + CFString::wrap_under_get_rule(*string as CFStringRef).to_string() + }).collect() + } + } + + #[inline] + pub fn animation_for_key(&self, key: &str) -> id { + unsafe { + let key = CFString::from(key); + msg_send![self.id(), animationForKey:key.as_CFTypeRef()] + } + } + + + + #[inline] + pub fn name(&self) -> String { + unsafe { + let name: CFStringRef = msg_send![self.id(), name]; + CFString::wrap_under_get_rule(name).to_string() + } + } + + #[inline] + pub fn set_name(&self, name: &str) { + unsafe { + let name = CFString::from(name); + msg_send![self.id(), setName:name.as_CFTypeRef()] + } + } + + #[inline] + pub fn delegate(&self) -> id { + unsafe { + msg_send![self.id(), delegate] + } + } + + #[inline] + pub unsafe fn set_delegate(&self, delegate: id) { + msg_send![self.id(), 
setDelegate:delegate] + } + + #[inline] + pub fn style(&self) -> Option { + unsafe { + let dictionary: CFDictionaryRef = msg_send![self.id(), style]; + if dictionary.is_null() { + None + } else { + Some(CFDictionary::wrap_under_get_rule(dictionary)) + } + } + } + + #[inline] + pub fn set_style(&self, dictionary: Option) { + unsafe { + let dictionary = match dictionary { + None => ptr::null(), + Some(ref dictionary) => dictionary.as_CFTypeRef(), + }; + msg_send![self.id(), setStyle:dictionary] + } + } + + + + #[inline] + pub fn reload_value_for_key_path(&self, key: &str) { + unsafe { + let key = CFString::from(key); + msg_send![self.id(), reloadValueForKeyPath:key.as_CFTypeRef()] + } + } + + #[inline] + pub fn set_contents_opaque(&self, opaque: bool) { + unsafe { + msg_send![self.id(), setContentsOpaque:opaque as BOOL] + } + } +} + +#[derive(Clone, Debug, PartialEq)] +pub enum ContentsGravity { + Center, + Top, + Bottom, + Left, + Right, + TopLeft, + TopRight, + BottomLeft, + BottomRight, + Resize, + ResizeAspect, + ResizeAspectFill, + Other(CFString), +} + +impl ContentsGravity { + fn into_CFString(self) -> CFString { + let string = match self { + ContentsGravity::Center => "center", + ContentsGravity::Top => "top", + ContentsGravity::Bottom => "bottom", + ContentsGravity::Left => "left", + ContentsGravity::Right => "right", + ContentsGravity::TopLeft => "topLeft", + ContentsGravity::TopRight => "topRight", + ContentsGravity::BottomLeft => "bottomLeft", + ContentsGravity::BottomRight => "bottomRight", + ContentsGravity::Resize => "resize", + ContentsGravity::ResizeAspect => "resizeAspect", + ContentsGravity::ResizeAspectFill => "resizeAspectFill", + ContentsGravity::Other(other) => return other, + }; + CFString::from(string) + } + + + fn from_CFString(string: CFString) -> ContentsGravity { + match string.to_string() { + ref s if s == "center" => ContentsGravity::Center, + ref s if s == "top" => ContentsGravity::Top, + ref s if s == "bottom" => ContentsGravity::Bottom, + ref s if s == "left" => ContentsGravity::Left, + ref s if s == "right" => ContentsGravity::Right, + ref s if s == "topLeft" => ContentsGravity::TopLeft, + ref s if s == "topRight" => ContentsGravity::TopRight, + ref s if s == "bottomLeft" => ContentsGravity::BottomLeft, + ref s if s == "bottomRight" => ContentsGravity::BottomRight, + ref s if s == "resize" => ContentsGravity::Resize, + ref s if s == "resizeAspect" => ContentsGravity::ResizeAspect, + ref s if s == "resizeAspectFill" => ContentsGravity::ResizeAspectFill, + _ => ContentsGravity::Other(string), + } + } +} + +#[derive(Clone, Debug, PartialEq)] +pub enum ContentsFormat { + RGBA8Uint, + RGBA16Float, + Gray8Uint, + Other(CFString), +} + +impl ContentsFormat { + fn into_CFString(self) -> CFString { + let string = match self { + ContentsFormat::RGBA8Uint => "RGBA8", + ContentsFormat::RGBA16Float => "RGBAh", + ContentsFormat::Gray8Uint => "Gray8", + ContentsFormat::Other(other) => return other, + }; + CFString::from(string) + } + + + fn from_CFString(string: CFString) -> ContentsFormat { + match string.to_string() { + ref s if s == "RGBA8" => ContentsFormat::RGBA8Uint, + ref s if s == "RGBAh" => ContentsFormat::RGBA16Float, + ref s if s == "Gray8" => ContentsFormat::Gray8Uint, + _ => ContentsFormat::Other(string), + } + } +} + +#[derive(Clone, Debug, PartialEq)] +pub enum Filter { + Nearest, + Linear, + Trilinear, + Other(CFString), +} + +impl Filter { + fn into_CFString(self) -> CFString { + let string = match self { + Filter::Nearest => "nearest", + Filter::Linear 
=> "linear", + Filter::Trilinear => "trilinear", + Filter::Other(other) => return other, + }; + CFString::from(string) + } + + + fn from_CFString(string: CFString) -> Filter { + match string.to_string() { + ref s if s == "nearest" => Filter::Nearest, + ref s if s == "linear" => Filter::Linear, + ref s if s == "trilinear" => Filter::Trilinear, + _ => Filter::Other(string), + } + } +} + +bitflags! { + pub struct EdgeAntialiasingMask: u32 { + const LEFT_EDGE = 1 << 0; // kCALayerLeftEdge + const RIGHT_EDGE = 1 << 1; // kCALayerRightEdge + const BOTTOM_EDGE = 1 << 2; // kCALayerBottomEdge + const TOP_EDGE = 1 << 3; // kCALayerTopEdge + } +} + +bitflags! { + pub struct CornerMask: NSUInteger { + const MIN_X_MIN_Y_CORNER = 1 << 0; // kCALayerMinXMinYCorner + const MAX_X_MIN_Y_CORNER = 1 << 1; // kCALayerMaxXMinYCorner + const MIN_X_MAX_Y_CORNER = 1 << 2; // kCALayerMinXMaxYCorner + const MAX_X_MAX_Y_CORNER = 1 << 3; // kCALayerMaxXMaxYCorner + } +} + +bitflags! { + pub struct AutoresizingMask: u32 { + const NOT_SIZABLE = 0; // kCALayerNotSizable + const MIN_X_MARGIN = 1 << 0; // kCALayerMinXMargin + const WIDTH_SIZABLE = 1 << 1; // kCALayerWidthSizable + const MAX_X_MARGIN = 1 << 2; // kCALayerMaxXMargin + const MIN_Y_MARGIN = 1 << 3; // kCALayerMinYMargin + const HEIGHT_SIZABLE = 1 << 4; // kCALayerHeightSizable + const MAX_Y_MARGIN = 1 << 5; // kCALayerMaxYMargin + } +} + + + +pub struct CARenderer(id); + +unsafe impl Send for CARenderer {} +unsafe impl Sync for CARenderer {} + +impl Clone for CARenderer { + #[inline] + fn clone(&self) -> CARenderer { + unsafe { + CARenderer(msg_send![self.id(), retain]) + } + } +} + +impl Drop for CARenderer { + #[inline] + fn drop(&mut self) { + unsafe { + msg_send![self.id(), release] + } + } +} + +impl CARenderer { + #[inline] + pub fn id(&self) -> id { + self.0 + } + + #[inline] + pub unsafe fn from_cgl_context(context: CGLContextObj, color_space: Option) + -> CARenderer { + let mut pairs: Vec<(CFString, CFType)> = vec![]; + if let Some(color_space) = color_space { + pairs.push((CFString::wrap_under_get_rule(kCARendererColorSpace), + CFType::wrap_under_get_rule(color_space.as_ptr() as *const _ as *const _))) + } + + let options: CFDictionary = CFDictionary::from_CFType_pairs(&pairs); + + let renderer: id = + msg_send![class!(CARenderer), rendererWithCGLContext:context + options:options.as_CFTypeRef()]; + debug_assert!(renderer != nil); + CARenderer(renderer) + } + + #[inline] + pub unsafe fn from_metal_texture(metal_texture: id, + metal_command_queue: id, + color_space: Option) + -> CARenderer { + let mut pairs: Vec<(CFString, CFType)> = vec![ + (CFString::wrap_under_get_rule(kCARendererMetalCommandQueue), + CFType::wrap_under_get_rule(metal_command_queue as *const _ as *const _)), + ]; + if let Some(color_space) = color_space { + pairs.push((CFString::wrap_under_get_rule(kCARendererColorSpace), + CFType::wrap_under_get_rule(color_space.as_ptr() as *const _ as *const _))) + } + + let options: CFDictionary = CFDictionary::from_CFType_pairs(&pairs); + + let renderer: id = + msg_send![class!(CARenderer), rendererWithMTLTexture:metal_texture + options:options.as_CFTypeRef()]; + debug_assert!(renderer != nil); + CARenderer(renderer) + } + + #[inline] + pub fn layer(&self) -> Option { + unsafe { + let layer: id = msg_send![self.id(), layer]; + if layer.is_null() { + None + } else { + Some(CALayer(layer)) + } + } + } + + #[inline] + pub fn set_layer(&self, layer: Option) { + unsafe { + let layer = match layer { + Some(ref layer) => layer.id(), + None => nil, + 
}; + msg_send![self.id(), setLayer:layer]; + } + } + + #[inline] + pub fn bounds(&self) -> CGRect { + unsafe { + msg_send![self.id(), bounds] + } + } + + #[inline] + pub fn set_bounds(&self, bounds: CGRect) { + unsafe { + msg_send![self.id(), setBounds:bounds] + } + } + + #[inline] + pub fn begin_frame_at(&self, time: CFTimeInterval, timestamp: Option<&CVTimeStamp>) { + unsafe { + msg_send![self.id(), beginFrameAtTime:time timeStamp:timestamp] + } + } + + #[inline] + pub fn update_bounds(&self) -> CGRect { + unsafe { + msg_send![self.id(), updateBounds] + } + } + + #[inline] + pub fn add_update_rect(&self, rect: CGRect) { + unsafe { + msg_send![self.id(), addUpdateRect:rect] + } + } + + #[inline] + pub fn render(&self) { + unsafe { + msg_send![self.id(), render] + } + } + + #[inline] + pub fn next_frame_time(&self) -> CFTimeInterval { + unsafe { + msg_send![self.id(), nextFrameTime] + } + } + + #[inline] + pub fn end_frame(&self) { + unsafe { + msg_send![self.id(), endFrame] + } + } + + #[inline] + pub unsafe fn set_destination(&self, metal_texture: id) { + msg_send![self.id(), setDestination:metal_texture] + } +} + + + + + +pub mod transaction { + use block::{Block, ConcreteBlock, IntoConcreteBlock, RcBlock}; + use core_foundation::date::CFTimeInterval; + use core_foundation::string::CFString; + + use base::{BOOL, YES, id}; + + #[inline] + pub fn begin() { + unsafe { + msg_send![class!(CATransaction), begin] + } + } + + #[inline] + pub fn commit() { + unsafe { + msg_send![class!(CATransaction), commit] + } + } + + #[inline] + pub fn flush() { + unsafe { + msg_send![class!(CATransaction), flush] + } + } + + #[inline] + pub fn lock() { + unsafe { + msg_send![class!(CATransaction), lock] + } + } + + #[inline] + pub fn unlock() { + unsafe { + msg_send![class!(CATransaction), unlock] + } + } + + #[inline] + pub fn animation_duration() -> CFTimeInterval { + unsafe { + msg_send![class!(CATransaction), animationDuration] + } + } + + #[inline] + pub fn set_animation_duration(duration: CFTimeInterval) { + unsafe { + msg_send![class!(CATransaction), setAnimationDuration:duration] + } + } + + #[inline] + pub fn animation_timing_function() -> id { + unsafe { + msg_send![class!(CATransaction), animationTimingFunction] + } + } + + #[inline] + pub unsafe fn set_animation_timing_function(function: id) { + msg_send![class!(CATransaction), setAnimationTimingFunction:function] + } + + #[inline] + pub fn disable_actions() -> bool { + unsafe { + let flag: BOOL = msg_send![class!(CATransaction), disableActions]; + flag == YES + } + } + + #[inline] + pub fn set_disable_actions(flag: bool) { + unsafe { + msg_send![class!(CATransaction), setDisableActions:flag as BOOL] + } + } + + #[inline] + pub fn completion_block() -> Option<RcBlock<(), ()>> { + unsafe { + let completion_block: *mut Block<(), ()> = + msg_send![class!(CATransaction), completionBlock]; + if completion_block.is_null() { + None + } else { + Some(RcBlock::new(completion_block)) + } + } + } + + #[inline] + pub fn set_completion_block<F>(block: ConcreteBlock<(), (), F>) + where F: 'static + IntoConcreteBlock<(), Ret = ()> { + unsafe { + let block = block.copy(); + msg_send![class!(CATransaction), setCompletionBlock:&*block] + } + } + + #[inline] + pub fn value_for_key(key: &str) -> id { + unsafe { + let key: CFString = CFString::from(key); + msg_send![class!(CATransaction), valueForKey:key] + } + } + + #[inline] + pub fn set_value_for_key(value: id, key: &str) { + unsafe { + let key: CFString = CFString::from(key); + msg_send![class!(CATransaction), setValue:value 
forKey:key] + } + } +} + + + +#[repr(C)] +#[derive(Clone, Copy)] +pub struct CATransform3D { + pub m11: CGFloat, pub m12: CGFloat, pub m13: CGFloat, pub m14: CGFloat, + pub m21: CGFloat, pub m22: CGFloat, pub m23: CGFloat, pub m24: CGFloat, + pub m31: CGFloat, pub m32: CGFloat, pub m33: CGFloat, pub m34: CGFloat, + pub m41: CGFloat, pub m42: CGFloat, pub m43: CGFloat, pub m44: CGFloat, +} + +impl PartialEq for CATransform3D { + #[inline] + fn eq(&self, other: &CATransform3D) -> bool { + unsafe { + CATransform3DEqualToTransform(*self, *other) + } + } +} + +impl Mul for CATransform3D { + type Output = CATransform3D; + + #[inline] + fn mul(self, other: CATransform3D) -> CATransform3D { + unsafe { + CATransform3DConcat(self, other) + } + } +} + +impl CATransform3D { + pub const IDENTITY: CATransform3D = CATransform3D { + m11: 1.0, m12: 0.0, m13: 0.0, m14: 0.0, + m21: 0.0, m22: 1.0, m23: 0.0, m24: 0.0, + m31: 0.0, m32: 0.0, m33: 1.0, m34: 0.0, + m41: 0.0, m42: 0.0, m43: 0.0, m44: 1.0, + }; + + #[inline] + pub fn from_translation(tx: CGFloat, ty: CGFloat, tz: CGFloat) -> CATransform3D { + unsafe { + CATransform3DMakeTranslation(tx, ty, tz) + } + } + + #[inline] + pub fn from_scale(sx: CGFloat, sy: CGFloat, sz: CGFloat) -> CATransform3D { + unsafe { + CATransform3DMakeScale(sx, sy, sz) + } + } + + #[inline] + pub fn from_rotation(angle: CGFloat, x: CGFloat, y: CGFloat, z: CGFloat) -> CATransform3D { + unsafe { + CATransform3DMakeRotation(angle, x, y, z) + } + } + + #[inline] + pub fn affine(affine_transform: CGAffineTransform) -> CATransform3D { + unsafe { + CATransform3DMakeAffineTransform(affine_transform) + } + } + + #[inline] + pub fn is_identity(&self) -> bool { + unsafe { + CATransform3DIsIdentity(*self) + } + } + + #[inline] + pub fn translate(&self, tx: CGFloat, ty: CGFloat, tz: CGFloat) -> CATransform3D { + unsafe { + CATransform3DTranslate(*self, tx, ty, tz) + } + } + + #[inline] + pub fn scale(&self, sx: CGFloat, sy: CGFloat, sz: CGFloat) -> CATransform3D { + unsafe { + CATransform3DScale(*self, sx, sy, sz) + } + } + + #[inline] + pub fn rotate(&self, angle: CGFloat, x: CGFloat, y: CGFloat, z: CGFloat) -> CATransform3D { + unsafe { + CATransform3DRotate(*self, angle, x, y, z) + } + } + + #[inline] + pub fn invert(&self) -> CATransform3D { + unsafe { + CATransform3DInvert(*self) + } + } + + #[inline] + pub fn is_affine(&self) -> bool { + unsafe { + CATransform3DIsAffine(*self) + } + } + + #[inline] + pub fn to_affine(&self) -> CGAffineTransform { + unsafe { + CATransform3DGetAffineTransform(*self) + } + } +} + +#[link(name = "QuartzCore", kind = "framework")] +extern { + static kCARendererColorSpace: CFStringRef; + static kCARendererMetalCommandQueue: CFStringRef; + + fn CACurrentMediaTime() -> CFTimeInterval; + + fn CATransform3DIsIdentity(t: CATransform3D) -> bool; + fn CATransform3DEqualToTransform(a: CATransform3D, b: CATransform3D) -> bool; + fn CATransform3DMakeTranslation(tx: CGFloat, ty: CGFloat, tz: CGFloat) -> CATransform3D; + fn CATransform3DMakeScale(sx: CGFloat, sy: CGFloat, sz: CGFloat) -> CATransform3D; + fn CATransform3DMakeRotation(angle: CGFloat, x: CGFloat, y: CGFloat, z: CGFloat) + -> CATransform3D; + fn CATransform3DTranslate(t: CATransform3D, tx: CGFloat, ty: CGFloat, tz: CGFloat) + -> CATransform3D; + fn CATransform3DScale(t: CATransform3D, sx: CGFloat, sy: CGFloat, sz: CGFloat) + -> CATransform3D; + fn CATransform3DRotate(t: CATransform3D, angle: CGFloat, x: CGFloat, y: CGFloat, z: CGFloat) + -> CATransform3D; + fn CATransform3DConcat(a: CATransform3D, b: 
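
CATransform3D composes through the Mul impl (CATransform3DConcat) and compares through CATransform3DEqualToTransform, while the transaction module batches property changes. A combined sketch (macOS only):

    use cocoa::quartzcore::{transaction, CALayer, CATransform3D};

    let spin = CATransform3D::from_rotation(0.5, 0.0, 0.0, 1.0);
    let shift = CATransform3D::from_translation(10.0, 20.0, 0.0);
    let combined = shift * spin; // CATransform3DConcat(shift, spin)
    assert!(!combined.is_identity());
    assert!(shift.is_affine()); // a pure translation stays affine

    let layer = CALayer::new();
    transaction::begin();
    transaction::set_disable_actions(true); // suppress the implicit animation
    layer.set_transform(&combined);
    transaction::commit();
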
CATransform3D) -> CATransform3D; + fn CATransform3DInvert(t: CATransform3D) -> CATransform3D; + fn CATransform3DMakeAffineTransform(m: CGAffineTransform) -> CATransform3D; + fn CATransform3DIsAffine(t: CATransform3D) -> bool; + fn CATransform3DGetAffineTransform(t: CATransform3D) -> CGAffineTransform; +} + + + + + + + +#[repr(C)] +#[derive(Clone, Copy)] +pub struct CVTimeStamp { + pub version: u32, + pub videoTimeScale: i32, + pub videoTime: i64, + pub hostTime: u64, + pub rateScalar: f64, + pub videoRefreshPeriod: i64, + pub smpteTime: CVSMPTETime, + pub flags: u64, + pub reserved: u64, +} + +pub type CVTimeStampFlags = u64; + +pub const kCVTimeStampVideoTimeValid: CVTimeStampFlags = 1 << 0; +pub const kCVTimeStampHostTimeValid: CVTimeStampFlags = 1 << 1; +pub const kCVTimeStampSMPTETimeValid: CVTimeStampFlags = 1 << 2; +pub const kCVTimeStampVideoRefreshPeriodValid: CVTimeStampFlags = 1 << 3; +pub const kCVTimeStampRateScalarValid: CVTimeStampFlags = 1 << 4; +pub const kCVTimeStampTopField: CVTimeStampFlags = 1 << 16; +pub const kCVTimeStampBottomField: CVTimeStampFlags = 1 << 17; +pub const kCVTimeStampVideoHostTimeValid: CVTimeStampFlags = + kCVTimeStampVideoTimeValid | kCVTimeStampHostTimeValid; +pub const kCVTimeStampIsInterlaced: CVTimeStampFlags = + kCVTimeStampTopField | kCVTimeStampBottomField; + +#[repr(C)] +#[derive(Clone, Copy)] +pub struct CVSMPTETime { + pub subframes: i16, + pub subframeDivisor: i16, + pub counter: u32, + pub time_type: u32, + pub flags: u32, + pub hours: i16, + pub minutes: i16, + pub seconds: i16, + pub frames: i16, +} + +pub type CVSMPTETimeType = u32; + +pub const kCVSMPTETimeType24: CVSMPTETimeType = 0; +pub const kCVSMPTETimeType25: CVSMPTETimeType = 1; +pub const kCVSMPTETimeType30Drop: CVSMPTETimeType = 2; +pub const kCVSMPTETimeType30: CVSMPTETimeType = 3; +pub const kCVSMPTETimeType2997: CVSMPTETimeType = 4; +pub const kCVSMPTETimeType2997Drop: CVSMPTETimeType = 5; +pub const kCVSMPTETimeType60: CVSMPTETimeType = 6; +pub const kCVSMPTETimeType5994: CVSMPTETimeType = 7; + +pub type CVSMPTETimeFlags = u32; + +pub const kCVSMPTETimeValid: CVSMPTETimeFlags = 1 << 0; +pub const kCVSMPTETimeRunning: CVSMPTETimeFlags = 1 << 1; diff --git a/third_party/rust/cocoa/tests/foundation.rs b/third_party/rust/cocoa/tests/foundation.rs new file mode 100644 index 000000000000..55c2ff18c3b3 --- /dev/null +++ b/third_party/rust/cocoa/tests/foundation.rs @@ -0,0 +1,189 @@ +#[macro_use] +extern crate objc; +extern crate block; +extern crate cocoa; + +#[cfg(test)] +mod foundation { + mod nsstring { + use cocoa::foundation::NSString; + use cocoa::base::nil; + use std::slice; + use std::str; + + #[test] + fn test_utf8() { + let expected = "Iñtërnâtiônàlizætiøn"; + unsafe { + let built = NSString::alloc(nil).init_str(expected); + let bytes = built.UTF8String() as *const u8; + let objc_string = str::from_utf8(slice::from_raw_parts(bytes, built.len())) + .unwrap(); + assert_eq!(objc_string.len(), expected.len()); + assert_eq!(objc_string, expected); + } + } + + #[test] + fn test_string() { + let expected = "Hello World!"; + unsafe { + let built = NSString::alloc(nil).init_str(expected); + let bytes = built.UTF8String() as *const u8; + let objc_string = str::from_utf8(slice::from_raw_parts(bytes, built.len())) + .unwrap(); + assert_eq!(objc_string.len(), expected.len()); + assert_eq!(objc_string, expected); + } + } + + #[test] + fn test_length() { + let expected = "Hello!"; + unsafe { + let built = NSString::alloc(nil).init_str(expected); + assert_eq!(built.len(), 
expected.len()); + } + } + + #[test] + fn test_append_by_appending_string() { + let initial_str = "Iñtërnâtiônàlizætiøn"; + let to_append = "_more_strings"; + let expected = concat!("Iñtërnâtiônàlizætiøn", "_more_strings"); + unsafe { + let built = NSString::alloc(nil).init_str(initial_str); + let built_to_append = NSString::alloc(nil).init_str(to_append); + let append_string = built.stringByAppendingString_(built_to_append); + let bytes = append_string.UTF8String() as *const u8; + let objc_string = str::from_utf8(slice::from_raw_parts(bytes, append_string.len())) + .unwrap(); + assert_eq!(objc_string, expected); + } + } + } + + mod nsfastenumeration { + use std::str; + use std::slice; + use cocoa::foundation::{NSString, NSFastEnumeration}; + use cocoa::base::{id, nil}; + + #[test] + fn test_iter() { + unsafe { + let string = NSString::alloc(nil).init_str("this is a test string"); + let separator = NSString::alloc(nil).init_str(" "); + let components: id = msg_send![string, componentsSeparatedByString: separator]; + + let combined = components.iter() + .map(|s| { + let bytes = s.UTF8String() as *const u8; + str::from_utf8(slice::from_raw_parts(bytes, s.len())).unwrap() + }) + .fold(String::new(), |mut acc, s| { + acc.push_str(s); + acc + }); + + assert_eq!(combined, "thisisateststring"); + } + } + + #[test] + #[should_panic] + fn test_mutation() { + unsafe { + let string = NSString::alloc(nil).init_str("this is a test string"); + let separator = NSString::alloc(nil).init_str(" "); + let components: id = msg_send![string, componentsSeparatedByString: separator]; + let mut_components: id = msg_send![components, mutableCopy]; + let mut iter = mut_components.iter(); + iter.next(); + msg_send![mut_components, removeObjectAtIndex:1]; + iter.next(); + } + } + } + + mod nsdictionary { + use block::ConcreteBlock; + use cocoa::foundation::{NSArray, NSComparisonResult, NSDictionary, NSFastEnumeration, + NSString}; + use cocoa::base::{id, nil}; + + #[test] + fn test_get() { + const KEY: &'static str = "The key"; + const VALUE: &'static str = "Some value"; + unsafe { + let key = NSString::alloc(nil).init_str(KEY); + let value = NSString::alloc(nil).init_str(VALUE); + let dict = NSDictionary::dictionaryWithObject_forKey_(nil, value, key); + + let retrieved_value = dict.objectForKey_(key); + assert!(retrieved_value.isEqualToString(VALUE)); + } + } + + #[test] + fn test_iter() { + let mkstr = |s| unsafe { NSString::alloc(nil).init_str(s) }; + let keys = vec!["a", "b", "c", "d", "e", "f"]; + let objects = vec!["1", "2", "3", "4", "5", "6"]; + unsafe { + use std::{slice, str}; + use std::cmp::{Ord, Ordering}; + + let keys_raw_vec = keys.clone().into_iter().map(&mkstr).collect::>(); + let objs_raw_vec = objects.clone().into_iter().map(&mkstr).collect::>(); + + let keys_array = NSArray::arrayWithObjects(nil, &keys_raw_vec); + let objs_array = NSArray::arrayWithObjects(nil, &objs_raw_vec); + + let dict = + NSDictionary::dictionaryWithObjects_forKeys_(nil, objs_array, keys_array); + + + + + + + let mut comparator = ConcreteBlock::new(|s0: id, s1: id| { + let (bytes0, len0) = (s0.UTF8String() as *const u8, s0.len()); + let (bytes1, len1) = (s1.UTF8String() as *const u8, s1.len()); + let (s0, s1) = (str::from_utf8(slice::from_raw_parts(bytes0, len0)).unwrap(), + str::from_utf8(slice::from_raw_parts(bytes1, len1)).unwrap()); + let (c0, c1) = (s0.chars().next().unwrap(), s1.chars().next().unwrap()); + match c0.cmp(&c1) { + Ordering::Less => NSComparisonResult::NSOrderedAscending, + Ordering::Equal => 
NSComparisonResult::NSOrderedSame, + Ordering::Greater => NSComparisonResult::NSOrderedDescending, + } + }); + + let associated_iter = keys.iter().zip(objects.iter()); + for (k_id, (k, v)) in dict.keysSortedByValueUsingComparator_(&mut *comparator) + .iter() + .zip(associated_iter) { + assert!(k_id.isEqualToString(k)); + let v_id = dict.objectForKey_(k_id); + assert!(v_id.isEqualToString(v)); + } + + + let mut keys_arr = dict.allKeys().iter().collect::>(); + keys_arr.sort(); + for (k0, k1) in keys_arr.into_iter().zip(keys.iter()) { + assert!(k0.isEqualToString(k1)); + } + + let mut objects_arr = dict.allValues().iter().collect::>(); + objects_arr.sort(); + for (v0, v1) in objects_arr.into_iter().zip(objects.iter()) { + assert!(v0.isEqualToString(v1)); + } + } + } + } +} diff --git a/third_party/rust/colorful/.cargo-checksum.json b/third_party/rust/colorful/.cargo-checksum.json new file mode 100644 index 000000000000..a1a56cb23c2e --- /dev/null +++ b/third_party/rust/colorful/.cargo-checksum.json @@ -0,0 +1 @@ +{"files":{"Cargo.toml":"4b57cc26ea5fe95d54cd5ddaa036a435b1f996cd283283efd6675f1f4b77fed1","CodeOfConduct.md":"9a97bc250bc21e1b3fe7d7217296a33b7379e082e293f206afb6bcc93f2d3cce","README.md":"0059d0114dc6283f54eacf7cc285e0b7f3fe5db35107f5841844c8949c67000e","license":"48da2f39e100d4085767e94966b43f4fa95ff6a0698fba57ed460914e35f94a0","rustfmt.toml":"2e13002b5bbc7c434e8b6e669bd5e9417c58607bacf2f3c8711e9fc1745dd302","src/core/color_string.rs":"e860320fdce6d9590eceee040eda4e769a100e9f3a3ef24c3620bd523482fff8","src/core/colors.rs":"d22bf7c763994259ba02320ec64d6349a4f596cbf68c2161c09130f2fad16219","src/core/hsl.rs":"7308dd6b02b74b1e13eb61d4f960d52a50c9e01621404fe7468bedcf29596de5","src/core/mod.rs":"331493703e3c09b36cbf7a51d7c2b6b2455f34b7558fac172a847470b1e31fd9","src/core/rgb.rs":"c5dda4eb726a1d137658b22aa5d69958c4c710a595dff6a966395003918c1e95","src/core/style.rs":"315def912b8df5f4d6efa3b92adf5c5d21caaa8b3688dab79348140909551d9c","src/core/symbols.rs":"4925401f864d7c9b40bebf9f1a565c5650e7475dcc05a4d43240c32fd7ea38da","src/lib.rs":"f423d55cd70f7b4d4a31172f0ac3b74123ef7ed925f6be33913a8222da092279","tests/test_all_color.rs":"83d8ff40812200682360e59c9ac8ced14af65adb558699958e280e05d359933d","tests/test_animation.rs":"1b6db4c29c7b2727337c9096b53108475bf12cea0ffda9e26aa86bd7956885e2","tests/test_basic.rs":"8884ac1fb3b6749d94429ce57336a43a1a84f6c2c8c9c7ea9cdf224e2bc230df","tests/test_extra.rs":"c46c7f4fd45851565359fa20d9fce3216a19e000b66d08b1af8dc8f1e0b2282c","tests/test_gradient.rs":"bedd7a0afedbca9f606acfa3ae3bc718fab03b5f69fced8c9fbf0d499ad9d991","tests/test_hsl.rs":"668a7db4f84b555210f47cac4183141703aae679d3343bcbdb6fa75c1b3057b2"},"package":"0bca1619ff57dd7a56b58a8e25ef4199f123e78e503fe1653410350a1b98ae65"} \ No newline at end of file diff --git a/third_party/rust/colorful/Cargo.toml b/third_party/rust/colorful/Cargo.toml new file mode 100644 index 000000000000..67abb1367dfb --- /dev/null +++ b/third_party/rust/colorful/Cargo.toml @@ -0,0 +1,26 @@ +# THIS FILE IS AUTOMATICALLY GENERATED BY CARGO +# +# When uploading crates to the registry Cargo will automatically +# "normalize" Cargo.toml files for maximal compatibility +# with all versions of Cargo and also rewrite `path` dependencies +# to registry (e.g. crates.io) dependencies +# +# If you believe there's an error in this file please file an +# issue against the rust-lang/cargo repository. 
If you're +# editing this file be aware that the upstream Cargo.toml +# will likely look very different (and much more reasonable) + +[package] +name = "colorful" +version = "0.2.1" +authors = ["da2018 "] +exclude = ["images/*", "examples"] +description = "Make your terminal output colorful" +homepage = "https://github.com/rocketsman/colorful" +readme = "README.md" +keywords = ["cli", "colors", "terminal"] +categories = ["cli", "colors", "terminal"] +license = "MIT" +repository = "https://github.com/rocketsman/colorful" + +[dependencies] diff --git a/third_party/rust/colorful/CodeOfConduct.md b/third_party/rust/colorful/CodeOfConduct.md new file mode 100644 index 000000000000..b6b0dea7f2f6 --- /dev/null +++ b/third_party/rust/colorful/CodeOfConduct.md @@ -0,0 +1,74 @@ +# Contributor Covenant Code of Conduct + +## Our Pledge + +In the interest of fostering an open and welcoming environment, we as +contributors and maintainers pledge to making participation in our project and +our community a harassment-free experience for everyone, regardless of age, body +size, disability, ethnicity, gender identity and expression, level of +experience, nationality, personal appearance, race, religion, or sexual identity +and orientation. + +## Our Standards + +Examples of behaviour that contributes to creating a positive environment +include: + +- Using welcoming and inclusive language +- Being respectful of differing viewpoints and experiences +- Gracefully accepting constructive criticism +- Focusing on what is best for the community +- Showing empathy towards other community members + +Examples of unacceptable behaviour by participants include: + +- The use of sexualised language or imagery and unwelcome sexual + attention or advances +- Trolling, insulting/derogatory comments, and personal or political attacks +- Public or private harassment +- Publishing others’ private information, such as a physical or electronic + address, without explicit permission +- Other conduct which could reasonably be considered inappropriate in a + professional setting + +## Our Responsibilities + +Project maintainers are responsible for clarifying the standards of acceptable +behaviour and are expected to take appropriate and fair corrective action in +response to any instances of unacceptable behaviour. + +Project maintainers have the right and responsibility to remove, edit, or +reject comments, commits, code, wiki edits, issues, and other contributions +that are not aligned to this Code of Conduct, or to ban temporarily or +permanently any contributor for other behaviours that they deem inappropriate, +threatening, offensive, or harmful. + +## Scope + +This Code of Conduct applies both within project spaces and in public spaces +when an individual is representing the project or its community. Examples of +representing a project or community include using an official project e-mail +address, posting via an official social media account, or acting as an appointed +representative at an online or offline event. Representation of a project may +be further defined and clarified by project maintainers. + +## Enforcement + +Instances of abusive, harassing, or otherwise unacceptable behaviour may be +reported by contacting the project team at tituswormer@gmail.com. All +complaints will be reviewed and investigated and will result in a response that +is deemed necessary and appropriate to the circumstances. The project team is +obligated to maintain confidentiality with regard to the reporter of an +incident. 
Further details of specific enforcement policies may be posted
+separately.
+
+Project maintainers who do not follow or enforce the Code of Conduct in good
+faith may face temporary or permanent repercussions as determined by other
+members of the project’s leadership.
+
+## Attribution
+
+This Code of Conduct is adapted from the [Contributor Covenant][homepage],
+version 1.4, available at <https://www.contributor-covenant.org/version/1/4/code-of-conduct.html>.
+
+[homepage]: https://www.contributor-covenant.org
diff --git a/third_party/rust/colorful/README.md b/third_party/rust/colorful/README.md
new file mode 100644
index 000000000000..4abec5ee21aa
--- /dev/null
+++ b/third_party/rust/colorful/README.md
@@ -0,0 +1,196 @@
+

+# Colorful
+
+ +[![Build Status](https://travis-ci.org/rocketsman/colorful.svg?branch=master)](https://travis-ci.org/rocketsman/colorful) [![Coverage Status](https://coveralls.io/repos/github/rocketsman/colorful/badge.svg?branch=master)](https://coveralls.io/github/rocketsman/colorful?branch=master) [![Codacy Badge](https://api.codacy.com/project/badge/Grade/37a45510f41445eea0168f0f07e8f7cb)](https://app.codacy.com/app/rocketsman/colorful_2?utm_source=github.com&utm_medium=referral&utm_content=rocketsman/colorful&utm_campaign=Badge_Grade_Dashboard) + +## Usage + +### Basic Usage + +```Rust +extern crate colorful; + +use colorful::Color; +use colorful::Colorful; +//use colorful::HSL; +//use colorful::RGB; + +fn main() { + let s = "Hello world"; + println!("{}", s.color(Color::Blue).bg_color(Color::Yellow).bold()); + // println!("{}", s.color(HSL::new(1.0, 1.0, 0.5)).bold()); + // println!("{}", s.color(RGB::new(255, 0, 0)).bold()); + println!("{}", s.blue().bg_yellow()); +} +``` + +### Gradient + +```Rust +extern crate colorful; + +use colorful::Color; +use colorful::Colorful; + +fn main() { + println!("{}", "This code is editable and runnable!".gradient(Color::Red)); + println!("{}", "¡Este código es editable y ejecutable!".gradient(Color::Green)); + println!("{}", "Ce code est modifiable et exécutable !".gradient(Color::Yellow)); + println!("{}", "Questo codice è modificabile ed eseguibile!".gradient(Color::Blue)); + println!("{}", "このコードは編集して実行出来ます!".gradient(Color::Magenta)); + println!("{}", "여기에서 코드를 수정하고 실행할 수 있습니다!".gradient(Color::Cyan)); + println!("{}", "Ten kod można edytować oraz uruchomić!".gradient(Color::LightGray)); + println!("{}", "Este código é editável e executável!".gradient(Color::DarkGray)); + println!("{}", "Этот код можно отредактировать и запустить!".gradient(Color::LightRed)); + println!("{}", "Bạn có thể edit và run code trực tiếp!".gradient(Color::LightGreen)); + println!("{}", "这段代码是可以编辑并且能够运行的!".gradient(Color::LightYellow)); + println!("{}", "Dieser Code kann bearbeitet und ausgeführt werden!".gradient(Color::LightBlue)); + println!("{}", "Den här koden kan redigeras och köras!".gradient(Color::LightMagenta)); + println!("{}", "Tento kód můžete upravit a spustit".gradient(Color::LightCyan)); + println!("{}", "این کد قابلیت ویرایش و اجرا دارد!".gradient(Color::White)); + println!("{}", "โค้ดนี้สามารถแก้ไขได้และรันได้".gradient(Color::Grey0)); +} + +``` +
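+Beyond `gradient`, the `Colorful` trait (vendored in `src/lib.rs` later in this patch) also declares `gradient_with_step`. A minimal sketch, assuming the `step` argument controls how far the hue advances per character — that semantic is an assumption, not documented upstream:
+
+```Rust
+extern crate colorful;
+
+use colorful::Color;
+use colorful::Colorful;
+
+fn main() {
+    // 0.1 is an illustrative step value, not a documented default.
+    println!("{}", "A steeper gradient".gradient_with_step(Color::Green, 0.1));
+}
+```
+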
+
+### Gradient with style
+
+```Rust
+extern crate colorful;
+
+use colorful::Colorful;
+use colorful::HSL;
+
+fn main() {
+    println!("{}", "言葉にできず 凍えたままで 人前ではやさしく生きていた しわよせで こんなふうに雑に 雨の夜にきみを 抱きしめてた".gradient_with_color(HSL::new(0.0, 1.0, 0.5), HSL::new(0.833, 1.0, 0.5)).underlined());
+}
+```
+
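+Chaining `.underlined()` onto the gradient works because every `Colorful` method returns a `CString`, and `CString` itself implements `StrMarker`, so the trait's blanket impl applies to the result again. A minimal sketch of stacking foreground, background and styles:
+
+```Rust
+extern crate colorful;
+
+use colorful::Color;
+use colorful::Colorful;
+
+fn main() {
+    // Each call wraps the previous CString and accumulates escape codes.
+    println!("{}", "stacked".color(Color::Green).bg_color(Color::Black).bold().underlined());
+}
+```
+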
+
+### Bar chart
+
+```Rust
+extern crate colorful;
+
+use colorful::Colorful;
+use colorful::HSL;
+
+fn main() {
+    let s = "█";
+    println!("{}\n", "Most Loved, Dreaded, and Wanted Languages".red());
+    let values = vec![78.9, 75.1, 68.0, 67.0, 65.6, 65.1, 61.9, 60.4];
+    let languages = vec!["Rust", "Kotlin", "Python", "TypeScript", "Go", "Swift", "JavaScript", "C#"];
+    let c = languages.iter().max_by_key(|x| x.len()).unwrap();
+
+    for (i, value) in values.iter().enumerate() {
+        let h = (*value as f32 * 15.0 % 360.0) / 360.0;
+        let length = (value - 30.0) as usize;
+        // NOTE: this println! line is a reconstruction; the original was
+        // truncated in this copy of the README.
+        println!("{:<width$} | {} {}", languages[i], s.repeat(length).gradient(HSL::new(h, 1.0, 0.5)), value, width = c.len());
+    }
+}
+```
+
+### Animation
+
+#### Rainbow
+
+```Rust
+extern crate colorful;
+
+use colorful::Colorful;
+
+fn main() {
+    let text = format!("{:^50}\n{}\r\n{}", "岳飞 小重山", "昨夜寒蛩不住鸣 惊回千里梦 已三更 起身独自绕阶行 人悄悄 帘外月胧明",
+                       "白首为功名 旧山松竹老 阻归程 欲将心事付瑶琴 知音少 弦断有谁听");
+    text.rainbow();
+}
+```
+Output
+
+*(animated rainbow demo)*
+
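+The trait also exposes `rainbow_with_speed` alongside `rainbow`. A minimal sketch, assuming larger `speed` values mean faster redraws (the unit is not documented):
+
+```Rust
+extern crate colorful;
+
+use colorful::Colorful;
+
+fn main() {
+    // Assumed semantics: speed scales the animation's frame rate.
+    "Animated banner".rainbow_with_speed(3);
+}
+```
+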
+
+#### Neon
+
+```Rust
+extern crate colorful;
+
+use colorful::Colorful;
+use colorful::RGB;
+
+fn main() {
+    let text = format!("{:^28}\n{}", "WARNING", "BIG BROTHER IS WATCHING YOU!!!");
+    text.neon(RGB::new(226, 14, 14), RGB::new(158, 158, 158));
+    // or you can use text.warn();
+}
+
+```
+Output
+
+*(animated neon demo)*
+
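+Similarly, `neon_with_speed` is the parameterised variant of `neon`, with the same hedged `speed` assumption as above:
+
+```Rust
+extern crate colorful;
+
+use colorful::Colorful;
+use colorful::RGB;
+
+fn main() {
+    // Assumed semantics: speed scales how fast the two colors alternate.
+    "ALERT".neon_with_speed(RGB::new(226, 14, 14), RGB::new(158, 158, 158), 2);
+}
+```
+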
+
+## Terminals compatibility
+
+The compatibility matrix is an HTML table in the source README and its
+per-cell marks are not reproduced here. Columns cover formatting (Bold, Dim,
+Underlined, Blink, Invert, Hidden) and color depth (8, 16, 88, 256); rows
+cover aTerm (~), Eterm (~), GNOME Terminal, Guake, Konsole, Nautilus Terminal,
+rxvt (~), Terminator, Tilda, XFCE4 Terminal, XTerm, xvt, Linux TTY (~), and
+VTE Terminal.
+
+~: Supported in a special way by the terminal.
+
+## Todo
+
+- [x] Basic 16 color
+- [ ] Extra 240 color
+- [x] HSL support
+- [x] RGB support
+- [x] Gradient mode
+- [x] Rainbow mode
+- [x] Animation mode
+- [ ] Document
+- [x] Terminals compatibility
+
+## License
+
+[![FOSSA Status](https://app.fossa.io/api/projects/git%2Bgithub.com%2Fda2018%2Fcolorful.svg?type=large)](https://app.fossa.io/projects/git%2Bgithub.com%2Fda2018%2Fcolorful?ref=badge_large)
diff --git a/third_party/rust/colorful/license b/third_party/rust/colorful/license
new file mode 100644
index 000000000000..e7af2f77107d
--- /dev/null
+++ b/third_party/rust/colorful/license
@@ -0,0 +1,9 @@
+MIT License
+
+Copyright (c) Sindre Sorhus (sindresorhus.com)
+
+Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
diff --git a/third_party/rust/colorful/rustfmt.toml b/third_party/rust/colorful/rustfmt.toml
new file mode 100644
index 000000000000..4fff285e7dfd
--- /dev/null
+++ b/third_party/rust/colorful/rustfmt.toml
@@ -0,0 +1,5 @@
+max_width = 89
+reorder_imports = true
+#wrap_comments = true
+fn_args_density = "Compressed"
+#use_small_heuristics = false
diff --git a/third_party/rust/colorful/src/core/color_string.rs b/third_party/rust/colorful/src/core/color_string.rs
new file mode 100644
index 000000000000..f619f9949ad1
--- /dev/null
+++ b/third_party/rust/colorful/src/core/color_string.rs
@@ -0,0 +1,134 @@
+use std::fmt::Display;
+use std::fmt::Formatter;
+use std::fmt::Result as FmtResult;
+
+use core::ColorInterface;
+use core::colors::Colorado;
+use core::colors::ColorMode;
+use core::StrMarker;
+use core::symbols::Symbol;
+use Style;
+
+
+#[derive(Clone)]
+pub struct CString {
+    text: String,
+    fg_color: Option<Colorado>,
+    bg_color: Option<Colorado>,
+    styles: Option<Vec<Style>>,
+}
+
+impl StrMarker for CString {
+    fn to_str(&self) -> String {
+        self.text.to_owned()
+    }
+    fn get_fg_color(&self) -> Option<Colorado> {
+        self.fg_color.clone()
+    }
+    fn get_bg_color(&self) -> Option<Colorado> {
+        self.bg_color.clone()
+    }
+    fn get_style(&self) -> Option<Vec<Style>> {
+        self.styles.clone()
+    }
+}
+
+
+impl CString {
+    pub fn new<S: StrMarker>(cs: S) -> CString {
+        CString {
+            text: cs.to_str(),
+            fg_color: cs.get_fg_color(),
+            bg_color: cs.get_bg_color(),
+            styles: cs.get_style(),
+        }
+    }
+    pub fn create_by_text<S: StrMarker>(cs: S, t: String) -> CString {
+        CString { text: t, ..CString::new(cs) }
+    }
+    pub fn create_by_fg<S: StrMarker, C: ColorInterface>(cs: S, color: C) -> CString {
+        CString { fg_color: Some(Colorado::new(color)), ..CString::new(cs) }
+    }
+    pub fn create_by_bg<S: StrMarker, C: ColorInterface>(cs: S, color: C) -> CString {
+        CString { bg_color: Some(Colorado::new(color)), ..CString::new(cs) }
+    }
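+    // Each create_by_* constructor funnels through CString::new, carrying
+    // forward the text, colors and styles gathered so far; the Display impl
+    // below is what finally renders them as a single ANSI escape sequence.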
+    pub fn create_by_style<S: StrMarker>(cs: S, style: Style) -> CString {
+        CString {
+            text: cs.to_str(),
+            styles: match cs.get_style() {
+                Some(mut v) => {
+                    v.push(style);
+                    Some(v)
+                }
+                _ => { Some(vec![style]) }
+            },
+            fg_color: cs.get_fg_color(),
+            bg_color: cs.get_bg_color(),
+        }
+    }
+}
+
+impl Display for CString {
+    fn fmt(&self, f: &mut Formatter) -> FmtResult {
+        let mut is_colored = false;
+
+        if self.bg_color.is_none() && self.fg_color.is_none() && self.styles.is_none() {
+            write!(f, "{}", self.text)?;
+            Ok(())
+        } else {
+            match &self.fg_color {
+                Some(v) => {
+                    is_colored = true;
+                    match v.get_mode() {
+                        ColorMode::SIMPLE => {
+                            f.write_str(Symbol::Simple256Foreground.to_str())?;
+                        }
+                        ColorMode::RGB => {
+                            f.write_str(Symbol::RgbForeground.to_str())?;
+                        }
+                        _ => {}
+                    }
+                    write!(f, "{}", v.get_color())?;
+                }
+                _ => {}
+            }
+            match &self.bg_color {
+                Some(v) => {
+                    if is_colored {
+                        f.write_str(Symbol::Mode.to_str())?;
+                    } else {
+                        is_colored = true;
+                    }
+                    match v.get_mode() {
+                        ColorMode::SIMPLE => {
+                            f.write_str(Symbol::Simple256Background.to_str())?;
+                        }
+                        ColorMode::RGB => {
+                            f.write_str(Symbol::RgbBackground.to_str())?;
+                        }
+                        _ => {}
+                    }
+                    write!(f, "{}", v.get_color())?;
+                }
+                _ => {}
+            }
+
+            match &self.styles {
+                Some(v) => {
+                    if !is_colored {
+                        write!(f, "{}{}", Symbol::Esc, Symbol::LeftBrackets)?;
+                    } else {
+                        f.write_str(Symbol::Semicolon.to_str())?;
+                    }
+                    let t: Vec<String> = v.into_iter().map(|x| x.to_string()).collect();
+                    f.write_str(&t.join(";")[..])?;
+                }
+                _ => {}
+            }
+            f.write_str(Symbol::Mode.to_str())?;
+            write!(f, "{}", self.text)?;
+            f.write_str(Symbol::Reset.to_str())?;
+            Ok(())
+        }
+    }
}
diff --git a/third_party/rust/colorful/src/core/colors.rs b/third_party/rust/colorful/src/core/colors.rs
new file mode 100644
index 000000000000..ca4194b1b20d
--- /dev/null
+++ b/third_party/rust/colorful/src/core/colors.rs
@@ -0,0 +1,847 @@
+use std::slice::Iter;
+
+use core::ColorInterface;
+use HSL;
+
+#[derive(Copy, Clone, PartialEq, Eq, Debug)]
+pub enum Color {
+    Black,
+    Red,
+    Green,
+    Yellow,
+    Blue,
+    Magenta,
+    Cyan,
+    LightGray,
+    DarkGray,
+    LightRed,
+    LightGreen,
+    LightYellow,
+    LightBlue,
+    LightMagenta,
+    LightCyan,
+    White,
+    Grey0,
+    NavyBlue,
+    DarkBlue,
+    Blue3a,
+    Blue3b,
+    Blue1,
+    DarkGreen,
+    DeepSkyBlue4a,
+    DeepSkyBlue4b,
+    DeepSkyBlue4c,
+    DodgerBlue3,
+    DodgerBlue2,
+    Green4,
+    SpringGreen4,
+    Turquoise4,
+    DeepSkyBlue3a,
+    DeepSkyBlue3b,
+    DodgerBlue1,
+    Green3a,
+    SpringGreen3a,
+    DarkCyan,
+    LightSeaGreen,
+    DeepSkyBlue2,
+    DeepSkyBlue1,
+    Green3b,
+    SpringGreen3b,
+    SpringGreen2a,
+    Cyan3,
+    DarkTurquoise,
+    Turquoise2,
+    Green1,
+    SpringGreen2b,
+    SpringGreen1,
+    MediumSpringGreen,
+    Cyan2,
+    Cyan1,
+    DarkRed1,
+    DeepPink4a,
+    Purple4a,
+    Purple4b,
+    Purple3,
+    BlueViolet,
+    Orange4a,
+    Grey37,
+    MediumPurple4,
+    SlateBlue3a,
+    SlateBlue3b,
+    RoyalBlue1,
+    Chartreuse4,
+    DarkSeaGreen4a,
+    PaleTurquoise4,
+    SteelBlue,
+    SteelBlue3,
+    CornflowerBlue,
+    Chartreuse3a,
+    DarkSeaGreen4b,
+    CadetBlue2,
+    CadetBlue1,
+    SkyBlue3,
+    SteelBlue1a,
+    Chartreuse3b,
+    PaleGreen3a,
+    SeaGreen3,
+    Aquamarine3,
+    MediumTurquoise,
+    SteelBlue1b,
+    Chartreuse2a,
+    SeaGreen2,
+    SeaGreen1a,
+    SeaGreen1b,
+    Aquamarine1a,
+    DarkSlateGray2,
+    DarkRed2,
+    DeepPink4b,
+    DarkMagenta1,
+    DarkMagenta2,
+    DarkViolet1a,
+    Purple1a,
+    Orange4b,
+    LightPink4,
+    Plum4,
+    MediumPurple3a,
+    MediumPurple3b,
+    SlateBlue1,
+    Yellow4a,
+    Wheat4,
+    Grey53,
+    LightSlateGrey,
+    MediumPurple,
+    LightSlateBlue,
+    Yellow4b,
+    DarkOliveGreen3a,
+    DarkGreenSea,
+    LightSkyBlue3a,
+    LightSkyBlue3b,
+    
SkyBlue2, + Chartreuse2b, + DarkOliveGreen3b, + PaleGreen3b, + DarkSeaGreen3a, + DarkSlateGray3, + SkyBlue1, + Chartreuse1, + LightGreen2, + LightGreen3, + PaleGreen1a, + Aquamarine1b, + DarkSlateGray1, + Red3a, + DeepPink4c, + MediumVioletRed, + Magenta3a, + DarkViolet1b, + Purple1b, + DarkOrange3a, + IndianRed1a, + HotPink3a, + MediumOrchid3, + MediumOrchid, + MediumPurple2a, + DarkGoldenrod, + LightSalmon3a, + RosyBrown, + Grey63, + MediumPurple2b, + MediumPurple1, + Gold3a, + DarkKhaki, + NavajoWhite3, + Grey69, + LightSteelBlue3, + LightSteelBlue, + Yellow3a, + DarkOliveGreen3, + DarkSeaGreen3b, + DarkSeaGreen2, + LightCyan3, + LightSkyBlue1, + GreenYellow, + DarkOliveGreen2, + PaleGreen1b, + DarkSeaGreen5b, + DarkSeaGreen5a, + PaleTurquoise1, + Red3b, + DeepPink3a, + DeepPink3b, + Magenta3b, + Magenta3c, + Magenta2a, + DarkOrange3b, + IndianRed1b, + HotPink3b, + HotPink2, + Orchid, + MediumOrchid1a, + Orange3, + LightSalmon3b, + LightPink3, + Pink3, + Plum3, + Violet, + Gold3b, + LightGoldenrod3, + Tan, + MistyRose3, + Thistle3, + Plum2, + Yellow3b, + Khaki3, + LightGoldenrod2a, + LightYellow3, + Grey84, + LightSteelBlue1, + Yellow2, + DarkOliveGreen1a, + DarkOliveGreen1b, + DarkSeaGreen1, + Honeydew2, + LightCyan1, + Red1, + DeepPink2, + DeepPink1a, + DeepPink1b, + Magenta2b, + Magenta1, + OrangeRed1, + IndianRed1c, + IndianRed1d, + HotPink1a, + HotPink1b, + MediumOrchid1b, + DarkOrange, + Salmon1, + LightCoral, + PaleVioletRed1, + Orchid2, + Orchid1, + Orange1, + SandyBrown, + LightSalmon1, + LightPink1, + Pink1, + Plum1, + Gold1, + LightGoldenrod2b, + LightGoldenrod2c, + NavajoWhite1, + MistyRose1, + Thistle1, + Yellow1, + LightGoldenrod1, + Khaki1, + Wheat1, + CornSilk1, + Grey100, + Grey3, + Grey7, + Grey11, + Grey15, + Grey19, + Grey23, + Grey27, + Grey30, + Grey35, + Grey39, + Grey42, + Grey46, + Grey50, + Grey54, + Grey58, + Grey62, + Grey66, + Grey70, + Grey74, + Grey78, + Grey82, + Grey85, + Grey89, + Grey93, +} + +impl ColorInterface for Color { + fn to_color_str(&self) -> String { + format!("{}", + match self { + Color::Black => "0", + Color::Red => "1", + Color::Green => "2", + Color::Yellow => "3", + Color::Blue => "4", + Color::Magenta => "5", + Color::Cyan => "6", + Color::LightGray => "7", + Color::DarkGray => "8", + Color::LightRed => "9", + Color::LightGreen => "10", + Color::LightYellow => "11", + Color::LightBlue => "12", + Color::LightMagenta => "13", + Color::LightCyan => "14", + Color::White => "15", + Color::Grey0 => "16", + Color::NavyBlue => "17", + Color::DarkBlue => "18", + Color::Blue3a => "19", + Color::Blue3b => "20", + Color::Blue1 => "21", + Color::DarkGreen => "22", + Color::DeepSkyBlue4a => "23", + Color::DeepSkyBlue4b => "24", + Color::DeepSkyBlue4c => "25", + Color::DodgerBlue3 => "26", + Color::DodgerBlue2 => "27", + Color::Green4 => "28", + Color::SpringGreen4 => "29", + Color::Turquoise4 => "30", + Color::DeepSkyBlue3a => "31", + Color::DeepSkyBlue3b => "32", + Color::DodgerBlue1 => "33", + Color::Green3a => "34", + Color::SpringGreen3a => "35", + Color::DarkCyan => "36", + Color::LightSeaGreen => "37", + Color::DeepSkyBlue2 => "38", + Color::DeepSkyBlue1 => "39", + Color::Green3b => "40", + Color::SpringGreen3b => "41", + Color::SpringGreen2a => "42", + Color::Cyan3 => "43", + Color::DarkTurquoise => "44", + Color::Turquoise2 => "45", + Color::Green1 => "46", + Color::SpringGreen2b => "47", + Color::SpringGreen1 => "48", + Color::MediumSpringGreen => "49", + Color::Cyan2 => "50", + Color::Cyan1 => "51", + Color::DarkRed1 => "52", + 
Color::DeepPink4a => "53", + Color::Purple4a => "54", + Color::Purple4b => "55", + Color::Purple3 => "56", + Color::BlueViolet => "57", + Color::Orange4a => "58", + Color::Grey37 => "59", + Color::MediumPurple4 => "60", + Color::SlateBlue3a => "61", + Color::SlateBlue3b => "62", + Color::RoyalBlue1 => "63", + Color::Chartreuse4 => "64", + Color::DarkSeaGreen4a => "65", + Color::PaleTurquoise4 => "66", + Color::SteelBlue => "67", + Color::SteelBlue3 => "68", + Color::CornflowerBlue => "69", + Color::Chartreuse3a => "70", + Color::DarkSeaGreen4b => "71", + Color::CadetBlue2 => "72", + Color::CadetBlue1 => "73", + Color::SkyBlue3 => "74", + Color::SteelBlue1a => "75", + Color::Chartreuse3b => "76", + Color::PaleGreen3a => "77", + Color::SeaGreen3 => "78", + Color::Aquamarine3 => "79", + Color::MediumTurquoise => "80", + Color::SteelBlue1b => "81", + Color::Chartreuse2a => "82", + Color::SeaGreen2 => "83", + Color::SeaGreen1a => "84", + Color::SeaGreen1b => "85", + Color::Aquamarine1a => "86", + Color::DarkSlateGray2 => "87", + Color::DarkRed2 => "88", + Color::DeepPink4b => "89", + Color::DarkMagenta1 => "90", + Color::DarkMagenta2 => "91", + Color::DarkViolet1a => "92", + Color::Purple1a => "93", + Color::Orange4b => "94", + Color::LightPink4 => "95", + Color::Plum4 => "96", + Color::MediumPurple3a => "97", + Color::MediumPurple3b => "98", + Color::SlateBlue1 => "99", + Color::Yellow4a => "100", + Color::Wheat4 => "101", + Color::Grey53 => "102", + Color::LightSlateGrey => "103", + Color::MediumPurple => "104", + Color::LightSlateBlue => "105", + Color::Yellow4b => "106", + Color::DarkOliveGreen3a => "107", + Color::DarkGreenSea => "108", + Color::LightSkyBlue3a => "109", + Color::LightSkyBlue3b => "110", + Color::SkyBlue2 => "111", + Color::Chartreuse2b => "112", + Color::DarkOliveGreen3b => "113", + Color::PaleGreen3b => "114", + Color::DarkSeaGreen3a => "115", + Color::DarkSlateGray3 => "116", + Color::SkyBlue1 => "117", + Color::Chartreuse1 => "118", + Color::LightGreen2 => "119", + Color::LightGreen3 => "120", + Color::PaleGreen1a => "121", + Color::Aquamarine1b => "122", + Color::DarkSlateGray1 => "123", + Color::Red3a => "124", + Color::DeepPink4c => "125", + Color::MediumVioletRed => "126", + Color::Magenta3a => "127", + Color::DarkViolet1b => "128", + Color::Purple1b => "129", + Color::DarkOrange3a => "130", + Color::IndianRed1a => "131", + Color::HotPink3a => "132", + Color::MediumOrchid3 => "133", + Color::MediumOrchid => "134", + Color::MediumPurple2a => "135", + Color::DarkGoldenrod => "136", + Color::LightSalmon3a => "137", + Color::RosyBrown => "138", + Color::Grey63 => "139", + Color::MediumPurple2b => "140", + Color::MediumPurple1 => "141", + Color::Gold3a => "142", + Color::DarkKhaki => "143", + Color::NavajoWhite3 => "144", + Color::Grey69 => "145", + Color::LightSteelBlue3 => "146", + Color::LightSteelBlue => "147", + Color::Yellow3a => "148", + Color::DarkOliveGreen3 => "149", + Color::DarkSeaGreen3b => "150", + Color::DarkSeaGreen2 => "151", + Color::LightCyan3 => "152", + Color::LightSkyBlue1 => "153", + Color::GreenYellow => "154", + Color::DarkOliveGreen2 => "155", + Color::PaleGreen1b => "156", + Color::DarkSeaGreen5b => "157", + Color::DarkSeaGreen5a => "158", + Color::PaleTurquoise1 => "159", + Color::Red3b => "160", + Color::DeepPink3a => "161", + Color::DeepPink3b => "162", + Color::Magenta3b => "163", + Color::Magenta3c => "164", + Color::Magenta2a => "165", + Color::DarkOrange3b => "166", + Color::IndianRed1b => "167", + Color::HotPink3b => "168", + 
Color::HotPink2 => "169", + Color::Orchid => "170", + Color::MediumOrchid1a => "171", + Color::Orange3 => "172", + Color::LightSalmon3b => "173", + Color::LightPink3 => "174", + Color::Pink3 => "175", + Color::Plum3 => "176", + Color::Violet => "177", + Color::Gold3b => "178", + Color::LightGoldenrod3 => "179", + Color::Tan => "180", + Color::MistyRose3 => "181", + Color::Thistle3 => "182", + Color::Plum2 => "183", + Color::Yellow3b => "184", + Color::Khaki3 => "185", + Color::LightGoldenrod2a => "186", + Color::LightYellow3 => "187", + Color::Grey84 => "188", + Color::LightSteelBlue1 => "189", + Color::Yellow2 => "190", + Color::DarkOliveGreen1a => "191", + Color::DarkOliveGreen1b => "192", + Color::DarkSeaGreen1 => "193", + Color::Honeydew2 => "194", + Color::LightCyan1 => "195", + Color::Red1 => "196", + Color::DeepPink2 => "197", + Color::DeepPink1a => "198", + Color::DeepPink1b => "199", + Color::Magenta2b => "200", + Color::Magenta1 => "201", + Color::OrangeRed1 => "202", + Color::IndianRed1c => "203", + Color::IndianRed1d => "204", + Color::HotPink1a => "205", + Color::HotPink1b => "206", + Color::MediumOrchid1b => "207", + Color::DarkOrange => "208", + Color::Salmon1 => "209", + Color::LightCoral => "210", + Color::PaleVioletRed1 => "211", + Color::Orchid2 => "212", + Color::Orchid1 => "213", + Color::Orange1 => "214", + Color::SandyBrown => "215", + Color::LightSalmon1 => "216", + Color::LightPink1 => "217", + Color::Pink1 => "218", + Color::Plum1 => "219", + Color::Gold1 => "220", + Color::LightGoldenrod2b => "221", + Color::LightGoldenrod2c => "222", + Color::NavajoWhite1 => "223", + Color::MistyRose1 => "224", + Color::Thistle1 => "225", + Color::Yellow1 => "226", + Color::LightGoldenrod1 => "227", + Color::Khaki1 => "228", + Color::Wheat1 => "229", + Color::CornSilk1 => "230", + Color::Grey100 => "231", + Color::Grey3 => "232", + Color::Grey7 => "233", + Color::Grey11 => "234", + Color::Grey15 => "235", + Color::Grey19 => "236", + Color::Grey23 => "237", + Color::Grey27 => "238", + Color::Grey30 => "239", + Color::Grey35 => "240", + Color::Grey39 => "241", + Color::Grey42 => "242", + Color::Grey46 => "243", + Color::Grey50 => "244", + Color::Grey54 => "245", + Color::Grey58 => "246", + Color::Grey62 => "247", + Color::Grey66 => "248", + Color::Grey70 => "249", + Color::Grey74 => "250", + Color::Grey78 => "251", + Color::Grey82 => "252", + Color::Grey85 => "253", + Color::Grey89 => "254", + Color::Grey93 => "255", + } + ) + } + fn to_hsl(&self) -> HSL { + match self { + Color::Black => HSL::new(0.0, 0.0, 0.0), + Color::Red => HSL::new(0.0, 1.0, 0.25), + Color::Green => HSL::new(0.3333333333333333, 1.0, 0.25), + Color::Yellow => HSL::new(0.16666666666666666, 1.0, 0.25), + Color::Blue => HSL::new(0.6666666666666666, 1.0, 0.25), + Color::Magenta => HSL::new(0.8333333333333334, 1.0, 0.25), + Color::Cyan => HSL::new(0.5, 1.0, 0.25), + Color::LightGray => HSL::new(0.0, 0.0, 0.75), + Color::DarkGray => HSL::new(0.0, 0.0, 0.5), + Color::LightRed => HSL::new(0.0, 1.0, 0.5), + Color::LightGreen => HSL::new(0.3333333333333333, 1.0, 0.5), + Color::LightYellow => HSL::new(0.16666666666666666, 1.0, 0.5), + Color::LightBlue => HSL::new(0.6666666666666666, 1.0, 0.5), + Color::LightMagenta => HSL::new(0.8333333333333334, 1.0, 0.5), + Color::LightCyan => HSL::new(0.5, 1.0, 0.5), + Color::White => HSL::new(0.0, 0.0, 1.0), + Color::Grey0 => HSL::new(0.0, 0.0, 0.0), + Color::NavyBlue => HSL::new(0.6666666666666666, 1.0, 0.18), + Color::DarkBlue => HSL::new(0.6666666666666666, 1.0, 0.26), + 
Color::Blue3a => HSL::new(0.6666666666666666, 1.0, 0.34), + Color::Blue3b => HSL::new(0.6666666666666666, 1.0, 0.42), + Color::Blue1 => HSL::new(0.6666666666666666, 1.0, 0.5), + Color::DarkGreen => HSL::new(0.3333333333333333, 1.0, 0.18), + Color::DeepSkyBlue4a => HSL::new(0.5, 1.0, 0.18), + Color::DeepSkyBlue4b => HSL::new(0.5493827160493834, 1.0, 0.26), + Color::DeepSkyBlue4c => HSL::new(0.5761904761904749, 1.0, 0.34), + Color::DodgerBlue3 => HSL::new(0.5930232558139528, 1.0, 0.42), + Color::DodgerBlue2 => HSL::new(0.6045751633986917, 1.0, 0.5), + Color::Green4 => HSL::new(0.3333333333333333, 1.0, 0.26), + Color::SpringGreen4 => HSL::new(0.4506172839506167, 1.0, 0.26), + Color::Turquoise4 => HSL::new(0.5, 1.0, 0.26), + Color::DeepSkyBlue3a => HSL::new(0.538095238095239, 1.0, 0.34), + Color::DeepSkyBlue3b => HSL::new(0.5620155038759694, 1.0, 0.42), + Color::DodgerBlue1 => HSL::new(0.5784313725490194, 1.0, 0.5), + Color::Green3a => HSL::new(0.3333333333333333, 1.0, 0.34), + Color::SpringGreen3a => HSL::new(0.423809523809525, 1.0, 0.34), + Color::DarkCyan => HSL::new(0.4619047619047611, 1.0, 0.34), + Color::LightSeaGreen => HSL::new(0.5, 1.0, 0.34), + Color::DeepSkyBlue2 => HSL::new(0.5310077519379833, 1.0, 0.42), + Color::DeepSkyBlue1 => HSL::new(0.5522875816993472, 1.0, 0.5), + Color::Green3b => HSL::new(0.3333333333333333, 1.0, 0.42), + Color::SpringGreen3b => HSL::new(0.40697674418604723, 1.0, 0.42), + Color::SpringGreen2a => HSL::new(0.43798449612403056, 1.0, 0.42), + Color::Cyan3 => HSL::new(0.4689922480620166, 1.0, 0.42), + Color::DarkTurquoise => HSL::new(0.5, 1.0, 0.42), + Color::Turquoise2 => HSL::new(0.5261437908496722, 1.0, 0.5), + Color::Green1 => HSL::new(0.3333333333333333, 1.0, 0.5), + Color::SpringGreen2b => HSL::new(0.39542483660130834, 1.0, 0.5), + Color::SpringGreen1 => HSL::new(0.4215686274509806, 1.0, 0.5), + Color::MediumSpringGreen => HSL::new(0.4477124183006528, 1.0, 0.5), + Color::Cyan2 => HSL::new(0.4738562091503278, 1.0, 0.5), + Color::Cyan1 => HSL::new(0.5, 1.0, 0.5), + Color::DarkRed1 => HSL::new(0.0, 1.0, 0.18), + Color::DeepPink4a => HSL::new(0.8333333333333334, 1.0, 0.18), + Color::Purple4a => HSL::new(0.78395061728395, 1.0, 0.26), + Color::Purple4b => HSL::new(0.7571428571428583, 1.0, 0.34), + Color::Purple3 => HSL::new(0.7403100775193806, 1.0, 0.42), + Color::BlueViolet => HSL::new(0.7287581699346417, 1.0, 0.5), + Color::Orange4a => HSL::new(0.16666666666666666, 1.0, 0.18), + Color::Grey37 => HSL::new(0.0, 0.0, 0.37), + Color::MediumPurple4 => HSL::new(0.6666666666666666, 0.17, 0.45), + Color::SlateBlue3a => HSL::new(0.6666666666666666, 0.33, 0.52), + Color::SlateBlue3b => HSL::new(0.6666666666666666, 0.6, 0.6), + Color::RoyalBlue1 => HSL::new(0.6666666666666666, 1.0, 0.68), + Color::Chartreuse4 => HSL::new(0.21604938271604945, 1.0, 0.26), + Color::DarkSeaGreen4a => HSL::new(0.3333333333333333, 0.17, 0.45), + Color::PaleTurquoise4 => HSL::new(0.5, 0.17, 0.45), + Color::SteelBlue => HSL::new(0.5833333333333334, 0.33, 0.52), + Color::SteelBlue3 => HSL::new(0.6111111111111112, 0.6, 0.6), + Color::CornflowerBlue => HSL::new(0.625, 1.0, 0.68), + Color::Chartreuse3a => HSL::new(0.24285714285714277, 1.0, 0.34), + Color::DarkSeaGreen4b => HSL::new(0.3333333333333333, 0.33, 0.52), + Color::CadetBlue2 => HSL::new(0.4166666666666667, 0.33, 0.52), + Color::CadetBlue1 => HSL::new(0.5, 0.33, 0.52), + Color::SkyBlue3 => HSL::new(0.5555555555555556, 0.6, 0.6), + Color::SteelBlue1a => HSL::new(0.5833333333333334, 1.0, 0.68), + Color::Chartreuse3b => 
HSL::new(0.2596899224806203, 1.0, 0.42), + Color::PaleGreen3a => HSL::new(0.3333333333333333, 0.6, 0.6), + Color::SeaGreen3 => HSL::new(0.3888888888888889, 0.6, 0.6), + Color::Aquamarine3 => HSL::new(0.4444444444444444, 0.6, 0.6), + Color::MediumTurquoise => HSL::new(0.5, 0.6, 0.6), + Color::SteelBlue1b => HSL::new(0.5416666666666666, 1.0, 0.68), + Color::Chartreuse2a => HSL::new(0.27124183006535946, 1.0, 0.5), + Color::SeaGreen2 => HSL::new(0.3333333333333333, 1.0, 0.68), + Color::SeaGreen1a => HSL::new(0.375, 1.0, 0.68), + Color::SeaGreen1b => HSL::new(0.4166666666666667, 1.0, 0.68), + Color::Aquamarine1a => HSL::new(0.4583333333333333, 1.0, 0.68), + Color::DarkSlateGray2 => HSL::new(0.5, 1.0, 0.68), + Color::DarkRed2 => HSL::new(0.0, 1.0, 0.26), + Color::DeepPink4b => HSL::new(0.8827160493827166, 1.0, 0.26), + Color::DarkMagenta1 => HSL::new(0.8333333333333334, 1.0, 0.26), + Color::DarkMagenta2 => HSL::new(0.7952380952380944, 1.0, 0.34), + Color::DarkViolet1a => HSL::new(0.7713178294573639, 1.0, 0.42), + Color::Purple1a => HSL::new(0.7549019607843138, 1.0, 0.5), + Color::Orange4b => HSL::new(0.11728395061728389, 1.0, 0.26), + Color::LightPink4 => HSL::new(0.0, 0.17, 0.45), + Color::Plum4 => HSL::new(0.8333333333333334, 0.17, 0.45), + Color::MediumPurple3a => HSL::new(0.75, 0.33, 0.52), + Color::MediumPurple3b => HSL::new(0.7222222222222222, 0.6, 0.6), + Color::SlateBlue1 => HSL::new(0.7083333333333334, 1.0, 0.68), + Color::Yellow4a => HSL::new(0.16666666666666666, 1.0, 0.26), + Color::Wheat4 => HSL::new(0.16666666666666666, 0.17, 0.45), + Color::Grey53 => HSL::new(0.0, 0.0, 0.52), + Color::LightSlateGrey => HSL::new(0.6666666666666666, 0.2, 0.6), + Color::MediumPurple => HSL::new(0.6666666666666666, 0.5, 0.68), + Color::LightSlateBlue => HSL::new(0.6666666666666666, 1.0, 0.76), + Color::Yellow4b => HSL::new(0.2047619047619047, 1.0, 0.34), + Color::DarkOliveGreen3a => HSL::new(0.25, 0.33, 0.52), + Color::DarkGreenSea => HSL::new(0.3333333333333333, 0.2, 0.6), + Color::LightSkyBlue3a => HSL::new(0.5, 0.2, 0.6), + Color::LightSkyBlue3b => HSL::new(0.5833333333333334, 0.5, 0.68), + Color::SkyBlue2 => HSL::new(0.6111111111111112, 1.0, 0.76), + Color::Chartreuse2b => HSL::new(0.22868217054263557, 1.0, 0.42), + Color::DarkOliveGreen3b => HSL::new(0.2777777777777778, 0.6, 0.6), + Color::PaleGreen3b => HSL::new(0.3333333333333333, 0.5, 0.68), + Color::DarkSeaGreen3a => HSL::new(0.4166666666666667, 0.5, 0.68), + Color::DarkSlateGray3 => HSL::new(0.5, 0.5, 0.68), + Color::SkyBlue1 => HSL::new(0.5555555555555556, 1.0, 0.76), + Color::Chartreuse1 => HSL::new(0.2450980392156864, 1.0, 0.5), + Color::LightGreen2 => HSL::new(0.2916666666666667, 1.0, 0.68), + Color::LightGreen3 => HSL::new(0.3333333333333333, 1.0, 0.76), + Color::PaleGreen1a => HSL::new(0.3888888888888889, 1.0, 0.76), + Color::Aquamarine1b => HSL::new(0.4444444444444444, 1.0, 0.76), + Color::DarkSlateGray1 => HSL::new(0.5, 1.0, 0.76), + Color::Red3a => HSL::new(0.0, 1.0, 0.34), + Color::DeepPink4c => HSL::new(0.9095238095238083, 1.0, 0.34), + Color::MediumVioletRed => HSL::new(0.8714285714285722, 1.0, 0.34), + Color::Magenta3a => HSL::new(0.8333333333333334, 1.0, 0.34), + Color::DarkViolet1b => HSL::new(0.80232558139535, 1.0, 0.42), + Color::Purple1b => HSL::new(0.7810457516339862, 1.0, 0.5), + Color::DarkOrange3a => HSL::new(0.09047619047619054, 1.0, 0.34), + Color::IndianRed1a => HSL::new(0.0, 0.33, 0.52), + Color::HotPink3a => HSL::new(0.9166666666666666, 0.33, 0.52), + Color::MediumOrchid3 => HSL::new(0.8333333333333334, 0.33, 0.52), 
+ Color::MediumOrchid => HSL::new(0.7777777777777778, 0.6, 0.6), + Color::MediumPurple2a => HSL::new(0.75, 1.0, 0.68), + Color::DarkGoldenrod => HSL::new(0.12857142857142861, 1.0, 0.34), + Color::LightSalmon3a => HSL::new(0.08333333333333333, 0.33, 0.52), + Color::RosyBrown => HSL::new(0.0, 0.2, 0.6), + Color::Grey63 => HSL::new(0.8333333333333334, 0.2, 0.6), + Color::MediumPurple2b => HSL::new(0.75, 0.5, 0.68), + Color::MediumPurple1 => HSL::new(0.7222222222222222, 1.0, 0.76), + Color::Gold3a => HSL::new(0.16666666666666666, 1.0, 0.34), + Color::DarkKhaki => HSL::new(0.16666666666666666, 0.33, 0.52), + Color::NavajoWhite3 => HSL::new(0.16666666666666666, 0.2, 0.6), + Color::Grey69 => HSL::new(0.0, 0.0, 0.68), + Color::LightSteelBlue3 => HSL::new(0.6666666666666666, 0.33, 0.76), + Color::LightSteelBlue => HSL::new(0.6666666666666666, 1.0, 0.84), + Color::Yellow3a => HSL::new(0.1976744186046511, 1.0, 0.42), + Color::DarkOliveGreen3 => HSL::new(0.2222222222222222, 0.6, 0.6), + Color::DarkSeaGreen3b => HSL::new(0.25, 0.5, 0.68), + Color::DarkSeaGreen2 => HSL::new(0.3333333333333333, 0.33, 0.76), + Color::LightCyan3 => HSL::new(0.5, 0.33, 0.76), + Color::LightSkyBlue1 => HSL::new(0.5833333333333334, 1.0, 0.84), + Color::GreenYellow => HSL::new(0.21895424836601304, 1.0, 0.5), + Color::DarkOliveGreen2 => HSL::new(0.25, 1.0, 0.68), + Color::PaleGreen1b => HSL::new(0.2777777777777778, 1.0, 0.76), + Color::DarkSeaGreen5b => HSL::new(0.3333333333333333, 1.0, 0.84), + Color::DarkSeaGreen5a => HSL::new(0.4166666666666667, 1.0, 0.84), + Color::PaleTurquoise1 => HSL::new(0.5, 1.0, 0.84), + Color::Red3b => HSL::new(0.0, 1.0, 0.42), + Color::DeepPink3a => HSL::new(0.926356589147286, 1.0, 0.42), + Color::DeepPink3b => HSL::new(0.8953488372093028, 1.0, 0.42), + Color::Magenta3b => HSL::new(0.8643410852713166, 1.0, 0.42), + Color::Magenta3c => HSL::new(0.8333333333333334, 1.0, 0.42), + Color::Magenta2a => HSL::new(0.8071895424836611, 1.0, 0.5), + Color::DarkOrange3b => HSL::new(0.07364341085271306, 1.0, 0.42), + Color::IndianRed1b => HSL::new(0.0, 0.6, 0.6), + Color::HotPink3b => HSL::new(0.9444444444444444, 0.6, 0.6), + Color::HotPink2 => HSL::new(0.8888888888888888, 0.6, 0.6), + Color::Orchid => HSL::new(0.8333333333333334, 0.6, 0.6), + Color::MediumOrchid1a => HSL::new(0.7916666666666666, 1.0, 0.68), + Color::Orange3 => HSL::new(0.10465116279069778, 1.0, 0.42), + Color::LightSalmon3b => HSL::new(0.05555555555555555, 0.6, 0.6), + Color::LightPink3 => HSL::new(0.0, 0.5, 0.68), + Color::Pink3 => HSL::new(0.9166666666666666, 0.5, 0.68), + Color::Plum3 => HSL::new(0.8333333333333334, 0.5, 0.68), + Color::Violet => HSL::new(0.7777777777777778, 1.0, 0.76), + Color::Gold3b => HSL::new(0.13565891472868222, 1.0, 0.42), + Color::LightGoldenrod3 => HSL::new(0.1111111111111111, 0.6, 0.6), + Color::Tan => HSL::new(0.08333333333333333, 0.5, 0.68), + Color::MistyRose3 => HSL::new(0.0, 0.33, 0.76), + Color::Thistle3 => HSL::new(0.8333333333333334, 0.33, 0.76), + Color::Plum2 => HSL::new(0.75, 1.0, 0.84), + Color::Yellow3b => HSL::new(0.16666666666666666, 1.0, 0.42), + Color::Khaki3 => HSL::new(0.16666666666666666, 0.6, 0.6), + Color::LightGoldenrod2a => HSL::new(0.16666666666666666, 0.5, 0.68), + Color::LightYellow3 => HSL::new(0.16666666666666666, 0.33, 0.76), + Color::Grey84 => HSL::new(0.0, 0.0, 0.84), + Color::LightSteelBlue1 => HSL::new(0.6666666666666666, 1.0, 0.92), + Color::Yellow2 => HSL::new(0.19281045751633974, 1.0, 0.5), + Color::DarkOliveGreen1a => HSL::new(0.20833333333333334, 1.0, 0.68), + 
Color::DarkOliveGreen1b => HSL::new(0.2222222222222222, 1.0, 0.76), + Color::DarkSeaGreen1 => HSL::new(0.25, 1.0, 0.84), + Color::Honeydew2 => HSL::new(0.3333333333333333, 1.0, 0.92), + Color::LightCyan1 => HSL::new(0.5, 1.0, 0.92), + Color::Red1 => HSL::new(0.0, 1.0, 0.5), + Color::DeepPink2 => HSL::new(0.937908496732025, 1.0, 0.5), + Color::DeepPink1a => HSL::new(0.9117647058823528, 1.0, 0.5), + Color::DeepPink1b => HSL::new(0.8856209150326805, 1.0, 0.5), + Color::Magenta2b => HSL::new(0.8594771241830055, 1.0, 0.5), + Color::Magenta1 => HSL::new(0.8333333333333334, 1.0, 0.5), + Color::OrangeRed1 => HSL::new(0.06209150326797389, 1.0, 0.5), + Color::IndianRed1c => HSL::new(0.0, 1.0, 0.68), + Color::IndianRed1d => HSL::new(0.9583333333333334, 1.0, 0.68), + Color::HotPink1a => HSL::new(0.9166666666666666, 1.0, 0.68), + Color::HotPink1b => HSL::new(0.875, 1.0, 0.68), + Color::MediumOrchid1b => HSL::new(0.8333333333333334, 1.0, 0.68), + Color::DarkOrange => HSL::new(0.08823529411764694, 1.0, 0.5), + Color::Salmon1 => HSL::new(0.041666666666666664, 1.0, 0.68), + Color::LightCoral => HSL::new(0.0, 1.0, 0.76), + Color::PaleVioletRed1 => HSL::new(0.9444444444444444, 1.0, 0.76), + Color::Orchid2 => HSL::new(0.8888888888888888, 1.0, 0.76), + Color::Orchid1 => HSL::new(0.8333333333333334, 1.0, 0.76), + Color::Orange1 => HSL::new(0.11437908496732027, 1.0, 0.5), + Color::SandyBrown => HSL::new(0.08333333333333333, 1.0, 0.68), + Color::LightSalmon1 => HSL::new(0.05555555555555555, 1.0, 0.76), + Color::LightPink1 => HSL::new(0.0, 1.0, 0.84), + Color::Pink1 => HSL::new(0.9166666666666666, 1.0, 0.84), + Color::Plum1 => HSL::new(0.8333333333333334, 1.0, 0.84), + Color::Gold1 => HSL::new(0.14052287581699335, 1.0, 0.5), + Color::LightGoldenrod2b => HSL::new(0.125, 1.0, 0.68), + Color::LightGoldenrod2c => HSL::new(0.1111111111111111, 1.0, 0.76), + Color::NavajoWhite1 => HSL::new(0.08333333333333333, 1.0, 0.84), + Color::MistyRose1 => HSL::new(0.0, 1.0, 0.92), + Color::Thistle1 => HSL::new(0.8333333333333334, 1.0, 0.92), + Color::Yellow1 => HSL::new(0.16666666666666666, 1.0, 0.5), + Color::LightGoldenrod1 => HSL::new(0.16666666666666666, 1.0, 0.68), + Color::Khaki1 => HSL::new(0.16666666666666666, 1.0, 0.76), + Color::Wheat1 => HSL::new(0.16666666666666666, 1.0, 0.84), + Color::CornSilk1 => HSL::new(0.16666666666666666, 1.0, 0.92), + Color::Grey100 => HSL::new(0.0, 0.0, 1.0), + Color::Grey3 => HSL::new(0.0, 0.0, 0.03), + Color::Grey7 => HSL::new(0.0, 0.0, 0.07), + Color::Grey11 => HSL::new(0.0, 0.0, 0.1), + Color::Grey15 => HSL::new(0.0, 0.0, 0.14), + Color::Grey19 => HSL::new(0.0, 0.0, 0.18), + Color::Grey23 => HSL::new(0.0, 0.0, 0.22), + Color::Grey27 => HSL::new(0.0, 0.0, 0.26), + Color::Grey30 => HSL::new(0.0, 0.0, 0.3), + Color::Grey35 => HSL::new(0.0, 0.0, 0.34), + Color::Grey39 => HSL::new(0.0, 0.0, 0.37), + Color::Grey42 => HSL::new(0.0, 0.0, 0.4), + Color::Grey46 => HSL::new(0.0, 0.0, 0.46), + Color::Grey50 => HSL::new(0.0, 0.0, 0.5), + Color::Grey54 => HSL::new(0.0, 0.0, 0.54), + Color::Grey58 => HSL::new(0.0, 0.0, 0.58), + Color::Grey62 => HSL::new(0.0, 0.0, 0.61), + Color::Grey66 => HSL::new(0.0, 0.0, 0.65), + Color::Grey70 => HSL::new(0.0, 0.0, 0.69), + Color::Grey74 => HSL::new(0.0, 0.0, 0.73), + Color::Grey78 => HSL::new(0.0, 0.0, 0.77), + Color::Grey82 => HSL::new(0.0, 0.0, 0.81), + Color::Grey85 => HSL::new(0.0, 0.0, 0.85), + Color::Grey89 => HSL::new(0.0, 0.0, 0.89), + Color::Grey93 => HSL::new(0.0, 0.0, 0.93), + } + } +} + +impl Color { + pub fn iterator() -> Iter<'static, Color> { + use 
Color::*; + static ITEMS: [Color; 256] = [Black, Red, Green, Yellow, Blue, Magenta, Cyan, LightGray, DarkGray, LightRed, LightGreen, LightYellow, LightBlue, LightMagenta, LightCyan, White, Grey0, NavyBlue, DarkBlue, Blue3a, Blue3b, Blue1, DarkGreen, DeepSkyBlue4a, DeepSkyBlue4b, DeepSkyBlue4c, DodgerBlue3, DodgerBlue2, Green4, SpringGreen4, Turquoise4, DeepSkyBlue3a, DeepSkyBlue3b, DodgerBlue1, Green3a, SpringGreen3a, DarkCyan, LightSeaGreen, DeepSkyBlue2, DeepSkyBlue1, Green3b, SpringGreen3b, SpringGreen2a, Cyan3, DarkTurquoise, Turquoise2, Green1, SpringGreen2b, SpringGreen1, MediumSpringGreen, Cyan2, Cyan1, DarkRed1, DeepPink4a, Purple4a, Purple4b, Purple3, BlueViolet, Orange4a, Grey37, MediumPurple4, SlateBlue3a, SlateBlue3b, RoyalBlue1, Chartreuse4, DarkSeaGreen4a, PaleTurquoise4, SteelBlue, SteelBlue3, CornflowerBlue, Chartreuse3a, DarkSeaGreen4b, CadetBlue2, CadetBlue1, SkyBlue3, SteelBlue1a, Chartreuse3b, PaleGreen3a, SeaGreen3, Aquamarine3, MediumTurquoise, SteelBlue1b, Chartreuse2a, SeaGreen2, SeaGreen1a, SeaGreen1b, Aquamarine1a, DarkSlateGray2, DarkRed2, DeepPink4b, DarkMagenta1, DarkMagenta2, DarkViolet1a, Purple1a, Orange4b, LightPink4, Plum4, MediumPurple3a, MediumPurple3b, SlateBlue1, Yellow4a, Wheat4, Grey53, LightSlateGrey, MediumPurple, LightSlateBlue, Yellow4b, DarkOliveGreen3a, DarkGreenSea, LightSkyBlue3a, LightSkyBlue3b, SkyBlue2, Chartreuse2b, DarkOliveGreen3b, PaleGreen3b, DarkSeaGreen3a, DarkSlateGray3, SkyBlue1, Chartreuse1, LightGreen2, LightGreen3, PaleGreen1a, Aquamarine1b, DarkSlateGray1, Red3a, DeepPink4c, MediumVioletRed, Magenta3a, DarkViolet1b, Purple1b, DarkOrange3a, IndianRed1a, HotPink3a, MediumOrchid3, MediumOrchid, MediumPurple2a, DarkGoldenrod, LightSalmon3a, RosyBrown, Grey63, MediumPurple2b, MediumPurple1, Gold3a, DarkKhaki, NavajoWhite3, Grey69, LightSteelBlue3, LightSteelBlue, Yellow3a, DarkOliveGreen3, DarkSeaGreen3b, DarkSeaGreen2, LightCyan3, LightSkyBlue1, GreenYellow, DarkOliveGreen2, PaleGreen1b, DarkSeaGreen5b, DarkSeaGreen5a, PaleTurquoise1, Red3b, DeepPink3a, DeepPink3b, Magenta3b, Magenta3c, Magenta2a, DarkOrange3b, IndianRed1b, HotPink3b, HotPink2, Orchid, MediumOrchid1a, Orange3, LightSalmon3b, LightPink3, Pink3, Plum3, Violet, Gold3b, LightGoldenrod3, Tan, MistyRose3, Thistle3, Plum2, Yellow3b, Khaki3, LightGoldenrod2a, LightYellow3, Grey84, LightSteelBlue1, Yellow2, DarkOliveGreen1a, DarkOliveGreen1b, DarkSeaGreen1, Honeydew2, LightCyan1, Red1, DeepPink2, DeepPink1a, DeepPink1b, Magenta2b, Magenta1, OrangeRed1, IndianRed1c, IndianRed1d, HotPink1a, HotPink1b, MediumOrchid1b, DarkOrange, Salmon1, LightCoral, PaleVioletRed1, Orchid2, Orchid1, Orange1, SandyBrown, LightSalmon1, LightPink1, Pink1, Plum1, Gold1, LightGoldenrod2b, LightGoldenrod2c, NavajoWhite1, MistyRose1, Thistle1, Yellow1, LightGoldenrod1, Khaki1, Wheat1, CornSilk1, Grey100, Grey3, Grey7, Grey11, Grey15, Grey19, Grey23, Grey27, Grey30, Grey35, Grey39, Grey42, Grey46, Grey50, Grey54, Grey58, Grey62, Grey66, Grey70, Grey74, Grey78, Grey82, Grey85, Grey89, Grey93]; + ITEMS.iter() + } +} + +#[derive(Copy, Clone)] +pub enum ColorMode { + SIMPLE, + RGB, + HSL, +} + +#[derive(Clone)] +pub struct Colorado { + mode: ColorMode, + color: String, +} + + +impl Default for Colorado { + fn default() -> Colorado { + Colorado { + mode: ColorMode::SIMPLE, + color: String::default(), + } + } +} + +impl Colorado { + pub fn new(color: C) -> Colorado { + let c = format!("{}", color.to_color_str()); + Colorado { + color: c.clone(), + mode: if c.contains(";") { + ColorMode::RGB + } else { + 
ColorMode::SIMPLE
+            },
+        }
+    }
+    pub fn get_color(&self) -> String { self.color.clone() }
+    pub fn get_mode(&self) -> ColorMode { self.mode }
+}
+
+
+#[cfg(test)]
+mod tests {
+    use super::*;
+
+    #[test]
+    fn test_rgb_iterator() {
+        for _ in Color::iterator() {}
+    }
+}
\ No newline at end of file
diff --git a/third_party/rust/colorful/src/core/hsl.rs b/third_party/rust/colorful/src/core/hsl.rs
new file mode 100644
index 000000000000..7a1a1a3a639d
--- /dev/null
+++ b/third_party/rust/colorful/src/core/hsl.rs
@@ -0,0 +1,105 @@
+use core::ColorInterface;
+use core::rgb::RGB;
+
+#[derive(Debug, Copy, Clone, PartialEq)]
+pub struct HSL {
+
+    pub h: f32,
+    s: f32,
+    l: f32,
+}
+
+impl HSL {
+    pub fn new(h: f32, s: f32, l: f32) -> HSL {
+        HSL { h: _round(h), s: _round(s), l: _round(l) }
+    }
+
+    // Standard HSL-to-RGB conversion: the achromatic case scales lightness
+    // directly; otherwise each channel is derived from a shifted hue.
+    pub fn hsl_to_rgb(&self) -> RGB {
+        let red: f32;
+        let green: f32;
+        let blue: f32;
+        let var_1: f32;
+        let var_2: f32;
+        if self.s == 0.0 {
+            let tmp = self.l * 255.0;
+            red = tmp;
+            green = tmp;
+            blue = tmp;
+        } else {
+            if self.l < 0.5 {
+                var_2 = self.l * (1.0 + self.s);
+            } else {
+                var_2 = (self.l + self.s) - (self.s * self.l);
+            }
+            var_1 = 2.0 * self.l - var_2;
+            red = 255.0 * hue_2_rgb(var_1, var_2, &mut (self.h + (1.0 / 3.0)));
+            green = 255.0 * hue_2_rgb(var_1, var_2, &mut self.h.clone());
+            blue = 255.0 * hue_2_rgb(var_1, var_2, &mut (self.h - (1.0 / 3.0)));
+        }
+        RGB::new(red.round() as u8, green.round() as u8, blue.round() as u8)
+    }
+}
+
+impl ColorInterface for HSL {
+    fn to_color_str(&self) -> String {
+        self.hsl_to_rgb().to_color_str()
+    }
+    fn to_hsl(&self) -> HSL { *self }
+}
+
+fn hue_2_rgb(v1: f32, v2: f32, vh: &mut f32) -> f32 {
+    if *vh < 0.0 {
+        *vh += 1.0;
+    }
+    if *vh > 1.0 {
+        *vh -= 1.0;
+    }
+    if 6.0 * *vh < 1.0 {
+        return v1 + (v2 - v1) * 6.0 * *vh;
+    }
+    if 2.0 * *vh < 1.0 {
+        return v2;
+    }
+    if 3.0 * *vh < 2.0 {
+        return v1 + (v2 - v1) * (2.0 / 3.0 - *vh) * 6.0;
+    }
+    v1
+}
+
+fn _round(value: f32) -> f32 {
+    if value < 0.0 {
+        0.0
+    } else if value >= 1.0 {
+        1.0
+    } else {
+        value
+    }
+}
+
+#[cfg(test)]
+mod tests {
+    use super::*;
+
+    #[test]
+    fn test_hsl_2_rgb_1() {
+        let hsl = HSL::new(0.7, 0.50, 0.60);
+        let rgb = RGB::new(122, 102, 204);
+
+        assert_eq!(rgb, hsl.hsl_to_rgb());
+    }
+
+    #[test]
+    fn test_hsl_2_rgb_2() {
+        let hsl = HSL::new(0.7, 0.0, 0.60);
+        let rgb = RGB::new(153, 153, 153);
+        assert_eq!(rgb, hsl.hsl_to_rgb());
+    }
+
+    #[test]
+    fn test_hsl_2_rgb_3() {
+        let hsl = HSL::new(0.7, 0.50, 0.30);
+        let rgb = RGB::new(54, 38, 115);
+
+        assert_eq!(rgb, hsl.hsl_to_rgb());
+    }
+}
\ No newline at end of file
diff --git a/third_party/rust/colorful/src/core/mod.rs b/third_party/rust/colorful/src/core/mod.rs
new file mode 100644
index 000000000000..a64ad68efdac
--- /dev/null
+++ b/third_party/rust/colorful/src/core/mod.rs
@@ -0,0 +1,36 @@
+use core::colors::Colorado;
+use core::style::Style;
+use HSL;
+
+
+pub mod colors;
+pub mod symbols;
+pub mod style;
+pub mod color_string;
+pub mod rgb;
+pub mod hsl;
+
+pub trait StrMarker {
+    fn to_str(&self) -> String;
+    fn get_fg_color(&self) -> Option<Colorado> { None }
+    fn get_bg_color(&self) -> Option<Colorado> { None }
+    fn get_style(&self) -> Option<Vec<Style>> { None }
+}
+
+impl<'a> StrMarker for &'a str {
+    fn to_str(&self) -> String {
+        String::from(*self)
+    }
+}
+
+impl StrMarker for String {
+    fn to_str(&self) -> String {
+        self.clone()
+    }
+}
+
+
+pub trait ColorInterface: Clone {
+    fn to_color_str(&self) -> String;
+    fn to_hsl(&self) -> HSL;
+}
diff --git a/third_party/rust/colorful/src/core/rgb.rs 
b/third_party/rust/colorful/src/core/rgb.rs new file mode 100644 index 000000000000..2e0b06a29d59 --- /dev/null +++ b/third_party/rust/colorful/src/core/rgb.rs @@ -0,0 +1,90 @@ +use core::ColorInterface; +use HSL; + +#[derive(Debug, Copy, Clone, PartialEq)] +pub struct RGB { + + r: u8, + g: u8, + b: u8, +} + +impl RGB { + pub fn new(r: u8, g: u8, b: u8) -> RGB { + RGB { r, g, b } + } + + pub fn unpack(&self) -> (u8, u8, u8) { + (self.r, self.g, self.b) + } + + pub fn rgb_to_hsl(&self) -> HSL { + let (r, g, b) = self.unpack(); + let r = r as f32 / 255.0; + let g = g as f32 / 255.0; + let b = b as f32 / 255.0; + + let max = r.max(g).max(b); + let min = r.min(g).min(b); + let mut h: f32 = 0.0; + let mut s: f32 = 0.0; + let l = (max + min) / 2.0; + + if max != min { + let d = max - min; + s = if l > 0.5 { d / (2.0 - max - min) } else { d / (max + min) }; + if max == r { + h = (g - b) / d + (if g < b { 6.0 } else { 0.0 }); + } else if max == g { + h = (b - r) / d + 2.0; + } else { + h = (r - g) / d + 4.0; + } + h /= 6.0; + } + return HSL::new(h, s, l); + } +} + +impl ColorInterface for RGB { + fn to_color_str(&self) -> String { + format!("{};{};{}", self.r, self.g, self.b) + } + fn to_hsl(&self) -> HSL { self.rgb_to_hsl() } +} + +#[cfg(test)] +mod tests { + use super::*; + + #[test] + fn test_rgb_2_hsl_1() { + let hsl = HSL::new(0.69934636, 0.49999997, 0.60); + let rgb = RGB::new(122, 102, 204); + + assert_eq!(hsl, rgb.rgb_to_hsl()); + } + + #[test] + fn test_rgb_2_hsl_2() { + let hsl = HSL::new(0.0, 0.0, 0.60); + let rgb = RGB::new(153, 153, 153); + assert_eq!(hsl, rgb.rgb_to_hsl()); + } + + #[test] + fn test_rgb_2_hsl_3() { + let hsl = HSL::new(0.7012987, 0.50326794, 0.30); + let rgb = RGB::new(54, 38, 115); + + assert_eq!(hsl, rgb.rgb_to_hsl()); + } + + #[test] + fn test_rgb_2_hsl_4() { + let hsl = HSL::new(0.08333334, 1.0, 0.6862745); + let rgb = RGB::new(255, 175, 95); + + assert_eq!(hsl, rgb.rgb_to_hsl()); + } +} \ No newline at end of file diff --git a/third_party/rust/colorful/src/core/style.rs b/third_party/rust/colorful/src/core/style.rs new file mode 100644 index 000000000000..af78b1164118 --- /dev/null +++ b/third_party/rust/colorful/src/core/style.rs @@ -0,0 +1,24 @@ +#[derive(Copy, Clone, PartialEq, Eq, Debug)] +pub enum Style { + Bold, + Dim, + Underlined, + Blink, + + Reverse, + + Hidden, +} + +impl Style { + pub fn to_string(&self) -> String { + match self { + Style::Bold => String::from("1"), + Style::Dim => String::from("2"), + Style::Underlined => String::from("4"), + Style::Blink => String::from("5"), + Style::Reverse => String::from("7"), + Style::Hidden => String::from("8"), + } + } +} diff --git a/third_party/rust/colorful/src/core/symbols.rs b/third_party/rust/colorful/src/core/symbols.rs new file mode 100644 index 000000000000..fddfe317eea7 --- /dev/null +++ b/third_party/rust/colorful/src/core/symbols.rs @@ -0,0 +1,55 @@ +use std::fmt::Display; +use std::fmt::Formatter; +use std::fmt::Result as FmtResult; + +pub enum Symbol { + Mode, + Semicolon, + LeftBrackets, + Esc, + Reset, + Simple256Foreground, + Simple256Background, + RgbForeground, + RgbBackground, + ResetStyle, + ResetForeground, + ResetBackground, + ClearScreenFromCursorToEnd, + ClearScreenUpToCursor, + ClearEntireScreen, + ClearLineFromCursorToEnd, + ClearLineUpToCursor, + ClearEntireLine +} + +impl Symbol { + pub fn to_str<'a>(&self) -> &'a str { + match self { + Symbol::Mode => "m", + Symbol::Semicolon => ";", + Symbol::LeftBrackets => "[", + Symbol::Esc => "\x1B", + Symbol::Reset => "\x1B[0m", + 
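+ // The remaining arms are ANSI SGR prefixes: a 256-color foreground is "\x1B[38;5;" followed by a palette index and a closing "m" (Symbol::Mode), so index 1 renders as "\x1B[38;5;1m"; backgrounds swap 38 for 48, and the "2" sub-mode variants below take "r;g;b" truecolor components instead of an index.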
Symbol::Simple256Foreground => "\x1B[38;5;", + Symbol::Simple256Background => "\x1B[48;5;", + Symbol::RgbForeground => "\x1B[38;2;", + Symbol::RgbBackground => "\x1B[48;2;", + Symbol::ResetStyle => "\x1B[20m", + Symbol::ResetForeground => "\x1B[39m", + Symbol::ResetBackground => "\x1B[49m", + Symbol::ClearScreenFromCursorToEnd => "\x1B[0J", + Symbol::ClearScreenUpToCursor => "\x1B[1J", + Symbol::ClearEntireScreen => "\x1B[2J", + Symbol::ClearLineFromCursorToEnd => "\x1B[0K", + Symbol::ClearLineUpToCursor => "\x1B[1K", + Symbol::ClearEntireLine => "\x1B[2K", + } + } +} + +impl Display for Symbol { + fn fmt(&self, f: &mut Formatter) -> FmtResult { + write!(f, "{}", self.to_str()) + } +} \ No newline at end of file diff --git a/third_party/rust/colorful/src/lib.rs b/third_party/rust/colorful/src/lib.rs new file mode 100644 index 000000000000..3b9c03c008bb --- /dev/null +++ b/third_party/rust/colorful/src/lib.rs @@ -0,0 +1,316 @@ + + + + + + + + + + + + + + + + + + + + + + + +use std::{thread, time}; + +use core::color_string::CString; +use core::ColorInterface; +pub use core::colors::Color; +pub use core::hsl::HSL; +pub use core::rgb::RGB; +use core::StrMarker; +pub use core::style::Style; + +pub mod core; + + +pub trait Colorful { + + + + + + + + + + + + + + + + fn color<C: ColorInterface>(self, color: C) -> CString; + fn black(self) -> CString; + fn red(self) -> CString; + fn green(self) -> CString; + fn yellow(self) -> CString; + fn blue(self) -> CString; + fn magenta(self) -> CString; + fn cyan(self) -> CString; + fn light_gray(self) -> CString; + fn dark_gray(self) -> CString; + fn light_red(self) -> CString; + fn light_green(self) -> CString; + fn light_yellow(self) -> CString; + fn light_blue(self) -> CString; + fn light_magenta(self) -> CString; + fn light_cyan(self) -> CString; + fn white(self) -> CString; + + + + + + + + + + + + + fn bg_color<C: ColorInterface>(self, color: C) -> CString; + fn bg_black(self) -> CString; + fn bg_red(self) -> CString; + fn bg_green(self) -> CString; + fn bg_yellow(self) -> CString; + fn bg_blue(self) -> CString; + fn bg_magenta(self) -> CString; + fn bg_cyan(self) -> CString; + fn bg_light_gray(self) -> CString; + fn bg_dark_gray(self) -> CString; + fn bg_light_red(self) -> CString; + fn bg_light_green(self) -> CString; + fn bg_light_yellow(self) -> CString; + fn bg_light_blue(self) -> CString; + fn bg_light_magenta(self) -> CString; + fn bg_light_cyan(self) -> CString; + fn bg_white(self) -> CString; + + + + + + + + + + + + + fn rgb(self, r: u8, g: u8, b: u8) -> CString; + fn bg_rgb(self, r: u8, g: u8, b: u8) -> CString; + fn hsl(self, h: f32, s: f32, l: f32) -> CString; + fn bg_hsl(self, h: f32, s: f32, l: f32) -> CString; + + + + fn style(self, style: Style) -> CString; + + fn bold(self) -> CString; + + fn blink(self) -> CString; + + fn dim(self) -> CString; + + fn underlined(self) -> CString; + + fn reverse(self) -> CString; + + fn hidden(self) -> CString; + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + fn gradient_with_step<C: ColorInterface>(self, color: C, step: f32) -> CString; + fn gradient_with_color<C: ColorInterface>(self, start: C, stop: C) -> CString; + fn gradient<C: ColorInterface>(self, color: C) -> CString; + fn rainbow_with_speed(self, speed: i32); + + + fn rainbow(self); + fn neon_with_speed<C: ColorInterface>(self, low: C, high: C, speed: i32); + + fn neon<C: ColorInterface>(self, low: C, high: C); + + + fn warn(self); +} + +impl<T> Colorful for T where T: StrMarker { + fn color<C: ColorInterface>(self, color: C) -> CString { CString::create_by_fg(self, color) } + fn black(self) -> CString { self.color(Color::Black) } + fn red(self) -> CString { self.color(Color::Red) }
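+ // Each named helper below simply forwards to `color` with a fixed palette entry; for example, "ok".green().to_string() yields "\x1B[38;5;2mok\x1B[0m", since Color::Green sits at palette index 2 in the ITEMS table.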
+ fn green(self) -> CString { self.color(Color::Green) } + fn yellow(self) -> CString { self.color(Color::Yellow) } + fn blue(self) -> CString { self.color(Color::Blue) } + fn magenta(self) -> CString { self.color(Color::Magenta) } + fn cyan(self) -> CString { self.color(Color::Cyan) } + fn light_gray(self) -> CString { self.color(Color::LightGray) } + fn dark_gray(self) -> CString { self.color(Color::DarkGray) } + fn light_red(self) -> CString { self.color(Color::LightRed) } + fn light_green(self) -> CString { self.color(Color::LightGreen) } + fn light_yellow(self) -> CString { self.color(Color::LightYellow) } + fn light_blue(self) -> CString { self.color(Color::LightBlue) } + fn light_magenta(self) -> CString { self.color(Color::LightMagenta) } + fn light_cyan(self) -> CString { self.color(Color::LightCyan) } + fn white(self) -> CString { self.color(Color::White) } + fn bg_color<C: ColorInterface>(self, color: C) -> CString { CString::create_by_bg(self, color) } + fn bg_black(self) -> CString { self.bg_color(Color::Black) } + fn bg_red(self) -> CString { self.bg_color(Color::Red) } + fn bg_green(self) -> CString { self.bg_color(Color::Green) } + fn bg_yellow(self) -> CString { self.bg_color(Color::Yellow) } + fn bg_blue(self) -> CString { self.bg_color(Color::Blue) } + fn bg_magenta(self) -> CString { self.bg_color(Color::Magenta) } + fn bg_cyan(self) -> CString { self.bg_color(Color::Cyan) } + fn bg_light_gray(self) -> CString { self.bg_color(Color::LightGray) } + fn bg_dark_gray(self) -> CString { self.bg_color(Color::DarkGray) } + fn bg_light_red(self) -> CString { self.bg_color(Color::LightRed) } + fn bg_light_green(self) -> CString { self.bg_color(Color::LightGreen) } + fn bg_light_yellow(self) -> CString { self.bg_color(Color::LightYellow) } + fn bg_light_blue(self) -> CString { self.bg_color(Color::LightBlue) } + fn bg_light_magenta(self) -> CString { self.bg_color(Color::LightMagenta) } + fn bg_light_cyan(self) -> CString { self.bg_color(Color::LightCyan) } + fn bg_white(self) -> CString { self.bg_color(Color::White) } + fn rgb(self, r: u8, g: u8, b: u8) -> CString { CString::create_by_fg(self, RGB::new(r, g, b)) } + fn bg_rgb(self, r: u8, g: u8, b: u8) -> CString { CString::create_by_bg(self, RGB::new(r, g, b)) } + fn hsl(self, h: f32, s: f32, l: f32) -> CString { CString::create_by_fg(self, HSL::new(h, s, l)) } + fn bg_hsl(self, h: f32, s: f32, l: f32) -> CString { CString::create_by_bg(self, HSL::new(h, s, l)) } + fn style(self, style: Style) -> CString { CString::create_by_style(self, style) } + fn bold(self) -> CString { self.style(Style::Bold) } + fn blink(self) -> CString { self.style(Style::Blink) } + fn dim(self) -> CString { self.style(Style::Dim) } + fn underlined(self) -> CString { self.style(Style::Underlined) } + fn reverse(self) -> CString { self.style(Style::Reverse) } + fn hidden(self) -> CString { self.style(Style::Hidden) } + fn gradient_with_step<C: ColorInterface>(self, color: C, step: f32) -> CString { + let mut t = vec![]; + let mut start = color.to_hsl().h; + let s = self.to_str(); + let c = s.chars(); + let length = c.clone().count() - 1; + for (index, i) in c.enumerate() { + let b = i.to_string(); + let tmp = b.hsl(start, 1.0, 0.5).to_string(); + t.push(format!("{}", &tmp[..tmp.len() - if index != length { 4 } else { 0 }])); + start = (start + step) % 1.0; + } + CString::create_by_text(self, t.join("")) + } + fn gradient_with_color<C: ColorInterface>(self, start: C, stop: C) -> CString { + let mut t = vec![]; + let c = self.to_str(); + let s = c.chars(); + let length = s.clone().count() - 1; + let mut start = 
start.to_hsl().h; + let stop = stop.to_hsl().h; + let step = (stop - start) / length as f32; + for (index, i) in s.enumerate() { + let b = i.to_string(); + let tmp = b.hsl(start, 1.0, 0.5).to_string(); + t.push(format!("{}", &tmp[..tmp.len() - if index != length { 4 } else { 0 }])); + start = (start + step) % 1.0; + } + CString::create_by_text(self, t.join("")) + } + fn gradient<C: ColorInterface>(self, color: C) -> CString { + let text = self.to_str(); + let lines: Vec<_> = text.lines().collect(); + let mut tmp = vec![]; + for sub_str in lines.iter() { + tmp.push(sub_str.gradient_with_step(color.clone(), 1.5 / 360.0).to_string()); + } + CString::new(tmp.join("\n")) + } + fn rainbow_with_speed(self, speed: i32) { + let respite: u64 = match speed { + 3 => { 10 } + 2 => { 5 } + 1 => { 2 } + _ => { 0 } + }; + let text = self.to_str(); + let lines: Vec<_> = text.lines().collect(); + for i in 0..360 { + let mut tmp = vec![]; + for sub_str in lines.iter() { + tmp.push(sub_str.gradient_with_step(HSL::new(i as f32 / 360.0, 1.0, 0.5), 0.02).to_string()); + } + println!("{}\x1B[{}F\x1B[G\x1B[2K", tmp.join("\n"), lines.len()); + let ten_millis = time::Duration::from_millis(respite); + thread::sleep(ten_millis); + } + } + fn rainbow(self) { + self.rainbow_with_speed(3); + } + fn neon_with_speed<C: ColorInterface>(self, high: C, low: C, speed: i32) { + let respite: u64 = match speed { + 3 => { 500 } + 2 => { 200 } + 1 => { 100 } + _ => { 0 } + }; + let text = self.to_str(); + let lines: Vec<_> = text.lines().collect(); + let mut coin = true; + let positive = format!("{}\x1B[{}F\x1B[2K", text.clone().color(high), lines.len()); + let negative = format!("{}\x1B[{}F\x1B[2K", text.clone().color(low), lines.len()); + for _ in 0..360 { + if coin { println!("{}", positive) } else { println!("{}", negative) }; + let ten_millis = time::Duration::from_millis(respite); + thread::sleep(ten_millis); + coin = !coin; + } + } + fn neon<C: ColorInterface>(self, high: C, low: C) { + self.neon_with_speed(high, low, 3); + } + fn warn(self) { + self.neon_with_speed(RGB::new(226, 14, 14), RGB::new(158, 158, 158), 3); + } +} + +pub trait ExtraColorInterface { + fn grey0(self) -> CString; +} + +impl<T> ExtraColorInterface for T where T: Colorful { + fn grey0(self) -> CString { self.color(Color::Grey0) } +} + diff --git a/third_party/rust/colorful/tests/test_all_color.rs b/third_party/rust/colorful/tests/test_all_color.rs new file mode 100644 index 000000000000..e2803734e6c9 --- /dev/null +++ b/third_party/rust/colorful/tests/test_all_color.rs @@ -0,0 +1,280 @@ +extern crate colorful; +extern crate core; + +use colorful::Colorful; +use colorful::Color; +use colorful::core::ColorInterface; +use colorful::HSL; + +#[test] +fn test_color() { + let s = "Hello world"; + for (i, color) in Color::iterator().enumerate() { + assert_eq!(format!("\x1B[38;5;{}mHello world\x1B[0m", i.to_owned()), s.color(*color).to_string()); + } +} + + +#[test] +fn test_color_to_hsl() { + let l = vec![ + HSL::new(0.0, 0.0, 0.0), + HSL::new(0.0, 1.0, 0.25), + HSL::new(0.3333333333333333, 1.0, 0.25), + HSL::new(0.16666666666666666, 1.0, 0.25), + HSL::new(0.6666666666666666, 1.0, 0.25), + HSL::new(0.8333333333333334, 1.0, 0.25), + HSL::new(0.5, 1.0, 0.25), + HSL::new(0.0, 0.0, 0.75), + HSL::new(0.0, 0.0, 0.5), + HSL::new(0.0, 1.0, 0.5), + HSL::new(0.3333333333333333, 1.0, 0.5), + HSL::new(0.16666666666666666, 1.0, 0.5), + HSL::new(0.6666666666666666, 1.0, 0.5), + HSL::new(0.8333333333333334, 1.0, 0.5), + HSL::new(0.5, 1.0, 0.5), + HSL::new(0.0, 0.0, 1.0), + HSL::new(0.0, 0.0, 0.0), + HSL::new(0.6666666666666666, 1.0, 
0.18), + HSL::new(0.6666666666666666, 1.0, 0.26), + HSL::new(0.6666666666666666, 1.0, 0.34), + HSL::new(0.6666666666666666, 1.0, 0.42), + HSL::new(0.6666666666666666, 1.0, 0.5), + HSL::new(0.3333333333333333, 1.0, 0.18), + HSL::new(0.5, 1.0, 0.18), + HSL::new(0.5493827160493834, 1.0, 0.26), + HSL::new(0.5761904761904749, 1.0, 0.34), + HSL::new(0.5930232558139528, 1.0, 0.42), + HSL::new(0.6045751633986917, 1.0, 0.5), + HSL::new(0.3333333333333333, 1.0, 0.26), + HSL::new(0.4506172839506167, 1.0, 0.26), + HSL::new(0.5, 1.0, 0.26), + HSL::new(0.538095238095239, 1.0, 0.34), + HSL::new(0.5620155038759694, 1.0, 0.42), + HSL::new(0.5784313725490194, 1.0, 0.5), + HSL::new(0.3333333333333333, 1.0, 0.34), + HSL::new(0.423809523809525, 1.0, 0.34), + HSL::new(0.4619047619047611, 1.0, 0.34), + HSL::new(0.5, 1.0, 0.34), + HSL::new(0.5310077519379833, 1.0, 0.42), + HSL::new(0.5522875816993472, 1.0, 0.5), + HSL::new(0.3333333333333333, 1.0, 0.42), + HSL::new(0.40697674418604723, 1.0, 0.42), + HSL::new(0.43798449612403056, 1.0, 0.42), + HSL::new(0.4689922480620166, 1.0, 0.42), + HSL::new(0.5, 1.0, 0.42), + HSL::new(0.5261437908496722, 1.0, 0.5), + HSL::new(0.3333333333333333, 1.0, 0.5), + HSL::new(0.39542483660130834, 1.0, 0.5), + HSL::new(0.4215686274509806, 1.0, 0.5), + HSL::new(0.4477124183006528, 1.0, 0.5), + HSL::new(0.4738562091503278, 1.0, 0.5), + HSL::new(0.5, 1.0, 0.5), + HSL::new(0.0, 1.0, 0.18), + HSL::new(0.8333333333333334, 1.0, 0.18), + HSL::new(0.78395061728395, 1.0, 0.26), + HSL::new(0.7571428571428583, 1.0, 0.34), + HSL::new(0.7403100775193806, 1.0, 0.42), + HSL::new(0.7287581699346417, 1.0, 0.5), + HSL::new(0.16666666666666666, 1.0, 0.18), + HSL::new(0.0, 0.0, 0.37), + HSL::new(0.6666666666666666, 0.17, 0.45), + HSL::new(0.6666666666666666, 0.33, 0.52), + HSL::new(0.6666666666666666, 0.6, 0.6), + HSL::new(0.6666666666666666, 1.0, 0.68), + HSL::new(0.21604938271604945, 1.0, 0.26), + HSL::new(0.3333333333333333, 0.17, 0.45), + HSL::new(0.5, 0.17, 0.45), + HSL::new(0.5833333333333334, 0.33, 0.52), + HSL::new(0.6111111111111112, 0.6, 0.6), + HSL::new(0.625, 1.0, 0.68), + HSL::new(0.24285714285714277, 1.0, 0.34), + HSL::new(0.3333333333333333, 0.33, 0.52), + HSL::new(0.4166666666666667, 0.33, 0.52), + HSL::new(0.5, 0.33, 0.52), + HSL::new(0.5555555555555556, 0.6, 0.6), + HSL::new(0.5833333333333334, 1.0, 0.68), + HSL::new(0.2596899224806203, 1.0, 0.42), + HSL::new(0.3333333333333333, 0.6, 0.6), + HSL::new(0.3888888888888889, 0.6, 0.6), + HSL::new(0.4444444444444444, 0.6, 0.6), + HSL::new(0.5, 0.6, 0.6), + HSL::new(0.5416666666666666, 1.0, 0.68), + HSL::new(0.27124183006535946, 1.0, 0.5), + HSL::new(0.3333333333333333, 1.0, 0.68), + HSL::new(0.375, 1.0, 0.68), + HSL::new(0.4166666666666667, 1.0, 0.68), + HSL::new(0.4583333333333333, 1.0, 0.68), + HSL::new(0.5, 1.0, 0.68), + HSL::new(0.0, 1.0, 0.26), + HSL::new(0.8827160493827166, 1.0, 0.26), + HSL::new(0.8333333333333334, 1.0, 0.26), + HSL::new(0.7952380952380944, 1.0, 0.34), + HSL::new(0.7713178294573639, 1.0, 0.42), + HSL::new(0.7549019607843138, 1.0, 0.5), + HSL::new(0.11728395061728389, 1.0, 0.26), + HSL::new(0.0, 0.17, 0.45), + HSL::new(0.8333333333333334, 0.17, 0.45), + HSL::new(0.75, 0.33, 0.52), + HSL::new(0.7222222222222222, 0.6, 0.6), + HSL::new(0.7083333333333334, 1.0, 0.68), + HSL::new(0.16666666666666666, 1.0, 0.26), + HSL::new(0.16666666666666666, 0.17, 0.45), + HSL::new(0.0, 0.0, 0.52), + HSL::new(0.6666666666666666, 0.2, 0.6), + HSL::new(0.6666666666666666, 0.5, 0.68), + HSL::new(0.6666666666666666, 1.0, 0.76), + 
HSL::new(0.2047619047619047, 1.0, 0.34), + HSL::new(0.25, 0.33, 0.52), + HSL::new(0.3333333333333333, 0.2, 0.6), + HSL::new(0.5, 0.2, 0.6), + HSL::new(0.5833333333333334, 0.5, 0.68), + HSL::new(0.6111111111111112, 1.0, 0.76), + HSL::new(0.22868217054263557, 1.0, 0.42), + HSL::new(0.2777777777777778, 0.6, 0.6), + HSL::new(0.3333333333333333, 0.5, 0.68), + HSL::new(0.4166666666666667, 0.5, 0.68), + HSL::new(0.5, 0.5, 0.68), + HSL::new(0.5555555555555556, 1.0, 0.76), + HSL::new(0.2450980392156864, 1.0, 0.5), + HSL::new(0.2916666666666667, 1.0, 0.68), + HSL::new(0.3333333333333333, 1.0, 0.76), + HSL::new(0.3888888888888889, 1.0, 0.76), + HSL::new(0.4444444444444444, 1.0, 0.76), + HSL::new(0.5, 1.0, 0.76), + HSL::new(0.0, 1.0, 0.34), + HSL::new(0.9095238095238083, 1.0, 0.34), + HSL::new(0.8714285714285722, 1.0, 0.34), + HSL::new(0.8333333333333334, 1.0, 0.34), + HSL::new(0.80232558139535, 1.0, 0.42), + HSL::new(0.7810457516339862, 1.0, 0.5), + HSL::new(0.09047619047619054, 1.0, 0.34), + HSL::new(0.0, 0.33, 0.52), + HSL::new(0.9166666666666666, 0.33, 0.52), + HSL::new(0.8333333333333334, 0.33, 0.52), + HSL::new(0.7777777777777778, 0.6, 0.6), + HSL::new(0.75, 1.0, 0.68), + HSL::new(0.12857142857142861, 1.0, 0.34), + HSL::new(0.08333333333333333, 0.33, 0.52), + HSL::new(0.0, 0.2, 0.6), + HSL::new(0.8333333333333334, 0.2, 0.6), + HSL::new(0.75, 0.5, 0.68), + HSL::new(0.7222222222222222, 1.0, 0.76), + HSL::new(0.16666666666666666, 1.0, 0.34), + HSL::new(0.16666666666666666, 0.33, 0.52), + HSL::new(0.16666666666666666, 0.2, 0.6), + HSL::new(0.0, 0.0, 0.68), + HSL::new(0.6666666666666666, 0.33, 0.76), + HSL::new(0.6666666666666666, 1.0, 0.84), + HSL::new(0.1976744186046511, 1.0, 0.42), + HSL::new(0.2222222222222222, 0.6, 0.6), + HSL::new(0.25, 0.5, 0.68), + HSL::new(0.3333333333333333, 0.33, 0.76), + HSL::new(0.5, 0.33, 0.76), + HSL::new(0.5833333333333334, 1.0, 0.84), + HSL::new(0.21895424836601304, 1.0, 0.5), + HSL::new(0.25, 1.0, 0.68), + HSL::new(0.2777777777777778, 1.0, 0.76), + HSL::new(0.3333333333333333, 1.0, 0.84), + HSL::new(0.4166666666666667, 1.0, 0.84), + HSL::new(0.5, 1.0, 0.84), + HSL::new(0.0, 1.0, 0.42), + HSL::new(0.926356589147286, 1.0, 0.42), + HSL::new(0.8953488372093028, 1.0, 0.42), + HSL::new(0.8643410852713166, 1.0, 0.42), + HSL::new(0.8333333333333334, 1.0, 0.42), + HSL::new(0.8071895424836611, 1.0, 0.5), + HSL::new(0.07364341085271306, 1.0, 0.42), + HSL::new(0.0, 0.6, 0.6), + HSL::new(0.9444444444444444, 0.6, 0.6), + HSL::new(0.8888888888888888, 0.6, 0.6), + HSL::new(0.8333333333333334, 0.6, 0.6), + HSL::new(0.7916666666666666, 1.0, 0.68), + HSL::new(0.10465116279069778, 1.0, 0.42), + HSL::new(0.05555555555555555, 0.6, 0.6), + HSL::new(0.0, 0.5, 0.68), + HSL::new(0.9166666666666666, 0.5, 0.68), + HSL::new(0.8333333333333334, 0.5, 0.68), + HSL::new(0.7777777777777778, 1.0, 0.76), + HSL::new(0.13565891472868222, 1.0, 0.42), + HSL::new(0.1111111111111111, 0.6, 0.6), + HSL::new(0.08333333333333333, 0.5, 0.68), + HSL::new(0.0, 0.33, 0.76), + HSL::new(0.8333333333333334, 0.33, 0.76), + HSL::new(0.75, 1.0, 0.84), + HSL::new(0.16666666666666666, 1.0, 0.42), + HSL::new(0.16666666666666666, 0.6, 0.6), + HSL::new(0.16666666666666666, 0.5, 0.68), + HSL::new(0.16666666666666666, 0.33, 0.76), + HSL::new(0.0, 0.0, 0.84), + HSL::new(0.6666666666666666, 1.0, 0.92), + HSL::new(0.19281045751633974, 1.0, 0.5), + HSL::new(0.20833333333333334, 1.0, 0.68), + HSL::new(0.2222222222222222, 1.0, 0.76), + HSL::new(0.25, 1.0, 0.84), + HSL::new(0.3333333333333333, 1.0, 0.92), + HSL::new(0.5, 1.0, 0.92), + 
HSL::new(0.0, 1.0, 0.5), + HSL::new(0.937908496732025, 1.0, 0.5), + HSL::new(0.9117647058823528, 1.0, 0.5), + HSL::new(0.8856209150326805, 1.0, 0.5), + HSL::new(0.8594771241830055, 1.0, 0.5), + HSL::new(0.8333333333333334, 1.0, 0.5), + HSL::new(0.06209150326797389, 1.0, 0.5), + HSL::new(0.0, 1.0, 0.68), + HSL::new(0.9583333333333334, 1.0, 0.68), + HSL::new(0.9166666666666666, 1.0, 0.68), + HSL::new(0.875, 1.0, 0.68), + HSL::new(0.8333333333333334, 1.0, 0.68), + HSL::new(0.08823529411764694, 1.0, 0.5), + HSL::new(0.041666666666666664, 1.0, 0.68), + HSL::new(0.0, 1.0, 0.76), + HSL::new(0.9444444444444444, 1.0, 0.76), + HSL::new(0.8888888888888888, 1.0, 0.76), + HSL::new(0.8333333333333334, 1.0, 0.76), + HSL::new(0.11437908496732027, 1.0, 0.5), + HSL::new(0.08333333333333333, 1.0, 0.68), + HSL::new(0.05555555555555555, 1.0, 0.76), + HSL::new(0.0, 1.0, 0.84), + HSL::new(0.9166666666666666, 1.0, 0.84), + HSL::new(0.8333333333333334, 1.0, 0.84), + HSL::new(0.14052287581699335, 1.0, 0.5), + HSL::new(0.125, 1.0, 0.68), + HSL::new(0.1111111111111111, 1.0, 0.76), + HSL::new(0.08333333333333333, 1.0, 0.84), + HSL::new(0.0, 1.0, 0.92), + HSL::new(0.8333333333333334, 1.0, 0.92), + HSL::new(0.16666666666666666, 1.0, 0.5), + HSL::new(0.16666666666666666, 1.0, 0.68), + HSL::new(0.16666666666666666, 1.0, 0.76), + HSL::new(0.16666666666666666, 1.0, 0.84), + HSL::new(0.16666666666666666, 1.0, 0.92), + HSL::new(0.0, 0.0, 1.0), + HSL::new(0.0, 0.0, 0.03), + HSL::new(0.0, 0.0, 0.07), + HSL::new(0.0, 0.0, 0.1), + HSL::new(0.0, 0.0, 0.14), + HSL::new(0.0, 0.0, 0.18), + HSL::new(0.0, 0.0, 0.22), + HSL::new(0.0, 0.0, 0.26), + HSL::new(0.0, 0.0, 0.3), + HSL::new(0.0, 0.0, 0.34), + HSL::new(0.0, 0.0, 0.37), + HSL::new(0.0, 0.0, 0.4), + HSL::new(0.0, 0.0, 0.46), + HSL::new(0.0, 0.0, 0.5), + HSL::new(0.0, 0.0, 0.54), + HSL::new(0.0, 0.0, 0.58), + HSL::new(0.0, 0.0, 0.61), + HSL::new(0.0, 0.0, 0.65), + HSL::new(0.0, 0.0, 0.69), + HSL::new(0.0, 0.0, 0.73), + HSL::new(0.0, 0.0, 0.77), + HSL::new(0.0, 0.0, 0.81), + HSL::new(0.0, 0.0, 0.85), + HSL::new(0.0, 0.0, 0.89), + HSL::new(0.0, 0.0, 0.93), ]; + for (i, color) in Color::iterator().enumerate() { + assert_eq!(color.to_hsl(), l[i]); + } +} \ No newline at end of file diff --git a/third_party/rust/colorful/tests/test_animation.rs b/third_party/rust/colorful/tests/test_animation.rs new file mode 100644 index 000000000000..fd40781a8c97 --- /dev/null +++ b/third_party/rust/colorful/tests/test_animation.rs @@ -0,0 +1,31 @@ +extern crate colorful; + +use colorful::Color; +use colorful::Colorful; +use colorful::HSL; +use colorful::RGB; + +#[test] +fn test_rainbow() { + let s = "Hello world"; + s.rainbow_with_speed(0); +} + +#[test] +fn test_neon_1() { + let s = "Hello world"; + s.neon_with_speed(Color::Grey0, Color::Grey0, 0); +} + +#[test] +fn test_neon_2() { + let s = "Hello world"; + s.neon_with_speed(HSL::new(1.0, 1.0, 0.4), HSL::new(0.5, 1.0, 0.4), 0); +} + +#[test] +fn test_neon_3() { + let s = "Hello world"; + s.neon_with_speed(RGB::new(122, 122, 122), RGB::new(222, 222, 222), 0); +} + diff --git a/third_party/rust/colorful/tests/test_basic.rs b/third_party/rust/colorful/tests/test_basic.rs new file mode 100644 index 000000000000..9e05eb43f818 --- /dev/null +++ b/third_party/rust/colorful/tests/test_basic.rs @@ -0,0 +1,53 @@ +extern crate colorful; +extern crate core; + +use colorful::Colorful; +use colorful::Color; +use colorful::Style; + +#[test] +fn test_1() { + assert_eq!("\u{1b}", "\x1B"); +} + +#[test] +fn test_color() { + let s = "Hello world"; + 
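+ // Each expected value below is the SGR foreground prefix "\x1B[38;5;" plus a palette index, the text, and the trailing "\x1B[0m" reset; when color calls are chained, the most recently applied color wins, as the second assertion shows.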
assert_eq!("\x1B[38;5;1mHello world\x1B[0m".to_owned(), s.color(Color::Red).to_string()); + assert_eq!("\x1B[38;5;220mHello world\x1B[0m".to_owned(), s.color(Color::Red).color(Color::Gold1).to_string()); +} + +#[test] +fn test_bg_color() { + let s = "Hello world"; + assert_eq!("\x1B[38;5;1m\x1B[48;5;16mHello world\x1B[0m".to_owned(), s.color(Color::Red).bg_color(Color::Grey0).to_string()); + assert_eq!("\x1B[38;5;1m\x1B[48;5;6mHello world\x1B[0m".to_owned(), s.color(Color::Red).bg_cyan().to_string()); + assert_eq!("\x1B[38;5;220m\x1B[48;5;6mHello world\x1B[0m".to_owned(), s.color(Color::Red).color(Color::Gold1).bg_color(Color::Cyan).to_string()); +} + + +#[test] +fn test_style() { + let s = "Hello world"; + assert_eq!("\x1B[1mHello world\x1B[0m".to_owned(), s.style(Style::Bold).to_string()); + assert_eq!("\x1B[1;5mHello world\x1B[0m".to_owned(), s.style(Style::Bold).style(Style::Blink).to_string()); +} + +#[test] +fn test_interface() { + let s = "Hello world"; + assert_eq!("\x1B[1mHello world\x1B[0m".to_owned(), s.bold().to_string()); + assert_eq!("\x1B[1;5mHello world\x1B[0m".to_owned(), s.bold().blink().to_string()); + assert_eq!("\x1B[38;5;1mHello world\x1B[0m".to_owned(), s.red().to_string()); + assert_eq!("\x1B[38;5;2mHello world\x1B[0m".to_owned(), s.red().green().to_string()); +} + +#[test] +fn test_mix() { + let s = "Hello world"; + assert_eq!("\x1B[38;5;1;5mHello world\x1B[0m".to_owned(), s.color(Color::Red).blink().to_string()); + assert_eq!("\x1B[38;5;220;1mHello world\x1B[0m".to_owned(), s.bold().color(Color::Gold1).to_string()); + + assert_eq!("\x1B[38;5;2;5;1mHello world\x1B[0m".to_owned(), s.color(Color::Green).blink().bold().to_string()); + assert_eq!("\x1B[38;5;220;1;5mHello world\x1B[0m".to_owned(), s.bold().blink().color(Color::Gold1).to_string()); +} diff --git a/third_party/rust/colorful/tests/test_extra.rs b/third_party/rust/colorful/tests/test_extra.rs new file mode 100644 index 000000000000..c8a79888c996 --- /dev/null +++ b/third_party/rust/colorful/tests/test_extra.rs @@ -0,0 +1,10 @@ +extern crate colorful; +extern crate core; + + +#[test] +fn test_extra_color() { + use colorful::ExtraColorInterface; + let s = "Hello world"; + assert_eq!("\x1B[38;5;16mHello world\x1B[0m".to_owned(), s.grey0().to_string()); +} diff --git a/third_party/rust/colorful/tests/test_gradient.rs b/third_party/rust/colorful/tests/test_gradient.rs new file mode 100644 index 000000000000..ddebc1ff6844 --- /dev/null +++ b/third_party/rust/colorful/tests/test_gradient.rs @@ -0,0 +1,16 @@ +extern crate colorful; + +use colorful::Color; +use colorful::Colorful; + +#[test] +fn test_gradient_color() { + let s = "Hello world"; + assert_eq!("\u{1b}[38;2;255;0;0mH\u{1b}[38;2;255;6;0me\u{1b}[38;2;255;13;0ml\u{1b}[38;2;255;19;0ml\u{1b}[38;2;255;26;0mo\u{1b}[38;2;255;32;0m \u{1b}[38;2;255;38;0mw\u{1b}[38;2;255;45;0mo\u{1b}[38;2;255;51;0mr\u{1b}[38;2;255;57;0ml\u{1b}[38;2;255;64;0md\u{1b}[0m".to_owned(), s.gradient(Color::Red).to_string()); +} + +#[test] +fn test_gradient_multiple_lines() { + let s = "a\nb"; + assert_eq!("\u{1b}[38;2;255;0;0ma\u{1b}[0m\n\u{1b}[38;2;255;0;0mb\u{1b}[0m".to_owned(), s.gradient(Color::Red).to_string()); +} \ No newline at end of file diff --git a/third_party/rust/colorful/tests/test_hsl.rs b/third_party/rust/colorful/tests/test_hsl.rs new file mode 100644 index 000000000000..57222c2598e9 --- /dev/null +++ b/third_party/rust/colorful/tests/test_hsl.rs @@ -0,0 +1,10 @@ +extern crate colorful; +extern crate core; + +use colorful::Colorful; + +#[test] +fn test_hsl_color() { + let 
s = "Hello world"; + assert_eq!("\x1B[38;2;19;205;94mHello world\x1B[0m", s.hsl(0.4, 0.83, 0.44).to_string()); +} diff --git a/third_party/rust/copyless/.cargo-checksum.json b/third_party/rust/copyless/.cargo-checksum.json new file mode 100644 index 000000000000..cc96255dae96 --- /dev/null +++ b/third_party/rust/copyless/.cargo-checksum.json @@ -0,0 +1 @@ +{"files":{"CHANGELOG.md":"fc14ff68be7982dcdf02342b449cd3db15cbe28073d242f054b75ffbfccb705b","Cargo.toml":"b2def24309e2e6b2ee8777bbbaddcce93cb787857da4d5029ea8a7b74706806e","LICENSE":"1f256ecad192880510e84ad60474eab7589218784b9a50bc7ceee34c2b91f1d5","README.md":"8e715eaac241b9555b784986105428dd181eefa060058e476b2332f54691ffec","bors.toml":"2fc93950678c7308b3685c466f55d9d93e88366c2241277691f1c42c41c1f645","rustfmt.toml":"988d2287fa5a3511acbb1168a2db2d63dbc5d3ce9f13d59c89bb63b5f0d487bb","src/boxed.rs":"01acef5ec63581b829e8f6f6620954f414eae4e05dd38cbd075fec7bf48eaec6","src/lib.rs":"9250d444179dd3138a021880752a337035b9159ee238ad3df6fe6c4668cb3573","src/vec.rs":"cbef7b318ad96f9f7bf70b84b2111f66fdda2503d4d082622a02a3855e9c6310"},"package":"6ff9c56c9fb2a49c05ef0e431485a22400af20d33226dc0764d891d09e724127"} \ No newline at end of file diff --git a/third_party/rust/copyless/CHANGELOG.md b/third_party/rust/copyless/CHANGELOG.md new file mode 100644 index 000000000000..c13ffa47b4a6 --- /dev/null +++ b/third_party/rust/copyless/CHANGELOG.md @@ -0,0 +1,17 @@ +# Change Log + +## v0.1.4 (24-06-2019) + - `BoxHelper`: replaced nullable pointer with NonNull + +## v0.1.3 (31-05-2019) + - fixed zero-sized box allocations + - fixed file permissions in the package + +## v0.1.2 (19-03-2019) + - fixed box alignment + +## v0.1.1 (15-03-2019) + - `BoxHelper` extension + +## v0.1 (15-03-2019) + - `VecHelper` extension diff --git a/third_party/rust/copyless/Cargo.toml b/third_party/rust/copyless/Cargo.toml new file mode 100644 index 000000000000..da3fff2597fa --- /dev/null +++ b/third_party/rust/copyless/Cargo.toml @@ -0,0 +1,26 @@ +# THIS FILE IS AUTOMATICALLY GENERATED BY CARGO +# +# When uploading crates to the registry Cargo will automatically +# "normalize" Cargo.toml files for maximal compatibility +# with all versions of Cargo and also rewrite `path` dependencies +# to registry (e.g., crates.io) dependencies +# +# If you believe there's an error in this file please file an +# issue against the rust-lang/cargo repository. If you're +# editing this file be aware that the upstream Cargo.toml +# will likely look very different (and much more reasonable) + +[package] +edition = "2018" +name = "copyless" +version = "0.1.4" +authors = ["Dzmitry Malyshau "] +description = "Ways to eliminate memcpy calls when using the standard library." +homepage = "https://github.com/kvark/copyless" +keywords = ["containers", "memcpy"] +license = "MPL-2.0" +repository = "https://github.com/kvark/copyless" + +[lib] + +[dependencies] diff --git a/third_party/rust/copyless/LICENSE b/third_party/rust/copyless/LICENSE new file mode 100644 index 000000000000..a612ad9813b0 --- /dev/null +++ b/third_party/rust/copyless/LICENSE @@ -0,0 +1,373 @@ +Mozilla Public License Version 2.0 +================================== + +1. Definitions +-------------- + +1.1. "Contributor" + means each individual or legal entity that creates, contributes to + the creation of, or owns Covered Software. + +1.2. "Contributor Version" + means the combination of the Contributions of others (if any) used + by a Contributor and that particular Contributor's Contribution. + +1.3. 
"Contribution" + means Covered Software of a particular Contributor. + +1.4. "Covered Software" + means Source Code Form to which the initial Contributor has attached + the notice in Exhibit A, the Executable Form of such Source Code + Form, and Modifications of such Source Code Form, in each case + including portions thereof. + +1.5. "Incompatible With Secondary Licenses" + means + + (a) that the initial Contributor has attached the notice described + in Exhibit B to the Covered Software; or + + (b) that the Covered Software was made available under the terms of + version 1.1 or earlier of the License, but not also under the + terms of a Secondary License. + +1.6. "Executable Form" + means any form of the work other than Source Code Form. + +1.7. "Larger Work" + means a work that combines Covered Software with other material, in + a separate file or files, that is not Covered Software. + +1.8. "License" + means this document. + +1.9. "Licensable" + means having the right to grant, to the maximum extent possible, + whether at the time of the initial grant or subsequently, any and + all of the rights conveyed by this License. + +1.10. "Modifications" + means any of the following: + + (a) any file in Source Code Form that results from an addition to, + deletion from, or modification of the contents of Covered + Software; or + + (b) any new file in Source Code Form that contains any Covered + Software. + +1.11. "Patent Claims" of a Contributor + means any patent claim(s), including without limitation, method, + process, and apparatus claims, in any patent Licensable by such + Contributor that would be infringed, but for the grant of the + License, by the making, using, selling, offering for sale, having + made, import, or transfer of either its Contributions or its + Contributor Version. + +1.12. "Secondary License" + means either the GNU General Public License, Version 2.0, the GNU + Lesser General Public License, Version 2.1, the GNU Affero General + Public License, Version 3.0, or any later versions of those + licenses. + +1.13. "Source Code Form" + means the form of the work preferred for making modifications. + +1.14. "You" (or "Your") + means an individual or a legal entity exercising rights under this + License. For legal entities, "You" includes any entity that + controls, is controlled by, or is under common control with You. For + purposes of this definition, "control" means (a) the power, direct + or indirect, to cause the direction or management of such entity, + whether by contract or otherwise, or (b) ownership of more than + fifty percent (50%) of the outstanding shares or beneficial + ownership of such entity. + +2. License Grants and Conditions +-------------------------------- + +2.1. Grants + +Each Contributor hereby grants You a world-wide, royalty-free, +non-exclusive license: + +(a) under intellectual property rights (other than patent or trademark) + Licensable by such Contributor to use, reproduce, make available, + modify, display, perform, distribute, and otherwise exploit its + Contributions, either on an unmodified basis, with Modifications, or + as part of a Larger Work; and + +(b) under Patent Claims of such Contributor to make, use, sell, offer + for sale, have made, import, and otherwise transfer either its + Contributions or its Contributor Version. + +2.2. Effective Date + +The licenses granted in Section 2.1 with respect to any Contribution +become effective for each Contribution on the date the Contributor first +distributes such Contribution. + +2.3. 
Limitations on Grant Scope + +The licenses granted in this Section 2 are the only rights granted under +this License. No additional rights or licenses will be implied from the +distribution or licensing of Covered Software under this License. +Notwithstanding Section 2.1(b) above, no patent license is granted by a +Contributor: + +(a) for any code that a Contributor has removed from Covered Software; + or + +(b) for infringements caused by: (i) Your and any other third party's + modifications of Covered Software, or (ii) the combination of its + Contributions with other software (except as part of its Contributor + Version); or + +(c) under Patent Claims infringed by Covered Software in the absence of + its Contributions. + +This License does not grant any rights in the trademarks, service marks, +or logos of any Contributor (except as may be necessary to comply with +the notice requirements in Section 3.4). + +2.4. Subsequent Licenses + +No Contributor makes additional grants as a result of Your choice to +distribute the Covered Software under a subsequent version of this +License (see Section 10.2) or under the terms of a Secondary License (if +permitted under the terms of Section 3.3). + +2.5. Representation + +Each Contributor represents that the Contributor believes its +Contributions are its original creation(s) or it has sufficient rights +to grant the rights to its Contributions conveyed by this License. + +2.6. Fair Use + +This License is not intended to limit any rights You have under +applicable copyright doctrines of fair use, fair dealing, or other +equivalents. + +2.7. Conditions + +Sections 3.1, 3.2, 3.3, and 3.4 are conditions of the licenses granted +in Section 2.1. + +3. Responsibilities +------------------- + +3.1. Distribution of Source Form + +All distribution of Covered Software in Source Code Form, including any +Modifications that You create or to which You contribute, must be under +the terms of this License. You must inform recipients that the Source +Code Form of the Covered Software is governed by the terms of this +License, and how they can obtain a copy of this License. You may not +attempt to alter or restrict the recipients' rights in the Source Code +Form. + +3.2. Distribution of Executable Form + +If You distribute Covered Software in Executable Form then: + +(a) such Covered Software must also be made available in Source Code + Form, as described in Section 3.1, and You must inform recipients of + the Executable Form how they can obtain a copy of such Source Code + Form by reasonable means in a timely manner, at a charge no more + than the cost of distribution to the recipient; and + +(b) You may distribute such Executable Form under the terms of this + License, or sublicense it under different terms, provided that the + license for the Executable Form does not attempt to limit or alter + the recipients' rights in the Source Code Form under this License. + +3.3. Distribution of a Larger Work + +You may create and distribute a Larger Work under terms of Your choice, +provided that You also comply with the requirements of this License for +the Covered Software. 
If the Larger Work is a combination of Covered +Software with a work governed by one or more Secondary Licenses, and the +Covered Software is not Incompatible With Secondary Licenses, this +License permits You to additionally distribute such Covered Software +under the terms of such Secondary License(s), so that the recipient of +the Larger Work may, at their option, further distribute the Covered +Software under the terms of either this License or such Secondary +License(s). + +3.4. Notices + +You may not remove or alter the substance of any license notices +(including copyright notices, patent notices, disclaimers of warranty, +or limitations of liability) contained within the Source Code Form of +the Covered Software, except that You may alter any license notices to +the extent required to remedy known factual inaccuracies. + +3.5. Application of Additional Terms + +You may choose to offer, and to charge a fee for, warranty, support, +indemnity or liability obligations to one or more recipients of Covered +Software. However, You may do so only on Your own behalf, and not on +behalf of any Contributor. You must make it absolutely clear that any +such warranty, support, indemnity, or liability obligation is offered by +You alone, and You hereby agree to indemnify every Contributor for any +liability incurred by such Contributor as a result of warranty, support, +indemnity or liability terms You offer. You may include additional +disclaimers of warranty and limitations of liability specific to any +jurisdiction. + +4. Inability to Comply Due to Statute or Regulation +--------------------------------------------------- + +If it is impossible for You to comply with any of the terms of this +License with respect to some or all of the Covered Software due to +statute, judicial order, or regulation then You must: (a) comply with +the terms of this License to the maximum extent possible; and (b) +describe the limitations and the code they affect. Such description must +be placed in a text file included with all distributions of the Covered +Software under this License. Except to the extent prohibited by statute +or regulation, such description must be sufficiently detailed for a +recipient of ordinary skill to be able to understand it. + +5. Termination +-------------- + +5.1. The rights granted under this License will terminate automatically +if You fail to comply with any of its terms. However, if You become +compliant, then the rights granted under this License from a particular +Contributor are reinstated (a) provisionally, unless and until such +Contributor explicitly and finally terminates Your grants, and (b) on an +ongoing basis, if such Contributor fails to notify You of the +non-compliance by some reasonable means prior to 60 days after You have +come back into compliance. Moreover, Your grants from a particular +Contributor are reinstated on an ongoing basis if such Contributor +notifies You of the non-compliance by some reasonable means, this is the +first time You have received notice of non-compliance with this License +from such Contributor, and You become compliant prior to 30 days after +Your receipt of the notice. + +5.2. 
If You initiate litigation against any entity by asserting a patent +infringement claim (excluding declaratory judgment actions, +counter-claims, and cross-claims) alleging that a Contributor Version +directly or indirectly infringes any patent, then the rights granted to +You by any and all Contributors for the Covered Software under Section +2.1 of this License shall terminate. + +5.3. In the event of termination under Sections 5.1 or 5.2 above, all +end user license agreements (excluding distributors and resellers) which +have been validly granted by You or Your distributors under this License +prior to termination shall survive termination. + +************************************************************************ +* * +* 6. Disclaimer of Warranty * +* ------------------------- * +* * +* Covered Software is provided under this License on an "as is" * +* basis, without warranty of any kind, either expressed, implied, or * +* statutory, including, without limitation, warranties that the * +* Covered Software is free of defects, merchantable, fit for a * +* particular purpose or non-infringing. The entire risk as to the * +* quality and performance of the Covered Software is with You. * +* Should any Covered Software prove defective in any respect, You * +* (not any Contributor) assume the cost of any necessary servicing, * +* repair, or correction. This disclaimer of warranty constitutes an * +* essential part of this License. No use of any Covered Software is * +* authorized under this License except under this disclaimer. * +* * +************************************************************************ + +************************************************************************ +* * +* 7. Limitation of Liability * +* -------------------------- * +* * +* Under no circumstances and under no legal theory, whether tort * +* (including negligence), contract, or otherwise, shall any * +* Contributor, or anyone who distributes Covered Software as * +* permitted above, be liable to You for any direct, indirect, * +* special, incidental, or consequential damages of any character * +* including, without limitation, damages for lost profits, loss of * +* goodwill, work stoppage, computer failure or malfunction, or any * +* and all other commercial damages or losses, even if such party * +* shall have been informed of the possibility of such damages. This * +* limitation of liability shall not apply to liability for death or * +* personal injury resulting from such party's negligence to the * +* extent applicable law prohibits such limitation. Some * +* jurisdictions do not allow the exclusion or limitation of * +* incidental or consequential damages, so this exclusion and * +* limitation may not apply to You. * +* * +************************************************************************ + +8. Litigation +------------- + +Any litigation relating to this License may be brought only in the +courts of a jurisdiction where the defendant maintains its principal +place of business and such litigation shall be governed by laws of that +jurisdiction, without reference to its conflict-of-law provisions. +Nothing in this Section shall prevent a party's ability to bring +cross-claims or counter-claims. + +9. Miscellaneous +---------------- + +This License represents the complete agreement concerning the subject +matter hereof. If any provision of this License is held to be +unenforceable, such provision shall be reformed only to the extent +necessary to make it enforceable. 
Any law or regulation which provides +that the language of a contract shall be construed against the drafter +shall not be used to construe this License against a Contributor. + +10. Versions of the License +--------------------------- + +10.1. New Versions + +Mozilla Foundation is the license steward. Except as provided in Section +10.3, no one other than the license steward has the right to modify or +publish new versions of this License. Each version will be given a +distinguishing version number. + +10.2. Effect of New Versions + +You may distribute the Covered Software under the terms of the version +of the License under which You originally received the Covered Software, +or under the terms of any subsequent version published by the license +steward. + +10.3. Modified Versions + +If you create software not governed by this License, and you want to +create a new license for such software, you may create and use a +modified version of this License if you rename the license and remove +any references to the name of the license steward (except to note that +such modified license differs from this License). + +10.4. Distributing Source Code Form that is Incompatible With Secondary +Licenses + +If You choose to distribute Source Code Form that is Incompatible With +Secondary Licenses under the terms of this version of the License, the +notice described in Exhibit B of this License must be attached. + +Exhibit A - Source Code Form License Notice +------------------------------------------- + + This Source Code Form is subject to the terms of the Mozilla Public + License, v. 2.0. If a copy of the MPL was not distributed with this + file, You can obtain one at http://mozilla.org/MPL/2.0/. + +If it is not possible or desirable to put the notice in a particular +file, then You may include the notice in a location (such as a LICENSE +file in a relevant directory) where a recipient would be likely to look +for such a notice. + +You may add additional accurate notices of copyright ownership. + +Exhibit B - "Incompatible With Secondary Licenses" Notice +--------------------------------------------------------- + + This Source Code Form is "Incompatible With Secondary Licenses", as + defined by the Mozilla Public License, v. 2.0. diff --git a/third_party/rust/copyless/README.md b/third_party/rust/copyless/README.md new file mode 100644 index 000000000000..8caf5448b664 --- /dev/null +++ b/third_party/rust/copyless/README.md @@ -0,0 +1,35 @@ +## copyless +[![Build Status](https://travis-ci.org/kvark/copyless.svg)](https://travis-ci.org/kvark/copyless) +[![Crates.io](https://img.shields.io/crates/v/copyless.svg)](https://crates.io/crates/copyless) + +Rust abstractions can be zero cost in theory, but often reveal quite a few unnecessary `memcpy` calls in practice. This library provides a number of trait extensions for standard containers that expose API that is more friendly to LLVM optimization passes and doesn't end up with as many copies. + +It aims to accelerate [WebRender](https://github.com/servo/webrender) and [gfx-rs](https://github.com/gfx-rs/gfx). + +## Background + +The `memcpy` instructions showed in profiles of WebRender running in Gecko. @jrmuizel built a tool called [memcpy-find](https://github.com/jrmuizel/memcpy-find) that analyzes LLVM IR and spews out the call stacks that end up producing `memcpy` instructions. We figured out a way to convince the compiler to eliminate the copies. 
This library attempts to make these ways available to the Rust ecosystem, at least until the compiler gets smart enough ;) + +## Here is a small example + +```rust +use copyless::BoxHelper; + +enum Foo { + Small(i8), + Big([f32; 100]), +} + +#[inline(never)] +fn foo() -> Box<Foo> { + Box::new(Foo::Small(4)) // this has 1 memcopy + //Box::alloc().init(Foo::Small(4)) // this has 0 memcopies +} + +fn main() { + let z = foo(); + println!("{:?}", &*z as *const _); +} +``` + +Playground [permalink](https://play.rust-lang.org/?version=stable&mode=release&edition=2018&gist=579ab13345b1266752b1fa4400194cc7). diff --git a/third_party/rust/copyless/bors.toml b/third_party/rust/copyless/bors.toml new file mode 100644 index 000000000000..dccefd8780ee --- /dev/null +++ b/third_party/rust/copyless/bors.toml @@ -0,0 +1,5 @@ +status = [ + "continuous-integration/travis-ci/push", +] + +timeout_sec = 18000 # 5 hours diff --git a/third_party/rust/copyless/rustfmt.toml b/third_party/rust/copyless/rustfmt.toml new file mode 100644 index 000000000000..03cbecbdef3f --- /dev/null +++ b/third_party/rust/copyless/rustfmt.toml @@ -0,0 +1,2 @@ +newline_style = "Native" +spaces_around_ranges = true diff --git a/third_party/rust/copyless/src/boxed.rs b/third_party/rust/copyless/src/boxed.rs new file mode 100644 index 000000000000..a9bd7e4b7a7e --- /dev/null +++ b/third_party/rust/copyless/src/boxed.rs @@ -0,0 +1,62 @@ +use std::{ + alloc, mem, + ptr::{self, NonNull}, +}; + + +pub struct BoxAllocation<T>( + + + + NonNull<T>, +); + +impl<T> BoxAllocation<T> { + + #[inline(always)] + pub fn init(self, value: T) -> Box<T> { + if mem::size_of::<T>() == 0 { + return Box::new(value); + } + + unsafe { + let ptr = self.0.as_ptr(); + mem::forget(self); + ptr::write(ptr, value); + Box::from_raw(ptr) + } + } +} + +impl<T> Drop for BoxAllocation<T> { + fn drop(&mut self) { + if mem::size_of::<T>() == 0 { + return; + } + + let layout = alloc::Layout::new::<T>(); + unsafe { + alloc::dealloc(self.0.as_ptr() as *mut u8, layout); + } + } +} + + +pub trait BoxHelper<T> { + + fn alloc() -> BoxAllocation<T>; +} + +impl<T> BoxHelper<T> for Box<T> { + fn alloc() -> BoxAllocation<T> { + if mem::size_of::<T>() == 0 { + return BoxAllocation(NonNull::dangling()); + } + + let layout = alloc::Layout::new::<T>(); + BoxAllocation( + NonNull::new(unsafe { alloc::alloc(layout) as *mut T }) + .unwrap_or_else(|| alloc::handle_alloc_error(layout)), + ) + } +} diff --git a/third_party/rust/copyless/src/lib.rs b/third_party/rust/copyless/src/lib.rs new file mode 100644 index 000000000000..1e9f3220bd4e --- /dev/null +++ b/third_party/rust/copyless/src/lib.rs @@ -0,0 +1,11 @@ +#![warn(missing_docs)] + + + +pub use self::{ + boxed::{BoxAllocation, BoxHelper}, + vec::{VecAllocation, VecEntry, VecHelper}, +}; + +mod boxed; +mod vec; diff --git a/third_party/rust/copyless/src/vec.rs b/third_party/rust/copyless/src/vec.rs new file mode 100644 index 000000000000..fd7f24fd7b45 --- /dev/null +++ b/third_party/rust/copyless/src/vec.rs @@ -0,0 +1,75 @@ +use std::ptr; + + + +pub struct VecAllocation<'a, T: 'a> { + vec: &'a mut Vec<T>, + index: usize, +} + +impl<'a, T> VecAllocation<'a, T> { + + + + + #[inline(always)] + pub fn init(self, value: T) -> usize { + unsafe { + ptr::write(self.vec.as_mut_ptr().add(self.index), value); + self.vec.set_len(self.index + 1); + } + self.index + } +} + + +pub enum VecEntry<'a, T: 'a> { + + Vacant(VecAllocation<'a, T>), + + Occupied(&'a mut T), +} + +impl<'a, T> VecEntry<'a, T> { + + #[inline(always)] + pub fn set(self, value: T) { + match self { + VecEntry::Vacant(alloc) => { alloc.init(value); } + 
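+ // A vacant entry writes `value` directly into the slot reserved by `alloc` and only then bumps the vector's length; the occupied arm below overwrites the existing element in place through the mutable reference, so neither path moves the value through an intermediate buffer.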
VecEntry::Occupied(slot) => { *slot = value; } + } + } +} + + +pub trait VecHelper<T> { + + fn alloc(&mut self) -> VecAllocation<T>; + + + fn entry(&mut self, index: usize) -> VecEntry<T>; +} + +impl<T> VecHelper<T> for Vec<T> { + fn alloc(&mut self) -> VecAllocation<T> { + let index = self.len(); + if self.capacity() == index { + self.reserve(1); + } + VecAllocation { + vec: self, + index, + } + } + + fn entry(&mut self, index: usize) -> VecEntry<T> { + if index < self.len() { + VecEntry::Occupied(unsafe { + self.get_unchecked_mut(index) + }) + } else { + assert_eq!(index, self.len()); + VecEntry::Vacant(self.alloc()) + } + } +} diff --git a/third_party/rust/core-graphics/.cargo-checksum.json b/third_party/rust/core-graphics/.cargo-checksum.json index c10a7e42a529..1dd113145e90 100644 --- a/third_party/rust/core-graphics/.cargo-checksum.json +++ b/third_party/rust/core-graphics/.cargo-checksum.json @@ -1 +1 @@ -{"files":{"COPYRIGHT":"ec82b96487e9e778ee610c7ab245162464782cfa1f555c2299333f8dbe5c036a","Cargo.toml":"c30c4f657ed844adb3f412f758ce2bff9c62b83c8ec844cc447e24d785d18dba","LICENSE-APACHE":"a60eea817514531668d7e00765731449fe14d059d3249e0bc93b36de45f759f2","LICENSE-MIT":"62065228e42caebca7e7d7db1204cbb867033de5982ca4009928915e4095f3a3","README.md":"4a45abeb1e684e30bb361dfa7db59189423348e18d310cbae694b7c8c57cd86a","src/base.rs":"5faaadaf17d93c370a20f358be6c3f12958ab7d3f99ccc634421e28758fec88b","src/color.rs":"b054b38b507733c8c3f5af981483c54836e8a7499a6d1a22244fb58312824acb","src/color_space.rs":"b3d7ee8a21703c789160867cb8eb2188bd1daa193e3d030f21adb6f1a6f872de","src/context.rs":"6b14ec712e5d0af4af6beb0cb1a998bf1262ec6ab6ad2b3efad9e0362ade83c9","src/data_provider.rs":"22614a6ce7f857dec33e6d2dc01261b71b1bc5d5609a54ee55e04c049670c072","src/display.rs":"5b04d1fded021fc1eecb89b6350a66f6668e802b51e75cf69892ca082257443c","src/event.rs":"f2ade1c2c112bae7bc4f5df1eda63c13d1c32e5db255228f139ce17fb37c1a4b","src/event_source.rs":"d55a4f5b5e62789325028febc51bbf54c74b15ab1a4e70c6ad749a2f9753e081","src/font.rs":"63b7e50243a56254c800421df586abee59aead84f735f7df838ae04693aedf4b","src/geometry.rs":"cdeb9624df601d235bcc34d46e35bea302079ce1e3668253356a618486693a9f","src/image.rs":"0af720ee020fb1c6a2f4b1ce49e3d27f8f21f0be6b81ba4b9c824f87564efa58","src/lib.rs":"03628fc67576f6948bb803a53fb147c520b264eaba684e37d26cd1b0197ebf30","src/path.rs":"c429afeaed999b02ac00f89a867b5fc64f1e223039079a4e0529306b734ff117","src/private.rs":"da3fd61338bab2d8e26aa5433b2e18ecd2a0a408c62e1ac2b33a0f87f2dad88a","src/sys.rs":"cc90b690f172da51a87ffb234f6e74a9f501c4f1630d7b51fa2d5846e80fc164"},"package":"62ceafe1622ffc9a332199096841d0ff9912ec8cf8f9cde01e254a7d5217cd10"} \ No newline at end of file 
+{"files":{"COPYRIGHT":"ec82b96487e9e778ee610c7ab245162464782cfa1f555c2299333f8dbe5c036a","Cargo.toml":"ff714d37c339428ee9c8958414b52f0f49578de1001cd27e732b8cc965ad0326","LICENSE-APACHE":"a60eea817514531668d7e00765731449fe14d059d3249e0bc93b36de45f759f2","LICENSE-MIT":"62065228e42caebca7e7d7db1204cbb867033de5982ca4009928915e4095f3a3","README.md":"4a45abeb1e684e30bb361dfa7db59189423348e18d310cbae694b7c8c57cd86a","src/base.rs":"5faaadaf17d93c370a20f358be6c3f12958ab7d3f99ccc634421e28758fec88b","src/color.rs":"4c8ec4ab828cbc1b2a1538a34a51f5b380927f2f1daf187dff6f732f57a43656","src/color_space.rs":"b3d7ee8a21703c789160867cb8eb2188bd1daa193e3d030f21adb6f1a6f872de","src/context.rs":"6b14ec712e5d0af4af6beb0cb1a998bf1262ec6ab6ad2b3efad9e0362ade83c9","src/data_provider.rs":"22614a6ce7f857dec33e6d2dc01261b71b1bc5d5609a54ee55e04c049670c072","src/display.rs":"5b04d1fded021fc1eecb89b6350a66f6668e802b51e75cf69892ca082257443c","src/event.rs":"f2ade1c2c112bae7bc4f5df1eda63c13d1c32e5db255228f139ce17fb37c1a4b","src/event_source.rs":"d55a4f5b5e62789325028febc51bbf54c74b15ab1a4e70c6ad749a2f9753e081","src/font.rs":"63b7e50243a56254c800421df586abee59aead84f735f7df838ae04693aedf4b","src/geometry.rs":"cdeb9624df601d235bcc34d46e35bea302079ce1e3668253356a618486693a9f","src/image.rs":"0af720ee020fb1c6a2f4b1ce49e3d27f8f21f0be6b81ba4b9c824f87564efa58","src/lib.rs":"9b9601462de1bbc806e881b2b42e86b16372cad8eeefe1a96b772a9f7329958d","src/path.rs":"c429afeaed999b02ac00f89a867b5fc64f1e223039079a4e0529306b734ff117","src/private.rs":"da3fd61338bab2d8e26aa5433b2e18ecd2a0a408c62e1ac2b33a0f87f2dad88a","src/sys.rs":"cc90b690f172da51a87ffb234f6e74a9f501c4f1630d7b51fa2d5846e80fc164","src/window.rs":"2f6c3dc958ae2c0c9e2fc5033300b96e60ed0abee9823ea1f03797d64df0911a"},"package":"56790968ab1c8a1202a102e6de05fc6e1ec87da99e4e93e9a7d13efbfc1e95a9"} \ No newline at end of file diff --git a/third_party/rust/core-graphics/Cargo.toml b/third_party/rust/core-graphics/Cargo.toml index c7dab3124f63..e4454019bf7f 100644 --- a/third_party/rust/core-graphics/Cargo.toml +++ b/third_party/rust/core-graphics/Cargo.toml @@ -12,7 +12,7 @@ [package] name = "core-graphics" -version = "0.17.1" +version = "0.17.3" authors = ["The Servo Project Developers"] description = "Bindings to Core Graphics for OS X" homepage = "https://github.com/servo/core-graphics-rs" diff --git a/third_party/rust/core-graphics/src/color.rs b/third_party/rust/core-graphics/src/color.rs index 1b5301974add..2fe962276f8b 100644 --- a/third_party/rust/core-graphics/src/color.rs +++ b/third_party/rust/core-graphics/src/color.rs @@ -12,6 +12,8 @@ use base::CGFloat; use core_foundation::base::TCFType; use super::sys::{CGColorRef}; +pub use super::sys::CGColorRef as SysCGColorRef; + declare_TCFType!{ CGColor, CGColorRef } diff --git a/third_party/rust/core-graphics/src/lib.rs b/third_party/rust/core-graphics/src/lib.rs index 0fc2e86735f9..3fb55bd42c44 100644 --- a/third_party/rust/core-graphics/src/lib.rs +++ b/third_party/rust/core-graphics/src/lib.rs @@ -33,6 +33,8 @@ pub mod event_source; pub mod font; pub mod geometry; #[cfg(target_os = "macos")] +pub mod window; +#[cfg(target_os = "macos")] pub mod private; pub mod image; pub mod path; diff --git a/third_party/rust/core-graphics/src/window.rs b/third_party/rust/core-graphics/src/window.rs new file mode 100644 index 000000000000..884523dd0611 --- /dev/null +++ b/third_party/rust/core-graphics/src/window.rs @@ -0,0 +1,149 @@ + + + + + + + + + +#![allow(non_upper_case_globals)] + +use core_foundation::array::{CFArray, CFArrayRef}; +use 
core_foundation::base::{CFType, TCFType}; +use core_foundation::dictionary::CFDictionary; +use core_foundation::string::{CFString, CFStringRef}; +use foreign_types::ForeignType; + +use geometry::CGRect; +use image::CGImage; +use sys; + +pub type CGWindowID = u32; + +pub type CGWindowSharingType = u32; +pub const kCGWindowSharingNone: CGWindowSharingType = 0; +pub const kCGWindowSharingReadOnly: CGWindowSharingType = 1; +pub const kCGWindowSharingReadWrite: CGWindowSharingType = 1; + +pub type CGWindowBackingType = u32; +pub const kCGWindowBackingStoreRetained: CGWindowBackingType = 0; +pub const kCGWindowBackingStoreNonretained: CGWindowBackingType = 1; +pub const kCGWindowBackingStoreBuffered: CGWindowBackingType = 2; + +pub type CGWindowListOption = u32; +pub const kCGWindowListOptionAll: CGWindowListOption = 1 << 0; +pub const kCGWindowListOptionOnScreenOnly: CGWindowListOption = 1 << 1; +pub const kCGWindowListOptionOnScreenAboveWindow: CGWindowListOption = 1 << 2; +pub const kCGWindowListOptionOnScreenBelowWindow: CGWindowListOption = 1 << 3; +pub const kCGWindowListOptionIncludingWindow: CGWindowListOption = 1 << 4; +pub const kCGWindowListOptionExcludeDesktopElements: CGWindowListOption = 1 << 5; + +pub type CGWindowImageOption = u32; +pub const kCGWindowImageDefault: CGWindowImageOption = 0; +pub const kCGWindowImageBoundsIgnoreFraming: CGWindowImageOption = 1 << 0; +pub const kCGWindowImageShouldBeOpaque: CGWindowImageOption = 1 << 1; +pub const kCGWindowImageOnlyShadows: CGWindowImageOption = 1 << 2; +pub const kCGWindowImageBestResolution: CGWindowImageOption = 1 << 3; +pub const kCGWindowImageNominalResolution: CGWindowImageOption = 1 << 4; + +pub const kCGNullWindowID: CGWindowID = 0; + +pub fn copy_window_info(option: CGWindowListOption, relative_to_window: CGWindowID) + -> Option<CFArray> { + unsafe { + let array = CGWindowListCopyWindowInfo(option, relative_to_window); + if array.is_null() { + None + } else { + Some(TCFType::wrap_under_create_rule(array)) + } + } +} + +pub fn create_window_list(option: CGWindowListOption, relative_to_window: CGWindowID) + -> Option<CFArray<CGWindowID>> { + unsafe { + let array = CGWindowListCreate(option, relative_to_window); + if array.is_null() { + None + } else { + Some(TCFType::wrap_under_create_rule(array)) + } + } +} + +pub fn create_description_from_array(window_array: CFArray<CGWindowID>) -> + Option<CFArray<CFDictionary<CFString, CFType>>> { + unsafe { + let array = CGWindowListCreateDescriptionFromArray(window_array.as_concrete_TypeRef()); + if array.is_null() { + None + } else { + Some(TCFType::wrap_under_create_rule(array)) + } + } +} + +pub fn create_image(screen_bounds: CGRect, + list_option: CGWindowListOption, + window_id: CGWindowID, + image_option: CGWindowImageOption) + -> Option<CGImage> { + unsafe { + let image = CGWindowListCreateImage(screen_bounds, list_option, window_id, image_option); + if image.is_null() { + None + } else { + Some(CGImage::from_ptr(image)) + } + } +} + +pub fn create_image_from_array(screen_bounds: CGRect, + window_array: CFArray, + image_option: CGWindowImageOption) + -> Option<CGImage> { + unsafe { + let image = CGWindowListCreateImageFromArray(screen_bounds, + window_array.as_concrete_TypeRef(), + image_option); + if image.is_null() { + None + } else { + Some(CGImage::from_ptr(image)) + } + } +} + +#[link(name = "CoreGraphics", kind = "framework")] +extern { + pub static kCGWindowNumber: CFStringRef; + pub static kCGWindowStoreType: CFStringRef; + pub static kCGWindowLayer: CFStringRef; + pub static kCGWindowBounds: CFStringRef; + pub static kCGWindowSharingState: CFStringRef; + pub static 
kCGWindowAlpha: CFStringRef; + pub static kCGWindowOwnerPID: CFStringRef; + pub static kCGWindowMemoryUsage: CFStringRef; + pub static kCGWindowWorkspace: CFStringRef; + pub static kCGWindowOwnerName: CFStringRef; + pub static kCGWindowName: CFStringRef; + pub static kCGWindowIsOnscreen: CFStringRef; + pub static kCGWindowBackingLocationVideoMemory: CFStringRef; + + pub fn CGWindowListCopyWindowInfo(option: CGWindowListOption, relativeToWindow: CGWindowID) + -> CFArrayRef; + pub fn CGWindowListCreate(option: CGWindowListOption, relativeToWindow: CGWindowID) + -> CFArrayRef; + pub fn CGWindowListCreateDescriptionFromArray(windowArray: CFArrayRef) -> CFArrayRef; + pub fn CGWindowListCreateImage(screenBounds: CGRect, + listOption: CGWindowListOption, + windowID: CGWindowID, + imageOption: CGWindowImageOption) + -> *mut sys::CGImage; + pub fn CGWindowListCreateImageFromArray(screenBounds: CGRect, + windowArray: CFArrayRef, + imageOption: CGWindowImageOption) + -> *mut sys::CGImage; +} diff --git a/third_party/rust/crossbeam-deque/.cargo-checksum.json b/third_party/rust/crossbeam-deque/.cargo-checksum.json index b7a4fe9f79e7..76bc64238e6a 100644 --- a/third_party/rust/crossbeam-deque/.cargo-checksum.json +++ b/third_party/rust/crossbeam-deque/.cargo-checksum.json @@ -1 +1 @@ -{"files":{"CHANGELOG.md":"6e0989b778b897fdc72bd6b9664a7a9349b6478ca692eaead24229b6db7af82a","Cargo.toml":"8f6e1bf276829efac8152be2e0f334708979fa1a29ab31a50840f3367b41c429","LICENSE-APACHE":"a60eea817514531668d7e00765731449fe14d059d3249e0bc93b36de45f759f2","LICENSE-MIT":"23f18e03dc49df91622fe2a76176497404e46ced8a715d9d2b67a7446571cca3","README.md":"6d91170c612b4c65e402653b96b72a798b18c4a4082531bda83265d5aad42b5e","src/lib.rs":"364cb0371b849d89745176f8dd45e92c8077942ebfbeac52cf24be46ba245b46","tests/fifo.rs":"8e9a607ae26f35ebd52c14f4ac7b9e57a025c3ba12c8c0c69d4c20cbdc7a3f1a","tests/injector.rs":"fa655ef52be18a2b8bc081a68946adcb12e478cdb9ce09e07e490184db6d5763","tests/lifo.rs":"bf258f83f7c022358d01823c8a047c02e5a6e9a42695a5250341698a37ed0fc4","tests/steal.rs":"519549c18429db563c5238d7147e733901336943ca099669af2b553933b82694"},"package":"b18cd2e169ad86297e6bc0ad9aa679aee9daa4f19e8163860faf7c164e4f5a71"} \ No newline at end of file +{"files":{"CHANGELOG.md":"2a351fc7603e0cf7a66c305fa9429f7871113605427a8e43f701b71854cc2181","Cargo.toml":"381453938bdca74a67ac96146f8c544b30aa62293e47eaa6447a9d9bb03668eb","LICENSE-APACHE":"a60eea817514531668d7e00765731449fe14d059d3249e0bc93b36de45f759f2","LICENSE-MIT":"5734ed989dfca1f625b40281ee9f4530f91b2411ec01cb748223e7eb87e201ab","README.md":"c1e2f0e311bab5c2b82a9346e7b2fdaf17050a66913aad57fac40665fb65bb7b","src/lib.rs":"279ea1d9e7e72488e2590cb88fb14bdf9b8250676adb27d19efd281c91e63680","tests/fifo.rs":"7546ce471330a9d928a54f6ca41ddd36e9f4999852fdc4719bf9b24122a1c15f","tests/injector.rs":"c9107b437f790dbfab333f94d7211df29bb9a868d2d86304ad9fd7fa8db57d0a","tests/lifo.rs":"264967bc868870211e12a826f448a6d9e19ab5f7cc0e0bde86496cf76bb96e56","tests/steal.rs":"519549c18429db563c5238d7147e733901336943ca099669af2b553933b82694"},"package":"c3aa945d63861bfe624b55d153a39684da1e8c0bc8fba932f7ee3a3c16cea3ca"} \ No newline at end of file diff --git a/third_party/rust/crossbeam-deque/CHANGELOG.md b/third_party/rust/crossbeam-deque/CHANGELOG.md index 63fb092fdcc5..a9e1c79a8837 100644 --- a/third_party/rust/crossbeam-deque/CHANGELOG.md +++ b/third_party/rust/crossbeam-deque/CHANGELOG.md @@ -1,3 +1,8 @@ +# Version 0.7.2 + +- Bump `crossbeam-epoch` to `0.8`. +- Bump `crossbeam-utils` to `0.7`. 
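As a usage note for the window-list bindings added above: a macOS-only sketch along the following lines would enumerate on-screen windows. The `main` scaffolding is ours; only `copy_window_info`, the option constants, and `kCGNullWindowID` come from the vendored file.

```rust
extern crate core_graphics;

use core_graphics::window::{
    copy_window_info, kCGNullWindowID, kCGWindowListOptionExcludeDesktopElements,
    kCGWindowListOptionOnScreenOnly,
};

fn main() {
    // List options are bit flags and combine with `|`; kCGNullWindowID means
    // the query is not relative to any particular window.
    let options = kCGWindowListOptionOnScreenOnly | kCGWindowListOptionExcludeDesktopElements;
    match copy_window_info(options, kCGNullWindowID) {
        Some(windows) => println!("{} on-screen windows", windows.len()),
        None => println!("no window information available"),
    }
}
```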
+ # Version 0.7.1 - Bump the minimum required version of `crossbeam-utils`. diff --git a/third_party/rust/crossbeam-deque/Cargo.toml b/third_party/rust/crossbeam-deque/Cargo.toml index 9c1549ff0e22..2ce052337fd6 100644 --- a/third_party/rust/crossbeam-deque/Cargo.toml +++ b/third_party/rust/crossbeam-deque/Cargo.toml @@ -3,7 +3,7 @@ # When uploading crates to the registry Cargo will automatically # "normalize" Cargo.toml files for maximal compatibility # with all versions of Cargo and also rewrite `path` dependencies -# to registry (e.g. crates.io) dependencies +# to registry (e.g., crates.io) dependencies # # If you believe there's an error in this file please file an # issue against the rust-lang/cargo repository. If you're @@ -12,7 +12,7 @@ [package] name = "crossbeam-deque" -version = "0.7.1" +version = "0.7.2" authors = ["The Crossbeam Project Developers"] description = "Concurrent work-stealing deque" homepage = "https://github.com/crossbeam-rs/crossbeam/tree/master/crossbeam-deque" @@ -23,9 +23,9 @@ categories = ["algorithms", "concurrency", "data-structures"] license = "MIT/Apache-2.0" repository = "https://github.com/crossbeam-rs/crossbeam" [dependencies.crossbeam-epoch] -version = "0.7" +version = "0.8" [dependencies.crossbeam-utils] -version = "0.6.5" +version = "0.7" [dev-dependencies.rand] version = "0.6" diff --git a/third_party/rust/crossbeam-deque/LICENSE-MIT b/third_party/rust/crossbeam-deque/LICENSE-MIT index 31aa79387f27..068d491fd551 100644 --- a/third_party/rust/crossbeam-deque/LICENSE-MIT +++ b/third_party/rust/crossbeam-deque/LICENSE-MIT @@ -1,3 +1,7 @@ +The MIT License (MIT) + +Copyright (c) 2019 The Crossbeam Project Developers + Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the diff --git a/third_party/rust/crossbeam-deque/README.md b/third_party/rust/crossbeam-deque/README.md index edb5f4039fd4..a5ef93a929d3 100644 --- a/third_party/rust/crossbeam-deque/README.md +++ b/third_party/rust/crossbeam-deque/README.md @@ -10,6 +10,7 @@ https://crates.io/crates/crossbeam-deque) https://docs.rs/crossbeam-deque) [![Rust 1.28+](https://img.shields.io/badge/rust-1.28+-lightgray.svg)]( https://www.rust-lang.org) +[![chat](https://img.shields.io/discord/569610676205781012.svg?logo=discord)](https://discord.gg/BBYwKq) This crate provides work-stealing deques, which are primarily intended for building task schedulers. @@ -29,6 +30,10 @@ Next, add this to your crate: extern crate crossbeam_deque; ``` +## Compatibility + +The minimum supported Rust version is 1.28. Any change to this is considered a breaking change. 
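To make the work-stealing behavior described in this README concrete, a round-trip through the 0.7 `Worker`/`Stealer` API might look like the sketch below (ours, not part of the patch):

```rust
extern crate crossbeam_deque;

use crossbeam_deque::{Steal, Worker};

fn main() {
    let worker = Worker::new_fifo(); // owner end of the deque
    let stealer = worker.stealer();  // cloneable thief end, shareable across threads

    worker.push(1);
    worker.push(2);

    // In the FIFO flavor the owner pops from the front...
    assert_eq!(worker.pop(), Some(1));
    // ...and thieves steal from the same end, one task at a time.
    assert_eq!(stealer.steal(), Steal::Success(2));
    assert_eq!(stealer.steal(), Steal::Empty);
}
```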
+ ## License Licensed under either of diff --git a/third_party/rust/crossbeam-deque/src/lib.rs b/third_party/rust/crossbeam-deque/src/lib.rs index 5adea6c6095c..7d19d8f7c317 100644 --- a/third_party/rust/crossbeam-deque/src/lib.rs +++ b/third_party/rust/crossbeam-deque/src/lib.rs @@ -581,7 +581,8 @@ impl Worker { f.wrapping_add(1), Ordering::SeqCst, Ordering::Relaxed, - ).is_err() + ) + .is_err() { mem::forget(task.take()); @@ -811,7 +812,8 @@ impl Stealer { f.wrapping_add(batch_size), Ordering::SeqCst, Ordering::Relaxed, - ).is_err() + ) + .is_err() { return Steal::Retry; } @@ -891,9 +893,7 @@ impl Stealer { - dest.inner - .back - .store(dest_b, Ordering::Release); + dest.inner.back.store(dest_b, Ordering::Release); Steal::Success(()) @@ -992,7 +992,8 @@ impl Stealer { f.wrapping_add(batch_size + 1), Ordering::SeqCst, Ordering::Relaxed, - ).is_err() + ) + .is_err() { mem::forget(task); @@ -1082,9 +1083,7 @@ impl Stealer { - dest.inner - .back - .store(dest_b, Ordering::Release); + dest.inner.back.store(dest_b, Ordering::Release); Steal::Success(task) @@ -1296,14 +1295,12 @@ impl Injector { let new_tail = tail + (1 << SHIFT); - match self.tail.index - .compare_exchange_weak( - tail, - new_tail, - Ordering::SeqCst, - Ordering::Acquire, - ) - { + match self.tail.index.compare_exchange_weak( + tail, + new_tail, + Ordering::SeqCst, + Ordering::Acquire, + ) { Ok(_) => unsafe { if offset + 1 == BLOCK_CAP { @@ -1321,7 +1318,7 @@ impl Injector { slot.state.fetch_or(WRITE, Ordering::Release); return; - } + }, Err(t) => { tail = t; block = self.tail.block.load(Ordering::Acquire); @@ -1385,13 +1382,10 @@ impl Injector { } - if self.head.index - .compare_exchange_weak( - head, - new_head, - Ordering::SeqCst, - Ordering::Acquire, - ) + if self + .head + .index + .compare_exchange_weak(head, new_head, Ordering::SeqCst, Ordering::Acquire) .is_err() { return Steal::Retry; @@ -1502,13 +1496,10 @@ impl Injector { let new_offset = offset + advance; - if self.head.index - .compare_exchange_weak( - head, - new_head, - Ordering::SeqCst, - Ordering::Acquire, - ) + if self + .head + .index + .compare_exchange_weak(head, new_head, Ordering::SeqCst, Ordering::Acquire) .is_err() { return Steal::Retry; @@ -1665,13 +1656,10 @@ impl Injector { let new_offset = offset + advance; - if self.head.index - .compare_exchange_weak( - head, - new_head, - Ordering::SeqCst, - Ordering::Acquire, - ) + if self + .head + .index + .compare_exchange_weak(head, new_head, Ordering::SeqCst, Ordering::Acquire) .is_err() { return Steal::Retry; diff --git a/third_party/rust/crossbeam-deque/tests/fifo.rs b/third_party/rust/crossbeam-deque/tests/fifo.rs index 559a069f9228..4a3216eb531a 100644 --- a/third_party/rust/crossbeam-deque/tests/fifo.rs +++ b/third_party/rust/crossbeam-deque/tests/fifo.rs @@ -97,7 +97,8 @@ fn spsc() { for i in 0..STEPS { w.push(i); } - }).unwrap(); + }) + .unwrap(); } #[test] @@ -137,7 +138,8 @@ fn stampede() { remaining.fetch_sub(1, SeqCst); } } - }).unwrap(); + }) + .unwrap(); } #[test] @@ -195,7 +197,8 @@ fn stress() { } } done.store(true, SeqCst); - }).unwrap(); + }) + .unwrap(); } #[test] @@ -253,7 +256,8 @@ fn no_starvation() { } } done.store(true, SeqCst); - }).unwrap(); + }) + .unwrap(); } #[test] @@ -313,7 +317,8 @@ fn destructors() { remaining.fetch_sub(1, SeqCst); } } - }).unwrap(); + }) + .unwrap(); let rem = remaining.load(SeqCst); assert!(rem > 0); diff --git a/third_party/rust/crossbeam-deque/tests/injector.rs b/third_party/rust/crossbeam-deque/tests/injector.rs index b7f1ba89b54f..069215399b6a 100644 
--- a/third_party/rust/crossbeam-deque/tests/injector.rs +++ b/third_party/rust/crossbeam-deque/tests/injector.rs @@ -6,8 +6,8 @@ use std::sync::atomic::Ordering::SeqCst; use std::sync::atomic::{AtomicBool, AtomicUsize}; use std::sync::{Arc, Mutex}; -use deque::{Injector, Worker}; use deque::Steal::{Empty, Success}; +use deque::{Injector, Worker}; use rand::Rng; use utils::thread::scope; @@ -71,7 +71,8 @@ fn spsc() { for i in 0..COUNT { q.push(i); } - }).unwrap(); + }) + .unwrap(); } #[test] @@ -103,7 +104,8 @@ fn mpmc() { } }); } - }).unwrap(); + }) + .unwrap(); for c in v { assert_eq!(c.load(SeqCst), THREADS); @@ -147,7 +149,8 @@ fn stampede() { remaining.fetch_sub(1, SeqCst); } } - }).unwrap(); + }) + .unwrap(); } #[test] @@ -205,7 +208,8 @@ fn stress() { } } done.store(true, SeqCst); - }).unwrap(); + }) + .unwrap(); } #[test] @@ -263,7 +267,8 @@ fn no_starvation() { } } done.store(true, SeqCst); - }).unwrap(); + }) + .unwrap(); } #[test] @@ -323,7 +328,8 @@ fn destructors() { remaining.fetch_sub(1, SeqCst); } } - }).unwrap(); + }) + .unwrap(); let rem = remaining.load(SeqCst); assert!(rem > 0); diff --git a/third_party/rust/crossbeam-deque/tests/lifo.rs b/third_party/rust/crossbeam-deque/tests/lifo.rs index 45f404d1f920..a44edfe44272 100644 --- a/third_party/rust/crossbeam-deque/tests/lifo.rs +++ b/third_party/rust/crossbeam-deque/tests/lifo.rs @@ -97,7 +97,8 @@ fn spsc() { for i in 0..STEPS { w.push(i); } - }).unwrap(); + }) + .unwrap(); } #[test] @@ -137,7 +138,8 @@ fn stampede() { remaining.fetch_sub(1, SeqCst); } } - }).unwrap(); + }) + .unwrap(); } #[test] @@ -195,7 +197,8 @@ fn stress() { } } done.store(true, SeqCst); - }).unwrap(); + }) + .unwrap(); } #[test] @@ -253,7 +256,8 @@ fn no_starvation() { } } done.store(true, SeqCst); - }).unwrap(); + }) + .unwrap(); } #[test] @@ -313,7 +317,8 @@ fn destructors() { remaining.fetch_sub(1, SeqCst); } } - }).unwrap(); + }) + .unwrap(); let rem = remaining.load(SeqCst); assert!(rem > 0); diff --git a/third_party/rust/crossbeam-epoch/.cargo-checksum.json b/third_party/rust/crossbeam-epoch/.cargo-checksum.json index dcbbfea10675..d42d88919de4 100644 --- a/third_party/rust/crossbeam-epoch/.cargo-checksum.json +++ b/third_party/rust/crossbeam-epoch/.cargo-checksum.json @@ -1 +1 @@ 
-{"files":{"CHANGELOG.md":"fdffb6df6ae4827aa7c6543788a474ed47d96eb0b1e9d6f4d41ee922f921e9a9","Cargo.lock":"a9c563b8727667c39a3afb799b4c2eb3fc0b7f87b3b18b617fa79b75db0b98ba","Cargo.toml":"abb9a5a47d688f63ee2f82fa62900d357a8cce329f98a615a5a5a6e8c4516d41","LICENSE-APACHE":"a60eea817514531668d7e00765731449fe14d059d3249e0bc93b36de45f759f2","LICENSE-MIT":"5734ed989dfca1f625b40281ee9f4530f91b2411ec01cb748223e7eb87e201ab","README.md":"2f095ef276fadd1bc42e4ea16c6f9a8d098ff3c39f1e72a899ae02326bcf14a5","benches/defer.rs":"a4b17e7b96bef2f268c76925cfa58744b81504146426fab6f781ff0b2f666f2b","benches/flush.rs":"1d93ac40cb78bf2c55332869d4f7d1b283571bff4a0c6dbe245f02dd65e8e1d8","benches/pin.rs":"dd937ecfc8d7353995d438a7f4be383e91c002e9ee6acd4caa703bb340c97383","examples/sanitize.rs":"90f80709cd620e994f699d84ab9ce88f8552500d08bac64d60def5792460495a","examples/treiber_stack.rs":"b5d3bafa6d57af435b00d04145572a51ea871f64ff2c23ba544054c4c4145d65","src/atomic.rs":"8fc50bf82b298298b6512311df5f6da2c5c909d0b6eafa7776850538e65045e6","src/collector.rs":"a783049d3cb22989ec978366fd865f31562b0acbfbd0bc9ff9b600ceaa5ffa87","src/default.rs":"6afda8fd141ad594bed62baeb73f2e97c5ef33b051969a542bb908946fe39dd1","src/deferred.rs":"d5ace4be72e926cedb699cd19ae4076bbe87d795d650aa68f264106e6ff15bee","src/epoch.rs":"76dd63356d5bc52e741883d39abb636e4ccb04d20499fb2a0ce797bb81aa4e91","src/guard.rs":"1d90d690b02ee735263e845827f720be44faea871852731dd8444b92796f1539","src/internal.rs":"9ff5b0fa05cbfa550ef9eaf8bd2025445a097d9da59efb7b976580dd0a09a2ff","src/lib.rs":"6eaea9a81b55844c1fd084924b8b7f750fed33aae2a75e2d5f430ed054092a44","src/sync/list.rs":"43e09e9cf79d7cf95b5110051de9dd47ab7761e3bc90ea3be16c4cbabce6d4be","src/sync/mod.rs":"2da979ca3a2293f7626a2e6a9ab2fad758d92e3d2bed6cc712ef59eeeea87eab","src/sync/queue.rs":"34991ba75fbc8ba3494bf91559727feb1919f9f39b3c67d6e2aca82376ef9d3c"},"package":"fedcd6772e37f3da2a9af9bf12ebe046c0dfe657992377b4df982a2b54cd37a9"} \ No newline at end of file 
+{"files":{"CHANGELOG.md":"1d5defc0cddbd56dab9aad0c52a6101e2ae38a04703622b70822af4162b422b6","Cargo.lock":"dd8ea5cc92174990a2726564d8ea7d4cf1dcbc0e5775a5a8f07b854bcb25e27d","Cargo.toml":"802dbfef2329ab8d39ce1602e93f41403e2c297521e702e4278cf88df106f3a4","LICENSE-APACHE":"a60eea817514531668d7e00765731449fe14d059d3249e0bc93b36de45f759f2","LICENSE-MIT":"5734ed989dfca1f625b40281ee9f4530f91b2411ec01cb748223e7eb87e201ab","README.md":"30bed3b95612935f87903eeb37dfd09e133e4c064a66eb8b5681eaf97128c8e9","benches/defer.rs":"a4b17e7b96bef2f268c76925cfa58744b81504146426fab6f781ff0b2f666f2b","benches/flush.rs":"1d93ac40cb78bf2c55332869d4f7d1b283571bff4a0c6dbe245f02dd65e8e1d8","benches/pin.rs":"dd937ecfc8d7353995d438a7f4be383e91c002e9ee6acd4caa703bb340c97383","build.rs":"825c47ae19028dc4b28101ec71c04e7e41b8b185f6ecbeacee223596524c86ad","examples/sanitize.rs":"90f80709cd620e994f699d84ab9ce88f8552500d08bac64d60def5792460495a","examples/treiber_stack.rs":"b5d3bafa6d57af435b00d04145572a51ea871f64ff2c23ba544054c4c4145d65","src/atomic.rs":"53c29c8df6a90e2cd3d7747f7ebfe604087966c8ea6473ff2593bdd495f43951","src/collector.rs":"a783049d3cb22989ec978366fd865f31562b0acbfbd0bc9ff9b600ceaa5ffa87","src/default.rs":"6afda8fd141ad594bed62baeb73f2e97c5ef33b051969a542bb908946fe39dd1","src/deferred.rs":"883067dd60b108baacaafc9024833c0ec08c6b5649b60c030ab9a59e952ccd06","src/epoch.rs":"76dd63356d5bc52e741883d39abb636e4ccb04d20499fb2a0ce797bb81aa4e91","src/guard.rs":"486efbc061b7f402f4c8a96abd1889aff4b28eb10347b65e538c39d539d919ad","src/internal.rs":"5b09661b12d93412140b2ee186ebb304891374e4c512b5bbc97cd015ec34c493","src/lib.rs":"e55b6c418970e3bbd265a21b9beaf2d8254f7f7799e9954edf4486666869e2aa","src/sync/list.rs":"43e09e9cf79d7cf95b5110051de9dd47ab7761e3bc90ea3be16c4cbabce6d4be","src/sync/mod.rs":"2da979ca3a2293f7626a2e6a9ab2fad758d92e3d2bed6cc712ef59eeeea87eab","src/sync/queue.rs":"266036a201222fe46b6674fa789b96dd8c385bf2c37135799fb9ba1fba53049a"},"package":"5064ebdbf05ce3cb95e45c8b086f72263f4166b29b97f6baff7ef7fe047b55ac"} \ No newline at end of file diff --git a/third_party/rust/crossbeam-epoch/CHANGELOG.md b/third_party/rust/crossbeam-epoch/CHANGELOG.md index f47f7b862a14..e4aa1fdb2964 100644 --- a/third_party/rust/crossbeam-epoch/CHANGELOG.md +++ b/third_party/rust/crossbeam-epoch/CHANGELOG.md @@ -1,3 +1,10 @@ +# Version 0.8.0 + +- Bump the minimum required version to 1.28. +- Fix breakage with nightly feature due to rust-lang/rust#65214. +- Make `Atomic::null()` const function at 1.31+. +- Bump `crossbeam-utils` to `0.7`. + # Version 0.7.2 - Add `Atomic::into_owned()`. diff --git a/third_party/rust/crossbeam-epoch/Cargo.lock b/third_party/rust/crossbeam-epoch/Cargo.lock index 6162f296afb5..9f19f55e358f 100644 --- a/third_party/rust/crossbeam-epoch/Cargo.lock +++ b/third_party/rust/crossbeam-epoch/Cargo.lock @@ -1,26 +1,18 @@ # This file is automatically @generated by Cargo. # It is not intended for manual editing. 
-[[package]] -name = "arrayvec" -version = "0.4.11" -source = "registry+https://github.com/rust-lang/crates.io-index" -dependencies = [ - "nodrop 0.1.13 (registry+https://github.com/rust-lang/crates.io-index)", -] - [[package]] name = "autocfg" -version = "0.1.5" +version = "0.1.7" source = "registry+https://github.com/rust-lang/crates.io-index" [[package]] name = "bitflags" -version = "1.1.0" +version = "1.2.1" source = "registry+https://github.com/rust-lang/crates.io-index" [[package]] name = "cfg-if" -version = "0.1.9" +version = "0.1.10" source = "registry+https://github.com/rust-lang/crates.io-index" [[package]] @@ -28,29 +20,30 @@ name = "cloudabi" version = "0.0.3" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ - "bitflags 1.1.0 (registry+https://github.com/rust-lang/crates.io-index)", + "bitflags 1.2.1 (registry+https://github.com/rust-lang/crates.io-index)", ] [[package]] name = "crossbeam-epoch" -version = "0.7.2" +version = "0.8.0" dependencies = [ - "arrayvec 0.4.11 (registry+https://github.com/rust-lang/crates.io-index)", - "cfg-if 0.1.9 (registry+https://github.com/rust-lang/crates.io-index)", - "crossbeam-utils 0.6.6 (registry+https://github.com/rust-lang/crates.io-index)", - "lazy_static 1.3.0 (registry+https://github.com/rust-lang/crates.io-index)", - "memoffset 0.5.1 (registry+https://github.com/rust-lang/crates.io-index)", + "autocfg 0.1.7 (registry+https://github.com/rust-lang/crates.io-index)", + "cfg-if 0.1.10 (registry+https://github.com/rust-lang/crates.io-index)", + "crossbeam-utils 0.7.0 (registry+https://github.com/rust-lang/crates.io-index)", + "lazy_static 1.4.0 (registry+https://github.com/rust-lang/crates.io-index)", + "memoffset 0.5.2 (registry+https://github.com/rust-lang/crates.io-index)", "rand 0.6.5 (registry+https://github.com/rust-lang/crates.io-index)", "scopeguard 1.0.0 (registry+https://github.com/rust-lang/crates.io-index)", ] [[package]] name = "crossbeam-utils" -version = "0.6.6" +version = "0.7.0" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ - "cfg-if 0.1.9 (registry+https://github.com/rust-lang/crates.io-index)", - "lazy_static 1.3.0 (registry+https://github.com/rust-lang/crates.io-index)", + "autocfg 0.1.7 (registry+https://github.com/rust-lang/crates.io-index)", + "cfg-if 0.1.10 (registry+https://github.com/rust-lang/crates.io-index)", + "lazy_static 1.4.0 (registry+https://github.com/rust-lang/crates.io-index)", ] [[package]] @@ -60,43 +53,38 @@ source = "registry+https://github.com/rust-lang/crates.io-index" [[package]] name = "lazy_static" -version = "1.3.0" +version = "1.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" [[package]] name = "libc" -version = "0.2.60" +version = "0.2.65" source = "registry+https://github.com/rust-lang/crates.io-index" [[package]] name = "memoffset" -version = "0.5.1" +version = "0.5.2" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ "rustc_version 0.2.3 (registry+https://github.com/rust-lang/crates.io-index)", ] -[[package]] -name = "nodrop" -version = "0.1.13" -source = "registry+https://github.com/rust-lang/crates.io-index" - [[package]] name = "rand" version = "0.6.5" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ - "autocfg 0.1.5 (registry+https://github.com/rust-lang/crates.io-index)", - "libc 0.2.60 (registry+https://github.com/rust-lang/crates.io-index)", + "autocfg 0.1.7 (registry+https://github.com/rust-lang/crates.io-index)", + "libc 0.2.65 
(registry+https://github.com/rust-lang/crates.io-index)", "rand_chacha 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)", - "rand_core 0.4.0 (registry+https://github.com/rust-lang/crates.io-index)", + "rand_core 0.4.2 (registry+https://github.com/rust-lang/crates.io-index)", "rand_hc 0.1.0 (registry+https://github.com/rust-lang/crates.io-index)", "rand_isaac 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)", "rand_jitter 0.1.4 (registry+https://github.com/rust-lang/crates.io-index)", "rand_os 0.1.3 (registry+https://github.com/rust-lang/crates.io-index)", "rand_pcg 0.1.2 (registry+https://github.com/rust-lang/crates.io-index)", "rand_xorshift 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)", - "winapi 0.3.7 (registry+https://github.com/rust-lang/crates.io-index)", + "winapi 0.3.8 (registry+https://github.com/rust-lang/crates.io-index)", ] [[package]] @@ -104,7 +92,7 @@ name = "rand_chacha" version = "0.1.1" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ - "autocfg 0.1.5 (registry+https://github.com/rust-lang/crates.io-index)", + "autocfg 0.1.7 (registry+https://github.com/rust-lang/crates.io-index)", "rand_core 0.3.1 (registry+https://github.com/rust-lang/crates.io-index)", ] @@ -113,12 +101,12 @@ name = "rand_core" version = "0.3.1" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ - "rand_core 0.4.0 (registry+https://github.com/rust-lang/crates.io-index)", + "rand_core 0.4.2 (registry+https://github.com/rust-lang/crates.io-index)", ] [[package]] name = "rand_core" -version = "0.4.0" +version = "0.4.2" source = "registry+https://github.com/rust-lang/crates.io-index" [[package]] @@ -142,9 +130,9 @@ name = "rand_jitter" version = "0.1.4" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ - "libc 0.2.60 (registry+https://github.com/rust-lang/crates.io-index)", - "rand_core 0.4.0 (registry+https://github.com/rust-lang/crates.io-index)", - "winapi 0.3.7 (registry+https://github.com/rust-lang/crates.io-index)", + "libc 0.2.65 (registry+https://github.com/rust-lang/crates.io-index)", + "rand_core 0.4.2 (registry+https://github.com/rust-lang/crates.io-index)", + "winapi 0.3.8 (registry+https://github.com/rust-lang/crates.io-index)", ] [[package]] @@ -154,10 +142,10 @@ source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ "cloudabi 0.0.3 (registry+https://github.com/rust-lang/crates.io-index)", "fuchsia-cprng 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)", - "libc 0.2.60 (registry+https://github.com/rust-lang/crates.io-index)", - "rand_core 0.4.0 (registry+https://github.com/rust-lang/crates.io-index)", + "libc 0.2.65 (registry+https://github.com/rust-lang/crates.io-index)", + "rand_core 0.4.2 (registry+https://github.com/rust-lang/crates.io-index)", "rdrand 0.4.0 (registry+https://github.com/rust-lang/crates.io-index)", - "winapi 0.3.7 (registry+https://github.com/rust-lang/crates.io-index)", + "winapi 0.3.8 (registry+https://github.com/rust-lang/crates.io-index)", ] [[package]] @@ -165,8 +153,8 @@ name = "rand_pcg" version = "0.1.2" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ - "autocfg 0.1.5 (registry+https://github.com/rust-lang/crates.io-index)", - "rand_core 0.4.0 (registry+https://github.com/rust-lang/crates.io-index)", + "autocfg 0.1.7 (registry+https://github.com/rust-lang/crates.io-index)", + "rand_core 0.4.2 (registry+https://github.com/rust-lang/crates.io-index)", ] 
[[package]] @@ -213,7 +201,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" [[package]] name = "winapi" -version = "0.3.7" +version = "0.3.8" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ "winapi-i686-pc-windows-gnu 0.4.0 (registry+https://github.com/rust-lang/crates.io-index)", @@ -231,21 +219,19 @@ version = "0.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" [metadata] -"checksum arrayvec 0.4.11 (registry+https://github.com/rust-lang/crates.io-index)" = "b8d73f9beda665eaa98ab9e4f7442bd4e7de6652587de55b2525e52e29c1b0ba" -"checksum autocfg 0.1.5 (registry+https://github.com/rust-lang/crates.io-index)" = "22130e92352b948e7e82a49cdb0aa94f2211761117f29e052dd397c1ac33542b" -"checksum bitflags 1.1.0 (registry+https://github.com/rust-lang/crates.io-index)" = "3d155346769a6855b86399e9bc3814ab343cd3d62c7e985113d46a0ec3c281fd" -"checksum cfg-if 0.1.9 (registry+https://github.com/rust-lang/crates.io-index)" = "b486ce3ccf7ffd79fdeb678eac06a9e6c09fc88d33836340becb8fffe87c5e33" +"checksum autocfg 0.1.7 (registry+https://github.com/rust-lang/crates.io-index)" = "1d49d90015b3c36167a20fe2810c5cd875ad504b39cff3d4eae7977e6b7c1cb2" +"checksum bitflags 1.2.1 (registry+https://github.com/rust-lang/crates.io-index)" = "cf1de2fe8c75bc145a2f577add951f8134889b4795d47466a54a5c846d691693" +"checksum cfg-if 0.1.10 (registry+https://github.com/rust-lang/crates.io-index)" = "4785bdd1c96b2a846b2bd7cc02e86b6b3dbf14e7e53446c4f54c92a361040822" "checksum cloudabi 0.0.3 (registry+https://github.com/rust-lang/crates.io-index)" = "ddfc5b9aa5d4507acaf872de71051dfd0e309860e88966e1051e462a077aac4f" -"checksum crossbeam-utils 0.6.6 (registry+https://github.com/rust-lang/crates.io-index)" = "04973fa96e96579258a5091af6003abde64af786b860f18622b82e026cca60e6" +"checksum crossbeam-utils 0.7.0 (registry+https://github.com/rust-lang/crates.io-index)" = "ce446db02cdc3165b94ae73111e570793400d0794e46125cc4056c81cbb039f4" "checksum fuchsia-cprng 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)" = "a06f77d526c1a601b7c4cdd98f54b5eaabffc14d5f2f0296febdc7f357c6d3ba" -"checksum lazy_static 1.3.0 (registry+https://github.com/rust-lang/crates.io-index)" = "bc5729f27f159ddd61f4df6228e827e86643d4d3e7c32183cb30a1c08f604a14" -"checksum libc 0.2.60 (registry+https://github.com/rust-lang/crates.io-index)" = "d44e80633f007889c7eff624b709ab43c92d708caad982295768a7b13ca3b5eb" -"checksum memoffset 0.5.1 (registry+https://github.com/rust-lang/crates.io-index)" = "ce6075db033bbbb7ee5a0bbd3a3186bbae616f57fb001c485c7ff77955f8177f" -"checksum nodrop 0.1.13 (registry+https://github.com/rust-lang/crates.io-index)" = "2f9667ddcc6cc8a43afc9b7917599d7216aa09c463919ea32c59ed6cac8bc945" +"checksum lazy_static 1.4.0 (registry+https://github.com/rust-lang/crates.io-index)" = "e2abad23fbc42b3700f2f279844dc832adb2b2eb069b2df918f455c4e18cc646" +"checksum libc 0.2.65 (registry+https://github.com/rust-lang/crates.io-index)" = "1a31a0627fdf1f6a39ec0dd577e101440b7db22672c0901fe00a9a6fbb5c24e8" +"checksum memoffset 0.5.2 (registry+https://github.com/rust-lang/crates.io-index)" = "4a85c1a8c329f11437034d7313dca647c79096523533a1c79e86f1d0f657c7cc" "checksum rand 0.6.5 (registry+https://github.com/rust-lang/crates.io-index)" = "6d71dacdc3c88c1fde3885a3be3fbab9f35724e6ce99467f7d9c5026132184ca" "checksum rand_chacha 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)" = "556d3a1ca6600bfcbab7c7c91ccb085ac7fbbcd70e008a98742e7847f4f7bcef" "checksum rand_core 0.3.1 
(registry+https://github.com/rust-lang/crates.io-index)" = "7a6fdeb83b075e8266dcc8762c22776f6877a63111121f5f8c7411e5be7eed4b" -"checksum rand_core 0.4.0 (registry+https://github.com/rust-lang/crates.io-index)" = "d0e7a549d590831370895ab7ba4ea0c1b6b011d106b5ff2da6eee112615e6dc0" +"checksum rand_core 0.4.2 (registry+https://github.com/rust-lang/crates.io-index)" = "9c33a3c44ca05fa6f1807d8e6743f3824e8509beca625669633be0acbdf509dc" "checksum rand_hc 0.1.0 (registry+https://github.com/rust-lang/crates.io-index)" = "7b40677c7be09ae76218dc623efbf7b18e34bced3f38883af07bb75630a21bc4" "checksum rand_isaac 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)" = "ded997c9d5f13925be2a6fd7e66bf1872597f759fd9dd93513dd7e92e5a5ee08" "checksum rand_jitter 0.1.4 (registry+https://github.com/rust-lang/crates.io-index)" = "1166d5c91dc97b88d1decc3285bb0a99ed84b05cfd0bc2341bdf2d43fc41e39b" @@ -257,6 +243,6 @@ source = "registry+https://github.com/rust-lang/crates.io-index" "checksum scopeguard 1.0.0 (registry+https://github.com/rust-lang/crates.io-index)" = "b42e15e59b18a828bbf5c58ea01debb36b9b096346de35d941dcb89009f24a0d" "checksum semver 0.9.0 (registry+https://github.com/rust-lang/crates.io-index)" = "1d7eb9ef2c18661902cc47e535f9bc51b78acd254da71d375c2f6720d9a40403" "checksum semver-parser 0.7.0 (registry+https://github.com/rust-lang/crates.io-index)" = "388a1df253eca08550bef6c72392cfe7c30914bf41df5269b68cbd6ff8f570a3" -"checksum winapi 0.3.7 (registry+https://github.com/rust-lang/crates.io-index)" = "f10e386af2b13e47c89e7236a7a14a086791a2b88ebad6df9bf42040195cf770" +"checksum winapi 0.3.8 (registry+https://github.com/rust-lang/crates.io-index)" = "8093091eeb260906a183e6ae1abdba2ef5ef2257a21801128899c3fc699229c6" "checksum winapi-i686-pc-windows-gnu 0.4.0 (registry+https://github.com/rust-lang/crates.io-index)" = "ac3b87c63620426dd9b991e5ce0329eff545bccbbb34f3be09ff6fb6ab51b7b6" "checksum winapi-x86_64-pc-windows-gnu 0.4.0 (registry+https://github.com/rust-lang/crates.io-index)" = "712e227841d057c1ee1cd2fb22fa7e5a5461ae8e48fa2ca79ec42cfc1931183f" diff --git a/third_party/rust/crossbeam-epoch/Cargo.toml b/third_party/rust/crossbeam-epoch/Cargo.toml index 93de822d16f9..0cdcbbbcd02f 100644 --- a/third_party/rust/crossbeam-epoch/Cargo.toml +++ b/third_party/rust/crossbeam-epoch/Cargo.toml @@ -12,7 +12,7 @@ [package] name = "crossbeam-epoch" -version = "0.7.2" +version = "0.8.0" authors = ["The Crossbeam Project Developers"] description = "Epoch-based garbage collection" homepage = "https://github.com/crossbeam-rs/crossbeam/tree/master/crossbeam-epoch" @@ -22,15 +22,11 @@ keywords = ["lock-free", "rcu", "atomic", "garbage"] categories = ["concurrency", "memory-management", "no-std"] license = "MIT/Apache-2.0" repository = "https://github.com/crossbeam-rs/crossbeam" -[dependencies.arrayvec] -version = "0.4" -default-features = false - [dependencies.cfg-if] version = "0.1.2" [dependencies.crossbeam-utils] -version = "0.6" +version = "0.7" default-features = false [dependencies.lazy_static] @@ -45,10 +41,12 @@ version = "1" default-features = false [dev-dependencies.rand] version = "0.6" +[build-dependencies.autocfg] +version = "0.1.6" [features] alloc = ["crossbeam-utils/alloc"] default = ["std"] -nightly = ["crossbeam-utils/nightly", "arrayvec/use_union"] +nightly = ["crossbeam-utils/nightly"] sanitize = [] std = ["crossbeam-utils/std", "lazy_static"] diff --git a/third_party/rust/crossbeam-epoch/README.md b/third_party/rust/crossbeam-epoch/README.md index a1fb4141c9e8..79b4c773e196 100644 --- 
a/third_party/rust/crossbeam-epoch/README.md +++ b/third_party/rust/crossbeam-epoch/README.md @@ -8,7 +8,7 @@ https://github.com/crossbeam-rs/crossbeam-epoch) https://crates.io/crates/crossbeam-epoch) [![Documentation](https://docs.rs/crossbeam-epoch/badge.svg)]( https://docs.rs/crossbeam-epoch) -[![Rust 1.26+](https://img.shields.io/badge/rust-1.26+-lightgray.svg)]( +[![Rust 1.28+](https://img.shields.io/badge/rust-1.28+-lightgray.svg)]( https://www.rust-lang.org) [![chat](https://img.shields.io/discord/569610676205781012.svg?logo=discord)](https://discord.gg/BBYwKq) @@ -28,7 +28,7 @@ Add this to your `Cargo.toml`: ```toml [dependencies] -crossbeam-epoch = "0.7" +crossbeam-epoch = "0.8" ``` Next, add this to your crate: @@ -37,6 +37,10 @@ Next, add this to your crate: extern crate crossbeam_epoch as epoch; ``` +## Compatibility + +The minimum supported Rust version is 1.28. Any change to this is considered a breaking change. + ## License Licensed under either of diff --git a/third_party/rust/crossbeam-epoch/build.rs b/third_party/rust/crossbeam-epoch/build.rs new file mode 100644 index 000000000000..d451c24b2f1f --- /dev/null +++ b/third_party/rust/crossbeam-epoch/build.rs @@ -0,0 +1,8 @@ +extern crate autocfg; + +fn main() { + let cfg = autocfg::new(); + if cfg.probe_rustc_version(1, 31) { + println!("cargo:rustc-cfg=has_min_const_fn"); + } +} diff --git a/third_party/rust/crossbeam-epoch/src/atomic.rs b/third_party/rust/crossbeam-epoch/src/atomic.rs index 4064a617c24a..c41a738a0d9b 100644 --- a/third_party/rust/crossbeam-epoch/src/atomic.rs +++ b/third_party/rust/crossbeam-epoch/src/atomic.rs @@ -150,7 +150,7 @@ impl<T> Atomic<T> { - #[cfg(not(feature = "nightly"))] + #[cfg(not(has_min_const_fn))] pub fn null() -> Atomic<T> { Self { data: AtomicUsize::new(0), @@ -167,7 +167,7 @@ impl<T> Atomic<T> { - #[cfg(feature = "nightly")] + #[cfg(has_min_const_fn)] pub const fn null() -> Atomic<T> { Self { data: AtomicUsize::new(0), diff --git a/third_party/rust/crossbeam-epoch/src/deferred.rs b/third_party/rust/crossbeam-epoch/src/deferred.rs index f456638ca5d4..96615aae782e 100644 --- a/third_party/rust/crossbeam-epoch/src/deferred.rs +++ b/third_party/rust/crossbeam-epoch/src/deferred.rs @@ -36,6 +36,9 @@ impl Deferred { unsafe { if size <= mem::size_of::<Data>() && align <= mem::align_of::<Data>() { + + + #[allow(deprecated)] let mut data: Data = mem::uninitialized(); ptr::write(&mut data as *mut Data as *mut F, f); @@ -51,6 +54,9 @@ impl Deferred { } } else { let b: Box<F> = Box::new(f); + + + #[allow(deprecated)] let mut data: Data = mem::uninitialized(); ptr::write(&mut data as *mut Data as *mut Box<F>, b); diff --git a/third_party/rust/crossbeam-epoch/src/guard.rs b/third_party/rust/crossbeam-epoch/src/guard.rs index ab15e8615365..297f1ab53025 100644 --- a/third_party/rust/crossbeam-epoch/src/guard.rs +++ b/third_party/rust/crossbeam-epoch/src/guard.rs @@ -193,6 +193,8 @@ impl Guard { { if let Some(local) = self.local.as_ref() { local.defer(Deferred::new(move || drop(f())), self); + } else { + drop(f()); } } diff --git a/third_party/rust/crossbeam-epoch/src/internal.rs b/third_party/rust/crossbeam-epoch/src/internal.rs index a8ad62b3f001..d625546a1842 100644 --- a/third_party/rust/crossbeam-epoch/src/internal.rs +++ b/third_party/rust/crossbeam-epoch/src/internal.rs @@ -38,11 +38,10 @@ use core::cell::{Cell, UnsafeCell}; use core::mem::{self, ManuallyDrop}; use core::num::Wrapping; -use core::ptr; +use core::{ptr, fmt}; use core::sync::atomic; use core::sync::atomic::Ordering; -use arrayvec::ArrayVec; use 
crossbeam_utils::CachePadded; use atomic::{Shared, Owned}; @@ -60,10 +59,10 @@ const MAX_OBJECTS: usize = 64; const MAX_OBJECTS: usize = 4; -#[derive(Default, Debug)] pub struct Bag { - deferreds: ArrayVec<[Deferred; MAX_OBJECTS]>, + deferreds: [Deferred; MAX_OBJECTS], + len: usize } @@ -77,7 +76,7 @@ impl Bag { pub fn is_empty(&self) -> bool { - self.deferreds.is_empty() + self.len == 0 } @@ -89,7 +88,13 @@ impl Bag { pub unsafe fn try_push(&mut self, deferred: Deferred) -> Result<(), Deferred> { - self.deferreds.try_push(deferred).map_err(|e| e.element()) + if self.len < MAX_OBJECTS { + self.deferreds[self.len] = deferred; + self.len += 1; + Ok(()) + } else { + Err(deferred) + } } @@ -98,16 +103,54 @@ impl Bag { } } +impl Default for Bag { + fn default() -> Self { + + #[cfg(not(feature = "sanitize"))] + return Bag { len: 0, deferreds: + [Deferred::new(no_op_func), Deferred::new(no_op_func), Deferred::new(no_op_func), Deferred::new(no_op_func), + Deferred::new(no_op_func), Deferred::new(no_op_func), Deferred::new(no_op_func), Deferred::new(no_op_func), + Deferred::new(no_op_func), Deferred::new(no_op_func), Deferred::new(no_op_func), Deferred::new(no_op_func), + Deferred::new(no_op_func), Deferred::new(no_op_func), Deferred::new(no_op_func), Deferred::new(no_op_func), + Deferred::new(no_op_func), Deferred::new(no_op_func), Deferred::new(no_op_func), Deferred::new(no_op_func), + Deferred::new(no_op_func), Deferred::new(no_op_func), Deferred::new(no_op_func), Deferred::new(no_op_func), + Deferred::new(no_op_func), Deferred::new(no_op_func), Deferred::new(no_op_func), Deferred::new(no_op_func), + Deferred::new(no_op_func), Deferred::new(no_op_func), Deferred::new(no_op_func), Deferred::new(no_op_func), + Deferred::new(no_op_func), Deferred::new(no_op_func), Deferred::new(no_op_func), Deferred::new(no_op_func), + Deferred::new(no_op_func), Deferred::new(no_op_func), Deferred::new(no_op_func), Deferred::new(no_op_func), + Deferred::new(no_op_func), Deferred::new(no_op_func), Deferred::new(no_op_func), Deferred::new(no_op_func), + Deferred::new(no_op_func), Deferred::new(no_op_func), Deferred::new(no_op_func), Deferred::new(no_op_func), + Deferred::new(no_op_func), Deferred::new(no_op_func), Deferred::new(no_op_func), Deferred::new(no_op_func), + Deferred::new(no_op_func), Deferred::new(no_op_func), Deferred::new(no_op_func), Deferred::new(no_op_func), + Deferred::new(no_op_func), Deferred::new(no_op_func), Deferred::new(no_op_func), Deferred::new(no_op_func), + Deferred::new(no_op_func), Deferred::new(no_op_func), Deferred::new(no_op_func), Deferred::new(no_op_func)] + }; + #[cfg(feature = "sanitize")] + return Bag { len: 0, deferreds: [Deferred::new(no_op_func), Deferred::new(no_op_func), Deferred::new(no_op_func), Deferred::new(no_op_func)] }; + } +} + impl Drop for Bag { fn drop(&mut self) { - for deferred in self.deferreds.drain(..) 
{ - deferred.call(); + for deferred in &mut self.deferreds[..self.len] { + let no_op = Deferred::new(no_op_func); + let owned_deferred = mem::replace(deferred, no_op); + owned_deferred.call(); } } } +impl fmt::Debug for Bag { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + f.debug_struct("Bag").field("deferreds", &&self.deferreds[..self.len]).finish() + } +} + +fn no_op_func() {} + + #[derive(Default, Debug)] struct SealedBag { epoch: Epoch, diff --git a/third_party/rust/crossbeam-epoch/src/lib.rs b/third_party/rust/crossbeam-epoch/src/lib.rs index 53c6b5a5e70b..99fb1cc7ff91 100644 --- a/third_party/rust/crossbeam-epoch/src/lib.rs +++ b/third_party/rust/crossbeam-epoch/src/lib.rs @@ -57,7 +57,6 @@ #![warn(missing_docs)] #![warn(missing_debug_implementations)] #![cfg_attr(not(feature = "std"), no_std)] -#![cfg_attr(feature = "nightly", feature(const_fn))] #![cfg_attr(feature = "nightly", feature(cfg_target_has_atomic))] #[macro_use] @@ -73,13 +72,9 @@ cfg_if! { } } -#[cfg_attr( - feature = "nightly", - cfg(all(target_has_atomic = "cas", target_has_atomic = "ptr")) -)] +#[cfg_attr(feature = "nightly", cfg(target_has_atomic = "ptr"))] cfg_if! { if #[cfg(any(feature = "alloc", feature = "std"))] { - extern crate arrayvec; extern crate crossbeam_utils; #[macro_use] extern crate memoffset; diff --git a/third_party/rust/crossbeam-epoch/src/sync/queue.rs b/third_party/rust/crossbeam-epoch/src/sync/queue.rs index d21c9f0537b4..6bd7f0cdb4da 100644 --- a/third_party/rust/crossbeam-epoch/src/sync/queue.rs +++ b/third_party/rust/crossbeam-epoch/src/sync/queue.rs @@ -46,6 +46,9 @@ impl<T> Queue<T> { head: CachePadded::new(Atomic::null()), tail: CachePadded::new(Atomic::null()), }; + + + #[allow(deprecated)] let sentinel = Owned::new(Node { data: unsafe { mem::uninitialized() }, next: Atomic::null(), diff --git a/third_party/rust/crossbeam-utils-0.6.5/.cargo-checksum.json b/third_party/rust/crossbeam-utils-0.6.5/.cargo-checksum.json new file mode 100644 index 000000000000..ec17be04c271 --- /dev/null +++ b/third_party/rust/crossbeam-utils-0.6.5/.cargo-checksum.json @@ -0,0 +1 @@ 
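The `guard.rs` hunk above changes `Guard::defer` so that a guard with no thread-local context (e.g. one obtained from `epoch::unprotected()`) runs the deferred function immediately instead of silently dropping it; otherwise the closure lands in the fixed-size `Bag` reworked above. A hedged sketch of the public API that feeds this path, assuming crossbeam-epoch 0.8 as vendored:

```rust
extern crate crossbeam_epoch as epoch;

fn main() {
    // Pinning registers this thread with the epoch machinery.
    let guard = epoch::pin();
    // The closure is queued in the thread-local bag and runs only once no
    // pinned thread can still observe whatever it is cleaning up.
    guard.defer(|| println!("ran after the epoch advanced"));
    drop(guard); // un-pinning lets the collector eventually flush the bag
}
```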
+{"files":{"CHANGELOG.md":"e58bfef23e76d04b244941fd4ecdb35837a1a6f1370bf4596cc0280193c9a4f9","Cargo.toml":"2d4d20231a89e61fa6d1d83ad853b274e71d243c992eda5a9de0c9e8ca428ba5","LICENSE-APACHE":"a60eea817514531668d7e00765731449fe14d059d3249e0bc93b36de45f759f2","LICENSE-MIT":"23f18e03dc49df91622fe2a76176497404e46ced8a715d9d2b67a7446571cca3","README.md":"63ba61fd2e75aa90572476eda5246fc766846af40d31e0bdccbf763d9f0799ba","benches/atomic_cell.rs":"ada69698def9d4eab485a6e0da235aaac001efe49a6b0d6f5c5be381a645310f","src/atomic/atomic_cell.rs":"97a9ec7ac2625ee0a951b984a419fbeab62173ed9c23cab47dfc13ed25e8ee6c","src/atomic/consume.rs":"bfdc7e2d8370a5a3bb1699b6214347c359d66fcc92a2d1345a513676ac91d821","src/atomic/mod.rs":"404eacae422012f3628cb44262df73a5891fe02a17ab345b832e3062982b5a20","src/backoff.rs":"029fede365eaa3408c7359cf868303120903976304aee546aeedcb80085568d5","src/cache_padded.rs":"95b10657b4e50316d2213894e195c61602ff0c6655cc965301de1584fb7d61c7","src/lib.rs":"957df3bd2875147aa1b939fc47f1a8a72719748e9001f27dba2f3589e27a73b4","src/sync/mod.rs":"4c8ad6ec4601f212791b0b531b46ee5decec2f1d14746aa7f2c18e36c609cd8e","src/sync/parker.rs":"55324bbea5b7c6838a0f8467a5b8a5dbd5526c8e1c7fd4f6d64dad1ab19f9be9","src/sync/sharded_lock.rs":"7a401ba621233732c26cf49324748269359d7bc5dc27e0ec26c9493e9a5ec97d","src/sync/wait_group.rs":"21708bbd46daa98e9f788765a9a4ef3b087a8d1e97a6e9406b4a960c95e44ca0","src/thread.rs":"384e3c6e6db565e752169223205991f1eadb1258b1d416758172a40a6c9bd645","tests/atomic_cell.rs":"690f516c7e827b18adec5da1c3249ebb26ff674c5887d863ddc94fe1600b9c28","tests/cache_padded.rs":"02235757a554279dae5053d46314a765059ec036c63a05336353994c2aa344d1","tests/parker.rs":"996212c084286567638919c27d46a250a5d592d8e1a97c1e6a4d7e10c060e4dd","tests/sharded_lock.rs":"1e2e8a355b74d89569873fbba7772235bc64d13a7209ee673f368f4fe6f70c65","tests/thread.rs":"0d86998085a8aace79e5b3dae61aa8bd864492f44aafcce6ec85778954f55809","tests/wait_group.rs":"e3d5168581fb511b760f4249ca487b919cffc60ac2b4610a78db99899772dd5b"},"package":"f8306fcef4a7b563b76b7dd949ca48f52bc1141aa067d2ea09565f3e2652aa5c"} \ No newline at end of file diff --git a/third_party/rust/crossbeam-utils-0.6.5/CHANGELOG.md b/third_party/rust/crossbeam-utils-0.6.5/CHANGELOG.md new file mode 100644 index 000000000000..e3a2bdd0727c --- /dev/null +++ b/third_party/rust/crossbeam-utils-0.6.5/CHANGELOG.md @@ -0,0 +1,89 @@ +# Version 0.6.5 + +- Rename `Backoff::is_complete()` to `Backoff::is_completed()`. + +# Version 0.6.4 + +- Add `WaitGroup`, `ShardedLock`, and `Backoff`. +- Add `fetch_*` methods for `AtomicCell` and `AtomicCell`. +- Expand documentation. + +# Version 0.6.3 + +- Add `AtomicCell`. +- Improve documentation. + +# Version 0.6.2 + +- Add `Parker`. +- Improve documentation. + +# Version 0.6.1 + +- Fix a soundness bug in `Scope::spawn()`. +- Remove the `T: 'scope` bound on `ScopedJoinHandle`. + +# Version 0.6.0 + +- Move `AtomicConsume` to `atomic` module. +- `scope()` returns a `Result` of thread joins. +- Remove `spawn_unchecked`. +- Fix a soundness bug due to incorrect lifetimes. +- Improve documentation. +- Support nested scoped spawns. +- Implement `Copy`, `Hash`, `PartialEq`, and `Eq` for `CachePadded`. +- Add `CachePadded::into_inner()`. + +# Version 0.5.0 + +- Reorganize sub-modules and rename functions. + +# Version 0.4.1 + +- Fix a documentation link. + +# Version 0.4.0 + +- `CachePadded` supports types bigger than 64 bytes. +- Fix a bug in scoped threads where unitialized memory was being dropped. +- Minimum required Rust version is now 1.25. 
+ +# Version 0.3.2 + +- Mark `load_consume` with `#[inline]`. + +# Version 0.3.1 + +- `load_consume` on ARM and AArch64. + +# Version 0.3.0 + +- Add `join` for scoped thread API. +- Add `load_consume` for atomic load-consume memory ordering. +- Remove `AtomicOption`. + +# Version 0.2.2 + +- Support Rust 1.12.1. +- Call `T::clone` when cloning a `CachePadded`. + +# Version 0.2.1 + +- Add `use_std` feature. + +# Version 0.2.0 + +- Add `nightly` feature. +- Use `repr(align(64))` on `CachePadded` with the `nightly` feature. +- Implement `Drop` for `CachePadded`. +- Implement `Clone` for `CachePadded`. +- Implement `From` for `CachePadded`. +- Implement better `Debug` for `CachePadded`. +- Write more tests. +- Add this changelog. +- Change cache line length to 64 bytes. +- Remove `ZerosValid`. + +# Version 0.1.0 + +- Old implementation of `CachePadded` from `crossbeam` version 0.3.0 diff --git a/third_party/rust/crossbeam-utils-0.6.5/Cargo.toml b/third_party/rust/crossbeam-utils-0.6.5/Cargo.toml new file mode 100644 index 000000000000..5c2c84b3cd36 --- /dev/null +++ b/third_party/rust/crossbeam-utils-0.6.5/Cargo.toml @@ -0,0 +1,37 @@ +# THIS FILE IS AUTOMATICALLY GENERATED BY CARGO +# +# When uploading crates to the registry Cargo will automatically +# "normalize" Cargo.toml files for maximal compatibility +# with all versions of Cargo and also rewrite `path` dependencies +# to registry (e.g. crates.io) dependencies +# +# If you believe there's an error in this file please file an +# issue against the rust-lang/cargo repository. If you're +# editing this file be aware that the upstream Cargo.toml +# will likely look very different (and much more reasonable) + +[package] +name = "crossbeam-utils" +version = "0.6.5" +authors = ["The Crossbeam Project Developers"] +description = "Utilities for concurrent programming" +homepage = "https://github.com/crossbeam-rs/crossbeam/tree/master/crossbeam-utils" +documentation = "https://docs.rs/crossbeam-utils" +readme = "README.md" +keywords = ["scoped", "thread", "atomic", "cache"] +categories = ["algorithms", "concurrency", "data-structures", "no-std"] +license = "MIT/Apache-2.0" +repository = "https://github.com/crossbeam-rs/crossbeam" +[dependencies.cfg-if] +version = "0.1" + +[dependencies.lazy_static] +version = "1.1.0" +optional = true +[dev-dependencies.rand] +version = "0.6" + +[features] +default = ["std"] +nightly = [] +std = ["lazy_static"] diff --git a/third_party/rust/crossbeam-utils-0.6.5/LICENSE-APACHE b/third_party/rust/crossbeam-utils-0.6.5/LICENSE-APACHE new file mode 100644 index 000000000000..16fe87b06e80 --- /dev/null +++ b/third_party/rust/crossbeam-utils-0.6.5/LICENSE-APACHE @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + +TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + +1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. 
For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + +2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + +3. Grant of Patent License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + +4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + +5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + +6. Trademarks. 
This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + +7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + +8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + +9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + +END OF TERMS AND CONDITIONS + +APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + +Copyright [yyyy] [name of copyright owner] + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
diff --git a/third_party/rust/crossbeam-utils-0.6.5/LICENSE-MIT b/third_party/rust/crossbeam-utils-0.6.5/LICENSE-MIT new file mode 100644 index 000000000000..31aa79387f27 --- /dev/null +++ b/third_party/rust/crossbeam-utils-0.6.5/LICENSE-MIT @@ -0,0 +1,23 @@ +Permission is hereby granted, free of charge, to any +person obtaining a copy of this software and associated +documentation files (the "Software"), to deal in the +Software without restriction, including without +limitation the rights to use, copy, modify, merge, +publish, distribute, sublicense, and/or sell copies of +the Software, and to permit persons to whom the Software +is furnished to do so, subject to the following +conditions: + +The above copyright notice and this permission notice +shall be included in all copies or substantial portions +of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF +ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED +TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A +PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY +CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION +OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR +IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER +DEALINGS IN THE SOFTWARE. diff --git a/third_party/rust/crossbeam-utils-0.6.5/README.md b/third_party/rust/crossbeam-utils-0.6.5/README.md new file mode 100644 index 000000000000..a454c141ed76 --- /dev/null +++ b/third_party/rust/crossbeam-utils-0.6.5/README.md @@ -0,0 +1,72 @@ +# Crossbeam Utils + +[![Build Status](https://travis-ci.org/crossbeam-rs/crossbeam.svg?branch=master)]( +https://travis-ci.org/crossbeam-rs/crossbeam) +[![License](https://img.shields.io/badge/license-MIT%2FApache--2.0-blue.svg)]( +https://github.com/crossbeam-rs/crossbeam-utils/tree/master/src) +[![Cargo](https://img.shields.io/crates/v/crossbeam-utils.svg)]( +https://crates.io/crates/crossbeam-utils) +[![Documentation](https://docs.rs/crossbeam-utils/badge.svg)]( +https://docs.rs/crossbeam-utils) +[![Rust 1.26+](https://img.shields.io/badge/rust-1.26+-lightgray.svg)]( +https://www.rust-lang.org) + +This crate provides miscellaneous tools for concurrent programming: + +#### Atomics + +* [`AtomicCell`], a thread-safe mutable memory location.(\*) +* [`AtomicConsume`], for reading from primitive atomic types with "consume" ordering.(\*) + +#### Thread synchronization + +* [`Parker`], a thread parking primitive. +* [`ShardedLock`], a sharded reader-writer lock with fast concurrent reads. +* [`WaitGroup`], for synchronizing the beginning or end of some computation. + +#### Utilities + +* [`Backoff`], for exponential backoff in spin loops.(\*) +* [`CachePadded`], for padding and aligning a value to the length of a cache line.(\*) +* [`scope`], for spawning threads that borrow local variables from the stack. 
+ +*Features marked with (\*) can be used in `no_std` environments.* + +[`AtomicCell`]: https://docs.rs/crossbeam-utils/*/crossbeam_utils/atomic/struct.AtomicCell.html +[`AtomicConsume`]: https://docs.rs/crossbeam-utils/*/crossbeam_utils/atomic/trait.AtomicConsume.html +[`Parker`]: https://docs.rs/crossbeam-utils/*/crossbeam_utils/sync/struct.Parker.html +[`ShardedLock`]: https://docs.rs/crossbeam-utils/*/crossbeam_utils/sync/struct.ShardedLock.html +[`WaitGroup`]: https://docs.rs/crossbeam-utils/*/crossbeam_utils/sync/struct.WaitGroup.html +[`Backoff`]: https://docs.rs/crossbeam-utils/*/crossbeam_utils/struct.Backoff.html +[`CachePadded`]: https://docs.rs/crossbeam-utils/*/crossbeam_utils/struct.CachePadded.html +[`scope`]: https://docs.rs/crossbeam-utils/*/crossbeam_utils/thread/fn.scope.html + +## Usage + +Add this to your `Cargo.toml`: + +```toml +[dependencies] +crossbeam-utils = "0.6" +``` + +Next, add this to your crate: + +```rust +extern crate crossbeam_utils; +``` + +## License + +Licensed under either of + + * Apache License, Version 2.0 ([LICENSE-APACHE](LICENSE-APACHE) or http://www.apache.org/licenses/LICENSE-2.0) + * MIT license ([LICENSE-MIT](LICENSE-MIT) or http://opensource.org/licenses/MIT) + +at your option. + +#### Contribution + +Unless you explicitly state otherwise, any contribution intentionally submitted +for inclusion in the work by you, as defined in the Apache-2.0 license, shall be +dual licensed as above, without any additional terms or conditions. diff --git a/third_party/rust/crossbeam-utils-0.6.5/benches/atomic_cell.rs b/third_party/rust/crossbeam-utils-0.6.5/benches/atomic_cell.rs new file mode 100644 index 000000000000..aae17d41192a --- /dev/null +++ b/third_party/rust/crossbeam-utils-0.6.5/benches/atomic_cell.rs @@ -0,0 +1,159 @@ +#![feature(test)] + +extern crate crossbeam_utils; +extern crate test; + +use std::sync::Barrier; + +use crossbeam_utils::atomic::AtomicCell; +use crossbeam_utils::thread; + +#[bench] +fn load_u8(b: &mut test::Bencher) { + let a = AtomicCell::new(0u8); + let mut sum = 0; + b.iter(|| sum += a.load()); + test::black_box(sum); +} + +#[bench] +fn store_u8(b: &mut test::Bencher) { + let a = AtomicCell::new(0u8); + b.iter(|| a.store(1)); +} + +#[bench] +fn fetch_add_u8(b: &mut test::Bencher) { + let a = AtomicCell::new(0u8); + b.iter(|| a.fetch_add(1)); +} + +#[bench] +fn compare_and_swap_u8(b: &mut test::Bencher) { + let a = AtomicCell::new(0u8); + let mut i = 0; + b.iter(|| { + a.compare_and_swap(i, i.wrapping_add(1)); + i = i.wrapping_add(1); + }); +} + +#[bench] +fn concurrent_load_u8(b: &mut test::Bencher) { + const THREADS: usize = 2; + const STEPS: usize = 1_000_000; + + let start = Barrier::new(THREADS + 1); + let end = Barrier::new(THREADS + 1); + let exit = AtomicCell::new(false); + + let a = AtomicCell::new(0u8); + + thread::scope(|scope| { + for _ in 0..THREADS { + scope.spawn(|_| { + loop { + start.wait(); + + let mut sum = 0; + for _ in 0..STEPS { + sum += a.load(); + } + test::black_box(sum); + + end.wait(); + if exit.load() { + break; + } + } + }); + } + + start.wait(); + end.wait(); + + b.iter(|| { + start.wait(); + end.wait(); + }); + + start.wait(); + exit.store(true); + end.wait(); + }).unwrap(); +} + +#[bench] +fn load_usize(b: &mut test::Bencher) { + let a = AtomicCell::new(0usize); + let mut sum = 0; + b.iter(|| sum += a.load()); + test::black_box(sum); +} + +#[bench] +fn store_usize(b: &mut test::Bencher) { + let a = AtomicCell::new(0usize); + b.iter(|| a.store(1)); +} + +#[bench] +fn fetch_add_usize(b: &mut 
test::Bencher) { + let a = AtomicCell::new(0usize); + b.iter(|| a.fetch_add(1)); +} + +#[bench] +fn compare_and_swap_usize(b: &mut test::Bencher) { + let a = AtomicCell::new(0usize); + let mut i = 0; + b.iter(|| { + a.compare_and_swap(i, i.wrapping_add(1)); + i = i.wrapping_add(1); + }); +} + +#[bench] +fn concurrent_load_usize(b: &mut test::Bencher) { + const THREADS: usize = 2; + const STEPS: usize = 1_000_000; + + let start = Barrier::new(THREADS + 1); + let end = Barrier::new(THREADS + 1); + let exit = AtomicCell::new(false); + + let a = AtomicCell::new(0usize); + + thread::scope(|scope| { + for _ in 0..THREADS { + scope.spawn(|_| { + loop { + start.wait(); + + let mut sum = 0; + for _ in 0..STEPS { + sum += a.load(); + } + test::black_box(sum); + + end.wait(); + if exit.load() { + break; + } + } + }); + } + + start.wait(); + end.wait(); + + b.iter(|| { + start.wait(); + end.wait(); + }); + + start.wait(); + exit.store(true); + end.wait(); + }).unwrap(); +} diff --git a/third_party/rust/crossbeam-utils-0.6.5/src/atomic/atomic_cell.rs b/third_party/rust/crossbeam-utils-0.6.5/src/atomic/atomic_cell.rs new file mode 100644 index 000000000000..0d4670801e65 --- /dev/null +++ b/third_party/rust/crossbeam-utils-0.6.5/src/atomic/atomic_cell.rs @@ -0,0 +1,924 @@ +use core::cell::UnsafeCell; +use core::fmt; +use core::mem; +use core::ptr; +use core::slice; +use core::sync::atomic::{self, AtomicBool, AtomicUsize, Ordering}; + +use Backoff; + + + + + + + + + + + +pub struct AtomicCell { + + + + + + value: UnsafeCell, +} + +unsafe impl Send for AtomicCell {} +unsafe impl Sync for AtomicCell {} + +impl AtomicCell { + + + + + + + + + + pub fn new(val: T) -> AtomicCell { + AtomicCell { + value: UnsafeCell::new(val), + } + } + + + + + + + + + + + + + + pub fn get_mut(&mut self) -> &mut T { + unsafe { &mut *self.value.get() } + } + + + + + + + + + + + + + + pub fn into_inner(self) -> T { + self.value.into_inner() + } + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + pub fn is_lock_free() -> bool { + atomic_is_lock_free::() + } + + + + + + + + + + + + + + + pub fn store(&self, val: T) { + if mem::needs_drop::() { + drop(self.swap(val)); + } else { + unsafe { + atomic_store(self.value.get(), val); + } + } + } + + + + + + + + + + + + + + + pub fn swap(&self, val: T) -> T { + unsafe { atomic_swap(self.value.get(), val) } + } +} + +impl AtomicCell { + + + + + + + + + + + + pub fn load(&self) -> T { + unsafe { atomic_load(self.value.get()) } + } +} + +impl AtomicCell { + + + + + + + + + + + + + + + + + + + pub fn compare_and_swap(&self, current: T, new: T) -> T { + match self.compare_exchange(current, new) { + Ok(v) => v, + Err(v) => v, + } + } + + + + + + + + + + + + + + + + + + + + pub fn compare_exchange(&self, mut current: T, new: T) -> Result { + loop { + match unsafe { atomic_compare_exchange_weak(self.value.get(), current, new) } { + Ok(_) => return Ok(current), + Err(previous) => { + if previous != current { + return Err(previous); + } + + + + + + current = previous; + } + } + } + } +} + +macro_rules! impl_arithmetic { + ($t:ty, $example:tt) => { + impl AtomicCell<$t> { + /// Increments the current value by `val` and returns the previous value. + /// + /// The addition wraps on overflow. 
+ /// + /// # Examples + /// + /// ``` + /// use crossbeam_utils::atomic::AtomicCell; + /// + #[doc = $example] + /// + /// assert_eq!(a.fetch_add(3), 7); + /// assert_eq!(a.load(), 10); + /// ``` + #[inline] + pub fn fetch_add(&self, val: $t) -> $t { + if can_transmute::<$t, atomic::AtomicUsize>() { + let a = unsafe { &*(self.value.get() as *const atomic::AtomicUsize) }; + a.fetch_add(val as usize, Ordering::SeqCst) as $t + } else { + let _guard = lock(self.value.get() as usize).write(); + let value = unsafe { &mut *(self.value.get()) }; + let old = *value; + *value = value.wrapping_add(val); + old + } + } + + /// Decrements the current value by `val` and returns the previous value. + /// + /// The subtraction wraps on overflow. + /// + /// # Examples + /// + /// ``` + /// use crossbeam_utils::atomic::AtomicCell; + /// + #[doc = $example] + /// + /// assert_eq!(a.fetch_sub(3), 7); + /// assert_eq!(a.load(), 4); + /// ``` + #[inline] + pub fn fetch_sub(&self, val: $t) -> $t { + if can_transmute::<$t, atomic::AtomicUsize>() { + let a = unsafe { &*(self.value.get() as *const atomic::AtomicUsize) }; + a.fetch_sub(val as usize, Ordering::SeqCst) as $t + } else { + let _guard = lock(self.value.get() as usize).write(); + let value = unsafe { &mut *(self.value.get()) }; + let old = *value; + *value = value.wrapping_sub(val); + old + } + } + + /// Applies bitwise "and" to the current value and returns the previous value. + /// + /// # Examples + /// + /// ``` + /// use crossbeam_utils::atomic::AtomicCell; + /// + #[doc = $example] + /// + /// assert_eq!(a.fetch_and(3), 7); + /// assert_eq!(a.load(), 3); + /// ``` + #[inline] + pub fn fetch_and(&self, val: $t) -> $t { + if can_transmute::<$t, atomic::AtomicUsize>() { + let a = unsafe { &*(self.value.get() as *const atomic::AtomicUsize) }; + a.fetch_and(val as usize, Ordering::SeqCst) as $t + } else { + let _guard = lock(self.value.get() as usize).write(); + let value = unsafe { &mut *(self.value.get()) }; + let old = *value; + *value &= val; + old + } + } + + /// Applies bitwise "or" to the current value and returns the previous value. + /// + /// # Examples + /// + /// ``` + /// use crossbeam_utils::atomic::AtomicCell; + /// + #[doc = $example] + /// + /// assert_eq!(a.fetch_or(16), 7); + /// assert_eq!(a.load(), 23); + /// ``` + #[inline] + pub fn fetch_or(&self, val: $t) -> $t { + if can_transmute::<$t, atomic::AtomicUsize>() { + let a = unsafe { &*(self.value.get() as *const atomic::AtomicUsize) }; + a.fetch_or(val as usize, Ordering::SeqCst) as $t + } else { + let _guard = lock(self.value.get() as usize).write(); + let value = unsafe { &mut *(self.value.get()) }; + let old = *value; + *value |= val; + old + } + } + + /// Applies bitwise "xor" to the current value and returns the previous value. + /// + /// # Examples + /// + /// ``` + /// use crossbeam_utils::atomic::AtomicCell; + /// + #[doc = $example] + /// + /// assert_eq!(a.fetch_xor(2), 7); + /// assert_eq!(a.load(), 5); + /// ``` + #[inline] + pub fn fetch_xor(&self, val: $t) -> $t { + if can_transmute::<$t, atomic::AtomicUsize>() { + let a = unsafe { &*(self.value.get() as *const atomic::AtomicUsize) }; + a.fetch_xor(val as usize, Ordering::SeqCst) as $t + } else { + let _guard = lock(self.value.get() as usize).write(); + let value = unsafe { &mut *(self.value.get()) }; + let old = *value; + *value ^= val; + old + } + } + } + }; + ($t:ty, $atomic:ty, $example:tt) => { + impl AtomicCell<$t> { + /// Increments the current value by `val` and returns the previous value. 
+ /// + /// The addition wraps on overflow. + /// + /// # Examples + /// + /// ``` + /// use crossbeam_utils::atomic::AtomicCell; + /// + #[doc = $example] + /// + /// assert_eq!(a.fetch_add(3), 7); + /// assert_eq!(a.load(), 10); + /// ``` + #[inline] + pub fn fetch_add(&self, val: $t) -> $t { + let a = unsafe { &*(self.value.get() as *const $atomic) }; + a.fetch_add(val, Ordering::SeqCst) + } + + /// Decrements the current value by `val` and returns the previous value. + /// + /// The subtraction wraps on overflow. + /// + /// # Examples + /// + /// ``` + /// use crossbeam_utils::atomic::AtomicCell; + /// + #[doc = $example] + /// + /// assert_eq!(a.fetch_sub(3), 7); + /// assert_eq!(a.load(), 4); + /// ``` + #[inline] + pub fn fetch_sub(&self, val: $t) -> $t { + let a = unsafe { &*(self.value.get() as *const $atomic) }; + a.fetch_sub(val, Ordering::SeqCst) + } + + /// Applies bitwise "and" to the current value and returns the previous value. + /// + /// # Examples + /// + /// ``` + /// use crossbeam_utils::atomic::AtomicCell; + /// + #[doc = $example] + /// + /// assert_eq!(a.fetch_and(3), 7); + /// assert_eq!(a.load(), 3); + /// ``` + #[inline] + pub fn fetch_and(&self, val: $t) -> $t { + let a = unsafe { &*(self.value.get() as *const $atomic) }; + a.fetch_and(val, Ordering::SeqCst) + } + + /// Applies bitwise "or" to the current value and returns the previous value. + /// + /// # Examples + /// + /// ``` + /// use crossbeam_utils::atomic::AtomicCell; + /// + #[doc = $example] + /// + /// assert_eq!(a.fetch_or(16), 7); + /// assert_eq!(a.load(), 23); + /// ``` + #[inline] + pub fn fetch_or(&self, val: $t) -> $t { + let a = unsafe { &*(self.value.get() as *const $atomic) }; + a.fetch_or(val, Ordering::SeqCst) + } + + /// Applies bitwise "xor" to the current value and returns the previous value. + /// + /// # Examples + /// + /// ``` + /// use crossbeam_utils::atomic::AtomicCell; + /// + #[doc = $example] + /// + /// assert_eq!(a.fetch_xor(2), 7); + /// assert_eq!(a.load(), 5); + /// ``` + #[inline] + pub fn fetch_xor(&self, val: $t) -> $t { + let a = unsafe { &*(self.value.get() as *const $atomic) }; + a.fetch_xor(val, Ordering::SeqCst) + } + } + }; + ($t:ty, $size:tt, $atomic:ty, $example:tt) => { + #[cfg(target_has_atomic = $size)] + impl_arithmetic!($t, $atomic, $example); + }; +} + +cfg_if! 
{ + if #[cfg(feature = "nightly")] { + impl_arithmetic!(u8, "8", atomic::AtomicU8, "let a = AtomicCell::new(7u8);"); + impl_arithmetic!(i8, "8", atomic::AtomicI8, "let a = AtomicCell::new(7i8);"); + impl_arithmetic!(u16, "16", atomic::AtomicU16, "let a = AtomicCell::new(7u16);"); + impl_arithmetic!(i16, "16", atomic::AtomicI16, "let a = AtomicCell::new(7i16);"); + impl_arithmetic!(u32, "32", atomic::AtomicU32, "let a = AtomicCell::new(7u32);"); + impl_arithmetic!(i32, "32", atomic::AtomicI32, "let a = AtomicCell::new(7i32);"); + impl_arithmetic!(u64, "64", atomic::AtomicU64, "let a = AtomicCell::new(7u64);"); + impl_arithmetic!(i64, "64", atomic::AtomicI64, "let a = AtomicCell::new(7i64);"); + impl_arithmetic!(u128, "let a = AtomicCell::new(7u128);"); + impl_arithmetic!(i128, "let a = AtomicCell::new(7i128);"); + } else { + impl_arithmetic!(u8, "let a = AtomicCell::new(7u8);"); + impl_arithmetic!(i8, "let a = AtomicCell::new(7i8);"); + impl_arithmetic!(u16, "let a = AtomicCell::new(7u16);"); + impl_arithmetic!(i16, "let a = AtomicCell::new(7i16);"); + impl_arithmetic!(u32, "let a = AtomicCell::new(7u32);"); + impl_arithmetic!(i32, "let a = AtomicCell::new(7i32);"); + impl_arithmetic!(u64, "let a = AtomicCell::new(7u64);"); + impl_arithmetic!(i64, "let a = AtomicCell::new(7i64);"); + impl_arithmetic!(u128, "let a = AtomicCell::new(7u128);"); + impl_arithmetic!(i128, "let a = AtomicCell::new(7i128);"); + } +} + +impl_arithmetic!( + usize, + atomic::AtomicUsize, + "let a = AtomicCell::new(7usize);" +); +impl_arithmetic!( + isize, + atomic::AtomicIsize, + "let a = AtomicCell::new(7isize);" +); + +impl AtomicCell<bool> { + /// Applies logical "and" to the current value and returns the previous value. + #[inline] + pub fn fetch_and(&self, val: bool) -> bool { + let a = unsafe { &*(self.value.get() as *const AtomicBool) }; + a.fetch_and(val, Ordering::SeqCst) + } + + /// Applies logical "or" to the current value and returns the previous value. + #[inline] + pub fn fetch_or(&self, val: bool) -> bool { + let a = unsafe { &*(self.value.get() as *const AtomicBool) }; + a.fetch_or(val, Ordering::SeqCst) + } + + /// Applies logical "xor" to the current value and returns the previous value. + #[inline] + pub fn fetch_xor(&self, val: bool) -> bool { + let a = unsafe { &*(self.value.get() as *const AtomicBool) }; + a.fetch_xor(val, Ordering::SeqCst) + } +} + +impl<T: Default> Default for AtomicCell<T> { + fn default() -> AtomicCell<T> { + AtomicCell::new(T::default()) + } +} + +impl<T: Copy + fmt::Debug> fmt::Debug for AtomicCell<T> { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + f.debug_struct("AtomicCell") + .field("value", &self.load()) + .finish() + } +} + +/// Returns `true` if the two values are equal byte-for-byte. +fn byte_eq<T>(a: &T, b: &T) -> bool { + unsafe { + let a = slice::from_raw_parts(a as *const _ as *const u8, mem::size_of::<T>()); + let b = slice::from_raw_parts(b as *const _ as *const u8, mem::size_of::<T>()); + a == b + } +} + +/// Returns `true` if values of type `A` can be transmuted into values of type `B`. +fn can_transmute<A, B>() -> bool { + // Sizes must be equal, and the alignment of `A` must be at least that of `B`. + mem::size_of::<A>() == mem::size_of::<B>() && mem::align_of::<A>() >= mem::align_of::<B>() +} + +/// A simple stamped lock. +struct Lock { + /// The current state: `1` means a writer holds the lock; any even value is a stamp that gets bumped on every write. + state: AtomicUsize, +} + +impl Lock { + /// If not locked by a writer, returns the current stamp. + #[inline] + fn optimistic_read(&self) -> Option<usize> { + let state = self.state.load(Ordering::Acquire); + if state == 1 { + None + } else { + Some(state) + } + } + + /// Returns `true` if the stamp is still `stamp`, i.e. no write occurred since the optimistic read began. + #[inline] + fn validate_read(&self, stamp: usize) -> bool { + atomic::fence(Ordering::Acquire); + self.state.load(Ordering::Relaxed) == stamp + } + + /// Grabs the lock for writing. + #[inline] + fn write(&'static self) -> WriteGuard { + let backoff = Backoff::new(); + loop { + let previous = self.state.swap(1, Ordering::Acquire); + + if previous != 1 { + atomic::fence(Ordering::Release); + + return WriteGuard { + lock: self, + state: previous, + }; + } + + backoff.snooze(); + } + } +} +
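The `Lock` above is the seqlock-style fallback that `AtomicCell` uses when a value is too large for a native atomic: a writer sets `state` to 1 and bumps an even stamp on release, while a reader snapshots the stamp, reads the value, and then validates that the stamp is unchanged. A standalone sketch of that read protocol under the same state encoding (`SeqLock` and `read_guarded` are hypothetical names for illustration, not the crate's API; the vendored code falls back to taking the write lock rather than retrying forever):

```rust
use std::cell::UnsafeCell;
use std::ptr;
use std::sync::atomic::{fence, AtomicUsize, Ordering};

// Stamped lock: `state == 1` means a writer is active; any even value
// is a stamp that writers bump when they release the lock.
struct SeqLock {
    state: AtomicUsize,
}

impl SeqLock {
    // If no writer is active, return the current stamp.
    fn optimistic_read(&self) -> Option<usize> {
        let state = self.state.load(Ordering::Acquire);
        if state == 1 {
            None
        } else {
            Some(state)
        }
    }

    // A read is valid only if the stamp has not changed since it started.
    fn validate_read(&self, stamp: usize) -> bool {
        // Order the preceding data read before re-checking the stamp.
        fence(Ordering::Acquire);
        self.state.load(Ordering::Relaxed) == stamp
    }
}

// Retry until a stamp-validated snapshot of `value` is obtained.
fn read_guarded(lock: &SeqLock, value: &UnsafeCell<u64>) -> u64 {
    loop {
        if let Some(stamp) = lock.optimistic_read() {
            // Volatile read: a writer may be racing with us, and the result
            // is discarded unless validation succeeds.
            let val = unsafe { ptr::read_volatile(value.get()) };
            if lock.validate_read(stamp) {
                return val;
            }
        }
        std::hint::spin_loop();
    }
}

fn main() {
    let lock = SeqLock { state: AtomicUsize::new(0) };
    let value = UnsafeCell::new(42u64);
    assert_eq!(read_guarded(&lock, &value), 42);
}
```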
+ +struct WriteGuard { + + lock: &'static Lock, + + + state: usize, +} + +impl WriteGuard { + + #[inline] + fn abort(self) { + self.lock.state.store(self.state, Ordering::Release); + } +} + +impl Drop for WriteGuard { + #[inline] + fn drop(&mut self) { + + self.lock + .state + .store(self.state.wrapping_add(2), Ordering::Release); + } +} + + + + + + + + + +#[inline] +#[must_use] +fn lock(addr: usize) -> &'static Lock { + + + + + + + + + + + + + + + + + + + + + + + const LEN: usize = 97; + + const L: Lock = Lock { + state: AtomicUsize::new(0), + }; + static LOCKS: [Lock; LEN] = [ + L, L, L, L, L, L, L, L, L, L, L, L, L, L, L, L, L, L, L, L, L, L, L, L, L, L, L, L, L, L, + L, L, L, L, L, L, L, L, L, L, L, L, L, L, L, L, L, L, L, L, L, L, L, L, L, L, L, L, L, L, + L, L, L, L, L, L, L, L, L, L, L, L, L, L, L, L, L, L, L, L, L, L, L, L, L, L, L, L, L, L, + L, L, L, L, L, L, L, + ]; + + + + &LOCKS[addr % LEN] +} + + + + +struct AtomicUnit; + +impl AtomicUnit { + #[inline] + fn load(&self, _order: Ordering) {} + + #[inline] + fn store(&self, _val: (), _order: Ordering) {} + + #[inline] + fn swap(&self, _val: (), _order: Ordering) {} + + #[inline] + fn compare_exchange_weak( + &self, + _current: (), + _new: (), + _success: Ordering, + _failure: Ordering, + ) -> Result<(), ()> { + Ok(()) + } +} + +macro_rules! atomic { + + + (@check, $t:ty, $atomic:ty, $a:ident, $atomic_op:expr) => { + if can_transmute::<$t, $atomic>() { + let $a: &$atomic; + break $atomic_op; + } + }; + + + + + ($t:ty, $a:ident, $atomic_op:expr, $fallback_op:expr) => { + loop { + atomic!(@check, $t, AtomicUnit, $a, $atomic_op); + atomic!(@check, $t, atomic::AtomicUsize, $a, $atomic_op); + + #[cfg(feature = "nightly")] + { + #[cfg(target_has_atomic = "8")] + atomic!(@check, $t, atomic::AtomicU8, $a, $atomic_op); + #[cfg(target_has_atomic = "16")] + atomic!(@check, $t, atomic::AtomicU16, $a, $atomic_op); + #[cfg(target_has_atomic = "32")] + atomic!(@check, $t, atomic::AtomicU32, $a, $atomic_op); + #[cfg(target_has_atomic = "64")] + atomic!(@check, $t, atomic::AtomicU64, $a, $atomic_op); + } + + break $fallback_op; + } + }; +} + + +fn atomic_is_lock_free() -> bool { + atomic! { T, _a, true, false } +} + + + + + +unsafe fn atomic_load(src: *mut T) -> T +where + T: Copy, +{ + atomic! { + T, a, + { + a = &*(src as *const _ as *const _); + mem::transmute_copy(&a.load(Ordering::SeqCst)) + }, + { + let lock = lock(src as usize); + + // Try doing an optimistic read first. + if let Some(stamp) = lock.optimistic_read() { + // We need a volatile read here because other threads might concurrently modify the + // value. In theory, data races are *always* UB, even if we use volatile reads and + // discard the data when a data race is detected. The proper solution would be to + // do atomic reads and atomic writes, but we can't atomically read and write all + // kinds of data since `AtomicU8` is not available on stable Rust yet. + let val = ptr::read_volatile(src); + + if lock.validate_read(stamp) { + return val; + } + } + + // Grab a regular write lock so that writers don't starve this load. + let guard = lock.write(); + let val = ptr::read(src); + // The value hasn't been changed. Drop the guard without incrementing the stamp. + guard.abort(); + val + } + } +} + + + + + +unsafe fn atomic_store(dst: *mut T, val: T) { + atomic! 
{ + T, a, + { + a = &*(dst as *const _ as *const _); + let res = a.store(mem::transmute_copy(&val), Ordering::SeqCst); + mem::forget(val); + res + }, + { + let _guard = lock(dst as usize).write(); + ptr::write(dst, val) + } + } +} + + + + + +unsafe fn atomic_swap(dst: *mut T, val: T) -> T { + atomic! { + T, a, + { + a = &*(dst as *const _ as *const _); + let res = mem::transmute_copy(&a.swap(mem::transmute_copy(&val), Ordering::SeqCst)); + mem::forget(val); + res + }, + { + let _guard = lock(dst as usize).write(); + ptr::replace(dst, val) + } + } +} + + + + + + + + +unsafe fn atomic_compare_exchange_weak(dst: *mut T, current: T, new: T) -> Result +where + T: Copy, +{ + atomic! { + T, a, + { + a = &*(dst as *const _ as *const _); + let res = a.compare_exchange_weak( + mem::transmute_copy(¤t), + mem::transmute_copy(&new), + Ordering::SeqCst, + Ordering::SeqCst, + ); + match res { + Ok(v) => Ok(mem::transmute_copy(&v)), + Err(v) => Err(mem::transmute_copy(&v)), + } + }, + { + let guard = lock(dst as usize).write(); + + if byte_eq(&*dst, ¤t) { + Ok(ptr::replace(dst, new)) + } else { + let val = ptr::read(dst); + // The value hasn't been changed. Drop the guard without incrementing the stamp. + guard.abort(); + Err(val) + } + } + } +} diff --git a/third_party/rust/crossbeam-utils-0.6.5/src/atomic/consume.rs b/third_party/rust/crossbeam-utils-0.6.5/src/atomic/consume.rs new file mode 100644 index 000000000000..bfd80680281c --- /dev/null +++ b/third_party/rust/crossbeam-utils-0.6.5/src/atomic/consume.rs @@ -0,0 +1,82 @@ +#[cfg(any(target_arch = "arm", target_arch = "aarch64"))] +use core::sync::atomic::compiler_fence; +use core::sync::atomic::Ordering; + + +pub trait AtomicConsume { + + type Val; + + + + + + + + + + + + + + + + + fn load_consume(&self) -> Self::Val; +} + +#[cfg(any(target_arch = "arm", target_arch = "aarch64"))] +macro_rules! impl_consume { + () => { + #[inline] + fn load_consume(&self) -> Self::Val { + let result = self.load(Ordering::Relaxed); + compiler_fence(Ordering::Acquire); + result + } + }; +} + +#[cfg(not(any(target_arch = "arm", target_arch = "aarch64")))] +macro_rules! impl_consume { + () => { + #[inline] + fn load_consume(&self) -> Self::Val { + self.load(Ordering::Acquire) + } + }; +} + +macro_rules! 
impl_atomic { + ($atomic:ident, $val:ty) => { + impl AtomicConsume for ::core::sync::atomic::$atomic { + type Val = $val; + impl_consume!(); + } + }; +} + +impl_atomic!(AtomicBool, bool); +impl_atomic!(AtomicUsize, usize); +impl_atomic!(AtomicIsize, isize); +#[cfg(all(feature = "nightly", target_has_atomic = "8"))] +impl_atomic!(AtomicU8, u8); +#[cfg(all(feature = "nightly", target_has_atomic = "8"))] +impl_atomic!(AtomicI8, i8); +#[cfg(all(feature = "nightly", target_has_atomic = "16"))] +impl_atomic!(AtomicU16, u16); +#[cfg(all(feature = "nightly", target_has_atomic = "16"))] +impl_atomic!(AtomicI16, i16); +#[cfg(all(feature = "nightly", target_has_atomic = "32"))] +impl_atomic!(AtomicU32, u32); +#[cfg(all(feature = "nightly", target_has_atomic = "32"))] +impl_atomic!(AtomicI32, i32); +#[cfg(all(feature = "nightly", target_has_atomic = "64"))] +impl_atomic!(AtomicU64, u64); +#[cfg(all(feature = "nightly", target_has_atomic = "64"))] +impl_atomic!(AtomicI64, i64); + +impl AtomicConsume for ::core::sync::atomic::AtomicPtr { + type Val = *mut T; + impl_consume!(); +} diff --git a/third_party/rust/crossbeam-utils-0.6.5/src/atomic/mod.rs b/third_party/rust/crossbeam-utils-0.6.5/src/atomic/mod.rs new file mode 100644 index 000000000000..82e5ece8bcb0 --- /dev/null +++ b/third_party/rust/crossbeam-utils-0.6.5/src/atomic/mod.rs @@ -0,0 +1,7 @@ + + +mod atomic_cell; +mod consume; + +pub use self::atomic_cell::AtomicCell; +pub use self::consume::AtomicConsume; diff --git a/third_party/rust/crossbeam-utils-0.6.5/src/backoff.rs b/third_party/rust/crossbeam-utils-0.6.5/src/backoff.rs new file mode 100644 index 000000000000..fb09fd8806b6 --- /dev/null +++ b/third_party/rust/crossbeam-utils-0.6.5/src/backoff.rs @@ -0,0 +1,294 @@ +use core::cell::Cell; +use core::fmt; +use core::sync::atomic; + +const SPIN_LIMIT: u32 = 6; +const YIELD_LIMIT: u32 = 10; + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +pub struct Backoff { + step: Cell, +} + +impl Backoff { + + + + + + + + + + #[inline] + pub fn new() -> Self { + Backoff { + step: Cell::new(0), + } + } + + + + + + + + + + + + #[inline] + pub fn reset(&self) { + self.step.set(0); + } + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + #[inline] + pub fn spin(&self) { + for _ in 0..1 << self.step.get().min(SPIN_LIMIT) { + atomic::spin_loop_hint(); + } + + if self.step.get() <= SPIN_LIMIT { + self.step.set(self.step.get() + 1); + } + } + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + #[inline] + pub fn snooze(&self) { + if self.step.get() <= SPIN_LIMIT { + for _ in 0..1 << self.step.get() { + atomic::spin_loop_hint(); + } + } else { + #[cfg(not(feature = "std"))] + for _ in 0..1 << self.step.get() { + atomic::spin_loop_hint(); + } + + #[cfg(feature = "std")] + ::std::thread::yield_now(); + } + + if self.step.get() <= YIELD_LIMIT { + self.step.set(self.step.get() + 1); + } + } + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + #[inline] + pub fn is_completed(&self) -> bool { + self.step.get() > YIELD_LIMIT + } + + #[inline] + #[doc(hidden)] + #[deprecated(note = "use `is_completed` instead")] + pub fn is_complete(&self) -> bool { + self.is_completed() + } +} + +impl fmt::Debug for Backoff { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + f.debug_struct("Backoff") + .field("step", &self.step) + .field("is_completed", &self.is_completed()) + .finish() + } +} 
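`Backoff` escalates from busy-spinning with `spin_loop_hint` to yielding the thread as its step counter grows, and `is_completed` reports when even yielding has stopped paying off. A small usage sketch modeled on the pattern the crate's documentation uses for `is_completed` (upstream's example parks the thread at that point; this sketch merely yields, and `wait_until_ready` is an illustrative name):

```rust
extern crate crossbeam_utils;

use std::sync::atomic::{AtomicBool, Ordering};
use std::thread;
use std::time::Duration;

use crossbeam_utils::Backoff;

// Spin-wait for `ready` to become true, escalating from short spins to
// yielding once `Backoff` reports that spinning is no longer useful.
fn wait_until_ready(ready: &AtomicBool) {
    let backoff = Backoff::new();
    while !ready.load(Ordering::Acquire) {
        if backoff.is_completed() {
            // Past the yield limit: stop bumping the step counter and
            // simply yield to the OS scheduler on every iteration.
            thread::yield_now();
        } else {
            backoff.snooze();
        }
    }
}

fn main() {
    let ready = AtomicBool::new(false);
    crossbeam_utils::thread::scope(|s| {
        s.spawn(|_| {
            thread::sleep(Duration::from_millis(10));
            ready.store(true, Ordering::Release);
        });
        wait_until_ready(&ready);
    })
    .unwrap();
}
```

Switching away from pure spinning once `is_completed` returns true keeps a late waiter from burning a core while the signalling thread is descheduled.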
+ +impl Default for Backoff { + fn default() -> Backoff { + Backoff::new() + } +} diff --git a/third_party/rust/crossbeam-utils-0.6.5/src/cache_padded.rs b/third_party/rust/crossbeam-utils-0.6.5/src/cache_padded.rs new file mode 100644 index 000000000000..62258974fe5a --- /dev/null +++ b/third_party/rust/crossbeam-utils-0.6.5/src/cache_padded.rs @@ -0,0 +1,116 @@ +use core::fmt; +use core::ops::{Deref, DerefMut}; + +/// Pads and aligns a value to the length of a cache line. +/// +/// In concurrent programming it is often desirable to keep commonly accessed pieces of data out of the same cache line, so that threads updating different pieces do not invalidate each other's cache lines (false sharing). +#[derive(Clone, Copy, Default, Hash, PartialEq, Eq)] +#[repr(align(64))] +pub struct CachePadded<T> { + value: T, +} + +unsafe impl<T: Send> Send for CachePadded<T> {} +unsafe impl<T: Sync> Sync for CachePadded<T> {} + +impl<T> CachePadded<T> { + /// Pads and aligns a value to the length of a cache line. + pub fn new(t: T) -> CachePadded<T> { + CachePadded::<T> { value: t } + } + + /// Returns the inner value. + pub fn into_inner(self) -> T { + self.value + } +} + +impl<T> Deref for CachePadded<T> { + type Target = T; + + fn deref(&self) -> &T { + &self.value + } +} + +impl<T> DerefMut for CachePadded<T> { + fn deref_mut(&mut self) -> &mut T { + &mut self.value + } +} + +impl<T: fmt::Debug> fmt::Debug for CachePadded<T> { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + f.debug_struct("CachePadded") + .field("value", &self.value) + .finish() + } +} + +impl<T> From<T> for CachePadded<T> { + fn from(t: T) -> Self { + CachePadded::new(t) + } +} diff --git a/third_party/rust/crossbeam-utils-0.6.5/src/lib.rs b/third_party/rust/crossbeam-utils-0.6.5/src/lib.rs new file mode 100644 index 000000000000..27524f6bde19 --- /dev/null +++ b/third_party/rust/crossbeam-utils-0.6.5/src/lib.rs @@ -0,0 +1,67 @@ +//! Miscellaneous tools for concurrent programming: atomics, thread synchronization primitives, and other utilities. + +#![warn(missing_docs)] +#![warn(missing_debug_implementations)] +#![cfg_attr(not(feature = "std"), no_std)] +#![cfg_attr(feature = "nightly", feature(alloc))] +#![cfg_attr(feature = "nightly", feature(cfg_target_has_atomic))] + +#[macro_use] +extern crate cfg_if; +#[cfg(feature = "std")] +extern crate core; + +cfg_if! { + if #[cfg(feature = "nightly")] { + extern crate alloc; + } else { + mod alloc { + extern crate std; + pub use self::std::*; + } + } +} + +pub mod atomic; + +mod cache_padded; +pub use cache_padded::CachePadded; + +mod backoff; +pub use backoff::Backoff; + +cfg_if!
{ + if #[cfg(feature = "std")] { + #[macro_use] + extern crate lazy_static; + + pub mod sync; + pub mod thread; + } +} diff --git a/third_party/rust/crossbeam-utils-0.6.5/src/sync/mod.rs b/third_party/rust/crossbeam-utils-0.6.5/src/sync/mod.rs new file mode 100644 index 000000000000..24643667185a --- /dev/null +++ b/third_party/rust/crossbeam-utils-0.6.5/src/sync/mod.rs @@ -0,0 +1,17 @@ + + + + + + + + + + +mod parker; +mod sharded_lock; +mod wait_group; + +pub use self::sharded_lock::{ShardedLock, ShardedLockReadGuard, ShardedLockWriteGuard}; +pub use self::parker::{Parker, Unparker}; +pub use self::wait_group::WaitGroup; diff --git a/third_party/rust/crossbeam-utils-0.6.5/src/sync/parker.rs b/third_party/rust/crossbeam-utils-0.6.5/src/sync/parker.rs new file mode 100644 index 000000000000..f4dd072fa4f6 --- /dev/null +++ b/third_party/rust/crossbeam-utils-0.6.5/src/sync/parker.rs @@ -0,0 +1,311 @@ +use std::fmt; +use std::marker::PhantomData; +use std::sync::{Arc, Condvar, Mutex}; +use std::sync::atomic::AtomicUsize; +use std::sync::atomic::Ordering::SeqCst; +use std::time::Duration; + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +pub struct Parker { + unparker: Unparker, + _marker: PhantomData<*const ()>, +} + +unsafe impl Send for Parker {} + +impl Parker { + + + + + + + + + + + pub fn new() -> Parker { + Parker { + unparker: Unparker { + inner: Arc::new(Inner { + state: AtomicUsize::new(EMPTY), + lock: Mutex::new(()), + cvar: Condvar::new(), + }), + }, + _marker: PhantomData, + } + } + + + + + + + + + + + + + + + + + + + + + pub fn park(&self) { + self.unparker.inner.park(None); + } + + + + + + + + + + + + + + + + + + pub fn park_timeout(&self, timeout: Duration) { + self.unparker.inner.park(Some(timeout)); + } + + + + + + + + + + + + + + + + + + + + + + + + pub fn unparker(&self) -> &Unparker { + &self.unparker + } +} + +impl fmt::Debug for Parker { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + f.pad("Parker { .. }") + } +} + + + + +pub struct Unparker { + inner: Arc, +} + +unsafe impl Send for Unparker {} +unsafe impl Sync for Unparker {} + +impl Unparker { + + + + + + + + + + + + + + + + + + + + + + + + + + + + pub fn unpark(&self) { + self.inner.unpark() + } +} + +impl fmt::Debug for Unparker { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + f.pad("Unparker { .. 
}") + } +} + +impl Clone for Unparker { + fn clone(&self) -> Unparker { + Unparker { + inner: self.inner.clone(), + } + } +} + +const EMPTY: usize = 0; +const PARKED: usize = 1; +const NOTIFIED: usize = 2; + +struct Inner { + state: AtomicUsize, + lock: Mutex<()>, + cvar: Condvar, +} + +impl Inner { + fn park(&self, timeout: Option) { + + if self.state.compare_exchange(NOTIFIED, EMPTY, SeqCst, SeqCst).is_ok() { + return; + } + + + if let Some(ref dur) = timeout { + if *dur == Duration::from_millis(0) { + return; + } + } + + + let mut m = self.lock.lock().unwrap(); + + match self.state.compare_exchange(EMPTY, PARKED, SeqCst, SeqCst) { + Ok(_) => {} + + Err(NOTIFIED) => { + + + + + + let old = self.state.swap(EMPTY, SeqCst); + assert_eq!(old, NOTIFIED, "park state changed unexpectedly"); + return; + } + Err(n) => panic!("inconsistent park_timeout state: {}", n), + } + + match timeout { + None => { + loop { + + m = self.cvar.wait(m).unwrap(); + + match self.state.compare_exchange(NOTIFIED, EMPTY, SeqCst, SeqCst) { + Ok(_) => return, + Err(_) => {} + } + } + } + Some(timeout) => { + + + + let (_m, _result) = self.cvar.wait_timeout(m, timeout).unwrap(); + + match self.state.swap(EMPTY, SeqCst) { + NOTIFIED => {} + PARKED => {} + n => panic!("inconsistent park_timeout state: {}", n), + } + } + } + } + + pub fn unpark(&self) { + + + + + match self.state.swap(NOTIFIED, SeqCst) { + EMPTY => return, + NOTIFIED => return, + PARKED => {} + _ => panic!("inconsistent state in unpark"), + } + + + + + + + + + + drop(self.lock.lock().unwrap()); + self.cvar.notify_one(); + } +} diff --git a/third_party/rust/crossbeam-utils-0.6.5/src/sync/sharded_lock.rs b/third_party/rust/crossbeam-utils-0.6.5/src/sync/sharded_lock.rs new file mode 100644 index 000000000000..bfd6d680faf7 --- /dev/null +++ b/third_party/rust/crossbeam-utils-0.6.5/src/sync/sharded_lock.rs @@ -0,0 +1,600 @@ +use std::cell::UnsafeCell; +use std::collections::HashMap; +use std::fmt; +use std::marker::PhantomData; +use std::mem; +use std::ops::{Deref, DerefMut}; +use std::panic::{RefUnwindSafe, UnwindSafe}; +use std::sync::{Mutex, RwLock, RwLockReadGuard, RwLockWriteGuard}; +use std::sync::{LockResult, PoisonError, TryLockError, TryLockResult}; +use std::thread::{self, ThreadId}; + +use CachePadded; + + +const NUM_SHARDS: usize = 8; + + +struct Shard { + + lock: RwLock<()>, + + + + + + write_guard: UnsafeCell>>, +} + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +pub struct ShardedLock { + + shards: Box<[CachePadded]>, + + + value: UnsafeCell, +} + +unsafe impl Send for ShardedLock {} +unsafe impl Sync for ShardedLock {} + +impl UnwindSafe for ShardedLock {} +impl RefUnwindSafe for ShardedLock {} + +impl ShardedLock { + + + + + + + + + + pub fn new(value: T) -> ShardedLock { + ShardedLock { + shards: (0..NUM_SHARDS) + .map(|_| CachePadded::new(Shard { + lock: RwLock::new(()), + write_guard: UnsafeCell::new(None), + })) + .collect::>() + .into_boxed_slice(), + value: UnsafeCell::new(value), + } + } + + + + + + + + + + + + + + + + + + + pub fn into_inner(self) -> LockResult { + let is_poisoned = self.is_poisoned(); + let inner = self.value.into_inner(); + + if is_poisoned { + Err(PoisonError::new(inner)) + } else { + Ok(inner) + } + } +} + +impl ShardedLock { + + + + + + + + + + + + + + + + + + + + + + pub fn is_poisoned(&self) -> bool { + self.shards[0].lock.is_poisoned() + } + + + + + + + + + + + + + + + + + + pub fn get_mut(&mut self) -> LockResult<&mut T> { + let is_poisoned = self.is_poisoned(); + 
let inner = unsafe { &mut *self.value.get() }; + + if is_poisoned { + Err(PoisonError::new(inner)) + } else { + Ok(inner) + } + } + + + + + + + + + + + + + + + + + + + + + + + + pub fn try_read(&self) -> TryLockResult> { + + + let current_index = current_index().unwrap_or(0); + let shard_index = current_index & (self.shards.len() - 1); + + match self.shards[shard_index].lock.try_read() { + Ok(guard) => Ok(ShardedLockReadGuard { + lock: self, + _guard: guard, + _marker: PhantomData, + }), + Err(TryLockError::Poisoned(err)) => { + let guard = ShardedLockReadGuard { + lock: self, + _guard: err.into_inner(), + _marker: PhantomData, + }; + Err(TryLockError::Poisoned(PoisonError::new(guard))) + }, + Err(TryLockError::WouldBlock) => Err(TryLockError::WouldBlock), + } + } + + + + + + + + + + + + + + + + + + + + + + + + + + + + + pub fn read(&self) -> LockResult> { + + + let current_index = current_index().unwrap_or(0); + let shard_index = current_index & (self.shards.len() - 1); + + match self.shards[shard_index].lock.read() { + Ok(guard) => Ok(ShardedLockReadGuard { + lock: self, + _guard: guard, + _marker: PhantomData, + }), + Err(err) => Err(PoisonError::new(ShardedLockReadGuard { + lock: self, + _guard: err.into_inner(), + _marker: PhantomData, + })), + } + } + + + + + + + + + + + + + + + + + + + + + + + + pub fn try_write(&self) -> TryLockResult> { + let mut poisoned = false; + let mut blocked = None; + + + for (i, shard) in self.shards.iter().enumerate() { + let guard = match shard.lock.try_write() { + Ok(guard) => guard, + Err(TryLockError::Poisoned(err)) => { + poisoned = true; + err.into_inner() + }, + Err(TryLockError::WouldBlock) => { + blocked = Some(i); + break; + } + }; + + + unsafe { + let guard: RwLockWriteGuard<'static, ()> = mem::transmute(guard); + let dest: *mut _ = shard.write_guard.get(); + *dest = Some(guard); + } + } + + if let Some(i) = blocked { + + for shard in self.shards[0..i].iter().rev() { + unsafe { + let dest: *mut _ = shard.write_guard.get(); + let guard = mem::replace(&mut *dest, None); + drop(guard); + } + } + Err(TryLockError::WouldBlock) + } else if poisoned { + let guard = ShardedLockWriteGuard { + lock: self, + _marker: PhantomData, + }; + Err(TryLockError::Poisoned(PoisonError::new(guard))) + } else { + Ok(ShardedLockWriteGuard { + lock: self, + _marker: PhantomData, + }) + } + } + + + + + + + + + + + + + + + + + + + + + + + pub fn write(&self) -> LockResult> { + let mut poisoned = false; + + + for shard in self.shards.iter() { + let guard = match shard.lock.write() { + Ok(guard) => guard, + Err(err) => { + poisoned = true; + err.into_inner() + } + }; + + + unsafe { + let guard: RwLockWriteGuard<'_, ()> = guard; + let guard: RwLockWriteGuard<'static, ()> = mem::transmute(guard); + let dest: *mut _ = shard.write_guard.get(); + *dest = Some(guard); + } + } + + if poisoned { + Err(PoisonError::new(ShardedLockWriteGuard { + lock: self, + _marker: PhantomData, + })) + } else { + Ok(ShardedLockWriteGuard { + lock: self, + _marker: PhantomData, + }) + } + } +} + +impl fmt::Debug for ShardedLock { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + match self.try_read() { + Ok(guard) => f.debug_struct("ShardedLock").field("data", &&*guard).finish(), + Err(TryLockError::Poisoned(err)) => { + f.debug_struct("ShardedLock").field("data", &&**err.get_ref()).finish() + }, + Err(TryLockError::WouldBlock) => { + struct LockedPlaceholder; + impl fmt::Debug for LockedPlaceholder { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + f.write_str("") + } + } + 
f.debug_struct("ShardedLock").field("data", &LockedPlaceholder).finish() + } + } + } +} + +impl Default for ShardedLock { + fn default() -> ShardedLock { + ShardedLock::new(Default::default()) + } +} + +impl From for ShardedLock { + fn from(t: T) -> Self { + ShardedLock::new(t) + } +} + + + + +pub struct ShardedLockReadGuard<'a, T: ?Sized + 'a> { + lock: &'a ShardedLock, + _guard: RwLockReadGuard<'a, ()>, + _marker: PhantomData>, +} + +unsafe impl<'a, T: ?Sized + Sync> Sync for ShardedLockReadGuard<'a, T> {} + +impl<'a, T: ?Sized> Deref for ShardedLockReadGuard<'a, T> { + type Target = T; + + fn deref(&self) -> &T { + unsafe { &*self.lock.value.get() } + } +} + +impl<'a, T: fmt::Debug> fmt::Debug for ShardedLockReadGuard<'a, T> { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + f.debug_struct("ShardedLockReadGuard") + .field("lock", &self.lock) + .finish() + } +} + +impl<'a, T: ?Sized + fmt::Display> fmt::Display for ShardedLockReadGuard<'a, T> { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + (**self).fmt(f) + } +} + + + + +pub struct ShardedLockWriteGuard<'a, T: ?Sized + 'a> { + lock: &'a ShardedLock, + _marker: PhantomData>, +} + +unsafe impl<'a, T: ?Sized + Sync> Sync for ShardedLockWriteGuard<'a, T> {} + +impl<'a, T: ?Sized> Drop for ShardedLockWriteGuard<'a, T> { + fn drop(&mut self) { + + for shard in self.lock.shards.iter().rev() { + unsafe { + let dest: *mut _ = shard.write_guard.get(); + let guard = mem::replace(&mut *dest, None); + drop(guard); + } + } + } +} + +impl<'a, T: fmt::Debug> fmt::Debug for ShardedLockWriteGuard<'a, T> { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + f.debug_struct("ShardedLockWriteGuard") + .field("lock", &self.lock) + .finish() + } +} + +impl<'a, T: ?Sized + fmt::Display> fmt::Display for ShardedLockWriteGuard<'a, T> { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + (**self).fmt(f) + } +} + +impl<'a, T: ?Sized> Deref for ShardedLockWriteGuard<'a, T> { + type Target = T; + + fn deref(&self) -> &T { + unsafe { &*self.lock.value.get() } + } +} + +impl<'a, T: ?Sized> DerefMut for ShardedLockWriteGuard<'a, T> { + fn deref_mut(&mut self) -> &mut T { + unsafe { &mut *self.lock.value.get() } + } +} + + + + + + + + +#[inline] +fn current_index() -> Option { + REGISTRATION.try_with(|reg| reg.index).ok() +} + + +struct ThreadIndices { + + mapping: HashMap, + + + free_list: Vec, + + + next_index: usize, +} + +lazy_static! { + static ref THREAD_INDICES: Mutex = Mutex::new(ThreadIndices { + mapping: HashMap::new(), + free_list: Vec::new(), + next_index: 0, + }); +} + + + + +struct Registration { + index: usize, + thread_id: ThreadId, +} + +impl Drop for Registration { + fn drop(&mut self) { + let mut indices = THREAD_INDICES.lock().unwrap(); + indices.mapping.remove(&self.thread_id); + indices.free_list.push(self.index); + } +} + +thread_local! 
{ + static REGISTRATION: Registration = { + let thread_id = thread::current().id(); + let mut indices = THREAD_INDICES.lock().unwrap(); + + let index = match indices.free_list.pop() { + Some(i) => i, + None => { + let i = indices.next_index; + indices.next_index += 1; + i + } + }; + indices.mapping.insert(thread_id, index); + + Registration { + index, + thread_id, + } + }; +} diff --git a/third_party/rust/crossbeam-utils-0.6.5/src/sync/wait_group.rs b/third_party/rust/crossbeam-utils-0.6.5/src/sync/wait_group.rs new file mode 100644 index 000000000000..aca66431919a --- /dev/null +++ b/third_party/rust/crossbeam-utils-0.6.5/src/sync/wait_group.rs @@ -0,0 +1,139 @@ +use std::fmt; +use std::sync::{Arc, Condvar, Mutex}; + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +pub struct WaitGroup { + inner: Arc, +} + + +struct Inner { + cvar: Condvar, + count: Mutex, +} + +impl WaitGroup { + + + + + + + + + + pub fn new() -> WaitGroup { + WaitGroup { + inner: Arc::new(Inner { + cvar: Condvar::new(), + count: Mutex::new(1), + }), + } + } + + + + + + + + + + + + + + + + + + + + + + + pub fn wait(self) { + if *self.inner.count.lock().unwrap() == 1 { + return; + } + + let inner = self.inner.clone(); + drop(self); + + let mut count = inner.count.lock().unwrap(); + while *count > 0 { + count = inner.cvar.wait(count).unwrap(); + } + } +} + +impl Drop for WaitGroup { + fn drop(&mut self) { + let mut count = self.inner.count.lock().unwrap(); + *count -= 1; + + if *count == 0 { + self.inner.cvar.notify_all(); + } + } +} + +impl Clone for WaitGroup { + fn clone(&self) -> WaitGroup { + let mut count = self.inner.count.lock().unwrap(); + *count += 1; + + WaitGroup { + inner: self.inner.clone(), + } + } +} + +impl fmt::Debug for WaitGroup { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + let count: &usize = &*self.inner.count.lock().unwrap(); + f.debug_struct("WaitGroup") + .field("count", count) + .finish() + } +} diff --git a/third_party/rust/crossbeam-utils-0.6.5/src/thread.rs b/third_party/rust/crossbeam-utils-0.6.5/src/thread.rs new file mode 100644 index 000000000000..6d905314b7d4 --- /dev/null +++ b/third_party/rust/crossbeam-utils-0.6.5/src/thread.rs @@ -0,0 +1,529 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +use std::fmt; +use std::io; +use std::marker::PhantomData; +use std::mem; +use std::panic; +use std::sync::{Arc, Mutex}; +use std::thread; + +use sync::WaitGroup; + +type SharedVec = Arc>>; +type SharedOption = Arc>>; + + + + + + + + + + + + + + + + + + + + + +pub fn scope<'env, F, R>(f: F) -> thread::Result +where + F: FnOnce(&Scope<'env>) -> R, +{ + let wg = WaitGroup::new(); + let scope = Scope::<'env> { + handles: SharedVec::default(), + wait_group: wg.clone(), + _marker: PhantomData, + }; + + + let result = panic::catch_unwind(panic::AssertUnwindSafe(|| f(&scope))); + + + drop(scope.wait_group); + wg.wait(); + + + let panics: Vec<_> = { + let mut handles = scope.handles.lock().unwrap(); + + + let panics = handles + .drain(..) 
+ .filter_map(|handle| handle.lock().unwrap().take()) + .filter_map(|handle| handle.join().err()) + .collect(); + + panics + }; + + + + + match result { + Err(err) => panic::resume_unwind(err), + Ok(res) => { + if panics.is_empty() { + Ok(res) + } else { + Err(Box::new(panics)) + } + } + } +} + + +pub struct Scope<'env> { + + handles: SharedVec>>, + + + wait_group: WaitGroup, + + + _marker: PhantomData<&'env mut &'env ()>, +} + +unsafe impl<'env> Sync for Scope<'env> {} + +impl<'env> Scope<'env> { + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + pub fn spawn<'scope, F, T>(&'scope self, f: F) -> ScopedJoinHandle<'scope, T> + where + F: FnOnce(&Scope<'env>) -> T, + F: Send + 'env, + T: Send + 'env, + { + self.builder().spawn(f).unwrap() + } + + + + + + + + + + + + + + + + pub fn builder<'scope>(&'scope self) -> ScopedThreadBuilder<'scope, 'env> { + ScopedThreadBuilder { + scope: self, + builder: thread::Builder::new(), + } + } +} + +impl<'env> fmt::Debug for Scope<'env> { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + f.pad("Scope { .. }") + } +} + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +#[derive(Debug)] +pub struct ScopedThreadBuilder<'scope, 'env: 'scope> { + scope: &'scope Scope<'env>, + builder: thread::Builder, +} + +impl<'scope, 'env> ScopedThreadBuilder<'scope, 'env> { + + + + + + + + + + + + + + + + + + + + + pub fn name(mut self, name: String) -> ScopedThreadBuilder<'scope, 'env> { + self.builder = self.builder.name(name); + self + } + + + + + + + + + + + + + + + + + + pub fn stack_size(mut self, size: usize) -> ScopedThreadBuilder<'scope, 'env> { + self.builder = self.builder.stack_size(size); + self + } + + + + + + + + + + + + + + + + + + + + + + + + + + + pub fn spawn(self, f: F) -> io::Result> + where + F: FnOnce(&Scope<'env>) -> T, + F: Send + 'env, + T: Send + 'env, + { + + let result = SharedOption::default(); + + + let (handle, thread) = { + let result = Arc::clone(&result); + + + let scope = Scope::<'env> { + handles: Arc::clone(&self.scope.handles), + wait_group: self.scope.wait_group.clone(), + _marker: PhantomData, + }; + + + let handle = { + let closure = move || { + + let scope: Scope<'env> = scope; + + + let res = f(&scope); + + + *result.lock().unwrap() = Some(res); + }; + + + let mut closure = Some(closure); + let closure = move || closure.take().unwrap()(); + + + let closure: Box = Box::new(closure); + let closure: Box = unsafe { mem::transmute(closure) }; + + + let mut closure = closure; + self.builder.spawn(move || closure())? 
+ }; + + let thread = handle.thread().clone(); + let handle = Arc::new(Mutex::new(Some(handle))); + (handle, thread) + }; + + + self.scope.handles.lock().unwrap().push(Arc::clone(&handle)); + + Ok(ScopedJoinHandle { + handle, + result, + thread, + _marker: PhantomData, + }) + } +} + +unsafe impl<'scope, T> Send for ScopedJoinHandle<'scope, T> {} +unsafe impl<'scope, T> Sync for ScopedJoinHandle<'scope, T> {} + + +pub struct ScopedJoinHandle<'scope, T> { + + handle: SharedOption>, + + + result: SharedOption, + + + thread: thread::Thread, + + + _marker: PhantomData<&'scope ()>, +} + +impl<'scope, T> ScopedJoinHandle<'scope, T> { + + + + + + + + + + + + + + + + + + + + + + + + + + + + pub fn join(self) -> thread::Result { + + + let handle = self.handle.lock().unwrap().take().unwrap(); + + + handle + .join() + .map(|()| self.result.lock().unwrap().take().unwrap()) + } + + + + + + + + + + + + + + pub fn thread(&self) -> &thread::Thread { + &self.thread + } +} + +impl<'scope, T> fmt::Debug for ScopedJoinHandle<'scope, T> { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + f.pad("ScopedJoinHandle { .. }") + } +} diff --git a/third_party/rust/crossbeam-utils-0.6.5/tests/atomic_cell.rs b/third_party/rust/crossbeam-utils-0.6.5/tests/atomic_cell.rs new file mode 100644 index 000000000000..37c901f04288 --- /dev/null +++ b/third_party/rust/crossbeam-utils-0.6.5/tests/atomic_cell.rs @@ -0,0 +1,208 @@ +extern crate crossbeam_utils; + +use std::sync::atomic::AtomicUsize; +use std::sync::atomic::Ordering::SeqCst; + +use crossbeam_utils::atomic::AtomicCell; + +#[test] +fn is_lock_free() { + struct UsizeWrap(usize); + struct U8Wrap(bool); + + assert_eq!(AtomicCell::::is_lock_free(), true); + assert_eq!(AtomicCell::::is_lock_free(), true); + assert_eq!(AtomicCell::::is_lock_free(), true); + + assert_eq!(AtomicCell::::is_lock_free(), cfg!(feature = "nightly")); + assert_eq!( + AtomicCell::::is_lock_free(), + cfg!(feature = "nightly") + ); + assert_eq!( + AtomicCell::::is_lock_free(), + cfg!(feature = "nightly") + ); +} + +#[test] +fn drops_unit() { + static CNT: AtomicUsize = AtomicUsize::new(0); + CNT.store(0, SeqCst); + + #[derive(Debug, PartialEq, Eq)] + struct Foo(); + + impl Foo { + fn new() -> Foo { + CNT.fetch_add(1, SeqCst); + Foo() + } + } + + impl Drop for Foo { + fn drop(&mut self) { + CNT.fetch_sub(1, SeqCst); + } + } + + impl Default for Foo { + fn default() -> Foo { + Foo::new() + } + } + + let a = AtomicCell::new(Foo::new()); + + assert_eq!(a.swap(Foo::new()), Foo::new()); + assert_eq!(CNT.load(SeqCst), 1); + + a.store(Foo::new()); + assert_eq!(CNT.load(SeqCst), 1); + + assert_eq!(a.swap(Foo::default()), Foo::new()); + assert_eq!(CNT.load(SeqCst), 1); + + drop(a); + assert_eq!(CNT.load(SeqCst), 0); +} + +#[test] +fn drops_u8() { + static CNT: AtomicUsize = AtomicUsize::new(0); + CNT.store(0, SeqCst); + + #[derive(Debug, PartialEq, Eq)] + struct Foo(u8); + + impl Foo { + fn new(val: u8) -> Foo { + CNT.fetch_add(1, SeqCst); + Foo(val) + } + } + + impl Drop for Foo { + fn drop(&mut self) { + CNT.fetch_sub(1, SeqCst); + } + } + + impl Default for Foo { + fn default() -> Foo { + Foo::new(0) + } + } + + let a = AtomicCell::new(Foo::new(5)); + + assert_eq!(a.swap(Foo::new(6)), Foo::new(5)); + assert_eq!(a.swap(Foo::new(1)), Foo::new(6)); + assert_eq!(CNT.load(SeqCst), 1); + + a.store(Foo::new(2)); + assert_eq!(CNT.load(SeqCst), 1); + + assert_eq!(a.swap(Foo::default()), Foo::new(2)); + assert_eq!(CNT.load(SeqCst), 1); + + assert_eq!(a.swap(Foo::default()), Foo::new(0)); + 
assert_eq!(CNT.load(SeqCst), 1); + + drop(a); + assert_eq!(CNT.load(SeqCst), 0); +} + +#[test] +fn drops_usize() { + static CNT: AtomicUsize = AtomicUsize::new(0); + CNT.store(0, SeqCst); + + #[derive(Debug, PartialEq, Eq)] + struct Foo(usize); + + impl Foo { + fn new(val: usize) -> Foo { + CNT.fetch_add(1, SeqCst); + Foo(val) + } + } + + impl Drop for Foo { + fn drop(&mut self) { + CNT.fetch_sub(1, SeqCst); + } + } + + impl Default for Foo { + fn default() -> Foo { + Foo::new(0) + } + } + + let a = AtomicCell::new(Foo::new(5)); + + assert_eq!(a.swap(Foo::new(6)), Foo::new(5)); + assert_eq!(a.swap(Foo::new(1)), Foo::new(6)); + assert_eq!(CNT.load(SeqCst), 1); + + a.store(Foo::new(2)); + assert_eq!(CNT.load(SeqCst), 1); + + assert_eq!(a.swap(Foo::default()), Foo::new(2)); + assert_eq!(CNT.load(SeqCst), 1); + + assert_eq!(a.swap(Foo::default()), Foo::new(0)); + assert_eq!(CNT.load(SeqCst), 1); + + drop(a); + assert_eq!(CNT.load(SeqCst), 0); +} + +#[test] +fn modular_u8() { + #[derive(Clone, Copy, Eq, Debug, Default)] + struct Foo(u8); + + impl PartialEq for Foo { + fn eq(&self, other: &Foo) -> bool { + self.0 % 5 == other.0 % 5 + } + } + + let a = AtomicCell::new(Foo(1)); + + assert_eq!(a.load(), Foo(1)); + assert_eq!(a.swap(Foo(2)), Foo(11)); + assert_eq!(a.load(), Foo(52)); + + a.store(Foo(0)); + assert_eq!(a.compare_exchange(Foo(0), Foo(5)), Ok(Foo(100))); + assert_eq!(a.load().0, 5); + assert_eq!(a.compare_exchange(Foo(10), Foo(15)), Ok(Foo(100))); + assert_eq!(a.load().0, 15); +} + +#[test] +fn modular_usize() { + #[derive(Clone, Copy, Eq, Debug, Default)] + struct Foo(usize); + + impl PartialEq for Foo { + fn eq(&self, other: &Foo) -> bool { + self.0 % 5 == other.0 % 5 + } + } + + let a = AtomicCell::new(Foo(1)); + + assert_eq!(a.load(), Foo(1)); + assert_eq!(a.swap(Foo(2)), Foo(11)); + assert_eq!(a.load(), Foo(52)); + + a.store(Foo(0)); + assert_eq!(a.compare_exchange(Foo(0), Foo(5)), Ok(Foo(100))); + assert_eq!(a.load().0, 5); + assert_eq!(a.compare_exchange(Foo(10), Foo(15)), Ok(Foo(100))); + assert_eq!(a.load().0, 15); +} diff --git a/third_party/rust/crossbeam-utils-0.6.5/tests/cache_padded.rs b/third_party/rust/crossbeam-utils-0.6.5/tests/cache_padded.rs new file mode 100644 index 000000000000..8ad7d40a4ee9 --- /dev/null +++ b/third_party/rust/crossbeam-utils-0.6.5/tests/cache_padded.rs @@ -0,0 +1,112 @@ +extern crate crossbeam_utils; + +use std::cell::Cell; +use std::mem; + +use crossbeam_utils::CachePadded; + +#[test] +fn default() { + let x: CachePadded = Default::default(); + assert_eq!(*x, 0); +} + +#[test] +fn store_u64() { + let x: CachePadded = CachePadded::new(17); + assert_eq!(*x, 17); +} + +#[test] +fn store_pair() { + let x: CachePadded<(u64, u64)> = CachePadded::new((17, 37)); + assert_eq!(x.0, 17); + assert_eq!(x.1, 37); +} + +#[test] +fn distance() { + let arr = [CachePadded::new(17u8), CachePadded::new(37u8)]; + let a = &*arr[0] as *const u8; + let b = &*arr[1] as *const u8; + assert!(unsafe { a.offset(64) } <= b); +} + +#[test] +fn different_sizes() { + CachePadded::new(17u8); + CachePadded::new(17u16); + CachePadded::new(17u32); + CachePadded::new([17u64; 0]); + CachePadded::new([17u64; 1]); + CachePadded::new([17u64; 2]); + CachePadded::new([17u64; 3]); + CachePadded::new([17u64; 4]); + CachePadded::new([17u64; 5]); + CachePadded::new([17u64; 6]); + CachePadded::new([17u64; 7]); + CachePadded::new([17u64; 8]); +} + +#[test] +fn large() { + let a = [17u64; 9]; + let b = CachePadded::new(a); + assert!(mem::size_of_val(&a) <= mem::size_of_val(&b)); +} + +#[test] 
+fn debug() { + assert_eq!( + format!("{:?}", CachePadded::new(17u64)), + "CachePadded { value: 17 }" + ); +} + +#[test] +fn drops() { + let count = Cell::new(0); + + struct Foo<'a>(&'a Cell); + + impl<'a> Drop for Foo<'a> { + fn drop(&mut self) { + self.0.set(self.0.get() + 1); + } + } + + let a = CachePadded::new(Foo(&count)); + let b = CachePadded::new(Foo(&count)); + + assert_eq!(count.get(), 0); + drop(a); + assert_eq!(count.get(), 1); + drop(b); + assert_eq!(count.get(), 2); +} + +#[test] +fn clone() { + let a = CachePadded::new(17); + let b = a.clone(); + assert_eq!(*a, *b); +} + +#[test] +fn runs_custom_clone() { + let count = Cell::new(0); + + struct Foo<'a>(&'a Cell); + + impl<'a> Clone for Foo<'a> { + fn clone(&self) -> Foo<'a> { + self.0.set(self.0.get() + 1); + Foo::<'a>(self.0) + } + } + + let a = CachePadded::new(Foo(&count)); + let _ = a.clone(); + + assert_eq!(count.get(), 1); +} diff --git a/third_party/rust/crossbeam-utils-0.6.5/tests/parker.rs b/third_party/rust/crossbeam-utils-0.6.5/tests/parker.rs new file mode 100644 index 000000000000..fab07b3a3c7a --- /dev/null +++ b/third_party/rust/crossbeam-utils-0.6.5/tests/parker.rs @@ -0,0 +1,42 @@ +extern crate crossbeam_utils; + +use std::thread::sleep; +use std::time::Duration; +use std::u32; + +use crossbeam_utils::sync::Parker; +use crossbeam_utils::thread; + +#[test] +fn park_timeout_unpark_before() { + let p = Parker::new(); + for _ in 0..10 { + p.unparker().unpark(); + p.park_timeout(Duration::from_millis(u32::MAX as u64)); + } +} + +#[test] +fn park_timeout_unpark_not_called() { + let p = Parker::new(); + for _ in 0..10 { + p.park_timeout(Duration::from_millis(10)); + } +} + +#[test] +fn park_timeout_unpark_called_other_thread() { + for _ in 0..10 { + let p = Parker::new(); + let u = p.unparker().clone(); + + thread::scope(|scope| { + scope.spawn(move |_| { + sleep(Duration::from_millis(50)); + u.unpark(); + }); + + p.park_timeout(Duration::from_millis(u32::MAX as u64)); + }).unwrap(); + } +} diff --git a/third_party/rust/crossbeam-utils-0.6.5/tests/sharded_lock.rs b/third_party/rust/crossbeam-utils-0.6.5/tests/sharded_lock.rs new file mode 100644 index 000000000000..08dc966c8032 --- /dev/null +++ b/third_party/rust/crossbeam-utils-0.6.5/tests/sharded_lock.rs @@ -0,0 +1,245 @@ +extern crate crossbeam_utils; +extern crate rand; + +use std::sync::mpsc::channel; +use std::thread; +use std::sync::{Arc, TryLockError}; +use std::sync::atomic::{AtomicUsize, Ordering}; + +use crossbeam_utils::sync::ShardedLock; +use rand::Rng; + +#[derive(Eq, PartialEq, Debug)] +struct NonCopy(i32); + +#[test] +fn smoke() { + let l = ShardedLock::new(()); + drop(l.read().unwrap()); + drop(l.write().unwrap()); + drop((l.read().unwrap(), l.read().unwrap())); + drop(l.write().unwrap()); +} + +#[test] +fn frob() { + const N: u32 = 10; + const M: usize = 1000; + + let r = Arc::new(ShardedLock::new(())); + + let (tx, rx) = channel::<()>(); + for _ in 0..N { + let tx = tx.clone(); + let r = r.clone(); + thread::spawn(move || { + let mut rng = rand::thread_rng(); + for _ in 0..M { + if rng.gen_bool(1.0 / (N as f64)) { + drop(r.write().unwrap()); + } else { + drop(r.read().unwrap()); + } + } + drop(tx); + }); + } + drop(tx); + let _ = rx.recv(); +} + +#[test] +fn arc_poison_wr() { + let arc = Arc::new(ShardedLock::new(1)); + let arc2 = arc.clone(); + let _: Result<(), _> = thread::spawn(move || { + let _lock = arc2.write().unwrap(); + panic!(); + }).join(); + assert!(arc.read().is_err()); +} + +#[test] +fn arc_poison_ww() { + let arc = 
Arc::new(ShardedLock::new(1)); + assert!(!arc.is_poisoned()); + let arc2 = arc.clone(); + let _: Result<(), _> = thread::spawn(move || { + let _lock = arc2.write().unwrap(); + panic!(); + }).join(); + assert!(arc.write().is_err()); + assert!(arc.is_poisoned()); +} + +#[test] +fn arc_no_poison_rr() { + let arc = Arc::new(ShardedLock::new(1)); + let arc2 = arc.clone(); + let _: Result<(), _> = thread::spawn(move || { + let _lock = arc2.read().unwrap(); + panic!(); + }).join(); + let lock = arc.read().unwrap(); + assert_eq!(*lock, 1); +} +#[test] +fn arc_no_poison_sl() { + let arc = Arc::new(ShardedLock::new(1)); + let arc2 = arc.clone(); + let _: Result<(), _> = thread::spawn(move || { + let _lock = arc2.read().unwrap(); + panic!() + }).join(); + let lock = arc.write().unwrap(); + assert_eq!(*lock, 1); +} + +#[test] +fn arc() { + let arc = Arc::new(ShardedLock::new(0)); + let arc2 = arc.clone(); + let (tx, rx) = channel(); + + thread::spawn(move || { + let mut lock = arc2.write().unwrap(); + for _ in 0..10 { + let tmp = *lock; + *lock = -1; + thread::yield_now(); + *lock = tmp + 1; + } + tx.send(()).unwrap(); + }); + + + let mut children = Vec::new(); + for _ in 0..5 { + let arc3 = arc.clone(); + children.push(thread::spawn(move || { + let lock = arc3.read().unwrap(); + assert!(*lock >= 0); + })); + } + + + for r in children { + assert!(r.join().is_ok()); + } + + + rx.recv().unwrap(); + let lock = arc.read().unwrap(); + assert_eq!(*lock, 10); +} + +#[test] +fn arc_access_in_unwind() { + let arc = Arc::new(ShardedLock::new(1)); + let arc2 = arc.clone(); + let _ = thread::spawn(move || -> () { + struct Unwinder { + i: Arc<ShardedLock<isize>>, + } + impl Drop for Unwinder { + fn drop(&mut self) { + let mut lock = self.i.write().unwrap(); + *lock += 1; + } + } + let _u = Unwinder { i: arc2 }; + panic!(); + }).join(); + let lock = arc.read().unwrap(); + assert_eq!(*lock, 2); +} + +#[test] +fn unsized_type() { + let sl: &ShardedLock<[i32]> = &ShardedLock::new([1, 2, 3]); + { + let b = &mut *sl.write().unwrap(); + b[0] = 4; + b[2] = 5; + } + let comp: &[i32] = &[4, 2, 5]; + assert_eq!(&*sl.read().unwrap(), comp); +} + +#[test] +fn try_write() { + let lock = ShardedLock::new(0isize); + let read_guard = lock.read().unwrap(); + + let write_result = lock.try_write(); + match write_result { + Err(TryLockError::WouldBlock) => (), + Ok(_) => assert!(false, "try_write should not succeed while read_guard is in scope"), + Err(_) => assert!(false, "unexpected error"), + } + + drop(read_guard); +} + +#[test] +fn test_into_inner() { + let m = ShardedLock::new(NonCopy(10)); + assert_eq!(m.into_inner().unwrap(), NonCopy(10)); +} + +#[test] +fn test_into_inner_drop() { + struct Foo(Arc<AtomicUsize>); + impl Drop for Foo { + fn drop(&mut self) { + self.0.fetch_add(1, Ordering::SeqCst); + } + } + let num_drops = Arc::new(AtomicUsize::new(0)); + let m = ShardedLock::new(Foo(num_drops.clone())); + assert_eq!(num_drops.load(Ordering::SeqCst), 0); + { + let _inner = m.into_inner().unwrap(); + assert_eq!(num_drops.load(Ordering::SeqCst), 0); + } + assert_eq!(num_drops.load(Ordering::SeqCst), 1); +} + +#[test] +fn test_into_inner_poison() { + let m = Arc::new(ShardedLock::new(NonCopy(10))); + let m2 = m.clone(); + let _ = thread::spawn(move || { + let _lock = m2.write().unwrap(); + panic!("test panic in inner thread to poison ShardedLock"); + }).join(); + + assert!(m.is_poisoned()); + match Arc::try_unwrap(m).unwrap().into_inner() { + Err(e) => assert_eq!(e.into_inner(), NonCopy(10)), + Ok(x) => panic!("into_inner of poisoned ShardedLock is Ok: {:?}",
x), + } +} + +#[test] +fn test_get_mut() { + let mut m = ShardedLock::new(NonCopy(10)); + *m.get_mut().unwrap() = NonCopy(20); + assert_eq!(m.into_inner().unwrap(), NonCopy(20)); +} + +#[test] +fn test_get_mut_poison() { + let m = Arc::new(ShardedLock::new(NonCopy(10))); + let m2 = m.clone(); + let _ = thread::spawn(move || { + let _lock = m2.write().unwrap(); + panic!("test panic in inner thread to poison ShardedLock"); + }).join(); + + assert!(m.is_poisoned()); + match Arc::try_unwrap(m).unwrap().get_mut() { + Err(e) => assert_eq!(*e.into_inner(), NonCopy(10)), + Ok(x) => panic!("get_mut of poisoned ShardedLock is Ok: {:?}", x), + } +} diff --git a/third_party/rust/crossbeam-utils-0.6.5/tests/thread.rs b/third_party/rust/crossbeam-utils-0.6.5/tests/thread.rs new file mode 100644 index 000000000000..e34a0221d990 --- /dev/null +++ b/third_party/rust/crossbeam-utils-0.6.5/tests/thread.rs @@ -0,0 +1,175 @@ +extern crate crossbeam_utils; + +use std::any::Any; +use std::sync::atomic::{AtomicUsize, Ordering}; +use std::thread::sleep; +use std::time::Duration; + +use crossbeam_utils::thread; + +const THREADS: usize = 10; +const SMALL_STACK_SIZE: usize = 20; + +#[test] +fn join() { + let counter = AtomicUsize::new(0); + thread::scope(|scope| { + let handle = scope.spawn(|_| { + counter.store(1, Ordering::Relaxed); + }); + assert!(handle.join().is_ok()); + + let panic_handle = scope.spawn(|_| { + panic!("\"My honey is running out!\", said Pooh."); + }); + assert!(panic_handle.join().is_err()); + }).unwrap(); + + + assert_eq!(1, counter.load(Ordering::Relaxed)); +} + +#[test] +fn counter() { + let counter = AtomicUsize::new(0); + thread::scope(|scope| { + for _ in 0..THREADS { + scope.spawn(|_| { + counter.fetch_add(1, Ordering::Relaxed); + }); + } + }).unwrap(); + + assert_eq!(THREADS, counter.load(Ordering::Relaxed)); +} + +#[test] +fn counter_builder() { + let counter = AtomicUsize::new(0); + thread::scope(|scope| { + for i in 0..THREADS { + scope + .builder() + .name(format!("child-{}", i)) + .stack_size(SMALL_STACK_SIZE) + .spawn(|_| { + counter.fetch_add(1, Ordering::Relaxed); + }).unwrap(); + } + }).unwrap(); + + assert_eq!(THREADS, counter.load(Ordering::Relaxed)); +} + +#[test] +fn counter_panic() { + let counter = AtomicUsize::new(0); + let result = thread::scope(|scope| { + scope.spawn(|_| { + panic!("\"My honey is running out!\", said Pooh."); + }); + sleep(Duration::from_millis(100)); + + for _ in 0..THREADS { + scope.spawn(|_| { + counter.fetch_add(1, Ordering::Relaxed); + }); + } + }); + + assert_eq!(THREADS, counter.load(Ordering::Relaxed)); + assert!(result.is_err()); +} + +#[test] +fn panic_twice() { + let result = thread::scope(|scope| { + scope.spawn(|_| { + sleep(Duration::from_millis(500)); + panic!("thread #1"); + }); + scope.spawn(|_| { + panic!("thread #2"); + }); + }); + + let err = result.unwrap_err(); + let vec = err + .downcast_ref::<Vec<Box<Any + Send + 'static>>>() + .unwrap(); + assert_eq!(2, vec.len()); + + let first = vec[0].downcast_ref::<&str>().unwrap(); + let second = vec[1].downcast_ref::<&str>().unwrap(); + assert_eq!("thread #1", *first); + assert_eq!("thread #2", *second) +} + +#[test] +fn panic_many() { + let result = thread::scope(|scope| { + scope.spawn(|_| panic!("deliberate panic #1")); + scope.spawn(|_| panic!("deliberate panic #2")); + scope.spawn(|_| panic!("deliberate panic #3")); + }); + + let err = result.unwrap_err(); + let vec = err + .downcast_ref::<Vec<Box<Any + Send + 'static>>>() + .unwrap(); + assert_eq!(3, vec.len()); + + for panic in vec.iter() { + let panic =
panic.downcast_ref::<&str>().unwrap(); + assert!( + *panic == "deliberate panic #1" + || *panic == "deliberate panic #2" + || *panic == "deliberate panic #3" + ); + } +} + +#[test] +fn nesting() { + let var = "foo".to_string(); + + struct Wrapper<'a> { + var: &'a String, + } + + impl<'a> Wrapper<'a> { + fn recurse(&'a self, scope: &thread::Scope<'a>, depth: usize) { + assert_eq!(self.var, "foo"); + + if depth > 0 { + scope.spawn(move |scope| { + self.recurse(scope, depth - 1); + }); + } + } + } + + let wrapper = Wrapper { var: &var }; + + thread::scope(|scope| { + scope.spawn(|scope| { + scope.spawn(|scope| { + wrapper.recurse(scope, 5); + }); + }); + }).unwrap(); +} + +#[test] +fn join_nested() { + thread::scope(|scope| { + scope.spawn(|scope| { + let handle = scope.spawn(|_| 7); + + sleep(Duration::from_millis(200)); + handle.join().unwrap(); + }); + + sleep(Duration::from_millis(100)); + }).unwrap(); +} diff --git a/third_party/rust/crossbeam-utils-0.6.5/tests/wait_group.rs b/third_party/rust/crossbeam-utils-0.6.5/tests/wait_group.rs new file mode 100644 index 000000000000..75af0918c881 --- /dev/null +++ b/third_party/rust/crossbeam-utils-0.6.5/tests/wait_group.rs @@ -0,0 +1,66 @@ +extern crate crossbeam_utils; + +use std::sync::mpsc; +use std::thread; +use std::time::Duration; + +use crossbeam_utils::sync::WaitGroup; + +const THREADS: usize = 10; + +#[test] +fn wait() { + let wg = WaitGroup::new(); + let (tx, rx) = mpsc::channel(); + + for _ in 0..THREADS { + let wg = wg.clone(); + let tx = tx.clone(); + + thread::spawn(move || { + wg.wait(); + tx.send(()).unwrap(); + }); + } + + thread::sleep(Duration::from_millis(100)); + + + + assert!(rx.try_recv().is_err()); + + wg.wait(); + + + for _ in 0..THREADS { + rx.recv().unwrap(); + } +} + +#[test] +fn wait_and_drop() { + let wg = WaitGroup::new(); + let (tx, rx) = mpsc::channel(); + + for _ in 0..THREADS { + let wg = wg.clone(); + let tx = tx.clone(); + + thread::spawn(move || { + thread::sleep(Duration::from_millis(100)); + tx.send(()).unwrap(); + drop(wg); + }); + } + + + + assert!(rx.try_recv().is_err()); + + wg.wait(); + + + for _ in 0..THREADS { + rx.try_recv().unwrap(); + } +} diff --git a/third_party/rust/crossbeam-utils/.cargo-checksum.json b/third_party/rust/crossbeam-utils/.cargo-checksum.json index ec17be04c271..330718f051e4 100644 --- a/third_party/rust/crossbeam-utils/.cargo-checksum.json +++ b/third_party/rust/crossbeam-utils/.cargo-checksum.json @@ -1 +1 @@ 
-{"files":{"CHANGELOG.md":"e58bfef23e76d04b244941fd4ecdb35837a1a6f1370bf4596cc0280193c9a4f9","Cargo.toml":"2d4d20231a89e61fa6d1d83ad853b274e71d243c992eda5a9de0c9e8ca428ba5","LICENSE-APACHE":"a60eea817514531668d7e00765731449fe14d059d3249e0bc93b36de45f759f2","LICENSE-MIT":"23f18e03dc49df91622fe2a76176497404e46ced8a715d9d2b67a7446571cca3","README.md":"63ba61fd2e75aa90572476eda5246fc766846af40d31e0bdccbf763d9f0799ba","benches/atomic_cell.rs":"ada69698def9d4eab485a6e0da235aaac001efe49a6b0d6f5c5be381a645310f","src/atomic/atomic_cell.rs":"97a9ec7ac2625ee0a951b984a419fbeab62173ed9c23cab47dfc13ed25e8ee6c","src/atomic/consume.rs":"bfdc7e2d8370a5a3bb1699b6214347c359d66fcc92a2d1345a513676ac91d821","src/atomic/mod.rs":"404eacae422012f3628cb44262df73a5891fe02a17ab345b832e3062982b5a20","src/backoff.rs":"029fede365eaa3408c7359cf868303120903976304aee546aeedcb80085568d5","src/cache_padded.rs":"95b10657b4e50316d2213894e195c61602ff0c6655cc965301de1584fb7d61c7","src/lib.rs":"957df3bd2875147aa1b939fc47f1a8a72719748e9001f27dba2f3589e27a73b4","src/sync/mod.rs":"4c8ad6ec4601f212791b0b531b46ee5decec2f1d14746aa7f2c18e36c609cd8e","src/sync/parker.rs":"55324bbea5b7c6838a0f8467a5b8a5dbd5526c8e1c7fd4f6d64dad1ab19f9be9","src/sync/sharded_lock.rs":"7a401ba621233732c26cf49324748269359d7bc5dc27e0ec26c9493e9a5ec97d","src/sync/wait_group.rs":"21708bbd46daa98e9f788765a9a4ef3b087a8d1e97a6e9406b4a960c95e44ca0","src/thread.rs":"384e3c6e6db565e752169223205991f1eadb1258b1d416758172a40a6c9bd645","tests/atomic_cell.rs":"690f516c7e827b18adec5da1c3249ebb26ff674c5887d863ddc94fe1600b9c28","tests/cache_padded.rs":"02235757a554279dae5053d46314a765059ec036c63a05336353994c2aa344d1","tests/parker.rs":"996212c084286567638919c27d46a250a5d592d8e1a97c1e6a4d7e10c060e4dd","tests/sharded_lock.rs":"1e2e8a355b74d89569873fbba7772235bc64d13a7209ee673f368f4fe6f70c65","tests/thread.rs":"0d86998085a8aace79e5b3dae61aa8bd864492f44aafcce6ec85778954f55809","tests/wait_group.rs":"e3d5168581fb511b760f4249ca487b919cffc60ac2b4610a78db99899772dd5b"},"package":"f8306fcef4a7b563b76b7dd949ca48f52bc1141aa067d2ea09565f3e2652aa5c"} \ No newline at end of file 
+{"files":{"CHANGELOG.md":"c728a651e916eeb36060274c25cf28817920caf498eb5a367fecdaebe3139f5b","Cargo.toml":"d72c5990059398ec7a203d13a4fc8c5dd7e1c051199415d33475675acb377f9a","LICENSE-APACHE":"a60eea817514531668d7e00765731449fe14d059d3249e0bc93b36de45f759f2","LICENSE-MIT":"5734ed989dfca1f625b40281ee9f4530f91b2411ec01cb748223e7eb87e201ab","README.md":"9010e511423b1159a33c84433d9e6cb8df1d08938e4d6cc8656bcec9bdde0eb8","benches/atomic_cell.rs":"fa38d34ddc593bf0999fd65d95ee349c53a11290fbe7bf24870a7e24754ae2ac","build.rs":"825c47ae19028dc4b28101ec71c04e7e41b8b185f6ecbeacee223596524c86ad","src/atomic/atomic_cell.rs":"609adf6aa30d8685c1f708887a17c19f90f8751400b83d8146d078afb5a31c48","src/atomic/consume.rs":"bfdc7e2d8370a5a3bb1699b6214347c359d66fcc92a2d1345a513676ac91d821","src/atomic/mod.rs":"6c3efec60aee6a2e68dfa6fe3c059beab8429c150459ce5cfc736e8b5f95301e","src/atomic/seq_lock.rs":"4797f76beb0ec3eb363c2d49e9548adc8d042867b1602c1b8ca6269463d84e82","src/atomic/seq_lock_wide.rs":"d27243fae36ae9ff72cbf24defcac4abb2aef3fa11a47f352702708a1eee6a06","src/backoff.rs":"bc9d2afdd070e0746bc48ff2159bf47b0cfaa68ea09f47eaed18ccc32fc87d67","src/cache_padded.rs":"864f210089eddfd130830f5c700115c2f8b974b71659d7e8ef7bd5e09d7e1f96","src/lib.rs":"63096ede0c6ccdee05e910c2cce41da6df283868b658f9aa18c8fba8ac0f901d","src/sync/mod.rs":"4c8ad6ec4601f212791b0b531b46ee5decec2f1d14746aa7f2c18e36c609cd8e","src/sync/parker.rs":"55324bbea5b7c6838a0f8467a5b8a5dbd5526c8e1c7fd4f6d64dad1ab19f9be9","src/sync/sharded_lock.rs":"7a401ba621233732c26cf49324748269359d7bc5dc27e0ec26c9493e9a5ec97d","src/sync/wait_group.rs":"21708bbd46daa98e9f788765a9a4ef3b087a8d1e97a6e9406b4a960c95e44ca0","src/thread.rs":"81f6ee718c5251083024583d351a1713cb6a850c284f68e5fa8a35d2ed4b33ba","tests/atomic_cell.rs":"a3c9e7d5832c92fa7e39710a3dc84b4a906eb93479386fba75ff851dd1c908ae","tests/cache_padded.rs":"02235757a554279dae5053d46314a765059ec036c63a05336353994c2aa344d1","tests/parker.rs":"3e5c4e170cebdd9f815f2b31a2b6a01da753fc03e2b1d8e9b899d009f62c6b83","tests/sharded_lock.rs":"3b42218397f1260bed4e0a229f55da83439c0ec9effbbefc86251c9d208979bb","tests/thread.rs":"4be7b293b5f13d7a158a231ba7f7b086bd8fe19aaf11b1c9a8a6cdf7bba6fdfc","tests/wait_group.rs":"e3d5168581fb511b760f4249ca487b919cffc60ac2b4610a78db99899772dd5b"},"package":"ce446db02cdc3165b94ae73111e570793400d0794e46125cc4056c81cbb039f4"} \ No newline at end of file diff --git a/third_party/rust/crossbeam-utils/CHANGELOG.md b/third_party/rust/crossbeam-utils/CHANGELOG.md index e3a2bdd0727c..160e92888157 100644 --- a/third_party/rust/crossbeam-utils/CHANGELOG.md +++ b/third_party/rust/crossbeam-utils/CHANGELOG.md @@ -1,3 +1,18 @@ +# Version 0.7.0 + +- Bump the minimum required version to 1.28. +- Fix breakage with nightly feature due to rust-lang/rust#65214. +- Apply `#[repr(transparent)]` to `AtomicCell`. +- Make `AtomicCell::new()` const function at 1.31+. + +# Version 0.6.6 + +- Add `UnwindSafe` and `RefUnwindSafe` impls for `AtomicCell`. +- Add `AtomicCell::as_ptr()`. +- Add `AtomicCell::take()`. +- Fix a bug in `AtomicCell::compare_exchange()` and `AtomicCell::compare_and_swap()`. +- Various documentation improvements. + # Version 0.6.5 - Rename `Backoff::is_complete()` to `Backoff::is_completed()`. @@ -22,7 +37,7 @@ - Fix a soundness bug in `Scope::spawn()`. - Remove the `T: 'scope` bound on `ScopedJoinHandle`. - + # Version 0.6.0 - Move `AtomicConsume` to `atomic` module. 
diff --git a/third_party/rust/crossbeam-utils/Cargo.toml b/third_party/rust/crossbeam-utils/Cargo.toml index 5c2c84b3cd36..c16268bf6ec0 100644 --- a/third_party/rust/crossbeam-utils/Cargo.toml +++ b/third_party/rust/crossbeam-utils/Cargo.toml @@ -3,7 +3,7 @@ # When uploading crates to the registry Cargo will automatically # "normalize" Cargo.toml files for maximal compatibility # with all versions of Cargo and also rewrite `path` dependencies -# to registry (e.g. crates.io) dependencies +# to registry (e.g., crates.io) dependencies # # If you believe there's an error in this file please file an # issue against the rust-lang/cargo repository. If you're @@ -12,7 +12,7 @@ [package] name = "crossbeam-utils" -version = "0.6.5" +version = "0.7.0" authors = ["The Crossbeam Project Developers"] description = "Utilities for concurrent programming" homepage = "https://github.com/crossbeam-rs/crossbeam/tree/master/crossbeam-utils" @@ -23,15 +23,18 @@ categories = ["algorithms", "concurrency", "data-structures", "no-std"] license = "MIT/Apache-2.0" repository = "https://github.com/crossbeam-rs/crossbeam" [dependencies.cfg-if] -version = "0.1" +version = "0.1.2" [dependencies.lazy_static] version = "1.1.0" optional = true [dev-dependencies.rand] version = "0.6" +[build-dependencies.autocfg] +version = "0.1.6" [features] +alloc = [] default = ["std"] nightly = [] std = ["lazy_static"] diff --git a/third_party/rust/crossbeam-utils/LICENSE-MIT b/third_party/rust/crossbeam-utils/LICENSE-MIT index 31aa79387f27..068d491fd551 100644 --- a/third_party/rust/crossbeam-utils/LICENSE-MIT +++ b/third_party/rust/crossbeam-utils/LICENSE-MIT @@ -1,3 +1,7 @@ +The MIT License (MIT) + +Copyright (c) 2019 The Crossbeam Project Developers + Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the diff --git a/third_party/rust/crossbeam-utils/README.md b/third_party/rust/crossbeam-utils/README.md index a454c141ed76..ffb1ab424828 100644 --- a/third_party/rust/crossbeam-utils/README.md +++ b/third_party/rust/crossbeam-utils/README.md @@ -8,15 +8,16 @@ https://github.com/crossbeam-rs/crossbeam-utils/tree/master/src) https://crates.io/crates/crossbeam-utils) [![Documentation](https://docs.rs/crossbeam-utils/badge.svg)]( https://docs.rs/crossbeam-utils) -[![Rust 1.26+](https://img.shields.io/badge/rust-1.26+-lightgray.svg)]( +[![Rust 1.28+](https://img.shields.io/badge/rust-1.28+-lightgray.svg)]( https://www.rust-lang.org) +[![chat](https://img.shields.io/discord/569610676205781012.svg?logo=discord)](https://discord.gg/BBYwKq) This crate provides miscellaneous tools for concurrent programming: #### Atomics -* [`AtomicCell`], a thread-safe mutable memory location.(\*) -* [`AtomicConsume`], for reading from primitive atomic types with "consume" ordering.(\*) +* [`AtomicCell`], a thread-safe mutable memory location.(no_std) +* [`AtomicConsume`], for reading from primitive atomic types with "consume" ordering.(no_std) #### Thread synchronization @@ -26,11 +27,11 @@ This crate provides miscellaneous tools for concurrent programming: #### Utilities -* [`Backoff`], for exponential backoff in spin loops.(\*) -* [`CachePadded`], for padding and aligning a value to the length of a cache line.(\*) +* [`Backoff`], for exponential backoff in spin loops.(no_std) +* [`CachePadded`], for padding and aligning a value to the length of a cache line.(no_std) * [`scope`], for spawning threads that borrow local variables from the stack. 
-*Features marked with (\*) can be used in `no_std` environments.* +*Features marked with (no_std) can be used in `no_std` environments.*
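[Editor's note] A minimal usage sketch for the `scope` bullet above (illustration only; the variable names are invented, and the API is the one documented later in this patch in `src/thread.rs`):

    extern crate crossbeam_utils;
    use crossbeam_utils::thread;

    fn main() {
        let people = vec!["Alice".to_string(), "Bob".to_string()];
        thread::scope(|s| {
            for person in &people {
                // Each spawned thread borrows `people` from this stack frame,
                // which plain std::thread::spawn cannot do.
                s.spawn(move |_| println!("Hello, {}!", person));
            }
        }).unwrap();
    }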
[`AtomicCell`]: https://docs.rs/crossbeam-utils/*/crossbeam_utils/atomic/struct.AtomicCell.html [`AtomicConsume`]: https://docs.rs/crossbeam-utils/*/crossbeam_utils/atomic/trait.AtomicConsume.html @@ -47,7 +48,7 @@ Add this to your `Cargo.toml`: ```toml [dependencies] -crossbeam-utils = "0.6" +crossbeam-utils = "0.7" ``` Next, add this to your crate: @@ -56,6 +57,10 @@ Next, add this to your crate: extern crate crossbeam_utils; ``` +## Compatibility + +The minimum supported Rust version is 1.28. Any change to this is considered a breaking change. + ## License Licensed under either of diff --git a/third_party/rust/crossbeam-utils/benches/atomic_cell.rs b/third_party/rust/crossbeam-utils/benches/atomic_cell.rs index aae17d41192a..8587dba1d9f4 100644 --- a/third_party/rust/crossbeam-utils/benches/atomic_cell.rs +++ b/third_party/rust/crossbeam-utils/benches/atomic_cell.rs @@ -51,20 +51,18 @@ fn concurrent_load_u8(b: &mut test::Bencher) { thread::scope(|scope| { for _ in 0..THREADS { - scope.spawn(|_| { - loop { - start.wait(); - - let mut sum = 0; - for _ in 0..STEPS { - sum += a.load(); - } - test::black_box(sum); - - end.wait(); - if exit.load() { - break; - } + scope.spawn(|_| loop { + start.wait(); + + let mut sum = 0; + for _ in 0..STEPS { + sum += a.load(); + } + test::black_box(sum); + + end.wait(); + if exit.load() { + break; } }); } @@ -80,7 +78,8 @@ fn concurrent_load_u8(b: &mut test::Bencher) { start.wait(); exit.store(true); end.wait(); - }).unwrap(); + }) + .unwrap(); } #[bench] @@ -126,20 +125,18 @@ fn concurrent_load_usize(b: &mut test::Bencher) { thread::scope(|scope| { for _ in 0..THREADS { - scope.spawn(|_| { - loop { - start.wait(); - - let mut sum = 0; - for _ in 0..STEPS { - sum += a.load(); - } - test::black_box(sum); - - end.wait(); - if exit.load() { - break; - } + scope.spawn(|_| loop { + start.wait(); + + let mut sum = 0; + for _ in 0..STEPS { + sum += a.load(); + } + test::black_box(sum); + + end.wait(); + if exit.load() { + break; } }); } @@ -155,5 +152,6 @@ fn concurrent_load_usize(b: &mut test::Bencher) { start.wait(); exit.store(true); end.wait(); - }).unwrap(); + }) + .unwrap(); } diff --git a/third_party/rust/crossbeam-utils/build.rs b/third_party/rust/crossbeam-utils/build.rs new file mode 100644 index 000000000000..d451c24b2f1f --- /dev/null +++ b/third_party/rust/crossbeam-utils/build.rs @@ -0,0 +1,8 @@ +extern crate autocfg; + +fn main() { + let cfg = autocfg::new(); + if cfg.probe_rustc_version(1, 31) { + println!("cargo:rustc-cfg=has_min_const_fn"); + } +} diff --git a/third_party/rust/crossbeam-utils/src/atomic/atomic_cell.rs b/third_party/rust/crossbeam-utils/src/atomic/atomic_cell.rs index 0d4670801e65..b67b12705a41 100644 --- a/third_party/rust/crossbeam-utils/src/atomic/atomic_cell.rs +++ b/third_party/rust/crossbeam-utils/src/atomic/atomic_cell.rs @@ -2,11 +2,12 @@ use core::cell::UnsafeCell; use core::fmt; use core::mem; use core::ptr; -use core::slice; -use core::sync::atomic::{self, AtomicBool, AtomicUsize, Ordering}; +use core::sync::atomic::{self, AtomicBool, Ordering}; -use Backoff; +#[cfg(feature = "std")] +use std::panic::{RefUnwindSafe, UnwindSafe}; +use super::seq_lock::SeqLock; @@ -17,7 +18,13 @@ use Backoff; -pub struct AtomicCell { + + + + + +#[repr(transparent)] +pub struct AtomicCell { @@ -29,6 +36,11 @@ pub struct AtomicCell { unsafe impl Send for AtomicCell {} unsafe impl Sync for AtomicCell {} +#[cfg(feature = "std")] +impl UnwindSafe for AtomicCell {} +#[cfg(feature = "std")] +impl RefUnwindSafe for AtomicCell {} + impl 
AtomicCell { @@ -39,6 +51,7 @@ impl AtomicCell { + #[cfg(not(has_min_const_fn))] pub fn new(val: T) -> AtomicCell { AtomicCell { value: UnsafeCell::new(val), @@ -54,11 +67,11 @@ impl AtomicCell { - - - - pub fn get_mut(&mut self) -> &mut T { - unsafe { &mut *self.value.get() } + #[cfg(has_min_const_fn)] + pub const fn new(val: T) -> AtomicCell { + AtomicCell { + value: UnsafeCell::new(val), + } } @@ -150,6 +163,61 @@ impl AtomicCell { } } +impl AtomicCell { + + + + + + + + + + + + #[inline] + pub fn as_ptr(&self) -> *mut T { + self.value.get() + } + + + + + + + + + + + + + + #[doc(hidden)] + #[deprecated(note = "this method is unsound and will be removed in the next release")] + pub fn get_mut(&mut self) -> &mut T { + unsafe { &mut *self.value.get() } + } +} + +impl AtomicCell { + + + + + + + + + + + + + + pub fn take(&self) -> T { + self.swap(Default::default()) + } +} + impl AtomicCell { @@ -211,23 +279,8 @@ impl AtomicCell { - pub fn compare_exchange(&self, mut current: T, new: T) -> Result { - loop { - match unsafe { atomic_compare_exchange_weak(self.value.get(), current, new) } { - Ok(_) => return Ok(current), - Err(previous) => { - if previous != current { - return Err(previous); - } - - - - - - current = previous; - } - } - } + pub fn compare_exchange(&self, current: T, new: T) -> Result { + unsafe { atomic_compare_exchange_weak(self.value.get(), current, new) } } } @@ -252,7 +305,7 @@ macro_rules! impl_arithmetic { pub fn fetch_add(&self, val: $t) -> $t { if can_transmute::<$t, atomic::AtomicUsize>() { let a = unsafe { &*(self.value.get() as *const atomic::AtomicUsize) }; - a.fetch_add(val as usize, Ordering::SeqCst) as $t + a.fetch_add(val as usize, Ordering::AcqRel) as $t } else { let _guard = lock(self.value.get() as usize).write(); let value = unsafe { &mut *(self.value.get()) }; @@ -280,7 +333,7 @@ macro_rules! impl_arithmetic { pub fn fetch_sub(&self, val: $t) -> $t { if can_transmute::<$t, atomic::AtomicUsize>() { let a = unsafe { &*(self.value.get() as *const atomic::AtomicUsize) }; - a.fetch_sub(val as usize, Ordering::SeqCst) as $t + a.fetch_sub(val as usize, Ordering::AcqRel) as $t } else { let _guard = lock(self.value.get() as usize).write(); let value = unsafe { &mut *(self.value.get()) }; @@ -306,7 +359,7 @@ macro_rules! impl_arithmetic { pub fn fetch_and(&self, val: $t) -> $t { if can_transmute::<$t, atomic::AtomicUsize>() { let a = unsafe { &*(self.value.get() as *const atomic::AtomicUsize) }; - a.fetch_and(val as usize, Ordering::SeqCst) as $t + a.fetch_and(val as usize, Ordering::AcqRel) as $t } else { let _guard = lock(self.value.get() as usize).write(); let value = unsafe { &mut *(self.value.get()) }; @@ -332,7 +385,7 @@ macro_rules! impl_arithmetic { pub fn fetch_or(&self, val: $t) -> $t { if can_transmute::<$t, atomic::AtomicUsize>() { let a = unsafe { &*(self.value.get() as *const atomic::AtomicUsize) }; - a.fetch_or(val as usize, Ordering::SeqCst) as $t + a.fetch_or(val as usize, Ordering::AcqRel) as $t } else { let _guard = lock(self.value.get() as usize).write(); let value = unsafe { &mut *(self.value.get()) }; @@ -358,7 +411,7 @@ macro_rules! 
impl_arithmetic { pub fn fetch_xor(&self, val: $t) -> $t { if can_transmute::<$t, atomic::AtomicUsize>() { let a = unsafe { &*(self.value.get() as *const atomic::AtomicUsize) }; - a.fetch_xor(val as usize, Ordering::SeqCst) as $t + a.fetch_xor(val as usize, Ordering::AcqRel) as $t } else { let _guard = lock(self.value.get() as usize).write(); let value = unsafe { &mut *(self.value.get()) }; @@ -388,7 +441,7 @@ macro_rules! impl_arithmetic { #[inline] pub fn fetch_add(&self, val: $t) -> $t { let a = unsafe { &*(self.value.get() as *const $atomic) }; - a.fetch_add(val, Ordering::SeqCst) + a.fetch_add(val, Ordering::AcqRel) } /// Decrements the current value by `val` and returns the previous value. @@ -408,7 +461,7 @@ macro_rules! impl_arithmetic { #[inline] pub fn fetch_sub(&self, val: $t) -> $t { let a = unsafe { &*(self.value.get() as *const $atomic) }; - a.fetch_sub(val, Ordering::SeqCst) + a.fetch_sub(val, Ordering::AcqRel) } /// Applies bitwise "and" to the current value and returns the previous value. @@ -426,7 +479,7 @@ macro_rules! impl_arithmetic { #[inline] pub fn fetch_and(&self, val: $t) -> $t { let a = unsafe { &*(self.value.get() as *const $atomic) }; - a.fetch_and(val, Ordering::SeqCst) + a.fetch_and(val, Ordering::AcqRel) } /// Applies bitwise "or" to the current value and returns the previous value. @@ -444,7 +497,7 @@ macro_rules! impl_arithmetic { #[inline] pub fn fetch_or(&self, val: $t) -> $t { let a = unsafe { &*(self.value.get() as *const $atomic) }; - a.fetch_or(val, Ordering::SeqCst) + a.fetch_or(val, Ordering::AcqRel) } /// Applies bitwise "xor" to the current value and returns the previous value. @@ -462,7 +515,7 @@ macro_rules! impl_arithmetic { #[inline] pub fn fetch_xor(&self, val: $t) -> $t { let a = unsafe { &*(self.value.get() as *const $atomic) }; - a.fetch_xor(val, Ordering::SeqCst) + a.fetch_xor(val, Ordering::AcqRel) } } }; @@ -528,7 +581,7 @@ impl AtomicCell { #[inline] pub fn fetch_and(&self, val: bool) -> bool { let a = unsafe { &*(self.value.get() as *const AtomicBool) }; - a.fetch_and(val, Ordering::SeqCst) + a.fetch_and(val, Ordering::AcqRel) } @@ -549,7 +602,7 @@ impl AtomicCell { #[inline] pub fn fetch_or(&self, val: bool) -> bool { let a = unsafe { &*(self.value.get() as *const AtomicBool) }; - a.fetch_or(val, Ordering::SeqCst) + a.fetch_or(val, Ordering::AcqRel) } @@ -570,7 +623,7 @@ impl AtomicCell { #[inline] pub fn fetch_xor(&self, val: bool) -> bool { let a = unsafe { &*(self.value.get() as *const AtomicBool) }; - a.fetch_xor(val, Ordering::SeqCst) + a.fetch_xor(val, Ordering::AcqRel) } } @@ -589,102 +642,12 @@ impl fmt::Debug for AtomicCell { } -fn byte_eq(a: &T, b: &T) -> bool { - unsafe { - let a = slice::from_raw_parts(a as *const _ as *const u8, mem::size_of::()); - let b = slice::from_raw_parts(b as *const _ as *const u8, mem::size_of::()); - a == b - } -} - - fn can_transmute() -> bool { mem::size_of::
() == mem::size_of::() && mem::align_of::() >= mem::align_of::() } -struct Lock { - - - - - state: AtomicUsize, -} - -impl Lock { - - - - #[inline] - fn optimistic_read(&self) -> Option { - let state = self.state.load(Ordering::Acquire); - if state == 1 { - None - } else { - Some(state) - } - } - - - - - - #[inline] - fn validate_read(&self, stamp: usize) -> bool { - atomic::fence(Ordering::Acquire); - self.state.load(Ordering::Relaxed) == stamp - } - - - #[inline] - fn write(&'static self) -> WriteGuard { - let backoff = Backoff::new(); - loop { - let previous = self.state.swap(1, Ordering::Acquire); - - if previous != 1 { - atomic::fence(Ordering::Release); - - return WriteGuard { - lock: self, - state: previous, - }; - } - - backoff.snooze(); - } - } -} - - -struct WriteGuard { - - lock: &'static Lock, - - - state: usize, -} - -impl WriteGuard { - - #[inline] - fn abort(self) { - self.lock.state.store(self.state, Ordering::Release); - } -} - -impl Drop for WriteGuard { - #[inline] - fn drop(&mut self) { - - self.lock - .state - .store(self.state.wrapping_add(2), Ordering::Release); - } -} - - @@ -694,7 +657,7 @@ impl Drop for WriteGuard { #[inline] #[must_use] -fn lock(addr: usize) -> &'static Lock { +fn lock(addr: usize) -> &'static SeqLock { @@ -719,10 +682,9 @@ fn lock(addr: usize) -> &'static Lock { const LEN: usize = 97; - const L: Lock = Lock { - state: AtomicUsize::new(0), - }; - static LOCKS: [Lock; LEN] = [ + const L: SeqLock = SeqLock::INIT; + + static LOCKS: [SeqLock; LEN] = [ L, L, L, L, L, L, L, L, L, L, L, L, L, L, L, L, L, L, L, L, L, L, L, L, L, L, L, L, L, L, L, L, L, L, L, L, L, L, L, L, L, L, L, L, L, L, L, L, L, L, L, L, L, L, L, L, L, L, L, L, L, L, L, L, L, L, L, L, L, L, L, L, L, L, L, L, L, L, L, L, L, L, L, L, L, L, L, L, L, L, @@ -813,7 +775,7 @@ where T, a, { a = &*(src as *const _ as *const _); - mem::transmute_copy(&a.load(Ordering::SeqCst)) + mem::transmute_copy(&a.load(Ordering::Acquire)) }, { let lock = lock(src as usize); @@ -851,13 +813,12 @@ unsafe fn atomic_store(dst: *mut T, val: T) { T, a, { a = &*(dst as *const _ as *const _); - let res = a.store(mem::transmute_copy(&val), Ordering::SeqCst); + a.store(mem::transmute_copy(&val), Ordering::Release); mem::forget(val); - res }, { let _guard = lock(dst as usize).write(); - ptr::write(dst, val) + ptr::write(dst, val); } } } @@ -871,7 +832,7 @@ unsafe fn atomic_swap(dst: *mut T, val: T) -> T { T, a, { a = &*(dst as *const _ as *const _); - let res = mem::transmute_copy(&a.swap(mem::transmute_copy(&val), Ordering::SeqCst)); + let res = mem::transmute_copy(&a.swap(mem::transmute_copy(&val), Ordering::AcqRel)); mem::forget(val); res }, @@ -889,29 +850,46 @@ unsafe fn atomic_swap(dst: *mut T, val: T) -> T { -unsafe fn atomic_compare_exchange_weak(dst: *mut T, current: T, new: T) -> Result +unsafe fn atomic_compare_exchange_weak(dst: *mut T, mut current: T, new: T) -> Result where - T: Copy, + T: Copy + Eq, { atomic! 
{ T, a, { a = &*(dst as *const _ as *const _); - let res = a.compare_exchange_weak( - mem::transmute_copy(¤t), - mem::transmute_copy(&new), - Ordering::SeqCst, - Ordering::SeqCst, - ); - match res { - Ok(v) => Ok(mem::transmute_copy(&v)), - Err(v) => Err(mem::transmute_copy(&v)), + let mut current_raw = mem::transmute_copy(¤t); + let new_raw = mem::transmute_copy(&new); + + loop { + match a.compare_exchange_weak( + current_raw, + new_raw, + Ordering::AcqRel, + Ordering::Acquire, + ) { + Ok(_) => break Ok(current), + Err(previous_raw) => { + let previous = mem::transmute_copy(&previous_raw); + + if !T::eq(&previous, ¤t) { + break Err(previous); + } + + // The compare-exchange operation has failed and didn't store `new`. The + // failure is either spurious, or `previous` was semantically equal to + // `current` but not byte-equal. Let's retry with `previous` as the new + // `current`. + current = previous; + current_raw = previous_raw; + } + } } }, { let guard = lock(dst as usize).write(); - if byte_eq(&*dst, ¤t) { + if T::eq(&*dst, ¤t) { Ok(ptr::replace(dst, new)) } else { let val = ptr::read(dst); diff --git a/third_party/rust/crossbeam-utils/src/atomic/mod.rs b/third_party/rust/crossbeam-utils/src/atomic/mod.rs index 82e5ece8bcb0..39f6be044f01 100644 --- a/third_party/rust/crossbeam-utils/src/atomic/mod.rs +++ b/third_party/rust/crossbeam-utils/src/atomic/mod.rs @@ -1,5 +1,23 @@ +cfg_if! { + // Use "wide" sequence lock if the pointer width <= 32 for preventing its counter against wrap + // around. + // + // We are ignoring too wide architectures (pointer width >= 256), since such a system will not + // appear in a conceivable future. + // + // In narrow architectures (pointer width <= 16), the counter is still <= 32-bit and may be + // vulnerable to wrap around. But it's mostly okay, since in such a primitive hardware, the + // counter will not be increased that fast. 
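+ // [Editor's sketch, illustrative comment only] Readers use a SeqLock
+ // optimistically while writers serialize through it, roughly:
+ //
+ //     let lock = lock(src as usize);      // pick a striped lock by address
+ //     loop {
+ //         if let Some(stamp) = lock.optimistic_read() {
+ //             let val = ptr::read(src);   // speculative read, may race
+ //             if lock.validate_read(stamp) {
+ //                 return val;             // stamp unchanged, no writer ran
+ //             }
+ //         }
+ //         // stamp changed or a write was in progress, so retry
+ //     }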
+ if #[cfg(any(target_pointer_width = "64", target_pointer_width = "128"))] { + mod seq_lock; + } else { + #[path = "seq_lock_wide.rs"] + mod seq_lock; + } +} + mod atomic_cell; mod consume; diff --git a/third_party/rust/crossbeam-utils/src/atomic/seq_lock.rs b/third_party/rust/crossbeam-utils/src/atomic/seq_lock.rs new file mode 100644 index 000000000000..f28d6ec58165 --- /dev/null +++ b/third_party/rust/crossbeam-utils/src/atomic/seq_lock.rs @@ -0,0 +1,88 @@ +use core::sync::atomic::{self, AtomicUsize, Ordering}; + +use Backoff; + + +pub struct SeqLock { + + + + + state: AtomicUsize, +} + +impl SeqLock { + pub const INIT: Self = Self { + state: AtomicUsize::new(0), + }; + + + + + #[inline] + pub fn optimistic_read(&self) -> Option { + let state = self.state.load(Ordering::Acquire); + if state == 1 { + None + } else { + Some(state) + } + } + + + + + + #[inline] + pub fn validate_read(&self, stamp: usize) -> bool { + atomic::fence(Ordering::Acquire); + self.state.load(Ordering::Relaxed) == stamp + } + + + #[inline] + pub fn write(&'static self) -> SeqLockWriteGuard { + let backoff = Backoff::new(); + loop { + let previous = self.state.swap(1, Ordering::Acquire); + + if previous != 1 { + atomic::fence(Ordering::Release); + + return SeqLockWriteGuard { + lock: self, + state: previous, + }; + } + + backoff.snooze(); + } + } +} + + +pub struct SeqLockWriteGuard { + + lock: &'static SeqLock, + + + state: usize, +} + +impl SeqLockWriteGuard { + + #[inline] + pub fn abort(self) { + self.lock.state.store(self.state, Ordering::Release); + } +} + +impl Drop for SeqLockWriteGuard { + #[inline] + fn drop(&mut self) { + + self.lock + .state + .store(self.state.wrapping_add(2), Ordering::Release); + } +} diff --git a/third_party/rust/crossbeam-utils/src/atomic/seq_lock_wide.rs b/third_party/rust/crossbeam-utils/src/atomic/seq_lock_wide.rs new file mode 100644 index 000000000000..1423aa61c888 --- /dev/null +++ b/third_party/rust/crossbeam-utils/src/atomic/seq_lock_wide.rs @@ -0,0 +1,132 @@ +use core::sync::atomic::{self, AtomicUsize, Ordering}; + +use Backoff; + + + + + +pub struct SeqLock { + + state_hi: AtomicUsize, + + + + + + state_lo: AtomicUsize, +} + +impl SeqLock { + pub const INIT: Self = Self { + state_hi: AtomicUsize::new(0), + state_lo: AtomicUsize::new(0), + }; + + + + + #[inline] + pub fn optimistic_read(&self) -> Option<(usize, usize)> { + + + + + + + let state_hi = self.state_hi.load(Ordering::Acquire); + let state_lo = self.state_lo.load(Ordering::Acquire); + if state_lo == 1 { + None + } else { + Some((state_hi, state_lo)) + } + } + + + + + + #[inline] + pub fn validate_read(&self, stamp: (usize, usize)) -> bool { + + + + atomic::fence(Ordering::Acquire); + + + + + + + let state_lo = self.state_lo.load(Ordering::Acquire); + + + + let state_hi = self.state_hi.load(Ordering::Relaxed); + + + + + (state_hi, state_lo) == stamp + } + + + #[inline] + pub fn write(&'static self) -> SeqLockWriteGuard { + let backoff = Backoff::new(); + loop { + let previous = self.state_lo.swap(1, Ordering::Acquire); + + if previous != 1 { + + + atomic::fence(Ordering::Release); + + return SeqLockWriteGuard { + lock: self, + state_lo: previous, + }; + } + + backoff.snooze(); + } + } +} + + +pub struct SeqLockWriteGuard { + + lock: &'static SeqLock, + + + state_lo: usize, +} + +impl SeqLockWriteGuard { + + #[inline] + pub fn abort(self) { + self.lock.state_lo.store(self.state_lo, Ordering::Release); + } +} + +impl Drop for SeqLockWriteGuard { + #[inline] + fn drop(&mut self) { + let state_lo = 
self.state_lo.wrapping_add(2); + + + + + if state_lo == 0 { + let state_hi = self.lock.state_hi.load(Ordering::Relaxed); + self.lock.state_hi.store(state_hi.wrapping_add(1), Ordering::Release); + } + + + + + self.lock.state_lo.store(state_lo, Ordering::Release); + } +} diff --git a/third_party/rust/crossbeam-utils/src/backoff.rs b/third_party/rust/crossbeam-utils/src/backoff.rs index fb09fd8806b6..6fcc935ddc82 100644 --- a/third_party/rust/crossbeam-utils/src/backoff.rs +++ b/third_party/rust/crossbeam-utils/src/backoff.rs @@ -93,9 +93,7 @@ impl Backoff { #[inline] pub fn new() -> Self { - Backoff { - step: Cell::new(0), - } + Backoff { step: Cell::new(0) } } diff --git a/third_party/rust/crossbeam-utils/src/cache_padded.rs b/third_party/rust/crossbeam-utils/src/cache_padded.rs index 62258974fe5a..26fa2f0ee37e 100644 --- a/third_party/rust/crossbeam-utils/src/cache_padded.rs +++ b/third_party/rust/crossbeam-utils/src/cache_padded.rs @@ -39,6 +39,14 @@ use core::ops::{Deref, DerefMut}; + + + + + + + + @@ -49,7 +57,14 @@ use core::ops::{Deref, DerefMut}; #[derive(Clone, Copy, Default, Hash, PartialEq, Eq)] -#[repr(align(64))] + + + + + + +#[cfg_attr(target_arch = "x86_64", repr(align(128)))] +#[cfg_attr(not(target_arch = "x86_64"), repr(align(64)))] pub struct CachePadded { value: T, } diff --git a/third_party/rust/crossbeam-utils/src/lib.rs b/third_party/rust/crossbeam-utils/src/lib.rs index 27524f6bde19..d07c7fba1d55 100644 --- a/third_party/rust/crossbeam-utils/src/lib.rs +++ b/third_party/rust/crossbeam-utils/src/lib.rs @@ -29,7 +29,6 @@ #![warn(missing_docs)] #![warn(missing_debug_implementations)] #![cfg_attr(not(feature = "std"), no_std)] -#![cfg_attr(feature = "nightly", feature(alloc))] #![cfg_attr(feature = "nightly", feature(cfg_target_has_atomic))] #[macro_use] @@ -38,16 +37,14 @@ extern crate cfg_if; extern crate core; cfg_if! { - if #[cfg(feature = "nightly")] { + if #[cfg(feature = "alloc")] { extern crate alloc; - } else { - mod alloc { - extern crate std; - pub use self::std::*; - } + } else if #[cfg(feature = "std")] { + extern crate std as alloc; } } +#[cfg_attr(feature = "nightly", cfg(target_has_atomic = "ptr"))] pub mod atomic; mod cache_padded; diff --git a/third_party/rust/crossbeam-utils/src/thread.rs b/third_party/rust/crossbeam-utils/src/thread.rs index 6d905314b7d4..ee7687b88e4d 100644 --- a/third_party/rust/crossbeam-utils/src/thread.rs +++ b/third_party/rust/crossbeam-utils/src/thread.rs @@ -423,10 +423,10 @@ impl<'scope, 'env> ScopedThreadBuilder<'scope, 'env> { let closure = move || closure.take().unwrap()(); - let closure: Box = Box::new(closure); - let closure: Box = unsafe { mem::transmute(closure) }; + let closure: Box = Box::new(closure); + let closure: Box = unsafe { mem::transmute(closure) }; - + // Finally, spawn the closure. let mut closure = closure; self.builder.spawn(move || closure())? }; @@ -436,7 +436,7 @@ impl<'scope, 'env> ScopedThreadBuilder<'scope, 'env> { (handle, thread) }; - + // Add the handle to the shared list of join handles. self.scope.handles.lock().unwrap().push(Arc::clone(&handle)); Ok(ScopedJoinHandle { @@ -451,72 +451,72 @@ impl<'scope, 'env> ScopedThreadBuilder<'scope, 'env> { unsafe impl<'scope, T> Send for ScopedJoinHandle<'scope, T> {} unsafe impl<'scope, T> Sync for ScopedJoinHandle<'scope, T> {} - +/// A handle that can be used to join its scoped thread. pub struct ScopedJoinHandle<'scope, T> { - + /// A join handle to the spawned thread. handle: SharedOption>, - + /// Holds the result of the inner closure. 
result: SharedOption, - + /// A handle to the the spawned thread. thread: thread::Thread, - + /// Borrows the parent scope with lifetime `'scope`. _marker: PhantomData<&'scope ()>, } impl<'scope, T> ScopedJoinHandle<'scope, T> { - - - - - - - - - - - - - - - - - - - - - - - - - - - + /// Waits for the thread to finish and returns its result. + /// + /// If the child thread panics, an error is returned. + /// + /// # Panics + /// + /// This function may panic on some platforms if a thread attempts to join itself or otherwise + /// may create a deadlock with joining threads. + /// + /// # Examples + /// + /// ``` + /// use crossbeam_utils::thread; + /// + /// thread::scope(|s| { + /// let handle1 = s.spawn(|_| println!("I'm a happy thread :)")); + /// let handle2 = s.spawn(|_| panic!("I'm a sad thread :(")); + /// + /// // Join the first thread and verify that it succeeded. + /// let res = handle1.join(); + /// assert!(res.is_ok()); + /// + /// // Join the second thread and verify that it panicked. + /// let res = handle2.join(); + /// assert!(res.is_err()); + /// }).unwrap(); + /// ``` pub fn join(self) -> thread::Result { - - + // Take out the handle. The handle will surely be available because the root scope waits + // for nested scopes before joining remaining threads. let handle = self.handle.lock().unwrap().take().unwrap(); - + // Join the thread and then take the result out of its inner closure. handle .join() .map(|()| self.result.lock().unwrap().take().unwrap()) } - - - - - - - - - - - - + /// Returns a handle to the underlying thread. + /// + /// # Examples + /// + /// ``` + /// use crossbeam_utils::thread; + /// + /// thread::scope(|s| { + /// let handle = s.spawn(|_| println!("A child thread is running")); + /// println!("The child thread ID: {:?}", handle.thread().id()); + /// }).unwrap(); + /// ``` pub fn thread(&self) -> &thread::Thread { &self.thread } diff --git a/third_party/rust/crossbeam-utils/tests/atomic_cell.rs b/third_party/rust/crossbeam-utils/tests/atomic_cell.rs index 37c901f04288..597c5fd111f8 100644 --- a/third_party/rust/crossbeam-utils/tests/atomic_cell.rs +++ b/third_party/rust/crossbeam-utils/tests/atomic_cell.rs @@ -206,3 +206,29 @@ fn modular_usize() { assert_eq!(a.compare_exchange(Foo(10), Foo(15)), Ok(Foo(100))); assert_eq!(a.load().0, 15); } + +#[test] +fn garbage_padding() { + #[derive(Copy, Clone, Eq, PartialEq)] + struct Object { + a: i64, + b: i32, + } + + let cell = AtomicCell::new(Object { a: 0, b: 0 }); + let _garbage = [0xfe, 0xfe, 0xfe, 0xfe, 0xfe]; + let next = Object { a: 0, b: 0 }; + + let prev = cell.load(); + assert!(cell.compare_exchange(prev, next).is_ok()); + println!(); +} + +#[cfg(has_min_const_fn)] +#[test] +fn const_atomic_cell_new() { + static CELL: AtomicCell = AtomicCell::new(0); + + CELL.store(1); + assert_eq!(CELL.load(), 1); +} diff --git a/third_party/rust/crossbeam-utils/tests/parker.rs b/third_party/rust/crossbeam-utils/tests/parker.rs index fab07b3a3c7a..3f4514626a2a 100644 --- a/third_party/rust/crossbeam-utils/tests/parker.rs +++ b/third_party/rust/crossbeam-utils/tests/parker.rs @@ -37,6 +37,7 @@ fn park_timeout_unpark_called_other_thread() { }); p.park_timeout(Duration::from_millis(u32::MAX as u64)); - }).unwrap(); + }) + .unwrap(); } } diff --git a/third_party/rust/crossbeam-utils/tests/sharded_lock.rs b/third_party/rust/crossbeam-utils/tests/sharded_lock.rs index 08dc966c8032..b155ec37bbf2 100644 --- a/third_party/rust/crossbeam-utils/tests/sharded_lock.rs +++ b/third_party/rust/crossbeam-utils/tests/sharded_lock.rs 
@@ -1,10 +1,10 @@ extern crate crossbeam_utils; extern crate rand; +use std::sync::atomic::{AtomicUsize, Ordering}; use std::sync::mpsc::channel; -use std::thread; use std::sync::{Arc, TryLockError}; -use std::sync::atomic::{AtomicUsize, Ordering}; +use std::thread; use crossbeam_utils::sync::ShardedLock; use rand::Rng; @@ -55,7 +55,8 @@ fn arc_poison_wr() { let _: Result<(), _> = thread::spawn(move || { let _lock = arc2.write().unwrap(); panic!(); - }).join(); + }) + .join(); assert!(arc.read().is_err()); } @@ -67,7 +68,8 @@ fn arc_poison_ww() { let _: Result<(), _> = thread::spawn(move || { let _lock = arc2.write().unwrap(); panic!(); - }).join(); + }) + .join(); assert!(arc.write().is_err()); assert!(arc.is_poisoned()); } @@ -79,7 +81,8 @@ fn arc_no_poison_rr() { let _: Result<(), _> = thread::spawn(move || { let _lock = arc2.read().unwrap(); panic!(); - }).join(); + }) + .join(); let lock = arc.read().unwrap(); assert_eq!(*lock, 1); } @@ -90,7 +93,8 @@ fn arc_no_poison_sl() { let _: Result<(), _> = thread::spawn(move || { let _lock = arc2.read().unwrap(); panic!() - }).join(); + }) + .join(); let lock = arc.write().unwrap(); assert_eq!(*lock, 1); } @@ -149,7 +153,8 @@ fn arc_access_in_unwind() { } let _u = Unwinder { i: arc2 }; panic!(); - }).join(); + }) + .join(); let lock = arc.read().unwrap(); assert_eq!(*lock, 2); } @@ -174,7 +179,10 @@ fn try_write() { let write_result = lock.try_write(); match write_result { Err(TryLockError::WouldBlock) => (), - Ok(_) => assert!(false, "try_write should not succeed while read_guard is in scope"), + Ok(_) => assert!( + false, + "try_write should not succeed while read_guard is in scope" + ), Err(_) => assert!(false, "unexpected error"), } @@ -212,7 +220,8 @@ fn test_into_inner_poison() { let _ = thread::spawn(move || { let _lock = m2.write().unwrap(); panic!("test panic in inner thread to poison ShardedLock"); - }).join(); + }) + .join(); assert!(m.is_poisoned()); match Arc::try_unwrap(m).unwrap().into_inner() { @@ -235,7 +244,8 @@ fn test_get_mut_poison() { let _ = thread::spawn(move || { let _lock = m2.write().unwrap(); panic!("test panic in inner thread to poison ShardedLock"); - }).join(); + }) + .join(); assert!(m.is_poisoned()); match Arc::try_unwrap(m).unwrap().get_mut() { diff --git a/third_party/rust/crossbeam-utils/tests/thread.rs b/third_party/rust/crossbeam-utils/tests/thread.rs index e34a0221d990..c4341615efa5 100644 --- a/third_party/rust/crossbeam-utils/tests/thread.rs +++ b/third_party/rust/crossbeam-utils/tests/thread.rs @@ -23,7 +23,8 @@ fn join() { panic!("\"My honey is running out!\", said Pooh."); }); assert!(panic_handle.join().is_err()); - }).unwrap(); + }) + .unwrap(); assert_eq!(1, counter.load(Ordering::Relaxed)); @@ -38,7 +39,8 @@ fn counter() { counter.fetch_add(1, Ordering::Relaxed); }); } - }).unwrap(); + }) + .unwrap(); assert_eq!(THREADS, counter.load(Ordering::Relaxed)); } @@ -54,9 +56,11 @@ fn counter_builder() { .stack_size(SMALL_STACK_SIZE) .spawn(|_| { counter.fetch_add(1, Ordering::Relaxed); - }).unwrap(); + }) + .unwrap(); } - }).unwrap(); + }) + .unwrap(); assert_eq!(THREADS, counter.load(Ordering::Relaxed)); } @@ -95,7 +99,7 @@ fn panic_twice() { let err = result.unwrap_err(); let vec = err - .downcast_ref::>>() + .downcast_ref::>>() .unwrap(); assert_eq!(2, vec.len()); @@ -115,7 +119,7 @@ fn panic_many() { let err = result.unwrap_err(); let vec = err - .downcast_ref::>>() + .downcast_ref::>>() .unwrap(); assert_eq!(3, vec.len()); @@ -157,7 +161,8 @@ fn nesting() { wrapper.recurse(scope, 5); }); }); - 
}).unwrap(); + }) + .unwrap(); } #[test] @@ -171,5 +176,6 @@ fn join_nested() { }); sleep(Duration::from_millis(100)); - }).unwrap(); + }) + .unwrap(); } diff --git a/third_party/rust/d3d12/.cargo-checksum.json b/third_party/rust/d3d12/.cargo-checksum.json new file mode 100644 index 000000000000..529dd953e96b --- /dev/null +++ b/third_party/rust/d3d12/.cargo-checksum.json @@ -0,0 +1 @@ +{"files":{"Cargo.toml":"9dcf05b4a1c295a2775190bcb1df8cb3ffe8f39a5e91ea551d7af84793f941e7","README.md":"4708fe5036c6bb6902f1b2d191a99117ba436ffec5678a0dfc6c99a9da5f2f1c","appveyor.yml":"14da30f5712e0eaec1acbb74649f391597095d8f8aaccf7a528a358936e8f98b","bors.toml":"31d348faf24d2bac765198bce62b4c4f1d6987e567eaed3d380a6704fec5183c","src/com.rs":"874a6ecea743a1f37f10649d71850cad807bd87cce883479cdb106d57aea863b","src/command_allocator.rs":"cda791b138019bae082fe664bd735de1e5fa6b4ee979e180e1945d50b4858ef0","src/command_list.rs":"a50a8bdebd859cfbd64c02640468665c221bb107b3ae5c1a30a1de20f4e7a299","src/debug.rs":"b26d102c033933c7935cedadfa3ea69e6b4ab95d58d5165911debec729b8bdb5","src/descriptor.rs":"93b4f24565494fb1aecf5cc8f677d3fc56bbaf742717b77d9846259fa300891e","src/device.rs":"2738fce770e3392c263f2745d1bdcb40b80b60288fb08e4904419000a85bffed","src/dxgi.rs":"93547cdf0c90dd0681b5b5dfa3ebcb6f9764728537286b546d708b4e281bad06","src/heap.rs":"ee397804e083d04486fc6e48d71acdce341ee717cc19fa5c370455d7bf7c042b","src/lib.rs":"d1421cacbdc80528eb1086a6bb1d778afd70b2746ba4ab905d9c067179601e41","src/pso.rs":"073a936f7004c813b2e19fe3c5a541d0554327c598ef6aeadb774cd3c78e9743","src/query.rs":"ea36425db9a27422c361c706f3521341fa3a6fe34ef2d211ff7cfbe792c3f93b","src/queue.rs":"d0cbecfb3e538dd37e573a76a4bd2c78cde33b17c96af5b94672f992a195ede6","src/resource.rs":"cbe66c54ba11c994f644235b725402c7180113d0ed965f1878d64f824cd437df","src/sync.rs":"a6921a1f64eb0153e52e22c3c1cc12c7381c2823ed47a0f7de5834f14f3acd2b"},"package":"bc7ed48e89905e5e146bcc1951cc3facb9e44aea9adf5dc01078cda1bd24b662"} \ No newline at end of file diff --git a/third_party/rust/d3d12/Cargo.toml b/third_party/rust/d3d12/Cargo.toml new file mode 100644 index 000000000000..9884e7608c3d --- /dev/null +++ b/third_party/rust/d3d12/Cargo.toml @@ -0,0 +1,37 @@ +# THIS FILE IS AUTOMATICALLY GENERATED BY CARGO +# +# When uploading crates to the registry Cargo will automatically +# "normalize" Cargo.toml files for maximal compatibility +# with all versions of Cargo and also rewrite `path` dependencies +# to registry (e.g., crates.io) dependencies +# +# If you believe there's an error in this file please file an +# issue against the rust-lang/cargo repository. 
If you're +# editing this file be aware that the upstream Cargo.toml +# will likely look very different (and much more reasonable) + +[package] +name = "d3d12" +version = "0.3.0" +authors = ["msiglreith ", "Dzmitry Malyshau "] +description = "Low level D3D12 API wrapper" +documentation = "https://docs.rs/d3d12" +keywords = ["windows", "graphics"] +categories = ["memory-management"] +license = "MIT OR Apache-2.0" +repository = "https://github.com/gfx-rs/d3d12-rs" +[package.metadata.docs.rs] +default-target = "x86_64-pc-windows-msvc" +[dependencies.bitflags] +version = "1" + +[dependencies.libloading] +version = "0.5" +optional = true + +[dependencies.winapi] +version = "0.3" +features = ["dxgi1_2", "dxgi1_3", "dxgi1_4", "dxgidebug", "d3d12", "d3d12sdklayers", "d3dcommon", "d3dcompiler", "dxgiformat", "synchapi", "winerror"] + +[features] +implicit-link = [] diff --git a/third_party/rust/d3d12/README.md b/third_party/rust/d3d12/README.md new file mode 100644 index 000000000000..d5ad38191ccf --- /dev/null +++ b/third_party/rust/d3d12/README.md @@ -0,0 +1,2 @@ +# d3d12-rs +Rust wrapper for D3D12 diff --git a/third_party/rust/d3d12/appveyor.yml b/third_party/rust/d3d12/appveyor.yml new file mode 100644 index 000000000000..89923b55a40c --- /dev/null +++ b/third_party/rust/d3d12/appveyor.yml @@ -0,0 +1,29 @@ +skip_branch_with_pr: true +branches: + except: + - staging.tmp +environment: + global: + PATH: '%PATH%;C:\msys64\mingw64\bin;C:\msys64\usr\bin;%USERPROFILE%\.cargo\bin' + RUST_BACKTRACE: full + matrix: + - CHANNEL: stable + TARGET: x86_64-pc-windows-msvc + +skip_commits: + files: + - bors.toml + - '*.md' + +install: + - curl -sSf -o rustup-init.exe https://win.rustup.rs + - rustup-init -yv --default-toolchain %CHANNEL% --default-host %TARGET% + - rustc -vV + - cargo -vV + +build: false +test_script: + - cargo check + - cargo check --features libloading + - cargo check --features implicit-link + - cargo check --all-features diff --git a/third_party/rust/d3d12/bors.toml b/third_party/rust/d3d12/bors.toml new file mode 100644 index 000000000000..539c849e4dbc --- /dev/null +++ b/third_party/rust/d3d12/bors.toml @@ -0,0 +1,5 @@ +status = [ + "continuous-integration/appveyor/branch" +] + +timeout_sec = 18000 # 5 hours diff --git a/third_party/rust/d3d12/src/com.rs b/third_party/rust/d3d12/src/com.rs new file mode 100644 index 000000000000..1d118ce87a9d --- /dev/null +++ b/third_party/rust/d3d12/src/com.rs @@ -0,0 +1,102 @@ +use std::fmt; +use std::hash::{Hash, Hasher}; +use std::ops::Deref; +use std::ptr; +use winapi::ctypes::c_void; +use winapi::um::unknwnbase::IUnknown; +use winapi::Interface; +use D3DResult; + +#[repr(transparent)] +pub struct WeakPtr(*mut T); + +impl WeakPtr { + pub fn null() -> Self { + WeakPtr(ptr::null_mut()) + } + + pub unsafe fn from_raw(raw: *mut T) -> Self { + WeakPtr(raw) + } + + pub fn is_null(&self) -> bool { + self.0.is_null() + } + + pub fn as_ptr(&self) -> *const T { + self.0 + } + + pub fn as_mut_ptr(&self) -> *mut T { + self.0 + } + + pub unsafe fn mut_void(&mut self) -> *mut *mut c_void { + &mut self.0 as *mut *mut _ as *mut *mut _ + } +} + +impl WeakPtr { + pub unsafe fn as_unknown(&self) -> &IUnknown { + debug_assert!(!self.is_null()); + &*(self.0 as *mut IUnknown) + } + + + pub unsafe fn cast(&self) -> D3DResult> + where + U: Interface, + { + let mut obj = WeakPtr::::null(); + let hr = self + .as_unknown() + .QueryInterface(&U::uuidof(), obj.mut_void()); + (obj, hr) + } + + + + pub unsafe fn destroy(&self) { + self.as_unknown().Release(); + } +} + +impl 
Clone for WeakPtr { + fn clone(&self) -> Self { + WeakPtr(self.0) + } +} + +impl Copy for WeakPtr {} + +impl Deref for WeakPtr { + type Target = T; + fn deref(&self) -> &T { + debug_assert!(!self.is_null()); + unsafe { &*self.0 } + } +} + +impl fmt::Debug for WeakPtr { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + write!(f, "WeakPtr( ptr: {:?} )", self.0) + } +} + +impl PartialEq<*mut T> for WeakPtr { + fn eq(&self, other: &*mut T) -> bool { + self.0 == *other + } +} + +impl PartialEq for WeakPtr { + fn eq(&self, other: &Self) -> bool { + self.0 == other.0 + } +} + +impl Hash for WeakPtr { + fn hash(&self, state: &mut H) { + self.0.hash(state); + } +} diff --git a/third_party/rust/d3d12/src/command_allocator.rs b/third_party/rust/d3d12/src/command_allocator.rs new file mode 100644 index 000000000000..0ec12dc5875b --- /dev/null +++ b/third_party/rust/d3d12/src/command_allocator.rs @@ -0,0 +1,14 @@ + + +use com::WeakPtr; +use winapi::um::d3d12; + +pub type CommandAllocator = WeakPtr; + +impl CommandAllocator { + pub fn reset(&self) { + unsafe { + self.Reset(); + } + } +} diff --git a/third_party/rust/d3d12/src/command_list.rs b/third_party/rust/d3d12/src/command_list.rs new file mode 100644 index 000000000000..44990ab3a7c5 --- /dev/null +++ b/third_party/rust/d3d12/src/command_list.rs @@ -0,0 +1,328 @@ + + +use com::WeakPtr; +use resource::DiscardRegion; +use std::{mem, ptr}; +use winapi::um::d3d12; +use { + CommandAllocator, CpuDescriptor, DescriptorHeap, Format, GpuAddress, GpuDescriptor, IndexCount, + InstanceCount, PipelineState, Rect, Resource, RootSignature, VertexCount, VertexOffset, + WorkGroupCount, HRESULT, +}; + +#[repr(u32)] +#[derive(Clone, Copy)] +pub enum CmdListType { + Direct = d3d12::D3D12_COMMAND_LIST_TYPE_DIRECT, + Bundle = d3d12::D3D12_COMMAND_LIST_TYPE_BUNDLE, + Compute = d3d12::D3D12_COMMAND_LIST_TYPE_COMPUTE, + Copy = d3d12::D3D12_COMMAND_LIST_TYPE_COPY, + + +} + +bitflags! 
{ + pub struct ClearFlags: u32 { + const DEPTH = d3d12::D3D12_CLEAR_FLAG_DEPTH; + const STENCIL = d3d12::D3D12_CLEAR_FLAG_STENCIL; + } +} + +#[repr(transparent)] +pub struct IndirectArgument(d3d12::D3D12_INDIRECT_ARGUMENT_DESC); + +impl IndirectArgument { + pub fn draw() -> Self { + IndirectArgument(d3d12::D3D12_INDIRECT_ARGUMENT_DESC { + Type: d3d12::D3D12_INDIRECT_ARGUMENT_TYPE_DRAW, + ..unsafe { mem::zeroed() } + }) + } + + pub fn draw_indexed() -> Self { + IndirectArgument(d3d12::D3D12_INDIRECT_ARGUMENT_DESC { + Type: d3d12::D3D12_INDIRECT_ARGUMENT_TYPE_DRAW_INDEXED, + ..unsafe { mem::zeroed() } + }) + } + + pub fn dispatch() -> Self { + IndirectArgument(d3d12::D3D12_INDIRECT_ARGUMENT_DESC { + Type: d3d12::D3D12_INDIRECT_ARGUMENT_TYPE_DISPATCH, + ..unsafe { mem::zeroed() } + }) + } + + +} + +#[repr(transparent)] +pub struct ResourceBarrier(d3d12::D3D12_RESOURCE_BARRIER); + +impl ResourceBarrier { + pub fn transition( + resource: Resource, + subresource: u32, + state_before: d3d12::D3D12_RESOURCE_STATES, + state_after: d3d12::D3D12_RESOURCE_STATES, + flags: d3d12::D3D12_RESOURCE_BARRIER_FLAGS, + ) -> Self { + let mut barrier = d3d12::D3D12_RESOURCE_BARRIER { + Type: d3d12::D3D12_RESOURCE_BARRIER_TYPE_TRANSITION, + Flags: flags, + ..unsafe { mem::zeroed() } + }; + unsafe { + *barrier.u.Transition_mut() = d3d12::D3D12_RESOURCE_TRANSITION_BARRIER { + pResource: resource.as_mut_ptr(), + Subresource: subresource, + StateBefore: state_before, + StateAfter: state_after, + }; + } + ResourceBarrier(barrier) + } +} + +pub type CommandSignature = WeakPtr; +pub type CommandList = WeakPtr; +pub type GraphicsCommandList = WeakPtr; + +impl GraphicsCommandList { + pub fn as_list(&self) -> CommandList { + unsafe { CommandList::from_raw(self.as_mut_ptr() as *mut _) } + } + + pub fn close(&self) -> HRESULT { + unsafe { self.Close() } + } + + pub fn reset(&self, allocator: CommandAllocator, initial_pso: PipelineState) -> HRESULT { + unsafe { self.Reset(allocator.as_mut_ptr(), initial_pso.as_mut_ptr()) } + } + + pub fn discard_resource(&self, resource: Resource, region: DiscardRegion) { + debug_assert!(region.subregions.start < region.subregions.end); + unsafe { + self.DiscardResource( + resource.as_mut_ptr(), + &d3d12::D3D12_DISCARD_REGION { + NumRects: region.rects.len() as _, + pRects: region.rects.as_ptr(), + FirstSubresource: region.subregions.start, + NumSubresources: region.subregions.end - region.subregions.start - 1, + }, + ); + } + } + + pub fn clear_depth_stencil_view( + &self, + dsv: CpuDescriptor, + flags: ClearFlags, + depth: f32, + stencil: u8, + rects: &[Rect], + ) { + let num_rects = rects.len() as _; + let rects = if num_rects > 0 { + rects.as_ptr() + } else { + ptr::null() + }; + unsafe { + self.ClearDepthStencilView(dsv, flags.bits(), depth, stencil, num_rects, rects); + } + } + + pub fn clear_render_target_view(&self, rtv: CpuDescriptor, color: [f32; 4], rects: &[Rect]) { + let num_rects = rects.len() as _; + let rects = if num_rects > 0 { + rects.as_ptr() + } else { + ptr::null() + }; + unsafe { + self.ClearRenderTargetView(rtv, &color, num_rects, rects); + } + } + + pub fn dispatch(&self, count: WorkGroupCount) { + unsafe { + self.Dispatch(count[0], count[1], count[2]); + } + } + + pub fn draw( + &self, + num_vertices: VertexCount, + num_instances: InstanceCount, + start_vertex: VertexCount, + start_instance: InstanceCount, + ) { + unsafe { + self.DrawInstanced(num_vertices, num_instances, start_vertex, start_instance); + } + } + + pub fn draw_indexed( + &self, + num_indices: 
IndexCount, + num_instances: InstanceCount, + start_index: IndexCount, + base_vertex: VertexOffset, + start_instance: InstanceCount, + ) { + unsafe { + self.DrawIndexedInstanced( + num_indices, + num_instances, + start_index, + base_vertex, + start_instance, + ); + } + } + + pub fn set_index_buffer(&self, gpu_address: GpuAddress, size: u32, format: Format) { + let mut ibv = d3d12::D3D12_INDEX_BUFFER_VIEW { + BufferLocation: gpu_address, + SizeInBytes: size, + Format: format, + }; + unsafe { + self.IASetIndexBuffer(&mut ibv); + } + } + + pub fn set_blend_factor(&self, factor: [f32; 4]) { + unsafe { + self.OMSetBlendFactor(&factor); + } + } + + pub fn set_stencil_reference(&self, reference: u32) { + unsafe { + self.OMSetStencilRef(reference); + } + } + + pub fn set_pipeline_state(&self, pso: PipelineState) { + unsafe { + self.SetPipelineState(pso.as_mut_ptr()); + } + } + + pub fn execute_bundle(&self, bundle: GraphicsCommandList) { + unsafe { + self.ExecuteBundle(bundle.as_mut_ptr()); + } + } + + pub fn set_descriptor_heaps(&self, heaps: &[DescriptorHeap]) { + unsafe { + self.SetDescriptorHeaps( + heaps.len() as _, + heaps.as_ptr() as *mut &DescriptorHeap as *mut _, + ); + } + } + + pub fn set_compute_root_signature(&self, signature: RootSignature) { + unsafe { + self.SetComputeRootSignature(signature.as_mut_ptr()); + } + } + + pub fn set_graphics_root_signature(&self, signature: RootSignature) { + unsafe { + self.SetGraphicsRootSignature(signature.as_mut_ptr()); + } + } + + pub fn set_compute_root_descriptor_table( + &self, + root_index: u32, + base_descriptor: GpuDescriptor, + ) { + unsafe { + self.SetComputeRootDescriptorTable(root_index, base_descriptor); + } + } + + pub fn set_compute_root_constant_buffer_view( + &self, + root_index: u32, + buffer_location: GpuAddress, + ) { + unsafe { + self.SetComputeRootConstantBufferView(root_index, buffer_location); + } + } + + pub fn set_compute_root_shader_resource_view( + &self, + root_index: u32, + buffer_location: GpuAddress, + ) { + unsafe { + self.SetComputeRootShaderResourceView(root_index, buffer_location); + } + } + + pub fn set_compute_root_unordered_access_view( + &self, + root_index: u32, + buffer_location: GpuAddress, + ) { + unsafe { + self.SetComputeRootUnorderedAccessView(root_index, buffer_location); + } + } + + pub fn set_graphics_root_descriptor_table( + &self, + root_index: u32, + base_descriptor: GpuDescriptor, + ) { + unsafe { + self.SetGraphicsRootDescriptorTable(root_index, base_descriptor); + } + } + + pub fn set_graphics_root_constant_buffer_view( + &self, + root_index: u32, + buffer_location: GpuAddress, + ) { + unsafe { + self.SetGraphicsRootConstantBufferView(root_index, buffer_location); + } + } + + pub fn set_graphics_root_shader_resource_view( + &self, + root_index: u32, + buffer_location: GpuAddress, + ) { + unsafe { + self.SetGraphicsRootShaderResourceView(root_index, buffer_location); + } + } + + pub fn set_graphics_root_unordered_access_view( + &self, + root_index: u32, + buffer_location: GpuAddress, + ) { + unsafe { + self.SetGraphicsRootUnorderedAccessView(root_index, buffer_location); + } + } + + pub fn resource_barrier(&self, barriers: &[ResourceBarrier]) { + unsafe { + self.ResourceBarrier(barriers.len() as _, barriers.as_ptr() as _) + } + } +} diff --git a/third_party/rust/d3d12/src/debug.rs b/third_party/rust/d3d12/src/debug.rs new file mode 100644 index 000000000000..822b8fbf91f0 --- /dev/null +++ b/third_party/rust/d3d12/src/debug.rs @@ -0,0 +1,46 @@ +use com::WeakPtr; +#[cfg(any(feature = "libloading", 
feature = "implicit-link"))] +use winapi::Interface as _; +use winapi::um::d3d12sdklayers; + +pub type Debug = WeakPtr; + +#[cfg(feature = "libloading")] +impl crate::D3D12Lib { + pub fn get_debug_interface(&self) -> libloading::Result> { + type Fun = extern "system" fn( + winapi::shared::guiddef::REFIID, + *mut *mut winapi::ctypes::c_void, + ) -> crate::HRESULT; + + let mut debug = Debug::null(); + let hr = unsafe { + let func: libloading::Symbol = self.lib.get(b"D3D12GetDebugInterface")?; + func( + &d3d12sdklayers::ID3D12Debug::uuidof(), + debug.mut_void(), + ) + }; + + Ok((debug, hr)) + } +} + +impl Debug { + #[cfg(feature = "implicit-link")] + pub fn get_interface() -> crate::D3DResult { + let mut debug = Debug::null(); + let hr = unsafe { + winapi::um::d3d12::D3D12GetDebugInterface( + &d3d12sdklayers::ID3D12Debug::uuidof(), + debug.mut_void(), + ) + }; + + (debug, hr) + } + + pub fn enable_layer(&self) { + unsafe { self.EnableDebugLayer() } + } +} diff --git a/third_party/rust/d3d12/src/descriptor.rs b/third_party/rust/d3d12/src/descriptor.rs new file mode 100644 index 000000000000..9d2efa09fc90 --- /dev/null +++ b/third_party/rust/d3d12/src/descriptor.rs @@ -0,0 +1,297 @@ +use com::WeakPtr; +use std::mem; +use std::ops::Range; +use winapi::shared::dxgiformat; +use winapi::um::d3d12; +use {Blob, D3DResult, Error, TextureAddressMode}; + +pub type CpuDescriptor = d3d12::D3D12_CPU_DESCRIPTOR_HANDLE; +pub type GpuDescriptor = d3d12::D3D12_GPU_DESCRIPTOR_HANDLE; + +#[derive(Clone, Copy, Debug)] +pub struct Binding { + pub register: u32, + pub space: u32, +} + +#[repr(u32)] +#[derive(Clone, Copy, Debug)] +pub enum DescriptorHeapType { + CbvSrvUav = d3d12::D3D12_DESCRIPTOR_HEAP_TYPE_CBV_SRV_UAV, + Sampler = d3d12::D3D12_DESCRIPTOR_HEAP_TYPE_SAMPLER, + Rtv = d3d12::D3D12_DESCRIPTOR_HEAP_TYPE_RTV, + Dsv = d3d12::D3D12_DESCRIPTOR_HEAP_TYPE_DSV, +} + +bitflags! 
{ + pub struct DescriptorHeapFlags: u32 { + const SHADER_VISIBLE = d3d12::D3D12_DESCRIPTOR_HEAP_FLAG_SHADER_VISIBLE; + } +} + +pub type DescriptorHeap = WeakPtr; + +impl DescriptorHeap { + pub fn start_cpu_descriptor(&self) -> CpuDescriptor { + unsafe { self.GetCPUDescriptorHandleForHeapStart() } + } + + pub fn start_gpu_descriptor(&self) -> GpuDescriptor { + unsafe { self.GetGPUDescriptorHandleForHeapStart() } + } +} + +#[repr(u32)] +#[derive(Clone, Copy, Debug)] +pub enum ShaderVisibility { + All = d3d12::D3D12_SHADER_VISIBILITY_ALL, + VS = d3d12::D3D12_SHADER_VISIBILITY_VERTEX, + HS = d3d12::D3D12_SHADER_VISIBILITY_HULL, + DS = d3d12::D3D12_SHADER_VISIBILITY_DOMAIN, + GS = d3d12::D3D12_SHADER_VISIBILITY_GEOMETRY, + PS = d3d12::D3D12_SHADER_VISIBILITY_PIXEL, +} + +#[repr(u32)] +#[derive(Clone, Copy, Debug)] +pub enum DescriptorRangeType { + SRV = d3d12::D3D12_DESCRIPTOR_RANGE_TYPE_SRV, + UAV = d3d12::D3D12_DESCRIPTOR_RANGE_TYPE_UAV, + CBV = d3d12::D3D12_DESCRIPTOR_RANGE_TYPE_CBV, + Sampler = d3d12::D3D12_DESCRIPTOR_RANGE_TYPE_SAMPLER, +} + +#[repr(transparent)] +pub struct DescriptorRange(d3d12::D3D12_DESCRIPTOR_RANGE); +impl DescriptorRange { + pub fn new(ty: DescriptorRangeType, count: u32, base_binding: Binding, offset: u32) -> Self { + DescriptorRange(d3d12::D3D12_DESCRIPTOR_RANGE { + RangeType: ty as _, + NumDescriptors: count, + BaseShaderRegister: base_binding.register, + RegisterSpace: base_binding.space, + OffsetInDescriptorsFromTableStart: offset, + }) + } +} + +#[repr(transparent)] +pub struct RootParameter(d3d12::D3D12_ROOT_PARAMETER); +impl RootParameter { + + pub fn descriptor_table(visibility: ShaderVisibility, ranges: &[DescriptorRange]) -> Self { + let mut param = d3d12::D3D12_ROOT_PARAMETER { + ParameterType: d3d12::D3D12_ROOT_PARAMETER_TYPE_DESCRIPTOR_TABLE, + ShaderVisibility: visibility as _, + ..unsafe { mem::zeroed() } + }; + + *unsafe { param.u.DescriptorTable_mut() } = d3d12::D3D12_ROOT_DESCRIPTOR_TABLE { + NumDescriptorRanges: ranges.len() as _, + pDescriptorRanges: ranges.as_ptr() as *const _, + }; + + RootParameter(param) + } + + pub fn constants(visibility: ShaderVisibility, binding: Binding, num: u32) -> Self { + let mut param = d3d12::D3D12_ROOT_PARAMETER { + ParameterType: d3d12::D3D12_ROOT_PARAMETER_TYPE_32BIT_CONSTANTS, + ShaderVisibility: visibility as _, + ..unsafe { mem::zeroed() } + }; + + *unsafe { param.u.Constants_mut() } = d3d12::D3D12_ROOT_CONSTANTS { + ShaderRegister: binding.register, + RegisterSpace: binding.space, + Num32BitValues: num, + }; + + RootParameter(param) + } + + fn descriptor( + ty: d3d12::D3D12_ROOT_PARAMETER_TYPE, + visibility: ShaderVisibility, + binding: Binding, + ) -> Self { + let mut param = d3d12::D3D12_ROOT_PARAMETER { + ParameterType: ty, + ShaderVisibility: visibility as _, + ..unsafe { mem::zeroed() } + }; + + *unsafe { param.u.Descriptor_mut() } = d3d12::D3D12_ROOT_DESCRIPTOR { + ShaderRegister: binding.register, + RegisterSpace: binding.space, + }; + + RootParameter(param) + } + + pub fn cbv_descriptor(visibility: ShaderVisibility, binding: Binding) -> Self { + Self::descriptor(d3d12::D3D12_ROOT_PARAMETER_TYPE_CBV, visibility, binding) + } + + pub fn srv_descriptor(visibility: ShaderVisibility, binding: Binding) -> Self { + Self::descriptor(d3d12::D3D12_ROOT_PARAMETER_TYPE_SRV, visibility, binding) + } + + pub fn uav_descriptor(visibility: ShaderVisibility, binding: Binding) -> Self { + Self::descriptor(d3d12::D3D12_ROOT_PARAMETER_TYPE_UAV, visibility, binding) + } +} + +#[repr(u32)] +#[derive(Copy, Clone, Debug)] 
+pub enum StaticBorderColor { + TransparentBlack = d3d12::D3D12_STATIC_BORDER_COLOR_TRANSPARENT_BLACK, + OpaqueBlack = d3d12::D3D12_STATIC_BORDER_COLOR_OPAQUE_BLACK, + OpaqueWhite = d3d12::D3D12_STATIC_BORDER_COLOR_OPAQUE_WHITE, +} + +#[repr(transparent)] +pub struct StaticSampler(d3d12::D3D12_STATIC_SAMPLER_DESC); +impl StaticSampler { + pub fn new( + visibility: ShaderVisibility, + binding: Binding, + filter: d3d12::D3D12_FILTER, + address_mode: TextureAddressMode, + mip_lod_bias: f32, + max_anisotropy: u32, + comparison_op: d3d12::D3D12_COMPARISON_FUNC, + border_color: StaticBorderColor, + lod: Range, + ) -> Self { + StaticSampler(d3d12::D3D12_STATIC_SAMPLER_DESC { + Filter: filter, + AddressU: address_mode[0], + AddressV: address_mode[1], + AddressW: address_mode[2], + MipLODBias: mip_lod_bias, + MaxAnisotropy: max_anisotropy, + ComparisonFunc: comparison_op, + BorderColor: border_color as _, + MinLOD: lod.start, + MaxLOD: lod.end, + ShaderRegister: binding.register, + RegisterSpace: binding.space, + ShaderVisibility: visibility as _, + }) + } +} + +#[repr(u32)] +#[derive(Copy, Clone, Debug)] +pub enum RootSignatureVersion { + V1_0 = d3d12::D3D_ROOT_SIGNATURE_VERSION_1_0, + V1_1 = d3d12::D3D_ROOT_SIGNATURE_VERSION_1_1, +} + +bitflags! { + pub struct RootSignatureFlags: u32 { + const ALLOW_IA_INPUT_LAYOUT = d3d12::D3D12_ROOT_SIGNATURE_FLAG_ALLOW_INPUT_ASSEMBLER_INPUT_LAYOUT; + const DENY_VS_ROOT_ACCESS = d3d12::D3D12_ROOT_SIGNATURE_FLAG_DENY_VERTEX_SHADER_ROOT_ACCESS; + const DENY_HS_ROOT_ACCESS = d3d12::D3D12_ROOT_SIGNATURE_FLAG_DENY_HULL_SHADER_ROOT_ACCESS; + const DENY_DS_ROOT_ACCESS = d3d12::D3D12_ROOT_SIGNATURE_FLAG_DENY_DOMAIN_SHADER_ROOT_ACCESS; + const DENY_GS_ROOT_ACCESS = d3d12::D3D12_ROOT_SIGNATURE_FLAG_DENY_GEOMETRY_SHADER_ROOT_ACCESS; + const DENY_PS_ROOT_ACCESS = d3d12::D3D12_ROOT_SIGNATURE_FLAG_DENY_PIXEL_SHADER_ROOT_ACCESS; + } +} + +pub type RootSignature = WeakPtr; +pub type BlobResult = D3DResult<(Blob, Error)>; + +#[cfg(feature = "libloading")] +impl crate::D3D12Lib { + pub fn serialize_root_signature( + &self, + version: RootSignatureVersion, + parameters: &[RootParameter], + static_samplers: &[StaticSampler], + flags: RootSignatureFlags, + ) -> libloading::Result { + use winapi::um::d3dcommon::ID3DBlob; + type Fun = extern "system" fn( + *const d3d12::D3D12_ROOT_SIGNATURE_DESC, + d3d12::D3D_ROOT_SIGNATURE_VERSION, + *mut *mut ID3DBlob, + *mut *mut ID3DBlob, + ) -> crate::HRESULT; + + let desc = d3d12::D3D12_ROOT_SIGNATURE_DESC { + NumParameters: parameters.len() as _, + pParameters: parameters.as_ptr() as *const _, + NumStaticSamplers: static_samplers.len() as _, + pStaticSamplers: static_samplers.as_ptr() as _, + Flags: flags.bits(), + }; + + let mut blob = Blob::null(); + let mut error = Error::null(); + let hr = unsafe { + let func: libloading::Symbol = self.lib.get(b"D3D12SerializeRootSignature")?; + func( + &desc, + version as _, + blob.mut_void() as *mut *mut _, + error.mut_void() as *mut *mut _, + ) + }; + + Ok(((blob, error), hr)) + } +} + +impl RootSignature { + #[cfg(feature = "implicit-link")] + pub fn serialize( + version: RootSignatureVersion, + parameters: &[RootParameter], + static_samplers: &[StaticSampler], + flags: RootSignatureFlags, + ) -> BlobResult { + let mut blob = Blob::null(); + let mut error = Error::null(); + + let desc = d3d12::D3D12_ROOT_SIGNATURE_DESC { + NumParameters: parameters.len() as _, + pParameters: parameters.as_ptr() as *const _, + NumStaticSamplers: static_samplers.len() as _, + pStaticSamplers: static_samplers.as_ptr() as 
_, + Flags: flags.bits(), + }; + + let hr = unsafe { + d3d12::D3D12SerializeRootSignature( + &desc, + version as _, + blob.mut_void() as *mut *mut _, + error.mut_void() as *mut *mut _, + ) + }; + + ((blob, error), hr) + } +} + +#[repr(transparent)] +pub struct RenderTargetViewDesc(pub(crate) d3d12::D3D12_RENDER_TARGET_VIEW_DESC); + +impl RenderTargetViewDesc { + pub fn texture_2d(format: dxgiformat::DXGI_FORMAT, mip_slice: u32, plane_slice: u32) -> Self { + let mut desc = d3d12::D3D12_RENDER_TARGET_VIEW_DESC { + Format: format, + ViewDimension: d3d12::D3D12_RTV_DIMENSION_TEXTURE2D, + ..unsafe { mem::zeroed() } + }; + + *unsafe { desc.u.Texture2D_mut() } = d3d12::D3D12_TEX2D_RTV { + MipSlice: mip_slice, + PlaneSlice: plane_slice, + }; + + RenderTargetViewDesc(desc) + } +} diff --git a/third_party/rust/d3d12/src/device.rs b/third_party/rust/d3d12/src/device.rs new file mode 100644 index 000000000000..71f2bb2329c7 --- /dev/null +++ b/third_party/rust/d3d12/src/device.rs @@ -0,0 +1,346 @@ + + +use com::WeakPtr; +use command_list::{CmdListType, CommandSignature, IndirectArgument}; +use descriptor::{CpuDescriptor, DescriptorHeapFlags, DescriptorHeapType, RenderTargetViewDesc}; +use heap::{Heap, HeapFlags, HeapProperties}; +use std::ops::Range; +use winapi::um::d3d12; +use winapi::Interface; +use {pso, query, queue}; +use { + Blob, CachedPSO, CommandAllocator, CommandQueue, D3DResult, DescriptorHeap, + Fence, GraphicsCommandList, NodeMask, PipelineState, QueryHeap, Resource, RootSignature, + Shader, TextureAddressMode, +}; + +pub type Device = WeakPtr; + +#[cfg(feature = "libloading")] +impl crate::D3D12Lib { + pub fn create_device( + &self, + adapter: WeakPtr, + feature_level: crate::FeatureLevel, + ) -> libloading::Result> { + type Fun = extern "system" fn( + *mut winapi::um::unknwnbase::IUnknown, + winapi::um::d3dcommon::D3D_FEATURE_LEVEL, + winapi::shared::guiddef::REFGUID, + *mut *mut winapi::ctypes::c_void, + ) -> crate::HRESULT; + + let mut device = Device::null(); + let hr = unsafe { + let func: libloading::Symbol = self.lib.get(b"D3D12CreateDevice")?; + func( + adapter.as_unknown() as *const _ as *mut _, + feature_level as _, + &d3d12::ID3D12Device::uuidof(), + device.mut_void(), + ) + }; + + Ok((device, hr)) + } +} + +impl Device { + #[cfg(feature = "implicit-link")] + pub fn create( + adapter: WeakPtr, + feature_level: crate::FeatureLevel, + ) -> D3DResult { + let mut device = Device::null(); + let hr = unsafe { + d3d12::D3D12CreateDevice( + adapter.as_unknown() as *const _ as *mut _, + feature_level as _, + &d3d12::ID3D12Device::uuidof(), + device.mut_void(), + ) + }; + + (device, hr) + } + + pub fn create_heap( + &self, + size_in_bytes: u64, + properties: HeapProperties, + alignment: u64, + flags: HeapFlags, + ) -> D3DResult { + let mut heap = Heap::null(); + + let desc = d3d12::D3D12_HEAP_DESC { + SizeInBytes: size_in_bytes, + Properties: properties.0, + Alignment: alignment, + Flags: flags.bits(), + }; + + let hr = unsafe { self.CreateHeap(&desc, &d3d12::ID3D12Heap::uuidof(), heap.mut_void()) }; + + (heap, hr) + } + + pub fn create_command_allocator(&self, list_type: CmdListType) -> D3DResult { + let mut allocator = CommandAllocator::null(); + let hr = unsafe { + self.CreateCommandAllocator( + list_type as _, + &d3d12::ID3D12CommandAllocator::uuidof(), + allocator.mut_void(), + ) + }; + + (allocator, hr) + } + + pub fn create_command_queue( + &self, + list_type: CmdListType, + priority: queue::Priority, + flags: queue::CommandQueueFlags, + node_mask: NodeMask, + ) -> D3DResult { + 
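+        // Usage sketch (hypothetical caller, not part of this patch). Like
+        // every `create_*` helper in this crate, this returns a
+        // `D3DResult<CommandQueue>`, i.e. a plain `(CommandQueue, HRESULT)`
+        // tuple, so the HRESULT must be checked before the WeakPtr is used:
+        //
+        //     let (queue, hr) = device.create_command_queue(
+        //         CmdListType::Direct,
+        //         queue::Priority::Normal,
+        //         queue::CommandQueueFlags::empty(),
+        //         0, // node mask: single-adapter operation
+        //     );
+        //     assert!(winapi::shared::winerror::SUCCEEDED(hr));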
let desc = d3d12::D3D12_COMMAND_QUEUE_DESC { + Type: list_type as _, + Priority: priority as _, + Flags: flags.bits(), + NodeMask: node_mask, + }; + + let mut queue = CommandQueue::null(); + let hr = unsafe { + self.CreateCommandQueue( + &desc, + &d3d12::ID3D12CommandQueue::uuidof(), + queue.mut_void(), + ) + }; + + (queue, hr) + } + + pub fn create_descriptor_heap( + &self, + num_descriptors: u32, + heap_type: DescriptorHeapType, + flags: DescriptorHeapFlags, + node_mask: NodeMask, + ) -> D3DResult { + let desc = d3d12::D3D12_DESCRIPTOR_HEAP_DESC { + Type: heap_type as _, + NumDescriptors: num_descriptors, + Flags: flags.bits(), + NodeMask: node_mask, + }; + + let mut heap = DescriptorHeap::null(); + let hr = unsafe { + self.CreateDescriptorHeap( + &desc, + &d3d12::ID3D12DescriptorHeap::uuidof(), + heap.mut_void(), + ) + }; + + (heap, hr) + } + + pub fn get_descriptor_increment_size(&self, heap_type: DescriptorHeapType) -> u32 { + unsafe { self.GetDescriptorHandleIncrementSize(heap_type as _) } + } + + pub fn create_graphics_command_list( + &self, + list_type: CmdListType, + allocator: CommandAllocator, + initial: PipelineState, + node_mask: NodeMask, + ) -> D3DResult { + let mut command_list = GraphicsCommandList::null(); + let hr = unsafe { + self.CreateCommandList( + node_mask, + list_type as _, + allocator.as_mut_ptr(), + initial.as_mut_ptr(), + &d3d12::ID3D12GraphicsCommandList::uuidof(), + command_list.mut_void(), + ) + }; + + (command_list, hr) + } + + pub fn create_query_heap( + &self, + heap_ty: query::QueryHeapType, + count: u32, + node_mask: NodeMask, + ) -> D3DResult { + let desc = d3d12::D3D12_QUERY_HEAP_DESC { + Type: heap_ty as _, + Count: count, + NodeMask: node_mask, + }; + + let mut query_heap = QueryHeap::null(); + let hr = unsafe { + self.CreateQueryHeap( + &desc, + &d3d12::ID3D12QueryHeap::uuidof(), + query_heap.mut_void(), + ) + }; + + (query_heap, hr) + } + + pub fn create_graphics_pipeline_state( + &self, + _root_signature: RootSignature, + _vs: Shader, + _ps: Shader, + _gs: Shader, + _hs: Shader, + _ds: Shader, + _node_mask: NodeMask, + _cached_pso: CachedPSO, + _flags: pso::PipelineStateFlags, + ) -> D3DResult { + unimplemented!() + } + + pub fn create_compute_pipeline_state( + &self, + root_signature: RootSignature, + cs: Shader, + node_mask: NodeMask, + cached_pso: CachedPSO, + flags: pso::PipelineStateFlags, + ) -> D3DResult { + let mut pipeline = PipelineState::null(); + let desc = d3d12::D3D12_COMPUTE_PIPELINE_STATE_DESC { + pRootSignature: root_signature.as_mut_ptr(), + CS: *cs, + NodeMask: node_mask, + CachedPSO: *cached_pso, + Flags: flags.bits(), + }; + + let hr = unsafe { + self.CreateComputePipelineState( + &desc, + &d3d12::ID3D12PipelineState::uuidof(), + pipeline.mut_void(), + ) + }; + + (pipeline, hr) + } + + pub fn create_sampler( + &self, + sampler: CpuDescriptor, + filter: d3d12::D3D12_FILTER, + address_mode: TextureAddressMode, + mip_lod_bias: f32, + max_anisotropy: u32, + comparison_op: d3d12::D3D12_COMPARISON_FUNC, + border_color: [f32; 4], + lod: Range, + ) { + let desc = d3d12::D3D12_SAMPLER_DESC { + Filter: filter, + AddressU: address_mode[0], + AddressV: address_mode[1], + AddressW: address_mode[2], + MipLODBias: mip_lod_bias, + MaxAnisotropy: max_anisotropy, + ComparisonFunc: comparison_op, + BorderColor: border_color, + MinLOD: lod.start, + MaxLOD: lod.end, + }; + + unsafe { + self.CreateSampler(&desc, sampler); + } + } + + pub fn create_root_signature( + &self, + blob: Blob, + node_mask: NodeMask, + ) -> D3DResult { + let mut signature 
= RootSignature::null(); + let hr = unsafe { + self.CreateRootSignature( + node_mask, + blob.GetBufferPointer(), + blob.GetBufferSize(), + &d3d12::ID3D12RootSignature::uuidof(), + signature.mut_void(), + ) + }; + + (signature, hr) + } + + pub fn create_command_signature( + &self, + root_signature: RootSignature, + arguments: &[IndirectArgument], + stride: u32, + node_mask: NodeMask, + ) -> D3DResult { + let mut signature = CommandSignature::null(); + let desc = d3d12::D3D12_COMMAND_SIGNATURE_DESC { + ByteStride: stride, + NumArgumentDescs: arguments.len() as _, + pArgumentDescs: arguments.as_ptr() as *const _, + NodeMask: node_mask, + }; + + let hr = unsafe { + self.CreateCommandSignature( + &desc, + root_signature.as_mut_ptr(), + &d3d12::ID3D12CommandSignature::uuidof(), + signature.mut_void(), + ) + }; + + (signature, hr) + } + + pub fn create_render_target_view( + &self, + resource: Resource, + desc: &RenderTargetViewDesc, + descriptor: CpuDescriptor, + ) { + unsafe { + self.CreateRenderTargetView(resource.as_mut_ptr(), &desc.0 as *const _, descriptor); + } + } + + + pub fn create_fence(&self, initial: u64) -> D3DResult { + let mut fence = Fence::null(); + let hr = unsafe { + self.CreateFence( + initial, + d3d12::D3D12_FENCE_FLAG_NONE, + &d3d12::ID3D12Fence::uuidof(), + fence.mut_void(), + ) + }; + + (fence, hr) + } +} diff --git a/third_party/rust/d3d12/src/dxgi.rs b/third_party/rust/d3d12/src/dxgi.rs new file mode 100644 index 000000000000..777f4fb4452e --- /dev/null +++ b/third_party/rust/d3d12/src/dxgi.rs @@ -0,0 +1,219 @@ +use com::WeakPtr; +use std::ptr; +use winapi::shared::windef::HWND; +use winapi::shared::{dxgi, dxgi1_2, dxgi1_3, dxgi1_4, dxgiformat, dxgitype}; +use winapi::um::{dxgidebug, d3d12}; +use winapi::Interface; +use {CommandQueue, D3DResult, Resource, SampleDesc, HRESULT}; + +bitflags! 
{ + pub struct FactoryCreationFlags: u32 { + const DEBUG = dxgi1_3::DXGI_CREATE_FACTORY_DEBUG; + } +} + +#[repr(u32)] +#[derive(Debug, Copy, Clone)] +pub enum Scaling { + Stretch = dxgi1_2::DXGI_SCALING_STRETCH, + Identity = dxgi1_2::DXGI_SCALING_NONE, + Aspect = dxgi1_2::DXGI_SCALING_ASPECT_RATIO_STRETCH, +} + +#[repr(u32)] +#[derive(Debug, Copy, Clone)] +pub enum SwapEffect { + Discard = dxgi::DXGI_SWAP_EFFECT_DISCARD, + Sequential = dxgi::DXGI_SWAP_EFFECT_SEQUENTIAL, + FlipDiscard = dxgi::DXGI_SWAP_EFFECT_FLIP_DISCARD, + FlipSequential = dxgi::DXGI_SWAP_EFFECT_FLIP_SEQUENTIAL, +} + +#[repr(u32)] +#[derive(Debug, Copy, Clone)] +pub enum AlphaMode { + Unspecified = dxgi1_2::DXGI_ALPHA_MODE_UNSPECIFIED, + Premultiplied = dxgi1_2::DXGI_ALPHA_MODE_PREMULTIPLIED, + Straight = dxgi1_2::DXGI_ALPHA_MODE_STRAIGHT, + Ignore = dxgi1_2::DXGI_ALPHA_MODE_IGNORE, + ForceDword = dxgi1_2::DXGI_ALPHA_MODE_FORCE_DWORD, +} + +pub type Adapter1 = WeakPtr; +pub type Factory2 = WeakPtr; +pub type Factory4 = WeakPtr; +pub type InfoQueue = WeakPtr; +pub type SwapChain = WeakPtr; +pub type SwapChain1 = WeakPtr; +pub type SwapChain3 = WeakPtr; + +#[cfg(feature = "libloading")] +#[derive(Debug)] +pub struct DxgiLib { + lib: libloading::Library, +} + +#[cfg(feature = "libloading")] +impl DxgiLib { + pub fn new() -> libloading::Result { + libloading::Library::new("dxgi.dll") + .map(|lib| DxgiLib { + lib, + }) + } + + pub fn create_factory2( + &self, flags: FactoryCreationFlags + ) -> libloading::Result> { + type Fun = extern "system" fn( + winapi::shared::minwindef::UINT, + winapi::shared::guiddef::REFIID, + *mut *mut winapi::ctypes::c_void, + ) -> HRESULT; + + let mut factory = Factory4::null(); + let hr = unsafe { + let func: libloading::Symbol = self.lib.get(b"CreateDXGIFactory2")?; + func( + flags.bits(), + &dxgi1_4::IDXGIFactory4::uuidof(), + factory.mut_void(), + ) + }; + + Ok((factory, hr)) + } + + pub fn get_debug_interface1(&self) -> libloading::Result> { + type Fun = extern "system" fn( + winapi::shared::minwindef::UINT, + winapi::shared::guiddef::REFIID, + *mut *mut winapi::ctypes::c_void, + ) -> HRESULT; + + let mut queue = InfoQueue::null(); + let hr = unsafe { + let func: libloading::Symbol = self.lib.get(b"DXGIGetDebugInterface1")?; + func( + 0, + &dxgidebug::IDXGIInfoQueue::uuidof(), + queue.mut_void(), + ) + }; + Ok((queue, hr)) + } +} + + +pub struct SwapchainDesc { + pub width: u32, + pub height: u32, + pub format: dxgiformat::DXGI_FORMAT, + pub stereo: bool, + pub sample: SampleDesc, + pub buffer_usage: dxgitype::DXGI_USAGE, + pub buffer_count: u32, + pub scaling: Scaling, + pub swap_effect: SwapEffect, + pub alpha_mode: AlphaMode, + pub flags: u32, +} + +impl Factory2 { + + pub fn create_swapchain_for_hwnd( + &self, + queue: CommandQueue, + hwnd: HWND, + desc: &SwapchainDesc, + ) -> D3DResult { + let desc = dxgi1_2::DXGI_SWAP_CHAIN_DESC1 { + AlphaMode: desc.alpha_mode as _, + BufferCount: desc.buffer_count, + Width: desc.width, + Height: desc.height, + Format: desc.format, + Flags: desc.flags, + BufferUsage: desc.buffer_usage, + SampleDesc: dxgitype::DXGI_SAMPLE_DESC { + Count: desc.sample.count, + Quality: desc.sample.quality, + }, + Scaling: desc.scaling as _, + Stereo: desc.stereo as _, + SwapEffect: desc.swap_effect as _, + }; + + let mut swap_chain = SwapChain1::null(); + let hr = unsafe { + self.CreateSwapChainForHwnd( + queue.as_mut_ptr() as *mut _, + hwnd, + &desc, + ptr::null(), + ptr::null_mut(), + swap_chain.mut_void() as *mut *mut _, + ) + }; + + (swap_chain, hr) + } +} + +impl 
Factory4 { + #[cfg(feature = "implicit-link")] + pub fn create(flags: FactoryCreationFlags) -> D3DResult { + let mut factory = Factory4::null(); + let hr = unsafe { + dxgi1_3::CreateDXGIFactory2( + flags.bits(), + &dxgi1_4::IDXGIFactory4::uuidof(), + factory.mut_void(), + ) + }; + + (factory, hr) + } + + pub fn as_factory2(&self) -> Factory2 { + unsafe { Factory2::from_raw(self.as_mut_ptr() as *mut _) } + } + + pub fn enumerate_adapters(&self, id: u32) -> D3DResult { + let mut adapter = Adapter1::null(); + let hr = unsafe { self.EnumAdapters1(id, adapter.mut_void() as *mut *mut _) }; + + (adapter, hr) + } +} + +impl SwapChain { + pub fn get_buffer(&self, id: u32) -> D3DResult { + let mut resource = Resource::null(); + let hr = + unsafe { self.GetBuffer(id, &d3d12::ID3D12Resource::uuidof(), resource.mut_void()) }; + + (resource, hr) + } + + + pub fn present(&self, interval: u32, flags: u32) -> HRESULT { + unsafe { self.Present(interval, flags) } + } +} + +impl SwapChain1 { + pub fn as_swapchain0(&self) -> SwapChain { + unsafe { SwapChain::from_raw(self.as_mut_ptr() as *mut _) } + } +} + +impl SwapChain3 { + pub fn as_swapchain0(&self) -> SwapChain { + unsafe { SwapChain::from_raw(self.as_mut_ptr() as *mut _) } + } + + pub fn get_current_back_buffer_index(&self) -> u32 { + unsafe { self.GetCurrentBackBufferIndex() } + } +} diff --git a/third_party/rust/d3d12/src/heap.rs b/third_party/rust/d3d12/src/heap.rs new file mode 100644 index 000000000000..acf0aead1137 --- /dev/null +++ b/third_party/rust/d3d12/src/heap.rs @@ -0,0 +1,86 @@ +use com::WeakPtr; +use winapi::um::d3d12; + +pub type Heap = WeakPtr; + +#[repr(u32)] +#[derive(Clone, Copy)] +pub enum HeapType { + Default = d3d12::D3D12_HEAP_TYPE_DEFAULT, + Upload = d3d12::D3D12_HEAP_TYPE_UPLOAD, + Readback = d3d12::D3D12_HEAP_TYPE_READBACK, + Custom = d3d12::D3D12_HEAP_TYPE_CUSTOM, +} + +#[repr(u32)] +#[derive(Clone, Copy)] +pub enum CpuPageProperty { + Unknown = d3d12::D3D12_CPU_PAGE_PROPERTY_UNKNOWN, + NotAvailable = d3d12::D3D12_CPU_PAGE_PROPERTY_NOT_AVAILABLE, + WriteCombine = d3d12::D3D12_CPU_PAGE_PROPERTY_WRITE_COMBINE, + WriteBack = d3d12::D3D12_CPU_PAGE_PROPERTY_WRITE_BACK, +} + +#[repr(u32)] +#[derive(Clone, Copy)] +pub enum MemoryPool { + Unknown = d3d12::D3D12_CPU_PAGE_PROPERTY_UNKNOWN, + L0 = d3d12::D3D12_MEMORY_POOL_L0, + L1 = d3d12::D3D12_MEMORY_POOL_L1, +} + +bitflags! 
{
+    pub struct HeapFlags: u32 {
+        const NONE = d3d12::D3D12_HEAP_FLAG_NONE;
+        const SHARED = d3d12::D3D12_HEAP_FLAG_SHARED;
+        const DENY_BUFFERS = d3d12::D3D12_HEAP_FLAG_DENY_BUFFERS;
+        const ALLOW_DISPLAY = d3d12::D3D12_HEAP_FLAG_ALLOW_DISPLAY;
+        const SHARED_CROSS_ADAPTER = d3d12::D3D12_HEAP_FLAG_SHARED_CROSS_ADAPTER;
+        const DENT_RT_DS_TEXTURES = d3d12::D3D12_HEAP_FLAG_DENY_RT_DS_TEXTURES;
+        const DENY_NON_RT_DS_TEXTURES = d3d12::D3D12_HEAP_FLAG_DENY_NON_RT_DS_TEXTURES;
+        const HARDWARE_PROTECTED = d3d12::D3D12_HEAP_FLAG_HARDWARE_PROTECTED;
+        const ALLOW_WRITE_WATCH = d3d12::D3D12_HEAP_FLAG_ALLOW_WRITE_WATCH;
+        const ALLOW_ALL_BUFFERS_AND_TEXTURES = d3d12::D3D12_HEAP_FLAG_ALLOW_ALL_BUFFERS_AND_TEXTURES;
+        const ALLOW_ONLY_BUFFERS = d3d12::D3D12_HEAP_FLAG_ALLOW_ONLY_BUFFERS;
+        const ALLOW_ONLY_NON_RT_DS_TEXTURES = d3d12::D3D12_HEAP_FLAG_ALLOW_ONLY_NON_RT_DS_TEXTURES;
+        const ALLOW_ONLY_RT_DS_TEXTURES = d3d12::D3D12_HEAP_FLAG_ALLOW_ONLY_RT_DS_TEXTURES;
+    }
+}
+
+#[repr(transparent)]
+pub struct HeapProperties(pub d3d12::D3D12_HEAP_PROPERTIES);
+impl HeapProperties {
+    pub fn new(
+        heap_type: HeapType,
+        cpu_page_property: CpuPageProperty,
+        memory_pool_preference: MemoryPool,
+        creation_node_mask: u32,
+        visible_node_mask: u32,
+    ) -> Self {
+        HeapProperties(d3d12::D3D12_HEAP_PROPERTIES {
+            Type: heap_type as _,
+            CPUPageProperty: cpu_page_property as _,
+            MemoryPoolPreference: memory_pool_preference as _,
+            CreationNodeMask: creation_node_mask,
+            VisibleNodeMask: visible_node_mask,
+        })
+    }
+}
+
+#[repr(transparent)]
+pub struct HeapDesc(d3d12::D3D12_HEAP_DESC);
+impl HeapDesc {
+    pub fn new(
+        size_in_bytes: u64,
+        properties: HeapProperties,
+        alignment: u64,
+        flags: HeapFlags,
+    ) -> Self {
+        HeapDesc(d3d12::D3D12_HEAP_DESC {
+            SizeInBytes: size_in_bytes,
+            Properties: properties.0,
+            Alignment: alignment,
+            Flags: flags.bits(),
+        })
+    }
+}
diff --git a/third_party/rust/d3d12/src/lib.rs b/third_party/rust/d3d12/src/lib.rs
new file mode 100644
index 000000000000..8fe62959f577
--- /dev/null
+++ b/third_party/rust/d3d12/src/lib.rs
@@ -0,0 +1,101 @@
+extern crate winapi;
+#[macro_use]
+extern crate bitflags;
+
+use std::ffi::CStr;
+use winapi::shared::dxgiformat;
+use winapi::um::{d3d12, d3dcommon};
+
+mod com;
+mod command_allocator;
+mod command_list;
+mod debug;
+mod descriptor;
+mod device;
+mod dxgi;
+mod heap;
+mod pso;
+mod query;
+mod queue;
+mod resource;
+mod sync;
+
+pub use crate::com::*;
+pub use crate::command_allocator::*;
+pub use crate::command_list::*;
+pub use crate::debug::*;
+pub use crate::descriptor::*;
+pub use crate::device::*;
+pub use crate::dxgi::*;
+pub use crate::heap::*;
+pub use crate::pso::*;
+pub use crate::query::*;
+pub use crate::queue::*;
+pub use crate::resource::*;
+pub use crate::sync::*;
+
+pub use winapi::shared::winerror::HRESULT;
+
+pub type D3DResult<T> = (T, HRESULT);
+pub type GpuAddress = d3d12::D3D12_GPU_VIRTUAL_ADDRESS;
+pub type Format = dxgiformat::DXGI_FORMAT;
+pub type Rect = d3d12::D3D12_RECT;
+pub type NodeMask = u32;
+
+pub type VertexCount = u32;
+pub type VertexOffset = i32;
+pub type IndexCount = u32;
+pub type InstanceCount = u32;
+pub type WorkGroupCount = [u32; 3];
+
+pub type TextureAddressMode = [d3d12::D3D12_TEXTURE_ADDRESS_MODE; 3];
+
+pub struct SampleDesc {
+    pub count: u32,
+    pub quality: u32,
+}
+
+#[repr(u32)]
+pub enum FeatureLevel {
+    L9_1 = d3dcommon::D3D_FEATURE_LEVEL_9_1,
+    L9_2 = d3dcommon::D3D_FEATURE_LEVEL_9_2,
+    L9_3 = d3dcommon::D3D_FEATURE_LEVEL_9_3,
+    L10_0 = d3dcommon::D3D_FEATURE_LEVEL_10_0,
L10_1 = d3dcommon::D3D_FEATURE_LEVEL_10_1, + L11_0 = d3dcommon::D3D_FEATURE_LEVEL_11_0, + L11_1 = d3dcommon::D3D_FEATURE_LEVEL_11_1, + L12_0 = d3dcommon::D3D_FEATURE_LEVEL_12_0, + L12_1 = d3dcommon::D3D_FEATURE_LEVEL_12_1, +} + +pub type Blob = self::com::WeakPtr; + +pub type Error = self::com::WeakPtr; +impl Error { + pub unsafe fn as_c_str(&self) -> &CStr { + debug_assert!(!self.is_null()); + let data = self.GetBufferPointer(); + CStr::from_ptr(data as *const _ as *const _) + } +} + +#[cfg(feature = "libloading")] +#[derive(Debug)] +pub struct D3D12Lib { + lib: libloading::Library, +} + +#[cfg(feature = "libloading")] +impl D3D12Lib { + pub fn new() -> libloading::Result { + libloading::Library::new("d3d12.dll") + .map(|lib| D3D12Lib { + lib, + }) + } +} diff --git a/third_party/rust/d3d12/src/pso.rs b/third_party/rust/d3d12/src/pso.rs new file mode 100644 index 000000000000..e99d03c52043 --- /dev/null +++ b/third_party/rust/d3d12/src/pso.rs @@ -0,0 +1,164 @@ + + +use com::WeakPtr; +use std::ops::Deref; +use std::{ffi, ptr}; +use winapi::um::{d3d12, d3dcompiler}; +use {Blob, D3DResult, Error}; + +bitflags! { + pub struct PipelineStateFlags: u32 { + const TOOL_DEBUG = d3d12::D3D12_PIPELINE_STATE_FLAG_TOOL_DEBUG; + } +} + +bitflags! { + pub struct ShaderCompileFlags: u32 { + const DEBUG = d3dcompiler::D3DCOMPILE_DEBUG; + const SKIP_VALIDATION = d3dcompiler::D3DCOMPILE_SKIP_VALIDATION; + const SKIP_OPTIMIZATION = d3dcompiler::D3DCOMPILE_SKIP_OPTIMIZATION; + const PACK_MATRIX_ROW_MAJOR = d3dcompiler::D3DCOMPILE_PACK_MATRIX_ROW_MAJOR; + const PACK_MATRIX_COLUMN_MAJOR = d3dcompiler::D3DCOMPILE_PACK_MATRIX_COLUMN_MAJOR; + const PARTIAL_PRECISION = d3dcompiler::D3DCOMPILE_PARTIAL_PRECISION; + // TODO: add missing flags + } +} + +#[derive(Copy, Clone)] +pub struct Shader(d3d12::D3D12_SHADER_BYTECODE); +impl Shader { + pub fn null() -> Self { + Shader(d3d12::D3D12_SHADER_BYTECODE { + BytecodeLength: 0, + pShaderBytecode: ptr::null(), + }) + } + + + pub fn from_blob(blob: Blob) -> Self { + Shader(d3d12::D3D12_SHADER_BYTECODE { + BytecodeLength: unsafe { blob.GetBufferSize() }, + pShaderBytecode: unsafe { blob.GetBufferPointer() }, + }) + } + + + + + pub fn compile( + code: &[u8], + target: &ffi::CStr, + entry: &ffi::CStr, + flags: ShaderCompileFlags, + ) -> D3DResult<(Blob, Error)> { + let mut shader = Blob::null(); + let mut error = Error::null(); + + let hr = unsafe { + d3dcompiler::D3DCompile( + code.as_ptr() as *const _, + code.len(), + ptr::null(), + ptr::null(), + ptr::null_mut(), + entry.as_ptr() as *const _, + target.as_ptr() as *const _, + flags.bits(), + 0, + shader.mut_void() as *mut *mut _, + error.mut_void() as *mut *mut _, + ) + }; + + ((shader, error), hr) + } +} + +impl Deref for Shader { + type Target = d3d12::D3D12_SHADER_BYTECODE; + fn deref(&self) -> &Self::Target { + &self.0 + } +} + +impl From> for Shader { + fn from(blob: Option) -> Self { + match blob { + Some(b) => Shader::from_blob(b), + None => Shader::null(), + } + } +} + +#[derive(Copy, Clone)] +pub struct CachedPSO(d3d12::D3D12_CACHED_PIPELINE_STATE); +impl CachedPSO { + pub fn null() -> Self { + CachedPSO(d3d12::D3D12_CACHED_PIPELINE_STATE { + CachedBlobSizeInBytes: 0, + pCachedBlob: ptr::null(), + }) + } + + + pub fn from_blob(blob: Blob) -> Self { + CachedPSO(d3d12::D3D12_CACHED_PIPELINE_STATE { + CachedBlobSizeInBytes: unsafe { blob.GetBufferSize() }, + pCachedBlob: unsafe { blob.GetBufferPointer() }, + }) + } +} + +impl Deref for CachedPSO { + type Target = d3d12::D3D12_CACHED_PIPELINE_STATE; + fn deref(&self) -> 
&Self::Target { + &self.0 + } +} + +pub type PipelineState = WeakPtr; + +#[repr(u32)] +pub enum Subobject { + RootSignature = d3d12::D3D12_PIPELINE_STATE_SUBOBJECT_TYPE_ROOT_SIGNATURE, + VS = d3d12::D3D12_PIPELINE_STATE_SUBOBJECT_TYPE_VS, + PS = d3d12::D3D12_PIPELINE_STATE_SUBOBJECT_TYPE_PS, + DS = d3d12::D3D12_PIPELINE_STATE_SUBOBJECT_TYPE_DS, + HS = d3d12::D3D12_PIPELINE_STATE_SUBOBJECT_TYPE_HS, + GS = d3d12::D3D12_PIPELINE_STATE_SUBOBJECT_TYPE_GS, + CS = d3d12::D3D12_PIPELINE_STATE_SUBOBJECT_TYPE_CS, + StreamOutput = d3d12::D3D12_PIPELINE_STATE_SUBOBJECT_TYPE_STREAM_OUTPUT, + Blend = d3d12::D3D12_PIPELINE_STATE_SUBOBJECT_TYPE_BLEND, + SampleMask = d3d12::D3D12_PIPELINE_STATE_SUBOBJECT_TYPE_SAMPLE_MASK, + Rasterizer = d3d12::D3D12_PIPELINE_STATE_SUBOBJECT_TYPE_RASTERIZER, + DepthStencil = d3d12::D3D12_PIPELINE_STATE_SUBOBJECT_TYPE_DEPTH_STENCIL, + InputLayout = d3d12::D3D12_PIPELINE_STATE_SUBOBJECT_TYPE_INPUT_LAYOUT, + IBStripCut = d3d12::D3D12_PIPELINE_STATE_SUBOBJECT_TYPE_IB_STRIP_CUT_VALUE, + PrimitiveTopology = d3d12::D3D12_PIPELINE_STATE_SUBOBJECT_TYPE_PRIMITIVE_TOPOLOGY, + RTFormats = d3d12::D3D12_PIPELINE_STATE_SUBOBJECT_TYPE_RENDER_TARGET_FORMATS, + DSFormat = d3d12::D3D12_PIPELINE_STATE_SUBOBJECT_TYPE_DEPTH_STENCIL_FORMAT, + SampleDesc = d3d12::D3D12_PIPELINE_STATE_SUBOBJECT_TYPE_SAMPLE_DESC, + NodeMask = d3d12::D3D12_PIPELINE_STATE_SUBOBJECT_TYPE_NODE_MASK, + CachedPSO = d3d12::D3D12_PIPELINE_STATE_SUBOBJECT_TYPE_CACHED_PSO, + Flags = d3d12::D3D12_PIPELINE_STATE_SUBOBJECT_TYPE_FLAGS, + DepthStencil1 = d3d12::D3D12_PIPELINE_STATE_SUBOBJECT_TYPE_DEPTH_STENCIL1, + +} + + +#[repr(C)] +pub struct PipelineStateSubobject { + subobject_align: [usize; 0], + subobject_type: d3d12::D3D12_PIPELINE_STATE_SUBOBJECT_TYPE, + subobject: T, +} + +impl PipelineStateSubobject { + pub fn new(subobject_type: Subobject, subobject: T) -> Self { + PipelineStateSubobject { + subobject_align: [], + subobject_type: subobject_type as _, + subobject, + } + } +} diff --git a/third_party/rust/d3d12/src/query.rs b/third_party/rust/d3d12/src/query.rs new file mode 100644 index 000000000000..c58231c3caf1 --- /dev/null +++ b/third_party/rust/d3d12/src/query.rs @@ -0,0 +1,15 @@ +use com::WeakPtr; +use winapi::um::d3d12; + +#[repr(u32)] +#[derive(Debug, Copy, Clone)] +pub enum QueryHeapType { + Occlusion = d3d12::D3D12_QUERY_HEAP_TYPE_OCCLUSION, + Timestamp = d3d12::D3D12_QUERY_HEAP_TYPE_TIMESTAMP, + PipelineStatistics = d3d12::D3D12_QUERY_HEAP_TYPE_PIPELINE_STATISTICS, + SOStatistics = d3d12::D3D12_QUERY_HEAP_TYPE_SO_STATISTICS, + + +} + +pub type QueryHeap = WeakPtr; diff --git a/third_party/rust/d3d12/src/queue.rs b/third_party/rust/d3d12/src/queue.rs new file mode 100644 index 000000000000..346b9af6ca9d --- /dev/null +++ b/third_party/rust/d3d12/src/queue.rs @@ -0,0 +1,34 @@ +use crate::CommandList; +use com::WeakPtr; +use sync::Fence; +use winapi::um::d3d12; +use HRESULT; + +#[repr(u32)] +pub enum Priority { + Normal = d3d12::D3D12_COMMAND_QUEUE_PRIORITY_NORMAL, + High = d3d12::D3D12_COMMAND_QUEUE_PRIORITY_HIGH, + GlobalRealtime = d3d12::D3D12_COMMAND_QUEUE_PRIORITY_GLOBAL_REALTIME, +} + +bitflags! 
{
+    pub struct CommandQueueFlags: u32 {
+        const DISABLE_GPU_TIMEOUT = d3d12::D3D12_COMMAND_QUEUE_FLAG_DISABLE_GPU_TIMEOUT;
+    }
+}
+
+pub type CommandQueue = WeakPtr<d3d12::ID3D12CommandQueue>;
+
+impl CommandQueue {
+    pub fn execute_command_lists(&self, command_lists: &[CommandList]) {
+        let command_lists = command_lists
+            .iter()
+            .map(CommandList::as_mut_ptr)
+            .collect::<Vec<_>>();
+        unsafe { self.ExecuteCommandLists(command_lists.len() as _, command_lists.as_ptr()) }
+    }
+
+    pub fn signal(&self, fence: Fence, value: u64) -> HRESULT {
+        unsafe { self.Signal(fence.as_mut_ptr(), value) }
+    }
+}
diff --git a/third_party/rust/d3d12/src/resource.rs b/third_party/rust/d3d12/src/resource.rs
new file mode 100644
index 000000000000..2926921612a6
--- /dev/null
+++ b/third_party/rust/d3d12/src/resource.rs
@@ -0,0 +1,55 @@
+
+
+use com::WeakPtr;
+use std::ops::Range;
+use std::ptr;
+use winapi::um::d3d12;
+use {D3DResult, Rect};
+
+pub type Subresource = u32;
+
+pub struct DiscardRegion<'a> {
+    pub rects: &'a [Rect],
+    pub subregions: Range<Subresource>,
+}
+
+pub type Resource = WeakPtr<d3d12::ID3D12Resource>;
+
+impl Resource {
+    pub fn map(
+        &self,
+        subresource: Subresource,
+        read_range: Option<Range<usize>>,
+    ) -> D3DResult<*mut ()> {
+        let mut ptr = ptr::null_mut();
+        let read_range = read_range.map(|r| d3d12::D3D12_RANGE {
+            Begin: r.start,
+            End: r.end,
+        });
+        let read = match read_range {
+            Some(ref r) => r as *const _,
+            None => ptr::null(),
+        };
+        let hr = unsafe { self.Map(subresource, read, &mut ptr) };
+
+        (ptr as _, hr)
+    }
+
+    pub fn unmap(&self, subresource: Subresource, write_range: Option<Range<usize>>) {
+        let write_range = write_range.map(|r| d3d12::D3D12_RANGE {
+            Begin: r.start,
+            End: r.end,
+        });
+        let write = match write_range {
+            Some(ref r) => r as *const _,
+            None => ptr::null(),
+        };
+
+        unsafe { self.Unmap(subresource, write) };
+    }
+
+    pub fn gpu_virtual_address(&self) -> u64 {
+        unsafe { self.GetGPUVirtualAddress() }
+    }
+}
diff --git a/third_party/rust/d3d12/src/sync.rs b/third_party/rust/d3d12/src/sync.rs
new file mode 100644
index 000000000000..9a4ebfaaa6ee
--- /dev/null
+++ b/third_party/rust/d3d12/src/sync.rs
@@ -0,0 +1,41 @@
+use com::WeakPtr;
+use std::ptr;
+use winapi::um::d3d12;
+use winapi::um::{synchapi, winnt};
+use HRESULT;
+
+#[derive(Copy, Clone)]
+#[repr(transparent)]
+pub struct Event(pub winnt::HANDLE);
+impl Event {
+    pub fn create(manual_reset: bool, initial_state: bool) -> Self {
+        Event(unsafe {
+            synchapi::CreateEventA(
+                ptr::null_mut(),
+                manual_reset as _,
+                initial_state as _,
+                ptr::null(),
+            )
+        })
+    }
+
+    pub fn wait(&self, timeout_ms: u32) -> u32 {
+        unsafe { synchapi::WaitForSingleObject(self.0, timeout_ms) }
+    }
+}
+
+pub type Fence = WeakPtr<d3d12::ID3D12Fence>;
+impl Fence {
+    pub fn set_event_on_completion(&self, event: Event, value: u64) -> HRESULT {
+        unsafe { self.SetEventOnCompletion(value, event.0) }
+    }
+
+    pub fn get_value(&self) -> u64 {
+        unsafe { self.GetCompletedValue() }
+    }
+
+    pub fn signal(&self, value: u64) -> HRESULT {
+        unsafe { self.Signal(value) }
+    }
+}
diff --git a/third_party/rust/gfx-auxil/.cargo-checksum.json b/third_party/rust/gfx-auxil/.cargo-checksum.json
new file mode 100644
index 000000000000..af040e124d16
--- /dev/null
+++ b/third_party/rust/gfx-auxil/.cargo-checksum.json
@@ -0,0 +1 @@
+{"files":{"Cargo.toml":"f06acc2f73f07e900bfdb0539ecac85b0481993babe8c5fdf7e079c54179c5a4","src/lib.rs":"3a200e513417044eaa986ac61810d9087193369afa1aeb4469a7306fd64249ec"},"package":"572eee952a9a23c99cfe3e4fd95d277784058a89ac3c77ff6fa3d80a4e321919"}
\ No newline at end of file
diff --git a/third_party/rust/gfx-auxil/Cargo.toml b/third_party/rust/gfx-auxil/Cargo.toml
new file mode 100644
index 000000000000..8220e42388d1
--- /dev/null
+++ b/third_party/rust/gfx-auxil/Cargo.toml
@@ -0,0 +1,35 @@
+# THIS FILE IS AUTOMATICALLY GENERATED BY CARGO
+#
+# When uploading crates to the registry Cargo will automatically
+# "normalize" Cargo.toml files for maximal compatibility
+# with all versions of Cargo and also rewrite `path` dependencies
+# to registry (e.g., crates.io) dependencies
+#
+# If you believe there's an error in this file please file an
+# issue against the rust-lang/cargo repository. If you're
+# editing this file be aware that the upstream Cargo.toml
+# will likely look very different (and much more reasonable)
+
+[package]
+edition = "2018"
+name = "gfx-auxil"
+version = "0.1.0"
+authors = ["The Gfx-rs Developers"]
+description = "Implementation details shared between gfx-rs backends"
+homepage = "https://github.com/gfx-rs/gfx"
+documentation = "https://docs.rs/gfx-auxil"
+keywords = ["graphics", "gamedev"]
+license = "MIT OR Apache-2.0"
+repository = "https://github.com/gfx-rs/gfx"
+
+[lib]
+name = "gfx_auxil"
+
+[dependencies.fxhash]
+version = "0.2.1"
+
+[dependencies.hal]
+version = "0.4"
+package = "gfx-hal"
+
+[dependencies.spirv_cross]
+version = "0.16"
diff --git a/third_party/rust/gfx-auxil/src/lib.rs b/third_party/rust/gfx-auxil/src/lib.rs
new file mode 100644
index 000000000000..f26352cf109e
--- /dev/null
+++ b/third_party/rust/gfx-auxil/src/lib.rs
@@ -0,0 +1,50 @@
+use {
+    hal::{device::ShaderError, pso},
+    spirv_cross::spirv,
+};
+
+pub type FastHashMap<K, V> =
+    std::collections::HashMap<K, V, std::hash::BuildHasherDefault<fxhash::FxHasher>>;
+
+pub fn spirv_cross_specialize_ast<T>(
+    ast: &mut spirv::Ast<T>,
+    specialization: &pso::Specialization,
+) -> Result<(), ShaderError>
+where
+    T: spirv::Target,
+    spirv::Ast<T>: spirv::Compile<T> + spirv::Parse<T>,
+{
+    let spec_constants = ast.get_specialization_constants().map_err(|err| {
+        ShaderError::CompilationFailed(match err {
+            spirv_cross::ErrorCode::CompilationError(msg) => msg,
+            spirv_cross::ErrorCode::Unhandled => "Unexpected specialization constant error".into(),
+        })
+    })?;
+
+    for spec_constant in spec_constants {
+        if let Some(constant) = specialization
+            .constants
+            .iter()
+            .find(|c| c.id == spec_constant.constant_id)
+        {
+            // Fold the constant's backing bytes (little-endian) into a u64
+            // before handing the value to SPIRV-Cross.
+            let value = specialization.data
+                [constant.range.start as usize ..
constant.range.end as usize] + .iter() + .rev() + .fold(0u64, |u, &b| (u << 8) + b as u64); + + ast.set_scalar_constant(spec_constant.id, value) + .map_err(|err| { + ShaderError::CompilationFailed(match err { + spirv_cross::ErrorCode::CompilationError(msg) => msg, + spirv_cross::ErrorCode::Unhandled => { + "Unexpected specialization constant error".into() + } + }) + })?; + } + } + + Ok(()) +} diff --git a/third_party/rust/gfx-backend-dx11/.cargo-checksum.json b/third_party/rust/gfx-backend-dx11/.cargo-checksum.json new file mode 100644 index 000000000000..6797c3de5d1c --- /dev/null +++ b/third_party/rust/gfx-backend-dx11/.cargo-checksum.json @@ -0,0 +1 @@ +{"files":{"Cargo.toml":"d1a212ec3097c36853d5a06f59ce7b863611b308160c6dcba97a2fcaf5a2a10e","README.md":"aa7ff84146655d3957c043b5f71dc439712392c3a18c8c397d8c179bc43f75c0","shaders/blit.hlsl":"a00c57d25b6704a57cd17923c5b7a47608b3ab17b96e7e2ab1172283dc841194","shaders/clear.hlsl":"9b6747a76dabe37ff8e069cdbb8a9c22f6cf71a6d3041d358cd1569d1bb8e10f","shaders/copy.hlsl":"0a164e64b28e62e1d8895159c13e5aa9c74891f61d54939c0f79b08a2a5223c9","src/conv.rs":"253cfbbc8c5ccc02a4d72afe139819b059047b6743f2d7facec844454d9101d4","src/debug.rs":"6da6b8c5172a6b061e2f3db13473c6013d12c6467ddd073f4de8ee3112c69c8c","src/device.rs":"c446408ce389a10c3e02ff198e1e56966082c1b8cb2fdc5b27d09fc6b2904ae0","src/dxgi.rs":"4923fe1333cae98be33718dfb9a3a57470e6c9a0a6be9f7c738006cab9a3bc93","src/internal.rs":"d7b25c6e8b96a333531d525217723cc0be5def8788ef2c3084e29a31dc8e626d","src/lib.rs":"f36e568ec0b5ee132439b1ee344305aaf8c909b25feccbf90476e4bda061d692","src/shader.rs":"8830e96b702601a621c65b2560a58c752b5d8e123569f73a5017abcc2df7889a"},"package":"c66c77836ff26cf9916e5c8745715a22eae1fc61d994ffa0bea8a7dbd708ece2"} \ No newline at end of file diff --git a/third_party/rust/gfx-backend-dx11/Cargo.toml b/third_party/rust/gfx-backend-dx11/Cargo.toml new file mode 100644 index 000000000000..ba091f253fd6 --- /dev/null +++ b/third_party/rust/gfx-backend-dx11/Cargo.toml @@ -0,0 +1,69 @@ +# THIS FILE IS AUTOMATICALLY GENERATED BY CARGO +# +# When uploading crates to the registry Cargo will automatically +# "normalize" Cargo.toml files for maximal compatibility +# with all versions of Cargo and also rewrite `path` dependencies +# to registry (e.g., crates.io) dependencies +# +# If you believe there's an error in this file please file an +# issue against the rust-lang/cargo repository. 
If you're +# editing this file be aware that the upstream Cargo.toml +# will likely look very different (and much more reasonable) + +[package] +name = "gfx-backend-dx11" +version = "0.4.2" +authors = ["The Gfx-rs Developers"] +description = "DirectX-11 API backend for gfx-rs" +homepage = "https://github.com/gfx-rs/gfx" +documentation = "https://docs.rs/gfx-backend-dx11" +readme = "README.md" +keywords = ["graphics", "gamedev"] +license = "MIT OR Apache-2.0" +repository = "https://github.com/gfx-rs/gfx" +[package.metadata.docs.rs] +default-target = "x86_64-pc-windows-msvc" + +[lib] +name = "gfx_backend_dx11" +[dependencies.auxil] +version = "0.1" +package = "gfx-auxil" + +[dependencies.bitflags] +version = "1" + +[dependencies.gfx-hal] +version = "0.4" + +[dependencies.libloading] +version = "0.5" + +[dependencies.log] +version = "0.4" + +[dependencies.parking_lot] +version = "0.9" + +[dependencies.range-alloc] +version = "0.1" + +[dependencies.raw-window-handle] +version = "0.3" + +[dependencies.smallvec] +version = "0.6" + +[dependencies.spirv_cross] +version = "0.16" +features = ["hlsl"] + +[dependencies.winapi] +version = "0.3" +features = ["basetsd", "d3d11", "d3d11sdklayers", "d3dcommon", "d3dcompiler", "dxgi1_2", "dxgi1_3", "dxgi1_4", "dxgi1_5", "dxgiformat", "dxgitype", "handleapi", "minwindef", "synchapi", "unknwnbase", "winbase", "windef", "winerror", "winnt", "winuser"] + +[dependencies.wio] +version = "0.2" + +[features] +default = [] diff --git a/third_party/rust/gfx-backend-dx11/README.md b/third_party/rust/gfx-backend-dx11/README.md new file mode 100644 index 000000000000..5b65cd56bbfc --- /dev/null +++ b/third_party/rust/gfx-backend-dx11/README.md @@ -0,0 +1,13 @@ +# gfx_device_dx11 + +[DX11](https://msdn.microsoft.com/en-us/library/windows/desktop/ff476080(v=vs.85).aspx) backend for gfx. 
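+
+A minimal bring-up sketch in the style of the gfx-hal 0.4 examples (the names
+below are illustrative of the hal API, not documented by this crate):
+
+```rust
+extern crate gfx_backend_dx11 as back;
+extern crate gfx_hal as hal;
+
+use hal::Instance;
+
+fn main() {
+    // Creating the instance loads d3d11.dll/dxgi.dll and builds the factory.
+    let instance = back::Instance::create("example", 1);
+    // Every usable IDXGIAdapter is exposed as one gfx-hal adapter.
+    for adapter in instance.enumerate_adapters() {
+        println!("{:?}", adapter.info);
+    }
+}
+```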
+ +## Normalized Coordinates + +Render | Depth | Texture +-------|-------|-------- +![render_coordinates](../../../info/gl_render_coordinates.png) | ![depth_coordinates](../../../info/dx_depth_coordinates.png) | ![texture_coordinates](../../../info/dx_texture_coordinates.png) + +## Mirroring + +TODO diff --git a/third_party/rust/gfx-backend-dx11/shaders/blit.hlsl b/third_party/rust/gfx-backend-dx11/shaders/blit.hlsl new file mode 100644 index 000000000000..4bdd006abed7 --- /dev/null +++ b/third_party/rust/gfx-backend-dx11/shaders/blit.hlsl @@ -0,0 +1,63 @@ +cbuffer Region : register(b0) { + float2 offset; + float2 extent; + float z; + float level; +}; + +struct VsOutput { + float4 pos: SV_POSITION; + float4 uv: TEXCOORD0; +}; + +// Create a screen filling triangle +VsOutput vs_blit_2d(uint id: SV_VertexID) { + float2 coord = float2((id << 1) & 2, id & 2); + VsOutput output = { + float4(float2(-1.0, 1.0) + coord * float2(2.0, -2.0), 0.0, 1.0), + float4(offset + coord * extent, z, level) + }; + return output; +} + +SamplerState BlitSampler : register(s0); + +Texture2DArray BlitSrc_Uint : register(t0); +Texture2DArray BlitSrc_Sint : register(t0); +Texture2DArray BlitSrc_Float : register(t0); + +// TODO: get rid of GetDimensions call +uint4 Nearest_Uint(float4 uv) +{ + float4 size; + BlitSrc_Uint.GetDimensions(0, size.x, size.y, size.z, size.w); + + float2 pix = uv.xy * size.xy; + + return BlitSrc_Uint.Load(int4(int2(pix), uv.zw)); +} + +int4 Nearest_Sint(float4 uv) +{ + float4 size; + BlitSrc_Sint.GetDimensions(0, size.x, size.y, size.z, size.w); + + float2 pix = uv.xy * size.xy; + + return BlitSrc_Sint.Load(int4(int2(pix), uv.zw)); +} + +uint4 ps_blit_2d_uint(VsOutput input) : SV_Target +{ + return Nearest_Uint(input.uv); +} + +int4 ps_blit_2d_int(VsOutput input) : SV_Target +{ + return Nearest_Sint(input.uv); +} + +float4 ps_blit_2d_float(VsOutput input) : SV_Target +{ + return BlitSrc_Float.SampleLevel(BlitSampler, input.uv.xyz, input.uv.w); +} diff --git a/third_party/rust/gfx-backend-dx11/shaders/clear.hlsl b/third_party/rust/gfx-backend-dx11/shaders/clear.hlsl new file mode 100644 index 000000000000..3f8f3a4e87dd --- /dev/null +++ b/third_party/rust/gfx-backend-dx11/shaders/clear.hlsl @@ -0,0 +1,22 @@ +cbuffer ClearColorF32 : register(b0) { float4 ClearF32; }; +cbuffer ClearColorU32 : register(b0) { uint4 ClearU32; }; +cbuffer ClearColorI32 : register(b0) { int4 ClearI32; }; +cbuffer ClearColorDepth : register(b0) { float ClearDepth; }; + +// fullscreen triangle +float4 vs_partial_clear(uint id : SV_VertexID) : SV_Position +{ + return float4( + float(id / 2) * 4.0 - 1.0, + float(id % 2) * 4.0 - 1.0, + 0.0, + 1.0 + ); +} + +// TODO: send constants through VS as flat attributes +float4 ps_partial_clear_float() : SV_Target0 { return ClearF32; } +uint4 ps_partial_clear_uint() : SV_Target0 { return ClearU32; } +int4 ps_partial_clear_int() : SV_Target0 { return ClearI32; } +float ps_partial_clear_depth() : SV_Depth { return ClearDepth; } +void ps_partial_clear_stencil() { } diff --git a/third_party/rust/gfx-backend-dx11/shaders/copy.hlsl b/third_party/rust/gfx-backend-dx11/shaders/copy.hlsl new file mode 100644 index 000000000000..95dcfc46602f --- /dev/null +++ b/third_party/rust/gfx-backend-dx11/shaders/copy.hlsl @@ -0,0 +1,517 @@ +struct BufferCopy { + uint4 SrcDst; +}; + +struct ImageCopy { + uint4 Src; + uint4 Dst; +}; + +struct BufferImageCopy { + // x=offset, yz=size + uint4 BufferVars; + uint4 ImageOffset; + uint4 ImageExtent; + uint4 ImageSize; +}; + +cbuffer CopyConstants : 
register(b0) { + BufferCopy BufferCopies; + ImageCopy ImageCopies; + BufferImageCopy BufferImageCopies; +}; + + +uint3 GetDestBounds() +{ + return min( + BufferImageCopies.ImageOffset + BufferImageCopies.ImageExtent, + BufferImageCopies.ImageSize + ); +} + +uint3 GetImageCopyDst(uint3 dispatch_thread_id) +{ + return uint3(ImageCopies.Dst.xy + dispatch_thread_id.xy, ImageCopies.Dst.z); +} + +uint3 GetImageCopySrc(uint3 dispatch_thread_id) +{ + return uint3(ImageCopies.Src.xy + dispatch_thread_id.xy, ImageCopies.Src.z); +} + +uint3 GetImageDst(uint3 dispatch_thread_id) +{ + return uint3(BufferImageCopies.ImageOffset.xy + dispatch_thread_id.xy, BufferImageCopies.ImageOffset.z); +} + +uint3 GetImageSrc(uint3 dispatch_thread_id) +{ + return uint3(BufferImageCopies.ImageOffset.xy + dispatch_thread_id.xy, BufferImageCopies.ImageOffset.z); +} + +uint GetBufferDst128(uint3 dispatch_thread_id) +{ + return BufferImageCopies.BufferVars.x + dispatch_thread_id.x * 16 + dispatch_thread_id.y * 16 * max(BufferImageCopies.BufferVars.y, BufferImageCopies.ImageExtent.x); +} +uint GetBufferSrc128(uint3 dispatch_thread_id) +{ + return BufferImageCopies.BufferVars.x + dispatch_thread_id.x * 16 + dispatch_thread_id.y * 16 * max(BufferImageCopies.BufferVars.y, BufferImageCopies.ImageExtent.x); +} + +uint GetBufferDst64(uint3 dispatch_thread_id) +{ + return BufferImageCopies.BufferVars.x + dispatch_thread_id.x * 8 + dispatch_thread_id.y * 8 * max(BufferImageCopies.BufferVars.y, BufferImageCopies.ImageExtent.x); +} +uint GetBufferSrc64(uint3 dispatch_thread_id) +{ + return BufferImageCopies.BufferVars.x + dispatch_thread_id.x * 8 + dispatch_thread_id.y * 8 * max(BufferImageCopies.BufferVars.y, BufferImageCopies.ImageExtent.x); +} + +uint GetBufferDst32(uint3 dispatch_thread_id) +{ + return BufferImageCopies.BufferVars.x + dispatch_thread_id.x * 4 + dispatch_thread_id.y * 4 * max(BufferImageCopies.BufferVars.y, BufferImageCopies.ImageExtent.x); +} +uint GetBufferSrc32(uint3 dispatch_thread_id) +{ + return BufferImageCopies.BufferVars.x + dispatch_thread_id.x * 4 + dispatch_thread_id.y * 4 * max(BufferImageCopies.BufferVars.y, BufferImageCopies.ImageExtent.x); +} + +uint GetBufferDst16(uint3 dispatch_thread_id) +{ + return BufferImageCopies.BufferVars.x + dispatch_thread_id.x * 4 + dispatch_thread_id.y * 2 * max(BufferImageCopies.BufferVars.y, BufferImageCopies.ImageExtent.x); +} +uint GetBufferSrc16(uint3 dispatch_thread_id) +{ + return BufferImageCopies.BufferVars.x + dispatch_thread_id.x * 4 + dispatch_thread_id.y * 2 * max(BufferImageCopies.BufferVars.y, BufferImageCopies.ImageExtent.x); +} + +uint GetBufferDst8(uint3 dispatch_thread_id) +{ + return BufferImageCopies.BufferVars.x + dispatch_thread_id.x * 4 + dispatch_thread_id.y * max(BufferImageCopies.BufferVars.y, BufferImageCopies.ImageExtent.x); +} +uint GetBufferSrc8(uint3 dispatch_thread_id) +{ + return BufferImageCopies.BufferVars.x + dispatch_thread_id.x * 4 + dispatch_thread_id.y * max(BufferImageCopies.BufferVars.y, BufferImageCopies.ImageExtent.x); +} + + +uint4 Uint32ToUint8x4(uint data) +{ + return (data >> uint4(0, 8, 16, 24)) & 0xFF; +} + +uint2 Uint32ToUint16x2(uint data) +{ + return (data >> uint2(0, 16)) & 0xFFFF; +} + +uint Uint8x4ToUint32(uint4 data) +{ + return dot(min(data, 0xFF), 1 << uint4(0, 8, 16, 24)); +} + +uint Uint16x2ToUint32(uint2 data) +{ + return dot(min(data, 0xFFFF), 1 << uint2(0, 16)); +} + +uint2 Uint16ToUint8x2(uint data) +{ + return (data >> uint2(0, 8)) & 0xFF; +} + +uint Uint8x2ToUint16(uint2 data) +{ + return 
dot(min(data, 0xFF), 1 << uint2(0, 8)); +} + +uint4 Float4ToUint8x4(float4 data) +{ + return uint4(data * 255 + .5f); +} + +// Buffers are always R32-aligned +ByteAddressBuffer BufferCopySrc : register(t0); +RWByteAddressBuffer BufferCopyDst : register(u0); + +Texture2DArray ImageCopySrc : register(t0); +RWTexture2DArray ImageCopyDstR : register(u0); +RWTexture2DArray ImageCopyDstRg : register(u0); +RWTexture2DArray ImageCopyDstRgba : register(u0); + +Texture2DArray ImageCopySrcBgra : register(t0); + +// Image<->Image copies +[numthreads(1, 1, 1)] +void cs_copy_image2d_r8g8_image2d_r16(uint3 dispatch_thread_id : SV_DispatchThreadID) +{ + uint3 dst_idx = GetImageCopyDst(dispatch_thread_id); + uint3 src_idx = GetImageCopySrc(dispatch_thread_id); + + ImageCopyDstR[dst_idx] = Uint8x2ToUint16(ImageCopySrc[src_idx]); +} + +[numthreads(1, 1, 1)] +void cs_copy_image2d_r16_image2d_r8g8(uint3 dispatch_thread_id : SV_DispatchThreadID) +{ + uint3 dst_idx = GetImageCopyDst(dispatch_thread_id); + uint3 src_idx = GetImageCopySrc(dispatch_thread_id); + + ImageCopyDstRg[dst_idx] = Uint16ToUint8x2(ImageCopySrc[src_idx]); +} + +[numthreads(1, 1, 1)] +void cs_copy_image2d_r8g8b8a8_image2d_r32(uint3 dispatch_thread_id : SV_DispatchThreadID) +{ + uint3 dst_idx = GetImageCopyDst(dispatch_thread_id); + uint3 src_idx = GetImageCopySrc(dispatch_thread_id); + + ImageCopyDstR[dst_idx] = Uint8x4ToUint32(ImageCopySrc[src_idx]); +} + +[numthreads(1, 1, 1)] +void cs_copy_image2d_r8g8b8a8_image2d_r16g16(uint3 dispatch_thread_id : SV_DispatchThreadID) +{ + uint3 dst_idx = GetImageCopyDst(dispatch_thread_id); + uint3 src_idx = GetImageCopySrc(dispatch_thread_id); + + ImageCopyDstRg[dst_idx] = Uint32ToUint16x2(Uint8x4ToUint32(ImageCopySrc[src_idx])); +} + +[numthreads(1, 1, 1)] +void cs_copy_image2d_r16g16_image2d_r32(uint3 dispatch_thread_id : SV_DispatchThreadID) +{ + uint3 dst_idx = GetImageCopyDst(dispatch_thread_id); + uint3 src_idx = GetImageCopySrc(dispatch_thread_id); + + ImageCopyDstR[dst_idx] = Uint16x2ToUint32(ImageCopySrc[src_idx]); +} + +[numthreads(1, 1, 1)] +void cs_copy_image2d_r16g16_image2d_r8g8b8a8(uint3 dispatch_thread_id : SV_DispatchThreadID) +{ + uint3 dst_idx = GetImageCopyDst(dispatch_thread_id); + uint3 src_idx = GetImageCopySrc(dispatch_thread_id); + + ImageCopyDstRgba[dst_idx] = Uint32ToUint8x4(Uint16x2ToUint32(ImageCopySrc[src_idx])); +} + +[numthreads(1, 1, 1)] +void cs_copy_image2d_r32_image2d_r16g16(uint3 dispatch_thread_id : SV_DispatchThreadID) +{ + uint3 dst_idx = GetImageCopyDst(dispatch_thread_id); + uint3 src_idx = GetImageCopySrc(dispatch_thread_id); + + ImageCopyDstRg[dst_idx] = Uint32ToUint16x2(ImageCopySrc[src_idx]); +} + +[numthreads(1, 1, 1)] +void cs_copy_image2d_r32_image2d_r8g8b8a8(uint3 dispatch_thread_id : SV_DispatchThreadID) +{ + uint3 dst_idx = GetImageCopyDst(dispatch_thread_id); + uint3 src_idx = GetImageCopySrc(dispatch_thread_id); + + ImageCopyDstRgba[dst_idx] = Uint32ToUint8x4(ImageCopySrc[src_idx]); +} + +#define COPY_NUM_THREAD_X 8 +#define COPY_NUM_THREAD_Y 8 + +// Buffer<->Image copies + +// R32G32B32A32 +[numthreads(COPY_NUM_THREAD_X, COPY_NUM_THREAD_Y, 1)] +void cs_copy_buffer_image2d_r32g32b32a32(uint3 dispatch_thread_id : SV_DispatchThreadID) { + uint3 dst_idx = GetImageDst(dispatch_thread_id); + uint3 bounds = GetDestBounds(); + if (dst_idx.x >= bounds.x || dst_idx.y >= bounds.y) { + return; + } + + uint src_idx = GetBufferSrc128(dispatch_thread_id); + + ImageCopyDstRgba[dst_idx] = uint4( + BufferCopySrc.Load(src_idx), + BufferCopySrc.Load(src_idx + 1 * 4), + 
BufferCopySrc.Load(src_idx + 2 * 4), + BufferCopySrc.Load(src_idx + 3 * 4) + ); +} + +[numthreads(COPY_NUM_THREAD_X, COPY_NUM_THREAD_Y, 1)] +void cs_copy_image2d_r32g32b32a32_buffer(uint3 dispatch_thread_id : SV_DispatchThreadID) { + uint3 src_idx = GetImageSrc(dispatch_thread_id); + uint3 bounds = GetDestBounds(); + if (src_idx.x >= bounds.x || src_idx.y >= bounds.y) { + return; + } + + uint4 data = ImageCopySrc[src_idx]; + uint dst_idx = GetBufferDst128(dispatch_thread_id); + + BufferCopyDst.Store(dst_idx, data.x); + BufferCopyDst.Store(dst_idx + 1 * 4, data.y); + BufferCopyDst.Store(dst_idx + 2 * 4, data.z); + BufferCopyDst.Store(dst_idx + 3 * 4, data.w); +} + +// R32G32 +[numthreads(COPY_NUM_THREAD_X, COPY_NUM_THREAD_Y, 1)] +void cs_copy_buffer_image2d_r32g32(uint3 dispatch_thread_id : SV_DispatchThreadID) { + uint3 dst_idx = GetImageDst(dispatch_thread_id); + uint3 bounds = GetDestBounds(); + if (dst_idx.x >= bounds.x || dst_idx.y >= bounds.y) { + return; + } + + uint src_idx = GetBufferSrc64(dispatch_thread_id); + + ImageCopyDstRg[dst_idx] = uint2( + BufferCopySrc.Load(src_idx), + BufferCopySrc.Load(src_idx + 1 * 4) + ); +} + +[numthreads(COPY_NUM_THREAD_X, COPY_NUM_THREAD_Y, 1)] +void cs_copy_image2d_r32g32_buffer(uint3 dispatch_thread_id : SV_DispatchThreadID) { + uint3 src_idx = GetImageSrc(dispatch_thread_id); + uint3 bounds = GetDestBounds(); + if (src_idx.x >= bounds.x || src_idx.y >= bounds.y) { + return; + } + + uint2 data = ImageCopySrc[src_idx].rg; + uint dst_idx = GetBufferDst64(dispatch_thread_id); + + BufferCopyDst.Store(dst_idx , data.x); + BufferCopyDst.Store(dst_idx + 1 * 4, data.y); +} + +// R16G16B16A16 +[numthreads(COPY_NUM_THREAD_X, COPY_NUM_THREAD_Y, 1)] +void cs_copy_buffer_image2d_r16g16b16a16(uint3 dispatch_thread_id : SV_DispatchThreadID) { + uint3 dst_idx = GetImageDst(dispatch_thread_id); + uint3 bounds = GetDestBounds(); + if (dst_idx.x >= bounds.x || dst_idx.y >= bounds.y) { + return; + } + + uint src_idx = GetBufferSrc64(dispatch_thread_id); + + ImageCopyDstRgba[dst_idx] = uint4( + Uint32ToUint16x2(BufferCopySrc.Load(src_idx)), + Uint32ToUint16x2(BufferCopySrc.Load(src_idx + 1 * 4)) + ); +} + +[numthreads(COPY_NUM_THREAD_X, COPY_NUM_THREAD_Y, 1)] +void cs_copy_image2d_r16g16b16a16_buffer(uint3 dispatch_thread_id : SV_DispatchThreadID) { + uint3 src_idx = GetImageSrc(dispatch_thread_id); + uint3 bounds = GetDestBounds(); + if (src_idx.x >= bounds.x || src_idx.y >= bounds.y) { + return; + } + + uint4 data = ImageCopySrc[src_idx]; + uint dst_idx = GetBufferDst64(dispatch_thread_id); + + BufferCopyDst.Store(dst_idx, Uint16x2ToUint32(data.xy)); + BufferCopyDst.Store(dst_idx + 1 * 4, Uint16x2ToUint32(data.zw)); +} + +// R32 +[numthreads(COPY_NUM_THREAD_X, COPY_NUM_THREAD_Y, 1)] +void cs_copy_buffer_image2d_r32(uint3 dispatch_thread_id : SV_DispatchThreadID) { + uint3 dst_idx = GetImageDst(dispatch_thread_id); + uint3 bounds = GetDestBounds(); + if (dst_idx.x >= bounds.x || dst_idx.y >= bounds.y) { + return; + } + + uint src_idx = GetBufferSrc32(dispatch_thread_id); + + ImageCopyDstR[dst_idx] = BufferCopySrc.Load(src_idx); +} + +[numthreads(COPY_NUM_THREAD_X, COPY_NUM_THREAD_Y, 1)] +void cs_copy_image2d_r32_buffer(uint3 dispatch_thread_id : SV_DispatchThreadID) { + uint3 src_idx = GetImageSrc(dispatch_thread_id); + uint3 bounds = GetDestBounds(); + if (src_idx.x >= bounds.x || src_idx.y >= bounds.y) { + return; + } + + uint dst_idx = GetBufferDst32(dispatch_thread_id); + + BufferCopyDst.Store(dst_idx, ImageCopySrc[src_idx].r); +} + +// R16G16 
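+// One thread per texel: both 16-bit channels fit in a single R32 buffer word,
+// packed and unpacked with the Uint16x2 helpers above.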
+[numthreads(COPY_NUM_THREAD_X, COPY_NUM_THREAD_Y, 1)] +void cs_copy_buffer_image2d_r16g16(uint3 dispatch_thread_id : SV_DispatchThreadID) { + uint3 dst_idx = GetImageDst(dispatch_thread_id); + uint3 bounds = GetDestBounds(); + if (dst_idx.x >= bounds.x || dst_idx.y >= bounds.y) { + return; + } + + uint src_idx = GetBufferSrc32(dispatch_thread_id); + + ImageCopyDstRg[dst_idx] = Uint32ToUint16x2(BufferCopySrc.Load(src_idx)); +} + +[numthreads(COPY_NUM_THREAD_X, COPY_NUM_THREAD_Y, 1)] +void cs_copy_image2d_r16g16_buffer(uint3 dispatch_thread_id : SV_DispatchThreadID) { + uint3 src_idx = GetImageSrc(dispatch_thread_id); + uint3 bounds = GetDestBounds(); + if (src_idx.x >= bounds.x || src_idx.y >= bounds.y) { + return; + } + + uint dst_idx = GetBufferDst32(dispatch_thread_id); + + BufferCopyDst.Store(dst_idx, Uint16x2ToUint32(ImageCopySrc[src_idx].xy)); +} + +// R8G8B8A8 +[numthreads(COPY_NUM_THREAD_X, COPY_NUM_THREAD_Y, 1)] +void cs_copy_buffer_image2d_r8g8b8a8(uint3 dispatch_thread_id : SV_DispatchThreadID) { + uint3 dst_idx = GetImageDst(dispatch_thread_id); + uint3 bounds = GetDestBounds(); + if (dst_idx.x >= bounds.x || dst_idx.y >= bounds.y) { + return; + } + + uint src_idx = GetBufferSrc32(dispatch_thread_id); + + ImageCopyDstRgba[dst_idx] = Uint32ToUint8x4(BufferCopySrc.Load(src_idx)); +} + +[numthreads(COPY_NUM_THREAD_X, COPY_NUM_THREAD_Y, 1)] +void cs_copy_image2d_r8g8b8a8_buffer(uint3 dispatch_thread_id : SV_DispatchThreadID) { + uint3 src_idx = GetImageSrc(dispatch_thread_id); + uint3 bounds = GetDestBounds(); + if (src_idx.x >= bounds.x || src_idx.y >= bounds.y) { + return; + } + + uint dst_idx = GetBufferDst32(dispatch_thread_id); + + BufferCopyDst.Store(dst_idx, Uint8x4ToUint32(ImageCopySrc[src_idx])); +} + +// B8G8R8A8 +[numthreads(COPY_NUM_THREAD_X, COPY_NUM_THREAD_Y, 1)] +void cs_copy_image2d_b8g8r8a8_buffer(uint3 dispatch_thread_id : SV_DispatchThreadID) { + uint3 src_idx = GetImageSrc(dispatch_thread_id); + uint3 bounds = GetDestBounds(); + if (src_idx.x >= bounds.x || src_idx.y >= bounds.y) { + return; + } + + uint dst_idx = GetBufferDst32(dispatch_thread_id); + + BufferCopyDst.Store(dst_idx, Uint8x4ToUint32(Float4ToUint8x4(ImageCopySrcBgra[src_idx].bgra))); +} + +// R16 +[numthreads(COPY_NUM_THREAD_X, COPY_NUM_THREAD_Y, 1)] +void cs_copy_buffer_image2d_r16(uint3 dispatch_thread_id : SV_DispatchThreadID) { + uint3 dst_idx = GetImageDst(uint3(2, 1, 0) * dispatch_thread_id); + uint3 bounds = GetDestBounds(); + if (dst_idx.x >= bounds.x || dst_idx.y >= bounds.y) { + return; + } + + uint src_idx = GetBufferSrc16(dispatch_thread_id); + uint2 data = Uint32ToUint16x2(BufferCopySrc.Load(src_idx)); + + ImageCopyDstR[dst_idx ] = data.x; + ImageCopyDstR[dst_idx + uint3(1, 0, 0)] = data.y; +} + +[numthreads(COPY_NUM_THREAD_X, COPY_NUM_THREAD_Y, 1)] +void cs_copy_image2d_r16_buffer(uint3 dispatch_thread_id : SV_DispatchThreadID) { + uint3 src_idx = GetImageSrc(uint3(2, 1, 0) * dispatch_thread_id); + uint3 bounds = GetDestBounds(); + if (src_idx.x >= bounds.x || src_idx.y >= bounds.y) { + return; + } + + uint dst_idx = GetBufferDst16(dispatch_thread_id); + + uint upper = ImageCopySrc[src_idx].r; + uint lower = ImageCopySrc[src_idx + uint3(1, 0, 0)].r; + + BufferCopyDst.Store(dst_idx, Uint16x2ToUint32(uint2(upper, lower))); +} + +// R8G8 +[numthreads(COPY_NUM_THREAD_X, COPY_NUM_THREAD_Y, 1)] +void cs_copy_buffer_image2d_r8g8(uint3 dispatch_thread_id : SV_DispatchThreadID) { + uint3 dst_idx = GetImageDst(uint3(2, 1, 0) * dispatch_thread_id); + uint3 bounds = GetDestBounds(); + if 
(dst_idx.x >= bounds.x || dst_idx.y >= bounds.y) {
+        return;
+    }
+
+    uint src_idx = GetBufferSrc16(dispatch_thread_id);
+
+    uint4 data = Uint32ToUint8x4(BufferCopySrc.Load(src_idx));
+
+    ImageCopyDstRg[dst_idx                 ] = data.xy;
+    ImageCopyDstRg[dst_idx + uint3(1, 0, 0)] = data.zw;
+}
+
+[numthreads(COPY_NUM_THREAD_X, COPY_NUM_THREAD_Y, 1)]
+void cs_copy_image2d_r8g8_buffer(uint3 dispatch_thread_id : SV_DispatchThreadID) {
+    uint3 src_idx = GetImageSrc(uint3(2, 1, 0) * dispatch_thread_id);
+    uint3 bounds = GetDestBounds();
+    if (src_idx.x >= bounds.x || src_idx.y >= bounds.y) {
+        return;
+    }
+
+    uint dst_idx = GetBufferDst16(dispatch_thread_id);
+
+    uint2 lower = ImageCopySrc[src_idx].xy;
+    uint2 upper = ImageCopySrc[src_idx + uint3(1, 0, 0)].xy;
+
+    BufferCopyDst.Store(dst_idx, Uint8x4ToUint32(uint4(lower.x, lower.y, upper.x, upper.y)));
+}
+
+// R8
+[numthreads(COPY_NUM_THREAD_X, COPY_NUM_THREAD_Y, 1)]
+void cs_copy_buffer_image2d_r8(uint3 dispatch_thread_id : SV_DispatchThreadID) {
+    uint3 dst_idx = GetImageDst(uint3(4, 1, 0) * dispatch_thread_id);
+    uint3 bounds = GetDestBounds();
+    if (dst_idx.x >= bounds.x || dst_idx.y >= bounds.y) {
+        return;
+    }
+
+    uint src_idx = GetBufferSrc8(dispatch_thread_id);
+    uint4 data = Uint32ToUint8x4(BufferCopySrc.Load(src_idx));
+
+    ImageCopyDstR[dst_idx                 ] = data.x;
+    ImageCopyDstR[dst_idx + uint3(1, 0, 0)] = data.y;
+    ImageCopyDstR[dst_idx + uint3(2, 0, 0)] = data.z;
+    ImageCopyDstR[dst_idx + uint3(3, 0, 0)] = data.w;
+}
+
+[numthreads(COPY_NUM_THREAD_X, COPY_NUM_THREAD_Y, 1)]
+void cs_copy_image2d_r8_buffer(uint3 dispatch_thread_id : SV_DispatchThreadID) {
+    uint3 src_idx = GetImageSrc(uint3(4, 1, 0) * dispatch_thread_id);
+    uint3 bounds = GetDestBounds();
+    if (src_idx.x >= bounds.x || src_idx.y >= bounds.y) {
+        return;
+    }
+
+    uint dst_idx = GetBufferDst8(dispatch_thread_id);
+
+    BufferCopyDst.Store(dst_idx, Uint8x4ToUint32(uint4(
+        ImageCopySrc[src_idx].r,
+        ImageCopySrc[src_idx + uint3(1, 0, 0)].r,
+        ImageCopySrc[src_idx + uint3(2, 0, 0)].r,
+        ImageCopySrc[src_idx + uint3(3, 0, 0)].r
+    )));
+}
diff --git a/third_party/rust/gfx-backend-dx11/src/conv.rs b/third_party/rust/gfx-backend-dx11/src/conv.rs
new file mode 100644
index 000000000000..ccc9d3cc22cc
--- /dev/null
+++ b/third_party/rust/gfx-backend-dx11/src/conv.rs
@@ -0,0 +1,824 @@
+use hal::format::Format;
+use hal::image::{Anisotropic, Filter, WrapMode};
+use hal::pso::{
+    BlendDesc,
+    BlendOp,
+    ColorBlendDesc,
+    Comparison,
+    DepthBias,
+    DepthStencilDesc,
+    Face,
+    Factor,
+    FrontFace,
+    InputAssemblerDesc,
+    PolygonMode,
+    Rasterizer,
+    Rect,
+    Sided,
+    Stage,
+    State,
+    StencilFace,
+    StencilOp,
+    StencilValue,
+    Viewport,
+};
+use hal::IndexType;
+
+use spirv_cross::spirv;
+
+use winapi::shared::dxgiformat::*;
+use winapi::shared::minwindef::{FALSE, INT, TRUE};
+
+use winapi::um::d3d11::*;
+use winapi::um::d3dcommon::*;
+
+use std::mem;
+
+pub fn map_index_type(ty: IndexType) -> DXGI_FORMAT {
+    match ty {
+        IndexType::U16 => DXGI_FORMAT_R16_UINT,
+        IndexType::U32 => DXGI_FORMAT_R32_UINT,
+    }
+}
+
+
+pub fn viewable_format(format: DXGI_FORMAT) -> DXGI_FORMAT {
+    match format {
+        DXGI_FORMAT_D32_FLOAT_S8X24_UINT => DXGI_FORMAT_R32_FLOAT_X8X24_TYPELESS,
+        DXGI_FORMAT_D32_FLOAT => DXGI_FORMAT_R32_FLOAT,
+        DXGI_FORMAT_D16_UNORM => DXGI_FORMAT_R16_UNORM,
+        _ => format,
+    }
+}
+
+
+pub fn map_format(format: Format) -> Option<DXGI_FORMAT> {
+    use hal::format::Format::*;
+
+    let format = match format {
+        R5g6b5Unorm => DXGI_FORMAT_B5G6R5_UNORM,
+        R5g5b5a1Unorm => DXGI_FORMAT_B5G5R5A1_UNORM,
+        R8Unorm => DXGI_FORMAT_R8_UNORM,
+        R8Snorm => DXGI_FORMAT_R8_SNORM,
+        R8Uint => DXGI_FORMAT_R8_UINT,
+        R8Sint => DXGI_FORMAT_R8_SINT,
+        Rg8Unorm => DXGI_FORMAT_R8G8_UNORM,
+        Rg8Snorm => DXGI_FORMAT_R8G8_SNORM,
+        Rg8Uint => DXGI_FORMAT_R8G8_UINT,
+        Rg8Sint => DXGI_FORMAT_R8G8_SINT,
+        Rgba8Unorm => DXGI_FORMAT_R8G8B8A8_UNORM,
+        Rgba8Snorm => DXGI_FORMAT_R8G8B8A8_SNORM,
+        Rgba8Uint => DXGI_FORMAT_R8G8B8A8_UINT,
+        Rgba8Sint => DXGI_FORMAT_R8G8B8A8_SINT,
+        Rgba8Srgb => DXGI_FORMAT_R8G8B8A8_UNORM_SRGB,
+        Bgra8Unorm => DXGI_FORMAT_B8G8R8A8_UNORM,
+        Bgra8Srgb => DXGI_FORMAT_B8G8R8A8_UNORM_SRGB,
+        A2b10g10r10Unorm => DXGI_FORMAT_R10G10B10A2_UNORM,
+        A2b10g10r10Uint => DXGI_FORMAT_R10G10B10A2_UINT,
+        R16Unorm => DXGI_FORMAT_R16_UNORM,
+        R16Snorm => DXGI_FORMAT_R16_SNORM,
+        R16Uint => DXGI_FORMAT_R16_UINT,
+        R16Sint => DXGI_FORMAT_R16_SINT,
+        R16Sfloat => DXGI_FORMAT_R16_FLOAT,
+        Rg16Unorm => DXGI_FORMAT_R16G16_UNORM,
+        Rg16Snorm => DXGI_FORMAT_R16G16_SNORM,
+        Rg16Uint => DXGI_FORMAT_R16G16_UINT,
+        Rg16Sint => DXGI_FORMAT_R16G16_SINT,
+        Rg16Sfloat => DXGI_FORMAT_R16G16_FLOAT,
+        Rgba16Unorm => DXGI_FORMAT_R16G16B16A16_UNORM,
+        Rgba16Snorm => DXGI_FORMAT_R16G16B16A16_SNORM,
+        Rgba16Uint => DXGI_FORMAT_R16G16B16A16_UINT,
+        Rgba16Sint => DXGI_FORMAT_R16G16B16A16_SINT,
+        Rgba16Sfloat => DXGI_FORMAT_R16G16B16A16_FLOAT,
+        R32Uint => DXGI_FORMAT_R32_UINT,
+        R32Sint => DXGI_FORMAT_R32_SINT,
+        R32Sfloat => DXGI_FORMAT_R32_FLOAT,
+        Rg32Uint => DXGI_FORMAT_R32G32_UINT,
+        Rg32Sint => DXGI_FORMAT_R32G32_SINT,
+        Rg32Sfloat => DXGI_FORMAT_R32G32_FLOAT,
+        Rgb32Uint => DXGI_FORMAT_R32G32B32_UINT,
+        Rgb32Sint => DXGI_FORMAT_R32G32B32_SINT,
+        Rgb32Sfloat => DXGI_FORMAT_R32G32B32_FLOAT,
+        Rgba32Uint => DXGI_FORMAT_R32G32B32A32_UINT,
+        Rgba32Sint => DXGI_FORMAT_R32G32B32A32_SINT,
+        Rgba32Sfloat => DXGI_FORMAT_R32G32B32A32_FLOAT,
+        B10g11r11Ufloat => DXGI_FORMAT_R11G11B10_FLOAT,
+        E5b9g9r9Ufloat => DXGI_FORMAT_R9G9B9E5_SHAREDEXP,
+        D16Unorm => DXGI_FORMAT_D16_UNORM,
+        D32Sfloat => DXGI_FORMAT_D32_FLOAT,
+        D32SfloatS8Uint => DXGI_FORMAT_D32_FLOAT_S8X24_UINT,
+        Bc1RgbUnorm => DXGI_FORMAT_BC1_UNORM,
+        Bc1RgbSrgb => DXGI_FORMAT_BC1_UNORM_SRGB,
+        Bc2Unorm => DXGI_FORMAT_BC2_UNORM,
+        Bc2Srgb => DXGI_FORMAT_BC2_UNORM_SRGB,
+        Bc3Unorm => DXGI_FORMAT_BC3_UNORM,
+        Bc3Srgb => DXGI_FORMAT_BC3_UNORM_SRGB,
+        Bc4Unorm => DXGI_FORMAT_BC4_UNORM,
+        Bc4Snorm => DXGI_FORMAT_BC4_SNORM,
+        Bc5Unorm => DXGI_FORMAT_BC5_UNORM,
+        Bc5Snorm => DXGI_FORMAT_BC5_SNORM,
+        Bc6hUfloat => DXGI_FORMAT_BC6H_UF16,
+        Bc6hSfloat => DXGI_FORMAT_BC6H_SF16,
+        Bc7Unorm => DXGI_FORMAT_BC7_UNORM,
+        Bc7Srgb => DXGI_FORMAT_BC7_UNORM_SRGB,
+
+        _ => return None,
+    };
+
+    Some(format)
+}
+
+pub fn map_format_nosrgb(format: Format) -> Option<DXGI_FORMAT> {
+
+
+    match format {
+        Format::Bgra8Srgb => Some(DXGI_FORMAT_B8G8R8A8_UNORM),
+        Format::Rgba8Srgb => Some(DXGI_FORMAT_R8G8B8A8_UNORM),
+        _ => map_format(format),
+    }
+}
+
+#[derive(Debug, Clone)]
+pub struct DecomposedDxgiFormat {
+    pub typeless: DXGI_FORMAT,
+    pub srv: Option<DXGI_FORMAT>,
+    pub rtv: Option<DXGI_FORMAT>,
+    pub uav: Option<DXGI_FORMAT>,
+    pub dsv: Option<DXGI_FORMAT>,
+
+
+    pub copy_uav: Option<DXGI_FORMAT>,
+    pub copy_srv: Option<DXGI_FORMAT>,
+}
+
+impl DecomposedDxgiFormat {
+    pub const UNKNOWN: DecomposedDxgiFormat = DecomposedDxgiFormat {
+        typeless: DXGI_FORMAT_UNKNOWN,
+        srv: None,
+        rtv: None,
+        uav: None,
+        dsv: None,
+        copy_uav: None,
+        copy_srv: None,
+    };
+
+
+
+
+
+
+    pub fn from_dxgi_format(format: DXGI_FORMAT) -> DecomposedDxgiFormat {
+        match format {
+            DXGI_FORMAT_R8G8B8A8_UNORM
+            | DXGI_FORMAT_R8G8B8A8_SNORM
+            | DXGI_FORMAT_R8G8B8A8_UINT
+            | DXGI_FORMAT_R8G8B8A8_SINT
+            |
DXGI_FORMAT_R8G8B8A8_UNORM_SRGB => DecomposedDxgiFormat { + typeless: DXGI_FORMAT_R8G8B8A8_TYPELESS, + srv: Some(format), + rtv: Some(format), + uav: Some(format), + dsv: None, + copy_uav: Some(DXGI_FORMAT_R32_UINT), + copy_srv: Some(DXGI_FORMAT_R8G8B8A8_UINT), + }, + + DXGI_FORMAT_B8G8R8A8_UNORM | DXGI_FORMAT_B8G8R8A8_UNORM_SRGB => DecomposedDxgiFormat { + typeless: DXGI_FORMAT_B8G8R8A8_TYPELESS, + srv: Some(format), + rtv: Some(format), + uav: Some(DXGI_FORMAT_B8G8R8A8_UNORM), + dsv: None, + copy_uav: Some(DXGI_FORMAT_R32_UINT), + copy_srv: Some(DXGI_FORMAT_B8G8R8A8_UNORM), + }, + + DXGI_FORMAT_A8_UNORM => DecomposedDxgiFormat { + typeless: format, + srv: Some(format), + rtv: Some(format), + uav: Some(format), + dsv: None, + copy_uav: Some(format), + copy_srv: Some(format), + }, + + DXGI_FORMAT_R8_UNORM | DXGI_FORMAT_R8_SNORM | DXGI_FORMAT_R8_UINT + | DXGI_FORMAT_R8_SINT => DecomposedDxgiFormat { + typeless: DXGI_FORMAT_R8_TYPELESS, + srv: Some(format), + rtv: Some(format), + uav: Some(format), + dsv: None, + copy_uav: Some(DXGI_FORMAT_R8_UINT), + copy_srv: Some(DXGI_FORMAT_R8_UINT), + }, + + DXGI_FORMAT_R8G8_UNORM + | DXGI_FORMAT_R8G8_SNORM + | DXGI_FORMAT_R8G8_UINT + | DXGI_FORMAT_R8G8_SINT => DecomposedDxgiFormat { + typeless: DXGI_FORMAT_R8G8_TYPELESS, + srv: Some(format), + rtv: Some(format), + uav: Some(format), + dsv: None, + copy_uav: Some(DXGI_FORMAT_R8G8_UINT), + copy_srv: Some(DXGI_FORMAT_R8G8_UINT), + }, + + DXGI_FORMAT_D16_UNORM => DecomposedDxgiFormat { + typeless: DXGI_FORMAT_R16_TYPELESS, + srv: Some(DXGI_FORMAT_R16_FLOAT), + rtv: Some(DXGI_FORMAT_R16_FLOAT), + uav: Some(DXGI_FORMAT_R16_FLOAT), + dsv: Some(format), + copy_uav: Some(DXGI_FORMAT_R16_UINT), + copy_srv: Some(DXGI_FORMAT_R16_UINT), + }, + + DXGI_FORMAT_R16_UNORM + | DXGI_FORMAT_R16_SNORM + | DXGI_FORMAT_R16_UINT + | DXGI_FORMAT_R16_SINT + | DXGI_FORMAT_R16_FLOAT => DecomposedDxgiFormat { + typeless: DXGI_FORMAT_R16_TYPELESS, + srv: Some(format), + rtv: Some(format), + uav: Some(format), + dsv: Some(DXGI_FORMAT_D16_UNORM), + copy_uav: Some(DXGI_FORMAT_R16_UINT), + copy_srv: Some(DXGI_FORMAT_R16_UINT), + }, + + DXGI_FORMAT_R16G16_UNORM + | DXGI_FORMAT_R16G16_SNORM + | DXGI_FORMAT_R16G16_UINT + | DXGI_FORMAT_R16G16_SINT + | DXGI_FORMAT_R16G16_FLOAT => DecomposedDxgiFormat { + typeless: DXGI_FORMAT_R16G16_TYPELESS, + srv: Some(format), + rtv: Some(format), + uav: Some(format), + dsv: None, + copy_uav: Some(DXGI_FORMAT_R32_UINT), + copy_srv: Some(DXGI_FORMAT_R16G16_UINT), + }, + + DXGI_FORMAT_R16G16B16A16_UNORM + | DXGI_FORMAT_R16G16B16A16_SNORM + | DXGI_FORMAT_R16G16B16A16_UINT + | DXGI_FORMAT_R16G16B16A16_SINT + | DXGI_FORMAT_R16G16B16A16_FLOAT => DecomposedDxgiFormat { + typeless: DXGI_FORMAT_R16G16B16A16_TYPELESS, + srv: Some(format), + rtv: Some(format), + uav: Some(format), + dsv: None, + copy_uav: Some(DXGI_FORMAT_R16G16B16A16_UINT), + copy_srv: Some(DXGI_FORMAT_R16G16B16A16_UINT), + }, + + DXGI_FORMAT_D32_FLOAT_S8X24_UINT => DecomposedDxgiFormat { + typeless: DXGI_FORMAT_R32G8X24_TYPELESS, + + srv: Some(DXGI_FORMAT_R32_FLOAT_X8X24_TYPELESS), + rtv: None, + uav: None, + dsv: Some(format), + copy_uav: None, + copy_srv: Some(DXGI_FORMAT_R32_FLOAT_X8X24_TYPELESS), + }, + + DXGI_FORMAT_D32_FLOAT => DecomposedDxgiFormat { + typeless: DXGI_FORMAT_R32_TYPELESS, + srv: Some(DXGI_FORMAT_R32_FLOAT), + rtv: None, + uav: None, + dsv: Some(format), + copy_uav: Some(DXGI_FORMAT_R32_UINT), + copy_srv: Some(DXGI_FORMAT_R32_UINT), + }, + + DXGI_FORMAT_R32_UINT | DXGI_FORMAT_R32_SINT | DXGI_FORMAT_R32_FLOAT => { + 
DecomposedDxgiFormat { + typeless: DXGI_FORMAT_R32_TYPELESS, + srv: Some(format), + rtv: Some(format), + uav: Some(format), + dsv: Some(DXGI_FORMAT_D32_FLOAT), + copy_uav: Some(DXGI_FORMAT_R32_UINT), + copy_srv: Some(DXGI_FORMAT_R32_UINT), + } + } + + DXGI_FORMAT_R32G32_UINT | DXGI_FORMAT_R32G32_SINT | DXGI_FORMAT_R32G32_FLOAT => { + DecomposedDxgiFormat { + typeless: DXGI_FORMAT_R32G32_TYPELESS, + srv: Some(format), + rtv: Some(format), + uav: Some(format), + dsv: None, + copy_uav: Some(DXGI_FORMAT_R32G32_UINT), + copy_srv: Some(DXGI_FORMAT_R32G32_UINT), + } + } + + + DXGI_FORMAT_R32G32B32_UINT + | DXGI_FORMAT_R32G32B32_SINT + | DXGI_FORMAT_R32G32B32_FLOAT => DecomposedDxgiFormat { + typeless: DXGI_FORMAT_R32G32_TYPELESS, + srv: Some(format), + rtv: None, + uav: None, + dsv: None, + copy_uav: Some(DXGI_FORMAT_R32G32B32_UINT), + copy_srv: Some(DXGI_FORMAT_R32G32B32_UINT), + }, + + DXGI_FORMAT_R32G32B32A32_UINT + | DXGI_FORMAT_R32G32B32A32_SINT + | DXGI_FORMAT_R32G32B32A32_FLOAT => DecomposedDxgiFormat { + typeless: DXGI_FORMAT_R32G32B32A32_TYPELESS, + srv: Some(format), + rtv: Some(format), + uav: Some(format), + dsv: None, + copy_uav: Some(DXGI_FORMAT_R32G32B32A32_UINT), + copy_srv: Some(DXGI_FORMAT_R32G32B32A32_UINT), + }, + + DXGI_FORMAT_R10G10B10A2_UNORM | DXGI_FORMAT_R10G10B10A2_UINT => DecomposedDxgiFormat { + typeless: DXGI_FORMAT_R10G10B10A2_TYPELESS, + srv: Some(format), + rtv: Some(format), + uav: Some(format), + dsv: None, + copy_uav: Some(DXGI_FORMAT_R32_UINT), + copy_srv: Some(DXGI_FORMAT_R10G10B10A2_UINT), + }, + + DXGI_FORMAT_R11G11B10_FLOAT => DecomposedDxgiFormat { + typeless: format, + srv: Some(format), + rtv: Some(format), + uav: Some(format), + dsv: None, + copy_uav: Some(format), + copy_srv: Some(format), + }, + + DXGI_FORMAT_R9G9B9E5_SHAREDEXP => DecomposedDxgiFormat { + typeless: format, + srv: Some(format), + rtv: None, + uav: None, + dsv: None, + + copy_uav: None, + copy_srv: Some(format), + }, + + DXGI_FORMAT_BC1_UNORM | DXGI_FORMAT_BC1_UNORM_SRGB => DecomposedDxgiFormat { + typeless: DXGI_FORMAT_BC1_TYPELESS, + srv: Some(format), + rtv: None, + uav: None, + dsv: None, + + copy_uav: None, + copy_srv: Some(format), + }, + + DXGI_FORMAT_BC2_UNORM | DXGI_FORMAT_BC2_UNORM_SRGB => DecomposedDxgiFormat { + typeless: DXGI_FORMAT_BC2_TYPELESS, + srv: Some(format), + rtv: None, + uav: None, + dsv: None, + + copy_uav: None, + copy_srv: Some(format), + }, + + DXGI_FORMAT_BC3_UNORM | DXGI_FORMAT_BC3_UNORM_SRGB => DecomposedDxgiFormat { + typeless: DXGI_FORMAT_BC3_TYPELESS, + srv: Some(format), + rtv: None, + uav: None, + dsv: None, + + copy_uav: None, + copy_srv: Some(format), + }, + + DXGI_FORMAT_BC4_UNORM | DXGI_FORMAT_BC4_SNORM => DecomposedDxgiFormat { + typeless: DXGI_FORMAT_BC4_TYPELESS, + srv: Some(format), + rtv: None, + uav: None, + dsv: None, + + copy_uav: None, + copy_srv: Some(format), + }, + + DXGI_FORMAT_BC5_UNORM | DXGI_FORMAT_BC5_SNORM => DecomposedDxgiFormat { + typeless: format, + srv: Some(format), + rtv: None, + uav: None, + dsv: None, + + copy_uav: None, + copy_srv: Some(format), + }, + + DXGI_FORMAT_BC6H_UF16 | DXGI_FORMAT_BC6H_SF16 => DecomposedDxgiFormat { + typeless: DXGI_FORMAT_BC6H_TYPELESS, + srv: Some(format), + rtv: None, + uav: None, + dsv: None, + + copy_uav: None, + copy_srv: Some(format), + }, + + + DXGI_FORMAT_BC7_UNORM | DXGI_FORMAT_BC7_UNORM_SRGB => DecomposedDxgiFormat { + typeless: DXGI_FORMAT_BC7_TYPELESS, + srv: Some(format), + rtv: None, + uav: None, + dsv: None, + + copy_uav: None, + copy_srv: Some(format), + }, + + _ => 
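// any DXGI format not matched above has no decomposition defined yet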
unimplemented!(), + } + } +} + +pub fn map_viewport(viewport: &Viewport) -> D3D11_VIEWPORT { + D3D11_VIEWPORT { + TopLeftX: viewport.rect.x as _, + TopLeftY: viewport.rect.y as _, + Width: viewport.rect.w as _, + Height: viewport.rect.h as _, + MinDepth: viewport.depth.start, + MaxDepth: viewport.depth.end, + } +} + +pub fn map_rect(rect: &Rect) -> D3D11_RECT { + D3D11_RECT { + left: rect.x as _, + top: rect.y as _, + right: (rect.x + rect.w) as _, + bottom: (rect.y + rect.h) as _, + } +} + +pub fn map_topology(ia: &InputAssemblerDesc) -> D3D11_PRIMITIVE_TOPOLOGY { + use hal::pso::Primitive::*; + match (ia.primitive, ia.with_adjacency) { + (PointList, false) => D3D_PRIMITIVE_TOPOLOGY_POINTLIST, + (PointList, true) => panic!("Points can't have adjacency info"), + (LineList, false) => D3D_PRIMITIVE_TOPOLOGY_LINELIST, + (LineList, true) => D3D_PRIMITIVE_TOPOLOGY_LINELIST_ADJ, + (LineStrip, false) => D3D_PRIMITIVE_TOPOLOGY_LINESTRIP, + (LineStrip, true) => D3D_PRIMITIVE_TOPOLOGY_LINESTRIP_ADJ, + (TriangleList, false) => D3D_PRIMITIVE_TOPOLOGY_TRIANGLELIST, + (TriangleList, true) => D3D_PRIMITIVE_TOPOLOGY_TRIANGLELIST_ADJ, + (TriangleStrip, false) => D3D_PRIMITIVE_TOPOLOGY_TRIANGLESTRIP, + (TriangleStrip, true) => D3D_PRIMITIVE_TOPOLOGY_TRIANGLESTRIP_ADJ, + (PatchList(num), false) => { + assert!(num != 0); + D3D_PRIMITIVE_TOPOLOGY_1_CONTROL_POINT_PATCHLIST + (num as u32) - 1 + } + (_, true) => panic!("Patches can't have adjacency info"), + } +} + +fn map_fill_mode(mode: PolygonMode) -> D3D11_FILL_MODE { + match mode { + PolygonMode::Fill => D3D11_FILL_SOLID, + PolygonMode::Line(_) => D3D11_FILL_WIREFRAME, + + _ => unimplemented!(), + } +} + +fn map_cull_mode(mode: Face) -> D3D11_CULL_MODE { + match mode { + Face::NONE => D3D11_CULL_NONE, + Face::FRONT => D3D11_CULL_FRONT, + Face::BACK => D3D11_CULL_BACK, + _ => panic!("Culling both front and back faces is not supported"), + } +} + +pub(crate) fn map_rasterizer_desc(desc: &Rasterizer) -> D3D11_RASTERIZER_DESC { + let bias = match desc.depth_bias { + + Some(State::Static(db)) => db, + Some(_) | None => DepthBias::default(), + }; + D3D11_RASTERIZER_DESC { + FillMode: map_fill_mode(desc.polygon_mode), + CullMode: map_cull_mode(desc.cull_face), + FrontCounterClockwise: match desc.front_face { + FrontFace::Clockwise => FALSE, + FrontFace::CounterClockwise => TRUE, + }, + DepthBias: bias.const_factor as INT, + DepthBiasClamp: bias.clamp, + SlopeScaledDepthBias: bias.slope_factor, + DepthClipEnable: !desc.depth_clamping as _, + + ScissorEnable: TRUE, + + MultisampleEnable: FALSE, + + AntialiasedLineEnable: FALSE, + + } +} + +fn map_blend_factor(factor: Factor) -> D3D11_BLEND { + match factor { + Factor::Zero => D3D11_BLEND_ZERO, + Factor::One => D3D11_BLEND_ONE, + Factor::SrcColor => D3D11_BLEND_SRC_COLOR, + Factor::OneMinusSrcColor => D3D11_BLEND_INV_SRC_COLOR, + Factor::DstColor => D3D11_BLEND_DEST_COLOR, + Factor::OneMinusDstColor => D3D11_BLEND_INV_DEST_COLOR, + Factor::SrcAlpha => D3D11_BLEND_SRC_ALPHA, + Factor::OneMinusSrcAlpha => D3D11_BLEND_INV_SRC_ALPHA, + Factor::DstAlpha => D3D11_BLEND_DEST_ALPHA, + Factor::OneMinusDstAlpha => D3D11_BLEND_INV_DEST_ALPHA, + Factor::ConstColor | Factor::ConstAlpha => D3D11_BLEND_BLEND_FACTOR, + Factor::OneMinusConstColor | Factor::OneMinusConstAlpha => D3D11_BLEND_INV_BLEND_FACTOR, + Factor::SrcAlphaSaturate => D3D11_BLEND_SRC_ALPHA_SAT, + Factor::Src1Color => D3D11_BLEND_SRC1_COLOR, + Factor::OneMinusSrc1Color => D3D11_BLEND_INV_SRC1_COLOR, + Factor::Src1Alpha => D3D11_BLEND_SRC1_ALPHA, + 
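// dual-source blending factors map onto D3D11's SRC1 blend values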
Factor::OneMinusSrc1Alpha => D3D11_BLEND_INV_SRC1_ALPHA, + } +} + +fn map_alpha_blend_factor(factor: Factor) -> D3D11_BLEND { + match factor { + Factor::Zero => D3D11_BLEND_ZERO, + Factor::One => D3D11_BLEND_ONE, + Factor::SrcColor | Factor::SrcAlpha => D3D11_BLEND_SRC_ALPHA, + Factor::DstColor | Factor::DstAlpha => D3D11_BLEND_DEST_ALPHA, + Factor::OneMinusSrcColor | Factor::OneMinusSrcAlpha => D3D11_BLEND_INV_SRC_ALPHA, + Factor::OneMinusDstColor | Factor::OneMinusDstAlpha => D3D11_BLEND_INV_DEST_ALPHA, + Factor::ConstColor | Factor::ConstAlpha => D3D11_BLEND_BLEND_FACTOR, + Factor::OneMinusConstColor | Factor::OneMinusConstAlpha => D3D11_BLEND_INV_BLEND_FACTOR, + Factor::SrcAlphaSaturate => D3D11_BLEND_SRC_ALPHA_SAT, + Factor::Src1Color | Factor::Src1Alpha => D3D11_BLEND_SRC1_ALPHA, + Factor::OneMinusSrc1Color | Factor::OneMinusSrc1Alpha => D3D11_BLEND_INV_SRC1_ALPHA, + } +} + +fn map_blend_op(operation: BlendOp) -> (D3D11_BLEND_OP, D3D11_BLEND, D3D11_BLEND) { + match operation { + BlendOp::Add { src, dst } => ( + D3D11_BLEND_OP_ADD, + map_blend_factor(src), + map_blend_factor(dst), + ), + BlendOp::Sub { src, dst } => ( + D3D11_BLEND_OP_SUBTRACT, + map_blend_factor(src), + map_blend_factor(dst), + ), + BlendOp::RevSub { src, dst } => ( + D3D11_BLEND_OP_REV_SUBTRACT, + map_blend_factor(src), + map_blend_factor(dst), + ), + BlendOp::Min => (D3D11_BLEND_OP_MIN, D3D11_BLEND_ZERO, D3D11_BLEND_ZERO), + BlendOp::Max => (D3D11_BLEND_OP_MAX, D3D11_BLEND_ZERO, D3D11_BLEND_ZERO), + } +} + +fn map_alpha_blend_op(operation: BlendOp) -> (D3D11_BLEND_OP, D3D11_BLEND, D3D11_BLEND) { + match operation { + BlendOp::Add { src, dst } => ( + D3D11_BLEND_OP_ADD, + map_alpha_blend_factor(src), + map_alpha_blend_factor(dst), + ), + BlendOp::Sub { src, dst } => ( + D3D11_BLEND_OP_SUBTRACT, + map_alpha_blend_factor(src), + map_alpha_blend_factor(dst), + ), + BlendOp::RevSub { src, dst } => ( + D3D11_BLEND_OP_REV_SUBTRACT, + map_alpha_blend_factor(src), + map_alpha_blend_factor(dst), + ), + BlendOp::Min => (D3D11_BLEND_OP_MIN, D3D11_BLEND_ZERO, D3D11_BLEND_ZERO), + BlendOp::Max => (D3D11_BLEND_OP_MAX, D3D11_BLEND_ZERO, D3D11_BLEND_ZERO), + } +} + +fn map_blend_targets( + render_target_blends: &[ColorBlendDesc], +) -> [D3D11_RENDER_TARGET_BLEND_DESC; 8] { + let mut targets: [D3D11_RENDER_TARGET_BLEND_DESC; 8] = [unsafe { mem::zeroed() }; 8]; + + for (mut target, color_desc) in targets.iter_mut().zip(render_target_blends.iter()) { + target.RenderTargetWriteMask = color_desc.mask.bits() as _; + if let Some(ref blend) = color_desc.blend { + let (color_op, color_src, color_dst) = map_blend_op(blend.color); + let (alpha_op, alpha_src, alpha_dst) = map_alpha_blend_op(blend.alpha); + target.BlendEnable = TRUE; + target.BlendOp = color_op; + target.SrcBlend = color_src; + target.DestBlend = color_dst; + target.BlendOpAlpha = alpha_op; + target.SrcBlendAlpha = alpha_src; + target.DestBlendAlpha = alpha_dst; + } + } + + targets +} + +pub(crate) fn map_blend_desc(desc: &BlendDesc) -> D3D11_BLEND_DESC { + D3D11_BLEND_DESC { + + AlphaToCoverageEnable: FALSE, + IndependentBlendEnable: TRUE, + RenderTarget: map_blend_targets(&desc.targets), + } +} + +pub fn map_comparison(func: Comparison) -> D3D11_COMPARISON_FUNC { + match func { + Comparison::Never => D3D11_COMPARISON_NEVER, + Comparison::Less => D3D11_COMPARISON_LESS, + Comparison::LessEqual => D3D11_COMPARISON_LESS_EQUAL, + Comparison::Equal => D3D11_COMPARISON_EQUAL, + Comparison::GreaterEqual => D3D11_COMPARISON_GREATER_EQUAL, + Comparison::Greater => 
D3D11_COMPARISON_GREATER,
+        Comparison::NotEqual => D3D11_COMPARISON_NOT_EQUAL,
+        Comparison::Always => D3D11_COMPARISON_ALWAYS,
+    }
+}
+
+fn map_stencil_op(op: StencilOp) -> D3D11_STENCIL_OP {
+    match op {
+        StencilOp::Keep => D3D11_STENCIL_OP_KEEP,
+        StencilOp::Zero => D3D11_STENCIL_OP_ZERO,
+        StencilOp::Replace => D3D11_STENCIL_OP_REPLACE,
+        StencilOp::IncrementClamp => D3D11_STENCIL_OP_INCR_SAT,
+        StencilOp::IncrementWrap => D3D11_STENCIL_OP_INCR,
+        StencilOp::DecrementClamp => D3D11_STENCIL_OP_DECR_SAT,
+        StencilOp::DecrementWrap => D3D11_STENCIL_OP_DECR,
+        StencilOp::Invert => D3D11_STENCIL_OP_INVERT,
+    }
+}
+
+fn map_stencil_side(side: &StencilFace) -> D3D11_DEPTH_STENCILOP_DESC {
+    D3D11_DEPTH_STENCILOP_DESC {
+        StencilFailOp: map_stencil_op(side.op_fail),
+        StencilDepthFailOp: map_stencil_op(side.op_depth_fail),
+        StencilPassOp: map_stencil_op(side.op_pass),
+        StencilFunc: map_comparison(side.fun),
+    }
+}
+
+pub(crate) fn map_depth_stencil_desc(
+    desc: &DepthStencilDesc,
+) -> (D3D11_DEPTH_STENCIL_DESC, State<StencilValue>) {
+    let (depth_on, depth_write, depth_func) = match desc.depth {
+        Some(ref depth) => (TRUE, depth.write, map_comparison(depth.fun)),
+        None => unsafe { mem::zeroed() },
+    };
+
+    let (stencil_on, front, back, read_mask, write_mask, stencil_ref) = match desc.stencil {
+        Some(ref stencil) => {
+            let read_masks = stencil.read_masks.static_or(Sided::new(!0));
+            let write_masks = stencil.write_masks.static_or(Sided::new(!0));
+            let reference_value = match stencil.reference_values {
+                State::Static(ref values) => {
+                    if values.front != values.back {
+                        error!("Different reference values for front ({}) and back ({}) of the stencil",
+                            values.front, values.back);
+                    }
+                    State::Static(values.front)
+                }
+                State::Dynamic => State::Dynamic,
+            };
+
+            if read_masks.front != read_masks.back || write_masks.front != write_masks.back {
+                error!(
+                    "Different sides are specified for read ({:?}) and write ({:?}) stencil masks",
+                    read_masks, write_masks
+                );
+            }
+            (
+                TRUE,
+                map_stencil_side(&stencil.faces.front),
+                map_stencil_side(&stencil.faces.back),
+                read_masks.front,
+                write_masks.front,
+                reference_value,
+            )
+        }
+        None => unsafe { mem::zeroed() },
+    };
+
+    (
+        D3D11_DEPTH_STENCIL_DESC {
+            DepthEnable: depth_on,
+            DepthWriteMask: if depth_write {
+                D3D11_DEPTH_WRITE_MASK_ALL
+            } else {
+                D3D11_DEPTH_WRITE_MASK_ZERO
+            },
+            DepthFunc: depth_func,
+            StencilEnable: stencil_on,
+            StencilReadMask: read_mask as _,
+            StencilWriteMask: write_mask as _,
+            FrontFace: front,
+            BackFace: back,
+        },
+        stencil_ref,
+    )
+}
+
+pub fn map_execution_model(model: spirv::ExecutionModel) -> Stage {
+    match model {
+        spirv::ExecutionModel::Vertex => Stage::Vertex,
+        spirv::ExecutionModel::Fragment => Stage::Fragment,
+        spirv::ExecutionModel::Geometry => Stage::Geometry,
+        spirv::ExecutionModel::GlCompute => Stage::Compute,
+        spirv::ExecutionModel::TessellationControl => Stage::Hull,
+        spirv::ExecutionModel::TessellationEvaluation => Stage::Domain,
+        spirv::ExecutionModel::Kernel => panic!("Kernel is not a valid execution model."),
+    }
+}
+
+pub fn map_stage(stage: Stage) -> spirv::ExecutionModel {
+    match stage {
+        Stage::Vertex => spirv::ExecutionModel::Vertex,
+        Stage::Fragment => spirv::ExecutionModel::Fragment,
+        Stage::Geometry => spirv::ExecutionModel::Geometry,
+        Stage::Compute => spirv::ExecutionModel::GlCompute,
+        Stage::Hull => spirv::ExecutionModel::TessellationControl,
+        Stage::Domain => spirv::ExecutionModel::TessellationEvaluation,
+    }
+}
+
+pub fn map_wrapping(wrap: WrapMode) -> D3D11_TEXTURE_ADDRESS_MODE {
+    match wrap {
+        WrapMode::Tile => D3D11_TEXTURE_ADDRESS_WRAP,
+        WrapMode::Mirror => D3D11_TEXTURE_ADDRESS_MIRROR,
+        WrapMode::Clamp => D3D11_TEXTURE_ADDRESS_CLAMP,
+        WrapMode::Border => D3D11_TEXTURE_ADDRESS_BORDER,
+    }
+}
+
+pub fn map_anisotropic(anisotropic: Anisotropic) -> D3D11_FILTER {
+    match anisotropic {
+        Anisotropic::On(_) => D3D11_FILTER_ANISOTROPIC,
+        Anisotropic::Off => 0,
+    }
+}
+
+fn map_filter_type(filter: Filter) -> D3D11_FILTER_TYPE {
+    match filter {
+        Filter::Nearest => D3D11_FILTER_TYPE_POINT,
+        Filter::Linear => D3D11_FILTER_TYPE_LINEAR,
+    }
+}
+
+
+pub fn map_filter(
+    mag_filter: Filter,
+    min_filter: Filter,
+    mip_filter: Filter,
+    reduction: D3D11_FILTER_REDUCTION_TYPE,
+    anisotropic: Anisotropic,
+) -> D3D11_FILTER {
+    let mag = map_filter_type(mag_filter);
+    let min = map_filter_type(min_filter);
+    let mip = map_filter_type(mip_filter);
+
+    (min & D3D11_FILTER_TYPE_MASK) << D3D11_MIN_FILTER_SHIFT
+        | (mag & D3D11_FILTER_TYPE_MASK) << D3D11_MAG_FILTER_SHIFT
+        | (mip & D3D11_FILTER_TYPE_MASK) << D3D11_MIP_FILTER_SHIFT
+        | (reduction & D3D11_FILTER_REDUCTION_TYPE_MASK) << D3D11_FILTER_REDUCTION_TYPE_SHIFT
+        | map_anisotropic(anisotropic)
+}
diff --git a/third_party/rust/gfx-backend-dx11/src/debug.rs b/third_party/rust/gfx-backend-dx11/src/debug.rs
new file mode 100644
index 000000000000..d686afe08343
--- /dev/null
+++ b/third_party/rust/gfx-backend-dx11/src/debug.rs
@@ -0,0 +1,92 @@
+use winapi::um::d3d11;
+
+use wio::com::ComPtr;
+use wio::wide::ToWide;
+
+use std::ffi::OsStr;
+use std::{env, fmt};
+
+
+#[allow(bad_style, unused)]
+mod temp {
+    use winapi::shared::minwindef::{BOOL, INT};
+    use winapi::um::unknwnbase::{IUnknown, IUnknownVtbl};
+    use winapi::um::winnt::LPCWSTR;
+
+    RIDL! {#[uuid(0xb2daad8b, 0x03d4, 0x4dbf, 0x95, 0xeb, 0x32, 0xab, 0x4b, 0x63, 0xd0, 0xab)]
+    interface ID3DUserDefinedAnnotation(ID3DUserDefinedAnnotationVtbl):
+        IUnknown(IUnknownVtbl) {
+        fn BeginEvent(
+            Name: LPCWSTR,
+        ) -> INT,
+        fn EndEvent() -> INT,
+        fn SetMarker(
+            Name: LPCWSTR,
+        ) -> (),
+        fn GetStatus() -> BOOL,
+    }}
+}
+
+#[must_use]
+#[cfg(debug_assertions)]
+pub struct DebugScope {
+    annotation: ComPtr<temp::ID3DUserDefinedAnnotation>,
+}
+
+#[cfg(debug_assertions)]
+impl DebugScope {
+    pub fn with_name(
+        context: &ComPtr<d3d11::ID3D11DeviceContext>,
+        args: fmt::Arguments,
+    ) -> Option<DebugScope> {
+        let name = format!("{}", args);
+
+
+
+        if unsafe { context.GetType() } == d3d11::D3D11_DEVICE_CONTEXT_DEFERRED {
+
+            if env::var("GFX_NO_RENDERDOC").is_ok() {
+                return None;
+            }
+        }
+
+        let annotation = context.cast::<temp::ID3DUserDefinedAnnotation>().unwrap();
+        let msg: &OsStr = name.as_ref();
+        let msg: Vec<u16> = msg.to_wide_null();
+
+        unsafe {
+            annotation.BeginEvent(msg.as_ptr() as _);
+        }
+
+        Some(DebugScope { annotation })
+    }
+}
+
+#[cfg(debug_assertions)]
+impl Drop for DebugScope {
+    fn drop(&mut self) {
+        unsafe {
+            self.annotation.EndEvent();
+        }
+    }
+}
+
+#[cfg(debug_assertions)]
+pub fn debug_marker(context: &ComPtr<d3d11::ID3D11DeviceContext>, args: fmt::Arguments) {
+    let name = format!("{}", args);
+
+
+    if unsafe { context.GetType() } == d3d11::D3D11_DEVICE_CONTEXT_DEFERRED {
+        if env::var("GFX_NO_RENDERDOC").is_ok() {
+            return;
+        }
+    }
+
+    let annotation = context.cast::<temp::ID3DUserDefinedAnnotation>().unwrap();
+    let msg: &OsStr = name.as_ref();
+    let msg: Vec<u16> = msg.to_wide_null();
+
+    unsafe {
+        annotation.SetMarker(msg.as_ptr() as _);
+    }
+}
diff --git a/third_party/rust/gfx-backend-dx11/src/device.rs b/third_party/rust/gfx-backend-dx11/src/device.rs
new file mode 100644
index 000000000000..2826d738e145
--- /dev/null
+++ b/third_party/rust/gfx-backend-dx11/src/device.rs
@@ -0,0 +1,2628 @@
+use hal::adapter::MemoryProperties;
+use hal::pso::VertexInputRate;
+use hal::queue::QueueFamilyId;
+use hal::range::RangeArg;
+use hal::{buffer, device, format, image, memory, pass, pool, pso, query, window};
+
+use winapi::shared::dxgi::{
+    IDXGIFactory,
+    IDXGISwapChain,
+    DXGI_SWAP_CHAIN_DESC,
+    DXGI_SWAP_EFFECT_DISCARD,
+};
+use winapi::shared::minwindef::TRUE;
+use winapi::shared::windef::HWND;
+use winapi::shared::{dxgiformat, dxgitype, winerror};
+use winapi::um::{d3d11, d3d11sdklayers, d3dcommon};
+use winapi::Interface as _;
+
+use wio::com::ComPtr;
+
+use std::borrow::Borrow;
+use std::cell::RefCell;
+use std::ops::Range;
+use std::sync::Arc;
+use std::{fmt, mem, ptr};
+
+use parking_lot::{Condvar, Mutex};
+
+use {
+    Backend,
+    Buffer,
+    BufferView,
+    CommandBuffer,
+    CommandPool,
+    ComputePipeline,
+    Descriptor,
+    DescriptorPool,
+    DescriptorSet,
+    DescriptorSetLayout,
+    Fence,
+    Framebuffer,
+    GraphicsPipeline,
+    Image,
+    ImageView,
+    InternalBuffer,
+    InternalImage,
+    Memory,
+    MemoryHeapFlags,
+    PipelineBinding,
+    PipelineLayout,
+    QueryPool,
+    RawFence,
+    RegisterMapping,
+    RegisterRemapping,
+    RenderPass,
+    Sampler,
+    Semaphore,
+    ShaderModule,
+    SubpassDesc,
+    Surface,
+    Swapchain,
+    ViewInfo,
+};
+
+use {conv, internal, shader};
+
+struct InputLayout {
+    raw: ComPtr<d3d11::ID3D11InputLayout>,
+    required_bindings: u32,
+    max_vertex_bindings: u32,
+    topology: d3d11::D3D11_PRIMITIVE_TOPOLOGY,
+    vertex_strides: Vec<u32>,
+}
+
+pub struct Device {
+    raw: ComPtr<d3d11::ID3D11Device>,
+    pub(crate) context: ComPtr<d3d11::ID3D11DeviceContext>,
+    memory_properties: MemoryProperties,
+    memory_heap_flags: [MemoryHeapFlags; 3],
+    pub(crate) internal: internal::Internal,
+}
+
+impl fmt::Debug for Device {
+    fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result {
+        fmt.write_str("Device")
+    }
+}
+
+impl Drop for Device {
+    fn drop(&mut self) {
+        if let Ok(debug) = self.raw.cast::<d3d11sdklayers::ID3D11Debug>() {
+            unsafe {
+                debug.ReportLiveDeviceObjects(d3d11sdklayers::D3D11_RLDO_DETAIL);
+            }
+        }
+    }
+}
+
+unsafe impl Send for Device {}
+unsafe impl Sync for Device {}
+
+impl Device {
+    pub fn as_raw(&self) -> *mut d3d11::ID3D11Device {
+        self.raw.as_raw()
+    }
+
+    pub fn new(
+        device: ComPtr<d3d11::ID3D11Device>,
+        context: ComPtr<d3d11::ID3D11DeviceContext>,
+        memory_properties: MemoryProperties,
+    ) -> Self {
+        Device {
+            raw: device.clone(),
+            context,
+            memory_properties,
+            memory_heap_flags: [
+                MemoryHeapFlags::DEVICE_LOCAL,
+                MemoryHeapFlags::HOST_COHERENT,
+                MemoryHeapFlags::HOST_VISIBLE,
+            ],
+            internal: internal::Internal::new(&device),
+        }
+    }
+
+    fn create_rasterizer_state(
+        &self,
+        rasterizer_desc: &pso::Rasterizer,
+    ) -> Result<ComPtr<d3d11::ID3D11RasterizerState>, pso::CreationError> {
+        let mut rasterizer = ptr::null_mut();
+        let desc = conv::map_rasterizer_desc(rasterizer_desc);
+
+        let hr = unsafe {
+            self.raw
+                .CreateRasterizerState(&desc, &mut rasterizer as *mut *mut _ as *mut *mut _)
+        };
+
+        if winerror::SUCCEEDED(hr) {
+            Ok(unsafe { ComPtr::from_raw(rasterizer) })
+        } else {
+            Err(pso::CreationError::Other)
+        }
+    }
+
+    fn create_blend_state(
+        &self,
+        blend_desc: &pso::BlendDesc,
+    ) -> Result<ComPtr<d3d11::ID3D11BlendState>, pso::CreationError> {
+        let mut blend = ptr::null_mut();
+        let desc = conv::map_blend_desc(blend_desc);
+
+        let hr = unsafe {
+            self.raw
+                .CreateBlendState(&desc, &mut blend as *mut *mut _ as *mut *mut _)
+        };
+
+        if winerror::SUCCEEDED(hr) {
+            Ok(unsafe { ComPtr::from_raw(blend) })
+        } else {
+            Err(pso::CreationError::Other)
+        }
+    }
+
+    fn create_depth_stencil_state(
+        &self,
+        depth_desc: &pso::DepthStencilDesc,
+    ) -> Result<
+        (
+            ComPtr<d3d11::ID3D11DepthStencilState>,
+            pso::State<pso::StencilValue>,
+        ),
+        pso::CreationError,
+    > {
+        let mut depth = ptr::null_mut();
+        let (desc,
stencil_ref) = conv::map_depth_stencil_desc(depth_desc); + + let hr = unsafe { + self.raw + .CreateDepthStencilState(&desc, &mut depth as *mut *mut _ as *mut *mut _) + }; + + if winerror::SUCCEEDED(hr) { + Ok((unsafe { ComPtr::from_raw(depth) }, stencil_ref)) + } else { + Err(pso::CreationError::Other) + } + } + + fn create_input_layout( + &self, + vs: ComPtr, + vertex_buffers: &[pso::VertexBufferDesc], + attributes: &[pso::AttributeDesc], + input_assembler: &pso::InputAssemblerDesc, + ) -> Result { + let mut layout = ptr::null_mut(); + + let mut vertex_strides = Vec::new(); + let mut required_bindings = 0u32; + let mut max_vertex_bindings = 0u32; + for buffer in vertex_buffers { + required_bindings |= 1 << buffer.binding as u32; + max_vertex_bindings = max_vertex_bindings.max(1u32 + buffer.binding as u32); + + while vertex_strides.len() <= buffer.binding as usize { + vertex_strides.push(0); + } + + vertex_strides[buffer.binding as usize] = buffer.stride; + } + + let input_elements = attributes + .iter() + .filter_map(|attrib| { + let buffer_desc = match vertex_buffers + .iter() + .find(|buffer_desc| buffer_desc.binding == attrib.binding) + { + Some(buffer_desc) => buffer_desc, + None => { + + + + return Some(Err(pso::CreationError::Other)); + } + }; + + let (slot_class, step_rate) = match buffer_desc.rate { + VertexInputRate::Vertex => (d3d11::D3D11_INPUT_PER_VERTEX_DATA, 0), + VertexInputRate::Instance(divisor) => { + (d3d11::D3D11_INPUT_PER_INSTANCE_DATA, divisor) + } + }; + let format = attrib.element.format; + + Some(Ok(d3d11::D3D11_INPUT_ELEMENT_DESC { + SemanticName: "TEXCOORD\0".as_ptr() as *const _, + SemanticIndex: attrib.location, + Format: match conv::map_format(format) { + Some(fm) => fm, + None => { + + + return Some(Err(pso::CreationError::Other)); + } + }, + InputSlot: attrib.binding as _, + AlignedByteOffset: attrib.element.offset, + InputSlotClass: slot_class, + InstanceDataStepRate: step_rate as _, + })) + }) + .collect::, _>>()?; + + let hr = unsafe { + self.raw.CreateInputLayout( + input_elements.as_ptr(), + input_elements.len() as _, + vs.GetBufferPointer(), + vs.GetBufferSize(), + &mut layout as *mut *mut _ as *mut *mut _, + ) + }; + + if winerror::SUCCEEDED(hr) { + let topology = conv::map_topology(input_assembler); + + Ok(InputLayout { + raw: unsafe { ComPtr::from_raw(layout) }, + required_bindings, + max_vertex_bindings, + topology, + vertex_strides, + }) + } else { + Err(pso::CreationError::Other) + } + } + + fn create_vertex_shader( + &self, + blob: ComPtr, + ) -> Result, pso::CreationError> { + let mut vs = ptr::null_mut(); + + let hr = unsafe { + self.raw.CreateVertexShader( + blob.GetBufferPointer(), + blob.GetBufferSize(), + ptr::null_mut(), + &mut vs as *mut *mut _ as *mut *mut _, + ) + }; + + if winerror::SUCCEEDED(hr) { + Ok(unsafe { ComPtr::from_raw(vs) }) + } else { + Err(pso::CreationError::Other) + } + } + + fn create_pixel_shader( + &self, + blob: ComPtr, + ) -> Result, pso::CreationError> { + let mut ps = ptr::null_mut(); + + let hr = unsafe { + self.raw.CreatePixelShader( + blob.GetBufferPointer(), + blob.GetBufferSize(), + ptr::null_mut(), + &mut ps as *mut *mut _ as *mut *mut _, + ) + }; + + if winerror::SUCCEEDED(hr) { + Ok(unsafe { ComPtr::from_raw(ps) }) + } else { + Err(pso::CreationError::Other) + } + } + + fn create_geometry_shader( + &self, + blob: ComPtr, + ) -> Result, pso::CreationError> { + let mut gs = ptr::null_mut(); + + let hr = unsafe { + self.raw.CreateGeometryShader( + blob.GetBufferPointer(), + blob.GetBufferSize(), + 
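// pClassLinkage: dynamic shader linkage is unused, so pass null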
ptr::null_mut(), + &mut gs as *mut *mut _ as *mut *mut _, + ) + }; + + if winerror::SUCCEEDED(hr) { + Ok(unsafe { ComPtr::from_raw(gs) }) + } else { + Err(pso::CreationError::Other) + } + } + + fn create_hull_shader( + &self, + blob: ComPtr, + ) -> Result, pso::CreationError> { + let mut hs = ptr::null_mut(); + + let hr = unsafe { + self.raw.CreateHullShader( + blob.GetBufferPointer(), + blob.GetBufferSize(), + ptr::null_mut(), + &mut hs as *mut *mut _ as *mut *mut _, + ) + }; + + if winerror::SUCCEEDED(hr) { + Ok(unsafe { ComPtr::from_raw(hs) }) + } else { + Err(pso::CreationError::Other) + } + } + + fn create_domain_shader( + &self, + blob: ComPtr, + ) -> Result, pso::CreationError> { + let mut ds = ptr::null_mut(); + + let hr = unsafe { + self.raw.CreateDomainShader( + blob.GetBufferPointer(), + blob.GetBufferSize(), + ptr::null_mut(), + &mut ds as *mut *mut _ as *mut *mut _, + ) + }; + + if winerror::SUCCEEDED(hr) { + Ok(unsafe { ComPtr::from_raw(ds) }) + } else { + Err(pso::CreationError::Other) + } + } + + fn create_compute_shader( + &self, + blob: ComPtr, + ) -> Result, pso::CreationError> { + let mut cs = ptr::null_mut(); + + let hr = unsafe { + self.raw.CreateComputeShader( + blob.GetBufferPointer(), + blob.GetBufferSize(), + ptr::null_mut(), + &mut cs as *mut *mut _ as *mut *mut _, + ) + }; + + if winerror::SUCCEEDED(hr) { + Ok(unsafe { ComPtr::from_raw(cs) }) + } else { + Err(pso::CreationError::Other) + } + } + + + fn extract_entry_point( + stage: pso::Stage, + source: &pso::EntryPoint, + layout: &PipelineLayout, + ) -> Result>, device::ShaderError> { + + match *source.module { + ShaderModule::Dxbc(ref _shader) => { + unimplemented!() + + + } + ShaderModule::Spirv(ref raw_data) => Ok(shader::compile_spirv_entrypoint( + raw_data, stage, source, layout, + )?), + } + } + + fn view_image_as_shader_resource( + &self, + info: &ViewInfo, + ) -> Result, image::ViewError> { + let mut desc: d3d11::D3D11_SHADER_RESOURCE_VIEW_DESC = unsafe { mem::zeroed() }; + desc.Format = info.format; + if desc.Format == dxgiformat::DXGI_FORMAT_D32_FLOAT_S8X24_UINT { + desc.Format = dxgiformat::DXGI_FORMAT_R32_FLOAT_X8X24_TYPELESS; + } + + #[allow(non_snake_case)] + let MostDetailedMip = info.range.levels.start as _; + #[allow(non_snake_case)] + let MipLevels = (info.range.levels.end - info.range.levels.start) as _; + #[allow(non_snake_case)] + let FirstArraySlice = info.range.layers.start as _; + #[allow(non_snake_case)] + let ArraySize = (info.range.layers.end - info.range.layers.start) as _; + + match info.view_kind { + image::ViewKind::D1 => { + desc.ViewDimension = d3dcommon::D3D11_SRV_DIMENSION_TEXTURE1D; + *unsafe { desc.u.Texture1D_mut() } = d3d11::D3D11_TEX1D_SRV { + MostDetailedMip, + MipLevels, + } + } + image::ViewKind::D1Array => { + desc.ViewDimension = d3dcommon::D3D11_SRV_DIMENSION_TEXTURE1DARRAY; + *unsafe { desc.u.Texture1DArray_mut() } = d3d11::D3D11_TEX1D_ARRAY_SRV { + MostDetailedMip, + MipLevels, + FirstArraySlice, + ArraySize, + } + } + image::ViewKind::D2 => { + desc.ViewDimension = d3dcommon::D3D11_SRV_DIMENSION_TEXTURE2D; + *unsafe { desc.u.Texture2D_mut() } = d3d11::D3D11_TEX2D_SRV { + MostDetailedMip, + MipLevels, + } + } + image::ViewKind::D2Array => { + desc.ViewDimension = d3dcommon::D3D11_SRV_DIMENSION_TEXTURE2DARRAY; + *unsafe { desc.u.Texture2DArray_mut() } = d3d11::D3D11_TEX2D_ARRAY_SRV { + MostDetailedMip, + MipLevels, + FirstArraySlice, + ArraySize, + } + } + image::ViewKind::D3 => { + desc.ViewDimension = d3dcommon::D3D11_SRV_DIMENSION_TEXTURE3D; + *unsafe { 
desc.u.Texture3D_mut() } = d3d11::D3D11_TEX3D_SRV { + MostDetailedMip, + MipLevels, + } + } + image::ViewKind::Cube => { + desc.ViewDimension = d3dcommon::D3D11_SRV_DIMENSION_TEXTURECUBE; + *unsafe { desc.u.TextureCube_mut() } = d3d11::D3D11_TEXCUBE_SRV { + MostDetailedMip, + MipLevels, + } + } + image::ViewKind::CubeArray => { + desc.ViewDimension = d3dcommon::D3D11_SRV_DIMENSION_TEXTURECUBEARRAY; + *unsafe { desc.u.TextureCubeArray_mut() } = d3d11::D3D11_TEXCUBE_ARRAY_SRV { + MostDetailedMip, + MipLevels, + First2DArrayFace: FirstArraySlice, + NumCubes: ArraySize / 6, + } + } + } + + let mut srv = ptr::null_mut(); + let hr = unsafe { + self.raw.CreateShaderResourceView( + info.resource, + &desc, + &mut srv as *mut *mut _ as *mut *mut _, + ) + }; + + if winerror::SUCCEEDED(hr) { + Ok(unsafe { ComPtr::from_raw(srv) }) + } else { + Err(image::ViewError::Unsupported) + } + } + + fn view_image_as_unordered_access( + &self, + info: &ViewInfo, + ) -> Result, image::ViewError> { + let mut desc: d3d11::D3D11_UNORDERED_ACCESS_VIEW_DESC = unsafe { mem::zeroed() }; + desc.Format = info.format; + + #[allow(non_snake_case)] + let MipSlice = info.range.levels.start as _; + #[allow(non_snake_case)] + let FirstArraySlice = info.range.layers.start as _; + #[allow(non_snake_case)] + let ArraySize = (info.range.layers.end - info.range.layers.start) as _; + + match info.view_kind { + image::ViewKind::D1 => { + desc.ViewDimension = d3d11::D3D11_UAV_DIMENSION_TEXTURE1D; + *unsafe { desc.u.Texture1D_mut() } = d3d11::D3D11_TEX1D_UAV { + MipSlice: info.range.levels.start as _, + } + } + image::ViewKind::D1Array => { + desc.ViewDimension = d3d11::D3D11_UAV_DIMENSION_TEXTURE1DARRAY; + *unsafe { desc.u.Texture1DArray_mut() } = d3d11::D3D11_TEX1D_ARRAY_UAV { + MipSlice, + FirstArraySlice, + ArraySize, + } + } + image::ViewKind::D2 => { + desc.ViewDimension = d3d11::D3D11_UAV_DIMENSION_TEXTURE2D; + *unsafe { desc.u.Texture2D_mut() } = d3d11::D3D11_TEX2D_UAV { + MipSlice: info.range.levels.start as _, + } + } + image::ViewKind::D2Array => { + desc.ViewDimension = d3d11::D3D11_UAV_DIMENSION_TEXTURE2DARRAY; + *unsafe { desc.u.Texture2DArray_mut() } = d3d11::D3D11_TEX2D_ARRAY_UAV { + MipSlice, + FirstArraySlice, + ArraySize, + } + } + image::ViewKind::D3 => { + desc.ViewDimension = d3d11::D3D11_UAV_DIMENSION_TEXTURE3D; + *unsafe { desc.u.Texture3D_mut() } = d3d11::D3D11_TEX3D_UAV { + MipSlice, + FirstWSlice: FirstArraySlice, + WSize: ArraySize, + } + } + _ => unimplemented!(), + } + + let mut uav = ptr::null_mut(); + let hr = unsafe { + self.raw.CreateUnorderedAccessView( + info.resource, + &desc, + &mut uav as *mut *mut _ as *mut *mut _, + ) + }; + + if winerror::SUCCEEDED(hr) { + Ok(unsafe { ComPtr::from_raw(uav) }) + } else { + Err(image::ViewError::Unsupported) + } + } + + pub(crate) fn view_image_as_render_target( + &self, + info: &ViewInfo, + ) -> Result, image::ViewError> { + let mut desc: d3d11::D3D11_RENDER_TARGET_VIEW_DESC = unsafe { mem::zeroed() }; + desc.Format = info.format; + + #[allow(non_snake_case)] + let MipSlice = info.range.levels.start as _; + #[allow(non_snake_case)] + let FirstArraySlice = info.range.layers.start as _; + #[allow(non_snake_case)] + let ArraySize = (info.range.layers.end - info.range.layers.start) as _; + + match info.view_kind { + image::ViewKind::D1 => { + desc.ViewDimension = d3d11::D3D11_RTV_DIMENSION_TEXTURE1D; + *unsafe { desc.u.Texture1D_mut() } = d3d11::D3D11_TEX1D_RTV { MipSlice } + } + image::ViewKind::D1Array => { + desc.ViewDimension = 
d3d11::D3D11_RTV_DIMENSION_TEXTURE1DARRAY; + *unsafe { desc.u.Texture1DArray_mut() } = d3d11::D3D11_TEX1D_ARRAY_RTV { + MipSlice, + FirstArraySlice, + ArraySize, + } + } + image::ViewKind::D2 => { + desc.ViewDimension = d3d11::D3D11_RTV_DIMENSION_TEXTURE2D; + *unsafe { desc.u.Texture2D_mut() } = d3d11::D3D11_TEX2D_RTV { MipSlice } + } + image::ViewKind::D2Array => { + desc.ViewDimension = d3d11::D3D11_RTV_DIMENSION_TEXTURE2DARRAY; + *unsafe { desc.u.Texture2DArray_mut() } = d3d11::D3D11_TEX2D_ARRAY_RTV { + MipSlice, + FirstArraySlice, + ArraySize, + } + } + image::ViewKind::D3 => { + desc.ViewDimension = d3d11::D3D11_RTV_DIMENSION_TEXTURE3D; + *unsafe { desc.u.Texture3D_mut() } = d3d11::D3D11_TEX3D_RTV { + MipSlice, + FirstWSlice: FirstArraySlice, + WSize: ArraySize, + } + } + _ => unimplemented!(), + } + + let mut rtv = ptr::null_mut(); + let hr = unsafe { + self.raw.CreateRenderTargetView( + info.resource, + &desc, + &mut rtv as *mut *mut _ as *mut *mut _, + ) + }; + + if winerror::SUCCEEDED(hr) { + Ok(unsafe { ComPtr::from_raw(rtv) }) + } else { + Err(image::ViewError::Unsupported) + } + } + + fn view_image_as_depth_stencil( + &self, + info: &ViewInfo, + ) -> Result, image::ViewError> { + #![allow(non_snake_case)] + + let MipSlice = info.range.levels.start as _; + let FirstArraySlice = info.range.layers.start as _; + let ArraySize = (info.range.layers.end - info.range.layers.start) as _; + assert_eq!(info.range.levels.start + 1, info.range.levels.end); + assert!(info.range.layers.end <= info.kind.num_layers()); + + let mut desc: d3d11::D3D11_DEPTH_STENCIL_VIEW_DESC = unsafe { mem::zeroed() }; + desc.Format = info.format; + + match info.view_kind { + image::ViewKind::D2 => { + desc.ViewDimension = d3d11::D3D11_DSV_DIMENSION_TEXTURE2D; + *unsafe { desc.u.Texture2D_mut() } = d3d11::D3D11_TEX2D_DSV { MipSlice } + } + image::ViewKind::D2Array => { + desc.ViewDimension = d3d11::D3D11_DSV_DIMENSION_TEXTURE2DARRAY; + *unsafe { desc.u.Texture2DArray_mut() } = d3d11::D3D11_TEX2D_ARRAY_DSV { + MipSlice, + FirstArraySlice, + ArraySize, + } + } + _ => unimplemented!(), + } + + let mut dsv = ptr::null_mut(); + let hr = unsafe { + self.raw.CreateDepthStencilView( + info.resource, + &desc, + &mut dsv as *mut *mut _ as *mut *mut _, + ) + }; + + if winerror::SUCCEEDED(hr) { + Ok(unsafe { ComPtr::from_raw(dsv) }) + } else { + Err(image::ViewError::Unsupported) + } + } + + pub(crate) fn create_swapchain_impl( + &self, + config: &window::SwapchainConfig, + window_handle: HWND, + factory: ComPtr, + ) -> Result<(ComPtr, dxgiformat::DXGI_FORMAT), window::CreationError> { + + + + debug!("{:#?}", config); + let non_srgb_format = conv::map_format_nosrgb(config.format).unwrap(); + + let mut desc = DXGI_SWAP_CHAIN_DESC { + BufferDesc: dxgitype::DXGI_MODE_DESC { + Width: config.extent.width, + Height: config.extent.height, + + + RefreshRate: dxgitype::DXGI_RATIONAL { + Numerator: 1, + Denominator: 60, + }, + Format: non_srgb_format, + ScanlineOrdering: dxgitype::DXGI_MODE_SCANLINE_ORDER_UNSPECIFIED, + Scaling: dxgitype::DXGI_MODE_SCALING_UNSPECIFIED, + }, + + SampleDesc: dxgitype::DXGI_SAMPLE_DESC { + Count: 1, + Quality: 0, + }, + BufferUsage: dxgitype::DXGI_USAGE_RENDER_TARGET_OUTPUT + | dxgitype::DXGI_USAGE_SHADER_INPUT, + BufferCount: config.image_count, + OutputWindow: window_handle, + + Windowed: TRUE, + + SwapEffect: DXGI_SWAP_EFFECT_DISCARD, + Flags: 0, + }; + + let dxgi_swapchain = { + let mut swapchain: *mut IDXGISwapChain = ptr::null_mut(); + let hr = unsafe { + factory.CreateSwapChain( + 
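// first parameter: the D3D11 device (as IUnknown) that presents to this swapchain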
self.raw.as_raw() as *mut _, + &mut desc as *mut _, + &mut swapchain as *mut *mut _ as *mut *mut _, + ) + }; + assert_eq!(hr, winerror::S_OK); + + unsafe { ComPtr::from_raw(swapchain) } + }; + Ok((dxgi_swapchain, non_srgb_format)) + } +} + +impl device::Device for Device { + unsafe fn allocate_memory( + &self, + mem_type: hal::MemoryTypeId, + size: u64, + ) -> Result { + let vec = Vec::with_capacity(size as usize); + Ok(Memory { + ty: self.memory_heap_flags[mem_type.0], + properties: self.memory_properties.memory_types[mem_type.0].properties, + size, + mapped_ptr: vec.as_ptr() as *mut _, + host_visible: Some(RefCell::new(vec)), + local_buffers: RefCell::new(Vec::new()), + local_images: RefCell::new(Vec::new()), + }) + } + + unsafe fn create_command_pool( + &self, + _family: QueueFamilyId, + _create_flags: pool::CommandPoolCreateFlags, + ) -> Result { + + Ok(CommandPool { + device: self.raw.clone(), + internal: self.internal.clone(), + }) + } + + unsafe fn destroy_command_pool(&self, _pool: CommandPool) { + + } + + unsafe fn create_render_pass<'a, IA, IS, ID>( + &self, + attachments: IA, + subpasses: IS, + _dependencies: ID, + ) -> Result + where + IA: IntoIterator, + IA::Item: Borrow, + IS: IntoIterator, + IS::Item: Borrow>, + ID: IntoIterator, + ID::Item: Borrow, + { + Ok(RenderPass { + attachments: attachments + .into_iter() + .map(|attachment| attachment.borrow().clone()) + .collect(), + subpasses: subpasses + .into_iter() + .map(|desc| { + let desc = desc.borrow(); + SubpassDesc { + color_attachments: desc + .colors + .iter() + .map(|color| color.borrow().clone()) + .collect(), + depth_stencil_attachment: desc.depth_stencil.map(|d| *d), + input_attachments: desc + .inputs + .iter() + .map(|input| input.borrow().clone()) + .collect(), + resolve_attachments: desc + .resolves + .iter() + .map(|resolve| resolve.borrow().clone()) + .collect(), + } + }) + .collect(), + }) + } + + unsafe fn create_pipeline_layout( + &self, + set_layouts: IS, + _push_constant_ranges: IR, + ) -> Result + where + IS: IntoIterator, + IS::Item: Borrow, + IR: IntoIterator, + IR::Item: Borrow<(pso::ShaderStageFlags, Range)>, + { + use pso::DescriptorType::*; + + let mut set_bindings = Vec::new(); + let mut set_remapping = Vec::new(); + + + + + + let mut s_offset = 0; + let mut t_offset = 0; + let mut c_offset = 0; + let mut u_offset = 0; + + fn get_descriptor_offset(ty: pso::DescriptorType, s: u32, t: u32, c: u32, u: u32) -> u32 { + match ty { + Sampler => s, + SampledImage | UniformTexelBuffer => t, + UniformBuffer | UniformBufferDynamic => c, + StorageTexelBuffer | StorageBuffer | InputAttachment | StorageBufferDynamic + | StorageImage => u, + CombinedImageSampler => unreachable!(), + } + } + + for layout in set_layouts { + let layout = layout.borrow(); + + let bindings = &layout.bindings; + + let stages = [ + pso::ShaderStageFlags::VERTEX, + pso::ShaderStageFlags::HULL, + pso::ShaderStageFlags::DOMAIN, + pso::ShaderStageFlags::GEOMETRY, + pso::ShaderStageFlags::FRAGMENT, + pso::ShaderStageFlags::COMPUTE, + ]; + + let mut optimized_bindings = Vec::new(); + + + + for &stage in &stages { + let mut state = None; + + for binding in bindings { + if !binding.stage.contains(stage) { + continue; + } + + state = match state { + None => { + if binding.stage.contains(stage) { + let offset = binding.handle_offset; + + Some(( + binding.ty, + binding.binding_range.start, + binding.binding_range.end, + offset, + offset, + )) + } else { + None + } + } + Some(( + mut ty, + mut start, + mut end, + mut start_offset, + mut 
current_offset, + )) => { + + + + if ty != binding.ty + || end != binding.binding_range.start + || current_offset + 1 != binding.handle_offset + { + let register_offset = get_descriptor_offset( + ty, s_offset, t_offset, c_offset, u_offset, + ); + + optimized_bindings.push(PipelineBinding { + stage, + ty, + binding_range: (register_offset + start) + .. (register_offset + end), + handle_offset: start_offset, + }); + + if binding.stage.contains(stage) { + ty = binding.ty; + start = binding.binding_range.start; + end = binding.binding_range.end; + + start_offset = binding.handle_offset; + current_offset = binding.handle_offset; + + Some((ty, start, end, start_offset, current_offset)) + } else { + None + } + } else { + end += 1; + current_offset += 1; + + Some((ty, start, end, start_offset, current_offset)) + } + } + } + } + + + if let Some((ty, start, end, start_offset, _)) = state { + let register_offset = + get_descriptor_offset(ty, s_offset, t_offset, c_offset, u_offset); + + optimized_bindings.push(PipelineBinding { + stage, + ty, + binding_range: (register_offset + start) .. (register_offset + end), + handle_offset: start_offset, + }); + } + } + + let offset_mappings = layout + .register_remap + .mapping + .iter() + .map(|register| { + let register_offset = + get_descriptor_offset(register.ty, s_offset, t_offset, c_offset, u_offset); + + RegisterMapping { + ty: register.ty, + spirv_binding: register.spirv_binding, + hlsl_register: register.hlsl_register + register_offset as u8, + combined: register.combined, + } + }) + .collect(); + + set_bindings.push(optimized_bindings); + set_remapping.push(RegisterRemapping { + mapping: offset_mappings, + num_s: layout.register_remap.num_s, + num_t: layout.register_remap.num_t, + num_c: layout.register_remap.num_c, + num_u: layout.register_remap.num_u, + }); + + s_offset += layout.register_remap.num_s as u32; + t_offset += layout.register_remap.num_t as u32; + c_offset += layout.register_remap.num_c as u32; + u_offset += layout.register_remap.num_u as u32; + } + + Ok(PipelineLayout { + set_bindings, + set_remapping, + }) + } + + unsafe fn create_pipeline_cache( + &self, + _data: Option<&[u8]>, + ) -> Result<(), device::OutOfMemory> { + Ok(()) + } + + unsafe fn get_pipeline_cache_data(&self, cache: &()) -> Result, device::OutOfMemory> { + + Ok(Vec::new()) + } + + unsafe fn destroy_pipeline_cache(&self, _: ()) { + + } + + unsafe fn merge_pipeline_caches(&self, _: &(), _: I) -> Result<(), device::OutOfMemory> + where + I: IntoIterator, + I::Item: Borrow<()>, + { + + Ok(()) + } + + unsafe fn create_graphics_pipeline<'a>( + &self, + desc: &pso::GraphicsPipelineDesc<'a, Backend>, + _cache: Option<&()>, + ) -> Result { + let build_shader = |stage: pso::Stage, source: Option<&pso::EntryPoint<'a, Backend>>| { + let source = match source { + Some(src) => src, + None => return Ok(None), + }; + + Self::extract_entry_point(stage, source, desc.layout) + .map_err(|err| pso::CreationError::Shader(err)) + }; + + let vs = build_shader(pso::Stage::Vertex, Some(&desc.shaders.vertex))?.unwrap(); + let ps = build_shader(pso::Stage::Fragment, desc.shaders.fragment.as_ref())?; + let gs = build_shader(pso::Stage::Geometry, desc.shaders.geometry.as_ref())?; + let ds = build_shader(pso::Stage::Domain, desc.shaders.domain.as_ref())?; + let hs = build_shader(pso::Stage::Hull, desc.shaders.hull.as_ref())?; + + let layout = self.create_input_layout( + vs.clone(), + &desc.vertex_buffers, + &desc.attributes, + &desc.input_assembler, + )?; + let rasterizer_state = 
self.create_rasterizer_state(&desc.rasterizer)?; + let blend_state = self.create_blend_state(&desc.blender)?; + let depth_stencil_state = Some(self.create_depth_stencil_state(&desc.depth_stencil)?); + + let vs = self.create_vertex_shader(vs)?; + let ps = if let Some(blob) = ps { + Some(self.create_pixel_shader(blob)?) + } else { + None + }; + let gs = if let Some(blob) = gs { + Some(self.create_geometry_shader(blob)?) + } else { + None + }; + let ds = if let Some(blob) = ds { + Some(self.create_domain_shader(blob)?) + } else { + None + }; + let hs = if let Some(blob) = hs { + Some(self.create_hull_shader(blob)?) + } else { + None + }; + + Ok(GraphicsPipeline { + vs, + gs, + ds, + hs, + ps, + topology: layout.topology, + input_layout: layout.raw, + rasterizer_state, + blend_state, + depth_stencil_state, + baked_states: desc.baked_states.clone(), + required_bindings: layout.required_bindings, + max_vertex_bindings: layout.max_vertex_bindings, + strides: layout.vertex_strides, + }) + } + + unsafe fn create_compute_pipeline<'a>( + &self, + desc: &pso::ComputePipelineDesc<'a, Backend>, + _cache: Option<&()>, + ) -> Result { + let build_shader = |stage: pso::Stage, source: Option<&pso::EntryPoint<'a, Backend>>| { + let source = match source { + Some(src) => src, + None => return Ok(None), + }; + + Self::extract_entry_point(stage, source, desc.layout) + .map_err(|err| pso::CreationError::Shader(err)) + }; + + let cs = build_shader(pso::Stage::Compute, Some(&desc.shader))?.unwrap(); + let cs = self.create_compute_shader(cs)?; + + Ok(ComputePipeline { cs }) + } + + unsafe fn create_framebuffer( + &self, + _renderpass: &RenderPass, + attachments: I, + extent: image::Extent, + ) -> Result + where + I: IntoIterator, + I::Item: Borrow, + { + Ok(Framebuffer { + attachments: attachments + .into_iter() + .map(|att| att.borrow().clone()) + .collect(), + layers: extent.depth as _, + }) + } + + unsafe fn create_shader_module( + &self, + raw_data: &[u32], + ) -> Result { + Ok(ShaderModule::Spirv(raw_data.into())) + } + + unsafe fn create_buffer( + &self, + size: u64, + usage: buffer::Usage, + ) -> Result { + use buffer::Usage; + + let mut bind = 0; + + if usage.contains(Usage::UNIFORM) { + bind |= d3d11::D3D11_BIND_CONSTANT_BUFFER; + } + if usage.contains(Usage::VERTEX) { + bind |= d3d11::D3D11_BIND_VERTEX_BUFFER; + } + if usage.contains(Usage::INDEX) { + bind |= d3d11::D3D11_BIND_INDEX_BUFFER; + } + + + if usage.intersects(Usage::UNIFORM_TEXEL | Usage::STORAGE_TEXEL | Usage::TRANSFER_SRC) { + bind |= d3d11::D3D11_BIND_SHADER_RESOURCE; + } + + if usage.intersects(Usage::TRANSFER_DST | Usage::STORAGE) { + bind |= d3d11::D3D11_BIND_UNORDERED_ACCESS; + } + + + + let needs_disjoint_cb = bind & d3d11::D3D11_BIND_CONSTANT_BUFFER != 0 + && bind != d3d11::D3D11_BIND_CONSTANT_BUFFER; + + if needs_disjoint_cb { + bind ^= d3d11::D3D11_BIND_CONSTANT_BUFFER; + } + + fn up_align(x: u64, alignment: u64) -> u64 { + (x + alignment - 1) & !(alignment - 1) + } + + + let size = if usage.contains(Usage::UNIFORM) { + up_align(size, 16) + } else { + up_align(size, 4) + }; + + Ok(Buffer { + internal: InternalBuffer { + raw: ptr::null_mut(), + disjoint_cb: if needs_disjoint_cb { + Some(ptr::null_mut()) + } else { + None + }, + srv: None, + uav: None, + usage, + }, + ty: MemoryHeapFlags::empty(), + bound_range: 0 .. 
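+ // Worked example for up_align() above: constant buffers must be 16-byte
+ // aligned, so up_align(13, 16) = (13 + 15) & !15 = 16, and up_align(32, 16)
+ // stays 32. Other buffers are rounded up to 4 bytes, presumably so the raw
+ // R32-typeless views created at bind time always cover whole 32-bit words.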
0, + host_ptr: ptr::null_mut(), + bind, + requirements: memory::Requirements { + size, + alignment: 1, + type_mask: MemoryHeapFlags::all().bits(), + }, + }) + } + + unsafe fn get_buffer_requirements(&self, buffer: &Buffer) -> memory::Requirements { + buffer.requirements + } + + unsafe fn bind_buffer_memory( + &self, + memory: &Memory, + offset: u64, + buffer: &mut Buffer, + ) -> Result<(), device::BindError> { + debug!( + "usage={:?}, props={:b}", + buffer.internal.usage, memory.properties + ); + + #[allow(non_snake_case)] + let MiscFlags = if buffer.bind + & (d3d11::D3D11_BIND_SHADER_RESOURCE | d3d11::D3D11_BIND_UNORDERED_ACCESS) + != 0 + { + d3d11::D3D11_RESOURCE_MISC_BUFFER_ALLOW_RAW_VIEWS + } else { + 0 + }; + + let initial_data = memory + .host_visible + .as_ref() + .map(|p| d3d11::D3D11_SUBRESOURCE_DATA { + pSysMem: p.borrow().as_ptr().offset(offset as isize) as _, + SysMemPitch: 0, + SysMemSlicePitch: 0, + }); + + let raw = match memory.ty { + MemoryHeapFlags::DEVICE_LOCAL => { + + let desc = d3d11::D3D11_BUFFER_DESC { + ByteWidth: buffer.requirements.size as _, + Usage: d3d11::D3D11_USAGE_DEFAULT, + BindFlags: buffer.bind, + CPUAccessFlags: 0, + MiscFlags, + StructureByteStride: if buffer + .internal + .usage + .contains(buffer::Usage::TRANSFER_SRC) + { + 4 + } else { + 0 + }, + }; + + let mut buffer: *mut d3d11::ID3D11Buffer = ptr::null_mut(); + let hr = self.raw.CreateBuffer( + &desc, + if let Some(data) = initial_data { + &data + } else { + ptr::null_mut() + }, + &mut buffer as *mut *mut _ as *mut *mut _, + ); + + if !winerror::SUCCEEDED(hr) { + return Err(device::BindError::WrongMemory); + } + + ComPtr::from_raw(buffer) + } + MemoryHeapFlags::HOST_VISIBLE | MemoryHeapFlags::HOST_COHERENT => { + let desc = d3d11::D3D11_BUFFER_DESC { + ByteWidth: buffer.requirements.size as _, + + Usage: d3d11::D3D11_USAGE_DEFAULT, + BindFlags: buffer.bind, + CPUAccessFlags: 0, + MiscFlags, + StructureByteStride: if buffer + .internal + .usage + .contains(buffer::Usage::TRANSFER_SRC) + { + 4 + } else { + 0 + }, + }; + + let mut buffer: *mut d3d11::ID3D11Buffer = ptr::null_mut(); + let hr = self.raw.CreateBuffer( + &desc, + if let Some(data) = initial_data { + &data + } else { + ptr::null_mut() + }, + &mut buffer as *mut *mut _ as *mut *mut _, + ); + + if !winerror::SUCCEEDED(hr) { + return Err(device::BindError::WrongMemory); + } + + ComPtr::from_raw(buffer) + } + _ => unimplemented!(), + }; + + let disjoint_cb = if buffer.internal.disjoint_cb.is_some() { + let desc = d3d11::D3D11_BUFFER_DESC { + ByteWidth: buffer.requirements.size as _, + Usage: d3d11::D3D11_USAGE_DEFAULT, + BindFlags: d3d11::D3D11_BIND_CONSTANT_BUFFER, + CPUAccessFlags: 0, + MiscFlags: 0, + StructureByteStride: 0, + }; + + let mut buffer: *mut d3d11::ID3D11Buffer = ptr::null_mut(); + let hr = self.raw.CreateBuffer( + &desc, + if let Some(data) = initial_data { + &data + } else { + ptr::null_mut() + }, + &mut buffer as *mut *mut _ as *mut *mut _, + ); + + if !winerror::SUCCEEDED(hr) { + return Err(device::BindError::WrongMemory); + } + + Some(buffer) + } else { + None + }; + + let srv = if buffer.bind & d3d11::D3D11_BIND_SHADER_RESOURCE != 0 { + let mut desc = mem::zeroed::(); + desc.Format = dxgiformat::DXGI_FORMAT_R32_TYPELESS; + desc.ViewDimension = d3dcommon::D3D11_SRV_DIMENSION_BUFFEREX; + *desc.u.BufferEx_mut() = d3d11::D3D11_BUFFEREX_SRV { + FirstElement: 0, + + NumElements: buffer.requirements.size as u32 / 4, + Flags: d3d11::D3D11_BUFFEREX_SRV_FLAG_RAW, + }; + + let mut srv = ptr::null_mut(); + let hr = 
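+ // The SRV created below is a "raw" (ByteAddressBuffer-style) view: format
+ // R32_TYPELESS, D3D11_BUFFEREX_SRV_FLAG_RAW set, and NumElements counted in
+ // 32-bit words (size / 4) rather than texels. The UAV a few lines further
+ // down follows the same scheme with D3D11_BUFFER_UAV_FLAG_RAW.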
self.raw.CreateShaderResourceView( + raw.as_raw() as *mut _, + &desc, + &mut srv as *mut *mut _ as *mut *mut _, + ); + + if !winerror::SUCCEEDED(hr) { + error!("CreateShaderResourceView failed: 0x{:x}", hr); + + return Err(device::BindError::WrongMemory); + } + + Some(srv) + } else { + None + }; + + let uav = if buffer.bind & d3d11::D3D11_BIND_UNORDERED_ACCESS != 0 { + let mut desc = mem::zeroed::(); + desc.Format = dxgiformat::DXGI_FORMAT_R32_TYPELESS; + desc.ViewDimension = d3d11::D3D11_UAV_DIMENSION_BUFFER; + *desc.u.Buffer_mut() = d3d11::D3D11_BUFFER_UAV { + FirstElement: 0, + NumElements: buffer.requirements.size as u32 / 4, + Flags: d3d11::D3D11_BUFFER_UAV_FLAG_RAW, + }; + + let mut uav = ptr::null_mut(); + let hr = self.raw.CreateUnorderedAccessView( + raw.as_raw() as *mut _, + &desc, + &mut uav as *mut *mut _ as *mut *mut _, + ); + + if !winerror::SUCCEEDED(hr) { + error!("CreateUnorderedAccessView failed: 0x{:x}", hr); + + return Err(device::BindError::WrongMemory); + } + + Some(uav) + } else { + None + }; + + let internal = InternalBuffer { + raw: raw.into_raw(), + disjoint_cb, + srv, + uav, + usage: buffer.internal.usage, + }; + let range = offset .. buffer.requirements.size; + + memory.bind_buffer(range.clone(), internal.clone()); + + let host_ptr = if let Some(vec) = &memory.host_visible { + vec.borrow().as_ptr() as *mut _ + } else { + ptr::null_mut() + }; + + buffer.internal = internal; + buffer.ty = memory.ty; + buffer.host_ptr = host_ptr; + buffer.bound_range = range; + + Ok(()) + } + + unsafe fn create_buffer_view>( + &self, + _buffer: &Buffer, + _format: Option, + _range: R, + ) -> Result { + unimplemented!() + } + + unsafe fn create_image( + &self, + kind: image::Kind, + mip_levels: image::Level, + format: format::Format, + tiling: image::Tiling, + usage: image::Usage, + view_caps: image::ViewCapabilities, + ) -> Result { + use image::Usage; + + + + let surface_desc = format.base_format().0.desc(); + let bytes_per_texel = surface_desc.bits / 8; + let ext = kind.extent(); + let size = (ext.width * ext.height * ext.depth) as u64 * bytes_per_texel as u64; + let compressed = surface_desc.is_compressed(); + let depth = format.is_depth(); + + let mut bind = 0; + + if usage.intersects(Usage::TRANSFER_SRC | Usage::SAMPLED | Usage::STORAGE) { + bind |= d3d11::D3D11_BIND_SHADER_RESOURCE; + } + + + if !compressed && !depth { + if usage.intersects(Usage::COLOR_ATTACHMENT | Usage::TRANSFER_DST) { + bind |= d3d11::D3D11_BIND_RENDER_TARGET; + } + + if usage.intersects(Usage::TRANSFER_DST | Usage::STORAGE) { + bind |= d3d11::D3D11_BIND_UNORDERED_ACCESS; + } + } + + if usage.contains(Usage::DEPTH_STENCIL_ATTACHMENT) { + bind |= d3d11::D3D11_BIND_DEPTH_STENCIL; + } + + debug!("{:b}", bind); + + Ok(Image { + internal: InternalImage { + raw: ptr::null_mut(), + copy_srv: None, + srv: None, + unordered_access_views: Vec::new(), + depth_stencil_views: Vec::new(), + render_target_views: Vec::new(), + }, + decomposed_format: conv::DecomposedDxgiFormat::UNKNOWN, + kind, + mip_levels, + format, + usage, + tiling, + view_caps, + bind, + requirements: memory::Requirements { + size: size, + alignment: 1, + type_mask: MemoryHeapFlags::DEVICE_LOCAL.bits(), + }, + }) + } + + unsafe fn get_image_requirements(&self, image: &Image) -> memory::Requirements { + image.requirements + } + + unsafe fn get_image_subresource_footprint( + &self, + _image: &Image, + _sub: image::Subresource, + ) -> image::SubresourceFootprint { + unimplemented!() + } + + unsafe fn bind_image_memory( + &self, + memory: &Memory, + 
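+ // As with buffers, create_image() above only records a descriptor (raw is
+ // left as ptr::null_mut()); the actual ID3D11Texture1D/2D/3D is created here
+ // in bind_image_memory, once the memory properties are known, since D3D11
+ // has no separate allocate/bind split like Vulkan or D3D12.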
offset: u64, + image: &mut Image, + ) -> Result<(), device::BindError> { + use image::Usage; + use memory::Properties; + + let base_format = image.format.base_format(); + let format_desc = base_format.0.desc(); + + let compressed = format_desc.is_compressed(); + let depth = image.format.is_depth(); + let stencil = image.format.is_stencil(); + + let (bind, usage, cpu) = if memory.properties == Properties::DEVICE_LOCAL { + (image.bind, d3d11::D3D11_USAGE_DEFAULT, 0) + } else if memory.properties + == (Properties::DEVICE_LOCAL | Properties::CPU_VISIBLE | Properties::CPU_CACHED) + { + ( + image.bind, + d3d11::D3D11_USAGE_DYNAMIC, + d3d11::D3D11_CPU_ACCESS_WRITE, + ) + } else if memory.properties == (Properties::CPU_VISIBLE | Properties::CPU_CACHED) { + ( + 0, + d3d11::D3D11_USAGE_STAGING, + d3d11::D3D11_CPU_ACCESS_READ | d3d11::D3D11_CPU_ACCESS_WRITE, + ) + } else { + unimplemented!() + }; + + let dxgi_format = conv::map_format(image.format).unwrap(); + let decomposed = conv::DecomposedDxgiFormat::from_dxgi_format(dxgi_format); + let bpp = format_desc.bits as u32 / 8; + + let (view_kind, resource) = match image.kind { + image::Kind::D1(width, layers) => { + let initial_data = + memory + .host_visible + .as_ref() + .map(|_p| d3d11::D3D11_SUBRESOURCE_DATA { + pSysMem: memory.mapped_ptr.offset(offset as isize) as _, + SysMemPitch: 0, + SysMemSlicePitch: 0, + }); + + let desc = d3d11::D3D11_TEXTURE1D_DESC { + Width: width, + MipLevels: image.mip_levels as _, + ArraySize: layers as _, + Format: decomposed.typeless, + Usage: usage, + BindFlags: bind, + CPUAccessFlags: cpu, + MiscFlags: 0, + }; + + let mut resource = ptr::null_mut(); + let hr = self.raw.CreateTexture1D( + &desc, + if let Some(data) = initial_data { + &data + } else { + ptr::null_mut() + }, + &mut resource as *mut *mut _ as *mut *mut _, + ); + + if !winerror::SUCCEEDED(hr) { + error!("CreateTexture1D failed: 0x{:x}", hr); + + return Err(device::BindError::WrongMemory); + } + + (image::ViewKind::D1Array, resource) + } + image::Kind::D2(width, height, layers, _) => { + let mut initial_datas = Vec::new(); + + for _layer in 0 .. layers { + for level in 0 .. 
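+ // D3D11 wants one D3D11_SUBRESOURCE_DATA per subresource, ordered with the
+ // array layer outermost and the mip level innermost, which is exactly what
+ // this nested loop builds. Every entry points at the same mapped base
+ // pointer; only SysMemPitch (mip-level width times bytes per texel) varies.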
image.mip_levels { + let width = image.kind.extent().at_level(level).width; + + + initial_datas.push(d3d11::D3D11_SUBRESOURCE_DATA { + pSysMem: memory.mapped_ptr.offset(offset as isize) as _, + SysMemPitch: width * bpp, + SysMemSlicePitch: 0, + }); + } + } + + let desc = d3d11::D3D11_TEXTURE2D_DESC { + Width: width, + Height: height, + MipLevels: image.mip_levels as _, + ArraySize: layers as _, + Format: decomposed.typeless, + SampleDesc: dxgitype::DXGI_SAMPLE_DESC { + Count: 1, + Quality: 0, + }, + Usage: usage, + BindFlags: bind, + CPUAccessFlags: cpu, + MiscFlags: if image.view_caps.contains(image::ViewCapabilities::KIND_CUBE) { + d3d11::D3D11_RESOURCE_MISC_TEXTURECUBE + } else { + 0 + }, + }; + + let mut resource = ptr::null_mut(); + let hr = self.raw.CreateTexture2D( + &desc, + if !depth { + initial_datas.as_ptr() + } else { + ptr::null_mut() + }, + &mut resource as *mut *mut _ as *mut *mut _, + ); + + if !winerror::SUCCEEDED(hr) { + error!("CreateTexture2D failed: 0x{:x}", hr); + + return Err(device::BindError::WrongMemory); + } + + (image::ViewKind::D2Array, resource) + } + image::Kind::D3(width, height, depth) => { + let initial_data = + memory + .host_visible + .as_ref() + .map(|_p| d3d11::D3D11_SUBRESOURCE_DATA { + pSysMem: memory.mapped_ptr.offset(offset as isize) as _, + SysMemPitch: width * bpp, + SysMemSlicePitch: width * height * bpp, + }); + + let desc = d3d11::D3D11_TEXTURE3D_DESC { + Width: width, + Height: height, + Depth: depth, + MipLevels: image.mip_levels as _, + Format: decomposed.typeless, + Usage: usage, + BindFlags: bind, + CPUAccessFlags: cpu, + MiscFlags: 0, + }; + + let mut resource = ptr::null_mut(); + let hr = self.raw.CreateTexture3D( + &desc, + if let Some(data) = initial_data { + &data + } else { + ptr::null_mut() + }, + &mut resource as *mut *mut _ as *mut *mut _, + ); + + if !winerror::SUCCEEDED(hr) { + error!("CreateTexture3D failed: 0x{:x}", hr); + + return Err(device::BindError::WrongMemory); + } + + (image::ViewKind::D3, resource) + } + }; + + let mut unordered_access_views = Vec::new(); + + if image.usage.contains(Usage::TRANSFER_DST) && !compressed && !depth { + for mip in 0 .. image.mip_levels { + let view = ViewInfo { + resource: resource, + kind: image.kind, + caps: image::ViewCapabilities::empty(), + view_kind, + + + format: decomposed.copy_uav.unwrap(), + range: image::SubresourceRange { + aspects: format::Aspects::COLOR, + levels: mip .. (mip + 1), + layers: 0 .. image.kind.num_layers(), + }, + }; + + unordered_access_views.push( + self.view_image_as_unordered_access(&view) + .map_err(|_| device::BindError::WrongMemory)?, + ); + } + } + + let (copy_srv, srv) = if image.usage.contains(image::Usage::TRANSFER_SRC) { + let mut view = ViewInfo { + resource: resource, + kind: image.kind, + caps: image::ViewCapabilities::empty(), + view_kind, + format: decomposed.copy_srv.unwrap(), + range: image::SubresourceRange { + aspects: format::Aspects::COLOR, + levels: 0 .. image.mip_levels, + layers: 0 .. 
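+ // Two shader-resource views are kept per transfer source: copy_srv uses the
+ // "copyable" uint-style format for the compute-shader copy paths (skipped
+ // for compressed formats), while srv uses the real sampled format and is
+ // skipped for depth/stencil formats, as the code just after this shows.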
image.kind.num_layers(), + }, + }; + + let copy_srv = if !compressed { + Some( + self.view_image_as_shader_resource(&view) + .map_err(|_| device::BindError::WrongMemory)?, + ) + } else { + None + }; + + view.format = decomposed.srv.unwrap(); + + let srv = if !depth && !stencil { + Some( + self.view_image_as_shader_resource(&view) + .map_err(|_| device::BindError::WrongMemory)?, + ) + } else { + None + }; + + (copy_srv, srv) + } else { + (None, None) + }; + + let mut render_target_views = Vec::new(); + + if (image.usage.contains(image::Usage::COLOR_ATTACHMENT) + || image.usage.contains(image::Usage::TRANSFER_DST)) + && !compressed + && !depth + { + for layer in 0 .. image.kind.num_layers() { + for mip in 0 .. image.mip_levels { + let view = ViewInfo { + resource: resource, + kind: image.kind, + caps: image::ViewCapabilities::empty(), + view_kind, + format: decomposed.rtv.unwrap(), + range: image::SubresourceRange { + aspects: format::Aspects::COLOR, + levels: mip .. (mip + 1), + layers: layer .. (layer + 1), + }, + }; + + render_target_views.push( + self.view_image_as_render_target(&view) + .map_err(|_| device::BindError::WrongMemory)?, + ); + } + } + }; + + let mut depth_stencil_views = Vec::new(); + + if depth { + for layer in 0 .. image.kind.num_layers() { + for mip in 0 .. image.mip_levels { + let view = ViewInfo { + resource: resource, + kind: image.kind, + caps: image::ViewCapabilities::empty(), + view_kind, + format: decomposed.dsv.unwrap(), + range: image::SubresourceRange { + aspects: format::Aspects::COLOR, + levels: mip .. (mip + 1), + layers: layer .. (layer + 1), + }, + }; + + depth_stencil_views.push( + self.view_image_as_depth_stencil(&view) + .map_err(|_| device::BindError::WrongMemory)?, + ); + } + } + } + + let internal = InternalImage { + raw: resource, + copy_srv, + srv, + unordered_access_views, + depth_stencil_views, + render_target_views, + }; + + image.decomposed_format = decomposed; + image.internal = internal; + + Ok(()) + } + + unsafe fn create_image_view( + &self, + image: &Image, + view_kind: image::ViewKind, + format: format::Format, + _swizzle: format::Swizzle, + range: image::SubresourceRange, + ) -> Result { + let is_array = image.kind.num_layers() > 1; + + let info = ViewInfo { + resource: image.internal.raw, + kind: image.kind, + caps: image.view_caps, + + view_kind: if is_array && view_kind == image::ViewKind::D2 { + image::ViewKind::D2Array + } else if is_array && view_kind == image::ViewKind::D1 { + image::ViewKind::D1Array + } else { + view_kind + }, + format: conv::map_format(format).ok_or(image::ViewError::BadFormat(format))?, + range, + }; + + let srv_info = ViewInfo { + format: conv::viewable_format(info.format), + ..info.clone() + }; + + Ok(ImageView { + format, + srv_handle: if image.usage.intersects(image::Usage::SAMPLED) { + Some(self.view_image_as_shader_resource(&srv_info)?) + } else { + None + }, + rtv_handle: if image.usage.contains(image::Usage::COLOR_ATTACHMENT) { + Some(self.view_image_as_render_target(&info)?) + } else { + None + }, + uav_handle: if image.usage.contains(image::Usage::STORAGE) { + Some(self.view_image_as_unordered_access(&info)?) + } else { + None + }, + dsv_handle: if image.usage.contains(image::Usage::DEPTH_STENCIL_ATTACHMENT) { + Some(self.view_image_as_depth_stencil(&info)?) 
+ } else { + None + }, + }) + } + + unsafe fn create_sampler( + &self, + info: &image::SamplerDesc, + ) -> Result { + assert!(info.normalized); + + let op = match info.comparison { + Some(_) => d3d11::D3D11_FILTER_REDUCTION_TYPE_COMPARISON, + None => d3d11::D3D11_FILTER_REDUCTION_TYPE_STANDARD, + }; + + let desc = d3d11::D3D11_SAMPLER_DESC { + Filter: conv::map_filter( + info.min_filter, + info.mag_filter, + info.mip_filter, + op, + info.anisotropic, + ), + AddressU: conv::map_wrapping(info.wrap_mode.0), + AddressV: conv::map_wrapping(info.wrap_mode.1), + AddressW: conv::map_wrapping(info.wrap_mode.2), + MipLODBias: info.lod_bias.0, + MaxAnisotropy: match info.anisotropic { + image::Anisotropic::Off => 0, + image::Anisotropic::On(aniso) => aniso as _, + }, + ComparisonFunc: info.comparison.map_or(0, |comp| conv::map_comparison(comp)), + BorderColor: info.border.into(), + MinLOD: info.lod_range.start.0, + MaxLOD: info.lod_range.end.0, + }; + + let mut sampler = ptr::null_mut(); + let hr = self + .raw + .CreateSamplerState(&desc, &mut sampler as *mut *mut _ as *mut *mut _); + + assert_eq!(true, winerror::SUCCEEDED(hr)); + + Ok(Sampler { + sampler_handle: ComPtr::from_raw(sampler), + }) + } + + + unsafe fn create_descriptor_pool( + &self, + _max_sets: usize, + ranges: I, + _flags: pso::DescriptorPoolCreateFlags, + ) -> Result + where + I: IntoIterator, + I::Item: Borrow, + { + let count = ranges + .into_iter() + .map(|r| { + let r = r.borrow(); + + r.count + * match r.ty { + pso::DescriptorType::CombinedImageSampler => 2, + _ => 1, + } + }) + .sum::(); + + Ok(DescriptorPool::with_capacity(count)) + } + + unsafe fn create_descriptor_set_layout( + &self, + layout_bindings: I, + _immutable_samplers: J, + ) -> Result + where + I: IntoIterator, + I::Item: Borrow, + J: IntoIterator, + J::Item: Borrow, + { + use pso::DescriptorType::*; + + let mut bindings = Vec::new(); + + let mut mapping = Vec::new(); + let mut num_t = 0; + let mut num_s = 0; + let mut num_c = 0; + let mut num_u = 0; + + + for binding in layout_bindings { + let binding = binding.borrow(); + + let hlsl_reg = match binding.ty { + Sampler => { + num_s += 1; + num_s + } + CombinedImageSampler => { + num_t += 1; + num_s += 1; + num_t + } + SampledImage | UniformTexelBuffer => { + num_t += 1; + num_t + } + UniformBuffer | UniformBufferDynamic => { + num_c += 1; + num_c + } + StorageTexelBuffer | StorageBuffer | InputAttachment | StorageBufferDynamic + | StorageImage => { + num_u += 1; + num_u + } + } - 1; + + + if binding.ty == pso::DescriptorType::CombinedImageSampler { + + + + let shared_reg = num_s.max(num_t); + + num_s = shared_reg; + num_t = shared_reg; + + let sampler_reg = num_s - 1; + let image_reg = num_t - 1; + + mapping.push(RegisterMapping { + ty: pso::DescriptorType::Sampler, + spirv_binding: binding.binding, + hlsl_register: sampler_reg as u8, + combined: true, + }); + mapping.push(RegisterMapping { + ty: pso::DescriptorType::SampledImage, + spirv_binding: binding.binding, + hlsl_register: image_reg as u8, + combined: true, + }); + + bindings.push(PipelineBinding { + stage: binding.stage_flags, + ty: pso::DescriptorType::Sampler, + binding_range: sampler_reg .. (sampler_reg + 1), + handle_offset: 0, + }); + bindings.push(PipelineBinding { + stage: binding.stage_flags, + ty: pso::DescriptorType::SampledImage, + binding_range: image_reg .. 
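+ // Register bookkeeping example (hedged, derived from the arithmetic above):
+ // HLSL has no combined image-samplers, so one SPIR-V CombinedImageSampler
+ // becomes a Sampler in s-space plus a SampledImage in t-space, and both
+ // counters are bumped to shared_reg = max(num_s, num_t) so the two halves
+ // share an index. With num_s = 1 and num_t = 3 before such a binding, both
+ // counters end up at 4 and the pair lands in registers s3/t3.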
(image_reg + 1), + handle_offset: 0, + }); + } else { + mapping.push(RegisterMapping { + ty: binding.ty, + spirv_binding: binding.binding, + hlsl_register: hlsl_reg as u8, + combined: false, + }); + + bindings.push(PipelineBinding { + stage: binding.stage_flags, + ty: binding.ty, + binding_range: hlsl_reg .. (hlsl_reg + 1), + handle_offset: 0, + }); + } + } + + + + bindings.sort_unstable_by(|a, b| { + (b.ty as u32) + .cmp(&(a.ty as u32)) + .then(a.binding_range.start.cmp(&b.binding_range.start)) + .then(a.stage.cmp(&b.stage)) + }); + + + + + + + + + + + let mut s = 0; + let mut t = 0; + let mut c = 0; + let mut u = 0; + for mut binding in bindings.iter_mut() { + match binding.ty { + Sampler => { + binding.handle_offset = s; + s += 1; + } + SampledImage | UniformTexelBuffer => { + binding.handle_offset = num_s + t; + t += 1; + } + UniformBuffer | UniformBufferDynamic => { + binding.handle_offset = num_s + num_t + c; + c += 1; + } + StorageTexelBuffer | StorageBuffer | InputAttachment | StorageBufferDynamic + | StorageImage => { + binding.handle_offset = num_s + num_t + num_c + u; + u += 1; + } + CombinedImageSampler => unreachable!(), + } + } + + Ok(DescriptorSetLayout { + bindings, + handle_count: num_s + num_t + num_c + num_u, + register_remap: RegisterRemapping { + mapping, + num_s: num_s as _, + num_t: num_t as _, + num_c: num_c as _, + num_u: num_u as _, + }, + }) + } + + unsafe fn write_descriptor_sets<'a, I, J>(&self, write_iter: I) + where + I: IntoIterator>, + J: IntoIterator, + J::Item: Borrow>, + { + for write in write_iter { + + let target_binding = write.binding; + let (ty, first_offset, second_offset) = write.set.get_handle_offset(target_binding); + assert!((first_offset as usize) < write.set.len); + assert!((second_offset as usize) < write.set.len); + + for descriptor in write.descriptors { + let handle = write.set.handles.offset(first_offset as isize); + let second_handle = write.set.handles.offset(second_offset as isize); + + + + match *descriptor.borrow() { + pso::Descriptor::Buffer(buffer, ref _range) => match ty { + pso::DescriptorType::UniformBuffer + | pso::DescriptorType::UniformBufferDynamic => { + if buffer.ty == MemoryHeapFlags::HOST_COHERENT { + let old_buffer = (*handle).0 as *mut _; + + write.set.add_flush(old_buffer, buffer); + } + + *handle = if let Some(buffer) = buffer.internal.disjoint_cb { + Descriptor(buffer as *mut _) + } else { + Descriptor(buffer.internal.raw as *mut _) + }; + } + pso::DescriptorType::StorageBuffer => { + if buffer.ty == MemoryHeapFlags::HOST_COHERENT { + let old_buffer = (*handle).0 as *mut _; + + write.set.add_flush(old_buffer, buffer); + write.set.add_invalidate(old_buffer, buffer); + } + + *handle = Descriptor(buffer.internal.uav.unwrap() as *mut _); + } + _ => unreachable!(), + }, + pso::Descriptor::Image(image, _layout) => match ty { + pso::DescriptorType::SampledImage => { + *handle = + Descriptor(image.srv_handle.clone().unwrap().as_raw() as *mut _); + } + pso::DescriptorType::StorageImage => { + *handle = + Descriptor(image.uav_handle.clone().unwrap().as_raw() as *mut _); + } + pso::DescriptorType::InputAttachment => { + *handle = + Descriptor(image.srv_handle.clone().unwrap().as_raw() as *mut _); + } + _ => unreachable!(), + }, + pso::Descriptor::Sampler(sampler) => { + *handle = Descriptor(sampler.sampler_handle.as_raw() as *mut _); + } + pso::Descriptor::CombinedImageSampler(image, _layout, sampler) => { + *handle = Descriptor(sampler.sampler_handle.as_raw() as *mut _); + *second_handle = + 
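+ // A descriptor set here is a flat array of type-erased pointers: the
+ // (first_offset, second_offset) pair from get_handle_offset() addresses up
+ // to two slots per binding, and only CombinedImageSampler actually uses the
+ // second slot (sampler pointer first, SRV pointer second), as seen above.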
Descriptor(image.srv_handle.clone().unwrap().as_raw() as *mut _); + } + pso::Descriptor::UniformTexelBuffer(_buffer_view) => {} + pso::Descriptor::StorageTexelBuffer(_buffer_view) => {} + } + } + } + } + + unsafe fn copy_descriptor_sets<'a, I>(&self, copy_iter: I) + where + I: IntoIterator, + I::Item: Borrow>, + { + for copy in copy_iter { + let copy = copy.borrow(); + + for offset in 0 .. copy.count { + let (dst_ty, dst_handle_offset, dst_second_handle_offset) = copy + .dst_set + .get_handle_offset(copy.dst_binding + offset as u32); + let (src_ty, src_handle_offset, src_second_handle_offset) = copy + .src_set + .get_handle_offset(copy.src_binding + offset as u32); + assert_eq!(dst_ty, src_ty); + + let dst_handle = copy.dst_set.handles.offset(dst_handle_offset as isize); + let src_handle = copy.dst_set.handles.offset(src_handle_offset as isize); + + match dst_ty { + pso::DescriptorType::CombinedImageSampler => { + let dst_second_handle = copy + .dst_set + .handles + .offset(dst_second_handle_offset as isize); + let src_second_handle = copy + .dst_set + .handles + .offset(src_second_handle_offset as isize); + + *dst_handle = *src_handle; + *dst_second_handle = *src_second_handle; + } + _ => *dst_handle = *src_handle, + } + } + } + } + + unsafe fn map_memory(&self, memory: &Memory, range: R) -> Result<*mut u8, device::MapError> + where + R: RangeArg, + { + assert_eq!(memory.host_visible.is_some(), true); + + Ok(memory + .mapped_ptr + .offset(*range.start().unwrap_or(&0) as isize)) + } + + unsafe fn unmap_memory(&self, memory: &Memory) { + assert_eq!(memory.host_visible.is_some(), true); + } + + unsafe fn flush_mapped_memory_ranges<'a, I, R>( + &self, + ranges: I, + ) -> Result<(), device::OutOfMemory> + where + I: IntoIterator, + I::Item: Borrow<(&'a Memory, R)>, + R: RangeArg, + { + let _scope = debug_scope!(&self.context, "FlushMappedRanges"); + + + for range in ranges.into_iter() { + let &(memory, ref range) = range.borrow(); + let range = memory.resolve(range); + + let _scope = debug_scope!(&self.context, "Range({:?})", range); + memory.flush(&self.context, range); + } + + Ok(()) + } + + unsafe fn invalidate_mapped_memory_ranges<'a, I, R>( + &self, + ranges: I, + ) -> Result<(), device::OutOfMemory> + where + I: IntoIterator, + I::Item: Borrow<(&'a Memory, R)>, + R: RangeArg, + { + let _scope = debug_scope!(&self.context, "InvalidateMappedRanges"); + + + for range in ranges.into_iter() { + let &(memory, ref range) = range.borrow(); + let range = *range.start().unwrap_or(&0) .. 
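+ // flush_mapped_memory_ranges pushes CPU writes from the host_visible Vec
+ // out to the GPU copies of the bound buffers, while invalidate (this
+ // function) appears to go the other way, reading GPU data back through the
+ // shared working_buffer staging allocation bounded by working_buffer_size.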
*range.end().unwrap_or(&memory.size); + + let _scope = debug_scope!(&self.context, "Range({:?})", range); + memory.invalidate( + &self.context, + range, + self.internal.working_buffer.clone(), + self.internal.working_buffer_size, + ); + } + + Ok(()) + } + + fn create_semaphore(&self) -> Result { + + Ok(Semaphore) + } + + fn create_fence(&self, signalled: bool) -> Result { + Ok(Arc::new(RawFence { + mutex: Mutex::new(signalled), + condvar: Condvar::new(), + })) + } + + unsafe fn reset_fence(&self, fence: &Fence) -> Result<(), device::OutOfMemory> { + *fence.mutex.lock() = false; + Ok(()) + } + + unsafe fn wait_for_fence( + &self, + fence: &Fence, + timeout_ns: u64, + ) -> Result { + use std::time::{Duration, Instant}; + + debug!("wait_for_fence {:?} for {} ns", fence, timeout_ns); + let mut guard = fence.mutex.lock(); + match timeout_ns { + 0 => Ok(*guard), + 0xFFFFFFFFFFFFFFFF => { + while !*guard { + fence.condvar.wait(&mut guard); + } + Ok(true) + } + _ => { + let total = Duration::from_nanos(timeout_ns as u64); + let now = Instant::now(); + while !*guard { + let duration = match total.checked_sub(now.elapsed()) { + Some(dur) => dur, + None => return Ok(false), + }; + let result = fence.condvar.wait_for(&mut guard, duration); + if result.timed_out() { + return Ok(false); + } + } + Ok(true) + } + } + } + + unsafe fn get_fence_status(&self, fence: &Fence) -> Result { + Ok(*fence.mutex.lock()) + } + + fn create_event(&self) -> Result<(), device::OutOfMemory> { + unimplemented!() + } + + unsafe fn get_event_status(&self, event: &()) -> Result { + unimplemented!() + } + + unsafe fn set_event(&self, event: &()) -> Result<(), device::OutOfMemory> { + unimplemented!() + } + + unsafe fn reset_event(&self, event: &()) -> Result<(), device::OutOfMemory> { + unimplemented!() + } + + unsafe fn free_memory(&self, memory: Memory) { + for (_range, internal) in memory.local_buffers.borrow_mut().iter() { + (*internal.raw).Release(); + if let Some(srv) = internal.srv { + (*srv).Release(); + } + } + } + + unsafe fn create_query_pool( + &self, + _query_ty: query::Type, + _count: query::Id, + ) -> Result { + unimplemented!() + } + + unsafe fn destroy_query_pool(&self, _pool: QueryPool) { + unimplemented!() + } + + unsafe fn get_query_pool_results( + &self, + _pool: &QueryPool, + _queries: Range, + _data: &mut [u8], + _stride: buffer::Offset, + _flags: query::ResultFlags, + ) -> Result { + unimplemented!() + } + + unsafe fn destroy_shader_module(&self, _shader_lib: ShaderModule) {} + + unsafe fn destroy_render_pass(&self, _rp: RenderPass) { + + } + + unsafe fn destroy_pipeline_layout(&self, _layout: PipelineLayout) { + + } + + unsafe fn destroy_graphics_pipeline(&self, _pipeline: GraphicsPipeline) {} + + unsafe fn destroy_compute_pipeline(&self, _pipeline: ComputePipeline) {} + + unsafe fn destroy_framebuffer(&self, _fb: Framebuffer) {} + + unsafe fn destroy_buffer(&self, _buffer: Buffer) {} + + unsafe fn destroy_buffer_view(&self, _view: BufferView) { + unimplemented!() + } + + unsafe fn destroy_image(&self, _image: Image) { + + + } + + unsafe fn destroy_image_view(&self, _view: ImageView) { + + } + + unsafe fn destroy_sampler(&self, _sampler: Sampler) {} + + unsafe fn destroy_descriptor_pool(&self, _pool: DescriptorPool) { + + } + + unsafe fn destroy_descriptor_set_layout(&self, _layout: DescriptorSetLayout) { + + } + + unsafe fn destroy_fence(&self, _fence: Fence) { + + } + + unsafe fn destroy_semaphore(&self, _semaphore: Semaphore) { + + } + + unsafe fn destroy_event(&self, _event: ()) { + + } + + unsafe 
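+ // Fences above are purely host-side (Mutex<bool> plus Condvar): D3D11 keeps
+ // GPU/CPU synchronization implicit, so the backend signals fences from the
+ // CPU when submissions retire. Minimal usage sketch, assuming a `device` in
+ // scope implementing this trait:
+ //
+ //     let fence = device.create_fence(false).unwrap();
+ //     // ... submit work that signals `fence` ...
+ //     let done = unsafe { device.wait_for_fence(&fence, !0).unwrap() };
+ //
+ // wait_for_fence treats timeout_ns == 0 as a poll and u64::MAX (!0) as an
+ // infinite wait, per the match on timeout_ns above.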
fn create_swapchain( + &self, + surface: &mut Surface, + config: window::SwapchainConfig, + _old_swapchain: Option, + ) -> Result<(Swapchain, Vec), window::CreationError> { + let (dxgi_swapchain, non_srgb_format) = + self.create_swapchain_impl(&config, surface.wnd_handle, surface.factory.clone())?; + + let resource = { + let mut resource: *mut d3d11::ID3D11Resource = ptr::null_mut(); + assert_eq!( + winerror::S_OK, + dxgi_swapchain.GetBuffer( + 0 as _, + &d3d11::ID3D11Resource::uuidof(), + &mut resource as *mut *mut _ as *mut *mut _, + ) + ); + resource + }; + + let kind = image::Kind::D2(config.extent.width, config.extent.height, 1, 1); + let decomposed = + conv::DecomposedDxgiFormat::from_dxgi_format(conv::map_format(config.format).unwrap()); + + let mut view_info = ViewInfo { + resource, + kind, + caps: image::ViewCapabilities::empty(), + view_kind: image::ViewKind::D2, + format: decomposed.rtv.unwrap(), + + range: image::SubresourceRange { + aspects: format::Aspects::COLOR, + levels: 0 .. 1, + layers: 0 .. 1, + }, + }; + let rtv = self.view_image_as_render_target(&view_info).unwrap(); + + view_info.format = non_srgb_format; + view_info.view_kind = image::ViewKind::D2Array; + let copy_srv = self.view_image_as_shader_resource(&view_info).unwrap(); + + let images = (0 .. config.image_count) + .map(|_i| { + + + + let internal = InternalImage { + raw: resource, + copy_srv: Some(copy_srv.clone()), + srv: None, + unordered_access_views: Vec::new(), + depth_stencil_views: Vec::new(), + render_target_views: vec![rtv.clone()], + }; + + Image { + kind, + usage: config.image_usage, + format: config.format, + view_caps: image::ViewCapabilities::empty(), + + decomposed_format: decomposed.clone(), + mip_levels: 1, + internal, + bind: 0, + requirements: memory::Requirements { + + size: 1, + alignment: 1, + type_mask: MemoryHeapFlags::DEVICE_LOCAL.bits(), + }, + tiling: image::Tiling::Optimal, + } + }) + .collect(); + + Ok((Swapchain { dxgi_swapchain }, images)) + } + + unsafe fn destroy_swapchain(&self, _swapchain: Swapchain) { + + } + + fn wait_idle(&self) -> Result<(), device::OutOfMemory> { + Ok(()) + + } + + unsafe fn set_image_name(&self, _image: &mut Image, _name: &str) { + + } + + unsafe fn set_buffer_name(&self, _buffer: &mut Buffer, _name: &str) { + + } + + unsafe fn set_command_buffer_name(&self, _command_buffer: &mut CommandBuffer, _name: &str) { + + } + + unsafe fn set_semaphore_name(&self, _semaphore: &mut Semaphore, _name: &str) { + + } + + unsafe fn set_fence_name(&self, _fence: &mut Fence, _name: &str) { + + } + + unsafe fn set_framebuffer_name(&self, _framebuffer: &mut Framebuffer, _name: &str) { + + } + + unsafe fn set_render_pass_name(&self, _render_pass: &mut RenderPass, _name: &str) { + + } + + unsafe fn set_descriptor_set_name(&self, _descriptor_set: &mut DescriptorSet, _name: &str) { + + } + + unsafe fn set_descriptor_set_layout_name( + &self, + _descriptor_set_layout: &mut DescriptorSetLayout, + _name: &str, + ) { + + } +} diff --git a/third_party/rust/gfx-backend-dx11/src/dxgi.rs b/third_party/rust/gfx-backend-dx11/src/dxgi.rs new file mode 100644 index 000000000000..c76e271c0564 --- /dev/null +++ b/third_party/rust/gfx-backend-dx11/src/dxgi.rs @@ -0,0 +1,210 @@ +use hal::adapter::{AdapterInfo, DeviceType}; + +use winapi::shared::guiddef::{GUID, REFIID}; +use winapi::shared::{dxgi, dxgi1_2, dxgi1_3, dxgi1_4, dxgi1_5, winerror}; +use winapi::um::unknwnbase::IUnknown; +use winapi::Interface; + +use wio::com::ComPtr; + +use std::ffi::OsString; +use std::mem; +use 
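+ // This module loads dxgi.dll at runtime via libloading rather than linking
+ // against it, so the backend can still initialize where newer DXGI versions
+ // are absent: CreateDXGIFactory1 is resolved once, then factory interfaces
+ // are probed from IDXGIFactory5 down to IDXGIFactory1 in get_dxgi_factory
+ // below, recording the best available version as a DxgiVersion.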
std::os::windows::ffi::OsStringExt; +use std::ptr; + +#[derive(Debug, Copy, Clone)] +pub(crate) enum DxgiVersion { + + + + + + + + + + + + + + + Dxgi1_0, + + + + + + + + + + + + Dxgi1_2, + + + + + + + + + + Dxgi1_3, + + + + + + + Dxgi1_4, + + + + + + + Dxgi1_5, +} + +type DxgiFun = extern "system" fn(REFIID, *mut *mut winapi::ctypes::c_void) -> winerror::HRESULT; + +fn create_dxgi_factory1( + func: &DxgiFun, guid: &GUID +) -> Result, winerror::HRESULT> { + let mut factory: *mut IUnknown = ptr::null_mut(); + + let hr = func(guid, &mut factory as *mut *mut _ as *mut *mut _); + + if winerror::SUCCEEDED(hr) { + Ok(unsafe { ComPtr::from_raw(factory as *mut _) }) + } else { + Err(hr) + } +} + +pub(crate) fn get_dxgi_factory( +) -> Result<(ComPtr, DxgiVersion), winerror::HRESULT> { + let library = libloading::Library::new("dxgi.dll") + .map_err(|_| -1)?; + let func: libloading::Symbol = unsafe { + library.get(b"CreateDXGIFactory1") + }.map_err(|_| -1)?; + + + if let Ok(factory) = create_dxgi_factory1(&func, &dxgi1_5::IDXGIFactory5::uuidof()) { + return Ok((factory, DxgiVersion::Dxgi1_5)); + } + + if let Ok(factory) = create_dxgi_factory1(&func, &dxgi1_4::IDXGIFactory4::uuidof()) { + return Ok((factory, DxgiVersion::Dxgi1_4)); + } + + if let Ok(factory) = create_dxgi_factory1(&func, &dxgi1_3::IDXGIFactory3::uuidof()) { + return Ok((factory, DxgiVersion::Dxgi1_3)); + } + + if let Ok(factory) = create_dxgi_factory1(&func, &dxgi1_2::IDXGIFactory2::uuidof()) { + return Ok((factory, DxgiVersion::Dxgi1_2)); + } + + if let Ok(factory) = create_dxgi_factory1(&func, &dxgi::IDXGIFactory1::uuidof()) { + return Ok((factory, DxgiVersion::Dxgi1_0)); + } + + + match create_dxgi_factory1(&func, &dxgi::IDXGIFactory::uuidof()) { + Ok(factory) => Ok((factory, DxgiVersion::Dxgi1_0)), + Err(hr) => Err(hr), + } +} + +fn enum_adapters1( + idx: u32, + factory: *mut dxgi::IDXGIFactory, +) -> Result, winerror::HRESULT> { + let mut adapter: *mut dxgi::IDXGIAdapter = ptr::null_mut(); + + let hr = unsafe { + (*(factory as *mut dxgi::IDXGIFactory1)) + .EnumAdapters1(idx, &mut adapter as *mut *mut _ as *mut *mut _) + }; + + if winerror::SUCCEEDED(hr) { + Ok(unsafe { ComPtr::from_raw(adapter) }) + } else { + Err(hr) + } +} + +fn get_adapter_desc(adapter: *mut dxgi::IDXGIAdapter, version: DxgiVersion) -> AdapterInfo { + match version { + DxgiVersion::Dxgi1_0 => { + let mut desc: dxgi::DXGI_ADAPTER_DESC1 = unsafe { mem::zeroed() }; + unsafe { + (*(adapter as *mut dxgi::IDXGIAdapter1)).GetDesc1(&mut desc); + } + + let device_name = { + let len = desc.Description.iter().take_while(|&&c| c != 0).count(); + let name = ::from_wide(&desc.Description[.. len]); + name.to_string_lossy().into_owned() + }; + + AdapterInfo { + name: device_name, + vendor: desc.VendorId as usize, + device: desc.DeviceId as usize, + device_type: if (desc.Flags & dxgi::DXGI_ADAPTER_FLAG_SOFTWARE) != 0 { + DeviceType::VirtualGpu + } else { + DeviceType::DiscreteGpu + }, + } + } + DxgiVersion::Dxgi1_2 + | DxgiVersion::Dxgi1_3 + | DxgiVersion::Dxgi1_4 + | DxgiVersion::Dxgi1_5 => { + let mut desc: dxgi1_2::DXGI_ADAPTER_DESC2 = unsafe { mem::zeroed() }; + unsafe { + (*(adapter as *mut dxgi1_2::IDXGIAdapter2)).GetDesc2(&mut desc); + } + + let device_name = { + let len = desc.Description.iter().take_while(|&&c| c != 0).count(); + let name = ::from_wide(&desc.Description[.. 
len]); + name.to_string_lossy().into_owned() + }; + + AdapterInfo { + name: device_name, + vendor: desc.VendorId as usize, + device: desc.DeviceId as usize, + device_type: if (desc.Flags & dxgi::DXGI_ADAPTER_FLAG_SOFTWARE) != 0 { + DeviceType::VirtualGpu + } else { + DeviceType::DiscreteGpu + }, + } + } + } +} + +pub(crate) fn get_adapter( + idx: u32, + factory: *mut dxgi::IDXGIFactory, + version: DxgiVersion, +) -> Result<(ComPtr, AdapterInfo), winerror::HRESULT> { + let adapter = match version { + DxgiVersion::Dxgi1_0 + | DxgiVersion::Dxgi1_2 + | DxgiVersion::Dxgi1_3 + | DxgiVersion::Dxgi1_4 + | DxgiVersion::Dxgi1_5 => enum_adapters1(idx, factory)?, + }; + + let desc = get_adapter_desc(adapter.as_raw(), version); + + Ok((adapter, desc)) +} diff --git a/third_party/rust/gfx-backend-dx11/src/internal.rs b/third_party/rust/gfx-backend-dx11/src/internal.rs new file mode 100644 index 000000000000..fffb553ea5a9 --- /dev/null +++ b/third_party/rust/gfx-backend-dx11/src/internal.rs @@ -0,0 +1,1308 @@ +use hal::pso::{Stage, Viewport}; +use hal::{command, image, pso}; + +use winapi::shared::dxgiformat; +use winapi::shared::minwindef::{FALSE, TRUE}; +use winapi::shared::winerror; +use winapi::um::d3d11; +use winapi::um::d3dcommon; + +use wio::com::ComPtr; + +use std::borrow::Borrow; +use std::{mem, ptr}; + +use smallvec::SmallVec; +use spirv_cross; + +use {conv, shader}; + +use {Buffer, Image, RenderPassCache}; + +#[repr(C)] +struct BufferCopy { + src: u32, + dst: u32, + _padding: [u32; 2], +} + +#[repr(C)] +struct ImageCopy { + src: [u32; 4], + dst: [u32; 4], +} + +#[repr(C)] +struct BufferImageCopy { + buffer_offset: u32, + buffer_size: [u32; 2], + _padding: u32, + image_offset: [u32; 4], + image_extent: [u32; 4], + + image_size: [u32; 4], +} + +#[repr(C)] +struct BufferImageCopyInfo { + buffer: BufferCopy, + image: ImageCopy, + buffer_image: BufferImageCopy, +} + +#[repr(C)] +struct BlitInfo { + offset: [f32; 2], + extent: [f32; 2], + z: f32, + level: f32, +} + +#[repr(C)] +struct PartialClearInfo { + + data: [u32; 4], +} + + +const COPY_THREAD_GROUP_X: u32 = 8; +const COPY_THREAD_GROUP_Y: u32 = 8; + + + + + + + + + +#[derive(Clone, Debug)] +pub struct Internal { + + vs_partial_clear: ComPtr, + ps_partial_clear_float: ComPtr, + ps_partial_clear_uint: ComPtr, + ps_partial_clear_int: ComPtr, + ps_partial_clear_depth: ComPtr, + ps_partial_clear_stencil: ComPtr, + partial_clear_depth_stencil_state: ComPtr, + partial_clear_depth_state: ComPtr, + partial_clear_stencil_state: ComPtr, + + + vs_blit_2d: ComPtr, + + sampler_nearest: ComPtr, + sampler_linear: ComPtr, + + ps_blit_2d_uint: ComPtr, + ps_blit_2d_int: ComPtr, + ps_blit_2d_float: ComPtr, + + + cs_copy_image2d_r8g8_image2d_r16: ComPtr, + cs_copy_image2d_r16_image2d_r8g8: ComPtr, + + cs_copy_image2d_r8g8b8a8_image2d_r32: ComPtr, + cs_copy_image2d_r8g8b8a8_image2d_r16g16: ComPtr, + cs_copy_image2d_r16g16_image2d_r32: ComPtr, + cs_copy_image2d_r16g16_image2d_r8g8b8a8: ComPtr, + cs_copy_image2d_r32_image2d_r16g16: ComPtr, + cs_copy_image2d_r32_image2d_r8g8b8a8: ComPtr, + + + cs_copy_image2d_r32g32b32a32_buffer: ComPtr, + cs_copy_image2d_r32g32_buffer: ComPtr, + cs_copy_image2d_r16g16b16a16_buffer: ComPtr, + cs_copy_image2d_r32_buffer: ComPtr, + cs_copy_image2d_r16g16_buffer: ComPtr, + cs_copy_image2d_r8g8b8a8_buffer: ComPtr, + cs_copy_image2d_r16_buffer: ComPtr, + cs_copy_image2d_r8g8_buffer: ComPtr, + cs_copy_image2d_r8_buffer: ComPtr, + cs_copy_image2d_b8g8r8a8_buffer: ComPtr, + + + cs_copy_buffer_image2d_r32g32b32a32: ComPtr, + 
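+ // Why so many compute shaders: D3D11 cannot copy between resources whose
+ // DXGI formats differ in bit layout, so every supported image-to-image
+ // format pair, and every image/buffer format, gets its own specialized copy
+ // kernel, selected at run time by find_image_copy_shader() and friends.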
cs_copy_buffer_image2d_r32g32: ComPtr, + cs_copy_buffer_image2d_r16g16b16a16: ComPtr, + cs_copy_buffer_image2d_r32: ComPtr, + cs_copy_buffer_image2d_r16g16: ComPtr, + cs_copy_buffer_image2d_r8g8b8a8: ComPtr, + cs_copy_buffer_image2d_r16: ComPtr, + cs_copy_buffer_image2d_r8g8: ComPtr, + cs_copy_buffer_image2d_r8: ComPtr, + + + internal_buffer: ComPtr, + + + pub working_buffer: ComPtr, + pub working_buffer_size: u64, +} + +fn compile_blob(src: &[u8], entrypoint: &str, stage: Stage) -> ComPtr { + unsafe { + ComPtr::from_raw( + shader::compile_hlsl_shader( + stage, + spirv_cross::hlsl::ShaderModel::V5_0, + entrypoint, + src, + ) + .unwrap(), + ) + } +} + +fn compile_vs( + device: &ComPtr, + src: &[u8], + entrypoint: &str, +) -> ComPtr { + let bytecode = compile_blob(src, entrypoint, Stage::Vertex); + let mut shader = ptr::null_mut(); + let hr = unsafe { + device.CreateVertexShader( + bytecode.GetBufferPointer(), + bytecode.GetBufferSize(), + ptr::null_mut(), + &mut shader as *mut *mut _ as *mut *mut _, + ) + }; + assert_eq!(true, winerror::SUCCEEDED(hr)); + + unsafe { ComPtr::from_raw(shader) } +} + +fn compile_ps( + device: &ComPtr, + src: &[u8], + entrypoint: &str, +) -> ComPtr { + let bytecode = compile_blob(src, entrypoint, Stage::Fragment); + let mut shader = ptr::null_mut(); + let hr = unsafe { + device.CreatePixelShader( + bytecode.GetBufferPointer(), + bytecode.GetBufferSize(), + ptr::null_mut(), + &mut shader as *mut *mut _ as *mut *mut _, + ) + }; + assert_eq!(true, winerror::SUCCEEDED(hr)); + + unsafe { ComPtr::from_raw(shader) } +} + +fn compile_cs( + device: &ComPtr, + src: &[u8], + entrypoint: &str, +) -> ComPtr { + let bytecode = compile_blob(src, entrypoint, Stage::Compute); + let mut shader = ptr::null_mut(); + let hr = unsafe { + device.CreateComputeShader( + bytecode.GetBufferPointer(), + bytecode.GetBufferSize(), + ptr::null_mut(), + &mut shader as *mut *mut _ as *mut *mut _, + ) + }; + assert_eq!(true, winerror::SUCCEEDED(hr)); + + unsafe { ComPtr::from_raw(shader) } +} + +impl Internal { + pub fn new(device: &ComPtr) -> Self { + let internal_buffer = { + let desc = d3d11::D3D11_BUFFER_DESC { + ByteWidth: mem::size_of::() as _, + Usage: d3d11::D3D11_USAGE_DYNAMIC, + BindFlags: d3d11::D3D11_BIND_CONSTANT_BUFFER, + CPUAccessFlags: d3d11::D3D11_CPU_ACCESS_WRITE, + MiscFlags: 0, + StructureByteStride: 0, + }; + + let mut buffer = ptr::null_mut(); + let hr = unsafe { + device.CreateBuffer( + &desc, + ptr::null_mut(), + &mut buffer as *mut *mut _ as *mut *mut _, + ) + }; + assert_eq!(true, winerror::SUCCEEDED(hr)); + + unsafe { ComPtr::from_raw(buffer) } + }; + + let (depth_stencil_state, depth_state, stencil_state) = { + let mut depth_state = ptr::null_mut(); + let mut stencil_state = ptr::null_mut(); + let mut depth_stencil_state = ptr::null_mut(); + + let mut desc = d3d11::D3D11_DEPTH_STENCIL_DESC { + DepthEnable: TRUE, + DepthWriteMask: d3d11::D3D11_DEPTH_WRITE_MASK_ALL, + DepthFunc: d3d11::D3D11_COMPARISON_ALWAYS, + StencilEnable: TRUE, + StencilReadMask: 0, + StencilWriteMask: !0, + FrontFace: d3d11::D3D11_DEPTH_STENCILOP_DESC { + StencilFailOp: d3d11::D3D11_STENCIL_OP_REPLACE, + StencilDepthFailOp: d3d11::D3D11_STENCIL_OP_REPLACE, + StencilPassOp: d3d11::D3D11_STENCIL_OP_REPLACE, + StencilFunc: d3d11::D3D11_COMPARISON_ALWAYS, + }, + BackFace: d3d11::D3D11_DEPTH_STENCILOP_DESC { + StencilFailOp: d3d11::D3D11_STENCIL_OP_REPLACE, + StencilDepthFailOp: d3d11::D3D11_STENCIL_OP_REPLACE, + StencilPassOp: d3d11::D3D11_STENCIL_OP_REPLACE, + StencilFunc: 
d3d11::D3D11_COMPARISON_ALWAYS, + }, + }; + + let hr = unsafe { + device.CreateDepthStencilState( + &desc, + &mut depth_stencil_state as *mut *mut _ as *mut *mut _, + ) + }; + assert_eq!(winerror::S_OK, hr); + + desc.DepthEnable = TRUE; + desc.StencilEnable = FALSE; + + let hr = unsafe { + device + .CreateDepthStencilState(&desc, &mut depth_state as *mut *mut _ as *mut *mut _) + }; + assert_eq!(winerror::S_OK, hr); + + desc.DepthEnable = FALSE; + desc.StencilEnable = TRUE; + + let hr = unsafe { + device.CreateDepthStencilState( + &desc, + &mut stencil_state as *mut *mut _ as *mut *mut _, + ) + }; + assert_eq!(winerror::S_OK, hr); + + unsafe { + ( + ComPtr::from_raw(depth_stencil_state), + ComPtr::from_raw(depth_state), + ComPtr::from_raw(stencil_state), + ) + } + }; + + let (sampler_nearest, sampler_linear) = { + let mut desc = d3d11::D3D11_SAMPLER_DESC { + Filter: d3d11::D3D11_FILTER_MIN_MAG_MIP_POINT, + AddressU: d3d11::D3D11_TEXTURE_ADDRESS_CLAMP, + AddressV: d3d11::D3D11_TEXTURE_ADDRESS_CLAMP, + AddressW: d3d11::D3D11_TEXTURE_ADDRESS_CLAMP, + MipLODBias: 0f32, + MaxAnisotropy: 0, + ComparisonFunc: 0, + BorderColor: [0f32; 4], + MinLOD: 0f32, + MaxLOD: d3d11::D3D11_FLOAT32_MAX, + }; + + let mut nearest = ptr::null_mut(); + let mut linear = ptr::null_mut(); + + assert_eq!(winerror::S_OK, unsafe { + device.CreateSamplerState(&desc, &mut nearest as *mut *mut _ as *mut *mut _) + }); + + desc.Filter = d3d11::D3D11_FILTER_MIN_MAG_MIP_LINEAR; + + assert_eq!(winerror::S_OK, unsafe { + device.CreateSamplerState(&desc, &mut linear as *mut *mut _ as *mut *mut _) + }); + + unsafe { (ComPtr::from_raw(nearest), ComPtr::from_raw(linear)) } + }; + + let (working_buffer, working_buffer_size) = { + let working_buffer_size = 1 << 16; + + let desc = d3d11::D3D11_BUFFER_DESC { + ByteWidth: working_buffer_size, + Usage: d3d11::D3D11_USAGE_STAGING, + BindFlags: 0, + CPUAccessFlags: d3d11::D3D11_CPU_ACCESS_READ | d3d11::D3D11_CPU_ACCESS_WRITE, + MiscFlags: 0, + StructureByteStride: 0, + }; + let mut working_buffer = ptr::null_mut(); + + assert_eq!(winerror::S_OK, unsafe { + device.CreateBuffer( + &desc, + ptr::null_mut(), + &mut working_buffer as *mut *mut _ as *mut *mut _, + ) + }); + + ( + unsafe { ComPtr::from_raw(working_buffer) }, + working_buffer_size, + ) + }; + + let clear_shaders = include_bytes!("../shaders/clear.hlsl"); + let copy_shaders = include_bytes!("../shaders/copy.hlsl"); + let blit_shaders = include_bytes!("../shaders/blit.hlsl"); + + Internal { + vs_partial_clear: compile_vs(device, clear_shaders, "vs_partial_clear"), + ps_partial_clear_float: compile_ps(device, clear_shaders, "ps_partial_clear_float"), + ps_partial_clear_uint: compile_ps(device, clear_shaders, "ps_partial_clear_uint"), + ps_partial_clear_int: compile_ps(device, clear_shaders, "ps_partial_clear_int"), + ps_partial_clear_depth: compile_ps(device, clear_shaders, "ps_partial_clear_depth"), + ps_partial_clear_stencil: compile_ps(device, clear_shaders, "ps_partial_clear_stencil"), + partial_clear_depth_stencil_state: depth_stencil_state, + partial_clear_depth_state: depth_state, + partial_clear_stencil_state: stencil_state, + + vs_blit_2d: compile_vs(device, blit_shaders, "vs_blit_2d"), + + sampler_nearest, + sampler_linear, + + ps_blit_2d_uint: compile_ps(device, blit_shaders, "ps_blit_2d_uint"), + ps_blit_2d_int: compile_ps(device, blit_shaders, "ps_blit_2d_int"), + ps_blit_2d_float: compile_ps(device, blit_shaders, "ps_blit_2d_float"), + + cs_copy_image2d_r8g8_image2d_r16: compile_cs( + device, + copy_shaders, + 
"cs_copy_image2d_r8g8_image2d_r16", + ), + cs_copy_image2d_r16_image2d_r8g8: compile_cs( + device, + copy_shaders, + "cs_copy_image2d_r16_image2d_r8g8", + ), + + cs_copy_image2d_r8g8b8a8_image2d_r32: compile_cs( + device, + copy_shaders, + "cs_copy_image2d_r8g8b8a8_image2d_r32", + ), + cs_copy_image2d_r8g8b8a8_image2d_r16g16: compile_cs( + device, + copy_shaders, + "cs_copy_image2d_r8g8b8a8_image2d_r16g16", + ), + cs_copy_image2d_r16g16_image2d_r32: compile_cs( + device, + copy_shaders, + "cs_copy_image2d_r16g16_image2d_r32", + ), + cs_copy_image2d_r16g16_image2d_r8g8b8a8: compile_cs( + device, + copy_shaders, + "cs_copy_image2d_r16g16_image2d_r8g8b8a8", + ), + cs_copy_image2d_r32_image2d_r16g16: compile_cs( + device, + copy_shaders, + "cs_copy_image2d_r32_image2d_r16g16", + ), + cs_copy_image2d_r32_image2d_r8g8b8a8: compile_cs( + device, + copy_shaders, + "cs_copy_image2d_r32_image2d_r8g8b8a8", + ), + + cs_copy_image2d_r32g32b32a32_buffer: compile_cs( + device, + copy_shaders, + "cs_copy_image2d_r32g32b32a32_buffer", + ), + cs_copy_image2d_r32g32_buffer: compile_cs( + device, + copy_shaders, + "cs_copy_image2d_r32g32_buffer", + ), + cs_copy_image2d_r16g16b16a16_buffer: compile_cs( + device, + copy_shaders, + "cs_copy_image2d_r16g16b16a16_buffer", + ), + cs_copy_image2d_r32_buffer: compile_cs( + device, + copy_shaders, + "cs_copy_image2d_r32_buffer", + ), + cs_copy_image2d_r16g16_buffer: compile_cs( + device, + copy_shaders, + "cs_copy_image2d_r16g16_buffer", + ), + cs_copy_image2d_r8g8b8a8_buffer: compile_cs( + device, + copy_shaders, + "cs_copy_image2d_r8g8b8a8_buffer", + ), + cs_copy_image2d_r16_buffer: compile_cs( + device, + copy_shaders, + "cs_copy_image2d_r16_buffer", + ), + cs_copy_image2d_r8g8_buffer: compile_cs( + device, + copy_shaders, + "cs_copy_image2d_r8g8_buffer", + ), + cs_copy_image2d_r8_buffer: compile_cs( + device, + copy_shaders, + "cs_copy_image2d_r8_buffer", + ), + cs_copy_image2d_b8g8r8a8_buffer: compile_cs( + device, + copy_shaders, + "cs_copy_image2d_b8g8r8a8_buffer", + ), + + cs_copy_buffer_image2d_r32g32b32a32: compile_cs( + device, + copy_shaders, + "cs_copy_buffer_image2d_r32g32b32a32", + ), + cs_copy_buffer_image2d_r32g32: compile_cs( + device, + copy_shaders, + "cs_copy_buffer_image2d_r32g32", + ), + cs_copy_buffer_image2d_r16g16b16a16: compile_cs( + device, + copy_shaders, + "cs_copy_buffer_image2d_r16g16b16a16", + ), + cs_copy_buffer_image2d_r32: compile_cs( + device, + copy_shaders, + "cs_copy_buffer_image2d_r32", + ), + cs_copy_buffer_image2d_r16g16: compile_cs( + device, + copy_shaders, + "cs_copy_buffer_image2d_r16g16", + ), + cs_copy_buffer_image2d_r8g8b8a8: compile_cs( + device, + copy_shaders, + "cs_copy_buffer_image2d_r8g8b8a8", + ), + cs_copy_buffer_image2d_r16: compile_cs( + device, + copy_shaders, + "cs_copy_buffer_image2d_r16", + ), + cs_copy_buffer_image2d_r8g8: compile_cs( + device, + copy_shaders, + "cs_copy_buffer_image2d_r8g8", + ), + cs_copy_buffer_image2d_r8: compile_cs( + device, + copy_shaders, + "cs_copy_buffer_image2d_r8", + ), + + internal_buffer, + working_buffer, + working_buffer_size: working_buffer_size as _, + } + } + + fn map(&mut self, context: &ComPtr) -> *mut u8 { + let mut mapped = unsafe { mem::zeroed::() }; + let hr = unsafe { + context.Map( + self.internal_buffer.as_raw() as _, + 0, + d3d11::D3D11_MAP_WRITE_DISCARD, + 0, + &mut mapped, + ) + }; + + assert_eq!(winerror::S_OK, hr); + + mapped.pData as _ + } + + fn unmap(&mut self, context: &ComPtr) { + unsafe { + context.Unmap(self.internal_buffer.as_raw() as _, 0); + } + 
} + + fn update_image( + &mut self, + context: &ComPtr, + info: &command::ImageCopy, + ) { + unsafe { + ptr::copy( + &BufferImageCopyInfo { + image: ImageCopy { + src: [ + info.src_offset.x as _, + info.src_offset.y as _, + info.src_offset.z as _, + 0, + ], + dst: [ + info.dst_offset.x as _, + info.dst_offset.y as _, + info.dst_offset.z as _, + 0, + ], + }, + ..mem::zeroed() + }, + self.map(context) as *mut _, + 1, + ) + }; + + self.unmap(context); + } + + fn update_buffer_image( + &mut self, + context: &ComPtr, + info: &command::BufferImageCopy, + image: &Image, + ) { + let size = image.kind.extent(); + + unsafe { + ptr::copy( + &BufferImageCopyInfo { + buffer_image: BufferImageCopy { + buffer_offset: info.buffer_offset as _, + buffer_size: [info.buffer_width, info.buffer_height], + _padding: 0, + image_offset: [ + info.image_offset.x as _, + info.image_offset.y as _, + (info.image_offset.z + info.image_layers.layers.start as i32) as _, + 0, + ], + image_extent: [ + info.image_extent.width, + info.image_extent.height, + info.image_extent.depth, + 0, + ], + image_size: [size.width, size.height, size.depth, 0], + }, + ..mem::zeroed() + }, + self.map(context) as *mut _, + 1, + ) + }; + + self.unmap(context); + } + + fn update_blit( + &mut self, + context: &ComPtr, + src: &Image, + info: &command::ImageBlit, + ) { + let (sx, dx) = if info.dst_bounds.start.x > info.dst_bounds.end.x { + ( + info.src_bounds.end.x, + info.src_bounds.start.x - info.src_bounds.end.x, + ) + } else { + ( + info.src_bounds.start.x, + info.src_bounds.end.x - info.src_bounds.start.x, + ) + }; + let (sy, dy) = if info.dst_bounds.start.y > info.dst_bounds.end.y { + ( + info.src_bounds.end.y, + info.src_bounds.start.y - info.src_bounds.end.y, + ) + } else { + ( + info.src_bounds.start.y, + info.src_bounds.end.y - info.src_bounds.start.y, + ) + }; + let image::Extent { width, height, .. 
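+ // update_blit normalizes the source rectangle into [0, 1] texture
+ // coordinates by dividing by the source mip level's extent; a flipped
+ // destination is encoded as a negative extent via the swapped start/end
+ // source bounds computed above.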
} = src.kind.level_extent(info.src_subresource.level); + + unsafe { + ptr::copy( + &BlitInfo { + offset: [sx as f32 / width as f32, sy as f32 / height as f32], + extent: [dx as f32 / width as f32, dy as f32 / height as f32], + z: 0f32, + level: info.src_subresource.level as _, + }, + self.map(context) as *mut _, + 1, + ) + }; + + self.unmap(context); + } + + fn update_clear_color( + &mut self, + context: &ComPtr, + value: command::ClearColor, + ) { + unsafe { + ptr::copy( + &PartialClearInfo { + data: mem::transmute(value), + }, + self.map(context) as *mut _, + 1, + ) + }; + + self.unmap(context); + } + + fn update_clear_depth_stencil( + &mut self, + context: &ComPtr, + depth: Option, + stencil: Option, + ) { + unsafe { + ptr::copy( + &PartialClearInfo { + data: [ + mem::transmute(depth.unwrap_or(0f32)), + stencil.unwrap_or(0), + 0, + 0, + ], + }, + self.map(context) as *mut _, + 1, + ); + } + + self.unmap(context); + } + + fn find_image_copy_shader( + &self, + src: &Image, + dst: &Image, + ) -> Option<*mut d3d11::ID3D11ComputeShader> { + use dxgiformat::*; + + let src_format = src.decomposed_format.copy_srv.unwrap(); + let dst_format = dst.decomposed_format.copy_uav.unwrap(); + + match (src_format, dst_format) { + (DXGI_FORMAT_R8G8_UINT, DXGI_FORMAT_R16_UINT) => { + Some(self.cs_copy_image2d_r8g8_image2d_r16.as_raw()) + } + (DXGI_FORMAT_R16_UINT, DXGI_FORMAT_R8G8_UINT) => { + Some(self.cs_copy_image2d_r16_image2d_r8g8.as_raw()) + } + (DXGI_FORMAT_R8G8B8A8_UINT, DXGI_FORMAT_R32_UINT) => { + Some(self.cs_copy_image2d_r8g8b8a8_image2d_r32.as_raw()) + } + (DXGI_FORMAT_R8G8B8A8_UINT, DXGI_FORMAT_R16G16_UINT) => { + Some(self.cs_copy_image2d_r8g8b8a8_image2d_r16g16.as_raw()) + } + (DXGI_FORMAT_R16G16_UINT, DXGI_FORMAT_R32_UINT) => { + Some(self.cs_copy_image2d_r16g16_image2d_r32.as_raw()) + } + (DXGI_FORMAT_R16G16_UINT, DXGI_FORMAT_R8G8B8A8_UINT) => { + Some(self.cs_copy_image2d_r16g16_image2d_r8g8b8a8.as_raw()) + } + (DXGI_FORMAT_R32_UINT, DXGI_FORMAT_R16G16_UINT) => { + Some(self.cs_copy_image2d_r32_image2d_r16g16.as_raw()) + } + (DXGI_FORMAT_R32_UINT, DXGI_FORMAT_R8G8B8A8_UINT) => { + Some(self.cs_copy_image2d_r32_image2d_r8g8b8a8.as_raw()) + } + _ => None, + } + } + + pub fn copy_image_2d( + &mut self, + context: &ComPtr, + src: &Image, + dst: &Image, + regions: T, + ) where + T: IntoIterator, + T::Item: Borrow, + { + if let Some(shader) = self.find_image_copy_shader(src, dst) { + + + + + + let srv = src.internal.copy_srv.clone().unwrap().as_raw(); + + unsafe { + context.CSSetShader(shader, ptr::null_mut(), 0); + context.CSSetConstantBuffers(0, 1, &self.internal_buffer.as_raw()); + context.CSSetShaderResources(0, 1, [srv].as_ptr()); + + for region in regions.into_iter() { + let info = region.borrow(); + self.update_image(context, &info); + + let uav = dst.get_uav(info.dst_subresource.level, 0).unwrap().as_raw(); + context.CSSetUnorderedAccessViews(0, 1, [uav].as_ptr(), ptr::null_mut()); + + context.Dispatch(info.extent.width as u32, info.extent.height as u32, 1); + } + + + context.CSSetShaderResources(0, 1, [ptr::null_mut(); 1].as_ptr()); + context.CSSetUnorderedAccessViews( + 0, + 1, + [ptr::null_mut(); 1].as_ptr(), + ptr::null_mut(), + ); + } + } else { + + for region in regions.into_iter() { + let info = region.borrow(); + + + unsafe { + context.CopySubresourceRegion( + dst.internal.raw, + src.calc_subresource(info.src_subresource.level as _, 0), + info.dst_offset.x as _, + info.dst_offset.y as _, + info.dst_offset.z as _, + src.internal.raw, + 
dst.calc_subresource(info.dst_subresource.level as _, 0), + &d3d11::D3D11_BOX { + left: info.src_offset.x as _, + top: info.src_offset.y as _, + front: info.src_offset.z as _, + right: info.src_offset.x as u32 + info.extent.width as u32, + bottom: info.src_offset.y as u32 + info.extent.height as u32, + back: info.src_offset.z as u32 + info.extent.depth as u32, + }, + ); + } + } + } + } + + fn find_image_to_buffer_shader( + &self, + format: dxgiformat::DXGI_FORMAT, + ) -> Option<(*mut d3d11::ID3D11ComputeShader, u32, u32)> { + use dxgiformat::*; + + match format { + DXGI_FORMAT_R32G32B32A32_UINT => { + Some((self.cs_copy_image2d_r32g32b32a32_buffer.as_raw(), 1, 1)) + } + DXGI_FORMAT_R32G32_UINT => Some((self.cs_copy_image2d_r32g32_buffer.as_raw(), 1, 1)), + DXGI_FORMAT_R16G16B16A16_UINT => { + Some((self.cs_copy_image2d_r16g16b16a16_buffer.as_raw(), 1, 1)) + } + DXGI_FORMAT_R32_UINT => Some((self.cs_copy_image2d_r32_buffer.as_raw(), 1, 1)), + DXGI_FORMAT_R16G16_UINT => Some((self.cs_copy_image2d_r16g16_buffer.as_raw(), 1, 1)), + DXGI_FORMAT_R8G8B8A8_UINT => { + Some((self.cs_copy_image2d_r8g8b8a8_buffer.as_raw(), 1, 1)) + } + DXGI_FORMAT_R16_UINT => Some((self.cs_copy_image2d_r16_buffer.as_raw(), 2, 1)), + DXGI_FORMAT_R8G8_UINT => Some((self.cs_copy_image2d_r8g8_buffer.as_raw(), 2, 1)), + DXGI_FORMAT_R8_UINT => Some((self.cs_copy_image2d_r8_buffer.as_raw(), 4, 1)), + DXGI_FORMAT_B8G8R8A8_UNORM => { + Some((self.cs_copy_image2d_b8g8r8a8_buffer.as_raw(), 1, 1)) + } + _ => None, + } + } + + pub fn copy_image_2d_to_buffer( + &mut self, + context: &ComPtr, + src: &Image, + dst: &Buffer, + regions: T, + ) where + T: IntoIterator, + T::Item: Borrow, + { + let _scope = debug_scope!( + context, + "Image (format={:?},kind={:?}) => Buffer", + src.format, + src.kind + ); + let (shader, scale_x, scale_y) = self + .find_image_to_buffer_shader(src.decomposed_format.copy_srv.unwrap()) + .unwrap(); + + let srv = src.internal.copy_srv.clone().unwrap().as_raw(); + let uav = dst.internal.uav.unwrap(); + let format_desc = src.format.base_format().0.desc(); + let bytes_per_texel = format_desc.bits as u32 / 8; + + unsafe { + context.CSSetShader(shader, ptr::null_mut(), 0); + context.CSSetConstantBuffers(0, 1, &self.internal_buffer.as_raw()); + + context.CSSetShaderResources(0, 1, [srv].as_ptr()); + context.CSSetUnorderedAccessViews(0, 1, [uav].as_ptr(), ptr::null_mut()); + + for copy in regions { + let copy = copy.borrow(); + self.update_buffer_image(context, ©, src); + + debug_marker!(context, "{:?}", copy); + + context.Dispatch( + ((copy.image_extent.width + (COPY_THREAD_GROUP_X - 1)) + / COPY_THREAD_GROUP_X + / scale_x) + .max(1), + ((copy.image_extent.height + (COPY_THREAD_GROUP_X - 1)) + / COPY_THREAD_GROUP_Y + / scale_y) + .max(1), + 1, + ); + + if let Some(disjoint_cb) = dst.internal.disjoint_cb { + let total_size = copy.image_extent.depth + * (copy.buffer_height * copy.buffer_width * bytes_per_texel); + let copy_box = d3d11::D3D11_BOX { + left: copy.buffer_offset as u32, + top: 0, + front: 0, + right: copy.buffer_offset as u32 + total_size, + bottom: 1, + back: 1, + }; + + context.CopySubresourceRegion( + disjoint_cb as _, + 0, + copy.buffer_offset as _, + 0, + 0, + dst.internal.raw as _, + 0, + ©_box, + ); + } + } + + + context.CSSetShaderResources(0, 1, [ptr::null_mut(); 1].as_ptr()); + context.CSSetUnorderedAccessViews(0, 1, [ptr::null_mut(); 1].as_ptr(), ptr::null_mut()); + } + } + + fn find_buffer_to_image_shader( + &self, + format: dxgiformat::DXGI_FORMAT, + ) -> Option<(*mut 
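// copy_image_2d_to_buffer dispatches one thread group per
// COPY_THREAD_GROUP_X x COPY_THREAD_GROUP_Y tile, with scale_x/scale_y
// accounting for shaders that pack several texels per invocation. The
// rounding is the usual ceiling divide,
//
//     groups = max((extent + group - 1) / group / scale, 1)
//
// though note both dispatch dimensions above add (COPY_THREAD_GROUP_X - 1)
// before dividing; the height term presumably wants COPY_THREAD_GROUP_Y if
// the two group sizes ever differ. The extra CopySubresourceRegion into
// disjoint_cb re-synchronizes the shadow copy kept for buffers that are
// also bound as constant buffers, since D3D11 disallows combining
// D3D11_BIND_CONSTANT_BUFFER with other bind flags on one resource.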
d3d11::ID3D11ComputeShader, u32, u32)> { + use dxgiformat::*; + + match format { + DXGI_FORMAT_R32G32B32A32_UINT => { + Some((self.cs_copy_buffer_image2d_r32g32b32a32.as_raw(), 1, 1)) + } + DXGI_FORMAT_R32G32_UINT => Some((self.cs_copy_buffer_image2d_r32g32.as_raw(), 1, 1)), + DXGI_FORMAT_R16G16B16A16_UINT => { + Some((self.cs_copy_buffer_image2d_r16g16b16a16.as_raw(), 1, 1)) + } + DXGI_FORMAT_R32_UINT => Some((self.cs_copy_buffer_image2d_r32.as_raw(), 1, 1)), + DXGI_FORMAT_R16G16_UINT => Some((self.cs_copy_buffer_image2d_r16g16.as_raw(), 1, 1)), + DXGI_FORMAT_R8G8B8A8_UINT => { + Some((self.cs_copy_buffer_image2d_r8g8b8a8.as_raw(), 1, 1)) + } + DXGI_FORMAT_R16_UINT => Some((self.cs_copy_buffer_image2d_r16.as_raw(), 2, 1)), + DXGI_FORMAT_R8G8_UINT => Some((self.cs_copy_buffer_image2d_r8g8.as_raw(), 2, 1)), + DXGI_FORMAT_R8_UINT => Some((self.cs_copy_buffer_image2d_r8.as_raw(), 4, 1)), + _ => None, + } + } + + pub fn copy_buffer_to_image_2d( + &mut self, + context: &ComPtr, + src: &Buffer, + dst: &Image, + regions: T, + ) where + T: IntoIterator, + T::Item: Borrow, + { + let _scope = debug_scope!( + context, + "Buffer => Image (format={:?},kind={:?})", + dst.format, + dst.kind + ); + + + + + let format_desc = dst.format.base_format().0.desc(); + if format_desc.is_compressed() { + + assert_eq!(format_desc.dim, (4, 4)); + assert!(!src.host_ptr.is_null()); + + for copy in regions { + let info = copy.borrow(); + + let bytes_per_texel = format_desc.bits as u32 / 8; + + let row_pitch = bytes_per_texel * info.image_extent.width / 4; + let depth_pitch = row_pitch * info.image_extent.height / 4; + + unsafe { + context.UpdateSubresource( + dst.internal.raw, + dst.calc_subresource( + info.image_layers.level as _, + info.image_layers.layers.start as _, + ), + &d3d11::D3D11_BOX { + left: info.image_offset.x as _, + top: info.image_offset.y as _, + front: info.image_offset.z as _, + right: info.image_offset.x as u32 + info.image_extent.width, + bottom: info.image_offset.y as u32 + info.image_extent.height, + back: info.image_offset.z as u32 + info.image_extent.depth, + }, + src.host_ptr + .offset(src.bound_range.start as isize + info.buffer_offset as isize) + as _, + row_pitch, + depth_pitch, + ); + } + } + } else { + let (shader, scale_x, scale_y) = self + .find_buffer_to_image_shader(dst.decomposed_format.copy_uav.unwrap()) + .unwrap(); + + let srv = src.internal.srv.unwrap(); + + unsafe { + context.CSSetShader(shader, ptr::null_mut(), 0); + context.CSSetConstantBuffers(0, 1, &self.internal_buffer.as_raw()); + context.CSSetShaderResources(0, 1, [srv].as_ptr()); + + for copy in regions { + let info = copy.borrow(); + self.update_buffer_image(context, &info, dst); + + debug_marker!(context, "{:?}", info); + + + + + + + let uav = dst + .get_uav( + info.image_layers.level, + 0, + ) + .unwrap() + .as_raw(); + context.CSSetUnorderedAccessViews(0, 1, [uav].as_ptr(), ptr::null_mut()); + + context.Dispatch( + ((info.image_extent.width + (COPY_THREAD_GROUP_X - 1)) + / COPY_THREAD_GROUP_X + / scale_x) + .max(1), + ((info.image_extent.height + (COPY_THREAD_GROUP_X - 1)) + / COPY_THREAD_GROUP_Y + / scale_y) + .max(1), + 1, + ); + } + + + context.CSSetShaderResources(0, 1, [ptr::null_mut(); 1].as_ptr()); + context.CSSetUnorderedAccessViews( + 0, + 1, + [ptr::null_mut(); 1].as_ptr(), + ptr::null_mut(), + ); + } + } + } + + fn find_blit_shader(&self, src: &Image) -> Option<*mut d3d11::ID3D11PixelShader> { + use format::ChannelType as Ct; + + match src.format.base_format().1 { + Ct::Uint => 
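// copy_buffer_to_image_2d special-cases block-compressed destinations: BC
// data cannot be written through a UAV, so it goes straight from the
// mapped host pointer via UpdateSubresource. With 4x4 blocks,
// format_desc.bits counts bits per block, hence
//
//     row_pitch   = (bits / 8) * width  / 4   // bytes per row of blocks
//     depth_pitch = row_pitch  * height / 4   // bytes per slice of rows
//
// All other formats take the per-format compute-shader route, mirroring
// the image-to-buffer direction above.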
Some(self.ps_blit_2d_uint.as_raw()), + Ct::Sint => Some(self.ps_blit_2d_int.as_raw()), + Ct::Unorm | Ct::Snorm | Ct::Sfloat | Ct::Srgb => Some(self.ps_blit_2d_float.as_raw()), + Ct::Ufloat | Ct::Uscaled | Ct::Sscaled => None, + } + } + + pub fn blit_2d_image( + &mut self, + context: &ComPtr, + src: &Image, + dst: &Image, + filter: image::Filter, + regions: T, + ) where + T: IntoIterator, + T::Item: Borrow, + { + use std::cmp; + + let _scope = debug_scope!( + context, + "Blit: Image (format={:?},kind={:?}) => Image (format={:?},kind={:?})", + src.format, + src.kind, + dst.format, + dst.kind + ); + + let shader = self.find_blit_shader(src).unwrap(); + + let srv = src.internal.srv.clone().unwrap().as_raw(); + + unsafe { + context.IASetPrimitiveTopology(d3dcommon::D3D11_PRIMITIVE_TOPOLOGY_TRIANGLELIST); + context.VSSetShader(self.vs_blit_2d.as_raw(), ptr::null_mut(), 0); + context.VSSetConstantBuffers(0, 1, [self.internal_buffer.as_raw()].as_ptr()); + context.PSSetShader(shader, ptr::null_mut(), 0); + context.PSSetShaderResources(0, 1, [srv].as_ptr()); + context.PSSetSamplers( + 0, + 1, + match filter { + image::Filter::Nearest => [self.sampler_nearest.as_raw()], + image::Filter::Linear => [self.sampler_linear.as_raw()], + } + .as_ptr(), + ); + + for region in regions { + let region = region.borrow(); + self.update_blit(context, src, ®ion); + + + let rtv = dst + .get_rtv( + region.dst_subresource.level, + region.dst_subresource.layers.start, + ) + .unwrap() + .as_raw(); + + context.RSSetViewports( + 1, + [d3d11::D3D11_VIEWPORT { + TopLeftX: cmp::min(region.dst_bounds.start.x, region.dst_bounds.end.x) as _, + TopLeftY: cmp::min(region.dst_bounds.start.y, region.dst_bounds.end.y) as _, + Width: (region.dst_bounds.end.x - region.dst_bounds.start.x).abs() as _, + Height: (region.dst_bounds.end.y - region.dst_bounds.start.y).abs() as _, + MinDepth: 0.0f32, + MaxDepth: 1.0f32, + }] + .as_ptr(), + ); + context.OMSetRenderTargets(1, [rtv].as_ptr(), ptr::null_mut()); + context.Draw(3, 0); + } + + context.PSSetShaderResources(0, 1, [ptr::null_mut()].as_ptr()); + context.OMSetRenderTargets(1, [ptr::null_mut()].as_ptr(), ptr::null_mut()); + } + } + + pub fn clear_attachments( + &mut self, + context: &ComPtr, + clears: T, + rects: U, + cache: &RenderPassCache, + ) where + T: IntoIterator, + T::Item: Borrow, + U: IntoIterator, + U::Item: Borrow, + { + use hal::format::ChannelType as Ct; + let _scope = debug_scope!(context, "ClearAttachments"); + + let clear_rects: SmallVec<[pso::ClearRect; 8]> = rects + .into_iter() + .map(|rect| rect.borrow().clone()) + .collect(); + + unsafe { + context.IASetPrimitiveTopology(d3dcommon::D3D11_PRIMITIVE_TOPOLOGY_TRIANGLELIST); + context.IASetInputLayout(ptr::null_mut()); + context.VSSetShader(self.vs_partial_clear.as_raw(), ptr::null_mut(), 0); + context.PSSetConstantBuffers(0, 1, [self.internal_buffer.as_raw()].as_ptr()); + } + + let subpass = &cache.render_pass.subpasses[cache.current_subpass]; + + for clear in clears { + let clear = clear.borrow(); + + let _scope = debug_scope!(context, "{:?}", clear); + + match *clear { + command::AttachmentClear::Color { index, value } => { + self.update_clear_color(context, value); + + let attachment = { + let rtv_id = subpass.color_attachments[index]; + &cache.framebuffer.attachments[rtv_id.0] + }; + + unsafe { + context.OMSetRenderTargets( + 1, + [attachment.rtv_handle.clone().unwrap().as_raw()].as_ptr(), + ptr::null_mut(), + ); + } + + let shader = match attachment.format.base_format().1 { + Ct::Uint => 
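// blit_2d_image issues a single Draw(3, 0) with no vertex or index buffer
// bound: vs_blit_2d presumably expands SV_VertexID into one large triangle
// covering the viewport, which RSSetViewports has already clamped to the
// destination rectangle, while the pixel shader samples through the
// normalized offset/extent constants written by update_blit. The
// clear_attachments path here reuses the same trick for scissored clears,
// drawing that triangle once per clear rect and picking the pixel shader
// by the attachment's channel type (uint/sint/float variants, continued
// below).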
self.ps_partial_clear_uint.as_raw(), + Ct::Sint => self.ps_partial_clear_int.as_raw(), + _ => self.ps_partial_clear_float.as_raw(), + }; + unsafe { context.PSSetShader(shader, ptr::null_mut(), 0) }; + + for clear_rect in &clear_rects { + let viewport = conv::map_viewport(&Viewport { + rect: clear_rect.rect, + depth: 0f32 .. 1f32, + }); + + debug_marker!(context, "{:?}", clear_rect.rect); + + unsafe { + context.RSSetViewports(1, [viewport].as_ptr()); + context.Draw(3, 0); + } + } + } + command::AttachmentClear::DepthStencil { depth, stencil } => { + self.update_clear_depth_stencil(context, depth, stencil); + + let attachment = { + let dsv_id = subpass.depth_stencil_attachment.unwrap(); + &cache.framebuffer.attachments[dsv_id.0] + }; + + unsafe { + match (depth, stencil) { + (Some(_), Some(stencil)) => { + context.OMSetDepthStencilState( + self.partial_clear_depth_stencil_state.as_raw(), + stencil, + ); + context.PSSetShader( + self.ps_partial_clear_depth.as_raw(), + ptr::null_mut(), + 0, + ); + } + + (Some(_), None) => { + context.OMSetDepthStencilState( + self.partial_clear_depth_state.as_raw(), + 0, + ); + context.PSSetShader( + self.ps_partial_clear_depth.as_raw(), + ptr::null_mut(), + 0, + ); + } + + (None, Some(stencil)) => { + context.OMSetDepthStencilState( + self.partial_clear_stencil_state.as_raw(), + stencil, + ); + context.PSSetShader( + self.ps_partial_clear_stencil.as_raw(), + ptr::null_mut(), + 0, + ); + } + (None, None) => {} + } + + context.OMSetRenderTargets( + 0, + ptr::null_mut(), + attachment.dsv_handle.clone().unwrap().as_raw(), + ); + context.PSSetShader( + self.ps_partial_clear_depth.as_raw(), + ptr::null_mut(), + 0, + ); + } + + for clear_rect in &clear_rects { + let viewport = conv::map_viewport(&Viewport { + rect: clear_rect.rect, + depth: 0f32 .. 1f32, + }); + + unsafe { + context.RSSetViewports(1, [viewport].as_ptr()); + context.Draw(3, 0); + } + } + } + } + } + } +} diff --git a/third_party/rust/gfx-backend-dx11/src/lib.rs b/third_party/rust/gfx-backend-dx11/src/lib.rs new file mode 100644 index 000000000000..19930a76e5f2 --- /dev/null +++ b/third_party/rust/gfx-backend-dx11/src/lib.rs @@ -0,0 +1,3239 @@ + + +extern crate gfx_hal as hal; +extern crate auxil; +extern crate range_alloc; +#[macro_use] +extern crate bitflags; +extern crate libloading; +#[macro_use] +extern crate log; +extern crate parking_lot; +extern crate smallvec; +extern crate spirv_cross; +#[macro_use] +extern crate winapi; +extern crate wio; + +use hal::{ + adapter, + buffer, + command, + format, + image, + memory, + pass, + pso, + query, + queue, + range::RangeArg, + window, + DrawCount, + IndexCount, + InstanceCount, + Limits, + VertexCount, + VertexOffset, + WorkGroupCount, +}; + +use range_alloc::RangeAllocator; + +use winapi::shared::dxgi::{IDXGIAdapter, IDXGIFactory, IDXGISwapChain}; +use winapi::shared::minwindef::{FALSE, UINT, HMODULE}; +use winapi::shared::windef::{HWND, RECT}; +use winapi::shared::{dxgiformat, winerror}; +use winapi::um::winuser::GetClientRect; +use winapi::um::{d3d11, d3dcommon}; +use winapi::Interface as _; + +use wio::com::ComPtr; + +use parking_lot::{Condvar, Mutex}; + +use std::borrow::Borrow; +use std::cell::RefCell; +use std::fmt; +use std::mem; +use std::ops::Range; +use std::ptr; +use std::sync::Arc; + +use std::os::raw::c_void; + +macro_rules! 
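// For depth/stencil clears above, the depth-stencil state object selects
// which planes the draw writes (depth-only, stencil-only, or both), with
// the stencil reference doubling as the clear value. One oddity worth
// noting: after the match, PSSetShader is invoked again with
// ps_partial_clear_depth unconditionally, overriding the stencil-only
// shader chosen in the (None, Some) arm.
//
// The macro defined next, together with debug_marker! after it, gates GPU
// debug annotations behind debug_assertions: debug builds emit named event
// scopes and point markers through the helpers in debug.rs, while release
// builds expand to a unit value and to nothing respectively, so annotated
// code compiles away entirely.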
debug_scope { + ($context:expr, $($arg:tt)+) => ({ + #[cfg(debug_assertions)] + { + $crate::debug::DebugScope::with_name( + $context, + format_args!($($arg)+), + ) + } + #[cfg(not(debug_assertions))] + { + () + } + }); +} + +macro_rules! debug_marker { + ($context:expr, $($arg:tt)+) => ({ + #[cfg(debug_assertions)] + { + $crate::debug::debug_marker( + $context, + format_args!($($arg)+), + ); + } + }); +} + +mod conv; +#[cfg(debug_assertions)] +mod debug; +mod device; +mod dxgi; +mod internal; +mod shader; + +type CreateFun = extern "system" fn( + *mut IDXGIAdapter, + UINT, + HMODULE, + UINT, + *const UINT, + UINT, + UINT, + *mut *mut d3d11::ID3D11Device, + *mut UINT, + *mut *mut d3d11::ID3D11DeviceContext, +) -> winerror::HRESULT; + +#[derive(Clone)] +pub(crate) struct ViewInfo { + resource: *mut d3d11::ID3D11Resource, + kind: image::Kind, + caps: image::ViewCapabilities, + view_kind: image::ViewKind, + format: dxgiformat::DXGI_FORMAT, + range: image::SubresourceRange, +} + +impl fmt::Debug for ViewInfo { + fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result { + fmt.write_str("ViewInfo") + } +} + +#[derive(Debug)] +pub struct Instance { + pub(crate) factory: ComPtr, + pub(crate) dxgi_version: dxgi::DxgiVersion, + library: Arc, +} + +unsafe impl Send for Instance {} +unsafe impl Sync for Instance {} + +impl Instance { + pub fn create_surface_from_hwnd(&self, hwnd: *mut c_void) -> Surface { + Surface { + factory: self.factory.clone(), + wnd_handle: hwnd as *mut _, + presentation: None, + } + } +} + +fn get_features( + _device: ComPtr, + _feature_level: d3dcommon::D3D_FEATURE_LEVEL, +) -> hal::Features { + hal::Features::ROBUST_BUFFER_ACCESS + | hal::Features::FULL_DRAW_INDEX_U32 + | hal::Features::FORMAT_BC + | hal::Features::INSTANCE_RATE + | hal::Features::SAMPLER_MIP_LOD_BIAS +} + +fn get_format_properties( + device: ComPtr, +) -> [format::Properties; format::NUM_FORMATS] { + let mut format_properties = [format::Properties::default(); format::NUM_FORMATS]; + for (i, props) in &mut format_properties.iter_mut().enumerate().skip(1) { + let format: format::Format = unsafe { mem::transmute(i as u32) }; + + let dxgi_format = match conv::map_format(format) { + Some(format) => format, + None => continue, + }; + + let mut support = d3d11::D3D11_FEATURE_DATA_FORMAT_SUPPORT { + InFormat: dxgi_format, + OutFormatSupport: 0, + }; + let mut support_2 = d3d11::D3D11_FEATURE_DATA_FORMAT_SUPPORT2 { + InFormat: dxgi_format, + OutFormatSupport2: 0, + }; + + let hr = unsafe { + device.CheckFeatureSupport( + d3d11::D3D11_FEATURE_FORMAT_SUPPORT, + &mut support as *mut _ as *mut _, + mem::size_of::() as UINT, + ) + }; + + if hr == winerror::S_OK { + let can_buffer = 0 != support.OutFormatSupport & d3d11::D3D11_FORMAT_SUPPORT_BUFFER; + let can_image = 0 + != support.OutFormatSupport + & (d3d11::D3D11_FORMAT_SUPPORT_TEXTURE1D + | d3d11::D3D11_FORMAT_SUPPORT_TEXTURE2D + | d3d11::D3D11_FORMAT_SUPPORT_TEXTURE3D + | d3d11::D3D11_FORMAT_SUPPORT_TEXTURECUBE); + let can_linear = can_image && !format.surface_desc().is_compressed(); + if can_image { + props.optimal_tiling |= + format::ImageFeature::SAMPLED | format::ImageFeature::BLIT_SRC; + } + if can_linear { + props.linear_tiling |= + format::ImageFeature::SAMPLED | format::ImageFeature::BLIT_SRC; + } + if support.OutFormatSupport & d3d11::D3D11_FORMAT_SUPPORT_IA_VERTEX_BUFFER != 0 { + props.buffer_features |= format::BufferFeature::VERTEX; + } + if support.OutFormatSupport & d3d11::D3D11_FORMAT_SUPPORT_SHADER_SAMPLE != 0 { + props.optimal_tiling |= 
format::ImageFeature::SAMPLED_LINEAR; + } + if support.OutFormatSupport & d3d11::D3D11_FORMAT_SUPPORT_RENDER_TARGET != 0 { + props.optimal_tiling |= + format::ImageFeature::COLOR_ATTACHMENT | format::ImageFeature::BLIT_DST; + if can_linear { + props.linear_tiling |= + format::ImageFeature::COLOR_ATTACHMENT | format::ImageFeature::BLIT_DST; + } + } + if support.OutFormatSupport & d3d11::D3D11_FORMAT_SUPPORT_BLENDABLE != 0 { + props.optimal_tiling |= format::ImageFeature::COLOR_ATTACHMENT_BLEND; + } + if support.OutFormatSupport & d3d11::D3D11_FORMAT_SUPPORT_DEPTH_STENCIL != 0 { + props.optimal_tiling |= format::ImageFeature::DEPTH_STENCIL_ATTACHMENT; + } + if support.OutFormatSupport & d3d11::D3D11_FORMAT_SUPPORT_SHADER_LOAD != 0 { + + if can_buffer { + props.buffer_features |= format::BufferFeature::UNIFORM_TEXEL; + } + } + + let hr = unsafe { + device.CheckFeatureSupport( + d3d11::D3D11_FEATURE_FORMAT_SUPPORT2, + &mut support_2 as *mut _ as *mut _, + mem::size_of::() as UINT, + ) + }; + if hr == winerror::S_OK { + if support_2.OutFormatSupport2 & d3d11::D3D11_FORMAT_SUPPORT2_UAV_ATOMIC_ADD != 0 { + + if can_buffer { + props.buffer_features |= format::BufferFeature::STORAGE_TEXEL_ATOMIC; + } + if can_image { + props.optimal_tiling |= format::ImageFeature::STORAGE_ATOMIC; + } + } + if support_2.OutFormatSupport2 & d3d11::D3D11_FORMAT_SUPPORT2_UAV_TYPED_STORE != 0 { + if can_buffer { + props.buffer_features |= format::BufferFeature::STORAGE_TEXEL; + } + if can_image { + props.optimal_tiling |= format::ImageFeature::STORAGE; + } + } + } + } + + + } + + format_properties +} + +impl hal::Instance for Instance { + fn create(_: &str, _: u32) -> Result { + + + match dxgi::get_dxgi_factory() { + Ok((factory, dxgi_version)) => { + info!("DXGI version: {:?}", dxgi_version); + let library = Arc::new( + libloading::Library::new("d3d11.dll") + .map_err(|_| hal::UnsupportedBackend)? 
+ ); + Ok(Instance { + factory, + dxgi_version, + library, + }) + } + Err(hr) => { + info!("Failed on factory creation: {:?}", hr); + Err(hal::UnsupportedBackend) + } + } + } + + fn enumerate_adapters(&self) -> Vec> { + let mut adapters = Vec::new(); + let mut idx = 0; + + let func: libloading::Symbol = match unsafe { + self.library.get(b"D3D11CreateDevice") + } { + Ok(func) => func, + Err(e) => { + error!("Unable to get device creation function: {:?}", e); + return Vec::new(); + } + }; + + while let Ok((adapter, info)) = + dxgi::get_adapter(idx, self.factory.as_raw(), self.dxgi_version) + { + idx += 1; + + use hal::memory::Properties; + + + let (device, feature_level) = { + let feature_level = get_feature_level(&func, adapter.as_raw()); + + let mut device = ptr::null_mut(); + let hr = func( + adapter.as_raw() as *mut _, + d3dcommon::D3D_DRIVER_TYPE_UNKNOWN, + ptr::null_mut(), + 0, + [feature_level].as_ptr(), + 1, + d3d11::D3D11_SDK_VERSION, + &mut device as *mut *mut _ as *mut *mut _, + ptr::null_mut(), + ptr::null_mut(), + ); + + if !winerror::SUCCEEDED(hr) { + continue; + } + + ( + unsafe { ComPtr::::from_raw(device) }, + feature_level, + ) + }; + + let memory_properties = adapter::MemoryProperties { + memory_types: vec![ + adapter::MemoryType { + properties: Properties::DEVICE_LOCAL, + heap_index: 0, + }, + adapter::MemoryType { + properties: Properties::CPU_VISIBLE + | Properties::COHERENT + | Properties::CPU_CACHED, + heap_index: 1, + }, + adapter::MemoryType { + properties: Properties::CPU_VISIBLE | Properties::CPU_CACHED, + heap_index: 1, + }, + ], + + + memory_heaps: vec![!0, !0], + }; + + let limits = hal::Limits { + max_image_1d_size: d3d11::D3D11_REQ_TEXTURE1D_U_DIMENSION as _, + max_image_2d_size: d3d11::D3D11_REQ_TEXTURE2D_U_OR_V_DIMENSION as _, + max_image_3d_size: d3d11::D3D11_REQ_TEXTURE3D_U_V_OR_W_DIMENSION as _, + max_image_cube_size: d3d11::D3D11_REQ_TEXTURECUBE_DIMENSION as _, + max_image_array_layers: d3d11::D3D11_REQ_TEXTURE2D_ARRAY_AXIS_DIMENSION as _, + max_texel_elements: d3d11::D3D11_REQ_TEXTURE2D_U_OR_V_DIMENSION as _, + max_patch_size: 0, + max_viewports: d3d11::D3D11_VIEWPORT_AND_SCISSORRECT_OBJECT_COUNT_PER_PIPELINE as _, + max_viewport_dimensions: [d3d11::D3D11_VIEWPORT_BOUNDS_MAX; 2], + max_framebuffer_extent: hal::image::Extent { + + width: 4096, + height: 4096, + depth: 1, + }, + max_compute_work_group_count: [ + d3d11::D3D11_CS_THREAD_GROUP_MAX_X, + d3d11::D3D11_CS_THREAD_GROUP_MAX_Y, + d3d11::D3D11_CS_THREAD_GROUP_MAX_Z, + ], + max_compute_work_group_size: [ + d3d11::D3D11_CS_THREAD_GROUP_MAX_THREADS_PER_GROUP, + 1, + 1, + ], + max_vertex_input_attribute_offset: 255, + max_vertex_input_attributes: d3d11::D3D11_IA_VERTEX_INPUT_RESOURCE_SLOT_COUNT as _, + max_vertex_input_binding_stride: + d3d11::D3D11_REQ_MULTI_ELEMENT_STRUCTURE_SIZE_IN_BYTES as _, + max_vertex_input_bindings: d3d11::D3D11_IA_VERTEX_INPUT_RESOURCE_SLOT_COUNT as _, + max_vertex_output_components: d3d11::D3D11_VS_OUTPUT_REGISTER_COUNT as _, + min_texel_buffer_offset_alignment: 1, + min_uniform_buffer_offset_alignment: 16, + min_storage_buffer_offset_alignment: 1, + framebuffer_color_sample_counts: 1, + framebuffer_depth_sample_counts: 1, + framebuffer_stencil_sample_counts: 1, + max_color_attachments: d3d11::D3D11_SIMULTANEOUS_RENDER_TARGET_COUNT as _, + buffer_image_granularity: 1, + non_coherent_atom_size: 1, + max_sampler_anisotropy: 16., + optimal_buffer_copy_offset_alignment: 1, + optimal_buffer_copy_pitch_alignment: 1, + min_vertex_input_binding_stride_alignment: 1, + 
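// The advertised memory model is deliberately minimal: heap 0 is
// DEVICE_LOCAL, heap 1 backs both CPU-visible types (coherent + cached for
// uploads, cached-only for readback), and each heap reports !0 bytes
// because D3D11 exposes no real heap sizes; "coherent" is emulated later
// by the flush/invalidate lists on the command buffer. The limits are
// filled from D3D11_REQ_* minimum-support constants where one exists and
// conservative placeholders (such as the 4096x4096 framebuffer extent)
// where it does not, with everything else defaulted just below: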
..hal::Limits::default() + }; + + let features = get_features(device.clone(), feature_level); + let format_properties = get_format_properties(device.clone()); + + let physical_device = PhysicalDevice { + adapter, + library: Arc::clone(&self.library), + features, + limits, + memory_properties, + format_properties, + }; + + info!("{:#?}", info); + + adapters.push(adapter::Adapter { + info, + physical_device, + queue_families: vec![QueueFamily], + }); + } + + adapters + } + + unsafe fn create_surface( + &self, + has_handle: &impl raw_window_handle::HasRawWindowHandle, + ) -> Result { + match has_handle.raw_window_handle() { + raw_window_handle::RawWindowHandle::Windows(handle) => { + Ok(self.create_surface_from_hwnd(handle.hwnd)) + } + _ => Err(hal::window::InitError::UnsupportedWindowHandle), + } + } + + unsafe fn destroy_surface(&self, _surface: Surface) { + + } +} + +pub struct PhysicalDevice { + adapter: ComPtr, + library: Arc, + features: hal::Features, + limits: hal::Limits, + memory_properties: adapter::MemoryProperties, + format_properties: [format::Properties; format::NUM_FORMATS], +} + +impl fmt::Debug for PhysicalDevice { + fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result { + fmt.write_str("PhysicalDevice") + } +} + +unsafe impl Send for PhysicalDevice {} +unsafe impl Sync for PhysicalDevice {} + + +fn get_feature_level(func: &CreateFun, adapter: *mut IDXGIAdapter) -> d3dcommon::D3D_FEATURE_LEVEL { + let requested_feature_levels = [ + d3dcommon::D3D_FEATURE_LEVEL_11_1, + d3dcommon::D3D_FEATURE_LEVEL_11_0, + d3dcommon::D3D_FEATURE_LEVEL_10_1, + d3dcommon::D3D_FEATURE_LEVEL_10_0, + d3dcommon::D3D_FEATURE_LEVEL_9_3, + d3dcommon::D3D_FEATURE_LEVEL_9_2, + d3dcommon::D3D_FEATURE_LEVEL_9_1, + ]; + + let mut feature_level = d3dcommon::D3D_FEATURE_LEVEL_9_1; + let hr = func( + adapter, + d3dcommon::D3D_DRIVER_TYPE_UNKNOWN, + ptr::null_mut(), + 0, + requested_feature_levels[..].as_ptr(), + requested_feature_levels.len() as _, + d3d11::D3D11_SDK_VERSION, + ptr::null_mut(), + &mut feature_level as *mut _, + ptr::null_mut(), + ); + + if !winerror::SUCCEEDED(hr) { + + + + if hr == winerror::E_INVALIDARG { + let hr = func( + adapter, + d3dcommon::D3D_DRIVER_TYPE_UNKNOWN, + ptr::null_mut(), + 0, + requested_feature_levels[1 ..].as_ptr(), + (requested_feature_levels.len() - 1) as _, + d3d11::D3D11_SDK_VERSION, + ptr::null_mut(), + &mut feature_level as *mut _, + ptr::null_mut(), + ); + + if !winerror::SUCCEEDED(hr) { + + unimplemented!(); + } + } + } + + feature_level +} + + +impl adapter::PhysicalDevice for PhysicalDevice { + unsafe fn open( + &self, + families: &[(&QueueFamily, &[queue::QueuePriority])], + requested_features: hal::Features, + ) -> Result, hal::device::CreationError> { + let func: libloading::Symbol = self.library + .get(b"D3D11CreateDevice") + .unwrap(); + + let (device, cxt) = { + if !self.features().contains(requested_features) { + return Err(hal::device::CreationError::MissingFeature); + } + + let feature_level = get_feature_level(&func, self.adapter.as_raw()); + let mut returned_level = d3dcommon::D3D_FEATURE_LEVEL_9_1; + + #[cfg(debug_assertions)] + let create_flags = d3d11::D3D11_CREATE_DEVICE_DEBUG; + #[cfg(not(debug_assertions))] + let create_flags = 0; + + + let mut device = ptr::null_mut(); + let mut cxt = ptr::null_mut(); + let hr = func( + self.adapter.as_raw() as *mut _, + d3dcommon::D3D_DRIVER_TYPE_UNKNOWN, + ptr::null_mut(), + create_flags, + [feature_level].as_ptr(), + 1, + d3d11::D3D11_SDK_VERSION, + &mut device as *mut *mut _ as *mut *mut _, + &mut 
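// get_feature_level above follows the documented D3D11CreateDevice
// behavior: a Direct3D 11.0 runtime rejects any requested-levels array
// containing D3D_FEATURE_LEVEL_11_1 with E_INVALIDARG, so on exactly that
// error the probe retries with the list truncated past 11_1 (anything
// still failing lands in the unimplemented!()). open() then re-creates the
// device for real, adding D3D11_CREATE_DEVICE_DEBUG in debug builds, and
// receives the granted level through the &mut returned_level out-parameter
// below: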
returned_level as *mut _, + &mut cxt as *mut *mut _ as *mut *mut _, + ); + + + + + if !winerror::SUCCEEDED(hr) { + return Err(hal::device::CreationError::InitializationFailed); + } + + info!("feature level={:x}", feature_level); + + (ComPtr::from_raw(device), ComPtr::from_raw(cxt)) + }; + + let device = device::Device::new(device, cxt, self.memory_properties.clone()); + + + let queue_groups = families + .into_iter() + .map(|&(_family, prio)| { + assert_eq!(prio.len(), 1); + let mut group = queue::QueueGroup::new(queue::QueueFamilyId(0)); + + + let queue = CommandQueue { + context: device.context.clone(), + }; + group.add_queue(queue); + group + }) + .collect(); + + Ok(adapter::Gpu { + device, + queue_groups, + }) + } + + fn format_properties(&self, fmt: Option) -> format::Properties { + let idx = fmt.map(|fmt| fmt as usize).unwrap_or(0); + self.format_properties[idx] + } + + fn image_format_properties( + &self, + format: format::Format, + dimensions: u8, + tiling: image::Tiling, + usage: image::Usage, + view_caps: image::ViewCapabilities, + ) -> Option { + conv::map_format(format)?; + + let supported_usage = { + use hal::image::Usage as U; + let format_props = &self.format_properties[format as usize]; + let props = match tiling { + image::Tiling::Optimal => format_props.optimal_tiling, + image::Tiling::Linear => format_props.linear_tiling, + }; + let mut flags = U::empty(); + + if props.contains(format::ImageFeature::BLIT_SRC) { + flags |= U::TRANSFER_SRC; + } + if props.contains(format::ImageFeature::BLIT_DST) { + flags |= U::TRANSFER_DST; + } + if props.contains(format::ImageFeature::SAMPLED) { + flags |= U::SAMPLED; + } + if props.contains(format::ImageFeature::STORAGE) { + flags |= U::STORAGE; + } + if props.contains(format::ImageFeature::COLOR_ATTACHMENT) { + flags |= U::COLOR_ATTACHMENT; + } + if props.contains(format::ImageFeature::DEPTH_STENCIL_ATTACHMENT) { + flags |= U::DEPTH_STENCIL_ATTACHMENT; + } + flags + }; + if !supported_usage.contains(usage) { + return None; + } + + let max_resource_size = + (d3d11::D3D11_REQ_RESOURCE_SIZE_IN_MEGABYTES_EXPRESSION_A_TERM as usize) << 20; + Some(match tiling { + image::Tiling::Optimal => image::FormatProperties { + max_extent: match dimensions { + 1 => image::Extent { + width: d3d11::D3D11_REQ_TEXTURE1D_U_DIMENSION, + height: 1, + depth: 1, + }, + 2 => image::Extent { + width: d3d11::D3D11_REQ_TEXTURE2D_U_OR_V_DIMENSION, + height: d3d11::D3D11_REQ_TEXTURE2D_U_OR_V_DIMENSION, + depth: 1, + }, + 3 => image::Extent { + width: d3d11::D3D11_REQ_TEXTURE3D_U_V_OR_W_DIMENSION, + height: d3d11::D3D11_REQ_TEXTURE3D_U_V_OR_W_DIMENSION, + depth: d3d11::D3D11_REQ_TEXTURE3D_U_V_OR_W_DIMENSION, + }, + _ => return None, + }, + max_levels: d3d11::D3D11_REQ_MIP_LEVELS as _, + max_layers: match dimensions { + 1 => d3d11::D3D11_REQ_TEXTURE1D_ARRAY_AXIS_DIMENSION as _, + 2 => d3d11::D3D11_REQ_TEXTURE2D_ARRAY_AXIS_DIMENSION as _, + _ => return None, + }, + sample_count_mask: if dimensions == 2 + && !view_caps.contains(image::ViewCapabilities::KIND_CUBE) + && (usage.contains(image::Usage::COLOR_ATTACHMENT) + | usage.contains(image::Usage::DEPTH_STENCIL_ATTACHMENT)) + { + 0x3F + } else { + 0x1 + }, + max_resource_size, + }, + image::Tiling::Linear => image::FormatProperties { + max_extent: match dimensions { + 2 => image::Extent { + width: d3d11::D3D11_REQ_TEXTURE2D_U_OR_V_DIMENSION, + height: d3d11::D3D11_REQ_TEXTURE2D_U_OR_V_DIMENSION, + depth: 1, + }, + _ => return None, + }, + max_levels: 1, + max_layers: 1, + sample_count_mask: 0x1, + max_resource_size, + }, 
+ }) + } + + fn memory_properties(&self) -> adapter::MemoryProperties { + self.memory_properties.clone() + } + + fn features(&self) -> hal::Features { + self.features + } + + fn limits(&self) -> Limits { + self.limits + } +} + +struct Presentation { + swapchain: ComPtr, + view: ComPtr, + format: format::Format, + size: window::Extent2D, +} + +pub struct Surface { + pub(crate) factory: ComPtr, + wnd_handle: HWND, + presentation: Option, +} + + +impl fmt::Debug for Surface { + fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result { + fmt.write_str("Surface") + } +} + +unsafe impl Send for Surface {} +unsafe impl Sync for Surface {} + +impl window::Surface for Surface { + fn supports_queue_family(&self, _queue_family: &QueueFamily) -> bool { + true + } + + fn capabilities(&self, _physical_device: &PhysicalDevice) -> window::SurfaceCapabilities { + let current_extent = unsafe { + let mut rect: RECT = mem::zeroed(); + assert_ne!( + 0, + GetClientRect(self.wnd_handle as *mut _, &mut rect as *mut RECT) + ); + Some(window::Extent2D { + width: (rect.right - rect.left) as u32, + height: (rect.bottom - rect.top) as u32, + }) + }; + + + + + window::SurfaceCapabilities { + present_modes: window::PresentMode::FIFO, + composite_alpha_modes: window::CompositeAlphaMode::OPAQUE, + image_count: 1 ..= 16, + current_extent, + extents: window::Extent2D { + width: 16, + height: 16, + } ..= window::Extent2D { + width: 4096, + height: 4096, + }, + max_image_layers: 1, + usage: image::Usage::COLOR_ATTACHMENT | image::Usage::TRANSFER_SRC, + } + } + + fn supported_formats(&self, _physical_device: &PhysicalDevice) -> Option> { + Some(vec![ + format::Format::Bgra8Srgb, + format::Format::Bgra8Unorm, + format::Format::Rgba8Srgb, + format::Format::Rgba8Unorm, + format::Format::A2b10g10r10Unorm, + format::Format::Rgba16Sfloat, + ]) + } +} + +impl window::PresentationSurface for Surface { + type SwapchainImage = ImageView; + + unsafe fn configure_swapchain( + &mut self, + device: &device::Device, + config: window::SwapchainConfig, + ) -> Result<(), window::CreationError> { + assert!(image::Usage::COLOR_ATTACHMENT.contains(config.image_usage)); + + let swapchain = match self.presentation.take() { + Some(present) => { + if present.format == config.format && present.size == config.extent { + self.presentation = Some(present); + return Ok(()); + } + let non_srgb_format = conv::map_format_nosrgb(config.format).unwrap(); + drop(present.view); + let result = present.swapchain.ResizeBuffers( + config.image_count, + config.extent.width, + config.extent.height, + non_srgb_format, + 0, + ); + if result != winerror::S_OK { + error!("ResizeBuffers failed with 0x{:x}", result as u32); + return Err(window::CreationError::WindowInUse(hal::device::WindowInUse)); + } + present.swapchain + } + None => { + let (swapchain, _) = + device.create_swapchain_impl(&config, self.wnd_handle, self.factory.clone())?; + swapchain + } + }; + + let mut resource: *mut d3d11::ID3D11Resource = ptr::null_mut(); + assert_eq!( + winerror::S_OK, + swapchain.GetBuffer( + 0 as _, + &d3d11::ID3D11Resource::uuidof(), + &mut resource as *mut *mut _ as *mut *mut _, + ) + ); + + let kind = image::Kind::D2(config.extent.width, config.extent.height, 1, 1); + let format = conv::map_format(config.format).unwrap(); + let decomposed = conv::DecomposedDxgiFormat::from_dxgi_format(format); + + let view_info = ViewInfo { + resource, + kind, + caps: image::ViewCapabilities::empty(), + view_kind: image::ViewKind::D2, + format: decomposed.rtv.unwrap(), + range: 
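// Surface::capabilities returns fixed values apart from the live window
// size read via GetClientRect: FIFO-only presentation, opaque composite
// alpha, 1..=16 images, and extents clamped to 16x16 .. 4096x4096.
// configure_swapchain reuses an existing swapchain through ResizeBuffers
// rather than recreating it, and the old render-target view is dropped
// first because DXGI fails ResizeBuffers while any reference to a back
// buffer is still alive; the resized back buffer is then re-queried with
// GetBuffer and wrapped in a fresh RTV built from the view info whose
// subresource range follows: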
image::SubresourceRange { + aspects: format::Aspects::COLOR, + levels: 0 .. 1, + layers: 0 .. 1, + }, + }; + let view = device.view_image_as_render_target(&view_info).unwrap(); + + (*resource).Release(); + + self.presentation = Some(Presentation { + swapchain, + view, + format: config.format, + size: config.extent, + }); + Ok(()) + } + + unsafe fn unconfigure_swapchain(&mut self, _device: &device::Device) { + self.presentation = None; + } + + unsafe fn acquire_image( + &mut self, + _timeout_ns: u64, + ) -> Result<(ImageView, Option), window::AcquireError> { + let present = self.presentation.as_ref().unwrap(); + let image_view = ImageView { + format: present.format, + rtv_handle: Some(present.view.clone()), + dsv_handle: None, + srv_handle: None, + uav_handle: None, + }; + Ok((image_view, None)) + } +} + + +pub struct Swapchain { + dxgi_swapchain: ComPtr, +} + + +impl fmt::Debug for Swapchain { + fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result { + fmt.write_str("Swapchain") + } +} + +unsafe impl Send for Swapchain {} +unsafe impl Sync for Swapchain {} + +impl window::Swapchain for Swapchain { + unsafe fn acquire_image( + &mut self, + _timeout_ns: u64, + _semaphore: Option<&Semaphore>, + _fence: Option<&Fence>, + ) -> Result<(window::SwapImageIndex, Option), window::AcquireError> { + + + + Ok((0, None)) + } +} + +#[derive(Debug, Clone, Copy)] +pub struct QueueFamily; + +impl queue::QueueFamily for QueueFamily { + fn queue_type(&self) -> queue::QueueType { + queue::QueueType::General + } + fn max_queues(&self) -> usize { + 1 + } + fn id(&self) -> queue::QueueFamilyId { + queue::QueueFamilyId(0) + } +} + +#[derive(Clone)] +pub struct CommandQueue { + context: ComPtr, +} + +impl fmt::Debug for CommandQueue { + fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result { + fmt.write_str("CommandQueue") + } +} + +unsafe impl Send for CommandQueue {} +unsafe impl Sync for CommandQueue {} + +impl queue::CommandQueue for CommandQueue { + unsafe fn submit<'a, T, Ic, S, Iw, Is>( + &mut self, + submission: queue::Submission, + fence: Option<&Fence>, + ) where + T: 'a + Borrow, + Ic: IntoIterator, + S: 'a + Borrow, + Iw: IntoIterator, + Is: IntoIterator, + { + let _scope = debug_scope!(&self.context, "Submit(fence={:?})", fence); + for cmd_buf in submission.command_buffers { + let cmd_buf = cmd_buf.borrow(); + + let _scope = debug_scope!( + &self.context, + "CommandBuffer ({}/{})", + cmd_buf.flush_coherent_memory.len(), + cmd_buf.invalidate_coherent_memory.len() + ); + + { + let _scope = debug_scope!(&self.context, "Pre-Exec: Flush"); + for sync in &cmd_buf.flush_coherent_memory { + sync.do_flush(&self.context); + } + } + self.context + .ExecuteCommandList(cmd_buf.as_raw_list().as_raw(), FALSE); + { + let _scope = debug_scope!(&self.context, "Post-Exec: Invalidate"); + for sync in &cmd_buf.invalidate_coherent_memory { + sync.do_invalidate(&self.context); + } + } + } + + if let Some(fence) = fence { + *fence.mutex.lock() = true; + fence.condvar.notify_all(); + } + } + + unsafe fn present<'a, W, Is, S, Iw>( + &mut self, + swapchains: Is, + _wait_semaphores: Iw, + ) -> Result, window::PresentError> + where + W: 'a + Borrow, + Is: IntoIterator, + S: 'a + Borrow, + Iw: IntoIterator, + { + for (swapchain, _idx) in swapchains { + swapchain.borrow().dxgi_swapchain.Present(1, 0); + } + + Ok(None) + } + + unsafe fn present_surface( + &mut self, + surface: &mut Surface, + _image: ImageView, + _wait_semaphore: Option<&Semaphore>, + ) -> Result, window::PresentError> { + surface + .presentation + .as_ref() + 
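// The queue wraps the immediate context directly: submit() replays each
// recorded ID3D11CommandList with ExecuteCommandList, bracketed by the
// software coherency work gathered during recording, flush entries pushing
// host writes into device buffers before execution and invalidate entries
// reading results back afterwards. Fences are CPU-side (a Mutex<bool> plus
// Condvar) and are signalled as soon as submission returns, leaning on
// D3D11's implicit ordering for the actual GPU work. present_surface
// finishes the chain below by calling Present(1, 0) on the stored
// swapchain.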
.unwrap() + .swapchain + .Present(1, 0); + Ok(None) + } + + fn wait_idle(&self) -> Result<(), hal::device::OutOfMemory> { + + Ok(()) + } +} + +#[derive(Debug)] +pub struct AttachmentClear { + subpass_id: Option, + attachment_id: usize, + raw: command::AttachmentClear, +} + +#[derive(Debug)] +pub struct RenderPassCache { + pub render_pass: RenderPass, + pub framebuffer: Framebuffer, + pub attachment_clear_values: Vec, + pub target_rect: pso::Rect, + pub current_subpass: usize, +} + +impl RenderPassCache { + pub fn start_subpass( + &mut self, + internal: &mut internal::Internal, + context: &ComPtr, + cache: &mut CommandBufferState, + ) { + let attachments = self + .attachment_clear_values + .iter() + .filter(|clear| clear.subpass_id == Some(self.current_subpass)) + .map(|clear| clear.raw); + + cache + .dirty_flag + .insert(DirtyStateFlag::GRAPHICS_PIPELINE | DirtyStateFlag::VIEWPORTS); + internal.clear_attachments( + context, + attachments, + &[pso::ClearRect { + rect: self.target_rect, + layers: 0 .. 1, + }], + &self, + ); + + let subpass = &self.render_pass.subpasses[self.current_subpass]; + let color_views = subpass + .color_attachments + .iter() + .map(|&(id, _)| { + self.framebuffer.attachments[id] + .rtv_handle + .clone() + .unwrap() + .as_raw() + }) + .collect::>(); + let ds_view = match subpass.depth_stencil_attachment { + Some((id, _)) => Some( + self.framebuffer.attachments[id] + .dsv_handle + .clone() + .unwrap() + .as_raw(), + ), + None => None, + }; + + cache.set_render_targets(&color_views, ds_view); + cache.bind(context); + } + + pub fn next_subpass(&mut self) { + self.current_subpass += 1; + } +} + +bitflags! { + struct DirtyStateFlag : u32 { + const RENDER_TARGETS = (1 << 1); + const VERTEX_BUFFERS = (1 << 2); + const GRAPHICS_PIPELINE = (1 << 3); + const VIEWPORTS = (1 << 4); + const BLEND_STATE = (1 << 5); + } +} + +pub struct CommandBufferState { + dirty_flag: DirtyStateFlag, + + render_target_len: u32, + render_targets: [*mut d3d11::ID3D11RenderTargetView; 8], + depth_target: Option<*mut d3d11::ID3D11DepthStencilView>, + graphics_pipeline: Option, + + + + bound_bindings: u32, + + + required_bindings: Option, + + max_bindings: Option, + viewports: Vec, + vertex_buffers: Vec<*mut d3d11::ID3D11Buffer>, + vertex_offsets: Vec, + vertex_strides: Vec, + blend_factor: Option<[f32; 4]>, + + stencil_ref: Option, + stencil_read_mask: Option, + stencil_write_mask: Option, + current_blend: Option<*mut d3d11::ID3D11BlendState>, +} + + +impl fmt::Debug for CommandBufferState { + fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result { + fmt.write_str("CommandBufferState") + } +} + +impl CommandBufferState { + fn new() -> Self { + CommandBufferState { + dirty_flag: DirtyStateFlag::empty(), + render_target_len: 0, + render_targets: [ptr::null_mut(); 8], + depth_target: None, + graphics_pipeline: None, + bound_bindings: 0, + required_bindings: None, + max_bindings: None, + viewports: Vec::new(), + vertex_buffers: Vec::new(), + vertex_offsets: Vec::new(), + vertex_strides: Vec::new(), + blend_factor: None, + stencil_ref: None, + stencil_read_mask: None, + stencil_write_mask: None, + current_blend: None, + } + } + + fn clear(&mut self) { + self.render_target_len = 0; + self.depth_target = None; + self.graphics_pipeline = None; + self.bound_bindings = 0; + self.required_bindings = None; + self.max_bindings = None; + self.viewports.clear(); + self.vertex_buffers.clear(); + self.vertex_offsets.clear(); + self.vertex_strides.clear(); + self.blend_factor = None; + self.stencil_ref = None; + 
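// CommandBufferState shadows the chunk of pipeline state that D3D11 makes
// awkward to set incrementally: render targets, vertex buffers, viewports,
// blend state, and the graphics pipeline itself. Setters only record
// values and raise a DirtyStateFlag bit; bind() later replays just the
// dirty groups onto the deferred context. This is what lets internal
// helpers (blits, partial clears) clobber real device state mid-pass and
// have start_subpass restore it wholesale by re-marking the groups dirty.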
self.stencil_read_mask = None; + self.stencil_write_mask = None; + self.current_blend = None; + } + + pub fn set_vertex_buffer( + &mut self, + index: usize, + offset: u32, + buffer: *mut d3d11::ID3D11Buffer, + ) { + self.bound_bindings |= 1 << index as u32; + + if index >= self.vertex_buffers.len() { + self.vertex_buffers.push(buffer); + self.vertex_offsets.push(offset); + } else { + self.vertex_buffers[index] = buffer; + self.vertex_offsets[index] = offset; + } + + self.dirty_flag.insert(DirtyStateFlag::VERTEX_BUFFERS); + } + + pub fn bind_vertex_buffers(&mut self, context: &ComPtr) { + if let Some(binding_count) = self.max_bindings { + if self.vertex_buffers.len() >= binding_count as usize + && self.vertex_strides.len() >= binding_count as usize + { + unsafe { + context.IASetVertexBuffers( + 0, + binding_count, + self.vertex_buffers.as_ptr(), + self.vertex_strides.as_ptr(), + self.vertex_offsets.as_ptr(), + ); + } + + self.dirty_flag.remove(DirtyStateFlag::VERTEX_BUFFERS); + } + } + } + + pub fn set_viewports(&mut self, viewports: &[d3d11::D3D11_VIEWPORT]) { + self.viewports.clear(); + self.viewports.extend(viewports); + + self.dirty_flag.insert(DirtyStateFlag::VIEWPORTS); + } + + pub fn bind_viewports(&mut self, context: &ComPtr) { + if let Some(ref pipeline) = self.graphics_pipeline { + if let Some(ref viewport) = pipeline.baked_states.viewport { + unsafe { + context.RSSetViewports(1, [conv::map_viewport(&viewport)].as_ptr()); + } + } else { + unsafe { + context.RSSetViewports(self.viewports.len() as u32, self.viewports.as_ptr()); + } + } + } else { + unsafe { + context.RSSetViewports(self.viewports.len() as u32, self.viewports.as_ptr()); + } + } + + self.dirty_flag.remove(DirtyStateFlag::VIEWPORTS); + } + + pub fn set_render_targets( + &mut self, + render_targets: &[*mut d3d11::ID3D11RenderTargetView], + depth_target: Option<*mut d3d11::ID3D11DepthStencilView>, + ) { + for (idx, &rt) in render_targets.iter().enumerate() { + self.render_targets[idx] = rt; + } + + self.render_target_len = render_targets.len() as u32; + self.depth_target = depth_target; + + self.dirty_flag.insert(DirtyStateFlag::RENDER_TARGETS); + } + + pub fn bind_render_targets(&mut self, context: &ComPtr) { + unsafe { + context.OMSetRenderTargets( + self.render_target_len, + self.render_targets.as_ptr(), + if let Some(dsv) = self.depth_target { + dsv + } else { + ptr::null_mut() + }, + ); + } + + self.dirty_flag.remove(DirtyStateFlag::RENDER_TARGETS); + } + + pub fn set_blend_factor(&mut self, factor: [f32; 4]) { + self.blend_factor = Some(factor); + + self.dirty_flag.insert(DirtyStateFlag::BLEND_STATE); + } + + pub fn bind_blend_state(&mut self, context: &ComPtr) { + if let Some(blend) = self.current_blend { + let blend_color = if let Some(ref pipeline) = self.graphics_pipeline { + pipeline + .baked_states + .blend_color + .or(self.blend_factor) + .unwrap_or([0f32; 4]) + } else { + self.blend_factor.unwrap_or([0f32; 4]) + }; + + + unsafe { + context.OMSetBlendState(blend, &blend_color, !0); + } + + self.dirty_flag.remove(DirtyStateFlag::BLEND_STATE); + } + } + + pub fn set_graphics_pipeline(&mut self, pipeline: GraphicsPipeline) { + self.graphics_pipeline = Some(pipeline); + + self.dirty_flag.insert(DirtyStateFlag::GRAPHICS_PIPELINE); + } + + pub fn bind_graphics_pipeline(&mut self, context: &ComPtr) { + if let Some(ref pipeline) = self.graphics_pipeline { + self.vertex_strides.clear(); + self.vertex_strides.extend(&pipeline.strides); + + self.required_bindings = Some(pipeline.required_bindings); + self.max_bindings 
= Some(pipeline.max_vertex_bindings); + }; + + self.bind_vertex_buffers(context); + + if let Some(ref pipeline) = self.graphics_pipeline { + unsafe { + context.IASetPrimitiveTopology(pipeline.topology); + context.IASetInputLayout(pipeline.input_layout.as_raw()); + + context.VSSetShader(pipeline.vs.as_raw(), ptr::null_mut(), 0); + if let Some(ref ps) = pipeline.ps { + context.PSSetShader(ps.as_raw(), ptr::null_mut(), 0); + } + if let Some(ref gs) = pipeline.gs { + context.GSSetShader(gs.as_raw(), ptr::null_mut(), 0); + } + if let Some(ref hs) = pipeline.hs { + context.HSSetShader(hs.as_raw(), ptr::null_mut(), 0); + } + if let Some(ref ds) = pipeline.ds { + context.DSSetShader(ds.as_raw(), ptr::null_mut(), 0); + } + + context.RSSetState(pipeline.rasterizer_state.as_raw()); + if let Some(ref viewport) = pipeline.baked_states.viewport { + context.RSSetViewports(1, [conv::map_viewport(&viewport)].as_ptr()); + } + if let Some(ref scissor) = pipeline.baked_states.scissor { + context.RSSetScissorRects(1, [conv::map_rect(&scissor)].as_ptr()); + } + + if let Some((ref state, reference)) = pipeline.depth_stencil_state { + let stencil_ref = if let pso::State::Static(reference) = reference { + reference + } else { + self.stencil_ref.unwrap_or(0) + }; + + context.OMSetDepthStencilState(state.as_raw(), stencil_ref); + } + self.current_blend = Some(pipeline.blend_state.as_raw()); + } + }; + + self.bind_blend_state(context); + + self.dirty_flag.remove(DirtyStateFlag::GRAPHICS_PIPELINE); + } + + pub fn bind(&mut self, context: &ComPtr) { + if self.dirty_flag.contains(DirtyStateFlag::RENDER_TARGETS) { + self.bind_render_targets(context); + } + + if self.dirty_flag.contains(DirtyStateFlag::GRAPHICS_PIPELINE) { + self.bind_graphics_pipeline(context); + } + + if self.dirty_flag.contains(DirtyStateFlag::VERTEX_BUFFERS) { + self.bind_vertex_buffers(context); + } + + if self.dirty_flag.contains(DirtyStateFlag::VIEWPORTS) { + self.bind_viewports(context); + } + } +} + +pub struct CommandBuffer { + + internal: internal::Internal, + context: ComPtr, + list: RefCell>>, + + + + + flush_coherent_memory: Vec, + invalidate_coherent_memory: Vec, + + + render_pass_cache: Option, + + cache: CommandBufferState, + + one_time_submit: bool, +} + +impl fmt::Debug for CommandBuffer { + fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result { + fmt.write_str("CommandBuffer") + } +} + +unsafe impl Send for CommandBuffer {} +unsafe impl Sync for CommandBuffer {} + +impl CommandBuffer { + fn create_deferred(device: ComPtr, internal: internal::Internal) -> Self { + let mut context: *mut d3d11::ID3D11DeviceContext = ptr::null_mut(); + let hr = + unsafe { device.CreateDeferredContext(0, &mut context as *mut *mut _ as *mut *mut _) }; + assert_eq!(hr, winerror::S_OK); + + CommandBuffer { + internal, + context: unsafe { ComPtr::from_raw(context) }, + list: RefCell::new(None), + flush_coherent_memory: Vec::new(), + invalidate_coherent_memory: Vec::new(), + render_pass_cache: None, + cache: CommandBufferState::new(), + one_time_submit: false, + } + } + + fn as_raw_list(&self) -> ComPtr { + if self.one_time_submit { + self.list.replace(None).unwrap() + } else { + self.list.borrow().clone().unwrap() + } + } + + unsafe fn bind_vertex_descriptor( + &self, + context: &ComPtr, + binding: &PipelineBinding, + handles: *mut Descriptor, + ) { + use pso::DescriptorType::*; + + let handles = handles.offset(binding.handle_offset as isize); + let start = binding.binding_range.start as UINT; + let len = binding.binding_range.end as UINT - start; + + match 
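// Command buffers are D3D11 deferred contexts: create_deferred wraps
// CreateDeferredContext, every unsafe fn records into self.context, and
// finish() snapshots the recording into an ID3D11CommandList (see
// FinishCommandList further down). as_raw_list clones that list for
// re-submittable buffers but moves it out for ONE_TIME_SUBMIT ones, so a
// one-shot buffer cannot be replayed by accident. The descriptor setters
// here then fan a flat handle array out to the fixed-function slots,
// dispatching on: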
binding.ty { + Sampler => context.VSSetSamplers(start, len, handles as *const *mut _ as *const *mut _), + SampledImage | InputAttachment => { + context.VSSetShaderResources(start, len, handles as *const *mut _ as *const *mut _) + } + CombinedImageSampler => { + context.VSSetShaderResources(start, len, handles as *const *mut _ as *const *mut _); + context.VSSetSamplers( + start, + len, + handles.offset(1) as *const *mut _ as *const *mut _, + ); + } + UniformBuffer | UniformBufferDynamic => { + context.VSSetConstantBuffers(start, len, handles as *const *mut _ as *const *mut _) + } + _ => {} + } + } + + unsafe fn bind_fragment_descriptor( + &self, + context: &ComPtr, + binding: &PipelineBinding, + handles: *mut Descriptor, + ) { + use pso::DescriptorType::*; + + let handles = handles.offset(binding.handle_offset as isize); + let start = binding.binding_range.start as UINT; + let len = binding.binding_range.end as UINT - start; + + match binding.ty { + Sampler => context.PSSetSamplers(start, len, handles as *const *mut _ as *const *mut _), + SampledImage | InputAttachment => { + context.PSSetShaderResources(start, len, handles as *const *mut _ as *const *mut _) + } + CombinedImageSampler => { + context.PSSetShaderResources(start, len, handles as *const *mut _ as *const *mut _); + context.PSSetSamplers( + start, + len, + handles.offset(1) as *const *mut _ as *const *mut _, + ); + } + UniformBuffer | UniformBufferDynamic => { + context.PSSetConstantBuffers(start, len, handles as *const *mut _ as *const *mut _) + } + _ => {} + } + } + + unsafe fn bind_compute_descriptor( + &self, + context: &ComPtr, + binding: &PipelineBinding, + handles: *mut Descriptor, + ) { + use pso::DescriptorType::*; + + let handles = handles.offset(binding.handle_offset as isize); + let start = binding.binding_range.start as UINT; + let len = binding.binding_range.end as UINT - start; + + match binding.ty { + Sampler => context.CSSetSamplers(start, len, handles as *const *mut _ as *const *mut _), + SampledImage | InputAttachment => { + context.CSSetShaderResources(start, len, handles as *const *mut _ as *const *mut _) + } + CombinedImageSampler => { + context.CSSetShaderResources(start, len, handles as *const *mut _ as *const *mut _); + context.CSSetSamplers( + start, + len, + handles.offset(1) as *const *mut _ as *const *mut _, + ); + } + UniformBuffer | UniformBufferDynamic => { + context.CSSetConstantBuffers(start, len, handles as *const *mut _ as *const *mut _) + } + StorageImage | StorageBuffer => context.CSSetUnorderedAccessViews( + start, + len, + handles as *const *mut _ as *const *mut _, + ptr::null_mut(), + ), + _ => unimplemented!(), + } + } + + fn bind_descriptor( + &self, + context: &ComPtr, + binding: &PipelineBinding, + handles: *mut Descriptor, + ) { + + + unsafe { + if binding.stage.contains(pso::ShaderStageFlags::VERTEX) { + self.bind_vertex_descriptor(context, binding, handles); + } + + if binding.stage.contains(pso::ShaderStageFlags::FRAGMENT) { + self.bind_fragment_descriptor(context, binding, handles); + } + + if binding.stage.contains(pso::ShaderStageFlags::COMPUTE) { + self.bind_compute_descriptor(context, binding, handles); + } + } + } + + fn defer_coherent_flush(&mut self, buffer: &Buffer) { + if !self + .flush_coherent_memory + .iter() + .any(|m| m.buffer == buffer.internal.raw) + { + self.flush_coherent_memory.push(MemoryFlush { + host_memory: buffer.host_ptr, + sync_range: SyncRange::Whole, + buffer: buffer.internal.raw, + }); + } + } + + fn defer_coherent_invalidate(&mut self, buffer: 
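// A descriptor set here is one flat array of raw D3D11 interface pointers;
// each PipelineBinding carries an offset into that array plus a shader
// register range, so every bind is a single VS/PS/CSSet* call over `len`
// consecutive handles. CombinedImageSampler entries occupy two adjacent
// slots (SRV, then sampler at offset 1), and the same binding is re-issued
// once per stage listed in binding.stage, since D3D11 has no binding table
// shared across stages. The coherent-memory hooks here and below record at
// most one flush/invalidate per underlying buffer, deduplicated by its raw
// pointer.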
&Buffer) { + if !self + .invalidate_coherent_memory + .iter() + .any(|m| m.buffer == buffer.internal.raw) + { + self.invalidate_coherent_memory.push(MemoryInvalidate { + working_buffer: Some(self.internal.working_buffer.clone()), + working_buffer_size: self.internal.working_buffer_size, + host_memory: buffer.host_ptr, + sync_range: buffer.bound_range.clone(), + buffer: buffer.internal.raw, + }); + } + } + + fn reset(&mut self) { + self.flush_coherent_memory.clear(); + self.invalidate_coherent_memory.clear(); + self.render_pass_cache = None; + self.cache.clear(); + } +} + +impl command::CommandBuffer for CommandBuffer { + unsafe fn begin( + &mut self, + flags: command::CommandBufferFlags, + _info: command::CommandBufferInheritanceInfo, + ) { + self.one_time_submit = flags.contains(command::CommandBufferFlags::ONE_TIME_SUBMIT); + self.reset(); + } + + unsafe fn finish(&mut self) { + let mut list = ptr::null_mut(); + let hr = self + .context + .FinishCommandList(FALSE, &mut list as *mut *mut _ as *mut *mut _); + assert_eq!(hr, winerror::S_OK); + + self.list.replace(Some(ComPtr::from_raw(list))); + } + + unsafe fn reset(&mut self, _release_resources: bool) { + self.reset(); + } + + unsafe fn begin_render_pass( + &mut self, + render_pass: &RenderPass, + framebuffer: &Framebuffer, + target_rect: pso::Rect, + clear_values: T, + _first_subpass: command::SubpassContents, + ) where + T: IntoIterator, + T::Item: Borrow, + { + use pass::AttachmentLoadOp as Alo; + + let mut clear_iter = clear_values.into_iter(); + let mut attachment_clears = Vec::new(); + + for (idx, attachment) in render_pass.attachments.iter().enumerate() { + + let format = attachment.format.unwrap(); + + let subpass_id = render_pass.subpasses.iter().position(|sp| sp.is_using(idx)); + + if attachment.has_clears() { + let value = *clear_iter.next().unwrap().borrow(); + + match (attachment.ops.load, attachment.stencil_ops.load) { + (Alo::Clear, Alo::Clear) if format.is_depth() => { + attachment_clears.push(AttachmentClear { + subpass_id, + attachment_id: idx, + raw: command::AttachmentClear::DepthStencil { + depth: Some(value.depth_stencil.depth), + stencil: Some(value.depth_stencil.stencil), + }, + }); + } + (Alo::Clear, Alo::Clear) => { + attachment_clears.push(AttachmentClear { + subpass_id, + attachment_id: idx, + raw: command::AttachmentClear::Color { + index: idx, + value: value.color, + }, + }); + + attachment_clears.push(AttachmentClear { + subpass_id, + attachment_id: idx, + raw: command::AttachmentClear::DepthStencil { + depth: None, + stencil: Some(value.depth_stencil.stencil), + }, + }); + } + (Alo::Clear, _) if format.is_depth() => { + attachment_clears.push(AttachmentClear { + subpass_id, + attachment_id: idx, + raw: command::AttachmentClear::DepthStencil { + depth: Some(value.depth_stencil.depth), + stencil: None, + }, + }); + } + (Alo::Clear, _) => { + attachment_clears.push(AttachmentClear { + subpass_id, + attachment_id: idx, + raw: command::AttachmentClear::Color { + index: idx, + value: value.color, + }, + }); + } + (_, Alo::Clear) => { + attachment_clears.push(AttachmentClear { + subpass_id, + attachment_id: idx, + raw: command::AttachmentClear::DepthStencil { + depth: None, + stencil: Some(value.depth_stencil.stencil), + }, + }); + } + _ => {} + } + } + } + + self.render_pass_cache = Some(RenderPassCache { + render_pass: render_pass.clone(), + framebuffer: framebuffer.clone(), + attachment_clear_values: attachment_clears, + target_rect, + current_subpass: 0, + }); + + if let Some(ref mut current_render_pass) = 
self.render_pass_cache { + current_render_pass.start_subpass(&mut self.internal, &self.context, &mut self.cache); + } + } + + unsafe fn next_subpass(&mut self, _contents: command::SubpassContents) { + if let Some(ref mut current_render_pass) = self.render_pass_cache { + + current_render_pass.next_subpass(); + current_render_pass.start_subpass(&mut self.internal, &self.context, &mut self.cache); + } + } + + unsafe fn end_render_pass(&mut self) { + self.context + .OMSetRenderTargets(8, [ptr::null_mut(); 8].as_ptr(), ptr::null_mut()); + + self.render_pass_cache = None; + } + + unsafe fn pipeline_barrier<'a, T>( + &mut self, + _stages: Range, + _dependencies: memory::Dependencies, + _barriers: T, + ) where + T: IntoIterator, + T::Item: Borrow>, + { + + + } + + unsafe fn clear_image( + &mut self, + image: &Image, + _: image::Layout, + value: command::ClearValue, + subresource_ranges: T, + ) where + T: IntoIterator, + T::Item: Borrow, + { + for range in subresource_ranges { + let range = range.borrow(); + + + if range.aspects.contains(format::Aspects::COLOR) { + for layer in range.layers.clone() { + for level in range.levels.clone() { + self.context.ClearRenderTargetView( + image.get_rtv(level, layer).unwrap().as_raw(), + &value.color.float32, + ); + } + } + } + + let mut depth_stencil_flags = 0; + if range.aspects.contains(format::Aspects::DEPTH) { + depth_stencil_flags |= d3d11::D3D11_CLEAR_DEPTH; + } + + if range.aspects.contains(format::Aspects::STENCIL) { + depth_stencil_flags |= d3d11::D3D11_CLEAR_STENCIL; + } + + if depth_stencil_flags != 0 { + for layer in range.layers.clone() { + for level in range.levels.clone() { + self.context.ClearDepthStencilView( + image.get_dsv(level, layer).unwrap().as_raw(), + depth_stencil_flags, + value.depth_stencil.depth, + value.depth_stencil.stencil as _, + ); + } + } + } + } + } + + unsafe fn clear_attachments(&mut self, clears: T, rects: U) + where + T: IntoIterator, + T::Item: Borrow, + U: IntoIterator, + U::Item: Borrow, + { + if let Some(ref pass) = self.render_pass_cache { + self.cache.dirty_flag.insert( + DirtyStateFlag::GRAPHICS_PIPELINE + | DirtyStateFlag::VIEWPORTS + | DirtyStateFlag::RENDER_TARGETS, + ); + self.internal + .clear_attachments(&self.context, clears, rects, pass); + self.cache.bind(&self.context); + } else { + panic!("`clear_attachments` can only be called inside a renderpass") + } + } + + unsafe fn resolve_image( + &mut self, + _src: &Image, + _src_layout: image::Layout, + _dst: &Image, + _dst_layout: image::Layout, + _regions: T, + ) where + T: IntoIterator, + T::Item: Borrow, + { + unimplemented!() + } + + unsafe fn blit_image( + &mut self, + src: &Image, + _src_layout: image::Layout, + dst: &Image, + _dst_layout: image::Layout, + filter: image::Filter, + regions: T, + ) where + T: IntoIterator, + T::Item: Borrow, + { + self.cache + .dirty_flag + .insert(DirtyStateFlag::GRAPHICS_PIPELINE); + + self.internal + .blit_2d_image(&self.context, src, dst, filter, regions); + + self.cache.bind(&self.context); + } + + unsafe fn bind_index_buffer(&mut self, ibv: buffer::IndexBufferView) { + self.context.IASetIndexBuffer( + ibv.buffer.internal.raw, + conv::map_index_type(ibv.index_type), + ibv.offset as u32, + ); + } + + unsafe fn bind_vertex_buffers(&mut self, first_binding: pso::BufferIndex, buffers: I) + where + I: IntoIterator, + T: Borrow, + { + for (i, (buf, offset)) in buffers.into_iter().enumerate() { + let idx = i + first_binding as usize; + let buf = buf.borrow(); + + if buf.ty == MemoryHeapFlags::HOST_COHERENT { + 
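// begin_render_pass performs no clears itself: it pairs each attachment's
// load ops with the next supplied clear value and stores AttachmentClear
// records tagged with the first subpass using that attachment, which
// start_subpass later turns into the partial-clear draws. clear_image, by
// contrast, clears immediately, walking layer x level and hitting the
// cached views with ClearRenderTargetView/ClearDepthStencilView.
// Host-coherent vertex buffers are queued for a flush at submit time right
// here, before their raw pointers are handed to the state cache: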
self.defer_coherent_flush(buf); + } + + self.cache + .set_vertex_buffer(idx, offset as u32, buf.internal.raw); + } + + self.cache.bind_vertex_buffers(&self.context); + } + + unsafe fn set_viewports(&mut self, _first_viewport: u32, viewports: T) + where + T: IntoIterator, + T::Item: Borrow, + { + let viewports = viewports + .into_iter() + .map(|v| { + let v = v.borrow(); + conv::map_viewport(v) + }) + .collect::>(); + + + self.cache.set_viewports(&viewports); + self.cache.bind_viewports(&self.context); + } + + unsafe fn set_scissors(&mut self, _first_scissor: u32, scissors: T) + where + T: IntoIterator, + T::Item: Borrow, + { + let scissors = scissors + .into_iter() + .map(|s| { + let s = s.borrow(); + conv::map_rect(s) + }) + .collect::>(); + + + self.context + .RSSetScissorRects(scissors.len() as _, scissors.as_ptr()); + } + + unsafe fn set_blend_constants(&mut self, color: pso::ColorValue) { + self.cache.set_blend_factor(color); + self.cache.bind_blend_state(&self.context); + } + + unsafe fn set_stencil_reference(&mut self, _faces: pso::Face, value: pso::StencilValue) { + self.cache.stencil_ref = Some(value); + } + + unsafe fn set_stencil_read_mask(&mut self, _faces: pso::Face, value: pso::StencilValue) { + self.cache.stencil_read_mask = Some(value); + } + + unsafe fn set_stencil_write_mask(&mut self, _faces: pso::Face, value: pso::StencilValue) { + self.cache.stencil_write_mask = Some(value); + } + + unsafe fn set_depth_bounds(&mut self, _bounds: Range) { + unimplemented!() + } + + unsafe fn set_line_width(&mut self, width: f32) { + validate_line_width(width); + } + + unsafe fn set_depth_bias(&mut self, _depth_bias: pso::DepthBias) { + + + } + + unsafe fn bind_graphics_pipeline(&mut self, pipeline: &GraphicsPipeline) { + self.cache.set_graphics_pipeline(pipeline.clone()); + self.cache.bind_graphics_pipeline(&self.context); + } + + unsafe fn bind_graphics_descriptor_sets<'a, I, J>( + &mut self, + layout: &PipelineLayout, + first_set: usize, + sets: I, + _offsets: J, + ) where + I: IntoIterator, + I::Item: Borrow, + J: IntoIterator, + J::Item: Borrow, + { + let _scope = debug_scope!(&self.context, "BindGraphicsDescriptorSets"); + + + self.context.CSSetUnorderedAccessViews( + 0, + 16, + [ptr::null_mut(); 16].as_ptr(), + ptr::null_mut(), + ); + + + + let iter = sets + .into_iter() + .zip(layout.set_bindings.iter().skip(first_set)); + + for (set, bindings) in iter { + let set = set.borrow(); + + { + let coherent_buffers = set.coherent_buffers.lock(); + for sync in coherent_buffers.flush_coherent_buffers.borrow().iter() { + + if !self + .flush_coherent_memory + .iter() + .any(|m| m.buffer == sync.device_buffer) + { + self.flush_coherent_memory.push(MemoryFlush { + host_memory: sync.host_ptr, + sync_range: sync.range.clone(), + buffer: sync.device_buffer, + }); + } + } + + for sync in coherent_buffers.invalidate_coherent_buffers.borrow().iter() { + if !self + .invalidate_coherent_memory + .iter() + .any(|m| m.buffer == sync.device_buffer) + { + self.invalidate_coherent_memory.push(MemoryInvalidate { + working_buffer: Some(self.internal.working_buffer.clone()), + working_buffer_size: self.internal.working_buffer_size, + host_memory: sync.host_ptr, + sync_range: sync.range.clone(), + buffer: sync.device_buffer, + }); + } + } + } + + + for binding in bindings.iter() { + self.bind_descriptor(&self.context, binding, set.handles); + } + } + } + + unsafe fn bind_compute_pipeline(&mut self, pipeline: &ComputePipeline) { + self.context + .CSSetShader(pipeline.cs.as_raw(), ptr::null_mut(), 0); + } + + 
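[Editor's illustrative aside, not part of the patch: the descriptor-set binding paths above and below defer coherent-memory synchronization instead of performing it at bind time. `defer_coherent_flush`, `defer_coherent_invalidate`, and both `bind_*_descriptor_sets` implementations push a `MemoryFlush`/`MemoryInvalidate` record only if the buffer is not already queued, so each host-coherent buffer is synchronized at most once per command buffer; the queues are presumably replayed around submission and cleared by `reset()`. A minimal standalone sketch of that dedup-then-record pattern, with hypothetical `BufferId`/`PendingSync` types standing in for the raw `*mut ID3D11Buffer` records:]

```rust
use std::ops::Range;

// Hypothetical stand-in for the raw D3D11 buffer pointer used as the dedup key.
#[derive(Clone, Copy, PartialEq, Eq, Debug)]
struct BufferId(usize);

#[derive(Debug)]
struct PendingSync {
    buffer: BufferId,
    range: Range<u64>,
}

#[derive(Default, Debug)]
struct CoherentQueue {
    pending: Vec<PendingSync>,
}

impl CoherentQueue {
    // Record a sync at most once per buffer: repeated binds of the same
    // host-coherent buffer within one command buffer must not duplicate work.
    fn defer(&mut self, buffer: BufferId, range: Range<u64>) {
        if !self.pending.iter().any(|p| p.buffer == buffer) {
            self.pending.push(PendingSync { buffer, range });
        }
    }
}

fn main() {
    let mut q = CoherentQueue::default();
    q.defer(BufferId(1), 0..256);
    q.defer(BufferId(1), 0..256); // deduplicated, no second entry
    q.defer(BufferId(2), 64..128);
    assert_eq!(q.pending.len(), 2);
    println!("{:?}", q.pending);
}
```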
unsafe fn bind_compute_descriptor_sets( + &mut self, + layout: &PipelineLayout, + first_set: usize, + sets: I, + _offsets: J, + ) where + I: IntoIterator, + I::Item: Borrow, + J: IntoIterator, + J::Item: Borrow, + { + let _scope = debug_scope!(&self.context, "BindComputeDescriptorSets"); + + self.context.CSSetUnorderedAccessViews( + 0, + 16, + [ptr::null_mut(); 16].as_ptr(), + ptr::null_mut(), + ); + let iter = sets + .into_iter() + .zip(layout.set_bindings.iter().skip(first_set)); + + for (set, bindings) in iter { + let set = set.borrow(); + + { + let coherent_buffers = set.coherent_buffers.lock(); + for sync in coherent_buffers.flush_coherent_buffers.borrow().iter() { + if !self + .flush_coherent_memory + .iter() + .any(|m| m.buffer == sync.device_buffer) + { + self.flush_coherent_memory.push(MemoryFlush { + host_memory: sync.host_ptr, + sync_range: sync.range.clone(), + buffer: sync.device_buffer, + }); + } + } + + for sync in coherent_buffers.invalidate_coherent_buffers.borrow().iter() { + if !self + .invalidate_coherent_memory + .iter() + .any(|m| m.buffer == sync.device_buffer) + { + self.invalidate_coherent_memory.push(MemoryInvalidate { + working_buffer: Some(self.internal.working_buffer.clone()), + working_buffer_size: self.internal.working_buffer_size, + host_memory: sync.host_ptr, + sync_range: sync.range.clone(), + buffer: sync.device_buffer, + }); + } + } + } + + + for binding in bindings.iter() { + self.bind_descriptor(&self.context, binding, set.handles); + } + } + } + + unsafe fn dispatch(&mut self, count: WorkGroupCount) { + self.context.Dispatch(count[0], count[1], count[2]); + } + + unsafe fn dispatch_indirect(&mut self, _buffer: &Buffer, _offset: buffer::Offset) { + unimplemented!() + } + + unsafe fn fill_buffer(&mut self, _buffer: &Buffer, _range: R, _data: u32) + where + R: RangeArg, + { + unimplemented!() + } + + unsafe fn update_buffer(&mut self, _buffer: &Buffer, _offset: buffer::Offset, _data: &[u8]) { + unimplemented!() + } + + unsafe fn copy_buffer(&mut self, src: &Buffer, dst: &Buffer, regions: T) + where + T: IntoIterator, + T::Item: Borrow, + { + if src.ty == MemoryHeapFlags::HOST_COHERENT { + self.defer_coherent_flush(src); + } + + for region in regions.into_iter() { + let info = region.borrow(); + let dst_box = d3d11::D3D11_BOX { + left: info.src as _, + top: 0, + front: 0, + right: (info.src + info.size) as _, + bottom: 1, + back: 1, + }; + + self.context.CopySubresourceRegion( + dst.internal.raw as _, + 0, + info.dst as _, + 0, + 0, + src.internal.raw as _, + 0, + &dst_box, + ); + + if let Some(disjoint_cb) = dst.internal.disjoint_cb { + self.context.CopySubresourceRegion( + disjoint_cb as _, + 0, + info.dst as _, + 0, + 0, + src.internal.raw as _, + 0, + &dst_box, + ); + } + } + } + + unsafe fn copy_image( + &mut self, + src: &Image, + _: image::Layout, + dst: &Image, + _: image::Layout, + regions: T, + ) where + T: IntoIterator, + T::Item: Borrow, + { + self.internal + .copy_image_2d(&self.context, src, dst, regions); + } + + unsafe fn copy_buffer_to_image( + &mut self, + buffer: &Buffer, + image: &Image, + _: image::Layout, + regions: T, + ) where + T: IntoIterator, + T::Item: Borrow, + { + if buffer.ty == MemoryHeapFlags::HOST_COHERENT { + self.defer_coherent_flush(buffer); + } + + self.internal + .copy_buffer_to_image_2d(&self.context, buffer, image, regions); + } + + unsafe fn copy_image_to_buffer( + &mut self, + image: &Image, + _: image::Layout, + buffer: &Buffer, + regions: T, + ) where + T: IntoIterator, + T::Item: Borrow, + { + if buffer.ty == 
MemoryHeapFlags::HOST_COHERENT {
+            self.defer_coherent_invalidate(buffer);
+        }
+
+        self.internal
+            .copy_image_2d_to_buffer(&self.context, image, buffer, regions);
+    }
+
+    unsafe fn draw(&mut self, vertices: Range<VertexCount>, instances: Range<InstanceCount>) {
+        self.context.DrawInstanced(
+            vertices.end - vertices.start,
+            instances.end - instances.start,
+            vertices.start,
+            instances.start,
+        );
+    }
+
+    unsafe fn draw_indexed(
+        &mut self,
+        indices: Range<IndexCount>,
+        base_vertex: VertexOffset,
+        instances: Range<InstanceCount>,
+    ) {
+        self.context.DrawIndexedInstanced(
+            indices.end - indices.start,
+            instances.end - instances.start,
+            indices.start,
+            base_vertex,
+            instances.start,
+        );
+    }
+
+    unsafe fn draw_indirect(
+        &mut self,
+        _buffer: &Buffer,
+        _offset: buffer::Offset,
+        _draw_count: DrawCount,
+        _stride: u32,
+    ) {
+        unimplemented!()
+    }
+
+    unsafe fn draw_indexed_indirect(
+        &mut self,
+        _buffer: &Buffer,
+        _offset: buffer::Offset,
+        _draw_count: DrawCount,
+        _stride: u32,
+    ) {
+        unimplemented!()
+    }
+
+    unsafe fn set_event(&mut self, _: &(), _: pso::PipelineStage) {
+        unimplemented!()
+    }
+
+    unsafe fn reset_event(&mut self, _: &(), _: pso::PipelineStage) {
+        unimplemented!()
+    }
+
+    unsafe fn wait_events<'a, I, J>(&mut self, _: I, _: Range<pso::PipelineStage>, _: J)
+    where
+        I: IntoIterator,
+        I::Item: Borrow<()>,
+        J: IntoIterator,
+        J::Item: Borrow<memory::Barrier<'a, Backend>>,
+    {
+        unimplemented!()
+    }
+
+    unsafe fn begin_query(&mut self, _query: query::Query<Backend>, _flags: query::ControlFlags) {
+        unimplemented!()
+    }
+
+    unsafe fn end_query(&mut self, _query: query::Query<Backend>) {
+        unimplemented!()
+    }
+
+    unsafe fn reset_query_pool(&mut self, _pool: &QueryPool, _queries: Range<query::Id>) {
+        unimplemented!()
+    }
+
+    unsafe fn copy_query_pool_results(
+        &mut self,
+        _pool: &QueryPool,
+        _queries: Range<query::Id>,
+        _buffer: &Buffer,
+        _offset: buffer::Offset,
+        _stride: buffer::Offset,
+        _flags: query::ResultFlags,
+    ) {
+        unimplemented!()
+    }
+
+    unsafe fn write_timestamp(&mut self, _: pso::PipelineStage, _query: query::Query<Backend>) {
+        unimplemented!()
+    }
+
+    unsafe fn push_graphics_constants(
+        &mut self,
+        _layout: &PipelineLayout,
+        _stages: pso::ShaderStageFlags,
+        _offset: u32,
+        _constants: &[u32],
+    ) {
+        // Push constants are not supported by this backend yet.
+    }
+
+    unsafe fn push_compute_constants(
+        &mut self,
+        _layout: &PipelineLayout,
+        _offset: u32,
+        _constants: &[u32],
+    ) {
+        unimplemented!()
+    }
+
+    unsafe fn execute_commands<'a, T, I>(&mut self, _buffers: I)
+    where
+        T: 'a + Borrow<CommandBuffer>,
+        I: IntoIterator<Item = &'a T>,
+    {
+        unimplemented!()
+    }
+}
+
+bitflags! {
+    struct MemoryHeapFlags: u64 {
+        const DEVICE_LOCAL = 0x1;
+        const HOST_VISIBLE = 0x2 | 0x4;
+        const HOST_COHERENT = 0x2;
+    }
+}
+
+#[derive(Clone, Debug)]
+enum SyncRange {
+    Whole,
+    Partial(Range<u64>),
+}
+
+#[derive(Debug)]
+pub struct MemoryFlush {
+    host_memory: *mut u8,
+    sync_range: SyncRange,
+    buffer: *mut d3d11::ID3D11Buffer,
+}
+
+pub struct MemoryInvalidate {
+    working_buffer: Option<ComPtr<d3d11::ID3D11Buffer>>,
+    working_buffer_size: u64,
+    host_memory: *mut u8,
+    sync_range: Range<u64>,
+    buffer: *mut d3d11::ID3D11Buffer,
+}
+
+impl fmt::Debug for MemoryInvalidate {
+    fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result {
+        fmt.write_str("MemoryInvalidate")
+    }
+}
+
+fn intersection(a: &Range<u64>, b: &Range<u64>) -> Option<Range<u64>> {
+    let min = if a.start < b.start { a } else { b };
+    let max = if min == a { b } else { a };
+
+    if min.end < max.start {
+        None
+    } else {
+        let end = if min.end < max.end { min.end } else { max.end };
+        Some(max.start .. end)
+    }
+}
+
+impl MemoryFlush {
+    fn do_flush(&self, context: &ComPtr<d3d11::ID3D11DeviceContext>) {
+        let src = self.host_memory;
+
+        debug_marker!(context, "Flush({:?})", self.sync_range);
+        let region = if let SyncRange::Partial(range) = &self.sync_range {
+            Some(d3d11::D3D11_BOX {
+                left: range.start as _,
+                top: 0,
+                front: 0,
+                right: range.end as _,
+                bottom: 1,
+                back: 1,
+            })
+        } else {
+            None
+        };
+
+        unsafe {
+            context.UpdateSubresource(
+                self.buffer as _,
+                0,
+                if let Some(region) = region {
+                    &region
+                } else {
+                    ptr::null_mut()
+                },
+                src as _,
+                0,
+                0,
+            );
+        }
+    }
+}
+
+impl MemoryInvalidate {
+    fn download(
+        &self,
+        context: &ComPtr<d3d11::ID3D11DeviceContext>,
+        buffer: *mut d3d11::ID3D11Buffer,
+        range: Range<u64>,
+    ) {
+        unsafe {
+            context.CopySubresourceRegion(
+                self.working_buffer.clone().unwrap().as_raw() as _,
+                0,
+                0,
+                0,
+                0,
+                buffer as _,
+                0,
+                &d3d11::D3D11_BOX {
+                    left: range.start as _,
+                    top: 0,
+                    front: 0,
+                    right: range.end as _,
+                    bottom: 1,
+                    back: 1,
+                },
+            );
+
+            // Copy the staging data back into host memory.
+            let dst = self.host_memory.offset(range.start as isize);
+            let src = self.map(&context);
+            ptr::copy(src, dst, (range.end - range.start) as usize);
+            self.unmap(&context);
+        }
+    }
+
+    fn do_invalidate(&self, context: &ComPtr<d3d11::ID3D11DeviceContext>) {
+        let stride = self.working_buffer_size;
+        let range = &self.sync_range;
+        let len = range.end - range.start;
+        let chunks = len / stride;
+        let remainder = len % stride;
+
+        // Download in working-buffer-sized chunks.
+        for i in 0 .. chunks {
+            let offset = range.start + i * stride;
+            let range = offset .. (offset + stride);
+
+            self.download(context, self.buffer, range);
+        }
+
+        if remainder != 0 {
+            self.download(context, self.buffer, (chunks * stride) .. range.end);
+        }
+    }
+
+    fn map(&self, context: &ComPtr<d3d11::ID3D11DeviceContext>) -> *mut u8 {
+        assert!(self.working_buffer.is_some());
+
+        unsafe {
+            let mut map = mem::zeroed();
+            let hr = context.Map(
+                self.working_buffer.clone().unwrap().as_raw() as _,
+                0,
+                d3d11::D3D11_MAP_READ,
+                0,
+                &mut map,
+            );
+
+            assert_eq!(hr, winerror::S_OK);
+
+            map.pData as _
+        }
+    }
+
+    fn unmap(&self, context: &ComPtr<d3d11::ID3D11DeviceContext>) {
+        unsafe {
+            context.Unmap(self.working_buffer.clone().unwrap().as_raw() as _, 0);
+        }
+    }
+}
+
+// There are no real memory heaps to suballocate from; each bind of a
+// buffer/image creates a D3D11 resource that is assigned a range of the
+// "memory" object below.
+
+pub struct Memory {
+    ty: MemoryHeapFlags,
+    properties: memory::Properties,
+    size: u64,
+
+    // Host pointer for the mapped allocation.
+    mapped_ptr: *mut u8,
+
+    // Staging memory backing host-visible allocations.
+    host_visible: Option<RefCell<Vec<u8>>>,
+
+    // Buffers bound into this memory, with their assigned ranges.
+    local_buffers: RefCell<Vec<(Range<u64>, InternalBuffer)>>,
+
+    // Images bound into this memory, with their assigned ranges.
+    local_images: RefCell<Vec<(Range<u64>, InternalImage)>>,
+}
+
+impl fmt::Debug for Memory {
+    fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result {
+        fmt.write_str("Memory")
+    }
+}
+
+unsafe impl Send for Memory {}
+unsafe impl Sync for Memory {}
+
+impl Memory {
+    pub fn resolve<R: RangeArg<u64>>(&self, range: &R) -> Range<u64> {
+        *range.start().unwrap_or(&0) ..
*range.end().unwrap_or(&self.size) + } + + pub fn bind_buffer(&self, range: Range, buffer: InternalBuffer) { + self.local_buffers.borrow_mut().push((range, buffer)); + } + + pub fn flush(&self, context: &ComPtr, range: Range) { + use buffer::Usage; + + for &(ref buffer_range, ref buffer) in self.local_buffers.borrow().iter() { + if let Some(range) = intersection(&range, &buffer_range) { + let ptr = self.mapped_ptr; + + + + + + + + + + + + + + + if buffer.usage.contains(Usage::UNIFORM) && buffer.usage != Usage::UNIFORM { + MemoryFlush { + host_memory: unsafe { ptr.offset(buffer_range.start as _) }, + sync_range: SyncRange::Whole, + buffer: buffer.raw, + } + .do_flush(&context); + + if let Some(disjoint) = buffer.disjoint_cb { + MemoryFlush { + host_memory: unsafe { ptr.offset(buffer_range.start as _) }, + sync_range: SyncRange::Whole, + buffer: disjoint, + } + .do_flush(&context); + } + } else if buffer.usage == Usage::UNIFORM { + MemoryFlush { + host_memory: unsafe { ptr.offset(buffer_range.start as _) }, + sync_range: SyncRange::Whole, + buffer: buffer.raw, + } + .do_flush(&context); + } else { + let local_start = range.start - buffer_range.start; + let local_len = range.end - range.start; + + MemoryFlush { + host_memory: unsafe { ptr.offset(range.start as _) }, + sync_range: SyncRange::Partial(local_start .. (local_start + local_len)), + buffer: buffer.raw, + } + .do_flush(&context); + } + } + } + } + + pub fn invalidate( + &self, + context: &ComPtr, + range: Range, + working_buffer: ComPtr, + working_buffer_size: u64, + ) { + for &(ref buffer_range, ref buffer) in self.local_buffers.borrow().iter() { + if let Some(range) = intersection(&range, &buffer_range) { + MemoryInvalidate { + working_buffer: Some(working_buffer.clone()), + working_buffer_size, + host_memory: self.mapped_ptr, + sync_range: range.clone(), + buffer: buffer.raw, + } + .do_invalidate(&context); + } + } + } +} + +#[derive(Debug)] +pub struct CommandPool { + device: ComPtr, + internal: internal::Internal, +} + +unsafe impl Send for CommandPool {} +unsafe impl Sync for CommandPool {} + +impl hal::pool::CommandPool for CommandPool { + unsafe fn reset(&mut self, _release_resources: bool) { + + } + + unsafe fn allocate_one(&mut self, _level: command::Level) -> CommandBuffer { + CommandBuffer::create_deferred(self.device.clone(), self.internal.clone()) + } + + unsafe fn free(&mut self, _cbufs: I) + where + I: IntoIterator, + { + + + } +} + + +pub enum ShaderModule { + Dxbc(Vec), + Spirv(Vec), +} + + +impl ::fmt::Debug for ShaderModule { + fn fmt(&self, f: &mut ::std::fmt::Formatter) -> ::std::fmt::Result { + write!(f, "{}", "ShaderModule { ... 
}") + } +} + +unsafe impl Send for ShaderModule {} +unsafe impl Sync for ShaderModule {} + +#[derive(Clone, Debug)] +pub struct SubpassDesc { + pub color_attachments: Vec, + pub depth_stencil_attachment: Option, + pub input_attachments: Vec, + pub resolve_attachments: Vec, +} + +impl SubpassDesc { + pub(crate) fn is_using(&self, at_id: pass::AttachmentId) -> bool { + self.color_attachments + .iter() + .chain(self.depth_stencil_attachment.iter()) + .chain(self.input_attachments.iter()) + .chain(self.resolve_attachments.iter()) + .any(|&(id, _)| id == at_id) + } +} + +#[derive(Clone, Debug)] +pub struct RenderPass { + pub attachments: Vec, + pub subpasses: Vec, +} + +#[derive(Clone, Debug)] +pub struct Framebuffer { + attachments: Vec, + layers: image::Layer, +} + +#[derive(Clone, Debug)] +pub struct InternalBuffer { + raw: *mut d3d11::ID3D11Buffer, + + + disjoint_cb: Option<*mut d3d11::ID3D11Buffer>, + srv: Option<*mut d3d11::ID3D11ShaderResourceView>, + uav: Option<*mut d3d11::ID3D11UnorderedAccessView>, + usage: buffer::Usage, +} + +pub struct Buffer { + internal: InternalBuffer, + ty: MemoryHeapFlags, + host_ptr: *mut u8, + bound_range: Range, + requirements: memory::Requirements, + bind: d3d11::D3D11_BIND_FLAG, +} + +impl fmt::Debug for Buffer { + fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result { + fmt.write_str("Buffer") + } +} + +unsafe impl Send for Buffer {} +unsafe impl Sync for Buffer {} + +#[derive(Debug)] +pub struct BufferView; + +pub struct Image { + kind: image::Kind, + usage: image::Usage, + format: format::Format, + view_caps: image::ViewCapabilities, + decomposed_format: conv::DecomposedDxgiFormat, + mip_levels: image::Level, + internal: InternalImage, + tiling: image::Tiling, + bind: d3d11::D3D11_BIND_FLAG, + requirements: memory::Requirements, +} + +impl fmt::Debug for Image { + fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result { + fmt.write_str("Image") + } +} + +pub struct InternalImage { + raw: *mut d3d11::ID3D11Resource, + copy_srv: Option>, + srv: Option>, + + + unordered_access_views: Vec>, + + + depth_stencil_views: Vec>, + + + render_target_views: Vec>, +} + +impl fmt::Debug for InternalImage { + fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result { + fmt.write_str("InternalImage") + } +} + +unsafe impl Send for Image {} +unsafe impl Sync for Image {} + +impl Image { + pub fn calc_subresource(&self, mip_level: UINT, layer: UINT) -> UINT { + mip_level + (layer * self.mip_levels as UINT) + } + + pub fn get_uav( + &self, + mip_level: image::Level, + _layer: image::Layer, + ) -> Option<&ComPtr> { + self.internal + .unordered_access_views + .get(self.calc_subresource(mip_level as _, 0) as usize) + } + + pub fn get_dsv( + &self, + mip_level: image::Level, + layer: image::Layer, + ) -> Option<&ComPtr> { + self.internal + .depth_stencil_views + .get(self.calc_subresource(mip_level as _, layer as _) as usize) + } + + pub fn get_rtv( + &self, + mip_level: image::Level, + layer: image::Layer, + ) -> Option<&ComPtr> { + self.internal + .render_target_views + .get(self.calc_subresource(mip_level as _, layer as _) as usize) + } +} + +#[derive(Clone)] +pub struct ImageView { + format: format::Format, + rtv_handle: Option>, + srv_handle: Option>, + dsv_handle: Option>, + uav_handle: Option>, +} + +impl fmt::Debug for ImageView { + fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result { + fmt.write_str("ImageView") + } +} + +unsafe impl Send for ImageView {} +unsafe impl Sync for ImageView {} + +pub struct Sampler { + sampler_handle: ComPtr, +} + +impl fmt::Debug 
for Sampler { + fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result { + fmt.write_str("Sampler") + } +} + +unsafe impl Send for Sampler {} +unsafe impl Sync for Sampler {} + +pub struct ComputePipeline { + cs: ComPtr, +} + +impl fmt::Debug for ComputePipeline { + fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result { + fmt.write_str("ComputePipeline") + } +} + +unsafe impl Send for ComputePipeline {} +unsafe impl Sync for ComputePipeline {} + + + + + + +#[derive(Clone)] +pub struct GraphicsPipeline { + vs: ComPtr, + gs: Option>, + hs: Option>, + ds: Option>, + ps: Option>, + topology: d3d11::D3D11_PRIMITIVE_TOPOLOGY, + input_layout: ComPtr, + rasterizer_state: ComPtr, + blend_state: ComPtr, + depth_stencil_state: Option<( + ComPtr, + pso::State, + )>, + baked_states: pso::BakedStates, + required_bindings: u32, + max_vertex_bindings: u32, + strides: Vec, +} + +impl fmt::Debug for GraphicsPipeline { + fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result { + fmt.write_str("GraphicsPipeline") + } +} + +unsafe impl Send for GraphicsPipeline {} +unsafe impl Sync for GraphicsPipeline {} + +#[derive(Clone, Debug)] +struct PipelineBinding { + stage: pso::ShaderStageFlags, + ty: pso::DescriptorType, + binding_range: Range, + handle_offset: u32, +} + +#[derive(Clone, Debug)] +struct RegisterMapping { + ty: pso::DescriptorType, + spirv_binding: u32, + hlsl_register: u8, + combined: bool, +} + +#[derive(Clone, Debug)] +struct RegisterRemapping { + mapping: Vec, + num_t: u8, + num_s: u8, + num_c: u8, + num_u: u8, +} + + + +#[derive(Debug)] +pub struct PipelineLayout { + set_bindings: Vec>, + set_remapping: Vec, +} + + + + +#[derive(Debug)] +pub struct DescriptorSetLayout { + bindings: Vec, + handle_count: u32, + register_remap: RegisterRemapping, +} + +#[derive(Debug)] +struct CoherentBufferFlushRange { + device_buffer: *mut d3d11::ID3D11Buffer, + host_ptr: *mut u8, + range: SyncRange, +} + +#[derive(Debug)] +struct CoherentBufferInvalidateRange { + device_buffer: *mut d3d11::ID3D11Buffer, + host_ptr: *mut u8, + range: Range, +} + +#[derive(Debug)] +struct CoherentBuffers { + + + flush_coherent_buffers: RefCell>, + invalidate_coherent_buffers: RefCell>, +} + +impl CoherentBuffers { + fn add_flush(&self, old: *mut d3d11::ID3D11Buffer, buffer: &Buffer) { + let new = buffer.internal.raw; + + if old != new { + let mut buffers = self.flush_coherent_buffers.borrow_mut(); + + let pos = buffers.iter().position(|sync| old == sync.device_buffer); + + let sync_range = CoherentBufferFlushRange { + device_buffer: new, + host_ptr: buffer.host_ptr, + range: SyncRange::Whole, + }; + + if let Some(pos) = pos { + buffers[pos] = sync_range; + } else { + buffers.push(sync_range); + } + + if let Some(disjoint) = buffer.internal.disjoint_cb { + let pos = buffers + .iter() + .position(|sync| disjoint == sync.device_buffer); + + let sync_range = CoherentBufferFlushRange { + device_buffer: disjoint, + host_ptr: buffer.host_ptr, + range: SyncRange::Whole, + }; + + if let Some(pos) = pos { + buffers[pos] = sync_range; + } else { + buffers.push(sync_range); + } + } + } + } + + fn add_invalidate(&self, old: *mut d3d11::ID3D11Buffer, buffer: &Buffer) { + let new = buffer.internal.raw; + + if old != new { + let mut buffers = self.invalidate_coherent_buffers.borrow_mut(); + + let pos = buffers.iter().position(|sync| old == sync.device_buffer); + + let sync_range = CoherentBufferInvalidateRange { + device_buffer: new, + host_ptr: buffer.host_ptr, + range: buffer.bound_range.clone(), + }; + + if let Some(pos) = pos { + 
buffers[pos] = sync_range; + } else { + buffers.push(sync_range); + } + } + } +} + + +#[derive(Debug, Copy, Clone)] +#[repr(C)] +struct Descriptor(*mut d3d11::ID3D11DeviceChild); + +pub struct DescriptorSet { + offset: usize, + len: usize, + handles: *mut Descriptor, + register_remap: RegisterRemapping, + coherent_buffers: Mutex, +} + +impl fmt::Debug for DescriptorSet { + fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result { + fmt.write_str("DescriptorSet") + } +} + +unsafe impl Send for DescriptorSet {} +unsafe impl Sync for DescriptorSet {} + +impl DescriptorSet { + fn get_handle_offset(&self, target_binding: u32) -> (pso::DescriptorType, u8, u8) { + use pso::DescriptorType::*; + + let mapping = self + .register_remap + .mapping + .iter() + .find(|&mapping| target_binding == mapping.spirv_binding) + .unwrap(); + + let (ty, register) = (mapping.ty, mapping.hlsl_register); + + match ty { + Sampler => { + let (ty, t_reg) = if mapping.combined { + let combined_mapping = self + .register_remap + .mapping + .iter() + .find(|&mapping| { + mapping.ty == SampledImage && target_binding == mapping.spirv_binding + }) + .unwrap(); + (CombinedImageSampler, combined_mapping.hlsl_register) + } else { + (ty, 0) + }; + + (ty, register, self.register_remap.num_s + t_reg) + } + SampledImage | UniformTexelBuffer => (ty, self.register_remap.num_s + register, 0), + UniformBuffer | UniformBufferDynamic => ( + ty, + self.register_remap.num_s + self.register_remap.num_t + register, + 0, + ), + StorageTexelBuffer | StorageBuffer | InputAttachment | StorageBufferDynamic + | StorageImage => ( + ty, + self.register_remap.num_s + + self.register_remap.num_t + + self.register_remap.num_c + + register, + 0, + ), + CombinedImageSampler => unreachable!(), + } + } + + fn add_flush(&self, old: *mut d3d11::ID3D11Buffer, buffer: &Buffer) { + let new = buffer.internal.raw; + + if old != new { + self.coherent_buffers.lock().add_flush(old, buffer); + } + } + + fn add_invalidate(&self, old: *mut d3d11::ID3D11Buffer, buffer: &Buffer) { + let new = buffer.internal.raw; + + if old != new { + self.coherent_buffers.lock().add_invalidate(old, buffer); + } + } +} + +#[derive(Debug)] +pub struct DescriptorPool { + handles: Vec, + allocator: RangeAllocator, +} + +unsafe impl Send for DescriptorPool {} +unsafe impl Sync for DescriptorPool {} + +impl DescriptorPool { + pub fn with_capacity(size: usize) -> Self { + DescriptorPool { + handles: vec![Descriptor(ptr::null_mut()); size], + allocator: RangeAllocator::new(0 .. size), + } + } +} + +impl pso::DescriptorPool for DescriptorPool { + unsafe fn allocate_set( + &mut self, + layout: &DescriptorSetLayout, + ) -> Result { + + + let len = layout.handle_count.max(1) as _; + + self.allocator + .allocate_range(len) + .map(|range| { + for handle in &mut self.handles[range.clone()] { + *handle = Descriptor(ptr::null_mut()); + } + + DescriptorSet { + offset: range.start, + len, + handles: self.handles.as_mut_ptr().offset(range.start as _), + register_remap: layout.register_remap.clone(), + coherent_buffers: Mutex::new(CoherentBuffers { + flush_coherent_buffers: RefCell::new(Vec::new()), + invalidate_coherent_buffers: RefCell::new(Vec::new()), + }), + } + }) + .map_err(|_| pso::AllocationError::OutOfPoolMemory) + } + + unsafe fn free_sets(&mut self, descriptor_sets: I) + where + I: IntoIterator, + { + for set in descriptor_sets { + self.allocator + .free_range(set.offset .. 
(set.offset + set.len))
+        }
+    }
+
+    unsafe fn reset(&mut self) {
+        self.allocator.reset();
+    }
+}
+
+#[derive(Debug)]
+pub struct RawFence {
+    mutex: Mutex<bool>,
+    condvar: Condvar,
+}
+
+pub type Fence = Arc<RawFence>;
+
+#[derive(Debug)]
+pub struct Semaphore;
+#[derive(Debug)]
+pub struct QueryPool;
+
+#[derive(Copy, Clone, Debug, Eq, Hash, PartialEq)]
+pub enum Backend {}
+impl hal::Backend for Backend {
+    type Instance = Instance;
+    type PhysicalDevice = PhysicalDevice;
+    type Device = device::Device;
+
+    type Surface = Surface;
+    type Swapchain = Swapchain;
+
+    type QueueFamily = QueueFamily;
+    type CommandQueue = CommandQueue;
+    type CommandBuffer = CommandBuffer;
+
+    type Memory = Memory;
+    type CommandPool = CommandPool;
+
+    type ShaderModule = ShaderModule;
+    type RenderPass = RenderPass;
+    type Framebuffer = Framebuffer;
+
+    type Buffer = Buffer;
+    type BufferView = BufferView;
+    type Image = Image;
+
+    type ImageView = ImageView;
+    type Sampler = Sampler;
+
+    type ComputePipeline = ComputePipeline;
+    type GraphicsPipeline = GraphicsPipeline;
+    type PipelineLayout = PipelineLayout;
+    type PipelineCache = ();
+    type DescriptorSetLayout = DescriptorSetLayout;
+    type DescriptorPool = DescriptorPool;
+    type DescriptorSet = DescriptorSet;
+
+    type Fence = Fence;
+    type Semaphore = Semaphore;
+    type Event = ();
+    type QueryPool = QueryPool;
+}
+
+fn validate_line_width(width: f32) {
+    // D3D11 does not support wide lines, so only a width of 1.0 is valid.
+    assert_eq!(width, 1.0);
+}
diff --git a/third_party/rust/gfx-backend-dx11/src/shader.rs b/third_party/rust/gfx-backend-dx11/src/shader.rs
new file mode 100644
index 000000000000..053d531952f9
--- /dev/null
+++ b/third_party/rust/gfx-backend-dx11/src/shader.rs
@@ -0,0 +1,315 @@
+use std::{ffi, ptr, slice};
+
+use spirv_cross::{hlsl, spirv, ErrorCode as SpirvErrorCode};
+
+use winapi::shared::winerror;
+use winapi::um::{d3dcommon, d3dcompiler};
+use wio::com::ComPtr;
+
+use auxil::spirv_cross_specialize_ast;
+use hal::{device, pso};
+
+use {conv, Backend, PipelineLayout};
+
+// Map a SPIRV-Cross error to a ShaderError in contexts where no error is expected.
+fn gen_unexpected_error(err: SpirvErrorCode) -> device::ShaderError {
+    let msg = match err {
+        SpirvErrorCode::CompilationError(msg) => msg,
+        SpirvErrorCode::Unhandled => "Unexpected error".into(),
+    };
+    device::ShaderError::CompilationFailed(msg)
+}
+
+// Map a SPIRV-Cross error raised while querying the AST.
+fn gen_query_error(err: SpirvErrorCode) -> device::ShaderError {
+    let msg = match err {
+        SpirvErrorCode::CompilationError(msg) => msg,
+        SpirvErrorCode::Unhandled => "Unknown query error".into(),
+    };
+    device::ShaderError::CompilationFailed(msg)
+}
+
+pub(crate) fn compile_spirv_entrypoint(
+    raw_data: &[u32],
+    stage: pso::Stage,
+    source: &pso::EntryPoint<Backend>,
+    layout: &PipelineLayout,
+) -> Result<Option<ComPtr<d3dcommon::ID3DBlob>>, device::ShaderError> {
+    let mut ast = parse_spirv(raw_data)?;
+    spirv_cross_specialize_ast(&mut ast, &source.specialization)?;
+
+    patch_spirv_resources(&mut ast, layout)?;
+    let shader_model = hlsl::ShaderModel::V5_0;
+    let shader_code = translate_spirv(&mut ast, shader_model, layout, stage)?;
+
+    let real_name = ast
+        .get_cleansed_entry_point_name(source.entry, conv::map_stage(stage))
+        .map_err(gen_query_error)?;
+
+    let entry_points = ast.get_entry_points().map_err(gen_query_error)?;
+    entry_points
+        .iter()
+        .find(|entry_point| entry_point.name == real_name)
+        .ok_or(device::ShaderError::MissingEntryPoint(source.entry.into()))
+        .and_then(|entry_point| {
+            let stage = conv::map_execution_model(entry_point.execution_model);
+            let shader = compile_hlsl_shader(
+                stage,
+                shader_model,
+                &entry_point.name,
+                shader_code.as_bytes(),
+            )?;
+            Ok(Some(unsafe { ComPtr::from_raw(shader)
})) + }) +} + +pub(crate) fn compile_hlsl_shader( + stage: pso::Stage, + shader_model: hlsl::ShaderModel, + entry: &str, + code: &[u8], +) -> Result<*mut d3dcommon::ID3DBlob, device::ShaderError> { + let stage_to_str = |stage, shader_model| { + let stage = match stage { + pso::Stage::Vertex => "vs", + pso::Stage::Fragment => "ps", + pso::Stage::Compute => "cs", + _ => unimplemented!(), + }; + + let model = match shader_model { + hlsl::ShaderModel::V5_0 => "5_0", + + hlsl::ShaderModel::V5_1 => "5_1", + + hlsl::ShaderModel::V6_0 => "6_0", + _ => unimplemented!(), + }; + + format!("{}_{}\0", stage, model) + }; + + let mut blob = ptr::null_mut(); + let mut error = ptr::null_mut(); + let entry = ffi::CString::new(entry).unwrap(); + let hr = unsafe { + d3dcompiler::D3DCompile( + code.as_ptr() as *const _, + code.len(), + ptr::null(), + ptr::null(), + ptr::null_mut(), + entry.as_ptr() as *const _, + stage_to_str(stage, shader_model).as_ptr() as *const i8, + 1, + 0, + &mut blob as *mut *mut _, + &mut error as *mut *mut _, + ) + }; + + if !winerror::SUCCEEDED(hr) { + let error = unsafe { ComPtr::::from_raw(error) }; + let message = unsafe { + let pointer = error.GetBufferPointer(); + let size = error.GetBufferSize(); + let slice = slice::from_raw_parts(pointer as *const u8, size as usize); + String::from_utf8_lossy(slice).into_owned() + }; + + Err(device::ShaderError::CompilationFailed(message)) + } else { + Ok(blob) + } +} + +fn parse_spirv(raw_data: &[u32]) -> Result, device::ShaderError> { + let module = spirv::Module::from_words(raw_data); + + spirv::Ast::parse(&module).map_err(|err| { + let msg = match err { + SpirvErrorCode::CompilationError(msg) => msg, + SpirvErrorCode::Unhandled => "Unknown parsing error".into(), + }; + device::ShaderError::CompilationFailed(msg) + }) +} + +fn patch_spirv_resources( + ast: &mut spirv::Ast, + layout: &PipelineLayout, +) -> Result<(), device::ShaderError> { + + + + let shader_resources = ast.get_shader_resources().map_err(gen_query_error)?; + for image in &shader_resources.separate_images { + let set = ast + .get_decoration(image.id, spirv::Decoration::DescriptorSet) + .map_err(gen_query_error)? as usize; + let binding = ast + .get_decoration(image.id, spirv::Decoration::Binding) + .map_err(gen_query_error)?; + let mapping = layout.set_remapping[set] + .mapping + .iter() + .find(|&mapping| binding == mapping.spirv_binding) + .unwrap(); + + ast.set_decoration( + image.id, + spirv::Decoration::Binding, + mapping.hlsl_register as u32, + ) + .map_err(gen_unexpected_error)?; + } + + for uniform_buffer in &shader_resources.uniform_buffers { + let set = ast + .get_decoration(uniform_buffer.id, spirv::Decoration::DescriptorSet) + .map_err(gen_query_error)? as usize; + let binding = ast + .get_decoration(uniform_buffer.id, spirv::Decoration::Binding) + .map_err(gen_query_error)?; + let mapping = layout.set_remapping[set] + .mapping + .iter() + .find(|&mapping| binding == mapping.spirv_binding) + .unwrap(); + + ast.set_decoration( + uniform_buffer.id, + spirv::Decoration::Binding, + mapping.hlsl_register as u32, + ) + .map_err(gen_unexpected_error)?; + } + + for storage_buffer in &shader_resources.storage_buffers { + let set = ast + .get_decoration(storage_buffer.id, spirv::Decoration::DescriptorSet) + .map_err(gen_query_error)? 
as usize; + let binding = ast + .get_decoration(storage_buffer.id, spirv::Decoration::Binding) + .map_err(gen_query_error)?; + let mapping = layout.set_remapping[set] + .mapping + .iter() + .find(|&mapping| binding == mapping.spirv_binding) + .unwrap(); + + ast.set_decoration( + storage_buffer.id, + spirv::Decoration::Binding, + mapping.hlsl_register as u32, + ) + .map_err(gen_unexpected_error)?; + } + + for image in &shader_resources.storage_images { + let set = ast + .get_decoration(image.id, spirv::Decoration::DescriptorSet) + .map_err(gen_query_error)? as usize; + let binding = ast + .get_decoration(image.id, spirv::Decoration::Binding) + .map_err(gen_query_error)?; + let mapping = layout.set_remapping[set] + .mapping + .iter() + .find(|&mapping| binding == mapping.spirv_binding) + .unwrap(); + + ast.set_decoration( + image.id, + spirv::Decoration::Binding, + mapping.hlsl_register as u32, + ) + .map_err(gen_unexpected_error)?; + } + + for sampler in &shader_resources.separate_samplers { + let set = ast + .get_decoration(sampler.id, spirv::Decoration::DescriptorSet) + .map_err(gen_query_error)? as usize; + let binding = ast + .get_decoration(sampler.id, spirv::Decoration::Binding) + .map_err(gen_query_error)?; + let mapping = layout.set_remapping[set] + .mapping + .iter() + .find(|&mapping| binding == mapping.spirv_binding) + .unwrap(); + + ast.set_decoration( + sampler.id, + spirv::Decoration::Binding, + mapping.hlsl_register as u32, + ) + .map_err(gen_unexpected_error)?; + } + + for image in &shader_resources.sampled_images { + let set = ast + .get_decoration(image.id, spirv::Decoration::DescriptorSet) + .map_err(gen_query_error)? as usize; + let binding = ast + .get_decoration(image.id, spirv::Decoration::Binding) + .map_err(gen_query_error)?; + let mapping = layout.set_remapping[set] + .mapping + .iter() + .find(|&mapping| binding == mapping.spirv_binding) + .unwrap(); + + ast.set_decoration( + image.id, + spirv::Decoration::Binding, + mapping.hlsl_register as u32, + ) + .map_err(gen_unexpected_error)?; + } + + Ok(()) +} + +fn translate_spirv( + ast: &mut spirv::Ast, + shader_model: hlsl::ShaderModel, + _layout: &PipelineLayout, + _stage: pso::Stage, +) -> Result { + let mut compile_options = hlsl::CompilerOptions::default(); + compile_options.shader_model = shader_model; + compile_options.vertex.invert_y = true; + + + + + + + + + + + + + + + + + + + ast.set_compiler_options(&compile_options) + .map_err(gen_unexpected_error)?; + + + ast.compile().map_err(|err| { + let msg = match err { + SpirvErrorCode::CompilationError(msg) => msg, + SpirvErrorCode::Unhandled => "Unknown compile error".into(), + }; + device::ShaderError::CompilationFailed(msg) + }) +} diff --git a/third_party/rust/gfx-backend-dx12/.cargo-checksum.json b/third_party/rust/gfx-backend-dx12/.cargo-checksum.json new file mode 100644 index 000000000000..f990b2fc2214 --- /dev/null +++ b/third_party/rust/gfx-backend-dx12/.cargo-checksum.json @@ -0,0 +1 @@ 
+{"files":{"Cargo.toml":"96e407f8222b9949caa9a7aa16b8b5b16b3f6cebecc43110b7848ec535645bb5","README.md":"53ad6efa9975b59f9ab830c26c940e18c3e41efd107cd34d670771a8ba53ae1a","shaders/blit.hlsl":"1f8819f3a91acf71a69bfd14ccd245180a92a9efb0ab76bf6b2e89aae10f3952","src/command.rs":"486abca81c49e037946e02a44f0c0b6ce02b1d5ecd76c04769c39ff1efa03e6b","src/conv.rs":"c9bd69d537d3c1d196d3e45bee02aad83dc3b847dd440da354c210bc65c49901","src/descriptors_cpu.rs":"ba881e6f9b90ad90aaebba5bc8d5cf1903560a762ba977ab63f278abba2e2d70","src/device.rs":"b78557efe443c69e2637a1721ae4290c1f006c468e9112c4f5fa906b2992c2d5","src/internal.rs":"da049335e8514d44686ec34f74001a155766fb7ae3cc6d9197479865eb2e3f82","src/lib.rs":"0d48df7adee4833d090dadd9d804f65ee9e6b4bbb4569b20fb0c1eb7605349b6","src/pool.rs":"d76526023087026752acf8a15a7d5e585dbb0486d0511bcae57b0e8f905970eb","src/resource.rs":"3e3d93a8793ebda2977162b5bb7681efce1a1598edd9a8d0e0b40266d67ea38d","src/root_constants.rs":"b6bb4d5ee8dd9686fb7172bc951c4c04801966f8e5cf9843fca52faa45cf7943","src/window.rs":"10003034e9512a69422520238823f342e79b32073351dec654a5360a0280b48d"},"package":"b6e913cc800fb12eaba2c420091a02aca9aafbefd672600dfc5b52654343d341"} \ No newline at end of file diff --git a/third_party/rust/gfx-backend-dx12/Cargo.toml b/third_party/rust/gfx-backend-dx12/Cargo.toml new file mode 100644 index 000000000000..985f0397f058 --- /dev/null +++ b/third_party/rust/gfx-backend-dx12/Cargo.toml @@ -0,0 +1,64 @@ +# THIS FILE IS AUTOMATICALLY GENERATED BY CARGO +# +# When uploading crates to the registry Cargo will automatically +# "normalize" Cargo.toml files for maximal compatibility +# with all versions of Cargo and also rewrite `path` dependencies +# to registry (e.g., crates.io) dependencies +# +# If you believe there's an error in this file please file an +# issue against the rust-lang/cargo repository. 
If you're +# editing this file be aware that the upstream Cargo.toml +# will likely look very different (and much more reasonable) + +[package] +name = "gfx-backend-dx12" +version = "0.4.1" +authors = ["The Gfx-rs Developers"] +description = "DirectX-12 API backend for gfx-rs" +homepage = "https://github.com/gfx-rs/gfx" +documentation = "https://docs.rs/gfx-backend-dx12" +readme = "README.md" +keywords = ["graphics", "gamedev"] +license = "MIT OR Apache-2.0" +repository = "https://github.com/gfx-rs/gfx" +[package.metadata.docs.rs] +default-target = "x86_64-pc-windows-msvc" + +[lib] +name = "gfx_backend_dx12" +[dependencies.auxil] +version = "0.1" +package = "gfx-auxil" + +[dependencies.bitflags] +version = "1" + +[dependencies.d3d12] +version = "0.3" +features = ["libloading"] + +[dependencies.gfx-hal] +version = "0.4" + +[dependencies.log] +version = "0.4" + +[dependencies.range-alloc] +version = "0.1" + +[dependencies.raw-window-handle] +version = "0.3" + +[dependencies.smallvec] +version = "0.6" + +[dependencies.spirv_cross] +version = "0.16" +features = ["hlsl"] + +[dependencies.winapi] +version = "0.3" +features = ["basetsd", "d3d12", "d3d12sdklayers", "d3d12shader", "d3dcommon", "d3dcompiler", "dxgi1_2", "dxgi1_3", "dxgi1_4", "dxgi1_6", "dxgidebug", "dxgiformat", "dxgitype", "handleapi", "minwindef", "synchapi", "unknwnbase", "winbase", "windef", "winerror", "winnt", "winuser"] + +[features] +default = [] diff --git a/third_party/rust/gfx-backend-dx12/README.md b/third_party/rust/gfx-backend-dx12/README.md new file mode 100644 index 000000000000..ebcfffa236bc --- /dev/null +++ b/third_party/rust/gfx-backend-dx12/README.md @@ -0,0 +1,13 @@ +# gfx-backend-dx12 + +DX12 backend for gfx. + +## Normalized Coordinates + +Render | Depth | Texture +-------|-------|-------- +![render_coordinates](../../../info/gl_render_coordinates.png) | ![depth_coordinates](../../../info/dx_depth_coordinates.png) | ![texture_coordinates](../../../info/dx_texture_coordinates.png) + +## Mirroring + +TODO diff --git a/third_party/rust/gfx-backend-dx12/shaders/blit.hlsl b/third_party/rust/gfx-backend-dx12/shaders/blit.hlsl new file mode 100644 index 000000000000..40810558a93a --- /dev/null +++ b/third_party/rust/gfx-backend-dx12/shaders/blit.hlsl @@ -0,0 +1,29 @@ + +Texture2DArray BlitSource : register(t0); +SamplerState BlitSampler : register(s0); + +cbuffer Region : register(b0) { + float2 offset; + float2 extent; + float z; + float level; +}; + +struct VsOutput { + float4 pos: SV_POSITION; + float4 uv: TEXCOORD0; +}; + +// Create a screen filling triangle +VsOutput vs_blit_2d(uint id: SV_VertexID) { + float2 coord = float2((id << 1) & 2, id & 2); + VsOutput output = { + float4(float2(-1.0, 1.0) + coord * float2(2.0, -2.0), 0.0, 1.0), + float4(offset + coord * extent, z, level) + }; + return output; +} + +float4 ps_blit_2d(VsOutput input) : SV_TARGET { + return BlitSource.SampleLevel(BlitSampler, input.uv.xyz, input.uv.w); +} diff --git a/third_party/rust/gfx-backend-dx12/src/command.rs b/third_party/rust/gfx-backend-dx12/src/command.rs new file mode 100644 index 000000000000..bf4ae0e2894e --- /dev/null +++ b/third_party/rust/gfx-backend-dx12/src/command.rs @@ -0,0 +1,2618 @@ +use auxil::FastHashMap; +use hal::format::Aspects; +use hal::range::RangeArg; +use hal::{buffer, command as com, format, image, memory, pass, pool, pso, query}; +use hal::{ + DrawCount, + IndexCount, + IndexType, + InstanceCount, + VertexCount, + VertexOffset, + WorkGroupCount, +}; + +use std::borrow::Borrow; +use std::ops::Range; +use 
std::sync::Arc; +use std::{cmp, fmt, iter, mem, ptr}; + +use winapi::shared::minwindef::{FALSE, TRUE, UINT}; +use winapi::shared::{dxgiformat, winerror}; +use winapi::um::{d3d12, d3dcommon}; +use winapi::Interface; + +use native; + +use root_constants::RootConstant; +use smallvec::SmallVec; +use { + conv, + descriptors_cpu, + device, + internal, + resource as r, + validate_line_width, + Backend, + Device, + Shared, + MAX_VERTEX_BUFFERS, +}; + + + +const ROOT_SIGNATURE_SIZE: usize = 64; + +const NULL_VERTEX_BUFFER_VIEW: d3d12::D3D12_VERTEX_BUFFER_VIEW = d3d12::D3D12_VERTEX_BUFFER_VIEW { + BufferLocation: 0, + SizeInBytes: 0, + StrideInBytes: 0, +}; + +fn get_rect(rect: &pso::Rect) -> d3d12::D3D12_RECT { + d3d12::D3D12_RECT { + left: rect.x as i32, + top: rect.y as i32, + right: (rect.x + rect.w) as i32, + bottom: (rect.y + rect.h) as i32, + } +} + +fn div(a: u32, b: u32) -> u32 { + (a + b - 1) / b +} + +fn up_align(x: u32, alignment: u32) -> u32 { + (x + alignment - 1) & !(alignment - 1) +} + +#[derive(Clone, Debug)] +struct AttachmentClear { + subpass_id: Option, + value: Option, + stencil_value: Option, +} + +pub struct RenderPassCache { + render_pass: r::RenderPass, + framebuffer: r::Framebuffer, + target_rect: d3d12::D3D12_RECT, + attachment_clears: Vec, +} + +impl fmt::Debug for RenderPassCache { + fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result { + fmt.write_str("RenderPassCache") + } +} + +#[derive(Debug, Copy, Clone, PartialEq, Eq)] +enum OcclusionQuery { + Binary(UINT), + Precise(UINT), +} + + + + + +#[derive(Debug, Copy, Clone, PartialEq, Eq)] +enum RootElement { + + Constant(u32), + + TableSrvCbvUav(u32), + + TableSampler(u32), + + DescriptorCbv { + buffer: u64, + }, + DescriptorPlaceholder, + + Undefined, +} + + +struct UserData { + data: [RootElement; ROOT_SIGNATURE_SIZE], + dirty_mask: u64, +} + +impl fmt::Debug for UserData { + fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result { + fmt.debug_struct("UserData") + .field("data", &&self.data[..]) + .field("dirty_mask", &self.dirty_mask) + .finish() + } +} + +impl UserData { + fn new() -> Self { + UserData { + data: [RootElement::Undefined; ROOT_SIGNATURE_SIZE], + dirty_mask: 0, + } + } + + + fn set_constants(&mut self, offset: usize, data: &[u32]) { + assert!(offset + data.len() <= ROOT_SIGNATURE_SIZE); + + for (i, val) in data.iter().enumerate() { + self.data[offset + i] = RootElement::Constant(*val); + self.dirty_mask |= 1u64 << (offset + i); + } + } + + + fn set_srv_cbv_uav_table(&mut self, offset: usize, table_start: u32) { + assert!(offset < ROOT_SIGNATURE_SIZE); + + self.data[offset] = RootElement::TableSrvCbvUav(table_start); + self.dirty_mask |= 1u64 << offset; + } + + + fn set_sampler_table(&mut self, offset: usize, table_start: u32) { + assert!(offset < ROOT_SIGNATURE_SIZE); + + self.data[offset] = RootElement::TableSampler(table_start); + self.dirty_mask |= 1u64 << offset; + } + + + fn set_descriptor_cbv(&mut self, offset: usize, buffer: u64) { + assert!(offset + 1 < ROOT_SIGNATURE_SIZE); + self.data[offset] = RootElement::DescriptorCbv { buffer }; + self.data[offset + 1] = RootElement::DescriptorPlaceholder; + self.dirty_mask |= 0b1u64 << offset; + self.dirty_mask |= 0b1u64 << offset + 1; + } + + fn is_dirty(&self) -> bool { + self.dirty_mask != 0 + } + + fn is_index_dirty(&self, i: usize) -> bool { + ((self.dirty_mask >> i) & 1) == 1 + } + + + fn clear_dirty(&mut self, i: usize) { + self.dirty_mask &= !(1 << i); + } + + + fn dirty_all(&mut self) { + self.dirty_mask = !0; + } +} + +#[derive(Debug)] 
+struct PipelineCache { + + + pipeline: Option<(native::PipelineState, native::RootSignature)>, + + num_parameter_slots: usize, + + root_constants: Vec, + + user_data: UserData, + + + srv_cbv_uav_start: u64, + sampler_start: u64, +} + +impl PipelineCache { + fn new() -> Self { + PipelineCache { + pipeline: None, + num_parameter_slots: 0, + root_constants: Vec::new(), + user_data: UserData::new(), + srv_cbv_uav_start: 0, + sampler_start: 0, + } + } + + fn bind_descriptor_sets<'a, I, J>( + &mut self, + layout: &r::PipelineLayout, + first_set: usize, + sets: I, + offsets: J, + ) -> [native::DescriptorHeap; 2] + where + I: IntoIterator, + I::Item: Borrow, + J: IntoIterator, + J::Item: Borrow, + { + let mut sets = sets.into_iter().peekable(); + let mut offsets = offsets.into_iter().map(|offset| *offset.borrow() as u64); + + + + let (srv_cbv_uav_start, sampler_start, heap_srv_cbv_uav, heap_sampler) = + if let Some(set_0) = sets.peek().map(Borrow::borrow) { + ( + set_0.srv_cbv_uav_gpu_start().ptr, + set_0.sampler_gpu_start().ptr, + set_0.heap_srv_cbv_uav, + set_0.heap_samplers, + ) + } else { + return [native::DescriptorHeap::null(); 2]; + }; + + self.srv_cbv_uav_start = srv_cbv_uav_start; + self.sampler_start = sampler_start; + + for (set, element) in sets.zip(layout.elements[first_set ..].iter()) { + let set = set.borrow(); + + let mut num_tables = 0; + + + set.first_gpu_view.map(|gpu| { + let table = &element.table; + assert!(table.ty.contains(r::SRV_CBV_UAV)); + + + + let table_gpu_offset = (gpu.ptr - srv_cbv_uav_start) as u32; + let table_offset = table.offset + num_tables; + self.user_data + .set_srv_cbv_uav_table(table_offset, table_gpu_offset); + num_tables += 1; + }); + + + set.first_gpu_sampler.map(|gpu| { + let table = &element.table; + assert!(table.ty.contains(r::SAMPLERS)); + + + + let table_gpu_offset = (gpu.ptr - sampler_start) as u32; + let table_offset = table.offset + num_tables; + self.user_data + .set_sampler_table(table_offset, table_gpu_offset); + }); + + + + + let mut descriptor_id = 0; + for binding in &set.binding_infos { + + let dynamic_descriptors = unsafe { &*binding.dynamic_descriptors.get() }; + for descriptor in dynamic_descriptors { + let root_offset = element.descriptors[descriptor_id].offset; + self.user_data + .set_descriptor_cbv(root_offset, descriptor.gpu_buffer_location + offsets.next().unwrap()); + descriptor_id += 1; + } + } + } + + [heap_srv_cbv_uav, heap_sampler] + } +} + +#[derive(Debug, Copy, Clone, PartialEq, Eq)] +enum BindPoint { + Compute, + Graphics { + + internal: bool, + }, +} + +#[derive(Clone, Debug)] +struct Copy { + footprint_offset: u64, + footprint: image::Extent, + row_pitch: u32, + img_subresource: u32, + img_offset: image::Offset, + buf_offset: image::Offset, + copy_extent: image::Extent, +} + +pub struct CommandBuffer { + raw: native::GraphicsCommandList, + allocator: native::CommandAllocator, + shared: Arc, + + + pass_cache: Option, + cur_subpass: usize, + + + + gr_pipeline: PipelineCache, + + + primitive_topology: d3d12::D3D12_PRIMITIVE_TOPOLOGY, + + comp_pipeline: PipelineCache, + + + active_bindpoint: BindPoint, + + + active_descriptor_heaps: [native::DescriptorHeap; 2], + + + + + + + occlusion_query: Option, + pipeline_stats_query: Option, + + + + + vertex_bindings_remap: [Option; MAX_VERTEX_BUFFERS], + + vertex_buffer_views: [d3d12::D3D12_VERTEX_BUFFER_VIEW; MAX_VERTEX_BUFFERS], + + + copies: Vec, + + + + viewport_cache: SmallVec< + [d3d12::D3D12_VIEWPORT; + d3d12::D3D12_VIEWPORT_AND_SCISSORRECT_OBJECT_COUNT_PER_PIPELINE as 
usize], + >, + scissor_cache: SmallVec< + [d3d12::D3D12_RECT; + d3d12::D3D12_VIEWPORT_AND_SCISSORRECT_OBJECT_COUNT_PER_PIPELINE as usize], + >, + + + rtv_pools: Vec, + + temporary_gpu_heaps: Vec, + + retained_resources: Vec, + + + + pool_create_flags: pool::CommandPoolCreateFlags, +} + +impl fmt::Debug for CommandBuffer { + fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result { + fmt.write_str("CommandBuffer") + } +} + +unsafe impl Send for CommandBuffer {} +unsafe impl Sync for CommandBuffer {} + + +enum BarrierPoint { + + Pre, + + Post, +} + +impl CommandBuffer { + pub(crate) fn new( + raw: native::GraphicsCommandList, + allocator: native::CommandAllocator, + shared: Arc, + pool_create_flags: pool::CommandPoolCreateFlags, + ) -> Self { + CommandBuffer { + raw, + allocator, + shared, + pass_cache: None, + cur_subpass: !0, + gr_pipeline: PipelineCache::new(), + primitive_topology: d3dcommon::D3D_PRIMITIVE_TOPOLOGY_UNDEFINED, + comp_pipeline: PipelineCache::new(), + active_bindpoint: BindPoint::Graphics { internal: false }, + active_descriptor_heaps: [native::DescriptorHeap::null(); 2], + occlusion_query: None, + pipeline_stats_query: None, + vertex_bindings_remap: [None; MAX_VERTEX_BUFFERS], + vertex_buffer_views: [NULL_VERTEX_BUFFER_VIEW; MAX_VERTEX_BUFFERS], + copies: Vec::new(), + viewport_cache: SmallVec::new(), + scissor_cache: SmallVec::new(), + rtv_pools: Vec::new(), + temporary_gpu_heaps: Vec::new(), + retained_resources: Vec::new(), + pool_create_flags, + } + } + + pub(crate) unsafe fn destroy(&mut self) { + self.raw.destroy(); + for heap in &self.rtv_pools { + heap.destroy(); + } + for heap in &self.temporary_gpu_heaps { + heap.destroy(); + } + for resource in &self.retained_resources { + resource.destroy(); + } + } + + pub(crate) unsafe fn as_raw_list(&self) -> *mut d3d12::ID3D12CommandList { + self.raw.as_mut_ptr() as *mut _ + } + + fn reset(&mut self) { + self.raw + .reset(self.allocator, native::PipelineState::null()); + self.pass_cache = None; + self.cur_subpass = !0; + self.gr_pipeline = PipelineCache::new(); + self.primitive_topology = d3dcommon::D3D_PRIMITIVE_TOPOLOGY_UNDEFINED; + self.comp_pipeline = PipelineCache::new(); + self.active_bindpoint = BindPoint::Graphics { internal: false }; + self.active_descriptor_heaps = [native::DescriptorHeap::null(); 2]; + self.occlusion_query = None; + self.pipeline_stats_query = None; + self.vertex_bindings_remap = [None; MAX_VERTEX_BUFFERS]; + self.vertex_buffer_views = [NULL_VERTEX_BUFFER_VIEW; MAX_VERTEX_BUFFERS]; + for heap in self.rtv_pools.drain(..) { + unsafe { + heap.destroy(); + } + } + for heap in self.temporary_gpu_heaps.drain(..) { + unsafe { + heap.destroy(); + } + } + for resource in self.retained_resources.drain(..) 
{ + unsafe { + resource.destroy(); + } + } + } + + + + + fn set_internal_graphics_pipeline(&mut self) { + self.active_bindpoint = BindPoint::Graphics { internal: true }; + self.gr_pipeline.user_data.dirty_all(); + } + + fn bind_descriptor_heaps(&mut self) { + self.raw.set_descriptor_heaps(&self.active_descriptor_heaps); + } + + fn insert_subpass_barriers(&self, insertion: BarrierPoint) { + let state = self.pass_cache.as_ref().unwrap(); + let proto_barriers = match state.render_pass.subpasses.get(self.cur_subpass) { + Some(subpass) => match insertion { + BarrierPoint::Pre => &subpass.pre_barriers, + BarrierPoint::Post => &subpass.post_barriers, + }, + None => &state.render_pass.post_barriers, + }; + + let transition_barriers = proto_barriers + .iter() + .map(|barrier| { + let mut resource_barrier = d3d12::D3D12_RESOURCE_BARRIER { + Type: d3d12::D3D12_RESOURCE_BARRIER_TYPE_TRANSITION, + Flags: barrier.flags, + u: unsafe { mem::zeroed() }, + }; + + *unsafe { resource_barrier.u.Transition_mut() } = + d3d12::D3D12_RESOURCE_TRANSITION_BARRIER { + pResource: state.framebuffer.attachments[barrier.attachment_id] + .resource + .as_mut_ptr(), + Subresource: d3d12::D3D12_RESOURCE_BARRIER_ALL_SUBRESOURCES, + StateBefore: barrier.states.start, + StateAfter: barrier.states.end, + }; + + resource_barrier + }) + .collect::>(); + + if !transition_barriers.is_empty() { + unsafe { + self.raw + .clone() + .ResourceBarrier(transition_barriers.len() as _, transition_barriers.as_ptr()); + } + } + } + + fn bind_targets(&mut self) { + let state = self.pass_cache.as_ref().unwrap(); + let subpass = &state.render_pass.subpasses[self.cur_subpass]; + + + let color_views = subpass + .color_attachments + .iter() + .map(|&(id, _)| state.framebuffer.attachments[id].handle_rtv.unwrap()) + .collect::>(); + let ds_view = match subpass.depth_stencil_attachment { + Some((id, _)) => state.framebuffer.attachments[id] + .handle_dsv + .as_ref() + .unwrap() as *const _, + None => ptr::null(), + }; + + unsafe { + self.raw.OMSetRenderTargets( + color_views.len() as UINT, + color_views.as_ptr(), + FALSE, + ds_view, + ); + } + + + for (view, clear) in state + .framebuffer + .attachments + .iter() + .zip(state.attachment_clears.iter()) + { + if clear.subpass_id != Some(self.cur_subpass) { + continue; + } + + if let (Some(handle), Some(cv)) = (view.handle_rtv, clear.value) { + self.clear_render_target_view(handle, unsafe { cv.color }, &[state.target_rect]); + } + + if let Some(handle) = view.handle_dsv { + let depth = clear.value.map(|cv| unsafe { cv.depth_stencil.depth }); + let stencil = clear.stencil_value; + + if depth.is_some() || stencil.is_some() { + self.clear_depth_stencil_view(handle, depth, stencil, &[state.target_rect]); + } + } + } + } + + fn resolve_attachments(&self) { + let state = self.pass_cache.as_ref().unwrap(); + let framebuffer = &state.framebuffer; + let subpass = &state.render_pass.subpasses[self.cur_subpass]; + + for (&(src_attachment, _), &(dst_attachment, _)) in subpass + .color_attachments + .iter() + .zip(subpass.resolve_attachments.iter()) + { + if dst_attachment == pass::ATTACHMENT_UNUSED { + continue; + } + + let resolve_src = state.framebuffer.attachments[src_attachment]; + let resolve_dst = state.framebuffer.attachments[dst_attachment]; + + + for l in 0 .. 
framebuffer.layers { + + let subresource_src = resolve_src.calc_subresource( + resolve_src.mip_levels.0 as _, + (resolve_src.layers.0 + l) as _, + ); + let subresource_dst = resolve_dst.calc_subresource( + resolve_dst.mip_levels.0 as _, + (resolve_dst.layers.0 + l) as _, + ); + + + unsafe { + self.raw.ResolveSubresource( + resolve_dst.resource.as_mut_ptr(), + subresource_dst, + resolve_src.resource.as_mut_ptr(), + subresource_src, + resolve_dst.dxgi_format, + ); + } + } + } + } + + fn clear_render_target_view( + &self, + rtv: d3d12::D3D12_CPU_DESCRIPTOR_HANDLE, + color: com::ClearColor, + rects: &[d3d12::D3D12_RECT], + ) { + let num_rects = rects.len() as _; + let rects = if num_rects > 0 { + rects.as_ptr() + } else { + ptr::null() + }; + + unsafe { + self.raw + .clone() + .ClearRenderTargetView(rtv, &color.float32, num_rects, rects); + } + } + + fn clear_depth_stencil_view( + &self, + dsv: d3d12::D3D12_CPU_DESCRIPTOR_HANDLE, + depth: Option, + stencil: Option, + rects: &[d3d12::D3D12_RECT], + ) { + let mut flags = native::ClearFlags::empty(); + if depth.is_some() { + flags |= native::ClearFlags::DEPTH; + } + if stencil.is_some() { + flags |= native::ClearFlags::STENCIL; + } + + self.raw.clear_depth_stencil_view( + dsv, + flags, + depth.unwrap_or_default(), + stencil.unwrap_or_default() as _, + rects, + ); + } + + fn set_graphics_bind_point(&mut self) { + match self.active_bindpoint { + BindPoint::Compute => { + + let (pipeline, _) = self + .gr_pipeline + .pipeline + .expect("No graphics pipeline bound"); + self.raw.set_pipeline_state(pipeline); + } + BindPoint::Graphics { internal: true } => { + + let (pipeline, signature) = self + .gr_pipeline + .pipeline + .expect("No graphics pipeline bound"); + self.raw.set_pipeline_state(pipeline); + self.raw.set_graphics_root_signature(signature); + self.bind_descriptor_heaps(); + } + BindPoint::Graphics { internal: false } => {} + } + + self.active_bindpoint = BindPoint::Graphics { internal: false }; + let cmd_buffer = &mut self.raw; + + + Self::flush_user_data( + &mut self.gr_pipeline, + |slot, data| unsafe { + cmd_buffer.clone().SetGraphicsRoot32BitConstants( + slot, + data.len() as _, + data.as_ptr() as *const _, + 0, + ) + }, + |slot, gpu| cmd_buffer.set_graphics_root_descriptor_table(slot, gpu), + |slot, buffer| cmd_buffer.set_graphics_root_constant_buffer_view(slot, buffer), + ); + } + + fn set_compute_bind_point(&mut self) { + match self.active_bindpoint { + BindPoint::Graphics { internal } => { + + let (pipeline, _) = self + .comp_pipeline + .pipeline + .expect("No compute pipeline bound"); + + self.raw.set_pipeline_state(pipeline); + + self.active_bindpoint = BindPoint::Compute; + + if internal { + self.bind_descriptor_heaps(); + + + + if let Some((_, signature)) = self.gr_pipeline.pipeline { + self.raw.set_graphics_root_signature(signature); + } + } + } + BindPoint::Compute => {} + } + + let cmd_buffer = &mut self.raw; + Self::flush_user_data( + &mut self.comp_pipeline, + |slot, data| unsafe { + cmd_buffer.clone().SetComputeRoot32BitConstants( + slot, + data.len() as _, + data.as_ptr() as *const _, + 0, + ) + }, + |slot, gpu| cmd_buffer.set_compute_root_descriptor_table(slot, gpu), + |slot, buffer| cmd_buffer.set_compute_root_constant_buffer_view(slot, buffer), + ); + } + + fn flush_user_data( + pipeline: &mut PipelineCache, + mut constants_update: F, + mut table_update: G, + mut descriptor_cbv_update: H, + ) where + F: FnMut(u32, &[u32]), + G: FnMut(u32, d3d12::D3D12_GPU_DESCRIPTOR_HANDLE), + H: FnMut(u32, 
d3d12::D3D12_GPU_VIRTUAL_ADDRESS), + { + let user_data = &mut pipeline.user_data; + if !user_data.is_dirty() { + return; + } + + let num_root_constant = pipeline.root_constants.len(); + let mut cur_index = 0; + + for (i, root_constant) in pipeline.root_constants.iter().enumerate() { + let num_constants = (root_constant.range.end - root_constant.range.start) as usize; + let mut data = Vec::new(); + for c in cur_index .. cur_index + num_constants { + data.push(match user_data.data[c] { + RootElement::Constant(v) => v, + _ => { + warn!( + "Unset or mismatching root constant at index {:?} ({:?})", + c, user_data.data[c] + ); + 0 + } + }); + user_data.clear_dirty(c); + } + constants_update(i as _, &data); + cur_index += num_constants; + } + + + + let table_start = pipeline + .root_constants + .iter() + .fold(0, |sum, c| sum + c.range.end - c.range.start) as usize; + + for i in num_root_constant .. pipeline.num_parameter_slots { + let table_index = i - num_root_constant + table_start; + if user_data.is_index_dirty(table_index) { + match user_data.data[table_index] { + RootElement::TableSrvCbvUav(offset) => { + let gpu = d3d12::D3D12_GPU_DESCRIPTOR_HANDLE { ptr: pipeline.srv_cbv_uav_start + offset as u64 }; + table_update(i as _, gpu); + user_data.clear_dirty(table_index); + } + RootElement::TableSampler(offset) => { + let gpu = d3d12::D3D12_GPU_DESCRIPTOR_HANDLE { ptr: pipeline.sampler_start + offset as u64 }; + table_update(i as _, gpu); + user_data.clear_dirty(table_index); + } + RootElement::DescriptorCbv { buffer } => { + debug_assert!(user_data.is_index_dirty(table_index + 1)); + debug_assert_eq!(user_data.data[table_index + 1], RootElement::DescriptorPlaceholder); + + descriptor_cbv_update(i as _, buffer); + + user_data.clear_dirty(table_index); + user_data.clear_dirty(table_index + 1); + } + other => { + error!( + "Unexpected user data element in the root signature ({:?})", + (table_index, other) + ); + continue; + } + }; + } + } + } + + fn transition_barrier( + transition: d3d12::D3D12_RESOURCE_TRANSITION_BARRIER, + ) -> d3d12::D3D12_RESOURCE_BARRIER { + let mut barrier = d3d12::D3D12_RESOURCE_BARRIER { + Type: d3d12::D3D12_RESOURCE_BARRIER_TYPE_TRANSITION, + Flags: d3d12::D3D12_RESOURCE_BARRIER_FLAG_NONE, + u: unsafe { mem::zeroed() }, + }; + + *unsafe { barrier.u.Transition_mut() } = transition; + barrier + } + + fn split_buffer_copy(copies: &mut Vec, r: &com::BufferImageCopy, image: &r::ImageBound) { + let buffer_width = if r.buffer_width == 0 { + r.image_extent.width + } else { + r.buffer_width + }; + let buffer_height = if r.buffer_height == 0 { + r.image_extent.height + } else { + r.buffer_height + }; + let image_extent_aligned = image::Extent { + width: up_align(r.image_extent.width, image.block_dim.0 as _), + height: up_align(r.image_extent.height, image.block_dim.1 as _), + depth: r.image_extent.depth, + }; + let row_pitch = div(buffer_width, image.block_dim.0 as _) * image.bytes_per_block as u32; + let slice_pitch = div(buffer_height, image.block_dim.1 as _) * row_pitch; + let is_pitch_aligned = row_pitch % d3d12::D3D12_TEXTURE_DATA_PITCH_ALIGNMENT == 0; + + for layer in r.image_layers.layers.clone() { + let img_subresource = image.calc_subresource(r.image_layers.level as _, layer as _, 0); + let layer_relative = (layer - r.image_layers.layers.start) as u32; + let layer_offset = r.buffer_offset as u64 + + (layer_relative * slice_pitch * r.image_extent.depth) as u64; + let aligned_offset = + layer_offset & !(d3d12::D3D12_TEXTURE_DATA_PLACEMENT_ALIGNMENT as u64 - 1); + if 
layer_offset == aligned_offset && is_pitch_aligned { + + copies.push(Copy { + footprint_offset: aligned_offset, + footprint: image_extent_aligned, + row_pitch, + img_subresource, + img_offset: r.image_offset, + buf_offset: image::Offset::ZERO, + copy_extent: image_extent_aligned, + }); + } else if is_pitch_aligned { + + let row_pitch_texels = + row_pitch / image.bytes_per_block as u32 * image.block_dim.0 as u32; + let gap = (layer_offset - aligned_offset) as i32; + let buf_offset = image::Offset { + x: (gap % row_pitch as i32) / image.bytes_per_block as i32 + * image.block_dim.0 as i32, + y: (gap % slice_pitch as i32) / row_pitch as i32 * image.block_dim.1 as i32, + z: gap / slice_pitch as i32, + }; + let footprint = image::Extent { + width: buf_offset.x as u32 + image_extent_aligned.width, + height: buf_offset.y as u32 + image_extent_aligned.height, + depth: buf_offset.z as u32 + image_extent_aligned.depth, + }; + if r.image_extent.width + buf_offset.x as u32 <= row_pitch_texels { + + copies.push(Copy { + footprint_offset: aligned_offset, + footprint, + row_pitch, + img_subresource, + img_offset: r.image_offset, + buf_offset, + copy_extent: image_extent_aligned, + }); + } else { + + assert!(buf_offset.x as u32 <= row_pitch_texels); + let half = row_pitch_texels - buf_offset.x as u32; + assert!(half <= r.image_extent.width); + + copies.push(Copy { + footprint_offset: aligned_offset, + footprint: image::Extent { + width: row_pitch_texels, + ..footprint + }, + row_pitch, + img_subresource, + img_offset: r.image_offset, + buf_offset, + copy_extent: image::Extent { + width: half, + ..r.image_extent + }, + }); + copies.push(Copy { + footprint_offset: aligned_offset, + footprint: image::Extent { + width: image_extent_aligned.width - half, + height: footprint.height + image.block_dim.1 as u32, + depth: footprint.depth, + }, + row_pitch, + img_subresource, + img_offset: image::Offset { + x: r.image_offset.x + half as i32, + ..r.image_offset + }, + buf_offset: image::Offset { + x: 0, + y: buf_offset.y + image.block_dim.1 as i32, + z: buf_offset.z, + }, + copy_extent: image::Extent { + width: image_extent_aligned.width - half, + ..image_extent_aligned + }, + }); + } + } else { + + for z in 0 .. r.image_extent.depth { + for y in 0 .. 
image_extent_aligned.height / image.block_dim.1 as u32 { + + let row_offset = layer_offset + + z as u64 * slice_pitch as u64 + + y as u64 * row_pitch as u64; + let aligned_offset = row_offset + & !(d3d12::D3D12_TEXTURE_DATA_PLACEMENT_ALIGNMENT as u64 - 1); + let next_aligned_offset = + aligned_offset + d3d12::D3D12_TEXTURE_DATA_PLACEMENT_ALIGNMENT as u64; + let cut_row_texels = (next_aligned_offset - row_offset) + / image.bytes_per_block as u64 + * image.block_dim.0 as u64; + let cut_width = + cmp::min(image_extent_aligned.width, cut_row_texels as image::Size); + let gap_texels = (row_offset - aligned_offset) as image::Size + / image.bytes_per_block as image::Size + * image.block_dim.0 as image::Size; + + let max_unaligned_pitch = + (r.image_extent.width + gap_texels) * image.bytes_per_block as u32; + let row_pitch = (max_unaligned_pitch + | (d3d12::D3D12_TEXTURE_DATA_PITCH_ALIGNMENT - 1)) + + 1; + + copies.push(Copy { + footprint_offset: aligned_offset, + footprint: image::Extent { + width: cut_width + gap_texels, + height: image.block_dim.1 as _, + depth: 1, + }, + row_pitch, + img_subresource, + img_offset: image::Offset { + x: r.image_offset.x, + y: r.image_offset.y + image.block_dim.1 as i32 * y as i32, + z: r.image_offset.z + z as i32, + }, + buf_offset: image::Offset { + x: gap_texels as i32, + y: 0, + z: 0, + }, + copy_extent: image::Extent { + width: cut_width, + height: image.block_dim.1 as _, + depth: 1, + }, + }); + + + if cut_width >= image_extent_aligned.width { + continue; + } + let leftover = image_extent_aligned.width - cut_width; + + copies.push(Copy { + footprint_offset: next_aligned_offset, + footprint: image::Extent { + width: leftover, + height: image.block_dim.1 as _, + depth: 1, + }, + row_pitch, + img_subresource, + img_offset: image::Offset { + x: r.image_offset.x + cut_width as i32, + y: r.image_offset.y + y as i32 * image.block_dim.1 as i32, + z: r.image_offset.z + z as i32, + }, + buf_offset: image::Offset::ZERO, + copy_extent: image::Extent { + width: leftover, + height: image.block_dim.1 as _, + depth: 1, + }, + }); + } + } + } + } + } + + fn set_vertex_buffers(&mut self) { + let cmd_buffer = &mut self.raw; + let vbs_remap = &self.vertex_bindings_remap; + let vbs = &self.vertex_buffer_views; + let mut last_end_slot = 0; + loop { + let start_offset = match vbs_remap[last_end_slot ..] + .iter() + .position(|remap| remap.is_some()) + { + Some(offset) => offset, + None => break, + }; + + let start_slot = last_end_slot + start_offset; + let buffers = vbs_remap[start_slot ..] 
+            .iter()
+            .take_while(|x| x.is_some())
+            .filter_map(|mapping| {
+                let mapping = mapping.unwrap();
+                let view = vbs[mapping.mapped_binding];
+
+                view.SizeInBytes.checked_sub(mapping.offset).map(|size| {
+                    d3d12::D3D12_VERTEX_BUFFER_VIEW {
+                        BufferLocation: view.BufferLocation + mapping.offset as u64,
+                        SizeInBytes: size,
+                        StrideInBytes: mapping.stride,
+                    }
+                })
+            })
+            .collect::<Vec<_>>();
+
+            if buffers.is_empty() {
+                last_end_slot = start_slot + 1;
+            } else {
+                let num_views = buffers.len();
+                unsafe {
+                    cmd_buffer.IASetVertexBuffers(
+                        start_slot as _,
+                        num_views as _,
+                        buffers.as_ptr(),
+                    );
+                }
+                last_end_slot = start_slot + num_views;
+            }
+        }
+    }
+
+    fn fill_texture_barries(
+        target: &r::ImageBound,
+        states: Range<d3d12::D3D12_RESOURCE_STATES>,
+        range: &image::SubresourceRange,
+        list: &mut impl Extend<d3d12::D3D12_RESOURCE_BARRIER>,
+    ) {
+        let mut bar =
+            Self::transition_barrier(d3d12::D3D12_RESOURCE_TRANSITION_BARRIER {
+                pResource: target.resource.as_mut_ptr(),
+                Subresource: d3d12::D3D12_RESOURCE_BARRIER_ALL_SUBRESOURCES,
+                StateBefore: states.start,
+                StateAfter: states.end,
+            });
+
+        if *range == target.to_subresource_range(range.aspects) {
+            list.extend(iter::once(bar));
+        } else {
+            for level in range.levels.clone() {
+                for layer in range.layers.clone() {
+                    unsafe {
+                        let transition_barrier = &mut *bar.u.Transition_mut();
+                        transition_barrier.Subresource =
+                            target.calc_subresource(level as _, layer as _, 0);
+                    }
+                    list.extend(iter::once(bar));
+                }
+            }
+        }
+    }
+}
+
+impl com::CommandBuffer<Backend> for CommandBuffer {
+    unsafe fn begin(
+        &mut self,
+        _flags: com::CommandBufferFlags,
+        _info: com::CommandBufferInheritanceInfo<Backend>,
+    ) {
+        if self
+            .pool_create_flags
+            .contains(pool::CommandPoolCreateFlags::RESET_INDIVIDUAL)
+        {
+            self.allocator.Reset();
+        }
+        self.reset();
+    }
+
+    unsafe fn finish(&mut self) {
+        self.raw.Close();
+    }
+
+    unsafe fn reset(&mut self, _release_resources: bool) {
+        assert!(self
+            .pool_create_flags
+            .contains(pool::CommandPoolCreateFlags::RESET_INDIVIDUAL));
+
+        self.allocator.Reset();
+        self.reset();
+    }
+
+    unsafe fn begin_render_pass<T>(
+        &mut self,
+        render_pass: &r::RenderPass,
+        framebuffer: &r::Framebuffer,
+        target_rect: pso::Rect,
+        clear_values: T,
+        _first_subpass: com::SubpassContents,
+    ) where
+        T: IntoIterator,
+        T::Item: Borrow<com::ClearValue>,
+    {
+        assert_eq!(framebuffer.attachments.len(), render_pass.attachments.len());
+
+        assert!(!render_pass.subpasses.iter().any(|sp| sp
+            .color_attachments
+            .iter()
+            .chain(sp.depth_stencil_attachment.iter())
+            .chain(sp.input_attachments.iter())
+            .any(|aref| aref.1 == image::Layout::Present)));
+
+        let mut clear_iter = clear_values.into_iter();
+        let attachment_clears = render_pass
+            .attachments
+            .iter()
+            .enumerate()
+            .map(|(i, attachment)| {
+                let cv = if attachment.has_clears() {
+                    Some(*clear_iter.next().unwrap().borrow())
+                } else {
+                    None
+                };
+
+                AttachmentClear {
+                    subpass_id: render_pass.subpasses.iter().position(|sp| sp.is_using(i)),
+                    value: if attachment.ops.load == pass::AttachmentLoadOp::Clear {
+                        assert!(cv.is_some());
+                        cv
+                    } else {
+                        None
+                    },
+                    stencil_value: if attachment.stencil_ops.load == pass::AttachmentLoadOp::Clear {
+                        Some(cv.unwrap().depth_stencil.stencil)
+                    } else {
+                        None
+                    },
+                }
+            })
+            .collect();
+
+        self.pass_cache = Some(RenderPassCache {
+            render_pass: render_pass.clone(),
+            framebuffer: framebuffer.clone(),
+            target_rect: get_rect(&target_rect),
+            attachment_clears,
+        });
+        self.cur_subpass = 0;
+        self.insert_subpass_barriers(BarrierPoint::Pre);
+        self.bind_targets();
+    }
+
+    unsafe fn next_subpass(&mut self,
_contents: com::SubpassContents) { + self.insert_subpass_barriers(BarrierPoint::Post); + self.resolve_attachments(); + + self.cur_subpass += 1; + self.insert_subpass_barriers(BarrierPoint::Pre); + self.bind_targets(); + } + + unsafe fn end_render_pass(&mut self) { + self.insert_subpass_barriers(BarrierPoint::Post); + self.resolve_attachments(); + + self.cur_subpass = !0; + self.insert_subpass_barriers(BarrierPoint::Pre); + self.pass_cache = None; + } + + unsafe fn pipeline_barrier<'a, T>( + &mut self, + _stages: Range, + _dependencies: memory::Dependencies, + barriers: T, + ) where + T: IntoIterator, + T::Item: Borrow>, + { + let mut raw_barriers = SmallVec::<[_; 4]>::new(); + + + for barrier in barriers { + match *barrier.borrow() { + memory::Barrier::AllBuffers(_) | memory::Barrier::AllImages(_) => { + + + + let mut bar = d3d12::D3D12_RESOURCE_BARRIER { + Type: d3d12::D3D12_RESOURCE_BARRIER_TYPE_UAV, + Flags: d3d12::D3D12_RESOURCE_BARRIER_FLAG_NONE, + u: mem::zeroed(), + }; + *bar.u.UAV_mut() = d3d12::D3D12_RESOURCE_UAV_BARRIER { + pResource: ptr::null_mut(), + }; + raw_barriers.push(bar); + } + memory::Barrier::Buffer { + ref states, + target, + ref families, + .. + } => { + + if let Some(f) = families { + if f.start.0 != f.end.0 { + unimplemented!("Queue family resource ownership transitions are not implemented for DX12 (attempted transition from queue family {} to {}", f.start.0, f.end.0); + } + } + let state_src = conv::map_buffer_resource_state(states.start); + let state_dst = conv::map_buffer_resource_state(states.end); + + if state_src == state_dst { + continue; + } + + let target = target.expect_bound(); + let bar = Self::transition_barrier(d3d12::D3D12_RESOURCE_TRANSITION_BARRIER { + pResource: target.resource.as_mut_ptr(), + Subresource: d3d12::D3D12_RESOURCE_BARRIER_ALL_SUBRESOURCES, + StateBefore: state_src, + StateAfter: state_dst, + }); + + raw_barriers.push(bar); + } + memory::Barrier::Image { + ref states, + target, + ref families, + ref range, + } => { + + if let Some(f) = families { + if f.start.0 != f.end.0 { + unimplemented!("Queue family resource ownership transitions are not implemented for DX12 (attempted transition from queue family {} to {}", f.start.0, f.end.0); + } + } + let state_src = conv::map_image_resource_state(states.start.0, states.start.1); + let state_dst = conv::map_image_resource_state(states.end.0, states.end.1); + + if state_src == state_dst { + continue; + } + + let target = target.expect_bound(); + Self::fill_texture_barries(target, state_src .. 
state_dst, range, &mut raw_barriers); + } + } + } + + + + + + + { + let mut barrier = d3d12::D3D12_RESOURCE_BARRIER { + Type: d3d12::D3D12_RESOURCE_BARRIER_TYPE_UAV, + Flags: d3d12::D3D12_RESOURCE_BARRIER_FLAG_NONE, + u: mem::zeroed(), + }; + *barrier.u.UAV_mut() = d3d12::D3D12_RESOURCE_UAV_BARRIER { + pResource: ptr::null_mut(), + }; + raw_barriers.push(barrier); + } + + + + + { + let mut barrier = d3d12::D3D12_RESOURCE_BARRIER { + Type: d3d12::D3D12_RESOURCE_BARRIER_TYPE_ALIASING, + Flags: d3d12::D3D12_RESOURCE_BARRIER_FLAG_NONE, + u: mem::zeroed(), + }; + *barrier.u.Aliasing_mut() = d3d12::D3D12_RESOURCE_ALIASING_BARRIER { + pResourceBefore: ptr::null_mut(), + pResourceAfter: ptr::null_mut(), + }; + raw_barriers.push(barrier); + } + + self.raw + .ResourceBarrier(raw_barriers.len() as _, raw_barriers.as_ptr()); + } + + unsafe fn clear_image( + &mut self, + image: &r::Image, + layout: image::Layout, + value: com::ClearValue, + subresource_ranges: T, + ) where + T: IntoIterator, + T::Item: Borrow, + { + let image = image.expect_bound(); + let base_state = conv::map_image_resource_state(image::Access::TRANSFER_WRITE, layout); + let mut raw_barriers = SmallVec::<[_; 4]>::new(); + + for subresource_range in subresource_ranges { + let sub = subresource_range.borrow(); + if sub.levels.end != 1 { + warn!("Clearing non-zero mipmap levels is not supported yet"); + } + let target_state = if sub.aspects.contains(Aspects::COLOR) { + d3d12::D3D12_RESOURCE_STATE_RENDER_TARGET + } else { + d3d12::D3D12_RESOURCE_STATE_DEPTH_WRITE + }; + + + + raw_barriers.clear(); + Self::fill_texture_barries(image, base_state .. target_state, sub, &mut raw_barriers); + self.raw + .ResourceBarrier(raw_barriers.len() as _, raw_barriers.as_ptr()); + + for layer in sub.layers.clone() { + if sub.aspects.contains(Aspects::COLOR) { + let rtv = image.clear_cv[layer as usize]; + self.clear_render_target_view(rtv, value.color, &[]); + } + if sub.aspects.contains(Aspects::DEPTH) { + let dsv = image.clear_dv[layer as usize]; + self.clear_depth_stencil_view(dsv, Some(value.depth_stencil.depth), None, &[]); + } + if sub.aspects.contains(Aspects::STENCIL) { + let dsv = image.clear_sv[layer as usize]; + self.clear_depth_stencil_view( + dsv, + None, + Some(value.depth_stencil.stencil as _), + &[], + ); + } + } + + + raw_barriers.clear(); + Self::fill_texture_barries(image, target_state .. 
base_state, sub, &mut raw_barriers); + self.raw + .ResourceBarrier(raw_barriers.len() as _, raw_barriers.as_ptr()); + } + } + + unsafe fn clear_attachments(&mut self, clears: T, rects: U) + where + T: IntoIterator, + T::Item: Borrow, + U: IntoIterator, + U::Item: Borrow, + { + let pass_cache = match self.pass_cache { + Some(ref cache) => cache, + None => panic!("`clear_attachments` can only be called inside a renderpass"), + }; + let sub_pass = &pass_cache.render_pass.subpasses[self.cur_subpass]; + + let clear_rects: SmallVec<[pso::ClearRect; 4]> = rects + .into_iter() + .map(|rect| rect.borrow().clone()) + .collect(); + + let device = self.shared.service_pipes.device; + + for clear in clears { + match *clear.borrow() { + com::AttachmentClear::Color { index, value } => { + let attachment = { + let rtv_id = sub_pass.color_attachments[index]; + pass_cache.framebuffer.attachments[rtv_id.0] + }; + + let mut rtv_pool = descriptors_cpu::HeapLinear::new( + device, + native::DescriptorHeapType::Rtv, + clear_rects.len(), + ); + + for clear_rect in &clear_rects { + assert!(attachment.layers.0 + clear_rect.layers.end <= attachment.layers.1); + let rect = [get_rect(&clear_rect.rect)]; + + let view_info = device::ViewInfo { + resource: attachment.resource, + kind: attachment.kind, + caps: image::ViewCapabilities::empty(), + view_kind: image::ViewKind::D2Array, + format: attachment.dxgi_format, + component_mapping: device::IDENTITY_MAPPING, + range: image::SubresourceRange { + aspects: Aspects::COLOR, + levels: attachment.mip_levels.0 .. attachment.mip_levels.1, + layers: attachment.layers.0 + clear_rect.layers.start + .. attachment.layers.0 + clear_rect.layers.end, + }, + }; + let rtv = rtv_pool.alloc_handle(); + Device::view_image_as_render_target_impl(device, rtv, view_info).unwrap(); + self.clear_render_target_view(rtv, value.into(), &rect); + } + + rtv_pool.destroy(); + } + com::AttachmentClear::DepthStencil { depth, stencil } => { + let attachment = { + let dsv_id = sub_pass.depth_stencil_attachment.unwrap(); + pass_cache.framebuffer.attachments[dsv_id.0] + }; + + let mut dsv_pool = descriptors_cpu::HeapLinear::new( + device, + native::DescriptorHeapType::Dsv, + clear_rects.len(), + ); + + for clear_rect in &clear_rects { + assert!(attachment.layers.0 + clear_rect.layers.end <= attachment.layers.1); + let rect = [get_rect(&clear_rect.rect)]; + + let view_info = device::ViewInfo { + resource: attachment.resource, + kind: attachment.kind, + caps: image::ViewCapabilities::empty(), + view_kind: image::ViewKind::D2Array, + format: attachment.dxgi_format, + component_mapping: device::IDENTITY_MAPPING, + range: image::SubresourceRange { + aspects: if depth.is_some() { + Aspects::DEPTH + } else { + Aspects::empty() + } | if stencil.is_some() { + Aspects::STENCIL + } else { + Aspects::empty() + }, + levels: attachment.mip_levels.0 .. attachment.mip_levels.1, + layers: attachment.layers.0 + clear_rect.layers.start + .. 
attachment.layers.0 + clear_rect.layers.end, + }, + }; + let dsv = dsv_pool.alloc_handle(); + Device::view_image_as_depth_stencil_impl(device, dsv, view_info).unwrap(); + self.clear_depth_stencil_view(dsv, depth, stencil, &rect); + } + + dsv_pool.destroy(); + } + } + } + } + + unsafe fn resolve_image( + &mut self, + src: &r::Image, + _src_layout: image::Layout, + dst: &r::Image, + _dst_layout: image::Layout, + regions: T, + ) where + T: IntoIterator, + T::Item: Borrow, + { + let src = src.expect_bound(); + let dst = dst.expect_bound(); + assert_eq!(src.descriptor.Format, dst.descriptor.Format); + + { + + + let transition_barrier = + Self::transition_barrier(d3d12::D3D12_RESOURCE_TRANSITION_BARRIER { + pResource: dst.resource.as_mut_ptr(), + Subresource: d3d12::D3D12_RESOURCE_BARRIER_ALL_SUBRESOURCES, + StateBefore: d3d12::D3D12_RESOURCE_STATE_COPY_DEST, + StateAfter: d3d12::D3D12_RESOURCE_STATE_RESOLVE_DEST, + }); + self.raw.ResourceBarrier(1, &transition_barrier); + } + + for region in regions { + let r = region.borrow(); + for layer in 0 .. r.extent.depth as UINT { + self.raw.ResolveSubresource( + src.resource.as_mut_ptr(), + src.calc_subresource( + r.src_subresource.level as UINT, + r.src_subresource.layers.start as UINT + layer, + 0, + ), + dst.resource.as_mut_ptr(), + dst.calc_subresource( + r.dst_subresource.level as UINT, + r.dst_subresource.layers.start as UINT + layer, + 0, + ), + src.descriptor.Format, + ); + } + } + + { + + let transition_barrier = + Self::transition_barrier(d3d12::D3D12_RESOURCE_TRANSITION_BARRIER { + pResource: dst.resource.as_mut_ptr(), + Subresource: d3d12::D3D12_RESOURCE_BARRIER_ALL_SUBRESOURCES, + StateBefore: d3d12::D3D12_RESOURCE_STATE_RESOLVE_DEST, + StateAfter: d3d12::D3D12_RESOURCE_STATE_COPY_DEST, + }); + self.raw.ResourceBarrier(1, &transition_barrier); + } + } + + unsafe fn blit_image( + &mut self, + src: &r::Image, + _src_layout: image::Layout, + dst: &r::Image, + _dst_layout: image::Layout, + filter: image::Filter, + regions: T, + ) where + T: IntoIterator, + T::Item: Borrow, + { + let device = self.shared.service_pipes.device.clone(); + let src = src.expect_bound(); + let dst = dst.expect_bound(); + + + + + + match (src.kind, dst.kind) { + (image::Kind::D2(..), image::Kind::D2(..)) => {} + _ => unimplemented!(), + } + + + let (srv_heap, _) = device.create_descriptor_heap( + 1, + native::DescriptorHeapType::CbvSrvUav, + native::DescriptorHeapFlags::SHADER_VISIBLE, + 0, + ); + let srv_desc = Device::build_image_as_shader_resource_desc(&device::ViewInfo { + resource: src.resource, + kind: src.kind, + caps: src.view_caps, + view_kind: image::ViewKind::D2Array, + format: src.default_view_format.unwrap(), + component_mapping: device::IDENTITY_MAPPING, + range: image::SubresourceRange { + aspects: format::Aspects::COLOR, + levels: 0 .. src.descriptor.MipLevels as _, + layers: 0 .. 
src.kind.num_layers(), + }, + }) + .unwrap(); + device.CreateShaderResourceView( + src.resource.as_mut_ptr(), + &srv_desc, + srv_heap.start_cpu_descriptor(), + ); + self.raw.set_descriptor_heaps(&[srv_heap]); + self.temporary_gpu_heaps.push(srv_heap); + + let filter = match filter { + image::Filter::Nearest => d3d12::D3D12_FILTER_MIN_MAG_MIP_POINT, + image::Filter::Linear => d3d12::D3D12_FILTER_MIN_MAG_LINEAR_MIP_POINT, + }; + + struct Instance { + rtv: d3d12::D3D12_CPU_DESCRIPTOR_HANDLE, + viewport: d3d12::D3D12_VIEWPORT, + data: internal::BlitData, + }; + let mut instances = FastHashMap::>::default(); + let mut barriers = Vec::new(); + + for region in regions { + let r = region.borrow(); + + let first_layer = r.dst_subresource.layers.start; + let num_layers = r.dst_subresource.layers.end - first_layer; + + + let rtv_pool = Device::create_descriptor_heap_impl( + device, + native::DescriptorHeapType::Rtv, + false, + num_layers as _, + ); + self.rtv_pools.push(rtv_pool.raw.clone()); + + let key = match r.dst_subresource.aspects { + format::Aspects::COLOR => { + let format = dst.default_view_format.unwrap(); + + for i in 0 .. num_layers { + let mut desc = d3d12::D3D12_RENDER_TARGET_VIEW_DESC { + Format: format, + ViewDimension: d3d12::D3D12_RTV_DIMENSION_TEXTURE2DARRAY, + u: mem::zeroed(), + }; + + *desc.u.Texture2DArray_mut() = d3d12::D3D12_TEX2D_ARRAY_RTV { + MipSlice: r.dst_subresource.level as _, + FirstArraySlice: (i + first_layer) as u32, + ArraySize: 1, + PlaneSlice: 0, + }; + + let view = rtv_pool.at(i as _, 0).cpu; + device.CreateRenderTargetView(dst.resource.as_mut_ptr(), &desc, view); + } + + (format, filter) + } + _ => unimplemented!(), + }; + + + let viewport = d3d12::D3D12_VIEWPORT { + TopLeftX: cmp::min(r.dst_bounds.start.x, r.dst_bounds.end.x) as _, + TopLeftY: cmp::min(r.dst_bounds.start.y, r.dst_bounds.end.y) as _, + Width: (r.dst_bounds.end.x - r.dst_bounds.start.x).abs() as _, + Height: (r.dst_bounds.end.y - r.dst_bounds.start.y).abs() as _, + MinDepth: 0.0, + MaxDepth: 1.0, + }; + + let list = instances.entry(key).or_insert(Vec::new()); + + for i in 0 .. num_layers { + let src_layer = r.src_subresource.layers.start + i; + + let data = { + + let (sx, dx) = if r.dst_bounds.start.x > r.dst_bounds.end.x { + ( + r.src_bounds.end.x, + r.src_bounds.start.x - r.src_bounds.end.x, + ) + } else { + ( + r.src_bounds.start.x, + r.src_bounds.end.x - r.src_bounds.start.x, + ) + }; + let (sy, dy) = if r.dst_bounds.start.y > r.dst_bounds.end.y { + ( + r.src_bounds.end.y, + r.src_bounds.start.y - r.src_bounds.end.y, + ) + } else { + ( + r.src_bounds.start.y, + r.src_bounds.end.y - r.src_bounds.start.y, + ) + }; + let image::Extent { width, height, .. 
} = + src.kind.level_extent(r.src_subresource.level); + + internal::BlitData { + src_offset: [sx as f32 / width as f32, sy as f32 / height as f32], + src_extent: [dx as f32 / width as f32, dy as f32 / height as f32], + layer: src_layer as f32, + level: r.src_subresource.level as _, + } + }; + + list.push(Instance { + rtv: rtv_pool.at(i as _, 0).cpu, + viewport, + data, + }); + + barriers.push(Self::transition_barrier( + d3d12::D3D12_RESOURCE_TRANSITION_BARRIER { + pResource: dst.resource.as_mut_ptr(), + Subresource: dst.calc_subresource( + r.dst_subresource.level as _, + (first_layer + i) as _, + 0, + ), + StateBefore: d3d12::D3D12_RESOURCE_STATE_COPY_DEST, + StateAfter: d3d12::D3D12_RESOURCE_STATE_RENDER_TARGET, + }, + )); + } + } + + + self.raw + .ResourceBarrier(barriers.len() as _, barriers.as_ptr()); + + self.set_internal_graphics_pipeline(); + for (key, list) in instances { + let blit = self.shared.service_pipes.get_blit_2d_color(key); + self.raw + .IASetPrimitiveTopology(d3dcommon::D3D_PRIMITIVE_TOPOLOGY_TRIANGLELIST); + self.raw.set_pipeline_state(blit.pipeline); + self.raw.set_graphics_root_signature(blit.signature); + self.raw + .set_graphics_root_descriptor_table(0, srv_heap.start_gpu_descriptor()); + for inst in list { + let scissor = d3d12::D3D12_RECT { + left: inst.viewport.TopLeftX as _, + top: inst.viewport.TopLeftY as _, + right: (inst.viewport.TopLeftX + inst.viewport.Width) as _, + bottom: (inst.viewport.TopLeftY + inst.viewport.Height) as _, + }; + self.raw.RSSetViewports(1, &inst.viewport); + self.raw.RSSetScissorRects(1, &scissor); + self.raw.SetGraphicsRoot32BitConstants( + 1, + (mem::size_of::() / 4) as _, + &inst.data as *const _ as *const _, + 0, + ); + self.raw.OMSetRenderTargets(1, &inst.rtv, TRUE, ptr::null()); + self.raw.draw(3, 1, 0, 0); + } + } + + for bar in &mut barriers { + let transition = bar.u.Transition_mut(); + mem::swap(&mut transition.StateBefore, &mut transition.StateAfter); + } + self.raw + .ResourceBarrier(barriers.len() as _, barriers.as_ptr()); + + + self.raw + .RSSetViewports(self.viewport_cache.len() as _, self.viewport_cache.as_ptr()); + self.raw + .RSSetScissorRects(self.scissor_cache.len() as _, self.scissor_cache.as_ptr()); + if self.primitive_topology != d3dcommon::D3D_PRIMITIVE_TOPOLOGY_UNDEFINED { + self.raw.IASetPrimitiveTopology(self.primitive_topology); + } + } + + unsafe fn bind_index_buffer(&mut self, ibv: buffer::IndexBufferView) { + let buffer = ibv.buffer.expect_bound(); + let format = match ibv.index_type { + IndexType::U16 => dxgiformat::DXGI_FORMAT_R16_UINT, + IndexType::U32 => dxgiformat::DXGI_FORMAT_R32_UINT, + }; + let location = buffer.resource.gpu_virtual_address(); + self.raw.set_index_buffer( + location + ibv.offset, + (buffer.requirements.size - ibv.offset) as u32, + format, + ); + } + + unsafe fn bind_vertex_buffers(&mut self, first_binding: pso::BufferIndex, buffers: I) + where + I: IntoIterator, + T: Borrow, + { + assert!(first_binding as usize <= MAX_VERTEX_BUFFERS); + + for (view, (buffer, offset)) in self.vertex_buffer_views[first_binding as _ ..] 
+ .iter_mut() + .zip(buffers) + { + let b = buffer.borrow().expect_bound(); + let base = (*b.resource).GetGPUVirtualAddress(); + view.BufferLocation = base + offset; + view.SizeInBytes = (b.requirements.size - offset) as u32; + } + self.set_vertex_buffers(); + } + + unsafe fn set_viewports(&mut self, first_viewport: u32, viewports: T) + where + T: IntoIterator, + T::Item: Borrow, + { + let viewports = viewports + .into_iter() + .map(|viewport| { + let viewport = viewport.borrow(); + d3d12::D3D12_VIEWPORT { + TopLeftX: viewport.rect.x as _, + TopLeftY: viewport.rect.y as _, + Width: viewport.rect.w as _, + Height: viewport.rect.h as _, + MinDepth: viewport.depth.start, + MaxDepth: viewport.depth.end, + } + }) + .enumerate(); + + for (i, viewport) in viewports { + if i + first_viewport as usize >= self.viewport_cache.len() { + self.viewport_cache.push(viewport); + } else { + self.viewport_cache[i + first_viewport as usize] = viewport; + } + } + + self.raw + .RSSetViewports(self.viewport_cache.len() as _, self.viewport_cache.as_ptr()); + } + + unsafe fn set_scissors(&mut self, first_scissor: u32, scissors: T) + where + T: IntoIterator, + T::Item: Borrow, + { + let rects = scissors + .into_iter() + .map(|rect| get_rect(rect.borrow())) + .enumerate(); + + for (i, rect) in rects { + if i + first_scissor as usize >= self.scissor_cache.len() { + self.scissor_cache.push(rect); + } else { + self.scissor_cache[i + first_scissor as usize] = rect; + } + } + + self.raw + .RSSetScissorRects(self.scissor_cache.len() as _, self.scissor_cache.as_ptr()) + } + + unsafe fn set_blend_constants(&mut self, color: pso::ColorValue) { + self.raw.set_blend_factor(color); + } + + unsafe fn set_stencil_reference(&mut self, faces: pso::Face, value: pso::StencilValue) { + assert!(!faces.is_empty()); + + if !faces.is_all() { + warn!( + "Stencil ref values set for both faces but only one was requested ({})", + faces.bits(), + ); + } + + self.raw.set_stencil_reference(value as _); + } + + unsafe fn set_stencil_read_mask(&mut self, _faces: pso::Face, _value: pso::StencilValue) { + unimplemented!(); + } + + unsafe fn set_stencil_write_mask(&mut self, _faces: pso::Face, _value: pso::StencilValue) { + unimplemented!(); + } + + unsafe fn set_depth_bounds(&mut self, bounds: Range) { + let (cmd_list1, hr) = self.raw.cast::(); + if winerror::SUCCEEDED(hr) { + cmd_list1.OMSetDepthBounds(bounds.start, bounds.end); + cmd_list1.destroy(); + } else { + warn!("Depth bounds test is not supported"); + } + } + + unsafe fn set_line_width(&mut self, width: f32) { + validate_line_width(width); + } + + unsafe fn set_depth_bias(&mut self, _depth_bias: pso::DepthBias) { + unimplemented!() + } + + unsafe fn bind_graphics_pipeline(&mut self, pipeline: &r::GraphicsPipeline) { + match self.gr_pipeline.pipeline { + Some((_, signature)) if signature == pipeline.signature => { + + } + _ => { + self.raw.set_graphics_root_signature(pipeline.signature); + self.gr_pipeline.num_parameter_slots = pipeline.num_parameter_slots; + self.gr_pipeline.root_constants = pipeline.constants.clone(); + + self.gr_pipeline.user_data.dirty_all(); + } + } + self.raw.set_pipeline_state(pipeline.raw); + self.raw.IASetPrimitiveTopology(pipeline.topology); + self.primitive_topology = pipeline.topology; + + self.active_bindpoint = BindPoint::Graphics { internal: false }; + self.gr_pipeline.pipeline = Some((pipeline.raw, pipeline.signature)); + self.vertex_bindings_remap = pipeline.vertex_bindings; + + self.set_vertex_buffers(); + + if let Some(ref vp) = 
pipeline.baked_states.viewport { + self.set_viewports(0, iter::once(vp)); + } + if let Some(ref rect) = pipeline.baked_states.scissor { + self.set_scissors(0, iter::once(rect)); + } + if let Some(color) = pipeline.baked_states.blend_color { + self.set_blend_constants(color); + } + if let Some(ref bounds) = pipeline.baked_states.depth_bounds { + self.set_depth_bounds(bounds.clone()); + } + } + + unsafe fn bind_graphics_descriptor_sets<'a, I, J>( + &mut self, + layout: &r::PipelineLayout, + first_set: usize, + sets: I, + offsets: J, + ) where + I: IntoIterator, + I::Item: Borrow, + J: IntoIterator, + J::Item: Borrow, + { + self.active_descriptor_heaps = self + .gr_pipeline + .bind_descriptor_sets(layout, first_set, sets, offsets); + self.bind_descriptor_heaps(); + } + + unsafe fn bind_compute_pipeline(&mut self, pipeline: &r::ComputePipeline) { + match self.comp_pipeline.pipeline { + Some((_, signature)) if signature == pipeline.signature => { + + } + _ => { + self.raw.set_compute_root_signature(pipeline.signature); + self.comp_pipeline.num_parameter_slots = pipeline.num_parameter_slots; + self.comp_pipeline.root_constants = pipeline.constants.clone(); + + self.comp_pipeline.user_data.dirty_all(); + } + } + self.raw.set_pipeline_state(pipeline.raw); + + self.active_bindpoint = BindPoint::Compute; + self.comp_pipeline.pipeline = Some((pipeline.raw, pipeline.signature)); + } + + unsafe fn bind_compute_descriptor_sets( + &mut self, + layout: &r::PipelineLayout, + first_set: usize, + sets: I, + offsets: J, + ) where + I: IntoIterator, + I::Item: Borrow, + J: IntoIterator, + J::Item: Borrow, + { + self.active_descriptor_heaps = self + .comp_pipeline + .bind_descriptor_sets(layout, first_set, sets, offsets); + self.bind_descriptor_heaps(); + } + + unsafe fn dispatch(&mut self, count: WorkGroupCount) { + self.set_compute_bind_point(); + self.raw.dispatch(count); + } + + unsafe fn dispatch_indirect(&mut self, buffer: &r::Buffer, offset: buffer::Offset) { + let buffer = buffer.expect_bound(); + self.set_compute_bind_point(); + self.raw.ExecuteIndirect( + self.shared.signatures.dispatch.as_mut_ptr(), + 1, + buffer.resource.as_mut_ptr(), + offset, + ptr::null_mut(), + 0, + ); + } + + unsafe fn fill_buffer(&mut self, buffer: &r::Buffer, range: R, _data: u32) + where + R: RangeArg, + { + let buffer = buffer.expect_bound(); + assert!( + buffer.clear_uav.is_some(), + "Buffer needs to be created with usage `TRANSFER_DST`" + ); + let bytes_per_unit = 4; + let start = *range.start().unwrap_or(&0) as i32; + let end = *range.end().unwrap_or(&(buffer.requirements.size as u64)) as i32; + if start % 4 != 0 || end % 4 != 0 { + warn!("Fill buffer bounds have to be multiples of 4"); + } + let _rect = d3d12::D3D12_RECT { + left: start / bytes_per_unit, + top: 0, + right: end / bytes_per_unit, + bottom: 1, + }; + + + + let pre_barrier = Self::transition_barrier(d3d12::D3D12_RESOURCE_TRANSITION_BARRIER { + pResource: buffer.resource.as_mut_ptr(), + Subresource: d3d12::D3D12_RESOURCE_BARRIER_ALL_SUBRESOURCES, + StateBefore: d3d12::D3D12_RESOURCE_STATE_COPY_DEST, + StateAfter: d3d12::D3D12_RESOURCE_STATE_UNORDERED_ACCESS, + }); + self.raw.ResourceBarrier(1, &pre_barrier); + + error!("fill_buffer currently unimplemented"); + + + + + + + + + + + + + + + + + let post_barrier = Self::transition_barrier(d3d12::D3D12_RESOURCE_TRANSITION_BARRIER { + pResource: buffer.resource.as_mut_ptr(), + Subresource: d3d12::D3D12_RESOURCE_BARRIER_ALL_SUBRESOURCES, + StateBefore: d3d12::D3D12_RESOURCE_STATE_UNORDERED_ACCESS, + StateAfter: 
d3d12::D3D12_RESOURCE_STATE_COPY_DEST, + }); + self.raw.ResourceBarrier(1, &post_barrier); + } + + unsafe fn update_buffer(&mut self, _buffer: &r::Buffer, _offset: buffer::Offset, _data: &[u8]) { + unimplemented!() + } + + unsafe fn copy_buffer(&mut self, src: &r::Buffer, dst: &r::Buffer, regions: T) + where + T: IntoIterator, + T::Item: Borrow, + { + let src = src.expect_bound(); + let dst = dst.expect_bound(); + + for region in regions { + let region = region.borrow(); + self.raw.CopyBufferRegion( + dst.resource.as_mut_ptr(), + region.dst as _, + src.resource.as_mut_ptr(), + region.src as _, + region.size as _, + ); + } + + + } + + unsafe fn copy_image( + &mut self, + src: &r::Image, + _: image::Layout, + dst: &r::Image, + _: image::Layout, + regions: T, + ) where + T: IntoIterator, + T::Item: Borrow, + { + let src = src.expect_bound(); + let dst = dst.expect_bound(); + let mut src_image = d3d12::D3D12_TEXTURE_COPY_LOCATION { + pResource: src.resource.as_mut_ptr(), + Type: d3d12::D3D12_TEXTURE_COPY_TYPE_SUBRESOURCE_INDEX, + u: mem::zeroed(), + }; + let mut dst_image = d3d12::D3D12_TEXTURE_COPY_LOCATION { + pResource: dst.resource.as_mut_ptr(), + Type: d3d12::D3D12_TEXTURE_COPY_TYPE_SUBRESOURCE_INDEX, + u: mem::zeroed(), + }; + + let device = self.shared.service_pipes.device.clone(); + let src_desc = src.surface_type.desc(); + let dst_desc = dst.surface_type.desc(); + assert_eq!(src_desc.bits, dst_desc.bits); + + + let do_alias = src.surface_type != dst.surface_type + && src_desc.is_compressed() == dst_desc.is_compressed(); + + if do_alias { + + + + + let mut alias = native::Resource::null(); + let desc = d3d12::D3D12_RESOURCE_DESC { + Format: dst.descriptor.Format, + ..src.descriptor.clone() + }; + let (heap_ptr, offset) = match src.place { + r::Place::SwapChain => { + error!("Unable to copy from a swapchain image with format conversion: {:?} -> {:?}", + src.descriptor.Format, dst.descriptor.Format); + return; + } + r::Place::Heap { ref raw, offset } => (raw.as_mut_ptr(), offset), + }; + assert_eq!( + winerror::S_OK, + device.CreatePlacedResource( + heap_ptr, + offset, + &desc, + d3d12::D3D12_RESOURCE_STATE_COMMON, + ptr::null(), + &d3d12::ID3D12Resource::uuidof(), + alias.mut_void(), + ) + ); + src_image.pResource = alias.as_mut_ptr(); + self.retained_resources.push(alias); + + + let sub_barrier = d3d12::D3D12_RESOURCE_ALIASING_BARRIER { + pResourceBefore: src.resource.as_mut_ptr(), + pResourceAfter: src_image.pResource, + }; + let mut barrier = d3d12::D3D12_RESOURCE_BARRIER { + Type: d3d12::D3D12_RESOURCE_BARRIER_TYPE_ALIASING, + Flags: d3d12::D3D12_RESOURCE_BARRIER_FLAG_NONE, + u: mem::zeroed(), + }; + *barrier.u.Aliasing_mut() = sub_barrier; + self.raw.ResourceBarrier(1, &barrier as *const _); + } + + for region in regions { + let r = region.borrow(); + debug_assert_eq!( + r.src_subresource.layers.len(), + r.dst_subresource.layers.len() + ); + let src_box = d3d12::D3D12_BOX { + left: r.src_offset.x as _, + top: r.src_offset.y as _, + right: (r.src_offset.x + r.extent.width as i32) as _, + bottom: (r.src_offset.y + r.extent.height as i32) as _, + front: r.src_offset.z as _, + back: (r.src_offset.z + r.extent.depth as i32) as _, + }; + + for (src_layer, dst_layer) in r + .src_subresource + .layers + .clone() + .zip(r.dst_subresource.layers.clone()) + { + *src_image.u.SubresourceIndex_mut() = + src.calc_subresource(r.src_subresource.level as _, src_layer as _, 0); + *dst_image.u.SubresourceIndex_mut() = + dst.calc_subresource(r.dst_subresource.level as _, dst_layer as _, 0); + 
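+                // Each (mip level, array layer) pair is a separate D3D12 subresource,
+                // so one CopyTextureRegion is issued per zipped src/dst layer pair,
+                // with src_box restricting the copied region.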
self.raw.CopyTextureRegion( + &dst_image, + r.dst_offset.x as _, + r.dst_offset.y as _, + r.dst_offset.z as _, + &src_image, + &src_box, + ); + } + } + + if do_alias { + + let sub_barrier = d3d12::D3D12_RESOURCE_ALIASING_BARRIER { + pResourceBefore: src_image.pResource, + pResourceAfter: src.resource.as_mut_ptr(), + }; + let mut barrier = d3d12::D3D12_RESOURCE_BARRIER { + Type: d3d12::D3D12_RESOURCE_BARRIER_TYPE_ALIASING, + Flags: d3d12::D3D12_RESOURCE_BARRIER_FLAG_NONE, + u: mem::zeroed(), + }; + *barrier.u.Aliasing_mut() = sub_barrier; + self.raw.ResourceBarrier(1, &barrier as *const _); + } + } + + unsafe fn copy_buffer_to_image( + &mut self, + buffer: &r::Buffer, + image: &r::Image, + _: image::Layout, + regions: T, + ) where + T: IntoIterator, + T::Item: Borrow, + { + let buffer = buffer.expect_bound(); + let image = image.expect_bound(); + assert!(self.copies.is_empty()); + + for region in regions { + let r = region.borrow(); + Self::split_buffer_copy(&mut self.copies, r, image); + } + + if self.copies.is_empty() { + return; + } + + let mut src = d3d12::D3D12_TEXTURE_COPY_LOCATION { + pResource: buffer.resource.as_mut_ptr(), + Type: d3d12::D3D12_TEXTURE_COPY_TYPE_PLACED_FOOTPRINT, + u: mem::zeroed(), + }; + let mut dst = d3d12::D3D12_TEXTURE_COPY_LOCATION { + pResource: image.resource.as_mut_ptr(), + Type: d3d12::D3D12_TEXTURE_COPY_TYPE_SUBRESOURCE_INDEX, + u: mem::zeroed(), + }; + + for c in self.copies.drain(..) { + let src_box = d3d12::D3D12_BOX { + left: c.buf_offset.x as u32, + top: c.buf_offset.y as u32, + right: c.buf_offset.x as u32 + c.copy_extent.width, + bottom: c.buf_offset.y as u32 + c.copy_extent.height, + front: c.buf_offset.z as u32, + back: c.buf_offset.z as u32 + c.copy_extent.depth, + }; + let footprint = d3d12::D3D12_PLACED_SUBRESOURCE_FOOTPRINT { + Offset: c.footprint_offset, + Footprint: d3d12::D3D12_SUBRESOURCE_FOOTPRINT { + Format: image.descriptor.Format, + Width: c.footprint.width, + Height: c.footprint.height, + Depth: c.footprint.depth, + RowPitch: c.row_pitch, + }, + }; + *src.u.PlacedFootprint_mut() = footprint; + *dst.u.SubresourceIndex_mut() = c.img_subresource; + self.raw.CopyTextureRegion( + &dst, + c.img_offset.x as _, + c.img_offset.y as _, + c.img_offset.z as _, + &src, + &src_box, + ); + } + } + + unsafe fn copy_image_to_buffer( + &mut self, + image: &r::Image, + _: image::Layout, + buffer: &r::Buffer, + regions: T, + ) where + T: IntoIterator, + T::Item: Borrow, + { + let image = image.expect_bound(); + let buffer = buffer.expect_bound(); + assert!(self.copies.is_empty()); + + for region in regions { + let r = region.borrow(); + Self::split_buffer_copy(&mut self.copies, r, image); + } + + if self.copies.is_empty() { + return; + } + + let mut src = d3d12::D3D12_TEXTURE_COPY_LOCATION { + pResource: image.resource.as_mut_ptr(), + Type: d3d12::D3D12_TEXTURE_COPY_TYPE_SUBRESOURCE_INDEX, + u: mem::zeroed(), + }; + let mut dst = d3d12::D3D12_TEXTURE_COPY_LOCATION { + pResource: buffer.resource.as_mut_ptr(), + Type: d3d12::D3D12_TEXTURE_COPY_TYPE_PLACED_FOOTPRINT, + u: mem::zeroed(), + }; + + for c in self.copies.drain(..) 
{
+            let src_box = d3d12::D3D12_BOX {
+                left: c.img_offset.x as u32,
+                top: c.img_offset.y as u32,
+                right: c.img_offset.x as u32 + c.copy_extent.width,
+                bottom: c.img_offset.y as u32 + c.copy_extent.height,
+                front: c.img_offset.z as u32,
+                back: c.img_offset.z as u32 + c.copy_extent.depth,
+            };
+            let footprint = d3d12::D3D12_PLACED_SUBRESOURCE_FOOTPRINT {
+                Offset: c.footprint_offset,
+                Footprint: d3d12::D3D12_SUBRESOURCE_FOOTPRINT {
+                    Format: image.descriptor.Format,
+                    Width: c.footprint.width,
+                    Height: c.footprint.height,
+                    Depth: c.footprint.depth,
+                    RowPitch: c.row_pitch,
+                },
+            };
+            *dst.u.PlacedFootprint_mut() = footprint;
+            *src.u.SubresourceIndex_mut() = c.img_subresource;
+            self.raw.CopyTextureRegion(
+                &dst,
+                c.buf_offset.x as _,
+                c.buf_offset.y as _,
+                c.buf_offset.z as _,
+                &src,
+                &src_box,
+            );
+        }
+    }
+
+    unsafe fn draw(&mut self, vertices: Range<VertexCount>, instances: Range<InstanceCount>) {
+        self.set_graphics_bind_point();
+        self.raw.draw(
+            vertices.end - vertices.start,
+            instances.end - instances.start,
+            vertices.start,
+            instances.start,
+        );
+    }
+
+    unsafe fn draw_indexed(
+        &mut self,
+        indices: Range<IndexCount>,
+        base_vertex: VertexOffset,
+        instances: Range<InstanceCount>,
+    ) {
+        self.set_graphics_bind_point();
+        self.raw.draw_indexed(
+            indices.end - indices.start,
+            instances.end - instances.start,
+            indices.start,
+            base_vertex,
+            instances.start,
+        );
+    }
+
+    unsafe fn draw_indirect(
+        &mut self,
+        buffer: &r::Buffer,
+        offset: buffer::Offset,
+        draw_count: DrawCount,
+        stride: u32,
+    ) {
+        assert_eq!(stride, 16);
+        let buffer = buffer.expect_bound();
+        self.set_graphics_bind_point();
+        self.raw.ExecuteIndirect(
+            self.shared.signatures.draw.as_mut_ptr(),
+            draw_count,
+            buffer.resource.as_mut_ptr(),
+            offset,
+            ptr::null_mut(),
+            0,
+        );
+    }
+
+    unsafe fn draw_indexed_indirect(
+        &mut self,
+        buffer: &r::Buffer,
+        offset: buffer::Offset,
+        draw_count: DrawCount,
+        stride: u32,
+    ) {
+        assert_eq!(stride, 20);
+        let buffer = buffer.expect_bound();
+        self.set_graphics_bind_point();
+        self.raw.ExecuteIndirect(
+            self.shared.signatures.draw_indexed.as_mut_ptr(),
+            draw_count,
+            buffer.resource.as_mut_ptr(),
+            offset,
+            ptr::null_mut(),
+            0,
+        );
+    }
+
+    unsafe fn set_event(&mut self, _: &(), _: pso::PipelineStage) {
+        unimplemented!()
+    }
+
+    unsafe fn reset_event(&mut self, _: &(), _: pso::PipelineStage) {
+        unimplemented!()
+    }
+
+    unsafe fn wait_events<'a, I, J>(&mut self, _: I, _: Range<pso::PipelineStage>, _: J)
+    where
+        I: IntoIterator,
+        I::Item: Borrow<()>,
+        J: IntoIterator,
+        J::Item: Borrow<memory::Barrier<'a, Backend>>,
+    {
+        unimplemented!()
+    }
+
+    unsafe fn begin_query(&mut self, query: query::Query<Backend>, flags: query::ControlFlags) {
+        let query_ty = match query.pool.ty {
+            native::QueryHeapType::Occlusion => {
+                if flags.contains(query::ControlFlags::PRECISE) {
+                    self.occlusion_query = Some(OcclusionQuery::Precise(query.id));
+                    d3d12::D3D12_QUERY_TYPE_OCCLUSION
+                } else {
+                    self.occlusion_query = Some(OcclusionQuery::Binary(query.id));
+                    d3d12::D3D12_QUERY_TYPE_BINARY_OCCLUSION
+                }
+            }
+            native::QueryHeapType::Timestamp => panic!("Timestamp queries are issued via write_timestamp"),
+            native::QueryHeapType::PipelineStatistics => {
+                self.pipeline_stats_query = Some(query.id);
+                d3d12::D3D12_QUERY_TYPE_PIPELINE_STATISTICS
+            }
+            _ => unreachable!(),
+        };
+
+        self.raw
+            .BeginQuery(query.pool.raw.as_mut_ptr(), query_ty, query.id);
+    }
+
+    unsafe fn end_query(&mut self, query: query::Query<Backend>) {
+        let id = query.id;
+        let query_ty = match query.pool.ty {
+            native::QueryHeapType::Occlusion
+                if self.occlusion_query == Some(OcclusionQuery::Precise(id)) =>
+            {
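+                // `begin_query` recorded which occlusion flavour was started;
+                // the matching D3D12 query type has to be passed to EndQuery below.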
self.occlusion_query = None; + d3d12::D3D12_QUERY_TYPE_OCCLUSION + } + native::QueryHeapType::Occlusion + if self.occlusion_query == Some(OcclusionQuery::Binary(id)) => + { + self.occlusion_query = None; + d3d12::D3D12_QUERY_TYPE_BINARY_OCCLUSION + } + native::QueryHeapType::PipelineStatistics + if self.pipeline_stats_query == Some(id) => + { + self.pipeline_stats_query = None; + d3d12::D3D12_QUERY_TYPE_PIPELINE_STATISTICS + } + _ => panic!("Missing `begin_query` call for query: {:?}", query), + }; + + self.raw.EndQuery(query.pool.raw.as_mut_ptr(), query_ty, id); + } + + unsafe fn reset_query_pool(&mut self, _pool: &r::QueryPool, _queries: Range) { + + + + + + + } + + unsafe fn copy_query_pool_results( + &mut self, + _pool: &r::QueryPool, + _queries: Range, + _buffer: &r::Buffer, + _offset: buffer::Offset, + _stride: buffer::Offset, + _flags: query::ResultFlags, + ) { + unimplemented!() + } + + unsafe fn write_timestamp(&mut self, _: pso::PipelineStage, query: query::Query) { + self.raw.EndQuery( + query.pool.raw.as_mut_ptr(), + d3d12::D3D12_QUERY_TYPE_TIMESTAMP, + query.id, + ); + } + + unsafe fn push_graphics_constants( + &mut self, + _layout: &r::PipelineLayout, + _stages: pso::ShaderStageFlags, + offset: u32, + constants: &[u32], + ) { + assert!(offset % 4 == 0); + self.gr_pipeline + .user_data + .set_constants(offset as usize / 4, constants); + } + + unsafe fn push_compute_constants( + &mut self, + _layout: &r::PipelineLayout, + offset: u32, + constants: &[u32], + ) { + assert!(offset % 4 == 0); + self.comp_pipeline + .user_data + .set_constants(offset as usize / 4, constants); + } + + unsafe fn execute_commands<'a, T, I>(&mut self, cmd_buffers: I) + where + T: 'a + Borrow, + I: IntoIterator, + { + for _cmd_buf in cmd_buffers { + error!("TODO: execute_commands"); + } + } +} diff --git a/third_party/rust/gfx-backend-dx12/src/conv.rs b/third_party/rust/gfx-backend-dx12/src/conv.rs new file mode 100644 index 000000000000..b9ca40cd3fd9 --- /dev/null +++ b/third_party/rust/gfx-backend-dx12/src/conv.rs @@ -0,0 +1,633 @@ +use validate_line_width; + +use spirv_cross::spirv; +use std::mem; + +use winapi::shared::basetsd::UINT8; +use winapi::shared::dxgiformat::*; +use winapi::shared::minwindef::{FALSE, INT, TRUE, UINT}; +use winapi::um::d3d12::*; +use winapi::um::d3dcommon::*; + +use hal::format::{Format, ImageFeature, SurfaceType, Swizzle}; +use hal::{buffer, image, pso}; + +use native::ShaderVisibility; + +pub fn map_format(format: Format) -> Option { + use hal::format::Format::*; + + + let reverse = unsafe { 1 == *(&1u32 as *const _ as *const u8) }; + let format = match format { + Bgra4Unorm if !reverse => DXGI_FORMAT_B4G4R4A4_UNORM, + R5g6b5Unorm if reverse => DXGI_FORMAT_B5G6R5_UNORM, + B5g6r5Unorm if !reverse => DXGI_FORMAT_B5G6R5_UNORM, + B5g5r5a1Unorm if !reverse => DXGI_FORMAT_B5G5R5A1_UNORM, + A1r5g5b5Unorm if reverse => DXGI_FORMAT_B5G5R5A1_UNORM, + R8Unorm => DXGI_FORMAT_R8_UNORM, + R8Snorm => DXGI_FORMAT_R8_SNORM, + R8Uint => DXGI_FORMAT_R8_UINT, + R8Sint => DXGI_FORMAT_R8_SINT, + Rg8Unorm => DXGI_FORMAT_R8G8_UNORM, + Rg8Snorm => DXGI_FORMAT_R8G8_SNORM, + Rg8Uint => DXGI_FORMAT_R8G8_UINT, + Rg8Sint => DXGI_FORMAT_R8G8_SINT, + Rgba8Unorm => DXGI_FORMAT_R8G8B8A8_UNORM, + Rgba8Snorm => DXGI_FORMAT_R8G8B8A8_SNORM, + Rgba8Uint => DXGI_FORMAT_R8G8B8A8_UINT, + Rgba8Sint => DXGI_FORMAT_R8G8B8A8_SINT, + Rgba8Srgb => DXGI_FORMAT_R8G8B8A8_UNORM_SRGB, + Bgra8Unorm => DXGI_FORMAT_B8G8R8A8_UNORM, + Bgra8Srgb => DXGI_FORMAT_B8G8R8A8_UNORM_SRGB, + Abgr8Unorm if reverse => 
DXGI_FORMAT_R8G8B8A8_UNORM,
+        Abgr8Snorm if reverse => DXGI_FORMAT_R8G8B8A8_SNORM,
+        Abgr8Uint if reverse => DXGI_FORMAT_R8G8B8A8_UINT,
+        Abgr8Sint if reverse => DXGI_FORMAT_R8G8B8A8_SINT,
+        Abgr8Srgb if reverse => DXGI_FORMAT_R8G8B8A8_UNORM_SRGB,
+        A2b10g10r10Unorm if reverse => DXGI_FORMAT_R10G10B10A2_UNORM,
+        A2b10g10r10Uint if reverse => DXGI_FORMAT_R10G10B10A2_UINT,
+        R16Unorm => DXGI_FORMAT_R16_UNORM,
+        R16Snorm => DXGI_FORMAT_R16_SNORM,
+        R16Uint => DXGI_FORMAT_R16_UINT,
+        R16Sint => DXGI_FORMAT_R16_SINT,
+        R16Sfloat => DXGI_FORMAT_R16_FLOAT,
+        Rg16Unorm => DXGI_FORMAT_R16G16_UNORM,
+        Rg16Snorm => DXGI_FORMAT_R16G16_SNORM,
+        Rg16Uint => DXGI_FORMAT_R16G16_UINT,
+        Rg16Sint => DXGI_FORMAT_R16G16_SINT,
+        Rg16Sfloat => DXGI_FORMAT_R16G16_FLOAT,
+        Rgba16Unorm => DXGI_FORMAT_R16G16B16A16_UNORM,
+        Rgba16Snorm => DXGI_FORMAT_R16G16B16A16_SNORM,
+        Rgba16Uint => DXGI_FORMAT_R16G16B16A16_UINT,
+        Rgba16Sint => DXGI_FORMAT_R16G16B16A16_SINT,
+        Rgba16Sfloat => DXGI_FORMAT_R16G16B16A16_FLOAT,
+        R32Uint => DXGI_FORMAT_R32_UINT,
+        R32Sint => DXGI_FORMAT_R32_SINT,
+        R32Sfloat => DXGI_FORMAT_R32_FLOAT,
+        Rg32Uint => DXGI_FORMAT_R32G32_UINT,
+        Rg32Sint => DXGI_FORMAT_R32G32_SINT,
+        Rg32Sfloat => DXGI_FORMAT_R32G32_FLOAT,
+        Rgb32Uint => DXGI_FORMAT_R32G32B32_UINT,
+        Rgb32Sint => DXGI_FORMAT_R32G32B32_SINT,
+        Rgb32Sfloat => DXGI_FORMAT_R32G32B32_FLOAT,
+        Rgba32Uint => DXGI_FORMAT_R32G32B32A32_UINT,
+        Rgba32Sint => DXGI_FORMAT_R32G32B32A32_SINT,
+        Rgba32Sfloat => DXGI_FORMAT_R32G32B32A32_FLOAT,
+        B10g11r11Ufloat if reverse => DXGI_FORMAT_R11G11B10_FLOAT,
+        E5b9g9r9Ufloat if reverse => DXGI_FORMAT_R9G9B9E5_SHAREDEXP,
+        D16Unorm => DXGI_FORMAT_D16_UNORM,
+        D24UnormS8Uint => DXGI_FORMAT_D24_UNORM_S8_UINT,
+        X8D24Unorm if reverse => DXGI_FORMAT_D24_UNORM_S8_UINT,
+        D32Sfloat => DXGI_FORMAT_D32_FLOAT,
+        D32SfloatS8Uint => DXGI_FORMAT_D32_FLOAT_S8X24_UINT,
+        Bc1RgbUnorm => DXGI_FORMAT_BC1_UNORM,
+        Bc1RgbSrgb => DXGI_FORMAT_BC1_UNORM_SRGB,
+        Bc2Unorm => DXGI_FORMAT_BC2_UNORM,
+        Bc2Srgb => DXGI_FORMAT_BC2_UNORM_SRGB,
+        Bc3Unorm => DXGI_FORMAT_BC3_UNORM,
+        Bc3Srgb => DXGI_FORMAT_BC3_UNORM_SRGB,
+        Bc4Unorm => DXGI_FORMAT_BC4_UNORM,
+        Bc4Snorm => DXGI_FORMAT_BC4_SNORM,
+        Bc5Unorm => DXGI_FORMAT_BC5_UNORM,
+        Bc5Snorm => DXGI_FORMAT_BC5_SNORM,
+        Bc6hUfloat => DXGI_FORMAT_BC6H_UF16,
+        Bc6hSfloat => DXGI_FORMAT_BC6H_SF16,
+        Bc7Unorm => DXGI_FORMAT_BC7_UNORM,
+        Bc7Srgb => DXGI_FORMAT_BC7_UNORM_SRGB,
+
+        _ => return None,
+    };
+
+    Some(format)
+}
+
+pub fn map_format_nosrgb(format: Format) -> Option<DXGI_FORMAT> {
+    match format {
+        Format::Bgra8Srgb => Some(DXGI_FORMAT_B8G8R8A8_UNORM),
+        Format::Rgba8Srgb => Some(DXGI_FORMAT_R8G8B8A8_UNORM),
+        _ => map_format(format),
+    }
+}
+
+pub fn map_swizzle(swizzle: Swizzle) -> UINT {
+    use hal::format::Component::*;
+
+    [swizzle.0, swizzle.1, swizzle.2, swizzle.3]
+        .iter()
+        .enumerate()
+        .fold(
+            D3D12_SHADER_COMPONENT_MAPPING_ALWAYS_SET_BIT_AVOIDING_ZEROMEM_MISTAKES,
+            |mapping, (i, &component)| {
+                let value = match component {
+                    R => D3D12_SHADER_COMPONENT_MAPPING_FROM_MEMORY_COMPONENT_0,
+                    G => D3D12_SHADER_COMPONENT_MAPPING_FROM_MEMORY_COMPONENT_1,
+                    B => D3D12_SHADER_COMPONENT_MAPPING_FROM_MEMORY_COMPONENT_2,
+                    A => D3D12_SHADER_COMPONENT_MAPPING_FROM_MEMORY_COMPONENT_3,
+                    Zero => D3D12_SHADER_COMPONENT_MAPPING_FORCE_VALUE_0,
+                    One => D3D12_SHADER_COMPONENT_MAPPING_FORCE_VALUE_1,
+                };
+                mapping | (value << D3D12_SHADER_COMPONENT_MAPPING_SHIFT as usize * i)
+            },
+        )
+}
+
+pub fn map_surface_type(st: SurfaceType) -> Option<DXGI_FORMAT> {
+    use hal::format::SurfaceType::*;
+
+    assert_eq!(1,
unsafe { *(&1u32 as *const _ as *const u8) });
+    Some(match st {
+        R5_G6_B5 => DXGI_FORMAT_B5G6R5_UNORM,
+        A1_R5_G5_B5 => DXGI_FORMAT_B5G5R5A1_UNORM,
+        R8 => DXGI_FORMAT_R8_TYPELESS,
+        R8_G8 => DXGI_FORMAT_R8G8_TYPELESS,
+        R8_G8_B8_A8 => DXGI_FORMAT_R8G8B8A8_TYPELESS,
+        B8_G8_R8_A8 => DXGI_FORMAT_B8G8R8A8_TYPELESS,
+        A8_B8_G8_R8 => DXGI_FORMAT_R8G8B8A8_TYPELESS,
+        A2_B10_G10_R10 => DXGI_FORMAT_R10G10B10A2_TYPELESS,
+        R16 => DXGI_FORMAT_R16_TYPELESS,
+        R16_G16 => DXGI_FORMAT_R16G16_TYPELESS,
+        R16_G16_B16_A16 => DXGI_FORMAT_R16G16B16A16_TYPELESS,
+        R32 => DXGI_FORMAT_R32_TYPELESS,
+        R32_G32 => DXGI_FORMAT_R32G32_TYPELESS,
+        R32_G32_B32 => DXGI_FORMAT_R32G32B32_TYPELESS,
+        R32_G32_B32_A32 => DXGI_FORMAT_R32G32B32A32_TYPELESS,
+        B10_G11_R11 => DXGI_FORMAT_R11G11B10_FLOAT,
+        E5_B9_G9_R9 => DXGI_FORMAT_R9G9B9E5_SHAREDEXP,
+        D16 => DXGI_FORMAT_R16_TYPELESS,
+        X8D24 => DXGI_FORMAT_D24_UNORM_S8_UINT,
+        D32 => DXGI_FORMAT_R32_TYPELESS,
+        D24_S8 => DXGI_FORMAT_D24_UNORM_S8_UINT,
+        D32_S8 => DXGI_FORMAT_D32_FLOAT_S8X24_UINT,
+        _ => return None,
+    })
+}
+
+pub fn map_format_dsv(surface: SurfaceType) -> Option<DXGI_FORMAT> {
+    Some(match surface {
+        SurfaceType::D16 => DXGI_FORMAT_D16_UNORM,
+        SurfaceType::X8D24 | SurfaceType::D24_S8 => DXGI_FORMAT_D24_UNORM_S8_UINT,
+        SurfaceType::D32 => DXGI_FORMAT_D32_FLOAT,
+        SurfaceType::D32_S8 => DXGI_FORMAT_D32_FLOAT_S8X24_UINT,
+        _ => return None,
+    })
+}
+
+pub fn map_topology_type(primitive: pso::Primitive) -> D3D12_PRIMITIVE_TOPOLOGY_TYPE {
+    use hal::pso::Primitive::*;
+    match primitive {
+        PointList => D3D12_PRIMITIVE_TOPOLOGY_TYPE_POINT,
+        LineList | LineStrip => {
+            D3D12_PRIMITIVE_TOPOLOGY_TYPE_LINE
+        }
+        TriangleList | TriangleStrip => {
+            D3D12_PRIMITIVE_TOPOLOGY_TYPE_TRIANGLE
+        }
+        PatchList(_) => D3D12_PRIMITIVE_TOPOLOGY_TYPE_PATCH,
+    }
+}
+
+pub fn map_topology(ia: &pso::InputAssemblerDesc) -> D3D12_PRIMITIVE_TOPOLOGY {
+    use hal::pso::Primitive::*;
+    match (ia.primitive, ia.with_adjacency) {
+        (PointList, false) => D3D_PRIMITIVE_TOPOLOGY_POINTLIST,
+        (PointList, true) => panic!("Points can't have adjacency info"),
+        (LineList, false) => D3D_PRIMITIVE_TOPOLOGY_LINELIST,
+        (LineList, true) => D3D_PRIMITIVE_TOPOLOGY_LINELIST_ADJ,
+        (LineStrip, false) => D3D_PRIMITIVE_TOPOLOGY_LINESTRIP,
+        (LineStrip, true) => D3D_PRIMITIVE_TOPOLOGY_LINESTRIP_ADJ,
+        (TriangleList, false) => D3D_PRIMITIVE_TOPOLOGY_TRIANGLELIST,
+        (TriangleList, true) => D3D_PRIMITIVE_TOPOLOGY_TRIANGLELIST_ADJ,
+        (TriangleStrip, false) => D3D_PRIMITIVE_TOPOLOGY_TRIANGLESTRIP,
+        (TriangleStrip, true) => D3D_PRIMITIVE_TOPOLOGY_TRIANGLESTRIP_ADJ,
+        (PatchList(num), false) => {
+            assert!(num != 0);
+            D3D_PRIMITIVE_TOPOLOGY_1_CONTROL_POINT_PATCHLIST + (num as u32) - 1
+        }
+        (_, true) => panic!("Patches can't have adjacency info"),
+    }
+}
+
+pub fn map_rasterizer(rasterizer: &pso::Rasterizer) -> D3D12_RASTERIZER_DESC {
+    use hal::pso::FrontFace::*;
+    use hal::pso::PolygonMode::*;
+
+    let bias = match rasterizer.depth_bias {
+        Some(pso::State::Static(db)) => db,
+        Some(_) | None => pso::DepthBias::default(),
+    };
+
+    D3D12_RASTERIZER_DESC {
+        FillMode: match rasterizer.polygon_mode {
+            Point => {
+                error!("Point rasterization is not supported");
+                D3D12_FILL_MODE_WIREFRAME
+            }
+            Line(width) => {
+                if let pso::State::Static(w) = width {
+                    validate_line_width(w);
+                }
+                D3D12_FILL_MODE_WIREFRAME
+            }
+            Fill => D3D12_FILL_MODE_SOLID,
+        },
+        CullMode: match rasterizer.cull_face {
+            pso::Face::NONE => D3D12_CULL_MODE_NONE,
+            pso::Face::FRONT => D3D12_CULL_MODE_FRONT,
+            pso::Face::BACK => D3D12_CULL_MODE_BACK,
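+            // D3D12_CULL_MODE has no value that culls both faces at once,
+            // hence the panic for the combined case below.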
_ => panic!("Culling both front and back faces is not supported"), + }, + FrontCounterClockwise: match rasterizer.front_face { + Clockwise => FALSE, + CounterClockwise => TRUE, + }, + DepthBias: bias.const_factor as INT, + DepthBiasClamp: bias.clamp, + SlopeScaledDepthBias: bias.slope_factor, + DepthClipEnable: !rasterizer.depth_clamping as _, + MultisampleEnable: FALSE, + ForcedSampleCount: 0, + AntialiasedLineEnable: FALSE, + ConservativeRaster: if rasterizer.conservative { + + D3D12_CONSERVATIVE_RASTERIZATION_MODE_ON + } else { + D3D12_CONSERVATIVE_RASTERIZATION_MODE_OFF + }, + } +} + +fn map_factor(factor: pso::Factor) -> D3D12_BLEND { + use hal::pso::Factor::*; + match factor { + Zero => D3D12_BLEND_ZERO, + One => D3D12_BLEND_ONE, + SrcColor => D3D12_BLEND_SRC_COLOR, + OneMinusSrcColor => D3D12_BLEND_INV_SRC_COLOR, + DstColor => D3D12_BLEND_DEST_COLOR, + OneMinusDstColor => D3D12_BLEND_INV_DEST_COLOR, + SrcAlpha => D3D12_BLEND_SRC_ALPHA, + OneMinusSrcAlpha => D3D12_BLEND_INV_SRC_ALPHA, + DstAlpha => D3D12_BLEND_DEST_ALPHA, + OneMinusDstAlpha => D3D12_BLEND_INV_DEST_ALPHA, + ConstColor | ConstAlpha => D3D12_BLEND_BLEND_FACTOR, + OneMinusConstColor | OneMinusConstAlpha => D3D12_BLEND_INV_BLEND_FACTOR, + SrcAlphaSaturate => D3D12_BLEND_SRC_ALPHA_SAT, + Src1Color => D3D12_BLEND_SRC1_COLOR, + OneMinusSrc1Color => D3D12_BLEND_INV_SRC1_COLOR, + Src1Alpha => D3D12_BLEND_SRC1_ALPHA, + OneMinusSrc1Alpha => D3D12_BLEND_INV_SRC1_ALPHA, + } +} + +fn map_blend_op(operation: pso::BlendOp) -> (D3D12_BLEND_OP, D3D12_BLEND, D3D12_BLEND) { + use hal::pso::BlendOp::*; + match operation { + Add { src, dst } => (D3D12_BLEND_OP_ADD, map_factor(src), map_factor(dst)), + Sub { src, dst } => (D3D12_BLEND_OP_SUBTRACT, map_factor(src), map_factor(dst)), + RevSub { src, dst } => ( + D3D12_BLEND_OP_REV_SUBTRACT, + map_factor(src), + map_factor(dst), + ), + Min => (D3D12_BLEND_OP_MIN, D3D12_BLEND_ZERO, D3D12_BLEND_ZERO), + Max => (D3D12_BLEND_OP_MAX, D3D12_BLEND_ZERO, D3D12_BLEND_ZERO), + } +} + +pub fn map_render_targets( + color_targets: &[pso::ColorBlendDesc], +) -> [D3D12_RENDER_TARGET_BLEND_DESC; 8] { + let dummy_target = D3D12_RENDER_TARGET_BLEND_DESC { + BlendEnable: FALSE, + LogicOpEnable: FALSE, + SrcBlend: D3D12_BLEND_ZERO, + DestBlend: D3D12_BLEND_ZERO, + BlendOp: D3D12_BLEND_OP_ADD, + SrcBlendAlpha: D3D12_BLEND_ZERO, + DestBlendAlpha: D3D12_BLEND_ZERO, + BlendOpAlpha: D3D12_BLEND_OP_ADD, + LogicOp: D3D12_LOGIC_OP_CLEAR, + RenderTargetWriteMask: 0, + }; + let mut targets = [dummy_target; 8]; + + for (target, color_desc) in targets.iter_mut().zip(color_targets.iter()) { + target.RenderTargetWriteMask = color_desc.mask.bits() as UINT8; + if let Some(ref blend) = color_desc.blend { + let (color_op, color_src, color_dst) = map_blend_op(blend.color); + let (alpha_op, alpha_src, alpha_dst) = map_blend_op(blend.alpha); + target.BlendEnable = TRUE; + target.BlendOp = color_op; + target.SrcBlend = color_src; + target.DestBlend = color_dst; + target.BlendOpAlpha = alpha_op; + target.SrcBlendAlpha = alpha_src; + target.DestBlendAlpha = alpha_dst; + } + } + + targets +} + +pub fn map_depth_stencil(dsi: &pso::DepthStencilDesc) -> D3D12_DEPTH_STENCIL_DESC { + let (depth_on, depth_write, depth_func) = match dsi.depth { + Some(ref depth) => (TRUE, depth.write, map_comparison(depth.fun)), + None => unsafe { mem::zeroed() }, + }; + + let (stencil_on, front, back, read_mask, write_mask) = match dsi.stencil { + Some(ref stencil) => { + let read_masks = stencil.read_masks.static_or(pso::Sided::new(!0)); + let write_masks = 
stencil.write_masks.static_or(pso::Sided::new(!0)); + if read_masks.front != read_masks.back || write_masks.front != write_masks.back { + error!( + "Different sides are specified for read ({:?}) and write ({:?}) stencil masks", + read_masks, write_masks + ); + } + ( + TRUE, + map_stencil_side(&stencil.faces.front), + map_stencil_side(&stencil.faces.back), + read_masks.front, + write_masks.front, + ) + } + None => unsafe { mem::zeroed() }, + }; + + D3D12_DEPTH_STENCIL_DESC { + DepthEnable: depth_on, + DepthWriteMask: if depth_write { + D3D12_DEPTH_WRITE_MASK_ALL + } else { + D3D12_DEPTH_WRITE_MASK_ZERO + }, + DepthFunc: depth_func, + StencilEnable: stencil_on, + StencilReadMask: read_mask as _, + StencilWriteMask: write_mask as _, + FrontFace: front, + BackFace: back, + } +} + +pub fn map_comparison(func: pso::Comparison) -> D3D12_COMPARISON_FUNC { + use hal::pso::Comparison::*; + match func { + Never => D3D12_COMPARISON_FUNC_NEVER, + Less => D3D12_COMPARISON_FUNC_LESS, + LessEqual => D3D12_COMPARISON_FUNC_LESS_EQUAL, + Equal => D3D12_COMPARISON_FUNC_EQUAL, + GreaterEqual => D3D12_COMPARISON_FUNC_GREATER_EQUAL, + Greater => D3D12_COMPARISON_FUNC_GREATER, + NotEqual => D3D12_COMPARISON_FUNC_NOT_EQUAL, + Always => D3D12_COMPARISON_FUNC_ALWAYS, + } +} + +fn map_stencil_op(op: pso::StencilOp) -> D3D12_STENCIL_OP { + use hal::pso::StencilOp::*; + match op { + Keep => D3D12_STENCIL_OP_KEEP, + Zero => D3D12_STENCIL_OP_ZERO, + Replace => D3D12_STENCIL_OP_REPLACE, + IncrementClamp => D3D12_STENCIL_OP_INCR_SAT, + IncrementWrap => D3D12_STENCIL_OP_INCR, + DecrementClamp => D3D12_STENCIL_OP_DECR_SAT, + DecrementWrap => D3D12_STENCIL_OP_DECR, + Invert => D3D12_STENCIL_OP_INVERT, + } +} + +fn map_stencil_side(side: &pso::StencilFace) -> D3D12_DEPTH_STENCILOP_DESC { + D3D12_DEPTH_STENCILOP_DESC { + StencilFailOp: map_stencil_op(side.op_fail), + StencilDepthFailOp: map_stencil_op(side.op_depth_fail), + StencilPassOp: map_stencil_op(side.op_pass), + StencilFunc: map_comparison(side.fun), + } +} + +pub fn map_wrap(wrap: image::WrapMode) -> D3D12_TEXTURE_ADDRESS_MODE { + use hal::image::WrapMode::*; + match wrap { + Tile => D3D12_TEXTURE_ADDRESS_MODE_WRAP, + Mirror => D3D12_TEXTURE_ADDRESS_MODE_MIRROR, + Clamp => D3D12_TEXTURE_ADDRESS_MODE_CLAMP, + Border => D3D12_TEXTURE_ADDRESS_MODE_BORDER, + } +} + +fn map_filter_type(filter: image::Filter) -> D3D12_FILTER_TYPE { + match filter { + image::Filter::Nearest => D3D12_FILTER_TYPE_POINT, + image::Filter::Linear => D3D12_FILTER_TYPE_LINEAR, + } +} + +fn map_anisotropic(anisotropic: image::Anisotropic) -> D3D12_FILTER { + match anisotropic { + image::Anisotropic::On(_) => D3D12_FILTER_ANISOTROPIC, + image::Anisotropic::Off => 0, + } +} + +pub fn map_filter( + mag_filter: image::Filter, + min_filter: image::Filter, + mip_filter: image::Filter, + reduction: D3D12_FILTER_REDUCTION_TYPE, + anisotropic: image::Anisotropic, +) -> D3D12_FILTER { + let mag = map_filter_type(mag_filter); + let min = map_filter_type(min_filter); + let mip = map_filter_type(mip_filter); + + (min & D3D12_FILTER_TYPE_MASK) << D3D12_MIN_FILTER_SHIFT + | (mag & D3D12_FILTER_TYPE_MASK) << D3D12_MAG_FILTER_SHIFT + | (mip & D3D12_FILTER_TYPE_MASK) << D3D12_MIP_FILTER_SHIFT + | (reduction & D3D12_FILTER_REDUCTION_TYPE_MASK) << D3D12_FILTER_REDUCTION_TYPE_SHIFT + | map_anisotropic(anisotropic) +} + +pub fn map_buffer_resource_state(access: buffer::Access) -> D3D12_RESOURCE_STATES { + use self::buffer::Access; + + if access.contains(Access::SHADER_WRITE) { + return
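// Editorial sketch (not part of the vendored patch): `map_filter` above packs
// three 2-bit filter types into one D3D12_FILTER word. Assuming the d3d12
// shift/mask constants (MIP shift 0, MAG shift 2, MIN shift 4, mask 0x3):
fn pack_filter(min: u32, mag: u32, mip: u32) -> u32 {
    const MASK: u32 = 0x3;     // D3D12_FILTER_TYPE_MASK
    (min & MASK) << 4          // D3D12_MIN_FILTER_SHIFT
        | (mag & MASK) << 2    // D3D12_MAG_FILTER_SHIFT
        | (mip & MASK)         // D3D12_MIP_FILTER_SHIFT is 0
}
// pack_filter(1, 1, 1) == 0x15 == D3D12_FILTER_MIN_MAG_MIP_LINEAR, while
// map_anisotropic above overrides the whole word with 0x55 (ANISOTROPIC).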
D3D12_RESOURCE_STATE_UNORDERED_ACCESS; + } + if access.contains(Access::TRANSFER_WRITE) { + + return D3D12_RESOURCE_STATE_COPY_DEST; + } + + + let mut state = D3D12_RESOURCE_STATE_COMMON; + + if access.contains(Access::TRANSFER_READ) { + state |= D3D12_RESOURCE_STATE_COPY_SOURCE; + } + if access.contains(Access::INDEX_BUFFER_READ) { + state |= D3D12_RESOURCE_STATE_INDEX_BUFFER; + } + if access.contains(Access::VERTEX_BUFFER_READ) || access.contains(Access::UNIFORM_READ) + { + state |= D3D12_RESOURCE_STATE_VERTEX_AND_CONSTANT_BUFFER; + } + if access.contains(Access::INDIRECT_COMMAND_READ) { + state |= D3D12_RESOURCE_STATE_INDIRECT_ARGUMENT; + } + if access.contains(Access::SHADER_READ) { + + state |= D3D12_RESOURCE_STATE_PIXEL_SHADER_RESOURCE + | D3D12_RESOURCE_STATE_NON_PIXEL_SHADER_RESOURCE; + } + + state +} + + +fn derive_image_state(access: image::Access) -> D3D12_RESOURCE_STATES { + let mut state = D3D12_RESOURCE_STATE_COMMON; + + if access.contains(image::Access::TRANSFER_READ) { + state |= D3D12_RESOURCE_STATE_COPY_SOURCE; + } + if access.contains(image::Access::INPUT_ATTACHMENT_READ) { + state |= D3D12_RESOURCE_STATE_PIXEL_SHADER_RESOURCE; + } + if access.contains(image::Access::DEPTH_STENCIL_ATTACHMENT_READ) { + state |= D3D12_RESOURCE_STATE_DEPTH_READ; + } + if access.contains(image::Access::SHADER_READ) { + state |= D3D12_RESOURCE_STATE_PIXEL_SHADER_RESOURCE + | D3D12_RESOURCE_STATE_NON_PIXEL_SHADER_RESOURCE; + } + + state +} + +const MUTABLE_IMAGE_ACCESS: &[(image::Access, D3D12_RESOURCE_STATES)] = &[ + (image::Access::SHADER_WRITE, D3D12_RESOURCE_STATE_UNORDERED_ACCESS), + (image::Access::COLOR_ATTACHMENT_WRITE, D3D12_RESOURCE_STATE_RENDER_TARGET), + (image::Access::DEPTH_STENCIL_ATTACHMENT_WRITE, D3D12_RESOURCE_STATE_DEPTH_WRITE), + (image::Access::TRANSFER_WRITE, D3D12_RESOURCE_STATE_COPY_DEST), +]; + +pub fn map_image_resource_state( + access: image::Access, + layout: image::Layout, +) -> D3D12_RESOURCE_STATES { + match layout { + + image::Layout::Present => D3D12_RESOURCE_STATE_PRESENT, + image::Layout::ColorAttachmentOptimal => D3D12_RESOURCE_STATE_RENDER_TARGET, + image::Layout::DepthStencilAttachmentOptimal => D3D12_RESOURCE_STATE_DEPTH_WRITE, + + + + + + image::Layout::TransferDstOptimal => D3D12_RESOURCE_STATE_COPY_DEST, + image::Layout::TransferSrcOptimal => D3D12_RESOURCE_STATE_COPY_SOURCE, + image::Layout::General => { + match MUTABLE_IMAGE_ACCESS.iter().find(|&(bit, _)| access.contains(*bit)) { + Some(&(bit, state)) => { + if !(access & !bit).is_empty() { + warn!("Required access contains multiple writable states with `General` layout: {:?}", access); + } + state + } + None => derive_image_state(access), + } + } + image::Layout::ShaderReadOnlyOptimal | + image::Layout::DepthStencilReadOnlyOptimal => derive_image_state(access), + image::Layout::Undefined | + image::Layout::Preinitialized => D3D12_RESOURCE_STATE_COMMON, + } +} + +pub fn map_shader_visibility(flags: pso::ShaderStageFlags) -> ShaderVisibility { + use hal::pso::ShaderStageFlags as Ssf; + + match flags { + Ssf::VERTEX => ShaderVisibility::VS, + Ssf::GEOMETRY => ShaderVisibility::GS, + Ssf::HULL => ShaderVisibility::HS, + Ssf::DOMAIN => ShaderVisibility::DS, + Ssf::FRAGMENT => ShaderVisibility::PS, + _ => ShaderVisibility::All, + } +} + +pub fn map_buffer_flags(usage: buffer::Usage) -> D3D12_RESOURCE_FLAGS { + let mut flags = D3D12_RESOURCE_FLAG_NONE; + + + if usage.contains(buffer::Usage::STORAGE) || usage.contains(buffer::Usage::TRANSFER_DST) { + flags |= D3D12_RESOURCE_FLAG_ALLOW_UNORDERED_ACCESS; + } 
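// Editorial sketch (not part of the vendored patch): both state-derivation
// functions above follow the same D3D12 rule: read states may be OR-combined,
// write states are exclusive. Hence SHADER_WRITE/TRANSFER_WRITE short-circuit
// to a single state, while read accesses accumulate, e.g. for
// TRANSFER_READ | SHADER_READ (constants mirror winapi's values):
fn demo_read_state_accumulation() -> u32 {
    const COPY_SOURCE: u32 = 0x800;   // D3D12_RESOURCE_STATE_COPY_SOURCE
    const PIXEL_SRV: u32 = 0x80;      // D3D12_RESOURCE_STATE_PIXEL_SHADER_RESOURCE
    const NON_PIXEL_SRV: u32 = 0x40;  // D3D12_RESOURCE_STATE_NON_PIXEL_SHADER_RESOURCE
    COPY_SOURCE | PIXEL_SRV | NON_PIXEL_SRV // combined read-only state
}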
+ + flags +} + +pub fn map_image_flags(usage: image::Usage, features: ImageFeature) -> D3D12_RESOURCE_FLAGS { + use self::image::Usage; + let mut flags = D3D12_RESOURCE_FLAG_NONE; + + + if usage.contains(Usage::COLOR_ATTACHMENT) { + debug_assert!(features.contains(ImageFeature::COLOR_ATTACHMENT)); + flags |= D3D12_RESOURCE_FLAG_ALLOW_RENDER_TARGET; + } + if usage.contains(Usage::DEPTH_STENCIL_ATTACHMENT) { + debug_assert!(features.contains(ImageFeature::DEPTH_STENCIL_ATTACHMENT)); + flags |= D3D12_RESOURCE_FLAG_ALLOW_DEPTH_STENCIL; + } + if usage.contains(Usage::TRANSFER_DST) { + if features.contains(ImageFeature::COLOR_ATTACHMENT) { + flags |= D3D12_RESOURCE_FLAG_ALLOW_RENDER_TARGET + }; + if features.contains(ImageFeature::DEPTH_STENCIL_ATTACHMENT) { + flags |= D3D12_RESOURCE_FLAG_ALLOW_DEPTH_STENCIL + }; + } + if usage.contains(Usage::STORAGE) { + debug_assert!(features.contains(ImageFeature::STORAGE)); + flags |= D3D12_RESOURCE_FLAG_ALLOW_UNORDERED_ACCESS; + } + if !features.contains(ImageFeature::SAMPLED) { + flags |= D3D12_RESOURCE_FLAG_DENY_SHADER_RESOURCE; + } + + flags +} + +pub fn map_execution_model(model: spirv::ExecutionModel) -> pso::Stage { + match model { + spirv::ExecutionModel::Vertex => pso::Stage::Vertex, + spirv::ExecutionModel::Fragment => pso::Stage::Fragment, + spirv::ExecutionModel::Geometry => pso::Stage::Geometry, + spirv::ExecutionModel::GlCompute => pso::Stage::Compute, + spirv::ExecutionModel::TessellationControl => pso::Stage::Hull, + spirv::ExecutionModel::TessellationEvaluation => pso::Stage::Domain, + spirv::ExecutionModel::Kernel => panic!("Kernel is not a valid execution model."), + } +} + +pub fn map_stage(stage: pso::Stage) -> spirv::ExecutionModel { + match stage { + pso::Stage::Vertex => spirv::ExecutionModel::Vertex, + pso::Stage::Fragment => spirv::ExecutionModel::Fragment, + pso::Stage::Geometry => spirv::ExecutionModel::Geometry, + pso::Stage::Compute => spirv::ExecutionModel::GlCompute, + pso::Stage::Hull => spirv::ExecutionModel::TessellationControl, + pso::Stage::Domain => spirv::ExecutionModel::TessellationEvaluation, + } +} diff --git a/third_party/rust/gfx-backend-dx12/src/descriptors_cpu.rs b/third_party/rust/gfx-backend-dx12/src/descriptors_cpu.rs new file mode 100644 index 000000000000..325a199f914c --- /dev/null +++ b/third_party/rust/gfx-backend-dx12/src/descriptors_cpu.rs @@ -0,0 +1,158 @@ +use native::{CpuDescriptor, DescriptorHeapFlags, DescriptorHeapType}; +use std::{collections::HashSet, fmt}; + + +pub struct HeapLinear { + handle_size: usize, + num: usize, + size: usize, + start: CpuDescriptor, + raw: native::DescriptorHeap, +} + +impl fmt::Debug for HeapLinear { + fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result { + fmt.write_str("HeapLinear") + } +} + +impl HeapLinear { + pub fn new(device: native::Device, ty: DescriptorHeapType, size: usize) -> Self { + let (heap, _hr) = device.create_descriptor_heap(size as _, ty, DescriptorHeapFlags::empty(), 0); + + HeapLinear { + handle_size: device.get_descriptor_increment_size(ty) as _, + num: 0, + size, + start: heap.start_cpu_descriptor(), + raw: heap, + } + } + + pub fn alloc_handle(&mut self) -> CpuDescriptor { + assert!(!self.is_full()); + + let slot = self.num; + self.num += 1; + + CpuDescriptor { + ptr: self.start.ptr + self.handle_size * slot, + } + } + + pub fn is_full(&self) -> bool { + self.num >= self.size + } + + pub fn clear(&mut self) { + self.num = 0; + } + + pub unsafe fn destroy(&self) { + self.raw.destroy(); + } +} + +const HEAP_SIZE_FIXED: usize = 64; + + 
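// Editorial sketch (not part of the vendored patch): the fixed-size `Heap`
// below tracks its 64 descriptor slots in a single u64 where a set bit means
// "free": `trailing_zeros` finds the first free slot and XOR flips it to
// occupied. The `free` method here is an illustrative extension; the vendored
// type never releases individual slots, only whole heaps.
struct SlotMask(u64);
impl SlotMask {
    fn new() -> Self { SlotMask(!0) } // all 64 slots free
    fn alloc(&mut self) -> Option<usize> {
        if self.0 == 0 { return None; } // heap is full
        let slot = self.0.trailing_zeros() as usize;
        self.0 ^= 1 << slot; // clear the "free" bit
        Some(slot)
    }
    fn free(&mut self, slot: usize) { self.0 |= 1 << slot; }
}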
+struct Heap { + + + + + availability: u64, + handle_size: usize, + start: CpuDescriptor, + raw: native::DescriptorHeap, +} + +impl fmt::Debug for Heap { + fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result { + fmt.write_str("Heap") + } +} + +impl Heap { + pub fn new(device: native::Device, ty: DescriptorHeapType) -> Self { + let (heap, _hr) = + device.create_descriptor_heap(HEAP_SIZE_FIXED as _, ty, DescriptorHeapFlags::empty(), 0); + + Heap { + handle_size: device.get_descriptor_increment_size(ty) as _, + availability: !0, + start: heap.start_cpu_descriptor(), + raw: heap, + } + } + + pub fn alloc_handle(&mut self) -> CpuDescriptor { + + let slot = self.availability.trailing_zeros() as usize; + assert!(slot < HEAP_SIZE_FIXED); + + self.availability ^= 1 << slot; + + CpuDescriptor { + ptr: self.start.ptr + self.handle_size * slot, + } + } + + pub fn is_full(&self) -> bool { + self.availability == 0 + } + + pub unsafe fn destroy(&self) { + self.raw.destroy(); + } +} + +pub struct DescriptorCpuPool { + device: native::Device, + ty: DescriptorHeapType, + heaps: Vec<Heap>, + free_list: HashSet<usize>, +} + +impl fmt::Debug for DescriptorCpuPool { + fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result { + fmt.write_str("DescriptorCpuPool") + } +} + +impl DescriptorCpuPool { + pub fn new(device: native::Device, ty: DescriptorHeapType) -> Self { + DescriptorCpuPool { + device, + ty, + heaps: Vec::new(), + free_list: HashSet::new(), + } + } + + pub fn alloc_handle(&mut self) -> CpuDescriptor { + let heap_id = self.free_list.iter().cloned().next().unwrap_or_else(|| { + + let id = self.heaps.len(); + self.heaps.push(Heap::new(self.device, self.ty)); + self.free_list.insert(id); + id + }); + + let heap = &mut self.heaps[heap_id]; + let handle = heap.alloc_handle(); + if heap.is_full() { + self.free_list.remove(&heap_id); + } + + handle + } + + + + pub unsafe fn destroy(&self) { + for heap in &self.heaps { + heap.destroy(); + } + } +} diff --git a/third_party/rust/gfx-backend-dx12/src/device.rs b/third_party/rust/gfx-backend-dx12/src/device.rs new file mode 100644 index 000000000000..8b46489434ef --- /dev/null +++ b/third_party/rust/gfx-backend-dx12/src/device.rs @@ -0,0 +1,3507 @@ +use std::borrow::Borrow; +use std::collections::{BTreeMap, VecDeque}; +use std::ops::Range; +use std::{ffi, mem, ptr, slice}; + +use spirv_cross::{hlsl, spirv, ErrorCode as SpirvErrorCode}; +use smallvec::SmallVec; + +use winapi::shared::minwindef::{FALSE, TRUE, UINT}; +use winapi::shared::{dxgi, dxgi1_2, dxgi1_4, dxgiformat, dxgitype, windef, winerror}; +use winapi::um::{d3d12, d3dcompiler, synchapi, winbase, winnt}; +use winapi::Interface; + +use auxil::spirv_cross_specialize_ast; +use hal::format::Aspects; +use hal::memory::Requirements; +use hal::pool::CommandPoolCreateFlags; +use hal::pso::VertexInputRate; +use hal::queue::{CommandQueue as _, QueueFamilyId}; +use hal::range::RangeArg; +use hal::{ + self, + buffer, + device as d, + format, + image, + memory, + pass, + pso, + query, + window as w, +}; + +use pool::{CommandPool, CommandPoolAllocator}; +use range_alloc::RangeAllocator; +use root_constants::RootConstant; +use { + conv, + command as cmd, + descriptors_cpu, + resource as r, + root_constants, + window::{Surface, Swapchain}, + Backend as B, + Device, + MemoryGroup, + MAX_VERTEX_BUFFERS, + NUM_HEAP_PROPERTIES, + QUEUE_FAMILIES, +}; +use native::{ + PipelineStateSubobject, + Subobject, +}; + + +const ROOT_CONSTANT_SPACE: u32 = 0; + +const MEM_TYPE_MASK: u64 = 0x7; +const MEM_TYPE_SHIFT: u64 = 3; + +const
MEM_TYPE_UNIVERSAL_SHIFT: u64 = MEM_TYPE_SHIFT * MemoryGroup::Universal as u64; +const MEM_TYPE_BUFFER_SHIFT: u64 = MEM_TYPE_SHIFT * MemoryGroup::BufferOnly as u64; +const MEM_TYPE_IMAGE_SHIFT: u64 = MEM_TYPE_SHIFT * MemoryGroup::ImageOnly as u64; +const MEM_TYPE_TARGET_SHIFT: u64 = MEM_TYPE_SHIFT * MemoryGroup::TargetOnly as u64; + +pub const IDENTITY_MAPPING: UINT = 0x1688; + + + +fn gen_unexpected_error(err: SpirvErrorCode) -> d::ShaderError { + let msg = match err { + SpirvErrorCode::CompilationError(msg) => msg, + SpirvErrorCode::Unhandled => "Unexpected error".into(), + }; + d::ShaderError::CompilationFailed(msg) +} + + +fn gen_query_error(err: SpirvErrorCode) -> d::ShaderError { + let msg = match err { + SpirvErrorCode::CompilationError(msg) => msg, + SpirvErrorCode::Unhandled => "Unknown query error".into(), + }; + d::ShaderError::CompilationFailed(msg) +} + +#[derive(Clone, Debug)] +pub(crate) struct ViewInfo { + pub(crate) resource: native::Resource, + pub(crate) kind: image::Kind, + pub(crate) caps: image::ViewCapabilities, + pub(crate) view_kind: image::ViewKind, + pub(crate) format: dxgiformat::DXGI_FORMAT, + pub(crate) component_mapping: UINT, + pub(crate) range: image::SubresourceRange, +} + +pub(crate) enum CommandSignature { + Draw, + DrawIndexed, + Dispatch, +} + + +pub(crate) fn compile_shader( + stage: pso::Stage, + shader_model: hlsl::ShaderModel, + entry: &str, + code: &[u8], +) -> Result { + let stage_to_str = |stage, shader_model| { + let stage = match stage { + pso::Stage::Vertex => "vs", + pso::Stage::Fragment => "ps", + pso::Stage::Compute => "cs", + _ => unimplemented!(), + }; + + let model = match shader_model { + hlsl::ShaderModel::V5_0 => "5_0", + hlsl::ShaderModel::V5_1 => "5_1", + hlsl::ShaderModel::V6_0 => "6_0", + _ => unimplemented!(), + }; + + format!("{}_{}\0", stage, model) + }; + + let mut shader_data = native::Blob::null(); + let mut error = native::Blob::null(); + let entry = ffi::CString::new(entry).unwrap(); + let hr = unsafe { + d3dcompiler::D3DCompile( + code.as_ptr() as *const _, + code.len(), + ptr::null(), + ptr::null(), + ptr::null_mut(), + entry.as_ptr() as *const _, + stage_to_str(stage, shader_model).as_ptr() as *const i8, + 1, + 0, + shader_data.mut_void() as *mut *mut _, + error.mut_void() as *mut *mut _, + ) + }; + if !winerror::SUCCEEDED(hr) { + error!("D3DCompile error {:x}", hr); + let message = unsafe { + let pointer = error.GetBufferPointer(); + let size = error.GetBufferSize(); + let slice = slice::from_raw_parts(pointer as *const u8, size as usize); + String::from_utf8_lossy(slice).into_owned() + }; + unsafe { + error.destroy(); + } + Err(d::ShaderError::CompilationFailed(message)) + } else { + Ok(shader_data) + } +} + +#[repr(C)] +struct GraphicsPipelineStateSubobjectStream { + root_signature: PipelineStateSubobject<*mut d3d12::ID3D12RootSignature>, + vs: PipelineStateSubobject, + ps: PipelineStateSubobject, + ds: PipelineStateSubobject, + hs: PipelineStateSubobject, + gs: PipelineStateSubobject, + stream_output: PipelineStateSubobject, + blend: PipelineStateSubobject, + sample_mask: PipelineStateSubobject, + rasterizer: PipelineStateSubobject, + depth_stencil: PipelineStateSubobject, + input_layout: PipelineStateSubobject, + ib_strip_cut_value: PipelineStateSubobject, + primitive_topology: PipelineStateSubobject, + render_target_formats: PipelineStateSubobject, + depth_stencil_format: PipelineStateSubobject, + sample_desc: PipelineStateSubobject, + node_mask: PipelineStateSubobject, + cached_pso: PipelineStateSubobject, + 
flags: PipelineStateSubobject, +} + +impl GraphicsPipelineStateSubobjectStream { + fn new( + pso_desc: &d3d12::D3D12_GRAPHICS_PIPELINE_STATE_DESC, + depth_bounds_test_enable: bool, + ) -> Self { + GraphicsPipelineStateSubobjectStream { + root_signature: PipelineStateSubobject::new( + Subobject::RootSignature, + pso_desc.pRootSignature, + ), + vs: PipelineStateSubobject::new(Subobject::VS, pso_desc.VS), + ps: PipelineStateSubobject::new(Subobject::PS, pso_desc.PS), + ds: PipelineStateSubobject::new(Subobject::DS, pso_desc.DS), + hs: PipelineStateSubobject::new(Subobject::HS, pso_desc.HS), + gs: PipelineStateSubobject::new(Subobject::GS, pso_desc.GS), + stream_output: PipelineStateSubobject::new( + Subobject::StreamOutput, + pso_desc.StreamOutput, + ), + blend: PipelineStateSubobject::new(Subobject::Blend, pso_desc.BlendState), + sample_mask: PipelineStateSubobject::new(Subobject::SampleMask, pso_desc.SampleMask), + rasterizer: PipelineStateSubobject::new( + Subobject::Rasterizer, + pso_desc.RasterizerState, + ), + depth_stencil: PipelineStateSubobject::new( + Subobject::DepthStencil1, + d3d12::D3D12_DEPTH_STENCIL_DESC1 { + DepthEnable: pso_desc.DepthStencilState.DepthEnable, + DepthWriteMask: pso_desc.DepthStencilState.DepthWriteMask, + DepthFunc: pso_desc.DepthStencilState.DepthFunc, + StencilEnable: pso_desc.DepthStencilState.StencilEnable, + StencilReadMask: pso_desc.DepthStencilState.StencilReadMask, + StencilWriteMask: pso_desc.DepthStencilState.StencilWriteMask, + FrontFace: pso_desc.DepthStencilState.FrontFace, + BackFace: pso_desc.DepthStencilState.BackFace, + DepthBoundsTestEnable: depth_bounds_test_enable as _, + }, + ), + input_layout: PipelineStateSubobject::new(Subobject::InputLayout, pso_desc.InputLayout), + ib_strip_cut_value: PipelineStateSubobject::new( + Subobject::IBStripCut, + pso_desc.IBStripCutValue, + ), + primitive_topology: PipelineStateSubobject::new( + Subobject::PrimitiveTopology, + pso_desc.PrimitiveTopologyType, + ), + render_target_formats: PipelineStateSubobject::new( + Subobject::RTFormats, + d3d12::D3D12_RT_FORMAT_ARRAY { + RTFormats: pso_desc.RTVFormats, + NumRenderTargets: pso_desc.NumRenderTargets, + }, + ), + depth_stencil_format: PipelineStateSubobject::new( + Subobject::DSFormat, + pso_desc.DSVFormat, + ), + sample_desc: PipelineStateSubobject::new(Subobject::SampleDesc, pso_desc.SampleDesc), + node_mask: PipelineStateSubobject::new(Subobject::NodeMask, pso_desc.NodeMask), + cached_pso: PipelineStateSubobject::new(Subobject::CachedPSO, pso_desc.CachedPSO), + flags: PipelineStateSubobject::new(Subobject::Flags, pso_desc.Flags), + } + } +} + +impl Device { + fn parse_spirv(raw_data: &[u32]) -> Result, d::ShaderError> { + let module = spirv::Module::from_words(raw_data); + + spirv::Ast::parse(&module).map_err(|err| { + let msg = match err { + SpirvErrorCode::CompilationError(msg) => msg, + SpirvErrorCode::Unhandled => "Unknown parsing error".into(), + }; + d::ShaderError::CompilationFailed(msg) + }) + } + + fn patch_spirv_resources( + ast: &mut spirv::Ast, + layout: Option<&r::PipelineLayout>, + ) -> Result<(), d::ShaderError> { + + let space_offset = match layout { + Some(layout) if !layout.constants.is_empty() => 1, + _ => return Ok(()), + }; + + let shader_resources = ast.get_shader_resources().map_err(gen_query_error)?; + for image in &shader_resources.separate_images { + let set = ast + .get_decoration(image.id, spirv::Decoration::DescriptorSet) + .map_err(gen_query_error)?; + ast.set_decoration( + image.id, + spirv::Decoration::DescriptorSet, + 
space_offset + set, + ) + .map_err(gen_unexpected_error)?; + } + + for uniform_buffer in &shader_resources.uniform_buffers { + let set = ast + .get_decoration(uniform_buffer.id, spirv::Decoration::DescriptorSet) + .map_err(gen_query_error)?; + ast.set_decoration( + uniform_buffer.id, + spirv::Decoration::DescriptorSet, + space_offset + set, + ) + .map_err(gen_unexpected_error)?; + } + + for storage_buffer in &shader_resources.storage_buffers { + let set = ast + .get_decoration(storage_buffer.id, spirv::Decoration::DescriptorSet) + .map_err(gen_query_error)?; + ast.set_decoration( + storage_buffer.id, + spirv::Decoration::DescriptorSet, + space_offset + set, + ) + .map_err(gen_unexpected_error)?; + } + + for image in &shader_resources.storage_images { + let set = ast + .get_decoration(image.id, spirv::Decoration::DescriptorSet) + .map_err(gen_query_error)?; + ast.set_decoration( + image.id, + spirv::Decoration::DescriptorSet, + space_offset + set, + ) + .map_err(gen_unexpected_error)?; + } + + for sampler in &shader_resources.separate_samplers { + let set = ast + .get_decoration(sampler.id, spirv::Decoration::DescriptorSet) + .map_err(gen_query_error)?; + ast.set_decoration( + sampler.id, + spirv::Decoration::DescriptorSet, + space_offset + set, + ) + .map_err(gen_unexpected_error)?; + } + + for image in &shader_resources.sampled_images { + let set = ast + .get_decoration(image.id, spirv::Decoration::DescriptorSet) + .map_err(gen_query_error)?; + ast.set_decoration( + image.id, + spirv::Decoration::DescriptorSet, + space_offset + set, + ) + .map_err(gen_unexpected_error)?; + } + + for input in &shader_resources.subpass_inputs { + let set = ast + .get_decoration(input.id, spirv::Decoration::DescriptorSet) + .map_err(gen_query_error)?; + ast.set_decoration( + input.id, + spirv::Decoration::DescriptorSet, + space_offset + set, + ) + .map_err(gen_unexpected_error)?; + } + + + + Ok(()) + } + + fn translate_spirv( + ast: &mut spirv::Ast, + shader_model: hlsl::ShaderModel, + layout: &r::PipelineLayout, + stage: pso::Stage, + ) -> Result { + let mut compile_options = hlsl::CompilerOptions::default(); + compile_options.shader_model = shader_model; + compile_options.vertex.invert_y = true; + + let stage_flag = stage.into(); + let root_constant_layout = layout + .constants + .iter() + .filter_map(|constant| { + if constant.stages.contains(stage_flag) { + Some(hlsl::RootConstant { + start: constant.range.start * 4, + end: constant.range.end * 4, + binding: constant.range.start, + space: 0, + }) + } else { + None + } + }) + .collect(); + ast.set_compiler_options(&compile_options) + .map_err(gen_unexpected_error)?; + ast.set_root_constant_layout(root_constant_layout) + .map_err(gen_unexpected_error)?; + ast.compile().map_err(|err| { + let msg = match err { + SpirvErrorCode::CompilationError(msg) => msg, + SpirvErrorCode::Unhandled => "Unknown compile error".into(), + }; + d::ShaderError::CompilationFailed(msg) + }) + } + + + + + fn extract_entry_point( + stage: pso::Stage, + source: &pso::EntryPoint, + layout: &r::PipelineLayout, + ) -> Result<(native::Blob, bool), d::ShaderError> { + match *source.module { + r::ShaderModule::Compiled(ref shaders) => { + + + shaders + .get(source.entry) + .map(|src| (*src, false)) + .ok_or(d::ShaderError::MissingEntryPoint(source.entry.into())) + } + r::ShaderModule::Spirv(ref raw_data) => { + let mut ast = Self::parse_spirv(raw_data)?; + spirv_cross_specialize_ast(&mut ast, &source.specialization)?; + Self::patch_spirv_resources(&mut ast, Some(layout))?; + + let 
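// Editorial sketch (not part of the vendored patch): every loop in
// patch_spirv_resources above applies the same rule. When the pipeline layout
// carries root constants, HLSL register space 0 is reserved for them
// (ROOT_CONSTANT_SPACE), so each SPIR-V descriptor set is shifted up by one:
fn hlsl_register_space(spirv_descriptor_set: u32, has_root_constants: bool) -> u32 {
    spirv_descriptor_set + if has_root_constants { 1 } else { 0 }
}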
shader_model = hlsl::ShaderModel::V5_1; + let shader_code = Self::translate_spirv(&mut ast, shader_model, layout, stage)?; + debug!("SPIRV-Cross generated shader:\n{}", shader_code); + + let real_name = ast + .get_cleansed_entry_point_name(source.entry, conv::map_stage(stage)) + .map_err(gen_query_error)?; + + let entry_points = ast.get_entry_points().map_err(gen_query_error)?; + entry_points + .iter() + .find(|entry_point| entry_point.name == real_name) + .ok_or(d::ShaderError::MissingEntryPoint(source.entry.into())) + .and_then(|entry_point| { + let stage = conv::map_execution_model(entry_point.execution_model); + let shader = compile_shader( + stage, + shader_model, + &entry_point.name, + shader_code.as_bytes(), + )?; + Ok((shader, true)) + }) + } + } + } + + + pub fn create_shader_module_from_source( + &self, + stage: pso::Stage, + hlsl_entry: &str, + entry_point: &str, + code: &[u8], + ) -> Result { + let mut shader_map = BTreeMap::new(); + let blob = compile_shader(stage, hlsl::ShaderModel::V5_1, hlsl_entry, code)?; + shader_map.insert(entry_point.into(), blob); + Ok(r::ShaderModule::Compiled(shader_map)) + } + + pub(crate) fn create_command_signature( + device: native::Device, + ty: CommandSignature, + ) -> native::CommandSignature { + let (arg, stride) = match ty { + CommandSignature::Draw => (native::IndirectArgument::draw(), 16), + CommandSignature::DrawIndexed => (native::IndirectArgument::draw_indexed(), 20), + CommandSignature::Dispatch => (native::IndirectArgument::dispatch(), 12), + }; + + let (signature, hr) = + device.create_command_signature(native::RootSignature::null(), &[arg], stride, 0); + + if !winerror::SUCCEEDED(hr) { + error!("error on command signature creation: {:x}", hr); + } + signature + } + + pub(crate) fn create_descriptor_heap_impl( + device: native::Device, + heap_type: native::DescriptorHeapType, + shader_visible: bool, + capacity: usize, + ) -> r::DescriptorHeap { + assert_ne!(capacity, 0); + + let (heap, _hr) = device.create_descriptor_heap( + capacity as _, + heap_type, + if shader_visible { + native::DescriptorHeapFlags::SHADER_VISIBLE + } else { + native::DescriptorHeapFlags::empty() + }, + 0, + ); + + let descriptor_size = device.get_descriptor_increment_size(heap_type); + let cpu_handle = heap.start_cpu_descriptor(); + let gpu_handle = heap.start_gpu_descriptor(); + + let range_allocator = RangeAllocator::new(0 .. 
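// Editorial sketch (not part of the vendored patch): the byte strides passed
// to create_command_signature above (16/20/12) are the sizes of the D3D12
// indirect-argument structs, reproduced here with their 4-byte fields:
#[repr(C)]
struct DrawArguments { vertex_count: u32, instance_count: u32, start_vertex: u32, start_instance: u32 } // 16 bytes
#[repr(C)]
struct DrawIndexedArguments { index_count: u32, instance_count: u32, start_index: u32, base_vertex: i32, start_instance: u32 } // 20 bytes
#[repr(C)]
struct DispatchArguments { x: u32, y: u32, z: u32 } // 12 bytes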
(capacity as u64)); + + r::DescriptorHeap { + raw: heap, + handle_size: descriptor_size as _, + total_handles: capacity as _, + start: r::DualHandle { + cpu: cpu_handle, + gpu: gpu_handle, + size: 0, + }, + range_allocator, + } + } + + pub(crate) fn view_image_as_render_target_impl( + device: native::Device, + handle: d3d12::D3D12_CPU_DESCRIPTOR_HANDLE, + info: ViewInfo, + ) -> Result<(), image::ViewError> { + #![allow(non_snake_case)] + + let mut desc = d3d12::D3D12_RENDER_TARGET_VIEW_DESC { + Format: info.format, + ViewDimension: 0, + u: unsafe { mem::zeroed() }, + }; + + let MipSlice = info.range.levels.start as _; + let FirstArraySlice = info.range.layers.start as _; + let ArraySize = (info.range.layers.end - info.range.layers.start) as _; + let is_msaa = info.kind.num_samples() > 1; + if info.range.levels.start + 1 != info.range.levels.end { + return Err(image::ViewError::Level(info.range.levels.start)); + } + if info.range.layers.end > info.kind.num_layers() { + return Err(image::ViewError::Layer(image::LayerError::OutOfBounds( + info.range.layers, + ))); + } + + match info.view_kind { + image::ViewKind::D1 => { + assert_eq!(info.range.layers, 0 .. 1); + desc.ViewDimension = d3d12::D3D12_RTV_DIMENSION_TEXTURE1D; + *unsafe { desc.u.Texture1D_mut() } = d3d12::D3D12_TEX1D_RTV { MipSlice } + } + image::ViewKind::D1Array => { + desc.ViewDimension = d3d12::D3D12_RTV_DIMENSION_TEXTURE1DARRAY; + *unsafe { desc.u.Texture1DArray_mut() } = d3d12::D3D12_TEX1D_ARRAY_RTV { + MipSlice, + FirstArraySlice, + ArraySize, + } + } + image::ViewKind::D2 if is_msaa => { + assert_eq!(info.range.layers, 0 .. 1); + desc.ViewDimension = d3d12::D3D12_RTV_DIMENSION_TEXTURE2DMS; + *unsafe { desc.u.Texture2DMS_mut() } = d3d12::D3D12_TEX2DMS_RTV { + UnusedField_NothingToDefine: 0, + } + } + image::ViewKind::D2 => { + assert_eq!(info.range.layers, 0 .. 1); + desc.ViewDimension = d3d12::D3D12_RTV_DIMENSION_TEXTURE2D; + *unsafe { desc.u.Texture2D_mut() } = d3d12::D3D12_TEX2D_RTV { + MipSlice, + PlaneSlice: 0, + } + } + image::ViewKind::D2Array if is_msaa => { + desc.ViewDimension = d3d12::D3D12_RTV_DIMENSION_TEXTURE2DMSARRAY; + *unsafe { desc.u.Texture2DMSArray_mut() } = d3d12::D3D12_TEX2DMS_ARRAY_RTV { + FirstArraySlice, + ArraySize, + } + } + image::ViewKind::D2Array => { + desc.ViewDimension = d3d12::D3D12_RTV_DIMENSION_TEXTURE2DARRAY; + *unsafe { desc.u.Texture2DArray_mut() } = d3d12::D3D12_TEX2D_ARRAY_RTV { + MipSlice, + FirstArraySlice, + ArraySize, + PlaneSlice: 0, + } + } + image::ViewKind::D3 => { + assert_eq!(info.range.layers, 0 .. 
1); + desc.ViewDimension = d3d12::D3D12_RTV_DIMENSION_TEXTURE3D; + *unsafe { desc.u.Texture3D_mut() } = d3d12::D3D12_TEX3D_RTV { + MipSlice, + FirstWSlice: 0, + WSize: info.kind.extent().depth as _, + } + } + image::ViewKind::Cube | image::ViewKind::CubeArray => { + desc.ViewDimension = d3d12::D3D12_RTV_DIMENSION_TEXTURE2DARRAY; + + *unsafe { desc.u.Texture2DArray_mut() } = d3d12::D3D12_TEX2D_ARRAY_RTV { + MipSlice, + FirstArraySlice, + ArraySize, + PlaneSlice: 0, + } + } + }; + + unsafe { + device.CreateRenderTargetView(info.resource.as_mut_ptr(), &desc, handle); + } + + Ok(()) + } + + fn view_image_as_render_target( + &self, + info: ViewInfo, + ) -> Result { + let handle = self.rtv_pool.lock().unwrap().alloc_handle(); + Self::view_image_as_render_target_impl(self.raw, handle, info).map(|_| handle) + } + + pub(crate) fn view_image_as_depth_stencil_impl( + device: native::Device, + handle: d3d12::D3D12_CPU_DESCRIPTOR_HANDLE, + info: ViewInfo, + ) -> Result<(), image::ViewError> { + #![allow(non_snake_case)] + + let mut desc = d3d12::D3D12_DEPTH_STENCIL_VIEW_DESC { + Format: info.format, + ViewDimension: 0, + Flags: 0, + u: unsafe { mem::zeroed() }, + }; + + let MipSlice = info.range.levels.start as _; + let FirstArraySlice = info.range.layers.start as _; + let ArraySize = (info.range.layers.end - info.range.layers.start) as _; + let is_msaa = info.kind.num_samples() > 1; + if info.range.levels.start + 1 != info.range.levels.end { + return Err(image::ViewError::Level(info.range.levels.start)); + } + if info.range.layers.end > info.kind.num_layers() { + return Err(image::ViewError::Layer(image::LayerError::OutOfBounds( + info.range.layers, + ))); + } + + match info.view_kind { + image::ViewKind::D1 => { + assert_eq!(info.range.layers, 0 .. 1); + desc.ViewDimension = d3d12::D3D12_DSV_DIMENSION_TEXTURE1D; + *unsafe { desc.u.Texture1D_mut() } = d3d12::D3D12_TEX1D_DSV { MipSlice } + } + image::ViewKind::D1Array => { + desc.ViewDimension = d3d12::D3D12_DSV_DIMENSION_TEXTURE1DARRAY; + *unsafe { desc.u.Texture1DArray_mut() } = d3d12::D3D12_TEX1D_ARRAY_DSV { + MipSlice, + FirstArraySlice, + ArraySize, + } + } + image::ViewKind::D2 if is_msaa => { + assert_eq!(info.range.layers, 0 .. 1); + desc.ViewDimension = d3d12::D3D12_DSV_DIMENSION_TEXTURE2DMS; + *unsafe { desc.u.Texture2DMS_mut() } = d3d12::D3D12_TEX2DMS_DSV { + UnusedField_NothingToDefine: 0, + } + } + image::ViewKind::D2 => { + assert_eq!(info.range.layers, 0 .. 
1); + desc.ViewDimension = d3d12::D3D12_DSV_DIMENSION_TEXTURE2D; + *unsafe { desc.u.Texture2D_mut() } = d3d12::D3D12_TEX2D_DSV { MipSlice } + } + image::ViewKind::D2Array if is_msaa => { + desc.ViewDimension = d3d12::D3D12_DSV_DIMENSION_TEXTURE2DMSARRAY; + *unsafe { desc.u.Texture2DMSArray_mut() } = d3d12::D3D12_TEX2DMS_ARRAY_DSV { + FirstArraySlice, + ArraySize, + } + } + image::ViewKind::D2Array => { + desc.ViewDimension = d3d12::D3D12_DSV_DIMENSION_TEXTURE2DARRAY; + *unsafe { desc.u.Texture2DArray_mut() } = d3d12::D3D12_TEX2D_ARRAY_DSV { + MipSlice, + FirstArraySlice, + ArraySize, + } + } + image::ViewKind::D3 | image::ViewKind::Cube | image::ViewKind::CubeArray => unimplemented!(), + }; + + unsafe { + device.CreateDepthStencilView(info.resource.as_mut_ptr(), &desc, handle); + } + + Ok(()) + } + + fn view_image_as_depth_stencil( + &self, + info: ViewInfo, + ) -> Result { + let handle = self.dsv_pool.lock().unwrap().alloc_handle(); + Self::view_image_as_depth_stencil_impl(self.raw, handle, info).map(|_| handle) + } + + pub(crate) fn build_image_as_shader_resource_desc( + info: &ViewInfo, + ) -> Result { + #![allow(non_snake_case)] + + let mut desc = d3d12::D3D12_SHADER_RESOURCE_VIEW_DESC { + Format: info.format, + ViewDimension: 0, + Shader4ComponentMapping: info.component_mapping, + u: unsafe { mem::zeroed() }, + }; + + let MostDetailedMip = info.range.levels.start as _; + let MipLevels = (info.range.levels.end - info.range.levels.start) as _; + let FirstArraySlice = info.range.layers.start as _; + let ArraySize = (info.range.layers.end - info.range.layers.start) as _; + + if info.range.layers.end > info.kind.num_layers() { + return Err(image::ViewError::Layer(image::LayerError::OutOfBounds( + info.range.layers.clone(), + ))); + } + let is_msaa = info.kind.num_samples() > 1; + let is_cube = info.caps.contains(image::ViewCapabilities::KIND_CUBE); + + match info.view_kind { + image::ViewKind::D1 => { + assert_eq!(info.range.layers, 0 .. 1); + desc.ViewDimension = d3d12::D3D12_SRV_DIMENSION_TEXTURE1D; + *unsafe { desc.u.Texture1D_mut() } = d3d12::D3D12_TEX1D_SRV { + MostDetailedMip, + MipLevels, + ResourceMinLODClamp: 0.0, + } + } + image::ViewKind::D1Array => { + desc.ViewDimension = d3d12::D3D12_SRV_DIMENSION_TEXTURE1DARRAY; + *unsafe { desc.u.Texture1DArray_mut() } = d3d12::D3D12_TEX1D_ARRAY_SRV { + MostDetailedMip, + MipLevels, + FirstArraySlice, + ArraySize, + ResourceMinLODClamp: 0.0, + } + } + image::ViewKind::D2 if is_msaa => { + assert_eq!(info.range.layers, 0 .. 1); + desc.ViewDimension = d3d12::D3D12_SRV_DIMENSION_TEXTURE2DMS; + *unsafe { desc.u.Texture2DMS_mut() } = d3d12::D3D12_TEX2DMS_SRV { + UnusedField_NothingToDefine: 0, + } + } + image::ViewKind::D2 => { + assert_eq!(info.range.layers, 0 .. 
1); + desc.ViewDimension = d3d12::D3D12_SRV_DIMENSION_TEXTURE2D; + *unsafe { desc.u.Texture2D_mut() } = d3d12::D3D12_TEX2D_SRV { + MostDetailedMip, + MipLevels, + PlaneSlice: 0, + ResourceMinLODClamp: 0.0, + } + } + image::ViewKind::D2Array if is_msaa => { + desc.ViewDimension = d3d12::D3D12_SRV_DIMENSION_TEXTURE2DMSARRAY; + *unsafe { desc.u.Texture2DMSArray_mut() } = d3d12::D3D12_TEX2DMS_ARRAY_SRV { + FirstArraySlice, + ArraySize, + } + } + image::ViewKind::D2Array => { + desc.ViewDimension = d3d12::D3D12_SRV_DIMENSION_TEXTURE2DARRAY; + *unsafe { desc.u.Texture2DArray_mut() } = d3d12::D3D12_TEX2D_ARRAY_SRV { + MostDetailedMip, + MipLevels, + FirstArraySlice, + ArraySize, + PlaneSlice: 0, + ResourceMinLODClamp: 0.0, + } + } + image::ViewKind::D3 => { + assert_eq!(info.range.layers, 0 .. 1); + desc.ViewDimension = d3d12::D3D12_SRV_DIMENSION_TEXTURE3D; + *unsafe { desc.u.Texture3D_mut() } = d3d12::D3D12_TEX3D_SRV { + MostDetailedMip, + MipLevels, + ResourceMinLODClamp: 0.0, + } + } + image::ViewKind::Cube if is_cube => { + desc.ViewDimension = d3d12::D3D12_SRV_DIMENSION_TEXTURECUBE; + *unsafe { desc.u.TextureCube_mut() } = d3d12::D3D12_TEXCUBE_SRV { + MostDetailedMip, + MipLevels, + ResourceMinLODClamp: 0.0, + } + } + image::ViewKind::CubeArray if is_cube => { + assert_eq!(0, ArraySize % 6); + desc.ViewDimension = d3d12::D3D12_SRV_DIMENSION_TEXTURECUBEARRAY; + *unsafe { desc.u.TextureCubeArray_mut() } = d3d12::D3D12_TEXCUBE_ARRAY_SRV { + MostDetailedMip, + MipLevels, + First2DArrayFace: FirstArraySlice, + NumCubes: ArraySize / 6, + ResourceMinLODClamp: 0.0, + } + } + image::ViewKind::Cube | image::ViewKind::CubeArray => { + error!( + "Cube views are not supported for the image, kind: {:?}", + info.kind + ); + return Err(image::ViewError::BadKind(info.view_kind)); + } + } + + Ok(desc) + } + + fn view_image_as_shader_resource( + &self, + mut info: ViewInfo, + ) -> Result { + #![allow(non_snake_case)] + + + info.format = match info.format { + dxgiformat::DXGI_FORMAT_D16_UNORM => dxgiformat::DXGI_FORMAT_R16_UNORM, + dxgiformat::DXGI_FORMAT_D32_FLOAT => dxgiformat::DXGI_FORMAT_R32_FLOAT, + format => format, + }; + + let desc = Self::build_image_as_shader_resource_desc(&info)?; + let handle = self.srv_uav_pool.lock().unwrap().alloc_handle(); + unsafe { + self.raw + .CreateShaderResourceView(info.resource.as_mut_ptr(), &desc, handle); + } + + Ok(handle) + } + + fn view_image_as_storage( + &self, + info: ViewInfo, + ) -> Result { + #![allow(non_snake_case)] + assert_eq!(info.range.levels.start + 1, info.range.levels.end); + + let mut desc = d3d12::D3D12_UNORDERED_ACCESS_VIEW_DESC { + Format: info.format, + ViewDimension: 0, + u: unsafe { mem::zeroed() }, + }; + + let MipSlice = info.range.levels.start as _; + let FirstArraySlice = info.range.layers.start as _; + let ArraySize = (info.range.layers.end - info.range.layers.start) as _; + + if info.range.layers.end > info.kind.num_layers() { + return Err(image::ViewError::Layer(image::LayerError::OutOfBounds( + info.range.layers, + ))); + } + if info.kind.num_samples() > 1 { + error!("MSAA images can't be viewed as UAV"); + return Err(image::ViewError::Unsupported); + } + + match info.view_kind { + image::ViewKind::D1 => { + assert_eq!(info.range.layers, 0 .. 
1); + desc.ViewDimension = d3d12::D3D12_UAV_DIMENSION_TEXTURE1D; + *unsafe { desc.u.Texture1D_mut() } = d3d12::D3D12_TEX1D_UAV { MipSlice } + } + image::ViewKind::D1Array => { + desc.ViewDimension = d3d12::D3D12_UAV_DIMENSION_TEXTURE1DARRAY; + *unsafe { desc.u.Texture1DArray_mut() } = d3d12::D3D12_TEX1D_ARRAY_UAV { + MipSlice, + FirstArraySlice, + ArraySize, + } + } + image::ViewKind::D2 => { + assert_eq!(info.range.layers, 0 .. 1); + desc.ViewDimension = d3d12::D3D12_UAV_DIMENSION_TEXTURE2D; + *unsafe { desc.u.Texture2D_mut() } = d3d12::D3D12_TEX2D_UAV { + MipSlice, + PlaneSlice: 0, + } + } + image::ViewKind::D2Array => { + desc.ViewDimension = d3d12::D3D12_UAV_DIMENSION_TEXTURE2DARRAY; + *unsafe { desc.u.Texture2DArray_mut() } = d3d12::D3D12_TEX2D_ARRAY_UAV { + MipSlice, + FirstArraySlice, + ArraySize, + PlaneSlice: 0, + } + } + image::ViewKind::D3 => { + assert_eq!(info.range.layers, 0 .. 1); + desc.ViewDimension = d3d12::D3D12_UAV_DIMENSION_TEXTURE3D; + *unsafe { desc.u.Texture3D_mut() } = d3d12::D3D12_TEX3D_UAV { + MipSlice, + FirstWSlice: 0, + WSize: info.kind.extent().depth as _, + } + } + image::ViewKind::Cube | image::ViewKind::CubeArray => { + error!("Cubic images can't be viewed as UAV"); + return Err(image::ViewError::Unsupported); + } + } + + let handle = self.srv_uav_pool.lock().unwrap().alloc_handle(); + unsafe { + self.raw.CreateUnorderedAccessView( + info.resource.as_mut_ptr(), + ptr::null_mut(), + &desc, + handle, + ); + } + + Ok(handle) + } + + pub(crate) fn create_raw_fence(&self, signalled: bool) -> native::Fence { + let mut handle = native::Fence::null(); + assert_eq!(winerror::S_OK, unsafe { + self.raw.CreateFence( + if signalled { 1 } else { 0 }, + d3d12::D3D12_FENCE_FLAG_NONE, + &d3d12::ID3D12Fence::uuidof(), + handle.mut_void(), + ) + }); + handle + } + + pub(crate) fn create_swapchain_impl( + &self, + config: &w::SwapchainConfig, + window_handle: windef::HWND, + factory: native::WeakPtr, + ) -> Result< + ( + native::WeakPtr, + dxgiformat::DXGI_FORMAT, + ), + w::CreationError, + > { + let mut swap_chain1 = native::WeakPtr::::null(); + + + let non_srgb_format = conv::map_format_nosrgb(config.format).unwrap(); + + + let desc = dxgi1_2::DXGI_SWAP_CHAIN_DESC1 { + AlphaMode: dxgi1_2::DXGI_ALPHA_MODE_IGNORE, + BufferCount: config.image_count, + Width: config.extent.width, + Height: config.extent.height, + Format: non_srgb_format, + Flags: 0, + BufferUsage: dxgitype::DXGI_USAGE_RENDER_TARGET_OUTPUT, + SampleDesc: dxgitype::DXGI_SAMPLE_DESC { + Count: 1, + Quality: 0, + }, + Scaling: dxgi1_2::DXGI_SCALING_STRETCH, + Stereo: FALSE, + SwapEffect: dxgi::DXGI_SWAP_EFFECT_FLIP_DISCARD, + }; + + unsafe { + let hr = factory.CreateSwapChainForHwnd( + self.present_queue.as_mut_ptr() as *mut _, + window_handle, + &desc, + ptr::null(), + ptr::null_mut(), + swap_chain1.mut_void() as *mut *mut _, + ); + + if !winerror::SUCCEEDED(hr) { + error!("error on swapchain creation 0x{:x}", hr); + } + + let (swap_chain3, hr3) = swap_chain1.cast::(); + if !winerror::SUCCEEDED(hr3) { + error!("error on swapchain cast 0x{:x}", hr3); + } + + swap_chain1.destroy(); + Ok((swap_chain3, non_srgb_format)) + } + } + + pub(crate) fn wrap_swapchain( + &self, + inner: native::WeakPtr, + config: &w::SwapchainConfig, + ) -> Swapchain { + let rtv_desc = d3d12::D3D12_RENDER_TARGET_VIEW_DESC { + Format: conv::map_format(config.format).unwrap(), + ViewDimension: d3d12::D3D12_RTV_DIMENSION_TEXTURE2D, + ..unsafe { mem::zeroed() } + }; + let rtv_heap = Device::create_descriptor_heap_impl( + self.raw, + 
native::DescriptorHeapType::Rtv, + false, + config.image_count as _, + ); + + let mut resources = vec![native::Resource::null(); config.image_count as usize]; + for (i, res) in resources.iter_mut().enumerate() { + let rtv_handle = rtv_heap.at(i as _, 0).cpu; + unsafe { + inner.GetBuffer(i as _, &d3d12::ID3D12Resource::uuidof(), res.mut_void()); + self.raw + .CreateRenderTargetView(res.as_mut_ptr(), &rtv_desc, rtv_handle); + } + } + + Swapchain { + inner, + next_frame: 0, + frame_queue: VecDeque::new(), + rtv_heap, + resources, + } + } +} + +impl d::Device for Device { + unsafe fn allocate_memory( + &self, + mem_type: hal::MemoryTypeId, + size: u64, + ) -> Result { + let mem_type = mem_type.0; + let mem_base_id = mem_type % NUM_HEAP_PROPERTIES; + let heap_property = &self.heap_properties[mem_base_id]; + + let properties = d3d12::D3D12_HEAP_PROPERTIES { + Type: d3d12::D3D12_HEAP_TYPE_CUSTOM, + CPUPageProperty: heap_property.page_property, + MemoryPoolPreference: heap_property.memory_pool, + CreationNodeMask: 0, + VisibleNodeMask: 0, + }; + + + + let mem_group = mem_type / NUM_HEAP_PROPERTIES; + + let desc = d3d12::D3D12_HEAP_DESC { + SizeInBytes: size, + Properties: properties, + Alignment: d3d12::D3D12_DEFAULT_MSAA_RESOURCE_PLACEMENT_ALIGNMENT as _, + Flags: match mem_group { + 0 => d3d12::D3D12_HEAP_FLAG_ALLOW_ALL_BUFFERS_AND_TEXTURES, + 1 => d3d12::D3D12_HEAP_FLAG_ALLOW_ONLY_BUFFERS, + 2 => d3d12::D3D12_HEAP_FLAG_ALLOW_ONLY_NON_RT_DS_TEXTURES, + 3 => d3d12::D3D12_HEAP_FLAG_ALLOW_ONLY_RT_DS_TEXTURES, + _ => unreachable!(), + }, + }; + + let mut heap = native::Heap::null(); + let hr = self + .raw + .clone() + .CreateHeap(&desc, &d3d12::ID3D12Heap::uuidof(), heap.mut_void()); + if hr == winerror::E_OUTOFMEMORY { + return Err(d::OutOfMemory::Device.into()); + } + assert_eq!(winerror::S_OK, hr); + + + + + + let is_mapable = mem_base_id != 0 + && (mem_group == MemoryGroup::Universal as _ + || mem_group == MemoryGroup::BufferOnly as _); + + + let resource = if is_mapable { + let mut resource = native::Resource::null(); + let desc = d3d12::D3D12_RESOURCE_DESC { + Dimension: d3d12::D3D12_RESOURCE_DIMENSION_BUFFER, + Alignment: 0, + Width: size, + Height: 1, + DepthOrArraySize: 1, + MipLevels: 1, + Format: dxgiformat::DXGI_FORMAT_UNKNOWN, + SampleDesc: dxgitype::DXGI_SAMPLE_DESC { + Count: 1, + Quality: 0, + }, + Layout: d3d12::D3D12_TEXTURE_LAYOUT_ROW_MAJOR, + Flags: d3d12::D3D12_RESOURCE_FLAG_NONE, + }; + + assert_eq!( + winerror::S_OK, + self.raw.clone().CreatePlacedResource( + heap.as_mut_ptr(), + 0, + &desc, + d3d12::D3D12_RESOURCE_STATE_COMMON, + ptr::null(), + &d3d12::ID3D12Resource::uuidof(), + resource.mut_void(), + ) + ); + + Some(resource) + } else { + None + }; + + Ok(r::Memory { + heap, + type_id: mem_type, + size, + resource, + }) + } + + unsafe fn create_command_pool( + &self, + family: QueueFamilyId, + create_flags: CommandPoolCreateFlags, + ) -> Result { + let list_type = QUEUE_FAMILIES[family.0].native_type(); + + let allocator = if create_flags.contains(CommandPoolCreateFlags::RESET_INDIVIDUAL) { + + CommandPoolAllocator::Individual(Vec::new()) + } else { + let (command_allocator, hr) = self.raw.create_command_allocator(list_type); + + + if !winerror::SUCCEEDED(hr) { + error!("error on command allocator creation: {:x}", hr); + } + + CommandPoolAllocator::Shared(command_allocator) + }; + + Ok(CommandPool { + allocator, + device: self.raw, + list_type, + shared: self.shared.clone(), + create_flags, + }) + } + + unsafe fn destroy_command_pool(&self, pool: CommandPool) { + 
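// Editorial sketch (not part of the vendored patch): allocate_memory above
// decodes a flat memory-type id as `group * NUM_HEAP_PROPERTIES + property`,
// hence the paired % and /. With a hypothetical NUM_HEAP_PROPERTIES of 3:
fn decode_memory_type(id: usize) -> (usize, usize) {
    const NUM_HEAP_PROPERTIES: usize = 3; // assumed value, for illustration only
    (id / NUM_HEAP_PROPERTIES, id % NUM_HEAP_PROPERTIES) // (memory group, heap property)
}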
pool.destroy(); + } + + unsafe fn create_render_pass<'a, IA, IS, ID>( + &self, + attachments: IA, + subpasses: IS, + dependencies: ID, + ) -> Result + where + IA: IntoIterator, + IA::Item: Borrow, + IS: IntoIterator, + IS::Item: Borrow>, + ID: IntoIterator, + ID::Item: Borrow, + { + #[derive(Copy, Clone, Debug, PartialEq)] + enum SubState { + New(d3d12::D3D12_RESOURCE_STATES), + + Resolve(d3d12::D3D12_RESOURCE_STATES), + Preserve, + Undefined, + } + + struct SubInfo<'a> { + desc: pass::SubpassDesc<'a>, + + + external_dependencies: Range, + + + unresolved_dependencies: u16, + } + struct AttachmentInfo { + sub_states: Vec, + last_state: d3d12::D3D12_RESOURCE_STATES, + barrier_start_index: usize, + } + + let attachments = attachments + .into_iter() + .map(|attachment| attachment.borrow().clone()) + .collect::>(); + let mut sub_infos = subpasses + .into_iter() + .map(|desc| { + SubInfo { + desc: desc.borrow().clone(), + external_dependencies: image::Access::empty() .. image::Access::empty(), + unresolved_dependencies: 0, + } + }) + .collect::>(); + let dependencies = dependencies.into_iter().collect::>(); + + let mut att_infos = (0 .. attachments.len()) + .map(|_| AttachmentInfo { + sub_states: vec![SubState::Undefined; sub_infos.len()], + last_state: d3d12::D3D12_RESOURCE_STATE_COMMON, + barrier_start_index: 0, + }) + .collect::>(); + + for dep in &dependencies { + use hal::pass::SubpassRef as Sr; + let dep = dep.borrow(); + match dep.passes { + Range { start: Sr::External, end: Sr::External } => { + error!("Unexpected external-external dependency!"); + } + Range { start: Sr::External, end: Sr::Pass(sid) } => { + sub_infos[sid].external_dependencies.start |= dep.accesses.start; + } + Range { start: Sr::Pass(sid), end: Sr::External } => { + sub_infos[sid].external_dependencies.end |= dep.accesses.end; + } + Range { start: Sr::Pass(from_sid), end: Sr::Pass(sid) } => { + + if from_sid != sid { + sub_infos[sid].unresolved_dependencies += 1; + } + } + } + } + + + for (sid, sub_info) in sub_infos.iter().enumerate() { + let sub = &sub_info.desc; + for (i, &(id, _layout)) in sub.colors.iter().enumerate() { + let target_state = d3d12::D3D12_RESOURCE_STATE_RENDER_TARGET; + let state = match sub.resolves.get(i) { + Some(_) => SubState::Resolve(target_state), + None => SubState::New(target_state), + }; + let old = mem::replace(&mut att_infos[id].sub_states[sid], state); + debug_assert_eq!(SubState::Undefined, old); + } + for &(id, layout) in sub.depth_stencil { + let state = SubState::New(match layout { + image::Layout::DepthStencilAttachmentOptimal => d3d12::D3D12_RESOURCE_STATE_DEPTH_WRITE, + image::Layout::DepthStencilReadOnlyOptimal => d3d12::D3D12_RESOURCE_STATE_DEPTH_READ, + image::Layout::General => d3d12::D3D12_RESOURCE_STATE_DEPTH_WRITE, + _ => { + error!("Unexpected depth/stencil layout: {:?}", layout); + d3d12::D3D12_RESOURCE_STATE_COMMON + } + }); + let old = mem::replace(&mut att_infos[id].sub_states[sid], state); + debug_assert_eq!(SubState::Undefined, old); + } + for &(id, _layout) in sub.inputs { + let state = SubState::New(d3d12::D3D12_RESOURCE_STATE_PIXEL_SHADER_RESOURCE); + let old = mem::replace(&mut att_infos[id].sub_states[sid], state); + debug_assert_eq!(SubState::Undefined, old); + } + for &(id, _layout) in sub.resolves { + let state = SubState::New(d3d12::D3D12_RESOURCE_STATE_RESOLVE_DEST); + let old = mem::replace(&mut att_infos[id].sub_states[sid], state); + debug_assert_eq!(SubState::Undefined, old); + } + for &id in sub.preserves { + let old = mem::replace(&mut 
att_infos[id].sub_states[sid], SubState::Preserve); + debug_assert_eq!(SubState::Undefined, old); + } + } + + let mut rp = r::RenderPass { + attachments: attachments.iter().cloned().collect(), + subpasses: Vec::new(), + post_barriers: Vec::new(), + }; + + while let Some(sid) = sub_infos.iter().position(|si| si.unresolved_dependencies == 0) { + for dep in &dependencies { + let dep = dep.borrow(); + if dep.passes.start != dep.passes.end + && dep.passes.start == pass::SubpassRef::Pass(sid) + { + if let pass::SubpassRef::Pass(other) = dep.passes.end { + sub_infos[other].unresolved_dependencies -= 1; + } + } + } + + let si = &mut sub_infos[sid]; + si.unresolved_dependencies = !0; + + + let mut pre_barriers = Vec::new(); + let mut post_barriers = Vec::new(); + for (att_id, (ai, att)) in att_infos.iter_mut().zip(attachments.iter()).enumerate() { + + if ai.barrier_start_index == 0 { + + + + ai.last_state = conv::map_image_resource_state( + si.external_dependencies.start, + att.layouts.start, + ); + } + + match ai.sub_states[sid] { + SubState::Preserve => { + ai.barrier_start_index = rp.subpasses.len() + 1; + } + SubState::New(state) if state != ai.last_state => { + let barrier = r::BarrierDesc::new(att_id, ai.last_state .. state); + match rp.subpasses.get_mut(ai.barrier_start_index) { + Some(past_subpass) => { + let split = barrier.split(); + past_subpass.pre_barriers.push(split.start); + pre_barriers.push(split.end); + } + None => pre_barriers.push(barrier), + } + ai.last_state = state; + ai.barrier_start_index = rp.subpasses.len() + 1; + } + SubState::Resolve(state) => { + + if state != ai.last_state { + let barrier = r::BarrierDesc::new(att_id, ai.last_state .. state); + match rp.subpasses.get_mut(ai.barrier_start_index) { + Some(past_subpass) => { + let split = barrier.split(); + past_subpass.pre_barriers.push(split.start); + pre_barriers.push(split.end); + } + None => pre_barriers.push(barrier), + } + } + + + let resolve_state = d3d12::D3D12_RESOURCE_STATE_RESOLVE_SOURCE; + let barrier = r::BarrierDesc::new(att_id, state .. resolve_state); + post_barriers.push(barrier); + + ai.last_state = resolve_state; + ai.barrier_start_index = rp.subpasses.len() + 1; + } + SubState::Undefined | + SubState::New(_) => {} + }; + } + + rp.subpasses.push(r::SubpassDesc { + color_attachments: si.desc.colors.iter().cloned().collect(), + depth_stencil_attachment: si.desc.depth_stencil.cloned(), + input_attachments: si.desc.inputs.iter().cloned().collect(), + resolve_attachments: si.desc.resolves.iter().cloned().collect(), + pre_barriers, + post_barriers, + }); + } + + assert_eq!(rp.subpasses.len(), sub_infos.len()); + assert!(sub_infos.iter().all(|si| si.unresolved_dependencies == !0)); + + + for (att_id, (ai, att)) in att_infos.iter().zip(attachments.iter()).enumerate() { + let state_dst = if ai.barrier_start_index == 0 { + + continue + } else { + let si = &sub_infos[ai.barrier_start_index - 1]; + conv::map_image_resource_state(si.external_dependencies.end, att.layouts.end) + }; + if state_dst == ai.last_state { + continue; + } + let barrier = r::BarrierDesc::new(att_id, ai.last_state .. 
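// Editorial sketch (not part of the vendored patch): the subpass scheduling
// loop above is a counting topological sort: pick any subpass with zero
// unresolved dependencies, retire it by setting its counter to !0, then
// decrement its dependents. The same idea in isolation:
fn topo_order(mut unresolved: Vec<u16>, edges: &[(usize, usize)]) -> Vec<usize> {
    let mut order = Vec::with_capacity(unresolved.len());
    while let Some(sid) = unresolved.iter().position(|&n| n == 0) {
        unresolved[sid] = !0; // mark scheduled, like si.unresolved_dependencies = !0
        for &(from, to) in edges {
            if from == sid { unresolved[to] -= 1; }
        }
        order.push(sid);
    }
    order
}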
state_dst); + match rp.subpasses.get_mut(ai.barrier_start_index) { + Some(past_subpass) => { + let split = barrier.split(); + past_subpass.pre_barriers.push(split.start); + rp.post_barriers.push(split.end); + } + None => rp.post_barriers.push(barrier), + } + } + + Ok(rp) + } + + unsafe fn create_pipeline_layout( + &self, + sets: IS, + push_constant_ranges: IR, + ) -> Result + where + IS: IntoIterator, + IS::Item: Borrow, + IR: IntoIterator, + IR::Item: Borrow<(pso::ShaderStageFlags, Range)>, + { + + + + + + + + + + + + + + + + + + + + + + let sets = sets.into_iter().collect::>(); + + let mut root_offset = 0; + let root_constants = root_constants::split(push_constant_ranges) + .iter() + .map(|constant| { + assert!(constant.range.start <= constant.range.end); + root_offset += (constant.range.end - constant.range.start) as usize; + + RootConstant { + stages: constant.stages, + range: constant.range.start .. constant.range.end, + } + }) + .collect::>(); + + info!( + "Creating a pipeline layout with {} sets and {} root constants", + sets.len(), + root_constants.len() + ); + + + + let mut parameters = Vec::with_capacity(root_constants.len() + sets.len() * 2); + + + for root_constant in root_constants.iter() { + debug!( + "\tRoot constant set={} range {:?}", + ROOT_CONSTANT_SPACE, root_constant.range + ); + parameters.push(native::RootParameter::constants( + conv::map_shader_visibility(root_constant.stages), + native::Binding { + register: root_constant.range.start as _, + space: ROOT_CONSTANT_SPACE, + }, + (root_constant.range.end - root_constant.range.start) as _, + )); + } + + + + + let root_space_offset = if !root_constants.is_empty() { 1 } else { 0 }; + + + + + let total = sets + .iter() + .map(|desc_set| { + let mut sum = 0; + for binding in desc_set.borrow().bindings.iter() { + let content = r::DescriptorContent::from(binding.ty); + if !content.is_dynamic() { + sum += content.bits().count_ones() as usize; + } + } + sum + }) + .sum(); + let mut ranges = Vec::with_capacity(total); + + let elements = sets.iter().enumerate().map(|(i, set)| { + let set = set.borrow(); + let space = (root_space_offset + i) as u32; + let mut table_type = r::SetTableTypes::empty(); + let root_table_offset = root_offset; + + + let visibility = conv::map_shader_visibility( + set.bindings + .iter() + .fold(pso::ShaderStageFlags::empty(), |u, bind| { + u | bind.stage_flags + }), + ); + + for bind in set.bindings.iter() { + debug!("\tRange {:?} at space={}", bind, space); + } + + let describe = |bind: &pso::DescriptorSetLayoutBinding, ty| { + native::DescriptorRange::new( + ty, + bind.count as _, + native::Binding { + register: bind.binding as _, + space, + }, + d3d12::D3D12_DESCRIPTOR_RANGE_OFFSET_APPEND, + ) + }; + + let mut descriptors = Vec::new(); + let mut range_base = ranges.len(); + for bind in set.bindings.iter() { + let content = r::DescriptorContent::from(bind.ty); + + if content.is_dynamic() { + + let binding = native::Binding { + register: bind.binding as _, + space, + }; + + if content.contains(r::DescriptorContent::CBV) { + descriptors.push(r::RootDescriptor { + offset: root_offset, + }); + parameters.push(native::RootParameter::cbv_descriptor(visibility, binding)); + root_offset += 2; + } else { + + unimplemented!() + } + } else { + + if content.contains(r::DescriptorContent::CBV) { + ranges.push(describe(bind, native::DescriptorRangeType::CBV)); + } + if content.contains(r::DescriptorContent::SRV) { + ranges.push(describe(bind, native::DescriptorRangeType::SRV)); + } + if 
content.contains(r::DescriptorContent::UAV) { + ranges.push(describe(bind, native::DescriptorRangeType::UAV)); + } + } + } + if ranges.len() > range_base { + parameters.push(native::RootParameter::descriptor_table( + visibility, + &ranges[range_base ..], + )); + table_type |= r::SRV_CBV_UAV; + root_offset += 1; + } + + range_base = ranges.len(); + for bind in set.bindings.iter() { + let content = r::DescriptorContent::from(bind.ty); + if content.contains(r::DescriptorContent::SAMPLER) { + ranges.push(describe(bind, native::DescriptorRangeType::Sampler)); + } + } + if ranges.len() > range_base { + parameters.push(native::RootParameter::descriptor_table( + visibility, + &ranges[range_base ..], + )); + table_type |= r::SAMPLERS; + root_offset += 1; + } + + r::RootElement { + table: r::RootTable { + ty: table_type, + offset: root_table_offset as _, + }, + descriptors, + } + }).collect(); + + + debug_assert_eq!(ranges.len(), total); + + + let (signature_raw, error) = match self.library.serialize_root_signature( + native::RootSignatureVersion::V1_0, + ¶meters, + &[], + native::RootSignatureFlags::ALLOW_IA_INPUT_LAYOUT, + ) { + Ok((pair, hr)) if winerror::SUCCEEDED(hr) => pair, + Ok((_, hr)) => panic!("Can't serialize root signature: {:?}", hr), + Err(e) => panic!("Can't find serialization function: {:?}", e), + }; + + if !error.is_null() { + error!( + "Root signature serialization error: {:?}", + error.as_c_str().to_str().unwrap() + ); + error.destroy(); + } + + + let (signature, _hr) = self.raw.create_root_signature(signature_raw, 0); + signature_raw.destroy(); + + Ok(r::PipelineLayout { + raw: signature, + constants: root_constants, + elements, + num_parameter_slots: parameters.len(), + }) + } + + unsafe fn create_pipeline_cache(&self, _data: Option<&[u8]>) -> Result<(), d::OutOfMemory> { + Ok(()) + } + + unsafe fn get_pipeline_cache_data(&self, _cache: &()) -> Result, d::OutOfMemory> { + + Ok(Vec::new()) + } + + unsafe fn destroy_pipeline_cache(&self, _: ()) { + + } + + unsafe fn merge_pipeline_caches(&self, _: &(), _: I) -> Result<(), d::OutOfMemory> + where + I: IntoIterator, + I::Item: Borrow<()>, + { + + Ok(()) + } + + unsafe fn create_graphics_pipeline<'a>( + &self, + desc: &pso::GraphicsPipelineDesc<'a, B>, + _cache: Option<&()>, + ) -> Result { + enum ShaderBc { + Owned(native::Blob), + Borrowed(native::Blob), + None, + } + impl ShaderBc { + pub fn shader(&self) -> native::Shader { + match *self { + ShaderBc::Owned(ref bc) | ShaderBc::Borrowed(ref bc) => { + native::Shader::from_blob(*bc) + } + ShaderBc::None => native::Shader::null(), + } + } + } + + let build_shader = |stage: pso::Stage, source: Option<&pso::EntryPoint<'a, B>>| { + let source = match source { + Some(src) => src, + None => return Ok(ShaderBc::None), + }; + + match Self::extract_entry_point(stage, source, desc.layout) { + Ok((shader, true)) => Ok(ShaderBc::Owned(shader)), + Ok((shader, false)) => Ok(ShaderBc::Borrowed(shader)), + Err(err) => Err(pso::CreationError::Shader(err)), + } + }; + + let vs = build_shader(pso::Stage::Vertex, Some(&desc.shaders.vertex))?; + let ps = build_shader(pso::Stage::Fragment, desc.shaders.fragment.as_ref())?; + let gs = build_shader(pso::Stage::Geometry, desc.shaders.geometry.as_ref())?; + let ds = build_shader(pso::Stage::Domain, desc.shaders.domain.as_ref())?; + let hs = build_shader(pso::Stage::Hull, desc.shaders.hull.as_ref())?; + + + let mut vertex_bindings = [None; MAX_VERTEX_BUFFERS]; + let mut vertex_strides = [0; MAX_VERTEX_BUFFERS]; + + for buffer in &desc.vertex_buffers { + 
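// Editorial sketch (not part of the vendored patch): root_offset above
// advances by the D3D12 root-signature dword cost of each parameter: one per
// 32-bit root constant, two for a root CBV (a GPU virtual address), and one
// per descriptor table. A toy tally matching that bookkeeping:
fn root_signature_dwords(num_constants: usize, num_root_cbvs: usize, num_tables: usize) -> usize {
    num_constants * 1 + num_root_cbvs * 2 + num_tables * 1 // must stay within D3D12's 64-dword limit
}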
vertex_strides[buffer.binding as usize] = buffer.stride; + } + + for attrib in &desc.attributes { + let binding = attrib.binding as usize; + let stride = vertex_strides[attrib.binding as usize]; + if attrib.element.offset < stride { + vertex_bindings[binding] = Some(r::VertexBinding { + stride: vertex_strides[attrib.binding as usize], + offset: 0, + mapped_binding: binding, + }); + } + } + + + let input_element_descs = desc + .attributes + .iter() + .filter_map(|attrib| { + let buffer_desc = match desc + .vertex_buffers + .iter() + .find(|buffer_desc| buffer_desc.binding == attrib.binding) + { + Some(buffer_desc) => buffer_desc, + None => { + error!( + "Couldn't find associated vertex buffer description {:?}", + attrib.binding + ); + return Some(Err(pso::CreationError::Other)); + } + }; + + let (slot_class, step_rate) = match buffer_desc.rate { + VertexInputRate::Vertex => { + (d3d12::D3D12_INPUT_CLASSIFICATION_PER_VERTEX_DATA, 0) + } + VertexInputRate::Instance(divisor) => { + (d3d12::D3D12_INPUT_CLASSIFICATION_PER_INSTANCE_DATA, divisor) + } + }; + let format = attrib.element.format; + + + + + let binding = attrib.binding as usize; + let stride = vertex_strides[binding]; + let offset = attrib.element.offset; + let (input_slot, offset) = if stride <= offset { + + + let mapping = vertex_bindings.iter().position(Option::is_none).unwrap(); + vertex_bindings[mapping] = Some(r::VertexBinding { + stride: vertex_strides[binding], + offset: offset, + mapped_binding: binding, + }); + + (mapping, 0) + } else { + (binding, offset) + }; + + Some(Ok(d3d12::D3D12_INPUT_ELEMENT_DESC { + SemanticName: "TEXCOORD\0".as_ptr() as *const _, + SemanticIndex: attrib.location, + Format: match conv::map_format(format) { + Some(fm) => fm, + None => { + error!("Unable to find DXGI format for {:?}", format); + return Some(Err(pso::CreationError::Other)); + } + }, + InputSlot: input_slot as _, + AlignedByteOffset: offset, + InputSlotClass: slot_class, + InstanceDataStepRate: step_rate as _, + })) + }) + .collect::, _>>()?; + + + + let pass = { + let subpass = &desc.subpass; + match subpass.main_pass.subpasses.get(subpass.index) { + Some(subpass) => subpass, + None => return Err(pso::CreationError::InvalidSubpass(subpass.index)), + } + }; + + + let (rtvs, num_rtvs) = { + let mut rtvs = [dxgiformat::DXGI_FORMAT_UNKNOWN; 8]; + let mut num_rtvs = 0; + for (rtv, target) in rtvs.iter_mut().zip(pass.color_attachments.iter()) { + let format = desc.subpass.main_pass.attachments[target.0].format; + *rtv = format + .and_then(conv::map_format) + .unwrap_or(dxgiformat::DXGI_FORMAT_UNKNOWN); + num_rtvs += 1; + } + (rtvs, num_rtvs) + }; + + let sample_desc = dxgitype::DXGI_SAMPLE_DESC { + Count: match desc.multisampling { + Some(ref ms) => ms.rasterization_samples as _, + None => 1, + }, + Quality: 0, + }; + + + let pso_desc = d3d12::D3D12_GRAPHICS_PIPELINE_STATE_DESC { + pRootSignature: desc.layout.raw.as_mut_ptr(), + VS: *vs.shader(), + PS: *ps.shader(), + GS: *gs.shader(), + DS: *ds.shader(), + HS: *hs.shader(), + StreamOutput: d3d12::D3D12_STREAM_OUTPUT_DESC { + pSODeclaration: ptr::null(), + NumEntries: 0, + pBufferStrides: ptr::null(), + NumStrides: 0, + RasterizedStream: 0, + }, + BlendState: d3d12::D3D12_BLEND_DESC { + AlphaToCoverageEnable: desc.multisampling.as_ref().map_or(FALSE, |ms| { + if ms.alpha_coverage { + TRUE + } else { + FALSE + } + }), + IndependentBlendEnable: TRUE, + RenderTarget: conv::map_render_targets(&desc.blender.targets), + }, + SampleMask: UINT::max_value(), + RasterizerState: 
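The vertex-buffer remapping built here works around a D3D12 limit: an attribute whose byte offset is not smaller than its buffer's stride cannot be expressed in that input slot, so the buffer is aliased into a spare slot, AlignedByteOffset is rebased to zero, and the residual offset is recorded on the aliased binding. A minimal sketch of the slot decision (hypothetical helper, not the patch's code):

// Decide which input slot an attribute lands in. `spare_slot` stands in for
// the first free entry of `vertex_bindings` found above.
fn remap_slot(binding: usize, stride: u32, offset: u32, spare_slot: usize) -> (usize, u32) {
    if stride <= offset {
        // Alias into a fresh slot; the offset is rebased to zero.
        (spare_slot, 0)
    } else {
        (binding, offset)
    }
}

fn main() {
    // Stride 16, offset 16: needs an alias slot.
    assert_eq!(remap_slot(0, 16, 16, 3), (3, 0));
    // Stride 16, offset 8: stays in its own slot.
    assert_eq!(remap_slot(0, 16, 8, 3), (0, 8));
}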
conv::map_rasterizer(&desc.rasterizer), + DepthStencilState: conv::map_depth_stencil(&desc.depth_stencil), + InputLayout: d3d12::D3D12_INPUT_LAYOUT_DESC { + pInputElementDescs: if input_element_descs.is_empty() { + ptr::null() + } else { + input_element_descs.as_ptr() + }, + NumElements: input_element_descs.len() as u32, + }, + IBStripCutValue: d3d12::D3D12_INDEX_BUFFER_STRIP_CUT_VALUE_DISABLED, + PrimitiveTopologyType: conv::map_topology_type(desc.input_assembler.primitive), + NumRenderTargets: num_rtvs, + RTVFormats: rtvs, + DSVFormat: pass + .depth_stencil_attachment + .and_then(|att_ref| { + desc.subpass.main_pass.attachments[att_ref.0] + .format + .and_then(|f| conv::map_format_dsv(f.base_format().0)) + }) + .unwrap_or(dxgiformat::DXGI_FORMAT_UNKNOWN), + SampleDesc: sample_desc, + NodeMask: 0, + CachedPSO: d3d12::D3D12_CACHED_PIPELINE_STATE { + pCachedBlob: ptr::null(), + CachedBlobSizeInBytes: 0, + }, + Flags: d3d12::D3D12_PIPELINE_STATE_FLAG_NONE, + }; + + let topology = conv::map_topology(&desc.input_assembler); + + + let mut pipeline = native::PipelineState::null(); + let hr = if desc.depth_stencil.depth_bounds { + + + let (device2, hr) = self.raw.cast::(); + if winerror::SUCCEEDED(hr) { + let mut pss_stream = GraphicsPipelineStateSubobjectStream::new(&pso_desc, true); + let pss_desc = d3d12::D3D12_PIPELINE_STATE_STREAM_DESC { + SizeInBytes: mem::size_of_val(&pss_stream), + pPipelineStateSubobjectStream: &mut pss_stream as *mut _ as _, + }; + device2.CreatePipelineState( + &pss_desc, + &d3d12::ID3D12PipelineState::uuidof(), + pipeline.mut_void(), + ) + } else { + hr + } + } else { + self.raw.clone().CreateGraphicsPipelineState( + &pso_desc, + &d3d12::ID3D12PipelineState::uuidof(), + pipeline.mut_void(), + ) + }; + + let destroy_shader = |shader: ShaderBc| { + if let ShaderBc::Owned(bc) = shader { + bc.destroy(); + } + }; + + destroy_shader(vs); + destroy_shader(ps); + destroy_shader(gs); + destroy_shader(hs); + destroy_shader(ds); + + if winerror::SUCCEEDED(hr) { + let mut baked_states = desc.baked_states.clone(); + if !desc.depth_stencil.depth_bounds { + baked_states.depth_bounds = None; + } + + Ok(r::GraphicsPipeline { + raw: pipeline, + signature: desc.layout.raw, + num_parameter_slots: desc.layout.num_parameter_slots, + topology, + constants: desc.layout.constants.clone(), + vertex_bindings, + baked_states, + }) + } else { + Err(pso::CreationError::Other) + } + } + + unsafe fn create_compute_pipeline<'a>( + &self, + desc: &pso::ComputePipelineDesc<'a, B>, + _cache: Option<&()>, + ) -> Result { + let (cs, cs_destroy) = + Self::extract_entry_point(pso::Stage::Compute, &desc.shader, desc.layout) + .map_err(|err| pso::CreationError::Shader(err))?; + + let (pipeline, hr) = self.raw.create_compute_pipeline_state( + desc.layout.raw, + native::Shader::from_blob(cs), + 0, + native::CachedPSO::null(), + native::PipelineStateFlags::empty(), + ); + + if cs_destroy { + cs.destroy(); + } + + if winerror::SUCCEEDED(hr) { + Ok(r::ComputePipeline { + raw: pipeline, + signature: desc.layout.raw, + num_parameter_slots: desc.layout.num_parameter_slots, + constants: desc.layout.constants.clone(), + }) + } else { + Err(pso::CreationError::Other) + } + } + + unsafe fn create_framebuffer( + &self, + _renderpass: &r::RenderPass, + attachments: I, + extent: image::Extent, + ) -> Result + where + I: IntoIterator, + I::Item: Borrow, + { + Ok(r::Framebuffer { + attachments: attachments.into_iter().map(|att| *att.borrow()).collect(), + layers: extent.depth as _, + }) + } + + unsafe fn create_shader_module( 
+ &self, + raw_data: &[u32], + ) -> Result { + Ok(r::ShaderModule::Spirv(raw_data.into())) + } + + unsafe fn create_buffer( + &self, + mut size: u64, + usage: buffer::Usage, + ) -> Result { + if usage.contains(buffer::Usage::UNIFORM) { + + + + size = (size + 255) & !255; + } + if usage.contains(buffer::Usage::TRANSFER_DST) { + + size = size.max(4); + } + + let type_mask_shift = if self.private_caps.heterogeneous_resource_heaps { + MEM_TYPE_UNIVERSAL_SHIFT + } else { + MEM_TYPE_BUFFER_SHIFT + }; + + let requirements = memory::Requirements { + size, + alignment: d3d12::D3D12_DEFAULT_RESOURCE_PLACEMENT_ALIGNMENT as u64, + type_mask: MEM_TYPE_MASK << type_mask_shift, + }; + + Ok(r::Buffer::Unbound(r::BufferUnbound { + requirements, + usage, + })) + } + + unsafe fn get_buffer_requirements(&self, buffer: &r::Buffer) -> Requirements { + match buffer { + r::Buffer::Unbound(b) => b.requirements, + r::Buffer::Bound(b) => b.requirements, + } + } + + unsafe fn bind_buffer_memory( + &self, + memory: &r::Memory, + offset: u64, + buffer: &mut r::Buffer, + ) -> Result<(), d::BindError> { + let buffer_unbound = *buffer.expect_unbound(); + if buffer_unbound.requirements.type_mask & (1 << memory.type_id) == 0 { + error!( + "Bind memory failure: supported mask 0x{:x}, given id {}", + buffer_unbound.requirements.type_mask, memory.type_id + ); + return Err(d::BindError::WrongMemory); + } + if offset + buffer_unbound.requirements.size > memory.size { + return Err(d::BindError::OutOfBounds); + } + + let mut resource = native::Resource::null(); + let desc = d3d12::D3D12_RESOURCE_DESC { + Dimension: d3d12::D3D12_RESOURCE_DIMENSION_BUFFER, + Alignment: 0, + Width: buffer_unbound.requirements.size, + Height: 1, + DepthOrArraySize: 1, + MipLevels: 1, + Format: dxgiformat::DXGI_FORMAT_UNKNOWN, + SampleDesc: dxgitype::DXGI_SAMPLE_DESC { + Count: 1, + Quality: 0, + }, + Layout: d3d12::D3D12_TEXTURE_LAYOUT_ROW_MAJOR, + Flags: conv::map_buffer_flags(buffer_unbound.usage), + }; + + assert_eq!( + winerror::S_OK, + self.raw.clone().CreatePlacedResource( + memory.heap.as_mut_ptr(), + offset, + &desc, + d3d12::D3D12_RESOURCE_STATE_COMMON, + ptr::null(), + &d3d12::ID3D12Resource::uuidof(), + resource.mut_void(), + ) + ); + + let clear_uav = if buffer_unbound.usage.contains(buffer::Usage::TRANSFER_DST) { + let handle = self.srv_uav_pool.lock().unwrap().alloc_handle(); + let mut view_desc = d3d12::D3D12_UNORDERED_ACCESS_VIEW_DESC { + Format: dxgiformat::DXGI_FORMAT_R32_TYPELESS, + ViewDimension: d3d12::D3D12_UAV_DIMENSION_BUFFER, + u: mem::zeroed(), + }; + + *view_desc.u.Buffer_mut() = d3d12::D3D12_BUFFER_UAV { + FirstElement: 0, + NumElements: (buffer_unbound.requirements.size / 4) as _, + StructureByteStride: 0, + CounterOffsetInBytes: 0, + Flags: d3d12::D3D12_BUFFER_UAV_FLAG_RAW, + }; + + self.raw.CreateUnorderedAccessView( + resource.as_mut_ptr(), + ptr::null_mut(), + &view_desc, + handle, + ); + Some(handle) + } else { + None + }; + + *buffer = r::Buffer::Bound(r::BufferBound { + resource, + requirements: buffer_unbound.requirements, + clear_uav, + }); + + Ok(()) + } + + unsafe fn create_buffer_view>( + &self, + buffer: &r::Buffer, + format: Option, + range: R, + ) -> Result { + let buffer = buffer.expect_bound(); + let buffer_features = { + let idx = format.map(|fmt| fmt as usize).unwrap_or(0); + self.format_properties.get(idx).properties.buffer_features + }; + let (format, format_desc) = match format.and_then(conv::map_format) { + Some(fmt) => (fmt, format.unwrap().surface_desc()), + None => return 
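The size adjustment in create_buffer reflects D3D12's rule that constant-buffer views are sized in multiples of 256 bytes; the patch writes the round-up as (size + 255) & !255. The same arithmetic, generalized to any power-of-two alignment (hypothetical helper, not the patch's code):

// Round `size` up to the next multiple of a power-of-two `alignment`.
fn align_up(size: u64, alignment: u64) -> u64 {
    debug_assert!(alignment.is_power_of_two());
    (size + alignment - 1) & !(alignment - 1)
}

fn main() {
    assert_eq!(align_up(300, 256), 512);
    assert_eq!(align_up(256, 256), 256);
    // The patch's specialized 256-byte form agrees:
    assert_eq!((300u64 + 255) & !255, 512);
}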
Err(buffer::ViewCreationError::UnsupportedFormat { format }), + }; + + let start = *range.start().unwrap_or(&0); + let end = *range.end().unwrap_or(&(buffer.requirements.size as _)); + + let bytes_per_texel = (format_desc.bits / 8) as u64; + + assert_eq!(start % bytes_per_texel, 0); + let first_element = start / bytes_per_texel; + let num_elements = (end - start) / bytes_per_texel; + + let handle_srv = if buffer_features.contains(format::BufferFeature::UNIFORM_TEXEL) { + let mut desc = d3d12::D3D12_SHADER_RESOURCE_VIEW_DESC { + Format: format, + ViewDimension: d3d12::D3D12_SRV_DIMENSION_BUFFER, + Shader4ComponentMapping: IDENTITY_MAPPING, + u: mem::zeroed(), + }; + + *desc.u.Buffer_mut() = d3d12::D3D12_BUFFER_SRV { + FirstElement: first_element, + NumElements: num_elements as _, + StructureByteStride: bytes_per_texel as _, + Flags: d3d12::D3D12_BUFFER_SRV_FLAG_NONE, + }; + + let handle = self.srv_uav_pool.lock().unwrap().alloc_handle(); + self.raw + .clone() + .CreateShaderResourceView(buffer.resource.as_mut_ptr(), &desc, handle); + handle + } else { + d3d12::D3D12_CPU_DESCRIPTOR_HANDLE { ptr: 0 } + }; + + let handle_uav = if buffer_features.intersects( + format::BufferFeature::STORAGE_TEXEL | format::BufferFeature::STORAGE_TEXEL_ATOMIC, + ) { + let mut desc = d3d12::D3D12_UNORDERED_ACCESS_VIEW_DESC { + Format: format, + ViewDimension: d3d12::D3D12_UAV_DIMENSION_BUFFER, + u: mem::zeroed(), + }; + + *desc.u.Buffer_mut() = d3d12::D3D12_BUFFER_UAV { + FirstElement: first_element, + NumElements: num_elements as _, + StructureByteStride: bytes_per_texel as _, + Flags: d3d12::D3D12_BUFFER_UAV_FLAG_NONE, + CounterOffsetInBytes: 0, + }; + + let handle = self.srv_uav_pool.lock().unwrap().alloc_handle(); + self.raw.clone().CreateUnorderedAccessView( + buffer.resource.as_mut_ptr(), + ptr::null_mut(), + &desc, + handle, + ); + handle + } else { + d3d12::D3D12_CPU_DESCRIPTOR_HANDLE { ptr: 0 } + }; + + return Ok(r::BufferView { + handle_srv, + handle_uav, + }); + } + + unsafe fn create_image( + &self, + kind: image::Kind, + mip_levels: image::Level, + format: format::Format, + tiling: image::Tiling, + usage: image::Usage, + view_caps: image::ViewCapabilities, + ) -> Result { + assert!(mip_levels <= kind.num_levels()); + + let base_format = format.base_format(); + let format_desc = base_format.0.desc(); + let bytes_per_block = (format_desc.bits / 8) as _; + let block_dim = format_desc.dim; + let extent = kind.extent(); + + let format_info = self.format_properties.get(format as usize); + let (layout, features) = match tiling { + image::Tiling::Optimal => ( + d3d12::D3D12_TEXTURE_LAYOUT_UNKNOWN, + format_info.properties.optimal_tiling, + ), + image::Tiling::Linear => ( + d3d12::D3D12_TEXTURE_LAYOUT_ROW_MAJOR, + format_info.properties.linear_tiling, + ), + }; + if format_info.sample_count_mask & kind.num_samples() == 0 { + return Err(image::CreationError::Samples(kind.num_samples())); + } + + let desc = d3d12::D3D12_RESOURCE_DESC { + Dimension: match kind { + image::Kind::D1(..) => d3d12::D3D12_RESOURCE_DIMENSION_TEXTURE1D, + image::Kind::D2(..) => d3d12::D3D12_RESOURCE_DIMENSION_TEXTURE2D, + image::Kind::D3(..) 
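create_buffer_view converts the requested byte range into whole texel elements using the format's bytes-per-texel; the start offset must be texel-aligned, as asserted above. The conversion restated standalone (hypothetical helper):

// Byte range -> (first_element, num_elements) for a texel buffer view.
fn element_range(start: u64, end: u64, bytes_per_texel: u64) -> (u64, u64) {
    assert_eq!(start % bytes_per_texel, 0);
    (start / bytes_per_texel, (end - start) / bytes_per_texel)
}

fn main() {
    // A 64-byte window over a 4-byte-per-texel format covers 16 elements
    // starting at element 4.
    assert_eq!(element_range(16, 80, 4), (4, 16));
}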
=> d3d12::D3D12_RESOURCE_DIMENSION_TEXTURE3D, + }, + Alignment: 0, + Width: extent.width as _, + Height: extent.height as _, + DepthOrArraySize: if extent.depth > 1 { + extent.depth as _ + } else { + kind.num_layers() as _ + }, + MipLevels: mip_levels as _, + Format: match conv::map_surface_type(base_format.0) { + Some(format) => format, + None => return Err(image::CreationError::Format(format)), + }, + SampleDesc: dxgitype::DXGI_SAMPLE_DESC { + Count: kind.num_samples() as _, + Quality: 0, + }, + Layout: layout, + Flags: conv::map_image_flags(usage, features), + }; + + let alloc_info = self.raw.clone().GetResourceAllocationInfo(0, 1, &desc); + + + let target_usage = image::Usage::COLOR_ATTACHMENT + | image::Usage::DEPTH_STENCIL_ATTACHMENT + | image::Usage::TRANSFER_DST; + + let type_mask_shift = if self.private_caps.heterogeneous_resource_heaps { + MEM_TYPE_UNIVERSAL_SHIFT + } else if usage.intersects(target_usage) { + MEM_TYPE_TARGET_SHIFT + } else { + MEM_TYPE_IMAGE_SHIFT + }; + + Ok(r::Image::Unbound(r::ImageUnbound { + view_format: conv::map_format(format), + dsv_format: conv::map_format_dsv(base_format.0), + desc, + requirements: memory::Requirements { + size: alloc_info.SizeInBytes, + alignment: alloc_info.Alignment, + type_mask: MEM_TYPE_MASK << type_mask_shift, + }, + format, + kind, + usage, + tiling, + view_caps, + bytes_per_block, + block_dim, + })) + } + + unsafe fn get_image_requirements(&self, image: &r::Image) -> Requirements { + match image { + r::Image::Bound(i) => i.requirements, + r::Image::Unbound(i) => i.requirements, + } + } + + unsafe fn get_image_subresource_footprint( + &self, + image: &r::Image, + sub: image::Subresource, + ) -> image::SubresourceFootprint { + let mut num_rows = 0; + let mut total_bytes = 0; + let _desc = match image { + r::Image::Bound(i) => i.descriptor, + r::Image::Unbound(i) => i.desc, + }; + let footprint = { + let mut footprint = mem::zeroed(); + self.raw.GetCopyableFootprints( + image.get_desc(), + image.calc_subresource(sub.level as _, sub.layer as _, 0), + 1, + 0, + &mut footprint, + &mut num_rows, + ptr::null_mut(), + &mut total_bytes, + ); + footprint + }; + + let depth_pitch = (footprint.Footprint.RowPitch * num_rows) as buffer::Offset; + let array_pitch = footprint.Footprint.Depth as buffer::Offset * depth_pitch; + image::SubresourceFootprint { + slice: footprint.Offset .. 
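get_image_subresource_footprint derives its pitches from GetCopyableFootprints: one depth slice spans row_pitch * rows bytes, and one array layer spans depth such slices. The arithmetic in isolation (hypothetical helper, not the patch's code):

// Compute (depth_pitch, array_pitch) from a copyable footprint.
fn pitches(row_pitch: u64, rows: u64, depth: u64) -> (u64, u64) {
    let depth_pitch = row_pitch * rows;
    (depth_pitch, depth * depth_pitch)
}

fn main() {
    let (depth_pitch, array_pitch) = pitches(256, 128, 4);
    assert_eq!(depth_pitch, 32_768);
    assert_eq!(array_pitch, 131_072);
}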
footprint.Offset + total_bytes, + row_pitch: footprint.Footprint.RowPitch as _, + depth_pitch, + array_pitch, + } + } + + unsafe fn bind_image_memory( + &self, + memory: &r::Memory, + offset: u64, + image: &mut r::Image, + ) -> Result<(), d::BindError> { + use self::image::Usage; + + let image_unbound = *image.expect_unbound(); + if image_unbound.requirements.type_mask & (1 << memory.type_id) == 0 { + error!( + "Bind memory failure: supported mask 0x{:x}, given id {}", + image_unbound.requirements.type_mask, memory.type_id + ); + return Err(d::BindError::WrongMemory); + } + if offset + image_unbound.requirements.size > memory.size { + return Err(d::BindError::OutOfBounds); + } + + let mut resource = native::Resource::null(); + let num_layers = image_unbound.kind.num_layers(); + + assert_eq!( + winerror::S_OK, + self.raw.clone().CreatePlacedResource( + memory.heap.as_mut_ptr(), + offset, + &image_unbound.desc, + d3d12::D3D12_RESOURCE_STATE_COMMON, + ptr::null(), + &d3d12::ID3D12Resource::uuidof(), + resource.mut_void(), + ) + ); + + let info = ViewInfo { + resource, + kind: image_unbound.kind, + caps: image::ViewCapabilities::empty(), + view_kind: match image_unbound.kind { + image::Kind::D1(..) => image::ViewKind::D1Array, + image::Kind::D2(..) => image::ViewKind::D2Array, + image::Kind::D3(..) => image::ViewKind::D3, + }, + format: image_unbound.desc.Format, + component_mapping: IDENTITY_MAPPING, + range: image::SubresourceRange { + aspects: Aspects::empty(), + levels: 0 .. 0, + layers: 0 .. 0, + }, + }; + + + + + let format_properties = self + .format_properties + .get(image_unbound.format as usize) + .properties; + let props = match image_unbound.tiling { + image::Tiling::Optimal => format_properties.optimal_tiling, + image::Tiling::Linear => format_properties.linear_tiling, + }; + let can_clear_color = image_unbound + .usage + .intersects(Usage::TRANSFER_DST | Usage::COLOR_ATTACHMENT) + && props.contains(format::ImageFeature::COLOR_ATTACHMENT); + let can_clear_depth = image_unbound + .usage + .intersects(Usage::TRANSFER_DST | Usage::DEPTH_STENCIL_ATTACHMENT) + && props.contains(format::ImageFeature::DEPTH_STENCIL_ATTACHMENT); + let aspects = image_unbound.format.surface_desc().aspects; + + *image = r::Image::Bound(r::ImageBound { + resource: resource, + place: r::Place::Heap { + raw: memory.heap.clone(), + offset, + }, + surface_type: image_unbound.format.base_format().0, + kind: image_unbound.kind, + usage: image_unbound.usage, + default_view_format: image_unbound.view_format, + view_caps: image_unbound.view_caps, + descriptor: image_unbound.desc, + bytes_per_block: image_unbound.bytes_per_block, + block_dim: image_unbound.block_dim, + clear_cv: if aspects.contains(Aspects::COLOR) && can_clear_color { + let format = image_unbound.view_format.unwrap(); + (0 .. num_layers) + .map(|layer| { + self.view_image_as_render_target(ViewInfo { + format, + range: image::SubresourceRange { + aspects: Aspects::COLOR, + levels: 0 .. 1, + layers: layer .. layer + 1, + }, + ..info.clone() + }) + .unwrap() + }) + .collect() + } else { + Vec::new() + }, + clear_dv: if aspects.contains(Aspects::DEPTH) && can_clear_depth { + let format = image_unbound.dsv_format.unwrap(); + (0 .. num_layers) + .map(|layer| { + self.view_image_as_depth_stencil(ViewInfo { + format, + range: image::SubresourceRange { + aspects: Aspects::DEPTH, + levels: 0 .. 1, + layers: layer .. 
layer + 1, + }, + ..info.clone() + }) + .unwrap() + }) + .collect() + } else { + Vec::new() + }, + clear_sv: if aspects.contains(Aspects::STENCIL) && can_clear_depth { + let format = image_unbound.dsv_format.unwrap(); + (0 .. num_layers) + .map(|layer| { + self.view_image_as_depth_stencil(ViewInfo { + format, + range: image::SubresourceRange { + aspects: Aspects::STENCIL, + levels: 0 .. 1, + layers: layer .. layer + 1, + }, + ..info.clone() + }) + .unwrap() + }) + .collect() + } else { + Vec::new() + }, + requirements: image_unbound.requirements, + }); + + Ok(()) + } + + unsafe fn create_image_view( + &self, + image: &r::Image, + view_kind: image::ViewKind, + format: format::Format, + swizzle: format::Swizzle, + range: image::SubresourceRange, + ) -> Result { + let image = image.expect_bound(); + let is_array = image.kind.num_layers() > 1; + let mip_levels = (range.levels.start, range.levels.end); + let layers = (range.layers.start, range.layers.end); + + let info = ViewInfo { + resource: image.resource, + kind: image.kind, + caps: image.view_caps, + + view_kind: if is_array && view_kind == image::ViewKind::D2 { + image::ViewKind::D2Array + } else if is_array && view_kind == image::ViewKind::D1 { + image::ViewKind::D1Array + } else { + view_kind + }, + format: conv::map_format(format).ok_or(image::ViewError::BadFormat(format))?, + component_mapping: conv::map_swizzle(swizzle), + range, + }; + + + + + Ok(r::ImageView { + resource: image.resource, + handle_srv: if image + .usage + .intersects(image::Usage::SAMPLED | image::Usage::INPUT_ATTACHMENT) + { + self.view_image_as_shader_resource(info.clone()).ok() + } else { + None + }, + handle_rtv: if image.usage.contains(image::Usage::COLOR_ATTACHMENT) { + self.view_image_as_render_target(info.clone()).ok() + } else { + None + }, + handle_uav: if image.usage.contains(image::Usage::STORAGE) { + self.view_image_as_storage(info.clone()).ok() + } else { + None + }, + handle_dsv: if image.usage.contains(image::Usage::DEPTH_STENCIL_ATTACHMENT) { + match conv::map_format_dsv(format.base_format().0) { + Some(dsv_format) => self + .view_image_as_depth_stencil(ViewInfo { + format: dsv_format, + ..info + }) + .ok(), + None => None, + } + } else { + None + }, + dxgi_format: image.default_view_format.unwrap(), + num_levels: image.descriptor.MipLevels as image::Level, + mip_levels, + layers, + kind: info.kind, + }) + } + + unsafe fn create_sampler( + &self, + info: &image::SamplerDesc, + ) -> Result { + assert!(info.normalized); + let handle = self.sampler_pool.lock().unwrap().alloc_handle(); + + let op = match info.comparison { + Some(_) => d3d12::D3D12_FILTER_REDUCTION_TYPE_COMPARISON, + None => d3d12::D3D12_FILTER_REDUCTION_TYPE_STANDARD, + }; + self.raw.create_sampler( + handle, + conv::map_filter( + info.mag_filter, + info.min_filter, + info.mip_filter, + op, + info.anisotropic, + ), + [ + conv::map_wrap(info.wrap_mode.0), + conv::map_wrap(info.wrap_mode.1), + conv::map_wrap(info.wrap_mode.2), + ], + info.lod_bias.0, + match info.anisotropic { + image::Anisotropic::On(max) => max as _, + image::Anisotropic::Off => 0, + }, + conv::map_comparison(info.comparison.unwrap_or(pso::Comparison::Always)), + info.border.into(), + info.lod_range.start.0 .. 
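create_image_view only materializes the descriptor kinds the image's usage permits: an SRV for sampled or input-attachment use, an RTV for color targets, a UAV for storage, and a DSV for depth-stencil. A standalone sketch of that selection (stand-in types, not the patch's ImageView):

#[derive(Debug, PartialEq)]
struct ViewHandles { srv: bool, rtv: bool, uav: bool, dsv: bool }

// Each flag mirrors one usage test in create_image_view above.
fn plan_views(sampled_or_input: bool, color: bool, storage: bool, depth_stencil: bool) -> ViewHandles {
    ViewHandles {
        srv: sampled_or_input,
        rtv: color,
        uav: storage,
        dsv: depth_stencil,
    }
}

fn main() {
    let v = plan_views(true, false, true, false);
    assert_eq!(v, ViewHandles { srv: true, rtv: false, uav: true, dsv: false });
}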
info.lod_range.end.0, + ); + + Ok(r::Sampler { handle }) + } + + unsafe fn create_descriptor_pool( + &self, + max_sets: usize, + descriptor_pools: I, + _flags: pso::DescriptorPoolCreateFlags, + ) -> Result + where + I: IntoIterator, + I::Item: Borrow, + { + + + + + let mut num_srv_cbv_uav = 0; + let mut num_samplers = 0; + + let descriptor_pools = descriptor_pools + .into_iter() + .map(|desc| *desc.borrow()) + .collect::>(); + + info!("create_descriptor_pool with {} max sets", max_sets); + for desc in &descriptor_pools { + let content = r::DescriptorContent::from(desc.ty); + debug!("\tcontent {:?}", content); + if content.contains(r::DescriptorContent::CBV) { + num_srv_cbv_uav += desc.count; + } + if content.contains(r::DescriptorContent::SRV) { + num_srv_cbv_uav += desc.count; + } + if content.contains(r::DescriptorContent::UAV) { + num_srv_cbv_uav += desc.count; + } + if content.contains(r::DescriptorContent::SAMPLER) { + num_samplers += desc.count; + } + } + + info!( + "total {} views and {} samplers", + num_srv_cbv_uav, num_samplers + ); + + + let heap_srv_cbv_uav = { + let mut heap_srv_cbv_uav = self.heap_srv_cbv_uav.lock().unwrap(); + + let range = match num_srv_cbv_uav { + 0 => 0 .. 0, + _ => heap_srv_cbv_uav + .range_allocator + .allocate_range(num_srv_cbv_uav as _) + .unwrap(), + }; + + r::DescriptorHeapSlice { + heap: heap_srv_cbv_uav.raw.clone(), + handle_size: heap_srv_cbv_uav.handle_size as _, + range_allocator: RangeAllocator::new(range), + start: heap_srv_cbv_uav.start, + } + }; + + let heap_sampler = { + let mut heap_sampler = self.heap_sampler.lock().unwrap(); + + let range = match num_samplers { + 0 => 0 .. 0, + _ => heap_sampler + .range_allocator + .allocate_range(num_samplers as _) + .unwrap(), + }; + + r::DescriptorHeapSlice { + heap: heap_sampler.raw.clone(), + handle_size: heap_sampler.handle_size as _, + range_allocator: RangeAllocator::new(range), + start: heap_sampler.start, + } + }; + + Ok(r::DescriptorPool { + heap_srv_cbv_uav, + heap_sampler, + pools: descriptor_pools, + max_size: max_sets as _, + }) + } + + unsafe fn create_descriptor_set_layout( + &self, + bindings: I, + _immutable_samplers: J, + ) -> Result + where + I: IntoIterator, + I::Item: Borrow, + J: IntoIterator, + J::Item: Borrow, + { + Ok(r::DescriptorSetLayout { + bindings: bindings.into_iter().map(|b| b.borrow().clone()).collect(), + }) + } + + unsafe fn write_descriptor_sets<'a, I, J>(&self, write_iter: I) + where + I: IntoIterator>, + J: IntoIterator, + J::Item: Borrow>, + { + let mut descriptor_update_pools = self.descriptor_update_pools.lock().unwrap(); + let mut update_pool_index = 0; + + + let mut dst_samplers = Vec::new(); + let mut dst_views = Vec::new(); + let mut src_samplers = Vec::new(); + let mut src_views = Vec::new(); + let mut num_samplers = Vec::new(); + let mut num_views = Vec::new(); + debug!("write_descriptor_sets"); + + for write in write_iter { + let mut offset = write.array_offset as u64; + let mut target_binding = write.binding as usize; + let mut bind_info = &write.set.binding_infos[target_binding]; + debug!( + "\t{:?} binding {} array offset {}", + bind_info, target_binding, offset + ); + for descriptor in write.descriptors { + + while offset >= bind_info.count { + assert_eq!(offset, bind_info.count); + target_binding += 1; + bind_info = &write.set.binding_infos[target_binding]; + offset = 0; + } + let mut src_cbv = None; + let mut src_srv = None; + let mut src_uav = None; + let mut src_sampler = None; + + match *descriptor.borrow() { + pso::Descriptor::Buffer(buffer, 
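In write_descriptor_sets a single write may run past the end of one binding into the next; the while loop above advances the cursor exactly when the array offset reaches the current binding's count. The walk restated in isolation (hypothetical helper):

// Advance (binding, offset) across per-binding descriptor counts, mirroring
// the cursor logic in write_descriptor_sets.
fn advance(counts: &[u64], mut binding: usize, mut offset: u64) -> (usize, u64) {
    while offset >= counts[binding] {
        // The patch asserts the write never skips descriptors.
        assert_eq!(offset, counts[binding]);
        binding += 1;
        offset = 0;
    }
    (binding, offset)
}

fn main() {
    // Binding 0 holds two descriptors; the third write lands in binding 1.
    assert_eq!(advance(&[2, 4], 0, 2), (1, 0));
}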
ref range) => { + let buffer = buffer.expect_bound(); + + if bind_info.content.is_dynamic() { + + let buffer_offset = range.start.unwrap_or(0); + let buffer_address = (*buffer.resource).GetGPUVirtualAddress(); + + + let dynamic_descriptors = &mut *bind_info.dynamic_descriptors.get(); + dynamic_descriptors[offset as usize].gpu_buffer_location = buffer_address + buffer_offset; + } else { + + if update_pool_index == descriptor_update_pools.len() { + let max_size = 1u64 << 12; + descriptor_update_pools.push(descriptors_cpu::HeapLinear::new( + self.raw, + native::DescriptorHeapType::CbvSrvUav, + max_size as _, + )); + } + let mut heap = descriptor_update_pools.last_mut().unwrap(); + let start = range.start.unwrap_or(0); + let end = range.end.unwrap_or(buffer.requirements.size as _); + + if bind_info.content.contains(r::DescriptorContent::CBV) { + + + + + + let size = ((end - start) + 255) & !255; + let desc = d3d12::D3D12_CONSTANT_BUFFER_VIEW_DESC { + BufferLocation: (*buffer.resource).GetGPUVirtualAddress() + start, + SizeInBytes: size as _, + }; + let handle = heap.alloc_handle(); + self.raw.CreateConstantBufferView(&desc, handle); + src_cbv = Some(handle); + } + if bind_info.content.contains(r::DescriptorContent::SRV) { + assert_eq!((end - start) % 4, 0); + let mut desc = d3d12::D3D12_SHADER_RESOURCE_VIEW_DESC { + Format: dxgiformat::DXGI_FORMAT_R32_TYPELESS, + Shader4ComponentMapping: IDENTITY_MAPPING, + ViewDimension: d3d12::D3D12_SRV_DIMENSION_BUFFER, + u: mem::zeroed(), + }; + *desc.u.Buffer_mut() = d3d12::D3D12_BUFFER_SRV { + FirstElement: start as _, + NumElements: ((end - start) / 4) as _, + StructureByteStride: 0, + Flags: d3d12::D3D12_BUFFER_SRV_FLAG_RAW, + }; + let handle = heap.alloc_handle(); + self.raw.CreateShaderResourceView( + buffer.resource.as_mut_ptr(), + &desc, + handle, + ); + src_srv = Some(handle); + } + if bind_info.content.contains(r::DescriptorContent::UAV) { + assert_eq!((end - start) % 4, 0); + let mut desc = d3d12::D3D12_UNORDERED_ACCESS_VIEW_DESC { + Format: dxgiformat::DXGI_FORMAT_R32_TYPELESS, + ViewDimension: d3d12::D3D12_UAV_DIMENSION_BUFFER, + u: mem::zeroed(), + }; + *desc.u.Buffer_mut() = d3d12::D3D12_BUFFER_UAV { + FirstElement: start as _, + NumElements: ((end - start) / 4) as _, + StructureByteStride: 0, + CounterOffsetInBytes: 0, + Flags: d3d12::D3D12_BUFFER_UAV_FLAG_RAW, + }; + if heap.is_full() { + + update_pool_index += 1; + let max_size = 1u64 << 12; + descriptor_update_pools.push(descriptors_cpu::HeapLinear::new( + self.raw, + native::DescriptorHeapType::CbvSrvUav, + max_size as _, + )); + heap = descriptor_update_pools.last_mut().unwrap(); + } + let handle = heap.alloc_handle(); + self.raw.CreateUnorderedAccessView( + buffer.resource.as_mut_ptr(), + ptr::null_mut(), + &desc, + handle, + ); + src_uav = Some(handle); + } + + + if heap.is_full() { + + update_pool_index += 1; + } + } + } + pso::Descriptor::Image(image, _layout) => { + if bind_info.content.contains(r::DescriptorContent::SRV) { + src_srv = image.handle_srv; + } + if bind_info.content.contains(r::DescriptorContent::UAV) { + src_uav = image.handle_uav; + } + } + pso::Descriptor::CombinedImageSampler(image, _layout, sampler) => { + src_srv = image.handle_srv; + src_sampler = Some(sampler.handle); + } + pso::Descriptor::Sampler(sampler) => { + src_sampler = Some(sampler.handle); + } + pso::Descriptor::UniformTexelBuffer(buffer_view) => { + let handle = buffer_view.handle_srv; + src_srv = Some(handle); + if handle.ptr == 0 { + error!("SRV handle of the uniform texel buffer is zero (not 
supported by specified format)."); + } + } + pso::Descriptor::StorageTexelBuffer(buffer_view) => { + if bind_info.content.contains(r::DescriptorContent::SRV) { + let handle = buffer_view.handle_srv; + src_srv = Some(handle); + if handle.ptr == 0 { + error!("SRV handle of the storage texel buffer is zero (not supported by specified format)."); + } + } + if bind_info.content.contains(r::DescriptorContent::UAV) { + let handle = buffer_view.handle_uav; + src_uav = Some(handle); + if handle.ptr == 0 { + error!("UAV handle of the storage texel buffer is zero (not supported by specified format)."); + } + } + } + } + + if let Some(handle) = src_cbv { + trace!("\tcbv offset {}", offset); + src_views.push(handle); + dst_views.push(bind_info.view_range.as_ref().unwrap().at(offset)); + num_views.push(1); + } + if let Some(handle) = src_srv { + trace!("\tsrv offset {}", offset); + src_views.push(handle); + dst_views.push(bind_info.view_range.as_ref().unwrap().at(offset)); + num_views.push(1); + } + if let Some(handle) = src_uav { + let uav_offset = if bind_info.content.contains(r::DescriptorContent::SRV) { + bind_info.count + offset + } else { + offset + }; + trace!("\tuav offset {}", uav_offset); + src_views.push(handle); + dst_views.push(bind_info.view_range.as_ref().unwrap().at(uav_offset)); + num_views.push(1); + } + if let Some(handle) = src_sampler { + trace!("\tsampler offset {}", offset); + src_samplers.push(handle); + dst_samplers.push(bind_info.sampler_range.as_ref().unwrap().at(offset)); + num_samplers.push(1); + } + + offset += 1; + } + } + + if !num_views.is_empty() { + self.raw.clone().CopyDescriptors( + dst_views.len() as u32, + dst_views.as_ptr(), + num_views.as_ptr(), + src_views.len() as u32, + src_views.as_ptr(), + num_views.as_ptr(), + d3d12::D3D12_DESCRIPTOR_HEAP_TYPE_CBV_SRV_UAV, + ); + } + if !num_samplers.is_empty() { + self.raw.clone().CopyDescriptors( + dst_samplers.len() as u32, + dst_samplers.as_ptr(), + num_samplers.as_ptr(), + src_samplers.len() as u32, + src_samplers.as_ptr(), + num_samplers.as_ptr(), + d3d12::D3D12_DESCRIPTOR_HEAP_TYPE_SAMPLER, + ); + } + + + for buffer_desc_pool in descriptor_update_pools.iter_mut() { + buffer_desc_pool.clear(); + } + } + + unsafe fn copy_descriptor_sets<'a, I>(&self, copy_iter: I) + where + I: IntoIterator, + I::Item: Borrow>, + { + let mut dst_samplers = Vec::new(); + let mut dst_views = Vec::new(); + let mut src_samplers = Vec::new(); + let mut src_views = Vec::new(); + let mut num_samplers = Vec::new(); + let mut num_views = Vec::new(); + + for copy_wrap in copy_iter { + let copy = copy_wrap.borrow(); + let src_info = ©.src_set.binding_infos[copy.src_binding as usize]; + let dst_info = ©.dst_set.binding_infos[copy.dst_binding as usize]; + if let (Some(src_range), Some(dst_range)) = + (src_info.view_range.as_ref(), dst_info.view_range.as_ref()) + { + assert!(copy.src_array_offset + copy.count <= src_range.count as usize); + assert!(copy.dst_array_offset + copy.count <= dst_range.count as usize); + src_views.push(src_range.at(copy.src_array_offset as _)); + dst_views.push(dst_range.at(copy.dst_array_offset as _)); + num_views.push(copy.count as u32); + + if (src_info.content & dst_info.content) + .contains(r::DescriptorContent::SRV | r::DescriptorContent::UAV) + { + assert!( + src_info.count as usize + copy.src_array_offset + copy.count + <= src_range.count as usize + ); + assert!( + dst_info.count as usize + copy.dst_array_offset + copy.count + <= dst_range.count as usize + ); + src_views.push(src_range.at(src_info.count + 
copy.src_array_offset as u64)); + dst_views.push(dst_range.at(dst_info.count + copy.dst_array_offset as u64)); + num_views.push(copy.count as u32); + } + } + if let (Some(src_range), Some(dst_range)) = ( + src_info.sampler_range.as_ref(), + dst_info.sampler_range.as_ref(), + ) { + assert!(copy.src_array_offset + copy.count <= src_range.count as usize); + assert!(copy.dst_array_offset + copy.count <= dst_range.count as usize); + src_samplers.push(src_range.at(copy.src_array_offset as _)); + dst_samplers.push(dst_range.at(copy.dst_array_offset as _)); + num_samplers.push(copy.count as u32); + } + } + + if !num_views.is_empty() { + self.raw.clone().CopyDescriptors( + dst_views.len() as u32, + dst_views.as_ptr(), + num_views.as_ptr(), + src_views.len() as u32, + src_views.as_ptr(), + num_views.as_ptr(), + d3d12::D3D12_DESCRIPTOR_HEAP_TYPE_CBV_SRV_UAV, + ); + } + if !num_samplers.is_empty() { + self.raw.clone().CopyDescriptors( + dst_samplers.len() as u32, + dst_samplers.as_ptr(), + num_samplers.as_ptr(), + src_samplers.len() as u32, + src_samplers.as_ptr(), + num_samplers.as_ptr(), + d3d12::D3D12_DESCRIPTOR_HEAP_TYPE_SAMPLER, + ); + } + } + + unsafe fn map_memory(&self, memory: &r::Memory, range: R) -> Result<*mut u8, d::MapError> + where + R: RangeArg, + { + if let Some(mem) = memory.resource { + let start = range.start().unwrap_or(&0); + let end = range.end().unwrap_or(&memory.size); + assert!(start <= end); + + let mut ptr = ptr::null_mut(); + assert_eq!( + winerror::S_OK, + (*mem).Map(0, &d3d12::D3D12_RANGE { Begin: 0, End: 0 }, &mut ptr) + ); + ptr = ptr.offset(*start as _); + Ok(ptr as *mut _) + } else { + panic!("Memory not created with a memory type exposing `CPU_VISIBLE`.") + } + } + + unsafe fn unmap_memory(&self, memory: &r::Memory) { + if let Some(mem) = memory.resource { + (*mem).Unmap(0, &d3d12::D3D12_RANGE { Begin: 0, End: 0 }); + } + } + + unsafe fn flush_mapped_memory_ranges<'a, I, R>(&self, ranges: I) -> Result<(), d::OutOfMemory> + where + I: IntoIterator, + I::Item: Borrow<(&'a r::Memory, R)>, + R: RangeArg, + { + for range in ranges { + let &(ref memory, ref range) = range.borrow(); + if let Some(mem) = memory.resource { + + + assert_eq!( + winerror::S_OK, + (*mem).Map(0, &d3d12::D3D12_RANGE { Begin: 0, End: 0 }, ptr::null_mut()) + ); + + let start = *range.start().unwrap_or(&0); + let end = *range.end().unwrap_or(&memory.size); + + (*mem).Unmap( + 0, + &d3d12::D3D12_RANGE { + Begin: start as _, + End: end as _, + }, + ); + } + } + + Ok(()) + } + + unsafe fn invalidate_mapped_memory_ranges<'a, I, R>( + &self, + ranges: I, + ) -> Result<(), d::OutOfMemory> + where + I: IntoIterator, + I::Item: Borrow<(&'a r::Memory, R)>, + R: RangeArg, + { + for range in ranges { + let &(ref memory, ref range) = range.borrow(); + if let Some(mem) = memory.resource { + let start = *range.start().unwrap_or(&0); + let end = *range.end().unwrap_or(&memory.size); + + + + assert_eq!( + winerror::S_OK, + (*mem).Map( + 0, + &d3d12::D3D12_RANGE { + Begin: start as _, + End: end as _, + }, + ptr::null_mut(), + ) + ); + + (*mem).Unmap(0, &d3d12::D3D12_RANGE { Begin: 0, End: 0 }); + } + } + + Ok(()) + } + + fn create_semaphore(&self) -> Result { + let fence = self.create_fence(false)?; + Ok(r::Semaphore { raw: fence.raw }) + } + + fn create_fence(&self, signalled: bool) -> Result { + Ok(r::Fence { + raw: self.create_raw_fence(signalled), + }) + } + + unsafe fn reset_fence(&self, fence: &r::Fence) -> Result<(), d::OutOfMemory> { + assert_eq!(winerror::S_OK, fence.raw.signal(0)); + Ok(()) + } + + unsafe 
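flush_mapped_memory_ranges and invalidate_mapped_memory_ranges lean on D3D12's Map/Unmap range semantics: the range handed to Unmap marks bytes the CPU wrote (flush), while the range handed to Map marks bytes the CPU will read (invalidate). Resolving a partially specified range against the allocation size looks like this (hypothetical helper, not the patch's RangeArg machinery):

// Resolve optional range bounds against the full memory size.
fn resolve(start: Option<u64>, end: Option<u64>, size: u64) -> (u64, u64) {
    let s = start.unwrap_or(0);
    let e = end.unwrap_or(size);
    assert!(s <= e && e <= size);
    (s, e)
}

fn main() {
    assert_eq!(resolve(None, None, 1024), (0, 1024));
    assert_eq!(resolve(Some(256), None, 1024), (256, 1024));
}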
fn wait_for_fences<I>( + &self, + fences: I, + wait: d::WaitFor, + timeout_ns: u64, + ) -> Result<bool, d::OomOrDeviceLost> + where + I: IntoIterator, + I::Item: Borrow<r::Fence>, + { + let fences = fences.into_iter().collect::<Vec<_>>(); + let mut events = self.events.lock().unwrap(); + for _ in events.len() .. fences.len() { + events.push(native::Event::create(false, false)); + } + + for (&event, fence) in events.iter().zip(fences.iter()) { + synchapi::ResetEvent(event.0); + assert_eq!( + winerror::S_OK, + fence.borrow().raw.set_event_on_completion(event, 1) + ); + } + + let all = match wait { + d::WaitFor::Any => FALSE, + d::WaitFor::All => TRUE, + }; + + let hr = { + + + let timeout_ms = { + if timeout_ns > (u32::max_value() as u64) * 1_000_000 { + u32::max_value() + } else { + ((timeout_ns + 999_999) / 1_000_000) as u32 + } + }; + + synchapi::WaitForMultipleObjects( + fences.len() as u32, + events.as_ptr() as *const _, + all, + timeout_ms, + ) + }; + + const WAIT_OBJECT_LAST: u32 = winbase::WAIT_OBJECT_0 + winnt::MAXIMUM_WAIT_OBJECTS; + const WAIT_ABANDONED_LAST: u32 = winbase::WAIT_ABANDONED_0 + winnt::MAXIMUM_WAIT_OBJECTS; + match hr { + winbase::WAIT_OBJECT_0 ..= WAIT_OBJECT_LAST => Ok(true), + winbase::WAIT_ABANDONED_0 ..= WAIT_ABANDONED_LAST => Ok(true), //TODO? + winerror::WAIT_TIMEOUT => Ok(false), + _ => panic!("Unexpected wait status 0x{:X}", hr), + } + } + + unsafe fn get_fence_status(&self, fence: &r::Fence) -> Result<bool, d::DeviceLost> { + match fence.raw.GetCompletedValue() { + 0 => Ok(false), + 1 => Ok(true), + _ => Err(d::DeviceLost), + } + } + + fn create_event(&self) -> Result<(), d::OutOfMemory> { + unimplemented!() + } + + unsafe fn get_event_status(&self, _event: &()) -> Result<bool, d::OomOrDeviceLost> { + unimplemented!() + } + + unsafe fn set_event(&self, _event: &()) -> Result<(), d::OutOfMemory> { + unimplemented!() + } + + unsafe fn reset_event(&self, _event: &()) -> Result<(), d::OutOfMemory> { + unimplemented!() + } + + unsafe fn free_memory(&self, memory: r::Memory) { + memory.heap.destroy(); + if let Some(buffer) = memory.resource { + buffer.destroy(); + } + } + + unsafe fn create_query_pool( + &self, + query_ty: query::Type, + count: query::Id, + ) -> Result<r::QueryPool, query::CreationError> { + let heap_ty = match query_ty { + query::Type::Occlusion => native::QueryHeapType::Occlusion, + query::Type::PipelineStatistics(_) => native::QueryHeapType::PipelineStatistics, + query::Type::Timestamp => native::QueryHeapType::Timestamp, + }; + + let (query_heap, hr) = self.raw.create_query_heap(heap_ty, count, 0); + assert_eq!(winerror::S_OK, hr); + + Ok(r::QueryPool { + raw: query_heap, + ty: heap_ty, + }) + } + + unsafe fn destroy_query_pool(&self, pool: r::QueryPool) { + pool.raw.destroy(); + } + + unsafe fn get_query_pool_results( + &self, + _pool: &r::QueryPool, + _queries: Range<query::Id>, + _data: &mut [u8], + _stride: buffer::Offset, + _flags: query::ResultFlags, + ) -> Result<bool, d::OomOrDeviceLost> { + unimplemented!() + } + + unsafe fn destroy_shader_module(&self, shader_lib: r::ShaderModule) { + if let r::ShaderModule::Compiled(shaders) = shader_lib { + for (_, blob) in shaders { + blob.destroy(); + } + } + } + + unsafe fn destroy_render_pass(&self, _rp: r::RenderPass) { + + } + + unsafe fn destroy_pipeline_layout(&self, layout: r::PipelineLayout) { + layout.raw.destroy(); + } + + unsafe fn destroy_graphics_pipeline(&self, pipeline: r::GraphicsPipeline) { + pipeline.raw.destroy(); + } + + unsafe fn destroy_compute_pipeline(&self, pipeline: r::ComputePipeline) { + pipeline.raw.destroy(); + } + + unsafe fn destroy_framebuffer(&self, _fb: r::Framebuffer) { + + } + + unsafe fn destroy_buffer(&self, buffer: 
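wait_for_fences converts its nanosecond timeout for WaitForMultipleObjects, which takes milliseconds as a u32: round up to the next whole millisecond and saturate at u32::MAX. The conversion restated standalone:

// Nanoseconds -> milliseconds, rounding up and saturating, as above.
fn timeout_ns_to_ms(timeout_ns: u64) -> u32 {
    if timeout_ns > (u32::max_value() as u64) * 1_000_000 {
        u32::max_value()
    } else {
        ((timeout_ns + 999_999) / 1_000_000) as u32
    }
}

fn main() {
    assert_eq!(timeout_ns_to_ms(0), 0);
    assert_eq!(timeout_ns_to_ms(1), 1); // rounds up to a full millisecond
    assert_eq!(timeout_ns_to_ms(u64::max_value()), u32::max_value());
}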
r::Buffer) { + match buffer { + r::Buffer::Bound(buffer) => { + buffer.resource.destroy(); + } + r::Buffer::Unbound(_) => {} + } + } + + unsafe fn destroy_buffer_view(&self, _view: r::BufferView) { + + } + + unsafe fn destroy_image(&self, image: r::Image) { + match image { + r::Image::Bound(image) => { + image.resource.destroy(); + } + r::Image::Unbound(_) => {} + } + } + + unsafe fn destroy_image_view(&self, _view: r::ImageView) { + + } + + unsafe fn destroy_sampler(&self, _sampler: r::Sampler) { + + } + + unsafe fn destroy_descriptor_pool(&self, _pool: r::DescriptorPool) { + + + } + + unsafe fn destroy_descriptor_set_layout(&self, _layout: r::DescriptorSetLayout) { + + } + + unsafe fn destroy_fence(&self, fence: r::Fence) { + fence.raw.destroy(); + } + + unsafe fn destroy_semaphore(&self, semaphore: r::Semaphore) { + semaphore.raw.destroy(); + } + + unsafe fn destroy_event(&self, _event: ()) { + unimplemented!() + } + + unsafe fn create_swapchain( + &self, + surface: &mut Surface, + config: w::SwapchainConfig, + old_swapchain: Option, + ) -> Result<(Swapchain, Vec), w::CreationError> { + if let Some(old_swapchain) = old_swapchain { + self.destroy_swapchain(old_swapchain); + } + + let (swap_chain3, non_srgb_format) = + self.create_swapchain_impl(&config, surface.wnd_handle, surface.factory)?; + + let swapchain = self.wrap_swapchain(swap_chain3, &config); + + let mut images = Vec::with_capacity(config.image_count as usize); + for (i, &resource) in swapchain.resources.iter().enumerate() { + let rtv_handle = swapchain.rtv_heap.at(i as _, 0).cpu; + let surface_type = config.format.base_format().0; + let format_desc = surface_type.desc(); + + let bytes_per_block = (format_desc.bits / 8) as _; + let block_dim = format_desc.dim; + let kind = image::Kind::D2(config.extent.width, config.extent.height, 1, 1); + + images.push(r::Image::Bound(r::ImageBound { + resource, + place: r::Place::SwapChain, + surface_type, + kind, + usage: config.image_usage, + default_view_format: Some(non_srgb_format), + view_caps: image::ViewCapabilities::empty(), + descriptor: d3d12::D3D12_RESOURCE_DESC { + Dimension: d3d12::D3D12_RESOURCE_DIMENSION_TEXTURE2D, + Alignment: 0, + Width: config.extent.width as _, + Height: config.extent.height as _, + DepthOrArraySize: 1, + MipLevels: 1, + Format: non_srgb_format, + SampleDesc: dxgitype::DXGI_SAMPLE_DESC { + Count: 1, + Quality: 0, + }, + Layout: d3d12::D3D12_TEXTURE_LAYOUT_UNKNOWN, + Flags: 0, + }, + bytes_per_block, + block_dim, + clear_cv: vec![rtv_handle], + clear_dv: Vec::new(), + clear_sv: Vec::new(), + + requirements: memory::Requirements { + alignment: 1, + size: 1, + type_mask: MEM_TYPE_MASK, + }, + })); + } + + Ok((swapchain, images)) + } + + unsafe fn destroy_swapchain(&self, swapchain: Swapchain) { + let inner = swapchain.release_resources(); + inner.destroy(); + } + + fn wait_idle(&self) -> Result<(), d::OutOfMemory> { + for queue in &self.queues { + queue.wait_idle()?; + } + Ok(()) + } + + unsafe fn set_image_name(&self, _image: &mut r::Image, _name: &str) { + + } + + unsafe fn set_buffer_name(&self, _buffer: &mut r::Buffer, _name: &str) { + + } + + unsafe fn set_command_buffer_name( + &self, + _command_buffer: &mut cmd::CommandBuffer, + _name: &str + ) { + + } + + unsafe fn set_semaphore_name(&self, _semaphore: &mut r::Semaphore, _name: &str) { + + } + + unsafe fn set_fence_name(&self, _fence: &mut r::Fence, _name: &str) { + + } + + unsafe fn set_framebuffer_name(&self, _framebuffer: &mut r::Framebuffer, _name: &str) { + + } + + unsafe fn 
set_render_pass_name(&self, _render_pass: &mut r::RenderPass, _name: &str) { + + } + + unsafe fn set_descriptor_set_name(&self, _descriptor_set: &mut r::DescriptorSet, _name: &str) { + + } + + unsafe fn set_descriptor_set_layout_name( + &self, + _descriptor_set_layout: &mut r::DescriptorSetLayout, + _name: &str, + ) { + + } +} + +#[test] +fn test_identity_mapping() { + assert_eq!(conv::map_swizzle(format::Swizzle::NO), IDENTITY_MAPPING); +} diff --git a/third_party/rust/gfx-backend-dx12/src/internal.rs b/third_party/rust/gfx-backend-dx12/src/internal.rs new file mode 100644 index 000000000000..91ecf9331529 --- /dev/null +++ b/third_party/rust/gfx-backend-dx12/src/internal.rs @@ -0,0 +1,239 @@ +use auxil::FastHashMap; +use std::ffi::CStr; +use std::sync::{Arc, Mutex}; +use std::{mem, ptr}; + +use d3d12; +use winapi::shared::minwindef::{FALSE, TRUE}; +use winapi::shared::{dxgiformat, dxgitype, winerror}; +use winapi::um::d3d12::*; +use winapi::Interface; + +use native; + +#[derive(Clone, Debug)] +pub struct BlitPipe { + pub pipeline: native::PipelineState, + pub signature: native::RootSignature, +} + +impl BlitPipe { + pub unsafe fn destroy(&self) { + self.pipeline.destroy(); + self.signature.destroy(); + } +} + + +#[repr(C)] +#[derive(Debug)] +pub struct BlitData { + pub src_offset: [f32; 2], + pub src_extent: [f32; 2], + pub layer: f32, + pub level: f32, +} + +pub type BlitKey = (dxgiformat::DXGI_FORMAT, d3d12::D3D12_FILTER); +type BlitMap = FastHashMap; + +#[derive(Debug)] +pub(crate) struct ServicePipes { + pub(crate) device: native::Device, + library: Arc, + blits_2d_color: Mutex, +} + +impl ServicePipes { + pub fn new(device: native::Device, library: Arc) -> Self { + ServicePipes { + device, + library, + blits_2d_color: Mutex::new(FastHashMap::default()), + } + } + + pub unsafe fn destroy(&self) { + let blits = self.blits_2d_color.lock().unwrap(); + for (_, pipe) in &*blits { + pipe.destroy(); + } + } + + pub fn get_blit_2d_color(&self, key: BlitKey) -> BlitPipe { + let mut blits = self.blits_2d_color.lock().unwrap(); + blits + .entry(key) + .or_insert_with(|| self.create_blit_2d_color(key)) + .clone() + } + + fn create_blit_2d_color(&self, (dst_format, filter): BlitKey) -> BlitPipe { + let descriptor_range = [native::DescriptorRange::new( + native::DescriptorRangeType::SRV, + 1, + native::Binding { + register: 0, + space: 0, + }, + 0, + )]; + + let root_parameters = [ + native::RootParameter::descriptor_table( + native::ShaderVisibility::All, + &descriptor_range, + ), + native::RootParameter::constants( + native::ShaderVisibility::All, + native::Binding { + register: 0, + space: 0, + }, + (mem::size_of::() / 4) as _, + ), + ]; + + let static_samplers = [native::StaticSampler::new( + native::ShaderVisibility::PS, + native::Binding { + register: 0, + space: 0, + }, + filter, + [ + d3d12::D3D12_TEXTURE_ADDRESS_MODE_CLAMP, + d3d12::D3D12_TEXTURE_ADDRESS_MODE_CLAMP, + d3d12::D3D12_TEXTURE_ADDRESS_MODE_CLAMP, + ], + 0.0, + 0, + d3d12::D3D12_COMPARISON_FUNC_ALWAYS, + native::StaticBorderColor::TransparentBlack, + 0.0 .. 
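get_blit_2d_color memoizes one pipeline per (destination format, filter) key behind a mutex, so each blit variant is compiled at most once and cloned out afterwards. The caching pattern in isolation (a String stands in for BlitPipe; hypothetical sketch, not the patch's code):

use std::collections::HashMap;
use std::sync::Mutex;

struct Cache {
    pipes: Mutex<HashMap<u32, String>>,
}

impl Cache {
    // Lock, then create the entry on first use and clone it out.
    fn get(&self, key: u32) -> String {
        self.pipes
            .lock()
            .unwrap()
            .entry(key)
            .or_insert_with(|| format!("pipeline-{}", key))
            .clone()
    }
}

fn main() {
    let cache = Cache { pipes: Mutex::new(HashMap::new()) };
    assert_eq!(cache.get(7), "pipeline-7");
    assert_eq!(cache.get(7), "pipeline-7"); // second hit reuses the entry
}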
d3d12::D3D12_FLOAT32_MAX, + )]; + + let (signature_raw, error) = match self.library.serialize_root_signature( + native::RootSignatureVersion::V1_0, + &root_parameters, + &static_samplers, + native::RootSignatureFlags::empty(), + ) { + Ok((pair, hr)) if winerror::SUCCEEDED(hr) => pair, + Ok((_, hr)) => panic!("Can't serialize internal root signature: {:?}", hr), + Err(e) => panic!("Can't find serialization function: {:?}", e), + }; + + if !error.is_null() { + error!("D3D12SerializeRootSignature error: {:?}", unsafe { + error.as_c_str().to_str().unwrap() + }); + unsafe { error.destroy() }; + } + + let (signature, _hr) = self.device.create_root_signature(signature_raw, 0); + unsafe { signature_raw.destroy(); } + + let shader_src = include_bytes!("../shaders/blit.hlsl"); + + let ((vs, _), _hr_vs) = native::Shader::compile( + shader_src, + unsafe { CStr::from_bytes_with_nul_unchecked(b"vs_5_0\0") }, + unsafe { CStr::from_bytes_with_nul_unchecked(b"vs_blit_2d\0") }, + native::ShaderCompileFlags::empty(), + ); + let ((ps, _), _hr_ps) = native::Shader::compile( + shader_src, + unsafe { CStr::from_bytes_with_nul_unchecked(b"ps_5_0\0") }, + unsafe { CStr::from_bytes_with_nul_unchecked(b"ps_blit_2d\0") }, + native::ShaderCompileFlags::empty(), + ); + + let mut rtvs = [dxgiformat::DXGI_FORMAT_UNKNOWN; 8]; + rtvs[0] = dst_format; + + let dummy_target = D3D12_RENDER_TARGET_BLEND_DESC { + BlendEnable: FALSE, + LogicOpEnable: FALSE, + SrcBlend: D3D12_BLEND_ZERO, + DestBlend: D3D12_BLEND_ZERO, + BlendOp: D3D12_BLEND_OP_ADD, + SrcBlendAlpha: D3D12_BLEND_ZERO, + DestBlendAlpha: D3D12_BLEND_ZERO, + BlendOpAlpha: D3D12_BLEND_OP_ADD, + LogicOp: D3D12_LOGIC_OP_CLEAR, + RenderTargetWriteMask: D3D12_COLOR_WRITE_ENABLE_ALL as _, + }; + let render_targets = [dummy_target; 8]; + + let pso_desc = d3d12::D3D12_GRAPHICS_PIPELINE_STATE_DESC { + pRootSignature: signature.as_mut_ptr(), + VS: *native::Shader::from_blob(vs), + PS: *native::Shader::from_blob(ps), + GS: *native::Shader::null(), + DS: *native::Shader::null(), + HS: *native::Shader::null(), + StreamOutput: d3d12::D3D12_STREAM_OUTPUT_DESC { + pSODeclaration: ptr::null(), + NumEntries: 0, + pBufferStrides: ptr::null(), + NumStrides: 0, + RasterizedStream: 0, + }, + BlendState: d3d12::D3D12_BLEND_DESC { + AlphaToCoverageEnable: FALSE, + IndependentBlendEnable: FALSE, + RenderTarget: render_targets, + }, + SampleMask: !0, + RasterizerState: D3D12_RASTERIZER_DESC { + FillMode: D3D12_FILL_MODE_SOLID, + CullMode: D3D12_CULL_MODE_NONE, + FrontCounterClockwise: TRUE, + DepthBias: 0, + DepthBiasClamp: 0.0, + SlopeScaledDepthBias: 0.0, + DepthClipEnable: FALSE, + MultisampleEnable: FALSE, + ForcedSampleCount: 0, + AntialiasedLineEnable: FALSE, + ConservativeRaster: D3D12_CONSERVATIVE_RASTERIZATION_MODE_OFF, + }, + DepthStencilState: unsafe { mem::zeroed() }, + InputLayout: d3d12::D3D12_INPUT_LAYOUT_DESC { + pInputElementDescs: ptr::null(), + NumElements: 0, + }, + IBStripCutValue: d3d12::D3D12_INDEX_BUFFER_STRIP_CUT_VALUE_DISABLED, + PrimitiveTopologyType: D3D12_PRIMITIVE_TOPOLOGY_TYPE_TRIANGLE, + NumRenderTargets: 1, + RTVFormats: rtvs, + DSVFormat: dxgiformat::DXGI_FORMAT_UNKNOWN, + SampleDesc: dxgitype::DXGI_SAMPLE_DESC { + Count: 1, + Quality: 0, + }, + NodeMask: 0, + CachedPSO: d3d12::D3D12_CACHED_PIPELINE_STATE { + pCachedBlob: ptr::null(), + CachedBlobSizeInBytes: 0, + }, + Flags: d3d12::D3D12_PIPELINE_STATE_FLAG_NONE, + }; + + let mut pipeline = native::PipelineState::null(); + let hr = unsafe { + self.device.CreateGraphicsPipelineState( + &pso_desc, + 
&d3d12::ID3D12PipelineState::uuidof(), + pipeline.mut_void(), + ) + }; + assert_eq!(hr, winerror::S_OK); + + BlitPipe { + pipeline, + signature, + } + } +} diff --git a/third_party/rust/gfx-backend-dx12/src/lib.rs b/third_party/rust/gfx-backend-dx12/src/lib.rs new file mode 100644 index 000000000000..6cbf9132c30a --- /dev/null +++ b/third_party/rust/gfx-backend-dx12/src/lib.rs @@ -0,0 +1,1343 @@ +extern crate gfx_hal as hal; +extern crate auxil; +extern crate range_alloc; +#[macro_use] +extern crate bitflags; +extern crate d3d12 as native; +#[macro_use] +extern crate log; +extern crate smallvec; +extern crate spirv_cross; +extern crate winapi; + +mod command; +mod conv; +mod descriptors_cpu; +mod device; +mod internal; +mod pool; +mod resource; +mod root_constants; +mod window; + +use hal::pso::PipelineStage; +use hal::{adapter, format as f, image, memory, queue as q, Features, Limits}; + +use winapi::shared::minwindef::TRUE; +use winapi::shared::{dxgi, dxgi1_2, dxgi1_4, dxgi1_6, winerror}; +use winapi::um::{d3d12, d3d12sdklayers, handleapi, synchapi, winbase}; +use winapi::Interface; + +use std::borrow::Borrow; +use std::ffi::OsString; +use std::os::windows::ffi::OsStringExt; +use std::sync::{Arc, Mutex}; +use std::{fmt, mem}; + +use descriptors_cpu::DescriptorCpuPool; + +#[derive(Debug)] +pub(crate) struct HeapProperties { + pub page_property: d3d12::D3D12_CPU_PAGE_PROPERTY, + pub memory_pool: d3d12::D3D12_MEMORY_POOL, +} + + + +const MAX_VERTEX_BUFFERS: usize = 16; + +const NUM_HEAP_PROPERTIES: usize = 3; + + + + + +enum MemoryGroup { + Universal = 0, + BufferOnly, + ImageOnly, + TargetOnly, + + NumGroups, +} + + +static HEAPS_NUMA: [HeapProperties; NUM_HEAP_PROPERTIES] = [ + + HeapProperties { + page_property: d3d12::D3D12_CPU_PAGE_PROPERTY_NOT_AVAILABLE, + memory_pool: d3d12::D3D12_MEMORY_POOL_L1, + }, + + HeapProperties { + page_property: d3d12::D3D12_CPU_PAGE_PROPERTY_WRITE_COMBINE, + memory_pool: d3d12::D3D12_MEMORY_POOL_L0, + }, + + HeapProperties { + page_property: d3d12::D3D12_CPU_PAGE_PROPERTY_WRITE_BACK, + memory_pool: d3d12::D3D12_MEMORY_POOL_L0, + }, +]; + +static HEAPS_UMA: [HeapProperties; NUM_HEAP_PROPERTIES] = [ + + HeapProperties { + page_property: d3d12::D3D12_CPU_PAGE_PROPERTY_NOT_AVAILABLE, + memory_pool: d3d12::D3D12_MEMORY_POOL_L0, + }, + + HeapProperties { + page_property: d3d12::D3D12_CPU_PAGE_PROPERTY_WRITE_COMBINE, + memory_pool: d3d12::D3D12_MEMORY_POOL_L0, + }, + + HeapProperties { + page_property: d3d12::D3D12_CPU_PAGE_PROPERTY_WRITE_BACK, + memory_pool: d3d12::D3D12_MEMORY_POOL_L0, + }, +]; + +static HEAPS_CCUMA: [HeapProperties; NUM_HEAP_PROPERTIES] = [ + + HeapProperties { + page_property: d3d12::D3D12_CPU_PAGE_PROPERTY_NOT_AVAILABLE, + memory_pool: d3d12::D3D12_MEMORY_POOL_L0, + }, + + HeapProperties { + page_property: d3d12::D3D12_CPU_PAGE_PROPERTY_WRITE_BACK, + memory_pool: d3d12::D3D12_MEMORY_POOL_L0, + }, + + HeapProperties { + page_property: d3d12::D3D12_CPU_PAGE_PROPERTY_WRITE_BACK, + memory_pool: d3d12::D3D12_MEMORY_POOL_L0, + }, +]; + +#[derive(Debug, Copy, Clone)] +pub enum QueueFamily { + + + + Present, + Normal(q::QueueType), +} + +const MAX_QUEUES: usize = 16; + +impl q::QueueFamily for QueueFamily { + fn queue_type(&self) -> q::QueueType { + match *self { + QueueFamily::Present => q::QueueType::General, + QueueFamily::Normal(ty) => ty, + } + } + fn max_queues(&self) -> usize { + match *self { + QueueFamily::Present => 1, + QueueFamily::Normal(_) => MAX_QUEUES, + } + } + fn id(&self) -> q::QueueFamilyId { + + q::QueueFamilyId(match *self { + 
QueueFamily::Present => 0, + QueueFamily::Normal(q::QueueType::General) => 1, + QueueFamily::Normal(q::QueueType::Compute) => 2, + QueueFamily::Normal(q::QueueType::Transfer) => 3, + _ => unreachable!(), + }) + } +} + +impl QueueFamily { + fn native_type(&self) -> native::CmdListType { + use hal::queue::QueueFamily as _; + use native::CmdListType as Clt; + + let queue_type = self.queue_type(); + match queue_type { + q::QueueType::General | q::QueueType::Graphics => Clt::Direct, + q::QueueType::Compute => Clt::Compute, + q::QueueType::Transfer => Clt::Copy, + } + } +} + +static QUEUE_FAMILIES: [QueueFamily; 4] = [ + QueueFamily::Present, + QueueFamily::Normal(q::QueueType::General), + QueueFamily::Normal(q::QueueType::Compute), + QueueFamily::Normal(q::QueueType::Transfer), +]; + +pub struct PhysicalDevice { + library: Arc, + adapter: native::WeakPtr, + features: Features, + limits: Limits, + format_properties: Arc, + private_caps: Capabilities, + heap_properties: &'static [HeapProperties; NUM_HEAP_PROPERTIES], + memory_properties: adapter::MemoryProperties, + + + is_open: Arc>, +} + +impl fmt::Debug for PhysicalDevice { + fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result { + fmt.write_str("PhysicalDevice") + } +} + +unsafe impl Send for PhysicalDevice {} +unsafe impl Sync for PhysicalDevice {} + +impl adapter::PhysicalDevice for PhysicalDevice { + unsafe fn open( + &self, + families: &[(&QueueFamily, &[q::QueuePriority])], + requested_features: Features, + ) -> Result, hal::device::CreationError> { + let lock = self.is_open.try_lock(); + let mut open_guard = match lock { + Ok(inner) => inner, + Err(_) => return Err(hal::device::CreationError::TooManyObjects), + }; + + if !self.features().contains(requested_features) { + return Err(hal::device::CreationError::MissingFeature); + } + + let device_raw = match self.library.create_device( + self.adapter, + native::FeatureLevel::L11_0, + ) { + Ok((device, hr)) if winerror::SUCCEEDED(hr) => device, + Ok((_, hr)) => { + error!("error on device creation: {:x}", hr); + return Err(hal::device::CreationError::InitializationFailed); + } + Err(e) => panic!("device creation failed with {:?}", e), + }; + + + let (present_queue, hr_queue) = device_raw.create_command_queue( + QueueFamily::Present.native_type(), + native::Priority::Normal, + native::CommandQueueFlags::empty(), + 0, + ); + if !winerror::SUCCEEDED(hr_queue) { + error!("error on queue creation: {:x}", hr_queue); + } + + let mut device = Device::new(device_raw, &self, present_queue); + + let queue_groups = families + .into_iter() + .map(|&(&family, priorities)| { + use hal::queue::QueueFamily as _; + let mut group = q::QueueGroup::new(family.id()); + + let create_idle_event = || native::Event::create(true, false); + + match family { + QueueFamily::Present => { + + + + let queue = CommandQueue { + raw: device.present_queue.clone(), + idle_fence: device.create_raw_fence(false), + idle_event: create_idle_event(), + }; + device.append_queue(queue.clone()); + group.add_queue(queue); + } + QueueFamily::Normal(_) => { + let list_type = family.native_type(); + for _ in 0 .. 
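native_type above collapses gfx-hal's queue types onto D3D12's three command-list types: General and Graphics map to Direct, Compute to Compute, and Transfer to Copy. Restated as a standalone table (stand-in enums, not the patch's types):

#[derive(Clone, Copy, Debug, PartialEq)]
enum QueueType { General, Graphics, Compute, Transfer }

#[derive(Debug, PartialEq)]
enum CmdListType { Direct, Compute, Copy }

// Mirror of QueueFamily::native_type's mapping.
fn native_type(ty: QueueType) -> CmdListType {
    match ty {
        QueueType::General | QueueType::Graphics => CmdListType::Direct,
        QueueType::Compute => CmdListType::Compute,
        QueueType::Transfer => CmdListType::Copy,
    }
}

fn main() {
    assert_eq!(native_type(QueueType::General), CmdListType::Direct);
    assert_eq!(native_type(QueueType::Transfer), CmdListType::Copy);
}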
priorities.len() { + let (queue, hr_queue) = device_raw.create_command_queue( + list_type, + native::Priority::Normal, + native::CommandQueueFlags::empty(), + 0, + ); + + if winerror::SUCCEEDED(hr_queue) { + let queue = CommandQueue { + raw: queue, + idle_fence: device.create_raw_fence(false), + idle_event: create_idle_event(), + }; + device.append_queue(queue.clone()); + group.add_queue(queue); + } else { + error!("error on queue creation: {:x}", hr_queue); + } + } + } + } + + group + }) + .collect(); + + *open_guard = true; + + Ok(adapter::Gpu { + device, + queue_groups, + }) + } + + fn format_properties(&self, fmt: Option) -> f::Properties { + let idx = fmt.map(|fmt| fmt as usize).unwrap_or(0); + self.format_properties.get(idx).properties + } + + fn image_format_properties( + &self, + format: f::Format, + dimensions: u8, + tiling: image::Tiling, + usage: image::Usage, + view_caps: image::ViewCapabilities, + ) -> Option { + conv::map_format(format)?; + let format_info = self.format_properties.get(format as usize); + + let supported_usage = { + use hal::image::Usage as U; + let props = match tiling { + image::Tiling::Optimal => format_info.properties.optimal_tiling, + image::Tiling::Linear => format_info.properties.linear_tiling, + }; + let mut flags = U::empty(); + + if props.contains(f::ImageFeature::BLIT_SRC) { + flags |= U::TRANSFER_SRC; + } + if props.contains(f::ImageFeature::BLIT_DST) { + flags |= U::TRANSFER_DST; + } + if props.contains(f::ImageFeature::SAMPLED) { + flags |= U::SAMPLED; + } + if props.contains(f::ImageFeature::STORAGE) { + flags |= U::STORAGE; + } + if props.contains(f::ImageFeature::COLOR_ATTACHMENT) { + flags |= U::COLOR_ATTACHMENT; + } + if props.contains(f::ImageFeature::DEPTH_STENCIL_ATTACHMENT) { + flags |= U::DEPTH_STENCIL_ATTACHMENT; + } + flags + }; + if !supported_usage.contains(usage) { + return None; + } + + let max_resource_size = + (d3d12::D3D12_REQ_RESOURCE_SIZE_IN_MEGABYTES_EXPRESSION_A_TERM as usize) << 20; + Some(match tiling { + image::Tiling::Optimal => image::FormatProperties { + max_extent: match dimensions { + 1 => image::Extent { + width: d3d12::D3D12_REQ_TEXTURE1D_U_DIMENSION, + height: 1, + depth: 1, + }, + 2 => image::Extent { + width: d3d12::D3D12_REQ_TEXTURE2D_U_OR_V_DIMENSION, + height: d3d12::D3D12_REQ_TEXTURE2D_U_OR_V_DIMENSION, + depth: 1, + }, + 3 => image::Extent { + width: d3d12::D3D12_REQ_TEXTURE3D_U_V_OR_W_DIMENSION, + height: d3d12::D3D12_REQ_TEXTURE3D_U_V_OR_W_DIMENSION, + depth: d3d12::D3D12_REQ_TEXTURE3D_U_V_OR_W_DIMENSION, + }, + _ => return None, + }, + max_levels: d3d12::D3D12_REQ_MIP_LEVELS as _, + max_layers: match dimensions { + 1 => d3d12::D3D12_REQ_TEXTURE1D_ARRAY_AXIS_DIMENSION as _, + 2 => d3d12::D3D12_REQ_TEXTURE2D_ARRAY_AXIS_DIMENSION as _, + _ => return None, + }, + sample_count_mask: if dimensions == 2 + && !view_caps.contains(image::ViewCapabilities::KIND_CUBE) + && !usage.contains(image::Usage::STORAGE) + { + format_info.sample_count_mask + } else { + 0x1 + }, + max_resource_size, + }, + image::Tiling::Linear => image::FormatProperties { + max_extent: match dimensions { + 2 => image::Extent { + width: d3d12::D3D12_REQ_TEXTURE2D_U_OR_V_DIMENSION, + height: d3d12::D3D12_REQ_TEXTURE2D_U_OR_V_DIMENSION, + depth: 1, + }, + _ => return None, + }, + max_levels: 1, + max_layers: 1, + sample_count_mask: 0x1, + max_resource_size, + }, + }) + } + + fn memory_properties(&self) -> adapter::MemoryProperties { + self.memory_properties.clone() + } + + fn features(&self) -> Features { + self.features + } + fn limits(&self) 
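[editor sketch] image_format_properties above first derives the set of image usages a format supports from its feature bits, then rejects any request asking for more. A compact sketch of that gate, with illustrative bit constants standing in for the hal bitflags:

    const F_BLIT_SRC: u32 = 1 << 0;
    const F_BLIT_DST: u32 = 1 << 1;
    const F_SAMPLED: u32 = 1 << 2;

    const U_TRANSFER_SRC: u32 = 1 << 0;
    const U_TRANSFER_DST: u32 = 1 << 1;
    const U_SAMPLED: u32 = 1 << 2;

    fn supported_usage(features: u32) -> u32 {
        let mut usage = 0;
        if features & F_BLIT_SRC != 0 { usage |= U_TRANSFER_SRC; }
        if features & F_BLIT_DST != 0 { usage |= U_TRANSFER_DST; }
        if features & F_SAMPLED != 0 { usage |= U_SAMPLED; }
        usage
    }

    // The request is valid only if every requested bit is supported.
    fn check(features: u32, requested: u32) -> bool {
        supported_usage(features) & requested == requested
    }

For example, check(F_SAMPLED | F_BLIT_SRC, U_SAMPLED) holds, but asking additionally for U_TRANSFER_DST would return false, matching the early None above.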
-> Limits { + self.limits + } +} + +#[derive(Clone)] +pub struct CommandQueue { + pub(crate) raw: native::CommandQueue, + idle_fence: native::Fence, + idle_event: native::Event, +} + +impl fmt::Debug for CommandQueue { + fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result { + fmt.write_str("CommandQueue") + } +} + +impl CommandQueue { + unsafe fn destroy(&self) { + handleapi::CloseHandle(self.idle_event.0); + self.idle_fence.destroy(); + self.raw.destroy(); + } +} + +unsafe impl Send for CommandQueue {} +unsafe impl Sync for CommandQueue {} + +impl q::CommandQueue for CommandQueue { + unsafe fn submit<'a, T, Ic, S, Iw, Is>( + &mut self, + submission: q::Submission, + fence: Option<&resource::Fence>, + ) where + T: 'a + Borrow, + Ic: IntoIterator, + S: 'a + Borrow, + Iw: IntoIterator, + Is: IntoIterator, + { + + + self.idle_fence.signal(0); + synchapi::ResetEvent(self.idle_event.0); + + + let mut lists = submission + .command_buffers + .into_iter() + .map(|buf| buf.borrow().as_raw_list()) + .collect::>(); + self.raw + .ExecuteCommandLists(lists.len() as _, lists.as_mut_ptr()); + + if let Some(fence) = fence { + assert_eq!(winerror::S_OK, self.raw.Signal(fence.raw.as_mut_ptr(), 1)); + } + } + + unsafe fn present<'a, W, Is, S, Iw>( + &mut self, + swapchains: Is, + _wait_semaphores: Iw, + ) -> Result, hal::window::PresentError> + where + W: 'a + Borrow, + Is: IntoIterator, + S: 'a + Borrow, + Iw: IntoIterator, + { + + for (swapchain, _) in swapchains { + swapchain.borrow().inner.Present(1, 0); + } + + Ok(None) + } + + unsafe fn present_surface( + &mut self, + surface: &mut window::Surface, + _image: resource::ImageView, + _wait_semaphore: Option<&resource::Semaphore>, + ) -> Result, hal::window::PresentError> { + surface.present(); + Ok(None) + } + + fn wait_idle(&self) -> Result<(), hal::device::OutOfMemory> { + self.raw.signal(self.idle_fence, 1); + assert_eq!( + winerror::S_OK, + self.idle_fence.set_event_on_completion(self.idle_event, 1) + ); + + unsafe { + synchapi::WaitForSingleObject(self.idle_event.0, winbase::INFINITE); + } + + Ok(()) + } +} + +#[derive(Debug, Clone, Copy)] +enum MemoryArchitecture { + NUMA, + UMA, + CacheCoherentUMA, +} + +#[derive(Debug, Clone, Copy)] +pub struct Capabilities { + heterogeneous_resource_heaps: bool, + memory_architecture: MemoryArchitecture, +} + +#[derive(Clone, Debug)] +struct CmdSignatures { + draw: native::CommandSignature, + draw_indexed: native::CommandSignature, + dispatch: native::CommandSignature, +} + +impl CmdSignatures { + unsafe fn destroy(&self) { + self.draw.destroy(); + self.draw_indexed.destroy(); + self.dispatch.destroy(); + } +} + + +#[derive(Debug)] +struct Shared { + pub signatures: CmdSignatures, + pub service_pipes: internal::ServicePipes, +} + +impl Shared { + unsafe fn destroy(&self) { + self.signatures.destroy(); + self.service_pipes.destroy(); + } +} + +pub struct Device { + raw: native::Device, + library: Arc, + private_caps: Capabilities, + format_properties: Arc, + heap_properties: &'static [HeapProperties], + + rtv_pool: Mutex, + dsv_pool: Mutex, + srv_uav_pool: Mutex, + sampler_pool: Mutex, + descriptor_update_pools: Mutex>, + + heap_srv_cbv_uav: Mutex, + heap_sampler: Mutex, + events: Mutex>, + shared: Arc, + + + present_queue: native::CommandQueue, + + + queues: Vec, + + open: Arc>, +} + +impl fmt::Debug for Device { + fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result { + fmt.write_str("Device") + } +} + +unsafe impl Send for Device {} +unsafe impl Sync for Device {} + +impl Device { + fn new( + device: 
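[editor sketch] wait_idle above blocks the CPU by signalling a fence from the queue, attaching a Win32 event to the target fence value, and waiting on that event. A CPU-only analogue of the rendezvous, assuming std primitives in place of ID3D12Fence and Win32 events:

    use std::sync::{Condvar, Mutex};

    struct Fence {
        value: Mutex<u64>,
        cv: Condvar,
    }

    impl Fence {
        fn new() -> Self {
            Fence { value: Mutex::new(0), cv: Condvar::new() }
        }
        // Queue side: publish a new fence value (raw.signal(idle_fence, 1) above).
        fn signal(&self, v: u64) {
            *self.value.lock().unwrap() = v;
            self.cv.notify_all();
        }
        // Host side: block until the fence reaches v (WaitForSingleObject above).
        fn wait(&self, v: u64) {
            let mut cur = self.value.lock().unwrap();
            while *cur < v {
                cur = self.cv.wait(cur).unwrap();
            }
        }
    }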
native::Device, + physical_device: &PhysicalDevice, + present_queue: native::CommandQueue, + ) -> Self { + + let rtv_pool = DescriptorCpuPool::new(device, native::DescriptorHeapType::Rtv); + let dsv_pool = DescriptorCpuPool::new(device, native::DescriptorHeapType::Dsv); + let srv_uav_pool = DescriptorCpuPool::new(device, native::DescriptorHeapType::CbvSrvUav); + let sampler_pool = DescriptorCpuPool::new(device, native::DescriptorHeapType::Sampler); + + let heap_srv_cbv_uav = Self::create_descriptor_heap_impl( + device, + native::DescriptorHeapType::CbvSrvUav, + true, + 1_000_000, + ); + + let heap_sampler = + Self::create_descriptor_heap_impl(device, native::DescriptorHeapType::Sampler, true, 2_048); + + let draw_signature = Self::create_command_signature(device, device::CommandSignature::Draw); + let draw_indexed_signature = + Self::create_command_signature(device, device::CommandSignature::DrawIndexed); + let dispatch_signature = + Self::create_command_signature(device, device::CommandSignature::Dispatch); + + let signatures = CmdSignatures { + draw: draw_signature, + draw_indexed: draw_indexed_signature, + dispatch: dispatch_signature, + }; + let service_pipes = internal::ServicePipes::new( + device, + Arc::clone(&physical_device.library), + ); + let shared = Shared { + signatures, + service_pipes, + }; + + Device { + raw: device, + library: Arc::clone(&physical_device.library), + private_caps: physical_device.private_caps, + format_properties: physical_device.format_properties.clone(), + heap_properties: physical_device.heap_properties, + rtv_pool: Mutex::new(rtv_pool), + dsv_pool: Mutex::new(dsv_pool), + srv_uav_pool: Mutex::new(srv_uav_pool), + sampler_pool: Mutex::new(sampler_pool), + descriptor_update_pools: Mutex::new(Vec::new()), + heap_srv_cbv_uav: Mutex::new(heap_srv_cbv_uav), + heap_sampler: Mutex::new(heap_sampler), + events: Mutex::new(Vec::new()), + shared: Arc::new(shared), + present_queue, + queues: Vec::new(), + open: physical_device.is_open.clone(), + } + } + + fn append_queue(&mut self, queue: CommandQueue) { + self.queues.push(queue); + } + + + + + pub unsafe fn as_raw(&self) -> *mut d3d12::ID3D12Device { + self.raw.as_mut_ptr() + } +} + +impl Drop for Device { + fn drop(&mut self) { + *self.open.lock().unwrap() = false; + + unsafe { + for queue in &mut self.queues { + queue.destroy(); + } + + self.shared.destroy(); + self.heap_srv_cbv_uav.lock().unwrap().destroy(); + self.heap_sampler.lock().unwrap().destroy(); + self.rtv_pool.lock().unwrap().destroy(); + self.dsv_pool.lock().unwrap().destroy(); + self.srv_uav_pool.lock().unwrap().destroy(); + self.sampler_pool.lock().unwrap().destroy(); + + for pool in &*self.descriptor_update_pools.lock().unwrap() { + pool.destroy(); + } + + + let (debug_device, hr_debug) = self.raw.cast::(); + if winerror::SUCCEEDED(hr_debug) { + debug_device.ReportLiveDeviceObjects(d3d12sdklayers::D3D12_RLDO_DETAIL); + debug_device.destroy(); + } + + self.raw.destroy(); + } + } +} + +#[derive(Debug)] +pub struct Instance { + pub(crate) factory: native::Factory4, + library: Arc, +} + +impl Drop for Instance { + fn drop(&mut self) { + unsafe { + self.factory.destroy(); + } + } +} + +unsafe impl Send for Instance {} +unsafe impl Sync for Instance {} + +impl hal::Instance for Instance { + fn create(_: &str, _: u32) -> Result { + let lib_main = match native::D3D12Lib::new() { + Ok(lib) => lib, + Err(_) => return Err(hal::UnsupportedBackend), + }; + + #[cfg(debug_assertions)] + { + + match lib_main.get_debug_interface() { + Ok((debug_controller, hr)) 
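[editor sketch] PhysicalDevice::open earlier takes the shared is_open mutex with try_lock (mapping contention to CreationError::TooManyObjects), and Device::drop above resets the flag when the logical device dies. A condensed open/close gate in that spirit; the explicit "already open" check is this sketch's assumption, the vendored code tracks it through the flag and guard lifetime:

    use std::sync::{Arc, Mutex};

    struct Gate(Arc<Mutex<bool>>);

    impl Gate {
        fn open(&self) -> Result<(), &'static str> {
            let mut flag = self.0.try_lock().map_err(|_| "too many objects")?;
            if *flag {
                return Err("already open"); // assumption in this sketch
            }
            *flag = true;
            Ok(())
        }
        // Device::drop: allow the adapter to be opened again.
        fn close(&self) {
            *self.0.lock().unwrap() = false;
        }
    }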
if winerror::SUCCEEDED(hr) => { + debug_controller.enable_layer(); + unsafe { debug_controller.Release() }; + } + _ => { + warn!("Unable to get D3D12 debug interface"); + } + } + } + + let lib_dxgi = native::DxgiLib::new().unwrap(); + + + + + let factory_flags = match lib_dxgi.get_debug_interface1() { + Ok((queue, hr)) if winerror::SUCCEEDED(hr) => { + unsafe { queue.destroy() }; + native::FactoryCreationFlags::DEBUG + } + _ => native::FactoryCreationFlags::empty(), + }; + + + let factory = match lib_dxgi.create_factory2(factory_flags) { + Ok((factory, hr)) if winerror::SUCCEEDED(hr) => factory, + Ok((_, hr)) => { + info!("Failed on dxgi factory creation: {:?}", hr); + return Err(hal::UnsupportedBackend) + } + Err(_) => return Err(hal::UnsupportedBackend), + }; + + Ok(Instance { + factory, + library: Arc::new(lib_main), + }) + } + + fn enumerate_adapters(&self) -> Vec> { + use self::memory::Properties; + + + let (use_f6, factory6) = unsafe { + let (f6, hr) = self.factory.cast::(); + if winerror::SUCCEEDED(hr) { + + + f6.destroy(); + (true, f6) + } else { + (false, native::WeakPtr::null()) + } + }; + + + let mut cur_index = 0; + let mut adapters = Vec::new(); + loop { + let adapter = if use_f6 { + let mut adapter2 = native::WeakPtr::::null(); + let hr = unsafe { + factory6.EnumAdapterByGpuPreference( + cur_index, + 2, + &dxgi1_2::IDXGIAdapter2::uuidof(), + adapter2.mut_void() as *mut *mut _, + ) + }; + + if hr == winerror::DXGI_ERROR_NOT_FOUND { + break; + } + + adapter2 + } else { + let mut adapter1 = native::WeakPtr::::null(); + let hr1 = unsafe { + self.factory + .EnumAdapters1(cur_index, adapter1.mut_void() as *mut *mut _) + }; + + if hr1 == winerror::DXGI_ERROR_NOT_FOUND { + break; + } + + let (adapter2, hr2) = unsafe { adapter1.cast::() }; + if !winerror::SUCCEEDED(hr2) { + error!("Failed casting to Adapter2"); + break; + } + + unsafe { + adapter1.destroy(); + } + adapter2 + }; + + cur_index += 1; + + + + let device = match self.library.create_device(adapter, native::FeatureLevel::L11_0) { + Ok((device, hr)) if winerror::SUCCEEDED(hr) => device, + _ => continue, + }; + + + + let mut desc: dxgi1_2::DXGI_ADAPTER_DESC2 = unsafe { mem::zeroed() }; + unsafe { + adapter.GetDesc2(&mut desc); + } + + let device_name = { + let len = desc.Description.iter().take_while(|&&c| c != 0).count(); + let name = ::from_wide(&desc.Description[.. 
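[editor sketch] Both enumeration paths above follow the same DXGI idiom: ask the factory for adapter N, stop on DXGI_ERROR_NOT_FOUND, and silently skip adapters that cannot create a feature-level-11.0 device. The loop shape, abstracted away from winapi:

    struct Adapter { index: u32 }

    // `probe` stands in for EnumAdapters1 + create_device: Err(()) models
    // DXGI_ERROR_NOT_FOUND (no more adapters), Ok(None) models an adapter
    // that exists but fails device creation and is skipped.
    fn enumerate(probe: impl Fn(u32) -> Result<Option<Adapter>, ()>) -> Vec<Adapter> {
        let mut adapters = Vec::new();
        let mut i = 0;
        while let Ok(slot) = probe(i) {
            i += 1;
            match slot {
                Some(a) => adapters.push(a), // usable adapter
                None => continue,            // present, but no FL11.0 device
            }
        }
        adapters
    }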
len]); + name.to_string_lossy().into_owned() + }; + + let info = adapter::AdapterInfo { + name: device_name, + vendor: desc.VendorId as usize, + device: desc.DeviceId as usize, + device_type: if (desc.Flags & dxgi::DXGI_ADAPTER_FLAG_SOFTWARE) != 0 { + adapter::DeviceType::VirtualGpu + } else { + adapter::DeviceType::DiscreteGpu + }, + }; + + let mut features: d3d12::D3D12_FEATURE_DATA_D3D12_OPTIONS = unsafe { mem::zeroed() }; + assert_eq!(winerror::S_OK, unsafe { + device.CheckFeatureSupport( + d3d12::D3D12_FEATURE_D3D12_OPTIONS, + &mut features as *mut _ as *mut _, + mem::size_of::() as _, + ) + }); + + let mut features_architecture: d3d12::D3D12_FEATURE_DATA_ARCHITECTURE = + unsafe { mem::zeroed() }; + assert_eq!(winerror::S_OK, unsafe { + device.CheckFeatureSupport( + d3d12::D3D12_FEATURE_ARCHITECTURE, + &mut features_architecture as *mut _ as *mut _, + mem::size_of::() as _, + ) + }); + + let depth_bounds_test_supported = { + let mut features2: d3d12::D3D12_FEATURE_DATA_D3D12_OPTIONS2 = + unsafe { mem::zeroed() }; + let hr = unsafe { + device.CheckFeatureSupport( + d3d12::D3D12_FEATURE_D3D12_OPTIONS2, + &mut features2 as *mut _ as *mut _, + mem::size_of::() as _, + ) + }; + if hr == winerror::S_OK { + features2.DepthBoundsTestSupported != 0 + } else { + false + } + }; + + let heterogeneous_resource_heaps = + features.ResourceHeapTier != d3d12::D3D12_RESOURCE_HEAP_TIER_1; + + let uma = features_architecture.UMA == TRUE; + let cc_uma = features_architecture.CacheCoherentUMA == TRUE; + + let (memory_architecture, heap_properties) = match (uma, cc_uma) { + (true, true) => (MemoryArchitecture::CacheCoherentUMA, &HEAPS_CCUMA), + (true, false) => (MemoryArchitecture::UMA, &HEAPS_UMA), + (false, _) => (MemoryArchitecture::NUMA, &HEAPS_NUMA), + }; + + + let base_memory_types: [adapter::MemoryType; NUM_HEAP_PROPERTIES] = + match memory_architecture { + MemoryArchitecture::NUMA => [ + + adapter::MemoryType { + properties: Properties::DEVICE_LOCAL, + heap_index: 0, + }, + + adapter::MemoryType { + properties: Properties::CPU_VISIBLE | Properties::COHERENT, + heap_index: 1, + }, + + adapter::MemoryType { + properties: Properties::CPU_VISIBLE + | Properties::COHERENT + | Properties::CPU_CACHED, + heap_index: 1, + }, + ], + MemoryArchitecture::UMA => [ + + adapter::MemoryType { + properties: Properties::DEVICE_LOCAL, + heap_index: 0, + }, + + adapter::MemoryType { + properties: Properties::DEVICE_LOCAL + | Properties::CPU_VISIBLE + | Properties::COHERENT, + heap_index: 0, + }, + + adapter::MemoryType { + properties: Properties::DEVICE_LOCAL + | Properties::CPU_VISIBLE + | Properties::COHERENT + | Properties::CPU_CACHED, + heap_index: 0, + }, + ], + MemoryArchitecture::CacheCoherentUMA => [ + + adapter::MemoryType { + properties: Properties::DEVICE_LOCAL, + heap_index: 0, + }, + + adapter::MemoryType { + properties: Properties::DEVICE_LOCAL + | Properties::CPU_VISIBLE + | Properties::COHERENT + | Properties::CPU_CACHED, + heap_index: 0, + }, + + adapter::MemoryType { + properties: Properties::DEVICE_LOCAL + | Properties::CPU_VISIBLE + | Properties::COHERENT + | Properties::CPU_CACHED, + heap_index: 0, + }, + ], + }; + + let memory_types = if heterogeneous_resource_heaps { + base_memory_types.to_vec() + } else { + + + + + + + + + + + + + let mut types = Vec::new(); + for i in 0 .. 
MemoryGroup::NumGroups as _ { + types.extend(base_memory_types.iter().map(|mem_type| { + let mut ty = mem_type.clone(); + + + + if i == MemoryGroup::ImageOnly as _ || i == MemoryGroup::TargetOnly as _ { + ty.properties.remove(Properties::CPU_VISIBLE); + + ty.properties.remove(Properties::COHERENT); + ty.properties.remove(Properties::CPU_CACHED); + } + ty + })); + } + types + }; + + let memory_heaps = { + + let adapter_id = unsafe { device.GetAdapterLuid() }; + let adapter = { + let mut adapter = native::WeakPtr::::null(); + unsafe { + assert_eq!( + winerror::S_OK, + self.factory.EnumAdapterByLuid( + adapter_id, + &dxgi1_4::IDXGIAdapter3::uuidof(), + adapter.mut_void(), + ) + ); + } + adapter + }; + + let query_memory = |segment: dxgi1_4::DXGI_MEMORY_SEGMENT_GROUP| unsafe { + let mut mem_info: dxgi1_4::DXGI_QUERY_VIDEO_MEMORY_INFO = mem::uninitialized(); + assert_eq!( + winerror::S_OK, + adapter.QueryVideoMemoryInfo(0, segment, &mut mem_info,) + ); + mem_info.Budget + }; + + let local = query_memory(dxgi1_4::DXGI_MEMORY_SEGMENT_GROUP_LOCAL); + match memory_architecture { + MemoryArchitecture::NUMA => { + let non_local = query_memory(dxgi1_4::DXGI_MEMORY_SEGMENT_GROUP_NON_LOCAL); + vec![local, non_local] + } + _ => vec![local], + } + }; + + let sample_count_mask = 0x3F; + + let physical_device = PhysicalDevice { + library: Arc::clone(&self.library), + adapter, + features: + + + Features::ROBUST_BUFFER_ACCESS | + Features::IMAGE_CUBE_ARRAY | + Features::GEOMETRY_SHADER | + Features::TESSELLATION_SHADER | + Features::NON_FILL_POLYGON_MODE | + if depth_bounds_test_supported { Features::DEPTH_BOUNDS } else { Features::empty() } | + + Features::MULTI_DRAW_INDIRECT | + Features::FORMAT_BC | + Features::INSTANCE_RATE | + Features::SAMPLER_MIP_LOD_BIAS | + Features::SAMPLER_ANISOTROPY, + limits: Limits { + max_image_1d_size: d3d12::D3D12_REQ_TEXTURE1D_U_DIMENSION as _, + max_image_2d_size: d3d12::D3D12_REQ_TEXTURE2D_U_OR_V_DIMENSION as _, + max_image_3d_size: d3d12::D3D12_REQ_TEXTURE3D_U_V_OR_W_DIMENSION as _, + max_image_cube_size: d3d12::D3D12_REQ_TEXTURECUBE_DIMENSION as _, + max_image_array_layers: d3d12::D3D12_REQ_TEXTURE2D_ARRAY_AXIS_DIMENSION as _, + max_texel_elements: 0, + max_patch_size: 0, + max_viewports: d3d12::D3D12_VIEWPORT_AND_SCISSORRECT_OBJECT_COUNT_PER_PIPELINE as _, + max_viewport_dimensions: [d3d12::D3D12_VIEWPORT_BOUNDS_MAX as _; 2], + max_framebuffer_extent: hal::image::Extent { + width: 4096, + height: 4096, + depth: 1, + }, + max_compute_work_group_count: [ + d3d12::D3D12_CS_THREAD_GROUP_MAX_X, + d3d12::D3D12_CS_THREAD_GROUP_MAX_Y, + d3d12::D3D12_CS_THREAD_GROUP_MAX_Z, + ], + max_compute_work_group_size: [ + d3d12::D3D12_CS_THREAD_GROUP_MAX_THREADS_PER_GROUP, + 1, + 1, + ], + max_vertex_input_attributes: d3d12::D3D12_IA_VERTEX_INPUT_RESOURCE_SLOT_COUNT as _, + max_vertex_input_bindings: 31, + max_vertex_input_attribute_offset: 255, + max_vertex_input_binding_stride: d3d12::D3D12_REQ_MULTI_ELEMENT_STRUCTURE_SIZE_IN_BYTES as _, + max_vertex_output_components: 16, + min_texel_buffer_offset_alignment: 1, + min_uniform_buffer_offset_alignment: 256, + min_storage_buffer_offset_alignment: 1, + framebuffer_color_sample_counts: sample_count_mask, + framebuffer_depth_sample_counts: sample_count_mask, + framebuffer_stencil_sample_counts: sample_count_mask, + max_color_attachments: d3d12::D3D12_SIMULTANEOUS_RENDER_TARGET_COUNT as _, + buffer_image_granularity: 1, + non_coherent_atom_size: 1, + max_sampler_anisotropy: 16., + optimal_buffer_copy_offset_alignment: 
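[editor sketch] On resource heap tier 1 the backend cannot mix buffers, images and render targets in one heap, so the loop above replicates the base memory types once per resource group and strips CPU access from the image-only and target-only copies. The same expansion over illustrative property bits standing in for hal::memory::Properties:

    const CPU_VISIBLE: u8 = 0b001;
    const COHERENT: u8 = 0b010;
    const CPU_CACHED: u8 = 0b100;

    fn expand(base: &[u8], num_groups: usize, image_like: &[usize]) -> Vec<u8> {
        let mut types = Vec::with_capacity(base.len() * num_groups);
        for group in 0..num_groups {
            for &props in base {
                let mut props = props;
                // Image-only / target-only groups lose CPU access, as above.
                if image_like.contains(&group) {
                    props &= !(CPU_VISIBLE | COHERENT | CPU_CACHED);
                }
                types.push(props);
            }
        }
        types
    }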
d3d12::D3D12_TEXTURE_DATA_PLACEMENT_ALIGNMENT as _, + optimal_buffer_copy_pitch_alignment: d3d12::D3D12_TEXTURE_DATA_PITCH_ALIGNMENT as _, + min_vertex_input_binding_stride_alignment: 1, + .. Limits::default() + }, + format_properties: Arc::new(FormatProperties::new(device)), + private_caps: Capabilities { + heterogeneous_resource_heaps, + memory_architecture, + }, + heap_properties, + memory_properties: adapter::MemoryProperties { + memory_types, + memory_heaps, + }, + is_open: Arc::new(Mutex::new(false)), + }; + + let queue_families = QUEUE_FAMILIES.to_vec(); + + adapters.push(adapter::Adapter { + info, + physical_device, + queue_families, + }); + } + adapters + } + + unsafe fn create_surface( + &self, + has_handle: &impl raw_window_handle::HasRawWindowHandle, + ) -> Result { + match has_handle.raw_window_handle() { + raw_window_handle::RawWindowHandle::Windows(handle) => { + Ok(self.create_surface_from_hwnd(handle.hwnd)) + } + _ => Err(hal::window::InitError::UnsupportedWindowHandle), + } + } + + unsafe fn destroy_surface(&self, _surface: window::Surface) { + + } +} + +#[derive(Copy, Clone, Debug, Eq, Hash, PartialEq)] +pub enum Backend {} +impl hal::Backend for Backend { + type Instance = Instance; + type PhysicalDevice = PhysicalDevice; + type Device = Device; + + type Surface = window::Surface; + type Swapchain = window::Swapchain; + + type QueueFamily = QueueFamily; + type CommandQueue = CommandQueue; + type CommandBuffer = command::CommandBuffer; + + type Memory = resource::Memory; + type CommandPool = pool::CommandPool; + + type ShaderModule = resource::ShaderModule; + type RenderPass = resource::RenderPass; + type Framebuffer = resource::Framebuffer; + + type Buffer = resource::Buffer; + type BufferView = resource::BufferView; + type Image = resource::Image; + type ImageView = resource::ImageView; + type Sampler = resource::Sampler; + + type ComputePipeline = resource::ComputePipeline; + type GraphicsPipeline = resource::GraphicsPipeline; + type PipelineLayout = resource::PipelineLayout; + type PipelineCache = (); + type DescriptorSetLayout = resource::DescriptorSetLayout; + type DescriptorPool = resource::DescriptorPool; + type DescriptorSet = resource::DescriptorSet; + + type Fence = resource::Fence; + type Semaphore = resource::Semaphore; + type Event = (); + type QueryPool = resource::QueryPool; +} + +fn validate_line_width(width: f32) { + + + + assert_eq!(width, 1.0); +} + +#[derive(Clone, Copy, Debug, Default)] +struct FormatInfo { + properties: f::Properties, + sample_count_mask: u8, +} + +#[derive(Debug)] +pub struct FormatProperties(Box<[Mutex>]>, native::Device); + +impl Drop for FormatProperties { + fn drop(&mut self) { + unsafe { + self.1.destroy(); + } + } +} + +impl FormatProperties { + fn new(device: native::Device) -> Self { + let mut buf = Vec::with_capacity(f::NUM_FORMATS); + buf.push(Mutex::new(Some(FormatInfo::default()))); + for _ in 1 .. 
f::NUM_FORMATS { + buf.push(Mutex::new(None)) + } + FormatProperties(buf.into_boxed_slice(), device) + } + + fn get(&self, idx: usize) -> FormatInfo { + let mut guard = self.0[idx].lock().unwrap(); + if let Some(info) = *guard { + return info; + } + let format: f::Format = unsafe { mem::transmute(idx as u32) }; + let dxgi_format = match conv::map_format(format) { + Some(format) => format, + None => { + let info = FormatInfo::default(); + *guard = Some(info); + return info; + } + }; + + let properties = { + let mut props = f::Properties::default(); + let mut data = d3d12::D3D12_FEATURE_DATA_FORMAT_SUPPORT { + Format: dxgi_format, + Support1: unsafe { mem::zeroed() }, + Support2: unsafe { mem::zeroed() }, + }; + assert_eq!(winerror::S_OK, unsafe { + self.1.CheckFeatureSupport( + d3d12::D3D12_FEATURE_FORMAT_SUPPORT, + &mut data as *mut _ as *mut _, + mem::size_of::() as _, + ) + }); + let can_buffer = 0 != data.Support1 & d3d12::D3D12_FORMAT_SUPPORT1_BUFFER; + let can_image = 0 + != data.Support1 + & (d3d12::D3D12_FORMAT_SUPPORT1_TEXTURE1D + | d3d12::D3D12_FORMAT_SUPPORT1_TEXTURE2D + | d3d12::D3D12_FORMAT_SUPPORT1_TEXTURE3D + | d3d12::D3D12_FORMAT_SUPPORT1_TEXTURECUBE); + let can_linear = can_image && !format.surface_desc().is_compressed(); + if can_image { + props.optimal_tiling |= f::ImageFeature::SAMPLED | f::ImageFeature::BLIT_SRC; + } + if can_linear { + props.linear_tiling |= f::ImageFeature::SAMPLED | f::ImageFeature::BLIT_SRC; + } + if data.Support1 & d3d12::D3D12_FORMAT_SUPPORT1_IA_VERTEX_BUFFER != 0 { + props.buffer_features |= f::BufferFeature::VERTEX; + } + if data.Support1 & d3d12::D3D12_FORMAT_SUPPORT1_SHADER_SAMPLE != 0 { + props.optimal_tiling |= f::ImageFeature::SAMPLED_LINEAR; + } + if data.Support1 & d3d12::D3D12_FORMAT_SUPPORT1_RENDER_TARGET != 0 { + props.optimal_tiling |= + f::ImageFeature::COLOR_ATTACHMENT | f::ImageFeature::BLIT_DST; + if can_linear { + props.linear_tiling |= + f::ImageFeature::COLOR_ATTACHMENT | f::ImageFeature::BLIT_DST; + } + } + if data.Support1 & d3d12::D3D12_FORMAT_SUPPORT1_BLENDABLE != 0 { + props.optimal_tiling |= f::ImageFeature::COLOR_ATTACHMENT_BLEND; + } + if data.Support1 & d3d12::D3D12_FORMAT_SUPPORT1_DEPTH_STENCIL != 0 { + props.optimal_tiling |= f::ImageFeature::DEPTH_STENCIL_ATTACHMENT; + } + if data.Support1 & d3d12::D3D12_FORMAT_SUPPORT1_SHADER_LOAD != 0 { + + if can_buffer { + props.buffer_features |= f::BufferFeature::UNIFORM_TEXEL; + } + } + if data.Support2 & d3d12::D3D12_FORMAT_SUPPORT2_UAV_ATOMIC_ADD != 0 { + + if can_buffer { + props.buffer_features |= f::BufferFeature::STORAGE_TEXEL_ATOMIC; + } + if can_image { + props.optimal_tiling |= f::ImageFeature::STORAGE_ATOMIC; + } + } + if data.Support2 & d3d12::D3D12_FORMAT_SUPPORT2_UAV_TYPED_STORE != 0 { + if can_buffer { + props.buffer_features |= f::BufferFeature::STORAGE_TEXEL; + } + if can_image { + props.optimal_tiling |= f::ImageFeature::STORAGE; + } + } + + props + }; + + let mut sample_count_mask = 0; + for i in 0 .. 
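[editor sketch] FormatProperties::get above fills a per-format slot on first use and returns the cached value afterwards, so CheckFeatureSupport runs at most once per format. The caching skeleton, with the payload shrunk to a u32 for brevity:

    use std::sync::Mutex;

    // One slot per format; each slot is computed at most once, under its own lock.
    struct LazyTable {
        slots: Box<[Mutex<Option<u32>>]>,
    }

    impl LazyTable {
        fn new(n: usize) -> Self {
            let slots: Vec<_> = (0..n).map(|_| Mutex::new(None)).collect();
            LazyTable { slots: slots.into_boxed_slice() }
        }

        fn get(&self, idx: usize, compute: impl Fn(usize) -> u32) -> u32 {
            let mut guard = self.slots[idx].lock().unwrap();
            *guard.get_or_insert_with(|| compute(idx))
        }
    }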
6 { + let mut data = d3d12::D3D12_FEATURE_DATA_MULTISAMPLE_QUALITY_LEVELS { + Format: dxgi_format, + SampleCount: 1 << i, + Flags: 0, + NumQualityLevels: 0, + }; + assert_eq!(winerror::S_OK, unsafe { + self.1.CheckFeatureSupport( + d3d12::D3D12_FEATURE_MULTISAMPLE_QUALITY_LEVELS, + &mut data as *mut _ as *mut _, + mem::size_of::() as _, + ) + }); + if data.NumQualityLevels != 0 { + sample_count_mask |= 1 << i; + } + } + + let info = FormatInfo { + properties, + sample_count_mask, + }; + *guard = Some(info); + info + } +} diff --git a/third_party/rust/gfx-backend-dx12/src/pool.rs b/third_party/rust/gfx-backend-dx12/src/pool.rs new file mode 100644 index 000000000000..af8e036987ab --- /dev/null +++ b/third_party/rust/gfx-backend-dx12/src/pool.rs @@ -0,0 +1,120 @@ +use std::sync::Arc; +use std::fmt; + +use winapi::shared::winerror::SUCCEEDED; + +use command::CommandBuffer; +use hal::{command, pool}; +use native; +use {Backend, Shared}; + +#[derive(Debug)] +pub enum CommandPoolAllocator { + Shared(native::CommandAllocator), + Individual(Vec), +} + +pub struct CommandPool { + pub(crate) allocator: CommandPoolAllocator, + pub(crate) device: native::Device, + pub(crate) list_type: native::CmdListType, + pub(crate) shared: Arc, + pub(crate) create_flags: pool::CommandPoolCreateFlags, +} + +impl fmt::Debug for CommandPool { + fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result { + fmt.write_str("CommandPool") + } +} + +impl CommandPool { + fn create_command_list(&mut self) -> (native::GraphicsCommandList, native::CommandAllocator) { + let command_allocator = match self.allocator { + CommandPoolAllocator::Shared(ref allocator) => allocator.clone(), + CommandPoolAllocator::Individual(ref mut allocators) => { + let (command_allocator, hr) = self.device.create_command_allocator(self.list_type); + + + if !SUCCEEDED(hr) { + error!("error on command allocator creation: {:x}", hr); + } + + allocators.push(command_allocator); + command_allocator + } + }; + + + let (command_list, hr) = self.device.create_graphics_command_list( + self.list_type, + command_allocator, + native::PipelineState::null(), + 0, + ); + + if !SUCCEEDED(hr) { + error!("error on command list creation: {:x}", hr); + } + + + + let _hr = command_list.close(); + + (command_list, command_allocator) + } + + pub(crate) fn destroy(self) { + match self.allocator { + CommandPoolAllocator::Shared(ref allocator) => unsafe { + allocator.destroy(); + }, + CommandPoolAllocator::Individual(ref allocators) => { + for allocator in allocators.iter() { + unsafe { + allocator.destroy(); + } + } + } + } + } +} + +unsafe impl Send for CommandPool {} +unsafe impl Sync for CommandPool {} + +impl pool::CommandPool for CommandPool { + unsafe fn reset(&mut self, _release_resources: bool) { + match self.allocator { + CommandPoolAllocator::Shared(ref allocator) => { + allocator.Reset(); + } + CommandPoolAllocator::Individual(ref mut allocators) => { + for allocator in allocators.iter_mut() { + allocator.Reset(); + } + } + } + } + + unsafe fn allocate_one(&mut self, level: command::Level) -> CommandBuffer { + + assert_eq!(level, command::Level::Primary); + let (command_list, command_allocator) = self.create_command_list(); + CommandBuffer::new( + command_list, + command_allocator, + self.shared.clone(), + self.create_flags, + ) + } + + unsafe fn free(&mut self, cbufs: I) + where + I: IntoIterator, + { + for mut cbuf in cbufs { + cbuf.destroy(); + } + } +} diff --git a/third_party/rust/gfx-backend-dx12/src/resource.rs 
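[editor sketch] CommandPool::create_command_list above supports two strategies: record every list against one shared allocator, or give each list its own allocator so buffers can be reset individually. The choice in miniature, where A stands in for native::CommandAllocator:

    enum AllocatorStrategy<A> {
        Shared(A),          // every list records against one allocator
        Individual(Vec<A>), // one allocator per command buffer
    }

    impl<A: Clone> AllocatorStrategy<A> {
        fn next(&mut self, make: impl Fn() -> A) -> A {
            match self {
                AllocatorStrategy::Shared(a) => a.clone(),
                AllocatorStrategy::Individual(v) => {
                    let a = make();
                    v.push(a.clone());
                    a
                }
            }
        }
    }

Note the vendored code also closes every freshly created list right away, presumably so the first begin can uniformly Reset() it.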
b/third_party/rust/gfx-backend-dx12/src/resource.rs new file mode 100644 index 000000000000..5021138511e2 --- /dev/null +++ b/third_party/rust/gfx-backend-dx12/src/resource.rs @@ -0,0 +1,759 @@ +use winapi::shared::dxgiformat::DXGI_FORMAT; +use winapi::shared::minwindef::UINT; +use winapi::um::d3d12; + +use hal::{buffer, format, image, memory, pass, pso}; +use range_alloc::RangeAllocator; + +use crate::{root_constants::RootConstant, Backend, MAX_VERTEX_BUFFERS}; + +use std::collections::BTreeMap; +use std::fmt; +use std::ops::Range; +use std::cell::UnsafeCell; + + + + +#[derive(Debug, Hash)] +pub enum ShaderModule { + Compiled(BTreeMap), + Spirv(Vec), +} +unsafe impl Send for ShaderModule {} +unsafe impl Sync for ShaderModule {} + +#[derive(Clone, Debug, Hash)] +pub struct BarrierDesc { + pub(crate) attachment_id: pass::AttachmentId, + pub(crate) states: Range, + pub(crate) flags: d3d12::D3D12_RESOURCE_BARRIER_FLAGS, +} + +impl BarrierDesc { + pub(crate) fn new( + attachment_id: pass::AttachmentId, + states: Range, + ) -> Self { + BarrierDesc { + attachment_id, + states, + flags: d3d12::D3D12_RESOURCE_BARRIER_FLAG_NONE, + } + } + + pub(crate) fn split(self) -> Range { + BarrierDesc { + flags: d3d12::D3D12_RESOURCE_BARRIER_FLAG_BEGIN_ONLY, + ..self.clone() + } .. BarrierDesc { + flags: d3d12::D3D12_RESOURCE_BARRIER_FLAG_END_ONLY, + ..self + } + } +} + +#[derive(Clone, Debug, Hash)] +pub struct SubpassDesc { + pub(crate) color_attachments: Vec, + pub(crate) depth_stencil_attachment: Option, + pub(crate) input_attachments: Vec, + pub(crate) resolve_attachments: Vec, + pub(crate) pre_barriers: Vec, + pub(crate) post_barriers: Vec, +} + +impl SubpassDesc { + + + pub(crate) fn is_using(&self, at_id: pass::AttachmentId) -> bool { + self.color_attachments + .iter() + .chain(self.depth_stencil_attachment.iter()) + .chain(self.input_attachments.iter()) + .chain(self.resolve_attachments.iter()) + .any(|&(id, _)| id == at_id) + } +} + +#[derive(Clone, Debug, Hash)] +pub struct RenderPass { + pub(crate) attachments: Vec, + pub(crate) subpasses: Vec, + pub(crate) post_barriers: Vec, +} + + + + + +#[derive(Copy, Clone, Debug)] +pub struct VertexBinding { + + pub mapped_binding: usize, + pub stride: UINT, + + pub offset: u32, +} + +#[derive(Debug)] +pub struct GraphicsPipeline { + pub(crate) raw: native::PipelineState, + pub(crate) signature: native::RootSignature, + pub(crate) num_parameter_slots: usize, + pub(crate) topology: d3d12::D3D12_PRIMITIVE_TOPOLOGY, + pub(crate) constants: Vec, + pub(crate) vertex_bindings: [Option; MAX_VERTEX_BUFFERS], + pub(crate) baked_states: pso::BakedStates, +} +unsafe impl Send for GraphicsPipeline {} +unsafe impl Sync for GraphicsPipeline {} + +#[derive(Debug)] +pub struct ComputePipeline { + pub(crate) raw: native::PipelineState, + pub(crate) signature: native::RootSignature, + pub(crate) num_parameter_slots: usize, + pub(crate) constants: Vec, +} + +unsafe impl Send for ComputePipeline {} +unsafe impl Sync for ComputePipeline {} + +bitflags! 
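[editor sketch] BarrierDesc::split above turns one resource transition into a begin-only/end-only pair so the two halves can straddle other work (D3D12 split barriers). The shape of the operation, with an illustrative flags enum in place of the D3D12_RESOURCE_BARRIER_FLAG_* constants:

    #[derive(Clone, Debug)]
    struct Barrier {
        attachment: usize,
        flags: Flags,
    }

    #[derive(Clone, Copy, Debug, PartialEq)]
    enum Flags {
        Full,      // ordinary, non-split barrier
        BeginOnly, // D3D12_RESOURCE_BARRIER_FLAG_BEGIN_ONLY
        EndOnly,   // D3D12_RESOURCE_BARRIER_FLAG_END_ONLY
    }

    // One logical transition becomes a (begin, end) pair, as in split() above.
    fn split(b: Barrier) -> (Barrier, Barrier) {
        (
            Barrier { flags: Flags::BeginOnly, ..b.clone() },
            Barrier { flags: Flags::EndOnly, ..b },
        )
    }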
{ + pub struct SetTableTypes: u8 { + const SRV_CBV_UAV = 0x1; + const SAMPLERS = 0x2; + } +} + +pub const SRV_CBV_UAV: SetTableTypes = SetTableTypes::SRV_CBV_UAV; +pub const SAMPLERS: SetTableTypes = SetTableTypes::SAMPLERS; + +pub type RootSignatureOffset = usize; + +#[derive(Debug, Hash)] +pub struct RootTable { + pub ty: SetTableTypes, + pub offset: RootSignatureOffset, +} + +#[derive(Debug, Hash)] +pub struct RootDescriptor { + pub offset: RootSignatureOffset, +} + +#[derive(Debug, Hash)] +pub struct RootElement { + pub table: RootTable, + pub descriptors: Vec, +} + +#[derive(Debug, Hash)] +pub struct PipelineLayout { + pub(crate) raw: native::RootSignature, + + pub(crate) constants: Vec, + + + pub(crate) elements: Vec, + + + pub(crate) num_parameter_slots: usize, +} +unsafe impl Send for PipelineLayout {} +unsafe impl Sync for PipelineLayout {} + +#[derive(Debug, Clone)] +pub struct Framebuffer { + pub(crate) attachments: Vec, + + pub(crate) layers: image::Layer, +} + +#[derive(Copy, Clone, Debug)] +pub struct BufferUnbound { + pub(crate) requirements: memory::Requirements, + pub(crate) usage: buffer::Usage, +} + +pub struct BufferBound { + pub(crate) resource: native::Resource, + pub(crate) requirements: memory::Requirements, + pub(crate) clear_uav: Option, +} + +impl fmt::Debug for BufferBound { + fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result { + fmt.write_str("BufferBound") + } +} + +unsafe impl Send for BufferBound {} +unsafe impl Sync for BufferBound {} + +pub enum Buffer { + Unbound(BufferUnbound), + Bound(BufferBound), +} + +impl fmt::Debug for Buffer { + fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result { + fmt.write_str("Buffer") + } +} + +impl Buffer { + pub(crate) fn expect_unbound(&self) -> &BufferUnbound { + match *self { + Buffer::Unbound(ref unbound) => unbound, + Buffer::Bound(_) => panic!("Expected unbound buffer"), + } + } + + pub(crate) fn expect_bound(&self) -> &BufferBound { + match *self { + Buffer::Unbound(_) => panic!("Expected bound buffer"), + Buffer::Bound(ref bound) => bound, + } + } +} + +#[derive(Copy, Clone)] +pub struct BufferView { + + pub(crate) handle_srv: native::CpuDescriptor, + + pub(crate) handle_uav: native::CpuDescriptor, +} + +impl fmt::Debug for BufferView { + fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result { + fmt.write_str("BufferView") + } +} + +unsafe impl Send for BufferView {} +unsafe impl Sync for BufferView {} + +#[derive(Clone)] +pub enum Place { + SwapChain, + Heap { raw: native::Heap, offset: u64 }, +} + +#[derive(Clone)] +pub struct ImageBound { + pub(crate) resource: native::Resource, + pub(crate) place: Place, + pub(crate) surface_type: format::SurfaceType, + pub(crate) kind: image::Kind, + pub(crate) usage: image::Usage, + pub(crate) default_view_format: Option, + pub(crate) view_caps: image::ViewCapabilities, + pub(crate) descriptor: d3d12::D3D12_RESOURCE_DESC, + pub(crate) bytes_per_block: u8, + + pub(crate) block_dim: (u8, u8), + pub(crate) clear_cv: Vec, + pub(crate) clear_dv: Vec, + pub(crate) clear_sv: Vec, + pub(crate) requirements: memory::Requirements, +} + +impl fmt::Debug for ImageBound { + fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result { + fmt.write_str("ImageBound") + } +} + +unsafe impl Send for ImageBound {} +unsafe impl Sync for ImageBound {} + +impl ImageBound { + + pub fn to_subresource_range(&self, aspects: format::Aspects) -> image::SubresourceRange { + image::SubresourceRange { + aspects, + levels: 0 .. self.descriptor.MipLevels as _, + layers: 0 .. 
self.kind.num_layers(), + } + } + + pub fn calc_subresource(&self, mip_level: UINT, layer: UINT, plane: UINT) -> UINT { + mip_level + + (layer * self.descriptor.MipLevels as UINT) + + (plane * self.descriptor.MipLevels as UINT * self.kind.num_layers() as UINT) + } +} + +#[derive(Copy, Clone)] +pub struct ImageUnbound { + pub(crate) desc: d3d12::D3D12_RESOURCE_DESC, + pub(crate) view_format: Option, + pub(crate) dsv_format: Option, + pub(crate) requirements: memory::Requirements, + pub(crate) format: format::Format, + pub(crate) kind: image::Kind, + pub(crate) usage: image::Usage, + pub(crate) tiling: image::Tiling, + pub(crate) view_caps: image::ViewCapabilities, + + pub(crate) bytes_per_block: u8, + + pub(crate) block_dim: (u8, u8), +} + +impl fmt::Debug for ImageUnbound { + fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result { + fmt.write_str("ImageUnbound") + } +} + +impl ImageUnbound { + pub fn calc_subresource(&self, mip_level: UINT, layer: UINT, plane: UINT) -> UINT { + mip_level + + (layer * self.desc.MipLevels as UINT) + + (plane * self.desc.MipLevels as UINT * self.kind.num_layers() as UINT) + } +} + +#[derive(Clone)] +pub enum Image { + Unbound(ImageUnbound), + Bound(ImageBound), +} + +impl fmt::Debug for Image { + fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result { + fmt.write_str("Image") + } +} + +impl Image { + pub(crate) fn expect_unbound(&self) -> &ImageUnbound { + match *self { + Image::Unbound(ref unbound) => unbound, + Image::Bound(_) => panic!("Expected unbound image"), + } + } + + pub(crate) fn expect_bound(&self) -> &ImageBound { + match *self { + Image::Unbound(_) => panic!("Expected bound image"), + Image::Bound(ref bound) => bound, + } + } + + pub fn get_desc(&self) -> &d3d12::D3D12_RESOURCE_DESC { + match self { + Image::Bound(i) => &i.descriptor, + Image::Unbound(i) => &i.desc, + } + } + + pub fn calc_subresource(&self, mip_level: UINT, layer: UINT, plane: UINT) -> UINT { + match self { + Image::Bound(i) => i.calc_subresource(mip_level, layer, plane), + Image::Unbound(i) => i.calc_subresource(mip_level, layer, plane), + } + } +} + +#[derive(Copy, Clone)] +pub struct ImageView { + pub(crate) resource: native::Resource, + pub(crate) handle_srv: Option, + pub(crate) handle_rtv: Option, + pub(crate) handle_dsv: Option, + pub(crate) handle_uav: Option, + + pub(crate) dxgi_format: DXGI_FORMAT, + pub(crate) num_levels: image::Level, + pub(crate) mip_levels: (image::Level, image::Level), + pub(crate) layers: (image::Layer, image::Layer), + pub(crate) kind: image::Kind, +} + +impl fmt::Debug for ImageView { + fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result { + fmt.write_str("ImageView") + } +} + +unsafe impl Send for ImageView {} +unsafe impl Sync for ImageView {} + +impl ImageView { + pub fn calc_subresource(&self, mip_level: UINT, layer: UINT) -> UINT { + mip_level + (layer * self.num_levels as UINT) + } +} + +pub struct Sampler { + pub(crate) handle: native::CpuDescriptor, +} + +impl fmt::Debug for Sampler { + fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result { + fmt.write_str("Sampler") + } +} + +#[derive(Debug)] +pub struct DescriptorSetLayout { + pub(crate) bindings: Vec, +} + +#[derive(Debug)] +pub struct Fence { + pub(crate) raw: native::Fence, +} +unsafe impl Send for Fence {} +unsafe impl Sync for Fence {} + +#[derive(Debug)] +pub struct Semaphore { + pub(crate) raw: native::Fence, +} + +unsafe impl Send for Semaphore {} +unsafe impl Sync for Semaphore {} + +#[derive(Debug)] +pub struct Memory { + pub(crate) heap: native::Heap, + pub(crate) 
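[editor sketch] calc_subresource above implements D3D12's standard subresource indexing: mips vary fastest, then array layers, then planes. As a standalone formula:

    // index = mip + layer * mip_count + plane * mip_count * layer_count
    fn calc_subresource(mip: u32, layer: u32, plane: u32, mip_count: u32, layer_count: u32) -> u32 {
        mip + layer * mip_count + plane * mip_count * layer_count
    }

    // e.g. mip 1 of layer 2 in a 3-mip texture, plane 0: 1 + 2 * 3 + 0 = 7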
type_id: usize, + pub(crate) size: u64, + + pub(crate) resource: Option, +} + +unsafe impl Send for Memory {} +unsafe impl Sync for Memory {} + +bitflags! { + /// A set of D3D12 descriptor types that need to be associated + /// with a single gfx-hal `DescriptorType`. + #[derive(Default)] + pub struct DescriptorContent: u8 { + const CBV = 0x1; + const SRV = 0x2; + const UAV = 0x4; + const SAMPLER = 0x8; + + /// Indicates if the descriptor is a dynamic uniform/storage buffer. + /// Important as dynamic buffers are implemented as root descriptors. + const DYNAMIC = 0x10; + + const VIEW = DescriptorContent::CBV.bits |DescriptorContent::SRV.bits | DescriptorContent::UAV.bits; + } +} + +impl DescriptorContent { + pub fn is_dynamic(&self) -> bool { + self.contains(DescriptorContent::DYNAMIC) + } +} + +impl From for DescriptorContent { + fn from(ty: pso::DescriptorType) -> Self { + use hal::pso::DescriptorType as Dt; + match ty { + Dt::Sampler => DescriptorContent::SAMPLER, + Dt::CombinedImageSampler => DescriptorContent::SRV | DescriptorContent::SAMPLER, + Dt::SampledImage | Dt::InputAttachment | Dt::UniformTexelBuffer => { + DescriptorContent::SRV + } + Dt::StorageImage | Dt::StorageBuffer | Dt::StorageTexelBuffer => { + DescriptorContent::SRV | DescriptorContent::UAV + } + Dt::StorageBufferDynamic => { + DescriptorContent::SRV | DescriptorContent::UAV | DescriptorContent::DYNAMIC + } + Dt::UniformBuffer => DescriptorContent::CBV, + Dt::UniformBufferDynamic => DescriptorContent::CBV | DescriptorContent::DYNAMIC, + } + } +} + +#[derive(Debug)] +pub struct DescriptorRange { + pub(crate) handle: DualHandle, + pub(crate) ty: pso::DescriptorType, + pub(crate) handle_size: u64, + pub(crate) count: u64, +} + +impl DescriptorRange { + pub(crate) fn at(&self, index: u64) -> native::CpuDescriptor { + assert!(index < self.count); + let ptr = self.handle.cpu.ptr + (self.handle_size * index) as usize; + native::CpuDescriptor { ptr } + } +} + +#[derive(Copy, Clone, Debug)] +pub(crate) struct DynamicDescriptor { + pub content: DescriptorContent, + pub gpu_buffer_location: u64, +} + +#[derive(Debug, Default)] +pub struct DescriptorBindingInfo { + pub(crate) count: u64, + pub(crate) view_range: Option, + pub(crate) sampler_range: Option, + pub(crate) dynamic_descriptors: UnsafeCell>, + pub(crate) content: DescriptorContent, +} + +pub struct DescriptorSet { + + pub(crate) heap_srv_cbv_uav: native::DescriptorHeap, + pub(crate) heap_samplers: native::DescriptorHeap, + pub(crate) binding_infos: Vec, + pub(crate) first_gpu_sampler: Option, + pub(crate) first_gpu_view: Option, +} + +impl fmt::Debug for DescriptorSet { + fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result { + fmt.write_str("DescriptorSet") + } +} + + +unsafe impl Send for DescriptorSet {} +unsafe impl Sync for DescriptorSet {} + +impl DescriptorSet { + pub fn srv_cbv_uav_gpu_start(&self) -> native::GpuDescriptor { + self.heap_srv_cbv_uav.start_gpu_descriptor() + } + + pub fn sampler_gpu_start(&self) -> native::GpuDescriptor { + self.heap_samplers.start_gpu_descriptor() + } +} + +#[derive(Copy, Clone)] +pub struct DualHandle { + pub(crate) cpu: native::CpuDescriptor, + pub(crate) gpu: native::GpuDescriptor, + + pub(crate) size: u64, +} + +impl fmt::Debug for DualHandle { + fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result { + fmt.write_str("DualHandle") + } +} + +pub struct DescriptorHeap { + pub(crate) raw: native::DescriptorHeap, + pub(crate) handle_size: u64, + pub(crate) total_handles: u64, + pub(crate) start: DualHandle, + pub(crate) 
range_allocator: RangeAllocator, +} + +impl fmt::Debug for DescriptorHeap { + fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result { + fmt.write_str("DescriptorHeap") + } +} + +impl DescriptorHeap { + pub(crate) fn at(&self, index: u64, size: u64) -> DualHandle { + assert!(index < self.total_handles); + DualHandle { + cpu: native::CpuDescriptor { + ptr: self.start.cpu.ptr + (self.handle_size * index) as usize, + }, + gpu: native::GpuDescriptor { + ptr: self.start.gpu.ptr + self.handle_size * index, + }, + size, + } + } + + pub(crate) unsafe fn destroy(&self) { + self.raw.destroy(); + } +} + + + +#[derive(Debug)] +pub struct DescriptorHeapSlice { + pub(crate) heap: native::DescriptorHeap, + pub(crate) start: DualHandle, + pub(crate) handle_size: u64, + pub(crate) range_allocator: RangeAllocator, +} + +impl DescriptorHeapSlice { + pub(crate) fn alloc_handles(&mut self, count: u64) -> Option { + self.range_allocator + .allocate_range(count) + .ok() + .map(|range| DualHandle { + cpu: native::CpuDescriptor { + ptr: self.start.cpu.ptr + (self.handle_size * range.start) as usize, + }, + gpu: native::GpuDescriptor { + ptr: self.start.gpu.ptr + (self.handle_size * range.start) as u64, + }, + size: count, + }) + } + + + pub(crate) fn free_handles(&mut self, handle: DualHandle) { + let start = (handle.gpu.ptr - self.start.gpu.ptr) / self.handle_size; + let handle_range = start .. start + handle.size as u64; + self.range_allocator.free_range(handle_range); + } + + pub(crate) fn clear(&mut self) { + self.range_allocator.reset(); + } +} + +#[derive(Debug)] +pub struct DescriptorPool { + pub(crate) heap_srv_cbv_uav: DescriptorHeapSlice, + pub(crate) heap_sampler: DescriptorHeapSlice, + pub(crate) pools: Vec, + pub(crate) max_size: u64, +} +unsafe impl Send for DescriptorPool {} +unsafe impl Sync for DescriptorPool {} + +impl pso::DescriptorPool for DescriptorPool { + unsafe fn allocate_set( + &mut self, + layout: &DescriptorSetLayout, + ) -> Result { + let mut binding_infos = Vec::new(); + let mut first_gpu_sampler = None; + let mut first_gpu_view = None; + + info!("allocate_set"); + for binding in &layout.bindings { + + while binding_infos.len() <= binding.binding as usize { + binding_infos.push(DescriptorBindingInfo::default()); + } + let content = DescriptorContent::from(binding.ty); + debug!("\tbinding {:?} with content {:?}", binding, content); + + let (view_range, sampler_range, dynamic_descriptors) = if content.is_dynamic() { + let descriptor = DynamicDescriptor { + content: content ^ DescriptorContent::DYNAMIC, + gpu_buffer_location: 0, + }; + (None, None, vec![descriptor; binding.count]) + } else { + let view_range = if content.intersects(DescriptorContent::VIEW) { + let count = if content.contains(DescriptorContent::SRV | DescriptorContent::UAV) + { + 2 * binding.count as u64 + } else { + binding.count as u64 + }; + debug!("\tview handles: {}", count); + let handle = self + .heap_srv_cbv_uav + .alloc_handles(count) + .ok_or(pso::AllocationError::OutOfPoolMemory)?; + if first_gpu_view.is_none() { + first_gpu_view = Some(handle.gpu); + } + Some(DescriptorRange { + handle, + ty: binding.ty, + count, + handle_size: self.heap_srv_cbv_uav.handle_size, + }) + } else { + None + }; + + let sampler_range = if content.intersects(DescriptorContent::SAMPLER) && !content.is_dynamic() { + let count = binding.count as u64; + debug!("\tsampler handles: {}", count); + let handle = self + .heap_sampler + .alloc_handles(count) + .ok_or(pso::AllocationError::OutOfPoolMemory)?; + if first_gpu_sampler.is_none() { + 
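[editor sketch] DescriptorHeapSlice above carves descriptor handles out of one big heap with a RangeAllocator: alloc_handles grabs a contiguous run, free_handles computes the range back from the GPU pointer and returns it. A minimal first-fit free-range allocator in that spirit (the real range-alloc crate additionally merges adjacent free ranges on release):

    use std::ops::Range;

    struct RangeAlloc {
        free_list: Vec<Range<u64>>,
    }

    impl RangeAlloc {
        fn new(total: u64) -> Self {
            RangeAlloc { free_list: vec![0..total] }
        }

        // alloc_handles above: claim `count` contiguous descriptor slots.
        fn allocate(&mut self, count: u64) -> Option<Range<u64>> {
            let i = self.free_list.iter().position(|r| r.end - r.start >= count)?;
            let start = self.free_list[i].start;
            self.free_list[i].start += count;
            if self.free_list[i].start == self.free_list[i].end {
                self.free_list.remove(i);
            }
            Some(start..start + count)
        }

        // free_handles above: give a range back (no coalescing in this sketch).
        fn release(&mut self, range: Range<u64>) {
            self.free_list.push(range);
        }
    }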
first_gpu_sampler = Some(handle.gpu); + } + Some(DescriptorRange { + handle, + ty: binding.ty, + count, + handle_size: self.heap_sampler.handle_size, + }) + } else { + None + }; + + (view_range, sampler_range, Vec::new()) + }; + + binding_infos[binding.binding as usize] = DescriptorBindingInfo { + count: binding.count as _, + view_range, + sampler_range, + dynamic_descriptors: UnsafeCell::new(dynamic_descriptors), + content, + }; + } + + Ok(DescriptorSet { + heap_srv_cbv_uav: self.heap_srv_cbv_uav.heap.clone(), + heap_samplers: self.heap_sampler.heap.clone(), + binding_infos, + first_gpu_sampler, + first_gpu_view, + }) + } + + unsafe fn free_sets(&mut self, descriptor_sets: I) + where + I: IntoIterator, + { + for descriptor_set in descriptor_sets { + for binding_info in &descriptor_set.binding_infos { + if let Some(ref view_range) = binding_info.view_range { + if binding_info.content.intersects(DescriptorContent::VIEW) { + self.heap_srv_cbv_uav.free_handles(view_range.handle); + } + } + if let Some(ref sampler_range) = binding_info.sampler_range { + if binding_info.content.intersects(DescriptorContent::SAMPLER) { + self.heap_sampler.free_handles(sampler_range.handle); + } + } + } + } + } + + unsafe fn reset(&mut self) { + self.heap_srv_cbv_uav.clear(); + self.heap_sampler.clear(); + } +} + +#[derive(Debug)] +pub struct QueryPool { + pub(crate) raw: native::QueryHeap, + pub(crate) ty: native::QueryHeapType, +} + +unsafe impl Send for QueryPool {} +unsafe impl Sync for QueryPool {} diff --git a/third_party/rust/gfx-backend-dx12/src/root_constants.rs b/third_party/rust/gfx-backend-dx12/src/root_constants.rs new file mode 100644 index 000000000000..64c08204358b --- /dev/null +++ b/third_party/rust/gfx-backend-dx12/src/root_constants.rs @@ -0,0 +1,300 @@ + + + + + + + +use hal::pso; +use std::borrow::Borrow; +use std::cmp::Ordering; +use std::ops::Range; + +#[derive(Clone, Debug, PartialEq, Eq, Hash)] +pub struct RootConstant { + pub stages: pso::ShaderStageFlags, + pub range: Range, +} + +impl RootConstant { + fn is_empty(&self) -> bool { + self.range.end <= self.range.start + } + + + + fn divide(self, other: &RootConstant) -> (RootConstant, RootConstant) { + assert!(self.range.start <= other.range.start); + let left = RootConstant { + stages: self.stages, + range: self.range.start .. other.range.start, + }; + + let right = RootConstant { + stages: self.stages, + range: other.range.start .. 
self.range.end, + }; + + (left, right) + } +} + +impl PartialOrd for RootConstant { + fn partial_cmp(&self, other: &RootConstant) -> Option { + Some( + self.range + .start + .cmp(&other.range.start) + .then(self.range.end.cmp(&other.range.end)) + .then(self.stages.cmp(&other.stages)), + ) + } +} + +impl Ord for RootConstant { + fn cmp(&self, other: &RootConstant) -> Ordering { + self.partial_cmp(other).unwrap() + } +} + +pub fn split(ranges: I) -> Vec +where + I: IntoIterator, + I::Item: Borrow<(pso::ShaderStageFlags, Range)>, +{ + + + let mut ranges = into_vec(ranges); + ranges.sort_by(|a, b| b.cmp(a)); + + + let mut disjunct = Vec::with_capacity(ranges.len()); + + while let Some(cur) = ranges.pop() { + + + + + + + + + + + if let Some(mut next) = ranges.pop() { + let (left, mut right) = cur.divide(&next); + if !left.is_empty() { + + + + disjunct.push(left); + ranges.push(next); + if !right.is_empty() { + ranges.push(right); + } + } else if !right.is_empty() { + + + + + + + + + + + + right.stages |= next.stages; + next.range.start = right.range.end; + ranges.push(right); + if !next.is_empty() { + ranges.push(next); + } + } + } else { + disjunct.push(cur); + } + ranges.sort_by(|a, b| b.cmp(a)); + } + + disjunct +} + +fn into_vec(ranges: I) -> Vec +where + I: IntoIterator, + I::Item: Borrow<(pso::ShaderStageFlags, Range)>, +{ + ranges + .into_iter() + .map(|borrowable| { + let &(stages, ref range) = borrowable.borrow(); + debug_assert_eq!(range.start % 4, 0); + debug_assert_eq!(range.end % 4, 0); + RootConstant { + stages, + range: range.start / 4 .. range.end / 4, + } + }) + .collect() +} + +#[cfg(test)] +mod tests { + use super::*; + + #[test] + fn test_single() { + let range = &[(pso::ShaderStageFlags::VERTEX, 0 .. 12)]; + assert_eq!(into_vec(range), split(range)); + } + + #[test] + fn test_overlap_1() { + + + + let ranges = &[ + (pso::ShaderStageFlags::VERTEX, 0 .. 12), + (pso::ShaderStageFlags::FRAGMENT, 8 .. 16), + ]; + + let reference = vec![ + RootConstant { + stages: pso::ShaderStageFlags::VERTEX, + range: 0 .. 2, + }, + RootConstant { + stages: pso::ShaderStageFlags::VERTEX | pso::ShaderStageFlags::FRAGMENT, + range: 2 .. 3, + }, + RootConstant { + stages: pso::ShaderStageFlags::FRAGMENT, + range: 3 .. 4, + }, + ]; + assert_eq!(reference, split(ranges)); + } + + #[test] + fn test_overlap_2() { + + + + let ranges = &[ + (pso::ShaderStageFlags::VERTEX, 0 .. 20), + (pso::ShaderStageFlags::FRAGMENT, 8 .. 16), + ]; + + let reference = vec![ + RootConstant { + stages: pso::ShaderStageFlags::VERTEX, + range: 0 .. 2, + }, + RootConstant { + stages: pso::ShaderStageFlags::VERTEX | pso::ShaderStageFlags::FRAGMENT, + range: 2 .. 4, + }, + RootConstant { + stages: pso::ShaderStageFlags::VERTEX, + range: 4 .. 5, + }, + ]; + assert_eq!(reference, split(ranges)); + } + + #[test] + fn test_overlap_4() { + + + + let ranges = &[ + (pso::ShaderStageFlags::VERTEX, 0 .. 20), + (pso::ShaderStageFlags::FRAGMENT, 0 .. 16), + ]; + + let reference = vec![ + RootConstant { + stages: pso::ShaderStageFlags::VERTEX | pso::ShaderStageFlags::FRAGMENT, + range: 0 .. 4, + }, + RootConstant { + stages: pso::ShaderStageFlags::VERTEX, + range: 4 .. 5, + }, + ]; + assert_eq!(reference, split(ranges)); + } + + #[test] + fn test_equal() { + + + + let ranges = &[ + (pso::ShaderStageFlags::VERTEX, 0 .. 16), + (pso::ShaderStageFlags::FRAGMENT, 0 .. 16), + ]; + + let reference = vec![RootConstant { + stages: pso::ShaderStageFlags::VERTEX | pso::ShaderStageFlags::FRAGMENT, + range: 0 .. 
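[editor sketch] split above maintains a sorted stack and repeatedly divides the lowest range; an equivalent way to state the result is to cut all ranges at every endpoint and OR together the stage flags covering each elementary interval. A compact O(n²) re-statement under that view, which should reproduce the module's test cases (stage flags as plain u32 bits, ranges already converted to 32-bit units as into_vec does):

    use std::ops::Range;

    fn split_simple(ranges: &[(u32, Range<u32>)]) -> Vec<(u32, Range<u32>)> {
        // Every range endpoint is a cut point.
        let mut cuts: Vec<u32> = ranges.iter().flat_map(|(_, r)| [r.start, r.end]).collect();
        cuts.sort_unstable();
        cuts.dedup();

        let mut out = Vec::new();
        for w in cuts.windows(2) {
            let (start, end) = (w[0], w[1]);
            // OR the stages of every input range covering this interval.
            let stages = ranges
                .iter()
                .filter(|(_, r)| r.start < end && start < r.end)
                .fold(0u32, |acc, (s, _)| acc | *s);
            if stages != 0 {
                out.push((stages, start..end));
            }
        }
        out
    }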
4, + }]; + assert_eq!(reference, split(ranges)); + } + + #[test] + fn test_disjunct() { + + + + let ranges = &[ + (pso::ShaderStageFlags::VERTEX, 0 .. 12), + (pso::ShaderStageFlags::FRAGMENT, 12 .. 16), + ]; + assert_eq!(into_vec(ranges), split(ranges)); + } + + #[test] + fn test_complex() { + let ranges = &[ + (pso::ShaderStageFlags::VERTEX, 8 .. 40), + (pso::ShaderStageFlags::FRAGMENT, 0 .. 20), + (pso::ShaderStageFlags::GEOMETRY, 24 .. 40), + (pso::ShaderStageFlags::HULL, 16 .. 28), + ]; + + let reference = vec![ + RootConstant { + stages: pso::ShaderStageFlags::FRAGMENT, + range: 0 .. 2, + }, + RootConstant { + stages: pso::ShaderStageFlags::VERTEX | pso::ShaderStageFlags::FRAGMENT, + range: 2 .. 4, + }, + RootConstant { + stages: pso::ShaderStageFlags::VERTEX + | pso::ShaderStageFlags::FRAGMENT + | pso::ShaderStageFlags::HULL, + range: 4 .. 5, + }, + RootConstant { + stages: pso::ShaderStageFlags::VERTEX | pso::ShaderStageFlags::HULL, + range: 5 .. 6, + }, + RootConstant { + stages: pso::ShaderStageFlags::VERTEX + | pso::ShaderStageFlags::GEOMETRY + | pso::ShaderStageFlags::HULL, + range: 6 .. 7, + }, + RootConstant { + stages: pso::ShaderStageFlags::VERTEX | pso::ShaderStageFlags::GEOMETRY, + range: 7 .. 10, + }, + ]; + + assert_eq!(reference, split(ranges)); + } +} diff --git a/third_party/rust/gfx-backend-dx12/src/window.rs b/third_party/rust/gfx-backend-dx12/src/window.rs new file mode 100644 index 000000000000..8003272fb04d --- /dev/null +++ b/third_party/rust/gfx-backend-dx12/src/window.rs @@ -0,0 +1,233 @@ +use std::collections::VecDeque; +use std::{fmt, mem}; + +use winapi::shared::{ + dxgi1_4, + windef::{HWND, RECT}, + winerror, +}; +use winapi::um::winuser::GetClientRect; + +use hal::{self, device::Device as _, format as f, image as i, window as w}; +use {conv, native, resource as r, Backend, Device, Instance, PhysicalDevice, QueueFamily}; + +use std::os::raw::c_void; + +impl Instance { + pub fn create_surface_from_hwnd(&self, hwnd: *mut c_void) -> Surface { + Surface { + factory: self.factory, + wnd_handle: hwnd as *mut _, + presentation: None, + } + } +} + +#[derive(Debug)] +struct Presentation { + swapchain: Swapchain, + format: f::Format, + size: w::Extent2D, +} + +pub struct Surface { + pub(crate) factory: native::WeakPtr, + pub(crate) wnd_handle: HWND, + presentation: Option, +} + +impl fmt::Debug for Surface { + fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result { + fmt.write_str("Surface") + } +} + +unsafe impl Send for Surface {} +unsafe impl Sync for Surface {} + +impl Surface { + pub(crate) unsafe fn present(&self) { + self.presentation + .as_ref() + .unwrap() + .swapchain + .inner + .Present(1, 0); + } +} + +impl w::Surface for Surface { + fn supports_queue_family(&self, queue_family: &QueueFamily) -> bool { + match queue_family { + &QueueFamily::Present => true, + _ => false, + } + } + + fn capabilities(&self, _physical_device: &PhysicalDevice) -> w::SurfaceCapabilities { + let current_extent = unsafe { + let mut rect: RECT = mem::zeroed(); + if GetClientRect(self.wnd_handle as *mut _, &mut rect as *mut RECT) == 0 { + panic!("GetClientRect failed"); + } + Some(w::Extent2D { + width: (rect.right - rect.left) as u32, + height: (rect.bottom - rect.top) as u32, + }) + }; + + w::SurfaceCapabilities { + present_modes: w::PresentMode::FIFO, + composite_alpha_modes: w::CompositeAlphaMode::OPAQUE, + image_count: 2 ..= 16, + current_extent, + extents: w::Extent2D { + width: 16, + height: 16, + } ..= w::Extent2D { + width: 4096, + height: 4096, + }, + max_image_layers: 
1, + usage: i::Usage::COLOR_ATTACHMENT | i::Usage::TRANSFER_SRC | i::Usage::TRANSFER_DST, + } + } + + fn supported_formats(&self, _physical_device: &PhysicalDevice) -> Option> { + Some(vec![ + f::Format::Bgra8Srgb, + f::Format::Bgra8Unorm, + f::Format::Rgba8Srgb, + f::Format::Rgba8Unorm, + f::Format::A2b10g10r10Unorm, + f::Format::Rgba16Sfloat, + ]) + } +} + +impl w::PresentationSurface for Surface { + type SwapchainImage = r::ImageView; + + unsafe fn configure_swapchain( + &mut self, + device: &Device, + config: w::SwapchainConfig, + ) -> Result<(), w::CreationError> { + assert!(i::Usage::COLOR_ATTACHMENT.contains(config.image_usage)); + + let swapchain = match self.presentation.take() { + Some(present) => { + if present.format == config.format && present.size == config.extent { + self.presentation = Some(present); + return Ok(()); + } + + device.wait_idle().unwrap(); + + let inner = present.swapchain.release_resources(); + let result = inner.ResizeBuffers( + config.image_count, + config.extent.width, + config.extent.height, + conv::map_format_nosrgb(config.format).unwrap(), + 0, + ); + if result != winerror::S_OK { + error!("ResizeBuffers failed with 0x{:x}", result as u32); + return Err(w::CreationError::WindowInUse(hal::device::WindowInUse)); + } + inner + } + None => { + let (swapchain, _) = + device.create_swapchain_impl(&config, self.wnd_handle, self.factory.clone())?; + swapchain + } + }; + + self.presentation = Some(Presentation { + swapchain: device.wrap_swapchain(swapchain, &config), + format: config.format, + size: config.extent, + }); + Ok(()) + } + + unsafe fn unconfigure_swapchain(&mut self, device: &Device) { + if let Some(present) = self.presentation.take() { + device.destroy_swapchain(present.swapchain); + } + } + + unsafe fn acquire_image( + &mut self, + _timeout_ns: u64, + ) -> Result<(r::ImageView, Option), w::AcquireError> { + let present = self.presentation.as_mut().unwrap(); + let sc = &mut present.swapchain; + + let view = r::ImageView { + resource: sc.resources[sc.next_frame], + handle_srv: None, + handle_rtv: Some(sc.rtv_heap.at(sc.next_frame as _, 0).cpu), + handle_uav: None, + handle_dsv: None, + dxgi_format: conv::map_format(present.format).unwrap(), + num_levels: 1, + mip_levels: (0, 1), + layers: (0, 1), + kind: i::Kind::D2(present.size.width, present.size.height, 1, 1), + }; + sc.next_frame = (sc.next_frame + 1) % sc.resources.len(); + + Ok((view, None)) + } +} + +#[derive(Debug)] +pub struct Swapchain { + pub(crate) inner: native::WeakPtr, + pub(crate) next_frame: usize, + pub(crate) frame_queue: VecDeque, + #[allow(dead_code)] + pub(crate) rtv_heap: r::DescriptorHeap, + + + pub(crate) resources: Vec, +} + +impl Swapchain { + pub(crate) unsafe fn release_resources(self) -> native::WeakPtr { + for resource in &self.resources { + resource.destroy(); + } + self.rtv_heap.destroy(); + self.inner + } +} + +impl w::Swapchain for Swapchain { + unsafe fn acquire_image( + &mut self, + _timout_ns: u64, + _semaphore: Option<&r::Semaphore>, + _fence: Option<&r::Fence>, + ) -> Result<(w::SwapImageIndex, Option), w::AcquireError> { + + + if false { + + + let num_images = 1; + let index = self.next_frame; + self.frame_queue.push_back(index); + self.next_frame = (self.next_frame + 1) % num_images; + } + + + Ok((self.inner.GetCurrentBackBufferIndex(), None)) + } +} + +unsafe impl Send for Swapchain {} +unsafe impl Sync for Swapchain {} diff --git a/third_party/rust/gfx-backend-empty/.cargo-checksum.json b/third_party/rust/gfx-backend-empty/.cargo-checksum.json new file mode 
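[editor sketch] acquire_image above hands out swapchain images round-robin via a next_frame counter (while the older Swapchain trait path defers to GetCurrentBackBufferIndex). The rotation on its own:

    struct FrameRing {
        next: usize,
        len: usize,
    }

    impl FrameRing {
        fn acquire(&mut self) -> usize {
            let index = self.next;
            self.next = (self.next + 1) % self.len; // wrap over the image count
            index
        }
    }

    // A 3-image ring yields 0, 1, 2, 0, 1, ...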
diff --git a/third_party/rust/gfx-backend-empty/.cargo-checksum.json b/third_party/rust/gfx-backend-empty/.cargo-checksum.json new file mode 100644 index 000000000000..5e856666b5b3 --- /dev/null +++ b/third_party/rust/gfx-backend-empty/.cargo-checksum.json @@ -0,0 +1 @@ +{"files":{"Cargo.toml":"b92fafa311fcd3d2553f556adce6b0a44f74ffc65eed9746de32754cd4625d8f","src/lib.rs":"1c84f39df58771de5f35dca2e4ef6fd00e61eea82fbd7f53b417b2c5778ec9a4"},"package":"3d383e6bc48867cb37d298a20139fd1eec298f8f6d594690cd1c50ef25470cc7"} \ No newline at end of file diff --git a/third_party/rust/gfx-backend-empty/Cargo.toml b/third_party/rust/gfx-backend-empty/Cargo.toml new file mode 100644 index 000000000000..8bab6ab59883 --- /dev/null +++ b/third_party/rust/gfx-backend-empty/Cargo.toml @@ -0,0 +1,28 @@ +# THIS FILE IS AUTOMATICALLY GENERATED BY CARGO +# +# When uploading crates to the registry Cargo will automatically +# "normalize" Cargo.toml files for maximal compatibility +# with all versions of Cargo and also rewrite `path` dependencies +# to registry (e.g., crates.io) dependencies +# +# If you believe there's an error in this file please file an +# issue against the rust-lang/cargo repository. If you're +# editing this file be aware that the upstream Cargo.toml +# will likely look very different (and much more reasonable) + +[package] +edition = "2018" +name = "gfx-backend-empty" +version = "0.4.0" +authors = ["The Gfx-rs Developers"] +description = "Empty backend for gfx-rs" +documentation = "https://docs.rs/gfx-backend-empty" +license = "MIT OR Apache-2.0" + +[lib] +name = "gfx_backend_empty" +[dependencies.gfx-hal] +version = "0.4" + +[dependencies.raw-window-handle] +version = "0.3" diff --git a/third_party/rust/gfx-backend-empty/src/lib.rs b/third_party/rust/gfx-backend-empty/src/lib.rs new file mode 100644 index 000000000000..6b97273b2690 --- /dev/null +++ b/third_party/rust/gfx-backend-empty/src/lib.rs @@ -0,0 +1,1021 @@ + + + +extern crate gfx_hal as hal; + +use hal::range::RangeArg; +use hal::{ + adapter, + buffer, + command, + device, + format, + image, + memory, + pass, + pool, + pso, + query, + queue, + window, +}; +use std::borrow::Borrow; +use std::ops::Range; + +const DO_NOT_USE_MESSAGE: &str = "You need to enable a native API feature (vulkan/metal/dx11/dx12/gl/wgl) in order to use gfx-rs"; + + +#[derive(Copy, Clone, Debug, PartialEq, Eq, Hash)] +pub enum Backend {} +impl hal::Backend for Backend { + type Instance = Instance; + type PhysicalDevice = PhysicalDevice; + type Device = Device; + + type Surface = Surface; + type Swapchain = Swapchain; + + type QueueFamily = QueueFamily; + type CommandQueue = CommandQueue; + type CommandBuffer = CommandBuffer; + + type Memory = (); + type CommandPool = CommandPool; + + type ShaderModule = (); + type RenderPass = (); + type Framebuffer = (); + + type Buffer = (); + type BufferView = (); + type Image = (); + type ImageView = (); + type Sampler = (); + + type ComputePipeline = (); + type GraphicsPipeline = (); + type PipelineCache = (); + type PipelineLayout = (); + type DescriptorSetLayout = (); + type DescriptorPool = DescriptorPool; + type DescriptorSet = (); + + type Fence = (); + type Semaphore = (); + type Event = (); + type QueryPool = (); +} + + +#[derive(Debug)] +pub struct PhysicalDevice; +impl adapter::PhysicalDevice<Backend> for PhysicalDevice { + unsafe fn open( + &self, + _: &[(&QueueFamily, &[queue::QueuePriority])], + _: hal::Features, + ) -> Result<adapter::Gpu<Backend>, device::CreationError> { + panic!(DO_NOT_USE_MESSAGE) + } + + fn format_properties(&self, _: Option<format::Format>) -> format::Properties { + panic!(DO_NOT_USE_MESSAGE) + } + + fn image_format_properties( + &self, + _: format::Format, + _dim: u8,
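+ // `_dim` is the image dimensionality (1, 2, or 3); the stub never inspects it.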
_: image::Tiling, + _: image::Usage, + _: image::ViewCapabilities, + ) -> Option { + panic!(DO_NOT_USE_MESSAGE) + } + + fn memory_properties(&self) -> adapter::MemoryProperties { + panic!(DO_NOT_USE_MESSAGE) + } + + fn features(&self) -> hal::Features { + panic!(DO_NOT_USE_MESSAGE) + } + + fn limits(&self) -> hal::Limits { + panic!(DO_NOT_USE_MESSAGE) + } +} + + +#[derive(Debug)] +pub struct CommandQueue; +impl queue::CommandQueue for CommandQueue { + unsafe fn submit<'a, T, Ic, S, Iw, Is>( + &mut self, + _: queue::Submission, + _: Option<&()>, + ) where + T: 'a + Borrow, + Ic: IntoIterator, + S: 'a + Borrow<()>, + Iw: IntoIterator, + Is: IntoIterator, + { + panic!(DO_NOT_USE_MESSAGE) + } + + unsafe fn present<'a, W, Is, S, Iw>( + &mut self, + _: Is, + _: Iw, + ) -> Result, window::PresentError> + where + W: 'a + Borrow, + Is: IntoIterator, + S: 'a + Borrow<()>, + Iw: IntoIterator, + { + panic!(DO_NOT_USE_MESSAGE) + } + + unsafe fn present_surface( + &mut self, + _surface: &mut Surface, + _image: (), + _wait_semaphore: Option<&()>, + ) -> Result, window::PresentError> { + panic!(DO_NOT_USE_MESSAGE) + } + + fn wait_idle(&self) -> Result<(), device::OutOfMemory> { + panic!(DO_NOT_USE_MESSAGE) + } +} + + +#[derive(Debug)] +pub struct Device; +impl device::Device for Device { + unsafe fn create_command_pool( + &self, + _: queue::QueueFamilyId, + _: pool::CommandPoolCreateFlags, + ) -> Result { + panic!(DO_NOT_USE_MESSAGE) + } + + unsafe fn destroy_command_pool(&self, _: CommandPool) { + panic!(DO_NOT_USE_MESSAGE) + } + + unsafe fn allocate_memory( + &self, + _: hal::MemoryTypeId, + _: u64, + ) -> Result<(), device::AllocationError> { + panic!(DO_NOT_USE_MESSAGE) + } + + unsafe fn create_render_pass<'a, IA, IS, ID>( + &self, + _: IA, + _: IS, + _: ID, + ) -> Result<(), device::OutOfMemory> + where + IA: IntoIterator, + IA::Item: Borrow, + IS: IntoIterator, + IS::Item: Borrow>, + ID: IntoIterator, + ID::Item: Borrow, + { + panic!(DO_NOT_USE_MESSAGE) + } + + unsafe fn create_pipeline_layout(&self, _: IS, _: IR) -> Result<(), device::OutOfMemory> + where + IS: IntoIterator, + IS::Item: Borrow<()>, + IR: IntoIterator, + IR::Item: Borrow<(pso::ShaderStageFlags, Range)>, + { + panic!(DO_NOT_USE_MESSAGE) + } + + unsafe fn create_pipeline_cache( + &self, + _data: Option<&[u8]>, + ) -> Result<(), device::OutOfMemory> { + panic!(DO_NOT_USE_MESSAGE) + } + + unsafe fn get_pipeline_cache_data(&self, _cache: &()) -> Result, device::OutOfMemory> { + panic!(DO_NOT_USE_MESSAGE) + } + + unsafe fn destroy_pipeline_cache(&self, _: ()) { + panic!(DO_NOT_USE_MESSAGE) + } + + unsafe fn create_graphics_pipeline<'a>( + &self, + _: &pso::GraphicsPipelineDesc<'a, Backend>, + _: Option<&()>, + ) -> Result<(), pso::CreationError> { + panic!(DO_NOT_USE_MESSAGE) + } + + unsafe fn create_compute_pipeline<'a>( + &self, + _: &pso::ComputePipelineDesc<'a, Backend>, + _: Option<&()>, + ) -> Result<(), pso::CreationError> { + panic!(DO_NOT_USE_MESSAGE) + } + + unsafe fn merge_pipeline_caches(&self, _: &(), _: I) -> Result<(), device::OutOfMemory> + where + I: IntoIterator, + I::Item: Borrow<()>, + { + panic!(DO_NOT_USE_MESSAGE) + } + + unsafe fn create_framebuffer( + &self, + _: &(), + _: I, + _: image::Extent, + ) -> Result<(), device::OutOfMemory> + where + I: IntoIterator, + I::Item: Borrow<()>, + { + panic!(DO_NOT_USE_MESSAGE) + } + + unsafe fn create_shader_module(&self, _: &[u32]) -> Result<(), device::ShaderError> { + panic!(DO_NOT_USE_MESSAGE) + } + + unsafe fn create_sampler(&self, _: &image::SamplerDesc) -> Result<(), 
device::AllocationError> { + panic!(DO_NOT_USE_MESSAGE) + } + unsafe fn create_buffer(&self, _: u64, _: buffer::Usage) -> Result<(), buffer::CreationError> { + panic!(DO_NOT_USE_MESSAGE) + } + + unsafe fn get_buffer_requirements(&self, _: &()) -> memory::Requirements { + panic!(DO_NOT_USE_MESSAGE) + } + + unsafe fn bind_buffer_memory( + &self, + _: &(), + _: u64, + _: &mut (), + ) -> Result<(), device::BindError> { + panic!(DO_NOT_USE_MESSAGE) + } + + unsafe fn create_buffer_view>( + &self, + _: &(), + _: Option, + _: R, + ) -> Result<(), buffer::ViewCreationError> { + panic!(DO_NOT_USE_MESSAGE) + } + + unsafe fn create_image( + &self, + _: image::Kind, + _: image::Level, + _: format::Format, + _: image::Tiling, + _: image::Usage, + _: image::ViewCapabilities, + ) -> Result<(), image::CreationError> { + panic!(DO_NOT_USE_MESSAGE) + } + + unsafe fn get_image_requirements(&self, _: &()) -> memory::Requirements { + panic!(DO_NOT_USE_MESSAGE) + } + + unsafe fn get_image_subresource_footprint( + &self, + _: &(), + _: image::Subresource, + ) -> image::SubresourceFootprint { + panic!(DO_NOT_USE_MESSAGE) + } + + unsafe fn bind_image_memory( + &self, + _: &(), + _: u64, + _: &mut (), + ) -> Result<(), device::BindError> { + panic!(DO_NOT_USE_MESSAGE) + } + + unsafe fn create_image_view( + &self, + _: &(), + _: image::ViewKind, + _: format::Format, + _: format::Swizzle, + _: image::SubresourceRange, + ) -> Result<(), image::ViewError> { + panic!(DO_NOT_USE_MESSAGE) + } + + unsafe fn create_descriptor_pool( + &self, + _: usize, + _: I, + _: pso::DescriptorPoolCreateFlags, + ) -> Result + where + I: IntoIterator, + I::Item: Borrow, + { + panic!(DO_NOT_USE_MESSAGE) + } + + unsafe fn create_descriptor_set_layout( + &self, + _: I, + _: J, + ) -> Result<(), device::OutOfMemory> + where + I: IntoIterator, + I::Item: Borrow, + J: IntoIterator, + J::Item: Borrow<()>, + { + panic!(DO_NOT_USE_MESSAGE) + } + + unsafe fn write_descriptor_sets<'a, I, J>(&self, _: I) + where + I: IntoIterator>, + J: IntoIterator, + J::Item: Borrow>, + { + panic!(DO_NOT_USE_MESSAGE) + } + + unsafe fn copy_descriptor_sets<'a, I>(&self, _: I) + where + I: IntoIterator, + I::Item: Borrow>, + { + panic!(DO_NOT_USE_MESSAGE) + } + + fn create_semaphore(&self) -> Result<(), device::OutOfMemory> { + panic!(DO_NOT_USE_MESSAGE) + } + + fn create_fence(&self, _: bool) -> Result<(), device::OutOfMemory> { + panic!(DO_NOT_USE_MESSAGE) + } + + unsafe fn get_fence_status(&self, _: &()) -> Result { + panic!(DO_NOT_USE_MESSAGE) + } + + fn create_event(&self) -> Result<(), device::OutOfMemory> { + panic!(DO_NOT_USE_MESSAGE) + } + + unsafe fn get_event_status(&self, _: &()) -> Result { + panic!(DO_NOT_USE_MESSAGE) + } + + unsafe fn set_event(&self, _: &()) -> Result<(), device::OutOfMemory> { + panic!(DO_NOT_USE_MESSAGE) + } + + unsafe fn reset_event(&self, _: &()) -> Result<(), device::OutOfMemory> { + panic!(DO_NOT_USE_MESSAGE) + } + + unsafe fn create_query_pool(&self, _: query::Type, _: u32) -> Result<(), query::CreationError> { + panic!(DO_NOT_USE_MESSAGE) + } + + unsafe fn destroy_query_pool(&self, _: ()) { + panic!(DO_NOT_USE_MESSAGE) + } + + unsafe fn get_query_pool_results( + &self, + _: &(), + _: Range, + _: &mut [u8], + _: buffer::Offset, + _: query::ResultFlags, + ) -> Result { + panic!(DO_NOT_USE_MESSAGE) + } + + unsafe fn map_memory>( + &self, + _: &(), + _: R, + ) -> Result<*mut u8, device::MapError> { + panic!(DO_NOT_USE_MESSAGE) + } + + unsafe fn unmap_memory(&self, _: &()) { + panic!(DO_NOT_USE_MESSAGE) + } + + unsafe fn 
flush_mapped_memory_ranges<'a, I, R>(&self, _: I) -> Result<(), device::OutOfMemory> + where + I: IntoIterator, + I::Item: Borrow<(&'a (), R)>, + R: RangeArg, + { + panic!(DO_NOT_USE_MESSAGE) + } + + unsafe fn invalidate_mapped_memory_ranges<'a, I, R>( + &self, + _: I, + ) -> Result<(), device::OutOfMemory> + where + I: IntoIterator, + I::Item: Borrow<(&'a (), R)>, + R: RangeArg, + { + panic!(DO_NOT_USE_MESSAGE) + } + + unsafe fn free_memory(&self, _: ()) { + panic!(DO_NOT_USE_MESSAGE) + } + + unsafe fn destroy_shader_module(&self, _: ()) { + panic!(DO_NOT_USE_MESSAGE) + } + + unsafe fn destroy_render_pass(&self, _: ()) { + panic!(DO_NOT_USE_MESSAGE) + } + + unsafe fn destroy_pipeline_layout(&self, _: ()) { + panic!(DO_NOT_USE_MESSAGE) + } + unsafe fn destroy_graphics_pipeline(&self, _: ()) { + panic!(DO_NOT_USE_MESSAGE) + } + unsafe fn destroy_compute_pipeline(&self, _: ()) { + panic!(DO_NOT_USE_MESSAGE) + } + unsafe fn destroy_framebuffer(&self, _: ()) { + panic!(DO_NOT_USE_MESSAGE) + } + + unsafe fn destroy_buffer(&self, _: ()) { + panic!(DO_NOT_USE_MESSAGE) + } + unsafe fn destroy_buffer_view(&self, _: ()) { + panic!(DO_NOT_USE_MESSAGE) + } + unsafe fn destroy_image(&self, _: ()) { + panic!(DO_NOT_USE_MESSAGE) + } + unsafe fn destroy_image_view(&self, _: ()) { + panic!(DO_NOT_USE_MESSAGE) + } + unsafe fn destroy_sampler(&self, _: ()) { + panic!(DO_NOT_USE_MESSAGE) + } + + unsafe fn destroy_descriptor_pool(&self, _: DescriptorPool) { + panic!(DO_NOT_USE_MESSAGE) + } + + unsafe fn destroy_descriptor_set_layout(&self, _: ()) { + panic!(DO_NOT_USE_MESSAGE) + } + + unsafe fn destroy_fence(&self, _: ()) { + panic!(DO_NOT_USE_MESSAGE) + } + + unsafe fn destroy_semaphore(&self, _: ()) { + panic!(DO_NOT_USE_MESSAGE) + } + + unsafe fn destroy_event(&self, _: ()) { + panic!(DO_NOT_USE_MESSAGE) + } + + unsafe fn create_swapchain( + &self, + _: &mut Surface, + _: window::SwapchainConfig, + _: Option, + ) -> Result<(Swapchain, Vec<()>), hal::window::CreationError> { + panic!(DO_NOT_USE_MESSAGE) + } + + unsafe fn destroy_swapchain(&self, _: Swapchain) { + panic!(DO_NOT_USE_MESSAGE) + } + + fn wait_idle(&self) -> Result<(), device::OutOfMemory> { + panic!(DO_NOT_USE_MESSAGE) + } + + unsafe fn set_image_name(&self, _: &mut (), _: &str) { + panic!(DO_NOT_USE_MESSAGE) + } + + unsafe fn set_buffer_name(&self, _: &mut (), _: &str) { + panic!(DO_NOT_USE_MESSAGE) + } + + unsafe fn set_command_buffer_name(&self, _: &mut CommandBuffer, _: &str) { + panic!(DO_NOT_USE_MESSAGE) + } + + unsafe fn set_semaphore_name(&self, _: &mut (), _: &str) { + panic!(DO_NOT_USE_MESSAGE) + } + + unsafe fn set_fence_name(&self, _: &mut (), _: &str) { + panic!(DO_NOT_USE_MESSAGE) + } + + unsafe fn set_framebuffer_name(&self, _: &mut (), _: &str) { + panic!(DO_NOT_USE_MESSAGE) + } + + unsafe fn set_render_pass_name(&self, _: &mut (), _: &str) { + panic!(DO_NOT_USE_MESSAGE) + } + + unsafe fn set_descriptor_set_name(&self, _: &mut (), _: &str) { + panic!(DO_NOT_USE_MESSAGE) + } + + unsafe fn set_descriptor_set_layout_name(&self, _: &mut (), _: &str) { + panic!(DO_NOT_USE_MESSAGE) + } +} + +#[derive(Debug)] +pub struct QueueFamily; +impl queue::QueueFamily for QueueFamily { + fn queue_type(&self) -> queue::QueueType { + panic!(DO_NOT_USE_MESSAGE) + } + fn max_queues(&self) -> usize { + panic!(DO_NOT_USE_MESSAGE) + } + fn id(&self) -> queue::QueueFamilyId { + panic!(DO_NOT_USE_MESSAGE) + } +} + + +#[derive(Debug)] +pub struct CommandPool; +impl pool::CommandPool for CommandPool { + unsafe fn reset(&mut self, _: bool) { + 
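+ // Like every other method in this placeholder backend, reset() only panics with + // DO_NOT_USE_MESSAGE; the crate exists solely so a gfx-hal Backend implementation + // is available when no native API feature (vulkan/metal/dx11/dx12/gl/wgl) is enabled.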
panic!(DO_NOT_USE_MESSAGE) + } + + unsafe fn free(&mut self, _: I) + where + I: IntoIterator, + { + panic!(DO_NOT_USE_MESSAGE) + } +} + + +#[derive(Debug)] +pub struct CommandBuffer; +impl command::CommandBuffer for CommandBuffer { + unsafe fn begin( + &mut self, + _: command::CommandBufferFlags, + _: command::CommandBufferInheritanceInfo, + ) { + panic!(DO_NOT_USE_MESSAGE) + } + + unsafe fn finish(&mut self) { + panic!(DO_NOT_USE_MESSAGE) + } + + unsafe fn reset(&mut self, _: bool) { + panic!(DO_NOT_USE_MESSAGE) + } + + unsafe fn pipeline_barrier<'a, T>( + &mut self, + _: Range, + _: memory::Dependencies, + _: T, + ) where + T: IntoIterator, + T::Item: Borrow>, + { + panic!(DO_NOT_USE_MESSAGE) + } + + unsafe fn fill_buffer(&mut self, _: &(), _: R, _: u32) + where + R: RangeArg, + { + panic!(DO_NOT_USE_MESSAGE) + } + + unsafe fn update_buffer(&mut self, _: &(), _: buffer::Offset, _: &[u8]) { + panic!(DO_NOT_USE_MESSAGE) + } + + unsafe fn clear_image(&mut self, _: &(), _: image::Layout, _: command::ClearValue, _: T) + where + T: IntoIterator, + T::Item: Borrow, + { + panic!(DO_NOT_USE_MESSAGE) + } + + unsafe fn clear_attachments(&mut self, _: T, _: U) + where + T: IntoIterator, + T::Item: Borrow, + U: IntoIterator, + U::Item: Borrow, + { + panic!(DO_NOT_USE_MESSAGE) + } + + unsafe fn resolve_image(&mut self, _: &(), _: image::Layout, _: &(), _: image::Layout, _: T) + where + T: IntoIterator, + T::Item: Borrow, + { + panic!(DO_NOT_USE_MESSAGE) + } + + unsafe fn blit_image( + &mut self, + _: &(), + _: image::Layout, + _: &(), + _: image::Layout, + _: image::Filter, + _: T, + ) where + T: IntoIterator, + T::Item: Borrow, + { + panic!(DO_NOT_USE_MESSAGE) + } + + unsafe fn bind_index_buffer(&mut self, _: buffer::IndexBufferView) { + panic!(DO_NOT_USE_MESSAGE) + } + + unsafe fn bind_vertex_buffers(&mut self, _: u32, _: I) + where + I: IntoIterator, + T: Borrow<()>, + { + panic!(DO_NOT_USE_MESSAGE) + } + + unsafe fn set_viewports(&mut self, _: u32, _: T) + where + T: IntoIterator, + T::Item: Borrow, + { + panic!(DO_NOT_USE_MESSAGE) + } + + unsafe fn set_scissors(&mut self, _: u32, _: T) + where + T: IntoIterator, + T::Item: Borrow, + { + panic!(DO_NOT_USE_MESSAGE) + } + + unsafe fn set_stencil_reference(&mut self, _: pso::Face, _: pso::StencilValue) { + panic!(DO_NOT_USE_MESSAGE) + } + + unsafe fn set_stencil_read_mask(&mut self, _: pso::Face, _: pso::StencilValue) { + panic!(DO_NOT_USE_MESSAGE) + } + + unsafe fn set_stencil_write_mask(&mut self, _: pso::Face, _: pso::StencilValue) { + panic!(DO_NOT_USE_MESSAGE) + } + + unsafe fn set_blend_constants(&mut self, _: pso::ColorValue) { + panic!(DO_NOT_USE_MESSAGE) + } + + unsafe fn set_depth_bounds(&mut self, _: Range) { + panic!(DO_NOT_USE_MESSAGE) + } + + unsafe fn set_line_width(&mut self, _: f32) { + panic!(DO_NOT_USE_MESSAGE) + } + + unsafe fn set_depth_bias(&mut self, _: pso::DepthBias) { + panic!(DO_NOT_USE_MESSAGE) + } + + unsafe fn begin_render_pass( + &mut self, + _: &(), + _: &(), + _: pso::Rect, + _: T, + _: command::SubpassContents, + ) where + T: IntoIterator, + T::Item: Borrow, + { + panic!(DO_NOT_USE_MESSAGE) + } + + unsafe fn next_subpass(&mut self, _: command::SubpassContents) { + panic!(DO_NOT_USE_MESSAGE) + } + + unsafe fn end_render_pass(&mut self) { + panic!(DO_NOT_USE_MESSAGE) + } + + unsafe fn bind_graphics_pipeline(&mut self, _: &()) { + panic!(DO_NOT_USE_MESSAGE) + } + + unsafe fn bind_graphics_descriptor_sets(&mut self, _: &(), _: usize, _: I, _: J) + where + I: IntoIterator, + I::Item: Borrow<()>, + J: IntoIterator, + 
J::Item: Borrow, + { + panic!(DO_NOT_USE_MESSAGE) + } + + unsafe fn bind_compute_pipeline(&mut self, _: &()) { + panic!(DO_NOT_USE_MESSAGE) + } + + unsafe fn bind_compute_descriptor_sets(&mut self, _: &(), _: usize, _: I, _: J) + where + I: IntoIterator, + I::Item: Borrow<()>, + J: IntoIterator, + J::Item: Borrow, + { + panic!(DO_NOT_USE_MESSAGE) + } + + unsafe fn dispatch(&mut self, _: hal::WorkGroupCount) { + panic!(DO_NOT_USE_MESSAGE) + } + + unsafe fn dispatch_indirect(&mut self, _: &(), _: buffer::Offset) { + panic!(DO_NOT_USE_MESSAGE) + } + + unsafe fn copy_buffer(&mut self, _: &(), _: &(), _: T) + where + T: IntoIterator, + T::Item: Borrow, + { + panic!(DO_NOT_USE_MESSAGE) + } + + unsafe fn copy_image(&mut self, _: &(), _: image::Layout, _: &(), _: image::Layout, _: T) + where + T: IntoIterator, + T::Item: Borrow, + { + panic!(DO_NOT_USE_MESSAGE) + } + + unsafe fn copy_buffer_to_image(&mut self, _: &(), _: &(), _: image::Layout, _: T) + where + T: IntoIterator, + T::Item: Borrow, + { + panic!(DO_NOT_USE_MESSAGE) + } + + unsafe fn copy_image_to_buffer(&mut self, _: &(), _: image::Layout, _: &(), _: T) + where + T: IntoIterator, + T::Item: Borrow, + { + panic!(DO_NOT_USE_MESSAGE) + } + + unsafe fn draw(&mut self, _: Range, _: Range) { + panic!(DO_NOT_USE_MESSAGE) + } + + unsafe fn draw_indexed( + &mut self, + _: Range, + _: hal::VertexOffset, + _: Range, + ) { + panic!(DO_NOT_USE_MESSAGE) + } + + unsafe fn draw_indirect(&mut self, _: &(), _: buffer::Offset, _: hal::DrawCount, _: u32) { + panic!(DO_NOT_USE_MESSAGE) + } + + unsafe fn draw_indexed_indirect( + &mut self, + _: &(), + _: buffer::Offset, + _: hal::DrawCount, + _: u32, + ) { + panic!(DO_NOT_USE_MESSAGE) + } + + unsafe fn set_event(&mut self, _: &(), _: pso::PipelineStage) { + panic!(DO_NOT_USE_MESSAGE) + } + + unsafe fn reset_event(&mut self, _: &(), _: pso::PipelineStage) { + panic!(DO_NOT_USE_MESSAGE) + } + + unsafe fn wait_events<'a, I, J>(&mut self, _: I, _: Range, _: J) + where + I: IntoIterator, + I::Item: Borrow<()>, + J: IntoIterator, + J::Item: Borrow>, + { + panic!(DO_NOT_USE_MESSAGE) + } + + unsafe fn begin_query(&mut self, _: query::Query, _: query::ControlFlags) { + panic!(DO_NOT_USE_MESSAGE) + } + + unsafe fn end_query(&mut self, _: query::Query) { + panic!(DO_NOT_USE_MESSAGE) + } + + unsafe fn reset_query_pool(&mut self, _: &(), _: Range) { + panic!(DO_NOT_USE_MESSAGE) + } + + unsafe fn copy_query_pool_results( + &mut self, + _: &(), + _: Range, + _: &(), + _: buffer::Offset, + _: buffer::Offset, + _: query::ResultFlags, + ) { + panic!(DO_NOT_USE_MESSAGE) + } + + unsafe fn write_timestamp(&mut self, _: pso::PipelineStage, _: query::Query) { + panic!(DO_NOT_USE_MESSAGE) + } + + unsafe fn push_graphics_constants( + &mut self, + _: &(), + _: pso::ShaderStageFlags, + _: u32, + _: &[u32], + ) { + panic!(DO_NOT_USE_MESSAGE) + } + + unsafe fn push_compute_constants(&mut self, _: &(), _: u32, _: &[u32]) { + panic!(DO_NOT_USE_MESSAGE) + } + + unsafe fn execute_commands<'a, T, I>(&mut self, _: I) + where + T: 'a + Borrow, + I: IntoIterator, + { + panic!(DO_NOT_USE_MESSAGE) + } +} + + +#[derive(Debug)] +pub struct DescriptorPool; +impl pso::DescriptorPool for DescriptorPool { + unsafe fn free_sets(&mut self, _descriptor_sets: I) + where + I: IntoIterator, + { + panic!(DO_NOT_USE_MESSAGE) + } + + unsafe fn reset(&mut self) { + panic!(DO_NOT_USE_MESSAGE) + } +} + + +#[derive(Debug)] +pub struct Surface; +impl window::Surface for Surface { + fn supports_queue_family(&self, _: &QueueFamily) -> bool { + 
panic!(DO_NOT_USE_MESSAGE) + } + + fn capabilities(&self, _: &PhysicalDevice) -> window::SurfaceCapabilities { + panic!(DO_NOT_USE_MESSAGE) + } + + fn supported_formats(&self, _: &PhysicalDevice) -> Option> { + panic!(DO_NOT_USE_MESSAGE) + } +} +impl window::PresentationSurface for Surface { + type SwapchainImage = (); + + unsafe fn configure_swapchain( + &mut self, + _: &Device, + _: window::SwapchainConfig, + ) -> Result<(), window::CreationError> { + panic!(DO_NOT_USE_MESSAGE) + } + + unsafe fn unconfigure_swapchain(&mut self, _: &Device) { + panic!(DO_NOT_USE_MESSAGE) + } + + unsafe fn acquire_image( + &mut self, + _: u64, + ) -> Result<((), Option), window::AcquireError> { + panic!(DO_NOT_USE_MESSAGE) + } +} + + +#[derive(Debug)] +pub struct Swapchain; +impl window::Swapchain for Swapchain { + unsafe fn acquire_image( + &mut self, + _: u64, + _: Option<&()>, + _: Option<&()>, + ) -> Result<(window::SwapImageIndex, Option), window::AcquireError> { + panic!(DO_NOT_USE_MESSAGE) + } +} + +#[derive(Debug)] +pub struct Instance; + +impl hal::Instance for Instance { + fn create(_name: &str, _version: u32) -> Result { + Ok(Instance) + } + + fn enumerate_adapters(&self) -> Vec> { + vec![] + } + + unsafe fn create_surface( + &self, + _: &impl raw_window_handle::HasRawWindowHandle, + ) -> Result { + panic!(DO_NOT_USE_MESSAGE) + } + + unsafe fn destroy_surface(&self, _surface: Surface) { + panic!(DO_NOT_USE_MESSAGE) + } +} diff --git a/third_party/rust/gfx-backend-metal/.cargo-checksum.json b/third_party/rust/gfx-backend-metal/.cargo-checksum.json new file mode 100644 index 000000000000..873f04ca6c80 --- /dev/null +++ b/third_party/rust/gfx-backend-metal/.cargo-checksum.json @@ -0,0 +1 @@ +{"files":{"Cargo.toml":"59c2a87f5273f966fe55c9697ebbcaafd36ae8ada389db9e03a0346496f539af","README.md":"0b5008f38b9cf1bda9de72f8ca467c399404df0e75daf3b1e5796f4d1fd7568f","shaders/blit.metal":"b243873ac0d7ded37b199d17d1a7b53d5332b4a57bfa22f99dcf60273730be45","shaders/clear.metal":"796a612c1cb48e46fc94b7227feaab993d7ddeed293b69e9f09b2dd88e6a1189","shaders/fill.metal":"2642b5df62f8eb2246a442137d083010d2a3132110d9be4eb25b479123098d25","shaders/gfx_shaders.metallib":"d98a657dce490f62503b76f18f0bb76a183140a309e13b5bae446c268eac40b9","shaders/macros.h":"a4550ac7c180935c2edb57aa7a5f8442b53f1f3dc65df8cc800d0afb8289cdeb","src/command.rs":"a3c816d85e2648505d5b5c06b64aa12c4237d005b90fd6cca010cf346694d8a9","src/conversions.rs":"ab9daf8e97b7d28bea3b8e6773afc287b3441d148a1cc12822c646cdbba2a37f","src/device.rs":"833b5dc4cf42c29ca8e1c6eef92724ec0032f5da2ca97172bc0def1f16366a05","src/internal.rs":"922ff850465db0bd7aacf1a451f4c73752eb8310967683069bdd33c3024e938b","src/lib.rs":"f0402323cf08ec56419b1965550fb394c44da2e2939b9c83d3ed548e4cd3f2de","src/native.rs":"516229d72433df23296f11b1490278f080d5a90646e7961f0e928da036f7f28d","src/soft.rs":"795767c3756a95b5a1e3bf28d2d4ce3eb85fb358ef098a4fbe0af893509e3941","src/window.rs":"cebbe53f2fb45dbdfcf03ba18ca181fa966997665cec65ae1a1d77d0c193f20b"},"package":"8de5c71f18ba805c95b84d6c78c472ef44485a6fc46e3b49fe1e6739c8d7b0c0"} \ No newline at end of file diff --git a/third_party/rust/gfx-backend-metal/Cargo.toml b/third_party/rust/gfx-backend-metal/Cargo.toml new file mode 100644 index 000000000000..58ae95f1169c --- /dev/null +++ b/third_party/rust/gfx-backend-metal/Cargo.toml @@ -0,0 +1,98 @@ +# THIS FILE IS AUTOMATICALLY GENERATED BY CARGO +# +# When uploading crates to the registry Cargo will automatically +# "normalize" Cargo.toml files for maximal compatibility +# with all versions of Cargo 
and also rewrite `path` dependencies +# to registry (e.g., crates.io) dependencies +# +# If you believe there's an error in this file please file an +# issue against the rust-lang/cargo repository. If you're +# editing this file be aware that the upstream Cargo.toml +# will likely look very different (and much more reasonable) + +[package] +edition = "2018" +name = "gfx-backend-metal" +version = "0.4.0" +authors = ["The Gfx-rs Developers"] +description = "Metal API backend for gfx-rs" +homepage = "https://github.com/gfx-rs/gfx" +documentation = "https://docs.rs/gfx-backend-metal" +readme = "README.md" +keywords = ["graphics", "gamedev"] +license = "MIT OR Apache-2.0" +repository = "https://github.com/gfx-rs/gfx" +[package.metadata.docs.rs] +default-target = "x86_64-apple-darwin" + +[lib] +name = "gfx_backend_metal" +[dependencies.arrayvec] +version = "0.5" + +[dependencies.auxil] +version = "0.1" +package = "gfx-auxil" + +[dependencies.bitflags] +version = "1.0" + +[dependencies.block] +version = "0.1" + +[dependencies.cocoa] +version = "0.19" + +[dependencies.copyless] +version = "0.1.4" + +[dependencies.core-graphics] +version = "0.17" + +[dependencies.dispatch] +version = "0.1" +optional = true + +[dependencies.foreign-types] +version = "0.3" + +[dependencies.hal] +version = "0.4" +package = "gfx-hal" + +[dependencies.lazy_static] +version = "1" + +[dependencies.log] +version = "0.4" + +[dependencies.metal] +version = "0.17" +features = ["private"] + +[dependencies.objc] +version = "0.2.5" + +[dependencies.parking_lot] +version = "0.9" + +[dependencies.range-alloc] +version = "0.1" + +[dependencies.raw-window-handle] +version = "0.3" + +[dependencies.smallvec] +version = "0.6" + +[dependencies.spirv_cross] +version = "0.16" +features = ["msl"] + +[dependencies.storage-map] +version = "0.2" + +[features] +auto-capture = [] +default = [] +signpost = [] diff --git a/third_party/rust/gfx-backend-metal/README.md b/third_party/rust/gfx-backend-metal/README.md new file mode 100644 index 000000000000..e95168a52c31 --- /dev/null +++ b/third_party/rust/gfx-backend-metal/README.md @@ -0,0 +1,13 @@ +# gfx-backend-metal + +[Metal](https://developer.apple.com/metal/) backend for gfx-rs. 
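One note on the dependency list above: `spirv_cross` is pulled in with the `msl` feature because this backend translates SPIR-V shader modules into Metal Shading Language at pipeline-creation time (the real call sites are in src/device.rs of this vendored crate, not shown in this hunk). A minimal sketch of that translation step, assuming the spirv_cross 0.16 API pinned here:

```rust
use spirv_cross::{msl, spirv, ErrorCode};

// Minimal sketch, assuming spirv_cross 0.16: parse a SPIR-V module and emit
// MSL source. A full implementation would also configure msl::CompilerOptions
// (entry points, resource binding remapping) before compiling.
fn spirv_to_msl(words: &[u32]) -> Result<String, ErrorCode> {
    let module = spirv::Module::from_words(words);
    let mut ast = spirv::Ast::<msl::Target>::parse(&module)?;
    ast.compile() // returns the generated Metal Shading Language source
}
```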
+ +## Normalized Coordinates + +Render | Depth | Texture +-------|-------|-------- +![render_coordinates](../../../info/gl_render_coordinates.png) | ![depth_coordinates](../../../info/dx_depth_coordinates.png) | ![texture_coordinates](../../../info/dx_texture_coordinates.png) + +## Mirroring + +TODO diff --git a/third_party/rust/gfx-backend-metal/shaders/blit.metal b/third_party/rust/gfx-backend-metal/shaders/blit.metal new file mode 100644 index 000000000000..9fcaf1c2b02e --- /dev/null +++ b/third_party/rust/gfx-backend-metal/shaders/blit.metal @@ -0,0 +1,110 @@ +#include "macros.h" +#include <metal_stdlib> +using namespace metal; + +typedef struct { + float4 src_coords [[attribute(0)]]; + float4 dst_coords [[attribute(1)]]; +} BlitAttributes; + +typedef struct { + float4 position [[position]]; + float4 uv; + uint layer GFX_RENDER_TARGET_ARRAY_INDEX; +} BlitVertexData; + +typedef struct { + float depth [[depth(any)]]; +} BlitDepthFragment; + + +vertex BlitVertexData vs_blit(BlitAttributes in [[stage_in]]) { + float4 pos = { 0.0, 0.0, in.dst_coords.z, 1.0f }; + pos.xy = in.dst_coords.xy * 2.0 - 1.0; + return BlitVertexData { pos, in.src_coords, uint(in.dst_coords.w) }; +} + +fragment float4 ps_blit_1d_float( + BlitVertexData in [[stage_in]], + texture1d<float> tex1D [[ texture(0) ]], + sampler sampler2D [[ sampler(0) ]] +) { + return tex1D.sample(sampler2D, in.uv.x); +} + + +fragment float4 ps_blit_1d_array_float( + BlitVertexData in [[stage_in]], + texture1d_array<float> tex1DArray [[ texture(0) ]], + sampler sampler2D [[ sampler(0) ]] +) { + return tex1DArray.sample(sampler2D, in.uv.x, uint(in.uv.z)); +} + + +fragment float4 ps_blit_2d_float( + BlitVertexData in [[stage_in]], + texture2d<float> tex2D [[ texture(0) ]], + sampler sampler2D [[ sampler(0) ]] +) { + return tex2D.sample(sampler2D, in.uv.xy, level(in.uv.w)); +} + +fragment uint4 ps_blit_2d_uint( + BlitVertexData in [[stage_in]], + texture2d<uint> tex2D [[ texture(0) ]], + sampler sampler2D [[ sampler(0) ]] +) { + return tex2D.sample(sampler2D, in.uv.xy, level(in.uv.w)); +} + +fragment int4 ps_blit_2d_int( + BlitVertexData in [[stage_in]], + texture2d<int> tex2D [[ texture(0) ]], + sampler sampler2D [[ sampler(0) ]] +) { + return tex2D.sample(sampler2D, in.uv.xy, level(in.uv.w)); +} + +fragment BlitDepthFragment ps_blit_2d_depth( + BlitVertexData in [[stage_in]], + depth2d<float> tex2D [[ texture(0) ]], + sampler sampler2D [[ sampler(0) ]] +) { + float depth = tex2D.sample(sampler2D, in.uv.xy, level(in.uv.w)); + return BlitDepthFragment { depth }; +} + + +fragment float4 ps_blit_2d_array_float( + BlitVertexData in [[stage_in]], + texture2d_array<float> tex2DArray [[ texture(0) ]], + sampler sampler2D [[ sampler(0) ]] +) { + return tex2DArray.sample(sampler2D, in.uv.xy, uint(in.uv.z), level(in.uv.w)); +} + +fragment uint4 ps_blit_2d_array_uint( + BlitVertexData in [[stage_in]], + texture2d_array<uint> tex2DArray [[ texture(0) ]], + sampler sampler2D [[ sampler(0) ]] +) { + return tex2DArray.sample(sampler2D, in.uv.xy, uint(in.uv.z), level(in.uv.w)); +} + +fragment int4 ps_blit_2d_array_int( + BlitVertexData in [[stage_in]], + texture2d_array<int> tex2DArray [[ texture(0) ]], + sampler sampler2D [[ sampler(0) ]] +) { + return tex2DArray.sample(sampler2D, in.uv.xy, uint(in.uv.z), level(in.uv.w)); +} + + +fragment float4 ps_blit_3d_float( + BlitVertexData in [[stage_in]], + texture3d<float> tex3D [[ texture(0) ]], + sampler sampler2D [[ sampler(0) ]] +) { + return tex3D.sample(sampler2D, in.uv.xyz, level(in.uv.w)); +}
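The blit pipeline above carries everything it needs through the two float4 vertex attributes. A hypothetical host-side mirror of that packing, for illustration only (the crate's real vertex type is `BlitVertex` in src/internal.rs, not shown in this hunk):

```rust
// Hypothetical mirror of the BlitAttributes layout consumed by vs_blit;
// not part of the patch.
#[repr(C)]
#[derive(Clone, Copy, Debug)]
struct BlitAttributes {
    // Sampled coordinates: x, y (z = source array layer), w = mip level,
    // read by the ps_blit_* shaders as in.uv with level(in.uv.w).
    src_coords: [f32; 4],
    // Destination: x, y in [0, 1] (mapped to NDC by `pos.xy * 2.0 - 1.0`),
    // z = output depth, w = render target array layer.
    dst_coords: [f32; 4],
}
```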
diff --git a/third_party/rust/gfx-backend-metal/shaders/clear.metal b/third_party/rust/gfx-backend-metal/shaders/clear.metal new file mode 100644 index 000000000000..4adbdd3b5659 --- /dev/null +++ b/third_party/rust/gfx-backend-metal/shaders/clear.metal @@ -0,0 +1,79 @@ +#include "macros.h" +#include <metal_stdlib> +using namespace metal; + +//TODO: simplified code path for Metal 2.0? +//> Starting in Metal 2.0, the [[color(n)]] and [[raster_order_group(index)]] indices can +//> also be a function constant. The function constant specified as indices for color and +//> raster order group attributes must be a scalar integer type. + +typedef struct { + float4 coords [[attribute(0)]]; +} ClearAttributes; + +typedef struct { + float4 position [[position]]; + uint layer GFX_RENDER_TARGET_ARRAY_INDEX; +} ClearVertexData; + +vertex ClearVertexData vs_clear(ClearAttributes in [[stage_in]]) { + float4 pos = { 0.0, 0.0, in.coords.z, 1.0f }; + pos.xy = in.coords.xy * 2.0 - 1.0; + return ClearVertexData { pos, uint(in.coords.w) }; +} + + +fragment float4 ps_clear0_float( + ClearVertexData in [[stage_in]], + constant float4 &value [[ buffer(0) ]] +) { + return value; +} + +fragment int4 ps_clear0_int( + ClearVertexData in [[stage_in]], + constant int4 &value [[ buffer(0) ]] +) { + return value; +} + +fragment uint4 ps_clear0_uint( + ClearVertexData in [[stage_in]], + constant uint4 &value [[ buffer(0) ]] +) { + return value; +} + + +typedef struct { + float4 color [[color(1)]]; +} Clear1FloatFragment; + +fragment Clear1FloatFragment ps_clear1_float( + ClearVertexData in [[stage_in]], + constant float4 &value [[ buffer(0) ]] +) { + return Clear1FloatFragment { value }; +} + +typedef struct { + int4 color [[color(1)]]; +} Clear1IntFragment; + +fragment Clear1IntFragment ps_clear1_int( + ClearVertexData in [[stage_in]], + constant int4 &value [[ buffer(0) ]] +) { + return Clear1IntFragment { value }; +} + +typedef struct { + uint4 color [[color(1)]]; +} Clear1UintFragment; + +fragment Clear1UintFragment ps_clear1_uint( + ClearVertexData in [[stage_in]], + constant uint4 &value [[ buffer(0) ]] +) { + return Clear1UintFragment { value }; +} diff --git a/third_party/rust/gfx-backend-metal/shaders/fill.metal b/third_party/rust/gfx-backend-metal/shaders/fill.metal new file mode 100644 index 000000000000..fffe9b4a077e --- /dev/null +++ b/third_party/rust/gfx-backend-metal/shaders/fill.metal @@ -0,0 +1,33 @@ +#include <metal_stdlib> +using namespace metal; + +typedef struct { + uint value; + uint length; +} FillBufferValue; + +kernel void cs_fill_buffer( + device uint *buffer [[ buffer(0) ]], + constant FillBufferValue &fill [[ buffer(1) ]], + uint index [[ thread_position_in_grid ]] +) { + if (index < fill.length) { + buffer[index] = fill.value; + } +} + +typedef struct { + uint size; + uint offsets; +} CopyBufferRange; + +kernel void cs_copy_buffer( + device uchar *dest [[ buffer(0) ]], + device uchar *source [[ buffer(1) ]], + constant CopyBufferRange &range [[ buffer(2) ]], + uint index [[ thread_position_in_grid ]] +) { + if (index < range.size) { + dest[(range.offsets>>16) + index] = source[(range.offsets & 0xFFFF) + index]; + } +} diff --git a/third_party/rust/gfx-backend-metal/shaders/gfx_shaders.metallib b/third_party/rust/gfx-backend-metal/shaders/gfx_shaders.metallib new file mode 100644 index 0000000000000000000000000000000000000000..b446e8f695a33b076207961168a68395c379f9a3 GIT binary patch literal 56133 [56133 bytes of base85-encoded binary data omitted: prebuilt Metal shader library gfx_shaders.metallib]
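A detail worth calling out in cs_copy_buffer above: both buffer offsets travel in a single 32-bit `offsets` word, destination in the high 16 bits and source in the low 16 bits. A hypothetical host-side helper (not part of the patch) showing that packing:

```rust
// Hypothetical helper: builds the `offsets` word that cs_copy_buffer
// unpacks as `offsets >> 16` (destination) and `offsets & 0xFFFF` (source).
fn pack_copy_offsets(dst_offset: u32, src_offset: u32) -> u32 {
    debug_assert!(dst_offset <= 0xFFFF && src_offset <= 0xFFFF);
    (dst_offset << 16) | src_offset
}
```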
diff --git a/third_party/rust/gfx-backend-metal/shaders/macros.h b/third_party/rust/gfx-backend-metal/shaders/macros.h new file mode 100644 index 000000000000..b731190479e6 --- /dev/null +++ b/third_party/rust/gfx-backend-metal/shaders/macros.h @@ -0,0 +1,5 @@ +#ifdef __METAL_MACOS__ +# define GFX_RENDER_TARGET_ARRAY_INDEX [[render_target_array_index]] +#else +# define GFX_RENDER_TARGET_ARRAY_INDEX +#endif diff --git a/third_party/rust/gfx-backend-metal/src/command.rs b/third_party/rust/gfx-backend-metal/src/command.rs new file mode 100644 index 000000000000..c1f682d1332c --- /dev/null +++ b/third_party/rust/gfx-backend-metal/src/command.rs @@ -0,0 +1,4739 @@ +use crate::{ + conversions as conv, + internal::{BlitVertex, ClearKey, ClearVertex}, + native, + soft, + window, + AsNative, + Backend, + BufferPtr, + OnlineRecording, + PrivateDisabilities, + ResourceIndex, + ResourcePtr, + SamplerPtr, + Shared, + TexturePtr, + MAX_BOUND_DESCRIPTOR_SETS, +}; + +use hal::{ + buffer, + command as com, + device::OutOfMemory, + format::{Aspects, FormatDesc}, + image::{Extent, Filter, Layout, Level, SubresourceRange}, + memory, + pass::AttachmentLoadOp, + pso, + query, + range::RangeArg, + window::{PresentError, Suboptimal, SwapImageIndex}, + DrawCount, + IndexCount, + IndexType, + InstanceCount, + VertexCount, + VertexOffset, + WorkGroupCount, +}; + +use arrayvec::ArrayVec; +use auxil::FastHashMap; +use block::ConcreteBlock; +use cocoa::foundation::{NSRange, NSUInteger}; +use copyless::VecHelper; +#[cfg(feature = "dispatch")] +use dispatch; +use foreign_types::ForeignType; +use metal::{self, MTLIndexType, MTLPrimitiveType, MTLScissorRect, MTLSize, MTLViewport}; +use objc::rc::autoreleasepool; +use parking_lot::Mutex; + +#[cfg(feature = "dispatch")] +use std::fmt; +use std::{ + borrow::Borrow, + cell::RefCell, + iter, + mem, + ops::{Deref, Range}, + ptr, + slice, + sync::{ + atomic::{AtomicBool, Ordering}, + Arc, + }, + thread, + time, +}; + + +const WORD_SIZE: usize = 4; +const WORD_ALIGNMENT: u64 = WORD_SIZE as _; + +const COUNTERS_REPORT_WINDOW: usize = 0; + +#[cfg(feature = "dispatch")] +struct NoDebug<T>(T); +#[cfg(feature = "dispatch")] +impl<T> fmt::Debug for NoDebug<T> { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + write!(f, "") + } +} + +#[derive(Debug)] +pub struct QueueInner { + raw: metal::CommandQueue, + reserve: Range<usize>, + debug_retain_references: bool, +} + +#[must_use] +#[derive(Debug)] +pub struct Token { + active: bool, +} + +impl Drop for Token { + fn drop(&mut self) { + + if !thread::panicking() { + debug_assert!(!self.active); + } + } +} + +impl QueueInner { + pub(crate) fn new(device: &metal::DeviceRef, pool_size: Option<usize>) -> Self { + match pool_size { + Some(count) => QueueInner { + raw: device.new_command_queue_with_max_command_buffer_count(count as u64), + reserve: 0 .. count, + debug_retain_references: false, + }, + None => QueueInner { + raw: device.new_command_queue(), + reserve: 0 ..
64, + debug_retain_references: true, + }, + } + } + + + pub(crate) fn spawn(&mut self) -> (metal::CommandBuffer, Token) { + self.reserve.start += 1; + let cmd_buf = autoreleasepool(|| self.spawn_temp().to_owned()); + (cmd_buf, Token { active: true }) + } + + pub(crate) fn spawn_temp(&self) -> &metal::CommandBufferRef { + if self.debug_retain_references { + self.raw.new_command_buffer() + } else { + self.raw.new_command_buffer_with_unretained_references() + } + } + + + pub(crate) fn release(&mut self, mut token: Token) { + token.active = false; + self.reserve.start -= 1; + } + + + pub(crate) fn wait_idle(queue: &Mutex) { + debug!("waiting for idle"); + + + let (cmd_buf, token) = queue.lock().spawn(); + cmd_buf.set_label("empty"); + cmd_buf.commit(); + cmd_buf.wait_until_completed(); + queue.lock().release(token); + } +} + +#[derive(Debug)] +pub struct BlockedSubmission { + wait_events: Vec>, + command_buffers: Vec, +} + + + + +#[derive(Debug, Default)] +pub struct QueueBlocker { + submissions: Vec, +} + +impl QueueBlocker { + fn submit_impl(&mut self, cmd_buffer: &metal::CommandBufferRef) { + match self.submissions.last_mut() { + Some(blocked) => blocked.command_buffers.push(cmd_buffer.to_owned()), + None => cmd_buffer.commit(), + } + } + + pub(crate) fn triage(&mut self) { + + let done = { + let blocked = match self.submissions.first_mut() { + Some(blocked) => blocked, + None => return, + }; + blocked.wait_events.retain(|ev| !ev.load(Ordering::Acquire)); + blocked.wait_events.is_empty() + }; + + + if done { + let blocked = self.submissions.remove(0); + for cmd_buf in blocked.command_buffers { + cmd_buf.commit(); + } + } + } +} + + +#[derive(Debug)] +struct PoolShared { + online_recording: OnlineRecording, + #[cfg(feature = "dispatch")] + dispatch_queue: Option>, +} + +type CommandBufferInnerPtr = Arc>; +type PoolSharedPtr = Arc>; + +#[derive(Debug)] +pub struct CommandPool { + shared: Arc, + allocated: Vec, + pool_shared: PoolSharedPtr, +} + +unsafe impl Send for CommandPool {} +unsafe impl Sync for CommandPool {} + +impl CommandPool { + pub(crate) fn new(shared: &Arc, online_recording: OnlineRecording) -> Self { + let pool_shared = PoolShared { + #[cfg(feature = "dispatch")] + dispatch_queue: match online_recording { + OnlineRecording::Immediate | OnlineRecording::Deferred => None, + OnlineRecording::Remote(ref priority) => { + Some(NoDebug(dispatch::Queue::global(priority.clone()))) + } + }, + online_recording, + }; + CommandPool { + shared: Arc::clone(shared), + allocated: Vec::new(), + pool_shared: Arc::new(RefCell::new(pool_shared)), + } + } +} + +#[derive(Debug)] +pub struct CommandBuffer { + shared: Arc, + pool_shared: PoolSharedPtr, + inner: CommandBufferInnerPtr, + state: State, + temp: Temp, + pub name: String, +} + +unsafe impl Send for CommandBuffer {} +unsafe impl Sync for CommandBuffer {} + +#[derive(Debug)] +struct Temp { + clear_vertices: Vec, + blit_vertices: FastHashMap<(Aspects, Level), Vec>, + clear_values: Vec>, +} + +type VertexBufferMaybeVec = Vec>; + +#[derive(Debug)] +struct RenderPipelineState { + raw: metal::RenderPipelineState, + ds_desc: pso::DepthStencilDesc, + vertex_buffers: VertexBufferMaybeVec, + formats: native::SubpassFormats, +} + +#[derive(Debug)] +struct SubpassInfo { + descriptor: metal::RenderPassDescriptor, + combined_aspects: Aspects, + formats: native::SubpassFormats, +} + +#[derive(Debug, Default)] +struct DescriptorSetInfo { + graphics_resources: Vec<(ResourcePtr, metal::MTLResourceUsage)>, + compute_resources: Vec<(ResourcePtr, 
metal::MTLResourceUsage)>, +} + + + + + + + + + + + + + + + +#[derive(Debug)] +struct State { + + viewport: Option<(pso::Rect, Range)>, + scissors: Option, + blend_color: Option, + + render_pso: Option, + + + render_pso_is_compatible: bool, + compute_pso: Option, + work_group_size: MTLSize, + primitive_type: MTLPrimitiveType, + + resources_vs: StageResources, + resources_ps: StageResources, + resources_cs: StageResources, + index_buffer: Option>, + rasterizer_state: Option, + depth_bias: pso::DepthBias, + stencil: native::StencilState, + push_constants: Vec, + vertex_buffers: Vec>, + + target_aspects: Aspects, + target_extent: Extent, + target_formats: native::SubpassFormats, + visibility_query: (metal::MTLVisibilityResultMode, buffer::Offset), + pending_subpasses: Vec, + descriptor_sets: ArrayVec<[DescriptorSetInfo; MAX_BOUND_DESCRIPTOR_SETS]>, +} + +impl State { + + fn reset_resources(&mut self) { + self.resources_vs.clear(); + self.resources_ps.clear(); + self.resources_cs.clear(); + self.push_constants.clear(); + self.vertex_buffers.clear(); + self.pending_subpasses.clear(); + for ds in self.descriptor_sets.iter_mut() { + ds.graphics_resources.clear(); + ds.compute_resources.clear(); + } + } + + fn clamp_scissor(sr: MTLScissorRect, extent: Extent) -> MTLScissorRect { + + let x = sr.x.min(extent.width.max(1) as u64 - 1); + let y = sr.y.min(extent.height.max(1) as u64 - 1); + + MTLScissorRect { + x, + y, + width: ((sr.x + sr.width).min(extent.width as u64) - x).max(1), + height: ((sr.y + sr.height).min(extent.height as u64) - y).max(1), + } + } + + fn make_pso_commands( + &self, + ) -> ( + Option>, + Option>, + ) { + if self.render_pso_is_compatible { + ( + self.render_pso + .as_ref() + .map(|ps| soft::RenderCommand::BindPipeline(&*ps.raw)), + self.rasterizer_state + .clone() + .map(soft::RenderCommand::SetRasterizerState), + ) + } else { + + (None, None) + } + } + + fn make_render_commands( + &self, + aspects: Aspects, + ) -> impl Iterator> { + + let com_vp = self + .viewport + .as_ref() + .map(|&(rect, ref depth)| soft::RenderCommand::SetViewport(rect, depth.clone())); + let com_scissor = self + .scissors + .map(|sr| soft::RenderCommand::SetScissor(Self::clamp_scissor(sr, self.target_extent))); + let com_blend = if aspects.contains(Aspects::COLOR) { + self.blend_color.map(soft::RenderCommand::SetBlendColor) + } else { + None + }; + let com_depth_bias = if aspects.contains(Aspects::DEPTH) { + Some(soft::RenderCommand::SetDepthBias(self.depth_bias)) + } else { + None + }; + let com_visibility = if self.visibility_query.0 != metal::MTLVisibilityResultMode::Disabled + { + Some(soft::RenderCommand::SetVisibilityResult( + self.visibility_query.0, + self.visibility_query.1, + )) + } else { + None + }; + let (com_pso, com_rast) = self.make_pso_commands(); + + let render_resources = iter::once(&self.resources_vs).chain(iter::once(&self.resources_ps)); + let push_constants = self.push_constants.as_slice(); + let com_resources = [pso::Stage::Vertex, pso::Stage::Fragment] + .iter() + .zip(render_resources) + .flat_map(move |(&stage, resources)| { + let com_buffers = soft::RenderCommand::BindBuffers { + stage, + index: 0, + buffers: (&resources.buffers[..], &resources.buffer_offsets[..]), + }; + let com_textures = soft::RenderCommand::BindTextures { + stage, + index: 0, + textures: &resources.textures[..], + }; + let com_samplers = soft::RenderCommand::BindSamplers { + stage, + index: 0, + samplers: &resources.samplers[..], + }; + let com_push_constants = + resources + .push_constants + .map(|pc| 
soft::RenderCommand::BindBufferData { + stage, + index: pc.buffer_index as _, + words: &push_constants[.. pc.count as usize], + }); + iter::once(com_buffers) + .chain(iter::once(com_textures)) + .chain(iter::once(com_samplers)) + .chain(com_push_constants) + }); + let com_used_resources = self.descriptor_sets.iter().flat_map(|ds| { + ds.graphics_resources + .iter() + .map(|&(resource, usage)| soft::RenderCommand::UseResource { resource, usage }) + }); + + com_vp + .into_iter() + .chain(com_scissor) + .chain(com_blend) + .chain(com_depth_bias) + .chain(com_visibility) + .chain(com_pso) + .chain(com_rast) + + .chain(com_resources) + .chain(com_used_resources) + } + + fn make_compute_commands(&self) -> impl Iterator> { + let resources = &self.resources_cs; + let com_pso = self + .compute_pso + .as_ref() + .map(|pso| soft::ComputeCommand::BindPipeline(&**pso)); + let com_buffers = soft::ComputeCommand::BindBuffers { + index: 0, + buffers: (&resources.buffers[..], &resources.buffer_offsets[..]), + }; + let com_textures = soft::ComputeCommand::BindTextures { + index: 0, + textures: &resources.textures[..], + }; + let com_samplers = soft::ComputeCommand::BindSamplers { + index: 0, + samplers: &resources.samplers[..], + }; + let com_push_constants = + resources + .push_constants + .map(|pc| soft::ComputeCommand::BindBufferData { + index: pc.buffer_index as _, + words: &self.push_constants[.. pc.count as usize], + }); + let com_used_resources = self.descriptor_sets.iter().flat_map(|ds| { + ds.compute_resources + .iter() + .map(|&(resource, usage)| soft::ComputeCommand::UseResource { resource, usage }) + }); + + + com_pso + .into_iter() + .chain(iter::once(com_buffers)) + .chain(iter::once(com_textures)) + .chain(iter::once(com_samplers)) + .chain(com_push_constants) + .chain(com_used_resources) + } + + fn set_vertex_buffers(&mut self, end: usize) -> Option> { + let rps = self.render_pso.as_ref()?; + let start = end - rps.vertex_buffers.len(); + self.resources_vs.pre_allocate_buffers(end); + + for ((out_buffer, out_offset), vb_maybe) in self.resources_vs.buffers[.. end] + .iter_mut() + .rev() + .zip(self.resources_vs.buffer_offsets[.. end].iter_mut().rev()) + .zip(&rps.vertex_buffers) + { + match vb_maybe { + Some((ref vb, extra_offset)) => { + match self.vertex_buffers.get(vb.binding as usize) { + Some(&Some((buffer, base_offset))) => { + *out_buffer = Some(buffer); + *out_offset = *extra_offset as u64 + base_offset; + } + _ => { + + + *out_buffer = None; + } + } + } + None => { + *out_buffer = None; + } + } + } + + Some(soft::RenderCommand::BindBuffers { + stage: pso::Stage::Vertex, + index: start as ResourceIndex, + buffers: ( + &self.resources_vs.buffers[start .. end], + &self.resources_vs.buffer_offsets[start .. 
end], + ), + }) + } + + fn build_depth_stencil(&self) -> Option { + let mut desc = match self.render_pso { + Some(ref ps) => ps.ds_desc, + None => return None, + }; + + if !self.target_aspects.contains(Aspects::DEPTH) { + desc.depth = None; + } + if !self.target_aspects.contains(Aspects::STENCIL) { + desc.stencil = None; + } + + if let Some(ref mut stencil) = desc.stencil { + stencil.reference_values = pso::State::Dynamic; + if stencil.read_masks.is_dynamic() { + stencil.read_masks = pso::State::Static(self.stencil.read_masks); + } + if stencil.write_masks.is_dynamic() { + stencil.write_masks = pso::State::Static(self.stencil.write_masks); + } + } + + Some(desc) + } + + fn set_depth_bias<'a>( + &mut self, + depth_bias: &pso::DepthBias, + ) -> soft::RenderCommand<&'a soft::Ref> { + self.depth_bias = *depth_bias; + soft::RenderCommand::SetDepthBias(*depth_bias) + } + + fn push_vs_constants( + &mut self, + pc: native::PushConstantInfo, + ) -> soft::RenderCommand<&soft::Ref> { + self.resources_vs.push_constants = Some(pc); + soft::RenderCommand::BindBufferData { + stage: pso::Stage::Vertex, + index: pc.buffer_index, + words: &self.push_constants[.. pc.count as usize], + } + } + + fn push_ps_constants( + &mut self, + pc: native::PushConstantInfo, + ) -> soft::RenderCommand<&soft::Ref> { + self.resources_ps.push_constants = Some(pc); + soft::RenderCommand::BindBufferData { + stage: pso::Stage::Fragment, + index: pc.buffer_index, + words: &self.push_constants[.. pc.count as usize], + } + } + + fn push_cs_constants( + &mut self, + pc: native::PushConstantInfo, + ) -> soft::ComputeCommand<&soft::Ref> { + self.resources_cs.push_constants = Some(pc); + soft::ComputeCommand::BindBufferData { + index: pc.buffer_index, + words: &self.push_constants[.. pc.count as usize], + } + } + + fn set_viewport<'a>( + &mut self, + vp: &'a pso::Viewport, + disabilities: PrivateDisabilities, + ) -> soft::RenderCommand<&'a soft::Ref> { + let depth = vp.depth.start .. if disabilities.broken_viewport_near_depth { + (vp.depth.end - vp.depth.start) + } else { + vp.depth.end + }; + self.viewport = Some((vp.rect, depth.clone())); + soft::RenderCommand::SetViewport(vp.rect, depth) + } + + fn set_scissor<'a>(&mut self, rect: pso::Rect) -> soft::RenderCommand<&'a soft::Ref> { + let scissor = MTLScissorRect { + x: rect.x as _, + y: rect.y as _, + width: rect.w as _, + height: rect.h as _, + }; + self.scissors = Some(scissor); + let clamped = State::clamp_scissor(scissor, self.target_extent); + soft::RenderCommand::SetScissor(clamped) + } + + fn set_blend_color<'a>( + &mut self, + color: &'a pso::ColorValue, + ) -> soft::RenderCommand<&'a soft::Ref> { + self.blend_color = Some(*color); + soft::RenderCommand::SetBlendColor(*color) + } + + fn update_push_constants(&mut self, offset: u32, constants: &[u32], total: u32) { + assert_eq!(offset % WORD_ALIGNMENT as u32, 0); + let offset = (offset / WORD_ALIGNMENT as u32) as usize; + let data = &mut self.push_constants; + if data.len() < total as usize { + data.resize(total as usize, 0); + } + data[offset .. 
offset + constants.len()].copy_from_slice(constants); + } + + fn set_visibility_query( + &mut self, + mode: metal::MTLVisibilityResultMode, + offset: buffer::Offset, + ) -> soft::RenderCommand<&soft::Ref> { + self.visibility_query = (mode, offset); + soft::RenderCommand::SetVisibilityResult(mode, offset) + } +} + +#[derive(Debug)] +struct StageResources { + buffers: Vec>, + buffer_offsets: Vec, + textures: Vec>, + samplers: Vec>, + push_constants: Option, +} + +impl StageResources { + fn new() -> Self { + StageResources { + buffers: Vec::new(), + buffer_offsets: Vec::new(), + textures: Vec::new(), + samplers: Vec::new(), + push_constants: None, + } + } + + fn clear(&mut self) { + self.buffers.clear(); + self.buffer_offsets.clear(); + self.textures.clear(); + self.samplers.clear(); + self.push_constants = None; + } + + fn pre_allocate_buffers(&mut self, count: usize) { + debug_assert_eq!(self.buffers.len(), self.buffer_offsets.len()); + if self.buffers.len() < count { + self.buffers.resize(count, None); + self.buffer_offsets.resize(count, 0); + } + } + fn pre_allocate(&mut self, counters: &native::ResourceData) { + if self.textures.len() < counters.textures as usize { + self.textures.resize(counters.textures as usize, None); + } + if self.samplers.len() < counters.samplers as usize { + self.samplers.resize(counters.samplers as usize, None); + } + self.pre_allocate_buffers(counters.buffers as usize); + } + + fn bind_set( + &mut self, + stage: pso::ShaderStageFlags, + data: &native::DescriptorEmulatedPoolInner, + mut res_offset: native::ResourceData, + layouts: &[native::DescriptorLayout], + pool_range: &native::ResourceData>, + ) -> native::ResourceData { + let mut pool_offsets = pool_range.map(|r| r.start); + for layout in layouts { + if layout.stages.contains(stage) { + if layout.content.contains(native::DescriptorContent::SAMPLER) { + self.samplers[res_offset.samplers as usize] = + data.samplers[pool_offsets.samplers as usize]; + res_offset.samplers += 1; + pool_offsets.samplers += 1; + } + if layout.content.contains(native::DescriptorContent::TEXTURE) { + self.textures[res_offset.textures as usize] = + data.textures[pool_offsets.textures as usize].map(|(t, _)| t); + res_offset.textures += 1; + pool_offsets.textures += 1; + } + if layout.content.contains(native::DescriptorContent::BUFFER) { + let (buffer, offset) = match data.buffers[pool_offsets.buffers as usize] { + Some((buffer, offset)) => (Some(buffer), offset), + None => (None, 0), + }; + self.buffers[res_offset.buffers as usize] = buffer; + self.buffer_offsets[res_offset.buffers as usize] = offset; + res_offset.buffers += 1; + pool_offsets.buffers += 1; + } + } else { + pool_offsets.add(layout.content); + } + } + res_offset + } +} + +#[cfg(feature = "dispatch")] +#[derive(Debug, Default)] +struct Capacity { + render: usize, + compute: usize, + blit: usize, +} + + +#[cfg(feature = "dispatch")] +#[derive(Debug)] +enum EncodePass { + Render( + Vec>, + soft::Own, + metal::RenderPassDescriptor, + String, + ), + Compute(Vec>, soft::Own, String), + Blit(Vec, String), +} +#[cfg(feature = "dispatch")] +unsafe impl Send for EncodePass {} + +#[cfg(feature = "dispatch")] +struct SharedCommandBuffer(Arc>); +#[cfg(feature = "dispatch")] +unsafe impl Send for SharedCommandBuffer {} + +#[cfg(feature = "dispatch")] +impl EncodePass { + fn schedule(self, queue: &dispatch::Queue, cmd_buffer_arc: &Arc>) { + let cmd_buffer = SharedCommandBuffer(Arc::clone(cmd_buffer_arc)); + queue.r#async(move || match self { + EncodePass::Render(list, resources, desc, 
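+ // Passes are encoded asynchronously on the dispatch queue; the
+ // `SharedCommandBuffer` wrapper exists only to move the mutex-guarded
+ // Metal command buffer into the closure, as it is not `Send` itself.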
label) => { + let encoder = cmd_buffer + .0 + .lock() + .new_render_command_encoder(&desc) + .to_owned(); + encoder.set_label(&label); + for command in list { + exec_render(&encoder, command, &resources); + } + encoder.end_encoding(); + } + EncodePass::Compute(list, resources, label) => { + let encoder = cmd_buffer.0.lock().new_compute_command_encoder().to_owned(); + encoder.set_label(&label); + for command in list { + exec_compute(&encoder, command, &resources); + } + encoder.end_encoding(); + } + EncodePass::Blit(list, label) => { + let encoder = cmd_buffer.0.lock().new_blit_command_encoder().to_owned(); + encoder.set_label(&label); + for command in list { + exec_blit(&encoder, command); + } + encoder.end_encoding(); + } + }); + } + + fn update(&self, capacity: &mut Capacity) { + match &self { + EncodePass::Render(ref list, _, _, _) => capacity.render = capacity.render.max(list.len()), + EncodePass::Compute(ref list, _, _) => capacity.compute = capacity.compute.max(list.len()), + EncodePass::Blit(ref list, _) => capacity.blit = capacity.blit.max(list.len()), + } + } +} + +#[derive(Debug, Default)] +struct Journal { + resources: soft::Own, + passes: Vec<(soft::Pass, Range, String)>, + render_commands: Vec>, + compute_commands: Vec>, + blit_commands: Vec, +} + +impl Journal { + fn clear(&mut self) { + self.resources.clear(); + self.passes.clear(); + self.render_commands.clear(); + self.compute_commands.clear(); + self.blit_commands.clear(); + } + + fn stop(&mut self) { + match self.passes.last_mut() { + None => {} + Some(&mut (soft::Pass::Render(_), ref mut range, _)) => { + range.end = self.render_commands.len(); + } + Some(&mut (soft::Pass::Compute, ref mut range, _)) => { + range.end = self.compute_commands.len(); + } + Some(&mut (soft::Pass::Blit, ref mut range, _)) => { + range.end = self.blit_commands.len(); + } + }; + } + + fn record(&self, command_buf: &metal::CommandBufferRef) { + for (ref pass, ref range, ref label) in &self.passes { + match *pass { + soft::Pass::Render(ref desc) => { + let encoder = command_buf.new_render_command_encoder(desc); + encoder.set_label(label); + for command in &self.render_commands[range.clone()] { + exec_render(&encoder, command, &self.resources); + } + encoder.end_encoding(); + } + soft::Pass::Blit => { + let encoder = command_buf.new_blit_command_encoder(); + encoder.set_label(label); + for command in &self.blit_commands[range.clone()] { + exec_blit(&encoder, command); + } + encoder.end_encoding(); + } + soft::Pass::Compute => { + let encoder = command_buf.new_compute_command_encoder(); + encoder.set_label(label); + for command in &self.compute_commands[range.clone()] { + exec_compute(&encoder, command, &self.resources); + } + encoder.end_encoding(); + } + } + } + } + + fn extend(&mut self, other: &Self, inherit_pass: bool) { + if inherit_pass { + assert_eq!(other.passes.len(), 1); + match *self.passes.last_mut().unwrap() { + (soft::Pass::Render(_), ref mut range, _) => { + range.end += other.render_commands.len(); + } + (soft::Pass::Compute, _, _) | (soft::Pass::Blit, _, _) => { + panic!("Only render passes can inherit") + } + } + } else { + for (pass, range, label) in &other.passes { + let offset = match *pass { + soft::Pass::Render(_) => self.render_commands.len(), + soft::Pass::Compute => self.compute_commands.len(), + soft::Pass::Blit => self.blit_commands.len(), + }; + self.passes + .alloc() + .init((pass.clone(), range.start + offset .. 
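+ // Pass ranges index into the per-kind command lists, so passes copied
+ // from `other` are shifted by this journal's current list length.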
range.end + offset, label.clone())); + } + } + + // Note: journals contain 3 levels of stuff: + // resources, commands, and passes + // Each upper level points to the lower one with index + // sub-ranges. In order to merge two journals, we need + // to fix those indices of the one that goes on top. + // This is referred here as "rebasing". + for mut com in other.render_commands.iter().cloned() { + self.resources.rebase_render(&mut com); + self.render_commands.push(com); + } + for mut com in other.compute_commands.iter().cloned() { + self.resources.rebase_compute(&mut com); + self.compute_commands.push(com); + } + self.blit_commands.extend_from_slice(&other.blit_commands); + + self.resources.extend(&other.resources); + } +} + +#[derive(Debug)] +enum CommandSink { + Immediate { + cmd_buffer: metal::CommandBuffer, + token: Token, + encoder_state: EncoderState, + num_passes: usize, + label: String, + }, + Deferred { + is_encoding: bool, + is_inheriting: bool, + journal: Journal, + label: String, + }, + #[cfg(feature = "dispatch")] + Remote { + queue: NoDebug, + cmd_buffer: Arc>, + token: Token, + pass: Option, + capacity: Capacity, + label: String, + }, +} + +/// A helper temporary object that consumes state-setting commands only +/// applicable to a render pass currently encoded. +enum PreRender<'a> { + Immediate(&'a metal::RenderCommandEncoderRef), + Deferred( + &'a mut soft::Own, + &'a mut Vec>, + ), + Void, +} + +impl<'a> PreRender<'a> { + fn is_void(&self) -> bool { + match *self { + PreRender::Void => true, + _ => false, + } + } + + fn issue(&mut self, command: soft::RenderCommand<&soft::Ref>) { + match *self { + PreRender::Immediate(encoder) => exec_render(encoder, command, &&soft::Ref), + PreRender::Deferred(ref mut resources, ref mut list) => { + list.alloc().init(resources.own_render(command)); + } + PreRender::Void => (), + } + } + + fn issue_many<'b, I>(&mut self, commands: I) + where + I: Iterator>, + { + match *self { + PreRender::Immediate(encoder) => { + for com in commands { + exec_render(encoder, com, &&soft::Ref); + } + } + PreRender::Deferred(ref mut resources, ref mut list) => { + list.extend(commands.map(|com| resources.own_render(com))) + } + PreRender::Void => {} + } + } +} + +/// A helper temporary object that consumes state-setting commands only +/// applicable to a compute pass currently encoded. +enum PreCompute<'a> { + Immediate(&'a metal::ComputeCommandEncoderRef), + Deferred( + &'a mut soft::Own, + &'a mut Vec>, + ), + Void, +} + +impl<'a> PreCompute<'a> { + fn issue<'b>(&mut self, command: soft::ComputeCommand<&'b soft::Ref>) { + match *self { + PreCompute::Immediate(encoder) => exec_compute(encoder, command, &&soft::Ref), + PreCompute::Deferred(ref mut resources, ref mut list) => { + list.alloc().init(resources.own_compute(command)); + } + PreCompute::Void => (), + } + } + + fn issue_many<'b, I>(&mut self, commands: I) + where + I: Iterator>, + { + match *self { + PreCompute::Immediate(encoder) => { + for com in commands { + exec_compute(encoder, com, &&soft::Ref); + } + } + PreCompute::Deferred(ref mut resources, ref mut list) => { + list.extend(commands.map(|com| resources.own_compute(com))) + } + PreCompute::Void => {} + } + } +} + +impl CommandSink { + fn label(&mut self, label: &str) -> &Self { + match self { + CommandSink::Immediate { label: l, .. } | CommandSink::Deferred { label: l, .. } => *l = label.to_string(), + #[cfg(feature = "dispatch")] + CommandSink::Remote { label: l, .. 
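+ // The label is forwarded to each encoder this sink creates (via
+ // `set_label`), which is what surfaces in GPU frame captures.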
} => *l = label.to_string(), + } + self + } + + fn stop_encoding(&mut self) { + match *self { + CommandSink::Immediate { + ref mut encoder_state, + .. + } => { + encoder_state.end(); + } + CommandSink::Deferred { + ref mut is_encoding, + ref mut journal, + .. + } => { + *is_encoding = false; + journal.stop(); + } + #[cfg(feature = "dispatch")] + CommandSink::Remote { + queue: NoDebug(ref queue), + ref cmd_buffer, + ref mut pass, + ref mut capacity, + .. + } => { + if let Some(pass) = pass.take() { + pass.update(capacity); + pass.schedule(queue, cmd_buffer); + } + } + } + } + + /// Start issuing pre-render commands. Those can be rejected, so the caller is responsible + /// for updating the state cache accordingly, so that it's set upon the start of a next pass. + fn pre_render(&mut self) -> PreRender { + match *self { + CommandSink::Immediate { + encoder_state: EncoderState::Render(ref encoder), + .. + } => PreRender::Immediate(encoder), + CommandSink::Deferred { + is_encoding: true, + ref mut journal, + .. + } => match journal.passes.last() { + Some(&(soft::Pass::Render(_), _, _)) => { + PreRender::Deferred(&mut journal.resources, &mut journal.render_commands) + } + _ => PreRender::Void, + }, + #[cfg(feature = "dispatch")] + CommandSink::Remote { + pass: Some(EncodePass::Render(ref mut list, ref mut resources, _, _)), + .. + } => PreRender::Deferred(resources, list), + _ => PreRender::Void, + } + } + + /// Switch the active encoder to render by starting a render pass. + fn switch_render(&mut self, descriptor: metal::RenderPassDescriptor) -> PreRender { + //assert!(AutoReleasePool::is_active()); + self.stop_encoding(); + + match *self { + CommandSink::Immediate { + ref cmd_buffer, + ref mut encoder_state, + ref mut num_passes, + ref label, + .. + } => { + *num_passes += 1; + let encoder = cmd_buffer.new_render_command_encoder(&descriptor); + encoder.set_label(label); + *encoder_state = EncoderState::Render(encoder.to_owned()); + PreRender::Immediate(encoder) + } + CommandSink::Deferred { + ref mut is_encoding, + ref mut journal, + is_inheriting, + ref label, + .. + } => { + assert!(!is_inheriting); + *is_encoding = true; + journal.passes.alloc().init(( + soft::Pass::Render(descriptor), + journal.render_commands.len() .. 0, + label.clone(), + )); + PreRender::Deferred(&mut journal.resources, &mut journal.render_commands) + } + #[cfg(feature = "dispatch")] + CommandSink::Remote { + ref mut pass, + ref capacity, + ref label, + .. + } => { + let list = Vec::with_capacity(capacity.render); + *pass = Some(EncodePass::Render(list, soft::Own::default(), descriptor, label.clone())); + match *pass { + Some(EncodePass::Render(ref mut list, ref mut resources, _, _)) => { + PreRender::Deferred(resources, list) + } + _ => unreachable!(), + } + } + } + } + + fn quick_render<'a, I>( + &mut self, + label: &str, + descriptor: metal::RenderPassDescriptor, + commands: I, + ) where + I: Iterator>, + { + { + let mut pre = self.switch_render(descriptor); + if let PreRender::Immediate(encoder) = pre { + encoder.set_label(label); + } + pre.issue_many(commands); + } + self.stop_encoding(); + } + + /// Issue provided blit commands. This function doesn't expect an active blit pass, + /// it will automatically start one when needed. + fn blit_commands(&mut self, commands: I) + where + I: Iterator, + { + enum PreBlit<'b> { + Immediate(&'b metal::BlitCommandEncoderRef), + Deferred(&'b mut Vec), + } + + let pre = match *self { + CommandSink::Immediate { + encoder_state: EncoderState::Blit(ref encoder), + .. 
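+ // Fast path: a blit encoder is already open, so reuse it. The arms
+ // below end whatever encoder is active and start (or defer) a new
+ // blit pass.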
+ } => PreBlit::Immediate(encoder), + CommandSink::Immediate { + ref cmd_buffer, + ref mut encoder_state, + ref mut num_passes, + .. + } => { + *num_passes += 1; + encoder_state.end(); + let encoder = cmd_buffer.new_blit_command_encoder(); + *encoder_state = EncoderState::Blit(encoder.to_owned()); + PreBlit::Immediate(encoder) + } + CommandSink::Deferred { + ref mut is_encoding, + is_inheriting, + ref mut journal, + ref label, + .. + } => { + assert!(!is_inheriting); + *is_encoding = true; + if let Some(&(soft::Pass::Blit, _, _)) = journal.passes.last() { + } else { + journal.stop(); + journal + .passes + .alloc() + .init((soft::Pass::Blit, journal.blit_commands.len() .. 0, label.clone())); + } + PreBlit::Deferred(&mut journal.blit_commands) + } + #[cfg(feature = "dispatch")] + CommandSink::Remote { + pass: Some(EncodePass::Blit(ref mut list, _)), + .. + } => PreBlit::Deferred(list), + #[cfg(feature = "dispatch")] + CommandSink::Remote { + queue: NoDebug(ref queue), + ref cmd_buffer, + ref mut pass, + ref mut capacity, + ref label, + .. + } => { + if let Some(pass) = pass.take() { + pass.update(capacity); + pass.schedule(queue, cmd_buffer); + } + let list = Vec::with_capacity(capacity.blit); + *pass = Some(EncodePass::Blit(list, label.clone())); + match *pass { + Some(EncodePass::Blit(ref mut list, _)) => PreBlit::Deferred(list), + _ => unreachable!(), + } + } + }; + + match pre { + PreBlit::Immediate(encoder) => { + for com in commands { + exec_blit(encoder, com); + } + } + PreBlit::Deferred(list) => { + list.extend(commands); + } + } + } + + /// Start issuing pre-compute commands. Those can be rejected, so the caller is responsible + /// for updating the state cache accordingly, so that it's set upon the start of a next pass. + fn pre_compute(&mut self) -> PreCompute { + match *self { + CommandSink::Immediate { + encoder_state: EncoderState::Compute(ref encoder), + .. + } => PreCompute::Immediate(encoder), + CommandSink::Deferred { + is_encoding: true, + is_inheriting: false, + ref mut journal, + .. + } => match journal.passes.last() { + Some(&(soft::Pass::Compute, _, _)) => { + PreCompute::Deferred(&mut journal.resources, &mut journal.compute_commands) + } + _ => PreCompute::Void, + }, + #[cfg(feature = "dispatch")] + CommandSink::Remote { + pass: Some(EncodePass::Compute(ref mut list, ref mut resources, _)), + .. + } => PreCompute::Deferred(resources, list), + _ => PreCompute::Void, + } + } + + /// Switch the active encoder to compute. + /// Second returned value is `true` if the switch has just happened. + fn switch_compute(&mut self) -> (PreCompute, bool) { + match *self { + CommandSink::Immediate { + encoder_state: EncoderState::Compute(ref encoder), + .. + } => (PreCompute::Immediate(encoder), false), + CommandSink::Immediate { + ref cmd_buffer, + ref mut encoder_state, + ref mut num_passes, + .. + } => { + *num_passes += 1; + encoder_state.end(); + let encoder = cmd_buffer.new_compute_command_encoder(); + *encoder_state = EncoderState::Compute(encoder.to_owned()); + (PreCompute::Immediate(encoder), true) + } + CommandSink::Deferred { + ref mut is_encoding, + is_inheriting, + ref mut journal, + ref label, + .. + } => { + assert!(!is_inheriting); + *is_encoding = true; + let switch = if let Some(&(soft::Pass::Compute, _, _)) = journal.passes.last() { + false + } else { + journal.stop(); + journal + .passes + .alloc() + .init((soft::Pass::Compute, journal.compute_commands.len() .. 
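+ // The range end below (0) is a placeholder; `Journal::stop()` patches
+ // it to the final command count when the pass is closed.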
0, label.clone())); + true + }; + ( + PreCompute::Deferred(&mut journal.resources, &mut journal.compute_commands), + switch, + ) + } + #[cfg(feature = "dispatch")] + CommandSink::Remote { + pass: Some(EncodePass::Compute(ref mut list, ref mut resources, _)), + .. + } => (PreCompute::Deferred(resources, list), false), + #[cfg(feature = "dispatch")] + CommandSink::Remote { + queue: NoDebug(ref queue), + ref cmd_buffer, + ref mut pass, + ref mut capacity, + ref label, + .. + } => { + if let Some(pass) = pass.take() { + pass.update(capacity); + pass.schedule(queue, cmd_buffer); + } + let list = Vec::with_capacity(capacity.compute); + *pass = Some(EncodePass::Compute(list, soft::Own::default(), label.clone())); + match *pass { + Some(EncodePass::Compute(ref mut list, ref mut resources, _)) => { + (PreCompute::Deferred(resources, list), true) + } + _ => unreachable!(), + } + } + } + } + + fn quick_compute<'a, I>(&mut self, label: &str, commands: I) + where + I: Iterator>, + { + { + let (mut pre, switch) = self.switch_compute(); + pre.issue_many(commands); + if switch { + if let PreCompute::Immediate(encoder) = pre { + encoder.set_label(label); + } + } + } + self.stop_encoding(); + } +} + +#[derive(Clone, Debug)] +pub struct IndexBuffer { + buffer: B, + offset: u32, + stride: u32, +} + +/// This is an inner mutable part of the command buffer that is +/// accessible by the owning command pool for one single reason: +/// to reset it. +#[derive(Debug)] +pub struct CommandBufferInner { + sink: Option, + level: com::Level, + backup_journal: Option, + #[cfg(feature = "dispatch")] + backup_capacity: Option, + retained_buffers: Vec, + retained_textures: Vec, + active_visibility_queries: Vec, + events: Vec<(Arc, bool)>, + host_events: Vec>, +} + +impl Drop for CommandBufferInner { + fn drop(&mut self) { + if self.sink.is_some() { + error!("Command buffer not released properly!"); + } + } +} + +impl CommandBufferInner { + pub(crate) fn reset(&mut self, shared: &Shared, release: bool) { + match self.sink.take() { + Some(CommandSink::Immediate { + token, + mut encoder_state, + .. + }) => { + encoder_state.end(); + shared.queue.lock().release(token); + } + Some(CommandSink::Deferred { mut journal, .. }) => { + if !release { + journal.clear(); + self.backup_journal = Some(journal); + } + } + #[cfg(feature = "dispatch")] + Some(CommandSink::Remote { + token, capacity, .. 
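+ // Keep the measured `Capacity` across resets so the next recording
+ // can pre-size its command vectors (`Vec::with_capacity` in the sink).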
+ }) => { + shared.queue.lock().release(token); + if !release { + self.backup_capacity = Some(capacity); + } + } + None => {} + }; + self.retained_buffers.clear(); + self.retained_textures.clear(); + self.active_visibility_queries.clear(); + self.events.clear(); + } + + fn sink(&mut self) -> &mut CommandSink { + self.sink.as_mut().unwrap() + } +} + +#[derive(Debug)] +enum EncoderState { + None, + Blit(metal::BlitCommandEncoder), + Render(metal::RenderCommandEncoder), + Compute(metal::ComputeCommandEncoder), +} + +impl EncoderState { + fn end(&mut self) { + match mem::replace(self, EncoderState::None) { + EncoderState::None => {} + EncoderState::Render(ref encoder) => { + encoder.end_encoding(); + } + EncoderState::Blit(ref encoder) => { + encoder.end_encoding(); + } + EncoderState::Compute(ref encoder) => { + encoder.end_encoding(); + } + } + } +} + +fn div(a: u32, b: u32) -> u32 { + (a + b - 1) / b +} + +fn compute_pitches(region: &com::BufferImageCopy, fd: FormatDesc, extent: &MTLSize) -> (u32, u32) { + let buffer_width = if region.buffer_width == 0 { + extent.width as u32 + } else { + region.buffer_width + }; + let buffer_height = if region.buffer_height == 0 { + extent.height as u32 + } else { + region.buffer_height + }; + let row_pitch = div(buffer_width, fd.dim.0 as _) * (fd.bits / 8) as u32; + let slice_pitch = div(buffer_height, fd.dim.1 as _) * row_pitch; + (row_pitch, slice_pitch) +} + +fn exec_render(encoder: &metal::RenderCommandEncoderRef, command: C, resources: &R) +where + R: soft::Resources, + R::Data: Borrow<[u32]>, + R::BufferArray: soft::AsSlice, R> + soft::AsSlice, + R::TextureArray: soft::AsSlice, R>, + R::SamplerArray: soft::AsSlice, R>, + R::DepthStencil: Borrow, + R::RenderPipeline: Borrow, + C: Borrow>, +{ + use crate::soft::RenderCommand as Cmd; + match *command.borrow() { + Cmd::SetViewport(ref rect, ref depth) => { + encoder.set_viewport(MTLViewport { + originX: rect.x as _, + originY: rect.y as _, + width: rect.w as _, + height: rect.h as _, + znear: depth.start as _, + zfar: depth.end as _, + }); + } + Cmd::SetScissor(scissor) => { + encoder.set_scissor_rect(scissor); + } + Cmd::SetBlendColor(color) => { + encoder.set_blend_color(color[0], color[1], color[2], color[3]); + } + Cmd::SetDepthBias(depth_bias) => { + encoder.set_depth_bias( + depth_bias.const_factor, + depth_bias.slope_factor, + depth_bias.clamp, + ); + } + Cmd::SetDepthStencilState(ref depth_stencil) => { + encoder.set_depth_stencil_state(depth_stencil.borrow()); + } + Cmd::SetStencilReferenceValues(sided) => { + encoder.set_stencil_front_back_reference_value(sided.front, sided.back); + } + Cmd::SetRasterizerState(ref rs) => { + encoder.set_front_facing_winding(rs.front_winding); + encoder.set_cull_mode(rs.cull_mode); + encoder.set_triangle_fill_mode(rs.fill_mode); + if let Some(depth_clip) = rs.depth_clip { + encoder.set_depth_clip_mode(depth_clip); + } + } + Cmd::SetVisibilityResult(mode, offset) => { + encoder.set_visibility_result_mode(mode, offset); + } + Cmd::BindBuffer { + stage, + index, + buffer, + offset, + } => { + let native = Some(buffer.as_native()); + match stage { + pso::Stage::Vertex => encoder.set_vertex_buffer(index as _, native, offset as _), + pso::Stage::Fragment => { + encoder.set_fragment_buffer(index as _, native, offset as _) + } + _ => unreachable!(), + } + } + Cmd::BindBuffers { + stage, + index, + ref buffers, + } => { + use crate::soft::AsSlice; + let values: &[Option] = buffers.as_slice(resources); + if !values.is_empty() { + let data = unsafe { + // convert 
`BufferPtr` -> `&metal::BufferRef` + mem::transmute(values) + }; + let offsets = buffers.as_slice(resources); + match stage { + pso::Stage::Vertex => encoder.set_vertex_buffers(index as _, data, offsets), + pso::Stage::Fragment => encoder.set_fragment_buffers(index as _, data, offsets), + _ => unreachable!(), + } + } + } + Cmd::BindBufferData { + stage, + index, + ref words, + } => { + let slice = words.borrow(); + match stage { + pso::Stage::Vertex => encoder.set_vertex_bytes( + index as _, + (slice.len() * WORD_SIZE) as u64, + slice.as_ptr() as _, + ), + pso::Stage::Fragment => encoder.set_fragment_bytes( + index as _, + (slice.len() * WORD_SIZE) as u64, + slice.as_ptr() as _, + ), + _ => unreachable!(), + } + } + Cmd::BindTextures { + stage, + index, + ref textures, + } => { + use crate::soft::AsSlice; + let values = textures.as_slice(resources); + if !values.is_empty() { + let data = unsafe { + // convert `TexturePtr` -> `&metal::TextureRef` + mem::transmute(values) + }; + match stage { + pso::Stage::Vertex => encoder.set_vertex_textures(index as _, data), + pso::Stage::Fragment => encoder.set_fragment_textures(index as _, data), + _ => unreachable!(), + } + } + } + Cmd::BindSamplers { + stage, + index, + ref samplers, + } => { + use crate::soft::AsSlice; + let values = samplers.as_slice(resources); + if !values.is_empty() { + let data = unsafe { + // convert `SamplerPtr` -> `&metal::SamplerStateRef` + mem::transmute(values) + }; + match stage { + pso::Stage::Vertex => encoder.set_vertex_sampler_states(index as _, data), + pso::Stage::Fragment => encoder.set_fragment_sampler_states(index as _, data), + _ => unreachable!(), + } + } + } + Cmd::BindPipeline(ref pipeline_state) => { + encoder.set_render_pipeline_state(pipeline_state.borrow()); + } + Cmd::UseResource { resource, usage } => { + encoder.use_resource(resource.as_native(), usage); + } + Cmd::Draw { + primitive_type, + ref vertices, + ref instances, + } => { + if instances.end == 1 { + encoder.draw_primitives( + primitive_type, + vertices.start as _, + (vertices.end - vertices.start) as _, + ); + } else if instances.start == 0 { + encoder.draw_primitives_instanced( + primitive_type, + vertices.start as _, + (vertices.end - vertices.start) as _, + instances.end as _, + ); + } else { + encoder.draw_primitives_instanced_base_instance( + primitive_type, + vertices.start as _, + (vertices.end - vertices.start) as _, + (instances.end - instances.start) as _, + instances.start as _, + ); + } + } + Cmd::DrawIndexed { + primitive_type, + ref index, + ref indices, + base_vertex, + ref instances, + } => { + let index_count = (indices.end - indices.start) as _; + let index_type = match index.stride { + 2 => MTLIndexType::UInt16, + 4 => MTLIndexType::UInt32, + _ => unreachable!(), + }; + let offset = (index.offset + indices.start * index.stride) as u64; + let index_buffer = index.buffer.as_native(); + if base_vertex == 0 && instances.end == 1 { + encoder.draw_indexed_primitives( + primitive_type, + index_count, + index_type, + index_buffer, + offset, + ); + } else if base_vertex == 0 && instances.start == 0 { + encoder.draw_indexed_primitives_instanced( + primitive_type, + index_count, + index_type, + index_buffer, + offset, + instances.end as _, + ); + } else { + encoder.draw_indexed_primitives_instanced_base_instance( + primitive_type, + index_count, + index_type, + index_buffer, + offset, + (instances.end - instances.start) as _, + base_vertex as _, + instances.start as _, + ); + } + } + Cmd::DrawIndirect { + primitive_type, + buffer, + 
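+ // Indirect draws read their arguments from a GPU-side buffer at
+ // `offset` rather than from CPU-recorded values.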
offset, + } => { + encoder.draw_primitives_indirect(primitive_type, buffer.as_native(), offset); + } + Cmd::DrawIndexedIndirect { + primitive_type, + ref index, + buffer, + offset, + } => { + let index_type = match index.stride { + 2 => MTLIndexType::UInt16, + 4 => MTLIndexType::UInt32, + _ => unreachable!(), + }; + encoder.draw_indexed_primitives_indirect( + primitive_type, + index_type, + index.buffer.as_native(), + index.offset as u64, + buffer.as_native(), + offset, + ); + } + } +} + +fn exec_blit(encoder: &metal::BlitCommandEncoderRef, command: C) +where + C: Borrow, +{ + use crate::soft::BlitCommand as Cmd; + match *command.borrow() { + Cmd::FillBuffer { + dst, + ref range, + value, + } => { + encoder.fill_buffer( + dst.as_native(), + NSRange { + location: range.start, + length: range.end - range.start, + }, + value, + ); + } + Cmd::CopyBuffer { + src, + dst, + ref region, + } => { + encoder.copy_from_buffer( + src.as_native(), + region.src as NSUInteger, + dst.as_native(), + region.dst as NSUInteger, + region.size as NSUInteger, + ); + } + Cmd::CopyImage { + src, + dst, + ref region, + } => { + let size = conv::map_extent(region.extent); + let src_offset = conv::map_offset(region.src_offset); + let dst_offset = conv::map_offset(region.dst_offset); + let layers = region + .src_subresource + .layers + .clone() + .zip(region.dst_subresource.layers.clone()); + for (src_layer, dst_layer) in layers { + encoder.copy_from_texture( + src.as_native(), + src_layer as _, + region.src_subresource.level as _, + src_offset, + size, + dst.as_native(), + dst_layer as _, + region.dst_subresource.level as _, + dst_offset, + ); + } + } + Cmd::CopyBufferToImage { + src, + dst, + dst_desc, + ref region, + } => { + let extent = conv::map_extent(region.image_extent); + let origin = conv::map_offset(region.image_offset); + let (row_pitch, slice_pitch) = compute_pitches(®ion, dst_desc, &extent); + let r = ®ion.image_layers; + + for layer in r.layers.clone() { + let offset = region.buffer_offset + + slice_pitch as NSUInteger * (layer - r.layers.start) as NSUInteger; + encoder.copy_from_buffer_to_texture( + src.as_native(), + offset as NSUInteger, + row_pitch as NSUInteger, + slice_pitch as NSUInteger, + extent, + dst.as_native(), + layer as NSUInteger, + r.level as NSUInteger, + origin, + metal::MTLBlitOption::empty(), + ); + } + } + Cmd::CopyImageToBuffer { + src, + src_desc, + dst, + ref region, + } => { + let extent = conv::map_extent(region.image_extent); + let origin = conv::map_offset(region.image_offset); + let (row_pitch, slice_pitch) = compute_pitches(®ion, src_desc, &extent); + let r = ®ion.image_layers; + + for layer in r.layers.clone() { + let offset = region.buffer_offset + + slice_pitch as NSUInteger * (layer - r.layers.start) as NSUInteger; + encoder.copy_from_texture_to_buffer( + src.as_native(), + layer as NSUInteger, + r.level as NSUInteger, + origin, + extent, + dst.as_native(), + offset as NSUInteger, + row_pitch as NSUInteger, + slice_pitch as NSUInteger, + metal::MTLBlitOption::empty(), + ); + } + } + } +} + +fn exec_compute(encoder: &metal::ComputeCommandEncoderRef, command: C, resources: &R) +where + R: soft::Resources, + R::Data: Borrow<[u32]>, + R::BufferArray: soft::AsSlice, R> + soft::AsSlice, + R::TextureArray: soft::AsSlice, R>, + R::SamplerArray: soft::AsSlice, R>, + R::ComputePipeline: Borrow, + C: Borrow>, +{ + use crate::soft::ComputeCommand as Cmd; + match *command.borrow() { + Cmd::BindBuffer { + index, + buffer, + offset, + } => { + let native = Some(buffer.as_native()); + 
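+ // `as_native()` turns the stored `BufferPtr` back into a
+ // `&metal::BufferRef`, mirroring the transmutes in the batched
+ // `BindBuffers` arm below.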
encoder.set_buffer(index as _, native, offset); + } + Cmd::BindBuffers { index, ref buffers } => { + use crate::soft::AsSlice; + let values: &[Option] = buffers.as_slice(resources); + if !values.is_empty() { + let data = unsafe { + // convert `BufferPtr` -> `&metal::BufferRef` + mem::transmute(values) + }; + let offsets = buffers.as_slice(resources); + encoder.set_buffers(index as _, data, offsets); + } + } + Cmd::BindBufferData { ref words, index } => { + let slice = words.borrow(); + encoder.set_bytes( + index as _, + (slice.len() * WORD_SIZE) as u64, + slice.as_ptr() as _, + ); + } + Cmd::BindTextures { + index, + ref textures, + } => { + use crate::soft::AsSlice; + let values = textures.as_slice(resources); + if !values.is_empty() { + let data = unsafe { + // convert `TexturePtr` -> `&metal::TextureRef` + mem::transmute(values) + }; + encoder.set_textures(index as _, data); + } + } + Cmd::BindSamplers { + index, + ref samplers, + } => { + use crate::soft::AsSlice; + let values = samplers.as_slice(resources); + if !values.is_empty() { + let data = unsafe { + // convert `SamplerPtr` -> `&metal::SamplerStateRef` + mem::transmute(values) + }; + encoder.set_sampler_states(index as _, data); + } + } + Cmd::BindPipeline(ref pipeline) => { + encoder.set_compute_pipeline_state(pipeline.borrow()); + } + Cmd::UseResource { resource, usage } => { + encoder.use_resource(resource.as_native(), usage); + } + Cmd::Dispatch { wg_size, wg_count } => { + encoder.dispatch_thread_groups(wg_count, wg_size); + } + Cmd::DispatchIndirect { + wg_size, + buffer, + offset, + } => { + encoder.dispatch_thread_groups_indirect(buffer.as_native(), offset, wg_size); + } + } +} + + +#[derive(Default, Debug)] +struct PerformanceCounters { + immediate_command_buffers: usize, + deferred_command_buffers: usize, + remote_command_buffers: usize, + signal_command_buffers: usize, + frame_wait_duration: time::Duration, + frame_wait_count: usize, + frame: usize, +} + +#[derive(Debug)] +pub struct CommandQueue { + shared: Arc, + retained_buffers: Vec, + retained_textures: Vec, + active_visibility_queries: Vec, + perf_counters: Option, + /// If true, we combine deferred command buffers together into one giant + /// command buffer per submission, including the signalling logic. + pub stitch_deferred: bool, + /// Hack around the Metal System Trace logic that ignores empty command buffers entirely. + pub insert_dummy_encoders: bool, +} + +unsafe impl Send for CommandQueue {} +unsafe impl Sync for CommandQueue {} + +impl CommandQueue { + pub(crate) fn new(shared: Arc) -> Self { + CommandQueue { + shared, + retained_buffers: Vec::new(), + retained_textures: Vec::new(), + active_visibility_queries: Vec::new(), + perf_counters: if COUNTERS_REPORT_WINDOW != 0 { + Some(PerformanceCounters::default()) + } else { + None + }, + stitch_deferred: true, + insert_dummy_encoders: false, + } + } + + /// This is a hack around Metal System Trace logic that ignores empty command buffers entirely. 
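+ // (Used by `submit` and `present` below so that signal- or
+ // present-only command buffers still show up when profiling.)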
+ fn record_empty(&self, command_buf: &metal::CommandBufferRef) { + if self.insert_dummy_encoders { + command_buf.new_blit_command_encoder().end_encoding(); + } + } + + fn wait<'a, T, I>(&mut self, wait_semaphores: I) + where + T: 'a + Borrow, + I: IntoIterator, + { + for semaphore in wait_semaphores { + let sem = semaphore.borrow(); + if let Some(ref system) = sem.system { + system.wait(!0); + } + if let Some(swap_image) = sem.image_ready.lock().take() { + let start = time::Instant::now(); + let count = swap_image.wait_until_ready(); + if let Some(ref mut counters) = self.perf_counters { + counters.frame_wait_count += count; + counters.frame_wait_duration += start.elapsed(); + } + } + } + } +} + +impl hal::queue::CommandQueue for CommandQueue { + unsafe fn submit<'a, T, Ic, S, Iw, Is>( + &mut self, + hal::queue::Submission { + command_buffers, + wait_semaphores, + signal_semaphores, + }: hal::queue::Submission, + fence: Option<&native::Fence>, + ) where + T: 'a + Borrow, + Ic: IntoIterator, + S: 'a + Borrow, + Iw: IntoIterator, + Is: IntoIterator, + { + use smallvec::SmallVec; + + debug!("submitting with fence {:?}", fence); + self.wait(wait_semaphores.into_iter().map(|(s, _)| s)); + + const BLOCK_BUCKET: usize = 4; + let system_semaphores = signal_semaphores + .into_iter() + .filter_map(|sem| sem.borrow().system.clone()) + .collect::>(); + + #[allow(unused_mut)] + let (mut num_immediate, mut num_deferred, mut num_remote) = (0, 0, 0); + let mut event_commands = Vec::new(); + let do_signal = fence.is_some() || !system_semaphores.is_empty(); + + autoreleasepool(|| { + // for command buffers + let cmd_queue = self.shared.queue.lock(); + let mut blocker = self.shared.queue_blocker.lock(); + let mut deferred_cmd_buffer = None::<&metal::CommandBufferRef>; + + for buffer in command_buffers { + let mut inner = buffer.borrow().inner.borrow_mut(); + let CommandBufferInner { + ref sink, + ref mut retained_buffers, + ref mut retained_textures, + ref mut active_visibility_queries, + ref events, + ref host_events, + .. + } = *inner; + + //TODO: split event commands into immediate/blocked submissions? + event_commands.extend_from_slice(events); + // wait for anything not previously fired + let wait_events = host_events + .iter() + .filter(|event| { + event_commands + .iter() + .rfind(|ev| Arc::ptr_eq(event, &ev.0)) + .map_or(true, |ev| !ev.1) + }) + .cloned() + .collect::>(); + if !wait_events.is_empty() { + blocker.submissions.push(BlockedSubmission { + wait_events, + command_buffers: Vec::new(), + }); + } + + match *sink { + Some(CommandSink::Immediate { + ref cmd_buffer, + ref token, + num_passes, + .. + }) => { + num_immediate += 1; + trace!("\timmediate {:?} with {} passes", token, num_passes); + self.retained_buffers.extend(retained_buffers.drain(..)); + self.retained_textures.extend(retained_textures.drain(..)); + self.active_visibility_queries + .extend(active_visibility_queries.drain(..)); + if num_passes != 0 { + // flush the deferred recording, if any + if let Some(cb) = deferred_cmd_buffer.take() { + blocker.submit_impl(cb); + } + blocker.submit_impl(cmd_buffer); + } + } + Some(CommandSink::Deferred { ref journal, .. 
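+ // Deferred sinks are replayed here at submit time into a temporary
+ // command buffer; `stitch_deferred` controls whether consecutive
+ // journals share one Metal command buffer.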
}) => { + num_deferred += 1; + trace!("\tdeferred with {} passes", journal.passes.len()); + self.active_visibility_queries + .extend_from_slice(active_visibility_queries); + if !journal.passes.is_empty() { + let cmd_buffer = deferred_cmd_buffer.take().unwrap_or_else(|| { + let cmd_buffer = cmd_queue.spawn_temp(); + cmd_buffer.enqueue(); + cmd_buffer.set_label("deferred"); + cmd_buffer + }); + journal.record(&*cmd_buffer); + if self.stitch_deferred { + deferred_cmd_buffer = Some(cmd_buffer); + } else { + blocker.submit_impl(cmd_buffer); + } + } + } + #[cfg(feature = "dispatch")] + Some(CommandSink::Remote { + queue: NoDebug(ref queue), + ref cmd_buffer, + ref token, + .. + }) => { + num_remote += 1; + trace!("\tremote {:?}", token); + cmd_buffer.lock().enqueue(); + let shared_cb = SharedCommandBuffer(Arc::clone(cmd_buffer)); + //TODO: make this compatible with events + queue.sync(move || { + shared_cb.0.lock().commit(); + }); + } + None => panic!("Command buffer not recorded for submission"), + } + } + + if do_signal || !event_commands.is_empty() || !self.active_visibility_queries.is_empty() + { + //Note: there is quite a bit copying here + let free_buffers = self + .retained_buffers + .drain(..) + .collect::>(); + let free_textures = self + .retained_textures + .drain(..) + .collect::>(); + let visibility = if self.active_visibility_queries.is_empty() { + None + } else { + let queries = self + .active_visibility_queries + .drain(..) + .collect::>(); + Some((Arc::clone(&self.shared), queries)) + }; + + let block = ConcreteBlock::new(move |_cb: *mut ()| { + // signal the semaphores + for semaphore in &system_semaphores { + semaphore.signal(); + } + // process events + for &(ref atomic, value) in &event_commands { + atomic.store(value, Ordering::Release); + } + // free all the manually retained resources + let _ = free_buffers; + let _ = free_textures; + // update visibility queries + if let Some((ref shared, ref queries)) = visibility { + let vis = &shared.visibility; + let availability_ptr = (vis.buffer.contents() as *mut u8) + .offset(vis.availability_offset as isize) + as *mut u32; + for &q in queries { + *availability_ptr.offset(q as isize) = 1; + } + //HACK: the lock is needed to wake up, but it doesn't hold the checked data + let _ = vis.allocator.lock(); + vis.condvar.notify_all(); + } + }) + .copy(); + + let cmd_buffer = deferred_cmd_buffer.take().unwrap_or_else(|| { + let cmd_buffer = cmd_queue.spawn_temp(); + cmd_buffer.set_label("signal"); + self.record_empty(cmd_buffer); + cmd_buffer + }); + let () = msg_send![cmd_buffer, addCompletedHandler: block.deref() as *const _]; + blocker.submit_impl(cmd_buffer); + + if let Some(fence) = fence { + debug!("\tmarking fence ptr {:?} as pending", fence.0.as_ptr()); + fence + .0 + .replace(native::FenceInner::PendingSubmission(cmd_buffer.to_owned())); + } + } else if let Some(cmd_buffer) = deferred_cmd_buffer { + blocker.submit_impl(cmd_buffer); + } + }); + + debug!( + "\t{} immediate, {} deferred, and {} remote command buffers", + num_immediate, num_deferred, num_remote + ); + if let Some(ref mut counters) = self.perf_counters { + counters.immediate_command_buffers += num_immediate; + counters.deferred_command_buffers += num_deferred; + counters.remote_command_buffers += num_remote; + if do_signal { + counters.signal_command_buffers += 1; + } + } + } + + unsafe fn present<'a, W, Is, S, Iw>( + &mut self, + swapchains: Is, + wait_semaphores: Iw, + ) -> Result, PresentError> + where + W: 'a + Borrow, + Is: IntoIterator, + S: 'a + Borrow, + Iw: 
IntoIterator, + { + self.wait(wait_semaphores); + + let queue = self.shared.queue.lock(); + autoreleasepool(|| { + let command_buffer = queue.raw.new_command_buffer(); + command_buffer.set_label("present"); + self.record_empty(command_buffer); + + for (swapchain, index) in swapchains { + debug!("presenting frame {}", index); + let drawable = swapchain + .borrow() + .take_drawable(index) + .map_err(|()| PresentError::OutOfDate)?; // What `Err(())` represents? + command_buffer.present_drawable(&drawable); + } + command_buffer.commit(); + Ok(()) + })?; + + if let Some(ref mut counters) = self.perf_counters { + counters.frame += 1; + if counters.frame >= COUNTERS_REPORT_WINDOW { + let time = counters.frame_wait_duration / counters.frame as u32; + let total_submitted = counters.immediate_command_buffers + + counters.deferred_command_buffers + + counters.remote_command_buffers + + counters.signal_command_buffers; + println!("Performance counters:"); + println!( + "\tCommand buffers: {} immediate, {} deferred, {} remote, {} signals", + counters.immediate_command_buffers / counters.frame, + counters.deferred_command_buffers / counters.frame, + counters.remote_command_buffers / counters.frame, + counters.signal_command_buffers / counters.frame, + ); + println!("\tEstimated pipeline length is {} frames, given the total active {} command buffers", + counters.frame * queue.reserve.start / total_submitted.max(1), + queue.reserve.start, + ); + println!( + "\tFrame wait time is {}ms over {} requests", + time.as_secs() as u32 * 1000 + time.subsec_millis(), + counters.frame_wait_count as f32 / counters.frame as f32, + ); + *counters = PerformanceCounters::default(); + } + } + + Ok(None) + } + + unsafe fn present_surface( + &mut self, + _surface: &mut window::Surface, + image: window::SurfaceImage, + wait_semaphore: Option<&native::Semaphore>, + ) -> Result, PresentError> { + self.wait(wait_semaphore); + + let queue = self.shared.queue.lock(); + let drawable = image.into_drawable(); + autoreleasepool(|| { + let command_buffer = queue.raw.new_command_buffer(); + command_buffer.set_label("present"); + self.record_empty(command_buffer); + + command_buffer.present_drawable(&drawable); + command_buffer.commit(); + }); + Ok(None) + } + + fn wait_idle(&self) -> Result<(), OutOfMemory> { + QueueInner::wait_idle(&self.shared.queue); + Ok(()) + } +} + +fn assign_sides( + this: &mut pso::Sided, + faces: pso::Face, + value: pso::StencilValue, +) { + if faces.contains(pso::Face::FRONT) { + this.front = value; + } + if faces.contains(pso::Face::BACK) { + this.back = value; + } +} + +impl hal::pool::CommandPool for CommandPool { + unsafe fn reset(&mut self, release_resources: bool) { + for cmd_buffer in &self.allocated { + cmd_buffer + .borrow_mut() + .reset(&self.shared, release_resources); + } + } + + unsafe fn allocate_one(&mut self, level: com::Level) -> CommandBuffer { + //TODO: fail with OOM if we allocate more actual command buffers + // than our mega-queue supports. 
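+ // The pool keeps a clone of this `Arc` so `CommandPool::reset` can
+ // reset buffers it handed out; the `State` below is the freshly
+ // initialized CPU-side cache for the new command buffer.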
+ let inner = Arc::new(RefCell::new(CommandBufferInner { + sink: None, + level, + backup_journal: None, + #[cfg(feature = "dispatch")] + backup_capacity: None, + retained_buffers: Vec::new(), + retained_textures: Vec::new(), + active_visibility_queries: Vec::new(), + events: Vec::new(), + host_events: Vec::new(), + })); + self.allocated.push(Arc::clone(&inner)); + + CommandBuffer { + shared: Arc::clone(&self.shared), + pool_shared: Arc::clone(&self.pool_shared), + inner, + state: State { + viewport: None, + scissors: None, + blend_color: None, + render_pso: None, + render_pso_is_compatible: false, + compute_pso: None, + work_group_size: MTLSize { + width: 0, + height: 0, + depth: 0, + }, + primitive_type: MTLPrimitiveType::Point, + resources_vs: StageResources::new(), + resources_ps: StageResources::new(), + resources_cs: StageResources::new(), + index_buffer: None, + rasterizer_state: None, + depth_bias: pso::DepthBias::default(), + stencil: native::StencilState { + reference_values: pso::Sided::new(0), + read_masks: pso::Sided::new(!0), + write_masks: pso::Sided::new(!0), + }, + push_constants: Vec::new(), + vertex_buffers: Vec::new(), + target_aspects: Aspects::empty(), + target_extent: Extent::default(), + target_formats: native::SubpassFormats::default(), + visibility_query: (metal::MTLVisibilityResultMode::Disabled, 0), + pending_subpasses: Vec::new(), + descriptor_sets: (0 .. MAX_BOUND_DESCRIPTOR_SETS) + .map(|_| DescriptorSetInfo::default()) + .collect(), + }, + temp: Temp { + clear_vertices: Vec::new(), + blit_vertices: FastHashMap::default(), + clear_values: Vec::new(), + }, + name: String::new(), + } + } + + /// Free command buffers which are allocated from this pool. + unsafe fn free(&mut self, cmd_buffers: I) + where + I: IntoIterator, + { + use hal::command::CommandBuffer as _; + for mut cmd_buf in cmd_buffers { + cmd_buf.reset(true); + match self + .allocated + .iter_mut() + .position(|b| Arc::ptr_eq(b, &cmd_buf.inner)) + { + Some(index) => { + self.allocated.swap_remove(index); + } + None => error!("Unable to free a command buffer!"), + } + } + } +} + +impl CommandBuffer { + fn update_depth_stencil(&self) { + let mut inner = self.inner.borrow_mut(); + let mut pre = inner.sink().pre_render(); + if !pre.is_void() { + let ds_store = &self.shared.service_pipes.depth_stencil_states; + if let Some(desc) = self.state.build_depth_stencil() { + let state = &**ds_store.get(desc, &self.shared.device); + pre.issue(soft::RenderCommand::SetDepthStencilState(state)); + } + } + } +} + +impl com::CommandBuffer for CommandBuffer { + unsafe fn begin( + &mut self, + flags: com::CommandBufferFlags, + info: com::CommandBufferInheritanceInfo, + ) { + self.reset(false); + + let mut inner = self.inner.borrow_mut(); + let can_immediate = inner.level == com::Level::Primary + && flags.contains(com::CommandBufferFlags::ONE_TIME_SUBMIT); + let sink = match self.pool_shared.borrow_mut().online_recording { + OnlineRecording::Immediate if can_immediate => { + let (cmd_buffer, token) = self.shared.queue.lock().spawn(); + cmd_buffer.set_label(&self.name); + CommandSink::Immediate { + cmd_buffer, + token, + encoder_state: EncoderState::None, + num_passes: 0, + label: String::new(), + } + } + #[cfg(feature = "dispatch")] + OnlineRecording::Remote(_) if can_immediate => { + let (cmd_buffer, token) = self.shared.queue.lock().spawn(); + cmd_buffer.set_label(&self.name); + CommandSink::Remote { + queue: NoDebug(dispatch::Queue::with_target_queue( + "gfx-metal", + dispatch::QueueAttribute::Serial, + &self + 
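+ // Each remote command buffer gets its own serial queue targeting the
+ // pool's shared dispatch queue, so passes are encoded in order
+ // without blocking the recording thread.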
.pool_shared + .borrow_mut() + .dispatch_queue + .as_ref() + .unwrap() + .0, + )), + cmd_buffer: Arc::new(Mutex::new(cmd_buffer)), + token, + pass: None, + capacity: inner.backup_capacity.take().unwrap_or_default(), + label: String::new(), + } + } + _ => CommandSink::Deferred { + is_encoding: false, + is_inheriting: info.subpass.is_some(), + journal: inner.backup_journal.take().unwrap_or_default(), + label: String::new(), + }, + }; + inner.sink = Some(sink); + + if let Some(framebuffer) = info.framebuffer { + self.state.target_extent = framebuffer.extent; + } + if let Some(sp) = info.subpass { + let subpass = &sp.main_pass.subpasses[sp.index]; + self.state.target_formats.copy_from(&subpass.target_formats); + + self.state.target_aspects = Aspects::empty(); + if !subpass.colors.is_empty() { + self.state.target_aspects |= Aspects::COLOR; + } + if let Some((at_id, _)) = subpass.depth_stencil { + let rat = &sp.main_pass.attachments[at_id]; + let aspects = rat.format.unwrap().surface_desc().aspects; + self.state.target_aspects |= aspects; + } + + match inner.sink { + Some(CommandSink::Deferred { + ref mut is_encoding, + ref mut journal, + ref label, + .. + }) => { + *is_encoding = true; + let pass_desc = metal::RenderPassDescriptor::new().to_owned(); + journal + .passes + .alloc() + .init((soft::Pass::Render(pass_desc), 0 .. 0, label.clone())); + } + _ => { + warn!("Unexpected inheritance info on a primary command buffer"); + } + } + } + } + + unsafe fn finish(&mut self) { + self.inner.borrow_mut().sink().stop_encoding(); + } + + unsafe fn reset(&mut self, release_resources: bool) { + self.state.reset_resources(); + self.inner + .borrow_mut() + .reset(&self.shared, release_resources); + } + + unsafe fn pipeline_barrier<'a, T>( + &mut self, + _stages: Range, + _dependencies: memory::Dependencies, + _barriers: T, + ) where + T: IntoIterator, + T::Item: Borrow>, + { + } + + unsafe fn fill_buffer(&mut self, buffer: &native::Buffer, range: R, data: u32) + where + R: RangeArg, + { + let (raw, base_range) = buffer.as_bound(); + let mut inner = self.inner.borrow_mut(); + + let start = base_range.start + *range.start().unwrap_or(&0); + assert_eq!(start % WORD_ALIGNMENT, 0); + + let end = match range.end() { + Some(&e) => { + assert_eq!(e % WORD_ALIGNMENT, 0); + base_range.start + e + } + None => base_range.end, + }; + + if (data & 0xFF) * 0x0101_0101 == data { + let command = soft::BlitCommand::FillBuffer { + dst: AsNative::from(raw), + range: start .. 
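+ // Blit `fill_buffer` writes a repeating byte, so this path is only
+ // taken when all four bytes of `data` match (e.g. 0x2A2A_2A2A);
+ // anything else falls through to the compute splat below.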
end, + value: data as u8, + }; + inner.sink().blit_commands(iter::once(command)); + } else { + let pso = &*self.shared.service_pipes.fill_buffer; + let length = (end - start) / WORD_ALIGNMENT; + let value_and_length = [data, length as _]; + + // TODO: Consider writing multiple values per thread in shader + let threads_per_threadgroup = pso.thread_execution_width(); + let threadgroups = (length + threads_per_threadgroup - 1) / threads_per_threadgroup; + + let wg_count = MTLSize { + width: threadgroups, + height: 1, + depth: 1, + }; + let wg_size = MTLSize { + width: threads_per_threadgroup, + height: 1, + depth: 1, + }; + + let commands = [ + soft::ComputeCommand::BindPipeline(pso), + soft::ComputeCommand::BindBuffer { + index: 0, + buffer: AsNative::from(raw), + offset: start, + }, + soft::ComputeCommand::BindBufferData { + index: 1, + words: &value_and_length[..], + }, + soft::ComputeCommand::Dispatch { wg_size, wg_count }, + ]; + + inner + .sink() + .quick_compute("fill_buffer", commands.iter().cloned()); + } + } + + unsafe fn update_buffer(&mut self, dst: &native::Buffer, offset: buffer::Offset, data: &[u8]) { + let (dst_raw, dst_range) = dst.as_bound(); + assert!(dst_range.start + offset + data.len() as buffer::Offset <= dst_range.end); + + let src = self.shared.device.lock().new_buffer_with_data( + data.as_ptr() as _, + data.len() as _, + metal::MTLResourceOptions::CPUCacheModeWriteCombined, + ); + src.set_label("update_buffer"); + + let mut inner = self.inner.borrow_mut(); + { + let command = soft::BlitCommand::CopyBuffer { + src: AsNative::from(src.as_ref()), + dst: AsNative::from(dst_raw), + region: com::BufferCopy { + src: 0, + dst: dst_range.start + offset, + size: data.len() as _, + }, + }; + + inner.sink().blit_commands(iter::once(command)); + } + + inner.retained_buffers.push(src); + } + + unsafe fn clear_image( + &mut self, + image: &native::Image, + _layout: Layout, + value: com::ClearValue, + subresource_ranges: T, + ) where + T: IntoIterator, + T::Item: Borrow, + { + let CommandBufferInner { + ref mut retained_textures, + ref mut sink, + .. + } = *self.inner.borrow_mut(); + + let clear_color = image.shader_channel.interpret(value.color); + let base_extent = image.kind.extent(); + let is_layered = !self.shared.disabilities.broken_layered_clear_image; + + autoreleasepool(|| { + let raw = image.like.as_texture(); + for subresource_range in subresource_ranges { + let sub = subresource_range.borrow(); + let num_layers = (sub.layers.end - sub.layers.start) as u64; + let layers = if is_layered { + 0 .. 
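+ // With layered rendering available, one pass clears every layer (via
+ // `set_render_target_array_length`), so a single iteration suffices;
+ // the broken-hardware path loops over layers instead.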
1 + } else { + sub.layers.clone() + }; + let texture = if is_layered && sub.layers.start > 0 { + // aliasing is necessary for bulk-clearing all layers starting with 0 + let tex = raw.new_texture_view_from_slice( + image.mtl_format, + image.mtl_type, + NSRange { + location: 0, + length: raw.mipmap_level_count(), + }, + NSRange { + location: sub.layers.start as _, + length: num_layers, + }, + ); + retained_textures.push(tex); + retained_textures.last().unwrap() + } else { + raw + }; + + for layer in layers { + for level in sub.levels.clone() { + let descriptor = metal::RenderPassDescriptor::new().to_owned(); + if base_extent.depth > 1 { + assert_eq!(sub.layers.end, 1); + let depth = base_extent.at_level(level).depth as u64; + descriptor.set_render_target_array_length(depth); + } else if is_layered { + descriptor.set_render_target_array_length(num_layers); + }; + + if image.format_desc.aspects.contains(Aspects::COLOR) { + let attachment = descriptor.color_attachments().object_at(0).unwrap(); + attachment.set_texture(Some(texture)); + attachment.set_level(level as _); + if !is_layered { + attachment.set_slice(layer as _); + } + attachment.set_store_action(metal::MTLStoreAction::Store); + if sub.aspects.contains(Aspects::COLOR) { + attachment.set_load_action(metal::MTLLoadAction::Clear); + attachment.set_clear_color(clear_color.clone()); + } else { + attachment.set_load_action(metal::MTLLoadAction::Load); + } + } else { + assert!(!sub.aspects.contains(Aspects::COLOR)); + }; + + if image.format_desc.aspects.contains(Aspects::DEPTH) { + let attachment = descriptor.depth_attachment().unwrap(); + attachment.set_texture(Some(texture)); + attachment.set_level(level as _); + if !is_layered { + attachment.set_slice(layer as _); + } + attachment.set_store_action(metal::MTLStoreAction::Store); + if sub.aspects.contains(Aspects::DEPTH) { + attachment.set_load_action(metal::MTLLoadAction::Clear); + attachment.set_clear_depth(value.depth_stencil.depth as _); + } else { + attachment.set_load_action(metal::MTLLoadAction::Load); + } + } else { + assert!(!sub.aspects.contains(Aspects::DEPTH)); + }; + + if image.format_desc.aspects.contains(Aspects::STENCIL) { + let attachment = descriptor.stencil_attachment().unwrap(); + attachment.set_texture(Some(texture)); + attachment.set_level(level as _); + if !is_layered { + attachment.set_slice(layer as _); + } + attachment.set_store_action(metal::MTLStoreAction::Store); + if sub.aspects.contains(Aspects::STENCIL) { + attachment.set_load_action(metal::MTLLoadAction::Clear); + attachment.set_clear_stencil(value.depth_stencil.stencil); + } else { + attachment.set_load_action(metal::MTLLoadAction::Load); + } + } else { + assert!(!sub.aspects.contains(Aspects::STENCIL)); + }; + + sink.as_mut().unwrap().quick_render( + "clear_image", + descriptor, + iter::empty(), + ); + } + } + } + }); + } + + unsafe fn clear_attachments(&mut self, clears: T, rects: U) + where + T: IntoIterator, + T::Item: Borrow, + U: IntoIterator, + U::Item: Borrow, + { + // gather vertices/polygons + let de = self.state.target_extent; + let vertices = &mut self.temp.clear_vertices; + vertices.clear(); + + for rect in rects { + let r = rect.borrow(); + for layer in r.layers.clone() { + let data = [ + [r.rect.x, r.rect.y], + [r.rect.x, r.rect.y + r.rect.h], + [r.rect.x + r.rect.w, r.rect.y + r.rect.h], + [r.rect.x + r.rect.w, r.rect.y], + ]; + // now use the hard-coded index array to add 6 vertices to the list + //TODO: could use instancing here + // - with triangle strips + // - with half of the data 
supplied per instance + + for &index in &[0usize, 1, 2, 2, 3, 0] { + let d = data[index]; + vertices.alloc().init(ClearVertex { + pos: [ + d[0] as f32 / de.width as f32, + d[1] as f32 / de.height as f32, + 0.0, //TODO: depth Z + layer as f32, + ], + }); + } + } + } + + let mut vertex_is_dirty = true; + let mut inner = self.inner.borrow_mut(); + let clear_pipes = &self.shared.service_pipes.clears; + let ds_store = &self.shared.service_pipes.depth_stencil_states; + let ds_state; + + // issue a PSO+color switch and a draw for each requested clear + let mut key = ClearKey { + framebuffer_aspects: self.state.target_aspects, + color_formats: [metal::MTLPixelFormat::Invalid; 1], + depth_stencil_format: self + .state + .target_formats + .depth_stencil + .unwrap_or(metal::MTLPixelFormat::Invalid), + target_index: None, + }; + for (out, &(mtl_format, _)) in key + .color_formats + .iter_mut() + .zip(&self.state.target_formats.colors) + { + *out = mtl_format; + } + + for clear in clears { + let pso; // has to live at least as long as all the commands + let depth_stencil; + let raw_value; + + let (com_clear, target_index) = match *clear.borrow() { + com::AttachmentClear::Color { index, value } => { + let channel = self.state.target_formats.colors[index].1; + //Note: technically we should be able to derive the Channel from the + // `value` variant, but this is blocked by the portability that is + // always passing the attachment clears as `ClearColor::Sfloat` atm. + raw_value = com::ClearColor::from(value); + let com = soft::RenderCommand::BindBufferData { + stage: pso::Stage::Fragment, + index: 0, + words: slice::from_raw_parts( + raw_value.float32.as_ptr() as *const u32, + mem::size_of::() / WORD_SIZE, + ), + }; + (com, Some((index as u8, channel))) + } + com::AttachmentClear::DepthStencil { depth, stencil } => { + let mut aspects = Aspects::empty(); + if let Some(value) = depth { + for v in vertices.iter_mut() { + v.pos[2] = value; + } + vertex_is_dirty = true; + aspects |= Aspects::DEPTH; + } + if stencil.is_some() { + //TODO: soft::RenderCommand::SetStencilReference + aspects |= Aspects::STENCIL; + } + depth_stencil = ds_store.get_write(aspects); + let com = soft::RenderCommand::SetDepthStencilState(&**depth_stencil); + (com, None) + } + }; + + key.target_index = target_index; + pso = clear_pipes.get( + key, + &self.shared.service_pipes.library, + &self.shared.device, + &self.shared.private_caps, + ); + + let com_pso = iter::once(soft::RenderCommand::BindPipeline(&**pso)); + let com_rast = iter::once(soft::RenderCommand::SetRasterizerState( + native::RasterizerState::default(), + )); + + let com_vertex = if vertex_is_dirty { + vertex_is_dirty = false; + Some(soft::RenderCommand::BindBufferData { + stage: pso::Stage::Vertex, + index: 0, + words: slice::from_raw_parts( + vertices.as_ptr() as *const u32, + vertices.len() * mem::size_of::() / WORD_SIZE, + ), + }) + } else { + None + }; + + let ext = self.state.target_extent; + let rect = pso::Rect { + x: 0, + y: ext.height as _, + w: ext.width as _, + h: -(ext.height as i16), + }; + let com_viewport = iter::once(soft::RenderCommand::SetViewport(rect, 0.0 .. 1.0)); + let com_scissor = iter::once(soft::RenderCommand::SetScissor(MTLScissorRect { + x: 0, + y: 0, + width: ext.width as _, + height: ext.height as _, + })); + + let com_draw = iter::once(soft::RenderCommand::Draw { + primitive_type: MTLPrimitiveType::Triangle, + vertices: 0 .. vertices.len() as _, + instances: 0 .. 
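+ // Each attachment clear is an ordinary draw: two triangles per
+ // requested rect, a single instance, through the service clear PSO.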
1, + }); + + let commands = iter::once(com_clear) + .chain(com_pso) + .chain(com_rast) + .chain(com_viewport) + .chain(com_scissor) + .chain(com_vertex) + .chain(com_draw); + + inner.sink().pre_render().issue_many(commands); + } + + // reset all the affected states + let (com_pso, com_rast) = self.state.make_pso_commands(); + + let device_lock = &self.shared.device; + let com_ds = match self.state.build_depth_stencil() { + Some(desc) => { + ds_state = ds_store.get(desc, device_lock); + Some(soft::RenderCommand::SetDepthStencilState(&**ds_state)) + } + None => None, + }; + + let com_vs = match ( + self.state.resources_vs.buffers.first(), + self.state.resources_vs.buffer_offsets.first(), + ) { + (Some(&Some(buffer)), Some(&offset)) => Some(soft::RenderCommand::BindBuffer { + stage: pso::Stage::Vertex, + index: 0, + buffer, + offset, + }), + _ => None, + }; + let com_ps = match ( + self.state.resources_ps.buffers.first(), + self.state.resources_ps.buffer_offsets.first(), + ) { + (Some(&Some(buffer)), Some(&offset)) => Some(soft::RenderCommand::BindBuffer { + stage: pso::Stage::Fragment, + index: 0, + buffer, + offset, + }), + _ => None, + }; + + let commands = com_pso + .into_iter() + .chain(com_rast) + .chain(com_ds) + .chain(com_vs) + .chain(com_ps); + + inner.sink().pre_render().issue_many(commands); + + vertices.clear(); + } + + unsafe fn resolve_image( + &mut self, + _src: &native::Image, + _src_layout: Layout, + _dst: &native::Image, + _dst_layout: Layout, + _regions: T, + ) where + T: IntoIterator, + T::Item: Borrow, + { + unimplemented!() + } + + unsafe fn blit_image( + &mut self, + src: &native::Image, + _src_layout: Layout, + dst: &native::Image, + _dst_layout: Layout, + filter: Filter, + regions: T, + ) where + T: IntoIterator, + T::Item: Borrow, + { + let CommandBufferInner { + ref mut retained_textures, + ref mut sink, + .. 
+ } = *self.inner.borrow_mut(); + + let src_cubish = src.view_cube_as_2d(); + let dst_cubish = dst.view_cube_as_2d(); + let dst_layers = dst.kind.num_layers(); + + let vertices = &mut self.temp.blit_vertices; + vertices.clear(); + + let sampler = self.shared.service_pipes.sampler_states.get(filter); + let ds_state; + let key_mtl_type = match dst_cubish { + Some(_) => metal::MTLTextureType::D2Array, + None => dst.mtl_type, + }; + let key = ( + key_mtl_type, + dst.mtl_format, + src.format_desc.aspects, + dst.shader_channel, + ); + let pso = self.shared.service_pipes.blits.get( + key, + &self.shared.service_pipes.library, + &self.shared.device, + &self.shared.private_caps, + ); + + for region in regions { + let r = region.borrow(); + + // layer count must be equal in both subresources + debug_assert_eq!( + r.src_subresource.layers.len(), + r.dst_subresource.layers.len() + ); + debug_assert_eq!(r.src_subresource.aspects, r.dst_subresource.aspects); + debug_assert!(src.format_desc.aspects.contains(r.src_subresource.aspects)); + debug_assert!(dst.format_desc.aspects.contains(r.dst_subresource.aspects)); + + let se = src.kind.extent().at_level(r.src_subresource.level); + let de = dst.kind.extent().at_level(r.dst_subresource.level); + //TODO: support 3D textures + if se.depth != 1 || de.depth != 1 { + warn!( + "3D image blits are not supported properly yet: {:?} -> {:?}", + se, de + ); + } + + let layers = r + .src_subresource + .layers + .clone() + .zip(r.dst_subresource.layers.clone()); + let list = vertices + .entry((r.dst_subresource.aspects, r.dst_subresource.level)) + .or_insert_with(Vec::new); + + for (src_layer, dst_layer) in layers { + // this helper array defines unique data for quad vertices + let data = [ + [ + r.src_bounds.start.x, + r.src_bounds.start.y, + r.dst_bounds.start.x, + r.dst_bounds.start.y, + ], + [ + r.src_bounds.start.x, + r.src_bounds.end.y, + r.dst_bounds.start.x, + r.dst_bounds.end.y, + ], + [ + r.src_bounds.end.x, + r.src_bounds.end.y, + r.dst_bounds.end.x, + r.dst_bounds.end.y, + ], + [ + r.src_bounds.end.x, + r.src_bounds.start.y, + r.dst_bounds.end.x, + r.dst_bounds.start.y, + ], + ]; + // now use the hard-coded index array to add 6 vertices to the list + //TODO: could use instancing here + // - with triangle strips + // - with half of the data supplied per instance + + for &index in &[0usize, 1, 2, 2, 3, 0] { + let d = data[index]; + list.alloc().init(BlitVertex { + uv: [ + d[0] as f32 / se.width as f32, + d[1] as f32 / se.height as f32, + src_layer as f32, + r.src_subresource.level as f32, + ], + pos: [ + d[2] as f32 / de.width as f32, + d[3] as f32 / de.height as f32, + 0.0, + dst_layer as f32, + ], + }); + } + } + } + + // Note: we don't bother to restore any render states here, since we are currently + // outside of a render pass, and the state will be reset automatically once + // we enter the next pass. 
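+ // Worked example of the vertex normalization above (hypothetical numbers):
+ // blitting src_bounds (0,0)..(128,64) of a 256x128 source onto dst_bounds
+ // (0,0)..(256,128) of a 256x128 destination turns the corner (128, 64) into
+ //   uv  = [128.0 / 256.0, 64.0 / 128.0, ..] = [0.5, 0.5, ..]
+ //   pos = [256.0 / 256.0, 128.0 / 128.0, ..] = [1.0, 1.0, ..]
+ // i.e. each channel is normalized against its own image extent, and the
+ // hard-coded [0, 1, 2, 2, 3, 0] index array expands each quad into two
+ // triangles.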
+ + let src_native = AsNative::from(match src_cubish { + Some(ref tex) => tex.as_ref(), + None => src.like.as_texture(), + }); + let prelude = [ + soft::RenderCommand::BindPipeline(&**pso), + soft::RenderCommand::BindSamplers { + stage: pso::Stage::Fragment, + index: 0, + samplers: &[Some(AsNative::from(sampler))][..], + }, + soft::RenderCommand::BindTextures { + stage: pso::Stage::Fragment, + index: 0, + textures: &[Some(src_native)][..], + }, + ]; + + let com_ds = if src + .format_desc + .aspects + .intersects(Aspects::DEPTH | Aspects::STENCIL) + { + ds_state = self + .shared + .service_pipes + .depth_stencil_states + .get_write(src.format_desc.aspects); + Some(soft::RenderCommand::SetDepthStencilState(&**ds_state)) + } else { + None + }; + + let layered_rendering = self.shared.private_caps.layered_rendering; + autoreleasepool(|| { + let dst_new = match dst_cubish { + Some(ref tex) => tex.as_ref(), + None => dst.like.as_texture(), + }; + + for ((aspects, level), list) in vertices.drain() { + let descriptor = metal::RenderPassDescriptor::new().to_owned(); + if layered_rendering { + descriptor.set_render_target_array_length(dst_layers as _); + } + + if aspects.contains(Aspects::COLOR) { + let att = descriptor.color_attachments().object_at(0).unwrap(); + att.set_texture(Some(dst_new)); + att.set_level(level as _); + } + if aspects.contains(Aspects::DEPTH) { + let att = descriptor.depth_attachment().unwrap(); + att.set_texture(Some(dst_new)); + att.set_level(level as _); + } + if aspects.contains(Aspects::STENCIL) { + let att = descriptor.stencil_attachment().unwrap(); + att.set_texture(Some(dst_new)); + att.set_level(level as _); + } + + let ext = dst.kind.extent().at_level(level); + //Note: flipping Y coordinate of the destination here + let rect = pso::Rect { + x: 0, + y: ext.height as _, + w: ext.width as _, + h: -(ext.height as i16), + }; + + let extra = [ + soft::RenderCommand::SetViewport(rect, 0.0 .. 1.0), + soft::RenderCommand::SetScissor(MTLScissorRect { + x: 0, + y: 0, + width: ext.width as _, + height: ext.height as _, + }), + soft::RenderCommand::BindBufferData { + stage: pso::Stage::Vertex, + index: 0, + words: slice::from_raw_parts( + list.as_ptr() as *const u32, + list.len() * mem::size_of::() / WORD_SIZE, + ), + }, + soft::RenderCommand::Draw { + primitive_type: MTLPrimitiveType::Triangle, + vertices: 0 .. list.len() as _, + instances: 0 .. 
1,
+ },
+ ];
+
+ let commands = prelude.iter().chain(&com_ds).chain(&extra).cloned();
+
+ sink.as_mut()
+ .unwrap()
+ .quick_render("blit_image", descriptor, commands);
+ }
+ });
+
+ retained_textures.extend(src_cubish);
+ retained_textures.extend(dst_cubish);
+ }
+
+ unsafe fn bind_index_buffer(&mut self, view: buffer::IndexBufferView) {
+ let (raw, range) = view.buffer.as_bound();
+ assert!(range.start + view.offset < range.end); // conservative
+ self.state.index_buffer = Some(IndexBuffer {
+ buffer: AsNative::from(raw),
+ offset: (range.start + view.offset) as _,
+ stride: match view.index_type {
+ IndexType::U16 => 2,
+ IndexType::U32 => 4,
+ },
+ });
+ }
+
+ unsafe fn bind_vertex_buffers(&mut self, first_binding: pso::BufferIndex, buffers: I)
+ where
+ I: IntoIterator,
+ T: Borrow,
+ {
+ if self.state.vertex_buffers.len() <= first_binding as usize {
+ self.state
+ .vertex_buffers
+ .resize(first_binding as usize + 1, None);
+ }
+ for (i, (buffer, offset)) in buffers.into_iter().enumerate() {
+ let b = buffer.borrow();
+ let (raw, range) = b.as_bound();
+ let buffer_ptr = AsNative::from(raw);
+ let index = first_binding as usize + i;
+ self.state
+ .vertex_buffers
+ .entry(index)
+ .set(Some((buffer_ptr, range.start + offset)));
+ }
+
+ if let Some(command) = self
+ .state
+ .set_vertex_buffers(self.shared.private_caps.max_buffers_per_stage as usize)
+ {
+ self.inner.borrow_mut().sink().pre_render().issue(command);
+ }
+ }
+
+ unsafe fn set_viewports(&mut self, first_viewport: u32, vps: T)
+ where
+ T: IntoIterator,
+ T::Item: Borrow,
+ {
+ // TODO: macOS_GPUFamily1_v3 supports more than one viewport
+ if first_viewport != 0 {
+ panic!("First viewport != 0; Metal supports only one viewport");
+ }
+ let mut vps = vps.into_iter();
+ let vp_borrowable = vps
+ .next()
+ .expect("No viewport provided, Metal supports exactly one");
+ let vp = vp_borrowable.borrow();
+ if vps.next().is_some() {
+ // TODO: should we panic here, or leave the command buffer in an error state?
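+ // Callers must therefore pass exactly one viewport; a minimal sketch of a
+ // valid call (illustrative values, assuming gfx-hal's `pso::Viewport` shape):
+ //   cmd_buffer.set_viewports(0, iter::once(pso::Viewport {
+ //       rect: pso::Rect { x: 0, y: 0, w: 800, h: 600 },
+ //       depth: 0.0 .. 1.0,
+ //   }));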
+ panic!("More than one viewport set; Metal supports only one viewport"); + } + + let com = self.state.set_viewport(vp, self.shared.disabilities); + self.inner.borrow_mut().sink().pre_render().issue(com); + } + + unsafe fn set_scissors(&mut self, first_scissor: u32, rects: T) + where + T: IntoIterator, + T::Item: Borrow, + { + // macOS_GPUFamily1_v3 supports >1 scissor/viewport, todo + if first_scissor != 0 { + panic!("First scissor != 0; Metal supports only one viewport"); + } + let mut rects = rects.into_iter(); + let rect_borrowable = rects + .next() + .expect("No scissor provided, Metal supports exactly one"); + let rect = rect_borrowable.borrow(); + if rects.next().is_some() { + panic!("More than one scissor set; Metal supports only one viewport"); + } + + let com = self.state.set_scissor(*rect); + self.inner.borrow_mut().sink().pre_render().issue(com); + } + + unsafe fn set_blend_constants(&mut self, color: pso::ColorValue) { + let com = self.state.set_blend_color(&color); + self.inner.borrow_mut().sink().pre_render().issue(com); + } + + unsafe fn set_depth_bounds(&mut self, _: Range) { + warn!("Depth bounds test is not supported"); + } + + unsafe fn set_line_width(&mut self, width: f32) { + // Note from the Vulkan spec: + // > If the wide lines feature is not enabled, lineWidth must be 1.0 + // Simply assert and no-op because Metal never exposes `Features::LINE_WIDTH` + assert_eq!(width, 1.0); + } + + unsafe fn set_depth_bias(&mut self, depth_bias: pso::DepthBias) { + let com = self.state.set_depth_bias(&depth_bias); + self.inner.borrow_mut().sink().pre_render().issue(com); + } + + unsafe fn set_stencil_reference(&mut self, faces: pso::Face, value: pso::StencilValue) { + assign_sides(&mut self.state.stencil.reference_values, faces, value); + let com = + soft::RenderCommand::SetStencilReferenceValues(self.state.stencil.reference_values); + self.inner.borrow_mut().sink().pre_render().issue(com); + } + + unsafe fn set_stencil_read_mask(&mut self, faces: pso::Face, value: pso::StencilValue) { + assign_sides(&mut self.state.stencil.read_masks, faces, value); + self.update_depth_stencil(); + } + + unsafe fn set_stencil_write_mask(&mut self, faces: pso::Face, value: pso::StencilValue) { + assign_sides(&mut self.state.stencil.write_masks, faces, value); + self.update_depth_stencil(); + } + + unsafe fn begin_render_pass( + &mut self, + render_pass: &native::RenderPass, + framebuffer: &native::Framebuffer, + _render_area: pso::Rect, + clear_values: T, + first_subpass_contents: com::SubpassContents, + ) where + T: IntoIterator, + T::Item: Borrow, + { + // fill out temporary clear values per attachment + self.temp + .clear_values + .resize(render_pass.attachments.len(), None); + for ((out_val, _), in_val) in self + .temp + .clear_values + .iter_mut() + .zip(&render_pass.attachments) + .filter(|(_, rat)| rat.has_clears()) + .zip(clear_values) + { + *out_val = Some(*in_val.borrow()); + } + + self.state.pending_subpasses.clear(); + self.state.target_extent = framebuffer.extent; + + //TODO: cache produced `RenderPassDescriptor` objects + // we stack the subpasses in the opposite order + for subpass in render_pass.subpasses.iter().rev() { + let mut combined_aspects = Aspects::empty(); + let descriptor = autoreleasepool(|| { + let descriptor = metal::RenderPassDescriptor::new().to_owned(); + descriptor.set_visibility_result_buffer(Some(&self.shared.visibility.buffer)); + if self.shared.private_caps.layered_rendering { + descriptor.set_render_target_array_length(framebuffer.extent.depth as _); + } + + 
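+ // Each color attachment of the subpass below maps 1:1 onto a slot of
+ // `descriptor.color_attachments()`: the texture comes from the framebuffer,
+ // load/store actions are translated from the pass ops, and a resolve
+ // attachment (if any) is routed through `set_resolve_texture`.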
for (i, &(at_id, op_flags, resolve_id)) in subpass.colors.iter().enumerate() { + let rat = &render_pass.attachments[at_id]; + let texture = framebuffer.attachments[at_id].as_ref(); + let desc = descriptor.color_attachments().object_at(i as _).unwrap(); + + combined_aspects |= Aspects::COLOR; + desc.set_texture(Some(texture)); + + if op_flags.contains(native::SubpassOps::LOAD) { + desc.set_load_action(conv::map_load_operation(rat.ops.load)); + if rat.ops.load == AttachmentLoadOp::Clear { + let channel = subpass.target_formats.colors[i].1; + let raw = self.temp.clear_values[at_id].unwrap().color; + desc.set_clear_color(channel.interpret(raw)); + } + } + if let Some(id) = resolve_id { + let resolve = &framebuffer.attachments[id]; + //Note: the selection of levels and slices is already handled by `ImageView` + desc.set_resolve_texture(Some(resolve)); + desc.set_store_action(conv::map_resolved_store_operation(rat.ops.store)); + } else if op_flags.contains(native::SubpassOps::STORE) { + desc.set_store_action(conv::map_store_operation(rat.ops.store)); + } + } + + if let Some((at_id, op_flags)) = subpass.depth_stencil { + let rat = &render_pass.attachments[at_id]; + let texture = framebuffer.attachments[at_id].as_ref(); + let aspects = rat.format.unwrap().surface_desc().aspects; + combined_aspects |= aspects; + + if aspects.contains(Aspects::DEPTH) { + let desc = descriptor.depth_attachment().unwrap(); + desc.set_texture(Some(texture)); + + if op_flags.contains(native::SubpassOps::LOAD) { + desc.set_load_action(conv::map_load_operation(rat.ops.load)); + if rat.ops.load == AttachmentLoadOp::Clear { + let raw = self.temp.clear_values[at_id].unwrap().depth_stencil; + desc.set_clear_depth(raw.depth as f64); + } + } + if op_flags.contains(native::SubpassOps::STORE) { + desc.set_store_action(conv::map_store_operation(rat.ops.store)); + } + } + if aspects.contains(Aspects::STENCIL) { + let desc = descriptor.stencil_attachment().unwrap(); + desc.set_texture(Some(texture)); + + if op_flags.contains(native::SubpassOps::LOAD) { + desc.set_load_action(conv::map_load_operation(rat.stencil_ops.load)); + if rat.stencil_ops.load == AttachmentLoadOp::Clear { + let raw = self.temp.clear_values[at_id].unwrap().depth_stencil; + desc.set_clear_stencil(raw.stencil); + } + } + if op_flags.contains(native::SubpassOps::STORE) { + desc.set_store_action(conv::map_store_operation(rat.stencil_ops.store)); + } + } + } + + descriptor + }); + + self.state.pending_subpasses.alloc().init(SubpassInfo { + descriptor, + combined_aspects, + formats: subpass.target_formats.clone(), + }); + } + + self.inner.borrow_mut().sink().label(&render_pass.name); + self.next_subpass(first_subpass_contents); + } + + unsafe fn next_subpass(&mut self, _contents: com::SubpassContents) { + let sin = self.state.pending_subpasses.pop().unwrap(); + + self.state.render_pso_is_compatible = match self.state.render_pso { + Some(ref ps) => ps.formats == sin.formats, + None => false, + }; + self.state.target_aspects = sin.combined_aspects; + self.state.target_formats.copy_from(&sin.formats); + + let ds_store = &self.shared.service_pipes.depth_stencil_states; + let ds_state; + let com_ds = if sin + .combined_aspects + .intersects(Aspects::DEPTH | Aspects::STENCIL) + { + match self.state.build_depth_stencil() { + Some(desc) => { + ds_state = ds_store.get(desc, &self.shared.device); + Some(soft::RenderCommand::SetDepthStencilState(&**ds_state)) + } + None => None, + } + } else { + None + }; + + let init_commands = self + .state + 
.make_render_commands(sin.combined_aspects) + .chain(com_ds); + + autoreleasepool(|| { + self.inner + .borrow_mut() + .sink() + .switch_render(sin.descriptor) + .issue_many(init_commands); + }); + } + + unsafe fn end_render_pass(&mut self) { + self.inner.borrow_mut().sink().stop_encoding(); + } + + unsafe fn bind_graphics_pipeline(&mut self, pipeline: &native::GraphicsPipeline) { + let mut inner = self.inner.borrow_mut(); + let mut pre = inner.sink().pre_render(); + + if let Some(ref stencil) = pipeline.depth_stencil_desc.stencil { + if let pso::State::Static(value) = stencil.read_masks { + self.state.stencil.read_masks = value; + } + if let pso::State::Static(value) = stencil.write_masks { + self.state.stencil.write_masks = value; + } + if let pso::State::Static(value) = stencil.reference_values { + self.state.stencil.reference_values = value; + pre.issue(soft::RenderCommand::SetStencilReferenceValues(value)); + } + } + + self.state.render_pso_is_compatible = + pipeline.attachment_formats == self.state.target_formats; + let set_pipeline = match self.state.render_pso { + Some(ref ps) if ps.raw.as_ptr() == pipeline.raw.as_ptr() => false, + Some(ref mut ps) => { + ps.raw = pipeline.raw.to_owned(); + ps.vertex_buffers.clear(); + ps.vertex_buffers + .extend(pipeline.vertex_buffers.iter().cloned().map(Some)); + ps.ds_desc = pipeline.depth_stencil_desc; + ps.formats.copy_from(&pipeline.attachment_formats); + true + } + None => { + self.state.render_pso = Some(RenderPipelineState { + raw: pipeline.raw.to_owned(), + ds_desc: pipeline.depth_stencil_desc, + vertex_buffers: pipeline.vertex_buffers.iter().cloned().map(Some).collect(), + formats: pipeline.attachment_formats.clone(), + }); + true + } + }; + + if self.state.render_pso_is_compatible { + if set_pipeline { + self.state.rasterizer_state = pipeline.rasterizer_state.clone(); + self.state.primitive_type = pipeline.primitive_type; + + pre.issue(soft::RenderCommand::BindPipeline(&*pipeline.raw)); + if let Some(ref rs) = pipeline.rasterizer_state { + pre.issue(soft::RenderCommand::SetRasterizerState(rs.clone())) + } + // re-bind vertex buffers + if let Some(command) = self + .state + .set_vertex_buffers(self.shared.private_caps.max_buffers_per_stage as usize) + { + pre.issue(command); + } + // re-bind push constants + if let Some(pc) = pipeline.vs_pc_info { + if Some(pc) != self.state.resources_vs.push_constants { + // if we don't have enough constants, then binding will follow + if pc.count as usize <= self.state.push_constants.len() { + pre.issue(self.state.push_vs_constants(pc)); + } + } + } + if let Some(pc) = pipeline.ps_pc_info { + if Some(pc) != self.state.resources_ps.push_constants + && pc.count as usize <= self.state.push_constants.len() + { + pre.issue(self.state.push_ps_constants(pc)); + } + } + } else { + debug_assert_eq!(self.state.rasterizer_state, pipeline.rasterizer_state); + debug_assert_eq!(self.state.primitive_type, pipeline.primitive_type); + } + + if let Some(desc) = self.state.build_depth_stencil() { + let ds_store = &self.shared.service_pipes.depth_stencil_states; + let state = &**ds_store.get(desc, &self.shared.device); + pre.issue(soft::RenderCommand::SetDepthStencilState(state)); + } + } else { + // This may be tricky: we expect either another pipeline to be bound + // (this overwriting these), or a new render pass started (thus using these). 
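+ // Hypothetical sequence illustrating the note above: pipeline A is bound
+ // while its attachment formats don't match the current subpass, so only the
+ // cached `rasterizer_state`/`primitive_type` below are updated; either a
+ // compatible pipeline B is bound later (overwriting the cache), or a new
+ // render pass starts and `make_render_commands` picks the cache up.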
+ self.state.rasterizer_state = pipeline.rasterizer_state.clone(); + self.state.primitive_type = pipeline.primitive_type; + } + + if let pso::State::Static(value) = pipeline.depth_bias { + self.state.depth_bias = value; + pre.issue(soft::RenderCommand::SetDepthBias(value)); + } + + if let Some(ref vp) = pipeline.baked_states.viewport { + pre.issue(self.state.set_viewport(vp, self.shared.disabilities)); + } + if let Some(rect) = pipeline.baked_states.scissor { + pre.issue(self.state.set_scissor(rect)); + } + if let Some(ref color) = pipeline.baked_states.blend_color { + pre.issue(self.state.set_blend_color(color)); + } + } + + unsafe fn bind_graphics_descriptor_sets( + &mut self, + pipe_layout: &native::PipelineLayout, + first_set: usize, + sets: I, + dynamic_offsets: J, + ) where + I: IntoIterator, + I::Item: Borrow, + J: IntoIterator, + J::Item: Borrow, + { + let vbuf_count = self + .state + .render_pso + .as_ref() + .map_or(0, |pso| pso.vertex_buffers.len()) as ResourceIndex; + assert!( + pipe_layout.total.vs.buffers + vbuf_count + <= self.shared.private_caps.max_buffers_per_stage + ); + + self.state.resources_vs.pre_allocate(&pipe_layout.total.vs); + self.state.resources_ps.pre_allocate(&pipe_layout.total.ps); + + let mut dynamic_offset_iter = dynamic_offsets.into_iter(); + let mut inner = self.inner.borrow_mut(); + let mut pre = inner.sink().pre_render(); + let mut bind_range = { + let first = &pipe_layout.infos[first_set].offsets; + native::MultiStageData { + vs: first.vs.map(|&i| i .. i), + ps: first.ps.map(|&i| i .. i), + cs: first.cs.map(|&i| i .. i), + } + }; + for ((info, desc_set), cached_ds) in pipe_layout.infos[first_set ..] + .iter() + .zip(sets) + .zip(self.state.descriptor_sets[first_set ..].iter_mut()) + { + match *desc_set.borrow() { + native::DescriptorSet::Emulated { + ref pool, + ref layouts, + ref resources, + } => { + let data = pool.read(); + + let end_vs_offsets = self.state.resources_vs.bind_set( + pso::ShaderStageFlags::VERTEX, + &*data, + info.offsets.vs.clone(), + layouts, + resources, + ); + bind_range.vs.expand(end_vs_offsets); + let end_ps_offsets = self.state.resources_ps.bind_set( + pso::ShaderStageFlags::FRAGMENT, + &*data, + info.offsets.ps.clone(), + layouts, + resources, + ); + bind_range.ps.expand(end_ps_offsets); + + for (dyn_data, offset) in info + .dynamic_buffers + .iter() + .zip(dynamic_offset_iter.by_ref()) + { + if dyn_data.vs != !0 { + self.state.resources_vs.buffer_offsets[dyn_data.vs as usize] += + *offset.borrow() as buffer::Offset; + } + if dyn_data.ps != !0 { + self.state.resources_ps.buffer_offsets[dyn_data.ps as usize] += + *offset.borrow() as buffer::Offset; + } + } + } + native::DescriptorSet::ArgumentBuffer { + ref raw, + raw_offset, + ref pool, + ref range, + stage_flags, + .. 
+ } => { + //Note: this is incompatible with the binding scheme below + if stage_flags.contains(pso::ShaderStageFlags::VERTEX) { + let index = info.offsets.vs.buffers; + self.state.resources_vs.buffers[index as usize] = + Some(AsNative::from(raw.as_ref())); + self.state.resources_vs.buffer_offsets[index as usize] = raw_offset; + pre.issue(soft::RenderCommand::BindBuffer { + stage: pso::Stage::Vertex, + index, + buffer: AsNative::from(raw.as_ref()), + offset: raw_offset, + }); + } + if stage_flags.contains(pso::ShaderStageFlags::FRAGMENT) { + let index = info.offsets.ps.buffers; + self.state.resources_ps.buffers[index as usize] = + Some(AsNative::from(raw.as_ref())); + self.state.resources_ps.buffer_offsets[index as usize] = raw_offset; + pre.issue(soft::RenderCommand::BindBuffer { + stage: pso::Stage::Fragment, + index, + buffer: AsNative::from(raw.as_ref()), + offset: raw_offset, + }); + } + if stage_flags + .intersects(pso::ShaderStageFlags::VERTEX | pso::ShaderStageFlags::FRAGMENT) + { + cached_ds.graphics_resources.clear(); + cached_ds.graphics_resources.extend( + pool.read().resources[range.start as usize .. range.end as usize] + .iter() + .filter_map(|ur| { + ptr::NonNull::new(ur.ptr).map(|res| (res, ur.usage)) + }), + ); + pre.issue_many(cached_ds.graphics_resources.iter().map( + |&(resource, usage)| soft::RenderCommand::UseResource { + resource, + usage, + }, + )); + } + } + } + } + + // now bind all the affected resources + for (stage, cache, range) in + iter::once((pso::Stage::Vertex, &self.state.resources_vs, bind_range.vs)).chain( + iter::once(( + pso::Stage::Fragment, + &self.state.resources_ps, + bind_range.ps, + )), + ) + { + if range.textures.start != range.textures.end { + pre.issue(soft::RenderCommand::BindTextures { + stage, + index: range.textures.start, + textures: &cache.textures + [range.textures.start as usize .. range.textures.end as usize], + }); + } + if range.samplers.start != range.samplers.end { + pre.issue(soft::RenderCommand::BindSamplers { + stage, + index: range.samplers.start, + samplers: &cache.samplers + [range.samplers.start as usize .. range.samplers.end as usize], + }); + } + if range.buffers.start != range.buffers.end { + pre.issue(soft::RenderCommand::BindBuffers { + stage, + index: range.buffers.start, + buffers: { + let range = range.buffers.start as usize .. 
range.buffers.end as usize; + (&cache.buffers[range.clone()], &cache.buffer_offsets[range]) + }, + }); + } + } + } + + unsafe fn bind_compute_pipeline(&mut self, pipeline: &native::ComputePipeline) { + self.state.compute_pso = Some(pipeline.raw.clone()); + self.state.work_group_size = pipeline.work_group_size; + + let mut inner = self.inner.borrow_mut(); + let mut pre = inner.sink().pre_compute(); + + pre.issue(soft::ComputeCommand::BindPipeline(&*pipeline.raw)); + + if let Some(pc) = pipeline.pc_info { + if Some(pc) != self.state.resources_cs.push_constants + && pc.count as usize <= self.state.push_constants.len() + { + pre.issue(self.state.push_cs_constants(pc)); + } + } + } + + unsafe fn bind_compute_descriptor_sets( + &mut self, + pipe_layout: &native::PipelineLayout, + first_set: usize, + sets: I, + dynamic_offsets: J, + ) where + I: IntoIterator, + I::Item: Borrow, + J: IntoIterator, + J::Item: Borrow, + { + self.state.resources_cs.pre_allocate(&pipe_layout.total.cs); + + let mut dynamic_offset_iter = dynamic_offsets.into_iter(); + let mut inner = self.inner.borrow_mut(); + let mut pre = inner.sink().pre_compute(); + let cache = &mut self.state.resources_cs; + let mut bind_range = pipe_layout.infos[first_set].offsets.cs.map(|&i| i .. i); + + for ((info, desc_set), cached_ds) in pipe_layout.infos[first_set ..] + .iter() + .zip(sets) + .zip(self.state.descriptor_sets[first_set ..].iter_mut()) + { + let res_offset = &info.offsets.cs; + match *desc_set.borrow() { + native::DescriptorSet::Emulated { + ref pool, + ref layouts, + ref resources, + } => { + let data = pool.read(); + + let end_offsets = cache.bind_set( + pso::ShaderStageFlags::COMPUTE, + &*data, + res_offset.clone(), + layouts, + resources, + ); + bind_range.expand(end_offsets); + + for (dyn_data, offset) in info + .dynamic_buffers + .iter() + .zip(dynamic_offset_iter.by_ref()) + { + if dyn_data.cs != !0 { + cache.buffer_offsets[dyn_data.cs as usize] += + *offset.borrow() as buffer::Offset; + } + } + } + native::DescriptorSet::ArgumentBuffer { + ref raw, + raw_offset, + ref pool, + ref range, + stage_flags, + .. + } => { + if stage_flags.contains(pso::ShaderStageFlags::COMPUTE) { + let index = res_offset.buffers; + cache.buffers[index as usize] = Some(AsNative::from(raw.as_ref())); + cache.buffer_offsets[index as usize] = raw_offset; + pre.issue(soft::ComputeCommand::BindBuffer { + index, + buffer: AsNative::from(raw.as_ref()), + offset: raw_offset, + }); + + cached_ds.compute_resources.clear(); + cached_ds.compute_resources.extend( + pool.read().resources[range.start as usize .. range.end as usize] + .iter() + .filter_map(|ur| { + ptr::NonNull::new(ur.ptr).map(|res| (res, ur.usage)) + }), + ); + pre.issue_many(cached_ds.compute_resources.iter().map( + |&(resource, usage)| soft::ComputeCommand::UseResource { + resource, + usage, + }, + )); + } + } + } + } + + // now bind all the affected resources + if bind_range.textures.start != bind_range.textures.end { + pre.issue(soft::ComputeCommand::BindTextures { + index: bind_range.textures.start, + textures: &cache.textures + [bind_range.textures.start as usize .. bind_range.textures.end as usize], + }); + } + if bind_range.samplers.start != bind_range.samplers.end { + pre.issue(soft::ComputeCommand::BindSamplers { + index: bind_range.samplers.start, + samplers: &cache.samplers + [bind_range.samplers.start as usize .. 
bind_range.samplers.end as usize], + }); + } + if bind_range.buffers.start != bind_range.buffers.end { + pre.issue(soft::ComputeCommand::BindBuffers { + index: bind_range.buffers.start, + buffers: { + let range = + bind_range.buffers.start as usize .. bind_range.buffers.end as usize; + (&cache.buffers[range.clone()], &cache.buffer_offsets[range]) + }, + }); + } + } + + unsafe fn dispatch(&mut self, count: WorkGroupCount) { + let mut inner = self.inner.borrow_mut(); + let (mut pre, init) = inner.sink().switch_compute(); + if init { + pre.issue_many(self.state.make_compute_commands()); + } + + pre.issue(soft::ComputeCommand::Dispatch { + wg_size: self.state.work_group_size, + wg_count: MTLSize { + width: count[0] as _, + height: count[1] as _, + depth: count[2] as _, + }, + }); + } + + unsafe fn dispatch_indirect(&mut self, buffer: &native::Buffer, offset: buffer::Offset) { + let mut inner = self.inner.borrow_mut(); + let (mut pre, init) = inner.sink().switch_compute(); + if init { + pre.issue_many(self.state.make_compute_commands()); + } + + let (raw, range) = buffer.as_bound(); + assert!(range.start + offset < range.end); + + pre.issue(soft::ComputeCommand::DispatchIndirect { + wg_size: self.state.work_group_size, + buffer: AsNative::from(raw), + offset: range.start + offset, + }); + } + + unsafe fn copy_buffer(&mut self, src: &native::Buffer, dst: &native::Buffer, regions: T) + where + T: IntoIterator, + T::Item: Borrow, + { + let pso = &*self.shared.service_pipes.copy_buffer; + let wg_size = MTLSize { + width: pso.thread_execution_width(), + height: 1, + depth: 1, + }; + + let (src_raw, src_range) = src.as_bound(); + let (dst_raw, dst_range) = dst.as_bound(); + + let mut compute_datas = Vec::new(); + let mut inner = self.inner.borrow_mut(); + let mut blit_commands = Vec::new(); + let mut compute_commands = vec![ + //TODO: get rid of heap + soft::ComputeCommand::BindPipeline(pso), + ]; + + for region in regions { + let r = region.borrow(); + if r.size % WORD_SIZE as u64 == 0 + && r.src % WORD_SIZE as u64 == 0 + && r.dst % WORD_SIZE as u64 == 0 + { + blit_commands.alloc().init(soft::BlitCommand::CopyBuffer { + src: AsNative::from(src_raw), + dst: AsNative::from(dst_raw), + region: com::BufferCopy { + src: r.src + src_range.start, + dst: r.dst + dst_range.start, + size: r.size, + }, + }); + } else { + // not natively supported, going through a compute shader + assert_eq!(0, r.size >> 32); + let src_aligned = r.src & !(WORD_SIZE as u64 - 1); + let dst_aligned = r.dst & !(WORD_SIZE as u64 - 1); + let offsets = (r.src - src_aligned) | ((r.dst - dst_aligned) << 16); + let size_and_offsets = [r.size as u32, offsets as u32]; + compute_datas.push(Box::new(size_and_offsets)); + + let wg_count = MTLSize { + width: (r.size + wg_size.width - 1) / wg_size.width, + height: 1, + depth: 1, + }; + + compute_commands + .alloc() + .init(soft::ComputeCommand::BindBuffer { + index: 0, + buffer: AsNative::from(dst_raw), + offset: dst_aligned + dst_range.start, + }); + compute_commands + .alloc() + .init(soft::ComputeCommand::BindBuffer { + index: 1, + buffer: AsNative::from(src_raw), + offset: src_aligned + src_range.start, + }); + compute_commands + .alloc() + .init(soft::ComputeCommand::BindBufferData { + index: 2, + // Rust doesn't see that compute_datas will not lose this + // item and the boxed contents can't be moved otherwise. 
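+ // (The Box keeps `size_and_offsets` at a stable heap address even when
+ // `compute_datas` reallocates, which is what makes the transmuted slice
+ // below sound.) Worked example of the `offsets` packing above, with
+ // hypothetical values: r.src = 0x1003 gives src_aligned = 0x1000 and a
+ // low-half offset of 3; r.dst = 0x2006 gives dst_aligned = 0x2004 and a
+ // high-half offset of 2; so offsets = 3 | (2 << 16) = 0x0002_0003.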
+ words: mem::transmute(&compute_datas.last().unwrap()[..]), + }); + compute_commands + .alloc() + .init(soft::ComputeCommand::Dispatch { wg_size, wg_count }); + } + } + + let sink = inner.sink(); + if !blit_commands.is_empty() { + sink.blit_commands(blit_commands.into_iter()); + } + if compute_commands.len() > 1 { + // first is bind PSO + sink.quick_compute("copy_buffer", compute_commands.into_iter()); + } + } + + unsafe fn copy_image( + &mut self, + src: &native::Image, + src_layout: Layout, + dst: &native::Image, + dst_layout: Layout, + regions: T, + ) where + T: IntoIterator, + T::Item: Borrow, + { + match (&src.like, &dst.like) { + (&native::ImageLike::Unbound { .. }, _) | (_, &native::ImageLike::Unbound { .. }) => { + panic!("Unexpected Image::Unbound"); + } + ( + &native::ImageLike::Texture(ref src_raw), + &native::ImageLike::Texture(ref dst_raw), + ) => { + let CommandBufferInner { + ref mut retained_textures, + ref mut sink, + .. + } = *self.inner.borrow_mut(); + + let new_src = if src.mtl_format == dst.mtl_format { + src_raw + } else { + assert_eq!(src.format_desc.bits, dst.format_desc.bits); + let tex = src_raw.new_texture_view(dst.mtl_format); + retained_textures.push(tex); + retained_textures.last().unwrap() + }; + + let commands = regions.into_iter().filter_map(|region| { + let r = region.borrow(); + if r.extent.is_empty() { + None + } else { + Some(soft::BlitCommand::CopyImage { + src: AsNative::from(new_src.as_ref()), + dst: AsNative::from(dst_raw.as_ref()), + region: r.clone(), + }) + } + }); + + sink.as_mut().unwrap().blit_commands(commands); + } + (&native::ImageLike::Buffer(ref src_buffer), &native::ImageLike::Texture(_)) => { + let src_extent = src.kind.extent(); + self.copy_buffer_to_image( + src_buffer, + dst, + dst_layout, + regions.into_iter().map(|region| { + let r = region.borrow(); + com::BufferImageCopy { + buffer_offset: src.byte_offset(r.src_offset), + buffer_width: src_extent.width, + buffer_height: src_extent.height, + image_layers: r.dst_subresource.clone(), + image_offset: r.dst_offset, + image_extent: r.extent, + } + }), + ) + } + (&native::ImageLike::Texture(_), &native::ImageLike::Buffer(ref dst_buffer)) => { + let dst_extent = dst.kind.extent(); + self.copy_image_to_buffer( + src, + src_layout, + dst_buffer, + regions.into_iter().map(|region| { + let r = region.borrow(); + com::BufferImageCopy { + buffer_offset: dst.byte_offset(r.dst_offset), + buffer_width: dst_extent.width, + buffer_height: dst_extent.height, + image_layers: r.src_subresource.clone(), + image_offset: r.src_offset, + image_extent: r.extent, + } + }), + ) + } + ( + &native::ImageLike::Buffer(ref src_buffer), + &native::ImageLike::Buffer(ref dst_buffer), + ) => self.copy_buffer( + src_buffer, + dst_buffer, + regions.into_iter().map(|region| { + let r = region.borrow(); + com::BufferCopy { + src: src.byte_offset(r.src_offset), + dst: dst.byte_offset(r.dst_offset), + size: src.byte_extent(r.extent), + } + }), + ), + } + } + + unsafe fn copy_buffer_to_image( + &mut self, + src: &native::Buffer, + dst: &native::Image, + _dst_layout: Layout, + regions: T, + ) where + T: IntoIterator, + T::Item: Borrow, + { + match dst.like { + native::ImageLike::Unbound { .. 
} => { + panic!("Unexpected Image::Unbound"); + } + native::ImageLike::Texture(ref dst_raw) => { + let (src_raw, src_range) = src.as_bound(); + let commands = regions.into_iter().filter_map(|region| { + let r = region.borrow(); + if r.image_extent.is_empty() { + None + } else { + Some(soft::BlitCommand::CopyBufferToImage { + src: AsNative::from(src_raw), + dst: AsNative::from(dst_raw.as_ref()), + dst_desc: dst.format_desc, + region: com::BufferImageCopy { + buffer_offset: r.buffer_offset + src_range.start, + ..r.clone() + }, + }) + } + }); + self.inner.borrow_mut().sink().blit_commands(commands); + } + native::ImageLike::Buffer(ref dst_buffer) => self.copy_buffer( + src, + dst_buffer, + regions.into_iter().map(|region| { + let r = region.borrow(); + com::BufferCopy { + src: r.buffer_offset, + dst: dst.byte_offset(r.image_offset), + size: dst.byte_extent(r.image_extent), + } + }), + ), + } + } + + unsafe fn copy_image_to_buffer( + &mut self, + src: &native::Image, + _src_layout: Layout, + dst: &native::Buffer, + regions: T, + ) where + T: IntoIterator, + T::Item: Borrow, + { + match src.like { + native::ImageLike::Unbound { .. } => { + panic!("Unexpected Image::Unbound"); + } + native::ImageLike::Texture(ref src_raw) => { + let (dst_raw, dst_range) = dst.as_bound(); + let commands = regions.into_iter().filter_map(|region| { + let r = region.borrow(); + if r.image_extent.is_empty() { + None + } else { + Some(soft::BlitCommand::CopyImageToBuffer { + src: AsNative::from(src_raw.as_ref()), + src_desc: src.format_desc, + dst: AsNative::from(dst_raw), + region: com::BufferImageCopy { + buffer_offset: r.buffer_offset + dst_range.start, + ..r.clone() + }, + }) + } + }); + self.inner.borrow_mut().sink().blit_commands(commands); + } + native::ImageLike::Buffer(ref src_buffer) => self.copy_buffer( + src_buffer, + dst, + regions.into_iter().map(|region| { + let r = region.borrow(); + com::BufferCopy { + src: src.byte_offset(r.image_offset), + dst: r.buffer_offset, + size: src.byte_extent(r.image_extent), + } + }), + ), + } + } + + unsafe fn draw(&mut self, vertices: Range, instances: Range) { + debug_assert!(self.state.render_pso_is_compatible); + if instances.start == instances.end { + return; + } + + let command = soft::RenderCommand::Draw { + primitive_type: self.state.primitive_type, + vertices, + instances, + }; + self.inner.borrow_mut().sink().pre_render().issue(command); + } + + unsafe fn draw_indexed( + &mut self, + indices: Range, + base_vertex: VertexOffset, + instances: Range, + ) { + debug_assert!(self.state.render_pso_is_compatible); + if instances.start == instances.end { + return; + } + + let command = soft::RenderCommand::DrawIndexed { + primitive_type: self.state.primitive_type, + index: self + .state + .index_buffer + .clone() + .expect("must bind index buffer"), + indices, + base_vertex, + instances, + }; + self.inner.borrow_mut().sink().pre_render().issue(command); + } + + unsafe fn draw_indirect( + &mut self, + buffer: &native::Buffer, + offset: buffer::Offset, + count: DrawCount, + stride: u32, + ) { + assert_eq!(offset % WORD_ALIGNMENT, 0); + assert_eq!(stride % WORD_ALIGNMENT as u32, 0); + debug_assert!(self.state.render_pso_is_compatible); + let (raw, range) = buffer.as_bound(); + + let commands = (0 .. 
count).map(|i| soft::RenderCommand::DrawIndirect { + primitive_type: self.state.primitive_type, + buffer: AsNative::from(raw), + offset: range.start + offset + (i * stride) as buffer::Offset, + }); + + self.inner + .borrow_mut() + .sink() + .pre_render() + .issue_many(commands); + } + + unsafe fn draw_indexed_indirect( + &mut self, + buffer: &native::Buffer, + offset: buffer::Offset, + count: DrawCount, + stride: u32, + ) { + assert_eq!(offset % WORD_ALIGNMENT, 0); + assert_eq!(stride % WORD_ALIGNMENT as u32, 0); + debug_assert!(self.state.render_pso_is_compatible); + let (raw, range) = buffer.as_bound(); + + let commands = (0 .. count).map(|i| soft::RenderCommand::DrawIndexedIndirect { + primitive_type: self.state.primitive_type, + index: self + .state + .index_buffer + .clone() + .expect("must bind index buffer"), + buffer: AsNative::from(raw), + offset: range.start + offset + (i * stride) as buffer::Offset, + }); + + self.inner + .borrow_mut() + .sink() + .pre_render() + .issue_many(commands); + } + + unsafe fn set_event(&mut self, event: &native::Event, _: pso::PipelineStage) { + self.inner + .borrow_mut() + .events + .push((Arc::clone(&event.0), true)); + } + + unsafe fn reset_event(&mut self, event: &native::Event, _: pso::PipelineStage) { + self.inner + .borrow_mut() + .events + .push((Arc::clone(&event.0), false)); + } + + unsafe fn wait_events<'a, I, J>( + &mut self, + events: I, + stages: Range, + barriers: J, + ) where + I: IntoIterator, + I::Item: Borrow, + J: IntoIterator, + J::Item: Borrow>, + { + let mut need_barrier = false; + + for event in events { + let mut inner = self.inner.borrow_mut(); + let event = &event.borrow().0; + let is_local = inner + .events + .iter() + .rfind(|ev| Arc::ptr_eq(&ev.0, event)) + .map_or(false, |ev| ev.1); + if is_local { + need_barrier = true; + } else { + inner.host_events.push(Arc::clone(event)); + } + } + + if need_barrier { + self.pipeline_barrier(stages, memory::Dependencies::empty(), barriers); + } + } + + unsafe fn begin_query(&mut self, query: query::Query, flags: query::ControlFlags) { + match query.pool { + native::QueryPool::Occlusion(ref pool_range) => { + debug_assert!(pool_range.start + query.id < pool_range.end); + let offset = (query.id + pool_range.start) as buffer::Offset + * mem::size_of::() as buffer::Offset; + let mode = if flags.contains(query::ControlFlags::PRECISE) { + metal::MTLVisibilityResultMode::Counting + } else { + metal::MTLVisibilityResultMode::Boolean + }; + + let com = self.state.set_visibility_query(mode, offset); + self.inner.borrow_mut().sink().pre_render().issue(com); + } + } + } + + unsafe fn end_query(&mut self, query: query::Query) { + match query.pool { + native::QueryPool::Occlusion(ref pool_range) => { + let mut inner = self.inner.borrow_mut(); + debug_assert!(pool_range.start + query.id < pool_range.end); + inner + .active_visibility_queries + .push(pool_range.start + query.id); + + let com = self + .state + .set_visibility_query(metal::MTLVisibilityResultMode::Disabled, 0); + inner.sink().pre_render().issue(com); + } + } + } + + unsafe fn reset_query_pool(&mut self, pool: &native::QueryPool, queries: Range) { + let visibility = &self.shared.visibility; + match *pool { + native::QueryPool::Occlusion(ref pool_range) => { + let mut inner = self.inner.borrow_mut(); + debug_assert!(pool_range.start + queries.end <= pool_range.end); + inner.active_visibility_queries.retain(|&id| { + id < pool_range.start + queries.start || id >= pool_range.start + queries.end + }); + + let size_data = mem::size_of::() 
as buffer::Offset; + let offset_data = pool_range.start as buffer::Offset * size_data; + let command_data = soft::BlitCommand::FillBuffer { + dst: AsNative::from(visibility.buffer.as_ref()), + range: offset_data + queries.start as buffer::Offset * size_data + .. offset_data + queries.end as buffer::Offset * size_data, + value: 0, + }; + + let size_meta = mem::size_of::() as buffer::Offset; + let offset_meta = + visibility.availability_offset + pool_range.start as buffer::Offset * size_meta; + let command_meta = soft::BlitCommand::FillBuffer { + dst: AsNative::from(visibility.buffer.as_ref()), + range: offset_meta + queries.start as buffer::Offset * size_meta + .. offset_meta + queries.end as buffer::Offset * size_meta, + value: 0, + }; + + let commands = iter::once(command_data).chain(iter::once(command_meta)); + inner.sink().blit_commands(commands); + } + } + } + + unsafe fn copy_query_pool_results( + &mut self, + pool: &native::QueryPool, + queries: Range, + buffer: &native::Buffer, + offset: buffer::Offset, + stride: buffer::Offset, + flags: query::ResultFlags, + ) { + let (raw, range) = buffer.as_bound(); + match *pool { + native::QueryPool::Occlusion(ref pool_range) => { + let visibility = &self.shared.visibility; + let size_data = mem::size_of::() as buffer::Offset; + let size_meta = mem::size_of::() as buffer::Offset; + + if stride == size_data + && flags.contains(query::ResultFlags::BITS_64) + && !flags.contains(query::ResultFlags::WITH_AVAILABILITY) + { + // if stride is matching, copy everything in one go + let com = soft::BlitCommand::CopyBuffer { + src: AsNative::from(visibility.buffer.as_ref()), + dst: AsNative::from(raw), + region: com::BufferCopy { + src: (pool_range.start + queries.start) as buffer::Offset * size_data, + dst: range.start + offset, + size: (queries.end - queries.start) as buffer::Offset * size_data, + }, + }; + self.inner + .borrow_mut() + .sink() + .blit_commands(iter::once(com)); + } else { + // copy parts of individual entries + let size_payload = if flags.contains(query::ResultFlags::BITS_64) { + mem::size_of::() as buffer::Offset + } else { + mem::size_of::() as buffer::Offset + }; + let commands = (0 .. queries.end - queries.start).flat_map(|i| { + let absolute_index = + (pool_range.start + queries.start + i) as buffer::Offset; + let dst_offset = range.start + offset + i as buffer::Offset * stride; + let com_data = soft::BlitCommand::CopyBuffer { + src: AsNative::from(visibility.buffer.as_ref()), + dst: AsNative::from(raw), + region: com::BufferCopy { + src: absolute_index * size_data, + dst: dst_offset, + size: size_payload, + }, + }; + + let (com_avail, com_pad) = if flags.contains( + query::ResultFlags::WITH_AVAILABILITY | query::ResultFlags::WAIT, + ) { + // Technically waiting is a no-op on a single queue. However, + // the client expects the availability to be set regardless. + let com = soft::BlitCommand::FillBuffer { + dst: AsNative::from(raw), + range: dst_offset + size_payload .. 
dst_offset + 2 * size_payload, + value: !0, + }; + (Some(com), None) + } else if flags.contains(query::ResultFlags::WITH_AVAILABILITY) { + let com_avail = soft::BlitCommand::CopyBuffer { + src: AsNative::from(visibility.buffer.as_ref()), + dst: AsNative::from(raw), + region: com::BufferCopy { + src: visibility.availability_offset + + absolute_index * size_meta, + dst: dst_offset + size_payload, + size: size_meta, + }, + }; + // An extra padding is required if the client expects 64 bits availability without a wait + let com_pad = if flags.contains(query::ResultFlags::BITS_64) { + Some(soft::BlitCommand::FillBuffer { + dst: AsNative::from(raw), + range: dst_offset + size_payload + size_meta + .. dst_offset + 2 * size_payload, + value: 0, + }) + } else { + None + }; + (Some(com_avail), com_pad) + } else { + (None, None) + }; + + iter::once(com_data).chain(com_avail).chain(com_pad) + }); + self.inner.borrow_mut().sink().blit_commands(commands); + } + } + } + } + + unsafe fn write_timestamp(&mut self, _: pso::PipelineStage, _: query::Query) { + // nothing to do, timestamps are unsupported on Metal + } + + unsafe fn push_graphics_constants( + &mut self, + layout: &native::PipelineLayout, + stages: pso::ShaderStageFlags, + offset: u32, + constants: &[u32], + ) { + self.state + .update_push_constants(offset, constants, layout.total_push_constants); + if stages.intersects(pso::ShaderStageFlags::GRAPHICS) { + let mut inner = self.inner.borrow_mut(); + let mut pre = inner.sink().pre_render(); + // Note: the whole range is re-uploaded, which may be inefficient + if stages.contains(pso::ShaderStageFlags::VERTEX) { + let pc = layout.push_constants.vs.unwrap(); + pre.issue(self.state.push_vs_constants(pc)); + } + if stages.contains(pso::ShaderStageFlags::FRAGMENT) { + let pc = layout.push_constants.ps.unwrap(); + pre.issue(self.state.push_ps_constants(pc)); + } + } + } + + unsafe fn push_compute_constants( + &mut self, + layout: &native::PipelineLayout, + offset: u32, + constants: &[u32], + ) { + self.state + .update_push_constants(offset, constants, layout.total_push_constants); + let pc = layout.push_constants.cs.unwrap(); + + // Note: the whole range is re-uploaded, which may be inefficient + self.inner + .borrow_mut() + .sink() + .pre_compute() + .issue(self.state.push_cs_constants(pc)); + } + + unsafe fn execute_commands<'a, T, I>(&mut self, cmd_buffers: I) + where + T: 'a + Borrow, + I: IntoIterator, + { + for cmd_buffer in cmd_buffers { + let outer_borrowed = cmd_buffer.borrow(); + let inner_borrowed = outer_borrowed.inner.borrow_mut(); + + let (exec_journal, is_inheriting) = match inner_borrowed.sink { + Some(CommandSink::Deferred { + ref journal, + is_inheriting, + .. + }) => (journal, is_inheriting), + _ => panic!("Unexpected secondary sink!"), + }; + + for (a, b) in self + .state + .descriptor_sets + .iter_mut() + .zip(&outer_borrowed.state.descriptor_sets) + { + if !b.graphics_resources.is_empty() { + a.graphics_resources.clear(); + a.graphics_resources + .extend_from_slice(&b.graphics_resources); + } + if !b.compute_resources.is_empty() { + a.compute_resources.clear(); + a.compute_resources.extend_from_slice(&b.compute_resources); + } + } + + let mut inner_self = self.inner.borrow_mut(); + inner_self.events.extend_from_slice(&inner_borrowed.events); + + match *inner_self.sink() { + CommandSink::Immediate { + ref mut cmd_buffer, + ref mut encoder_state, + ref mut num_passes, + .. 
+ } => { + if is_inheriting { + let encoder = match encoder_state { + EncoderState::Render(ref encoder) => encoder, + _ => panic!("Expected Render encoder!"), + }; + for command in &exec_journal.render_commands { + exec_render(encoder, command, &exec_journal.resources); + } + } else { + encoder_state.end(); + *num_passes += exec_journal.passes.len(); + exec_journal.record(cmd_buffer); + } + } + CommandSink::Deferred { + ref mut journal, .. + } => { + journal.extend(exec_journal, is_inheriting); + } + #[cfg(feature = "dispatch")] + CommandSink::Remote { .. } => unimplemented!(), + } + } + } +} diff --git a/third_party/rust/gfx-backend-metal/src/conversions.rs b/third_party/rust/gfx-backend-metal/src/conversions.rs new file mode 100644 index 000000000000..c9003a1926fc --- /dev/null +++ b/third_party/rust/gfx-backend-metal/src/conversions.rs @@ -0,0 +1,1231 @@ +use hal; + +use crate::PrivateCapabilities; + +use hal::{ + format::{Format, Properties, Swizzle}, + image, + pass, + pso, + pso::{Comparison, StencilOp}, + IndexType, +}; +use metal::*; + +impl PrivateCapabilities { + pub fn map_format(&self, format: Format) -> Option { + use self::hal::format::Format as f; + use metal::MTLPixelFormat::*; + Some(match format { + f::R5g6b5Unorm if self.format_b5 => B5G6R5Unorm, + f::R5g5b5a1Unorm if self.format_b5 => A1BGR5Unorm, + f::A1r5g5b5Unorm if self.format_b5 => BGR5A1Unorm, + f::Rgba4Unorm if self.format_b5 => ABGR4Unorm, + f::R8Srgb if self.format_min_srgb_channels <= 1 => R8Unorm_sRGB, + f::Rg8Srgb if self.format_min_srgb_channels <= 2 => RG8Unorm_sRGB, + f::Rgba8Srgb if self.format_min_srgb_channels <= 4 => RGBA8Unorm_sRGB, + f::Bgra8Srgb if self.format_min_srgb_channels <= 4 => BGRA8Unorm_sRGB, + f::D16Unorm if self.format_depth16unorm => Depth16Unorm, + f::D24UnormS8Uint if self.format_depth24_stencil8 => Depth24Unorm_Stencil8, + f::D32Sfloat => Depth32Float, + f::D32SfloatS8Uint => Depth32Float_Stencil8, + f::R8Unorm => R8Unorm, + f::R8Snorm => R8Snorm, + f::R8Uint => R8Uint, + f::R8Sint => R8Sint, + f::Rg8Unorm => RG8Unorm, + f::Rg8Snorm => RG8Snorm, + f::Rg8Uint => RG8Uint, + f::Rg8Sint => RG8Sint, + f::Rgba8Unorm => RGBA8Unorm, + f::Rgba8Snorm => RGBA8Snorm, + f::Rgba8Uint => RGBA8Uint, + f::Rgba8Sint => RGBA8Sint, + f::Bgra8Unorm => BGRA8Unorm, + f::R16Unorm => R16Unorm, + f::R16Snorm => R16Snorm, + f::R16Uint => R16Uint, + f::R16Sint => R16Sint, + f::R16Sfloat => R16Float, + f::Rg16Unorm => RG16Unorm, + f::Rg16Snorm => RG16Snorm, + f::Rg16Uint => RG16Uint, + f::Rg16Sint => RG16Sint, + f::Rg16Sfloat => RG16Float, + f::Rgba16Unorm => RGBA16Unorm, + f::Rgba16Snorm => RGBA16Snorm, + f::Rgba16Uint => RGBA16Uint, + f::Rgba16Sint => RGBA16Sint, + f::Rgba16Sfloat => RGBA16Float, + f::A2r10g10b10Unorm => BGR10A2Unorm, + f::A2b10g10r10Unorm => RGB10A2Unorm, + f::B10g11r11Ufloat => RG11B10Float, + f::E5b9g9r9Ufloat => RGB9E5Float, + f::R32Uint => R32Uint, + f::R32Sint => R32Sint, + f::R32Sfloat => R32Float, + f::Rg32Uint => RG32Uint, + f::Rg32Sint => RG32Sint, + f::Rg32Sfloat => RG32Float, + f::Rgba32Uint => RGBA32Uint, + f::Rgba32Sint => RGBA32Sint, + f::Rgba32Sfloat => RGBA32Float, + f::Bc1RgbaUnorm if self.format_bc => BC1_RGBA, + f::Bc1RgbaSrgb if self.format_bc => BC1_RGBA_sRGB, + f::Bc1RgbUnorm if self.format_bc => BC1_RGBA, + f::Bc1RgbSrgb if self.format_bc => BC1_RGBA_sRGB, + f::Bc2Unorm if self.format_bc => BC2_RGBA, + f::Bc2Srgb if self.format_bc => BC2_RGBA_sRGB, + f::Bc3Unorm if self.format_bc => BC3_RGBA, + f::Bc3Srgb if self.format_bc => BC3_RGBA_sRGB, + f::Bc4Unorm if 
self.format_bc => BC4_RUnorm, + f::Bc4Snorm if self.format_bc => BC4_RSnorm, + f::Bc5Unorm if self.format_bc => BC5_RGUnorm, + f::Bc5Snorm if self.format_bc => BC5_RGSnorm, + f::Bc6hUfloat if self.format_bc => BC6H_RGBUfloat, + f::Bc6hSfloat if self.format_bc => BC6H_RGBFloat, + f::Bc7Unorm if self.format_bc => BC7_RGBAUnorm, + f::Bc7Srgb if self.format_bc => BC7_RGBAUnorm_sRGB, + f::EacR11Unorm if self.format_eac_etc => EAC_R11Unorm, + f::EacR11Snorm if self.format_eac_etc => EAC_R11Snorm, + f::EacR11g11Unorm if self.format_eac_etc => EAC_RG11Unorm, + f::EacR11g11Snorm if self.format_eac_etc => EAC_RG11Snorm, + f::Etc2R8g8b8Unorm if self.format_eac_etc => ETC2_RGB8, + f::Etc2R8g8b8Srgb if self.format_eac_etc => ETC2_RGB8_sRGB, + f::Etc2R8g8b8a1Unorm if self.format_eac_etc => ETC2_RGB8A1, + f::Etc2R8g8b8a1Srgb if self.format_eac_etc => ETC2_RGB8A1_sRGB, + f::Astc4x4Unorm if self.format_astc => ASTC_4x4_LDR, + f::Astc4x4Srgb if self.format_astc => ASTC_4x4_sRGB, + f::Astc5x4Unorm if self.format_astc => ASTC_5x4_LDR, + f::Astc5x4Srgb if self.format_astc => ASTC_5x4_sRGB, + f::Astc5x5Unorm if self.format_astc => ASTC_5x5_LDR, + f::Astc5x5Srgb if self.format_astc => ASTC_5x5_sRGB, + f::Astc6x5Unorm if self.format_astc => ASTC_6x5_LDR, + f::Astc6x5Srgb if self.format_astc => ASTC_6x5_sRGB, + f::Astc6x6Unorm if self.format_astc => ASTC_6x6_LDR, + f::Astc6x6Srgb if self.format_astc => ASTC_6x6_sRGB, + f::Astc8x5Unorm if self.format_astc => ASTC_8x5_LDR, + f::Astc8x5Srgb if self.format_astc => ASTC_8x5_sRGB, + f::Astc8x6Unorm if self.format_astc => ASTC_8x6_LDR, + f::Astc8x6Srgb if self.format_astc => ASTC_8x6_sRGB, + f::Astc8x8Unorm if self.format_astc => ASTC_8x8_LDR, + f::Astc8x8Srgb if self.format_astc => ASTC_8x8_sRGB, + f::Astc10x5Unorm if self.format_astc => ASTC_10x5_LDR, + f::Astc10x5Srgb if self.format_astc => ASTC_10x5_sRGB, + f::Astc10x6Unorm if self.format_astc => ASTC_10x6_LDR, + f::Astc10x6Srgb if self.format_astc => ASTC_10x6_sRGB, + f::Astc10x8Unorm if self.format_astc => ASTC_10x8_LDR, + f::Astc10x8Srgb if self.format_astc => ASTC_10x8_sRGB, + f::Astc10x10Unorm if self.format_astc => ASTC_10x10_LDR, + f::Astc10x10Srgb if self.format_astc => ASTC_10x10_sRGB, + f::Astc12x10Unorm if self.format_astc => ASTC_12x10_LDR, + f::Astc12x10Srgb if self.format_astc => ASTC_12x10_sRGB, + f::Astc12x12Unorm if self.format_astc => ASTC_12x12_LDR, + f::Astc12x12Srgb if self.format_astc => ASTC_12x12_sRGB, + + + + + + + + + + + + + + + + + + + + + + + _ => return None, + }) + } + + pub fn map_format_with_swizzle( + &self, + format: Format, + swizzle: Swizzle, + ) -> Option { + use self::hal::format::{Component::*, Format::*}; + use metal::MTLPixelFormat as Pf; + match (format, swizzle) { + (R8Unorm, Swizzle(Zero, Zero, Zero, R)) => Some(Pf::A8Unorm), + (Rgba8Unorm, Swizzle(B, G, R, A)) => Some(Pf::BGRA8Unorm), + (Bgra8Unorm, Swizzle(B, G, R, A)) => Some(Pf::RGBA8Unorm), + (Bgra8Srgb, Swizzle(B, G, R, A)) => Some(Pf::RGBA8Unorm_sRGB), + (B5g6r5Unorm, Swizzle(B, G, R, A)) if self.format_b5 => Some(Pf::B5G6R5Unorm), + _ => { + let bits = format.base_format().0.describe_bits(); + if swizzle != Swizzle::NO && !(bits.alpha == 0 && swizzle == Swizzle(R, G, B, One)) + { + error!("Unsupported swizzle {:?} for format {:?}", swizzle, format); + } + self.map_format(format) + } + } + } + + pub fn map_format_properties(&self, format: Format) -> Properties { + use self::hal::format::{BufferFeature as Bf, ImageFeature as If}; + use metal::MTLPixelFormat::*; + + let buffer_features = Bf::all(); + let color_if = 
If::SAMPLED | If::BLIT_SRC | If::BLIT_DST; + let compressed_if = color_if | If::SAMPLED_LINEAR; + let depth_if = color_if | If::DEPTH_STENCIL_ATTACHMENT; + + match self.map_format(format) { + Some(A8Unorm) => Properties { + optimal_tiling: compressed_if, + buffer_features, + ..Properties::default() + }, + Some(R8Unorm) => Properties { + optimal_tiling: color_if + | If::SAMPLED_LINEAR + | If::STORAGE + | If::COLOR_ATTACHMENT + | If::COLOR_ATTACHMENT_BLEND, + buffer_features, + ..Properties::default() + }, + Some(R8Unorm_sRGB) if self.format_r8unorm_srgb_all => Properties { + optimal_tiling: color_if + | If::SAMPLED_LINEAR + | If::STORAGE + | If::COLOR_ATTACHMENT + | If::COLOR_ATTACHMENT_BLEND, + buffer_features, + ..Properties::default() + }, + Some(R8Unorm_sRGB) if self.format_r8unorm_srgb_no_write => Properties { + optimal_tiling: color_if + | If::SAMPLED_LINEAR + | If::COLOR_ATTACHMENT + | If::COLOR_ATTACHMENT_BLEND, + buffer_features, + ..Properties::default() + }, + Some(R8Snorm) if self.format_r8snorm_all => Properties { + optimal_tiling: color_if + | If::SAMPLED_LINEAR + | If::STORAGE + | If::COLOR_ATTACHMENT + | If::COLOR_ATTACHMENT_BLEND, + buffer_features, + ..Properties::default() + }, + Some(R8Uint) => Properties { + optimal_tiling: color_if | If::STORAGE | If::COLOR_ATTACHMENT, + buffer_features, + ..Properties::default() + }, + Some(R8Sint) => Properties { + optimal_tiling: color_if | If::STORAGE | If::COLOR_ATTACHMENT, + buffer_features, + ..Properties::default() + }, + Some(R16Unorm) if self.format_r16_norm_all => Properties { + optimal_tiling: color_if + | If::SAMPLED_LINEAR + | If::STORAGE + | If::COLOR_ATTACHMENT + | If::COLOR_ATTACHMENT_BLEND, + buffer_features, + ..Properties::default() + }, + Some(R16Snorm) if self.format_r16_norm_all => Properties { + optimal_tiling: color_if + | If::SAMPLED_LINEAR + | If::STORAGE + | If::COLOR_ATTACHMENT + | If::COLOR_ATTACHMENT_BLEND, + buffer_features, + ..Properties::default() + }, + Some(R16Uint) => Properties { + optimal_tiling: color_if | If::STORAGE | If::COLOR_ATTACHMENT, + buffer_features, + ..Properties::default() + }, + Some(R16Sint) => Properties { + optimal_tiling: color_if | If::STORAGE | If::COLOR_ATTACHMENT, + buffer_features, + ..Properties::default() + }, + Some(R16Float) => Properties { + optimal_tiling: color_if + | If::SAMPLED_LINEAR + | If::STORAGE + | If::COLOR_ATTACHMENT + | If::COLOR_ATTACHMENT_BLEND, + buffer_features, + ..Properties::default() + }, + Some(RG8Unorm) => Properties { + optimal_tiling: color_if + | If::SAMPLED_LINEAR + | If::STORAGE + | If::COLOR_ATTACHMENT + | If::COLOR_ATTACHMENT_BLEND, + buffer_features, + ..Properties::default() + }, + Some(RG8Unorm_sRGB) if self.format_rg8unorm_srgb_all => Properties { + optimal_tiling: color_if + | If::SAMPLED_LINEAR + | If::STORAGE + | If::COLOR_ATTACHMENT + | If::COLOR_ATTACHMENT_BLEND, + buffer_features, + ..Properties::default() + }, + Some(RG8Unorm_sRGB) if self.format_rg8unorm_srgb_no_write => Properties { + optimal_tiling: color_if + | If::SAMPLED_LINEAR + | If::COLOR_ATTACHMENT + | If::COLOR_ATTACHMENT_BLEND, + buffer_features, + ..Properties::default() + }, + Some(RG8Snorm) if self.format_rg8snorm_all => Properties { + optimal_tiling: color_if + | If::SAMPLED_LINEAR + | If::STORAGE + | If::COLOR_ATTACHMENT + | If::COLOR_ATTACHMENT_BLEND, + buffer_features, + ..Properties::default() + }, + Some(RG8Uint) => Properties { + optimal_tiling: color_if | If::SAMPLED_LINEAR | If::COLOR_ATTACHMENT, + buffer_features, + ..Properties::default() + }, + 
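+ // The pattern throughout this match: each Metal pixel format gets a base
+ // feature set, widened or narrowed by `PrivateCapabilities` guards (for
+ // instance, `_all` variants add `If::STORAGE` while `_no_write` variants
+ // omit it), so the reported properties track the running GPU family.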
Some(RG8Sint) => Properties { + optimal_tiling: color_if | If::SAMPLED_LINEAR | If::COLOR_ATTACHMENT, + buffer_features, + ..Properties::default() + }, + Some(B5G6R5Unorm) if self.format_b5 => Properties { + optimal_tiling: color_if + | If::SAMPLED_LINEAR + | If::COLOR_ATTACHMENT + | If::COLOR_ATTACHMENT_BLEND, + buffer_features, + ..Properties::default() + }, + Some(A1BGR5Unorm) if self.format_b5 => Properties { + optimal_tiling: color_if + | If::SAMPLED_LINEAR + | If::COLOR_ATTACHMENT + | If::COLOR_ATTACHMENT_BLEND, + buffer_features, + ..Properties::default() + }, + Some(ABGR4Unorm) if self.format_b5 => Properties { + optimal_tiling: color_if + | If::SAMPLED_LINEAR + | If::COLOR_ATTACHMENT + | If::COLOR_ATTACHMENT_BLEND, + buffer_features, + ..Properties::default() + }, + Some(BGR5A1Unorm) if self.format_b5 => Properties { + optimal_tiling: color_if + | If::SAMPLED_LINEAR + | If::COLOR_ATTACHMENT + | If::COLOR_ATTACHMENT_BLEND, + buffer_features, + ..Properties::default() + }, + Some(R32Uint) if self.format_r32_all => Properties { + optimal_tiling: color_if | If::STORAGE | If::COLOR_ATTACHMENT, + buffer_features, + ..Properties::default() + }, + Some(R32Uint) if self.format_r32_no_write => Properties { + optimal_tiling: color_if | If::COLOR_ATTACHMENT, + buffer_features, + ..Properties::default() + }, + Some(R32Sint) if self.format_r32_all => Properties { + optimal_tiling: color_if | If::STORAGE | If::COLOR_ATTACHMENT, + buffer_features, + ..Properties::default() + }, + Some(R32Sint) if self.format_r32_no_write => Properties { + optimal_tiling: color_if | If::COLOR_ATTACHMENT, + buffer_features, + ..Properties::default() + }, + Some(R32Float) if self.format_r32float_no_write_no_filter => Properties { + optimal_tiling: color_if | If::COLOR_ATTACHMENT | If::COLOR_ATTACHMENT_BLEND, + buffer_features, + ..Properties::default() + }, + Some(R32Float) if self.format_r32float_no_filter => Properties { + optimal_tiling: color_if + | If::SAMPLED_LINEAR + | If::COLOR_ATTACHMENT + | If::COLOR_ATTACHMENT_BLEND, + buffer_features, + ..Properties::default() + }, + Some(R32Float) if self.format_r32float_all => Properties { + optimal_tiling: color_if + | If::SAMPLED_LINEAR + | If::STORAGE + | If::COLOR_ATTACHMENT + | If::COLOR_ATTACHMENT_BLEND, + buffer_features, + ..Properties::default() + }, + Some(RG16Unorm) => Properties { + optimal_tiling: color_if + | If::SAMPLED_LINEAR + | If::STORAGE + | If::COLOR_ATTACHMENT + | If::COLOR_ATTACHMENT_BLEND, + buffer_features, + ..Properties::default() + }, + Some(RG16Snorm) => Properties { + optimal_tiling: color_if + | If::SAMPLED_LINEAR + | If::STORAGE + | If::COLOR_ATTACHMENT + | If::COLOR_ATTACHMENT_BLEND, + buffer_features, + ..Properties::default() + }, + Some(RG16Float) => Properties { + optimal_tiling: color_if + | If::SAMPLED_LINEAR + | If::STORAGE + | If::COLOR_ATTACHMENT + | If::COLOR_ATTACHMENT_BLEND, + buffer_features, + ..Properties::default() + }, + Some(RGBA8Unorm) => Properties { + optimal_tiling: color_if + | If::SAMPLED_LINEAR + | If::STORAGE + | If::COLOR_ATTACHMENT + | If::COLOR_ATTACHMENT_BLEND, + buffer_features, + ..Properties::default() + }, + Some(RGBA8Unorm_sRGB) if self.format_rgba8_srgb_no_write => Properties { + optimal_tiling: color_if + | If::SAMPLED_LINEAR + | If::COLOR_ATTACHMENT + | If::COLOR_ATTACHMENT_BLEND, + buffer_features, + ..Properties::default() + }, + Some(RGBA8Unorm_sRGB) if self.format_rgba8_srgb_all => Properties { + optimal_tiling: color_if + | If::SAMPLED_LINEAR + | If::STORAGE + | If::COLOR_ATTACHMENT + | 
If::COLOR_ATTACHMENT_BLEND, + buffer_features, + ..Properties::default() + }, + Some(RGBA8Snorm) => Properties { + optimal_tiling: color_if + | If::SAMPLED_LINEAR + | If::STORAGE + | If::COLOR_ATTACHMENT + | If::COLOR_ATTACHMENT_BLEND, + buffer_features, + ..Properties::default() + }, + Some(RGBA8Uint) => Properties { + optimal_tiling: color_if | If::STORAGE | If::COLOR_ATTACHMENT, + buffer_features, + ..Properties::default() + }, + Some(RGBA8Sint) => Properties { + optimal_tiling: color_if | If::STORAGE | If::COLOR_ATTACHMENT, + buffer_features, + ..Properties::default() + }, + Some(BGRA8Unorm) => Properties { + optimal_tiling: color_if + | If::SAMPLED_LINEAR + | If::STORAGE + | If::COLOR_ATTACHMENT + | If::COLOR_ATTACHMENT_BLEND, + buffer_features, + ..Properties::default() + }, + Some(BGRA8Unorm_sRGB) if self.format_rgba8_srgb_no_write => Properties { + optimal_tiling: color_if + | If::SAMPLED_LINEAR + | If::COLOR_ATTACHMENT + | If::COLOR_ATTACHMENT_BLEND, + buffer_features, + ..Properties::default() + }, + Some(BGRA8Unorm_sRGB) if self.format_rgba8_srgb_all => Properties { + optimal_tiling: color_if + | If::SAMPLED_LINEAR + | If::STORAGE + | If::COLOR_ATTACHMENT + | If::COLOR_ATTACHMENT_BLEND, + buffer_features, + ..Properties::default() + }, + Some(RGB10A2Unorm) if self.format_rgb10a2_unorm_all => Properties { + optimal_tiling: color_if + | If::SAMPLED_LINEAR + | If::STORAGE + | If::COLOR_ATTACHMENT + | If::COLOR_ATTACHMENT_BLEND, + buffer_features, + ..Properties::default() + }, + Some(RGB10A2Unorm) if self.format_rgb10a2_unorm_no_write => Properties { + optimal_tiling: color_if + | If::SAMPLED_LINEAR + | If::COLOR_ATTACHMENT + | If::COLOR_ATTACHMENT_BLEND, + buffer_features, + ..Properties::default() + }, + Some(RGB10A2Uint) if self.format_rgb10a2_uint_color => Properties { + optimal_tiling: color_if | If::COLOR_ATTACHMENT, + buffer_features, + ..Properties::default() + }, + Some(RGB10A2Uint) if self.format_rgb10a2_uint_color_write => Properties { + optimal_tiling: color_if | If::STORAGE | If::COLOR_ATTACHMENT, + buffer_features, + ..Properties::default() + }, + Some(RG11B10Float) if self.format_rg11b10_all => Properties { + optimal_tiling: color_if + | If::SAMPLED_LINEAR + | If::STORAGE + | If::COLOR_ATTACHMENT + | If::COLOR_ATTACHMENT_BLEND, + buffer_features, + ..Properties::default() + }, + Some(RG11B10Float) if self.format_rg11b10_no_write => Properties { + optimal_tiling: color_if + | If::SAMPLED_LINEAR + | If::COLOR_ATTACHMENT + | If::COLOR_ATTACHMENT_BLEND, + buffer_features, + ..Properties::default() + }, + Some(RGB9E5Float) if self.format_rgb9e5_all => Properties { + optimal_tiling: color_if + | If::SAMPLED_LINEAR + | If::STORAGE + | If::COLOR_ATTACHMENT + | If::COLOR_ATTACHMENT_BLEND, + buffer_features, + ..Properties::default() + }, + Some(RGB9E5Float) if self.format_rgb9e5_filter_only => Properties { + optimal_tiling: compressed_if, + buffer_features, + ..Properties::default() + }, + Some(RGB9E5Float) if self.format_rgb9e5_no_write => Properties { + optimal_tiling: color_if + | If::SAMPLED_LINEAR + | If::COLOR_ATTACHMENT + | If::COLOR_ATTACHMENT_BLEND, + buffer_features, + ..Properties::default() + }, + Some(RG32Uint) if self.format_rg32_color => Properties { + optimal_tiling: color_if | If::COLOR_ATTACHMENT, + buffer_features, + ..Properties::default() + }, + Some(RG32Sint) if self.format_rg32_color => Properties { + optimal_tiling: color_if | If::COLOR_ATTACHMENT, + buffer_features, + ..Properties::default() + }, + Some(RG32Uint) if self.format_rg32_color_write => 
Properties { + optimal_tiling: color_if | If::COLOR_ATTACHMENT | If::STORAGE, + buffer_features, + ..Properties::default() + }, + Some(RG32Sint) if self.format_rg32_color_write => Properties { + optimal_tiling: color_if | If::COLOR_ATTACHMENT | If::STORAGE, + buffer_features, + ..Properties::default() + }, + Some(RG32Float) if self.format_rg32float_all => Properties { + optimal_tiling: color_if + | If::SAMPLED_LINEAR + | If::STORAGE + | If::COLOR_ATTACHMENT + | If::COLOR_ATTACHMENT_BLEND, + buffer_features, + ..Properties::default() + }, + Some(RG32Float) if self.format_rg32float_color_blend => Properties { + optimal_tiling: color_if | If::COLOR_ATTACHMENT | If::COLOR_ATTACHMENT_BLEND, + buffer_features, + ..Properties::default() + }, + Some(RG32Float) if self.format_rg32float_no_filter => Properties { + optimal_tiling: color_if + | If::STORAGE + | If::COLOR_ATTACHMENT + | If::COLOR_ATTACHMENT_BLEND, + buffer_features, + ..Properties::default() + }, + Some(RGBA16Unorm) => Properties { + optimal_tiling: color_if + | If::SAMPLED_LINEAR + | If::STORAGE + | If::COLOR_ATTACHMENT + | If::COLOR_ATTACHMENT_BLEND, + buffer_features, + ..Properties::default() + }, + Some(RGBA16Snorm) => Properties { + optimal_tiling: color_if + | If::SAMPLED_LINEAR + | If::STORAGE + | If::COLOR_ATTACHMENT + | If::COLOR_ATTACHMENT_BLEND, + buffer_features, + ..Properties::default() + }, + Some(RGBA16Uint) => Properties { + optimal_tiling: color_if | If::STORAGE | If::COLOR_ATTACHMENT, + buffer_features, + ..Properties::default() + }, + Some(RGBA16Sint) => Properties { + optimal_tiling: color_if | If::STORAGE | If::COLOR_ATTACHMENT, + buffer_features, + ..Properties::default() + }, + Some(RGBA16Float) => Properties { + optimal_tiling: color_if + | If::SAMPLED_LINEAR + | If::STORAGE + | If::COLOR_ATTACHMENT + | If::COLOR_ATTACHMENT_BLEND, + buffer_features, + ..Properties::default() + }, + Some(RGBA32Uint) if self.format_rgba32int_color => Properties { + optimal_tiling: color_if | If::COLOR_ATTACHMENT, + buffer_features, + ..Properties::default() + }, + Some(RGBA32Uint) if self.format_rgba32int_color_write => Properties { + optimal_tiling: color_if | If::COLOR_ATTACHMENT | If::STORAGE, + buffer_features, + ..Properties::default() + }, + Some(RGBA32Sint) if self.format_rgba32int_color => Properties { + optimal_tiling: color_if | If::COLOR_ATTACHMENT, + buffer_features, + ..Properties::default() + }, + Some(RGBA32Sint) if self.format_rgba32int_color_write => Properties { + optimal_tiling: color_if | If::COLOR_ATTACHMENT | If::STORAGE, + buffer_features, + ..Properties::default() + }, + Some(RGBA32Float) if self.format_rgba32float_all => Properties { + optimal_tiling: color_if + | If::SAMPLED_LINEAR + | If::STORAGE + | If::COLOR_ATTACHMENT + | If::COLOR_ATTACHMENT_BLEND, + buffer_features, + ..Properties::default() + }, + Some(RGBA32Float) if self.format_rgba32float_color => Properties { + optimal_tiling: color_if | If::COLOR_ATTACHMENT, + buffer_features, + ..Properties::default() + }, + Some(RGBA32Float) if self.format_rgba32float_color_write => Properties { + optimal_tiling: color_if | If::COLOR_ATTACHMENT | If::STORAGE, + buffer_features, + ..Properties::default() + }, + Some(EAC_R11Unorm) if self.format_eac_etc => Properties { + optimal_tiling: compressed_if, + ..Properties::default() + }, + Some(EAC_R11Snorm) if self.format_eac_etc => Properties { + optimal_tiling: compressed_if, + ..Properties::default() + }, + Some(EAC_RG11Unorm) if self.format_eac_etc => Properties { + optimal_tiling: compressed_if, + 
..Properties::default() + }, + Some(EAC_RG11Snorm) if self.format_eac_etc => Properties { + optimal_tiling: compressed_if, + ..Properties::default() + }, + Some(ETC2_RGB8) if self.format_eac_etc => Properties { + optimal_tiling: compressed_if, + ..Properties::default() + }, + Some(ETC2_RGB8_sRGB) if self.format_eac_etc => Properties { + optimal_tiling: compressed_if, + ..Properties::default() + }, + Some(ETC2_RGB8A1) if self.format_eac_etc => Properties { + optimal_tiling: compressed_if, + ..Properties::default() + }, + Some(ETC2_RGB8A1_sRGB) if self.format_eac_etc => Properties { + optimal_tiling: compressed_if, + ..Properties::default() + }, + Some(ASTC_4x4_LDR) if self.format_astc => Properties { + optimal_tiling: compressed_if, + ..Properties::default() + }, + Some(ASTC_4x4_sRGB) if self.format_astc => Properties { + optimal_tiling: compressed_if, + ..Properties::default() + }, + Some(ASTC_5x4_LDR) if self.format_astc => Properties { + optimal_tiling: compressed_if, + ..Properties::default() + }, + Some(ASTC_5x4_sRGB) if self.format_astc => Properties { + optimal_tiling: compressed_if, + ..Properties::default() + }, + Some(ASTC_5x5_LDR) if self.format_astc => Properties { + optimal_tiling: compressed_if, + ..Properties::default() + }, + Some(ASTC_5x5_sRGB) if self.format_astc => Properties { + optimal_tiling: compressed_if, + ..Properties::default() + }, + Some(ASTC_6x5_LDR) if self.format_astc => Properties { + optimal_tiling: compressed_if, + ..Properties::default() + }, + Some(ASTC_6x5_sRGB) if self.format_astc => Properties { + optimal_tiling: compressed_if, + ..Properties::default() + }, + Some(ASTC_6x6_LDR) if self.format_astc => Properties { + optimal_tiling: compressed_if, + ..Properties::default() + }, + Some(ASTC_6x6_sRGB) if self.format_astc => Properties { + optimal_tiling: compressed_if, + ..Properties::default() + }, + Some(ASTC_8x5_LDR) if self.format_astc => Properties { + optimal_tiling: compressed_if, + ..Properties::default() + }, + Some(ASTC_8x5_sRGB) if self.format_astc => Properties { + optimal_tiling: compressed_if, + ..Properties::default() + }, + Some(ASTC_8x6_LDR) if self.format_astc => Properties { + optimal_tiling: compressed_if, + ..Properties::default() + }, + Some(ASTC_8x6_sRGB) if self.format_astc => Properties { + optimal_tiling: compressed_if, + ..Properties::default() + }, + Some(ASTC_8x8_LDR) if self.format_astc => Properties { + optimal_tiling: compressed_if, + ..Properties::default() + }, + Some(ASTC_8x8_sRGB) if self.format_astc => Properties { + optimal_tiling: compressed_if, + ..Properties::default() + }, + Some(ASTC_10x5_LDR) if self.format_astc => Properties { + optimal_tiling: compressed_if, + ..Properties::default() + }, + Some(ASTC_10x5_sRGB) if self.format_astc => Properties { + optimal_tiling: compressed_if, + ..Properties::default() + }, + Some(ASTC_10x6_LDR) if self.format_astc => Properties { + optimal_tiling: compressed_if, + ..Properties::default() + }, + Some(ASTC_10x6_sRGB) if self.format_astc => Properties { + optimal_tiling: compressed_if, + ..Properties::default() + }, + Some(ASTC_10x8_LDR) if self.format_astc => Properties { + optimal_tiling: compressed_if, + ..Properties::default() + }, + Some(ASTC_10x8_sRGB) if self.format_astc => Properties { + optimal_tiling: compressed_if, + ..Properties::default() + }, + Some(ASTC_10x10_LDR) if self.format_astc => Properties { + optimal_tiling: compressed_if, + ..Properties::default() + }, + Some(ASTC_10x10_sRGB) if self.format_astc => Properties { + optimal_tiling: compressed_if, + 
..Properties::default() + }, + Some(ASTC_12x10_LDR) if self.format_astc => Properties { + optimal_tiling: compressed_if, + ..Properties::default() + }, + Some(ASTC_12x10_sRGB) if self.format_astc => Properties { + optimal_tiling: compressed_if, + ..Properties::default() + }, + Some(ASTC_12x12_LDR) if self.format_astc => Properties { + optimal_tiling: compressed_if, + ..Properties::default() + }, + Some(ASTC_12x12_sRGB) if self.format_astc => Properties { + optimal_tiling: compressed_if, + ..Properties::default() + }, + Some(BC1_RGBA) if self.format_bc => Properties { + optimal_tiling: compressed_if, + ..Properties::default() + }, + Some(BC1_RGBA_sRGB) if self.format_bc => Properties { + optimal_tiling: compressed_if, + ..Properties::default() + }, + Some(BC2_RGBA) if self.format_bc => Properties { + optimal_tiling: compressed_if, + ..Properties::default() + }, + Some(BC2_RGBA_sRGB) if self.format_bc => Properties { + optimal_tiling: compressed_if, + ..Properties::default() + }, + Some(BC3_RGBA) if self.format_bc => Properties { + optimal_tiling: compressed_if, + ..Properties::default() + }, + Some(BC3_RGBA_sRGB) if self.format_bc => Properties { + optimal_tiling: compressed_if, + ..Properties::default() + }, + Some(BC4_RUnorm) if self.format_bc => Properties { + optimal_tiling: compressed_if, + ..Properties::default() + }, + Some(BC4_RSnorm) if self.format_bc => Properties { + optimal_tiling: compressed_if, + ..Properties::default() + }, + Some(BC5_RGUnorm) if self.format_bc => Properties { + optimal_tiling: compressed_if, + ..Properties::default() + }, + Some(BC5_RGSnorm) if self.format_bc => Properties { + optimal_tiling: compressed_if, + ..Properties::default() + }, + Some(BC6H_RGBUfloat) if self.format_bc => Properties { + optimal_tiling: compressed_if, + ..Properties::default() + }, + Some(BC6H_RGBFloat) if self.format_bc => Properties { + optimal_tiling: compressed_if, + ..Properties::default() + }, + Some(BC7_RGBAUnorm) if self.format_bc => Properties { + optimal_tiling: compressed_if, + ..Properties::default() + }, + Some(BC7_RGBAUnorm_sRGB) if self.format_bc => Properties { + optimal_tiling: compressed_if, + ..Properties::default() + }, + Some(Depth16Unorm) if self.format_depth16unorm => Properties { + optimal_tiling: depth_if | If::SAMPLED_LINEAR, + ..Properties::default() + }, + Some(Depth32Float) if self.format_depth32float_filter => Properties { + optimal_tiling: depth_if | If::SAMPLED_LINEAR, + ..Properties::default() + }, + Some(Depth32Float) if self.format_depth32float_none => Properties { + optimal_tiling: depth_if, + ..Properties::default() + }, + Some(Stencil8) => Properties { + ..Properties::default() + }, + Some(Depth24Unorm_Stencil8) if self.format_depth24_stencil8 => Properties { + optimal_tiling: depth_if, + ..Properties::default() + }, + Some(Depth32Float_Stencil8) if self.format_depth32_stencil8_filter => Properties { + optimal_tiling: depth_if | If::SAMPLED_LINEAR, + ..Properties::default() + }, + Some(Depth32Float_Stencil8) if self.format_depth32_stencil8_none => Properties { + optimal_tiling: depth_if, + ..Properties::default() + }, + Some(BGR10A2Unorm) if self.format_bgr10a2_all => Properties { + optimal_tiling: color_if + | If::SAMPLED_LINEAR + | If::STORAGE + | If::COLOR_ATTACHMENT + | If::COLOR_ATTACHMENT_BLEND, + ..Properties::default() + }, + Some(BGR10A2Unorm) if self.format_bgr10a2_no_write => Properties { + optimal_tiling: color_if + | If::SAMPLED_LINEAR + | If::COLOR_ATTACHMENT + | If::COLOR_ATTACHMENT_BLEND, + ..Properties::default() + }, + _ if 
map_vertex_format(format).is_some() => Properties { + buffer_features: Bf::VERTEX, + ..Properties::default() + }, + _ => Properties::default(), + } + } +} + +pub fn map_load_operation(operation: pass::AttachmentLoadOp) -> MTLLoadAction { + use self::pass::AttachmentLoadOp::*; + + match operation { + Load => MTLLoadAction::Load, + Clear => MTLLoadAction::Clear, + DontCare => MTLLoadAction::DontCare, + } +} + +pub fn map_store_operation(operation: pass::AttachmentStoreOp) -> MTLStoreAction { + use self::pass::AttachmentStoreOp::*; + + match operation { + Store => MTLStoreAction::Store, + DontCare => MTLStoreAction::DontCare, + } +} + +pub fn map_resolved_store_operation(operation: pass::AttachmentStoreOp) -> MTLStoreAction { + use self::pass::AttachmentStoreOp::*; + + match operation { + Store => MTLStoreAction::StoreAndMultisampleResolve, + DontCare => MTLStoreAction::MultisampleResolve, + } +} + +pub fn map_write_mask(mask: pso::ColorMask) -> MTLColorWriteMask { + let mut mtl_mask = MTLColorWriteMask::empty(); + + if mask.contains(pso::ColorMask::RED) { + mtl_mask |= MTLColorWriteMask::Red; + } + if mask.contains(pso::ColorMask::GREEN) { + mtl_mask |= MTLColorWriteMask::Green; + } + if mask.contains(pso::ColorMask::BLUE) { + mtl_mask |= MTLColorWriteMask::Blue; + } + if mask.contains(pso::ColorMask::ALPHA) { + mtl_mask |= MTLColorWriteMask::Alpha; + } + + mtl_mask +} + +fn map_factor(factor: pso::Factor) -> MTLBlendFactor { + use self::hal::pso::Factor::*; + + match factor { + Zero => MTLBlendFactor::Zero, + One => MTLBlendFactor::One, + SrcColor => MTLBlendFactor::SourceColor, + OneMinusSrcColor => MTLBlendFactor::OneMinusSourceColor, + DstColor => MTLBlendFactor::DestinationColor, + OneMinusDstColor => MTLBlendFactor::OneMinusDestinationColor, + SrcAlpha => MTLBlendFactor::SourceAlpha, + OneMinusSrcAlpha => MTLBlendFactor::OneMinusSourceAlpha, + DstAlpha => MTLBlendFactor::DestinationAlpha, + OneMinusDstAlpha => MTLBlendFactor::OneMinusDestinationAlpha, + ConstColor => MTLBlendFactor::BlendColor, + OneMinusConstColor => MTLBlendFactor::OneMinusBlendColor, + ConstAlpha => MTLBlendFactor::BlendAlpha, + OneMinusConstAlpha => MTLBlendFactor::OneMinusBlendAlpha, + SrcAlphaSaturate => MTLBlendFactor::SourceAlphaSaturated, + Src1Color => MTLBlendFactor::Source1Color, + OneMinusSrc1Color => MTLBlendFactor::OneMinusSource1Color, + Src1Alpha => MTLBlendFactor::Source1Alpha, + OneMinusSrc1Alpha => MTLBlendFactor::OneMinusSource1Alpha, + } +} + +pub fn map_blend_op( + operation: pso::BlendOp, +) -> (MTLBlendOperation, MTLBlendFactor, MTLBlendFactor) { + use self::hal::pso::BlendOp::*; + + match operation { + Add { src, dst } => (MTLBlendOperation::Add, map_factor(src), map_factor(dst)), + Sub { src, dst } => ( + MTLBlendOperation::Subtract, + map_factor(src), + map_factor(dst), + ), + RevSub { src, dst } => ( + MTLBlendOperation::ReverseSubtract, + map_factor(src), + map_factor(dst), + ), + Min => ( + MTLBlendOperation::Min, + MTLBlendFactor::Zero, + MTLBlendFactor::Zero, + ), + Max => ( + MTLBlendOperation::Max, + MTLBlendFactor::Zero, + MTLBlendFactor::Zero, + ), + } +} + +pub fn map_vertex_format(format: Format) -> Option { + use self::hal::format::Format as f; + use metal::MTLVertexFormat::*; + Some(match format { + f::R8Unorm => UCharNormalized, + f::R8Snorm => CharNormalized, + f::R8Uint => UChar, + f::R8Sint => Char, + f::Rg8Unorm => UChar2Normalized, + f::Rg8Snorm => Char2Normalized, + f::Rg8Uint => UChar2, + f::Rg8Sint => Char2, + f::Rgb8Unorm => UChar3Normalized, + f::Rgb8Snorm => 
Char3Normalized, + f::Rgb8Uint => UChar3, + f::Rgb8Sint => Char3, + f::Rgba8Unorm => UChar4Normalized, + f::Rgba8Snorm => Char4Normalized, + f::Rgba8Uint => UChar4, + f::Rgba8Sint => Char4, + f::Bgra8Unorm => UChar4Normalized_BGRA, + f::R16Unorm => UShortNormalized, + f::R16Snorm => ShortNormalized, + f::R16Uint => UShort, + f::R16Sint => Short, + f::R16Sfloat => Half, + f::Rg16Unorm => UShort2Normalized, + f::Rg16Snorm => Short2Normalized, + f::Rg16Uint => UShort2, + f::Rg16Sint => Short2, + f::Rg16Sfloat => Half2, + f::Rgb16Unorm => UShort3Normalized, + f::Rgb16Snorm => Short3Normalized, + f::Rgb16Uint => UShort3, + f::Rgb16Sint => Short3, + f::Rgb16Sfloat => Half3, + f::Rgba16Unorm => UShort4Normalized, + f::Rgba16Snorm => Short4Normalized, + f::Rgba16Uint => UShort4, + f::Rgba16Sint => Short4, + f::Rgba16Sfloat => Half4, + f::R32Uint => UInt, + f::R32Sint => Int, + f::R32Sfloat => Float, + f::Rg32Uint => UInt2, + f::Rg32Sint => Int2, + f::Rg32Sfloat => Float2, + f::Rgb32Uint => UInt3, + f::Rgb32Sint => Int3, + f::Rgb32Sfloat => Float3, + f::Rgba32Uint => UInt4, + f::Rgba32Sint => Int4, + f::Rgba32Sfloat => Float4, + _ => return None, + }) +} + +pub fn resource_options_from_storage_and_cache( + storage: MTLStorageMode, + cache: MTLCPUCacheMode, +) -> MTLResourceOptions { + MTLResourceOptions::from_bits( + ((storage as u64) << MTLResourceStorageModeShift) + | ((cache as u64) << MTLResourceCPUCacheModeShift), + ) + .unwrap() +} + +pub fn map_texture_usage(usage: image::Usage, tiling: image::Tiling) -> MTLTextureUsage { + use self::hal::image::Usage as U; + + let mut texture_usage = MTLTextureUsage::PixelFormatView; + if usage.intersects(U::COLOR_ATTACHMENT | U::DEPTH_STENCIL_ATTACHMENT) { + texture_usage |= MTLTextureUsage::RenderTarget; + } + if usage.intersects(U::SAMPLED | U::INPUT_ATTACHMENT) { + texture_usage |= MTLTextureUsage::ShaderRead; + } + if usage.intersects(U::STORAGE) { + texture_usage |= MTLTextureUsage::ShaderRead | MTLTextureUsage::ShaderWrite; + } + + // Note: for blitting, we do actual rendering, so we add more flags for TRANSFER_* usage + if usage.contains(U::TRANSFER_DST) && tiling == image::Tiling::Optimal { + texture_usage |= MTLTextureUsage::RenderTarget; + } + if usage.contains(U::TRANSFER_SRC) { + texture_usage |= MTLTextureUsage::ShaderRead; + } + + texture_usage +} + +pub fn map_texture_type(view_kind: image::ViewKind) -> MTLTextureType { + use self::hal::image::ViewKind as Vk; + match view_kind { + Vk::D1 => MTLTextureType::D1, + Vk::D1Array => MTLTextureType::D1Array, + Vk::D2 => MTLTextureType::D2, + Vk::D2Array => MTLTextureType::D2Array, + Vk::D3 => MTLTextureType::D3, + Vk::Cube => MTLTextureType::Cube, + Vk::CubeArray => MTLTextureType::CubeArray, + } +} + +pub fn _map_index_type(index_type: IndexType) -> MTLIndexType { + match index_type { + IndexType::U16 => MTLIndexType::UInt16, + IndexType::U32 => MTLIndexType::UInt32, + } +} + +pub fn map_compare_function(fun: Comparison) -> MTLCompareFunction { + match fun { + Comparison::Never => MTLCompareFunction::Never, + Comparison::Less => MTLCompareFunction::Less, + Comparison::LessEqual => MTLCompareFunction::LessEqual, + Comparison::Equal => MTLCompareFunction::Equal, + Comparison::GreaterEqual => MTLCompareFunction::GreaterEqual, + Comparison::Greater => MTLCompareFunction::Greater, + Comparison::NotEqual => MTLCompareFunction::NotEqual, + Comparison::Always => MTLCompareFunction::Always, + } +} + +pub fn map_filter(filter: image::Filter) -> MTLSamplerMinMagFilter { + match filter { + 
image::Filter::Nearest => MTLSamplerMinMagFilter::Nearest, + image::Filter::Linear => MTLSamplerMinMagFilter::Linear, + } +} + +pub fn map_wrap_mode(wrap: image::WrapMode) -> MTLSamplerAddressMode { + match wrap { + image::WrapMode::Tile => MTLSamplerAddressMode::Repeat, + image::WrapMode::Mirror => MTLSamplerAddressMode::MirrorRepeat, + image::WrapMode::Clamp => MTLSamplerAddressMode::ClampToEdge, + image::WrapMode::Border => MTLSamplerAddressMode::ClampToBorderColor, + } +} + +pub fn map_extent(extent: image::Extent) -> MTLSize { + MTLSize { + width: extent.width as _, + height: extent.height as _, + depth: extent.depth as _, + } +} + +pub fn map_offset(offset: image::Offset) -> MTLOrigin { + MTLOrigin { + x: offset.x as _, + y: offset.y as _, + z: offset.z as _, + } +} + +pub fn map_stencil_op(op: StencilOp) -> MTLStencilOperation { + match op { + StencilOp::Keep => MTLStencilOperation::Keep, + StencilOp::Zero => MTLStencilOperation::Zero, + StencilOp::Replace => MTLStencilOperation::Replace, + StencilOp::IncrementClamp => MTLStencilOperation::IncrementClamp, + StencilOp::IncrementWrap => MTLStencilOperation::IncrementWrap, + StencilOp::DecrementClamp => MTLStencilOperation::DecrementClamp, + StencilOp::DecrementWrap => MTLStencilOperation::DecrementWrap, + StencilOp::Invert => MTLStencilOperation::Invert, + } +} + +pub fn map_winding(face: pso::FrontFace) -> MTLWinding { + match face { + pso::FrontFace::Clockwise => MTLWinding::Clockwise, + pso::FrontFace::CounterClockwise => MTLWinding::CounterClockwise, + } +} + +pub fn map_polygon_mode(mode: pso::PolygonMode) -> MTLTriangleFillMode { + match mode { + pso::PolygonMode::Point => { + warn!("Unable to fill with points"); + MTLTriangleFillMode::Lines + } + pso::PolygonMode::Line(width) => { + match width { + pso::State::Static(w) if w != 1.0 => { + warn!("Unsupported line width: {:?}", w); + } + _ => {} + } + MTLTriangleFillMode::Lines + } + pso::PolygonMode::Fill => MTLTriangleFillMode::Fill, + } +} + +pub fn map_cull_face(face: pso::Face) -> Option { + match face { + pso::Face::NONE => Some(MTLCullMode::None), + pso::Face::FRONT => Some(MTLCullMode::Front), + pso::Face::BACK => Some(MTLCullMode::Back), + _ => None, + } +} diff --git a/third_party/rust/gfx-backend-metal/src/device.rs b/third_party/rust/gfx-backend-metal/src/device.rs new file mode 100644 index 000000000000..573ac93e250f --- /dev/null +++ b/third_party/rust/gfx-backend-metal/src/device.rs @@ -0,0 +1,3042 @@ +use crate::{ + command, + conversions as conv, + internal::{Channel, FastStorageMap}, + native as n, + AsNative, + Backend, + OnlineRecording, + QueueFamily, + ResourceIndex, + Shared, + Surface, + Swapchain, + VisibilityShared, + MAX_BOUND_DESCRIPTOR_SETS, + MAX_COLOR_ATTACHMENTS, +}; + +use arrayvec::ArrayVec; +use auxil::{ + FastHashMap, + spirv_cross_specialize_ast, +}; +use cocoa::foundation::{NSRange, NSUInteger}; +use copyless::VecHelper; +use foreign_types::{ForeignType, ForeignTypeRef}; +use hal::{ + adapter, + buffer, + device::{ + AllocationError, + BindError, + CreationError as DeviceCreationError, + DeviceLost, + MapError, + OomOrDeviceLost, + OutOfMemory, + ShaderError, + }, + format, + image, + memory, + memory::Properties, + pass, + pool::CommandPoolCreateFlags, + pso, + pso::VertexInputRate, + query, + queue::{QueueFamilyId, QueueGroup, QueuePriority}, + range::RangeArg, + window, +}; +use metal::{ + self, + CaptureManager, + MTLCPUCacheMode, + MTLLanguageVersion, + MTLPrimitiveTopologyClass, + MTLPrimitiveType, + MTLResourceOptions, + 
MTLSamplerBorderColor, + MTLSamplerMipFilter, + MTLStorageMode, + MTLTextureType, + MTLVertexStepFunction, +}; +use objc::rc::autoreleasepool; +use objc::runtime::{Object, BOOL, NO}; +use parking_lot::Mutex; +use spirv_cross::{msl, spirv, ErrorCode as SpirvErrorCode}; + +use std::borrow::Borrow; +use std::cell::RefCell; +use std::collections::hash_map::Entry; +use std::collections::BTreeMap; +use std::ops::Range; +use std::path::Path; +use std::sync::{ + atomic::{AtomicBool, Ordering}, + Arc, +}; +use std::{cmp, iter, mem, ptr, thread, time}; + + +const PUSH_CONSTANTS_DESC_SET: u32 = !0; +const PUSH_CONSTANTS_DESC_BINDING: u32 = 0; +const STRIDE_GRANULARITY: pso::ElemStride = 4; +const SHADER_STAGE_COUNT: usize = 3; + + + +fn gen_unexpected_error(err: SpirvErrorCode) -> ShaderError { + let msg = match err { + SpirvErrorCode::CompilationError(msg) => msg, + SpirvErrorCode::Unhandled => "Unexpected error".into(), + }; + ShaderError::CompilationFailed(msg) +} + +#[derive(Clone, Debug)] +enum FunctionError { + InvalidEntryPoint, + MissingRequiredSpecialization, + BadSpecialization, +} + +fn get_final_function( + library: &metal::LibraryRef, + entry: &str, + specialization: &pso::Specialization, + function_specialization: bool, +) -> Result { + type MTLFunctionConstant = Object; + + let mut mtl_function = library.get_function(entry, None).map_err(|e| { + error!("Function retrieval error {:?}", e); + FunctionError::InvalidEntryPoint + })?; + + if !function_specialization { + assert!( + specialization.data.is_empty() && specialization.constants.is_empty(), + "platform does not support specialization", + ); + return Ok(mtl_function); + } + + let dictionary = mtl_function.function_constants_dictionary(); + let count: NSUInteger = unsafe { msg_send![dictionary, count] }; + if count == 0 { + return Ok(mtl_function); + } + + let all_values: *mut Object = unsafe { msg_send![dictionary, allValues] }; + + let constants = metal::FunctionConstantValues::new(); + for i in 0 .. 
count { + let object: *mut MTLFunctionConstant = unsafe { msg_send![all_values, objectAtIndex: i] }; + let index: NSUInteger = unsafe { msg_send![object, index] }; + let required: BOOL = unsafe { msg_send![object, required] }; + match specialization + .constants + .iter() + .find(|c| c.id as NSUInteger == index) + { + Some(c) => unsafe { + let ptr = &specialization.data[c.range.start as usize] as *const u8 as *const _; + let ty: metal::MTLDataType = msg_send![object, type]; + constants.set_constant_value_at_index(c.id as NSUInteger, ty, ptr); + }, + None if required != NO => { + + error!("Missing required specialization constant id {}", index); + return Err(FunctionError::MissingRequiredSpecialization); + } + None => {} + } + } + + mtl_function = library.get_function(entry, Some(constants)).map_err(|e| { + error!("Specialized function retrieval error {:?}", e); + FunctionError::BadSpecialization + })?; + + Ok(mtl_function) +} + +impl VisibilityShared { + fn are_available(&self, pool_base: query::Id, queries: &Range) -> bool { + unsafe { + let availability_ptr = ((self.buffer.contents() as *mut u8) + .offset(self.availability_offset as isize) + as *mut u32) + .offset(pool_base as isize); + queries + .clone() + .all(|id| *availability_ptr.offset(id as isize) != 0) + } + } +} + +#[derive(Clone, Debug)] +pub struct Device { + pub(crate) shared: Arc, + memory_types: Vec, + features: hal::Features, + pub online_recording: OnlineRecording, +} +unsafe impl Send for Device {} +unsafe impl Sync for Device {} + +impl Drop for Device { + fn drop(&mut self) { + if cfg!(feature = "auto-capture") { + info!("Metal capture stop"); + let shared_capture_manager = CaptureManager::shared(); + if let Some(default_capture_scope) = shared_capture_manager.default_capture_scope() { + default_capture_scope.end_scope(); + } + shared_capture_manager.stop_capture(); + } + } +} + +bitflags! { + /// Memory type bits. 
+ struct MemoryTypes: u64 { + const PRIVATE = 1<<0; + const SHARED = 1<<1; + const MANAGED_UPLOAD = 1<<2; + const MANAGED_DOWNLOAD = 1<<3; + } +} + +impl MemoryTypes { + fn describe(index: usize) -> (MTLStorageMode, MTLCPUCacheMode) { + match Self::from_bits(1 << index).unwrap() { + Self::PRIVATE => (MTLStorageMode::Private, MTLCPUCacheMode::DefaultCache), + Self::SHARED => (MTLStorageMode::Shared, MTLCPUCacheMode::DefaultCache), + Self::MANAGED_UPLOAD => (MTLStorageMode::Managed, MTLCPUCacheMode::WriteCombined), + Self::MANAGED_DOWNLOAD => (MTLStorageMode::Managed, MTLCPUCacheMode::DefaultCache), + _ => unreachable!(), + } + } +} + +#[derive(Debug)] +pub struct PhysicalDevice { + pub(crate) shared: Arc, + memory_types: Vec, +} +unsafe impl Send for PhysicalDevice {} +unsafe impl Sync for PhysicalDevice {} + +impl PhysicalDevice { + pub(crate) fn new(shared: Arc) -> Self { + let memory_types = if shared.private_caps.os_is_mac { + vec![ + adapter::MemoryType { + // PRIVATE + properties: Properties::DEVICE_LOCAL, + heap_index: 0, + }, + adapter::MemoryType { + // SHARED + properties: Properties::CPU_VISIBLE | Properties::COHERENT, + heap_index: 1, + }, + adapter::MemoryType { + // MANAGED_UPLOAD + properties: Properties::DEVICE_LOCAL | Properties::CPU_VISIBLE, + heap_index: 1, + }, + adapter::MemoryType { + // MANAGED_DOWNLOAD + properties: Properties::DEVICE_LOCAL + | Properties::CPU_VISIBLE + | Properties::CPU_CACHED, + heap_index: 1, + }, + ] + } else { + vec![ + adapter::MemoryType { + // PRIVATE + properties: Properties::DEVICE_LOCAL, + heap_index: 0, + }, + adapter::MemoryType { + // SHARED + properties: Properties::CPU_VISIBLE | Properties::COHERENT, + heap_index: 1, + }, + ] + }; + PhysicalDevice { + shared: shared.clone(), + memory_types, + } + } + + + pub fn supports_swizzle(&self, format: format::Format, swizzle: format::Swizzle) -> bool { + self.shared + .private_caps + .map_format_with_swizzle(format, swizzle) + .is_some() + } +} + +impl adapter::PhysicalDevice for PhysicalDevice { + unsafe fn open( + &self, + families: &[(&QueueFamily, &[QueuePriority])], + requested_features: hal::Features, + ) -> Result, DeviceCreationError> { + use hal::queue::QueueFamily as _; + + + + if !self.features().contains(requested_features) { + warn!( + "Features missing: {:?}", + requested_features - self.features() + ); + return Err(DeviceCreationError::MissingFeature); + } + + let device = self.shared.device.lock(); + + if cfg!(feature = "auto-capture") { + info!("Metal capture start"); + let shared_capture_manager = CaptureManager::shared(); + let default_capture_scope = + shared_capture_manager.new_capture_scope_with_device(&*device); + shared_capture_manager.set_default_capture_scope(&default_capture_scope); + shared_capture_manager.start_capture_with_scope(&default_capture_scope); + default_capture_scope.begin_scope(); + } + + assert_eq!(families.len(), 1); + assert_eq!(families[0].1.len(), 1); + let mut queue_group = QueueGroup::new(families[0].0.id()); + for _ in 0 .. 
self.shared.private_caps.exposed_queues { + queue_group.add_queue(command::CommandQueue::new(self.shared.clone())); + } + + let device = Device { + shared: self.shared.clone(), + memory_types: self.memory_types.clone(), + features: requested_features, + online_recording: OnlineRecording::default(), + }; + + Ok(adapter::Gpu { + device, + queue_groups: vec![queue_group], + }) + } + + fn format_properties(&self, format: Option) -> format::Properties { + match format { + Some(format) => self.shared.private_caps.map_format_properties(format), + None => format::Properties { + linear_tiling: format::ImageFeature::empty(), + optimal_tiling: format::ImageFeature::empty(), + buffer_features: format::BufferFeature::empty(), + }, + } + } + + fn image_format_properties( + &self, + format: format::Format, + dimensions: u8, + tiling: image::Tiling, + usage: image::Usage, + view_caps: image::ViewCapabilities, + ) -> Option { + if let image::Tiling::Linear = tiling { + let format_desc = format.surface_desc(); + let host_usage = image::Usage::TRANSFER_SRC | image::Usage::TRANSFER_DST; + if dimensions != 2 + || !view_caps.is_empty() + || !host_usage.contains(usage) + || format_desc.aspects != format::Aspects::COLOR + || format_desc.is_compressed() + { + return None; + } + } + if dimensions == 1 + && usage + .intersects(image::Usage::COLOR_ATTACHMENT | image::Usage::DEPTH_STENCIL_ATTACHMENT) + { + + return None; + } + if dimensions == 3 && view_caps.contains(image::ViewCapabilities::KIND_2D_ARRAY) { + + return None; + } + let max_dimension = if dimensions == 3 { + self.shared.private_caps.max_texture_3d_size as _ + } else { + self.shared.private_caps.max_texture_size as _ + }; + + let max_extent = image::Extent { + width: max_dimension, + height: if dimensions >= 2 { max_dimension } else { 1 }, + depth: if dimensions >= 3 { max_dimension } else { 1 }, + }; + + self.shared + .private_caps + .map_format(format) + .map(|_| image::FormatProperties { + max_extent, + max_levels: if dimensions == 1 { 1 } else { 12 }, + + max_layers: if dimensions == 3 { + 1 + } else { + self.shared.private_caps.max_texture_layers as _ + }, + sample_count_mask: self.shared.private_caps.sample_count_mask as _, + + + + max_resource_size: self.shared.private_caps.max_buffer_size as _, + }) + } + + fn memory_properties(&self) -> adapter::MemoryProperties { + adapter::MemoryProperties { + memory_heaps: vec![ + !0, //TODO: private memory limits + self.shared.private_caps.max_buffer_size, + ], + memory_types: self.memory_types.to_vec(), + } + } + + fn features(&self) -> hal::Features { + hal::Features::ROBUST_BUFFER_ACCESS + | hal::Features::DRAW_INDIRECT_FIRST_INSTANCE + | hal::Features::DEPTH_CLAMP + | hal::Features::SAMPLER_ANISOTROPY + | hal::Features::FORMAT_BC + | hal::Features::PRECISE_OCCLUSION_QUERY + | hal::Features::SHADER_STORAGE_BUFFER_ARRAY_DYNAMIC_INDEXING + | hal::Features::VERTEX_STORES_AND_ATOMICS + | hal::Features::FRAGMENT_STORES_AND_ATOMICS + | if self.shared.private_caps.dual_source_blending { + hal::Features::DUAL_SRC_BLENDING + } else { + hal::Features::empty() + } + | hal::Features::INSTANCE_RATE + | hal::Features::SEPARATE_STENCIL_REF_VALUES + | if self.shared.private_caps.expose_line_mode { + hal::Features::NON_FILL_POLYGON_MODE + } else { + hal::Features::empty() + } + | hal::Features::SHADER_CLIP_DISTANCE + } + + fn limits(&self) -> hal::Limits { + let pc = &self.shared.private_caps; + hal::Limits { + max_image_1d_size: pc.max_texture_size as _, + max_image_2d_size: pc.max_texture_size as _, + max_image_3d_size: 
pc.max_texture_3d_size as _, + max_image_cube_size: pc.max_texture_size as _, + max_image_array_layers: pc.max_texture_layers as _, + max_texel_elements: (pc.max_texture_size * pc.max_texture_size) as usize, + max_uniform_buffer_range: pc.max_buffer_size, + max_storage_buffer_range: pc.max_buffer_size, + + max_push_constants_size: 0x1000, + max_sampler_allocation_count: !0, + max_bound_descriptor_sets: MAX_BOUND_DESCRIPTOR_SETS as _, + max_descriptor_set_samplers: pc.max_samplers_per_stage as usize * SHADER_STAGE_COUNT, + max_descriptor_set_uniform_buffers: pc.max_buffers_per_stage as usize * SHADER_STAGE_COUNT, + max_descriptor_set_storage_buffers: pc.max_buffers_per_stage as usize * SHADER_STAGE_COUNT, + max_descriptor_set_sampled_images: pc.max_textures_per_stage as usize * SHADER_STAGE_COUNT, + max_descriptor_set_storage_images: pc.max_textures_per_stage as usize * SHADER_STAGE_COUNT, + max_descriptor_set_input_attachments: pc.max_textures_per_stage as usize * SHADER_STAGE_COUNT, + max_fragment_input_components: pc.max_fragment_input_components as usize, + max_framebuffer_layers: 2048, + max_memory_allocation_count: 4096, + + max_per_stage_descriptor_samplers: pc.max_samplers_per_stage as usize, + max_per_stage_descriptor_uniform_buffers: pc.max_buffers_per_stage as usize, + max_per_stage_descriptor_storage_buffers: pc.max_buffers_per_stage as usize, + max_per_stage_descriptor_sampled_images: pc.max_textures_per_stage as usize, + max_per_stage_descriptor_storage_images: pc.max_textures_per_stage as usize, + max_per_stage_descriptor_input_attachments: pc.max_textures_per_stage as usize, + max_per_stage_resources: 0x100, + + max_patch_size: 0, + + + + max_viewports: 1, + max_viewport_dimensions: [pc.max_texture_size as _; 2], + max_framebuffer_extent: hal::image::Extent { + + width: pc.max_texture_size as _, + height: pc.max_texture_size as _, + depth: pc.max_texture_layers as _, + }, + + optimal_buffer_copy_offset_alignment: pc.buffer_alignment, + optimal_buffer_copy_pitch_alignment: 4, + min_texel_buffer_offset_alignment: pc.buffer_alignment, + min_uniform_buffer_offset_alignment: pc.buffer_alignment, + min_storage_buffer_offset_alignment: pc.buffer_alignment, + + max_compute_work_group_count: [16; 3], + max_compute_work_group_size: [64; 3], + + max_vertex_input_attributes: 31, + max_vertex_input_bindings: 31, + max_vertex_input_attribute_offset: 255, + max_vertex_input_binding_stride: 256, + max_vertex_output_components: pc.max_fragment_input_components as usize, + + framebuffer_color_sample_counts: 0b101, + framebuffer_depth_sample_counts: 0b101, + framebuffer_stencil_sample_counts: 0b101, + max_color_attachments: MAX_COLOR_ATTACHMENTS, + + buffer_image_granularity: 1, + + + non_coherent_atom_size: 4, + max_sampler_anisotropy: 16., + min_vertex_input_binding_stride_alignment: STRIDE_GRANULARITY as u64, + + ..hal::Limits::default() + } + } +} + +pub struct LanguageVersion { + pub major: u8, + pub minor: u8, +} + +impl LanguageVersion { + pub fn new(major: u8, minor: u8) -> Self { + LanguageVersion { major, minor } + } +} + +impl Device { + fn _is_heap_coherent(&self, heap: &n::MemoryHeap) -> bool { + match *heap { + n::MemoryHeap::Private => false, + n::MemoryHeap::Public(memory_type, _) => self.memory_types[memory_type.0] + .properties + .contains(Properties::COHERENT), + n::MemoryHeap::Native(ref heap) => heap.storage_mode() == MTLStorageMode::Shared, + } + } + + pub fn create_shader_library_from_file
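
The `map_format_properties` arms above all follow one tiered-fallback pattern: a base `ImageFeature` set (`color_if`, `compressed_if`, or `depth_if`) is widened per format, with match guards on `PrivateCapabilities` flags probed once per GPU at adapter creation, so the most capable tier matches first and weaker tiers fall through to later arms. A minimal, self-contained sketch of that gating pattern follows; `Caps`, `Features`, and `Format` here are illustrative stand-ins, not gfx-backend-metal's real types.

```rust
// Sketch of the tiered capability fallback used by map_format_properties.
// All names below are stand-ins for PrivateCapabilities / ImageFeature.

#[derive(Clone, Copy)]
struct Features(u32);

impl Features {
    const NONE: Features = Features(0);
    const SAMPLED: Features = Features(1 << 0);
    const STORAGE: Features = Features(1 << 1);
    const COLOR_ATTACHMENT: Features = Features(1 << 2);

    fn contains(self, other: Features) -> bool {
        self.0 & other.0 == other.0
    }
}

impl std::ops::BitOr for Features {
    type Output = Features;
    fn bitor(self, rhs: Features) -> Features {
        Features(self.0 | rhs.0)
    }
}

// Flags probed once per GPU at adapter creation time.
struct Caps {
    format_r32_all: bool,      // tier that also supports shader storage writes
    format_r32_no_write: bool, // read-only fallback tier
}

enum Format {
    R32Uint,
}

impl Caps {
    fn map_format_features(&self, format: Format) -> Features {
        // Base feature set shared by the color formats.
        let color_if = Features::SAMPLED | Features::COLOR_ATTACHMENT;
        match format {
            // Guards run top to bottom: most capable tier first,
            // later arms are fallbacks for weaker GPU families.
            Format::R32Uint if self.format_r32_all => color_if | Features::STORAGE,
            Format::R32Uint if self.format_r32_no_write => color_if,
            Format::R32Uint => Features::NONE,
        }
    }
}

fn main() {
    let caps = Caps { format_r32_all: false, format_r32_no_write: true };
    let feats = caps.map_format_features(Format::R32Uint);
    assert!(feats.contains(Features::COLOR_ATTACHMENT));
    assert!(!feats.contains(Features::STORAGE)); // no storage writes on this tier
}
```
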
, bool)> { - qinfo!([self] "read_response_headers from stream {}.", stream_id); + qinfo!([self], "read_response_headers from stream {}.", stream_id); let transaction = self .base_handler .transactions @@ -167,7 +172,7 @@ impl Http3Client { stream_id: u64, buf: &mut [u8], ) -> Res<(usize, bool)> { - qinfo!([self] "read_data from stream {}.", stream_id); + qinfo!([self], "read_data from stream {}.", stream_id); let transaction = self .base_handler .transactions @@ -219,17 +224,17 @@ impl Http3Client { } pub fn process(&mut self, dgram: Option, now: Instant) -> Output { - qtrace!([self] "Process."); + qtrace!([self], "Process."); self.base_handler.process(dgram, now) } pub fn process_input(&mut self, dgram: Datagram, now: Instant) { - qtrace!([self] "Process input."); + qtrace!([self], "Process input."); self.base_handler.process_input(dgram, now); } pub fn process_timer(&mut self, now: Instant) { - qtrace!([self] "Process timer."); + qtrace!([self], "Process timer."); self.base_handler.process_timer(now); } @@ -238,13 +243,13 @@ impl Http3Client { } pub fn process_http3(&mut self, now: Instant) { - qtrace!([self] "Process http3 internal."); + qtrace!([self], "Process http3 internal."); self.base_handler.process_http3(now); } pub fn process_output(&mut self, now: Instant) -> Output { - qtrace!([self] "Process output."); + qtrace!([self], "Process output."); self.base_handler.process_output(now) } } diff --git a/third_party/rust/neqo-http3/src/connection_server.rs b/third_party/rust/neqo-http3/src/connection_server.rs index 23cbc356ac8e..081c3bf07f5d 100644 --- a/third_party/rust/neqo-http3/src/connection_server.rs +++ b/third_party/rust/neqo-http3/src/connection_server.rs @@ -51,17 +51,17 @@ impl Http3Server { } pub fn process(&mut self, dgram: Option, now: Instant) -> Output { - qtrace!([self] "Process."); + qtrace!([self], "Process."); self.base_handler.process(dgram, now) } pub fn process_input(&mut self, dgram: Datagram, now: Instant) { - qtrace!([self] "Process input."); + qtrace!([self], "Process input."); self.base_handler.process_input(dgram, now); } pub fn process_timer(&mut self, now: Instant) { - qtrace!([self] "Process timer."); + qtrace!([self], "Process timer."); self.base_handler.process_timer(now); } @@ -70,17 +70,17 @@ impl Http3Server { } pub fn process_http3(&mut self, now: Instant) { - qtrace!([self] "Process http3 internal."); + qtrace!([self], "Process http3 internal."); self.base_handler.process_http3(now); } pub fn process_output(&mut self, now: Instant) -> Output { - qtrace!([self] "Process output."); + qtrace!([self], "Process output."); self.base_handler.process_output(now) } pub fn close(&mut self, now: Instant, error: AppError, msg: &str) { - qinfo!([self] "Close connection."); + qinfo!([self], "Close connection."); self.base_handler.close(now, error, msg); } @@ -107,7 +107,7 @@ impl Http3Server { } pub fn set_response(&mut self, stream_id: u64, headers: &[Header], data: Vec) -> Res<()> { - qinfo!([self] "Set new respons for stream {}.", stream_id); + qinfo!([self], "Set new respons for stream {}.", stream_id); self.base_handler .transactions .get_mut(&stream_id) @@ -119,12 +119,17 @@ impl Http3Server { } pub fn stream_stop_sending(&mut self, stream_id: u64, app_error: AppError) -> Res<()> { - qdebug!([self] "stop sending stream_id:{} error:{}.", stream_id, app_error); + qdebug!( + [self], + "stop sending stream_id:{} error:{}.", + stream_id, + app_error + ); self.base_handler.stream_stop_sending(stream_id, app_error) } pub fn stream_reset(&mut self, stream_id: 
u64, app_error: AppError) -> Res<()> { - qdebug!([self] "reset stream_id:{} error:{}.", stream_id, app_error); + qdebug!([self], "reset stream_id:{} error:{}.", stream_id, app_error); self.base_handler.stream_reset(stream_id, app_error) } } diff --git a/third_party/rust/neqo-http3/src/control_stream_local.rs b/third_party/rust/neqo-http3/src/control_stream_local.rs index c4e776f7ccdc..e0ff5621ee67 100644 --- a/third_party/rust/neqo-http3/src/control_stream_local.rs +++ b/third_party/rust/neqo-http3/src/control_stream_local.rs @@ -34,7 +34,7 @@ impl ControlStreamLocal { pub fn send(&mut self, conn: &mut Connection) -> Res<()> { if let Some(stream_id) = self.stream_id { if !self.buf.is_empty() { - qtrace!([self] "sending data."); + qtrace!([self], "sending data."); let sent = conn.stream_send(stream_id, &self.buf[..])?; if sent == self.buf.len() { self.buf.clear(); @@ -48,7 +48,7 @@ impl ControlStreamLocal { } pub fn create(&mut self, conn: &mut Connection) -> Res<()> { - qtrace!([self] "Create a control stream."); + qtrace!([self], "Create a control stream."); self.stream_id = Some(conn.stream_create(StreamType::UniDi)?); let mut enc = Encoder::default(); enc.encode_varint(HTTP3_UNI_STREAM_TYPE_CONTROL as u64); diff --git a/third_party/rust/neqo-http3/src/control_stream_remote.rs b/third_party/rust/neqo-http3/src/control_stream_remote.rs index c494b7bdfe08..d08368510e8b 100644 --- a/third_party/rust/neqo-http3/src/control_stream_remote.rs +++ b/third_party/rust/neqo-http3/src/control_stream_remote.rs @@ -33,9 +33,9 @@ impl ControlStreamRemote { } pub fn add_remote_stream(&mut self, stream_id: u64) -> Res<()> { - qinfo!([self] "A new control stream {}.", stream_id); + qinfo!([self], "A new control stream {}.", stream_id); if self.stream_id.is_some() { - qdebug!([self] "A control stream already exists"); + qdebug!([self], "A control stream already exists"); return Err(Error::HttpStreamCreationError); } self.stream_id = Some(stream_id); @@ -45,7 +45,7 @@ impl ControlStreamRemote { pub fn receive_if_this_stream(&mut self, conn: &mut Connection, stream_id: u64) -> Res { if let Some(id) = self.stream_id { if id == stream_id { - qdebug!([self] "Receiving data."); + qdebug!([self], "Receiving data."); self.fin = self.frame_reader.receive(conn, stream_id)?; return Ok(true); } diff --git a/third_party/rust/neqo-http3/src/hframe.rs b/third_party/rust/neqo-http3/src/hframe.rs index 164e261feb25..4b689c052958 100644 --- a/third_party/rust/neqo-http3/src/hframe.rs +++ b/third_party/rust/neqo-http3/src/hframe.rs @@ -213,7 +213,7 @@ impl HFrameReader { let fin; let mut input = match conn.stream_recv(stream_id, &mut buf[..]) { Ok((0, true)) => { - qtrace!([conn] "HFrameReader::receive: stream has been closed"); + qtrace!([conn], "HFrameReader::receive: stream has been closed"); break match self.state { HFrameReaderState::BeforeFrame => Ok(true), _ => Err(Error::HttpFrameError), @@ -221,12 +221,22 @@ impl HFrameReader { } Ok((0, false)) => break Ok(false), Ok((amount, f)) => { - qtrace!([conn] "HFrameReader::receive: reading {} byte, fin={}", amount, f); + qtrace!( + [conn], + "HFrameReader::receive: reading {} byte, fin={}", + amount, + f + ); fin = f; Decoder::from(&buf[..amount]) } Err(e) => { - qdebug!([conn] "HFrameReader::receive: error reading data from stream {}: {:?}", stream_id, e); + qdebug!( + [conn], + "HFrameReader::receive: error reading data from stream {}: {:?}", + stream_id, + e + ); break Err(e.into()); } }; @@ -238,7 +248,7 @@ impl HFrameReader { match self.state { 
HFrameReaderState::BeforeFrame | HFrameReaderState::GetType => match progress { IncrementalDecoderResult::Uint(v) => { - qtrace!([conn] "HFrameReader::receive: read frame type {}", v); + qtrace!([conn], "HFrameReader::receive: read frame type {}", v); self.hframe_type = v; self.decoder = IncrementalDecoder::decode_varint(); self.state = HFrameReaderState::GetLength; @@ -252,7 +262,12 @@ impl HFrameReader { HFrameReaderState::GetLength => { match progress { IncrementalDecoderResult::Uint(len) => { - qtrace!([conn] "HFrameReader::receive: frame type {} length {}", self.hframe_type, len); + qtrace!( + [conn], + "HFrameReader::receive: frame type {} length {}", + self.hframe_type, + len + ); self.hframe_len = len; self.state = match self.hframe_type { @@ -310,7 +325,12 @@ impl HFrameReader { HFrameReaderState::GetData => { match progress { IncrementalDecoderResult::Buffer(data) => { - qtrace!([conn] "received frame {}: {}", self.hframe_type, hex(&data[..])); + qtrace!( + [conn], + "received frame {}: {}", + self.hframe_type, + hex(&data[..]) + ); self.payload = data; self.state = HFrameReaderState::Done; } diff --git a/third_party/rust/neqo-http3/src/stream_type_reader.rs b/third_party/rust/neqo-http3/src/stream_type_reader.rs index 57bfaa0beb57..1820bcff5e18 100644 --- a/third_party/rust/neqo-http3/src/stream_type_reader.rs +++ b/third_party/rust/neqo-http3/src/stream_type_reader.rs @@ -46,7 +46,12 @@ impl NewStreamTypeReader { } } Err(e) => { - qdebug!([conn] "Error reading stream type for stream {}: {:?}", stream_id, e); + qdebug!( + [conn], + "Error reading stream type for stream {}: {:?}", + stream_id, + e + ); self.fin = true; return None; } diff --git a/third_party/rust/neqo-http3/src/transaction_client.rs b/third_party/rust/neqo-http3/src/transaction_client.rs index 8fb0abd81e2c..90bdb6a51cf5 100644 --- a/third_party/rust/neqo-http3/src/transaction_client.rs +++ b/third_party/rust/neqo-http3/src/transaction_client.rs @@ -58,7 +58,7 @@ impl Request { return; } - qinfo!([self] "Encoding headers for {}/{}", self.host, self.path); + qinfo!([self], "Encoding headers for {}/{}", self.host, self.path); let encoded_headers = encoder.encode_header_block(&self.headers, stream_id); let f = HFrame::Headers { len: encoded_headers.len() as u64, @@ -83,10 +83,10 @@ impl Request { self.ensure_encoded(encoder, stream_id); if let Some(buf) = &mut self.buf { let sent = conn.stream_send(stream_id, &buf)?; - qinfo!([label] "{} bytes sent", sent); + qinfo!([label], "{} bytes sent", sent); if sent == buf.len() { - qinfo!([label] "done sending request"); + qinfo!([label], "done sending request"); Ok(true) } else { let b = buf.split_off(sent); @@ -196,7 +196,12 @@ impl TransactionClient { } pub fn send_request_body(&mut self, conn: &mut Connection, buf: &[u8]) -> Res { - qinfo!([self] "send_request_body: send_state={:?} len={}", self.send_state, buf.len()); + qinfo!( + [self], + "send_request_body: send_state={:?} len={}", + self.send_state, + buf.len() + ); match self.send_state { TransactionSendState::SendingHeaders { .. 
} => Ok(0), TransactionSendState::SendingData => { @@ -218,7 +223,12 @@ impl TransactionClient { to_send = min(buf.len(), available - 9); } - qinfo!([self] "send_request_body: available={} to_send={}.", available, to_send); + qinfo!( + [self], + "send_request_body: available={} to_send={}.", + available, + to_send + ); let data_frame = HFrame::Data { len: to_send as u64, @@ -241,7 +251,12 @@ impl TransactionClient { } fn handle_frame_in_state_waiting_for_headers(&mut self, frame: HFrame, fin: bool) -> Res<()> { - qinfo!([self] "A new frame has been received: {:?}; state={:?}", frame, self.recv_state); + qinfo!( + [self], + "A new frame has been received: {:?}; state={:?}", + frame, + self.recv_state + ); match frame { HFrame::Headers { len } => self.handle_headers_frame(len, fin), HFrame::PushPromise { .. } => Err(Error::HttpIdError), @@ -265,7 +280,12 @@ impl TransactionClient { } fn handle_frame_in_state_waiting_for_data(&mut self, frame: HFrame, fin: bool) -> Res<()> { - qinfo!([self] "A new frame has been received: {:?}; state={:?}", frame, self.recv_state); + qinfo!( + [self], + "A new frame has been received: {:?}; state={:?}", + frame, + self.recv_state + ); match frame { HFrame::Data { len } => self.handle_data_frame(len, fin), HFrame::PushPromise { .. } => Err(Error::HttpIdError), @@ -302,7 +322,11 @@ impl TransactionClient { fn set_state_to_close_pending(&mut self) { // Stream has received fin. Depending on headers state set header_ready // or data_readable event so that app can pick up the fin. - qdebug!([self] "set_state_to_close_pending: response_headers_state={:?}", self.response_headers_state); + qdebug!( + [self], + "set_state_to_close_pending: response_headers_state={:?}", + self.response_headers_state + ); match self.response_headers_state { ResponseHeadersState::NoHeaders => { self.conn_events.header_ready(self.stream_id); @@ -317,7 +341,7 @@ impl TransactionClient { } fn recv_frame_header(&mut self, conn: &mut Connection) -> Res> { - qtrace!([self] "receiving frame header"); + qtrace!([self], "receiving frame header"); let fin = self.frame_reader.receive(conn, self.stream_id)?; if !self.frame_reader.done() { if fin { @@ -329,7 +353,7 @@ impl TransactionClient { } Ok(None) } else { - qdebug!([self] "A new frame has been received."); + qdebug!([self], "A new frame has been received."); Ok(Some((self.frame_reader.get_frame()?, fin))) } } @@ -350,7 +374,7 @@ impl TransactionClient { } = self.recv_state { let (amount, fin) = conn.stream_recv(self.stream_id, &mut buf[*offset..])?; - qdebug!([label] "read_headers: read {} bytes fin={}.", amount, fin); + qdebug!([label], "read_headers: read {} bytes fin={}.", amount, fin); *offset += amount as usize; if *offset < buf.len() { if fin { @@ -361,7 +385,10 @@ impl TransactionClient { } // we have read the headers, try decoding them. - qinfo!([label] "read_headers: read all headers, try decoding them."); + qinfo!( + [label], + "read_headers: read all headers, try decoding them." + ); match decoder.decode_header_block(buf, self.stream_id)? 
{ Some(headers) => { self.add_headers(Some(headers))?; @@ -469,11 +496,11 @@ impl Http3Transaction for TransactionClient { if fin { conn.stream_close_send(self.stream_id)?; self.send_state = TransactionSendState::Closed; - qinfo!([label] "done sending request"); + qinfo!([label], "done sending request"); } else { self.send_state = TransactionSendState::SendingData; self.conn_events.data_writable(self.stream_id); - qinfo!([label] "change to state SendingData"); + qinfo!([label], "change to state SendingData"); } } } @@ -487,7 +514,12 @@ impl Http3Transaction for TransactionClient { String::new() }; loop { - qdebug!([label] "send_state={:?} recv_state={:?}.", self.send_state, self.recv_state); + qdebug!( + [label], + "send_state={:?} recv_state={:?}.", + self.send_state, + self.recv_state + ); match self.recv_state { TransactionRecvState::WaitingForResponseHeaders => { match self.recv_frame_header(conn)? { @@ -516,7 +548,7 @@ impl Http3Transaction for TransactionClient { } } None => { - qinfo!([self] "decoding header is blocked."); + qinfo!([self], "decoding header is blocked."); break Ok(()); } } diff --git a/third_party/rust/neqo-http3/src/transaction_server.rs b/third_party/rust/neqo-http3/src/transaction_server.rs index d5fcb0455cbb..cc9523d20a53 100644 --- a/third_party/rust/neqo-http3/src/transaction_server.rs +++ b/third_party/rust/neqo-http3/src/transaction_server.rs @@ -9,7 +9,7 @@ use crate::hframe::{HFrame, HFrameReader}; use crate::server_events::Http3ServerEvents; use crate::Header; use crate::{Error, Res}; -use neqo_common::{qdebug, qinfo, qtrace, Encoder}; +use neqo_common::{matches, qdebug, qinfo, qtrace, Encoder}; use neqo_qpack::decoder::QPackDecoder; use neqo_qpack::encoder::QPackEncoder; use neqo_transport::Connection; @@ -54,7 +54,7 @@ impl TransactionServer { } pub fn set_response(&mut self, headers: &[Header], data: Vec, encoder: &mut QPackEncoder) { - qdebug!([self] "Encoding headers"); + qdebug!([self], "Encoding headers"); let encoded_headers = encoder.encode_header_block(&headers, self.stream_id); let hframe = HFrame::Headers { len: encoded_headers.len() as u64, @@ -63,7 +63,7 @@ impl TransactionServer { hframe.encode(&mut d); d.encode(&encoded_headers); if !data.is_empty() { - qdebug!([self] "Encoding data"); + qdebug!([self], "Encoding data"); let d_frame = HFrame::Data { len: data.len() as u64, }; @@ -75,12 +75,12 @@ impl TransactionServer { } fn recv_frame_header(&mut self, conn: &mut Connection) -> Res<(Option, bool)> { - qtrace!([self] "receiving frame header"); + qtrace!([self], "receiving frame header"); let fin = self.frame_reader.receive(conn, self.stream_id)?; if !self.frame_reader.done() { Ok((None, fin)) } else { - qinfo!([self] "A new frame has been received."); + qinfo!([self], "A new frame has been received."); Ok((Some(self.frame_reader.get_frame()?), fin)) } } @@ -101,7 +101,7 @@ impl TransactionServer { } = self.recv_state { let (amount, fin) = conn.stream_recv(self.stream_id, &mut buf[*offset..])?; - qdebug!([label] "read_headers: read {} bytes fin={}.", amount, fin); + qdebug!([label], "read_headers: read {} bytes fin={}.", amount, fin); *offset += amount as usize; if *offset < buf.len() { if fin { @@ -112,7 +112,10 @@ impl TransactionServer { } // we have read the headers, try decoding them. - qinfo!([label] "read_headers: read all headers, try decoding them."); + qinfo!( + [label], + "read_headers: read all headers, try decoding them." + ); match decoder.decode_header_block(buf, self.stream_id)? 
{ Some(headers) => { self.conn_events.headers(self.stream_id, headers, fin); @@ -137,7 +140,7 @@ impl TransactionServer { } fn handle_frame_in_state_waiting_for_headers(&mut self, frame: HFrame, fin: bool) -> Res<()> { - qdebug!([self] "A new frame has been received: {:?}", frame); + qdebug!([self], "A new frame has been received: {:?}", frame); match frame { HFrame::Headers { len } => self.handle_headers_frame(len, fin), _ => Err(Error::HttpFrameUnexpected), @@ -145,7 +148,7 @@ impl TransactionServer { } fn handle_frame_in_state_waiting_for_data(&mut self, frame: HFrame, fin: bool) -> Res<()> { - qdebug!([self] "A new frame has been received: {:?}", frame); + qdebug!([self], "A new frame has been received: {:?}", frame); match frame { HFrame::Data { len } => self.handle_data_frame(len, fin), _ => Err(Error::HttpFrameUnexpected), @@ -153,7 +156,7 @@ impl TransactionServer { } fn handle_headers_frame(&mut self, len: u64, fin: bool) -> Res<()> { - qinfo!([self] "A new header frame len={} fin={}", len, fin); + qinfo!([self], "A new header frame len={} fin={}", len, fin); if len == 0 { self.conn_events.headers(self.stream_id, Vec::new(), fin); } else { @@ -169,7 +172,7 @@ impl TransactionServer { } fn handle_data_frame(&mut self, len: u64, fin: bool) -> Res<()> { - qinfo!([self] "A new data frame len={} fin={}", len, fin); + qinfo!([self], "A new data frame len={} fin={}", len, fin); if len > 0 { if fin { return Err(Error::HttpFrameError); @@ -193,7 +196,7 @@ impl ::std::fmt::Display for TransactionServer { impl Http3Transaction for TransactionServer { fn send(&mut self, conn: &mut Connection, _encoder: &mut QPackEncoder) -> Res<()> { - qtrace!([self] "Sending response."); + qtrace!([self], "Sending response."); let label = if ::log::log_enabled!(::log::Level::Debug) { format!("{}", self) } else { @@ -201,11 +204,11 @@ impl Http3Transaction for TransactionServer { }; if let TransactionSendState::SendingResponse { ref mut buf } = self.send_state { let sent = conn.stream_send(self.stream_id, &buf[..])?; - qinfo!([label] "{} bytes sent", sent); + qinfo!([label], "{} bytes sent", sent); if sent == buf.len() { conn.stream_close_send(self.stream_id)?; self.send_state = TransactionSendState::Closed; - qinfo!([label] "done sending request"); + qinfo!([label], "done sending request"); } else { let mut b = buf.split_off(sent); mem::swap(buf, &mut b); @@ -223,7 +226,11 @@ impl Http3Transaction for TransactionServer { }; loop { - qtrace!([label] "[recv_state={:?}] receiving data.", self.recv_state); + qtrace!( + [label], + "[recv_state={:?}] receiving data.", + self.recv_state + ); match self.recv_state { TransactionRecvState::WaitingForHeaders => { let (f, fin) = self.recv_frame_header(conn)?; @@ -258,7 +265,7 @@ impl Http3Transaction for TransactionServer { } } None => { - qinfo!([self] "decoding header is blocked."); + qinfo!([self], "decoding header is blocked."); return Ok(()); } } @@ -323,11 +330,7 @@ impl Http3Transaction for TransactionServer { } fn has_data_to_send(&self) -> bool { - if let TransactionSendState::SendingResponse { .. } = self.send_state { - true - } else { - false - } + matches!(self.send_state, TransactionSendState::SendingResponse { .. 
}) } fn is_state_sending_data(&self) -> bool { diff --git a/third_party/rust/neqo-qpack/.cargo-checksum.json b/third_party/rust/neqo-qpack/.cargo-checksum.json index e088b23f4cca..0d8e19eb463c 100644 --- a/third_party/rust/neqo-qpack/.cargo-checksum.json +++ b/third_party/rust/neqo-qpack/.cargo-checksum.json @@ -1 +1 @@ -{"files":{"Cargo.toml":"1b0641ab11933da85b807fc64e9b9c19d49a53602359e8b51722d80ac3774a63","src/decoder.rs":"bbbeae34f8d1d42d51fd344a4d558aec1ddaa3c3bb41b0428796c316c160a778","src/encoder.rs":"854864b93d63b127659c5ed85822fe9452eb5f40a362cb2a19dc8273a7c2e81e","src/huffman.rs":"720eedace45205098a0b2210c876906ce15b7be469a799e75e70baafac8adee8","src/huffman_decode_helper.rs":"e4734353591770dfe9a9047b0be5d9068150433e9cea8cad029444b42b0afa39","src/huffman_table.rs":"06fea766a6276ac56c7ee0326faed800a742c15fda1f33bf2513e6cc6a5e6d27","src/lib.rs":"b223e4a709a9cc144d36777d2bcf78d81780bdbc09e9f2f09c7bffa110a098da","src/qpack_helper.rs":"200ab8bcb60728e3bcacf25b7006fa54b544458bfee5e66e09fa472a614347fc","src/qpack_send_buf.rs":"471e3b0af9f8783aa1bfe11a1959bf5694e62bc2d8e1cf783c933af81e3f3cf9","src/static_table.rs":"fda9d5c6f38f94b0bf92d3afdf8432dce6e27e189736596e16727090c77b78ec","src/table.rs":"f4f09692bf6ec863b0f066c88837d99f59a1fc4a8ca61bee4ed76d45a77c3cc4"},"package":null} \ No newline at end of file +{"files":{"Cargo.toml":"1b0641ab11933da85b807fc64e9b9c19d49a53602359e8b51722d80ac3774a63","src/decoder.rs":"18f08a510d8a63012146eb0bb063218bf691a720624f521a29dc8cc3b1e52237","src/encoder.rs":"78da509611b5869d320795c42bef944b6499c0f207c73818c1908f1a1cf001fc","src/huffman.rs":"720eedace45205098a0b2210c876906ce15b7be469a799e75e70baafac8adee8","src/huffman_decode_helper.rs":"e4734353591770dfe9a9047b0be5d9068150433e9cea8cad029444b42b0afa39","src/huffman_table.rs":"06fea766a6276ac56c7ee0326faed800a742c15fda1f33bf2513e6cc6a5e6d27","src/lib.rs":"b223e4a709a9cc144d36777d2bcf78d81780bdbc09e9f2f09c7bffa110a098da","src/qpack_helper.rs":"200ab8bcb60728e3bcacf25b7006fa54b544458bfee5e66e09fa472a614347fc","src/qpack_send_buf.rs":"471e3b0af9f8783aa1bfe11a1959bf5694e62bc2d8e1cf783c933af81e3f3cf9","src/static_table.rs":"fda9d5c6f38f94b0bf92d3afdf8432dce6e27e189736596e16727090c77b78ec","src/table.rs":"f4f09692bf6ec863b0f066c88837d99f59a1fc4a8ca61bee4ed76d45a77c3cc4"},"package":null} \ No newline at end of file diff --git a/third_party/rust/neqo-qpack/src/decoder.rs b/third_party/rust/neqo-qpack/src/decoder.rs index a3386b0b05c3..dd2ed50e8052 100644 --- a/third_party/rust/neqo-qpack/src/decoder.rs +++ b/third_party/rust/neqo-qpack/src/decoder.rs @@ -133,7 +133,7 @@ impl QPackDecoder { #[allow(clippy::useless_let_if_seq)] fn read_instructions(&mut self, conn: &mut Connection, stream_id: u64) -> Res<()> { let label = self.to_string(); - qdebug!([self] "reading instructions"); + qdebug!([self], "reading instructions"); loop { match self.state { QPackDecoderState::ReadInstruction => { @@ -209,7 +209,7 @@ impl QPackDecoder { conn, stream_id, &mut v, &mut cnt, 3, b[0], true, )?; if done { - qdebug!([label] "received instruction - duplicate index={}", v); + qdebug!([label], "received instruction - duplicate index={}", v); self.table.duplicate(v)?; self.total_num_of_inserts += 1; self.increment += 1; @@ -310,7 +310,7 @@ impl QPackDecoder { } else { mem::swap(&mut value_to_insert, value); } - qdebug!([label] "received instruction - insert with name ref index={} static={} value={:x?}", name_index, name_static_table, value_to_insert); + qdebug!([label], "received instruction - insert with name ref index={} static={} 
value={:x?}", name_index, name_static_table, value_to_insert); self.table.insert_with_name_ref( *name_static_table, *name_index, @@ -430,7 +430,7 @@ impl QPackDecoder { } else { mem::swap(&mut value_to_insert, value); } - qdebug!([label] "received instruction - insert with name literal name={:x?} value={:x?}", name_to_insert, value_to_insert); + qdebug!([label], "received instruction - insert with name literal name={:x?} value={:x?}", name_to_insert, value_to_insert); self.table.insert(name_to_insert, value_to_insert)?; self.total_num_of_inserts += 1; self.increment += 1; @@ -450,7 +450,7 @@ impl QPackDecoder { conn, stream_id, index, cnt, 0, 0x0, false, )?; if done { - qdebug!([label] "received instruction - duplicate index={}", index); + qdebug!([label], "received instruction - duplicate index={}", index); self.table.duplicate(*index)?; self.total_num_of_inserts += 1; self.increment += 1; @@ -481,7 +481,7 @@ impl QPackDecoder { } pub fn set_capacity(&mut self, cap: u64) -> Res<()> { - qdebug!([self] "received instruction capacity cap={}", cap); + qdebug!([self], "received instruction capacity cap={}", cap); if cap > u64::from(self.max_table_size) { return Err(Error::EncoderStreamError); } @@ -512,7 +512,7 @@ impl QPackDecoder { match conn.stream_send(stream_id, &self.send_buf[..]) { Err(_) => Err(Error::DecoderStreamError), Ok(r) => { - qdebug!([self] "{} bytes sent.", r); + qdebug!([self], "{} bytes sent.", r); self.send_buf.read(r as usize); Ok(()) } @@ -524,19 +524,19 @@ impl QPackDecoder { pub fn decode_header_block(&mut self, buf: &[u8], stream_id: u64) -> Res<Option<Vec<Header>>> { - qdebug!([self] "decode header block."); + qdebug!([self], "decode header block."); let mut reader = BufWrapper { buf, offset: 0 }; let (req_inserts, base) = self.read_base(&mut reader)?; qdebug!( - [self] + [self], "requested inserts count is {} and base is {}", req_inserts, base ); if self.table.base() < req_inserts { qdebug!( - [self] + [self], "stream is blocked stream_id={} requested inserts count={}", stream_id, req_inserts @@ -555,7 +555,7 @@ impl QPackDecoder { if req_inserts != 0 { self.header_ack(stream_id); } - qdebug!([self] "done decoding header block."); + qdebug!([self], "done decoding header block."); break Ok(Some(h)); } @@ -594,7 +594,7 @@ impl QPackDecoder { fn read_indexed(&self, buf: &mut BufWrapper, base: u64) -> Res<Header>
{ let static_table = buf.peek()? & 0x40 != 0; let index = read_prefixed_encoded_int_slice(buf, 2)?; - qdebug!([self] "decoder indexed {} static={}.", index, static_table); + qdebug!([self], "decoder indexed {} static={}.", index, static_table); if static_table { match self.table.get_static(index) { Ok(entry) => Ok((to_string(entry.name())?, to_string(entry.value())?)), @@ -609,7 +609,7 @@ impl QPackDecoder { fn read_post_base_index(&self, buf: &mut BufWrapper, base: u64) -> Res<Header>
{ let index = read_prefixed_encoded_int_slice(buf, 4)?; - qdebug!([self] "decode post-based {}.", index); + qdebug!([self], "decode post-based {}.", index); if let Ok(entry) = self.table.get_dynamic(index, base, true) { Ok((to_string(entry.name())?, to_string(entry.value())?)) } else { @@ -618,7 +618,7 @@ impl QPackDecoder { } fn read_literal_with_name_ref(&self, buf: &mut BufWrapper, base: u64) -> Res<Header>
{ - qdebug!([self] "read literal with name reference."); + qdebug!([self], "read literal with name reference."); let static_table = buf.peek()? & 0x10 != 0; let index = read_prefixed_encoded_int_slice(buf, 4)?; @@ -645,7 +645,7 @@ impl QPackDecoder { buf.slice(value_len)?.to_vec() }; qdebug!( - [self] + [self], "name index={} static={} value={:x?}.", index, static_table, @@ -655,7 +655,7 @@ impl QPackDecoder { } fn read_literal_with_post_base_name_ref(&self, buf: &mut BufWrapper, base: u64) -> Res<Header>
{ - qdebug!([self] "decoder literal with post-based index."); + qdebug!([self], "decoder literal with post-based index."); let index = read_prefixed_encoded_int_slice(buf, 5)?; let name: Vec<u8>; @@ -674,12 +674,12 @@ impl QPackDecoder { buf.slice(value_len)?.to_vec() }; - qdebug!([self] "name={:x?} value={:x?}.", name, value); + qdebug!([self], "name={:x?} value={:x?}.", name, value); Ok((to_string(&name)?, to_string(&value)?)) } fn read_literal_with_name_literal(&self, buf: &mut BufWrapper) -> Res<Header>
{ - qdebug!([self] "decode literal with name literal."); + qdebug!([self], "decode literal with name literal."); let name_is_huffman = buf.peek()? & 0x08 != 0; @@ -700,7 +700,7 @@ impl QPackDecoder { buf.slice(value_len)?.to_vec() }; - qdebug!([self] "name={:x?} value={:x?}.", name, value); + qdebug!([self], "name={:x?} value={:x?}.", name, value); Ok((to_string(&name)?, to_string(&value)?)) } diff --git a/third_party/rust/neqo-qpack/src/encoder.rs b/third_party/rust/neqo-qpack/src/encoder.rs index f835a6504b67..6f0da7f85bff 100644 --- a/third_party/rust/neqo-qpack/src/encoder.rs +++ b/third_party/rust/neqo-qpack/src/encoder.rs @@ -72,7 +72,7 @@ impl QPackEncoder { return Err(Error::EncoderStreamError); } - qdebug!([self] "Set max capacity to {}.", cap); + qdebug!([self], "Set max capacity to {}.", cap); self.max_entries = (cap as f64 / 32.0).floor() as u64; self.change_capacity(cap); @@ -83,7 +83,7 @@ impl QPackEncoder { if blocked_streams > (1 << 16) - 1 { return Err(Error::EncoderStreamError); } - qdebug!([self] "Set max blocked streams to {}.", blocked_streams); + qdebug!([self], "Set max blocked streams to {}.", blocked_streams); self.max_blocked_streams = blocked_streams as u16; Ok(()) } @@ -103,7 +103,7 @@ impl QPackEncoder { } fn read_instructions(&mut self, conn: &mut Connection, stream_id: u64) -> Res<()> { - qdebug!([self] "read a new instraction"); + qdebug!([self], "read a new instraction"); loop { match self.instruction_reader_current_inst { None => { @@ -174,7 +174,7 @@ impl QPackEncoder { fn call_instruction(&mut self) { if let Some(inst) = &self.instruction_reader_current_inst { - qdebug!([self] "call intruction {:?}", inst); + qdebug!([self], "call intruction {:?}", inst); match inst { DecoderInstructions::InsertCountIncrement => { self.table.increment_acked(self.instruction_reader_value); @@ -203,7 +203,7 @@ impl QPackEncoder { value: Vec<u8>, ) -> Res<()> { qdebug!( - [self] + [self], "insert with name reference {} from {} value={:x?}.", name_index, if name_static_table { @@ -226,7 +226,7 @@ impl QPackEncoder { } pub fn insert_with_name_literal(&mut self, name: Vec<u8>, value: Vec<u8>) -> Res<()> { - qdebug!([self] "insert name {:x?}, value={:x?}.", name, value); + qdebug!([self], "insert name {:x?}, value={:x?}.", name, value); self.table.insert(name, value)?; @@ -239,14 +239,14 @@ impl QPackEncoder { } pub fn duplicate(&mut self, index: u64) -> Res<()> { - qdebug!([self] "duplicate entry {}.", index); + qdebug!([self], "duplicate entry {}.", index); self.table.duplicate(index)?; self.send_buf.encode_prefixed_encoded_int(0x00, 3, index); Ok(()) } pub fn change_capacity(&mut self, cap: u64) { - qdebug!([self] "change capacity: {}", cap); + qdebug!([self], "change capacity: {}", cap); self.table.set_capacity(cap); self.send_buf.encode_prefixed_encoded_int(0x20, 3, cap); } @@ -258,7 +258,7 @@ impl QPackEncoder { match conn.stream_send(stream_id, &self.send_buf[..]) { Err(_) => Err(Error::EncoderStreamError), Ok(r) => { - qdebug!([self] "{} bytes sent.", r); + qdebug!([self], "{} bytes sent.", r); self.send_buf.read(r as usize); Ok(()) } @@ -269,7 +269,7 @@ impl QPackEncoder { } pub fn encode_header_block(&mut self, h: &[Header], stream_id: u64) -> QPData { - qdebug!([self] "encoding headers."); + qdebug!([self], "encoding headers."); let mut encoded_h = QPData::default(); let base = self.table.base(); let mut req_insert_cnt = 0; @@ -290,7 +290,7 @@ impl QPackEncoder { let (e_s, e_d, found_value) = self.table.lookup(&name, &value); if let Some(entry) = e_s { - qtrace!([label] "found a
static entry, value-match={}", found_value); + qtrace!([label], "found a static entry, value-match={}", found_value); can_use = true; index = entry.index(); value_as_well = found_value; @@ -300,7 +300,7 @@ impl QPackEncoder { index = entry.index(); can_use = index < acked_inserts_cnt || can_be_blocked; qtrace!( - [label] + [label], "found a dynamic entry - can_use={} value-match={},", can_use, found_value @@ -385,7 +385,7 @@ impl QPackEncoder { positive: bool, ) { qdebug!( - [self] + [self], "encode header block prefix req_insert_cnt={} delta={} (fix={}).", req_insert_cnt, delta, @@ -435,7 +435,7 @@ impl QPackEncoder { } fn encode_indexed(&self, buf: &mut QPData, is_static: bool, index: u64) { - qdebug!([self] "encode index {} (static={}).", index, is_static); + qdebug!([self], "encode index {} (static={}).", index, is_static); let prefix = if is_static { 0xc0 } else { 0x80 }; buf.encode_prefixed_encoded_int(prefix, 2, index); } @@ -448,7 +448,7 @@ impl QPackEncoder { value: &[u8], ) { qdebug!( - [self] + [self], "encode literal with name ref - index={}, static={}, value={:x?}", index, is_static, @@ -460,13 +460,13 @@ impl QPackEncoder { } fn encode_post_base_index(&self, buf: &mut QPData, index: u64) { - qdebug!([self] "encode post base index {}.", index); + qdebug!([self], "encode post base index {}.", index); buf.encode_prefixed_encoded_int(0x10, 4, index); } fn encode_literal_with_post_based_name_ref(&self, buf: &mut QPData, index: u64, value: &[u8]) { qdebug!( - [self] + [self], "encode literal with post base index - index={}, value={:x?}.", index, value @@ -477,7 +477,7 @@ impl QPackEncoder { fn encode_literal_with_name_literal(&self, buf: &mut QPData, name: &[u8], value: &[u8]) { qdebug!( - [self] + [self], "encode literal with name literal - name={:x?}, value={:x?}.", name, value diff --git a/third_party/rust/neqo-transport/.cargo-checksum.json b/third_party/rust/neqo-transport/.cargo-checksum.json index 29e1ac553193..2c15b5e8a523 100644 --- a/third_party/rust/neqo-transport/.cargo-checksum.json +++ b/third_party/rust/neqo-transport/.cargo-checksum.json @@ -1 +1 @@ 
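A note on the qpack hunks above: the `encode_prefixed_encoded_int` and `read_prefixed_encoded_int_slice` calls use the HPACK/QPACK prefixed-integer scheme (RFC 7541, Section 5.1). The sketch below is illustrative rather than the crate's actual code, and the parameter order (`first_byte`, number of flag bits, value) is inferred from the call sites above, such as `encode_prefixed_encoded_int(0x20, 3, cap)`:

```rust
// Sketch of QPACK/HPACK prefixed-integer encoding; not the neqo-qpack source.
// `flag_bits` high bits carry the instruction pattern, leaving an
// (8 - flag_bits)-bit integer prefix in the first byte.
fn encode_prefixed_int(buf: &mut Vec<u8>, first_byte: u8, flag_bits: u8, mut value: u64) {
    let prefix_len = 8 - flag_bits;
    let max_prefix = (1u64 << prefix_len) - 1;
    if value < max_prefix {
        // Fits entirely in the prefix bits of the first byte.
        buf.push(first_byte | value as u8);
        return;
    }
    // Prefix saturated: emit it, then the remainder in 7-bit groups,
    // least significant first, with the high bit as a continuation flag.
    buf.push(first_byte | max_prefix as u8);
    value -= max_prefix;
    while value >= 0x80 {
        buf.push(0x80 | (value & 0x7f) as u8);
        value >>= 7;
    }
    buf.push(value as u8);
}

fn main() {
    let mut buf = Vec::new();
    // "Set Dynamic Table Capacity" uses pattern 001xxxxx: 3 flag bits, 5-bit prefix.
    encode_prefixed_int(&mut buf, 0x20, 3, 1337);
    // 1337 with a 5-bit prefix is the worked example from RFC 7541.
    assert_eq!(buf, vec![0x3f, 0x9a, 0x0a]);
}
```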
-{"files":{"Cargo.toml":"82faeea7483f4181d323f7e6889d336606c52dd52d9e7cb900b9e51e6f3e3a83","TODO":"d759cb804b32fa9d96ea8d3574a3c4073da9fe6a0b02b708a0e22cce5a5b4a0f","src/connection.rs":"ced6150694f2bfda6f7ff1799087a52dabe07155401919fe84cbb3cdbf5ef307","src/crypto.rs":"2eccba8925d74b199f524686c70de3c6715fab95af7654e0657e58cfd338b640","src/dump.rs":"d3c327d303c1257094687f750b3d468eedf945b06166484bee75dc4cab7ca001","src/events.rs":"07b1fa18efc538b96736ebfedba929b4854dffd460e1250ae02dc79cc86bb310","src/flow_mgr.rs":"9ced2d8f9a8747a960795c80aad384bde7a9a25ed4ac3ebf4ea0ebf1133d8e7a","src/frame.rs":"97fc6b83a71e51106d5f947ca9855892bae657d50c7f3aa55e18c67c1bf71359","src/lib.rs":"784bc483e981c92a9f2301ed67cee62654405c6bdb854dadb1f2464b8e3ac5a5","src/packet.rs":"7fa31e596082d577397853b2043b4e0ac534218a7fdc6cfc52eeba1224948970","src/recovery.rs":"5fea4291a54ef6693ed7d2022b6e0574f94224421015714e090e38132b1f7cff","src/recv_stream.rs":"f092be0c94655938461d69cdbb146200681341145f15c57817873c64bc25c538","src/send_stream.rs":"f778f0904d1a944c934198af9138dc872912c45662338ae8562093f648735b3e","src/server.rs":"a3b44025a9ee5f1071ca75f5ae8f8e7e5070ae7b9c053737d1e090e7855907c1","src/stats.rs":"dca5afcb6252f3f32f494513f76964cffb945afd6d18b8669dea98a7aeed1689","src/stream_id.rs":"b3158cf2c6072da79bf6e77a31f71f2f3b970429221142a9ff1dd6cd07df2442","src/tparams.rs":"d35e2ec14958de74c315710bce80d8a72262f2437ddd1121fe28e752a1b4244d","src/tracking.rs":"9ce66170fa240db42a8c596f70846cc8e62925c30be6e622bc40b1a8f1ec73f2","tests/conn_vectors.rs":"5c381c8f1e0d126cb675bea302d1d118ea2ae988ec9c1536db2b18552074d845","tests/connection.rs":"195f52a876b9bd92f4368f301c98169a36438b5c9c1bb9ebaab146c7f8e0bb24","tests/server.rs":"8126ee12424487ee723d062169295e20858b041284a66c019173d39a7eaaa066"},"package":null} \ No newline at end of file 
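The single most pervasive change in this patch is mechanical: every neqo logging call goes from `qinfo!([self] "...")` to `qinfo!([self], "...")`. With the comma, the invocation parses like an ordinary argument list, which is presumably what allows rustfmt to reflow the long call sites seen throughout these hunks. A minimal sketch of a macro with such a `[tag],` arm, assuming a simplified shape rather than the real `neqo_common` implementation:

```rust
// Sketch only: a qinfo!-style macro with a dedicated arm for the `[tag],`
// form used throughout this patch. The real neqo macros also gate on log level.
macro_rules! qinfo_demo {
    ([$tag:expr], $($arg:tt)*) => {
        println!("[{}] {}", $tag, format_args!($($arg)*))
    };
    ($($arg:tt)*) => {
        println!("{}", format_args!($($arg)*))
    };
}

fn main() {
    let tag = "Connection(abcd)";
    qinfo_demo!([tag], "resumption token {}", "0123ab");
    qinfo_demo!("no tag: {} bytes sent", 42);
}
```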
+{"files":{"Cargo.toml":"82faeea7483f4181d323f7e6889d336606c52dd52d9e7cb900b9e51e6f3e3a83","TODO":"d759cb804b32fa9d96ea8d3574a3c4073da9fe6a0b02b708a0e22cce5a5b4a0f","src/connection.rs":"e48ff0d87312044dc45b9c0ad7a08bce37b4880e47a193e7bdfecefe498b6006","src/crypto.rs":"6d7cd99595274565bf0214a46c182d8d146f207b598000beb14fff06100c0ec6","src/dump.rs":"e4058d89acf50c3dea7e9f067e6fa1d8abfe0a65a77acf187f5012b53ed2568d","src/events.rs":"07b1fa18efc538b96736ebfedba929b4854dffd460e1250ae02dc79cc86bb310","src/flow_mgr.rs":"acd567f932be71ac4d342308369dd1620f6498fdb1c9a35a7325725702575c41","src/frame.rs":"13dea3b6b5781efc5acae3a6e95d441816fc030d4a7a06d578a94a9faa95d5db","src/lib.rs":"784bc483e981c92a9f2301ed67cee62654405c6bdb854dadb1f2464b8e3ac5a5","src/packet.rs":"7fa31e596082d577397853b2043b4e0ac534218a7fdc6cfc52eeba1224948970","src/recovery.rs":"f7bfbc605a03ba5c60a403fecbf843129d2868cb77e6378aa17c7c27c3dc6003","src/recv_stream.rs":"e3f8339aa8152587ec093ed426618a37a3b836fac56417fdf7822270bb5e87de","src/send_stream.rs":"a24e364f98b0741dd8d3e4e3dc67051606a5184659d09f53342e7148f3399a1d","src/server.rs":"affdb0b28000b983e052858f17307df54e8662a15c2ac75e8201e6d507f13d8d","src/stats.rs":"dca5afcb6252f3f32f494513f76964cffb945afd6d18b8669dea98a7aeed1689","src/stream_id.rs":"b3158cf2c6072da79bf6e77a31f71f2f3b970429221142a9ff1dd6cd07df2442","src/tparams.rs":"d35e2ec14958de74c315710bce80d8a72262f2437ddd1121fe28e752a1b4244d","src/tracking.rs":"49d2ca42ade8c2f9d119a0b96c25106a657f5e15022a1a19ddababb097944aea","tests/conn_vectors.rs":"5c381c8f1e0d126cb675bea302d1d118ea2ae988ec9c1536db2b18552074d845","tests/connection.rs":"195f52a876b9bd92f4368f301c98169a36438b5c9c1bb9ebaab146c7f8e0bb24","tests/server.rs":"8126ee12424487ee723d062169295e20858b041284a66c019173d39a7eaaa066"},"package":null} \ No newline at end of file diff --git a/third_party/rust/neqo-transport/src/connection.rs b/third_party/rust/neqo-transport/src/connection.rs index fd3cbcc276fa..d05b0f4b83e0 100644 --- a/third_party/rust/neqo-transport/src/connection.rs +++ b/third_party/rust/neqo-transport/src/connection.rs @@ -6,13 +6,11 @@ -#![allow(dead_code)] use std::cell::RefCell; use std::cmp::{max, Ordering}; use std::collections::HashMap; use std::convert::TryInto; use std::fmt::{self, Debug}; -use std::mem; use std::net::SocketAddr; use std::rc::Rc; use std::time::{Duration, Instant}; @@ -26,7 +24,7 @@ use neqo_crypto::{ SecretAgentInfo, Server, }; -use crate::crypto::Crypto; +use crate::crypto::{Crypto, CryptoState}; use crate::dump::*; use crate::events::{ConnectionEvent, ConnectionEvents}; use crate::flow_mgr::FlowMgr; @@ -155,6 +153,14 @@ impl Path { pub fn received_on(&self, d: &Datagram) -> bool { self.local == d.destination() && self.remote == d.source() } + + fn mtu(&self) -> usize { + if self.local.is_ipv4() { + 1252 + } else { + 1232 + } + } } #[derive(Clone, Debug, PartialEq)] @@ -349,7 +355,7 @@ impl Connection { remote_cid: dcid.clone(), }), ); - c.crypto.states[0] = Some(c.crypto.create_initial_state(Role::Client, &dcid)); + c.crypto.create_initial_state(Role::Client, &dcid); Ok(c) } @@ -437,14 +443,6 @@ impl Connection { self.tps.borrow_mut().local.set(key, value) } - fn pmtu(&self) -> usize { - match &self.path { - Some(path) if path.local.is_ipv4() => 1252, - Some(_) => 1232, - None => 1280, - } - } - pub(crate) fn original_connection_id(&mut self, odcid: &ConnectionId) { assert_eq!(self.role, Role::Server); @@ -495,21 +493,21 @@ impl Connection { pub fn set_resumption_token(&mut self, now: Instant, token: &[u8]) -> Res<()> { if 
self.state != State::Init { - qerror!([self] "set token in state {:?}", self.state); + qerror!([self], "set token in state {:?}", self.state); return Err(Error::ConnectionState); } - qinfo!([self] "resumption token {}", hex(token)); + qinfo!([self], "resumption token {}", hex(token)); let mut dec = Decoder::from(token); let tp_slice = match dec.decode_vvec() { Some(v) => v, _ => return Err(Error::InvalidResumptionToken), }; - qtrace!([self] " transport parameters {}", hex(&tp_slice)); + qtrace!([self], " transport parameters {}", hex(&tp_slice)); let mut dec_tp = Decoder::from(tp_slice); let tp = TransportParameters::decode(&mut dec_tp)?; let tok = dec.decode_remainder(); - qtrace!([self] " TLS token {}", hex(&tok)); + qtrace!([self], " TLS token {}", hex(&tok)); match self.crypto.tls { Agent::Client(ref mut c) => c.set_resumption_token(&tok)?, Agent::Server(_) => return Err(Error::WrongRole), @@ -533,7 +531,7 @@ impl Connection { }); enc.encode(extra); let records = s.send_ticket(now, &enc)?; - qinfo!([self] "send session ticket {}", hex(&enc)); + qinfo!([self], "send session ticket {}", hex(&enc)); self.buffer_crypto_records(records); Ok(()) } @@ -581,7 +579,7 @@ impl Connection { #[cfg(not(debug_assertions))] let msg = String::from(""); if let State::Closed(err) | State::Closing { error: err, .. } = &self.state { - qwarn!([self] "Closing again after error {:?}", err); + qwarn!([self], "Closing again after error {:?}", err); } else { self.set_state(State::Closing { error: ConnectionError::Transport(v.clone()), @@ -626,7 +624,7 @@ impl Connection { /// Get the time that we next need to be called back, relative to `now`. fn next_delay(&mut self, now: Instant) -> Duration { - self.loss_recovery_state = self.loss_recovery.get_timer(&self.state); + self.loss_recovery_state = self.loss_recovery.get_timer(); let mut delays = SmallVec::<[_; 4]>::new(); @@ -705,10 +703,10 @@ impl Connection { // assume that the DCID is OK. if hdr.dcid.len() < 8 { if token.is_empty() { - qinfo!([self] "Drop Initial with short DCID"); + qinfo!([self], "Drop Initial with short DCID"); false } else { - qinfo!([self] "Initial received with token, assuming OK"); + qinfo!([self], "Initial received with token, assuming OK"); true } } else { @@ -716,31 +714,34 @@ impl Connection { true } } else { - qdebug!([self] "Dropping non-Initial packet"); + qdebug!([self], "Dropping non-Initial packet"); false } } fn handle_retry(&mut self, scid: &ConnectionId, odcid: &ConnectionId, token: &[u8]) -> Res<()> { - qdebug!([self] "received Retry"); + qdebug!([self], "received Retry"); if self.retry_info.is_some() { - qinfo!([self] "Dropping extra Retry"); + qinfo!([self], "Dropping extra Retry"); return Ok(()); } if token.is_empty() { - qinfo!([self] "Dropping Retry without a token"); + qinfo!([self], "Dropping Retry without a token"); return Ok(()); } match self.path.iter_mut().find(|p| p.remote_cid == *odcid) { None => { - qinfo!([self] "Ignoring Retry with mismatched ODCID"); + qinfo!([self], "Ignoring Retry with mismatched ODCID"); return Ok(()); } Some(path) => { path.remote_cid = scid.clone(); } } - qinfo!([self] "Valid Retry received, restarting with provided token"); + qinfo!( + [self], + "Valid Retry received, restarting with provided token" + ); self.retry_info = Some(RetryInfo { token: token.to_vec(), odcid: odcid.clone(), @@ -750,14 +751,14 @@ impl Connection { // Switching crypto state here might not happen eventually. 
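On the `Path::mtu` helper added above: the constants are consistent with QUIC's requirement that paths support at least the IPv6 minimum MTU of 1280 bytes. Subtracting the IP and UDP headers from that floor leaves 1252 bytes of UDP payload over IPv4 and 1232 over IPv6, which is my reading of where the two magic numbers come from. Spelled out:

```rust
// Likely derivation of the 1252/1232 values in Path::mtu() above.
const QUIC_MIN_MTU: usize = 1280; // QUIC assumes at least the IPv6 minimum MTU
const UDP_HEADER: usize = 8;
const IPV4_HEADER: usize = 20; // without options
const IPV6_HEADER: usize = 40;

fn main() {
    assert_eq!(QUIC_MIN_MTU - IPV4_HEADER - UDP_HEADER, 1252);
    assert_eq!(QUIC_MIN_MTU - IPV6_HEADER - UDP_HEADER, 1232);
}
```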
// https://github.com/quicwg/base-drafts/issues/2823 - self.crypto.states[0] = Some(self.crypto.create_initial_state(self.role, scid)); + self.crypto.create_initial_state(self.role, scid); Ok(()) } fn input(&mut self, d: Datagram, now: Instant) -> Res<()> { let mut slc = &d[..]; - qinfo!([self] "input {}", hex( &**d)); + qinfo!([self], "input {}", hex(&**d)); // Handle each packet in the datagram while !slc.is_empty() { @@ -765,7 +766,12 @@ impl Connection { let mut hdr = match res { Ok(h) => h, Err(e) => { - qinfo!([self] "Received indecipherable packet header {} {}", hex(slc), e); + qinfo!( + [self], + "Received indecipherable packet header {} {}", + hex(slc), + e + ); return Ok(()); // Drop the remainder of the datagram. } }; @@ -800,22 +806,21 @@ impl Connection { match self.state { State::Init => { - qinfo!([self] "Received message while in Init state"); + qinfo!([self], "Received message while in Init state"); return Ok(()); } State::WaitInitial => { - qinfo!([self] "Received packet in WaitInitial"); + qinfo!([self], "Received packet in WaitInitial"); if self.role == Role::Server { if !self.is_valid_initial(&hdr) { return Ok(()); } - self.crypto.states[0] = - Some(self.crypto.create_initial_state(self.role, &hdr.dcid)); + self.crypto.create_initial_state(self.role, &hdr.dcid); } } State::Handshaking | State::Connected => { if !self.is_valid_cid(&hdr.dcid) { - qinfo!([self] "Ignoring packet with CID {:?}", hdr.dcid); + qinfo!([self], "Ignoring packet with CID {:?}", hdr.dcid); return Ok(()); } } @@ -831,7 +836,7 @@ impl Connection { } } - qdebug!([self] "Received unverified packet {:?}", hdr); + qdebug!([self], "Received unverified packet {:?}", hdr); let body = self.decrypt_body(&mut hdr, slc); slc = &slc[hdr.hdr_len + hdr.body_len()..]; @@ -841,7 +846,7 @@ impl Connection { // on the assert for doesn't exist. // OK, we have a valid packet. self.idle_timeout.on_packet_received(now); - dump_packet(self, "<- RX", &hdr, &body); + dump_packet(self, "-> RX", &hdr, &body); if self.process_packet(&hdr, body, now)? { continue; } @@ -858,15 +863,12 @@ impl Connection { // the rest of the datagram on the floor, but don't generate an error. let largest_acknowledged = self .loss_recovery - .largest_acknowledged(PNSpace::from(hdr.epoch)); + .largest_acknowledged_pn(PNSpace::from(hdr.epoch)); match self.crypto.obtain_crypto_state(self.role, hdr.epoch) { - Ok(cs) => match cs.rx.as_ref() { - Some(rx) => { - let pn_decoder = PacketNumberDecoder::new(largest_acknowledged); - decrypt_packet(rx, pn_decoder, &mut hdr, slc).ok() - } - _ => None, - }, + Ok(CryptoState { rx: Some(rx), .. 
}) => { + let pn_decoder = PacketNumberDecoder::new(largest_acknowledged); + decrypt_packet(rx, pn_decoder, &mut hdr, slc).ok() + } _ => None, } } @@ -883,7 +885,12 @@ impl Connection { let ack_eliciting = self.input_packet(hdr.epoch, Decoder::from(&body[..]), now)?; let space = PNSpace::from(hdr.epoch); if self.acks[space].is_duplicate(hdr.pn) { - qdebug!([self] "Received duplicate packet epoch={} pn={}", hdr.epoch, hdr.pn); + qdebug!( + [self], + "Received duplicate packet epoch={} pn={}", + hdr.epoch, + hdr.pn + ); self.stats.dups_rx += 1; Ok(true) } else { @@ -916,7 +923,11 @@ impl Connection { ZeroRttState::Rejected }; } else { - qdebug!([self] "Changing to use Server CID={}", hdr.scid.as_ref().unwrap()); + qdebug!( + [self], + "Changing to use Server CID={}", + hdr.scid.as_ref().unwrap() + ); let p = self .path .iter_mut() @@ -956,41 +967,40 @@ impl Connection { fn output(&mut self, now: Instant) -> Option<Datagram> { let mut out = None; - // Can't call a method on self while iterating over self.paths - let paths = mem::replace(&mut self.path, Default::default()); - for p in &paths { - match self.output_path(&p, now) { - Ok(Some(dgram)) => { - out = Some(dgram); - break; + if self.path.is_some() { + match self.output_pkt_for_path(now) { + Ok(res) => { + out = res; } Err(e) => { if !matches!(self.state, State::Closing{..}) { // An error here causes us to transition to closing. self.absorb_error(now, Err(e)); // Rerun to give a chance to send a CONNECTION_CLOSE. - out = match self.output_path(&p, now) { + out = match self.output_pkt_for_path(now) { Ok(x) => x, Err(e) => { - qwarn!([self] "two output_path errors in a row: {:?}", e); + qwarn!([self], "two output_path errors in a row: {:?}", e); None } }; - break; } } - _ => (), }; } - self.path = paths; out } + #[allow(clippy::cognitive_complexity)] /// Build a datagram, possibly from multiple packets (for different PN /// spaces) and each containing 1+ frames. - fn output_path(&mut self, path: &Path, now: Instant) -> Res<Option<Datagram>> { + fn output_pkt_for_path(&mut self, now: Instant) -> Res<Option<Datagram>> { let mut out_bytes = Vec::new(); let mut needs_padding = false; + let path = self + .path + .take() + .expect("we know we have a path because calling fn checked"); // Frames for different epochs must go in different packets, but then these // packets can go in a single datagram @@ -999,21 +1009,19 @@ impl Connection { let mut encoder = Encoder::default(); let mut tokens = Vec::new(); - // Try to make our own crypo state and if we can't, skip this epoch. - match self.crypto.obtain_crypto_state(self.role, epoch) { - Ok(cs) => { - if cs.tx.is_none() { - continue; - } - } - _ => continue, + // Ensure we have tx crypto state for this epoch, or skip it. + if !matches!( + self.crypto.obtain_crypto_state(self.role, epoch), + Ok(CryptoState { tx: Some(_), .. }) + ) { + continue; } let mut ack_eliciting = false; match &self.state { State::Init | State::WaitInitial | State::Handshaking | State::Connected => { loop { - let remaining = self.pmtu() - out_bytes.len() - encoder.len(); + let remaining = path.mtu() - out_bytes.len() - encoder.len(); // Check sources in turn for available frames if let Some((frame, token)) = self @@ -1031,8 +1039,7 @@ impl Connection { if let Some(t) = token { tokens.push(t); } - assert!(encoder.len() <= self.pmtu()); - if out_bytes.len() + encoder.len() == self.pmtu() { + if out_bytes.len() + encoder.len() == path.mtu() { // No more space for frames.
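The `output_pkt_for_path` rewrite above uses a common borrow-checker pattern: `take()` the single `Option<Path>` out of `self`, use it while still being able to call `&mut self` methods, and restore it on every exit (note the `self.path = Some(path)` lines in the next hunk). A standalone sketch of the pattern, with hypothetical types:

```rust
// Take-and-restore: move a field out of self so the borrow of `path` does
// not overlap with later &mut self calls, then put it back before returning.
struct Path {
    mtu: usize,
}

struct Conn {
    path: Option<Path>,
    sent: usize,
}

impl Conn {
    fn bump(&mut self) {
        self.sent += 1; // needs &mut self
    }

    fn output(&mut self) -> Option<usize> {
        let path = self.path.take()?; // Path is now owned locally
        self.bump(); // fine: self.path is temporarily None, no overlapping borrow
        let mtu = path.mtu;
        self.path = Some(path); // restore before returning
        Some(mtu)
    }
}

fn main() {
    let mut c = Conn { path: Some(Path { mtu: 1232 }), sent: 0 };
    assert_eq!(c.output(), Some(1232));
    assert_eq!(c.sent, 1);
}
```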
break; } @@ -1069,7 +1076,7 @@ impl Connection { continue; } - qdebug!([self] "Need to send a packet"); + qdebug!([self], "Need to send a packet"); match epoch { // Packets containing Initial packets need padding. 0 => needs_padding = true, @@ -1118,29 +1125,32 @@ impl Connection { let mut packet = encode_packet(tx, &hdr, &encoder); dump_packet(self, "TX ->", &hdr, &encoder); out_bytes.append(&mut packet); - if out_bytes.len() >= self.pmtu() { + if out_bytes.len() >= path.mtu() { break; } } if out_bytes.is_empty() { + self.path = Some(path); return Ok(None); } // Pad Initial packets sent by the client to 1200 bytes. if self.role == Role::Client && needs_padding { - qdebug!([self] "pad Initial to 1200"); + qdebug!([self], "pad Initial to 1200"); out_bytes.resize(1200, 0); } - Ok(Some(Datagram::new(path.local, path.remote, out_bytes))) + let dgram = Some(Datagram::new(path.local, path.remote, out_bytes)); + self.path = Some(path); + Ok(dgram) } fn client_start(&mut self, now: Instant) -> Res<()> { - qinfo!([self] "client_start"); + qinfo!([self], "client_start"); self.handshake(now, 0, None)?; self.set_state(State::WaitInitial); if self.crypto.tls.preinfo()?.early_data() { - qdebug!([self] "Enabling 0-RTT"); + qdebug!([self], "Enabling 0-RTT"); self.zero_rtt_state = ZeroRttState::Enabled; } Ok(()) } @@ -1165,7 +1175,7 @@ impl Connection { fn buffer_crypto_records(&mut self, records: RecordList) { for r in records { assert_eq!(r.ct, 22); - qdebug!([self] "Adding CRYPTO data {:?}", r); + qdebug!([self], "Adding CRYPTO data {:?}", r); self.crypto.streams[r.epoch as usize].tx.send(&r.data); } } @@ -1205,7 +1215,7 @@ impl Connection { let mut rec: Option<Record> = None; if let Some(d) = data { - qdebug!([self] "Handshake received {:0x?} ", d); + qdebug!([self], "Handshake received {:0x?} ", d); rec = Some(Record { ct: 22, // TODO(ekr@rtfm.com): Symbolic constants for CT. This is handshake. epoch, @@ -1220,7 +1230,7 @@ impl Connection { match m { Err(e) => { - qwarn!([self] "Handshake failed"); + qwarn!([self], "Handshake failed"); return Err(match self.crypto.tls.alert() { Some(a) => Error::CryptoAlert(*a), _ => Error::CryptoError(e), @@ -1229,10 +1239,10 @@ impl Connection { Ok(msgs) => self.buffer_crypto_records(msgs), } if self.crypto.tls.state().connected() { - qinfo!([self] "TLS handshake completed"); + qinfo!([self], "TLS handshake completed"); if self.crypto.tls.info().map(SecretAgentInfo::alpn).is_none() { - qwarn!([self] "No ALPN. Closing connection."); + qwarn!([self], "No ALPN. Closing connection."); // 120 = no_application_protocol return Err(Error::CryptoAlert(120)); } @@ -1307,7 +1317,7 @@ impl Connection { } Frame::Crypto { offset, data } => { qdebug!( - [self] + [self], "Crypto frame on epoch={} offset={}, data={:0x?}", epoch, offset, @@ -1358,7 +1368,11 @@ impl Connection { } Frame::DataBlocked { data_limit } => { // Should never happen since we set data limit to 2^62-1 - qwarn!([self] "Received DataBlocked with data limit {}", data_limit); + qwarn!( + [self], + "Received DataBlocked with data limit {}", + data_limit + ); } Frame::StreamDataBlocked { stream_id, .. } => { // TODO(agrover@mozilla.com): how should we be using @@ -1402,7 +1416,7 @@ impl Connection { Frame::PathResponse { ..
} => { // Should never see this, we don't support migration atm and // do not send path challenges - qwarn!([self] "Received Path Response"); + qwarn!([self], "Received Path Response"); } Frame::ConnectionClose { error_code, @@ -1410,11 +1424,13 @@ impl Connection { reason_phrase, } => { let reason_phrase = String::from_utf8_lossy(&reason_phrase); - qinfo!([self] - "ConnectionClose received. Error code: {:?} frame type {:x} reason {}", - error_code, - frame_type, - reason_phrase); + qinfo!( + [self], + "ConnectionClose received. Error code: {:?} frame type {:x} reason {}", + error_code, + frame_type, + reason_phrase + ); self.set_state(State::Closed(error_code.into())); } }; @@ -1434,7 +1450,7 @@ impl Connection { RecoveryToken::Stream(st) => self.send_streams.lost(&st), RecoveryToken::Crypto(ct) => self.crypto.lost(ct), RecoveryToken::Flow(ft) => self.flow_mgr.borrow_mut().lost( - ft, + &ft, &mut self.send_streams, &mut self.recv_streams, &mut self.indexes, @@ -1454,7 +1470,7 @@ impl Connection { now: Instant, ) -> Res<()> { qinfo!( - [self] + [self], "Rx ACK epoch={}, largest_acked={}, first_ack_range={}, ranges={:?}", epoch, largest_acknowledged, @@ -1503,7 +1519,7 @@ impl Connection { RecoveryToken::Stream(st) => self.send_streams.lost(&st), RecoveryToken::Crypto(ct) => self.crypto.lost(ct), RecoveryToken::Flow(ft) => self.flow_mgr.borrow_mut().lost( - ft, + &ft, &mut self.send_streams, &mut self.recv_streams, &mut self.indexes, @@ -1518,7 +1534,7 @@ impl Connection { fn set_state(&mut self, state: State) { if state > self.state { - qinfo!([self] "State change from {:?} -> {:?}", self.state, state); + qinfo!([self], "State change from {:?} -> {:?}", self.state, state); self.state = state.clone(); match &self.state { State::Connected => { @@ -1615,9 +1631,12 @@ impl Connection { if stream_idx >= *next_stream_idx { let recv_initial_max_stream_data = if stream_id.is_bidi() { if stream_idx > self.indexes.local_max_stream_bidi { - qwarn!([self] "remote bidi stream create blocked, next={:?} max={:?}", - stream_idx, - self.indexes.local_max_stream_bidi); + qwarn!( + [self], + "remote bidi stream create blocked, next={:?} max={:?}", + stream_idx, + self.indexes.local_max_stream_bidi + ); return Err(Error::StreamLimitError); } self.tps @@ -1626,9 +1645,12 @@ impl Connection { .get_integer(tp_const::INITIAL_MAX_STREAM_DATA_BIDI_REMOTE) } else { if stream_idx > self.indexes.local_max_stream_uni { - qwarn!([self] "remote uni stream create blocked, next={:?} max={:?}", - stream_idx, - self.indexes.local_max_stream_uni); + qwarn!( + [self], + "remote uni stream create blocked, next={:?} max={:?}", + stream_idx, + self.indexes.local_max_stream_uni + ); return Err(Error::StreamLimitError); } self.tps @@ -1710,9 +1732,12 @@ impl Connection { self.flow_mgr .borrow_mut() .streams_blocked(self.indexes.remote_max_stream_uni, StreamType::UniDi); - qwarn!([self] "local uni stream create blocked, next={:?} max={:?}", - self.indexes.remote_next_stream_uni, - self.indexes.remote_max_stream_uni); + qwarn!( + [self], + "local uni stream create blocked, next={:?} max={:?}", + self.indexes.remote_next_stream_uni, + self.indexes.remote_max_stream_uni + ); return Err(Error::StreamLimitError); } let new_id = self @@ -1742,9 +1767,12 @@ impl Connection { self.flow_mgr .borrow_mut() .streams_blocked(self.indexes.remote_max_stream_bidi, StreamType::BiDi); - qwarn!([self] "local bidi stream create blocked, next={:?} max={:?}", - self.indexes.remote_next_stream_bidi, - self.indexes.remote_max_stream_bidi); + qwarn!( + 
[self], + "local bidi stream create blocked, next={:?} max={:?}", + self.indexes.remote_next_stream_bidi, + self.indexes.remote_max_stream_bidi + ); return Err(Error::StreamLimitError); } let new_id = self @@ -1856,7 +1884,7 @@ impl Connection { } fn check_loss_detection_timeout(&mut self, now: Instant) { - qdebug!([self] "check_loss_timeouts"); + qdebug!([self], "check_loss_timeouts"); if matches!(self.loss_recovery_state.mode(), LossRecoveryMode::None) { // LR not the active timer @@ -1887,7 +1915,7 @@ impl Connection { RecoveryToken::Stream(st) => self.send_streams.lost(&st), RecoveryToken::Crypto(ct) => self.crypto.lost(ct), RecoveryToken::Flow(ft) => self.flow_mgr.borrow_mut().lost( - ft, + &ft, &mut self.send_streams, &mut self.recv_streams, &mut self.indexes, @@ -1898,7 +1926,7 @@ impl Connection { } LossRecoveryMode::PTO => { qinfo!( - [self] + [self], "check_loss_detection_timeout -send_one_or_two_packets" ); self.loss_recovery.increment_pto_count(); @@ -1930,6 +1958,7 @@ impl ::std::fmt::Display for Connection { mod tests { use super::*; use crate::frame::StreamType; + use std::mem; use test_fixture::{self, assertions, fixture_init, loopback, now}; // This is fabulous: because test_fixture uses the public API for Connection, diff --git a/third_party/rust/neqo-transport/src/crypto.rs b/third_party/rust/neqo-transport/src/crypto.rs index 9f5bfc923502..30a4b1f9034c 100644 --- a/third_party/rust/neqo-transport/src/crypto.rs +++ b/third_party/rust/neqo-transport/src/crypto.rs @@ -62,12 +62,12 @@ impl Crypto { } - pub fn create_initial_state(&mut self, role: Role, dcid: &[u8]) -> CryptoState { + pub fn create_initial_state(&mut self, role: Role, dcid: &[u8]) { const CLIENT_INITIAL_LABEL: &str = "client in"; const SERVER_INITIAL_LABEL: &str = "server in"; qinfo!( - [self] + [self], "Creating initial cipher state role={:?} dcid={}", role, hex(dcid) @@ -78,11 +78,11 @@ impl Crypto { Role::Server => (SERVER_INITIAL_LABEL, CLIENT_INITIAL_LABEL), }; - CryptoState { + self.states[0] = Some(CryptoState { epoch: 0, tx: CryptoDxState::new_initial(CryptoDxDirection::Write, write_label, dcid), rx: CryptoDxState::new_initial(CryptoDxDirection::Read, read_label, dcid), - } + }); } @@ -94,7 +94,7 @@ impl Crypto { let cs = &mut self.states[epoch as usize]; if cs.is_none() { - qtrace!([label] "Build crypto state for epoch {}", epoch); + qtrace!([label], "Build crypto state for epoch {}", epoch); assert!(epoch != 0); let cipher = match (epoch, self.tls.info()) { @@ -103,7 +103,7 @@ impl Crypto { (_, Some(info)) => Some(info.cipher_suite()), }; if cipher.is_none() { - qdebug!([label] "cipher info not available yet"); + qdebug!([label], "cipher info not available yet"); return Err(Error::KeysNotFound); } let cipher = cipher.unwrap(); @@ -123,7 +123,7 @@ impl Crypto { | (Some(_), None, Role::Server, 1) | (Some(_), Some(_), _, _) => {} (None, None, _, _) => { - qdebug!([label] "Keying material not available for epoch {}", epoch); + qdebug!([label], "Keying material not available for epoch {}", epoch); return Err(Error::KeysNotFound); } _ => panic!("bad configuration of keys"), @@ -276,7 +276,7 @@ impl CryptoCtx for CryptoDxState { fn aead_decrypt(&self, pn: PacketNumber, hdr: &[u8], body: &[u8]) -> Res<Vec<u8>> { qinfo!( - [self] + [self], "aead_decrypt pn={} hdr={} body={}", pn, hex(hdr), @@ -289,7 +289,7 @@ impl CryptoCtx for CryptoDxState { fn aead_encrypt(&self, pn: PacketNumber, hdr: &[u8], body: &[u8]) -> Res<Vec<u8>> { qdebug!( - [self] + [self], "aead_encrypt pn={} hdr={} body={}", pn, hex(hdr), @@ -300,7 +300,7 @@
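The crypto.rs hunk above also changes the shape of `create_initial_state`: instead of returning a `CryptoState` that every caller must remember to store (the `self.crypto.states[0] = Some(...)` dance removed earlier in this patch), the method installs the state itself. Reduced to its essentials, under simplified stand-in types:

```rust
// Sketch of the refactor: the constructor installs its result rather than
// returning it, so call sites cannot forget the store.
struct CryptoState {
    epoch: u16,
}

struct Crypto {
    states: [Option<CryptoState>; 4],
}

impl Crypto {
    // Before (roughly): fn create_initial_state(&mut self) -> CryptoState,
    // with callers doing `self.states[0] = Some(...)`. After: one call does both.
    fn create_initial_state(&mut self) {
        self.states[0] = Some(CryptoState { epoch: 0 });
    }
}

fn main() {
    let mut c = Crypto { states: [None, None, None, None] };
    c.create_initial_state();
    assert_eq!(c.states[0].as_ref().unwrap().epoch, 0);
}
```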
impl CryptoCtx for CryptoDxState { let mut out = vec![0; size]; let res = self.aead.encrypt(pn, hdr, body, &mut out)?; - qdebug!([self] "aead_encrypt ct={}", hex(res),); + qdebug!([self], "aead_encrypt ct={}", hex(res),); Ok(res.to_vec()) } diff --git a/third_party/rust/neqo-transport/src/dump.rs b/third_party/rust/neqo-transport/src/dump.rs index a8727ff6f5b8..74fd81ad8b89 100644 --- a/third_party/rust/neqo-transport/src/dump.rs +++ b/third_party/rust/neqo-transport/src/dump.rs @@ -28,5 +28,5 @@ pub fn dump_packet(conn: &Connection, dir: &str, hdr: &PacketHdr, payload: &[u8] s.push_str(&format!("\n {} {}", dir, &x)); } } - qdebug!([conn] "pn={} type={:?}{}", hdr.pn, hdr.tipe, s); + qdebug!([conn], "pn={} type={:?}{}", hdr.pn, hdr.tipe, s); } diff --git a/third_party/rust/neqo-transport/src/flow_mgr.rs b/third_party/rust/neqo-transport/src/flow_mgr.rs index 408da5cd4c31..ed964f1691e1 100644 --- a/third_party/rust/neqo-transport/src/flow_mgr.rs +++ b/third_party/rust/neqo-transport/src/flow_mgr.rs @@ -52,9 +52,15 @@ impl FlowMgr { } + + pub fn conn_increase_max_credit(&mut self, new: u64) -> bool { if new > self.max_data { self.max_data = new; + + const DB_FRAME: Frame = Frame::DataBlocked { data_limit: 0 }; + self.from_conn.remove(&mem::discriminant(&DB_FRAME)); + true } else { false @@ -75,6 +81,11 @@ impl FlowMgr { self.from_conn.insert(mem::discriminant(&frame), frame); } + pub fn max_data(&mut self, maximum_data: u64) { + let frame = Frame::MaxData { maximum_data }; + self.from_conn.insert(mem::discriminant(&frame), frame); + } + @@ -205,12 +216,12 @@ impl FlowMgr { pub(crate) fn lost( &mut self, - token: FlowControlRecoveryToken, + token: &FlowControlRecoveryToken, send_streams: &mut SendStreams, recv_streams: &mut RecvStreams, indexes: &mut StreamIndexes, ) { - match token { + match *token { Frame::ResetStream { stream_id, diff --git a/third_party/rust/neqo-transport/src/frame.rs b/third_party/rust/neqo-transport/src/frame.rs index 627ef3254ae7..923d0f8d3e8a 100644 --- a/third_party/rust/neqo-transport/src/frame.rs +++ b/third_party/rust/neqo-transport/src/frame.rs @@ -6,7 +6,7 @@ -use neqo_common::{qdebug, Decoder, Encoder}; +use neqo_common::{matches, qdebug, Decoder, Encoder}; use crate::stream_id::StreamIndex; use crate::{AppError, TransportError}; @@ -345,10 +345,7 @@ impl Frame { } pub fn ack_eliciting(&self) -> bool { - match self { - Frame::Ack { .. } | Frame::Padding => false, - _ => true, - } + !matches!(self, Frame::Ack { .. 
} | Frame::Padding) } @@ -591,7 +588,6 @@ pub fn decode_frame(dec: &mut Decoder) -> Res<Frame> { #[derive(Debug, PartialEq, Clone, Copy)] pub enum TxMode { Normal, - #[allow(dead_code)] Pto, } diff --git a/third_party/rust/neqo-transport/src/recovery.rs b/third_party/rust/neqo-transport/src/recovery.rs index e393ee60596e..9aa11935246d 100644 --- a/third_party/rust/neqo-transport/src/recovery.rs +++ b/third_party/rust/neqo-transport/src/recovery.rs @@ -19,7 +19,6 @@ use crate::crypto::CryptoRecoveryToken; use crate::flow_mgr::FlowControlRecoveryToken; use crate::send_stream::StreamRecoveryToken; use crate::tracking::{AckToken, PNSpace}; -use crate::State; const GRANULARITY: Duration = Duration::from_millis(20); @@ -244,7 +243,7 @@ impl LossRecovery { self.pto_count += 1; } - pub fn largest_acknowledged(&self, pn_space: PNSpace) -> Option<u64> { + pub fn largest_acknowledged_pn(&self, pn_space: PNSpace) -> Option<u64> { self.spaces[pn_space].largest_acked } @@ -264,7 +263,7 @@ impl LossRecovery { tokens: Vec<RecoveryToken>, now: Instant, ) { - qdebug!([self] "packet {:?}-{} sent.", pn_space, packet_number); + qdebug!([self], "packet {:?}-{} sent.", pn_space, packet_number); self.spaces[pn_space].sent_packets.insert( packet_number, SentPacket { @@ -290,8 +289,12 @@ impl LossRecovery { ack_delay: Duration, now: Instant, ) -> (Vec<SentPacket>, Vec<SentPacket>) { - qdebug!([self] "ack received for {:?} - largest_acked={}.", - pn_space, largest_acked); + qdebug!( + [self], + "ack received for {:?} - largest_acked={}.", + pn_space, + largest_acked + ); let (acked_packets, any_ack_eliciting) = self.spaces[pn_space].remove_acked(acked_ranges); if acked_packets.is_empty() { @@ -354,9 +357,12 @@ impl LossRecovery { let lost_deadline = now - loss_delay; - qdebug!([self] + qdebug!( + [self], "detect lost packets = now {:?} loss delay {:?} lost_deadline {:?}", - now, loss_delay, lost_deadline + now, + loss_delay, + lost_deadline ); let packet_space = &mut self.spaces[pn_space]; @@ -436,8 +442,8 @@ impl LossRecovery { lost_packets } - pub fn get_timer(&mut self, conn_state: &State) -> LossRecoveryState { - qdebug!([self] "get_loss_detection_timer."); + pub fn get_timer(&mut self) -> LossRecoveryState { + qdebug!([self], "get_loss_detection_timer."); let has_ack_eliciting_out = self .spaces @@ -445,17 +451,14 @@ impl LossRecovery { .flat_map(|spc| spc.sent_packets.values()) .any(|sp| sp.ack_eliciting); - qdebug!( - [self] - "has_ack_eliciting_out={}", - has_ack_eliciting_out, - ); + qdebug!([self], "has_ack_eliciting_out={}", has_ack_eliciting_out,); - if !has_ack_eliciting_out && *conn_state == State::Connected { + if !has_ack_eliciting_out { return LossRecoveryState::new(LossRecoveryMode::None, None); } - qinfo!([self] + qinfo!( + [self], "sent packets {} {} {}", self.spaces[PNSpace::Initial].sent_packets.len(), self.spaces[PNSpace::Handshake].sent_packets.len(), @@ -478,7 +481,12 @@ impl LossRecovery { ) }; - qdebug!([self] "loss_detection_timer mode={:?} timer={:?}", mode, maybe_timer); + qdebug!( + [self], + "loss_detection_timer mode={:?} timer={:?}", + mode, + maybe_timer + ); LossRecoveryState::new(mode, maybe_timer) } @@ -743,7 +751,7 @@ mod tests { assert_sent_times(&lr, None, None, Some(pn1_sent_time)); - let lr_state = lr.get_timer(&State::Connected); + let lr_state = lr.get_timer(); let pn1_lost_time = pn1_sent_time + (INITIAL_RTT * 9 / 8); assert_eq!(lr_state.callback_time, Some(pn1_lost_time)); match lr_state.mode { diff --git a/third_party/rust/neqo-transport/src/recv_stream.rs b/third_party/rust/neqo-transport/src/recv_stream.rs index
c09369d4a453..e965c919d406 100644 --- a/third_party/rust/neqo-transport/src/recv_stream.rs +++ b/third_party/rust/neqo-transport/src/recv_stream.rs @@ -21,7 +21,7 @@ use crate::events::ConnectionEvents; use crate::flow_mgr::FlowMgr; use crate::stream_id::StreamId; use crate::{AppError, Error, Res}; -use neqo_common::qtrace; +use neqo_common::{matches, qtrace}; pub const RX_STREAM_DATA_WINDOW: u64 = 0xFFFF; @@ -464,18 +464,15 @@ impl RecvStream { } pub fn is_terminal(&self) -> bool { - match self.state { - RecvStreamState::ResetRecvd | RecvStreamState::DataRead => true, - _ => false, - } + matches!( + self.state, + RecvStreamState::ResetRecvd | RecvStreamState::DataRead + ) } fn needs_to_inform_app_about_fin(&self) -> bool { - match self.state { - RecvStreamState::DataRecvd { .. } => true, - _ => false, - } + matches!(self.state, RecvStreamState::DataRecvd { .. }) } fn data_ready(&self) -> bool { @@ -751,5 +748,4 @@ mod tests { assert_eq!(rx_ord.buffered(), 15); assert_eq!(rx_ord.retired(), 2); } - } diff --git a/third_party/rust/neqo-transport/src/send_stream.rs b/third_party/rust/neqo-transport/src/send_stream.rs index c7d0c5a1fc67..a7c8ba4a891e 100644 --- a/third_party/rust/neqo-transport/src/send_stream.rs +++ b/third_party/rust/neqo-transport/src/send_stream.rs @@ -16,7 +16,7 @@ use std::rc::Rc; use slice_deque::SliceDeque; use smallvec::SmallVec; -use neqo_common::{matches, qerror, qinfo, qtrace, qwarn, Encoder}; +use neqo_common::{matches, qdebug, qerror, qinfo, qtrace, qwarn, Encoder}; use crate::events::ConnectionEvents; use crate::flow_mgr::FlowMgr; @@ -299,20 +299,31 @@ impl TxBuffer { can_buffer } - pub fn next_bytes(&self, _mode: TxMode) -> Option<(u64, &[u8])> { - let (start, maybe_len) = self.ranges.first_unmarked_range(); + pub fn next_bytes(&self, mode: TxMode) -> Option<(u64, &[u8])> { + match mode { + TxMode::Normal => { + let (start, maybe_len) = self.ranges.first_unmarked_range(); - if start == self.retired + u64::try_from(self.buffered()).unwrap() { - return None; - } + if start == self.retired + u64::try_from(self.buffered()).unwrap() { + return None; + } - let buff_off = usize::try_from(start - self.retired).unwrap(); - match maybe_len { - Some(len) => Some(( - start, - &self.send_buf[buff_off..buff_off + usize::try_from(len).unwrap()], - )), - None => Some((start, &self.send_buf[buff_off..])), + let buff_off = usize::try_from(start - self.retired).unwrap(); + match maybe_len { + Some(len) => Some(( + start, + &self.send_buf[buff_off..buff_off + usize::try_from(len).unwrap()], + )), + None => Some((start, &self.send_buf[buff_off..])), + } + } + TxMode::Pto => { + if self.buffered() == 0 { + None + } else { + Some((self.retired, &self.send_buf)) + } + } } } @@ -606,10 +617,7 @@ impl SendStream { } pub fn is_terminal(&self) -> bool { - match self.state { - SendStreamState::DataRecvd { .. } | SendStreamState::ResetRecvd => true, - _ => false, - } + matches!(self.state, SendStreamState::DataRecvd { .. 
} | SendStreamState::ResetRecvd) } pub fn send(&mut self, buf: &[u8]) -> Res<usize> { @@ -761,7 +769,7 @@ impl SendStreams { for (stream_id, stream) in self { let fin = stream.final_size(); if let Some((offset, data)) = stream.next_bytes(mode) { - qtrace!( + qdebug!( "Stream {} sending bytes {}-{}, epoch {}, mode {:?}, remaining {}", stream_id.as_u64(), offset, @@ -882,6 +890,15 @@ mod tests { let res = rt.first_unmarked_range(); assert_eq!(res, (0, Some(5))); + assert_eq!( + rt.used.iter().nth(0).unwrap(), + (&5, &(5, RangeState::Acked)) + ); + assert_eq!( + rt.used.iter().nth(1).unwrap(), + (&13, &(2, RangeState::Sent)) + ); + assert!(rt.used.iter().nth(2).is_none()); rt.mark_range(0, 5, RangeState::Sent); let res = rt.first_unmarked_range(); diff --git a/third_party/rust/neqo-transport/src/server.rs b/third_party/rust/neqo-transport/src/server.rs index a5d839fc293d..b0af93bb482e 100644 --- a/third_party/rust/neqo-transport/src/server.rs +++ b/third_party/rust/neqo-transport/src/server.rs @@ -264,17 +264,17 @@ impl Server { dgram: Option<Datagram>, now: Instant, ) -> Option<Datagram> { - qtrace!([self] "Process connection {:?}", c); + qtrace!([self], "Process connection {:?}", c); let out = c.borrow_mut().process(dgram, now); match out { Output::Datagram(_) => { - qtrace!([self] "Sending packet, added to waiting connections"); + qtrace!([self], "Sending packet, added to waiting connections"); self.waiting.push_back(c.clone()); } Output::Callback(delay) => { let next = now + delay; if next != c.borrow().last_timer { - qtrace!([self] "Change timer to {:?}", next); + qtrace!([self], "Change timer to {:?}", next); self.remove_timer(&c); c.borrow_mut().last_timer = next; self.timers.add(next, c.clone()); @@ -285,7 +285,7 @@ impl Server { } } if c.borrow().has_events() { - qtrace!([self] "Connection active: {:?}", c); + qtrace!([self], "Connection active: {:?}", c); self.active.insert(ActiveConnectionRef { c: c.clone() }); } if matches!(c.borrow().state(), State::Closed(_)) { @@ -315,7 +315,7 @@ impl Server { RetryTokenResult::Pass => self.accept_connection(None, dgram, now), RetryTokenResult::Valid(dcid) => self.accept_connection(Some(dcid), dgram, now), RetryTokenResult::Validate => { - qinfo!([self] "Send retry for {:?}", hdr.dcid); + qinfo!([self], "Send retry for {:?}", hdr.dcid); let res = self.retry.generate_token(&hdr.dcid, dgram.source(), now); let token = if let Ok(t) = res { @@ -348,7 +348,7 @@ impl Server { dgram: Datagram, now: Instant, ) -> Option<Datagram> { - qinfo!([self] "Accept connection"); + qinfo!([self], "Accept connection"); let cid_mgr = Rc::new(RefCell::new(ServerConnectionIdManager { @@ -370,7 +370,7 @@ impl Server { cid_mgr.borrow_mut().c = Some(c.clone()); self.process_connection(c, Some(dgram), now) } else { - qwarn!([self] "Unable to create connection"); + qwarn!([self], "Unable to create connection"); None } } @@ -384,7 +384,7 @@ impl Server { let hdr = match res { Ok(h) => h, _ => { - qtrace!([self] "Discarding {:?}", dgram); + qtrace!([self], "Discarding {:?}", dgram); return None; } }; @@ -396,12 +396,12 @@ impl Server { if hdr.tipe == PacketType::Short { - qtrace!([self] "Short header packet for an unknown connection"); + qtrace!([self], "Short header packet for an unknown connection"); return None; } if dgram.len() < MIN_INITIAL_PACKET_SIZE { - qtrace!([self] "Bogus packet: too short"); + qtrace!([self], "Bogus packet: too short"); return None; } @@ -415,13 +415,13 @@ impl Server { fn process_next_output(&mut self, now: Instant) -> Option<Datagram> { - qtrace!([self] "No packet to send, look at waiting
connections"); + qtrace!([self], "No packet to send, look at waiting connections"); while let Some(c) = self.waiting.pop_front() { if let Some(d) = self.process_connection(c, None, now) { return Some(d); } } - qtrace!([self] "No packet to send still, run timers"); + qtrace!([self], "No packet to send still, run timers"); while let Some(c) = self.timers.take_next(now) { if let Some(d) = self.process_connection(c, None, now) { return Some(d); @@ -447,16 +447,16 @@ impl Server { let out = out.or_else(|| self.process_next_output(now)); match out { Some(d) => { - qtrace!([self] "Send packet: {:?}", d); + qtrace!([self], "Send packet: {:?}", d); Output::Datagram(d) } _ => match self.next_time(now) { Some(delay) => { - qtrace!([self] "Wait: {:?}", delay); + qtrace!([self], "Wait: {:?}", delay); Output::Callback(delay) } _ => { - qtrace!([self] "Go dormant"); + qtrace!([self], "Go dormant"); Output::None } }, diff --git a/third_party/rust/neqo-transport/src/tracking.rs b/third_party/rust/neqo-transport/src/tracking.rs index 0b0fa0fb5f92..fed82e5f6e6c 100644 --- a/third_party/rust/neqo-transport/src/tracking.rs +++ b/third_party/rust/neqo-transport/src/tracking.rs @@ -84,12 +84,12 @@ impl PacketRange { assert!(!self.contains(pn)); if (self.largest + 1) == pn { - qtrace!([self] "Adding largest {}", pn); + qtrace!([self], "Adding largest {}", pn); self.largest += 1; self.ack_needed = true; true } else if self.smallest == (pn + 1) { - qtrace!([self] "Adding smallest {}", pn); + qtrace!([self], "Adding smallest {}", pn); self.smallest -= 1; self.ack_needed = true; true @@ -100,7 +100,7 @@ impl PacketRange { pub fn merge_smaller(&mut self, other: &Self) { - qinfo!([self] "Merging {}", other); + qinfo!([self], "Merging {}", other); assert_eq!(self.smallest - 1, other.largest); @@ -113,7 +113,7 @@ impl PacketRange { pub fn acknowledged(&mut self, other: &Self) { if (other.smallest <= self.smallest) && (other.largest >= self.largest) { - qinfo!([self] "Acknowledged"); + qinfo!([self], "Acknowledged"); self.ack_needed = false; } } @@ -201,6 +201,8 @@ impl RecvdPackets { pub fn set_received(&mut self, now: Instant, pn: u64, ack_eliciting: bool) { + let next_in_order_pn = self.ranges.get(0).map(|pr| pr.largest + 1).unwrap_or(0); + qdebug!("next in order pn: {}", next_in_order_pn); let i = self.add(pn); @@ -212,10 +214,10 @@ impl RecvdPackets { if self.ranges.len() > MAX_TRACKED_RANGES { let oldest = self.ranges.pop_back().unwrap(); if oldest.ack_needed { - qwarn!([self] "Dropping unacknowledged ACK range: {}", oldest); + qwarn!([self], "Dropping unacknowledged ACK range: {}", oldest); } else { - qdebug!([self] "Drop ACK range: {}", oldest); + qdebug!([self], "Drop ACK range: {}", oldest); } self.min_tracked = oldest.largest + 1; } @@ -223,7 +225,10 @@ impl RecvdPackets { if ack_eliciting { - if self.ack_time.is_none() && self.space == PNSpace::ApplicationData { + + if pn != next_in_order_pn { + self.ack_time = Some(now); + } else if self.ack_time.is_none() && self.space == PNSpace::ApplicationData { self.ack_time = Some(now + ACK_DELAY); } else { self.ack_time = Some(now); @@ -507,6 +512,24 @@ mod tests { } } + #[test] + fn ooo_no_ack_delay() { + for space in &[ + PNSpace::Initial, + PNSpace::Handshake, + PNSpace::ApplicationData, + ] { + let mut rp = RecvdPackets::new(*space); + assert!(rp.ack_time().is_none()); + assert!(!rp.ack_now(now())); + + + rp.set_received(now(), 3, true); + assert_eq!(Some(now()), rp.ack_time()); + assert!(rp.ack_now(now())); + } + } + #[test] fn aggregate_ack_time() { let mut 
tracker = AckTracker::default(); diff --git a/third_party/rust/objc/.cargo-checksum.json b/third_party/rust/objc/.cargo-checksum.json new file mode 100644 index 000000000000..5ba9c984437f --- /dev/null +++ b/third_party/rust/objc/.cargo-checksum.json @@ -0,0 +1 @@ +{"files":{"CHANGELOG.md":"fa382fb9daa02785c57f4c98872ae708c180978af76209496581995a752cb8e3","Cargo.toml":"36d9ae0828380222920be3087e815e00608c27d449ca3ea3cf660123e531c270","LICENSE.txt":"e353f37b12aefbb9f9b29490e837cfee05d9bda70804b3562839a3285c1df1e5","README.md":"2798cf78a640ea9fb83bdf27e2c82d98d9f8f7459196ab9db10372fb843c7a65","examples/example.rs":"df5fd939ffa46505b368e4e31aa0a0b144202fbceac72e5343bedb706313ef5e","src/declare.rs":"a4409a3b849413af55398b9b6a86ca482bdea94dd16ce818b39d420228426b54","src/encode.rs":"6e792c73f7cfced248f20cebe93dc038a39d25bdd9cbe1a115464b7baa02885b","src/exception.rs":"a641f9fdb9267e1d44f680b685d650faa2931d5d22949b621c8e2783ed534a4f","src/lib.rs":"1b03c53c792b0abe909d2958d7b692b2264ae0f25a109c15dacf57da2352f6f7","src/macros.rs":"43d8c797430589f4d55b4c9a34010f15bf01d503eff1d5ccadd8a0258257b3da","src/message/apple/arm.rs":"3df72141b9fa48e7eab13b33acfa76c0eae730c2eedad0cc92ed7f89c51ca0da","src/message/apple/arm64.rs":"3efa598e34232c1e1c2171de864beac7f79e10f85807b23d10537335e0e84bd3","src/message/apple/mod.rs":"8aa9b9419084f92acc4468dae647b6bc3bd4570de0dbffe82dd8b6a18883345e","src/message/apple/x86.rs":"a268b01e54d0c7cbd9ae765815524fbd5f7c92c339f110f1f3d76877e5238597","src/message/apple/x86_64.rs":"bb64ad8de038b65cda61eaa55a46ce56795aeb36b574dc4dbbfd0d328aa23889","src/message/gnustep.rs":"15bbf9abc5aa0edc25b8b1d9622ebcacc33fd1103fe20a6a93ba9d82ca1b262d","src/message/mod.rs":"aa9da24db7d02ed7d827d78d53936c132fc9302e7fdc09380bdf7021ddd16ae6","src/message/verify.rs":"0047219354c49568a14f7aa7a5bb91edca16e5636a30c66c15a1533668759845","src/rc/autorelease.rs":"f56e26a5f866b5dbbe5c336289bbe21d5a5872928d504c5dfdfda3eeaedd5a3e","src/rc/mod.rs":"ce4b5206fa8273ad3931376d18c1b9aca6cef8172eb2ff8daf962fee710db9d7","src/rc/strong.rs":"f472889e5827cd67f6df62f50e55fdc2101bcdfeb59c7d39dacc5f30a0ed06bb","src/rc/weak.rs":"0b4f77abcd9f1eec1993b6cc6f3db564d90aafe3dbf15233a6f268ded48ef6cb","src/runtime.rs":"69b33722d727faef47e3fb14e68bb18b96a970930a1f9b244e2bb4e161d67874","src/test_utils.rs":"db73875ff5ae4761187d3691998829a689e3dfd26b9812bdebc1bcae0388f78b"},"package":"31d20fd2b37e07cf5125be68357b588672e8cefe9a96f8c17a9d46053b3e590d"} \ No newline at end of file diff --git a/third_party/rust/objc/CHANGELOG.md b/third_party/rust/objc/CHANGELOG.md new file mode 100644 index 000000000000..73bdbb699505 --- /dev/null +++ b/third_party/rust/objc/CHANGELOG.md @@ -0,0 +1,109 @@ +## 0.2.6 + +### Fixed + +* Suppressed a deprecation warning in `sel!`, `msg_send!`, and `class!`. + +## 0.2.5 + +### Added + +* `autoreleasepool` returns the value returned by its body closure. + +## 0.2.4 + +### Added + +* Added an `rc` module with reference counting utilities: + `StrongPtr`, `WeakPtr`, and `autoreleasepool`. + +* Added some reference counting ABI foreign functions to the `runtime` module. + +### Fixed + +* Messaging nil under GNUstep now correctly returns zeroed results for all + return types. + +## 0.2.3 + +### Added + +* Added a `class!` macro for getting statically-known classes. The result is + non-optional (avoiding a need to unwrap) and cached so each usage will only + look up the class once. + +* Added caching to the `sel!` macro so that each usage will only register the + selector once. 
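
For a sense of how this caching works, here is a minimal sketch of the per-call-site atomic-static pattern (an illustration only, not the crate's exact expansion — the real macro caches a `Class` pointer and registers selectors via the runtime; `lookup_class` below is a hypothetical stand-in for `objc_getClass`):

``` rust
use std::sync::atomic::{AtomicUsize, Ordering};

// Hypothetical stand-in for the real objc_getClass FFI call.
fn lookup_class(name: &str) -> usize {
    name.len() // pretend this is a non-null Class pointer
}

macro_rules! cached_class {
    ($name:expr) => {{
        // Each macro use expands to its own static, so the runtime
        // lookup happens at most once per call site. Relaxed ordering
        // suffices because the lookup is idempotent and thread-safe.
        static CACHE: AtomicUsize = AtomicUsize::new(0);
        let mut ptr = CACHE.load(Ordering::Relaxed);
        if ptr == 0 {
            ptr = lookup_class($name);
            CACHE.store(ptr, Ordering::Relaxed);
        }
        ptr
    }};
}

fn main() {
    // The first pass through this call site performs the lookup;
    // later passes reuse the cached value.
    for _ in 0..3 {
        let cls = cached_class!("NSObject");
        assert_eq!(cls, 8);
    }
}
```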
+ +### Fixed + +* Fixed the implementation of `objc::runtime` structs so there can't be unsound + references to uninhabited types. + +## 0.2.2 + +### Added + +* Implemented `Sync` and `Send` for `Sel`. + +## 0.2.1 + +### Added + +* Added support for working with protocols with the `Protocol` struct. + The protocols a class conforms to can be examined with the new + `Class::adopted_protocols` and `Class::conforms_to` methods. + +* Protocols can be declared using the new `ProtocolDecl` struct. + +## 0.2.0 + +### Added + +* Added verification for the types used when sending messages. + This can be enabled for all messages with the `"verify_message"` feature, + or you can test before sending specific messages with the + `Message::verify_message` method. Verification errors are reported using the + new `MessageError` struct. + +* Added support for the GNUstep runtime! + Operating systems besides OSX and iOS will fall back to the GNUstep runtime. + +* Root classes can be declared by using the `ClassDecl::root` constructor. + +### Changed + +* C types are now used from `std::os::raw` rather than `libc`. This means + `Encode` may not be implemented for `libc` types; switch them to the + `std::os::raw` equivalents instead. This avoids an issue that would arise + from simultaneously using different versions of the libc crate. + +* Dynamic messaging was moved into the `Message` trait; instead of + `().send(obj, sel!(description))`, use + `obj.send_message(sel!(description), ())`. + +* Rearranged the parameters to `ClassDecl::new` for consistency; instead of + `ClassDecl::new(superclass, "MyObject")`, use + `ClassDecl::new("MyObject", superclass)`. + +* Overhauled the `MethodImplementation` trait. Encodings are now accessed + through the `MethodImplementation::Args` associated type. The `imp_for` + method was replaced with `imp` and no longer takes a selector or returns an + `UnequalArgsError`, although `ClassDecl::add_method` still validates the + number of arguments. + +* Updated the definition of `Imp` to not use the old dispatch prototypes. + To invoke an `Imp`, it must first be transmuted to the correct type. + +* Removed `objc_msgSend` functions from the `runtime` module; the availability + of these functions varies and they shouldn't be called without trasmuting, + so they are now hidden as an implementation detail of messaging. + +### Fixed + +* Corrected alignment of ivars in `ClassDecl`; declared classes may now have a + smaller size. + +* With the `"exception"` or `"verify_message"` feature enabled, panics from + `msg_send!` will now be triggered from the line and file where the macro is + used, rather than from within the implementation of messaging. diff --git a/third_party/rust/objc/Cargo.toml b/third_party/rust/objc/Cargo.toml new file mode 100644 index 000000000000..33a3c701832f --- /dev/null +++ b/third_party/rust/objc/Cargo.toml @@ -0,0 +1,33 @@ +# THIS FILE IS AUTOMATICALLY GENERATED BY CARGO +# +# When uploading crates to the registry Cargo will automatically +# "normalize" Cargo.toml files for maximal compatibility +# with all versions of Cargo and also rewrite `path` dependencies +# to registry (e.g. crates.io) dependencies +# +# If you believe there's an error in this file please file an +# issue against the rust-lang/cargo repository. 
If you're +# editing this file be aware that the upstream Cargo.toml +# will likely look very different (and much more reasonable) + +[package] +name = "objc" +version = "0.2.6" +authors = ["Steven Sheldon"] +exclude = [".gitignore", ".travis.yml", "doc.sh", "travis_install.sh", "travis_test.sh", "tests-ios/**"] +description = "Objective-C Runtime bindings and wrapper for Rust." +documentation = "http://ssheldon.github.io/rust-objc/objc/" +readme = "README.md" +keywords = ["objective-c", "osx", "ios", "cocoa", "uikit"] +license = "MIT" +repository = "http://github.com/SSheldon/rust-objc" +[dependencies.malloc_buf] +version = "0.0" + +[dependencies.objc_exception] +version = "0.1" +optional = true + +[features] +exception = ["objc_exception"] +verify_message = [] diff --git a/third_party/rust/objc/LICENSE.txt b/third_party/rust/objc/LICENSE.txt new file mode 100644 index 000000000000..e9ab3babc0d8 --- /dev/null +++ b/third_party/rust/objc/LICENSE.txt @@ -0,0 +1,21 @@ +MIT License + +Copyright (c) Steven Sheldon + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. diff --git a/third_party/rust/objc/README.md b/third_party/rust/objc/README.md new file mode 100644 index 000000000000..55fba64840d4 --- /dev/null +++ b/third_party/rust/objc/README.md @@ -0,0 +1,99 @@ +Objective-C Runtime bindings and wrapper for Rust. + +* Documentation: http://ssheldon.github.io/rust-objc/objc/ +* Crate: https://crates.io/crates/objc + +## Messaging objects + +Objective-C objects can be messaged using the `msg_send!` macro: + +``` rust +let cls = class!(NSObject); +let obj: *mut Object = msg_send![cls, new]; +let hash: usize = msg_send![obj, hash]; +let is_kind: BOOL = msg_send![obj, isKindOfClass:cls]; +// Even void methods must have their return type annotated +let _: () = msg_send![obj, release]; +``` + +## Reference counting + +The utilities of the `rc` module provide ARC-like semantics for working with +Objective-C's reference counted objects in Rust. +A `StrongPtr` retains an object and releases the object when dropped. +A `WeakPtr` will not retain the object, but can be upgraded to a `StrongPtr` +and safely fails if the object has been deallocated. 
+ +``` rust +// StrongPtr will release the object when dropped +let obj = unsafe { + StrongPtr::new(msg_send![class!(NSObject), new]) +}; + +// Cloning retains the object an additional time +let cloned = obj.clone(); +autoreleasepool(|| { + // Autorelease consumes the StrongPtr, but won't + // actually release until the end of an autoreleasepool + cloned.autorelease(); +}); + +// Weak references won't retain the object +let weak = obj.weak(); +drop(obj); +assert!(weak.load().is_null()); +``` + +## Declaring classes + +Classes can be declared using the `ClassDecl` struct. Instance variables and +methods can then be added before the class is ultimately registered. + +The following example demonstrates declaring a class named `MyNumber` that has +one ivar, a `u32` named `_number` and a `number` method that returns it: + +``` rust +let superclass = class!(NSObject); +let mut decl = ClassDecl::new("MyNumber", superclass).unwrap(); + +// Add an instance variable +decl.add_ivar::("_number"); + +// Add an ObjC method for getting the number +extern fn my_number_get(this: &Object, _cmd: Sel) -> u32 { + unsafe { *this.get_ivar("_number") } +} +unsafe { + decl.add_method(sel!(number), + my_number_get as extern fn(&Object, Sel) -> u32); +} + +decl.register(); +``` + +## Exceptions + +By default, if the `msg_send!` macro causes an exception to be thrown, this +will unwind into Rust resulting in unsafe, undefined behavior. +However, this crate has an `"exception"` feature which, when enabled, wraps +each `msg_send!` in a `@try`/`@catch` and panics if an exception is caught, +preventing Objective-C from unwinding into Rust. + +## Message type verification + +The Objective-C runtime includes encodings for each method that describe the +argument and return types. This crate can take advantage of these encodings to +verify that the types used in Rust match the types encoded for the method. + +To use this functionality, enable the `"verify_message"` feature. +With this feature enabled, type checking is performed for every message send, +which also requires that all arguments and return values for all messages +implement `Encode`. + +If this requirement is burdensome or you'd rather just verify specific messages, +you can call the `Message::verify_message` method for specific selectors. + +## Support for other Operating Systems + +The bindings can be used on Linux or *BSD utilizing the +[GNUstep Objective-C runtime](https://www.github.com/gnustep/libobjc2). 
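
As a complement to the verification section above, here is a minimal sketch of checking a selector's signature before relying on it. It assumes an Objective-C runtime where `NSObject` is available (macOS/iOS, or GNUstep) and uses the crate's `verify_message` method as described:

``` rust
#[macro_use]
extern crate objc;

use objc::runtime::Object;
use objc::Message;

fn main() {
    let obj: &Object = unsafe {
        let obj: *mut Object = msg_send![class!(NSObject), new];
        &*obj
    };

    // -[NSObject hash] takes no arguments and returns NSUInteger,
    // so a () -> usize signature should verify.
    assert!(obj.verify_message::<(), usize>(sel!(hash)).is_ok());

    // A wrong return type for the same selector should be rejected.
    assert!(obj.verify_message::<(), f64>(sel!(hash)).is_err());

    let _: () = unsafe { msg_send![obj, release] };
}
```

Note that without the `"verify_message"` feature this check is opt-in and per-selector, so it can be limited to the handful of messages where a signature mismatch would be hard to debug.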
diff --git a/third_party/rust/objc/examples/example.rs b/third_party/rust/objc/examples/example.rs new file mode 100644 index 000000000000..7c33c2339b0b --- /dev/null +++ b/third_party/rust/objc/examples/example.rs @@ -0,0 +1,45 @@ +#[macro_use] +extern crate objc; + +use objc::Encode; +use objc::rc::StrongPtr; +use objc::runtime::{Class, Object}; + +fn main() { + + let cls = class!(NSObject); + println!("NSObject size: {}", cls.instance_size()); + + + println!("NSObject ivars:"); + for ivar in cls.instance_variables().iter() { + println!("{}", ivar.name()); + } + + + let obj = unsafe { + let obj: *mut Object = msg_send![cls, alloc]; + let obj: *mut Object = msg_send![obj, init]; + StrongPtr::new(obj) + }; + println!("NSObject address: {:p}", obj); + + + let isa: *const Class = unsafe { + *(**obj).get_ivar("isa") + }; + println!("NSObject isa: {:?}", isa); + + + let hash_sel = sel!(hash); + let hash_method = cls.instance_method(hash_sel).unwrap(); + let hash_return = hash_method.return_type(); + println!("-[NSObject hash] return type: {:?}", hash_return); + assert!(hash_return == usize::encode()); + + + let hash: usize = unsafe { + msg_send![*obj, hash] + }; + println!("NSObject hash: {}", hash); +} diff --git a/third_party/rust/objc/src/declare.rs b/third_party/rust/objc/src/declare.rs new file mode 100644 index 000000000000..e0b64e9da053 --- /dev/null +++ b/third_party/rust/objc/src/declare.rs @@ -0,0 +1,340 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +use std::ffi::CString; +use std::mem; +use std::ptr; + +use runtime::{BOOL, Class, Imp, NO, Object, Protocol, Sel, self}; +use {Encode, EncodeArguments, Encoding, Message}; + + +pub trait MethodImplementation { + + type Callee: Message; + + type Ret: Encode; + + type Args: EncodeArguments; + + + fn imp(self) -> Imp; +} + +macro_rules! 
method_decl_impl { + (-$s:ident, $r:ident, $f:ty, $($t:ident),*) => ( + impl<$s, $r $(, $t)*> MethodImplementation for $f + where $s: Message, $r: Encode $(, $t: Encode)* { + type Callee = $s; + type Ret = $r; + type Args = ($($t,)*); + + fn imp(self) -> Imp { + unsafe { mem::transmute(self) } + } + } + ); + ($($t:ident),*) => ( + method_decl_impl!(-T, R, extern fn(&T, Sel $(, $t)*) -> R, $($t),*); + method_decl_impl!(-T, R, extern fn(&mut T, Sel $(, $t)*) -> R, $($t),*); + ); +} + +method_decl_impl!(); +method_decl_impl!(A); +method_decl_impl!(A, B); +method_decl_impl!(A, B, C); +method_decl_impl!(A, B, C, D); +method_decl_impl!(A, B, C, D, E); +method_decl_impl!(A, B, C, D, E, F); +method_decl_impl!(A, B, C, D, E, F, G); +method_decl_impl!(A, B, C, D, E, F, G, H); +method_decl_impl!(A, B, C, D, E, F, G, H, I); +method_decl_impl!(A, B, C, D, E, F, G, H, I, J); +method_decl_impl!(A, B, C, D, E, F, G, H, I, J, K); +method_decl_impl!(A, B, C, D, E, F, G, H, I, J, K, L); + +fn count_args(sel: Sel) -> usize { + sel.name().chars().filter(|&c| c == ':').count() +} + +fn method_type_encoding(ret: &Encoding, args: &[Encoding]) -> CString { + let mut types = ret.as_str().to_owned(); + + types.push_str(<*mut Object>::encode().as_str()); + types.push_str(Sel::encode().as_str()); + types.extend(args.iter().map(|e| e.as_str())); + CString::new(types).unwrap() +} + +fn log2_align_of() -> u8 { + let align = mem::align_of::(); + + debug_assert!(align.count_ones() == 1); + + align.trailing_zeros() as u8 +} + + + +pub struct ClassDecl { + cls: *mut Class, +} + +impl ClassDecl { + fn with_superclass(name: &str, superclass: Option<&Class>) + -> Option { + let name = CString::new(name).unwrap(); + let super_ptr = superclass.map_or(ptr::null(), |c| c); + let cls = unsafe { + runtime::objc_allocateClassPair(super_ptr, name.as_ptr(), 0) + }; + if cls.is_null() { + None + } else { + Some(ClassDecl { cls: cls }) + } + } + + + + pub fn new(name: &str, superclass: &Class) -> Option { + ClassDecl::with_superclass(name, Some(superclass)) + } + + + + + + + + + + + + + + + pub fn root(name: &str, intitialize_fn: extern fn(&Class, Sel)) + -> Option { + let mut decl = ClassDecl::with_superclass(name, None); + if let Some(ref mut decl) = decl { + unsafe { + decl.add_class_method(sel!(initialize), intitialize_fn); + } + } + decl + } + + + + + + + pub unsafe fn add_method(&mut self, sel: Sel, func: F) + where F: MethodImplementation { + let encs = F::Args::encodings(); + let encs = encs.as_ref(); + let sel_args = count_args(sel); + assert!(sel_args == encs.len(), + "Selector accepts {} arguments, but function accepts {}", + sel_args, encs.len(), + ); + + let types = method_type_encoding(&F::Ret::encode(), encs); + let success = runtime::class_addMethod(self.cls, sel, func.imp(), + types.as_ptr()); + assert!(success != NO, "Failed to add method {:?}", sel); + } + + + + + + + pub unsafe fn add_class_method(&mut self, sel: Sel, func: F) + where F: MethodImplementation { + let encs = F::Args::encodings(); + let encs = encs.as_ref(); + let sel_args = count_args(sel); + assert!(sel_args == encs.len(), + "Selector accepts {} arguments, but function accepts {}", + sel_args, encs.len(), + ); + + let types = method_type_encoding(&F::Ret::encode(), encs); + let metaclass = (*self.cls).metaclass() as *const _ as *mut _; + let success = runtime::class_addMethod(metaclass, sel, func.imp(), + types.as_ptr()); + assert!(success != NO, "Failed to add class method {:?}", sel); + } + + + + pub fn add_ivar(&mut self, name: &str) where T: Encode 
{ + let c_name = CString::new(name).unwrap(); + let encoding = CString::new(T::encode().as_str()).unwrap(); + let size = mem::size_of::(); + let align = log2_align_of::(); + let success = unsafe { + runtime::class_addIvar(self.cls, c_name.as_ptr(), size, align, + encoding.as_ptr()) + }; + assert!(success != NO, "Failed to add ivar {}", name); + } + + + + pub fn add_protocol(&mut self, proto: &Protocol) { + let success = unsafe { runtime::class_addProtocol(self.cls, proto) }; + assert!(success != NO, "Failed to add protocol {:?}", proto); + } + + + + pub fn register(self) -> &'static Class { + unsafe { + let cls = self.cls; + runtime::objc_registerClassPair(cls); + + mem::forget(self); + &*cls + } + } +} + +impl Drop for ClassDecl { + fn drop(&mut self) { + unsafe { + runtime::objc_disposeClassPair(self.cls); + } + } +} + + + +pub struct ProtocolDecl { + proto: *mut Protocol +} + +impl ProtocolDecl { + + + pub fn new(name: &str) -> Option { + let c_name = CString::new(name).unwrap(); + let proto = unsafe { + runtime::objc_allocateProtocol(c_name.as_ptr()) + }; + if proto.is_null() { + None + } else { + Some(ProtocolDecl { proto: proto }) + } + } + + fn add_method_description_common(&mut self, sel: Sel, is_required: bool, + is_instance_method: bool) + where Args: EncodeArguments, + Ret: Encode { + let encs = Args::encodings(); + let encs = encs.as_ref(); + let sel_args = count_args(sel); + assert!(sel_args == encs.len(), + "Selector accepts {} arguments, but function accepts {}", + sel_args, encs.len(), + ); + let types = method_type_encoding(&Ret::encode(), encs); + unsafe { + runtime::protocol_addMethodDescription( + self.proto, sel, types.as_ptr(), is_required as BOOL, is_instance_method as BOOL); + } + } + + + pub fn add_method_description(&mut self, sel: Sel, is_required: bool) + where Args: EncodeArguments, + Ret: Encode { + self.add_method_description_common::(sel, is_required, true) + } + + + pub fn add_class_method_description(&mut self, sel: Sel, is_required: bool) + where Args: EncodeArguments, + Ret: Encode { + self.add_method_description_common::(sel, is_required, false) + } + + + pub fn add_protocol(&mut self, proto: &Protocol) { + unsafe { + runtime::protocol_addProtocol(self.proto, proto); + } + } + + + + pub fn register(self) -> &'static Protocol { + unsafe { + runtime::objc_registerProtocol(self.proto); + &*self.proto + } + } +} + +#[cfg(test)] +mod tests { + use test_utils; + + #[test] + fn test_custom_class() { + + let obj = test_utils::custom_object(); + unsafe { + let _: () = msg_send![obj, setFoo:13u32]; + let result: u32 = msg_send![obj, foo]; + assert!(result == 13); + } + } + + #[test] + fn test_class_method() { + let cls = test_utils::custom_class(); + unsafe { + let result: u32 = msg_send![cls, classFoo]; + assert!(result == 7); + } + } +} diff --git a/third_party/rust/objc/src/encode.rs b/third_party/rust/objc/src/encode.rs new file mode 100644 index 000000000000..6790626d0d75 --- /dev/null +++ b/third_party/rust/objc/src/encode.rs @@ -0,0 +1,279 @@ +use std::ffi::CStr; +use std::fmt; +use std::os::raw::{c_char, c_void}; +use std::str; +use malloc_buf::MallocBuffer; + +use runtime::{Class, Object, Sel}; + +const QUALIFIERS: &'static [char] = &[ + 'r', + 'n', + 'N', + 'o', + 'O', + 'R', + 'V', +]; + +#[cfg(target_pointer_width = "64")] +const CODE_INLINE_CAP: usize = 30; + +#[cfg(target_pointer_width = "32")] +const CODE_INLINE_CAP: usize = 14; + +enum Code { + Slice(&'static str), + Owned(String), + Inline(u8, [u8; CODE_INLINE_CAP]), + Malloc(MallocBuffer) +} + + 
+ + + +pub struct Encoding { + code: Code, +} + +impl Encoding { + + + pub unsafe fn from_str(code: &str) -> Encoding { + from_str(code) + } + + + pub fn as_str(&self) -> &str { + match self.code { + Code::Slice(code) => code, + Code::Owned(ref code) => code, + Code::Inline(len, ref bytes) => unsafe { + str::from_utf8_unchecked(&bytes[..len as usize]) + }, + Code::Malloc(ref buf) => unsafe { + str::from_utf8_unchecked(&buf[..buf.len() - 1]) + }, + } + } +} + +impl Clone for Encoding { + fn clone(&self) -> Encoding { + if let Code::Slice(code) = self.code { + from_static_str(code) + } else { + from_str(self.as_str()) + } + } +} + +impl PartialEq for Encoding { + fn eq(&self, other: &Encoding) -> bool { + + let s = self.as_str().trim_left_matches(QUALIFIERS); + let o = other.as_str().trim_left_matches(QUALIFIERS); + s == o + } +} + +impl fmt::Debug for Encoding { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + write!(f, "{}", self.as_str()) + } +} + +pub fn from_static_str(code: &'static str) -> Encoding { + Encoding { code: Code::Slice(code) } +} + +pub fn from_str(code: &str) -> Encoding { + if code.len() > CODE_INLINE_CAP { + Encoding { code: Code::Owned(code.to_owned()) } + } else { + let mut bytes = [0; CODE_INLINE_CAP]; + for (dst, byte) in bytes.iter_mut().zip(code.bytes()) { + *dst = byte; + } + Encoding { code: Code::Inline(code.len() as u8, bytes) } + } +} + +pub unsafe fn from_malloc_str(ptr: *mut c_char) -> Encoding { + let s = CStr::from_ptr(ptr); + let bytes = s.to_bytes_with_nul(); + assert!(str::from_utf8(bytes).is_ok()); + let buf = MallocBuffer::new(ptr as *mut u8, bytes.len()).unwrap(); + Encoding { code: Code::Malloc(buf) } +} + + + + + + +pub unsafe trait Encode { + + fn encode() -> Encoding; +} + +macro_rules! encode_impls { + ($($t:ty : $s:expr,)*) => ($( + unsafe impl Encode for $t { + fn encode() -> Encoding { from_static_str($s) } + } + )*); +} + +encode_impls!( + i8: "c", + i16: "s", + i32: "i", + i64: "q", + u8: "C", + u16: "S", + u32: "I", + u64: "Q", + f32: "f", + f64: "d", + bool: "B", + (): "v", + *mut c_char: "*", + *const c_char: "r*", + *mut c_void: "^v", + *const c_void: "r^v", + Sel: ":", +); + +unsafe impl Encode for isize { + #[cfg(target_pointer_width = "32")] + fn encode() -> Encoding { i32::encode() } + + #[cfg(target_pointer_width = "64")] + fn encode() -> Encoding { i64::encode() } +} + +unsafe impl Encode for usize { + #[cfg(target_pointer_width = "32")] + fn encode() -> Encoding { u32::encode() } + + #[cfg(target_pointer_width = "64")] + fn encode() -> Encoding { u64::encode() } +} + +macro_rules! 
encode_message_impl { + ($code:expr, $name:ident) => ( + encode_message_impl!($code, $name,); + ); + ($code:expr, $name:ident, $($t:ident),*) => ( + unsafe impl<'a $(, $t)*> $crate::Encode for &'a $name<$($t),*> { + fn encode() -> Encoding { from_static_str($code) } + } + + unsafe impl<'a $(, $t)*> $crate::Encode for &'a mut $name<$($t),*> { + fn encode() -> Encoding { from_static_str($code) } + } + + unsafe impl<'a $(, $t)*> $crate::Encode for Option<&'a $name<$($t),*>> { + fn encode() -> Encoding { from_static_str($code) } + } + + unsafe impl<'a $(, $t)*> $crate::Encode for Option<&'a mut $name<$($t),*>> { + fn encode() -> Encoding { from_static_str($code) } + } + + unsafe impl<$($t),*> $crate::Encode for *const $name<$($t),*> { + fn encode() -> Encoding { from_static_str($code) } + } + + unsafe impl<$($t),*> $crate::Encode for *mut $name<$($t),*> { + fn encode() -> Encoding { from_static_str($code) } + } + ); +} + +encode_message_impl!("@", Object); + +encode_message_impl!("#", Class); + + + +pub trait EncodeArguments { + + type Encs: AsRef<[Encoding]>; + + + fn encodings() -> Self::Encs; +} + +macro_rules! count_idents { + () => (0); + ($a:ident) => (1); + ($a:ident, $($b:ident),+) => (1 + count_idents!($($b),*)); +} + +macro_rules! encode_args_impl { + ($($t:ident),*) => ( + impl<$($t: Encode),*> EncodeArguments for ($($t,)*) { + type Encs = [Encoding; count_idents!($($t),*)]; + + fn encodings() -> Self::Encs { + [ + $($t::encode()),* + ] + } + } + ); +} + +encode_args_impl!(); +encode_args_impl!(A); +encode_args_impl!(A, B); +encode_args_impl!(A, B, C); +encode_args_impl!(A, B, C, D); +encode_args_impl!(A, B, C, D, E); +encode_args_impl!(A, B, C, D, E, F); +encode_args_impl!(A, B, C, D, E, F, G); +encode_args_impl!(A, B, C, D, E, F, G, H); +encode_args_impl!(A, B, C, D, E, F, G, H, I); +encode_args_impl!(A, B, C, D, E, F, G, H, I, J); +encode_args_impl!(A, B, C, D, E, F, G, H, I, J, K); +encode_args_impl!(A, B, C, D, E, F, G, H, I, J, K, L); + +#[cfg(test)] +mod tests { + use runtime::{Class, Object, Sel}; + use super::{Encode, Encoding}; + + #[test] + fn test_encode() { + assert!(u32::encode().as_str() == "I"); + assert!(<()>::encode().as_str() == "v"); + assert!(<&Object>::encode().as_str() == "@"); + assert!(<*mut Object>::encode().as_str() == "@"); + assert!(<&Class>::encode().as_str() == "#"); + assert!(Sel::encode().as_str() == ":"); + } + + #[test] + fn test_inline_encoding() { + let enc = unsafe { Encoding::from_str("C") }; + assert!(enc.as_str() == "C"); + + let enc2 = enc.clone(); + assert!(enc2 == enc); + assert!(enc2.as_str() == "C"); + } + + #[test] + fn test_owned_encoding() { + let s = "{Test=CCCCCCCCCCCCCCCCCCCCCCCCC}"; + let enc = unsafe { Encoding::from_str(s) }; + assert!(enc.as_str() == s); + + let enc2 = enc.clone(); + assert!(enc2 == enc); + assert!(enc2.as_str() == s); + } +} diff --git a/third_party/rust/objc/src/exception.rs b/third_party/rust/objc/src/exception.rs new file mode 100644 index 000000000000..6fcfa7e87ad3 --- /dev/null +++ b/third_party/rust/objc/src/exception.rs @@ -0,0 +1,11 @@ +use objc_exception; + +use rc::StrongPtr; +use runtime::Object; + +pub unsafe fn try(closure: F) -> Result + where F: FnOnce() -> R { + objc_exception::try(closure).map_err(|exception| { + StrongPtr::new(exception as *mut Object) + }) +} diff --git a/third_party/rust/objc/src/lib.rs b/third_party/rust/objc/src/lib.rs new file mode 100644 index 000000000000..84adace32638 --- /dev/null +++ b/third_party/rust/objc/src/lib.rs @@ -0,0 +1,90 @@ + + + + + + + + + + + + + + + + 
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +#![crate_name = "objc"] +#![crate_type = "lib"] + +#![warn(missing_docs)] + +extern crate malloc_buf; +#[cfg(feature = "exception")] +extern crate objc_exception; + +pub use encode::{Encode, EncodeArguments, Encoding}; +pub use message::{Message, MessageArguments, MessageError}; + +pub use message::send_message as __send_message; +pub use message::send_super_message as __send_super_message; + +#[macro_use] +mod macros; + +pub mod runtime; +pub mod declare; +pub mod rc; +mod encode; +#[cfg(feature = "exception")] +mod exception; +mod message; + +#[cfg(test)] +mod test_utils; diff --git a/third_party/rust/objc/src/macros.rs b/third_party/rust/objc/src/macros.rs new file mode 100644 index 000000000000..ccd6bd218c16 --- /dev/null +++ b/third_party/rust/objc/src/macros.rs @@ -0,0 +1,140 @@ + + + + + + + + + + + + + + +#[macro_export] +macro_rules! class { + ($name:ident) => ({ + #[allow(deprecated)] + #[inline(always)] + fn get_class(name: &str) -> Option<&'static $crate::runtime::Class> { + unsafe { + #[cfg_attr(feature = "cargo-clippy", allow(replace_consts))] + static CLASS: ::std::sync::atomic::AtomicUsize = ::std::sync::atomic::ATOMIC_USIZE_INIT; + // `Relaxed` should be fine since `objc_getClass` is thread-safe. + let ptr = CLASS.load(::std::sync::atomic::Ordering::Relaxed) as *const $crate::runtime::Class; + if ptr.is_null() { + let cls = $crate::runtime::objc_getClass(name.as_ptr() as *const _); + CLASS.store(cls as usize, ::std::sync::atomic::Ordering::Relaxed); + if cls.is_null() { None } else { Some(&*cls) } + } else { + Some(&*ptr) + } + } + } + match get_class(concat!(stringify!($name), '\0')) { + Some(cls) => cls, + None => panic!("Class with name {} could not be found", stringify!($name)), + } + }) +} + +#[doc(hidden)] +#[macro_export] +macro_rules! sel_impl { + + + ($name:expr) => ({ + #[allow(deprecated)] + #[inline(always)] + fn register_sel(name: &str) -> $crate::runtime::Sel { + unsafe { + #[cfg_attr(feature = "cargo-clippy", allow(replace_consts))] + static SEL: ::std::sync::atomic::AtomicUsize = ::std::sync::atomic::ATOMIC_USIZE_INIT; + let ptr = SEL.load(::std::sync::atomic::Ordering::Relaxed) as *const ::std::os::raw::c_void; + // It should be fine to use `Relaxed` ordering here because `sel_registerName` is + // thread-safe. + if ptr.is_null() { + let sel = $crate::runtime::sel_registerName(name.as_ptr() as *const _); + SEL.store(sel.as_ptr() as usize, ::std::sync::atomic::Ordering::Relaxed); + sel + } else { + $crate::runtime::Sel::from_ptr(ptr) + } + } + } + register_sel($name) + }) +} + + + + + + + + + + + + + +#[macro_export] +macro_rules! sel { + ($name:ident) => ({sel_impl!(concat!(stringify!($name), '\0'))}); + ($($name:ident :)+) => ({sel_impl!(concat!($(stringify!($name), ':'),+, '\0'))}); +} + + + + + + + + + + + + + + + + + + + + + + + +#[macro_export] +macro_rules! 
msg_send { + (super($obj:expr, $superclass:expr), $name:ident) => ({ + let sel = sel!($name); + match $crate::__send_super_message(&*$obj, $superclass, sel, ()) { + Err(s) => panic!("{}", s), + Ok(r) => r, + } + }); + (super($obj:expr, $superclass:expr), $($name:ident : $arg:expr)+) => ({ + let sel = sel!($($name:)+); + match $crate::__send_super_message(&*$obj, $superclass, sel, ($($arg,)*)) { + Err(s) => panic!("{}", s), + Ok(r) => r, + } + }); + ($obj:expr, $name:ident) => ({ + let sel = sel!($name); + match $crate::__send_message(&*$obj, sel, ()) { + Err(s) => panic!("{}", s), + Ok(r) => r, + } + }); + ($obj:expr, $($name:ident : $arg:expr)+) => ({ + let sel = sel!($($name:)+); + match $crate::__send_message(&*$obj, sel, ($($arg,)*)) { + Err(s) => panic!("{}", s), + Ok(r) => r, + } + }); +} diff --git a/third_party/rust/objc/src/message/apple/arm.rs b/third_party/rust/objc/src/message/apple/arm.rs new file mode 100644 index 000000000000..b0056d6e7e53 --- /dev/null +++ b/third_party/rust/objc/src/message/apple/arm.rs @@ -0,0 +1,40 @@ +use std::any::{Any, TypeId}; +use std::mem; + +use runtime::Imp; + +extern { + fn objc_msgSend(); + fn objc_msgSend_stret(); + + fn objc_msgSendSuper(); + fn objc_msgSendSuper_stret(); +} + +pub fn msg_send_fn() -> Imp { + + + + + let type_id = TypeId::of::(); + if mem::size_of::() <= 4 || + type_id == TypeId::of::() || + type_id == TypeId::of::() || + type_id == TypeId::of::() { + objc_msgSend + } else { + objc_msgSend_stret + } +} + +pub fn msg_send_super_fn() -> Imp { + let type_id = TypeId::of::(); + if mem::size_of::() <= 4 || + type_id == TypeId::of::() || + type_id == TypeId::of::() || + type_id == TypeId::of::() { + objc_msgSendSuper + } else { + objc_msgSendSuper_stret + } +} diff --git a/third_party/rust/objc/src/message/apple/arm64.rs b/third_party/rust/objc/src/message/apple/arm64.rs new file mode 100644 index 000000000000..07a556031cdd --- /dev/null +++ b/third_party/rust/objc/src/message/apple/arm64.rs @@ -0,0 +1,18 @@ +use runtime::Imp; + +extern { + fn objc_msgSend(); + + fn objc_msgSendSuper(); +} + +pub fn msg_send_fn() -> Imp { + + + + objc_msgSend +} + +pub fn msg_send_super_fn() -> Imp { + objc_msgSendSuper +} diff --git a/third_party/rust/objc/src/message/apple/mod.rs b/third_party/rust/objc/src/message/apple/mod.rs new file mode 100644 index 000000000000..30f59ca8701d --- /dev/null +++ b/third_party/rust/objc/src/message/apple/mod.rs @@ -0,0 +1,40 @@ +use std::any::Any; + +use runtime::{Class, Object, Sel}; +use super::{Message, MessageArguments, MessageError, Super}; + +#[cfg(target_arch = "x86")] +#[path = "x86.rs"] +mod arch; +#[cfg(target_arch = "x86_64")] +#[path = "x86_64.rs"] +mod arch; +#[cfg(target_arch = "arm")] +#[path = "arm.rs"] +mod arch; +#[cfg(target_arch = "aarch64")] +#[path = "arm64.rs"] +mod arch; + +use self::arch::{msg_send_fn, msg_send_super_fn}; + +pub unsafe fn send_unverified(obj: *const T, sel: Sel, args: A) + -> Result + where T: Message, A: MessageArguments, R: Any { + let receiver = obj as *mut T as *mut Object; + let msg_send_fn = msg_send_fn::(); + objc_try!({ + A::invoke(msg_send_fn, receiver, sel, args) + }) +} + +pub unsafe fn send_super_unverified(obj: *const T, superclass: &Class, + sel: Sel, args: A) -> Result + where T: Message, A: MessageArguments, R: Any { + let sup = Super { receiver: obj as *mut T as *mut Object, superclass: superclass }; + let receiver = &sup as *const Super as *mut Object; + let msg_send_fn = msg_send_super_fn::(); + objc_try!({ + A::invoke(msg_send_fn, receiver, sel, 
args) + }) +} diff --git a/third_party/rust/objc/src/message/apple/x86.rs b/third_party/rust/objc/src/message/apple/x86.rs new file mode 100644 index 000000000000..8cfba19f2ee2 --- /dev/null +++ b/third_party/rust/objc/src/message/apple/x86.rs @@ -0,0 +1,40 @@ +use std::any::{Any, TypeId}; +use std::mem; + +use runtime::Imp; + +extern { + fn objc_msgSend(); + fn objc_msgSend_fpret(); + fn objc_msgSend_stret(); + + fn objc_msgSendSuper(); + fn objc_msgSendSuper_stret(); +} + +pub fn msg_send_fn() -> Imp { + + + + + + let type_id = TypeId::of::(); + let size = mem::size_of::(); + if type_id == TypeId::of::() || + type_id == TypeId::of::() { + objc_msgSend_fpret + } else if size == 0 || size == 1 || size == 2 || size == 4 || size == 8 { + objc_msgSend + } else { + objc_msgSend_stret + } +} + +pub fn msg_send_super_fn() -> Imp { + let size = mem::size_of::(); + if size == 0 || size == 1 || size == 2 || size == 4 || size == 8 { + objc_msgSendSuper + } else { + objc_msgSendSuper_stret + } +} diff --git a/third_party/rust/objc/src/message/apple/x86_64.rs b/third_party/rust/objc/src/message/apple/x86_64.rs new file mode 100644 index 000000000000..22f3430c58a8 --- /dev/null +++ b/third_party/rust/objc/src/message/apple/x86_64.rs @@ -0,0 +1,32 @@ +use std::mem; + +use runtime::Imp; + +extern { + fn objc_msgSend(); + fn objc_msgSend_stret(); + + fn objc_msgSendSuper(); + fn objc_msgSendSuper_stret(); +} + +pub fn msg_send_fn() -> Imp { + + + + + + if mem::size_of::() <= 16 { + objc_msgSend + } else { + objc_msgSend_stret + } +} + +pub fn msg_send_super_fn() -> Imp { + if mem::size_of::() <= 16 { + objc_msgSendSuper + } else { + objc_msgSendSuper_stret + } +} diff --git a/third_party/rust/objc/src/message/gnustep.rs b/third_party/rust/objc/src/message/gnustep.rs new file mode 100644 index 000000000000..2e28689cef27 --- /dev/null +++ b/third_party/rust/objc/src/message/gnustep.rs @@ -0,0 +1,35 @@ +use std::any::Any; +use std::mem; + +use runtime::{Class, Object, Imp, Sel}; +use super::{Message, MessageArguments, MessageError, Super}; + +extern { + fn objc_msg_lookup(receiver: *mut Object, op: Sel) -> Imp; + fn objc_msg_lookup_super(sup: *const Super, sel: Sel) -> Imp; +} + +pub unsafe fn send_unverified(obj: *const T, sel: Sel, args: A) + -> Result + where T: Message, A: MessageArguments, R: Any { + if obj.is_null() { + return mem::zeroed(); + } + + let receiver = obj as *mut T as *mut Object; + let msg_send_fn = objc_msg_lookup(receiver, sel); + objc_try!({ + A::invoke(msg_send_fn, receiver, sel, args) + }) +} + +pub unsafe fn send_super_unverified(obj: *const T, superclass: &Class, + sel: Sel, args: A) -> Result + where T: Message, A: MessageArguments, R: Any { + let receiver = obj as *mut T as *mut Object; + let sup = Super { receiver: receiver, superclass: superclass }; + let msg_send_fn = objc_msg_lookup_super(&sup, sel); + objc_try!({ + A::invoke(msg_send_fn, receiver, sel, args) + }) +} diff --git a/third_party/rust/objc/src/message/mod.rs b/third_party/rust/objc/src/message/mod.rs new file mode 100644 index 000000000000..a33f84c26787 --- /dev/null +++ b/third_party/rust/objc/src/message/mod.rs @@ -0,0 +1,296 @@ +use std::any::Any; +use std::error::Error; +use std::fmt; +use std::mem; + +use runtime::{Class, Imp, Object, Sel}; +use {Encode, EncodeArguments}; + +#[cfg(feature = "exception")] +macro_rules! 
objc_try { + ($b:block) => ( + $crate::exception::try(|| $b).map_err(|exception| + if exception.is_null() { + MessageError("Uncaught exception nil".to_owned()) + } else { + MessageError(format!("Uncaught exception {:?}", &**exception)) + } + ) + ) +} + +#[cfg(not(feature = "exception"))] +macro_rules! objc_try { + ($b:block) => (Ok($b)) +} + +mod verify; + +#[cfg(any(target_os = "macos", target_os = "ios"))] +#[path = "apple/mod.rs"] +mod platform; +#[cfg(not(any(target_os = "macos", target_os = "ios")))] +#[path = "gnustep.rs"] +mod platform; + +use self::platform::{send_unverified, send_super_unverified}; +use self::verify::verify_message_signature; + + +#[repr(C)] +pub struct Super { + + pub receiver: *mut Object, + + pub superclass: *const Class, +} + + + +pub unsafe trait Message { + + + + + + + + + + + #[cfg(not(feature = "verify_message"))] + unsafe fn send_message(&self, sel: Sel, args: A) + -> Result + where Self: Sized, A: MessageArguments, R: Any { + send_message(self, sel, args) + } + + #[cfg(feature = "verify_message")] + unsafe fn send_message(&self, sel: Sel, args: A) + -> Result + where Self: Sized, A: MessageArguments + EncodeArguments, + R: Any + Encode { + send_message(self, sel, args) + } + + + + + + + + + + + + + + + + + + + + + + + + + fn verify_message(&self, sel: Sel) -> Result<(), MessageError> + where Self: Sized, A: EncodeArguments, R: Encode { + let obj = unsafe { &*(self as *const _ as *const Object) }; + verify_message_signature::(obj.class(), sel) + } +} + +unsafe impl Message for Object { } + +unsafe impl Message for Class { } + + +pub trait MessageArguments: Sized { + + + + + + unsafe fn invoke(imp: Imp, obj: *mut Object, sel: Sel, args: Self) -> R + where R: Any; +} + +macro_rules! message_args_impl { + ($($a:ident : $t:ident),*) => ( + impl<$($t),*> MessageArguments for ($($t,)*) { + unsafe fn invoke(imp: Imp, obj: *mut Object, sel: Sel, ($($a,)*): Self) -> R + where R: Any { + let imp: unsafe extern fn(*mut Object, Sel $(, $t)*) -> R = + mem::transmute(imp); + imp(obj, sel $(, $a)*) + } + } + ); +} + +message_args_impl!(); +message_args_impl!(a: A); +message_args_impl!(a: A, b: B); +message_args_impl!(a: A, b: B, c: C); +message_args_impl!(a: A, b: B, c: C, d: D); +message_args_impl!(a: A, b: B, c: C, d: D, e: E); +message_args_impl!(a: A, b: B, c: C, d: D, e: E, f: F); +message_args_impl!(a: A, b: B, c: C, d: D, e: E, f: F, g: G); +message_args_impl!(a: A, b: B, c: C, d: D, e: E, f: F, g: G, h: H); +message_args_impl!(a: A, b: B, c: C, d: D, e: E, f: F, g: G, h: H, i: I); +message_args_impl!(a: A, b: B, c: C, d: D, e: E, f: F, g: G, h: H, i: I, j: J); +message_args_impl!(a: A, b: B, c: C, d: D, e: E, f: F, g: G, h: H, i: I, j: J, k: K); +message_args_impl!(a: A, b: B, c: C, d: D, e: E, f: F, g: G, h: H, i: I, j: J, k: K, l: L); + + + + + + + + + + +#[derive(Debug)] +pub struct MessageError(String); + +impl fmt::Display for MessageError { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + fmt::Display::fmt(&self.0, f) + } +} + +impl Error for MessageError { + fn description(&self) -> &str { + &self.0 + } +} + +#[doc(hidden)] +#[inline(always)] +#[cfg(not(feature = "verify_message"))] +pub unsafe fn send_message(obj: *const T, sel: Sel, args: A) + -> Result + where T: Message, A: MessageArguments, R: Any { + send_unverified(obj, sel, args) +} + +#[doc(hidden)] +#[inline(always)] +#[cfg(feature = "verify_message")] +pub unsafe fn send_message(obj: *const T, sel: Sel, args: A) + -> Result + where T: Message, A: MessageArguments + EncodeArguments, + 
R: Any + Encode { + let cls = if obj.is_null() { + return Err(MessageError(format!("Messaging {:?} to nil", sel))); + } else { + (*(obj as *const Object)).class() + }; + + verify_message_signature::(cls, sel).and_then(|_| { + send_unverified(obj, sel, args) + }) +} + +#[doc(hidden)] +#[inline(always)] +#[cfg(not(feature = "verify_message"))] +pub unsafe fn send_super_message(obj: *const T, superclass: &Class, + sel: Sel, args: A) -> Result + where T: Message, A: MessageArguments, R: Any { + send_super_unverified(obj, superclass, sel, args) +} + +#[doc(hidden)] +#[inline(always)] +#[cfg(feature = "verify_message")] +pub unsafe fn send_super_message(obj: *const T, superclass: &Class, + sel: Sel, args: A) -> Result + where T: Message, A: MessageArguments + EncodeArguments, + R: Any + Encode { + if obj.is_null() { + return Err(MessageError(format!("Messaging {:?} to nil", sel))); + } + + verify_message_signature::(superclass, sel).and_then(|_| { + send_super_unverified(obj, superclass, sel, args) + }) +} + +#[cfg(test)] +mod tests { + use test_utils; + use runtime::Object; + use super::Message; + + #[test] + fn test_send_message() { + let obj = test_utils::custom_object(); + let result: u32 = unsafe { + let _: () = msg_send![obj, setFoo:4u32]; + msg_send![obj, foo] + }; + assert!(result == 4); + } + + #[test] + fn test_send_message_stret() { + let obj = test_utils::custom_object(); + let result: test_utils::CustomStruct = unsafe { + msg_send![obj, customStruct] + }; + let expected = test_utils::CustomStruct { a: 1, b:2, c: 3, d: 4 }; + assert!(result == expected); + } + + #[cfg(not(feature = "verify_message"))] + #[test] + fn test_send_message_nil() { + let nil: *mut Object = ::std::ptr::null_mut(); + let result: usize = unsafe { + msg_send![nil, hash] + }; + assert!(result == 0); + + let result: *mut Object = unsafe { + msg_send![nil, description] + }; + assert!(result.is_null()); + + let result: f64 = unsafe { + msg_send![nil, doubleValue] + }; + assert!(result == 0.0); + } + + #[test] + fn test_send_message_super() { + let obj = test_utils::custom_subclass_object(); + let superclass = test_utils::custom_class(); + unsafe { + let _: () = msg_send![obj, setFoo:4u32]; + let foo: u32 = msg_send![super(obj, superclass), foo]; + assert!(foo == 4); + + + let foo: u32 = msg_send![obj, foo]; + assert!(foo == 6); + } + } + + #[test] + fn test_verify_message() { + let obj = test_utils::custom_object(); + assert!(obj.verify_message::<(), u32>(sel!(foo)).is_ok()); + assert!(obj.verify_message::<(u32,), ()>(sel!(setFoo:)).is_ok()); + + + assert!(obj.verify_message::<(), u64>(sel!(setFoo:)).is_err()); + + assert!(obj.verify_message::<(u32,), ()>(sel!(setFoo)).is_err()); + } +} diff --git a/third_party/rust/objc/src/message/verify.rs b/third_party/rust/objc/src/message/verify.rs new file mode 100644 index 000000000000..61bd4ebb3942 --- /dev/null +++ b/third_party/rust/objc/src/message/verify.rs @@ -0,0 +1,49 @@ +use runtime::{Class, Object, Sel}; +use {Encode, EncodeArguments}; +use super::MessageError; + +pub fn verify_message_signature(cls: &Class, sel: Sel) + -> Result<(), MessageError> + where A: EncodeArguments, R: Encode { + let method = match cls.instance_method(sel) { + Some(method) => method, + None => return Err(MessageError( + format!("Method {:?} not found on class {:?}", + sel, cls) + )), + }; + + let ret = R::encode(); + let expected_ret = method.return_type(); + if ret != expected_ret { + return Err(MessageError( + format!("Return type code {:?} does not match expected {:?} for method {:?}", 
+ ret, expected_ret, method.name()) + )); + } + + let self_and_cmd = [<*mut Object>::encode(), Sel::encode()]; + let args = A::encodings(); + let args = args.as_ref(); + + let count = self_and_cmd.len() + args.len(); + let expected_count = method.arguments_count(); + if count != expected_count { + return Err(MessageError( + format!("Method {:?} accepts {} arguments, but {} were given", + method.name(), expected_count, count) + )); + } + + for (i, arg) in self_and_cmd.iter().chain(args).enumerate() { + let expected = method.argument_type(i).unwrap(); + if *arg != expected { + return Err(MessageError( + format!("Method {:?} expected argument at index {} with type code {:?} but was given {:?}", + method.name(), i, expected, arg) + )); + } + } + + Ok(()) +} diff --git a/third_party/rust/objc/src/rc/autorelease.rs b/third_party/rust/objc/src/rc/autorelease.rs new file mode 100644 index 000000000000..f2c661cdce5c --- /dev/null +++ b/third_party/rust/objc/src/rc/autorelease.rs @@ -0,0 +1,30 @@ +use std::os::raw::c_void; +use runtime::{objc_autoreleasePoolPush, objc_autoreleasePoolPop}; + + +struct AutoReleaseHelper { + context: *mut c_void, +} + +impl AutoReleaseHelper { + unsafe fn new() -> Self { + AutoReleaseHelper { context: objc_autoreleasePoolPush() } + } +} + +impl Drop for AutoReleaseHelper { + fn drop(&mut self) { + unsafe { objc_autoreleasePoolPop(self.context) } + } +} + + + + + + + +pub fn autoreleasepool T>(f: F) -> T { + let _context = unsafe { AutoReleaseHelper::new() }; + f() +} diff --git a/third_party/rust/objc/src/rc/mod.rs b/third_party/rust/objc/src/rc/mod.rs new file mode 100644 index 000000000000..16e560b031f6 --- /dev/null +++ b/third_party/rust/objc/src/rc/mod.rs @@ -0,0 +1,123 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +mod strong; +mod weak; +mod autorelease; + +pub use self::strong::StrongPtr; +pub use self::weak::WeakPtr; +pub use self::autorelease::autoreleasepool; + + +#[cfg(all(test, any(target_os = "macos", target_os = "ios")))] +mod tests { + use runtime::Object; + use super::StrongPtr; + use super::autoreleasepool; + + #[test] + fn test_strong_clone() { + fn retain_count(obj: *mut Object) -> usize { + unsafe { msg_send![obj, retainCount] } + } + + let obj = unsafe { + StrongPtr::new(msg_send![class!(NSObject), new]) + }; + assert!(retain_count(*obj) == 1); + + let cloned = obj.clone(); + assert!(retain_count(*cloned) == 2); + assert!(retain_count(*obj) == 2); + + drop(obj); + assert!(retain_count(*cloned) == 1); + } + + #[test] + fn test_weak() { + let obj = unsafe { + StrongPtr::new(msg_send![class!(NSObject), new]) + }; + let weak = obj.weak(); + + let strong = weak.load(); + assert!(*strong == *obj); + drop(strong); + + drop(obj); + assert!(weak.load().is_null()); + } + + #[test] + fn test_weak_copy() { + let obj = unsafe { + StrongPtr::new(msg_send![class!(NSObject), new]) + }; + let weak = obj.weak(); + + let weak2 = weak.clone(); + let strong = weak2.load(); + assert!(*strong == *obj); + } + + #[test] + fn test_autorelease() { + let obj = unsafe { + StrongPtr::new(msg_send![class!(NSObject), new]) + }; + + fn retain_count(obj: *mut Object) -> usize { + unsafe { msg_send![obj, retainCount] } + } + let cloned = obj.clone(); + + autoreleasepool(|| { + obj.autorelease(); + assert!(retain_count(*cloned) == 2); + }); + + + assert!(retain_count(*cloned) == 1); + } +} diff --git a/third_party/rust/objc/src/rc/strong.rs b/third_party/rust/objc/src/rc/strong.rs new file mode 100644 index 000000000000..36625cc32f8c --- 
/dev/null +++ b/third_party/rust/objc/src/rc/strong.rs @@ -0,0 +1,73 @@ +use std::fmt; +use std::mem; +use std::ops::Deref; + +use runtime::{Object, self}; +use super::WeakPtr; + + +pub struct StrongPtr(*mut Object); + +impl StrongPtr { + + + + + pub unsafe fn new(ptr: *mut Object) -> Self { + StrongPtr(ptr) + } + + + + + pub unsafe fn retain(ptr: *mut Object) -> Self { + StrongPtr(runtime::objc_retain(ptr)) + } + + + + + pub fn autorelease(self) -> *mut Object { + let ptr = self.0; + mem::forget(self); + unsafe { + runtime::objc_autorelease(ptr); + } + ptr + } + + + pub fn weak(&self) -> WeakPtr { + unsafe { WeakPtr::new(self.0) } + } +} + +impl Drop for StrongPtr { + fn drop(&mut self) { + unsafe { + runtime::objc_release(self.0); + } + } +} + +impl Clone for StrongPtr { + fn clone(&self) -> StrongPtr { + unsafe { + StrongPtr::retain(self.0) + } + } +} + +impl Deref for StrongPtr { + type Target = *mut Object; + + fn deref(&self) -> &*mut Object { + &self.0 + } +} + +impl fmt::Pointer for StrongPtr { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + fmt::Pointer::fmt(&self.0, f) + } +} diff --git a/third_party/rust/objc/src/rc/weak.rs b/third_party/rust/objc/src/rc/weak.rs new file mode 100644 index 000000000000..dff40e95fe3f --- /dev/null +++ b/third_party/rust/objc/src/rc/weak.rs @@ -0,0 +1,50 @@ +use std::cell::UnsafeCell; +use std::ptr; + +use runtime::{Object, self}; +use super::StrongPtr; + + + + + + + +pub struct WeakPtr(Box>); + +impl WeakPtr { + + + pub unsafe fn new(obj: *mut Object) -> Self { + let ptr = Box::new(UnsafeCell::new(ptr::null_mut())); + runtime::objc_initWeak(ptr.get(), obj); + WeakPtr(ptr) + } + + + + pub fn load(&self) -> StrongPtr { + unsafe { + let ptr = runtime::objc_loadWeakRetained(self.0.get()); + StrongPtr::new(ptr) + } + } +} + +impl Drop for WeakPtr { + fn drop(&mut self) { + unsafe { + runtime::objc_destroyWeak(self.0.get()); + } + } +} + +impl Clone for WeakPtr { + fn clone(&self) -> Self { + let ptr = Box::new(UnsafeCell::new(ptr::null_mut())); + unsafe { + runtime::objc_copyWeak(ptr.get(), self.0.get()); + } + WeakPtr(ptr) + } +} diff --git a/third_party/rust/objc/src/runtime.rs b/third_party/rust/objc/src/runtime.rs new file mode 100644 index 000000000000..3f429144bd02 --- /dev/null +++ b/third_party/rust/objc/src/runtime.rs @@ -0,0 +1,632 @@ + + + + + +use std::ffi::{CStr, CString}; +use std::fmt; +use std::os::raw::{c_char, c_int, c_uint, c_void}; +use std::ptr; +use std::str; +use malloc_buf::MallocBuffer; + +use encode; +use {Encode, Encoding}; + + + + +#[cfg(not(target_arch = "aarch64"))] +pub type BOOL = ::std::os::raw::c_schar; + +#[cfg(not(target_arch = "aarch64"))] +pub const YES: BOOL = 1; + +#[cfg(not(target_arch = "aarch64"))] +pub const NO: BOOL = 0; + +#[cfg(target_arch = "aarch64")] +pub type BOOL = bool; +#[cfg(target_arch = "aarch64")] +pub const YES: BOOL = true; +#[cfg(target_arch = "aarch64")] +pub const NO: BOOL = false; + + +#[repr(C)] +pub struct Sel { + ptr: *const c_void, +} + + + +type PrivateMarker = [u8; 0]; + + +#[repr(C)] +pub struct Ivar { + _priv: PrivateMarker, +} + + +#[repr(C)] +pub struct Method { + _priv: PrivateMarker, +} + + +#[repr(C)] +pub struct Class { + _priv: PrivateMarker, +} + + +#[repr(C)] +pub struct Protocol { + _priv: PrivateMarker +} + + +#[repr(C)] +pub struct Object { + _priv: PrivateMarker, +} + + +pub type Imp = unsafe extern fn(); + +#[link(name = "objc", kind = "dylib")] +extern { + pub fn sel_registerName(name: *const c_char) -> Sel; + pub fn sel_getName(sel: Sel) -> *const c_char; 
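+ // The declarations that follow cover class and object introspection, class and protocol registration, ivar and method inspection, and the reference-counting and weak-reference entry points used by the rc module.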
+ + pub fn class_getName(cls: *const Class) -> *const c_char; + pub fn class_getSuperclass(cls: *const Class) -> *const Class; + pub fn class_getInstanceSize(cls: *const Class) -> usize; + pub fn class_getInstanceMethod(cls: *const Class, sel: Sel) -> *const Method; + pub fn class_getInstanceVariable(cls: *const Class, name: *const c_char) -> *const Ivar; + pub fn class_copyMethodList(cls: *const Class, outCount: *mut c_uint) -> *mut *const Method; + pub fn class_copyIvarList(cls: *const Class, outCount: *mut c_uint) -> *mut *const Ivar; + pub fn class_addMethod(cls: *mut Class, name: Sel, imp: Imp, types: *const c_char) -> BOOL; + pub fn class_addIvar(cls: *mut Class, name: *const c_char, size: usize, alignment: u8, types: *const c_char) -> BOOL; + pub fn class_addProtocol(cls: *mut Class, proto: *const Protocol) -> BOOL; + pub fn class_conformsToProtocol(cls: *const Class, proto: *const Protocol) -> BOOL; + pub fn class_copyProtocolList(cls: *const Class, outCount: *mut c_uint) -> *mut *const Protocol; + + pub fn objc_allocateClassPair(superclass: *const Class, name: *const c_char, extraBytes: usize) -> *mut Class; + pub fn objc_disposeClassPair(cls: *mut Class); + pub fn objc_registerClassPair(cls: *mut Class); + + pub fn class_createInstance(cls: *const Class, extraBytes: usize) -> *mut Object; + pub fn object_dispose(obj: *mut Object) -> *mut Object; + pub fn object_getClass(obj: *const Object) -> *const Class; + + pub fn objc_getClassList(buffer: *mut *const Class, bufferLen: c_int) -> c_int; + pub fn objc_copyClassList(outCount: *mut c_uint) -> *mut *const Class; + pub fn objc_getClass(name: *const c_char) -> *const Class; + pub fn objc_getProtocol(name: *const c_char) -> *const Protocol; + pub fn objc_copyProtocolList(outCount: *mut c_uint) -> *mut *const Protocol; + pub fn objc_allocateProtocol(name: *const c_char) -> *mut Protocol; + pub fn objc_registerProtocol(proto: *mut Protocol); + + pub fn objc_autoreleasePoolPush() -> *mut c_void; + pub fn objc_autoreleasePoolPop(context: *mut c_void); + + pub fn protocol_addMethodDescription(proto: *mut Protocol, name: Sel, types: *const c_char, isRequiredMethod: BOOL, + isInstanceMethod: BOOL); + pub fn protocol_addProtocol(proto: *mut Protocol, addition: *const Protocol); + pub fn protocol_getName(proto: *const Protocol) -> *const c_char; + pub fn protocol_isEqual(proto: *const Protocol, other: *const Protocol) -> BOOL; + pub fn protocol_copyProtocolList(proto: *const Protocol, outCount: *mut c_uint) -> *mut *const Protocol; + pub fn protocol_conformsToProtocol(proto: *const Protocol, other: *const Protocol) -> BOOL; + + pub fn ivar_getName(ivar: *const Ivar) -> *const c_char; + pub fn ivar_getOffset(ivar: *const Ivar) -> isize; + pub fn ivar_getTypeEncoding(ivar: *const Ivar) -> *const c_char; + + pub fn method_getName(method: *const Method) -> Sel; + pub fn method_getImplementation(method: *const Method) -> Imp; + pub fn method_copyReturnType(method: *const Method) -> *mut c_char; + pub fn method_copyArgumentType(method: *const Method, index: c_uint) -> *mut c_char; + pub fn method_getNumberOfArguments(method: *const Method) -> c_uint; + pub fn method_setImplementation(method: *mut Method, imp: Imp) -> Imp; + pub fn method_exchangeImplementations(m1: *mut Method, m2: *mut Method); + + pub fn objc_retain(obj: *mut Object) -> *mut Object; + pub fn objc_release(obj: *mut Object); + pub fn objc_autorelease(obj: *mut Object); + + pub fn objc_loadWeakRetained(location: *mut *mut Object) -> *mut Object; + pub fn objc_initWeak(location: *mut 
*mut Object, obj: *mut Object) -> *mut Object; + pub fn objc_destroyWeak(location: *mut *mut Object); + pub fn objc_copyWeak(to: *mut *mut Object, from: *mut *mut Object); +} + +impl Sel { + + + pub fn register(name: &str) -> Sel { + let name = CString::new(name).unwrap(); + unsafe { + sel_registerName(name.as_ptr()) + } + } + + + pub fn name(&self) -> &str { + let name = unsafe { + CStr::from_ptr(sel_getName(*self)) + }; + str::from_utf8(name.to_bytes()).unwrap() + } + + + + + #[inline] + pub unsafe fn from_ptr(ptr: *const c_void) -> Sel { + Sel { + ptr: ptr, + } + } + + + #[inline] + pub fn as_ptr(&self) -> *const c_void { + self.ptr + } +} + +impl PartialEq for Sel { + fn eq(&self, other: &Sel) -> bool { + self.ptr == other.ptr + } +} + +impl Eq for Sel { } + + +unsafe impl Sync for Sel { } +unsafe impl Send for Sel { } + +impl Copy for Sel { } + +impl Clone for Sel { + fn clone(&self) -> Sel { *self } +} + +impl fmt::Debug for Sel { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + write!(f, "{}", self.name()) + } +} + +impl Ivar { + + pub fn name(&self) -> &str { + let name = unsafe { + CStr::from_ptr(ivar_getName(self)) + }; + str::from_utf8(name.to_bytes()).unwrap() + } + + + pub fn offset(&self) -> isize { + let offset = unsafe { + ivar_getOffset(self) + }; + offset as isize + } + + + pub fn type_encoding(&self) -> Encoding { + let encoding = unsafe { + CStr::from_ptr(ivar_getTypeEncoding(self)) + }; + let s = str::from_utf8(encoding.to_bytes()).unwrap(); + encode::from_str(s) + } +} + +impl Method { + + pub fn name(&self) -> Sel { + unsafe { + method_getName(self) + } + } + + + pub fn return_type(&self) -> Encoding { + unsafe { + let encoding = method_copyReturnType(self); + encode::from_malloc_str(encoding) + } + } + + + + pub fn argument_type(&self, index: usize) -> Option { + unsafe { + let encoding = method_copyArgumentType(self, index as c_uint); + if encoding.is_null() { + None + } else { + Some(encode::from_malloc_str(encoding)) + } + } + } + + + pub fn arguments_count(&self) -> usize { + unsafe { + method_getNumberOfArguments(self) as usize + } + } + + + pub fn implementation(&self) -> Imp { + unsafe { + method_getImplementation(self) + } + } +} + +impl Class { + + + pub fn get(name: &str) -> Option<&'static Class> { + let name = CString::new(name).unwrap(); + unsafe { + let cls = objc_getClass(name.as_ptr()); + if cls.is_null() { None } else { Some(&*cls) } + } + } + + + pub fn classes() -> MallocBuffer<&'static Class> { + unsafe { + let mut count: c_uint = 0; + let classes = objc_copyClassList(&mut count); + MallocBuffer::new(classes as *mut _, count as usize).unwrap() + } + } + + + pub fn classes_count() -> usize { + unsafe { + objc_getClassList(ptr::null_mut(), 0) as usize + } + } + + + pub fn name(&self) -> &str { + let name = unsafe { + CStr::from_ptr(class_getName(self)) + }; + str::from_utf8(name.to_bytes()).unwrap() + } + + + pub fn superclass(&self) -> Option<&Class> { + unsafe { + let superclass = class_getSuperclass(self); + if superclass.is_null() { None } else { Some(&*superclass) } + } + } + + + pub fn metaclass(&self) -> &Class { + unsafe { + let self_ptr: *const Class = self; + &*object_getClass(self_ptr as *const Object) + } + } + + + pub fn instance_size(&self) -> usize { + unsafe { + class_getInstanceSize(self) as usize + } + } + + + + + pub fn instance_method(&self, sel: Sel) -> Option<&Method> { + unsafe { + let method = class_getInstanceMethod(self, sel); + if method.is_null() { None } else { Some(&*method) } + } + } + + + + pub fn 
instance_variable(&self, name: &str) -> Option<&Ivar> { + let name = CString::new(name).unwrap(); + unsafe { + let ivar = class_getInstanceVariable(self, name.as_ptr()); + if ivar.is_null() { None } else { Some(&*ivar) } + } + } + + + pub fn instance_methods(&self) -> MallocBuffer<&Method> { + unsafe { + let mut count: c_uint = 0; + let methods = class_copyMethodList(self, &mut count); + MallocBuffer::new(methods as *mut _, count as usize).unwrap() + } + + } + + + pub fn conforms_to(&self, proto: &Protocol) -> bool { + unsafe { class_conformsToProtocol(self, proto) == YES } + } + + + pub fn adopted_protocols(&self) -> MallocBuffer<&Protocol> { + unsafe { + let mut count: c_uint = 0; + let protos = class_copyProtocolList(self, &mut count); + MallocBuffer::new(protos as *mut _, count as usize).unwrap() + } + } + + + pub fn instance_variables(&self) -> MallocBuffer<&Ivar> { + unsafe { + let mut count: c_uint = 0; + let ivars = class_copyIvarList(self, &mut count); + MallocBuffer::new(ivars as *mut _, count as usize).unwrap() + } + } +} + +impl PartialEq for Class { + fn eq(&self, other: &Class) -> bool { + let self_ptr: *const Class = self; + let other_ptr: *const Class = other; + self_ptr == other_ptr + } +} + +impl Eq for Class { } + +impl fmt::Debug for Class { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + write!(f, "{}", self.name()) + } +} + +impl Protocol { + + + pub fn get(name: &str) -> Option<&'static Protocol> { + let name = CString::new(name).unwrap(); + unsafe { + let proto = objc_getProtocol(name.as_ptr()); + if proto.is_null() { None } else { Some(&*proto) } + } + } + + + pub fn protocols() -> MallocBuffer<&'static Protocol> { + unsafe { + let mut count: c_uint = 0; + let protocols = objc_copyProtocolList(&mut count); + MallocBuffer::new(protocols as *mut _, count as usize).unwrap() + } + } + + + pub fn adopted_protocols(&self) -> MallocBuffer<&Protocol> { + unsafe { + let mut count: c_uint = 0; + let protocols = protocol_copyProtocolList(self, &mut count); + MallocBuffer::new(protocols as *mut _, count as usize).unwrap() + } + } + + + pub fn conforms_to(&self, proto: &Protocol) -> bool { + unsafe { protocol_conformsToProtocol(self, proto) == YES } + } + + + pub fn name(&self) -> &str { + let name = unsafe { + CStr::from_ptr(protocol_getName(self)) + }; + str::from_utf8(name.to_bytes()).unwrap() + } +} + +impl PartialEq for Protocol { + fn eq(&self, other: &Protocol) -> bool { + unsafe { protocol_isEqual(self, other) == YES } + } +} + +impl Eq for Protocol { } + +impl fmt::Debug for Protocol { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + write!(f, "{}", self.name()) + } +} + +impl Object { + + pub fn class(&self) -> &Class { + unsafe { + &*object_getClass(self) + } + } + + + + + + pub unsafe fn get_ivar<T>(&self, name: &str) -> &T where T: Encode { + let offset = { + let cls = self.class(); + match cls.instance_variable(name) { + Some(ivar) => { + assert!(ivar.type_encoding() == T::encode()); + ivar.offset() + } + None => panic!("Ivar {} not found on class {:?}", name, cls), + } + }; + let ptr = { + let self_ptr: *const Object = self; + (self_ptr as *const u8).offset(offset) as *const T + }; + &*ptr + } + + + + + + pub unsafe fn get_mut_ivar<T>(&mut self, name: &str) -> &mut T + where T: Encode { + let offset = { + let cls = self.class(); + match cls.instance_variable(name) { + Some(ivar) => { + assert!(ivar.type_encoding() == T::encode()); + ivar.offset() + } + None => panic!("Ivar {} not found on class {:?}", name, cls), + } + }; + let ptr = { + let 
self_ptr: *mut Object = self; + (self_ptr as *mut u8).offset(offset) as *mut T + }; + &mut *ptr + } + + + + + + pub unsafe fn set_ivar<T>(&mut self, name: &str, value: T) + where T: Encode { + *self.get_mut_ivar::<T>(name) = value; + } +} + +impl fmt::Debug for Object { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + write!(f, "<{:?}: {:p}>", self.class(), self) + } +} + +#[cfg(test)] +mod tests { + use test_utils; + use Encode; + use super::{Class, Protocol, Sel}; + + #[test] + fn test_ivar() { + let cls = test_utils::custom_class(); + let ivar = cls.instance_variable("_foo").unwrap(); + assert!(ivar.name() == "_foo"); + assert!(ivar.type_encoding() == <u32>::encode()); + assert!(ivar.offset() > 0); + + let ivars = cls.instance_variables(); + assert!(ivars.len() > 0); + } + + #[test] + fn test_method() { + let cls = test_utils::custom_class(); + let sel = Sel::register("foo"); + let method = cls.instance_method(sel).unwrap(); + assert!(method.name().name() == "foo"); + assert!(method.arguments_count() == 2); + assert!(method.return_type() == <u32>::encode()); + assert!(method.argument_type(1).unwrap() == Sel::encode()); + + let methods = cls.instance_methods(); + assert!(methods.len() > 0); + } + + #[test] + fn test_class() { + let cls = test_utils::custom_class(); + assert!(cls.name() == "CustomObject"); + assert!(cls.instance_size() > 0); + assert!(cls.superclass().is_none()); + + assert!(Class::get(cls.name()) == Some(cls)); + + let metaclass = cls.metaclass(); + + assert!(metaclass.superclass().unwrap() == cls); + + let subclass = test_utils::custom_subclass(); + assert!(subclass.superclass().unwrap() == cls); + } + + #[test] + fn test_classes() { + assert!(Class::classes_count() > 0); + let classes = Class::classes(); + assert!(classes.len() > 0); + } + + #[test] + fn test_protocol() { + let proto = test_utils::custom_protocol(); + assert!(proto.name() == "CustomProtocol"); + let class = test_utils::custom_class(); + assert!(class.conforms_to(proto)); + let class_protocols = class.adopted_protocols(); + assert!(class_protocols.len() > 0); + } + + #[test] + fn test_protocol_method() { + let class = test_utils::custom_class(); + let result: i32 = unsafe { + msg_send![class, addNumber:1 toNumber:2] + }; + assert_eq!(result, 3); + } + + #[test] + fn test_subprotocols() { + let sub_proto = test_utils::custom_subprotocol(); + let super_proto = test_utils::custom_protocol(); + assert!(sub_proto.conforms_to(super_proto)); + let adopted_protocols = sub_proto.adopted_protocols(); + assert_eq!(adopted_protocols[0], super_proto); + } + + #[test] + fn test_protocols() { + + let _ = test_utils::custom_protocol(); + + let protocols = Protocol::protocols(); + assert!(protocols.len() > 0); + } + + #[test] + fn test_object() { + let mut obj = test_utils::custom_object(); + assert!(obj.class() == test_utils::custom_class()); + let result: u32 = unsafe { + obj.set_ivar("_foo", 4u32); + *obj.get_ivar("_foo") + }; + assert!(result == 4); + } +} diff --git a/third_party/rust/objc/src/test_utils.rs b/third_party/rust/objc/src/test_utils.rs new file mode 100644 index 000000000000..9abcbe16b6b1 --- /dev/null +++ b/third_party/rust/objc/src/test_utils.rs @@ -0,0 +1,187 @@ +use std::ops::{Deref, DerefMut}; +use std::os::raw::c_char; +use std::sync::{Once, ONCE_INIT}; + +use declare::{ClassDecl, ProtocolDecl}; +use runtime::{Class, Object, Protocol, Sel, self}; +use {Encode, Encoding}; + +pub struct CustomObject { + obj: *mut Object, +} + +impl CustomObject { + fn new(class: &Class) -> Self { + let obj = unsafe { 
runtime::class_createInstance(class, 0) + }; + CustomObject { obj: obj } + } +} + +impl Deref for CustomObject { + type Target = Object; + + fn deref(&self) -> &Object { + unsafe { &*self.obj } + } +} + +impl DerefMut for CustomObject { + fn deref_mut(&mut self) -> &mut Object { + unsafe { &mut *self.obj } + } +} + +impl Drop for CustomObject { + fn drop(&mut self) { + unsafe { + runtime::object_dispose(self.obj); + } + } +} + +#[derive(Eq, PartialEq)] +pub struct CustomStruct { + pub a: u64, + pub b: u64, + pub c: u64, + pub d: u64, +} + +unsafe impl Encode for CustomStruct { + fn encode() -> Encoding { + let mut code = "{CustomStruct=".to_owned(); + for _ in 0..4 { + code.push_str(u64::encode().as_str()); + } + code.push_str("}"); + unsafe { + Encoding::from_str(&code) + } + } +} + +pub fn custom_class() -> &'static Class { + static REGISTER_CUSTOM_CLASS: Once = ONCE_INIT; + + REGISTER_CUSTOM_CLASS.call_once(|| { + + extern fn custom_obj_class_initialize(_this: &Class, _cmd: Sel) { } + + let mut decl = ClassDecl::root("CustomObject", custom_obj_class_initialize).unwrap(); + let proto = custom_protocol(); + + decl.add_protocol(proto); + decl.add_ivar::<u32>("_foo"); + + extern fn custom_obj_set_foo(this: &mut Object, _cmd: Sel, foo: u32) { + unsafe { this.set_ivar::<u32>("_foo", foo); } + } + + extern fn custom_obj_get_foo(this: &Object, _cmd: Sel) -> u32 { + unsafe { *this.get_ivar::<u32>("_foo") } + } + + extern fn custom_obj_get_struct(_this: &Object, _cmd: Sel) -> CustomStruct { + CustomStruct { a: 1, b: 2, c: 3, d: 4 } + } + + extern fn custom_obj_class_method(_this: &Class, _cmd: Sel) -> u32 { + 7 + } + + extern fn custom_obj_set_bar(this: &mut Object, _cmd: Sel, bar: u32) { + unsafe { this.set_ivar::<u32>("_foo", bar); } + } + + extern fn custom_obj_add_number_to_number(_this: &Class, _cmd: Sel, fst: i32, snd: i32) -> i32 { + fst + snd + } + + unsafe { + let set_foo: extern fn(&mut Object, Sel, u32) = custom_obj_set_foo; + decl.add_method(sel!(setFoo:), set_foo); + let get_foo: extern fn(&Object, Sel) -> u32 = custom_obj_get_foo; + decl.add_method(sel!(foo), get_foo); + let get_struct: extern fn(&Object, Sel) -> CustomStruct = custom_obj_get_struct; + decl.add_method(sel!(customStruct), get_struct); + let class_method: extern fn(&Class, Sel) -> u32 = custom_obj_class_method; + decl.add_class_method(sel!(classFoo), class_method); + + let protocol_instance_method: extern fn(&mut Object, Sel, u32) = custom_obj_set_bar; + decl.add_method(sel!(setBar:), protocol_instance_method); + let protocol_class_method: extern fn(&Class, Sel, i32, i32) -> i32 = custom_obj_add_number_to_number; + decl.add_class_method(sel!(addNumber:toNumber:), protocol_class_method); + } + + decl.register(); + }); + + class!(CustomObject) +} + +pub fn custom_protocol() -> &'static Protocol { + static REGISTER_CUSTOM_PROTOCOL: Once = ONCE_INIT; + + REGISTER_CUSTOM_PROTOCOL.call_once(|| { + let mut decl = ProtocolDecl::new("CustomProtocol").unwrap(); + + decl.add_method_description::<(i32,), ()>(sel!(setBar:), true); + decl.add_method_description::<(), *const c_char>(sel!(getName), false); + decl.add_class_method_description::<(i32, i32), i32>(sel!(addNumber:toNumber:), true); + + decl.register(); + }); + + Protocol::get("CustomProtocol").unwrap() +} + +pub fn custom_subprotocol() -> &'static Protocol { + static REGISTER_CUSTOM_SUBPROTOCOL: Once = ONCE_INIT; + + REGISTER_CUSTOM_SUBPROTOCOL.call_once(|| { + let super_proto = custom_protocol(); + let mut decl = ProtocolDecl::new("CustomSubProtocol").unwrap(); + + 
decl.add_protocol(super_proto); + decl.add_method_description::<(u32,), u32>(sel!(calculateFoo:), true); + + decl.register(); + }); + + Protocol::get("CustomSubProtocol").unwrap() +} + +pub fn custom_object() -> CustomObject { + CustomObject::new(custom_class()) +} + +pub fn custom_subclass() -> &'static Class { + static REGISTER_CUSTOM_SUBCLASS: Once = ONCE_INIT; + + REGISTER_CUSTOM_SUBCLASS.call_once(|| { + let superclass = custom_class(); + let mut decl = ClassDecl::new("CustomSubclassObject", superclass).unwrap(); + + extern fn custom_subclass_get_foo(this: &Object, _cmd: Sel) -> u32 { + let foo: u32 = unsafe { + msg_send![super(this, custom_class()), foo] + }; + foo + 2 + } + + unsafe { + let get_foo: extern fn(&Object, Sel) -> u32 = custom_subclass_get_foo; + decl.add_method(sel!(foo), get_foo); + } + + decl.register(); + }); + + class!(CustomSubclassObject) +} + +pub fn custom_subclass_object() -> CustomObject { + CustomObject::new(custom_subclass()) +} diff --git a/third_party/rust/objc_exception/.cargo-checksum.json b/third_party/rust/objc_exception/.cargo-checksum.json new file mode 100644 index 000000000000..e756ca913407 --- /dev/null +++ b/third_party/rust/objc_exception/.cargo-checksum.json @@ -0,0 +1 @@ +{"files":{"Cargo.toml":"2926e2f6211cb546e43cfc5769eebf0e4561816800ed77ddff88c71a8fdb2a0d","build.rs":"6734724a6f3c46150f5a6a2adf07b9003ae6f1627fa0ee8fcafa28e9d4aafc15","extern/exception.m":"a6ee21d820126e98ee49ac34db1a6770cfd01f0fb0f71d03127e7eeff91a47c5","src/lib.rs":"24f3b04002dbf24397cc4fd2da7045d49c8b3a06101bab1918d007f7ae9b2207"},"package":null} \ No newline at end of file diff --git a/third_party/rust/objc_exception/Cargo.toml b/third_party/rust/objc_exception/Cargo.toml new file mode 100644 index 000000000000..7bb99c7cfd32 --- /dev/null +++ b/third_party/rust/objc_exception/Cargo.toml @@ -0,0 +1,17 @@ +[package] +name = "objc_exception" +version = "0.1.2" +authors = ["Steven Sheldon"] + +description = "Rust interface for Objective-C's throw and try/catch statements." 
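Stepping back to the `objc` crate vendored above: for orientation, here is how downstream code drives the `declare`/`runtime` API that `test_utils.rs` exercises. This is an illustrative sketch, not part of the patch; it assumes a macOS (Objective-C runtime) target, and the `RustCounter` class name and `increment` selector are hypothetical.

```rust
#[macro_use]
extern crate objc;

use objc::declare::ClassDecl;
use objc::runtime::{Class, Object, Sel};

fn register_counter_class() -> &'static Class {
    let superclass = Class::get("NSObject").unwrap();
    let mut decl = ClassDecl::new("RustCounter", superclass).unwrap();
    decl.add_ivar::<u32>("_count");

    // Method implementations are plain `extern` fns taking receiver and selector.
    extern fn increment(this: &mut Object, _cmd: Sel) {
        unsafe {
            let count: u32 = *this.get_ivar("_count");
            this.set_ivar("_count", count + 1);
        }
    }

    unsafe {
        // `add_method` is unsafe: the fn signature is only checked against the
        // selector's type encoding at runtime, as in `custom_class()` above.
        let imp: extern fn(&mut Object, Sel) = increment;
        decl.add_method(sel!(increment), imp);
    }
    decl.register()
}
```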
+keywords = ["objective-c", "osx", "ios"] +repository = "http://github.com/SSheldon/rust-objc-exception" +documentation = "http://ssheldon.github.io/rust-objc/objc_exception/" +license = "MIT" + +exclude = [".gitignore"] + +build = "build.rs" + +[build-dependencies] +cc = "1" diff --git a/third_party/rust/objc_exception/build.rs b/third_party/rust/objc_exception/build.rs new file mode 100644 index 000000000000..ba728b6e1682 --- /dev/null +++ b/third_party/rust/objc_exception/build.rs @@ -0,0 +1,7 @@ +extern crate cc; + +fn main() { + cc::Build::new() + .file("extern/exception.m") + .compile("libexception.a"); +} diff --git a/third_party/rust/objc_exception/extern/exception.m b/third_party/rust/objc_exception/extern/exception.m new file mode 100644 index 000000000000..700439ecf7d7 --- /dev/null +++ b/third_party/rust/objc_exception/extern/exception.m @@ -0,0 +1,21 @@ +#include +#include + +void RustObjCExceptionThrow(id exception) { + @throw exception; +} + +int RustObjCExceptionTryCatch(void (*try)(void *), void *context, id *error) { + @try { + try(context); + if (error) { + *error = nil; + } + return 0; + } @catch (id exception) { + if (error) { + *error = [exception retain]; + } + return 1; + } +} diff --git a/third_party/rust/objc_exception/src/lib.rs b/third_party/rust/objc_exception/src/lib.rs new file mode 100644 index 000000000000..1b8a2a88f625 --- /dev/null +++ b/third_party/rust/objc_exception/src/lib.rs @@ -0,0 +1,100 @@ + + +use std::mem; +use std::os::raw::{c_int, c_void}; +use std::ptr; + +#[link(name = "objc", kind = "dylib")] +extern { } + +extern { + fn RustObjCExceptionThrow(exception: *mut c_void); + fn RustObjCExceptionTryCatch(try: extern fn(*mut c_void), + context: *mut c_void, error: *mut *mut c_void) -> c_int; +} + + +pub enum Exception { } + + + + + +pub unsafe fn throw(exception: *mut Exception) -> ! 
{ + RustObjCExceptionThrow(exception as *mut _); + unreachable!(); +} + +unsafe fn try_no_ret<F>(closure: F) -> Result<(), *mut Exception> + where F: FnOnce() { + extern fn try_objc_execute_closure<F>(closure: &mut Option<F>) + where F: FnOnce() { + + let closure = closure.take().unwrap(); + closure(); + } + + let f: extern fn(&mut Option<F>) = try_objc_execute_closure; + let f: extern fn(*mut c_void) = mem::transmute(f); + + let mut closure = Some(closure); + let context = &mut closure as *mut _ as *mut c_void; + + let mut exception = ptr::null_mut(); + let success = RustObjCExceptionTryCatch(f, context, &mut exception); + + if success == 0 { + Ok(()) + } else { + Err(exception as *mut _) + } +} + + + + + + + + + + +pub unsafe fn try<F, R>(closure: F) -> Result<R, *mut Exception> + where F: FnOnce() -> R { + let mut value = None; + let result = { + let value_ref = &mut value; + try_no_ret(move || { + *value_ref = Some(closure()); + }) + }; + + result.map(|_| value.unwrap()) +} + +#[cfg(test)] +mod tests { + use std::ptr; + use super::{throw, try}; + + #[test] + fn test_try() { + unsafe { + let s = "Hello".to_string(); + let result = try(move || { + if s.len() > 0 { + throw(ptr::null_mut()); + } + s.len() + }); + assert!(result.unwrap_err() == ptr::null_mut()); + + let mut s = "Hello".to_string(); + let result = try(move || { + s.push_str(", World!"); + s + }); + assert!(result.unwrap() == "Hello, World!"); + } + } +} diff --git a/third_party/rust/range-alloc/.cargo-checksum.json b/third_party/rust/range-alloc/.cargo-checksum.json new file mode 100644 index 000000000000..39f824eab655 --- /dev/null +++ b/third_party/rust/range-alloc/.cargo-checksum.json @@ -0,0 +1 @@ +{"files":{"Cargo.toml":"787aa9611486551e3d92cb2dabbe367f9048ceba1fbd6525006853315338b0c5","src/lib.rs":"f3997ef41c4079ee6bfa55ed08cdfa11230ddc432c2ea26db750ac5eeb29fecf"},"package":"dd5927936723a9e8b715d37d7e4b390455087c4bdf25b9f702309460577b14f9"} \ No newline at end of file diff --git a/third_party/rust/range-alloc/Cargo.toml b/third_party/rust/range-alloc/Cargo.toml new file mode 100644 index 000000000000..c596afdbe32f --- /dev/null +++ b/third_party/rust/range-alloc/Cargo.toml @@ -0,0 +1,26 @@ +# THIS FILE IS AUTOMATICALLY GENERATED BY CARGO +# +# When uploading crates to the registry Cargo will automatically +# "normalize" Cargo.toml files for maximal compatibility +# with all versions of Cargo and also rewrite `path` dependencies +# to registry (e.g. crates.io) dependencies +# +# If you believe there's an error in this file please file an +# issue against the rust-lang/cargo repository. 
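Stepping back to the `objc_exception` API just shown: a minimal sketch of calling the `try`/`throw` pair from client code (illustrative, not part of the patch; it links against the Objective-C runtime, and a 2018-edition caller must spell the reserved name `try` as `r#try`).

```rust
use std::ptr;

use objc_exception::{r#try, throw};

fn main() {
    unsafe {
        // A throwing closure comes back as Err carrying the caught exception.
        let caught = r#try(|| -> usize {
            throw(ptr::null_mut());
        });
        assert!(caught.is_err());

        // A normal return comes back as Ok with the closure's value.
        let fine = r#try(|| 21 * 2);
        assert_eq!(fine.ok(), Some(42));
    }
}
```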
If you're +# editing this file be aware that the upstream Cargo.toml +# will likely look very different (and much more reasonable) + +[package] +name = "range-alloc" +version = "0.1.0" +authors = ["The Gfx-rs Developers"] +description = "Generic range allocator used by gfx-rs backends" +homepage = "https://github.com/gfx-rs/gfx" +documentation = "https://docs.rs/range-alloc" +keywords = ["allocator"] +categories = ["memory-management"] +license = "MIT OR Apache-2.0" +repository = "https://github.com/gfx-rs/gfx" + +[lib] +name = "range_alloc" diff --git a/third_party/rust/range-alloc/src/lib.rs b/third_party/rust/range-alloc/src/lib.rs new file mode 100644 index 000000000000..a81cd17637f4 --- /dev/null +++ b/third_party/rust/range-alloc/src/lib.rs @@ -0,0 +1,267 @@ +use std::fmt::Debug; +use std::iter::Sum; +use std::ops::{Add, AddAssign, Range, Sub}; + + +#[derive(Debug)] +pub struct RangeAllocator<T> { + + initial_range: Range<T>, + + + + free_ranges: Vec<Range<T>>, +} + +#[derive(Clone, Debug, PartialEq)] +pub struct RangeAllocationError<T> { + pub fragmented_free_length: T, +} + +impl<T> RangeAllocator<T> +where + T: Clone + Copy + Add<Output = T> + AddAssign + Sub<Output = T> + Eq + PartialOrd + Debug, +{ + pub fn new(range: Range<T>) -> Self { + RangeAllocator { + initial_range: range.clone(), + free_ranges: vec![range], + } + } + + pub fn allocate_range(&mut self, length: T) -> Result<Range<T>, RangeAllocationError<T>> { + assert_ne!(length + length, length); + let mut best_fit: Option<(usize, Range<T>)> = None; + let mut fragmented_free_length = length - length; + for (index, range) in self.free_ranges.iter().cloned().enumerate() { + let range_length = range.end - range.start; + fragmented_free_length += range_length; + if range_length < length { + continue; + } else if range_length == length { + + best_fit = Some((index, range)); + break; + } + best_fit = Some(match best_fit { + Some((best_index, best_range)) => { + + if range_length < best_range.end - best_range.start { + (index, range) + } else { + (best_index, best_range.clone()) + } + } + None => { + (index, range) + } + }); + } + match best_fit { + Some((index, range)) => { + if range.end - range.start == length { + self.free_ranges.remove(index); + } else { + self.free_ranges[index].start += length; + } + Ok(range.start..(range.start + length)) + } + None => Err(RangeAllocationError { + fragmented_free_length, + }) + } + } + + pub fn free_range(&mut self, range: Range<T>) { + assert!(self.initial_range.start <= range.start && range.end <= self.initial_range.end); + assert!(range.start < range.end); + + + let i = self.free_ranges.iter() + .position(|r| r.start > range.start) + .unwrap_or(self.free_ranges.len()); + + + + if i > 0 && range.start == self.free_ranges[i - 1].end { + + self.free_ranges[i - 1].end = + if i < self.free_ranges.len() && range.end == self.free_ranges[i].start { + + let right = self.free_ranges.remove(i); + right.end + } else { + range.end + }; + + return; + } else if i < self.free_ranges.len() && range.end == self.free_ranges[i].start { + + self.free_ranges[i].start = + if i > 0 && range.start == self.free_ranges[i - 1].end { + + let left = self.free_ranges.remove(i - 1); + left.start + } else { + range.start + }; + + return; + } + + + assert!( + (i == 0 || self.free_ranges[i - 1].end < range.start) && + (i >= self.free_ranges.len() || range.end < self.free_ranges[i].start) + ); + + self.free_ranges.insert(i, range); + } + + + pub fn allocated_ranges<'a>(&'a self) -> impl 'a + Iterator<Item = Range<T>> { + let first = match self.free_ranges.first() { + Some(Range { ref start, .. 
}) if *start > self.initial_range.start => Some(self.initial_range.start .. *start), + _ => None, + }; + + let last = match self.free_ranges.last() { + Some(Range { end, .. }) if *end < self.initial_range.end => Some(*end .. self.initial_range.end), + _ => None, + }; + + let mid = self.free_ranges + .iter() + .zip(self.free_ranges.iter().skip(1)) + .map(|(ra, rb)| ra.end .. rb.start); + + first + .into_iter() + .chain(mid) + .chain(last) + } + + pub fn reset(&mut self) { + self.free_ranges.clear(); + self.free_ranges.push(self.initial_range.clone()); + } + + pub fn is_empty(&self) -> bool { + self.free_ranges.len() == 1 && self.free_ranges[0] == self.initial_range + } +} + +impl<T: Copy + Sub<Output = T> + Sum> RangeAllocator<T> { + pub fn total_available(&self) -> T { + self.free_ranges + .iter() + .map(|range| range.end - range.start) + .sum() + } +} + + +#[cfg(test)] +mod tests { + use super::*; + + #[test] + fn test_basic_allocation() { + let mut alloc = RangeAllocator::new(0..10); + + assert_eq!(alloc.allocate_range(4), Ok(0..4)); + + alloc.free_range(0..4); + + assert_eq!(alloc.free_ranges, vec![0..10]); + } + + #[test] + fn test_out_of_space() { + let mut alloc = RangeAllocator::new(0..10); + + assert_eq!(alloc.allocate_range(10), Ok(0..10)); + assert!(alloc.allocate_range(4).is_err()); + alloc.free_range(0..10); + } + + #[test] + fn test_dont_use_block_that_is_too_small() { + let mut alloc = RangeAllocator::new(0..10); + + assert_eq!(alloc.allocate_range(3), Ok(0..3)); + assert_eq!(alloc.allocate_range(3), Ok(3..6)); + assert_eq!(alloc.allocate_range(3), Ok(6..9)); + alloc.free_range(3..6); + assert_eq!(alloc.free_ranges, vec![3..6, 9..10]); + + assert_eq!(alloc.allocate_range(3), Ok(3..6)); + } + + #[test] + fn test_free_blocks_in_middle() { + let mut alloc = RangeAllocator::new(0..100); + + assert_eq!(alloc.allocate_range(10), Ok(0..10)); + assert_eq!(alloc.allocate_range(10), Ok(10..20)); + assert_eq!(alloc.allocate_range(10), Ok(20..30)); + assert_eq!(alloc.allocate_range(10), Ok(30..40)); + assert_eq!(alloc.allocate_range(10), Ok(40..50)); + assert_eq!(alloc.allocate_range(10), Ok(50..60)); + assert_eq!(alloc.allocate_range(10), Ok(60..70)); + assert_eq!(alloc.allocate_range(10), Ok(70..80)); + assert_eq!(alloc.allocate_range(10), Ok(80..90)); + assert_eq!(alloc.allocate_range(10), Ok(90..100)); + assert_eq!(alloc.free_ranges, vec![]); + alloc.free_range(10..20); + alloc.free_range(30..40); + alloc.free_range(50..60); + alloc.free_range(70..80); + alloc.free_range(90..100); + + assert_eq!(alloc.free_ranges, vec![10..20, 30..40, 50..60, 70..80, 90..100]); + + assert_eq!(alloc.allocate_range(6), Ok(10..16)); + assert_eq!(alloc.allocate_range(6), Ok(30..36)); + assert_eq!(alloc.allocate_range(6), Ok(50..56)); + assert_eq!(alloc.allocate_range(6), Ok(70..76)); + assert_eq!(alloc.allocate_range(6), Ok(90..96)); + + assert_eq!(alloc.free_ranges, vec![16..20, 36..40, 56..60, 76..80, 96..100]); + + assert_eq!(alloc.allocate_range(4), Ok(16..20)); + assert_eq!(alloc.allocate_range(4), Ok(36..40)); + assert_eq!(alloc.allocate_range(4), Ok(56..60)); + assert_eq!(alloc.allocate_range(4), Ok(76..80)); + assert_eq!(alloc.allocate_range(4), Ok(96..100)); + + assert_eq!(alloc.free_ranges, vec![]); + } + + #[test] + fn test_ignore_block_if_another_fits_better() { + let mut alloc = RangeAllocator::new(0..10); + + + assert_eq!(alloc.allocate_range(3), Ok(0..3)); + assert_eq!(alloc.allocate_range(3), Ok(3..6)); + assert_eq!(alloc.allocate_range(3), Ok(6..9)); + alloc.free_range(3..6); + assert_eq!(alloc.free_ranges, 
vec![3..6, 9..10]); + + + assert_eq!(alloc.allocate_range(1), Ok(9..10)); + } + + #[test] + fn test_merge_neighbors() { + let mut alloc = RangeAllocator::new(0..9); + assert_eq!(alloc.allocate_range(3), Ok(0..3)); + assert_eq!(alloc.allocate_range(3), Ok(3..6)); + assert_eq!(alloc.allocate_range(3), Ok(6..9)); + alloc.free_range(0..3); + alloc.free_range(6..9); + alloc.free_range(3..6); + assert_eq!(alloc.free_ranges, vec![0..9]); + } +} diff --git a/third_party/rust/raw-window-handle/.cargo-checksum.json b/third_party/rust/raw-window-handle/.cargo-checksum.json new file mode 100644 index 000000000000..7d5ae4d21633 --- /dev/null +++ b/third_party/rust/raw-window-handle/.cargo-checksum.json @@ -0,0 +1 @@ +{"files":{"CHANGELOG.md":"01451f72500ac469378f5e7dacc5b7487a64de4ed101fc0d3171f702de3d958f","Cargo.toml":"0ffb374551798f60ef0158eee5b67c6b1d2bd6b53c58bf37a2a69d08eb2b2ac2","LICENSE":"9c5a80639a57c1c945570e7ebbca0706305849ce3c098021325cca9db2f7acc4","README.md":"cab86df5186877302bfb6f90a49ae53af6141fc9588a4f6d7038658fdf10e7d0","appveyor.yml":"8d80a816ac9c301e7b91ff6797c1279462215b0b02169c0c0ceede261f788dca","rustfmt.toml":"a60f9a6656083027a16746999197d72aa6fd7f906969597cb99ce162177f1700","src/android.rs":"62e5150c8c55d1867bb070486f600cf689c946dacc34a86e02fd13ef6c91c267","src/ios.rs":"9303b649b541510a2d3e5e0edbe92cdfc7de909bf9792c225f7836fff07c3254","src/lib.rs":"68a6a3cdeb87211b6c19f18a6efb0819bae553eb9315e99fc780b66acd3f0a22","src/macos.rs":"fb7107f678e28071534fb7928e14eccd8ca551c37e34c58071da23f9d5d553a0","src/unix.rs":"9dbc416a3ad18218e44e8c041f7c3d2f243fcb71cc0ece2446aee60413582493","src/web.rs":"45f4983dbf2031f588eebe907c7080547008b7d8e84532fe7a021e14a003ec6b","src/windows.rs":"eede07931e88f81925547d597a7470a7d8a33e6166029b4c070c653a4d7c16b6"},"package":"2e815b85b31e4d397ca9dd8eb1d692e9cb458b9f6ae8ac2232c995dca8236f87"} \ No newline at end of file diff --git a/third_party/rust/raw-window-handle/CHANGELOG.md b/third_party/rust/raw-window-handle/CHANGELOG.md new file mode 100644 index 000000000000..b93a5af94a3a --- /dev/null +++ b/third_party/rust/raw-window-handle/CHANGELOG.md @@ -0,0 +1,24 @@ +# 0.3.0 (2019-10-5) + +* **Breaking:** Rename `XLib.surface` to `XLib.window`, as that more accurately represents the underlying type. +* Implement `HasRawWindowHandle` for `RawWindowHandle` +* Add `HINSTANCE` field to `WindowsHandle`. + +# 0.2.0 (2019-09-26) + +* **Breaking:** Rename `X11` to `XLib`. +* Add XCB support. +* Add Web support. +* Add Android support. + +# 0.1.2 (2019-08-13) + +* Fix use of private `_non_exhaustive` field in platform handle structs preventing structs from getting initialized. + +# 0.1.1 (2019-08-13) + +* Flesh out Cargo.toml, adding crates.io info rendering tags. + +# 0.1.0 (2019-08-13) + +* Initial release. diff --git a/third_party/rust/raw-window-handle/Cargo.toml b/third_party/rust/raw-window-handle/Cargo.toml new file mode 100644 index 000000000000..e004af5a5c95 --- /dev/null +++ b/third_party/rust/raw-window-handle/Cargo.toml @@ -0,0 +1,36 @@ +# THIS FILE IS AUTOMATICALLY GENERATED BY CARGO +# +# When uploading crates to the registry Cargo will automatically +# "normalize" Cargo.toml files for maximal compatibility +# with all versions of Cargo and also rewrite `path` dependencies +# to registry (e.g., crates.io) dependencies +# +# If you believe there's an error in this file please file an +# issue against the rust-lang/cargo repository. 
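Before moving on to the next crate, a short usage sketch for `range-alloc` as vendored above (illustrative, not part of the patch), showing the best-fit allocation and the neighbor merging on free that the tests verify:

```rust
use range_alloc::RangeAllocator;

fn main() {
    // Manage a 1024-unit arena of u32 offsets.
    let mut heap = RangeAllocator::new(0u32..1024);

    let a = heap.allocate_range(256).unwrap(); // 0..256
    let b = heap.allocate_range(256).unwrap(); // 256..512

    // Freeing `a` leaves holes 0..256 and 512..1024; a 128-unit request
    // best-fits into the smaller hole.
    heap.free_range(a);
    let c = heap.allocate_range(128).unwrap();
    assert_eq!(c, 0..128);

    // Freeing everything merges neighbors back into a single free range.
    heap.free_range(b);
    heap.free_range(c);
    assert_eq!(heap.total_available(), 1024);
}
```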
If you're +# editing this file be aware that the upstream Cargo.toml +# will likely look very different (and much more reasonable) + +[package] +edition = "2018" +name = "raw-window-handle" +version = "0.3.0" +authors = ["Osspial "] +description = "Interoperability library for Rust Windowing applications." +documentation = "https://docs.rs/raw-window-handle" +readme = "README.md" +keywords = ["windowing"] +license = "Unlicense" +repository = "https://github.com/rust-windowing/raw-window-handle" +[package.metadata.docs.rs] +features = ["nightly-docs"] +[dependencies.libc] +version = "0.2" +features = [] + +[features] +nightly-docs = [] +[badges.appveyor] +repository = "rust-windowing/raw-window-handle" + +[badges.travis-ci] +repository = "rust-windowing/raw-window-handle" diff --git a/third_party/rust/raw-window-handle/LICENSE b/third_party/rust/raw-window-handle/LICENSE new file mode 100644 index 000000000000..be95e0b866f5 --- /dev/null +++ b/third_party/rust/raw-window-handle/LICENSE @@ -0,0 +1,21 @@ +MIT License + +Copyright (c) 2019 Osspial + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. diff --git a/third_party/rust/raw-window-handle/README.md b/third_party/rust/raw-window-handle/README.md new file mode 100644 index 000000000000..e9d00520d57b --- /dev/null +++ b/third_party/rust/raw-window-handle/README.md @@ -0,0 +1,7 @@ +# `raw-window-handle`: A common windowing interoperability library for Rust +[![Crates.io](https://img.shields.io/crates/v/raw-window-handle.svg?maxAge=2592000)](https://crates.io/crates/raw-window-handle) +[![Docs](https://docs.rs/raw-window-handle/badge.svg)](https://docs.rs/raw-window-handle) +[![Travis Build Status](https://travis-ci.org/rust-windowing/raw-window-handle.svg)](https://travis-ci.org/rust-windowing/raw-window-handle) +[![Appveyor Build Status](https://ci.appveyor.com/api/projects/status/iq3j85x0ruw5y205?svg=true)](https://ci.appveyor.com/project/Osspial/raw-window-handle) + +This library provides standard types for accessing a window's platform-specific raw window handle. This does not provide any utilities for creating and managing windows; instead, it provides a common interface that window creation libraries (e.g. Winit, SDL) can use to easily talk with graphics libraries (e.g. gfx-hal). 
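As a concrete illustration of that contract (not part of the patch): a window type hands its native handles to a graphics library by implementing `HasRawWindowHandle`. The sketch below assumes an X11/Linux target; `MyXlibWindow` and its fields are hypothetical.

```rust
use raw_window_handle::{unix::XlibHandle, HasRawWindowHandle, RawWindowHandle};

// Hypothetical window owned by some Xlib-based windowing code.
struct MyXlibWindow {
    window: std::os::raw::c_ulong,
    display: *mut std::os::raw::c_void,
}

unsafe impl HasRawWindowHandle for MyXlibWindow {
    fn raw_window_handle(&self) -> RawWindowHandle {
        // Start from `empty()` so the hidden non-exhaustive field is initialized.
        let mut handle = XlibHandle::empty();
        handle.window = self.window;
        handle.display = self.display;
        RawWindowHandle::Xlib(handle)
    }
}
```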
diff --git a/third_party/rust/raw-window-handle/appveyor.yml b/third_party/rust/raw-window-handle/appveyor.yml new file mode 100644 index 000000000000..050795b4c379 --- /dev/null +++ b/third_party/rust/raw-window-handle/appveyor.yml @@ -0,0 +1,22 @@ +environment: + matrix: + - TARGET: x86_64-pc-windows-msvc + CHANNEL: nightly + - TARGET: x86_64-pc-windows-msvc + CHANNEL: stable + - TARGET: i686-pc-windows-msvc + CHANNEL: nightly + - TARGET: i686-pc-windows-gnu + CHANNEL: nightly +install: + - appveyor DownloadFile https://win.rustup.rs/ -FileName rustup-init.exe + - rustup-init -yv --default-toolchain %CHANNEL% --default-host %TARGET% + - SET PATH=%PATH%;%USERPROFILE%\.cargo\bin + - SET PATH=%PATH%;C:\MinGW\bin + - rustc -V + - cargo -V + +build: false + +test_script: + - cargo test --verbose diff --git a/third_party/rust/raw-window-handle/rustfmt.toml b/third_party/rust/raw-window-handle/rustfmt.toml new file mode 100644 index 000000000000..34503e88e1d0 --- /dev/null +++ b/third_party/rust/raw-window-handle/rustfmt.toml @@ -0,0 +1 @@ +use_field_init_shorthand=true diff --git a/third_party/rust/raw-window-handle/src/android.rs b/third_party/rust/raw-window-handle/src/android.rs new file mode 100644 index 000000000000..a6b46b1c4c7c --- /dev/null +++ b/third_party/rust/raw-window-handle/src/android.rs @@ -0,0 +1,31 @@ +use core::ptr; +use libc::c_void; + + + + + + + + + + + +#[derive(Debug, Clone, Copy, PartialEq, Eq)] +pub struct AndroidHandle { + + pub a_native_window: *mut c_void, + #[doc(hidden)] + #[deprecated = "This field is used to ensure that this struct is non-exhaustive, so that it may be extended in the future. Do not refer to this field."] + pub _non_exhaustive_do_not_use: crate::seal::Seal, +} + +impl AndroidHandle { + pub fn empty() -> AndroidHandle { + #[allow(deprecated)] + AndroidHandle { + a_native_window: ptr::null_mut(), + _non_exhaustive_do_not_use: crate::seal::Seal, + } + } +} diff --git a/third_party/rust/raw-window-handle/src/ios.rs b/third_party/rust/raw-window-handle/src/ios.rs new file mode 100644 index 000000000000..bcef88901285 --- /dev/null +++ b/third_party/rust/raw-window-handle/src/ios.rs @@ -0,0 +1,34 @@ +use core::ptr; +use libc::c_void; + + + + + + + + + + + +#[derive(Debug, Clone, Copy, PartialEq, Eq)] +pub struct IOSHandle { + pub ui_window: *mut c_void, + pub ui_view: *mut c_void, + pub ui_view_controller: *mut c_void, + #[doc(hidden)] + #[deprecated = "This field is used to ensure that this struct is non-exhaustive, so that it may be extended in the future. 
Do not refer to this field."] + pub _non_exhaustive_do_not_use: crate::seal::Seal, +} + +impl IOSHandle { + pub fn empty() -> IOSHandle { + #[allow(deprecated)] + IOSHandle { + ui_window: ptr::null_mut(), + ui_view: ptr::null_mut(), + ui_view_controller: ptr::null_mut(), + _non_exhaustive_do_not_use: crate::seal::Seal, + } + } +} diff --git a/third_party/rust/raw-window-handle/src/lib.rs b/third_party/rust/raw-window-handle/src/lib.rs new file mode 100644 index 000000000000..fed62a6fdadc --- /dev/null +++ b/third_party/rust/raw-window-handle/src/lib.rs @@ -0,0 +1,197 @@ + + + + + + + + + + + + + + +#![cfg_attr(feature = "nightly-docs", feature(doc_cfg))] +#![no_std] + +#[cfg_attr(feature = "nightly-docs", doc(cfg(target_os = "android")))] +#[cfg_attr(not(feature = "nightly-docs"), cfg(target_os = "android"))] +pub mod android; +#[cfg_attr(feature = "nightly-docs", doc(cfg(target_os = "ios")))] +#[cfg_attr(not(feature = "nightly-docs"), cfg(target_os = "ios"))] +pub mod ios; +#[cfg_attr(feature = "nightly-docs", doc(cfg(target_os = "macos")))] +#[cfg_attr(not(feature = "nightly-docs"), cfg(target_os = "macos"))] +pub mod macos; +#[cfg_attr( + feature = "nightly-docs", + doc(cfg(any( + target_os = "linux", + target_os = "dragonfly", + target_os = "freebsd", + target_os = "netbsd", + target_os = "openbsd" + ))) +)] +#[cfg_attr( + not(feature = "nightly-docs"), + cfg(any( + target_os = "linux", + target_os = "dragonfly", + target_os = "freebsd", + target_os = "netbsd", + target_os = "openbsd" + )) +)] +pub mod unix; +#[cfg_attr(feature = "nightly-docs", doc(cfg(target_arch = "wasm32")))] +#[cfg_attr(not(feature = "nightly-docs"), cfg(target_arch = "wasm32"))] +pub mod web; +#[cfg_attr(feature = "nightly-docs", doc(cfg(target_os = "windows")))] +#[cfg_attr(not(feature = "nightly-docs"), cfg(target_os = "windows"))] +pub mod windows; + +mod platform { + #[cfg(target_os = "android")] + pub use crate::android::*; + #[cfg(target_os = "macos")] + pub use crate::macos::*; + #[cfg(any( + target_os = "linux", + target_os = "dragonfly", + target_os = "freebsd", + target_os = "netbsd", + target_os = "openbsd" + ))] + pub use crate::unix::*; + #[cfg(target_os = "windows")] + pub use crate::windows::*; + + #[cfg(target_os = "ios")] + pub use crate::ios::*; + #[cfg(target_arch = "wasm32")] + pub use crate::web::*; +} + + + + + + + + + + + + + + + +pub unsafe trait HasRawWindowHandle { + fn raw_window_handle(&self) -> RawWindowHandle; +} + +unsafe impl HasRawWindowHandle for RawWindowHandle { + fn raw_window_handle(&self) -> RawWindowHandle { + *self + } +} + +#[derive(Debug, Clone, Copy, PartialEq, Eq)] +pub enum RawWindowHandle { + #[cfg_attr(feature = "nightly-docs", doc(cfg(target_os = "ios")))] + #[cfg_attr(not(feature = "nightly-docs"), cfg(target_os = "ios"))] + IOS(ios::IOSHandle), + + #[cfg_attr(feature = "nightly-docs", doc(cfg(target_os = "macos")))] + #[cfg_attr(not(feature = "nightly-docs"), cfg(target_os = "macos"))] + MacOS(macos::MacOSHandle), + + #[cfg_attr( + feature = "nightly-docs", + doc(cfg(any( + target_os = "linux", + target_os = "dragonfly", + target_os = "freebsd", + target_os = "netbsd", + target_os = "openbsd" + ))) + )] + #[cfg_attr( + not(feature = "nightly-docs"), + cfg(any( + target_os = "linux", + target_os = "dragonfly", + target_os = "freebsd", + target_os = "netbsd", + target_os = "openbsd" + )) + )] + Xlib(unix::XlibHandle), + + #[cfg_attr( + feature = "nightly-docs", + doc(cfg(any( + target_os = "linux", + target_os = "dragonfly", + target_os = "freebsd", + target_os = 
"netbsd", + target_os = "openbsd" + ))) + )] + #[cfg_attr( + not(feature = "nightly-docs"), + cfg(any( + target_os = "linux", + target_os = "dragonfly", + target_os = "freebsd", + target_os = "netbsd", + target_os = "openbsd" + )) + )] + Xcb(unix::XcbHandle), + + #[cfg_attr( + feature = "nightly-docs", + doc(cfg(any( + target_os = "linux", + target_os = "dragonfly", + target_os = "freebsd", + target_os = "netbsd", + target_os = "openbsd" + ))) + )] + #[cfg_attr( + not(feature = "nightly-docs"), + cfg(any( + target_os = "linux", + target_os = "dragonfly", + target_os = "freebsd", + target_os = "netbsd", + target_os = "openbsd" + )) + )] + Wayland(unix::WaylandHandle), + + #[cfg_attr(feature = "nightly-docs", doc(cfg(target_os = "windows")))] + #[cfg_attr(not(feature = "nightly-docs"), cfg(target_os = "windows"))] + Windows(windows::WindowsHandle), + + #[cfg_attr(feature = "nightly-docs", doc(cfg(target_arch = "wasm32")))] + #[cfg_attr(not(feature = "nightly-docs"), cfg(target_arch = "wasm32"))] + Web(web::WebHandle), + + #[cfg_attr(feature = "nightly-docs", doc(cfg(target_os = "android")))] + #[cfg_attr(not(feature = "nightly-docs"), cfg(target_os = "android"))] + Android(android::AndroidHandle), + + #[doc(hidden)] + #[deprecated = "This field is used to ensure that this struct is non-exhaustive, so that it may be extended in the future. Do not refer to this field."] + __NonExhaustiveDoNotUse(seal::Seal), +} + +mod seal { + #[derive(Debug, Clone, Copy, PartialEq, Eq)] + pub struct Seal; +} diff --git a/third_party/rust/raw-window-handle/src/macos.rs b/third_party/rust/raw-window-handle/src/macos.rs new file mode 100644 index 000000000000..dc323a562c89 --- /dev/null +++ b/third_party/rust/raw-window-handle/src/macos.rs @@ -0,0 +1,33 @@ +use core::ptr; +use libc::c_void; + + + + + + + + + + + +#[derive(Debug, Clone, Copy, PartialEq, Eq)] +pub struct MacOSHandle { + pub ns_window: *mut c_void, + pub ns_view: *mut c_void, + + #[doc(hidden)] + #[deprecated = "This field is used to ensure that this struct is non-exhaustive, so that it may be extended in the future. Do not refer to this field."] + pub _non_exhaustive_do_not_use: crate::seal::Seal, +} + +impl MacOSHandle { + pub fn empty() -> MacOSHandle { + #[allow(deprecated)] + MacOSHandle { + ns_window: ptr::null_mut(), + ns_view: ptr::null_mut(), + _non_exhaustive_do_not_use: crate::seal::Seal, + } + } +} diff --git a/third_party/rust/raw-window-handle/src/unix.rs b/third_party/rust/raw-window-handle/src/unix.rs new file mode 100644 index 000000000000..a7dddcfe2838 --- /dev/null +++ b/third_party/rust/raw-window-handle/src/unix.rs @@ -0,0 +1,98 @@ +use core::ptr; +use libc::{c_ulong, c_void}; + + + + + + + + + + + +#[derive(Debug, Clone, Copy, PartialEq, Eq)] +pub struct XlibHandle { + + pub window: c_ulong, + + pub display: *mut c_void, + #[doc(hidden)] + #[deprecated = "This field is used to ensure that this struct is non-exhaustive, so that it may be extended in the future. Do not refer to this field."] + pub _non_exhaustive_do_not_use: crate::seal::Seal, +} + + + + + + + + + + + +#[derive(Debug, Clone, Copy, PartialEq, Eq)] +pub struct XcbHandle { + + pub window: u32, + + pub connection: *mut c_void, + #[doc(hidden)] + #[deprecated = "This field is used to ensure that this struct is non-exhaustive, so that it may be extended in the future. 
Do not refer to this field."] + pub _non_exhaustive_do_not_use: crate::seal::Seal, +} + + + + + + + + + + + +#[derive(Debug, Clone, Copy, PartialEq, Eq)] +pub struct WaylandHandle { + + pub surface: *mut c_void, + + pub display: *mut c_void, + #[doc(hidden)] + #[deprecated = "This field is used to ensure that this struct is non-exhaustive, so that it may be extended in the future. Do not refer to this field."] + pub _non_exhaustive_do_not_use: crate::seal::Seal, +} + +impl XlibHandle { + pub fn empty() -> XlibHandle { + #[allow(deprecated)] + XlibHandle { + window: 0, + display: ptr::null_mut(), + _non_exhaustive_do_not_use: crate::seal::Seal, + } + } +} + +impl XcbHandle { + pub fn empty() -> XcbHandle { + #[allow(deprecated)] + XcbHandle { + window: 0, + connection: ptr::null_mut(), + _non_exhaustive_do_not_use: crate::seal::Seal, + } + } +} + +impl WaylandHandle { + pub fn empty() -> WaylandHandle { + #[allow(deprecated)] + WaylandHandle { + surface: ptr::null_mut(), + display: ptr::null_mut(), + _non_exhaustive_do_not_use: crate::seal::Seal, + } + } +} diff --git a/third_party/rust/raw-window-handle/src/web.rs b/third_party/rust/raw-window-handle/src/web.rs new file mode 100644 index 000000000000..6fb699304b80 --- /dev/null +++ b/third_party/rust/raw-window-handle/src/web.rs @@ -0,0 +1,33 @@ + + + + + + + + + + +#[derive(Debug, Clone, Copy, PartialEq, Eq)] +pub struct WebHandle { + + + + + + + pub id: u32, + #[doc(hidden)] + #[deprecated = "This field is used to ensure that this struct is non-exhaustive, so that it may be extended in the future. Do not refer to this field."] + pub _non_exhaustive_do_not_use: crate::seal::Seal, +} + +impl WebHandle { + pub fn empty() -> WebHandle { + #[allow(deprecated)] + WebHandle { + id: 0, + _non_exhaustive_do_not_use: crate::seal::Seal, + } + } +} diff --git a/third_party/rust/raw-window-handle/src/windows.rs b/third_party/rust/raw-window-handle/src/windows.rs new file mode 100644 index 000000000000..9f9ba655ac3d --- /dev/null +++ b/third_party/rust/raw-window-handle/src/windows.rs @@ -0,0 +1,34 @@ +use core::ptr; +use libc::c_void; + + + + + + + + + + + +#[derive(Debug, Clone, Copy, PartialEq, Eq)] +pub struct WindowsHandle { + + pub hwnd: *mut c_void, + + pub hinstance: *mut c_void, + #[doc(hidden)] + #[deprecated = "This field is used to ensure that this struct is non-exhaustive, so that it may be extended in the future. 
Do not refer to this field."] + pub _non_exhaustive_do_not_use: crate::seal::Seal, +} + +impl WindowsHandle { + pub fn empty() -> WindowsHandle { + #[allow(deprecated)] + WindowsHandle { + hwnd: ptr::null_mut(), + hinstance: ptr::null_mut(), + _non_exhaustive_do_not_use: crate::seal::Seal, + } + } +} diff --git a/third_party/rust/relevant/.cargo-checksum.json b/third_party/rust/relevant/.cargo-checksum.json new file mode 100644 index 000000000000..05021dc81d96 --- /dev/null +++ b/third_party/rust/relevant/.cargo-checksum.json @@ -0,0 +1 @@ +{"files":{"Cargo.toml":"5e5011954bc26ef3ddc850942128938e98df4c8271abe61c0ea5f1f0a17fc0a6","LICENSE-APACHE":"7cfd738c53d61c79f07e348f622bf7707c9084237054d37fbe07788a75f5881c","LICENSE-MIT":"9507d46994231e6272fcaca81c5af179e32f1522c775eaaec9938015b645ae99","README.md":"8da07788d7b67b67e2eee64989761b87559426f66da0e54d408f769c588cf0f3","src/lib.rs":"7fc604f517c72d7fed503c935a0eb3cc9d7e7d00cfb3a7aacfeabaaa611fe35f"},"package":"bbc232e13d37f4547f5b9b42a5efc380cabe5dbc1807f8b893580640b2ab0308"} \ No newline at end of file diff --git a/third_party/rust/relevant/Cargo.toml b/third_party/rust/relevant/Cargo.toml new file mode 100644 index 000000000000..bcd08c7d8ee1 --- /dev/null +++ b/third_party/rust/relevant/Cargo.toml @@ -0,0 +1,42 @@ +# THIS FILE IS AUTOMATICALLY GENERATED BY CARGO +# +# When uploading crates to the registry Cargo will automatically +# "normalize" Cargo.toml files for maximal compatibility +# with all versions of Cargo and also rewrite `path` dependencies +# to registry (e.g., crates.io) dependencies +# +# If you believe there's an error in this file please file an +# issue against the rust-lang/cargo repository. If you're +# editing this file be aware that the upstream Cargo.toml +# will likely look very different (and much more reasonable) + +[package] +name = "relevant" +version = "0.4.2" +authors = ["omni-viral "] +description = "A small utility type to emulate must-use types" +license = "MIT/Apache-2.0" +repository = "https://github.com/omni-viral/relevant.git" +[package.metadata.docs.rs] +features = ["backtrace", "log", "serde-1"] +[dependencies.backtrace] +version = "0.3.13" +optional = true + +[dependencies.cfg-if] +version = "0.1" + +[dependencies.log] +version = "0.4" +optional = true + +[dependencies.serde] +version = "1.0" +features = ["derive"] +optional = true + +[features] +default = ["std"] +panic = [] +serde-1 = ["serde"] +std = [] diff --git a/third_party/rust/relevant/LICENSE-APACHE b/third_party/rust/relevant/LICENSE-APACHE new file mode 100644 index 000000000000..f47c9411414e --- /dev/null +++ b/third_party/rust/relevant/LICENSE-APACHE @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + +TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + +1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. 
For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + +2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + +3. Grant of Patent License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + +4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + +5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + +6. Trademarks. 
This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + +7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + +8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + +9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + +END OF TERMS AND CONDITIONS + +APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + +Copyright [yyyy] [name of copyright owner] + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
diff --git a/third_party/rust/relevant/LICENSE-MIT b/third_party/rust/relevant/LICENSE-MIT new file mode 100644 index 000000000000..4a4762e607f0 --- /dev/null +++ b/third_party/rust/relevant/LICENSE-MIT @@ -0,0 +1,25 @@ +Copyright (c) 2016 The Amethyst Project Developers + +Permission is hereby granted, free of charge, to any +person obtaining a copy of this software and associated +documentation files (the "Software"), to deal in the +Software without restriction, including without +limitation the rights to use, copy, modify, merge, +publish, distribute, sublicense, and/or sell copies of +the Software, and to permit persons to whom the Software +is furnished to do so, subject to the following +conditions: + +The above copyright notice and this permission notice +shall be included in all copies or substantial portions +of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF +ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED +TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A +PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY +CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION +OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR +IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER +DEALINGS IN THE SOFTWARE. diff --git a/third_party/rust/relevant/README.md b/third_party/rust/relevant/README.md new file mode 100644 index 000000000000..c05ab42f90cb --- /dev/null +++ b/third_party/rust/relevant/README.md @@ -0,0 +1,54 @@ +# Relevant + +A small utility type to emulate must-use types. +They differ from the `#[must_use]` attribute in that whoever holds an instance must either send it somewhere else or `dispose` of it manually. + +This is desirable for types that need manual destruction that can't be implemented with the `Drop` trait, +for example a resource handle created from some source that must be returned to the same source. + +## Usage + +The type `Relevant` is non-droppable. As a limitation of the current implementation, it panics when dropped. +To make a type non-droppable, it must contain another non-droppable type (the `Relevant` type, for example). + +### Example + +```rust + +struct SourceOfFoos { + handle: u64, +} + +/// Foo must be destroyed manually. +struct Foo { + handle: u64, + relevant: Relevant, +} + +extern "C" { + /// Function from C library to create `Foo`. + /// Access to the same source must be synchronized. + fn create_foo(source: u64) -> u64; + + /// Function from C library to destroy `Foo`. + /// Access to the same source must be synchronized. + fn destroy_foo(source: u64, foo: u64) -> u64; +} + +impl SourceOfFoos { + fn create_foo(&mut self) -> Foo { + Foo { + handle: unsafe { create_foo(self.handle) }, + relevant: Relevant, + } + } + + fn destroy_foo(&mut self, foo: Foo) { + unsafe { destroy_foo(self.handle, foo.handle); } + foo.relevant.dispose(); + } +} + +``` + +Now it is not possible to accidentally drop `Foo` and leak the handle. +Of course it is always possible to explicitly `std::mem::forget` the relevant type, +but that would be a deliberate leak. 
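A compact variant of that pattern (illustrative, not part of the patch; assumes the crate's default `std` feature): a guard that must be explicitly committed, where `Relevant` turns a forgotten guard into a loud runtime failure instead of a silent leak.

```rust
use relevant::Relevant;

// A transaction that must be explicitly finished; dropping it is a bug.
struct Transaction {
    relevant: Relevant,
}

impl Transaction {
    fn begin() -> Self {
        Transaction { relevant: Relevant }
    }

    fn commit(self) {
        // ... real completion work would go here ...
        self.relevant.dispose();
    }
}

fn main() {
    let tx = Transaction::begin();
    tx.commit(); // Dropping `tx` without `commit` would hit the drop sink instead.
}
```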
diff --git a/third_party/rust/relevant/src/lib.rs b/third_party/rust/relevant/src/lib.rs new file mode 100644 index 000000000000..f5648fedd1ef --- /dev/null +++ b/third_party/rust/relevant/src/lib.rs @@ -0,0 +1,89 @@ + + + + + + + + + + + +#![cfg_attr(not(feature = "std"), no_std)] + +#[cfg(not(feature = "std"))] +use core as std; + + + + + + + + + + + +#[derive(Clone, Debug, PartialOrd, PartialEq, Ord, Eq, Hash)] +#[cfg_attr(feature = "serde-1", derive(serde::Serialize, serde::Deserialize))] +pub struct Relevant; + +impl Relevant { + + pub fn dispose(self) { + std::mem::forget(self) + } +} + +impl Drop for Relevant { + fn drop(&mut self) { + dropped() + } +} + +cfg_if::cfg_if! { + if #[cfg(feature = "panic")] { + macro_rules! sink { + ($($x:tt)*) => { panic!($($x)*) }; + } + } else if #[cfg(feature = "log")] { + macro_rules! sink { + ($($x:tt)*) => { log::error!($($x)*) }; + } + } else if #[cfg(feature = "std")] { + macro_rules! sink { + ($($x:tt)*) => { eprintln!($($x)*) }; + } + } else { + macro_rules! sink { + ($($x:tt)*) => { panic!($($x)*) }; + } + } +} + +cfg_if::cfg_if! { + if #[cfg(all(not(feature = "panic"), any(feature = "std", feature = "log"), feature = "backtrace"))] { + fn whine() { + let backtrace = backtrace::Backtrace::new(); + sink!("Values of this type can't be dropped!. Trace: {:#?}", backtrace) + } + } else { + fn whine() { + sink!("Values of this type can't be dropped!") + } + } +} + +cfg_if::cfg_if! { + if #[cfg(feature = "std")] { + fn dropped() { + if !std::thread::panicking() { + whine() + } + } + } else { + fn dropped() { + whine() + } + } +} \ No newline at end of file diff --git a/third_party/rust/rendy-descriptor/.cargo-checksum.json b/third_party/rust/rendy-descriptor/.cargo-checksum.json new file mode 100644 index 000000000000..22aa539f909c --- /dev/null +++ b/third_party/rust/rendy-descriptor/.cargo-checksum.json @@ -0,0 +1 @@ +{"files":{"Cargo.toml":"ee74a4961bdffd03f8e3d7a218fc8d94151bb5ebb688c5ed9cab343bf35d0fa1","src/allocator.rs":"5f55db009d1d12cc7257e4668f39f070641a79d0123221b836191faae7e486d1","src/lib.rs":"9ca8109e174f350fc3113480315d2d43ed4f2f9f30d887c6806de4577351a6c7","src/ranges.rs":"206ed3dfdc7167b3ada0420773ba6052aaecefbf9d31b31e53e40024c458833b"},"package":"f475bcc0505946e998590f1f0545c52ef4b559174a1b353a7ce6638def8b621e"} \ No newline at end of file diff --git a/third_party/rust/rendy-descriptor/Cargo.toml b/third_party/rust/rendy-descriptor/Cargo.toml new file mode 100644 index 000000000000..8d949b0b7934 --- /dev/null +++ b/third_party/rust/rendy-descriptor/Cargo.toml @@ -0,0 +1,35 @@ +# THIS FILE IS AUTOMATICALLY GENERATED BY CARGO +# +# When uploading crates to the registry Cargo will automatically +# "normalize" Cargo.toml files for maximal compatibility +# with all versions of Cargo and also rewrite `path` dependencies +# to registry (e.g., crates.io) dependencies +# +# If you believe there's an error in this file please file an +# issue against the rust-lang/cargo repository. 
If you're
+# editing this file be aware that the upstream Cargo.toml
+# will likely look very different (and much more reasonable)
+
+[package]
+edition = "2018"
+name = "rendy-descriptor"
+version = "0.5.1"
+authors = ["omni-viral "]
+description = "Rendy's descriptor allocator"
+documentation = "https://docs.rs/rendy-descriptor"
+keywords = ["graphics", "gfx-hal", "rendy"]
+categories = ["rendering"]
+license = "MIT OR Apache-2.0"
+repository = "https://github.com/amethyst/rendy"
+[dependencies.gfx-hal]
+version = "0.4"
+
+[dependencies.log]
+version = "0.4"
+
+[dependencies.relevant]
+version = "0.4"
+features = ["log"]
+
+[dependencies.smallvec]
+version = "0.6"
diff --git a/third_party/rust/rendy-descriptor/src/allocator.rs b/third_party/rust/rendy-descriptor/src/allocator.rs
new file mode 100644
index 000000000000..2d6fecda42de
--- /dev/null
+++ b/third_party/rust/rendy-descriptor/src/allocator.rs
@@ -0,0 +1,398 @@
+use {
+    crate::ranges::*,
+    gfx_hal::{
+        device::{Device, OutOfMemory},
+        pso::{AllocationError, DescriptorPool as _, DescriptorPoolCreateFlags},
+        Backend,
+    },
+    smallvec::{smallvec, SmallVec},
+    std::{
+        collections::{HashMap, VecDeque},
+        ops::Deref,
+    },
+};
+
+const MIN_SETS: u32 = 64;
+const MAX_SETS: u32 = 512;
+
+/// Descriptor set from allocator.
+#[derive(Debug)]
+pub struct DescriptorSet<B: Backend> {
+    raw: B::DescriptorSet,
+    pool: u64,
+    ranges: DescriptorRanges,
+}
+
+impl<B> DescriptorSet<B>
+where
+    B: Backend,
+{
+    /// Get raw set
+    pub fn raw(&self) -> &B::DescriptorSet {
+        &self.raw
+    }
+
+    /// Get mutable raw set.
+    /// It must not be replaced.
+    pub unsafe fn raw_mut(&mut self) -> &mut B::DescriptorSet {
+        &mut self.raw
+    }
+}
+
+impl<B> Deref for DescriptorSet<B>
+where
+    B: Backend,
+{
+    type Target = B::DescriptorSet;
+
+    fn deref(&self) -> &B::DescriptorSet {
+        &self.raw
+    }
+}
+
+#[derive(Debug)]
+struct Allocation<B: Backend> {
+    sets: SmallVec<[B::DescriptorSet; 1]>,
+    pools: Vec<u64>,
+}
+
+#[derive(Debug)]
+struct DescriptorPool<B: Backend> {
+    raw: B::DescriptorPool,
+    size: u32,
+
+    // Number of free sets left.
+    free: u32,
+
+    // Number of sets freed (they can't be reused until gfx-hal 0.2)
+    freed: u32,
+}
+
+unsafe fn allocate_from_pool<B: Backend>(
+    raw: &mut B::DescriptorPool,
+    layout: &B::DescriptorSetLayout,
+    count: u32,
+    allocation: &mut SmallVec<[B::DescriptorSet; 1]>,
+) -> Result<(), OutOfMemory> {
+    let sets_were = allocation.len();
+    raw.allocate_sets(std::iter::repeat(layout).take(count as usize), allocation)
+        .map_err(|err| match err {
+            AllocationError::Host => OutOfMemory::Host,
+            AllocationError::Device => OutOfMemory::Device,
+            err => {
+                // We check pool for free descriptors and sets before calling this function,
+                // so it can't be exhausted.
+ // And it can't be fragmented either according to spec + // + // https://www.khronos.org/registry/vulkan/specs/1.1-extensions/html/vkspec.html#VkDescriptorPoolCreateInfo + // + // """ + // Additionally, if all sets allocated from the pool since it was created or most recently reset + // use the same number of descriptors (of each type) and the requested allocation also + // uses that same number of descriptors (of each type), then fragmentation must not cause an allocation failure + // """ + panic!("Unexpected error: {:?}", err); + } + })?; + assert_eq!(allocation.len(), sets_were + count as usize); + Ok(()) +} + +#[derive(Debug)] +struct DescriptorBucket { + pools_offset: u64, + pools: VecDeque>, + total: u64, +} + +impl DescriptorBucket +where + B: Backend, +{ + fn new() -> Self { + DescriptorBucket { + pools_offset: 0, + pools: VecDeque::new(), + total: 0, + } + } + + fn new_pool_size(&self, count: u32) -> u32 { + MIN_SETS // at least MIN_SETS + .max(count) // at least enough for allocation + .max(self.total.min(MAX_SETS as u64) as u32) // at least as much as was allocated so far capped to MAX_SETS + .next_power_of_two() // rounded up to nearest 2^N + } + + unsafe fn dispose(mut self, device: &B::Device) { + if self.total > 0 { + log::error!("Not all descriptor sets were deallocated"); + } + + while let Some(pool) = self.pools.pop_front() { + assert!(pool.freed + pool.free <= pool.size); + if pool.freed + pool.free < pool.size { + log::error!( + "Descriptor pool is still in use during allocator disposal. {:?}", + pool + ); + } else { + log::trace!("Destroying used up descriptor pool"); + device.destroy_descriptor_pool(pool.raw); + self.pools_offset += 1; + } + } + + self.pools + .drain(..) + .for_each(|pool| device.destroy_descriptor_pool(pool.raw)); + } + + unsafe fn allocate( + &mut self, + device: &B::Device, + layout: &B::DescriptorSetLayout, + layout_ranges: DescriptorRanges, + mut count: u32, + allocation: &mut Allocation, + ) -> Result<(), OutOfMemory> { + if count == 0 { + return Ok(()); + } + + for (index, pool) in self.pools.iter_mut().enumerate().rev() { + if pool.free == 0 { + continue; + } + + let allocate = pool.free.min(count); + log::trace!("Allocate {} from exising pool", allocate); + allocate_from_pool::(&mut pool.raw, layout, allocate, &mut allocation.sets)?; + allocation.pools.extend( + std::iter::repeat(index as u64 + self.pools_offset).take(allocate as usize), + ); + count -= allocate; + pool.free -= allocate; + self.total += allocate as u64; + + if count == 0 { + return Ok(()); + } + } + + while count > 0 { + let size = self.new_pool_size(count); + let pool_ranges = layout_ranges * size; + log::trace!( + "Create new pool with {} sets and {:?} descriptors", + size, + pool_ranges, + ); + let raw = device.create_descriptor_pool( + size as usize, + &pool_ranges, + DescriptorPoolCreateFlags::empty(), + )?; + let allocate = size.min(count); + + self.pools.push_back(DescriptorPool { + raw, + size, + free: size, + freed: 0, + }); + let index = self.pools.len() - 1; + let pool = self.pools.back_mut().unwrap(); + + allocate_from_pool::(&mut pool.raw, layout, allocate, &mut allocation.sets)?; + allocation.pools.extend( + std::iter::repeat(index as u64 + self.pools_offset).take(allocate as usize), + ); + + count -= allocate; + pool.free -= allocate; + self.total += allocate as u64; + } + + Ok(()) + } + + unsafe fn free(&mut self, sets: impl IntoIterator, pool: u64) { + let pool = &mut self.pools[(pool - self.pools_offset) as usize]; + let freed = sets.into_iter().count() as 
u32; + pool.freed += freed; + self.total -= freed as u64; + log::trace!("Freed {} from descriptor bucket", freed); + } + + unsafe fn cleanup(&mut self, device: &B::Device) { + while let Some(pool) = self.pools.pop_front() { + if pool.freed < pool.size { + self.pools.push_front(pool); + break; + } + log::trace!("Destroying used up descriptor pool"); + device.destroy_descriptor_pool(pool.raw); + self.pools_offset += 1; + } + } +} + +/// Descriptor allocator. +/// Can be used to allocate descriptor sets for any layout. +#[derive(Debug)] +pub struct DescriptorAllocator { + buckets: HashMap>, + allocation: Allocation, + relevant: relevant::Relevant, + total: u64, +} + +impl DescriptorAllocator +where + B: Backend, +{ + /// Create new allocator instance. + pub fn new() -> Self { + DescriptorAllocator { + buckets: HashMap::new(), + allocation: Allocation { + sets: SmallVec::new(), + pools: Vec::new(), + }, + relevant: relevant::Relevant, + total: 0, + } + } + + /// Destroy allocator instance. + /// All sets allocated from this allocator become invalid. + pub unsafe fn dispose(mut self, device: &B::Device) { + self.buckets + .drain() + .for_each(|(_, bucket)| bucket.dispose(device)); + self.relevant.dispose(); + } + + /// Allocate descriptor set with specified layout. + /// `DescriptorRanges` must match descriptor numbers of the layout. + /// `DescriptorRanges` can be constructed [from bindings] that were used + /// to create layout instance. + /// + /// [from bindings]: . + pub unsafe fn allocate( + &mut self, + device: &B::Device, + layout: &B::DescriptorSetLayout, + layout_ranges: DescriptorRanges, + count: u32, + extend: &mut impl Extend>, + ) -> Result<(), OutOfMemory> { + if count == 0 { + return Ok(()); + } + + log::trace!( + "Allocating {} sets with layout {:?} @ {:?}", + count, + layout, + layout_ranges + ); + + let bucket = self + .buckets + .entry(layout_ranges) + .or_insert_with(|| DescriptorBucket::new()); + match bucket.allocate(device, layout, layout_ranges, count, &mut self.allocation) { + Ok(()) => { + extend.extend( + Iterator::zip( + self.allocation.pools.drain(..), + self.allocation.sets.drain(), + ) + .map(|(pool, set)| DescriptorSet { + raw: set, + ranges: layout_ranges, + pool, + }), + ); + Ok(()) + } + Err(err) => { + // Free sets allocated so far. + let mut last = None; + for (index, pool) in self.allocation.pools.drain(..).enumerate().rev() { + match last { + Some(last) if last == pool => { + // same pool, continue + } + Some(last) => { + let sets = &mut self.allocation.sets; + // Free contiguous range of sets from one pool in one go. + bucket.free((index + 1..sets.len()).map(|_| sets.pop().unwrap()), last); + } + None => last = Some(pool), + } + } + + if let Some(last) = last { + bucket.free(self.allocation.sets.drain(), last); + } + + Err(err) + } + } + } + + /// Free descriptor sets. + /// + /// # Safety + /// + /// None of descriptor sets can be referenced in any pending command buffers. + /// All command buffers where at least one of descriptor sets referenced + /// move to invalid state. 
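+    ///
+    /// A minimal usage sketch (illustrative only; `device`, `layout`, and
+    /// `bindings` are assumed to exist, and the GPU must be done with the sets):
+    ///
+    /// ```ignore
+    /// let ranges = DescriptorRanges::from_bindings(&bindings);
+    /// let mut sets = Vec::new();
+    /// unsafe { allocator.allocate(device, &layout, ranges, 64, &mut sets)? };
+    /// // ... record, submit, and wait ...
+    /// unsafe { allocator.free(sets.drain(..)) };
+    /// ```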
+ pub unsafe fn free(&mut self, all_sets: impl IntoIterator>) { + let mut free: Option<(DescriptorRanges, u64, SmallVec<[B::DescriptorSet; 32]>)> = None; + + // Collect contig + for set in all_sets { + match &mut free { + slot @ None => { + slot.replace((set.ranges, set.pool, smallvec![set.raw])); + } + Some((ranges, pool, raw_sets)) if *ranges == set.ranges && *pool == set.pool => { + raw_sets.push(set.raw); + } + Some((ranges, pool, raw_sets)) => { + let bucket = self + .buckets + .get_mut(ranges) + .expect("Set should be allocated from this allocator"); + debug_assert!(bucket.total >= raw_sets.len() as u64); + + bucket.free(raw_sets.drain(), *pool); + *pool = set.pool; + *ranges = set.ranges; + raw_sets.push(set.raw); + } + } + } + + if let Some((ranges, pool, raw_sets)) = free { + let bucket = self + .buckets + .get_mut(&ranges) + .expect("Set should be allocated from this allocator"); + debug_assert!(bucket.total >= raw_sets.len() as u64); + + bucket.free(raw_sets, pool); + } + } + + + pub unsafe fn cleanup(&mut self, device: &B::Device) { + self.buckets + .values_mut() + .for_each(|bucket| bucket.cleanup(device)); + } +} diff --git a/third_party/rust/rendy-descriptor/src/lib.rs b/third_party/rust/rendy-descriptor/src/lib.rs new file mode 100644 index 000000000000..18d5e0e51975 --- /dev/null +++ b/third_party/rust/rendy-descriptor/src/lib.rs @@ -0,0 +1,4 @@ +mod allocator; +mod ranges; + +pub use {allocator::*, ranges::*}; diff --git a/third_party/rust/rendy-descriptor/src/ranges.rs b/third_party/rust/rendy-descriptor/src/ranges.rs new file mode 100644 index 000000000000..24780e34e290 --- /dev/null +++ b/third_party/rust/rendy-descriptor/src/ranges.rs @@ -0,0 +1,187 @@ +use std::{ + cmp::Ordering, + ops::{Add, AddAssign, Mul, MulAssign, Sub, SubAssign}, +}; + +pub use gfx_hal::pso::{DescriptorRangeDesc, DescriptorSetLayoutBinding, DescriptorType}; + +const DESCRIPTOR_TYPES_COUNT: usize = 11; + +const DESCRIPTOR_TYPES: [DescriptorType; DESCRIPTOR_TYPES_COUNT] = [ + DescriptorType::Sampler, + DescriptorType::CombinedImageSampler, + DescriptorType::SampledImage, + DescriptorType::StorageImage, + DescriptorType::UniformTexelBuffer, + DescriptorType::StorageTexelBuffer, + DescriptorType::UniformBuffer, + DescriptorType::StorageBuffer, + DescriptorType::UniformBufferDynamic, + DescriptorType::StorageBufferDynamic, + DescriptorType::InputAttachment, +]; + + +#[derive(Clone, Copy, Debug, PartialEq, Eq, Hash)] +pub struct DescriptorRanges { + counts: [u32; DESCRIPTOR_TYPES_COUNT], +} + +impl DescriptorRanges { + + pub fn zero() -> Self { + DescriptorRanges { + counts: [0; DESCRIPTOR_TYPES_COUNT], + } + } + + + + pub fn add_binding(&mut self, binding: DescriptorSetLayoutBinding) { + self.counts[binding.ty as usize] += binding.count as u32; + } + + + + pub fn iter(&self) -> DescriptorRangesIter<'_> { + DescriptorRangesIter { + counts: &self.counts, + index: 0, + } + } + + + pub fn counts(&self) -> &[u32] { + &self.counts + } + + + pub fn counts_mut(&mut self) -> &mut [u32] { + &mut self.counts + } + + + pub fn from_bindings(bindings: &[DescriptorSetLayoutBinding]) -> Self { + let mut descs = Self::zero(); + + for binding in bindings { + descs.counts[binding.ty as usize] += binding.count as u32; + } + + descs + } + + + pub fn from_binding_iter(bindings: I) -> Self + where + I: Iterator, + { + let mut descs = Self::zero(); + + for binding in bindings { + descs.counts[binding.ty as usize] += binding.count as u32; + } + + descs + } +} + +impl PartialOrd for DescriptorRanges { + fn partial_cmp(&self, 
other: &Self) -> Option { + let mut ord = self.counts[0].partial_cmp(&other.counts[0])?; + for i in 1..DESCRIPTOR_TYPES_COUNT { + match (ord, self.counts[i].partial_cmp(&other.counts[i])?) { + (Ordering::Less, Ordering::Greater) | (Ordering::Greater, Ordering::Less) => { + return None; + } + (Ordering::Equal, new) => ord = new, + _ => (), + } + } + Some(ord) + } +} + +impl Add for DescriptorRanges { + type Output = Self; + fn add(mut self, rhs: Self) -> Self { + self += rhs; + self + } +} + +impl AddAssign for DescriptorRanges { + fn add_assign(&mut self, rhs: Self) { + for i in 0..DESCRIPTOR_TYPES_COUNT { + self.counts[i] += rhs.counts[i]; + } + } +} + +impl Sub for DescriptorRanges { + type Output = Self; + fn sub(mut self, rhs: Self) -> Self { + self -= rhs; + self + } +} + +impl SubAssign for DescriptorRanges { + fn sub_assign(&mut self, rhs: Self) { + for i in 0..DESCRIPTOR_TYPES_COUNT { + self.counts[i] -= rhs.counts[i]; + } + } +} + +impl Mul for DescriptorRanges { + type Output = Self; + fn mul(mut self, rhs: u32) -> Self { + self *= rhs; + self + } +} + +impl MulAssign for DescriptorRanges { + fn mul_assign(&mut self, rhs: u32) { + for i in 0..DESCRIPTOR_TYPES_COUNT { + self.counts[i] *= rhs; + } + } +} + +impl<'a> IntoIterator for &'a DescriptorRanges { + type Item = DescriptorRangeDesc; + type IntoIter = DescriptorRangesIter<'a>; + + fn into_iter(self) -> DescriptorRangesIter<'a> { + self.iter() + } +} + + +pub struct DescriptorRangesIter<'a> { + counts: &'a [u32; DESCRIPTOR_TYPES_COUNT], + index: u8, +} + +impl<'a> Iterator for DescriptorRangesIter<'a> { + type Item = DescriptorRangeDesc; + + fn next(&mut self) -> Option { + loop { + let index = self.index as usize; + if index >= DESCRIPTOR_TYPES_COUNT { + return None; + } else { + self.index += 1; + if self.counts[index] > 0 { + return Some(DescriptorRangeDesc { + count: self.counts[index] as usize, + ty: DESCRIPTOR_TYPES[index], + }); + } + } + } + } +} diff --git a/third_party/rust/rendy-memory/.cargo-checksum.json b/third_party/rust/rendy-memory/.cargo-checksum.json new file mode 100644 index 000000000000..6939d578fc93 --- /dev/null +++ b/third_party/rust/rendy-memory/.cargo-checksum.json @@ -0,0 +1 @@ 
+{"files":{"Cargo.toml":"fe83416b06a4dd51f32ff690cd9dbf2bb6bd50b71f56c4638e5aa9d462f85b70","src/allocator/dedicated.rs":"5684b00774c9b69d39a23c5ae9b86a99a250d42a69f5d703c14c92445cbfa24e","src/allocator/dynamic.rs":"72ec46316b630d8c81ab88aac2b9749c0a535dae58bb766ac3cdb26e93ab81bf","src/allocator/linear.rs":"8721b2fcab68b39c0371b285a9d999dd871e82a676f0372b4458561140e2d401","src/allocator/mod.rs":"3338e9964975352439726a9fb476886eb8a44c80dc0abe71de74a7c925551fdd","src/block.rs":"e7edbb32d9a1b2f7eb6d129850e5b2f879cf6dff249865b45d959867d1e13bb0","src/heaps/heap.rs":"b78719a00cbfb36d5b7cf116608443267b9ca99a2394be11ee70ce676282f5d5","src/heaps/memory_type.rs":"6329a34e75fccbc86c5f7b2096b9b459a8bdf31f678ac4acdbab01545011d7d8","src/heaps/mod.rs":"02011a59a4f09d1adbf4d5de0d1d0b21118aff44571df9de588d2d196a81bacd","src/lib.rs":"92ce6b52031f3abf662e6a22a114eaadf3bd27f21aad85d33f81d60331bb2459","src/mapping/mod.rs":"6447a902c195f46c4187c8e4872c12d83112eca9d4e4080ee2b84b30bb311092","src/mapping/range.rs":"631e3e063d96cfd3c360437e72d12cfbf4cc0fba3288e3ade291929ba0ae2589","src/mapping/write.rs":"d32403c1cd9973135cd0fdb87630768e5e40164e21c65486735be475a4b3b781","src/memory.rs":"3022d0c02c483494eab55ca7147647c1a6bd5258a0b7410455e6f983c01e2678","src/usage.rs":"c313855d79b6d4638c788f20345d39a62d929437813a166007a3ec4ea69ee24d","src/util.rs":"39049e139b31594e45b0c1e5058d60832651e8bb83ae30c861c01fea99fd5ca5","src/utilization.rs":"dacb44e4dac24818de2914c3fdd68eaf5f9bfb8e1cf296ff20be03e224d7eec0"},"package":"08f99de535d9e48d9cfab780b521702cc0d7183d354872d223967b75abae1199"} \ No newline at end of file diff --git a/third_party/rust/rendy-memory/Cargo.toml b/third_party/rust/rendy-memory/Cargo.toml new file mode 100644 index 000000000000..bc71560f9fe6 --- /dev/null +++ b/third_party/rust/rendy-memory/Cargo.toml @@ -0,0 +1,55 @@ +# THIS FILE IS AUTOMATICALLY GENERATED BY CARGO +# +# When uploading crates to the registry Cargo will automatically +# "normalize" Cargo.toml files for maximal compatibility +# with all versions of Cargo and also rewrite `path` dependencies +# to registry (e.g., crates.io) dependencies +# +# If you believe there's an error in this file please file an +# issue against the rust-lang/cargo repository. 
If you're +# editing this file be aware that the upstream Cargo.toml +# will likely look very different (and much more reasonable) + +[package] +edition = "2018" +name = "rendy-memory" +version = "0.5.1" +authors = ["omni-viral "] +description = "Rendy's memory manager" +documentation = "https://docs.rs/rendy-memory" +keywords = ["graphics", "gfx-hal", "rendy"] +categories = ["rendering"] +license = "MIT OR Apache-2.0" +repository = "https://github.com/amethyst/rendy" +[dependencies.colorful] +version = "0.2" + +[dependencies.gfx-hal] +version = "0.4" + +[dependencies.hibitset] +version = "0.6" +default-features = false + +[dependencies.log] +version = "0.4" + +[dependencies.relevant] +version = "0.4" +features = ["log"] + +[dependencies.serde] +version = "1.0" +features = ["derive"] +optional = true + +[dependencies.slab] +version = "0.4" + +[dependencies.smallvec] +version = "0.6" +[dev-dependencies.rand] +version = "0.7" + +[features] +serde-1 = ["serde", "gfx-hal/serde"] diff --git a/third_party/rust/rendy-memory/src/allocator/dedicated.rs b/third_party/rust/rendy-memory/src/allocator/dedicated.rs new file mode 100644 index 000000000000..1824bfebcf4f --- /dev/null +++ b/third_party/rust/rendy-memory/src/allocator/dedicated.rs @@ -0,0 +1,188 @@ +use std::{ops::Range, ptr::NonNull}; + +use { + crate::{ + allocator::{Allocator, Kind}, + block::Block, + mapping::{mapped_fitting_range, MappedRange}, + memory::*, + }, + gfx_hal::{device::Device as _, Backend}, +}; + +/// Memory block allocated from `DedicatedAllocator` +#[derive(Debug)] +pub struct DedicatedBlock { + memory: Memory, + mapping: Option<(NonNull, Range)>, +} + +unsafe impl Send for DedicatedBlock where B: Backend {} +unsafe impl Sync for DedicatedBlock where B: Backend {} + +impl DedicatedBlock +where + B: Backend, +{ + /// Get inner memory. + /// Panics if mapped. + pub fn unwrap_memory(self) -> Memory { + assert!(self.mapping.is_none()); + self.memory + } + + /// Make unmapped block. + pub fn from_memory(memory: Memory) -> Self { + DedicatedBlock { + memory, + mapping: None, + } + } +} + +impl Block for DedicatedBlock +where + B: Backend, +{ + #[inline] + fn properties(&self) -> gfx_hal::memory::Properties { + self.memory.properties() + } + + #[inline] + fn memory(&self) -> &B::Memory { + self.memory.raw() + } + + #[inline] + fn range(&self) -> Range { + 0..self.memory.size() + } + + fn map<'a>( + &'a mut self, + device: &B::Device, + range: Range, + ) -> Result, gfx_hal::device::MapError> { + assert!( + range.start < range.end, + "Memory mapping region must have valid size" + ); + + if !self.memory.host_visible() { + //TODO: invalid access error + return Err(gfx_hal::device::MapError::MappingFailed); + } + + unsafe { + if let Some(ptr) = self + .mapping + .clone() + .and_then(|mapping| mapped_fitting_range(mapping.0, mapping.1, range.clone())) + { + Ok(MappedRange::from_raw(&self.memory, ptr, range)) + } else { + self.unmap(device); + let ptr = device.map_memory(self.memory.raw(), range.clone())?; + let ptr = NonNull::new(ptr).expect("Memory mapping shouldn't return nullptr"); + let mapping = MappedRange::from_raw(&self.memory, ptr, range); + self.mapping = Some((mapping.ptr(), mapping.range())); + Ok(mapping) + } + } + } + + fn unmap(&mut self, device: &B::Device) { + if self.mapping.take().is_some() { + unsafe { + // trace!("Unmap memory: {:#?}", self.memory); + device.unmap_memory(self.memory.raw()); + } + } + } +} + +/// Dedicated memory allocator that uses memory object per allocation requested. 
+/// +/// This allocator suites best huge allocations. +/// From 32 MiB when GPU has 4-8 GiB memory total. +/// +/// `Heaps` use this allocator when none of sub-allocators bound to the memory type +/// can handle size required. +/// TODO: Check if resource prefers dedicated memory. +#[derive(Debug)] +pub struct DedicatedAllocator { + memory_type: gfx_hal::MemoryTypeId, + memory_properties: gfx_hal::memory::Properties, + used: u64, +} + +impl DedicatedAllocator { + /// Get properties required by the allocator. + pub fn properties_required() -> gfx_hal::memory::Properties { + gfx_hal::memory::Properties::empty() + } + + /// Create new `LinearAllocator` + /// for `memory_type` with `memory_properties` specified + pub fn new( + memory_type: gfx_hal::MemoryTypeId, + memory_properties: gfx_hal::memory::Properties, + ) -> Self { + DedicatedAllocator { + memory_type, + memory_properties, + used: 0, + } + } +} + +impl Allocator for DedicatedAllocator +where + B: Backend, +{ + type Block = DedicatedBlock; + + fn kind() -> Kind { + Kind::Dedicated + } + + #[inline] + fn alloc( + &mut self, + device: &B::Device, + size: u64, + _align: u64, + ) -> Result<(DedicatedBlock, u64), gfx_hal::device::AllocationError> { + let memory = unsafe { + Memory::from_raw( + device.allocate_memory(self.memory_type, size)?, + size, + self.memory_properties, + ) + }; + + self.used += size; + + Ok((DedicatedBlock::from_memory(memory), size)) + } + + #[inline] + fn free(&mut self, device: &B::Device, mut block: DedicatedBlock) -> u64 { + block.unmap(device); + let size = block.memory.size(); + self.used -= size; + unsafe { + device.free_memory(block.memory.into_raw()); + } + size + } +} + +impl Drop for DedicatedAllocator { + fn drop(&mut self) { + if self.used > 0 { + log::error!("Not all allocation from DedicatedAllocator was freed"); + } + } +} diff --git a/third_party/rust/rendy-memory/src/allocator/dynamic.rs b/third_party/rust/rendy-memory/src/allocator/dynamic.rs new file mode 100644 index 000000000000..26221deb6b97 --- /dev/null +++ b/third_party/rust/rendy-memory/src/allocator/dynamic.rs @@ -0,0 +1,674 @@ +use std::{ + collections::{BTreeSet, HashMap}, + ops::Range, + ptr::NonNull, + thread, +}; + +use { + crate::{ + allocator::{Allocator, Kind}, + block::Block, + mapping::*, + memory::*, + util::*, + }, + gfx_hal::{device::Device as _, Backend}, + hibitset::{BitSet, BitSetLike as _}, +}; + +/// Memory block allocated from `DynamicAllocator` +#[derive(Debug)] +pub struct DynamicBlock { + block_index: u32, + chunk_index: u32, + count: u32, + memory: *const Memory, + ptr: Option>, + range: Range, + relevant: relevant::Relevant, +} + +unsafe impl Send for DynamicBlock where B: Backend {} +unsafe impl Sync for DynamicBlock where B: Backend {} + +impl DynamicBlock +where + B: Backend, +{ + fn shared_memory(&self) -> &Memory { + // Memory won't be freed until last block created from it deallocated. 
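+        // SAFETY: the invariant above means the `Memory` behind this raw
+        // pointer outlives every `DynamicBlock` handed out from it, so the
+        // dereference is sound.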
+ unsafe { &*self.memory } + } + + fn size(&self) -> u64 { + self.range.end - self.range.start + } + + fn dispose(self) { + self.relevant.dispose(); + } +} + +impl Block for DynamicBlock +where + B: Backend, +{ + #[inline] + fn properties(&self) -> gfx_hal::memory::Properties { + self.shared_memory().properties() + } + + #[inline] + fn memory(&self) -> &B::Memory { + self.shared_memory().raw() + } + + #[inline] + fn range(&self) -> Range { + self.range.clone() + } + + #[inline] + fn map<'a>( + &'a mut self, + _device: &B::Device, + range: Range, + ) -> Result, gfx_hal::device::MapError> { + debug_assert!( + range.start < range.end, + "Memory mapping region must have valid size" + ); + if !self.shared_memory().host_visible() { + //TODO: invalid access error + return Err(gfx_hal::device::MapError::MappingFailed); + } + + if let Some(ptr) = self.ptr { + if let Some((ptr, range)) = mapped_sub_range(ptr, self.range.clone(), range) { + let mapping = unsafe { MappedRange::from_raw(self.shared_memory(), ptr, range) }; + Ok(mapping) + } else { + Err(gfx_hal::device::MapError::OutOfBounds) + } + } else { + Err(gfx_hal::device::MapError::MappingFailed) + } + } + + #[inline] + fn unmap(&mut self, _device: &B::Device) {} +} + +/// Config for `DynamicAllocator`. +#[derive(Clone, Copy, Debug)] +#[cfg_attr(feature = "serde", derive(serde::Serialize, serde::Deserialize))] +pub struct DynamicConfig { + /// All requests are rounded up to multiple of this value. + pub block_size_granularity: u64, + + /// Maximum chunk of blocks size. + /// Actual chunk size is `min(max_chunk_size, block_size * blocks_per_chunk)` + pub max_chunk_size: u64, + + /// Minimum size of device allocation. + pub min_device_allocation: u64, +} + +/// No-fragmentation allocator. +/// Suitable for any type of small allocations. +/// Every freed block can be reused. +#[derive(Debug)] +pub struct DynamicAllocator { + /// Memory type that this allocator allocates. + memory_type: gfx_hal::MemoryTypeId, + + /// Memory properties of the memory type. + memory_properties: gfx_hal::memory::Properties, + + /// All requests are rounded up to multiple of this value. + block_size_granularity: u64, + + /// Maximum chunk of blocks size. + max_chunk_size: u64, + + /// Minimum size of device allocation. + min_device_allocation: u64, + + /// Chunk lists. + sizes: HashMap>, + + /// Ordered set of sizes that have allocated chunks. + chunks: BTreeSet, +} + +unsafe impl Send for DynamicAllocator where B: Backend {} +unsafe impl Sync for DynamicAllocator where B: Backend {} + +#[derive(Debug)] +struct SizeEntry { + /// Total count of allocated blocks with size corresponding to this entry. + total_blocks: u64, + + /// Bits per ready (non-exhausted) chunks with free blocks. + ready_chunks: BitSet, + + /// List of chunks. + chunks: slab::Slab>, +} + +impl Default for SizeEntry +where + B: Backend, +{ + fn default() -> Self { + SizeEntry { + chunks: Default::default(), + total_blocks: 0, + ready_chunks: Default::default(), + } + } +} + +const MAX_BLOCKS_PER_CHUNK: u32 = 64; +const MIN_BLOCKS_PER_CHUNK: u32 = 8; + +impl DynamicAllocator +where + B: Backend, +{ + /// Create new `DynamicAllocator` + /// for `memory_type` with `memory_properties` specified, + /// with `DynamicConfig` provided. 
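+    ///
+    /// A minimal construction sketch (the sizes are illustrative; all three
+    /// config values must be powers of two, as asserted below):
+    ///
+    /// ```ignore
+    /// let config = DynamicConfig {
+    ///     block_size_granularity: 256,
+    ///     max_chunk_size: 32 * 1024 * 1024,
+    ///     min_device_allocation: 1024 * 1024,
+    /// };
+    /// let mut allocator = DynamicAllocator::<B>::new(memory_type, memory_properties, config);
+    /// ```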
+ pub fn new( + memory_type: gfx_hal::MemoryTypeId, + memory_properties: gfx_hal::memory::Properties, + config: DynamicConfig, + ) -> Self { + log::trace!( + "Create new allocator: type: '{:?}', properties: '{:#?}' config: '{:#?}'", + memory_type, + memory_properties, + config + ); + + assert!( + config.block_size_granularity.is_power_of_two(), + "Allocation granularity must be power of two" + ); + + assert!( + config.max_chunk_size.is_power_of_two(), + "Max chunk size must be power of two" + ); + + assert!( + config.min_device_allocation.is_power_of_two(), + "Min device allocation must be power of two" + ); + + assert!( + config.min_device_allocation <= config.max_chunk_size, + "Min device allocation must be less than or equalt to max chunk size" + ); + + if memory_properties.contains(gfx_hal::memory::Properties::CPU_VISIBLE) { + debug_assert!( + fits_usize(config.max_chunk_size), + "Max chunk size must fit usize for mapping" + ); + } + + DynamicAllocator { + memory_type, + memory_properties, + block_size_granularity: config.block_size_granularity, + max_chunk_size: config.max_chunk_size, + min_device_allocation: config.min_device_allocation, + sizes: HashMap::new(), + chunks: BTreeSet::new(), + } + } + + /// Maximum allocation size. + pub fn max_allocation(&self) -> u64 { + self.max_chunk_size / MIN_BLOCKS_PER_CHUNK as u64 + } + + /// Allocate memory chunk from device. + fn alloc_chunk_from_device( + &self, + device: &B::Device, + block_size: u64, + chunk_size: u64, + ) -> Result, gfx_hal::device::AllocationError> { + log::trace!( + "Allocate chunk of size: {} for blocks of size {} from device", + chunk_size, + block_size + ); + + // Allocate from device. + let (memory, mapping) = unsafe { + // Valid memory type specified. + let raw = device.allocate_memory(self.memory_type, chunk_size)?; + + let mapping = if self + .memory_properties + .contains(gfx_hal::memory::Properties::CPU_VISIBLE) + { + log::trace!("Map new memory object"); + match device.map_memory(&raw, 0..chunk_size) { + Ok(mapping) => Some(NonNull::new_unchecked(mapping)), + Err(gfx_hal::device::MapError::OutOfMemory(error)) => { + device.free_memory(raw); + return Err(error.into()); + } + Err(_) => panic!("Unexpected mapping failure"), + } + } else { + None + }; + let memory = Memory::from_raw(raw, chunk_size, self.memory_properties); + (memory, mapping) + }; + Ok(Chunk::from_memory(block_size, memory, mapping)) + } + + /// Allocate memory chunk for given block size. + fn alloc_chunk( + &mut self, + device: &B::Device, + block_size: u64, + total_blocks: u64, + ) -> Result<(Chunk, u64), gfx_hal::device::AllocationError> { + log::trace!( + "Allocate chunk for blocks of size {} ({} total blocks allocated)", + block_size, + total_blocks + ); + + let min_chunk_size = MIN_BLOCKS_PER_CHUNK as u64 * block_size; + let min_size = min_chunk_size.min(total_blocks * block_size); + let max_chunk_size = MAX_BLOCKS_PER_CHUNK as u64 * block_size; + + // If smallest possible chunk size is larger then this allocator max allocation + if min_size > self.max_allocation() + || (total_blocks < MIN_BLOCKS_PER_CHUNK as u64 + && min_size >= self.min_device_allocation) + { + // Allocate memory block from device. + let chunk = self.alloc_chunk_from_device(device, block_size, min_size)?; + return Ok((chunk, min_size)); + } + + if let Some(&chunk_size) = self + .chunks + .range(min_chunk_size..=max_chunk_size) + .next_back() + { + // Allocate block for the chunk. 
+ let (block, allocated) = self.alloc_from_entry(device, chunk_size, 1, block_size)?; + Ok((Chunk::from_block(block_size, block), allocated)) + } else { + let total_blocks = self.sizes[&block_size].total_blocks; + let chunk_size = + (max_chunk_size.min(min_chunk_size.max(total_blocks * block_size)) / 2 + 1) + .next_power_of_two(); + let (block, allocated) = self.alloc_block(device, chunk_size, block_size)?; + Ok((Chunk::from_block(block_size, block), allocated)) + } + } + + /// Allocate blocks from particular chunk. + fn alloc_from_chunk( + chunks: &mut slab::Slab>, + chunk_index: u32, + block_size: u64, + count: u32, + align: u64, + ) -> Option> { + log::trace!( + "Allocate {} consecutive blocks of size {} from chunk {}", + count, + block_size, + chunk_index + ); + + let ref mut chunk = chunks[chunk_index as usize]; + let block_index = chunk.acquire_blocks(count, block_size, align)?; + let block_range = chunk.blocks_range(block_size, block_index, count); + + debug_assert_eq!((block_range.end - block_range.start) % count as u64, 0); + + Some(DynamicBlock { + range: block_range.clone(), + memory: chunk.shared_memory(), + block_index, + chunk_index, + count, + ptr: chunk.mapping_ptr().map(|ptr| { + mapped_fitting_range(ptr, chunk.range(), block_range) + .expect("Block must be sub-range of chunk") + }), + relevant: relevant::Relevant, + }) + } + + /// Allocate blocks from size entry. + fn alloc_from_entry( + &mut self, + device: &B::Device, + block_size: u64, + count: u32, + align: u64, + ) -> Result<(DynamicBlock, u64), gfx_hal::device::AllocationError> { + log::trace!( + "Allocate {} consecutive blocks for size {} from the entry", + count, + block_size + ); + + debug_assert!(count < MIN_BLOCKS_PER_CHUNK); + let size_entry = self.sizes.entry(block_size).or_default(); + + for chunk_index in (&size_entry.ready_chunks).iter() { + if let Some(block) = Self::alloc_from_chunk( + &mut size_entry.chunks, + chunk_index, + block_size, + count, + align, + ) { + return Ok((block, 0)); + } + } + + if size_entry.chunks.vacant_entry().key() > max_chunks_per_size() { + return Err(gfx_hal::device::OutOfMemory::Host.into()); + } + + let total_blocks = size_entry.total_blocks; + let (chunk, allocated) = self.alloc_chunk(device, block_size, total_blocks)?; + let size_entry = self.sizes.entry(block_size).or_default(); + let chunk_index = size_entry.chunks.insert(chunk) as u32; + + let block = Self::alloc_from_chunk( + &mut size_entry.chunks, + chunk_index, + block_size, + count, + align, + ) + .expect("New chunk should yield blocks"); + + if !size_entry.chunks[chunk_index as usize].is_exhausted() { + size_entry.ready_chunks.add(chunk_index); + } + + Ok((block, allocated)) + } + + /// Allocate block. 
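+    ///
+    /// For block sizes that are still rare (few total blocks), the request may
+    /// be served from an existing nearby size class instead, possibly as
+    /// several consecutive blocks, to bound per-size chunk overhead (see the
+    /// `overhead` computation below).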
+ fn alloc_block( + &mut self, + device: &B::Device, + block_size: u64, + align: u64, + ) -> Result<(DynamicBlock, u64), gfx_hal::device::AllocationError> { + log::trace!("Allocate block of size {}", block_size); + + debug_assert_eq!(block_size % self.block_size_granularity, 0); + let size_entry = self.sizes.entry(block_size).or_default(); + size_entry.total_blocks += 1; + + let overhead = (MIN_BLOCKS_PER_CHUNK as u64 - 1) / size_entry.total_blocks; + + if overhead >= 1 { + if let Some(&size) = self + .chunks + .range(block_size / 4..block_size * overhead) + .next() + { + return self.alloc_from_entry( + device, + size, + ((block_size - 1) / size + 1) as u32, + align, + ); + } + } + + if size_entry.total_blocks == MIN_BLOCKS_PER_CHUNK as u64 { + self.chunks.insert(block_size); + } + + self.alloc_from_entry(device, block_size, 1, align) + } + + fn free_chunk(&mut self, device: &B::Device, chunk: Chunk, block_size: u64) -> u64 { + log::trace!("Free chunk: {:#?}", chunk); + assert!(chunk.is_unused(block_size)); + match chunk.flavor { + ChunkFlavor::Dedicated(boxed, _) => { + let size = boxed.size(); + unsafe { + if self + .memory_properties + .contains(gfx_hal::memory::Properties::CPU_VISIBLE) + { + log::trace!("Unmap memory: {:#?}", boxed); + device.unmap_memory(boxed.raw()); + } + device.free_memory(boxed.into_raw()); + } + size + } + ChunkFlavor::Dynamic(dynamic_block) => self.free(device, dynamic_block), + } + } + + fn free_block(&mut self, device: &B::Device, block: DynamicBlock) -> u64 { + log::trace!("Free block: {:#?}", block); + + let block_size = block.size() / block.count as u64; + let ref mut size_entry = self + .sizes + .get_mut(&block_size) + .expect("Unable to get size entry from which block was allocated"); + let chunk_index = block.chunk_index; + let ref mut chunk = size_entry.chunks[chunk_index as usize]; + let block_index = block.block_index; + let count = block.count; + block.dispose(); + chunk.release_blocks(block_index, count); + if chunk.is_unused(block_size) { + size_entry.ready_chunks.remove(chunk_index); + let chunk = size_entry.chunks.remove(chunk_index as usize); + self.free_chunk(device, chunk, block_size) + } else { + size_entry.ready_chunks.add(chunk_index); + 0 + } + } + + /// Perform full cleanup of the memory allocated. 
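+    ///
+    /// Intended teardown order (sketch): free every outstanding block first,
+    /// since `dispose` asserts that no size entry still holds chunks:
+    ///
+    /// ```ignore
+    /// allocator.free(device, block); // repeat for each live block
+    /// allocator.dispose();
+    /// ```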
+ pub fn dispose(self) { + if !thread::panicking() { + for (index, size) in self.sizes { + assert_eq!(size.chunks.len(), 0, "SizeEntry({}) is still used", index); + } + } else { + for (index, size) in self.sizes { + if size.chunks.len() != 0 { + log::error!("Memory leak: SizeEntry({}) is still used", index); + } + } + } + } +} + +impl Allocator for DynamicAllocator +where + B: Backend, +{ + type Block = DynamicBlock; + + fn kind() -> Kind { + Kind::Dynamic + } + + fn alloc( + &mut self, + device: &B::Device, + size: u64, + align: u64, + ) -> Result<(DynamicBlock, u64), gfx_hal::device::AllocationError> { + debug_assert!(size <= self.max_allocation()); + debug_assert!(align.is_power_of_two()); + let aligned_size = ((size - 1) | (align - 1) | (self.block_size_granularity - 1)) + 1; + + log::trace!( + "Allocate dynamic block: size: {}, align: {}, aligned size: {}, type: {}", + size, + align, + aligned_size, + self.memory_type.0 + ); + + self.alloc_block(device, aligned_size, align) + } + + fn free(&mut self, device: &B::Device, block: DynamicBlock) -> u64 { + self.free_block(device, block) + } +} + + +#[derive(Debug)] +enum ChunkFlavor { + + Dedicated(Box>, Option>), + + + Dynamic(DynamicBlock), +} + +#[derive(Debug)] +struct Chunk { + flavor: ChunkFlavor, + blocks: u64, +} + +impl Chunk +where + B: Backend, +{ + fn from_memory(block_size: u64, memory: Memory, mapping: Option>) -> Self { + let blocks = memory.size() / block_size; + debug_assert!(blocks <= MAX_BLOCKS_PER_CHUNK as u64); + + let high_bit = 1 << (blocks - 1); + + Chunk { + flavor: ChunkFlavor::Dedicated(Box::new(memory), mapping), + blocks: (high_bit - 1) | high_bit, + } + } + + fn from_block(block_size: u64, chunk_block: DynamicBlock) -> Self { + let blocks = (chunk_block.size() / block_size).min(MAX_BLOCKS_PER_CHUNK as u64); + + let high_bit = 1 << (blocks - 1); + + Chunk { + flavor: ChunkFlavor::Dynamic(chunk_block), + blocks: (high_bit - 1) | high_bit, + } + } + + fn shared_memory(&self) -> &Memory { + match &self.flavor { + ChunkFlavor::Dedicated(boxed, _) => &*boxed, + ChunkFlavor::Dynamic(chunk_block) => chunk_block.shared_memory(), + } + } + + fn range(&self) -> Range { + match &self.flavor { + ChunkFlavor::Dedicated(boxed, _) => 0..boxed.size(), + ChunkFlavor::Dynamic(chunk_block) => chunk_block.range(), + } + } + + fn size(&self) -> u64 { + let range = self.range(); + range.end - range.start + } + + + fn blocks_range(&self, block_size: u64, block_index: u32, count: u32) -> Range { + let range = self.range(); + let start = range.start + block_size * block_index as u64; + let end = start + block_size * count as u64; + debug_assert!(end <= range.end); + start..end + } + + + fn is_unused(&self, block_size: u64) -> bool { + let blocks = (self.size() / block_size).min(MAX_BLOCKS_PER_CHUNK as u64); + + let high_bit = 1 << (blocks - 1); + let mask = (high_bit - 1) | high_bit; + + debug_assert!(self.blocks <= mask); + self.blocks == mask + } + + + fn is_exhausted(&self) -> bool { + self.blocks == 0 + } + + fn acquire_blocks(&mut self, count: u32, block_size: u64, align: u64) -> Option { + debug_assert!(count > 0 && count <= MAX_BLOCKS_PER_CHUNK); + + + let mut blocks = !0; + for i in 0..count { + blocks &= self.blocks >> i; + } + + while blocks != 0 { + let index = blocks.trailing_zeros(); + blocks &= !(1 << index); + + if (index as u64 * block_size) & (align - 1) == 0 { + let mask = ((1 << count) - 1) << index; + self.blocks &= !mask; + return Some(index); + } + } + None + } + + fn release_blocks(&mut self, index: u32, count: 
u32) { + let mask = ((1 << count) - 1) << index; + debug_assert_eq!(self.blocks & mask, 0); + self.blocks |= mask; + } + + fn mapping_ptr(&self) -> Option> { + match &self.flavor { + ChunkFlavor::Dedicated(_, ptr) => *ptr, + ChunkFlavor::Dynamic(chunk_block) => chunk_block.ptr, + } + } +} + +fn max_chunks_per_size() -> usize { + let value = (std::mem::size_of::() * 8).pow(4); + debug_assert!(fits_u32(value)); + value +} diff --git a/third_party/rust/rendy-memory/src/allocator/linear.rs b/third_party/rust/rendy-memory/src/allocator/linear.rs new file mode 100644 index 000000000000..291d72aee54b --- /dev/null +++ b/third_party/rust/rendy-memory/src/allocator/linear.rs @@ -0,0 +1,325 @@ +use std::{collections::VecDeque, ops::Range, ptr::NonNull}; + +use { + crate::{ + allocator::{Allocator, Kind}, + block::Block, + mapping::*, + memory::*, + util::*, + }, + gfx_hal::{device::Device as _, Backend}, + std::sync::Arc, +}; + +/// Memory block allocated from `LinearAllocator` +pub struct LinearBlock { + memory: Arc>, + linear_index: u64, + ptr: NonNull, + range: Range, + relevant: relevant::Relevant, +} + +impl std::fmt::Debug for LinearBlock +where + B: Backend, +{ + fn fmt(&self, fmt: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + fmt.debug_struct("LinearBlock") + .field("memory", &*self.memory) + .field("linear_index", &self.linear_index) + .field("ptr", &self.ptr) + .field("range", &self.range) + .finish() + } +} + +unsafe impl Send for LinearBlock where B: Backend {} +unsafe impl Sync for LinearBlock where B: Backend {} + +impl LinearBlock +where + B: Backend, +{ + fn size(&self) -> u64 { + self.range.end - self.range.start + } + + fn dispose(self) { + self.relevant.dispose(); + } +} + +impl Block for LinearBlock +where + B: Backend, +{ + #[inline] + fn properties(&self) -> gfx_hal::memory::Properties { + self.memory.properties() + } + + #[inline] + fn memory(&self) -> &B::Memory { + self.memory.raw() + } + + #[inline] + fn range(&self) -> Range { + self.range.clone() + } + + #[inline] + fn map<'a>( + &'a mut self, + _device: &B::Device, + range: Range, + ) -> Result, gfx_hal::device::MapError> { + assert!( + range.start < range.end, + "Memory mapping region must have valid size" + ); + if !self.memory.host_visible() { + //TODO: invalid access error + return Err(gfx_hal::device::MapError::MappingFailed); + } + + if let Some((ptr, range)) = mapped_sub_range(self.ptr, self.range.clone(), range) { + let mapping = unsafe { MappedRange::from_raw(&*self.memory, ptr, range) }; + Ok(mapping) + } else { + Err(gfx_hal::device::MapError::OutOfBounds) + } + } + + #[inline] + fn unmap(&mut self, _device: &B::Device) { + debug_assert!(self.memory.host_visible()); + } +} + +/// Config for `LinearAllocator`. +#[derive(Clone, Copy, Debug)] +#[cfg_attr(feature = "serde", derive(serde::Serialize, serde::Deserialize))] +pub struct LinearConfig { + /// Size of the linear chunk. + /// Keep it big. + pub linear_size: u64, +} + +/// Linear allocator that return memory from chunk sequentially. +/// It keeps only number of bytes allocated from each chunk. +/// Once chunk is exhausted it is placed into list. +/// When all blocks allocated from head of that list are freed, +/// head is freed as well. +/// +/// This allocator suites best short-lived types of allocations. +/// Allocation strategy requires minimal overhead and implementation is fast. +/// But holding single block will completely stop memory recycling. 
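+///
+/// A minimal usage sketch (`linear_size` is illustrative; the memory type must
+/// be CPU-visible, as `properties_required` below demands):
+///
+/// ```ignore
+/// let config = LinearConfig { linear_size: 16 * 1024 * 1024 };
+/// let mut allocator = LinearAllocator::<B>::new(memory_type, memory_properties, config);
+/// let (block, _allocated) = allocator.alloc(device, 1024, 256)?;
+/// // ... short-lived use of `block` ...
+/// allocator.free(device, block);
+/// allocator.dispose(device);
+/// ```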
+#[derive(Debug)] +pub struct LinearAllocator { + memory_type: gfx_hal::MemoryTypeId, + memory_properties: gfx_hal::memory::Properties, + linear_size: u64, + offset: u64, + lines: VecDeque>, +} + +#[derive(Debug)] +struct Line { + used: u64, + free: u64, + memory: Arc>, + ptr: NonNull, +} + +unsafe impl Send for Line where B: Backend {} +unsafe impl Sync for Line where B: Backend {} + +impl LinearAllocator +where + B: Backend, +{ + /// Get properties required by the `LinearAllocator`. + pub fn properties_required() -> gfx_hal::memory::Properties { + gfx_hal::memory::Properties::CPU_VISIBLE + } + + /// Maximum allocation size. + pub fn max_allocation(&self) -> u64 { + self.linear_size / 2 + } + + /// Create new `LinearAllocator` + /// for `memory_type` with `memory_properties` specified, + /// with `LinearConfig` provided. + pub fn new( + memory_type: gfx_hal::MemoryTypeId, + memory_properties: gfx_hal::memory::Properties, + config: LinearConfig, + ) -> Self { + log::trace!( + "Create new 'linear' allocator: type: '{:?}', properties: '{:#?}' config: '{:#?}'", + memory_type, + memory_properties, + config + ); + assert!(memory_properties.contains(Self::properties_required())); + assert!( + fits_usize(config.linear_size), + "Linear size must fit in both usize and u64" + ); + LinearAllocator { + memory_type, + memory_properties, + linear_size: config.linear_size, + offset: 0, + lines: VecDeque::new(), + } + } + + /// Perform full cleanup of the memory allocated. + pub fn dispose(mut self, device: &B::Device) { + let _ = self.cleanup(device, 0); + if !self.lines.is_empty() { + log::error!( + "Lines are not empty during allocator disposal. Lines: {:#?}", + self.lines + ); + } + } + + fn cleanup(&mut self, device: &B::Device, off: usize) -> u64 { + let mut freed = 0; + while self.lines.len() > off { + if self.lines[0].used > self.lines[0].free { + break; + } + + let line = self.lines.pop_front().unwrap(); + self.offset += 1; + + unsafe { + match Arc::try_unwrap(line.memory) { + Ok(memory) => { + // trace!("Unmap memory: {:#?}", line.memory); + device.unmap_memory(memory.raw()); + + freed += memory.size(); + device.free_memory(memory.into_raw()); + } + Err(_) => log::error!("Allocated `Line` was freed, but memory is still shared and never will be destroyed"), + } + } + } + freed + } +} + +impl Allocator for LinearAllocator +where + B: Backend, +{ + type Block = LinearBlock; + + fn kind() -> Kind { + Kind::Linear + } + + fn alloc( + &mut self, + device: &B::Device, + size: u64, + align: u64, + ) -> Result<(LinearBlock, u64), gfx_hal::device::AllocationError> { + debug_assert!(self + .memory_properties + .contains(gfx_hal::memory::Properties::CPU_VISIBLE)); + + assert!(size <= self.linear_size); + assert!(align <= self.linear_size); + + let count = self.lines.len() as u64; + if let Some(line) = self.lines.back_mut() { + let aligned = aligned(line.used, align); + let overhead = aligned - line.used; + if self.linear_size - size > aligned { + line.used = aligned + size; + line.free += overhead; + let (ptr, range) = + mapped_sub_range(line.ptr, 0..self.linear_size, aligned..aligned + size) + .expect("This sub-range must fit in line mapping"); + + return Ok(( + LinearBlock { + linear_index: self.offset + count - 1, + memory: line.memory.clone(), + ptr, + range, + relevant: relevant::Relevant, + }, + 0, + )); + } + } + + let (memory, ptr) = unsafe { + let raw = device.allocate_memory(self.memory_type, self.linear_size)?; + + let ptr = match device.map_memory(&raw, 0..self.linear_size) { + Ok(ptr) => 
NonNull::new_unchecked(ptr), + Err(gfx_hal::device::MapError::OutOfMemory(error)) => { + device.free_memory(raw); + return Err(error.into()); + } + Err(_) => panic!("Unexpected mapping failure"), + }; + + let memory = Memory::from_raw(raw, self.linear_size, self.memory_properties); + + (memory, ptr) + }; + + let line = Line { + used: size, + free: 0, + ptr, + memory: Arc::new(memory), + }; + + let (ptr, range) = mapped_sub_range(ptr, 0..self.linear_size, 0..size) + .expect("This sub-range must fit in line mapping"); + + let block = LinearBlock { + linear_index: self.offset + count, + memory: line.memory.clone(), + ptr, + range, + relevant: relevant::Relevant, + }; + + self.lines.push_back(line); + Ok((block, self.linear_size)) + } + + fn free(&mut self, device: &B::Device, block: Self::Block) -> u64 { + let index = block.linear_index - self.offset; + assert!( + fits_usize(index), + "This can't exceed lines list length which fits into usize by definition" + ); + let index = index as usize; + assert!( + index < self.lines.len(), + "Can't be allocated from not yet created line" + ); + { + let ref mut line = self.lines[index]; + line.free += block.size(); + } + block.dispose(); + + self.cleanup(device, 1) + } +} diff --git a/third_party/rust/rendy-memory/src/allocator/mod.rs b/third_party/rust/rendy-memory/src/allocator/mod.rs new file mode 100644 index 000000000000..bf368d56e808 --- /dev/null +++ b/third_party/rust/rendy-memory/src/allocator/mod.rs @@ -0,0 +1,50 @@ + + +mod dedicated; +mod dynamic; +mod linear; + +use crate::block::Block; + +pub use self::{ + dedicated::{DedicatedAllocator, DedicatedBlock}, + dynamic::{DynamicAllocator, DynamicBlock, DynamicConfig}, + linear::{LinearAllocator, LinearBlock, LinearConfig}, +}; + + +#[derive(Clone, Copy, Debug, PartialEq, Eq, PartialOrd, Ord)] +pub enum Kind { + + Dedicated, + + + Dynamic, + + + + + Linear, +} + + +pub trait Allocator { + + type Block: Block; + + + fn kind() -> Kind; + + + + fn alloc( + &mut self, + device: &B::Device, + size: u64, + align: u64, + ) -> Result<(Self::Block, u64), gfx_hal::device::AllocationError>; + + + + fn free(&mut self, device: &B::Device, block: Self::Block) -> u64; +} diff --git a/third_party/rust/rendy-memory/src/block.rs b/third_party/rust/rendy-memory/src/block.rs new file mode 100644 index 000000000000..646765d70bc3 --- /dev/null +++ b/third_party/rust/rendy-memory/src/block.rs @@ -0,0 +1,36 @@ +use std::ops::Range; + +use crate::mapping::MappedRange; + + + + + +pub trait Block { + + fn properties(&self) -> gfx_hal::memory::Properties; + + + fn memory(&self) -> &B::Memory; + + + fn range(&self) -> Range; + + + fn size(&self) -> u64 { + let range = self.range(); + range.end - range.start + } + + + + fn map<'a>( + &'a mut self, + device: &B::Device, + range: Range, + ) -> Result, gfx_hal::device::MapError>; + + + + fn unmap(&mut self, device: &B::Device); +} diff --git a/third_party/rust/rendy-memory/src/heaps/heap.rs b/third_party/rust/rendy-memory/src/heaps/heap.rs new file mode 100644 index 000000000000..6595cbc83a56 --- /dev/null +++ b/third_party/rust/rendy-memory/src/heaps/heap.rs @@ -0,0 +1,49 @@ +use crate::utilization::*; + +#[derive(Debug)] +pub(super) struct MemoryHeap { + size: u64, + used: u64, + effective: u64, +} + +impl MemoryHeap { + pub(super) fn new(size: u64) -> Self { + MemoryHeap { + size, + used: 0, + effective: 0, + } + } + + pub(super) fn available(&self) -> u64 { + if self.used > self.size { + log::warn!("Heap size exceeded"); + 0 + } else { + self.size - self.used + } + } + + 
pub(super) fn allocated(&mut self, used: u64, effective: u64) { + self.used += used; + self.effective += effective; + debug_assert!(self.used >= self.effective); + } + + pub(super) fn freed(&mut self, used: u64, effective: u64) { + self.used -= used; + self.effective -= effective; + debug_assert!(self.used >= self.effective); + } + + pub(super) fn utilization(&self) -> MemoryHeapUtilization { + MemoryHeapUtilization { + utilization: MemoryUtilization { + used: self.used, + effective: self.effective, + }, + size: self.size, + } + } +} diff --git a/third_party/rust/rendy-memory/src/heaps/memory_type.rs b/third_party/rust/rendy-memory/src/heaps/memory_type.rs new file mode 100644 index 000000000000..fa0eb844d771 --- /dev/null +++ b/third_party/rust/rendy-memory/src/heaps/memory_type.rs @@ -0,0 +1,157 @@ +use { + super::{BlockFlavor, HeapsConfig}, + crate::{allocator::*, usage::MemoryUsage, utilization::*}, + gfx_hal::memory::Properties, +}; + +#[derive(Debug)] +pub(super) struct MemoryType { + heap_index: usize, + properties: Properties, + dedicated: DedicatedAllocator, + linear: Option>, + dynamic: Option>, + // chunk: Option, + used: u64, + effective: u64, +} + +impl MemoryType +where + B: gfx_hal::Backend, +{ + pub(super) fn new( + memory_type: gfx_hal::MemoryTypeId, + heap_index: usize, + properties: Properties, + config: HeapsConfig, + ) -> Self { + MemoryType { + properties, + heap_index, + dedicated: DedicatedAllocator::new(memory_type, properties), + linear: if properties.contains(Properties::CPU_VISIBLE) { + config + .linear + .map(|config| LinearAllocator::new(memory_type, properties, config)) + } else { + None + }, + dynamic: config + .dynamic + .map(|config| DynamicAllocator::new(memory_type, properties, config)), + used: 0, + effective: 0, + } + } + + pub(super) fn properties(&self) -> Properties { + self.properties + } + + pub(super) fn heap_index(&self) -> usize { + self.heap_index + } + + pub(super) fn alloc( + &mut self, + device: &B::Device, + usage: impl MemoryUsage, + size: u64, + align: u64, + ) -> Result<(BlockFlavor, u64), gfx_hal::device::AllocationError> { + let (block, allocated) = self.alloc_impl(device, usage, size, align)?; + self.effective += block.size(); + self.used += allocated; + Ok((block, allocated)) + } + + fn alloc_impl( + &mut self, + device: &B::Device, + usage: impl MemoryUsage, + size: u64, + align: u64, + ) -> Result<(BlockFlavor, u64), gfx_hal::device::AllocationError> { + match (self.dynamic.as_mut(), self.linear.as_mut()) { + (Some(dynamic), Some(linear)) => { + if dynamic.max_allocation() >= size + && usage.allocator_fitness(Kind::Dynamic) + > usage.allocator_fitness(Kind::Linear) + { + dynamic + .alloc(device, size, align) + .map(|(block, size)| (BlockFlavor::Dynamic(block), size)) + } else if linear.max_allocation() >= size + && usage.allocator_fitness(Kind::Linear) > 0 + { + linear + .alloc(device, size, align) + .map(|(block, size)| (BlockFlavor::Linear(block), size)) + } else { + self.dedicated + .alloc(device, size, align) + .map(|(block, size)| (BlockFlavor::Dedicated(block), size)) + } + } + (Some(dynamic), None) => { + if dynamic.max_allocation() >= size && usage.allocator_fitness(Kind::Dynamic) > 0 { + dynamic + .alloc(device, size, align) + .map(|(block, size)| (BlockFlavor::Dynamic(block), size)) + } else { + self.dedicated + .alloc(device, size, align) + .map(|(block, size)| (BlockFlavor::Dedicated(block), size)) + } + } + (None, Some(linear)) => { + if linear.max_allocation() >= size && usage.allocator_fitness(Kind::Linear) > 0 { + 
linear + .alloc(device, size, align) + .map(|(block, size)| (BlockFlavor::Linear(block), size)) + } else { + self.dedicated + .alloc(device, size, align) + .map(|(block, size)| (BlockFlavor::Dedicated(block), size)) + } + } + (None, None) => self + .dedicated + .alloc(device, size, align) + .map(|(block, size)| (BlockFlavor::Dedicated(block), size)), + } + } + + pub(super) fn free(&mut self, device: &B::Device, block: BlockFlavor) -> u64 { + match block { + BlockFlavor::Dedicated(block) => self.dedicated.free(device, block), + BlockFlavor::Linear(block) => self.linear.as_mut().unwrap().free(device, block), + BlockFlavor::Dynamic(block) => self.dynamic.as_mut().unwrap().free(device, block), + } + } + + pub(super) fn dispose(self, device: &B::Device) { + log::trace!("Dispose memory allocators"); + + if let Some(linear) = self.linear { + linear.dispose(device); + log::trace!("Linear allocator disposed"); + } + if let Some(dynamic) = self.dynamic { + dynamic.dispose(); + log::trace!("Dynamic allocator disposed"); + } + } + + pub(super) fn utilization(&self) -> MemoryTypeUtilization { + MemoryTypeUtilization { + utilization: MemoryUtilization { + used: self.used, + effective: self.effective, + }, + properties: self.properties, + heap_index: self.heap_index, + } + } +} diff --git a/third_party/rust/rendy-memory/src/heaps/mod.rs b/third_party/rust/rendy-memory/src/heaps/mod.rs new file mode 100644 index 000000000000..ea596de0772c --- /dev/null +++ b/third_party/rust/rendy-memory/src/heaps/mod.rs @@ -0,0 +1,324 @@ +mod heap; +mod memory_type; + +use { + self::{heap::MemoryHeap, memory_type::MemoryType}, + crate::{allocator::*, block::Block, mapping::*, usage::MemoryUsage, util::*, utilization::*}, + std::ops::Range, +}; + +/// Possible errors returned by `Heaps`. +#[allow(missing_copy_implementations)] +#[derive(Debug)] +pub enum HeapsError { + /// Memory allocation failure. + AllocationError(gfx_hal::device::AllocationError), + /// No memory types among required for resource with requested properties was found. + NoSuitableMemory(u32, gfx_hal::memory::Properties), +} + +impl std::fmt::Display for HeapsError { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + match self { + HeapsError::AllocationError(e) => write!(f, "{:?}", e), + HeapsError::NoSuitableMemory(e, e2) => write!( + f, + "Memory type among ({}) with properties ({:?}) not found", + e, e2 + ), + } + } +} +impl std::error::Error for HeapsError {} + +impl From for HeapsError { + fn from(error: gfx_hal::device::AllocationError) -> Self { + HeapsError::AllocationError(error) + } +} + +impl From for HeapsError { + fn from(error: gfx_hal::device::OutOfMemory) -> Self { + HeapsError::AllocationError(error.into()) + } +} + +/// Config for `Heaps` allocator. +#[derive(Clone, Copy, Debug)] +#[cfg_attr(feature = "serde", derive(serde::Serialize, serde::Deserialize))] +pub struct HeapsConfig { + /// Config for linear sub-allocator. + pub linear: Option, + + /// Config for dynamic sub-allocator. + pub dynamic: Option, +} + +/// Heaps available on particular physical device. +#[derive(Debug)] +pub struct Heaps { + types: Vec>, + heaps: Vec, +} + +impl Heaps +where + B: gfx_hal::Backend, +{ + /// This must be called with `gfx_hal::memory::Properties` fetched from physical device. 
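+    ///
+    /// A sketch of the expected argument shape (`types` yields
+    /// `(Properties, heap_index, HeapsConfig)` tuples and `heaps` yields heap
+    /// sizes in bytes, matching how they are consumed below); with gfx-hal's
+    /// `MemoryProperties` this might look like:
+    ///
+    /// ```ignore
+    /// let heaps = unsafe {
+    ///     Heaps::new(
+    ///         mem_props.memory_types.iter().map(|mt| {
+    ///             (mt.properties, mt.heap_index as u32, config)
+    ///         }),
+    ///         mem_props.memory_heaps.iter().cloned(),
+    ///     )
+    /// };
+    /// ```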
+ pub unsafe fn new(types: P, heaps: H) -> Self + where + P: IntoIterator, + H: IntoIterator, + { + let heaps = heaps + .into_iter() + .map(|size| MemoryHeap::new(size)) + .collect::>(); + Heaps { + types: types + .into_iter() + .enumerate() + .map(|(index, (properties, heap_index, config))| { + assert!( + fits_u32(index), + "Number of memory types must fit in u32 limit" + ); + assert!( + fits_usize(heap_index), + "Number of memory types must fit in u32 limit" + ); + let memory_type = gfx_hal::MemoryTypeId(index); + let heap_index = heap_index as usize; + assert!(heap_index < heaps.len()); + MemoryType::new(memory_type, heap_index, properties, config) + }) + .collect(), + heaps, + } + } + + /// Allocate memory block + /// from one of memory types specified by `mask`, + /// for intended `usage`, + /// with `size` + /// and `align` requirements. + pub fn allocate( + &mut self, + device: &B::Device, + mask: u32, + usage: impl MemoryUsage, + size: u64, + align: u64, + ) -> Result, HeapsError> { + debug_assert!(fits_u32(self.types.len())); + + let (memory_index, _, _) = { + let suitable_types = self + .types + .iter() + .enumerate() + .filter(|(index, _)| (mask & (1u32 << index)) != 0) + .filter_map(|(index, mt)| { + if mt.properties().contains(usage.properties_required()) { + let fitness = usage.memory_fitness(mt.properties()); + Some((index, mt, fitness)) + } else { + None + } + }) + .collect::>(); + + if suitable_types.is_empty() { + return Err(HeapsError::NoSuitableMemory( + mask, + usage.properties_required(), + )); + } + + suitable_types + .into_iter() + .filter(|(_, mt, _)| self.heaps[mt.heap_index()].available() > size + align) + .max_by_key(|&(_, _, fitness)| fitness) + .ok_or_else(|| { + log::error!("All suitable heaps are exhausted. {:#?}", self); + gfx_hal::device::OutOfMemory::Device + })? + }; + + self.allocate_from(device, memory_index as u32, usage, size, align) + } + + /// Allocate memory block + /// from `memory_index` specified, + /// for intended `usage`, + /// with `size` + /// and `align` requirements. + fn allocate_from( + &mut self, + device: &B::Device, + memory_index: u32, + usage: impl MemoryUsage, + size: u64, + align: u64, + ) -> Result, HeapsError> { + log::trace!( + "Allocate memory block: type '{}', usage '{:#?}', size: '{}', align: '{}'", + memory_index, + usage, + size, + align + ); + assert!(fits_usize(memory_index)); + + let ref mut memory_type = self.types[memory_index as usize]; + let ref mut memory_heap = self.heaps[memory_type.heap_index()]; + + if memory_heap.available() < size { + return Err(gfx_hal::device::OutOfMemory::Device.into()); + } + + let (block, allocated) = memory_type.alloc(device, usage, size, align)?; + memory_heap.allocated(allocated, block.size()); + + Ok(MemoryBlock { + block, + memory_index, + }) + } + + /// Free memory block. + /// + /// Memory block must be allocated from this heap. 
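A hypothetical round trip over `allocate` above and `free` below; `device`, a `gfx_hal::memory::Requirements` value `reqs`, and the `Data` usage profile (defined later in this crate) are assumed, and the `?` presumes a caller returning `Result<_, HeapsError>`:

    let block = heaps.allocate(
        &device,
        reqs.type_mask as u32, // bit i set => memory type i is acceptable
        Data,                  // usage profile: prefers DEVICE_LOCAL memory
        reqs.size,
        reqs.alignment,
    )?;
    // ... bind block.memory() at block.range().start, record commands ...
    heaps.free(&device, block);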
+ pub fn free(&mut self, device: &B::Device, block: MemoryBlock) { + // trace!("Free block '{:#?}'", block); + let memory_index = block.memory_index; + debug_assert!(fits_usize(memory_index)); + let size = block.size(); + + let ref mut memory_type = self.types[memory_index as usize]; + let ref mut memory_heap = self.heaps[memory_type.heap_index()]; + let freed = memory_type.free(device, block.block); + memory_heap.freed(freed, size); + } + + + + + pub fn dispose(self, device: &B::Device) { + for mt in self.types { + mt.dispose(device) + } + } + + + pub fn utilization(&self) -> TotalMemoryUtilization { + TotalMemoryUtilization { + heaps: self.heaps.iter().map(MemoryHeap::utilization).collect(), + types: self.types.iter().map(MemoryType::utilization).collect(), + } + } +} + + +#[derive(Debug)] +pub struct MemoryBlock { + block: BlockFlavor, + memory_index: u32, +} + +impl MemoryBlock +where + B: gfx_hal::Backend, +{ + + pub fn memory_type(&self) -> u32 { + self.memory_index + } +} + +#[derive(Debug)] +enum BlockFlavor { + Dedicated(DedicatedBlock), + Linear(LinearBlock), + Dynamic(DynamicBlock), + +} + +macro_rules! any_block { + ($self:ident. $block:ident => $expr:expr) => {{ + use self::BlockFlavor::*; + match $self.$block { + Dedicated($block) => $expr, + Linear($block) => $expr, + Dynamic($block) => $expr, + // Chunk($block) => $expr, + } + }}; + (& $self:ident. $block:ident => $expr:expr) => {{ + use self::BlockFlavor::*; + match &$self.$block { + Dedicated($block) => $expr, + Linear($block) => $expr, + Dynamic($block) => $expr, + // Chunk($block) => $expr, + } + }}; + (&mut $self:ident. $block:ident => $expr:expr) => {{ + use self::BlockFlavor::*; + match &mut $self.$block { + Dedicated($block) => $expr, + Linear($block) => $expr, + Dynamic($block) => $expr, + // Chunk($block) => $expr, + } + }}; +} + +impl BlockFlavor +where + B: gfx_hal::Backend, +{ + #[inline] + fn size(&self) -> u64 { + use self::BlockFlavor::*; + match self { + Dedicated(block) => block.size(), + Linear(block) => block.size(), + Dynamic(block) => block.size(), + + } + } +} + +impl Block for MemoryBlock +where + B: gfx_hal::Backend, +{ + #[inline] + fn properties(&self) -> gfx_hal::memory::Properties { + any_block!(&self.block => block.properties()) + } + + #[inline] + fn memory(&self) -> &B::Memory { + any_block!(&self.block => block.memory()) + } + + #[inline] + fn range(&self) -> Range { + any_block!(&self.block => block.range()) + } + + fn map<'a>( + &'a mut self, + device: &B::Device, + range: Range, + ) -> Result, gfx_hal::device::MapError> { + any_block!(&mut self.block => block.map(device, range)) + } + + fn unmap(&mut self, device: &B::Device) { + any_block!(&mut self.block => block.unmap(device)) + } +} diff --git a/third_party/rust/rendy-memory/src/lib.rs b/third_party/rust/rendy-memory/src/lib.rs new file mode 100644 index 000000000000..6996fd68cf90 --- /dev/null +++ b/third_party/rust/rendy-memory/src/lib.rs @@ -0,0 +1,31 @@ + + + +#![warn( + missing_debug_implementations, + missing_copy_implementations, + missing_docs, + trivial_casts, + trivial_numeric_casts, + unused_extern_crates, + unused_import_braces, + unused_qualifications +)] +mod allocator; +mod block; +mod heaps; +mod mapping; +mod memory; +mod usage; +mod util; +mod utilization; + +pub use crate::{ + allocator::*, + block::Block, + heaps::{Heaps, HeapsConfig, HeapsError, MemoryBlock}, + mapping::{write::Write, Coherent, MappedRange, MaybeCoherent, NonCoherent}, + memory::Memory, + usage::*, + utilization::*, +}; diff --git 
a/third_party/rust/rendy-memory/src/mapping/mod.rs b/third_party/rust/rendy-memory/src/mapping/mod.rs new file mode 100644 index 000000000000..c13c8e5836df --- /dev/null +++ b/third_party/rust/rendy-memory/src/mapping/mod.rs @@ -0,0 +1,288 @@ +mod range; +pub(crate) mod write; + +use { + crate::{memory::Memory, util::fits_usize}, + gfx_hal::{device::Device as _, Backend}, + std::{ops::Range, ptr::NonNull}, +}; + +pub(crate) use self::range::{ + mapped_fitting_range, mapped_slice, mapped_slice_mut, mapped_sub_range, +}; +use self::write::{Write, WriteCoherent, WriteFlush}; + + +#[derive(Clone, Copy, Debug)] +pub struct NonCoherent; + + +#[derive(Clone, Copy, Debug)] +pub struct Coherent; + + +#[derive(Clone, Copy, Debug)] +pub struct MaybeCoherent(bool); + + + +#[derive(Debug)] +pub struct MappedRange<'a, B: Backend, C = MaybeCoherent> { + + memory: &'a Memory, + + + ptr: NonNull, + + + range: Range, + + + coherent: C, +} + +impl<'a, B> MappedRange<'a, B> +where + B: Backend, +{ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + pub unsafe fn from_raw(memory: &'a Memory, ptr: NonNull, range: Range) -> Self { + assert!( + range.start < range.end, + "Memory mapping region must have valid size" + ); + MappedRange { + ptr, + range, + memory, + coherent: MaybeCoherent(memory.host_coherent()), + } + } + + + + pub fn ptr(&self) -> NonNull { + self.ptr + } + + + pub fn range(&self) -> Range { + self.range.clone() + } + + + + + + + + + + + pub unsafe fn read<'b, T>( + &'b mut self, + device: &B::Device, + range: Range, + ) -> Result<&'b [T], gfx_hal::device::MapError> + where + 'a: 'b, + T: Copy, + { + assert!( + range.start < range.end, + "Memory mapping region must have valid size" + ); + assert!( + fits_usize(range.end - range.start), + "Range length must fit in usize" + ); + + let (ptr, range) = mapped_sub_range(self.ptr, self.range.clone(), range) + .ok_or_else(|| gfx_hal::device::MapError::OutOfBounds)?; + + let size = (range.end - range.start) as usize; + + if self.coherent.0 { + device + .invalidate_mapped_memory_ranges(Some((self.memory.raw(), self.range.clone())))?; + } + + let slice = mapped_slice::(ptr, size); + Ok(slice) + } + + + + + + + + pub unsafe fn write<'b, T: 'b>( + &'b mut self, + device: &'b B::Device, + range: Range, + ) -> Result + 'b, gfx_hal::device::MapError> + where + 'a: 'b, + T: Copy, + { + assert!( + range.start < range.end, + "Memory mapping region must have valid size" + ); + assert!( + fits_usize(range.end - range.start), + "Range length must fit in usize" + ); + + let (ptr, range) = mapped_sub_range(self.ptr, self.range.clone(), range) + .ok_or_else(|| gfx_hal::device::MapError::OutOfBounds)?; + + let size = (range.end - range.start) as usize; + + if !self.coherent.0 { + device + .invalidate_mapped_memory_ranges(Some((self.memory.raw(), self.range.clone())))?; + } + + let slice = mapped_slice_mut::(ptr, size); + + let ref memory = self.memory; + + Ok(WriteFlush { + slice, + flush: if !self.coherent.0 { + Some(move || { + device + .flush_mapped_memory_ranges(Some((memory.raw(), range))) + .expect("Should flush successfully"); + }) + } else { + None + }, + }) + } + + + pub fn coherent(self) -> Result, MappedRange<'a, B, NonCoherent>> { + if self.coherent.0 { + Ok(MappedRange { + memory: self.memory, + ptr: self.ptr, + range: self.range, + coherent: Coherent, + }) + } else { + Err(MappedRange { + memory: self.memory, + ptr: self.ptr, + range: self.range, + coherent: NonCoherent, + }) + } + } +} + +impl<'a, B> From> for MappedRange<'a, 
B> +where + B: Backend, +{ + fn from(range: MappedRange<'a, B, Coherent>) -> Self { + MappedRange { + memory: range.memory, + ptr: range.ptr, + range: range.range, + coherent: MaybeCoherent(true), + } + } +} + +impl<'a, B> From> for MappedRange<'a, B> +where + B: Backend, +{ + fn from(range: MappedRange<'a, B, NonCoherent>) -> Self { + MappedRange { + memory: range.memory, + ptr: range.ptr, + range: range.range, + coherent: MaybeCoherent(false), + } + } +} + +impl<'a, B> MappedRange<'a, B, Coherent> +where + B: Backend, +{ + + + + + + pub unsafe fn write<'b, U: 'b>( + &'b mut self, + range: Range, + ) -> Result + 'b, gfx_hal::device::MapError> + where + U: Copy, + { + assert!( + range.start < range.end, + "Memory mapping region must have valid size" + ); + assert!( + fits_usize(range.end - range.start), + "Range length must fit in usize" + ); + + let (ptr, range) = mapped_sub_range(self.ptr, self.range.clone(), range) + .ok_or_else(|| gfx_hal::device::MapError::OutOfBounds)?; + + let size = (range.end - range.start) as usize; + + let slice = mapped_slice_mut::(ptr, size); + + Ok(WriteCoherent { slice }) + } +} diff --git a/third_party/rust/rendy-memory/src/mapping/range.rs b/third_party/rust/rendy-memory/src/mapping/range.rs new file mode 100644 index 000000000000..f15b80ab3694 --- /dev/null +++ b/third_party/rust/rendy-memory/src/mapping/range.rs @@ -0,0 +1,101 @@ +use { + crate::util::fits_usize, + std::{ + mem::{align_of, size_of}, + ops::Range, + ptr::NonNull, + slice::{from_raw_parts, from_raw_parts_mut}, + }, +}; + + + + + + +pub(crate) fn mapped_fitting_range( + ptr: NonNull, + range: Range, + fitting: Range, +) -> Option> { + assert!( + range.start < range.end, + "Memory mapping region must have valid size" + ); + assert!( + fitting.start < fitting.end, + "Memory mapping region must have valid size" + ); + assert!(fits_usize(range.end - range.start)); + assert!(usize::max_value() - (range.end - range.start) as usize >= ptr.as_ptr() as usize); + + if fitting.start < range.start || fitting.end > range.end { + None + } else { + Some(unsafe { + + NonNull::new_unchecked( + (ptr.as_ptr() as usize + (fitting.start - range.start) as usize) as *mut u8, + ) + }) + } +} + + + + + + + +pub(crate) fn mapped_sub_range( + ptr: NonNull, + range: Range, + sub: Range, +) -> Option<(NonNull, Range)> { + let fitting = sub.start.checked_add(range.start)?..sub.end.checked_add(range.start)?; + let ptr = mapped_fitting_range(ptr, range, fitting.clone())?; + Some((ptr, fitting)) +} + + + + + + + +pub(crate) unsafe fn mapped_slice_mut<'a, T>(ptr: NonNull, size: usize) -> &'a mut [T] { + assert_eq!( + size % size_of::(), + 0, + "Range length must be multiple of element size" + ); + let offset = ptr.as_ptr() as usize; + assert_eq!( + offset % align_of::(), + 0, + "Range offset must be multiple of element alignment" + ); + assert!(usize::max_value() - size >= ptr.as_ptr() as usize); + from_raw_parts_mut(ptr.as_ptr() as *mut T, size) +} + + + + + + +pub(crate) unsafe fn mapped_slice<'a, T>(ptr: NonNull, size: usize) -> &'a [T] { + assert_eq!( + size % size_of::(), + 0, + "Range length must be multiple of element size" + ); + let offset = ptr.as_ptr() as usize; + assert_eq!( + offset % align_of::(), + 0, + "Range offset must be multiple of element alignment" + ); + assert!(usize::max_value() - size >= ptr.as_ptr() as usize); + from_raw_parts(ptr.as_ptr() as *const T, size) +} diff --git a/third_party/rust/rendy-memory/src/mapping/write.rs b/third_party/rust/rendy-memory/src/mapping/write.rs new file mode 100644 
index 000000000000..8e7a6db5f072 --- /dev/null +++ b/third_party/rust/rendy-memory/src/mapping/write.rs @@ -0,0 +1,73 @@ +use std::ptr::copy_nonoverlapping; + + +pub trait Write { + + + + + + unsafe fn slice(&mut self) -> &mut [T]; + + + + + + + fn write(&mut self, data: &[T]) { + unsafe { + let slice = self.slice(); + assert!(data.len() <= slice.len()); + copy_nonoverlapping(data.as_ptr(), slice.as_mut_ptr(), data.len()); + } + } +} + +#[derive(Debug)] +pub(super) struct WriteFlush<'a, T, F: FnOnce() + 'a> { + pub(super) slice: &'a mut [T], + pub(super) flush: Option, +} + +impl<'a, T, F> Drop for WriteFlush<'a, T, F> +where + T: 'a, + F: FnOnce() + 'a, +{ + fn drop(&mut self) { + if let Some(f) = self.flush.take() { + f(); + } + } +} + +impl<'a, T, F> Write for WriteFlush<'a, T, F> +where + T: Copy + 'a, + F: FnOnce() + 'a, +{ + + + + unsafe fn slice(&mut self) -> &mut [T] { + self.slice + } +} + +#[warn(dead_code)] +#[derive(Debug)] +pub(super) struct WriteCoherent<'a, T> { + pub(super) slice: &'a mut [T], +} + +impl<'a, T> Write for WriteCoherent<'a, T> +where + T: Copy + 'a, +{ + + + + unsafe fn slice(&mut self) -> &mut [T] { + self.slice + } +} diff --git a/third_party/rust/rendy-memory/src/memory.rs b/third_party/rust/rendy-memory/src/memory.rs new file mode 100644 index 000000000000..d3816903d770 --- /dev/null +++ b/third_party/rust/rendy-memory/src/memory.rs @@ -0,0 +1,82 @@ + + + + +#[derive(Debug)] +pub struct Memory { + raw: B::Memory, + size: u64, + properties: gfx_hal::memory::Properties, + relevant: relevant::Relevant, +} + +impl Memory +where + B: gfx_hal::Backend, +{ + + pub fn properties(&self) -> gfx_hal::memory::Properties { + self.properties + } + + + pub fn size(&self) -> u64 { + self.size + } + + + pub fn raw(&self) -> &B::Memory { + &self.raw + } + + + pub fn into_raw(self) -> B::Memory { + self.relevant.dispose(); + self.raw + } + + + + + + + pub unsafe fn from_raw( + raw: B::Memory, + size: u64, + properties: gfx_hal::memory::Properties, + ) -> Self { + Memory { + properties, + raw, + size, + relevant: relevant::Relevant, + } + } + + + + pub fn host_visible(&self) -> bool { + self.properties + .contains(gfx_hal::memory::Properties::CPU_VISIBLE) + } + + + + pub fn host_coherent(&self) -> bool { + self.properties + .contains(gfx_hal::memory::Properties::COHERENT) + } +} + + + + + + + + + + + + + diff --git a/third_party/rust/rendy-memory/src/usage.rs b/third_party/rust/rendy-memory/src/usage.rs new file mode 100644 index 000000000000..b2134ecaf927 --- /dev/null +++ b/third_party/rust/rendy-memory/src/usage.rs @@ -0,0 +1,210 @@ + + + +use crate::allocator::Kind; + + +pub trait MemoryUsage: std::fmt::Debug { + + fn properties_required(&self) -> gfx_hal::memory::Properties; + + + + + + + fn memory_fitness(&self, properties: gfx_hal::memory::Properties) -> u32; + + + fn allocator_fitness(&self, kind: Kind) -> u32; +} + +impl MemoryUsage for T +where + T: std::ops::Deref + std::fmt::Debug, + T::Target: MemoryUsage, +{ + fn properties_required(&self) -> gfx_hal::memory::Properties { + (&**self).properties_required() + } + fn memory_fitness(&self, properties: gfx_hal::memory::Properties) -> u32 { + (&**self).memory_fitness(properties) + } + fn allocator_fitness(&self, kind: Kind) -> u32 { + (&**self).allocator_fitness(kind) + } +} + + + + +#[derive(Clone, Copy, Debug)] +pub struct Data; + +impl MemoryUsage for Data { + fn properties_required(&self) -> gfx_hal::memory::Properties { + gfx_hal::memory::Properties::DEVICE_LOCAL + } + + #[inline] + fn memory_fitness(&self, 
properties: gfx_hal::memory::Properties) -> u32 { + assert!(properties.contains(gfx_hal::memory::Properties::DEVICE_LOCAL)); + 0 | ((!properties.contains(gfx_hal::memory::Properties::CPU_VISIBLE)) as u32) << 3 + | ((!properties.contains(gfx_hal::memory::Properties::LAZILY_ALLOCATED)) as u32) << 2 + | ((!properties.contains(gfx_hal::memory::Properties::CPU_CACHED)) as u32) << 1 + | ((!properties.contains(gfx_hal::memory::Properties::COHERENT)) as u32) << 0 + } + + fn allocator_fitness(&self, kind: Kind) -> u32 { + match kind { + Kind::Dedicated => 1, + Kind::Dynamic => 2, + Kind::Linear => 0, + } + } +} + + + + + +#[derive(Clone, Copy, Debug)] +pub struct Dynamic; + +impl MemoryUsage for Dynamic { + fn properties_required(&self) -> gfx_hal::memory::Properties { + gfx_hal::memory::Properties::CPU_VISIBLE + } + + #[inline] + fn memory_fitness(&self, properties: gfx_hal::memory::Properties) -> u32 { + assert!(properties.contains(gfx_hal::memory::Properties::CPU_VISIBLE)); + assert!(!properties.contains(gfx_hal::memory::Properties::LAZILY_ALLOCATED)); + + 0 | (properties.contains(gfx_hal::memory::Properties::DEVICE_LOCAL) as u32) << 2 + | (properties.contains(gfx_hal::memory::Properties::COHERENT) as u32) << 1 + | ((!properties.contains(gfx_hal::memory::Properties::CPU_CACHED)) as u32) << 0 + } + + fn allocator_fitness(&self, kind: Kind) -> u32 { + match kind { + Kind::Dedicated => 1, + Kind::Dynamic => 2, + Kind::Linear => 0, + } + } +} + + + + +#[derive(Clone, Copy, Debug)] +pub struct Upload; + +impl MemoryUsage for Upload { + fn properties_required(&self) -> gfx_hal::memory::Properties { + gfx_hal::memory::Properties::CPU_VISIBLE + } + + #[inline] + fn memory_fitness(&self, properties: gfx_hal::memory::Properties) -> u32 { + assert!(properties.contains(gfx_hal::memory::Properties::CPU_VISIBLE)); + assert!(!properties.contains(gfx_hal::memory::Properties::LAZILY_ALLOCATED)); + + 0 | ((!properties.contains(gfx_hal::memory::Properties::DEVICE_LOCAL)) as u32) << 2 + | (properties.contains(gfx_hal::memory::Properties::COHERENT) as u32) << 1 + | ((!properties.contains(gfx_hal::memory::Properties::CPU_CACHED)) as u32) << 0 + } + + fn allocator_fitness(&self, kind: Kind) -> u32 { + match kind { + Kind::Dedicated => 0, + Kind::Dynamic => 1, + Kind::Linear => 2, + } + } +} + + + + +#[derive(Clone, Copy, Debug)] +pub struct Download; + +impl MemoryUsage for Download { + fn properties_required(&self) -> gfx_hal::memory::Properties { + gfx_hal::memory::Properties::CPU_VISIBLE + } + + #[inline] + fn memory_fitness(&self, properties: gfx_hal::memory::Properties) -> u32 { + assert!(properties.contains(gfx_hal::memory::Properties::CPU_VISIBLE)); + assert!(!properties.contains(gfx_hal::memory::Properties::LAZILY_ALLOCATED)); + + 0 | ((!properties.contains(gfx_hal::memory::Properties::DEVICE_LOCAL)) as u32) << 2 + | (properties.contains(gfx_hal::memory::Properties::CPU_CACHED) as u32) << 1 + | (properties.contains(gfx_hal::memory::Properties::COHERENT) as u32) << 0 + } + + fn allocator_fitness(&self, kind: Kind) -> u32 { + match kind { + Kind::Dedicated => 0, + Kind::Dynamic => 1, + Kind::Linear => 2, + } + } +} + + +#[derive(Clone, Copy, Debug, PartialEq, Eq)] +pub enum MemoryUsageValue { + + + + Data, + + + + + Dynamic, + + + + + Upload, + + + + + Download, +} + + +impl MemoryUsage for MemoryUsageValue { + fn properties_required(&self) -> gfx_hal::memory::Properties { + match self { + MemoryUsageValue::Data => Data.properties_required(), + MemoryUsageValue::Dynamic => Dynamic.properties_required(), + 
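+ // Each arm delegates to the corresponding zero-sized marker type
+ // (Data, Dynamic, Upload, Download), keeping the runtime enum and
+ // the compile-time usage types in sync by construction.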
MemoryUsageValue::Upload => Upload.properties_required(), + MemoryUsageValue::Download => Download.properties_required(), + } + } + + fn memory_fitness(&self, properties: gfx_hal::memory::Properties) -> u32 { + match self { + MemoryUsageValue::Data => Data.memory_fitness(properties), + MemoryUsageValue::Dynamic => Dynamic.memory_fitness(properties), + MemoryUsageValue::Upload => Upload.memory_fitness(properties), + MemoryUsageValue::Download => Download.memory_fitness(properties), + } + } + + fn allocator_fitness(&self, kind: Kind) -> u32 { + match self { + MemoryUsageValue::Data => Data.allocator_fitness(kind), + MemoryUsageValue::Dynamic => Dynamic.allocator_fitness(kind), + MemoryUsageValue::Upload => Upload.allocator_fitness(kind), + MemoryUsageValue::Download => Download.allocator_fitness(kind), + } + } +} diff --git a/third_party/rust/rendy-memory/src/util.rs b/third_party/rust/rendy-memory/src/util.rs new file mode 100644 index 000000000000..8ce109bb8f60 --- /dev/null +++ b/third_party/rust/rendy-memory/src/util.rs @@ -0,0 +1,125 @@ +pub(crate) fn aligned(value: u64, align: u64) -> u64 { + debug_assert_ne!(align, 0); + debug_assert_eq!(align.count_ones(), 1); + if value == 0 { + 0 + } else { + 1u64 + ((value - 1u64) | (align - 1u64)) + } +} + +pub(crate) trait IntegerFitting { + fn fits_usize(self) -> bool; + fn fits_isize(self) -> bool; + + fn usize_fits(value: usize) -> bool; + fn isize_fits(value: isize) -> bool; +} + +#[cfg(any(target_pointer_width = "16", target_pointer_width = "32"))] +impl IntegerFitting for u64 { + fn fits_usize(self) -> bool { + self <= usize::max_value() as u64 + } + fn fits_isize(self) -> bool { + self <= isize::max_value() as u64 + } + fn usize_fits(_value: usize) -> bool { + true + } + fn isize_fits(value: isize) -> bool { + value >= 0 + } +} + +#[cfg(target_pointer_width = "64")] +impl IntegerFitting for u64 { + fn fits_usize(self) -> bool { + true + } + fn fits_isize(self) -> bool { + self <= isize::max_value() as u64 + } + fn usize_fits(_value: usize) -> bool { + true + } + fn isize_fits(value: isize) -> bool { + value >= 0 + } +} + +#[cfg(not(any( + target_pointer_width = "16", + target_pointer_width = "32", + target_pointer_width = "64" +)))] +impl IntegerFitting for u64 { + fn fits_usize(self) -> bool { + true + } + fn fits_isize(self) -> bool { + true + } + fn usize_fits(value: usize) -> bool { + value <= u64::max_value() as usize + } + fn isize_fits(value: isize) -> bool { + value >= 0 && value <= u64::max_value() as isize + } +} + +#[cfg(target_pointer_width = "16")] +impl IntegerFitting for u32 { + fn fits_usize(self) -> bool { + self <= usize::max_value() as u32 + } + fn fits_isize(self) -> bool { + self <= isize::max_value() as u32 + } + fn usize_fits(_value: usize) -> bool { + true + } + fn isize_fits(value: isize) -> bool { + value >= 0 + } +} + +#[cfg(target_pointer_width = "32")] +impl IntegerFitting for u32 { + fn fits_usize(self) -> bool { + true + } + fn fits_isize(self) -> bool { + self <= isize::max_value() as u32 + } + fn usize_fits(_value: usize) -> bool { + true + } + fn isize_fits(value: isize) -> bool { + value >= 0 + } +} + +#[cfg(not(any(target_pointer_width = "16", target_pointer_width = "32")))] +impl IntegerFitting for u32 { + fn fits_usize(self) -> bool { + true + } + fn fits_isize(self) -> bool { + true + } + fn usize_fits(value: usize) -> bool { + value <= u32::max_value() as usize + } + fn isize_fits(value: isize) -> bool { + value >= 0 && value <= u32::max_value() as isize + } +} + +pub(crate) fn fits_usize(value: T) 
-> bool { + value.fits_usize() +} + +pub(crate) fn fits_u32(value: usize) -> bool { + u32::usize_fits(value) +} diff --git a/third_party/rust/rendy-memory/src/utilization.rs b/third_party/rust/rendy-memory/src/utilization.rs new file mode 100644 index 000000000000..fdf7962b676c --- /dev/null +++ b/third_party/rust/rendy-memory/src/utilization.rs @@ -0,0 +1,137 @@ +use { + colorful::{core::color_string::CString, Color, Colorful as _}, + gfx_hal::memory::Properties, +}; + + +#[derive(Clone, Copy, Debug)] +pub struct MemoryUtilization { + + pub used: u64, + + pub effective: u64, +} + + +#[derive(Clone, Copy, Debug)] +pub struct MemoryHeapUtilization { + + pub utilization: MemoryUtilization, + + + pub size: u64, +} + + +#[derive(Clone, Copy, Debug)] +pub struct MemoryTypeUtilization { + + pub utilization: MemoryUtilization, + + + pub properties: Properties, + + + pub heap_index: usize, +} + + +#[derive(Clone, Debug)] +pub struct TotalMemoryUtilization { + + pub types: Vec, + + + pub heaps: Vec, +} + +impl std::fmt::Display for TotalMemoryUtilization { + fn fmt(&self, fmt: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + const MB: u64 = 1024 * 1024; + + writeln!(fmt, "!!! Memory utilization !!!")?; + for (index, heap) in self.heaps.iter().enumerate() { + let size = heap.size; + let MemoryUtilization { used, effective } = heap.utilization; + let usage_basis_points = used * 10000 / size; + let fill = if usage_basis_points > 10000 { + + 50 + } else { + (usage_basis_points / 200) as usize + }; + let effective_basis_points = if used > 0 { + effective * 10000 / used + } else { + 10000 + }; + + let line = ("|".repeat(fill) + &(" ".repeat(50 - fill))) + .gradient_with_color(Color::Green, Color::Red); + writeln!( + fmt, + "Heap {}:\n{:6} / {:<6} or{} {{ effective:{} }} [{}]", + format!("{}", index).magenta(), + format!("{}MB", used / MB), + format!("{}MB", size / MB), + format_basis_points(usage_basis_points), + format_basis_points_inverted(effective_basis_points), + line + )?; + + for ty in self.types.iter().filter(|ty| ty.heap_index == index) { + let properties = ty.properties; + let MemoryUtilization { used, effective } = ty.utilization; + let usage_basis_points = used * 10000 / size; + let effective_basis_points = if used > 0 { + effective * 10000 / used + } else { + 0 + }; + + writeln!( + fmt, + " {:>6} or{} {{ effective:{} }} | {:?}", + format!("{}MB", used / MB), + format_basis_points(usage_basis_points), + format_basis_points_inverted(effective_basis_points), + properties, + )?; + } + } + + Ok(()) + } +} + +fn format_basis_points(basis_points: u64) -> CString { + debug_assert!(basis_points <= 10000); + let s = format!("{:>3}.{:02}%", basis_points / 100, basis_points % 100); + if basis_points > 7500 { + s.red() + } else if basis_points > 5000 { + s.yellow() + } else if basis_points > 2500 { + s.green() + } else if basis_points > 100 { + s.blue() + } else { + s.white() + } +} + +fn format_basis_points_inverted(basis_points: u64) -> CString { + debug_assert!(basis_points <= 10000); + let s = format!("{:>3}.{:02}%", basis_points / 100, basis_points % 100); + if basis_points > 9900 { + s.white() + } else if basis_points > 7500 { + s.blue() + } else if basis_points > 5000 { + s.green() + } else if basis_points > 2500 { + s.yellow() + } else { + s.red() + } +} diff --git a/third_party/rust/shared_library/.cargo-checksum.json b/third_party/rust/shared_library/.cargo-checksum.json new file mode 100644 index 000000000000..0b16e8307b7b --- /dev/null +++ 
b/third_party/rust/shared_library/.cargo-checksum.json @@ -0,0 +1 @@ +{"files":{"Cargo.toml":"f9ad715b6b6424f37f903a069039d3567d46e426d98d11d4e62a9a3933691e5b","LICENSE-APACHE":"c144680885b29e4719e2a51f0aab5439a1e02d980692b5aaf086cae12727f28b","LICENSE-MIT":"1c07d19ccbe2578665ab7d8c63f71559f890eb8d2a82fa39d0206b7a3414064f","src/dynamic_library.rs":"973df715d4ae2daae662392d73ca853b9bacdb4165bab3e4d8343427dca55c9c","src/lib.rs":"29f1aef9437d1ab891d17d6a6b86c6e1176813d372333cfdfc063b97586deb02"},"package":"5a9e7e0f2bfae24d8a5b5a66c5b257a83c7412304311512a0c054cd5e619da11"} \ No newline at end of file diff --git a/third_party/rust/shared_library/Cargo.toml b/third_party/rust/shared_library/Cargo.toml new file mode 100644 index 000000000000..1d7d58be91fd --- /dev/null +++ b/third_party/rust/shared_library/Cargo.toml @@ -0,0 +1,24 @@ +# THIS FILE IS AUTOMATICALLY GENERATED BY CARGO +# +# When uploading crates to the registry Cargo will automatically +# "normalize" Cargo.toml files for maximal compatibility +# with all versions of Cargo and also rewrite `path` dependencies +# to registry (e.g. crates.io) dependencies +# +# If you believe there's an error in this file please file an +# issue against the rust-lang/cargo repository. If you're +# editing this file be aware that the upstream Cargo.toml +# will likely look very different (and much more reasonable) + +[package] +name = "shared_library" +version = "0.1.9" +authors = ["Pierre Krieger "] +description = "Easily bind to and load shared libraries" +license = "Apache-2.0/MIT" +repository = "https://github.com/tomaka/shared_library/" +[dependencies.lazy_static] +version = "1" + +[dependencies.libc] +version = "0.2" diff --git a/third_party/rust/shared_library/LICENSE-APACHE b/third_party/rust/shared_library/LICENSE-APACHE new file mode 100644 index 000000000000..1b22bef9c78a --- /dev/null +++ b/third_party/rust/shared_library/LICENSE-APACHE @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + +TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + +1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. 
+ + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + +2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + +3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + +4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + +5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + +6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + +7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + +8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + +9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + +END OF TERMS AND CONDITIONS + +APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + +Copyright [yyyy] [name of copyright owner] + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. \ No newline at end of file diff --git a/third_party/rust/shared_library/LICENSE-MIT b/third_party/rust/shared_library/LICENSE-MIT new file mode 100644 index 000000000000..4f2b149cd621 --- /dev/null +++ b/third_party/rust/shared_library/LICENSE-MIT @@ -0,0 +1,25 @@ +Copyright (c) 2017 Pierre Krieger + +Permission is hereby granted, free of charge, to any +person obtaining a copy of this software and associated +documentation files (the "Software"), to deal in the +Software without restriction, including without +limitation the rights to use, copy, modify, merge, +publish, distribute, sublicense, and/or sell copies of +the Software, and to permit persons to whom the Software +is furnished to do so, subject to the following +conditions: + +The above copyright notice and this permission notice +shall be included in all copies or substantial portions +of the Software. 
+ +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF +ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED +TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A +PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY +CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION +OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR +IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER +DEALINGS IN THE SOFTWARE. \ No newline at end of file diff --git a/third_party/rust/shared_library/src/dynamic_library.rs b/third_party/rust/shared_library/src/dynamic_library.rs new file mode 100644 index 000000000000..2bea28e74e59 --- /dev/null +++ b/third_party/rust/shared_library/src/dynamic_library.rs @@ -0,0 +1,410 @@ + + + + + + + + + + + + + + +#![allow(missing_docs)] + +use std::env; +use std::ffi::{CString, OsString}; +use std::mem; +use std::path::{Path, PathBuf}; +use libc; + +pub struct DynamicLibrary { + handle: *mut u8 +} + +unsafe impl Send for DynamicLibrary {} +unsafe impl Sync for DynamicLibrary {} + +impl Drop for DynamicLibrary { + fn drop(&mut self) { + if let Err(str) = dl::check_for_errors_in(|| unsafe { + dl::close(self.handle) + }) { + panic!("{}", str) + } + } +} + + + + + + +#[cfg(target_os = "linux")] +pub enum SpecialHandles { + Next, + Default, +} + +impl DynamicLibrary { + + + + + + + + + + + + + pub fn open(filename: Option<&Path>) -> Result { + + + + dl::open(filename.map(|path| path.as_os_str())) + .map(|handle| DynamicLibrary { handle }) + } + + + pub fn prepend_search_path(path: &Path) { + let mut search_path = Self::search_path(); + search_path.insert(0, path.to_path_buf()); + env::set_var(Self::envvar(), &Self::create_path(&search_path)); + } + + + + pub fn create_path(path: &[PathBuf]) -> OsString { + let mut newvar = OsString::new(); + for (i, path) in path.iter().enumerate() { + if i > 0 { newvar.push(Self::separator()); } + newvar.push(path); + } + newvar + } + + + + pub fn envvar() -> &'static str { + if cfg!(windows) { + "PATH" + } else if cfg!(target_os = "macos") { + "DYLD_LIBRARY_PATH" + } else { + "LD_LIBRARY_PATH" + } + } + + + fn separator() -> &'static str { + if cfg!(windows) { ";" } else { ":" } + } + + + + pub fn search_path() -> Vec { + match env::var_os(Self::envvar()) { + Some(var) => env::split_paths(&var).collect(), + None => Vec::new(), + } + } + + + + + + + pub unsafe fn symbol(&self, symbol: &str) -> Result<*mut T, String> { + + + + let raw_string = CString::new(symbol).unwrap(); + + + dl::check_for_errors_in(|| { + dl::symbol(self.handle as *mut libc::c_void, raw_string.as_ptr() as *const _) + }) + .map(|sym| mem::transmute(sym)) + } + + + + + + + + #[cfg(target_os = "linux")] + pub unsafe fn symbol_special(handle: SpecialHandles, symbol: &str) -> Result<*mut T, String> { + + + + let handle = match handle { + SpecialHandles::Next => mem::transmute::(-1), + SpecialHandles::Default => ::std::ptr::null_mut(), + }; + + let raw_string = CString::new(symbol).unwrap(); + + + dl::check_for_errors_in(|| { + dl::symbol(handle, raw_string.as_ptr() as *const _) + }) + .map(|sym| mem::transmute(sym)) + } +} + +#[cfg(all(test, not(target_os = "ios")))] +mod test { + use super::*; + use std::mem; + use std::path::Path; + + #[test] + #[cfg_attr(any(windows, target_os = "android"), ignore)] + fn test_loading_cosine() { + + + let libm = match DynamicLibrary::open(None) { + Err(error) => panic!("Could not load self as module: {}", error), + Ok(libm) => libm + }; + + let cosine: extern fn(libc::c_double) -> 
libc::c_double = unsafe { + match libm.symbol("cos") { + Err(error) => panic!("Could not load function cos: {}", error), + Ok(cosine) => mem::transmute::<*mut u8, _>(cosine) + } + }; + + let argument = 0.0; + let expected_result = 1.0; + let result = cosine(argument); + if result != expected_result { + panic!("cos({}) != {} but equaled {} instead", argument, + expected_result, result) + } + } + + #[test] + #[cfg(any(target_os = "linux", + target_os = "macos", + target_os = "freebsd", + target_os = "fuchsia", + target_os = "netbsd", + target_os = "dragonfly", + target_os = "bitrig", + target_os = "openbsd", + target_os = "solaris"))] + fn test_errors_do_not_crash() { + + + let path = Path::new("/dev/null"); + match DynamicLibrary::open(Some(&path)) { + Err(_) => {} + Ok(_) => panic!("Successfully opened the empty library.") + } + } +} + + +#[cfg(any(target_os = "linux", + target_os = "android", + target_os = "macos", + target_os = "ios", + target_os = "fuchsia", + target_os = "freebsd", + target_os = "netbsd", + target_os = "dragonfly", + target_os = "bitrig", + target_os = "openbsd", + target_os = "solaris", + target_os = "emscripten"))] +mod dl { + use std::ffi::{CString, CStr, OsStr}; + use std::os::unix::ffi::OsStrExt; + use std::str; + use libc; + use std::ptr; + use std::sync::Mutex; + + lazy_static! { + static ref LOCK: Mutex<()> = Mutex::new(()); + } + + pub fn open(filename: Option<&OsStr>) -> Result<*mut u8, String> { + check_for_errors_in(|| unsafe { + match filename { + Some(filename) => open_external(filename), + None => open_internal(), + } + }) + } + + const LAZY: libc::c_int = 1; + + unsafe fn open_external(filename: &OsStr) -> *mut u8 { + let s = CString::new(filename.as_bytes().to_vec()).unwrap(); + dlopen(s.as_ptr() as *const _, LAZY) as *mut u8 + } + + unsafe fn open_internal() -> *mut u8 { + dlopen(ptr::null(), LAZY) as *mut u8 + } + + pub fn check_for_errors_in(f: F) -> Result where + F: FnOnce() -> T, + { + unsafe { + + + let _guard = LOCK.lock(); + let _old_error = dlerror(); + + let result = f(); + + let last_error = dlerror() as *const _; + let ret = if ptr::null() == last_error { + Ok(result) + } else { + let s = CStr::from_ptr(last_error).to_bytes(); + Err(str::from_utf8(s).unwrap().to_string()) + }; + + ret + } + } + + pub unsafe fn symbol( + handle: *mut libc::c_void, + symbol: *const libc::c_char, + ) -> *mut u8 { + dlsym(handle, symbol) as *mut u8 + } + + pub unsafe fn close(handle: *mut u8) { + dlclose(handle as *mut libc::c_void); () + } + + extern { + fn dlopen( + filename: *const libc::c_char, + flag: libc::c_int, + ) -> *mut libc::c_void; + fn dlerror() -> *mut libc::c_char; + fn dlsym( + handle: *mut libc::c_void, + symbol: *const libc::c_char, + ) -> *mut libc::c_void; + fn dlclose( + handle: *mut libc::c_void, + ) -> libc::c_int; + } +} + +#[cfg(target_os = "windows")] +mod dl { + use std::ffi::OsStr; + use std::iter::Iterator; + use libc; + use std::ops::FnOnce; + use std::io::Error as IoError; + use std::os::windows::prelude::*; + use std::option::Option::{self, Some, None}; + use std::ptr; + use std::result::Result; + use std::result::Result::{Ok, Err}; + use std::string::String; + use std::vec::Vec; + + pub fn open(filename: Option<&OsStr>) -> Result<*mut u8, String> { + + let prev_error_mode = unsafe { + + let new_error_mode = 1; + SetErrorMode(new_error_mode) + }; + + unsafe { + SetLastError(0); + } + + let result = match filename { + Some(filename) => { + let filename_str: Vec<_> = + filename.encode_wide().chain(Some(0).into_iter()).collect(); 
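+ // encode_wide() yields no terminating NUL, so a trailing 0 is
+ // chained on above; LoadLibraryW expects a NUL-terminated wide string.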
+ let result = unsafe { + LoadLibraryW(filename_str.as_ptr() as *const libc::c_void) + }; + + + if result == ptr::null_mut() { + Err(format!("{}", IoError::last_os_error())) + } else { + Ok(result as *mut u8) + } + } + None => { + let mut handle = ptr::null_mut(); + let succeeded = unsafe { + GetModuleHandleExW(0, ptr::null(), &mut handle) + }; + if succeeded == 0 { + Err(format!("{}", IoError::last_os_error())) + } else { + Ok(handle as *mut u8) + } + } + }; + + unsafe { + SetErrorMode(prev_error_mode); + } + + result + } + + pub fn check_for_errors_in(f: F) -> Result where + F: FnOnce() -> T, + { + unsafe { + SetLastError(0); + + let result = f(); + + let error = IoError::last_os_error(); + if 0 == error.raw_os_error().unwrap() { + Ok(result) + } else { + Err(format!("{}", error)) + } + } + } + + pub unsafe fn symbol(handle: *mut libc::c_void, symbol: *const libc::c_char) -> *mut u8 { + GetProcAddress(handle, symbol) as *mut u8 + } + pub unsafe fn close(handle: *mut u8) { + FreeLibrary(handle as *mut libc::c_void); () + } + + #[allow(non_snake_case)] + extern "system" { + fn SetLastError(error: libc::size_t); + fn LoadLibraryW(name: *const libc::c_void) -> *mut libc::c_void; + fn GetModuleHandleExW( + dwFlags: u32, + name: *const u16, + handle: *mut *mut libc::c_void, + ) -> i32; + fn GetProcAddress( + handle: *mut libc::c_void, + name: *const libc::c_char, + ) -> *mut libc::c_void; + fn FreeLibrary(handle: *mut libc::c_void); + fn SetErrorMode(uMode: libc::c_uint) -> libc::c_uint; + } +} diff --git a/third_party/rust/shared_library/src/lib.rs b/third_party/rust/shared_library/src/lib.rs new file mode 100644 index 000000000000..2ed8327adbf7 --- /dev/null +++ b/third_party/rust/shared_library/src/lib.rs @@ -0,0 +1,175 @@ +extern crate libc; + +#[macro_use] +extern crate lazy_static; + +pub mod dynamic_library; + + +#[derive(Debug, Clone)] +pub enum LoadingError { + + LibraryNotFound { + descr: String, + }, + + + SymbolNotFound { + + symbol: &'static str, + } +} + +#[macro_export] +macro_rules! 
shared_library { + ($struct_name:ident, pub $($rest:tt)+) => { + shared_library!(__impl $struct_name [] [] [] pub $($rest)+); + }; + + ($struct_name:ident, fn $($rest:tt)+) => { + shared_library!(__impl $struct_name [] [] [] fn $($rest)+); + }; + + ($struct_name:ident, static $($rest:tt)+) => { + shared_library!(__impl $struct_name [] [] [] static $($rest)+); + }; + + ($struct_name:ident, $def_path:expr, $($rest:tt)+) => { + shared_library!(__impl $struct_name [] [$def_path] [] $($rest)+); + }; + + (__impl $struct_name:ident + [$($p1:tt)*] [$($p2:tt)*] [$($p3:tt)*] + , $($rest:tt)* + ) => { + shared_library!(__impl $struct_name [$($p1)*] [$($p2)*] [$($p3)*] $($rest)*); + }; + + (__impl $struct_name:ident + [$($p1:tt)*] [$($p2:tt)*] [$($p3:tt)*] + pub $($rest:tt)* + ) => { + shared_library!(__impl $struct_name + [$($p1)*] [$($p2)*] [$($p3)* pub] $($rest)*); + }; + + (__impl $struct_name:ident + [$($p1:tt)*] [$($p2:tt)*] [$($p3:tt)*] + fn $name:ident($($p:ident:$ty:ty),*) -> $ret:ty, $($rest:tt)* + ) => { + shared_library!(__impl $struct_name + [$($p1)*, $name:unsafe extern fn($($p:$ty),*) -> $ret] + [$($p2)*] + [$($p3)* + unsafe fn $name($($p:$ty),*) -> $ret { + #![allow(dead_code)] + ($struct_name::get_static_ref().$name)($($p),*) + } + ] $($rest)*); + }; + + (__impl $struct_name:ident + [$($p1:tt)*] [$($p2:tt)*] [$($p3:tt)*] + static $name:ident:$ty:ty, $($rest:tt)* + ) => { + shared_library!(__impl $struct_name + [$($p1)*, $name: $ty] + [$($p2)*] + [$($p3)*] $($rest)*); + }; + + (__impl $struct_name:ident + [$($p1:tt)*] [$($p2:tt)*] [$($p3:tt)*] + fn $name:ident($($p:ident:$ty:ty),*), $($rest:tt)* + ) => { + shared_library!(__impl $struct_name + [$($p1)*] [$($p2)*] [$($p3)*] + fn $name($($p:$ty),*) -> (), $($rest)*); + }; + + (__impl $struct_name:ident [$(,$mem_n:ident:$mem_t:ty)+] [$($p2:tt)*] [$($p3:tt)*]) => { + /// Symbols loaded from a shared library. + #[allow(non_snake_case)] + pub struct $struct_name { + _library_guard: $crate::dynamic_library::DynamicLibrary, + $( + pub $mem_n: $mem_t, + )+ + } + + impl $struct_name { + /// Tries to open the dynamic library. + #[allow(non_snake_case)] + pub fn open(path: &::std::path::Path) -> Result<$struct_name, $crate::LoadingError> { + use std::mem; + + let dylib = match $crate::dynamic_library::DynamicLibrary::open(Some(path)) { + Ok(l) => l, + Err(reason) => return Err($crate::LoadingError::LibraryNotFound { descr: reason }) + }; + + $( + let $mem_n: *mut () = match unsafe { dylib.symbol(stringify!($mem_n)) } { + Ok(s) => s, + Err(_) => return Err($crate::LoadingError::SymbolNotFound { symbol: stringify!($mem_n) }), + }; + )+ + + Ok($struct_name { + _library_guard: dylib, + $( + $mem_n: unsafe { mem::transmute($mem_n) }, + )+ + }) + } + } + + shared_library!(__write_static_fns $struct_name [] [$($p2)*] [$($p3)*]); + }; + + (__write_static_fns $struct_name:ident [$($p1:tt)*] [] [$($p3:tt)*]) => { + }; + + (__write_static_fns $struct_name:ident [$($p1:tt)*] [$defpath:expr] [$($standalones:item)+]) => { + impl $struct_name { + /// This function is used by the regular functions. + fn get_static_ref() -> &'static $struct_name { + $struct_name::try_loading().ok() + .expect(concat!("Could not open dynamic \ + library `", stringify!($struct_name), + "`")) + } + + /// Try loading the static symbols linked to this library. 
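To make the macro concrete, a hypothetical invocation (the library name and symbols here are illustrative, not from this patch):

    shared_library!(LibExample, "libexample.so",
        fn example_add(a: i32, b: i32) -> i32,
        static example_version: *const i8,
    );

    // LibExample::open(path) loads an explicit path; the generated
    // try_loading() below (used by get_static_ref above) lazily opens
    // the default path.
    // let lib = LibExample::open(std::path::Path::new("libexample.so"))?;
    // let three = unsafe { (lib.example_add)(1, 2) };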
+ pub fn try_loading() -> Result<&'static $struct_name, $crate::LoadingError> { + use std::sync::{Mutex, Once, ONCE_INIT}; + use std::mem; + + unsafe { + static mut DATA: *const Mutex> = 0 as *const _; + + static mut INIT: Once = ONCE_INIT; + INIT.call_once(|| { + let data = Box::new(Mutex::new(None)); + DATA = &*data; + mem::forget(data); + }); + + let data: &Mutex> = &*DATA; + let mut data = data.lock().unwrap(); + + if let Some(ref data) = *data { + return Ok(mem::transmute(data)); + } + + let path = ::std::path::Path::new($defpath); + let result = try!($struct_name::open(path)); + *data = Some(result); + Ok(mem::transmute(data.as_ref().unwrap())) + } + } + } + + $($standalones)+ + }; +} diff --git a/third_party/rust/spirv_cross/.cargo-checksum.json b/third_party/rust/spirv_cross/.cargo-checksum.json new file mode 100644 index 000000000000..01919d6eac47 --- /dev/null +++ b/third_party/rust/spirv_cross/.cargo-checksum.json @@ -0,0 +1 @@ +{"files":{"Cargo.toml":"8e60146336b8a4501f075c7f5f784ea426f10df4c1d5d48301d043908f025c81","build.rs":"975dd26cbee3c1987ed658e38fe2211224320ce7943d6ef7bd2735425280e89a","src/bindings_native.rs":"d999b7a89c4ab7f2b839995bac1cda77f63142c4fff3b6f6764e27dec774b45c","src/bindings_wasm.rs":"fce9a87f0af3d700e673cba68a771329e6a4841b9f048db5b5d648f83f6062b6","src/bindings_wasm_functions.rs":"3ea541791b3ea8f4881d813070c83549d0ee5e701d158a53942af415d42e7c6f","src/compiler.rs":"647370c53e95aacfd5f3c8b01821eb420630289cd66b5cfdc8f4f2d47dee90aa","src/emscripten.rs":"3169890001970610013026468739910afca0d85e00d7e34beadfdd31bbcbeeb7","src/glsl.rs":"0a80523899d168796a623d6ac1f65b0422843ec0bb29c6fe1bb5cb837ceee163","src/hlsl.rs":"f6b5a61e00cbabef4c64eda2640229ea82b130f4254d36d32dda7ced357cc213","src/lib.rs":"cc41cbbe48f3e96791ba5338c66fa1fe0e533eaed6bbdced3f008d5e9fe6c6ce","src/msl.rs":"a62d5a7d65fa0bc6d1e24db2c2ec11b850c0a1b5da7a71669e63f5639f064bc9","src/ptr_util.rs":"280404beede469b2c9ae40536323515a9213dac5d30014fac870a23b37672442","src/spirv.rs":"5dd16eb7402e70122459318ba9ac5aecda12837ed13ca5240d2e3c8611b67cbf","src/vendor/SPIRV-Cross/.clang-format":"9ec4314e20afecad827a2dbd4832256be8464e88aab4a53fab45173ed129b2ed","src/vendor/SPIRV-Cross/.gitignore":"7f23cc92ddb5e1f584447e98d3e8ab6543fc182f1543f0f6ec29856f9250cdd6","src/vendor/SPIRV-Cross/CMakeLists.txt":"a2a76ecacf1a0620e7a1c044c18fbe7210b4f9384cb8fd03095739f25b3d4f40","src/vendor/SPIRV-Cross/GLSL.std.450.h":"20f32378793c5f416bc0704f44345c2a14c99cba3f411e3beaf1bcea372d58ba","src/vendor/SPIRV-Cross/LICENSE":"cfc7749b96f63bd31c3c42b5c471bf756814053e847c10f3eb003417bc523d30","src/vendor/SPIRV-Cross/Makefile":"e2255d32e47d447b100ce3cd0753d0758dc26460e0440d14cc157d4caf9b62f1","src/vendor/SPIRV-Cross/cmake/gitversion.in.h":"75408b8a0cf86d6cf3e59d840c26ab28d3bda48f0c7f30001082a5ddf05d4184","src/vendor/SPIRV-Cross/format_all.sh":"7682215b1a669cd5a574489b3918e4009942b30a69f43d46bf68bd32a81ed399","src/vendor/SPIRV-Cross/gn/BUILD.gn":"340a042ebd24a9cdeb756b22627701a8df085349660645e4eac8531dd0024010","src/vendor/SPIRV-Cross/include/spirv_cross/barrier.hpp":"bb796625e89f75e239e92f9a61597d421ffe5fb1902d200691ebe95cf856a1f8","src/vendor/SPIRV-Cross/include/spirv_cross/external_interface.h":"cdceda962d87133e44989510edc944e99052d713869b406a8b6b2d54e3d02dd7","src/vendor/SPIRV-Cross/include/spirv_cross/image.hpp":"681d0964b144c5009424196a8bc832cb81cfe5df5b91c2f3e1bfb625765a0c50","src/vendor/SPIRV-Cross/include/spirv_cross/internal_interface.hpp":"ab8851e5708b944a9bf340ce17297d94bef4876d30c833ea83d44b16f60726f6","src/vendor/SPIRV-Cross/incl
ude/spirv_cross/sampler.hpp":"ee7c48bda908d1a5153acc6157afb35f3c66a84179ad6dea1adfdaa791a58b03","src/vendor/SPIRV-Cross/include/spirv_cross/thread_group.hpp":"70d9e0400f62de71d3775972eadc196ddb218254fa8155e8e33daf8d99957cc0","src/vendor/SPIRV-Cross/main.cpp":"059dca8074ec16a981c38449959d102b1b30983bccd80989ff91dc21e5105a74","src/vendor/SPIRV-Cross/pkg-config/spirv-cross-c-shared.pc.in":"cf4c55760569e296c5c2a0e306bb1af83272fb48a8d8ae1877b2196720129529","src/vendor/SPIRV-Cross/spirv.h":"7c2f6af34455c96957bad8a2d67197fbc6693308579d45e9740b5a9c330ca84a","src/vendor/SPIRV-Cross/spirv.hpp":"d937d4016e2fb8fca62838e0dec9f70d551751eaff07155f060750822373bc8b","src/vendor/SPIRV-Cross/spirv_cfg.cpp":"a7b47c8d05f96a9a51ac5a5d9d24cce65ea0661110ea499caf885a4dc0aa0bf4","src/vendor/SPIRV-Cross/spirv_cfg.hpp":"c803177e728e62e90856596d62b036c93d4a99dfc86edf597ea9597f0fbff8ea","src/vendor/SPIRV-Cross/spirv_common.hpp":"713ef166de2ac85b6a327110f98f21354dc6b4e8a112e0f3aa34543b2f5f36fc","src/vendor/SPIRV-Cross/spirv_cpp.cpp":"3cef3b9df5a5a5acc2aedc0ac6440a54c4afbd503c0281e7f8c9e123479188f9","src/vendor/SPIRV-Cross/spirv_cpp.hpp":"50f3704eb9b33f63284fcde37ee58859de83bdd19b87665bc410da3b7c952bfb","src/vendor/SPIRV-Cross/spirv_cross.cpp":"1b3d1d13b71b7c53d894d12ca1f6a22aa283d003c533df931a92b7ef202ab7b2","src/vendor/SPIRV-Cross/spirv_cross.hpp":"50558c0314234a9f438821f2ac4dc3a4e3489a3ab0df17dd5558e6ff8f0d79c3","src/vendor/SPIRV-Cross/spirv_cross_c.cpp":"ab4d72758f71c0f1c57a7412d91418850a2380bc57f2caa018e13e6a8918db84","src/vendor/SPIRV-Cross/spirv_cross_c.h":"6a98ccb6b9e6f366cb137a448134f19feba7929c543bf8acec86ab98da20e9f8","src/vendor/SPIRV-Cross/spirv_cross_containers.hpp":"5058178cb018420fc7ebb33a50bb1dabebe3dbd2e848560a1b22f0c618b81d08","src/vendor/SPIRV-Cross/spirv_cross_error_handling.hpp":"bf8b9a0a2f8b15db6e4fc87373f6ab437f772b546e2643c6edb3ec28ae8c10a9","src/vendor/SPIRV-Cross/spirv_cross_parsed_ir.cpp":"addf8ee2a81f731ecf0000a3bbf324fff463e8fb93f018d0f8ae99607c25da16","src/vendor/SPIRV-Cross/spirv_cross_parsed_ir.hpp":"a1e78ba7bade737b4b25e73119846ef0a5618025830f68292a2feef0f3718d3a","src/vendor/SPIRV-Cross/spirv_cross_util.cpp":"6d4126ab19c0754240b9d567565efcec20cf99c792c91c622b75a9395552e009","src/vendor/SPIRV-Cross/spirv_cross_util.hpp":"85139cbc86ae9cb93a5d25d398f109c142f1f646f86425bd51d89e0486506e4d","src/vendor/SPIRV-Cross/spirv_glsl.cpp":"f0c3e917b809c28b6d372f58b1ac6997c2cc7888e724af108c12ee4ebbe22723","src/vendor/SPIRV-Cross/spirv_glsl.hpp":"69fb8349964dbab7f1c2f1725b6329d0461968309778de069a7d6f8377cff11e","src/vendor/SPIRV-Cross/spirv_hlsl.cpp":"6bef1d3b301e55317da71afc911296d1d4e6dc1957b51cb5384e39a14a0d255d","src/vendor/SPIRV-Cross/spirv_hlsl.hpp":"199d1d677750b67964579e6abd33eefce90437813256138b2e0c4c41cc183e05","src/vendor/SPIRV-Cross/spirv_msl.cpp":"3e06f8ac4117e630b1b998d1b95f2594ef67ece4d51c124796d3f1bba3036239","src/vendor/SPIRV-Cross/spirv_msl.hpp":"95e53f03e124fd01bb450733e938666750d69c87451797c82ac8e1155a910978","src/vendor/SPIRV-Cross/spirv_parser.cpp":"76d5a9a9237a5fd6fd682a5562578d3cb2b27d0911cfb3df93e2b2c70011a8d7","src/vendor/SPIRV-Cross/spirv_parser.hpp":"b2dbbb6ba4e7fc774f9d6071e3f1765ee0824548f1732d65ebfc06b060426520","src/vendor/SPIRV-Cross/spirv_reflect.cpp":"22b0f0621afb953ba24143db4d2362c0677cd9bb2f6d7b010d0be39c5ed282f6","src/vendor/SPIRV-Cross/spirv_reflect.hpp":"35e7858287f94d865a4785e87ba9b4ab849b52ffc818801d13086ab304c9dca3","src/wrapper.cpp":"8a6bc6ed9c7916f13e4c940a51daa6ff8501e39265b0a56bcfc8ff7c60d0ba6a","src/wrapper.hpp":"c20bc6645e3041e608e3c0d3f7233c631e032485159d
eb0ea21f327fb0f7cd3e","tests/common/mod.rs":"2843bf104c7938d93065f7b5688c9f063ad9e5720c407c737aedc5f2dee5a80f","tests/glsl_tests.rs":"89bba5a10b48ee2117485c31d37e7d96be09efbcc86ce7edfa80f0b5e30d97f6","tests/hlsl_tests.rs":"1e535461e3a1a1395a43890e12e3f4bbf947d2880238f7e71f9e52920808d014","tests/msl_tests.rs":"995fea74bb8548b0c50b5689c8b370cd032d6805f1ad33332a09d28cdeba7a30","tests/shaders/array.vert":"d0dab7ddea131e069961054f40a164602448aa78720b7ff480e141d1f7b0b2d6","tests/shaders/array.vert.spv":"8e44421590ade1716be66ad39f60fb1ce58eedeab8f0806335a7369687b308b1","tests/shaders/rasterize_disabled.vert":"da6de172549830216933c44edf18b13113d7ca87462e3d09ad50dfc9c9836821","tests/shaders/rasterize_disabled.vert.spv":"2ba809eb500ed6e5a067389ccc056551e796e7019517593d110fb62c9dca2056","tests/shaders/sampler.frag":"4c568e65176afe596dd8ef279485e992607e94d612786214ae1c6702d0322e1f","tests/shaders/sampler.frag.spv":"bd7bd1973a82dcfdf5755361fa4dd420fdf1c32c5de0a6f0896a8d5971f98684","tests/shaders/simple.vert":"ea143c97dff5ef03728b96b2dd893bdc59d56651581ecf9fe50f10807b0efdd0","tests/shaders/simple.vert.spv":"a2b5094ffd76288e0d08c37ce0351e28f20bb6d80ddd73fc44a71c1c7cbbf7db","tests/shaders/specialization.comp":"ce32fa1615737209f2e465ea347d79030ddcb33a88c38447e7cde7dffc920163","tests/shaders/specialization.comp.spv":"848604e37b870b8999692b266677be2ce0df6ce38093a0d81e6bc43d0bdf8a3f","tests/shaders/struct.frag":"d8840bb1961d6f14609b00ee54406c1e3ea31cecd8231b77cfb73d28b71910c0","tests/shaders/struct.frag.spv":"774aa886374eb95abf9bb7d0045ee77d97e26529e9ec96b90991a515fdbca4be","tests/shaders/struct.vert":"9299cda83ddb5b5c3d95ab0d057e4df2af137dfd92d6c4d3e96295b7d42e29a1","tests/shaders/struct.vert.spv":"4a82bdee72616ac058bc60d4255efa3e78199a2b8597570c013bebbee7107fb7","tests/shaders/workgroup.comp":"478044b5392e0d1fb042253d71ea6bf7b8a014af4a6ee35d8db4c5343ac69739","tests/shaders/workgroup.comp.spv":"72f636fe3d1d6d0c5963f71bf4349c7e40d544331b33b6b64fb5b65784e6abee","tests/spirv_tests.rs":"4660f40d6d4598df4426bbc4b377280e00f85485e810d97d0c23cab96d824696"},"package":null}
\ No newline at end of file
diff --git a/third_party/rust/spirv_cross/Cargo.toml b/third_party/rust/spirv_cross/Cargo.toml
new file mode 100644
index 000000000000..2518ef187754
--- /dev/null
+++ b/third_party/rust/spirv_cross/Cargo.toml
@@ -0,0 +1,24 @@
+[package]
+name = "spirv_cross"
+version = "0.16.0"
+authors = ["Joshua Groves"]
+description = "Safe wrapper around SPIRV-Cross"
+license = "MIT/Apache-2.0"
+homepage = "https://github.com/grovesNL/spirv_cross"
+repository = "https://github.com/grovesNL/spirv_cross"
+readme = "../README.md"
+keywords = ["spirv", "cross"]
+build = "build.rs"
+edition = "2018"
+
+[lib]
+crate-type = ["rlib"]
+
+[features]
+default = []
+glsl = []
+hlsl = []
+msl = []
+
+[target.'cfg(not(target_arch = "wasm32"))'.build-dependencies]
+cc = "1.0.4"
diff --git a/third_party/rust/spirv_cross/build.rs b/third_party/rust/spirv_cross/build.rs
new file mode 100644
index 000000000000..aa395505d696
--- /dev/null
+++ b/third_party/rust/spirv_cross/build.rs
@@ -0,0 +1,61 @@
+fn main() {
+    let target_arch = std::env::var("CARGO_CFG_TARGET_ARCH");
+    if let Ok(ref arch) = target_arch {
+        if "wasm32" == arch {
+            return;
+        }
+    }
+
+    let target_vendor = std::env::var("CARGO_CFG_TARGET_VENDOR");
+    let is_apple = target_vendor.is_ok() && target_vendor.unwrap() == "apple";
+
+    let target_os = std::env::var("CARGO_CFG_TARGET_OS");
+    let is_ios = target_os.is_ok() && target_os.unwrap() == "ios";
+
+    let mut build = cc::Build::new();
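+    // From this point main() configures the single cc::Build in three stages:
+    // global C++ flags, the always-compiled SPIRV-Cross core sources, and the
+    // per-backend sources. The GLSL backend is compiled unconditionally
+    // (SPIRV-Cross's HLSL and MSL compilers inherit from the GLSL one), while
+    // HLSL and MSL are gated behind Cargo features. A minimal sketch of how a
+    // consumer crate would opt in -- the dependency path is hypothetical, the
+    // feature names come from the [features] table above:
+    //
+    //     [dependencies.spirv_cross]
+    //     path = "../spirv_cross"
+    //     features = ["hlsl", "msl"]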
+    build.cpp(true).static_crt(false);
+
+    let compiler = build.try_get_compiler();
+    let is_clang = compiler.is_ok() && compiler.unwrap().is_like_clang();
+
+    if is_apple && (is_clang || is_ios) {
+        build.flag("-std=c++14").cpp_set_stdlib("c++");
+    } else {
+        build.flag_if_supported("-std=c++14");
+    }
+
+    build.flag("-fno-exceptions");
+    build.flag("-fno-rtti");
+
+    build
+        .flag("-DSPIRV_CROSS_EXCEPTIONS_TO_ASSERTIONS")
+        .flag("-DSPIRV_CROSS_WRAPPER_NO_EXCEPTIONS");
+
+    build
+        .file("src/wrapper.cpp")
+        .file("src/vendor/SPIRV-Cross/spirv_cfg.cpp")
+        .file("src/vendor/SPIRV-Cross/spirv_cross.cpp")
+        .file("src/vendor/SPIRV-Cross/spirv_cross_parsed_ir.cpp")
+        .file("src/vendor/SPIRV-Cross/spirv_parser.cpp")
+        .file("src/vendor/SPIRV-Cross/spirv_cross_util.cpp");
+
+    build
+        .file("src/vendor/SPIRV-Cross/spirv_glsl.cpp")
+        .flag("-DSPIRV_CROSS_WRAPPER_GLSL");
+
+    #[cfg(feature = "hlsl")]
+    build
+        .file("src/vendor/SPIRV-Cross/spirv_hlsl.cpp")
+        .flag("-DSPIRV_CROSS_WRAPPER_HLSL");
+
+    #[cfg(feature = "msl")]
+    build
+        .file("src/vendor/SPIRV-Cross/spirv_msl.cpp")
+        .flag("-DSPIRV_CROSS_WRAPPER_MSL");
+
+    build.compile("spirv-cross-rust-wrapper");
+}
diff --git a/third_party/rust/spirv_cross/src/bindings_native.rs b/third_party/rust/spirv_cross/src/bindings_native.rs
new file mode 100644
index 000000000000..7d8142163a6f
--- /dev/null
+++ b/third_party/rust/spirv_cross/src/bindings_native.rs
@@ -0,0 +1,2228 @@
+
+
+#[allow(non_snake_case, non_camel_case_types, non_upper_case_globals)]
+pub mod root {
+    #[allow(unused_imports)]
+    use self::super::root;
+    pub mod spv {
+        #[allow(unused_imports)]
+        use self::super::super::root;
+        pub type Id = ::std::os::raw::c_uint;
+        #[repr(u32)]
+        #[derive(Debug, Copy, Clone, PartialEq, Eq, Hash)]
+        pub enum SourceLanguage {
+            SourceLanguageUnknown = 0,
+            SourceLanguageESSL = 1,
+            SourceLanguageGLSL = 2,
+            SourceLanguageOpenCL_C = 3,
+            SourceLanguageOpenCL_CPP = 4,
+            SourceLanguageHLSL = 5,
+            SourceLanguageMax = 2147483647,
+        }
+        #[repr(u32)]
+        #[derive(Debug, Copy, Clone, PartialEq, Eq, Hash)]
+        pub enum ExecutionModel {
+            ExecutionModelVertex = 0,
+            ExecutionModelTessellationControl = 1,
+            ExecutionModelTessellationEvaluation = 2,
+            ExecutionModelGeometry = 3,
+            ExecutionModelFragment = 4,
+            ExecutionModelGLCompute = 5,
+            ExecutionModelKernel = 6,
+            ExecutionModelTaskNV = 5267,
+            ExecutionModelMeshNV = 5268,
+            ExecutionModelRayGenerationNV = 5313,
+            ExecutionModelIntersectionNV = 5314,
+            ExecutionModelAnyHitNV = 5315,
+            ExecutionModelClosestHitNV = 5316,
+            ExecutionModelMissNV = 5317,
+            ExecutionModelCallableNV = 5318,
+            ExecutionModelMax = 2147483647,
+        }
+        #[repr(u32)]
+        #[derive(Debug, Copy, Clone, PartialEq, Eq, Hash)]
+        pub enum AddressingModel {
+            AddressingModelLogical = 0,
+            AddressingModelPhysical32 = 1,
+            AddressingModelPhysical64 = 2,
+            AddressingModelPhysicalStorageBuffer64EXT = 5348,
+            AddressingModelMax = 2147483647,
+        }
+        #[repr(u32)]
+        #[derive(Debug, Copy, Clone, PartialEq, Eq, Hash)]
+        pub enum MemoryModel {
+            MemoryModelSimple = 0,
+            MemoryModelGLSL450 = 1,
+            MemoryModelOpenCL = 2,
+            MemoryModelVulkanKHR = 3,
+            MemoryModelMax = 2147483647,
+        }
+        #[repr(u32)]
+        #[derive(Debug, Copy, Clone, PartialEq, Eq, Hash)]
+        pub enum ExecutionMode {
+            ExecutionModeInvocations = 0,
+            ExecutionModeSpacingEqual = 1,
+            ExecutionModeSpacingFractionalEven = 2,
+            ExecutionModeSpacingFractionalOdd = 3,
+            ExecutionModeVertexOrderCw = 4,
+            ExecutionModeVertexOrderCcw = 5,
+            ExecutionModePixelCenterInteger = 6,
+            ExecutionModeOriginUpperLeft = 7,
ExecutionModeOriginLowerLeft = 8, + ExecutionModeEarlyFragmentTests = 9, + ExecutionModePointMode = 10, + ExecutionModeXfb = 11, + ExecutionModeDepthReplacing = 12, + ExecutionModeDepthGreater = 14, + ExecutionModeDepthLess = 15, + ExecutionModeDepthUnchanged = 16, + ExecutionModeLocalSize = 17, + ExecutionModeLocalSizeHint = 18, + ExecutionModeInputPoints = 19, + ExecutionModeInputLines = 20, + ExecutionModeInputLinesAdjacency = 21, + ExecutionModeTriangles = 22, + ExecutionModeInputTrianglesAdjacency = 23, + ExecutionModeQuads = 24, + ExecutionModeIsolines = 25, + ExecutionModeOutputVertices = 26, + ExecutionModeOutputPoints = 27, + ExecutionModeOutputLineStrip = 28, + ExecutionModeOutputTriangleStrip = 29, + ExecutionModeVecTypeHint = 30, + ExecutionModeContractionOff = 31, + ExecutionModeInitializer = 33, + ExecutionModeFinalizer = 34, + ExecutionModeSubgroupSize = 35, + ExecutionModeSubgroupsPerWorkgroup = 36, + ExecutionModeSubgroupsPerWorkgroupId = 37, + ExecutionModeLocalSizeId = 38, + ExecutionModeLocalSizeHintId = 39, + ExecutionModePostDepthCoverage = 4446, + ExecutionModeDenormPreserve = 4459, + ExecutionModeDenormFlushToZero = 4460, + ExecutionModeSignedZeroInfNanPreserve = 4461, + ExecutionModeRoundingModeRTE = 4462, + ExecutionModeRoundingModeRTZ = 4463, + ExecutionModeStencilRefReplacingEXT = 5027, + ExecutionModeOutputLinesNV = 5269, + ExecutionModeOutputPrimitivesNV = 5270, + ExecutionModeDerivativeGroupQuadsNV = 5289, + ExecutionModeDerivativeGroupLinearNV = 5290, + ExecutionModeOutputTrianglesNV = 5298, + ExecutionModeMax = 2147483647, + } + #[repr(u32)] + #[derive(Debug, Copy, Clone, PartialEq, Eq, Hash)] + pub enum StorageClass { + StorageClassUniformConstant = 0, + StorageClassInput = 1, + StorageClassUniform = 2, + StorageClassOutput = 3, + StorageClassWorkgroup = 4, + StorageClassCrossWorkgroup = 5, + StorageClassPrivate = 6, + StorageClassFunction = 7, + StorageClassGeneric = 8, + StorageClassPushConstant = 9, + StorageClassAtomicCounter = 10, + StorageClassImage = 11, + StorageClassStorageBuffer = 12, + StorageClassCallableDataNV = 5328, + StorageClassIncomingCallableDataNV = 5329, + StorageClassRayPayloadNV = 5338, + StorageClassHitAttributeNV = 5339, + StorageClassIncomingRayPayloadNV = 5342, + StorageClassShaderRecordBufferNV = 5343, + StorageClassPhysicalStorageBufferEXT = 5349, + StorageClassMax = 2147483647, + } + #[repr(u32)] + #[derive(Debug, Copy, Clone, PartialEq, Eq, Hash)] + pub enum Dim { + Dim1D = 0, + Dim2D = 1, + Dim3D = 2, + DimCube = 3, + DimRect = 4, + DimBuffer = 5, + DimSubpassData = 6, + DimMax = 2147483647, + } + #[repr(u32)] + #[derive(Debug, Copy, Clone, PartialEq, Eq, Hash)] + pub enum SamplerAddressingMode { + SamplerAddressingModeNone = 0, + SamplerAddressingModeClampToEdge = 1, + SamplerAddressingModeClamp = 2, + SamplerAddressingModeRepeat = 3, + SamplerAddressingModeRepeatMirrored = 4, + SamplerAddressingModeMax = 2147483647, + } + #[repr(u32)] + #[derive(Debug, Copy, Clone, PartialEq, Eq, Hash)] + pub enum SamplerFilterMode { + SamplerFilterModeNearest = 0, + SamplerFilterModeLinear = 1, + SamplerFilterModeMax = 2147483647, + } + #[repr(u32)] + #[derive(Debug, Copy, Clone, PartialEq, Eq, Hash)] + pub enum ImageFormat { + ImageFormatUnknown = 0, + ImageFormatRgba32f = 1, + ImageFormatRgba16f = 2, + ImageFormatR32f = 3, + ImageFormatRgba8 = 4, + ImageFormatRgba8Snorm = 5, + ImageFormatRg32f = 6, + ImageFormatRg16f = 7, + ImageFormatR11fG11fB10f = 8, + ImageFormatR16f = 9, + ImageFormatRgba16 = 10, + ImageFormatRgb10A2 = 11, + 
ImageFormatRg16 = 12, + ImageFormatRg8 = 13, + ImageFormatR16 = 14, + ImageFormatR8 = 15, + ImageFormatRgba16Snorm = 16, + ImageFormatRg16Snorm = 17, + ImageFormatRg8Snorm = 18, + ImageFormatR16Snorm = 19, + ImageFormatR8Snorm = 20, + ImageFormatRgba32i = 21, + ImageFormatRgba16i = 22, + ImageFormatRgba8i = 23, + ImageFormatR32i = 24, + ImageFormatRg32i = 25, + ImageFormatRg16i = 26, + ImageFormatRg8i = 27, + ImageFormatR16i = 28, + ImageFormatR8i = 29, + ImageFormatRgba32ui = 30, + ImageFormatRgba16ui = 31, + ImageFormatRgba8ui = 32, + ImageFormatR32ui = 33, + ImageFormatRgb10a2ui = 34, + ImageFormatRg32ui = 35, + ImageFormatRg16ui = 36, + ImageFormatRg8ui = 37, + ImageFormatR16ui = 38, + ImageFormatR8ui = 39, + ImageFormatMax = 2147483647, + } + #[repr(u32)] + #[derive(Debug, Copy, Clone, PartialEq, Eq, Hash)] + pub enum ImageChannelOrder { + ImageChannelOrderR = 0, + ImageChannelOrderA = 1, + ImageChannelOrderRG = 2, + ImageChannelOrderRA = 3, + ImageChannelOrderRGB = 4, + ImageChannelOrderRGBA = 5, + ImageChannelOrderBGRA = 6, + ImageChannelOrderARGB = 7, + ImageChannelOrderIntensity = 8, + ImageChannelOrderLuminance = 9, + ImageChannelOrderRx = 10, + ImageChannelOrderRGx = 11, + ImageChannelOrderRGBx = 12, + ImageChannelOrderDepth = 13, + ImageChannelOrderDepthStencil = 14, + ImageChannelOrdersRGB = 15, + ImageChannelOrdersRGBx = 16, + ImageChannelOrdersRGBA = 17, + ImageChannelOrdersBGRA = 18, + ImageChannelOrderABGR = 19, + ImageChannelOrderMax = 2147483647, + } + #[repr(u32)] + #[derive(Debug, Copy, Clone, PartialEq, Eq, Hash)] + pub enum ImageChannelDataType { + ImageChannelDataTypeSnormInt8 = 0, + ImageChannelDataTypeSnormInt16 = 1, + ImageChannelDataTypeUnormInt8 = 2, + ImageChannelDataTypeUnormInt16 = 3, + ImageChannelDataTypeUnormShort565 = 4, + ImageChannelDataTypeUnormShort555 = 5, + ImageChannelDataTypeUnormInt101010 = 6, + ImageChannelDataTypeSignedInt8 = 7, + ImageChannelDataTypeSignedInt16 = 8, + ImageChannelDataTypeSignedInt32 = 9, + ImageChannelDataTypeUnsignedInt8 = 10, + ImageChannelDataTypeUnsignedInt16 = 11, + ImageChannelDataTypeUnsignedInt32 = 12, + ImageChannelDataTypeHalfFloat = 13, + ImageChannelDataTypeFloat = 14, + ImageChannelDataTypeUnormInt24 = 15, + ImageChannelDataTypeUnormInt101010_2 = 16, + ImageChannelDataTypeMax = 2147483647, + } + #[repr(u32)] + #[derive(Debug, Copy, Clone, PartialEq, Eq, Hash)] + pub enum ImageOperandsShift { + ImageOperandsBiasShift = 0, + ImageOperandsLodShift = 1, + ImageOperandsGradShift = 2, + ImageOperandsConstOffsetShift = 3, + ImageOperandsOffsetShift = 4, + ImageOperandsConstOffsetsShift = 5, + ImageOperandsSampleShift = 6, + ImageOperandsMinLodShift = 7, + ImageOperandsMakeTexelAvailableKHRShift = 8, + ImageOperandsMakeTexelVisibleKHRShift = 9, + ImageOperandsNonPrivateTexelKHRShift = 10, + ImageOperandsVolatileTexelKHRShift = 11, + ImageOperandsMax = 2147483647, + } + pub const ImageOperandsMask_ImageOperandsMaskNone: + root::spv::ImageOperandsMask = + ImageOperandsMask(0); + pub const ImageOperandsMask_ImageOperandsBiasMask: + root::spv::ImageOperandsMask = + ImageOperandsMask(1); + pub const ImageOperandsMask_ImageOperandsLodMask: + root::spv::ImageOperandsMask = + ImageOperandsMask(2); + pub const ImageOperandsMask_ImageOperandsGradMask: + root::spv::ImageOperandsMask = + ImageOperandsMask(4); + pub const ImageOperandsMask_ImageOperandsConstOffsetMask: + root::spv::ImageOperandsMask = + ImageOperandsMask(8); + pub const ImageOperandsMask_ImageOperandsOffsetMask: + root::spv::ImageOperandsMask = + 
ImageOperandsMask(16); + pub const ImageOperandsMask_ImageOperandsConstOffsetsMask: + root::spv::ImageOperandsMask = + ImageOperandsMask(32); + pub const ImageOperandsMask_ImageOperandsSampleMask: + root::spv::ImageOperandsMask = + ImageOperandsMask(64); + pub const ImageOperandsMask_ImageOperandsMinLodMask: + root::spv::ImageOperandsMask = + ImageOperandsMask(128); + pub const ImageOperandsMask_ImageOperandsMakeTexelAvailableKHRMask: + root::spv::ImageOperandsMask = + ImageOperandsMask(256); + pub const ImageOperandsMask_ImageOperandsMakeTexelVisibleKHRMask: + root::spv::ImageOperandsMask = + ImageOperandsMask(512); + pub const ImageOperandsMask_ImageOperandsNonPrivateTexelKHRMask: + root::spv::ImageOperandsMask = + ImageOperandsMask(1024); + pub const ImageOperandsMask_ImageOperandsVolatileTexelKHRMask: + root::spv::ImageOperandsMask = + ImageOperandsMask(2048); + impl ::std::ops::BitOr for + root::spv::ImageOperandsMask { + type + Output + = + Self; + #[inline] + fn bitor(self, other: Self) -> Self { + ImageOperandsMask(self.0 | other.0) + } + } + impl ::std::ops::BitOrAssign for root::spv::ImageOperandsMask { + #[inline] + fn bitor_assign(&mut self, rhs: root::spv::ImageOperandsMask) { + self.0 |= rhs.0; + } + } + impl ::std::ops::BitAnd for + root::spv::ImageOperandsMask { + type + Output + = + Self; + #[inline] + fn bitand(self, other: Self) -> Self { + ImageOperandsMask(self.0 & other.0) + } + } + impl ::std::ops::BitAndAssign for root::spv::ImageOperandsMask { + #[inline] + fn bitand_assign(&mut self, rhs: root::spv::ImageOperandsMask) { + self.0 &= rhs.0; + } + } + #[repr(C)] + #[derive(Debug, Copy, Clone, PartialEq, Eq, Hash)] + pub struct ImageOperandsMask(pub ::std::os::raw::c_uint); + #[repr(u32)] + #[derive(Debug, Copy, Clone, PartialEq, Eq, Hash)] + pub enum FPFastMathModeShift { + FPFastMathModeNotNaNShift = 0, + FPFastMathModeNotInfShift = 1, + FPFastMathModeNSZShift = 2, + FPFastMathModeAllowRecipShift = 3, + FPFastMathModeFastShift = 4, + FPFastMathModeMax = 2147483647, + } + pub const FPFastMathModeMask_FPFastMathModeMaskNone: + root::spv::FPFastMathModeMask = + FPFastMathModeMask(0); + pub const FPFastMathModeMask_FPFastMathModeNotNaNMask: + root::spv::FPFastMathModeMask = + FPFastMathModeMask(1); + pub const FPFastMathModeMask_FPFastMathModeNotInfMask: + root::spv::FPFastMathModeMask = + FPFastMathModeMask(2); + pub const FPFastMathModeMask_FPFastMathModeNSZMask: + root::spv::FPFastMathModeMask = + FPFastMathModeMask(4); + pub const FPFastMathModeMask_FPFastMathModeAllowRecipMask: + root::spv::FPFastMathModeMask = + FPFastMathModeMask(8); + pub const FPFastMathModeMask_FPFastMathModeFastMask: + root::spv::FPFastMathModeMask = + FPFastMathModeMask(16); + impl ::std::ops::BitOr for + root::spv::FPFastMathModeMask { + type + Output + = + Self; + #[inline] + fn bitor(self, other: Self) -> Self { + FPFastMathModeMask(self.0 | other.0) + } + } + impl ::std::ops::BitOrAssign for root::spv::FPFastMathModeMask { + #[inline] + fn bitor_assign(&mut self, rhs: root::spv::FPFastMathModeMask) { + self.0 |= rhs.0; + } + } + impl ::std::ops::BitAnd for + root::spv::FPFastMathModeMask { + type + Output + = + Self; + #[inline] + fn bitand(self, other: Self) -> Self { + FPFastMathModeMask(self.0 & other.0) + } + } + impl ::std::ops::BitAndAssign for root::spv::FPFastMathModeMask { + #[inline] + fn bitand_assign(&mut self, rhs: root::spv::FPFastMathModeMask) { + self.0 &= rhs.0; + } + } + #[repr(C)] + #[derive(Debug, Copy, Clone, PartialEq, Eq, Hash)] + pub struct FPFastMathModeMask(pub 
::std::os::raw::c_uint); + #[repr(u32)] + #[derive(Debug, Copy, Clone, PartialEq, Eq, Hash)] + pub enum FPRoundingMode { + FPRoundingModeRTE = 0, + FPRoundingModeRTZ = 1, + FPRoundingModeRTP = 2, + FPRoundingModeRTN = 3, + FPRoundingModeMax = 2147483647, + } + #[repr(u32)] + #[derive(Debug, Copy, Clone, PartialEq, Eq, Hash)] + pub enum LinkageType { + LinkageTypeExport = 0, + LinkageTypeImport = 1, + LinkageTypeMax = 2147483647, + } + #[repr(u32)] + #[derive(Debug, Copy, Clone, PartialEq, Eq, Hash)] + pub enum AccessQualifier { + AccessQualifierReadOnly = 0, + AccessQualifierWriteOnly = 1, + AccessQualifierReadWrite = 2, + AccessQualifierMax = 2147483647, + } + #[repr(u32)] + #[derive(Debug, Copy, Clone, PartialEq, Eq, Hash)] + pub enum FunctionParameterAttribute { + FunctionParameterAttributeZext = 0, + FunctionParameterAttributeSext = 1, + FunctionParameterAttributeByVal = 2, + FunctionParameterAttributeSret = 3, + FunctionParameterAttributeNoAlias = 4, + FunctionParameterAttributeNoCapture = 5, + FunctionParameterAttributeNoWrite = 6, + FunctionParameterAttributeNoReadWrite = 7, + FunctionParameterAttributeMax = 2147483647, + } + #[repr(u32)] + #[derive(Debug, Copy, Clone, PartialEq, Eq, Hash)] + pub enum Decoration { + DecorationRelaxedPrecision = 0, + DecorationSpecId = 1, + DecorationBlock = 2, + DecorationBufferBlock = 3, + DecorationRowMajor = 4, + DecorationColMajor = 5, + DecorationArrayStride = 6, + DecorationMatrixStride = 7, + DecorationGLSLShared = 8, + DecorationGLSLPacked = 9, + DecorationCPacked = 10, + DecorationBuiltIn = 11, + DecorationNoPerspective = 13, + DecorationFlat = 14, + DecorationPatch = 15, + DecorationCentroid = 16, + DecorationSample = 17, + DecorationInvariant = 18, + DecorationRestrict = 19, + DecorationAliased = 20, + DecorationVolatile = 21, + DecorationConstant = 22, + DecorationCoherent = 23, + DecorationNonWritable = 24, + DecorationNonReadable = 25, + DecorationUniform = 26, + DecorationSaturatedConversion = 28, + DecorationStream = 29, + DecorationLocation = 30, + DecorationComponent = 31, + DecorationIndex = 32, + DecorationBinding = 33, + DecorationDescriptorSet = 34, + DecorationOffset = 35, + DecorationXfbBuffer = 36, + DecorationXfbStride = 37, + DecorationFuncParamAttr = 38, + DecorationFPRoundingMode = 39, + DecorationFPFastMathMode = 40, + DecorationLinkageAttributes = 41, + DecorationNoContraction = 42, + DecorationInputAttachmentIndex = 43, + DecorationAlignment = 44, + DecorationMaxByteOffset = 45, + DecorationAlignmentId = 46, + DecorationMaxByteOffsetId = 47, + DecorationNoSignedWrap = 4469, + DecorationNoUnsignedWrap = 4470, + DecorationExplicitInterpAMD = 4999, + DecorationOverrideCoverageNV = 5248, + DecorationPassthroughNV = 5250, + DecorationViewportRelativeNV = 5252, + DecorationSecondaryViewportRelativeNV = 5256, + DecorationPerPrimitiveNV = 5271, + DecorationPerViewNV = 5272, + DecorationPerTaskNV = 5273, + DecorationPerVertexNV = 5285, + DecorationNonUniformEXT = 5300, + DecorationRestrictPointerEXT = 5355, + DecorationAliasedPointerEXT = 5356, + DecorationHlslCounterBufferGOOGLE = 5634, + DecorationHlslSemanticGOOGLE = 5635, + DecorationMax = 2147483647, + } + pub const BuiltIn_BuiltInSubgroupEqMaskKHR: root::spv::BuiltIn = + BuiltIn::BuiltInSubgroupEqMask; + pub const BuiltIn_BuiltInSubgroupGeMaskKHR: root::spv::BuiltIn = + BuiltIn::BuiltInSubgroupGeMask; + pub const BuiltIn_BuiltInSubgroupGtMaskKHR: root::spv::BuiltIn = + BuiltIn::BuiltInSubgroupGtMask; + pub const BuiltIn_BuiltInSubgroupLeMaskKHR: root::spv::BuiltIn = + 
BuiltIn::BuiltInSubgroupLeMask; + pub const BuiltIn_BuiltInSubgroupLtMaskKHR: root::spv::BuiltIn = + BuiltIn::BuiltInSubgroupLtMask; + pub const BuiltIn_BuiltInFragmentSizeNV: root::spv::BuiltIn = + BuiltIn::BuiltInFragSizeEXT; + pub const BuiltIn_BuiltInInvocationsPerPixelNV: root::spv::BuiltIn = + BuiltIn::BuiltInFragInvocationCountEXT; + #[repr(u32)] + #[derive(Debug, Copy, Clone, PartialEq, Eq, Hash)] + pub enum BuiltIn { + BuiltInPosition = 0, + BuiltInPointSize = 1, + BuiltInClipDistance = 3, + BuiltInCullDistance = 4, + BuiltInVertexId = 5, + BuiltInInstanceId = 6, + BuiltInPrimitiveId = 7, + BuiltInInvocationId = 8, + BuiltInLayer = 9, + BuiltInViewportIndex = 10, + BuiltInTessLevelOuter = 11, + BuiltInTessLevelInner = 12, + BuiltInTessCoord = 13, + BuiltInPatchVertices = 14, + BuiltInFragCoord = 15, + BuiltInPointCoord = 16, + BuiltInFrontFacing = 17, + BuiltInSampleId = 18, + BuiltInSamplePosition = 19, + BuiltInSampleMask = 20, + BuiltInFragDepth = 22, + BuiltInHelperInvocation = 23, + BuiltInNumWorkgroups = 24, + BuiltInWorkgroupSize = 25, + BuiltInWorkgroupId = 26, + BuiltInLocalInvocationId = 27, + BuiltInGlobalInvocationId = 28, + BuiltInLocalInvocationIndex = 29, + BuiltInWorkDim = 30, + BuiltInGlobalSize = 31, + BuiltInEnqueuedWorkgroupSize = 32, + BuiltInGlobalOffset = 33, + BuiltInGlobalLinearId = 34, + BuiltInSubgroupSize = 36, + BuiltInSubgroupMaxSize = 37, + BuiltInNumSubgroups = 38, + BuiltInNumEnqueuedSubgroups = 39, + BuiltInSubgroupId = 40, + BuiltInSubgroupLocalInvocationId = 41, + BuiltInVertexIndex = 42, + BuiltInInstanceIndex = 43, + BuiltInSubgroupEqMask = 4416, + BuiltInSubgroupGeMask = 4417, + BuiltInSubgroupGtMask = 4418, + BuiltInSubgroupLeMask = 4419, + BuiltInSubgroupLtMask = 4420, + BuiltInBaseVertex = 4424, + BuiltInBaseInstance = 4425, + BuiltInDrawIndex = 4426, + BuiltInDeviceIndex = 4438, + BuiltInViewIndex = 4440, + BuiltInBaryCoordNoPerspAMD = 4992, + BuiltInBaryCoordNoPerspCentroidAMD = 4993, + BuiltInBaryCoordNoPerspSampleAMD = 4994, + BuiltInBaryCoordSmoothAMD = 4995, + BuiltInBaryCoordSmoothCentroidAMD = 4996, + BuiltInBaryCoordSmoothSampleAMD = 4997, + BuiltInBaryCoordPullModelAMD = 4998, + BuiltInFragStencilRefEXT = 5014, + BuiltInViewportMaskNV = 5253, + BuiltInSecondaryPositionNV = 5257, + BuiltInSecondaryViewportMaskNV = 5258, + BuiltInPositionPerViewNV = 5261, + BuiltInViewportMaskPerViewNV = 5262, + BuiltInFullyCoveredEXT = 5264, + BuiltInTaskCountNV = 5274, + BuiltInPrimitiveCountNV = 5275, + BuiltInPrimitiveIndicesNV = 5276, + BuiltInClipDistancePerViewNV = 5277, + BuiltInCullDistancePerViewNV = 5278, + BuiltInLayerPerViewNV = 5279, + BuiltInMeshViewCountNV = 5280, + BuiltInMeshViewIndicesNV = 5281, + BuiltInBaryCoordNV = 5286, + BuiltInBaryCoordNoPerspNV = 5287, + BuiltInFragSizeEXT = 5292, + BuiltInFragInvocationCountEXT = 5293, + BuiltInLaunchIdNV = 5319, + BuiltInLaunchSizeNV = 5320, + BuiltInWorldRayOriginNV = 5321, + BuiltInWorldRayDirectionNV = 5322, + BuiltInObjectRayOriginNV = 5323, + BuiltInObjectRayDirectionNV = 5324, + BuiltInRayTminNV = 5325, + BuiltInRayTmaxNV = 5326, + BuiltInInstanceCustomIndexNV = 5327, + BuiltInObjectToWorldNV = 5330, + BuiltInWorldToObjectNV = 5331, + BuiltInHitTNV = 5332, + BuiltInHitKindNV = 5333, + BuiltInIncomingRayFlagsNV = 5351, + BuiltInMax = 2147483647, + } + #[repr(u32)] + #[derive(Debug, Copy, Clone, PartialEq, Eq, Hash)] + pub enum SelectionControlShift { + SelectionControlFlattenShift = 0, + SelectionControlDontFlattenShift = 1, + SelectionControlMax = 2147483647, + } + pub const 
SelectionControlMask_SelectionControlMaskNone: + root::spv::SelectionControlMask = + SelectionControlMask(0); + pub const SelectionControlMask_SelectionControlFlattenMask: + root::spv::SelectionControlMask = + SelectionControlMask(1); + pub const SelectionControlMask_SelectionControlDontFlattenMask: + root::spv::SelectionControlMask = + SelectionControlMask(2); + impl ::std::ops::BitOr for + root::spv::SelectionControlMask { + type + Output + = + Self; + #[inline] + fn bitor(self, other: Self) -> Self { + SelectionControlMask(self.0 | other.0) + } + } + impl ::std::ops::BitOrAssign for root::spv::SelectionControlMask { + #[inline] + fn bitor_assign(&mut self, rhs: root::spv::SelectionControlMask) { + self.0 |= rhs.0; + } + } + impl ::std::ops::BitAnd for + root::spv::SelectionControlMask { + type + Output + = + Self; + #[inline] + fn bitand(self, other: Self) -> Self { + SelectionControlMask(self.0 & other.0) + } + } + impl ::std::ops::BitAndAssign for root::spv::SelectionControlMask { + #[inline] + fn bitand_assign(&mut self, + rhs: root::spv::SelectionControlMask) { + self.0 &= rhs.0; + } + } + #[repr(C)] + #[derive(Debug, Copy, Clone, PartialEq, Eq, Hash)] + pub struct SelectionControlMask(pub ::std::os::raw::c_uint); + #[repr(u32)] + #[derive(Debug, Copy, Clone, PartialEq, Eq, Hash)] + pub enum LoopControlShift { + LoopControlUnrollShift = 0, + LoopControlDontUnrollShift = 1, + LoopControlDependencyInfiniteShift = 2, + LoopControlDependencyLengthShift = 3, + LoopControlMax = 2147483647, + } + pub const LoopControlMask_LoopControlMaskNone: + root::spv::LoopControlMask = + LoopControlMask(0); + pub const LoopControlMask_LoopControlUnrollMask: + root::spv::LoopControlMask = + LoopControlMask(1); + pub const LoopControlMask_LoopControlDontUnrollMask: + root::spv::LoopControlMask = + LoopControlMask(2); + pub const LoopControlMask_LoopControlDependencyInfiniteMask: + root::spv::LoopControlMask = + LoopControlMask(4); + pub const LoopControlMask_LoopControlDependencyLengthMask: + root::spv::LoopControlMask = + LoopControlMask(8); + impl ::std::ops::BitOr for + root::spv::LoopControlMask { + type + Output + = + Self; + #[inline] + fn bitor(self, other: Self) -> Self { + LoopControlMask(self.0 | other.0) + } + } + impl ::std::ops::BitOrAssign for root::spv::LoopControlMask { + #[inline] + fn bitor_assign(&mut self, rhs: root::spv::LoopControlMask) { + self.0 |= rhs.0; + } + } + impl ::std::ops::BitAnd for + root::spv::LoopControlMask { + type + Output + = + Self; + #[inline] + fn bitand(self, other: Self) -> Self { + LoopControlMask(self.0 & other.0) + } + } + impl ::std::ops::BitAndAssign for root::spv::LoopControlMask { + #[inline] + fn bitand_assign(&mut self, rhs: root::spv::LoopControlMask) { + self.0 &= rhs.0; + } + } + #[repr(C)] + #[derive(Debug, Copy, Clone, PartialEq, Eq, Hash)] + pub struct LoopControlMask(pub ::std::os::raw::c_uint); + #[repr(u32)] + #[derive(Debug, Copy, Clone, PartialEq, Eq, Hash)] + pub enum FunctionControlShift { + FunctionControlInlineShift = 0, + FunctionControlDontInlineShift = 1, + FunctionControlPureShift = 2, + FunctionControlConstShift = 3, + FunctionControlMax = 2147483647, + } + pub const FunctionControlMask_FunctionControlMaskNone: + root::spv::FunctionControlMask = + FunctionControlMask(0); + pub const FunctionControlMask_FunctionControlInlineMask: + root::spv::FunctionControlMask = + FunctionControlMask(1); + pub const FunctionControlMask_FunctionControlDontInlineMask: + root::spv::FunctionControlMask = + FunctionControlMask(2); + pub const 
FunctionControlMask_FunctionControlPureMask: + root::spv::FunctionControlMask = + FunctionControlMask(4); + pub const FunctionControlMask_FunctionControlConstMask: + root::spv::FunctionControlMask = + FunctionControlMask(8); + impl ::std::ops::BitOr for + root::spv::FunctionControlMask { + type + Output + = + Self; + #[inline] + fn bitor(self, other: Self) -> Self { + FunctionControlMask(self.0 | other.0) + } + } + impl ::std::ops::BitOrAssign for root::spv::FunctionControlMask { + #[inline] + fn bitor_assign(&mut self, rhs: root::spv::FunctionControlMask) { + self.0 |= rhs.0; + } + } + impl ::std::ops::BitAnd for + root::spv::FunctionControlMask { + type + Output + = + Self; + #[inline] + fn bitand(self, other: Self) -> Self { + FunctionControlMask(self.0 & other.0) + } + } + impl ::std::ops::BitAndAssign for root::spv::FunctionControlMask { + #[inline] + fn bitand_assign(&mut self, rhs: root::spv::FunctionControlMask) { + self.0 &= rhs.0; + } + } + #[repr(C)] + #[derive(Debug, Copy, Clone, PartialEq, Eq, Hash)] + pub struct FunctionControlMask(pub ::std::os::raw::c_uint); + #[repr(u32)] + #[derive(Debug, Copy, Clone, PartialEq, Eq, Hash)] + pub enum MemorySemanticsShift { + MemorySemanticsAcquireShift = 1, + MemorySemanticsReleaseShift = 2, + MemorySemanticsAcquireReleaseShift = 3, + MemorySemanticsSequentiallyConsistentShift = 4, + MemorySemanticsUniformMemoryShift = 6, + MemorySemanticsSubgroupMemoryShift = 7, + MemorySemanticsWorkgroupMemoryShift = 8, + MemorySemanticsCrossWorkgroupMemoryShift = 9, + MemorySemanticsAtomicCounterMemoryShift = 10, + MemorySemanticsImageMemoryShift = 11, + MemorySemanticsOutputMemoryKHRShift = 12, + MemorySemanticsMakeAvailableKHRShift = 13, + MemorySemanticsMakeVisibleKHRShift = 14, + MemorySemanticsMax = 2147483647, + } + pub const MemorySemanticsMask_MemorySemanticsMaskNone: + root::spv::MemorySemanticsMask = + MemorySemanticsMask(0); + pub const MemorySemanticsMask_MemorySemanticsAcquireMask: + root::spv::MemorySemanticsMask = + MemorySemanticsMask(2); + pub const MemorySemanticsMask_MemorySemanticsReleaseMask: + root::spv::MemorySemanticsMask = + MemorySemanticsMask(4); + pub const MemorySemanticsMask_MemorySemanticsAcquireReleaseMask: + root::spv::MemorySemanticsMask = + MemorySemanticsMask(8); + pub const MemorySemanticsMask_MemorySemanticsSequentiallyConsistentMask: + root::spv::MemorySemanticsMask = + MemorySemanticsMask(16); + pub const MemorySemanticsMask_MemorySemanticsUniformMemoryMask: + root::spv::MemorySemanticsMask = + MemorySemanticsMask(64); + pub const MemorySemanticsMask_MemorySemanticsSubgroupMemoryMask: + root::spv::MemorySemanticsMask = + MemorySemanticsMask(128); + pub const MemorySemanticsMask_MemorySemanticsWorkgroupMemoryMask: + root::spv::MemorySemanticsMask = + MemorySemanticsMask(256); + pub const MemorySemanticsMask_MemorySemanticsCrossWorkgroupMemoryMask: + root::spv::MemorySemanticsMask = + MemorySemanticsMask(512); + pub const MemorySemanticsMask_MemorySemanticsAtomicCounterMemoryMask: + root::spv::MemorySemanticsMask = + MemorySemanticsMask(1024); + pub const MemorySemanticsMask_MemorySemanticsImageMemoryMask: + root::spv::MemorySemanticsMask = + MemorySemanticsMask(2048); + pub const MemorySemanticsMask_MemorySemanticsOutputMemoryKHRMask: + root::spv::MemorySemanticsMask = + MemorySemanticsMask(4096); + pub const MemorySemanticsMask_MemorySemanticsMakeAvailableKHRMask: + root::spv::MemorySemanticsMask = + MemorySemanticsMask(8192); + pub const MemorySemanticsMask_MemorySemanticsMakeVisibleKHRMask: + 
root::spv::MemorySemanticsMask = + MemorySemanticsMask(16384); + impl ::std::ops::BitOr for + root::spv::MemorySemanticsMask { + type + Output + = + Self; + #[inline] + fn bitor(self, other: Self) -> Self { + MemorySemanticsMask(self.0 | other.0) + } + } + impl ::std::ops::BitOrAssign for root::spv::MemorySemanticsMask { + #[inline] + fn bitor_assign(&mut self, rhs: root::spv::MemorySemanticsMask) { + self.0 |= rhs.0; + } + } + impl ::std::ops::BitAnd for + root::spv::MemorySemanticsMask { + type + Output + = + Self; + #[inline] + fn bitand(self, other: Self) -> Self { + MemorySemanticsMask(self.0 & other.0) + } + } + impl ::std::ops::BitAndAssign for root::spv::MemorySemanticsMask { + #[inline] + fn bitand_assign(&mut self, rhs: root::spv::MemorySemanticsMask) { + self.0 &= rhs.0; + } + } + #[repr(C)] + #[derive(Debug, Copy, Clone, PartialEq, Eq, Hash)] + pub struct MemorySemanticsMask(pub ::std::os::raw::c_uint); + #[repr(u32)] + #[derive(Debug, Copy, Clone, PartialEq, Eq, Hash)] + pub enum MemoryAccessShift { + MemoryAccessVolatileShift = 0, + MemoryAccessAlignedShift = 1, + MemoryAccessNontemporalShift = 2, + MemoryAccessMakePointerAvailableKHRShift = 3, + MemoryAccessMakePointerVisibleKHRShift = 4, + MemoryAccessNonPrivatePointerKHRShift = 5, + MemoryAccessMax = 2147483647, + } + pub const MemoryAccessMask_MemoryAccessMaskNone: + root::spv::MemoryAccessMask = + MemoryAccessMask(0); + pub const MemoryAccessMask_MemoryAccessVolatileMask: + root::spv::MemoryAccessMask = + MemoryAccessMask(1); + pub const MemoryAccessMask_MemoryAccessAlignedMask: + root::spv::MemoryAccessMask = + MemoryAccessMask(2); + pub const MemoryAccessMask_MemoryAccessNontemporalMask: + root::spv::MemoryAccessMask = + MemoryAccessMask(4); + pub const MemoryAccessMask_MemoryAccessMakePointerAvailableKHRMask: + root::spv::MemoryAccessMask = + MemoryAccessMask(8); + pub const MemoryAccessMask_MemoryAccessMakePointerVisibleKHRMask: + root::spv::MemoryAccessMask = + MemoryAccessMask(16); + pub const MemoryAccessMask_MemoryAccessNonPrivatePointerKHRMask: + root::spv::MemoryAccessMask = + MemoryAccessMask(32); + impl ::std::ops::BitOr for + root::spv::MemoryAccessMask { + type + Output + = + Self; + #[inline] + fn bitor(self, other: Self) -> Self { + MemoryAccessMask(self.0 | other.0) + } + } + impl ::std::ops::BitOrAssign for root::spv::MemoryAccessMask { + #[inline] + fn bitor_assign(&mut self, rhs: root::spv::MemoryAccessMask) { + self.0 |= rhs.0; + } + } + impl ::std::ops::BitAnd for + root::spv::MemoryAccessMask { + type + Output + = + Self; + #[inline] + fn bitand(self, other: Self) -> Self { + MemoryAccessMask(self.0 & other.0) + } + } + impl ::std::ops::BitAndAssign for root::spv::MemoryAccessMask { + #[inline] + fn bitand_assign(&mut self, rhs: root::spv::MemoryAccessMask) { + self.0 &= rhs.0; + } + } + #[repr(C)] + #[derive(Debug, Copy, Clone, PartialEq, Eq, Hash)] + pub struct MemoryAccessMask(pub ::std::os::raw::c_uint); + #[repr(u32)] + #[derive(Debug, Copy, Clone, PartialEq, Eq, Hash)] + pub enum Scope { + ScopeCrossDevice = 0, + ScopeDevice = 1, + ScopeWorkgroup = 2, + ScopeSubgroup = 3, + ScopeInvocation = 4, + ScopeQueueFamilyKHR = 5, + ScopeMax = 2147483647, + } + #[repr(u32)] + #[derive(Debug, Copy, Clone, PartialEq, Eq, Hash)] + pub enum GroupOperation { + GroupOperationReduce = 0, + GroupOperationInclusiveScan = 1, + GroupOperationExclusiveScan = 2, + GroupOperationClusteredReduce = 3, + GroupOperationPartitionedReduceNV = 6, + GroupOperationPartitionedInclusiveScanNV = 7, + 
GroupOperationPartitionedExclusiveScanNV = 8, + GroupOperationMax = 2147483647, + } + pub const KernelEnqueueFlags_KernelEnqueueFlagsNoWait: + root::spv::KernelEnqueueFlags = + KernelEnqueueFlags(0); + pub const KernelEnqueueFlags_KernelEnqueueFlagsWaitKernel: + root::spv::KernelEnqueueFlags = + KernelEnqueueFlags(1); + pub const KernelEnqueueFlags_KernelEnqueueFlagsWaitWorkGroup: + root::spv::KernelEnqueueFlags = + KernelEnqueueFlags(2); + pub const KernelEnqueueFlags_KernelEnqueueFlagsMax: + root::spv::KernelEnqueueFlags = + KernelEnqueueFlags(2147483647); + impl ::std::ops::BitOr for + root::spv::KernelEnqueueFlags { + type + Output + = + Self; + #[inline] + fn bitor(self, other: Self) -> Self { + KernelEnqueueFlags(self.0 | other.0) + } + } + impl ::std::ops::BitOrAssign for root::spv::KernelEnqueueFlags { + #[inline] + fn bitor_assign(&mut self, rhs: root::spv::KernelEnqueueFlags) { + self.0 |= rhs.0; + } + } + impl ::std::ops::BitAnd for + root::spv::KernelEnqueueFlags { + type + Output + = + Self; + #[inline] + fn bitand(self, other: Self) -> Self { + KernelEnqueueFlags(self.0 & other.0) + } + } + impl ::std::ops::BitAndAssign for root::spv::KernelEnqueueFlags { + #[inline] + fn bitand_assign(&mut self, rhs: root::spv::KernelEnqueueFlags) { + self.0 &= rhs.0; + } + } + #[repr(C)] + #[derive(Debug, Copy, Clone, PartialEq, Eq, Hash)] + pub struct KernelEnqueueFlags(pub ::std::os::raw::c_uint); + #[repr(u32)] + #[derive(Debug, Copy, Clone, PartialEq, Eq, Hash)] + pub enum KernelProfilingInfoShift { + KernelProfilingInfoCmdExecTimeShift = 0, + KernelProfilingInfoMax = 2147483647, + } + pub const KernelProfilingInfoMask_KernelProfilingInfoMaskNone: + root::spv::KernelProfilingInfoMask = + KernelProfilingInfoMask(0); + pub const KernelProfilingInfoMask_KernelProfilingInfoCmdExecTimeMask: + root::spv::KernelProfilingInfoMask = + KernelProfilingInfoMask(1); + impl ::std::ops::BitOr for + root::spv::KernelProfilingInfoMask { + type + Output + = + Self; + #[inline] + fn bitor(self, other: Self) -> Self { + KernelProfilingInfoMask(self.0 | other.0) + } + } + impl ::std::ops::BitOrAssign for root::spv::KernelProfilingInfoMask { + #[inline] + fn bitor_assign(&mut self, + rhs: root::spv::KernelProfilingInfoMask) { + self.0 |= rhs.0; + } + } + impl ::std::ops::BitAnd for + root::spv::KernelProfilingInfoMask { + type + Output + = + Self; + #[inline] + fn bitand(self, other: Self) -> Self { + KernelProfilingInfoMask(self.0 & other.0) + } + } + impl ::std::ops::BitAndAssign for root::spv::KernelProfilingInfoMask { + #[inline] + fn bitand_assign(&mut self, + rhs: root::spv::KernelProfilingInfoMask) { + self.0 &= rhs.0; + } + } + #[repr(C)] + #[derive(Debug, Copy, Clone, PartialEq, Eq, Hash)] + pub struct KernelProfilingInfoMask(pub ::std::os::raw::c_uint); + pub const Capability_CapabilityStorageUniformBufferBlock16: + root::spv::Capability = + Capability::CapabilityStorageBuffer16BitAccess; + pub const Capability_CapabilityUniformAndStorageBuffer16BitAccess: + root::spv::Capability = + Capability::CapabilityStorageUniform16; + pub const Capability_CapabilityShaderViewportIndexLayerNV: + root::spv::Capability = + Capability::CapabilityShaderViewportIndexLayerEXT; + pub const Capability_CapabilityShadingRateNV: root::spv::Capability = + Capability::CapabilityFragmentDensityEXT; + #[repr(u32)] + #[derive(Debug, Copy, Clone, PartialEq, Eq, Hash)] + pub enum Capability { + CapabilityMatrix = 0, + CapabilityShader = 1, + CapabilityGeometry = 2, + CapabilityTessellation = 3, + CapabilityAddresses = 4, + 
CapabilityLinkage = 5, + CapabilityKernel = 6, + CapabilityVector16 = 7, + CapabilityFloat16Buffer = 8, + CapabilityFloat16 = 9, + CapabilityFloat64 = 10, + CapabilityInt64 = 11, + CapabilityInt64Atomics = 12, + CapabilityImageBasic = 13, + CapabilityImageReadWrite = 14, + CapabilityImageMipmap = 15, + CapabilityPipes = 17, + CapabilityGroups = 18, + CapabilityDeviceEnqueue = 19, + CapabilityLiteralSampler = 20, + CapabilityAtomicStorage = 21, + CapabilityInt16 = 22, + CapabilityTessellationPointSize = 23, + CapabilityGeometryPointSize = 24, + CapabilityImageGatherExtended = 25, + CapabilityStorageImageMultisample = 27, + CapabilityUniformBufferArrayDynamicIndexing = 28, + CapabilitySampledImageArrayDynamicIndexing = 29, + CapabilityStorageBufferArrayDynamicIndexing = 30, + CapabilityStorageImageArrayDynamicIndexing = 31, + CapabilityClipDistance = 32, + CapabilityCullDistance = 33, + CapabilityImageCubeArray = 34, + CapabilitySampleRateShading = 35, + CapabilityImageRect = 36, + CapabilitySampledRect = 37, + CapabilityGenericPointer = 38, + CapabilityInt8 = 39, + CapabilityInputAttachment = 40, + CapabilitySparseResidency = 41, + CapabilityMinLod = 42, + CapabilitySampled1D = 43, + CapabilityImage1D = 44, + CapabilitySampledCubeArray = 45, + CapabilitySampledBuffer = 46, + CapabilityImageBuffer = 47, + CapabilityImageMSArray = 48, + CapabilityStorageImageExtendedFormats = 49, + CapabilityImageQuery = 50, + CapabilityDerivativeControl = 51, + CapabilityInterpolationFunction = 52, + CapabilityTransformFeedback = 53, + CapabilityGeometryStreams = 54, + CapabilityStorageImageReadWithoutFormat = 55, + CapabilityStorageImageWriteWithoutFormat = 56, + CapabilityMultiViewport = 57, + CapabilitySubgroupDispatch = 58, + CapabilityNamedBarrier = 59, + CapabilityPipeStorage = 60, + CapabilityGroupNonUniform = 61, + CapabilityGroupNonUniformVote = 62, + CapabilityGroupNonUniformArithmetic = 63, + CapabilityGroupNonUniformBallot = 64, + CapabilityGroupNonUniformShuffle = 65, + CapabilityGroupNonUniformShuffleRelative = 66, + CapabilityGroupNonUniformClustered = 67, + CapabilityGroupNonUniformQuad = 68, + CapabilitySubgroupBallotKHR = 4423, + CapabilityDrawParameters = 4427, + CapabilitySubgroupVoteKHR = 4431, + CapabilityStorageBuffer16BitAccess = 4433, + CapabilityStorageUniform16 = 4434, + CapabilityStoragePushConstant16 = 4435, + CapabilityStorageInputOutput16 = 4436, + CapabilityDeviceGroup = 4437, + CapabilityMultiView = 4439, + CapabilityVariablePointersStorageBuffer = 4441, + CapabilityVariablePointers = 4442, + CapabilityAtomicStorageOps = 4445, + CapabilitySampleMaskPostDepthCoverage = 4447, + CapabilityStorageBuffer8BitAccess = 4448, + CapabilityUniformAndStorageBuffer8BitAccess = 4449, + CapabilityStoragePushConstant8 = 4450, + CapabilityDenormPreserve = 4464, + CapabilityDenormFlushToZero = 4465, + CapabilitySignedZeroInfNanPreserve = 4466, + CapabilityRoundingModeRTE = 4467, + CapabilityRoundingModeRTZ = 4468, + CapabilityFloat16ImageAMD = 5008, + CapabilityImageGatherBiasLodAMD = 5009, + CapabilityFragmentMaskAMD = 5010, + CapabilityStencilExportEXT = 5013, + CapabilityImageReadWriteLodAMD = 5015, + CapabilitySampleMaskOverrideCoverageNV = 5249, + CapabilityGeometryShaderPassthroughNV = 5251, + CapabilityShaderViewportIndexLayerEXT = 5254, + CapabilityShaderViewportMaskNV = 5255, + CapabilityShaderStereoViewNV = 5259, + CapabilityPerViewAttributesNV = 5260, + CapabilityFragmentFullyCoveredEXT = 5265, + CapabilityMeshShadingNV = 5266, + CapabilityImageFootprintNV = 5282, + 
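+            // Capability discriminants mirror the raw token values from the
+            // SPIR-V registry (core values low, vendor extensions in the
+            // 4xxx/5xxx ranges), which is why the numbering here is sparse
+            // rather than a dense Rust-side sequence.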
CapabilityFragmentBarycentricNV = 5284, + CapabilityComputeDerivativeGroupQuadsNV = 5288, + CapabilityFragmentDensityEXT = 5291, + CapabilityGroupNonUniformPartitionedNV = 5297, + CapabilityShaderNonUniformEXT = 5301, + CapabilityRuntimeDescriptorArrayEXT = 5302, + CapabilityInputAttachmentArrayDynamicIndexingEXT = 5303, + CapabilityUniformTexelBufferArrayDynamicIndexingEXT = 5304, + CapabilityStorageTexelBufferArrayDynamicIndexingEXT = 5305, + CapabilityUniformBufferArrayNonUniformIndexingEXT = 5306, + CapabilitySampledImageArrayNonUniformIndexingEXT = 5307, + CapabilityStorageBufferArrayNonUniformIndexingEXT = 5308, + CapabilityStorageImageArrayNonUniformIndexingEXT = 5309, + CapabilityInputAttachmentArrayNonUniformIndexingEXT = 5310, + CapabilityUniformTexelBufferArrayNonUniformIndexingEXT = 5311, + CapabilityStorageTexelBufferArrayNonUniformIndexingEXT = 5312, + CapabilityRayTracingNV = 5340, + CapabilityVulkanMemoryModelKHR = 5345, + CapabilityVulkanMemoryModelDeviceScopeKHR = 5346, + CapabilityPhysicalStorageBufferAddressesEXT = 5347, + CapabilityComputeDerivativeGroupLinearNV = 5350, + CapabilitySubgroupShuffleINTEL = 5568, + CapabilitySubgroupBufferBlockIOINTEL = 5569, + CapabilitySubgroupImageBlockIOINTEL = 5570, + CapabilitySubgroupImageMediaBlockIOINTEL = 5579, + CapabilityMax = 2147483647, + } + #[repr(u32)] + #[derive(Debug, Copy, Clone, PartialEq, Eq, Hash)] + pub enum Op { + OpNop = 0, + OpUndef = 1, + OpSourceContinued = 2, + OpSource = 3, + OpSourceExtension = 4, + OpName = 5, + OpMemberName = 6, + OpString = 7, + OpLine = 8, + OpExtension = 10, + OpExtInstImport = 11, + OpExtInst = 12, + OpMemoryModel = 14, + OpEntryPoint = 15, + OpExecutionMode = 16, + OpCapability = 17, + OpTypeVoid = 19, + OpTypeBool = 20, + OpTypeInt = 21, + OpTypeFloat = 22, + OpTypeVector = 23, + OpTypeMatrix = 24, + OpTypeImage = 25, + OpTypeSampler = 26, + OpTypeSampledImage = 27, + OpTypeArray = 28, + OpTypeRuntimeArray = 29, + OpTypeStruct = 30, + OpTypeOpaque = 31, + OpTypePointer = 32, + OpTypeFunction = 33, + OpTypeEvent = 34, + OpTypeDeviceEvent = 35, + OpTypeReserveId = 36, + OpTypeQueue = 37, + OpTypePipe = 38, + OpTypeForwardPointer = 39, + OpConstantTrue = 41, + OpConstantFalse = 42, + OpConstant = 43, + OpConstantComposite = 44, + OpConstantSampler = 45, + OpConstantNull = 46, + OpSpecConstantTrue = 48, + OpSpecConstantFalse = 49, + OpSpecConstant = 50, + OpSpecConstantComposite = 51, + OpSpecConstantOp = 52, + OpFunction = 54, + OpFunctionParameter = 55, + OpFunctionEnd = 56, + OpFunctionCall = 57, + OpVariable = 59, + OpImageTexelPointer = 60, + OpLoad = 61, + OpStore = 62, + OpCopyMemory = 63, + OpCopyMemorySized = 64, + OpAccessChain = 65, + OpInBoundsAccessChain = 66, + OpPtrAccessChain = 67, + OpArrayLength = 68, + OpGenericPtrMemSemantics = 69, + OpInBoundsPtrAccessChain = 70, + OpDecorate = 71, + OpMemberDecorate = 72, + OpDecorationGroup = 73, + OpGroupDecorate = 74, + OpGroupMemberDecorate = 75, + OpVectorExtractDynamic = 77, + OpVectorInsertDynamic = 78, + OpVectorShuffle = 79, + OpCompositeConstruct = 80, + OpCompositeExtract = 81, + OpCompositeInsert = 82, + OpCopyObject = 83, + OpTranspose = 84, + OpSampledImage = 86, + OpImageSampleImplicitLod = 87, + OpImageSampleExplicitLod = 88, + OpImageSampleDrefImplicitLod = 89, + OpImageSampleDrefExplicitLod = 90, + OpImageSampleProjImplicitLod = 91, + OpImageSampleProjExplicitLod = 92, + OpImageSampleProjDrefImplicitLod = 93, + OpImageSampleProjDrefExplicitLod = 94, + OpImageFetch = 95, + OpImageGather = 96, + OpImageDrefGather = 
97, + OpImageRead = 98, + OpImageWrite = 99, + OpImage = 100, + OpImageQueryFormat = 101, + OpImageQueryOrder = 102, + OpImageQuerySizeLod = 103, + OpImageQuerySize = 104, + OpImageQueryLod = 105, + OpImageQueryLevels = 106, + OpImageQuerySamples = 107, + OpConvertFToU = 109, + OpConvertFToS = 110, + OpConvertSToF = 111, + OpConvertUToF = 112, + OpUConvert = 113, + OpSConvert = 114, + OpFConvert = 115, + OpQuantizeToF16 = 116, + OpConvertPtrToU = 117, + OpSatConvertSToU = 118, + OpSatConvertUToS = 119, + OpConvertUToPtr = 120, + OpPtrCastToGeneric = 121, + OpGenericCastToPtr = 122, + OpGenericCastToPtrExplicit = 123, + OpBitcast = 124, + OpSNegate = 126, + OpFNegate = 127, + OpIAdd = 128, + OpFAdd = 129, + OpISub = 130, + OpFSub = 131, + OpIMul = 132, + OpFMul = 133, + OpUDiv = 134, + OpSDiv = 135, + OpFDiv = 136, + OpUMod = 137, + OpSRem = 138, + OpSMod = 139, + OpFRem = 140, + OpFMod = 141, + OpVectorTimesScalar = 142, + OpMatrixTimesScalar = 143, + OpVectorTimesMatrix = 144, + OpMatrixTimesVector = 145, + OpMatrixTimesMatrix = 146, + OpOuterProduct = 147, + OpDot = 148, + OpIAddCarry = 149, + OpISubBorrow = 150, + OpUMulExtended = 151, + OpSMulExtended = 152, + OpAny = 154, + OpAll = 155, + OpIsNan = 156, + OpIsInf = 157, + OpIsFinite = 158, + OpIsNormal = 159, + OpSignBitSet = 160, + OpLessOrGreater = 161, + OpOrdered = 162, + OpUnordered = 163, + OpLogicalEqual = 164, + OpLogicalNotEqual = 165, + OpLogicalOr = 166, + OpLogicalAnd = 167, + OpLogicalNot = 168, + OpSelect = 169, + OpIEqual = 170, + OpINotEqual = 171, + OpUGreaterThan = 172, + OpSGreaterThan = 173, + OpUGreaterThanEqual = 174, + OpSGreaterThanEqual = 175, + OpULessThan = 176, + OpSLessThan = 177, + OpULessThanEqual = 178, + OpSLessThanEqual = 179, + OpFOrdEqual = 180, + OpFUnordEqual = 181, + OpFOrdNotEqual = 182, + OpFUnordNotEqual = 183, + OpFOrdLessThan = 184, + OpFUnordLessThan = 185, + OpFOrdGreaterThan = 186, + OpFUnordGreaterThan = 187, + OpFOrdLessThanEqual = 188, + OpFUnordLessThanEqual = 189, + OpFOrdGreaterThanEqual = 190, + OpFUnordGreaterThanEqual = 191, + OpShiftRightLogical = 194, + OpShiftRightArithmetic = 195, + OpShiftLeftLogical = 196, + OpBitwiseOr = 197, + OpBitwiseXor = 198, + OpBitwiseAnd = 199, + OpNot = 200, + OpBitFieldInsert = 201, + OpBitFieldSExtract = 202, + OpBitFieldUExtract = 203, + OpBitReverse = 204, + OpBitCount = 205, + OpDPdx = 207, + OpDPdy = 208, + OpFwidth = 209, + OpDPdxFine = 210, + OpDPdyFine = 211, + OpFwidthFine = 212, + OpDPdxCoarse = 213, + OpDPdyCoarse = 214, + OpFwidthCoarse = 215, + OpEmitVertex = 218, + OpEndPrimitive = 219, + OpEmitStreamVertex = 220, + OpEndStreamPrimitive = 221, + OpControlBarrier = 224, + OpMemoryBarrier = 225, + OpAtomicLoad = 227, + OpAtomicStore = 228, + OpAtomicExchange = 229, + OpAtomicCompareExchange = 230, + OpAtomicCompareExchangeWeak = 231, + OpAtomicIIncrement = 232, + OpAtomicIDecrement = 233, + OpAtomicIAdd = 234, + OpAtomicISub = 235, + OpAtomicSMin = 236, + OpAtomicUMin = 237, + OpAtomicSMax = 238, + OpAtomicUMax = 239, + OpAtomicAnd = 240, + OpAtomicOr = 241, + OpAtomicXor = 242, + OpPhi = 245, + OpLoopMerge = 246, + OpSelectionMerge = 247, + OpLabel = 248, + OpBranch = 249, + OpBranchConditional = 250, + OpSwitch = 251, + OpKill = 252, + OpReturn = 253, + OpReturnValue = 254, + OpUnreachable = 255, + OpLifetimeStart = 256, + OpLifetimeStop = 257, + OpGroupAsyncCopy = 259, + OpGroupWaitEvents = 260, + OpGroupAll = 261, + OpGroupAny = 262, + OpGroupBroadcast = 263, + OpGroupIAdd = 264, + OpGroupFAdd = 265, + OpGroupFMin = 266, + 
OpGroupUMin = 267, + OpGroupSMin = 268, + OpGroupFMax = 269, + OpGroupUMax = 270, + OpGroupSMax = 271, + OpReadPipe = 274, + OpWritePipe = 275, + OpReservedReadPipe = 276, + OpReservedWritePipe = 277, + OpReserveReadPipePackets = 278, + OpReserveWritePipePackets = 279, + OpCommitReadPipe = 280, + OpCommitWritePipe = 281, + OpIsValidReserveId = 282, + OpGetNumPipePackets = 283, + OpGetMaxPipePackets = 284, + OpGroupReserveReadPipePackets = 285, + OpGroupReserveWritePipePackets = 286, + OpGroupCommitReadPipe = 287, + OpGroupCommitWritePipe = 288, + OpEnqueueMarker = 291, + OpEnqueueKernel = 292, + OpGetKernelNDrangeSubGroupCount = 293, + OpGetKernelNDrangeMaxSubGroupSize = 294, + OpGetKernelWorkGroupSize = 295, + OpGetKernelPreferredWorkGroupSizeMultiple = 296, + OpRetainEvent = 297, + OpReleaseEvent = 298, + OpCreateUserEvent = 299, + OpIsValidEvent = 300, + OpSetUserEventStatus = 301, + OpCaptureEventProfilingInfo = 302, + OpGetDefaultQueue = 303, + OpBuildNDRange = 304, + OpImageSparseSampleImplicitLod = 305, + OpImageSparseSampleExplicitLod = 306, + OpImageSparseSampleDrefImplicitLod = 307, + OpImageSparseSampleDrefExplicitLod = 308, + OpImageSparseSampleProjImplicitLod = 309, + OpImageSparseSampleProjExplicitLod = 310, + OpImageSparseSampleProjDrefImplicitLod = 311, + OpImageSparseSampleProjDrefExplicitLod = 312, + OpImageSparseFetch = 313, + OpImageSparseGather = 314, + OpImageSparseDrefGather = 315, + OpImageSparseTexelsResident = 316, + OpNoLine = 317, + OpAtomicFlagTestAndSet = 318, + OpAtomicFlagClear = 319, + OpImageSparseRead = 320, + OpSizeOf = 321, + OpTypePipeStorage = 322, + OpConstantPipeStorage = 323, + OpCreatePipeFromPipeStorage = 324, + OpGetKernelLocalSizeForSubgroupCount = 325, + OpGetKernelMaxNumSubgroups = 326, + OpTypeNamedBarrier = 327, + OpNamedBarrierInitialize = 328, + OpMemoryNamedBarrier = 329, + OpModuleProcessed = 330, + OpExecutionModeId = 331, + OpDecorateId = 332, + OpGroupNonUniformElect = 333, + OpGroupNonUniformAll = 334, + OpGroupNonUniformAny = 335, + OpGroupNonUniformAllEqual = 336, + OpGroupNonUniformBroadcast = 337, + OpGroupNonUniformBroadcastFirst = 338, + OpGroupNonUniformBallot = 339, + OpGroupNonUniformInverseBallot = 340, + OpGroupNonUniformBallotBitExtract = 341, + OpGroupNonUniformBallotBitCount = 342, + OpGroupNonUniformBallotFindLSB = 343, + OpGroupNonUniformBallotFindMSB = 344, + OpGroupNonUniformShuffle = 345, + OpGroupNonUniformShuffleXor = 346, + OpGroupNonUniformShuffleUp = 347, + OpGroupNonUniformShuffleDown = 348, + OpGroupNonUniformIAdd = 349, + OpGroupNonUniformFAdd = 350, + OpGroupNonUniformIMul = 351, + OpGroupNonUniformFMul = 352, + OpGroupNonUniformSMin = 353, + OpGroupNonUniformUMin = 354, + OpGroupNonUniformFMin = 355, + OpGroupNonUniformSMax = 356, + OpGroupNonUniformUMax = 357, + OpGroupNonUniformFMax = 358, + OpGroupNonUniformBitwiseAnd = 359, + OpGroupNonUniformBitwiseOr = 360, + OpGroupNonUniformBitwiseXor = 361, + OpGroupNonUniformLogicalAnd = 362, + OpGroupNonUniformLogicalOr = 363, + OpGroupNonUniformLogicalXor = 364, + OpGroupNonUniformQuadBroadcast = 365, + OpGroupNonUniformQuadSwap = 366, + OpSubgroupBallotKHR = 4421, + OpSubgroupFirstInvocationKHR = 4422, + OpSubgroupAllKHR = 4428, + OpSubgroupAnyKHR = 4429, + OpSubgroupAllEqualKHR = 4430, + OpSubgroupReadInvocationKHR = 4432, + OpGroupIAddNonUniformAMD = 5000, + OpGroupFAddNonUniformAMD = 5001, + OpGroupFMinNonUniformAMD = 5002, + OpGroupUMinNonUniformAMD = 5003, + OpGroupSMinNonUniformAMD = 5004, + OpGroupFMaxNonUniformAMD = 5005, + OpGroupUMaxNonUniformAMD = 
5006, + OpGroupSMaxNonUniformAMD = 5007, + OpFragmentMaskFetchAMD = 5011, + OpFragmentFetchAMD = 5012, + OpImageSampleFootprintNV = 5283, + OpGroupNonUniformPartitionNV = 5296, + OpWritePackedPrimitiveIndices4x8NV = 5299, + OpReportIntersectionNV = 5334, + OpIgnoreIntersectionNV = 5335, + OpTerminateRayNV = 5336, + OpTraceNV = 5337, + OpTypeAccelerationStructureNV = 5341, + OpExecuteCallableNV = 5344, + OpSubgroupShuffleINTEL = 5571, + OpSubgroupShuffleDownINTEL = 5572, + OpSubgroupShuffleUpINTEL = 5573, + OpSubgroupShuffleXorINTEL = 5574, + OpSubgroupBlockReadINTEL = 5575, + OpSubgroupBlockWriteINTEL = 5576, + OpSubgroupImageBlockReadINTEL = 5577, + OpSubgroupImageBlockWriteINTEL = 5578, + OpSubgroupImageMediaBlockReadINTEL = 5580, + OpSubgroupImageMediaBlockWriteINTEL = 5581, + OpDecorateStringGOOGLE = 5632, + OpMemberDecorateStringGOOGLE = 5633, + OpMax = 2147483647, + } + } + pub mod std { + #[allow(unused_imports)] + use self::super::super::root; + pub type string = [u64; 4usize]; + } + pub mod __gnu_cxx { + #[allow(unused_imports)] + use self::super::super::root; + } + pub type __uint8_t = ::std::os::raw::c_uchar; + pub type __int32_t = ::std::os::raw::c_int; + pub type __uint32_t = ::std::os::raw::c_uint; + pub mod SPIRV_CROSS_NAMESPACE { + #[allow(unused_imports)] + use self::super::super::root; + #[repr(u32)] + #[derive(Debug, Copy, Clone, PartialEq, Eq, Hash)] + pub enum SPIRType_BaseType { + Unknown = 0, + Void = 1, + Boolean = 2, + SByte = 3, + UByte = 4, + Short = 5, + UShort = 6, + Int = 7, + UInt = 8, + Int64 = 9, + UInt64 = 10, + AtomicCounter = 11, + Half = 12, + Float = 13, + Double = 14, + Struct = 15, + Image = 16, + SampledImage = 17, + Sampler = 18, + AccelerationStructureNV = 19, + ControlPointArray = 20, + Char = 21, + } + #[repr(C)] + #[derive(Debug, Copy)] + pub struct Resource { + pub id: u32, + pub type_id: u32, + pub base_type_id: u32, + pub name: root::std::string, + } + impl Clone for Resource { + fn clone(&self) -> Self { *self } + } + #[repr(u32)] + #[derive(Debug, Copy, Clone, PartialEq, Eq, Hash)] + pub enum MSLVertexFormat { + MSL_VERTEX_FORMAT_OTHER = 0, + MSL_VERTEX_FORMAT_UINT8 = 1, + MSL_VERTEX_FORMAT_UINT16 = 2, + MSL_VERTEX_FORMAT_INT_MAX = 2147483647, + } + #[repr(C)] + #[derive(Debug, Copy)] + pub struct MSLVertexAttr { + pub location: u32, + pub msl_buffer: u32, + pub msl_offset: u32, + pub msl_stride: u32, + pub per_instance: bool, + pub format: root::SPIRV_CROSS_NAMESPACE::MSLVertexFormat, + pub builtin: root::spv::BuiltIn, + } + impl Clone for MSLVertexAttr { + fn clone(&self) -> Self { *self } + } + #[repr(C)] + #[derive(Debug, Copy)] + pub struct MSLResourceBinding { + pub stage: root::spv::ExecutionModel, + pub desc_set: u32, + pub binding: u32, + pub msl_buffer: u32, + pub msl_texture: u32, + pub msl_sampler: u32, + } + impl Clone for MSLResourceBinding { + fn clone(&self) -> Self { *self } + } + #[repr(u32)] + #[derive(Debug, Copy, Clone, PartialEq, Eq, Hash)] + pub enum MSLSamplerCoord { + MSL_SAMPLER_COORD_NORMALIZED = 0, + MSL_SAMPLER_COORD_PIXEL = 1, + MSL_SAMPLER_INT_MAX = 2147483647, + } + #[repr(u32)] + #[derive(Debug, Copy, Clone, PartialEq, Eq, Hash)] + pub enum MSLSamplerFilter { + MSL_SAMPLER_FILTER_NEAREST = 0, + MSL_SAMPLER_FILTER_LINEAR = 1, + MSL_SAMPLER_FILTER_INT_MAX = 2147483647, + } + #[repr(u32)] + #[derive(Debug, Copy, Clone, PartialEq, Eq, Hash)] + pub enum MSLSamplerMipFilter { + MSL_SAMPLER_MIP_FILTER_NONE = 0, + MSL_SAMPLER_MIP_FILTER_NEAREST = 1, + MSL_SAMPLER_MIP_FILTER_LINEAR = 2, + 
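+            // The *_INT_MAX = 2147483647 sentinel that follows exists in the
+            // C++ headers to force a 32-bit underlying type; bindgen carries
+            // it through so this #[repr(u32)] enum stays ABI-compatible with
+            // the C++ side.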
MSL_SAMPLER_MIP_FILTER_INT_MAX = 2147483647, + } + #[repr(u32)] + #[derive(Debug, Copy, Clone, PartialEq, Eq, Hash)] + pub enum MSLSamplerAddress { + MSL_SAMPLER_ADDRESS_CLAMP_TO_ZERO = 0, + MSL_SAMPLER_ADDRESS_CLAMP_TO_EDGE = 1, + MSL_SAMPLER_ADDRESS_CLAMP_TO_BORDER = 2, + MSL_SAMPLER_ADDRESS_REPEAT = 3, + MSL_SAMPLER_ADDRESS_MIRRORED_REPEAT = 4, + MSL_SAMPLER_ADDRESS_INT_MAX = 2147483647, + } + #[repr(u32)] + #[derive(Debug, Copy, Clone, PartialEq, Eq, Hash)] + pub enum MSLSamplerCompareFunc { + MSL_SAMPLER_COMPARE_FUNC_NEVER = 0, + MSL_SAMPLER_COMPARE_FUNC_LESS = 1, + MSL_SAMPLER_COMPARE_FUNC_LESS_EQUAL = 2, + MSL_SAMPLER_COMPARE_FUNC_GREATER = 3, + MSL_SAMPLER_COMPARE_FUNC_GREATER_EQUAL = 4, + MSL_SAMPLER_COMPARE_FUNC_EQUAL = 5, + MSL_SAMPLER_COMPARE_FUNC_NOT_EQUAL = 6, + MSL_SAMPLER_COMPARE_FUNC_ALWAYS = 7, + MSL_SAMPLER_COMPARE_FUNC_INT_MAX = 2147483647, + } + #[repr(u32)] + #[derive(Debug, Copy, Clone, PartialEq, Eq, Hash)] + pub enum MSLSamplerBorderColor { + MSL_SAMPLER_BORDER_COLOR_TRANSPARENT_BLACK = 0, + MSL_SAMPLER_BORDER_COLOR_OPAQUE_BLACK = 1, + MSL_SAMPLER_BORDER_COLOR_OPAQUE_WHITE = 2, + MSL_SAMPLER_BORDER_COLOR_INT_MAX = 2147483647, + } + #[repr(C)] + #[derive(Debug, Copy)] + pub struct MSLConstexprSampler { + pub coord: root::SPIRV_CROSS_NAMESPACE::MSLSamplerCoord, + pub min_filter: root::SPIRV_CROSS_NAMESPACE::MSLSamplerFilter, + pub mag_filter: root::SPIRV_CROSS_NAMESPACE::MSLSamplerFilter, + pub mip_filter: root::SPIRV_CROSS_NAMESPACE::MSLSamplerMipFilter, + pub s_address: root::SPIRV_CROSS_NAMESPACE::MSLSamplerAddress, + pub t_address: root::SPIRV_CROSS_NAMESPACE::MSLSamplerAddress, + pub r_address: root::SPIRV_CROSS_NAMESPACE::MSLSamplerAddress, + pub compare_func: root::SPIRV_CROSS_NAMESPACE::MSLSamplerCompareFunc, + pub border_color: root::SPIRV_CROSS_NAMESPACE::MSLSamplerBorderColor, + pub lod_clamp_min: f32, + pub lod_clamp_max: f32, + pub max_anisotropy: ::std::os::raw::c_int, + pub compare_enable: bool, + pub lod_clamp_enable: bool, + pub anisotropy_enable: bool, + } + impl Clone for MSLConstexprSampler { + fn clone(&self) -> Self { *self } + } + } + pub type ScInternalCompilerBase = ::std::os::raw::c_void; + pub type ScInternalCompilerHlsl = ::std::os::raw::c_void; + pub type ScInternalCompilerMsl = ::std::os::raw::c_void; + pub type ScInternalCompilerGlsl = ::std::os::raw::c_void; + #[repr(u32)] + #[derive(Debug, Copy, Clone, PartialEq, Eq, Hash)] + pub enum ScInternalResult { + Success = 0, + Unhandled = 1, + CompilationError = 2, + } + #[repr(C)] + #[derive(Debug, Copy)] + pub struct ScEntryPoint { + pub name: *mut ::std::os::raw::c_char, + pub execution_model: root::spv::ExecutionModel, + pub work_group_size_x: u32, + pub work_group_size_y: u32, + pub work_group_size_z: u32, + } + impl Clone for ScEntryPoint { + fn clone(&self) -> Self { *self } + } + #[repr(C)] + #[derive(Debug, Copy)] + pub struct ScBufferRange { + pub index: ::std::os::raw::c_uint, + pub offset: usize, + pub range: usize, + } + impl Clone for ScBufferRange { + fn clone(&self) -> Self { *self } + } + #[repr(C)] + #[derive(Debug, Copy)] + pub struct ScCombinedImageSampler { + pub combined_id: u32, + pub image_id: u32, + pub sampler_id: u32, + } + impl Clone for ScCombinedImageSampler { + fn clone(&self) -> Self { *self } + } + #[repr(C)] + #[derive(Debug, Copy)] + pub struct ScHlslRootConstant { + pub start: u32, + pub end: u32, + pub binding: u32, + pub space: u32, + } + impl Clone for ScHlslRootConstant { + fn clone(&self) -> Self { *self } + } + #[repr(C)] + #[derive(Debug, Copy)] + 
pub struct ScHlslCompilerOptions { + pub shader_model: i32, + pub point_size_compat: bool, + pub point_coord_compat: bool, + pub vertex_transform_clip_space: bool, + pub vertex_invert_y: bool, + } + impl Clone for ScHlslCompilerOptions { + fn clone(&self) -> Self { *self } + } + #[repr(C)] + #[derive(Debug, Copy)] + pub struct ScMslCompilerOptions { + pub vertex_transform_clip_space: bool, + pub vertex_invert_y: bool, + pub platform: u8, + pub version: u32, + pub enable_point_size_builtin: bool, + pub disable_rasterization: bool, + pub swizzle_buffer_index: u32, + pub indirect_params_buffer_index: u32, + pub shader_output_buffer_index: u32, + pub shader_patch_output_buffer_index: u32, + pub shader_tess_factor_buffer_index: u32, + pub buffer_size_buffer_index: u32, + pub capture_output_to_buffer: bool, + pub swizzle_texture_samples: bool, + pub tess_domain_origin_lower_left: bool, + pub argument_buffers: bool, + pub pad_fragment_output_components: bool, + } + impl Clone for ScMslCompilerOptions { + fn clone(&self) -> Self { *self } + } + #[repr(C)] + #[derive(Debug, Copy)] + pub struct ScGlslCompilerOptions { + pub vertex_transform_clip_space: bool, + pub vertex_invert_y: bool, + pub version: u32, + pub es: bool, + } + impl Clone for ScGlslCompilerOptions { + fn clone(&self) -> Self { *self } + } + #[repr(C)] + #[derive(Debug, Copy)] + pub struct ScResource { + pub id: u32, + pub type_id: u32, + pub base_type_id: u32, + pub name: *mut ::std::os::raw::c_char, + } + impl Clone for ScResource { + fn clone(&self) -> Self { *self } + } + #[repr(C)] + #[derive(Debug, Copy)] + pub struct ScResourceArray { + pub data: *mut root::ScResource, + pub num: usize, + } + impl Clone for ScResourceArray { + fn clone(&self) -> Self { *self } + } + #[repr(C)] + #[derive(Debug, Copy)] + pub struct ScShaderResources { + pub uniform_buffers: root::ScResourceArray, + pub storage_buffers: root::ScResourceArray, + pub stage_inputs: root::ScResourceArray, + pub stage_outputs: root::ScResourceArray, + pub subpass_inputs: root::ScResourceArray, + pub storage_images: root::ScResourceArray, + pub sampled_images: root::ScResourceArray, + pub atomic_counters: root::ScResourceArray, + pub push_constant_buffers: root::ScResourceArray, + pub separate_images: root::ScResourceArray, + pub separate_samplers: root::ScResourceArray, + } + impl Clone for ScShaderResources { + fn clone(&self) -> Self { *self } + } + #[repr(C)] + #[derive(Debug, Copy)] + pub struct ScSpecializationConstant { + pub id: u32, + pub constant_id: u32, + } + impl Clone for ScSpecializationConstant { + fn clone(&self) -> Self { *self } + } + #[repr(C)] + #[derive(Debug, Copy)] + pub struct ScType { + pub type_: root::SPIRV_CROSS_NAMESPACE::SPIRType_BaseType, + pub member_types: *mut u32, + pub member_types_size: usize, + pub array: *mut u32, + pub array_size: usize, + } + impl Clone for ScType { + fn clone(&self) -> Self { *self } + } + extern "C" { + pub fn sc_internal_get_latest_exception_message(message: + *mut *const ::std::os::raw::c_char) + -> root::ScInternalResult; + } + extern "C" { + pub fn sc_internal_compiler_hlsl_new(compiler: + *mut *mut root::ScInternalCompilerHlsl, + ir: *const u32, size: usize) + -> root::ScInternalResult; + } + extern "C" { + pub fn sc_internal_compiler_hlsl_set_options(compiler: + *const root::ScInternalCompilerHlsl, + options: + *const root::ScHlslCompilerOptions) + -> root::ScInternalResult; + } + extern "C" { + pub fn sc_internal_compiler_hlsl_set_root_constant_layout(compiler: + *const root::ScInternalCompilerHlsl, + 
constants: + *const root::ScHlslRootConstant, + count: + usize) + -> root::ScInternalResult; + } + #[repr(C)] + #[derive(Debug, Copy)] + pub struct MslConstSamplerMapping { + pub desc_set: u32, + pub binding: u32, + pub sampler: root::SPIRV_CROSS_NAMESPACE::MSLConstexprSampler, + } + impl Clone for MslConstSamplerMapping { + fn clone(&self) -> Self { *self } + } + extern "C" { + pub fn sc_internal_compiler_msl_new(compiler: + *mut *mut root::ScInternalCompilerMsl, + ir: *const u32, size: usize) + -> root::ScInternalResult; + } + extern "C" { + pub fn sc_internal_compiler_msl_set_options(compiler: + *const root::ScInternalCompilerMsl, + options: + *const root::ScMslCompilerOptions) + -> root::ScInternalResult; + } + extern "C" { + pub fn sc_internal_compiler_msl_get_is_rasterization_disabled(compiler: + *const root::ScInternalCompilerMsl, + is_rasterization_disabled: + *mut bool) + -> root::ScInternalResult; + } + extern "C" { + pub fn sc_internal_compiler_msl_compile(compiler: + *const root::ScInternalCompilerBase, + shader: + *mut *const ::std::os::raw::c_char, + p_vat_overrides: + *const root::SPIRV_CROSS_NAMESPACE::MSLVertexAttr, + vat_override_count: usize, + p_res_overrides: + *const root::SPIRV_CROSS_NAMESPACE::MSLResourceBinding, + res_override_count: usize, + p_const_samplers: + *const root::MslConstSamplerMapping, + const_sampler_count: usize) + -> root::ScInternalResult; + } + extern "C" { + pub fn sc_internal_compiler_glsl_new(compiler: + *mut *mut root::ScInternalCompilerGlsl, + ir: *const u32, size: usize) + -> root::ScInternalResult; + } + extern "C" { + pub fn sc_internal_compiler_glsl_set_options(compiler: + *const root::ScInternalCompilerGlsl, + options: + *const root::ScGlslCompilerOptions) + -> root::ScInternalResult; + } + extern "C" { + pub fn sc_internal_compiler_glsl_build_combined_image_samplers(compiler: + *const root::ScInternalCompilerBase) + -> root::ScInternalResult; + } + extern "C" { + pub fn sc_internal_compiler_glsl_get_combined_image_samplers(compiler: + *const root::ScInternalCompilerBase, + samplers: + *mut *const root::ScCombinedImageSampler, + size: + *mut usize) + -> root::ScInternalResult; + } + extern "C" { + pub fn sc_internal_compiler_get_decoration(compiler: + *const root::ScInternalCompilerBase, + result: *mut u32, id: u32, + decoration: + root::spv::Decoration) + -> root::ScInternalResult; + } + extern "C" { + pub fn sc_internal_compiler_set_decoration(compiler: + *const root::ScInternalCompilerBase, + id: u32, + decoration: + root::spv::Decoration, + argument: u32) + -> root::ScInternalResult; + } + extern "C" { + pub fn sc_internal_compiler_unset_decoration(compiler: + *const root::ScInternalCompilerBase, + id: u32, + decoration: + root::spv::Decoration) + -> root::ScInternalResult; + } + extern "C" { + pub fn sc_internal_compiler_get_name(compiler: + *const root::ScInternalCompilerBase, + id: u32, + name: + *mut *const ::std::os::raw::c_char) + -> root::ScInternalResult; + } + extern "C" { + pub fn sc_internal_compiler_set_name(compiler: + *const root::ScInternalCompilerBase, + id: u32, + name: + *const ::std::os::raw::c_char) + -> root::ScInternalResult; + } + extern "C" { + pub fn sc_internal_compiler_get_entry_points(compiler: + *const root::ScInternalCompilerBase, + entry_points: + *mut *mut root::ScEntryPoint, + size: *mut usize) + -> root::ScInternalResult; + } + extern "C" { + pub fn sc_internal_compiler_get_active_buffer_ranges(compiler: + *const root::ScInternalCompilerBase, + id: u32, + active_buffer_ranges: + *mut *mut 
root::ScBufferRange, + size: *mut usize) + -> root::ScInternalResult; + } + extern "C" { + pub fn sc_internal_compiler_get_cleansed_entry_point_name(compiler: + *const root::ScInternalCompilerBase, + original_entry_point_name: + *const ::std::os::raw::c_char, + execution_model: + root::spv::ExecutionModel, + compiled_entry_point_name: + *mut *const ::std::os::raw::c_char) + -> root::ScInternalResult; + } + extern "C" { + pub fn sc_internal_compiler_get_shader_resources(compiler: + *const root::ScInternalCompilerBase, + shader_resources: + *mut root::ScShaderResources) + -> root::ScInternalResult; + } + extern "C" { + pub fn sc_internal_compiler_get_specialization_constants(compiler: + *const root::ScInternalCompilerBase, + constants: + *mut *mut root::ScSpecializationConstant, + size: + *mut usize) + -> root::ScInternalResult; + } + extern "C" { + pub fn sc_internal_compiler_set_scalar_constant(compiler: + *const root::ScInternalCompilerBase, + id: u32, + constant_high_bits: + u32, + constant_low_bits: + u32) + -> root::ScInternalResult; + } + extern "C" { + pub fn sc_internal_compiler_get_type(compiler: + *const root::ScInternalCompilerBase, + id: u32, + spirv_type: + *mut *const root::ScType) + -> root::ScInternalResult; + } + extern "C" { + pub fn sc_internal_compiler_get_member_name(compiler: + *const root::ScInternalCompilerBase, + id: u32, index: u32, + name: + *mut *const ::std::os::raw::c_char) + -> root::ScInternalResult; + } + extern "C" { + pub fn sc_internal_compiler_get_member_decoration(compiler: + *const root::ScInternalCompilerBase, + id: u32, index: u32, + decoration: + root::spv::Decoration, + result: *mut u32) + -> root::ScInternalResult; + } + extern "C" { + pub fn sc_internal_compiler_set_member_decoration(compiler: + *const root::ScInternalCompilerBase, + id: u32, index: u32, + decoration: + root::spv::Decoration, + argument: u32) + -> root::ScInternalResult; + } + extern "C" { + pub fn sc_internal_compiler_get_declared_struct_size(compiler: + *const root::ScInternalCompilerBase, + id: u32, + result: *mut u32) + -> root::ScInternalResult; + } + extern "C" { + pub fn sc_internal_compiler_get_declared_struct_member_size(compiler: + *const root::ScInternalCompilerBase, + id: u32, + index: + u32, + result: + *mut u32) + -> root::ScInternalResult; + } + extern "C" { + pub fn sc_internal_compiler_rename_interface_variable(compiler: + *const root::ScInternalCompilerBase, + resources: + *const root::ScResource, + resources_size: + usize, + location: u32, + name: + *const ::std::os::raw::c_char) + -> root::ScInternalResult; + } + extern "C" { + pub fn sc_internal_compiler_get_work_group_size_specialization_constants(compiler: + *const root::ScInternalCompilerBase, + constants: + *mut *mut root::ScSpecializationConstant) + -> root::ScInternalResult; + } + extern "C" { + pub fn sc_internal_compiler_compile(compiler: + *const root::ScInternalCompilerBase, + shader: + *mut *const ::std::os::raw::c_char) + -> root::ScInternalResult; + } + extern "C" { + pub fn sc_internal_compiler_delete(compiler: + *mut root::ScInternalCompilerBase) + -> root::ScInternalResult; + } + extern "C" { + pub fn sc_internal_free_pointer(pointer: *mut ::std::os::raw::c_void) + -> root::ScInternalResult; + } +}
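The `sc_internal_*` declarations above close out bindings_native.rs: together they are the raw C ABI that the `spirv_cross` crate wraps, with one constructor per target language, option setters, a shared compile entry point, and explicit release functions for every pointer the C++ side hands back. As a rough orientation sketch only (`spirv_to_glsl` is a hypothetical helper, not part of this patch; it assumes the generated `root` module is in scope and that `ir` holds a valid SPIR-V word stream), the GLSL path can be driven like this:

unsafe fn spirv_to_glsl(ir: &[u32]) -> Result<String, root::ScInternalResult> {
    use std::ffi::CStr;
    use std::os::raw::{c_char, c_void};
    use std::ptr;

    // Create a GLSL compiler from the SPIR-V words. All compiler handles are
    // `c_void` aliases, so the same pointer feeds the `_base` entry points.
    let mut compiler: *mut root::ScInternalCompilerGlsl = ptr::null_mut();
    let res = root::sc_internal_compiler_glsl_new(&mut compiler, ir.as_ptr(), ir.len());
    if res != root::ScInternalResult::Success {
        return Err(res);
    }
    let options = root::ScGlslCompilerOptions {
        vertex_transform_clip_space: false,
        vertex_invert_y: false,
        version: 450, // target GLSL version, chosen here purely for illustration
        es: false,
    };
    root::sc_internal_compiler_glsl_set_options(compiler, &options);
    let mut shader: *const c_char = ptr::null();
    let res = root::sc_internal_compiler_compile(compiler, &mut shader);
    let out = if res == root::ScInternalResult::Success {
        let glsl = CStr::from_ptr(shader).to_string_lossy().into_owned();
        // The emitted string is owned by the C++ side; release it through the ABI.
        root::sc_internal_free_pointer(shader as *mut c_void);
        Ok(glsl)
    } else {
        // On failure, sc_internal_get_latest_exception_message can retrieve the
        // underlying C++ exception text (elided here for brevity).
        Err(res)
    };
    root::sc_internal_compiler_delete(compiler);
    out
}

The `hlsl` and `msl` constructors follow the same shape; `sc_internal_compiler_msl_compile` additionally takes the vertex-attribute, resource-binding, and constexpr-sampler override arrays declared above.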
diff --git a/third_party/rust/spirv_cross/src/bindings_wasm.rs b/third_party/rust/spirv_cross/src/bindings_wasm.rs new file mode 100644 index 000000000000..b32aabdd38e7 --- /dev/null +++ b/third_party/rust/spirv_cross/src/bindings_wasm.rs @@ -0,0 +1,1862 @@
+ + +#[allow(non_snake_case, non_camel_case_types, non_upper_case_globals)] +pub mod root { + #[allow(unused_imports)] + use self::super::root; + pub mod spv { + #[allow(unused_imports)] + use self::super::super::root; + pub type Id = ::std::os::raw::c_uint; + #[repr(u32)] + #[derive(Debug, Copy, Clone, PartialEq, Eq, Hash)] + pub enum SourceLanguage { + SourceLanguageUnknown = 0, + SourceLanguageESSL = 1, + SourceLanguageGLSL = 2, + SourceLanguageOpenCL_C = 3, + SourceLanguageOpenCL_CPP = 4, + SourceLanguageHLSL = 5, + SourceLanguageMax = 2147483647, + } + #[repr(u32)] + #[derive(Debug, Copy, Clone, PartialEq, Eq, Hash)] + pub enum ExecutionModel { + ExecutionModelVertex = 0, + ExecutionModelTessellationControl = 1, + ExecutionModelTessellationEvaluation = 2, + ExecutionModelGeometry = 3, + ExecutionModelFragment = 4, + ExecutionModelGLCompute = 5, + ExecutionModelKernel = 6, + ExecutionModelTaskNV = 5267, + ExecutionModelMeshNV = 5268, + ExecutionModelRayGenerationNV = 5313, + ExecutionModelIntersectionNV = 5314, + ExecutionModelAnyHitNV = 5315, + ExecutionModelClosestHitNV = 5316, + ExecutionModelMissNV = 5317, + ExecutionModelCallableNV = 5318, + ExecutionModelMax = 2147483647, + } + #[repr(u32)] + #[derive(Debug, Copy, Clone, PartialEq, Eq, Hash)] + pub enum AddressingModel { + AddressingModelLogical = 0, + AddressingModelPhysical32 = 1, + AddressingModelPhysical64 = 2, + AddressingModelPhysicalStorageBuffer64EXT = 5348, + AddressingModelMax = 2147483647, + } + #[repr(u32)] + #[derive(Debug, Copy, Clone, PartialEq, Eq, Hash)] + pub enum MemoryModel { + MemoryModelSimple = 0, + MemoryModelGLSL450 = 1, + MemoryModelOpenCL = 2, + MemoryModelVulkanKHR = 3, + MemoryModelMax = 2147483647, + } + #[repr(u32)] + #[derive(Debug, Copy, Clone, PartialEq, Eq, Hash)] + pub enum ExecutionMode { + ExecutionModeInvocations = 0, + ExecutionModeSpacingEqual = 1, + ExecutionModeSpacingFractionalEven = 2, + ExecutionModeSpacingFractionalOdd = 3, + ExecutionModeVertexOrderCw = 4, + ExecutionModeVertexOrderCcw = 5, + ExecutionModePixelCenterInteger = 6, + ExecutionModeOriginUpperLeft = 7, + ExecutionModeOriginLowerLeft = 8, + ExecutionModeEarlyFragmentTests = 9, + ExecutionModePointMode = 10, + ExecutionModeXfb = 11, + ExecutionModeDepthReplacing = 12, + ExecutionModeDepthGreater = 14, + ExecutionModeDepthLess = 15, + ExecutionModeDepthUnchanged = 16, + ExecutionModeLocalSize = 17, + ExecutionModeLocalSizeHint = 18, + ExecutionModeInputPoints = 19, + ExecutionModeInputLines = 20, + ExecutionModeInputLinesAdjacency = 21, + ExecutionModeTriangles = 22, + ExecutionModeInputTrianglesAdjacency = 23, + ExecutionModeQuads = 24, + ExecutionModeIsolines = 25, + ExecutionModeOutputVertices = 26, + ExecutionModeOutputPoints = 27, + ExecutionModeOutputLineStrip = 28, + ExecutionModeOutputTriangleStrip = 29, + ExecutionModeVecTypeHint = 30, + ExecutionModeContractionOff = 31, + ExecutionModeInitializer = 33, + ExecutionModeFinalizer = 34, + ExecutionModeSubgroupSize = 35, + ExecutionModeSubgroupsPerWorkgroup = 36, + ExecutionModeSubgroupsPerWorkgroupId = 37, + ExecutionModeLocalSizeId = 38, + ExecutionModeLocalSizeHintId = 39, + ExecutionModePostDepthCoverage = 4446, + ExecutionModeDenormPreserve = 4459, + ExecutionModeDenormFlushToZero = 4460, + ExecutionModeSignedZeroInfNanPreserve = 4461, + ExecutionModeRoundingModeRTE = 4462, + ExecutionModeRoundingModeRTZ = 4463, + ExecutionModeStencilRefReplacingEXT = 5027, + ExecutionModeOutputLinesNV = 5269, + ExecutionModeOutputPrimitivesNV = 5270, + ExecutionModeDerivativeGroupQuadsNV = 5289, +
ExecutionModeDerivativeGroupLinearNV = 5290, + ExecutionModeOutputTrianglesNV = 5298, + ExecutionModeMax = 2147483647, + } + #[repr(u32)] + #[derive(Debug, Copy, Clone, PartialEq, Eq, Hash)] + pub enum StorageClass { + StorageClassUniformConstant = 0, + StorageClassInput = 1, + StorageClassUniform = 2, + StorageClassOutput = 3, + StorageClassWorkgroup = 4, + StorageClassCrossWorkgroup = 5, + StorageClassPrivate = 6, + StorageClassFunction = 7, + StorageClassGeneric = 8, + StorageClassPushConstant = 9, + StorageClassAtomicCounter = 10, + StorageClassImage = 11, + StorageClassStorageBuffer = 12, + StorageClassCallableDataNV = 5328, + StorageClassIncomingCallableDataNV = 5329, + StorageClassRayPayloadNV = 5338, + StorageClassHitAttributeNV = 5339, + StorageClassIncomingRayPayloadNV = 5342, + StorageClassShaderRecordBufferNV = 5343, + StorageClassPhysicalStorageBufferEXT = 5349, + StorageClassMax = 2147483647, + } + #[repr(u32)] + #[derive(Debug, Copy, Clone, PartialEq, Eq, Hash)] + pub enum Dim { + Dim1D = 0, + Dim2D = 1, + Dim3D = 2, + DimCube = 3, + DimRect = 4, + DimBuffer = 5, + DimSubpassData = 6, + DimMax = 2147483647, + } + #[repr(u32)] + #[derive(Debug, Copy, Clone, PartialEq, Eq, Hash)] + pub enum SamplerAddressingMode { + SamplerAddressingModeNone = 0, + SamplerAddressingModeClampToEdge = 1, + SamplerAddressingModeClamp = 2, + SamplerAddressingModeRepeat = 3, + SamplerAddressingModeRepeatMirrored = 4, + SamplerAddressingModeMax = 2147483647, + } + #[repr(u32)] + #[derive(Debug, Copy, Clone, PartialEq, Eq, Hash)] + pub enum SamplerFilterMode { + SamplerFilterModeNearest = 0, + SamplerFilterModeLinear = 1, + SamplerFilterModeMax = 2147483647, + } + #[repr(u32)] + #[derive(Debug, Copy, Clone, PartialEq, Eq, Hash)] + pub enum ImageFormat { + ImageFormatUnknown = 0, + ImageFormatRgba32f = 1, + ImageFormatRgba16f = 2, + ImageFormatR32f = 3, + ImageFormatRgba8 = 4, + ImageFormatRgba8Snorm = 5, + ImageFormatRg32f = 6, + ImageFormatRg16f = 7, + ImageFormatR11fG11fB10f = 8, + ImageFormatR16f = 9, + ImageFormatRgba16 = 10, + ImageFormatRgb10A2 = 11, + ImageFormatRg16 = 12, + ImageFormatRg8 = 13, + ImageFormatR16 = 14, + ImageFormatR8 = 15, + ImageFormatRgba16Snorm = 16, + ImageFormatRg16Snorm = 17, + ImageFormatRg8Snorm = 18, + ImageFormatR16Snorm = 19, + ImageFormatR8Snorm = 20, + ImageFormatRgba32i = 21, + ImageFormatRgba16i = 22, + ImageFormatRgba8i = 23, + ImageFormatR32i = 24, + ImageFormatRg32i = 25, + ImageFormatRg16i = 26, + ImageFormatRg8i = 27, + ImageFormatR16i = 28, + ImageFormatR8i = 29, + ImageFormatRgba32ui = 30, + ImageFormatRgba16ui = 31, + ImageFormatRgba8ui = 32, + ImageFormatR32ui = 33, + ImageFormatRgb10a2ui = 34, + ImageFormatRg32ui = 35, + ImageFormatRg16ui = 36, + ImageFormatRg8ui = 37, + ImageFormatR16ui = 38, + ImageFormatR8ui = 39, + ImageFormatMax = 2147483647, + } + #[repr(u32)] + #[derive(Debug, Copy, Clone, PartialEq, Eq, Hash)] + pub enum ImageChannelOrder { + ImageChannelOrderR = 0, + ImageChannelOrderA = 1, + ImageChannelOrderRG = 2, + ImageChannelOrderRA = 3, + ImageChannelOrderRGB = 4, + ImageChannelOrderRGBA = 5, + ImageChannelOrderBGRA = 6, + ImageChannelOrderARGB = 7, + ImageChannelOrderIntensity = 8, + ImageChannelOrderLuminance = 9, + ImageChannelOrderRx = 10, + ImageChannelOrderRGx = 11, + ImageChannelOrderRGBx = 12, + ImageChannelOrderDepth = 13, + ImageChannelOrderDepthStencil = 14, + ImageChannelOrdersRGB = 15, + ImageChannelOrdersRGBx = 16, + ImageChannelOrdersRGBA = 17, + ImageChannelOrdersBGRA = 18, + ImageChannelOrderABGR = 19, + 
ImageChannelOrderMax = 2147483647, + } + #[repr(u32)] + #[derive(Debug, Copy, Clone, PartialEq, Eq, Hash)] + pub enum ImageChannelDataType { + ImageChannelDataTypeSnormInt8 = 0, + ImageChannelDataTypeSnormInt16 = 1, + ImageChannelDataTypeUnormInt8 = 2, + ImageChannelDataTypeUnormInt16 = 3, + ImageChannelDataTypeUnormShort565 = 4, + ImageChannelDataTypeUnormShort555 = 5, + ImageChannelDataTypeUnormInt101010 = 6, + ImageChannelDataTypeSignedInt8 = 7, + ImageChannelDataTypeSignedInt16 = 8, + ImageChannelDataTypeSignedInt32 = 9, + ImageChannelDataTypeUnsignedInt8 = 10, + ImageChannelDataTypeUnsignedInt16 = 11, + ImageChannelDataTypeUnsignedInt32 = 12, + ImageChannelDataTypeHalfFloat = 13, + ImageChannelDataTypeFloat = 14, + ImageChannelDataTypeUnormInt24 = 15, + ImageChannelDataTypeUnormInt101010_2 = 16, + ImageChannelDataTypeMax = 2147483647, + } + #[repr(u32)] + #[derive(Debug, Copy, Clone, PartialEq, Eq, Hash)] + pub enum ImageOperandsShift { + ImageOperandsBiasShift = 0, + ImageOperandsLodShift = 1, + ImageOperandsGradShift = 2, + ImageOperandsConstOffsetShift = 3, + ImageOperandsOffsetShift = 4, + ImageOperandsConstOffsetsShift = 5, + ImageOperandsSampleShift = 6, + ImageOperandsMinLodShift = 7, + ImageOperandsMakeTexelAvailableKHRShift = 8, + ImageOperandsMakeTexelVisibleKHRShift = 9, + ImageOperandsNonPrivateTexelKHRShift = 10, + ImageOperandsVolatileTexelKHRShift = 11, + ImageOperandsMax = 2147483647, + } + pub const ImageOperandsMask_ImageOperandsMaskNone: + root::spv::ImageOperandsMask = + ImageOperandsMask(0); + pub const ImageOperandsMask_ImageOperandsBiasMask: + root::spv::ImageOperandsMask = + ImageOperandsMask(1); + pub const ImageOperandsMask_ImageOperandsLodMask: + root::spv::ImageOperandsMask = + ImageOperandsMask(2); + pub const ImageOperandsMask_ImageOperandsGradMask: + root::spv::ImageOperandsMask = + ImageOperandsMask(4); + pub const ImageOperandsMask_ImageOperandsConstOffsetMask: + root::spv::ImageOperandsMask = + ImageOperandsMask(8); + pub const ImageOperandsMask_ImageOperandsOffsetMask: + root::spv::ImageOperandsMask = + ImageOperandsMask(16); + pub const ImageOperandsMask_ImageOperandsConstOffsetsMask: + root::spv::ImageOperandsMask = + ImageOperandsMask(32); + pub const ImageOperandsMask_ImageOperandsSampleMask: + root::spv::ImageOperandsMask = + ImageOperandsMask(64); + pub const ImageOperandsMask_ImageOperandsMinLodMask: + root::spv::ImageOperandsMask = + ImageOperandsMask(128); + pub const ImageOperandsMask_ImageOperandsMakeTexelAvailableKHRMask: + root::spv::ImageOperandsMask = + ImageOperandsMask(256); + pub const ImageOperandsMask_ImageOperandsMakeTexelVisibleKHRMask: + root::spv::ImageOperandsMask = + ImageOperandsMask(512); + pub const ImageOperandsMask_ImageOperandsNonPrivateTexelKHRMask: + root::spv::ImageOperandsMask = + ImageOperandsMask(1024); + pub const ImageOperandsMask_ImageOperandsVolatileTexelKHRMask: + root::spv::ImageOperandsMask = + ImageOperandsMask(2048); + impl ::std::ops::BitOr for + root::spv::ImageOperandsMask { + type + Output + = + Self; + #[inline] + fn bitor(self, other: Self) -> Self { + ImageOperandsMask(self.0 | other.0) + } + } + impl ::std::ops::BitOrAssign for root::spv::ImageOperandsMask { + #[inline] + fn bitor_assign(&mut self, rhs: root::spv::ImageOperandsMask) { + self.0 |= rhs.0; + } + } + impl ::std::ops::BitAnd for + root::spv::ImageOperandsMask { + type + Output + = + Self; + #[inline] + fn bitand(self, other: Self) -> Self { + ImageOperandsMask(self.0 & other.0) + } + } + impl ::std::ops::BitAndAssign for 
root::spv::ImageOperandsMask { + #[inline] + fn bitand_assign(&mut self, rhs: root::spv::ImageOperandsMask) { + self.0 &= rhs.0; + } + } + #[repr(C)] + #[derive(Debug, Copy, Clone, PartialEq, Eq, Hash)] + pub struct ImageOperandsMask(pub ::std::os::raw::c_uint); + #[repr(u32)] + #[derive(Debug, Copy, Clone, PartialEq, Eq, Hash)] + pub enum FPFastMathModeShift { + FPFastMathModeNotNaNShift = 0, + FPFastMathModeNotInfShift = 1, + FPFastMathModeNSZShift = 2, + FPFastMathModeAllowRecipShift = 3, + FPFastMathModeFastShift = 4, + FPFastMathModeMax = 2147483647, + } + pub const FPFastMathModeMask_FPFastMathModeMaskNone: + root::spv::FPFastMathModeMask = + FPFastMathModeMask(0); + pub const FPFastMathModeMask_FPFastMathModeNotNaNMask: + root::spv::FPFastMathModeMask = + FPFastMathModeMask(1); + pub const FPFastMathModeMask_FPFastMathModeNotInfMask: + root::spv::FPFastMathModeMask = + FPFastMathModeMask(2); + pub const FPFastMathModeMask_FPFastMathModeNSZMask: + root::spv::FPFastMathModeMask = + FPFastMathModeMask(4); + pub const FPFastMathModeMask_FPFastMathModeAllowRecipMask: + root::spv::FPFastMathModeMask = + FPFastMathModeMask(8); + pub const FPFastMathModeMask_FPFastMathModeFastMask: + root::spv::FPFastMathModeMask = + FPFastMathModeMask(16); + impl ::std::ops::BitOr for + root::spv::FPFastMathModeMask { + type + Output + = + Self; + #[inline] + fn bitor(self, other: Self) -> Self { + FPFastMathModeMask(self.0 | other.0) + } + } + impl ::std::ops::BitOrAssign for root::spv::FPFastMathModeMask { + #[inline] + fn bitor_assign(&mut self, rhs: root::spv::FPFastMathModeMask) { + self.0 |= rhs.0; + } + } + impl ::std::ops::BitAnd for + root::spv::FPFastMathModeMask { + type + Output + = + Self; + #[inline] + fn bitand(self, other: Self) -> Self { + FPFastMathModeMask(self.0 & other.0) + } + } + impl ::std::ops::BitAndAssign for root::spv::FPFastMathModeMask { + #[inline] + fn bitand_assign(&mut self, rhs: root::spv::FPFastMathModeMask) { + self.0 &= rhs.0; + } + } + #[repr(C)] + #[derive(Debug, Copy, Clone, PartialEq, Eq, Hash)] + pub struct FPFastMathModeMask(pub ::std::os::raw::c_uint); + #[repr(u32)] + #[derive(Debug, Copy, Clone, PartialEq, Eq, Hash)] + pub enum FPRoundingMode { + FPRoundingModeRTE = 0, + FPRoundingModeRTZ = 1, + FPRoundingModeRTP = 2, + FPRoundingModeRTN = 3, + FPRoundingModeMax = 2147483647, + } + #[repr(u32)] + #[derive(Debug, Copy, Clone, PartialEq, Eq, Hash)] + pub enum LinkageType { + LinkageTypeExport = 0, + LinkageTypeImport = 1, + LinkageTypeMax = 2147483647, + } + #[repr(u32)] + #[derive(Debug, Copy, Clone, PartialEq, Eq, Hash)] + pub enum AccessQualifier { + AccessQualifierReadOnly = 0, + AccessQualifierWriteOnly = 1, + AccessQualifierReadWrite = 2, + AccessQualifierMax = 2147483647, + } + #[repr(u32)] + #[derive(Debug, Copy, Clone, PartialEq, Eq, Hash)] + pub enum FunctionParameterAttribute { + FunctionParameterAttributeZext = 0, + FunctionParameterAttributeSext = 1, + FunctionParameterAttributeByVal = 2, + FunctionParameterAttributeSret = 3, + FunctionParameterAttributeNoAlias = 4, + FunctionParameterAttributeNoCapture = 5, + FunctionParameterAttributeNoWrite = 6, + FunctionParameterAttributeNoReadWrite = 7, + FunctionParameterAttributeMax = 2147483647, + } + #[repr(u32)] + #[derive(Debug, Copy, Clone, PartialEq, Eq, Hash)] + pub enum Decoration { + DecorationRelaxedPrecision = 0, + DecorationSpecId = 1, + DecorationBlock = 2, + DecorationBufferBlock = 3, + DecorationRowMajor = 4, + DecorationColMajor = 5, + DecorationArrayStride = 6, + DecorationMatrixStride = 7, 
+ DecorationGLSLShared = 8, + DecorationGLSLPacked = 9, + DecorationCPacked = 10, + DecorationBuiltIn = 11, + DecorationNoPerspective = 13, + DecorationFlat = 14, + DecorationPatch = 15, + DecorationCentroid = 16, + DecorationSample = 17, + DecorationInvariant = 18, + DecorationRestrict = 19, + DecorationAliased = 20, + DecorationVolatile = 21, + DecorationConstant = 22, + DecorationCoherent = 23, + DecorationNonWritable = 24, + DecorationNonReadable = 25, + DecorationUniform = 26, + DecorationSaturatedConversion = 28, + DecorationStream = 29, + DecorationLocation = 30, + DecorationComponent = 31, + DecorationIndex = 32, + DecorationBinding = 33, + DecorationDescriptorSet = 34, + DecorationOffset = 35, + DecorationXfbBuffer = 36, + DecorationXfbStride = 37, + DecorationFuncParamAttr = 38, + DecorationFPRoundingMode = 39, + DecorationFPFastMathMode = 40, + DecorationLinkageAttributes = 41, + DecorationNoContraction = 42, + DecorationInputAttachmentIndex = 43, + DecorationAlignment = 44, + DecorationMaxByteOffset = 45, + DecorationAlignmentId = 46, + DecorationMaxByteOffsetId = 47, + DecorationNoSignedWrap = 4469, + DecorationNoUnsignedWrap = 4470, + DecorationExplicitInterpAMD = 4999, + DecorationOverrideCoverageNV = 5248, + DecorationPassthroughNV = 5250, + DecorationViewportRelativeNV = 5252, + DecorationSecondaryViewportRelativeNV = 5256, + DecorationPerPrimitiveNV = 5271, + DecorationPerViewNV = 5272, + DecorationPerTaskNV = 5273, + DecorationPerVertexNV = 5285, + DecorationNonUniformEXT = 5300, + DecorationRestrictPointerEXT = 5355, + DecorationAliasedPointerEXT = 5356, + DecorationHlslCounterBufferGOOGLE = 5634, + DecorationHlslSemanticGOOGLE = 5635, + DecorationMax = 2147483647, + } + pub const BuiltIn_BuiltInSubgroupEqMaskKHR: root::spv::BuiltIn = + BuiltIn::BuiltInSubgroupEqMask; + pub const BuiltIn_BuiltInSubgroupGeMaskKHR: root::spv::BuiltIn = + BuiltIn::BuiltInSubgroupGeMask; + pub const BuiltIn_BuiltInSubgroupGtMaskKHR: root::spv::BuiltIn = + BuiltIn::BuiltInSubgroupGtMask; + pub const BuiltIn_BuiltInSubgroupLeMaskKHR: root::spv::BuiltIn = + BuiltIn::BuiltInSubgroupLeMask; + pub const BuiltIn_BuiltInSubgroupLtMaskKHR: root::spv::BuiltIn = + BuiltIn::BuiltInSubgroupLtMask; + pub const BuiltIn_BuiltInFragmentSizeNV: root::spv::BuiltIn = + BuiltIn::BuiltInFragSizeEXT; + pub const BuiltIn_BuiltInInvocationsPerPixelNV: root::spv::BuiltIn = + BuiltIn::BuiltInFragInvocationCountEXT; + #[repr(u32)] + #[derive(Debug, Copy, Clone, PartialEq, Eq, Hash)] + pub enum BuiltIn { + BuiltInPosition = 0, + BuiltInPointSize = 1, + BuiltInClipDistance = 3, + BuiltInCullDistance = 4, + BuiltInVertexId = 5, + BuiltInInstanceId = 6, + BuiltInPrimitiveId = 7, + BuiltInInvocationId = 8, + BuiltInLayer = 9, + BuiltInViewportIndex = 10, + BuiltInTessLevelOuter = 11, + BuiltInTessLevelInner = 12, + BuiltInTessCoord = 13, + BuiltInPatchVertices = 14, + BuiltInFragCoord = 15, + BuiltInPointCoord = 16, + BuiltInFrontFacing = 17, + BuiltInSampleId = 18, + BuiltInSamplePosition = 19, + BuiltInSampleMask = 20, + BuiltInFragDepth = 22, + BuiltInHelperInvocation = 23, + BuiltInNumWorkgroups = 24, + BuiltInWorkgroupSize = 25, + BuiltInWorkgroupId = 26, + BuiltInLocalInvocationId = 27, + BuiltInGlobalInvocationId = 28, + BuiltInLocalInvocationIndex = 29, + BuiltInWorkDim = 30, + BuiltInGlobalSize = 31, + BuiltInEnqueuedWorkgroupSize = 32, + BuiltInGlobalOffset = 33, + BuiltInGlobalLinearId = 34, + BuiltInSubgroupSize = 36, + BuiltInSubgroupMaxSize = 37, + BuiltInNumSubgroups = 38, + BuiltInNumEnqueuedSubgroups = 39, 
+ BuiltInSubgroupId = 40, + BuiltInSubgroupLocalInvocationId = 41, + BuiltInVertexIndex = 42, + BuiltInInstanceIndex = 43, + BuiltInSubgroupEqMask = 4416, + BuiltInSubgroupGeMask = 4417, + BuiltInSubgroupGtMask = 4418, + BuiltInSubgroupLeMask = 4419, + BuiltInSubgroupLtMask = 4420, + BuiltInBaseVertex = 4424, + BuiltInBaseInstance = 4425, + BuiltInDrawIndex = 4426, + BuiltInDeviceIndex = 4438, + BuiltInViewIndex = 4440, + BuiltInBaryCoordNoPerspAMD = 4992, + BuiltInBaryCoordNoPerspCentroidAMD = 4993, + BuiltInBaryCoordNoPerspSampleAMD = 4994, + BuiltInBaryCoordSmoothAMD = 4995, + BuiltInBaryCoordSmoothCentroidAMD = 4996, + BuiltInBaryCoordSmoothSampleAMD = 4997, + BuiltInBaryCoordPullModelAMD = 4998, + BuiltInFragStencilRefEXT = 5014, + BuiltInViewportMaskNV = 5253, + BuiltInSecondaryPositionNV = 5257, + BuiltInSecondaryViewportMaskNV = 5258, + BuiltInPositionPerViewNV = 5261, + BuiltInViewportMaskPerViewNV = 5262, + BuiltInFullyCoveredEXT = 5264, + BuiltInTaskCountNV = 5274, + BuiltInPrimitiveCountNV = 5275, + BuiltInPrimitiveIndicesNV = 5276, + BuiltInClipDistancePerViewNV = 5277, + BuiltInCullDistancePerViewNV = 5278, + BuiltInLayerPerViewNV = 5279, + BuiltInMeshViewCountNV = 5280, + BuiltInMeshViewIndicesNV = 5281, + BuiltInBaryCoordNV = 5286, + BuiltInBaryCoordNoPerspNV = 5287, + BuiltInFragSizeEXT = 5292, + BuiltInFragInvocationCountEXT = 5293, + BuiltInLaunchIdNV = 5319, + BuiltInLaunchSizeNV = 5320, + BuiltInWorldRayOriginNV = 5321, + BuiltInWorldRayDirectionNV = 5322, + BuiltInObjectRayOriginNV = 5323, + BuiltInObjectRayDirectionNV = 5324, + BuiltInRayTminNV = 5325, + BuiltInRayTmaxNV = 5326, + BuiltInInstanceCustomIndexNV = 5327, + BuiltInObjectToWorldNV = 5330, + BuiltInWorldToObjectNV = 5331, + BuiltInHitTNV = 5332, + BuiltInHitKindNV = 5333, + BuiltInIncomingRayFlagsNV = 5351, + BuiltInMax = 2147483647, + } + #[repr(u32)] + #[derive(Debug, Copy, Clone, PartialEq, Eq, Hash)] + pub enum SelectionControlShift { + SelectionControlFlattenShift = 0, + SelectionControlDontFlattenShift = 1, + SelectionControlMax = 2147483647, + } + pub const SelectionControlMask_SelectionControlMaskNone: + root::spv::SelectionControlMask = + SelectionControlMask(0); + pub const SelectionControlMask_SelectionControlFlattenMask: + root::spv::SelectionControlMask = + SelectionControlMask(1); + pub const SelectionControlMask_SelectionControlDontFlattenMask: + root::spv::SelectionControlMask = + SelectionControlMask(2); + impl ::std::ops::BitOr for + root::spv::SelectionControlMask { + type + Output + = + Self; + #[inline] + fn bitor(self, other: Self) -> Self { + SelectionControlMask(self.0 | other.0) + } + } + impl ::std::ops::BitOrAssign for root::spv::SelectionControlMask { + #[inline] + fn bitor_assign(&mut self, rhs: root::spv::SelectionControlMask) { + self.0 |= rhs.0; + } + } + impl ::std::ops::BitAnd for + root::spv::SelectionControlMask { + type + Output + = + Self; + #[inline] + fn bitand(self, other: Self) -> Self { + SelectionControlMask(self.0 & other.0) + } + } + impl ::std::ops::BitAndAssign for root::spv::SelectionControlMask { + #[inline] + fn bitand_assign(&mut self, + rhs: root::spv::SelectionControlMask) { + self.0 &= rhs.0; + } + } + #[repr(C)] + #[derive(Debug, Copy, Clone, PartialEq, Eq, Hash)] + pub struct SelectionControlMask(pub ::std::os::raw::c_uint); + #[repr(u32)] + #[derive(Debug, Copy, Clone, PartialEq, Eq, Hash)] + pub enum LoopControlShift { + LoopControlUnrollShift = 0, + LoopControlDontUnrollShift = 1, + LoopControlDependencyInfiniteShift = 2, + 
LoopControlDependencyLengthShift = 3, + LoopControlMax = 2147483647, + } + pub const LoopControlMask_LoopControlMaskNone: + root::spv::LoopControlMask = + LoopControlMask(0); + pub const LoopControlMask_LoopControlUnrollMask: + root::spv::LoopControlMask = + LoopControlMask(1); + pub const LoopControlMask_LoopControlDontUnrollMask: + root::spv::LoopControlMask = + LoopControlMask(2); + pub const LoopControlMask_LoopControlDependencyInfiniteMask: + root::spv::LoopControlMask = + LoopControlMask(4); + pub const LoopControlMask_LoopControlDependencyLengthMask: + root::spv::LoopControlMask = + LoopControlMask(8); + impl ::std::ops::BitOr for + root::spv::LoopControlMask { + type + Output + = + Self; + #[inline] + fn bitor(self, other: Self) -> Self { + LoopControlMask(self.0 | other.0) + } + } + impl ::std::ops::BitOrAssign for root::spv::LoopControlMask { + #[inline] + fn bitor_assign(&mut self, rhs: root::spv::LoopControlMask) { + self.0 |= rhs.0; + } + } + impl ::std::ops::BitAnd for + root::spv::LoopControlMask { + type + Output + = + Self; + #[inline] + fn bitand(self, other: Self) -> Self { + LoopControlMask(self.0 & other.0) + } + } + impl ::std::ops::BitAndAssign for root::spv::LoopControlMask { + #[inline] + fn bitand_assign(&mut self, rhs: root::spv::LoopControlMask) { + self.0 &= rhs.0; + } + } + #[repr(C)] + #[derive(Debug, Copy, Clone, PartialEq, Eq, Hash)] + pub struct LoopControlMask(pub ::std::os::raw::c_uint); + #[repr(u32)] + #[derive(Debug, Copy, Clone, PartialEq, Eq, Hash)] + pub enum FunctionControlShift { + FunctionControlInlineShift = 0, + FunctionControlDontInlineShift = 1, + FunctionControlPureShift = 2, + FunctionControlConstShift = 3, + FunctionControlMax = 2147483647, + } + pub const FunctionControlMask_FunctionControlMaskNone: + root::spv::FunctionControlMask = + FunctionControlMask(0); + pub const FunctionControlMask_FunctionControlInlineMask: + root::spv::FunctionControlMask = + FunctionControlMask(1); + pub const FunctionControlMask_FunctionControlDontInlineMask: + root::spv::FunctionControlMask = + FunctionControlMask(2); + pub const FunctionControlMask_FunctionControlPureMask: + root::spv::FunctionControlMask = + FunctionControlMask(4); + pub const FunctionControlMask_FunctionControlConstMask: + root::spv::FunctionControlMask = + FunctionControlMask(8); + impl ::std::ops::BitOr for + root::spv::FunctionControlMask { + type + Output + = + Self; + #[inline] + fn bitor(self, other: Self) -> Self { + FunctionControlMask(self.0 | other.0) + } + } + impl ::std::ops::BitOrAssign for root::spv::FunctionControlMask { + #[inline] + fn bitor_assign(&mut self, rhs: root::spv::FunctionControlMask) { + self.0 |= rhs.0; + } + } + impl ::std::ops::BitAnd for + root::spv::FunctionControlMask { + type + Output + = + Self; + #[inline] + fn bitand(self, other: Self) -> Self { + FunctionControlMask(self.0 & other.0) + } + } + impl ::std::ops::BitAndAssign for root::spv::FunctionControlMask { + #[inline] + fn bitand_assign(&mut self, rhs: root::spv::FunctionControlMask) { + self.0 &= rhs.0; + } + } + #[repr(C)] + #[derive(Debug, Copy, Clone, PartialEq, Eq, Hash)] + pub struct FunctionControlMask(pub ::std::os::raw::c_uint); + #[repr(u32)] + #[derive(Debug, Copy, Clone, PartialEq, Eq, Hash)] + pub enum MemorySemanticsShift { + MemorySemanticsAcquireShift = 1, + MemorySemanticsReleaseShift = 2, + MemorySemanticsAcquireReleaseShift = 3, + MemorySemanticsSequentiallyConsistentShift = 4, + MemorySemanticsUniformMemoryShift = 6, + MemorySemanticsSubgroupMemoryShift = 7, + 
MemorySemanticsWorkgroupMemoryShift = 8, + MemorySemanticsCrossWorkgroupMemoryShift = 9, + MemorySemanticsAtomicCounterMemoryShift = 10, + MemorySemanticsImageMemoryShift = 11, + MemorySemanticsOutputMemoryKHRShift = 12, + MemorySemanticsMakeAvailableKHRShift = 13, + MemorySemanticsMakeVisibleKHRShift = 14, + MemorySemanticsMax = 2147483647, + } + pub const MemorySemanticsMask_MemorySemanticsMaskNone: + root::spv::MemorySemanticsMask = + MemorySemanticsMask(0); + pub const MemorySemanticsMask_MemorySemanticsAcquireMask: + root::spv::MemorySemanticsMask = + MemorySemanticsMask(2); + pub const MemorySemanticsMask_MemorySemanticsReleaseMask: + root::spv::MemorySemanticsMask = + MemorySemanticsMask(4); + pub const MemorySemanticsMask_MemorySemanticsAcquireReleaseMask: + root::spv::MemorySemanticsMask = + MemorySemanticsMask(8); + pub const MemorySemanticsMask_MemorySemanticsSequentiallyConsistentMask: + root::spv::MemorySemanticsMask = + MemorySemanticsMask(16); + pub const MemorySemanticsMask_MemorySemanticsUniformMemoryMask: + root::spv::MemorySemanticsMask = + MemorySemanticsMask(64); + pub const MemorySemanticsMask_MemorySemanticsSubgroupMemoryMask: + root::spv::MemorySemanticsMask = + MemorySemanticsMask(128); + pub const MemorySemanticsMask_MemorySemanticsWorkgroupMemoryMask: + root::spv::MemorySemanticsMask = + MemorySemanticsMask(256); + pub const MemorySemanticsMask_MemorySemanticsCrossWorkgroupMemoryMask: + root::spv::MemorySemanticsMask = + MemorySemanticsMask(512); + pub const MemorySemanticsMask_MemorySemanticsAtomicCounterMemoryMask: + root::spv::MemorySemanticsMask = + MemorySemanticsMask(1024); + pub const MemorySemanticsMask_MemorySemanticsImageMemoryMask: + root::spv::MemorySemanticsMask = + MemorySemanticsMask(2048); + pub const MemorySemanticsMask_MemorySemanticsOutputMemoryKHRMask: + root::spv::MemorySemanticsMask = + MemorySemanticsMask(4096); + pub const MemorySemanticsMask_MemorySemanticsMakeAvailableKHRMask: + root::spv::MemorySemanticsMask = + MemorySemanticsMask(8192); + pub const MemorySemanticsMask_MemorySemanticsMakeVisibleKHRMask: + root::spv::MemorySemanticsMask = + MemorySemanticsMask(16384); + impl ::std::ops::BitOr for + root::spv::MemorySemanticsMask { + type + Output + = + Self; + #[inline] + fn bitor(self, other: Self) -> Self { + MemorySemanticsMask(self.0 | other.0) + } + } + impl ::std::ops::BitOrAssign for root::spv::MemorySemanticsMask { + #[inline] + fn bitor_assign(&mut self, rhs: root::spv::MemorySemanticsMask) { + self.0 |= rhs.0; + } + } + impl ::std::ops::BitAnd for + root::spv::MemorySemanticsMask { + type + Output + = + Self; + #[inline] + fn bitand(self, other: Self) -> Self { + MemorySemanticsMask(self.0 & other.0) + } + } + impl ::std::ops::BitAndAssign for root::spv::MemorySemanticsMask { + #[inline] + fn bitand_assign(&mut self, rhs: root::spv::MemorySemanticsMask) { + self.0 &= rhs.0; + } + } + #[repr(C)] + #[derive(Debug, Copy, Clone, PartialEq, Eq, Hash)] + pub struct MemorySemanticsMask(pub ::std::os::raw::c_uint); + #[repr(u32)] + #[derive(Debug, Copy, Clone, PartialEq, Eq, Hash)] + pub enum MemoryAccessShift { + MemoryAccessVolatileShift = 0, + MemoryAccessAlignedShift = 1, + MemoryAccessNontemporalShift = 2, + MemoryAccessMakePointerAvailableKHRShift = 3, + MemoryAccessMakePointerVisibleKHRShift = 4, + MemoryAccessNonPrivatePointerKHRShift = 5, + MemoryAccessMax = 2147483647, + } + pub const MemoryAccessMask_MemoryAccessMaskNone: + root::spv::MemoryAccessMask = + MemoryAccessMask(0); + pub const 
MemoryAccessMask_MemoryAccessVolatileMask: + root::spv::MemoryAccessMask = + MemoryAccessMask(1); + pub const MemoryAccessMask_MemoryAccessAlignedMask: + root::spv::MemoryAccessMask = + MemoryAccessMask(2); + pub const MemoryAccessMask_MemoryAccessNontemporalMask: + root::spv::MemoryAccessMask = + MemoryAccessMask(4); + pub const MemoryAccessMask_MemoryAccessMakePointerAvailableKHRMask: + root::spv::MemoryAccessMask = + MemoryAccessMask(8); + pub const MemoryAccessMask_MemoryAccessMakePointerVisibleKHRMask: + root::spv::MemoryAccessMask = + MemoryAccessMask(16); + pub const MemoryAccessMask_MemoryAccessNonPrivatePointerKHRMask: + root::spv::MemoryAccessMask = + MemoryAccessMask(32); + impl ::std::ops::BitOr for + root::spv::MemoryAccessMask { + type + Output + = + Self; + #[inline] + fn bitor(self, other: Self) -> Self { + MemoryAccessMask(self.0 | other.0) + } + } + impl ::std::ops::BitOrAssign for root::spv::MemoryAccessMask { + #[inline] + fn bitor_assign(&mut self, rhs: root::spv::MemoryAccessMask) { + self.0 |= rhs.0; + } + } + impl ::std::ops::BitAnd for + root::spv::MemoryAccessMask { + type + Output + = + Self; + #[inline] + fn bitand(self, other: Self) -> Self { + MemoryAccessMask(self.0 & other.0) + } + } + impl ::std::ops::BitAndAssign for root::spv::MemoryAccessMask { + #[inline] + fn bitand_assign(&mut self, rhs: root::spv::MemoryAccessMask) { + self.0 &= rhs.0; + } + } + #[repr(C)] + #[derive(Debug, Copy, Clone, PartialEq, Eq, Hash)] + pub struct MemoryAccessMask(pub ::std::os::raw::c_uint); + #[repr(u32)] + #[derive(Debug, Copy, Clone, PartialEq, Eq, Hash)] + pub enum Scope { + ScopeCrossDevice = 0, + ScopeDevice = 1, + ScopeWorkgroup = 2, + ScopeSubgroup = 3, + ScopeInvocation = 4, + ScopeQueueFamilyKHR = 5, + ScopeMax = 2147483647, + } + #[repr(u32)] + #[derive(Debug, Copy, Clone, PartialEq, Eq, Hash)] + pub enum GroupOperation { + GroupOperationReduce = 0, + GroupOperationInclusiveScan = 1, + GroupOperationExclusiveScan = 2, + GroupOperationClusteredReduce = 3, + GroupOperationPartitionedReduceNV = 6, + GroupOperationPartitionedInclusiveScanNV = 7, + GroupOperationPartitionedExclusiveScanNV = 8, + GroupOperationMax = 2147483647, + } + pub const KernelEnqueueFlags_KernelEnqueueFlagsNoWait: + root::spv::KernelEnqueueFlags = + KernelEnqueueFlags(0); + pub const KernelEnqueueFlags_KernelEnqueueFlagsWaitKernel: + root::spv::KernelEnqueueFlags = + KernelEnqueueFlags(1); + pub const KernelEnqueueFlags_KernelEnqueueFlagsWaitWorkGroup: + root::spv::KernelEnqueueFlags = + KernelEnqueueFlags(2); + pub const KernelEnqueueFlags_KernelEnqueueFlagsMax: + root::spv::KernelEnqueueFlags = + KernelEnqueueFlags(2147483647); + impl ::std::ops::BitOr for + root::spv::KernelEnqueueFlags { + type + Output + = + Self; + #[inline] + fn bitor(self, other: Self) -> Self { + KernelEnqueueFlags(self.0 | other.0) + } + } + impl ::std::ops::BitOrAssign for root::spv::KernelEnqueueFlags { + #[inline] + fn bitor_assign(&mut self, rhs: root::spv::KernelEnqueueFlags) { + self.0 |= rhs.0; + } + } + impl ::std::ops::BitAnd for + root::spv::KernelEnqueueFlags { + type + Output + = + Self; + #[inline] + fn bitand(self, other: Self) -> Self { + KernelEnqueueFlags(self.0 & other.0) + } + } + impl ::std::ops::BitAndAssign for root::spv::KernelEnqueueFlags { + #[inline] + fn bitand_assign(&mut self, rhs: root::spv::KernelEnqueueFlags) { + self.0 &= rhs.0; + } + } + #[repr(C)] + #[derive(Debug, Copy, Clone, PartialEq, Eq, Hash)] + pub struct KernelEnqueueFlags(pub ::std::os::raw::c_uint); + #[repr(u32)] + 
#[derive(Debug, Copy, Clone, PartialEq, Eq, Hash)] + pub enum KernelProfilingInfoShift { + KernelProfilingInfoCmdExecTimeShift = 0, + KernelProfilingInfoMax = 2147483647, + } + pub const KernelProfilingInfoMask_KernelProfilingInfoMaskNone: + root::spv::KernelProfilingInfoMask = + KernelProfilingInfoMask(0); + pub const KernelProfilingInfoMask_KernelProfilingInfoCmdExecTimeMask: + root::spv::KernelProfilingInfoMask = + KernelProfilingInfoMask(1); + impl ::std::ops::BitOr for + root::spv::KernelProfilingInfoMask { + type + Output + = + Self; + #[inline] + fn bitor(self, other: Self) -> Self { + KernelProfilingInfoMask(self.0 | other.0) + } + } + impl ::std::ops::BitOrAssign for root::spv::KernelProfilingInfoMask { + #[inline] + fn bitor_assign(&mut self, + rhs: root::spv::KernelProfilingInfoMask) { + self.0 |= rhs.0; + } + } + impl ::std::ops::BitAnd for + root::spv::KernelProfilingInfoMask { + type + Output + = + Self; + #[inline] + fn bitand(self, other: Self) -> Self { + KernelProfilingInfoMask(self.0 & other.0) + } + } + impl ::std::ops::BitAndAssign for root::spv::KernelProfilingInfoMask { + #[inline] + fn bitand_assign(&mut self, + rhs: root::spv::KernelProfilingInfoMask) { + self.0 &= rhs.0; + } + } + #[repr(C)] + #[derive(Debug, Copy, Clone, PartialEq, Eq, Hash)] + pub struct KernelProfilingInfoMask(pub ::std::os::raw::c_uint); + pub const Capability_CapabilityStorageUniformBufferBlock16: + root::spv::Capability = + Capability::CapabilityStorageBuffer16BitAccess; + pub const Capability_CapabilityUniformAndStorageBuffer16BitAccess: + root::spv::Capability = + Capability::CapabilityStorageUniform16; + pub const Capability_CapabilityShaderViewportIndexLayerNV: + root::spv::Capability = + Capability::CapabilityShaderViewportIndexLayerEXT; + pub const Capability_CapabilityShadingRateNV: root::spv::Capability = + Capability::CapabilityFragmentDensityEXT; + #[repr(u32)] + #[derive(Debug, Copy, Clone, PartialEq, Eq, Hash)] + pub enum Capability { + CapabilityMatrix = 0, + CapabilityShader = 1, + CapabilityGeometry = 2, + CapabilityTessellation = 3, + CapabilityAddresses = 4, + CapabilityLinkage = 5, + CapabilityKernel = 6, + CapabilityVector16 = 7, + CapabilityFloat16Buffer = 8, + CapabilityFloat16 = 9, + CapabilityFloat64 = 10, + CapabilityInt64 = 11, + CapabilityInt64Atomics = 12, + CapabilityImageBasic = 13, + CapabilityImageReadWrite = 14, + CapabilityImageMipmap = 15, + CapabilityPipes = 17, + CapabilityGroups = 18, + CapabilityDeviceEnqueue = 19, + CapabilityLiteralSampler = 20, + CapabilityAtomicStorage = 21, + CapabilityInt16 = 22, + CapabilityTessellationPointSize = 23, + CapabilityGeometryPointSize = 24, + CapabilityImageGatherExtended = 25, + CapabilityStorageImageMultisample = 27, + CapabilityUniformBufferArrayDynamicIndexing = 28, + CapabilitySampledImageArrayDynamicIndexing = 29, + CapabilityStorageBufferArrayDynamicIndexing = 30, + CapabilityStorageImageArrayDynamicIndexing = 31, + CapabilityClipDistance = 32, + CapabilityCullDistance = 33, + CapabilityImageCubeArray = 34, + CapabilitySampleRateShading = 35, + CapabilityImageRect = 36, + CapabilitySampledRect = 37, + CapabilityGenericPointer = 38, + CapabilityInt8 = 39, + CapabilityInputAttachment = 40, + CapabilitySparseResidency = 41, + CapabilityMinLod = 42, + CapabilitySampled1D = 43, + CapabilityImage1D = 44, + CapabilitySampledCubeArray = 45, + CapabilitySampledBuffer = 46, + CapabilityImageBuffer = 47, + CapabilityImageMSArray = 48, + CapabilityStorageImageExtendedFormats = 49, + CapabilityImageQuery = 50, + 
CapabilityDerivativeControl = 51, + CapabilityInterpolationFunction = 52, + CapabilityTransformFeedback = 53, + CapabilityGeometryStreams = 54, + CapabilityStorageImageReadWithoutFormat = 55, + CapabilityStorageImageWriteWithoutFormat = 56, + CapabilityMultiViewport = 57, + CapabilitySubgroupDispatch = 58, + CapabilityNamedBarrier = 59, + CapabilityPipeStorage = 60, + CapabilityGroupNonUniform = 61, + CapabilityGroupNonUniformVote = 62, + CapabilityGroupNonUniformArithmetic = 63, + CapabilityGroupNonUniformBallot = 64, + CapabilityGroupNonUniformShuffle = 65, + CapabilityGroupNonUniformShuffleRelative = 66, + CapabilityGroupNonUniformClustered = 67, + CapabilityGroupNonUniformQuad = 68, + CapabilitySubgroupBallotKHR = 4423, + CapabilityDrawParameters = 4427, + CapabilitySubgroupVoteKHR = 4431, + CapabilityStorageBuffer16BitAccess = 4433, + CapabilityStorageUniform16 = 4434, + CapabilityStoragePushConstant16 = 4435, + CapabilityStorageInputOutput16 = 4436, + CapabilityDeviceGroup = 4437, + CapabilityMultiView = 4439, + CapabilityVariablePointersStorageBuffer = 4441, + CapabilityVariablePointers = 4442, + CapabilityAtomicStorageOps = 4445, + CapabilitySampleMaskPostDepthCoverage = 4447, + CapabilityStorageBuffer8BitAccess = 4448, + CapabilityUniformAndStorageBuffer8BitAccess = 4449, + CapabilityStoragePushConstant8 = 4450, + CapabilityDenormPreserve = 4464, + CapabilityDenormFlushToZero = 4465, + CapabilitySignedZeroInfNanPreserve = 4466, + CapabilityRoundingModeRTE = 4467, + CapabilityRoundingModeRTZ = 4468, + CapabilityFloat16ImageAMD = 5008, + CapabilityImageGatherBiasLodAMD = 5009, + CapabilityFragmentMaskAMD = 5010, + CapabilityStencilExportEXT = 5013, + CapabilityImageReadWriteLodAMD = 5015, + CapabilitySampleMaskOverrideCoverageNV = 5249, + CapabilityGeometryShaderPassthroughNV = 5251, + CapabilityShaderViewportIndexLayerEXT = 5254, + CapabilityShaderViewportMaskNV = 5255, + CapabilityShaderStereoViewNV = 5259, + CapabilityPerViewAttributesNV = 5260, + CapabilityFragmentFullyCoveredEXT = 5265, + CapabilityMeshShadingNV = 5266, + CapabilityImageFootprintNV = 5282, + CapabilityFragmentBarycentricNV = 5284, + CapabilityComputeDerivativeGroupQuadsNV = 5288, + CapabilityFragmentDensityEXT = 5291, + CapabilityGroupNonUniformPartitionedNV = 5297, + CapabilityShaderNonUniformEXT = 5301, + CapabilityRuntimeDescriptorArrayEXT = 5302, + CapabilityInputAttachmentArrayDynamicIndexingEXT = 5303, + CapabilityUniformTexelBufferArrayDynamicIndexingEXT = 5304, + CapabilityStorageTexelBufferArrayDynamicIndexingEXT = 5305, + CapabilityUniformBufferArrayNonUniformIndexingEXT = 5306, + CapabilitySampledImageArrayNonUniformIndexingEXT = 5307, + CapabilityStorageBufferArrayNonUniformIndexingEXT = 5308, + CapabilityStorageImageArrayNonUniformIndexingEXT = 5309, + CapabilityInputAttachmentArrayNonUniformIndexingEXT = 5310, + CapabilityUniformTexelBufferArrayNonUniformIndexingEXT = 5311, + CapabilityStorageTexelBufferArrayNonUniformIndexingEXT = 5312, + CapabilityRayTracingNV = 5340, + CapabilityVulkanMemoryModelKHR = 5345, + CapabilityVulkanMemoryModelDeviceScopeKHR = 5346, + CapabilityPhysicalStorageBufferAddressesEXT = 5347, + CapabilityComputeDerivativeGroupLinearNV = 5350, + CapabilitySubgroupShuffleINTEL = 5568, + CapabilitySubgroupBufferBlockIOINTEL = 5569, + CapabilitySubgroupImageBlockIOINTEL = 5570, + CapabilitySubgroupImageMediaBlockIOINTEL = 5579, + CapabilityMax = 2147483647, + } + #[repr(u32)] + #[derive(Debug, Copy, Clone, PartialEq, Eq, Hash)] + pub enum Op { + OpNop = 0, + OpUndef = 1, + 
OpSourceContinued = 2, + OpSource = 3, + OpSourceExtension = 4, + OpName = 5, + OpMemberName = 6, + OpString = 7, + OpLine = 8, + OpExtension = 10, + OpExtInstImport = 11, + OpExtInst = 12, + OpMemoryModel = 14, + OpEntryPoint = 15, + OpExecutionMode = 16, + OpCapability = 17, + OpTypeVoid = 19, + OpTypeBool = 20, + OpTypeInt = 21, + OpTypeFloat = 22, + OpTypeVector = 23, + OpTypeMatrix = 24, + OpTypeImage = 25, + OpTypeSampler = 26, + OpTypeSampledImage = 27, + OpTypeArray = 28, + OpTypeRuntimeArray = 29, + OpTypeStruct = 30, + OpTypeOpaque = 31, + OpTypePointer = 32, + OpTypeFunction = 33, + OpTypeEvent = 34, + OpTypeDeviceEvent = 35, + OpTypeReserveId = 36, + OpTypeQueue = 37, + OpTypePipe = 38, + OpTypeForwardPointer = 39, + OpConstantTrue = 41, + OpConstantFalse = 42, + OpConstant = 43, + OpConstantComposite = 44, + OpConstantSampler = 45, + OpConstantNull = 46, + OpSpecConstantTrue = 48, + OpSpecConstantFalse = 49, + OpSpecConstant = 50, + OpSpecConstantComposite = 51, + OpSpecConstantOp = 52, + OpFunction = 54, + OpFunctionParameter = 55, + OpFunctionEnd = 56, + OpFunctionCall = 57, + OpVariable = 59, + OpImageTexelPointer = 60, + OpLoad = 61, + OpStore = 62, + OpCopyMemory = 63, + OpCopyMemorySized = 64, + OpAccessChain = 65, + OpInBoundsAccessChain = 66, + OpPtrAccessChain = 67, + OpArrayLength = 68, + OpGenericPtrMemSemantics = 69, + OpInBoundsPtrAccessChain = 70, + OpDecorate = 71, + OpMemberDecorate = 72, + OpDecorationGroup = 73, + OpGroupDecorate = 74, + OpGroupMemberDecorate = 75, + OpVectorExtractDynamic = 77, + OpVectorInsertDynamic = 78, + OpVectorShuffle = 79, + OpCompositeConstruct = 80, + OpCompositeExtract = 81, + OpCompositeInsert = 82, + OpCopyObject = 83, + OpTranspose = 84, + OpSampledImage = 86, + OpImageSampleImplicitLod = 87, + OpImageSampleExplicitLod = 88, + OpImageSampleDrefImplicitLod = 89, + OpImageSampleDrefExplicitLod = 90, + OpImageSampleProjImplicitLod = 91, + OpImageSampleProjExplicitLod = 92, + OpImageSampleProjDrefImplicitLod = 93, + OpImageSampleProjDrefExplicitLod = 94, + OpImageFetch = 95, + OpImageGather = 96, + OpImageDrefGather = 97, + OpImageRead = 98, + OpImageWrite = 99, + OpImage = 100, + OpImageQueryFormat = 101, + OpImageQueryOrder = 102, + OpImageQuerySizeLod = 103, + OpImageQuerySize = 104, + OpImageQueryLod = 105, + OpImageQueryLevels = 106, + OpImageQuerySamples = 107, + OpConvertFToU = 109, + OpConvertFToS = 110, + OpConvertSToF = 111, + OpConvertUToF = 112, + OpUConvert = 113, + OpSConvert = 114, + OpFConvert = 115, + OpQuantizeToF16 = 116, + OpConvertPtrToU = 117, + OpSatConvertSToU = 118, + OpSatConvertUToS = 119, + OpConvertUToPtr = 120, + OpPtrCastToGeneric = 121, + OpGenericCastToPtr = 122, + OpGenericCastToPtrExplicit = 123, + OpBitcast = 124, + OpSNegate = 126, + OpFNegate = 127, + OpIAdd = 128, + OpFAdd = 129, + OpISub = 130, + OpFSub = 131, + OpIMul = 132, + OpFMul = 133, + OpUDiv = 134, + OpSDiv = 135, + OpFDiv = 136, + OpUMod = 137, + OpSRem = 138, + OpSMod = 139, + OpFRem = 140, + OpFMod = 141, + OpVectorTimesScalar = 142, + OpMatrixTimesScalar = 143, + OpVectorTimesMatrix = 144, + OpMatrixTimesVector = 145, + OpMatrixTimesMatrix = 146, + OpOuterProduct = 147, + OpDot = 148, + OpIAddCarry = 149, + OpISubBorrow = 150, + OpUMulExtended = 151, + OpSMulExtended = 152, + OpAny = 154, + OpAll = 155, + OpIsNan = 156, + OpIsInf = 157, + OpIsFinite = 158, + OpIsNormal = 159, + OpSignBitSet = 160, + OpLessOrGreater = 161, + OpOrdered = 162, + OpUnordered = 163, + OpLogicalEqual = 164, + OpLogicalNotEqual = 165, + OpLogicalOr = 
166, + OpLogicalAnd = 167, + OpLogicalNot = 168, + OpSelect = 169, + OpIEqual = 170, + OpINotEqual = 171, + OpUGreaterThan = 172, + OpSGreaterThan = 173, + OpUGreaterThanEqual = 174, + OpSGreaterThanEqual = 175, + OpULessThan = 176, + OpSLessThan = 177, + OpULessThanEqual = 178, + OpSLessThanEqual = 179, + OpFOrdEqual = 180, + OpFUnordEqual = 181, + OpFOrdNotEqual = 182, + OpFUnordNotEqual = 183, + OpFOrdLessThan = 184, + OpFUnordLessThan = 185, + OpFOrdGreaterThan = 186, + OpFUnordGreaterThan = 187, + OpFOrdLessThanEqual = 188, + OpFUnordLessThanEqual = 189, + OpFOrdGreaterThanEqual = 190, + OpFUnordGreaterThanEqual = 191, + OpShiftRightLogical = 194, + OpShiftRightArithmetic = 195, + OpShiftLeftLogical = 196, + OpBitwiseOr = 197, + OpBitwiseXor = 198, + OpBitwiseAnd = 199, + OpNot = 200, + OpBitFieldInsert = 201, + OpBitFieldSExtract = 202, + OpBitFieldUExtract = 203, + OpBitReverse = 204, + OpBitCount = 205, + OpDPdx = 207, + OpDPdy = 208, + OpFwidth = 209, + OpDPdxFine = 210, + OpDPdyFine = 211, + OpFwidthFine = 212, + OpDPdxCoarse = 213, + OpDPdyCoarse = 214, + OpFwidthCoarse = 215, + OpEmitVertex = 218, + OpEndPrimitive = 219, + OpEmitStreamVertex = 220, + OpEndStreamPrimitive = 221, + OpControlBarrier = 224, + OpMemoryBarrier = 225, + OpAtomicLoad = 227, + OpAtomicStore = 228, + OpAtomicExchange = 229, + OpAtomicCompareExchange = 230, + OpAtomicCompareExchangeWeak = 231, + OpAtomicIIncrement = 232, + OpAtomicIDecrement = 233, + OpAtomicIAdd = 234, + OpAtomicISub = 235, + OpAtomicSMin = 236, + OpAtomicUMin = 237, + OpAtomicSMax = 238, + OpAtomicUMax = 239, + OpAtomicAnd = 240, + OpAtomicOr = 241, + OpAtomicXor = 242, + OpPhi = 245, + OpLoopMerge = 246, + OpSelectionMerge = 247, + OpLabel = 248, + OpBranch = 249, + OpBranchConditional = 250, + OpSwitch = 251, + OpKill = 252, + OpReturn = 253, + OpReturnValue = 254, + OpUnreachable = 255, + OpLifetimeStart = 256, + OpLifetimeStop = 257, + OpGroupAsyncCopy = 259, + OpGroupWaitEvents = 260, + OpGroupAll = 261, + OpGroupAny = 262, + OpGroupBroadcast = 263, + OpGroupIAdd = 264, + OpGroupFAdd = 265, + OpGroupFMin = 266, + OpGroupUMin = 267, + OpGroupSMin = 268, + OpGroupFMax = 269, + OpGroupUMax = 270, + OpGroupSMax = 271, + OpReadPipe = 274, + OpWritePipe = 275, + OpReservedReadPipe = 276, + OpReservedWritePipe = 277, + OpReserveReadPipePackets = 278, + OpReserveWritePipePackets = 279, + OpCommitReadPipe = 280, + OpCommitWritePipe = 281, + OpIsValidReserveId = 282, + OpGetNumPipePackets = 283, + OpGetMaxPipePackets = 284, + OpGroupReserveReadPipePackets = 285, + OpGroupReserveWritePipePackets = 286, + OpGroupCommitReadPipe = 287, + OpGroupCommitWritePipe = 288, + OpEnqueueMarker = 291, + OpEnqueueKernel = 292, + OpGetKernelNDrangeSubGroupCount = 293, + OpGetKernelNDrangeMaxSubGroupSize = 294, + OpGetKernelWorkGroupSize = 295, + OpGetKernelPreferredWorkGroupSizeMultiple = 296, + OpRetainEvent = 297, + OpReleaseEvent = 298, + OpCreateUserEvent = 299, + OpIsValidEvent = 300, + OpSetUserEventStatus = 301, + OpCaptureEventProfilingInfo = 302, + OpGetDefaultQueue = 303, + OpBuildNDRange = 304, + OpImageSparseSampleImplicitLod = 305, + OpImageSparseSampleExplicitLod = 306, + OpImageSparseSampleDrefImplicitLod = 307, + OpImageSparseSampleDrefExplicitLod = 308, + OpImageSparseSampleProjImplicitLod = 309, + OpImageSparseSampleProjExplicitLod = 310, + OpImageSparseSampleProjDrefImplicitLod = 311, + OpImageSparseSampleProjDrefExplicitLod = 312, + OpImageSparseFetch = 313, + OpImageSparseGather = 314, + OpImageSparseDrefGather = 315, + 
OpImageSparseTexelsResident = 316, + OpNoLine = 317, + OpAtomicFlagTestAndSet = 318, + OpAtomicFlagClear = 319, + OpImageSparseRead = 320, + OpSizeOf = 321, + OpTypePipeStorage = 322, + OpConstantPipeStorage = 323, + OpCreatePipeFromPipeStorage = 324, + OpGetKernelLocalSizeForSubgroupCount = 325, + OpGetKernelMaxNumSubgroups = 326, + OpTypeNamedBarrier = 327, + OpNamedBarrierInitialize = 328, + OpMemoryNamedBarrier = 329, + OpModuleProcessed = 330, + OpExecutionModeId = 331, + OpDecorateId = 332, + OpGroupNonUniformElect = 333, + OpGroupNonUniformAll = 334, + OpGroupNonUniformAny = 335, + OpGroupNonUniformAllEqual = 336, + OpGroupNonUniformBroadcast = 337, + OpGroupNonUniformBroadcastFirst = 338, + OpGroupNonUniformBallot = 339, + OpGroupNonUniformInverseBallot = 340, + OpGroupNonUniformBallotBitExtract = 341, + OpGroupNonUniformBallotBitCount = 342, + OpGroupNonUniformBallotFindLSB = 343, + OpGroupNonUniformBallotFindMSB = 344, + OpGroupNonUniformShuffle = 345, + OpGroupNonUniformShuffleXor = 346, + OpGroupNonUniformShuffleUp = 347, + OpGroupNonUniformShuffleDown = 348, + OpGroupNonUniformIAdd = 349, + OpGroupNonUniformFAdd = 350, + OpGroupNonUniformIMul = 351, + OpGroupNonUniformFMul = 352, + OpGroupNonUniformSMin = 353, + OpGroupNonUniformUMin = 354, + OpGroupNonUniformFMin = 355, + OpGroupNonUniformSMax = 356, + OpGroupNonUniformUMax = 357, + OpGroupNonUniformFMax = 358, + OpGroupNonUniformBitwiseAnd = 359, + OpGroupNonUniformBitwiseOr = 360, + OpGroupNonUniformBitwiseXor = 361, + OpGroupNonUniformLogicalAnd = 362, + OpGroupNonUniformLogicalOr = 363, + OpGroupNonUniformLogicalXor = 364, + OpGroupNonUniformQuadBroadcast = 365, + OpGroupNonUniformQuadSwap = 366, + OpSubgroupBallotKHR = 4421, + OpSubgroupFirstInvocationKHR = 4422, + OpSubgroupAllKHR = 4428, + OpSubgroupAnyKHR = 4429, + OpSubgroupAllEqualKHR = 4430, + OpSubgroupReadInvocationKHR = 4432, + OpGroupIAddNonUniformAMD = 5000, + OpGroupFAddNonUniformAMD = 5001, + OpGroupFMinNonUniformAMD = 5002, + OpGroupUMinNonUniformAMD = 5003, + OpGroupSMinNonUniformAMD = 5004, + OpGroupFMaxNonUniformAMD = 5005, + OpGroupUMaxNonUniformAMD = 5006, + OpGroupSMaxNonUniformAMD = 5007, + OpFragmentMaskFetchAMD = 5011, + OpFragmentFetchAMD = 5012, + OpImageSampleFootprintNV = 5283, + OpGroupNonUniformPartitionNV = 5296, + OpWritePackedPrimitiveIndices4x8NV = 5299, + OpReportIntersectionNV = 5334, + OpIgnoreIntersectionNV = 5335, + OpTerminateRayNV = 5336, + OpTraceNV = 5337, + OpTypeAccelerationStructureNV = 5341, + OpExecuteCallableNV = 5344, + OpSubgroupShuffleINTEL = 5571, + OpSubgroupShuffleDownINTEL = 5572, + OpSubgroupShuffleUpINTEL = 5573, + OpSubgroupShuffleXorINTEL = 5574, + OpSubgroupBlockReadINTEL = 5575, + OpSubgroupBlockWriteINTEL = 5576, + OpSubgroupImageBlockReadINTEL = 5577, + OpSubgroupImageBlockWriteINTEL = 5578, + OpSubgroupImageMediaBlockReadINTEL = 5580, + OpSubgroupImageMediaBlockWriteINTEL = 5581, + OpDecorateStringGOOGLE = 5632, + OpMemberDecorateStringGOOGLE = 5633, + OpMax = 2147483647, + } + } + pub mod std { + #[allow(unused_imports)] + use self::super::super::root; + pub type string = [u64; 4usize]; + } + pub mod __gnu_cxx { + #[allow(unused_imports)] + use self::super::super::root; + } + pub type __uint8_t = ::std::os::raw::c_uchar; + pub type __int32_t = ::std::os::raw::c_int; + pub type __uint32_t = ::std::os::raw::c_uint; + pub mod SPIRV_CROSS_NAMESPACE { + #[allow(unused_imports)] + use self::super::super::root; + #[repr(u32)] + #[derive(Debug, Copy, Clone, PartialEq, Eq, Hash)] + pub enum SPIRType_BaseType { + 
Unknown = 0, + Void = 1, + Boolean = 2, + SByte = 3, + UByte = 4, + Short = 5, + UShort = 6, + Int = 7, + UInt = 8, + Int64 = 9, + UInt64 = 10, + AtomicCounter = 11, + Half = 12, + Float = 13, + Double = 14, + Struct = 15, + Image = 16, + SampledImage = 17, + Sampler = 18, + AccelerationStructureNV = 19, + ControlPointArray = 20, + Char = 21, + } + #[repr(C)] + #[derive(Debug, Copy)] + pub struct Resource { + pub id: u32, + pub type_id: u32, + pub base_type_id: u32, + pub name: root::std::string, + } + impl Clone for Resource { + fn clone(&self) -> Self { *self } + } + #[repr(u32)] + #[derive(Debug, Copy, Clone, PartialEq, Eq, Hash)] + pub enum MSLVertexFormat { + MSL_VERTEX_FORMAT_OTHER = 0, + MSL_VERTEX_FORMAT_UINT8 = 1, + MSL_VERTEX_FORMAT_UINT16 = 2, + MSL_VERTEX_FORMAT_INT_MAX = 2147483647, + } + #[repr(C)] + #[derive(Debug, Copy)] + pub struct MSLVertexAttr { + pub location: u32, + pub msl_buffer: u32, + pub msl_offset: u32, + pub msl_stride: u32, + pub per_instance: bool, + pub format: root::SPIRV_CROSS_NAMESPACE::MSLVertexFormat, + pub builtin: root::spv::BuiltIn, + } + impl Clone for MSLVertexAttr { + fn clone(&self) -> Self { *self } + } + #[repr(C)] + #[derive(Debug, Copy)] + pub struct MSLResourceBinding { + pub stage: root::spv::ExecutionModel, + pub desc_set: u32, + pub binding: u32, + pub msl_buffer: u32, + pub msl_texture: u32, + pub msl_sampler: u32, + } + impl Clone for MSLResourceBinding { + fn clone(&self) -> Self { *self } + } + } + pub type ScInternalCompilerBase = ::std::os::raw::c_void; + pub type ScInternalCompilerHlsl = ::std::os::raw::c_void; + pub type ScInternalCompilerMsl = ::std::os::raw::c_void; + pub type ScInternalCompilerGlsl = ::std::os::raw::c_void; + #[repr(u32)] + #[derive(Debug, Copy, Clone, PartialEq, Eq, Hash)] + pub enum ScInternalResult { + Success = 0, + Unhandled = 1, + CompilationError = 2, + } + #[repr(C)] + #[derive(Debug, Copy)] + pub struct ScEntryPoint { + pub name: *mut ::std::os::raw::c_char, + pub execution_model: root::spv::ExecutionModel, + pub work_group_size_x: u32, + pub work_group_size_y: u32, + pub work_group_size_z: u32, + } + impl Clone for ScEntryPoint { + fn clone(&self) -> Self { *self } + } + #[repr(C)] + #[derive(Debug, Copy)] + pub struct ScCombinedImageSampler { + pub combined_id: u32, + pub image_id: u32, + pub sampler_id: u32, + } + impl Clone for ScCombinedImageSampler { + fn clone(&self) -> Self { *self } + } + #[repr(C)] + #[derive(Debug, Copy)] + pub struct ScHlslRootConstant { + pub start: u32, + pub end: u32, + pub binding: u32, + pub space: u32, + } + impl Clone for ScHlslRootConstant { + fn clone(&self) -> Self { *self } + } + #[repr(C)] + #[derive(Debug, Copy)] + pub struct ScHlslCompilerOptions { + pub shader_model: i32, + pub point_size_compat: bool, + pub point_coord_compat: bool, + pub vertex_transform_clip_space: bool, + pub vertex_invert_y: bool, + } + impl Clone for ScHlslCompilerOptions { + fn clone(&self) -> Self { *self } + } + #[repr(C)] + #[derive(Debug, Copy)] + pub struct ScMslCompilerOptions { + pub vertex_transform_clip_space: bool, + pub vertex_invert_y: bool, + pub platform: u8, + pub version: u32, + pub enable_point_size_builtin: bool, + pub disable_rasterization: bool, + pub swizzle_buffer_index: u32, + pub indirect_params_buffer_index: u32, + pub shader_output_buffer_index: u32, + pub shader_patch_output_buffer_index: u32, + pub shader_tess_factor_buffer_index: u32, + pub buffer_size_buffer_index: u32, + pub capture_output_to_buffer: bool, + pub swizzle_texture_samples: bool, + pub 
tess_domain_origin_lower_left: bool, + pub argument_buffers: bool, + pub pad_fragment_output_components: bool, + } + impl Clone for ScMslCompilerOptions { + fn clone(&self) -> Self { *self } + } + #[repr(C)] + #[derive(Debug, Copy)] + pub struct ScGlslCompilerOptions { + pub vertex_transform_clip_space: bool, + pub vertex_invert_y: bool, + pub version: u32, + pub es: bool, + } + impl Clone for ScGlslCompilerOptions { + fn clone(&self) -> Self { *self } + } + #[repr(C)] + #[derive(Debug, Copy)] + pub struct ScResource { + pub id: u32, + pub type_id: u32, + pub base_type_id: u32, + pub name: *mut ::std::os::raw::c_char, + } + impl Clone for ScResource { + fn clone(&self) -> Self { *self } + } + #[repr(C)] + #[derive(Debug, Copy)] + pub struct ScResourceArray { + pub data: *mut root::ScResource, + pub num: usize, + } + impl Clone for ScResourceArray { + fn clone(&self) -> Self { *self } + } + #[repr(C)] + #[derive(Debug, Copy)] + pub struct ScShaderResources { + pub uniform_buffers: root::ScResourceArray, + pub storage_buffers: root::ScResourceArray, + pub stage_inputs: root::ScResourceArray, + pub stage_outputs: root::ScResourceArray, + pub subpass_inputs: root::ScResourceArray, + pub storage_images: root::ScResourceArray, + pub sampled_images: root::ScResourceArray, + pub atomic_counters: root::ScResourceArray, + pub push_constant_buffers: root::ScResourceArray, + pub separate_images: root::ScResourceArray, + pub separate_samplers: root::ScResourceArray, + } + impl Clone for ScShaderResources { + fn clone(&self) -> Self { *self } + } + #[repr(C)] + #[derive(Debug, Copy)] + pub struct ScSpecializationConstant { + pub id: u32, + pub constant_id: u32, + } + impl Clone for ScSpecializationConstant { + fn clone(&self) -> Self { *self } + } + #[repr(C)] + #[derive(Debug, Copy)] + pub struct ScType { + pub type_: root::SPIRV_CROSS_NAMESPACE::SPIRType_BaseType, + pub member_types: *mut u32, + pub member_types_size: usize, + pub array: *mut u32, + pub array_size: usize, + } + impl Clone for ScType { + fn clone(&self) -> Self { *self } + } +} diff --git a/third_party/rust/spirv_cross/src/bindings_wasm_functions.rs b/third_party/rust/spirv_cross/src/bindings_wasm_functions.rs new file mode 100644 index 000000000000..405f2c0bb80b --- /dev/null +++ b/third_party/rust/spirv_cross/src/bindings_wasm_functions.rs @@ -0,0 +1,669 @@ + + + + + + + +use crate::emscripten; +use crate::{bindings, ErrorCode}; +use js_sys::{global, Object, Reflect, Uint32Array, Uint8Array}; +use std::ffi::CStr; +use wasm_bindgen::prelude::*; + +const U32_SIZE: u32 = std::mem::size_of::() as u32; + +#[wasm_bindgen] +extern "C" { + + + #[wasm_bindgen(js_namespace = sc_internal)] + fn _sc_internal_compiler_glsl_new(compiler: u32, ir: u32, size: u32) -> u32; + + #[wasm_bindgen(js_namespace = sc_internal)] + fn _sc_internal_compiler_glsl_set_options(compiler: u32, options: u32) -> u32; + + #[wasm_bindgen(js_namespace = sc_internal)] + fn _sc_internal_compiler_glsl_build_combined_image_samplers(compiler: u32) -> u32; + + #[wasm_bindgen(js_namespace = sc_internal)] + fn _sc_internal_compiler_glsl_get_combined_image_samplers( + compiler: u32, + samplers: u32, + size: u32, + ) -> u32; + + #[wasm_bindgen(js_namespace = sc_internal)] + fn _sc_internal_compiler_get_decoration( + compiler: u32, + result: u32, + id: u32, + decoration: u32, + ) -> u32; + + #[wasm_bindgen(js_namespace = sc_internal)] + fn _sc_internal_compiler_set_decoration( + compiler: u32, + id: u32, + decoration: u32, + argument: u32, + ) -> u32; + + 
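    // Every `_sc_internal_*` import in this block returns an ScInternalResult
    // status code and writes its real output through u32 offsets into the
    // Emscripten heap; no Rust pointers ever cross the FFI boundary. The safe
    // wrappers below all follow the same round-trip (a sketch, not part of
    // this patch; `_sc_internal_something` is a hypothetical export):
    //
    //     let cell = module.allocate(U32_SIZE);     // 4-byte out-parameter cell
    //     let status = _sc_internal_something(cell.as_offset());
    //     let value = module.get_u32(cell);         // copy the result back out
    //     module.free(cell);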
#[wasm_bindgen(js_namespace = sc_internal)] + fn _sc_internal_compiler_unset_decoration(compiler: u32, id: u32, decoration: u32) -> u32; + + #[wasm_bindgen(js_namespace = sc_internal)] + fn _sc_internal_compiler_get_name(compiler: u32, id: u32, name: u32) -> u32; + + #[wasm_bindgen(js_namespace = sc_internal)] + fn _sc_internal_compiler_set_name(compiler: u32, id: u32, name: u32) -> u32; + + #[wasm_bindgen(js_namespace = sc_internal)] + fn _sc_internal_compiler_get_entry_points(compiler: u32, entry_points: u32, size: u32) -> u32; + + #[wasm_bindgen(js_namespace = sc_internal)] + fn _sc_internal_compiler_get_cleansed_entry_point_name( + compiler: u32, + original_entry_point_name: u32, + execution_model: u32, + compiled_entry_point_name: u32, + ) -> u32; + + #[wasm_bindgen(js_namespace = sc_internal)] + fn _sc_internal_compiler_get_shader_resources(compiler: u32, shader_resources: u32) -> u32; + + #[wasm_bindgen(js_namespace = sc_internal)] + fn _sc_internal_compiler_get_specialization_constants( + compiler: u32, + constants: u32, + size: u32, + ) -> u32; + + #[wasm_bindgen(js_namespace = sc_internal)] + fn _sc_internal_compiler_set_scalar_constant( + compiler: u32, + id: u32, + constant_high_bits: u32, + constant_low_bits: u32, + ) -> u32; + + #[wasm_bindgen(js_namespace = sc_internal)] + fn _sc_internal_compiler_get_type(compiler: u32, id: u32, spirv_type: u32) -> u32; + + #[wasm_bindgen(js_namespace = sc_internal)] + fn _sc_internal_compiler_get_member_name(compiler: u32, id: u32, index: u32, name: u32) -> u32; + + #[wasm_bindgen(js_namespace = sc_internal)] + fn _sc_internal_compiler_get_member_decoration( + compiler: u32, + id: u32, + index: u32, + decoration: u32, + result: u32, + ) -> u32; + + #[wasm_bindgen(js_namespace = sc_internal)] + fn _sc_internal_compiler_set_member_decoration( + compiler: u32, + id: u32, + index: u32, + decoration: u32, + argument: u32, + ) -> u32; + + #[wasm_bindgen(js_namespace = sc_internal)] + fn _sc_internal_compiler_get_declared_struct_size(compiler: u32, id: u32, result: u32) -> u32; + + #[wasm_bindgen(js_namespace = sc_internal)] + fn _sc_internal_compiler_get_declared_struct_member_size( + compiler: u32, + id: u32, + index: u32, + result: u32, + ) -> u32; + + #[wasm_bindgen(js_namespace = sc_internal)] + fn _sc_internal_compiler_rename_interface_variable( + compiler: u32, + resources: u32, + resources_size: u32, + location: u32, + name: u32, + ) -> u32; + + #[wasm_bindgen(js_namespace = sc_internal)] + fn _sc_internal_compiler_get_work_group_size_specialization_constants( + compiler: u32, + constants: u32, + ) -> u32; + + #[wasm_bindgen(js_namespace = sc_internal)] + fn _sc_internal_compiler_compile(compiler: u32, shader: u32) -> u32; + + #[wasm_bindgen(js_namespace = sc_internal)] + fn _sc_internal_compiler_delete(compiler: u32) -> u32; + + #[wasm_bindgen(js_namespace = sc_internal)] + fn _sc_internal_free_pointer(pointer: u32) -> u32; +} + +fn map_internal_result(result: u32) -> bindings::ScInternalResult { + match result { + 0 => bindings::ScInternalResult::Success, + 1 => bindings::ScInternalResult::Unhandled, + 2 => bindings::ScInternalResult::CompilationError, + _ => unreachable!(), + } +} + +pub fn sc_internal_get_latest_exception_message( + message: *mut *const ::std::os::raw::c_char, +) -> bindings::ScInternalResult { + + + bindings::ScInternalResult::Success +} + +pub fn sc_internal_compiler_glsl_new( + compiler: *mut *mut bindings::ScInternalCompilerGlsl, + ir: *const u32, + size: usize, +) -> bindings::ScInternalResult { + let spirv_bytes 
= size * (U32_SIZE as usize); + unsafe { + let spirv = std::slice::from_raw_parts(ir as *const u8, spirv_bytes); + let module = emscripten::get_module(); + let spirv_ptr = module.allocate(spirv_bytes as u32); + module.set_from_u8_slice(spirv_ptr, spirv); + let compiler_ptr_to_ptr = module.allocate(U32_SIZE); + let result = map_internal_result(_sc_internal_compiler_glsl_new( + compiler_ptr_to_ptr.as_offset(), + spirv_ptr.as_offset(), + size as u32, + )); + *compiler = module.get_u32(compiler_ptr_to_ptr) as *mut bindings::ScInternalCompilerGlsl; + module.free(compiler_ptr_to_ptr); + module.free(spirv_ptr); + result + } +} + +pub fn sc_internal_compiler_glsl_set_options( + compiler: *const bindings::ScInternalCompilerGlsl, + options: *const bindings::ScGlslCompilerOptions, +) -> bindings::ScInternalResult { + + + + + let module = emscripten::get_module(); + let compiler_options_size = std::mem::size_of::(); + + unsafe { + let bytes = std::slice::from_raw_parts(options as *const u8, compiler_options_size); + let copied_options_ptr = module.allocate(compiler_options_size as u32); + module.set_from_u8_slice(copied_options_ptr, bytes); + let result = map_internal_result(_sc_internal_compiler_glsl_set_options( + compiler as u32, + copied_options_ptr.as_offset(), + )); + module.free(copied_options_ptr); + result + } +} + +pub fn sc_internal_compiler_glsl_build_combined_image_samplers( + compiler: *const bindings::ScInternalCompilerBase, +) -> bindings::ScInternalResult { + map_internal_result(_sc_internal_compiler_glsl_build_combined_image_samplers( + compiler as u32, + )) +} + +pub fn sc_internal_compiler_glsl_get_combined_image_samplers( + compiler: *const bindings::ScInternalCompilerBase, + samplers: *mut *const bindings::ScCombinedImageSampler, + size: *mut usize, +) -> bindings::ScInternalResult { + let module = emscripten::get_module(); + unsafe { + let samplers_ptr_to_ptr = module.allocate(U32_SIZE); + let size_ptr = module.allocate(U32_SIZE); + let result = map_internal_result(_sc_internal_compiler_glsl_get_combined_image_samplers( + compiler as u32, + samplers_ptr_to_ptr.as_offset(), + size_ptr.as_offset(), + )); + + *samplers = module.get_u32(samplers_ptr_to_ptr) as *const bindings::ScCombinedImageSampler; + *size = module.get_u32(size_ptr) as usize; + + module.free(samplers_ptr_to_ptr); + module.free(size_ptr); + + result + } +} + +pub fn sc_internal_compiler_get_decoration( + compiler: *const bindings::ScInternalCompilerBase, + result: *mut u32, + id: u32, + decoration: bindings::spv::Decoration, +) -> bindings::ScInternalResult { + let module = emscripten::get_module(); + unsafe { + let result_ptr = module.allocate(U32_SIZE); + let ret = map_internal_result(_sc_internal_compiler_get_decoration( + compiler as u32, + result_ptr.as_offset(), + id, + decoration as u32, + )); + *result = module.get_u32(result_ptr) as u32; + module.free(result_ptr); + ret + } +} + +pub fn sc_internal_compiler_set_decoration( + compiler: *const bindings::ScInternalCompilerBase, + id: u32, + decoration: bindings::spv::Decoration, + argument: u32, +) -> bindings::ScInternalResult { + map_internal_result(_sc_internal_compiler_set_decoration( + compiler as u32, + id, + decoration as u32, + argument, + )) +} + +pub fn sc_internal_compiler_unset_decoration( + compiler: *const bindings::ScInternalCompilerBase, + id: u32, + decoration: bindings::spv::Decoration, +) -> bindings::ScInternalResult { + map_internal_result(_sc_internal_compiler_unset_decoration( + compiler as u32, + id, + decoration as u32, + )) +} + +pub 
fn sc_internal_compiler_get_name( + compiler: *const bindings::ScInternalCompilerBase, + id: u32, + name: *mut *const ::std::os::raw::c_char, +) -> bindings::ScInternalResult { + let module = emscripten::get_module(); + unsafe { + let name_ptr_to_ptr = module.allocate(U32_SIZE); + let result = map_internal_result(_sc_internal_compiler_get_name( + compiler as u32, + id, + name_ptr_to_ptr.as_offset(), + )); + let name_ptr = module.get_u32(name_ptr_to_ptr); + *name = name_ptr as *const ::std::os::raw::c_char; + module.free(name_ptr_to_ptr); + result + } +} + +pub fn sc_internal_compiler_set_name( + compiler: *const bindings::ScInternalCompilerBase, + id: u32, + name: *const ::std::os::raw::c_char, +) -> bindings::ScInternalResult { + let module = emscripten::get_module(); + unsafe { + let name_bytes = CStr::from_ptr(name).to_bytes(); + let name_ptr = module.allocate(name_bytes.len() as u32); + module.set_from_u8_slice(name_ptr, name_bytes); + let result = map_internal_result(_sc_internal_compiler_set_name( + compiler as u32, + id, + name_ptr.as_offset(), + )); + module.free(name_ptr); + result + } +} + +pub fn sc_internal_compiler_get_entry_points( + compiler: *const bindings::ScInternalCompilerBase, + entry_points: *mut *mut bindings::ScEntryPoint, + size: *mut usize, +) -> bindings::ScInternalResult { + let module = emscripten::get_module(); + unsafe { + let entry_points_ptr_to_ptr = module.allocate(U32_SIZE); + let size_ptr = module.allocate(U32_SIZE); + + let result = map_internal_result(_sc_internal_compiler_get_entry_points( + compiler as u32, + entry_points_ptr_to_ptr.as_offset(), + size_ptr.as_offset(), + )); + + *entry_points = module.get_u32(entry_points_ptr_to_ptr) as *mut bindings::ScEntryPoint; + *size = module.get_u32(size_ptr) as usize; + + module.free(size_ptr); + module.free(entry_points_ptr_to_ptr); + + result + } +} + +pub fn sc_internal_compiler_get_cleansed_entry_point_name( + compiler: *const bindings::ScInternalCompilerBase, + original_entry_point_name: *const ::std::os::raw::c_char, + execution_model: bindings::spv::ExecutionModel, + compiled_entry_point_name: *mut *const ::std::os::raw::c_char, +) -> bindings::ScInternalResult { + let module = emscripten::get_module(); + unsafe { + let original_name_bytes = CStr::from_ptr(original_entry_point_name).to_bytes_with_nul(); + let original_name_ptr = module.allocate(original_name_bytes.len() as u32); + module.set_from_u8_slice(original_name_ptr, original_name_bytes); + + let compiled_name_ptr_to_ptr = module.allocate(U32_SIZE); + let result = map_internal_result(_sc_internal_compiler_get_cleansed_entry_point_name( + compiler as u32, + original_name_ptr.as_offset(), + execution_model as u32, + compiled_name_ptr_to_ptr.as_offset(), + )); + let compiled_name_ptr = module.get_u32(compiled_name_ptr_to_ptr); + *compiled_entry_point_name = compiled_name_ptr as *const ::std::os::raw::c_char; + + module.free(compiled_name_ptr_to_ptr); + module.free(original_name_ptr); + + result + } +} + +pub fn sc_internal_compiler_get_shader_resources( + compiler: *const bindings::ScInternalCompilerBase, + shader_resources: *mut bindings::ScShaderResources, +) -> bindings::ScInternalResult { + let module = emscripten::get_module(); + unsafe { + let num_bytes = std::mem::size_of::(); + let shader_resources_ptr = module.allocate(num_bytes as u32); + let result = map_internal_result(_sc_internal_compiler_get_shader_resources( + compiler as u32, + shader_resources_ptr.as_offset(), + )); + module.read_bytes_into_pointer_while( + shader_resources_ptr, + 
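    // Structs cannot be read across the wasm boundary in one call, so the
    // bytes of ScShaderResources are copied out of the Emscripten heap one at
    // a time: the predicate below keeps the copy going until
    // sizeof(ScShaderResources) bytes have been written into the caller's
    // out-pointer.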
|byte, bytes_read| bytes_read < num_bytes, + false, + shader_resources as *mut u8, + ); + module.free(shader_resources_ptr); + result + } +} + +pub fn sc_internal_compiler_get_specialization_constants( + compiler: *const bindings::ScInternalCompilerBase, + constants: *mut *mut bindings::ScSpecializationConstant, + size: *mut usize, +) -> bindings::ScInternalResult { + let module = emscripten::get_module(); + unsafe { + let constants_ptr_to_ptr = module.allocate(U32_SIZE); + let constants_size_ptr = module.allocate(U32_SIZE); + let result = map_internal_result(_sc_internal_compiler_get_specialization_constants( + compiler as u32, + constants_ptr_to_ptr.as_offset(), + constants_size_ptr.as_offset() as u32, + )); + *constants = + module.get_u32(constants_ptr_to_ptr) as *mut bindings::ScSpecializationConstant; + *size = module.get_u32(constants_size_ptr) as usize; + module.free(constants_size_ptr); + module.free(constants_ptr_to_ptr); + result + } +} + +pub fn sc_internal_compiler_set_scalar_constant( + compiler: *const bindings::ScInternalCompilerBase, + id: u32, + constant_high_bits: u32, + constant_low_bits: u32, +) -> bindings::ScInternalResult { + map_internal_result(_sc_internal_compiler_set_scalar_constant( + compiler as u32, + id, + constant_high_bits, + constant_low_bits, + )) +} + +pub fn sc_internal_compiler_get_type( + compiler: *const bindings::ScInternalCompilerBase, + id: u32, + spirv_type: *mut *const bindings::ScType, +) -> bindings::ScInternalResult { + let module = emscripten::get_module(); + unsafe { + let type_ptr_to_ptr = module.allocate(U32_SIZE); + let result = map_internal_result(_sc_internal_compiler_get_type( + compiler as u32, + id, + type_ptr_to_ptr.as_offset(), + )); + let type_ptr = module.get_u32(type_ptr_to_ptr); + *spirv_type = type_ptr as *const bindings::ScType; + module.free(type_ptr_to_ptr); + result + } +} + +pub fn sc_internal_compiler_get_member_name( + compiler: *const bindings::ScInternalCompilerBase, + id: u32, + index: u32, + name: *mut *const ::std::os::raw::c_char, +) -> bindings::ScInternalResult { + let module = emscripten::get_module(); + unsafe { + let name_ptr_to_ptr = module.allocate(U32_SIZE); + let result = map_internal_result(_sc_internal_compiler_get_member_name( + compiler as u32, + id, + index, + name_ptr_to_ptr.as_offset(), + )); + let name_ptr = module.get_u32(name_ptr_to_ptr); + *name = name_ptr as *const ::std::os::raw::c_char; + module.free(name_ptr_to_ptr); + result + } +} + +pub fn sc_internal_compiler_get_member_decoration( + compiler: *const bindings::ScInternalCompilerBase, + id: u32, + index: u32, + decoration: bindings::spv::Decoration, + result: *mut u32, +) -> bindings::ScInternalResult { + let module = emscripten::get_module(); + unsafe { + let result_ptr = module.allocate(U32_SIZE); + let ret = map_internal_result(_sc_internal_compiler_get_member_decoration( + compiler as u32, + id, + index, + decoration as u32, + result_ptr.as_offset(), + )); + *result = module.get_u32(result_ptr) as u32; + module.free(result_ptr); + ret + } +} + +pub fn sc_internal_compiler_set_member_decoration( + compiler: *const bindings::ScInternalCompilerBase, + id: u32, + index: u32, + decoration: bindings::spv::Decoration, + argument: u32, +) -> bindings::ScInternalResult { + map_internal_result(_sc_internal_compiler_set_member_decoration( + compiler as u32, + id, + index, + decoration as u32, + argument, + )) +} + +pub fn sc_internal_compiler_get_declared_struct_size( + compiler: *const bindings::ScInternalCompilerBase, + id: u32, + result: 
*mut u32, +) -> bindings::ScInternalResult { + let module = emscripten::get_module(); + unsafe { + let result_ptr = module.allocate(U32_SIZE); + let ret = map_internal_result(_sc_internal_compiler_get_declared_struct_size( + compiler as u32, + id, + result_ptr.as_offset(), + )); + *result = module.get_u32(result_ptr) as u32; + module.free(result_ptr); + ret + } +} + +pub fn sc_internal_compiler_get_declared_struct_member_size( + compiler: *const bindings::ScInternalCompilerBase, + id: u32, + index: u32, + result: *mut u32, +) -> bindings::ScInternalResult { + let module = emscripten::get_module(); + unsafe { + let result_ptr = module.allocate(U32_SIZE); + let ret = map_internal_result(_sc_internal_compiler_get_declared_struct_member_size( + compiler as u32, + id, + index, + result_ptr.as_offset(), + )); + *result = module.get_u32(result_ptr) as u32; + module.free(result_ptr); + ret + } +} + +pub fn sc_internal_compiler_rename_interface_variable( + compiler: *const bindings::ScInternalCompilerBase, + resources: *const bindings::ScResource, + resources_size: usize, + location: u32, + name: *const ::std::os::raw::c_char, +) -> bindings::ScInternalResult { + let module = emscripten::get_module(); + unsafe { + let mut resources_copied = std::slice::from_raw_parts(resources, resources_size).to_vec(); + + for mut resource in &mut resources_copied { + + let resource_name_bytes = CStr::from_ptr(resource.name).to_bytes(); + let resource_name_ptr = module.allocate(resource_name_bytes.len() as u32); + module.set_from_u8_slice(resource_name_ptr, resource_name_bytes); + resource.name = resource_name_ptr.as_offset() as *mut std::os::raw::c_char; + } + + let resources_ptr = module.allocate(std::mem::size_of::() as u32); + module.set_from_u8_slice( + resources_ptr, + std::slice::from_raw_parts( + resources_copied.as_ptr() as *const u8, + resources_size * std::mem::size_of::(), + ), + ); + let name_bytes = CStr::from_ptr(name).to_bytes(); + let name_ptr = module.allocate(name_bytes.len() as u32); + module.set_from_u8_slice(name_ptr, name_bytes); + let result = map_internal_result(_sc_internal_compiler_rename_interface_variable( + compiler as u32, + resources_ptr.as_offset(), + resources_size as u32, + location, + name_ptr.as_offset(), + )); + + for resource in resources_copied { + module.free(emscripten::Pointer::from_offset(resource.name as u32)); + } + + module.free(name_ptr); + result + } +} + +pub fn sc_internal_compiler_get_work_group_size_specialization_constants( + compiler: *const bindings::ScInternalCompilerBase, + constants: *mut *mut bindings::ScSpecializationConstant, +) -> bindings::ScInternalResult { + let module = emscripten::get_module(); + let constants_length = 3; + unsafe { + let constants_ptr_to_ptr = module.allocate( + std::mem::size_of::() as u32 * constants_length, + ); + let result = map_internal_result( + _sc_internal_compiler_get_work_group_size_specialization_constants( + compiler as u32, + constants_ptr_to_ptr.as_offset(), + ), + ); + let constants_ptr = module.get_u32(constants_ptr_to_ptr); + *constants = constants_ptr as *mut bindings::ScSpecializationConstant; + module.free(constants_ptr_to_ptr); + result + } +} + +pub fn sc_internal_compiler_compile( + compiler: *const bindings::ScInternalCompilerBase, + shader: *mut *const ::std::os::raw::c_char, +) -> bindings::ScInternalResult { + let module = emscripten::get_module(); + unsafe { + let shader_ptr_to_ptr = module.allocate(U32_SIZE); + let result = map_internal_result(_sc_internal_compiler_compile( + compiler as u32, + 
shader_ptr_to_ptr.as_offset(), + )); + let shader_ptr = module.get_u32(shader_ptr_to_ptr); + *shader = shader_ptr as *const ::std::os::raw::c_char; + module.free(shader_ptr_to_ptr); + result + } +} + +pub fn sc_internal_compiler_delete( + compiler: *mut bindings::ScInternalCompilerBase, +) -> bindings::ScInternalResult { + map_internal_result(_sc_internal_compiler_delete(compiler as u32)) +} + +pub fn sc_internal_free_pointer( + pointer: *mut ::std::os::raw::c_void, +) -> bindings::ScInternalResult { + map_internal_result(_sc_internal_free_pointer(pointer as u32)) +} diff --git a/third_party/rust/spirv_cross/src/compiler.rs b/third_party/rust/spirv_cross/src/compiler.rs new file mode 100644 index 000000000000..e67e0df34cb4 --- /dev/null +++ b/third_party/rust/spirv_cross/src/compiler.rs @@ -0,0 +1,636 @@ + +use crate::bindings as br; +use crate::ptr_util::{read_from_ptr, read_into_vec_from_ptr, read_string_from_ptr}; +use crate::spirv::{self, Decoration, Type}; +use crate::ErrorCode; +use std::ffi::CString; +use std::os::raw::c_void; +use std::{mem, ptr}; + +impl spirv::ExecutionModel { + fn from_raw(raw: br::spv::ExecutionModel) -> Result { + use crate::bindings::root::spv::ExecutionModel as Em; + use crate::spirv::ExecutionModel::*; + match raw { + Em::ExecutionModelVertex => Ok(Vertex), + Em::ExecutionModelTessellationControl => Ok(TessellationControl), + Em::ExecutionModelTessellationEvaluation => Ok(TessellationEvaluation), + Em::ExecutionModelGeometry => Ok(Geometry), + Em::ExecutionModelFragment => Ok(Fragment), + Em::ExecutionModelGLCompute => Ok(GlCompute), + Em::ExecutionModelKernel => Ok(Kernel), + _ => Err(ErrorCode::Unhandled), + } + } + + pub(crate) fn as_raw(self) -> br::spv::ExecutionModel { + use crate::bindings::root::spv::ExecutionModel as Em; + use crate::spirv::ExecutionModel::*; + match self { + Vertex => Em::ExecutionModelVertex, + TessellationControl => Em::ExecutionModelTessellationControl, + TessellationEvaluation => Em::ExecutionModelTessellationEvaluation, + Geometry => Em::ExecutionModelGeometry, + Fragment => Em::ExecutionModelFragment, + GlCompute => Em::ExecutionModelGLCompute, + Kernel => Em::ExecutionModelKernel, + } + } +} + +impl spirv::Decoration { + fn as_raw(self) -> br::spv::Decoration { + use crate::bindings::root::spv::Decoration as D; + match self { + Decoration::RelaxedPrecision => D::DecorationRelaxedPrecision, + Decoration::SpecId => D::DecorationSpecId, + Decoration::Block => D::DecorationBlock, + Decoration::BufferBlock => D::DecorationBufferBlock, + Decoration::RowMajor => D::DecorationRowMajor, + Decoration::ColMajor => D::DecorationColMajor, + Decoration::ArrayStride => D::DecorationArrayStride, + Decoration::MatrixStride => D::DecorationMatrixStride, + Decoration::GlslShared => D::DecorationGLSLShared, + Decoration::GlslPacked => D::DecorationGLSLPacked, + Decoration::CPacked => D::DecorationCPacked, + Decoration::BuiltIn => D::DecorationBuiltIn, + Decoration::NoPerspective => D::DecorationNoPerspective, + Decoration::Flat => D::DecorationFlat, + Decoration::Patch => D::DecorationPatch, + Decoration::Centroid => D::DecorationCentroid, + Decoration::Sample => D::DecorationSample, + Decoration::Invariant => D::DecorationInvariant, + Decoration::Restrict => D::DecorationRestrict, + Decoration::Aliased => D::DecorationAliased, + Decoration::Volatile => D::DecorationVolatile, + Decoration::Constant => D::DecorationConstant, + Decoration::Coherent => D::DecorationCoherent, + Decoration::NonWritable => D::DecorationNonWritable, + 
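    // The Rust-side variants use mixed casing (GlslShared, FpRoundingMode)
    // while the generated C++ names keep SPIR-V's capitalization (GLSLShared,
    // FPRoundingMode), so the mapping is spelled out arm by arm rather than
    // derived.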
Decoration::NonReadable => D::DecorationNonReadable, + Decoration::Uniform => D::DecorationUniform, + Decoration::SaturatedConversion => D::DecorationSaturatedConversion, + Decoration::Stream => D::DecorationStream, + Decoration::Location => D::DecorationLocation, + Decoration::Component => D::DecorationComponent, + Decoration::Index => D::DecorationIndex, + Decoration::Binding => D::DecorationBinding, + Decoration::DescriptorSet => D::DecorationDescriptorSet, + Decoration::Offset => D::DecorationOffset, + Decoration::XfbBuffer => D::DecorationXfbBuffer, + Decoration::XfbStride => D::DecorationXfbStride, + Decoration::FuncParamAttr => D::DecorationFuncParamAttr, + Decoration::FpRoundingMode => D::DecorationFPRoundingMode, + Decoration::FpFastMathMode => D::DecorationFPFastMathMode, + Decoration::LinkageAttributes => D::DecorationLinkageAttributes, + Decoration::NoContraction => D::DecorationNoContraction, + Decoration::InputAttachmentIndex => D::DecorationInputAttachmentIndex, + Decoration::Alignment => D::DecorationAlignment, + Decoration::OverrideCoverageNv => D::DecorationOverrideCoverageNV, + Decoration::PassthroughNv => D::DecorationPassthroughNV, + Decoration::ViewportRelativeNv => D::DecorationViewportRelativeNV, + Decoration::SecondaryViewportRelativeNv => D::DecorationSecondaryViewportRelativeNV, + } + } +} + +impl spirv::Type { + pub(crate) fn from_raw( + ty: br::SPIRV_CROSS_NAMESPACE::SPIRType_BaseType, + member_types: Vec, + array: Vec, + ) -> Type { + use crate::bindings::root::SPIRV_CROSS_NAMESPACE::SPIRType_BaseType as B; + use crate::spirv::Type::*; + match ty { + B::Unknown => Unknown, + B::Void => Void, + B::Boolean => Boolean { array }, + B::Char => Char { array }, + B::Int => Int { array }, + B::UInt => UInt { array }, + B::Int64 => Int64 { array }, + B::UInt64 => UInt64 { array }, + B::AtomicCounter => AtomicCounter { array }, + B::Half => Half { array }, + B::Float => Float { array }, + B::Double => Double { array }, + B::Struct => Struct { + member_types, + array, + }, + B::Image => Image { array }, + B::SampledImage => SampledImage { array }, + B::Sampler => Sampler { array }, + B::SByte => SByte { array }, + B::UByte => UByte { array }, + B::Short => Short { array }, + B::UShort => UShort { array }, + B::ControlPointArray => ControlPointArray, + B::AccelerationStructureNV => AccelerationStructureNv, + } + } +} + +#[derive(Debug, Clone)] +pub struct Compiler { + pub(crate) sc_compiler: *mut br::ScInternalCompilerBase, + pub(crate) target_data: TTargetData, + pub(crate) has_been_compiled: bool, +} + +impl Compiler { + #[cfg(any(feature = "msl", feature = "glsl", feature = "hlsl"))] + pub fn compile(&mut self) -> Result { + unsafe { + let mut shader_ptr = ptr::null(); + check!(br::sc_internal_compiler_compile( + self.sc_compiler, + &mut shader_ptr, + )); + let shader = read_string_from_ptr(shader_ptr)?; + check!(br::sc_internal_free_pointer(shader_ptr as *mut c_void)); + Ok(shader) + } + } + + pub fn get_decoration(&self, id: u32, decoration: spirv::Decoration) -> Result { + let mut result = 0; + unsafe { + check!(br::sc_internal_compiler_get_decoration( + self.sc_compiler, + &mut result, + id, + decoration.as_raw(), + )); + } + Ok(result) + } + + pub fn get_name(&mut self, id: u32) -> Result { + unsafe { + let mut name_ptr = ptr::null(); + check!(br::sc_internal_compiler_get_name( + self.sc_compiler, + id, + &mut name_ptr, + )); + let name = read_string_from_ptr(name_ptr)?; + check!(br::sc_internal_free_pointer(name_ptr as *mut c_void)); + Ok(name) + } + } + + pub fn 
set_name(&mut self, id: u32, name: &str) -> Result<(), ErrorCode> { + let name = CString::new(name); + unsafe { + match name { + Ok(name) => { + check!(br::sc_internal_compiler_set_name( + self.sc_compiler, + id, + name.as_ptr(), + )); + } + _ => return Err(ErrorCode::Unhandled), + } + } + Ok(()) + } + + pub fn unset_decoration( + &mut self, + id: u32, + decoration: spirv::Decoration, + ) -> Result<(), ErrorCode> { + unsafe { + check!(br::sc_internal_compiler_unset_decoration( + self.sc_compiler, + id, + decoration.as_raw(), + )); + } + + Ok(()) + } + + pub fn set_decoration( + &mut self, + id: u32, + decoration: spirv::Decoration, + argument: u32, + ) -> Result<(), ErrorCode> { + unsafe { + check!(br::sc_internal_compiler_set_decoration( + self.sc_compiler, + id, + decoration.as_raw(), + argument, + )); + } + + Ok(()) + } + + pub fn get_entry_points(&self) -> Result, ErrorCode> { + let mut entry_points_raw = ptr::null_mut(); + let mut entry_points_raw_length = 0 as usize; + + unsafe { + check!(br::sc_internal_compiler_get_entry_points( + self.sc_compiler, + &mut entry_points_raw, + &mut entry_points_raw_length, + )); + + let entry_points = (0..entry_points_raw_length) + .map(|offset| { + let entry_point_raw_ptr = entry_points_raw.add(offset); + let entry_point_raw = read_from_ptr::(entry_point_raw_ptr); + let name = read_string_from_ptr(entry_point_raw.name)?; + let entry_point = spirv::EntryPoint { + name, + execution_model: spirv::ExecutionModel::from_raw( + entry_point_raw.execution_model, + )?, + work_group_size: spirv::WorkGroupSize { + x: entry_point_raw.work_group_size_x, + y: entry_point_raw.work_group_size_y, + z: entry_point_raw.work_group_size_z, + }, + }; + + check!(br::sc_internal_free_pointer( + entry_point_raw.name as *mut c_void, + )); + check!(br::sc_internal_free_pointer( + entry_point_raw_ptr as *mut c_void + )); + + Ok(entry_point) + }) + .collect::, _>>(); + + Ok(entry_points?) 
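    // Both heap allocations behind each entry point (the C name string and
    // the ScEntryPoint record itself) were freed inside the closure above, so
    // the collected Vec owns all of its data.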
+ } + } + + pub fn get_active_buffer_ranges(&self, id: u32) -> Result, ErrorCode> { + let mut active_buffer_ranges_raw = ptr::null_mut(); + let mut active_buffer_ranges_raw_length = 0 as usize; + + unsafe { + check!(br::sc_internal_compiler_get_active_buffer_ranges( + self.sc_compiler, + id, + &mut active_buffer_ranges_raw, + &mut active_buffer_ranges_raw_length, + )); + + let active_buffer_ranges = (0..active_buffer_ranges_raw_length) + .map(|offset| { + let active_buffer_range_raw_ptr = active_buffer_ranges_raw.add(offset); + let active_buffer_range_raw = + read_from_ptr::(active_buffer_range_raw_ptr); + spirv::BufferRange { + index: active_buffer_range_raw.index, + offset: active_buffer_range_raw.offset, + range: active_buffer_range_raw.range, + } + }) + .collect::>(); + + check!(br::sc_internal_free_pointer( + active_buffer_ranges_raw as *mut c_void + )); + + Ok(active_buffer_ranges) + } + } + + pub fn get_cleansed_entry_point_name( + &self, + entry_point_name: &str, + execution_model: spirv::ExecutionModel, + ) -> Result { + let mut cleansed_ptr = ptr::null(); + let entry_point = CString::new(entry_point_name); + match entry_point { + Ok(ep) => unsafe { + check!(br::sc_internal_compiler_get_cleansed_entry_point_name( + self.sc_compiler, + ep.as_ptr(), + execution_model.as_raw(), + &mut cleansed_ptr + )); + let cleansed = read_string_from_ptr(cleansed_ptr)?; + check!(br::sc_internal_free_pointer(cleansed_ptr as *mut c_void)); + Ok(cleansed) + }, + _ => Err(ErrorCode::Unhandled), + } + } + + pub fn get_specialization_constants( + &self, + ) -> Result, ErrorCode> { + let mut constants_raw = ptr::null_mut(); + let mut constants_raw_length = 0 as usize; + + unsafe { + check!(br::sc_internal_compiler_get_specialization_constants( + self.sc_compiler, + &mut constants_raw, + &mut constants_raw_length, + )); + + let constants = (0..constants_raw_length) + .map(|offset| { + let constant_raw_ptr = constants_raw.add(offset); + let constant_raw = + read_from_ptr::(constant_raw_ptr); + + let constant = spirv::SpecializationConstant { + id: constant_raw.id, + constant_id: constant_raw.constant_id, + }; + + Ok(constant) + }) + .collect::, _>>(); + + check!(br::sc_internal_free_pointer(constants_raw as *mut c_void)); + + Ok(constants?) 
+ } + } + + pub fn set_scalar_constant(&self, id: u32, value: u64) -> Result<(), ErrorCode> { + let high_bits = (value >> 32) as u32; + let low_bits = value as u32; + unsafe { + check!(br::sc_internal_compiler_set_scalar_constant( + self.sc_compiler, + id, + high_bits, + low_bits, + )); + } + + Ok(()) + } + + pub fn get_type(&self, id: u32) -> Result { + unsafe { + let mut type_ptr = std::mem::zeroed(); + + check!(br::sc_internal_compiler_get_type( + self.sc_compiler, + id, + &mut type_ptr, + )); + + let raw = read_from_ptr::(type_ptr); + let member_types = read_into_vec_from_ptr(raw.member_types, raw.member_types_size); + let array = read_into_vec_from_ptr(raw.array, raw.array_size); + let result = Type::from_raw(raw.type_, member_types, array); + + if raw.member_types_size > 0 { + check!(br::sc_internal_free_pointer( + raw.member_types as *mut c_void + )); + } + if raw.array_size > 0 { + check!(br::sc_internal_free_pointer(raw.array as *mut c_void)); + } + check!(br::sc_internal_free_pointer(type_ptr as *mut c_void)); + + Ok(result) + } + } + + pub fn get_member_name(&self, id: u32, index: u32) -> Result { + unsafe { + let mut name_ptr = ptr::null(); + check!(br::sc_internal_compiler_get_member_name( + self.sc_compiler, + id, + index, + &mut name_ptr, + )); + let name = read_string_from_ptr(name_ptr)?; + check!(br::sc_internal_free_pointer(name_ptr as *mut c_void)); + Ok(name) + } + } + + pub fn get_member_decoration( + &self, + id: u32, + index: u32, + decoration: Decoration, + ) -> Result { + let mut result = 0; + unsafe { + check!(br::sc_internal_compiler_get_member_decoration( + self.sc_compiler, + id, + index, + decoration.as_raw(), + &mut result, + )); + } + Ok(result) + } + + pub fn set_member_decoration( + &self, + id: u32, + index: u32, + decoration: Decoration, + argument: u32, + ) -> Result<(), ErrorCode> { + unsafe { + check!(br::sc_internal_compiler_set_member_decoration( + self.sc_compiler, + id, + index, + decoration.as_raw(), + argument, + )); + } + + Ok(()) + } + + pub fn get_declared_struct_size(&self, id: u32) -> Result { + let mut result = 0; + unsafe { + check!(br::sc_internal_compiler_get_declared_struct_size( + self.sc_compiler, + id, + &mut result, + )); + } + Ok(result) + } + + pub fn get_declared_struct_member_size(&self, id: u32, index: u32) -> Result { + let mut result = 0; + unsafe { + check!(br::sc_internal_compiler_get_declared_struct_member_size( + self.sc_compiler, + id, + index, + &mut result, + )); + } + Ok(result) + } + + pub fn get_shader_resources(&self) -> Result { + unsafe { + let mut shader_resources_raw = mem::uninitialized(); + check!(br::sc_internal_compiler_get_shader_resources( + self.sc_compiler, + &mut shader_resources_raw, + )); + + let fill_resources = |array_raw: &br::ScResourceArray| { + let resources = (0..array_raw.num as usize) + .map(|i| { + let resource_raw = read_from_ptr::(array_raw.data.add(i)); + let name = read_string_from_ptr(resource_raw.name)?; + check!(br::sc_internal_free_pointer( + resource_raw.name as *mut c_void, + )); + + Ok(spirv::Resource { + id: resource_raw.id, + type_id: resource_raw.type_id, + base_type_id: resource_raw.base_type_id, + name, + }) + }) + .collect::, ErrorCode>>(); + + check!(br::sc_internal_free_pointer(array_raw.data as *mut c_void)); + + resources + }; + + let uniform_buffers = fill_resources(&shader_resources_raw.uniform_buffers)?; + let storage_buffers = fill_resources(&shader_resources_raw.storage_buffers)?; + let stage_inputs = fill_resources(&shader_resources_raw.stage_inputs)?; + let 
stage_outputs = fill_resources(&shader_resources_raw.stage_outputs)?; + let subpass_inputs = fill_resources(&shader_resources_raw.subpass_inputs)?; + let storage_images = fill_resources(&shader_resources_raw.storage_images)?; + let sampled_images = fill_resources(&shader_resources_raw.sampled_images)?; + let atomic_counters = fill_resources(&shader_resources_raw.atomic_counters)?; + let push_constant_buffers = + fill_resources(&shader_resources_raw.push_constant_buffers)?; + let separate_images = fill_resources(&shader_resources_raw.separate_images)?; + let separate_samplers = fill_resources(&shader_resources_raw.separate_samplers)?; + + Ok(spirv::ShaderResources { + uniform_buffers, + storage_buffers, + stage_inputs, + stage_outputs, + subpass_inputs, + storage_images, + sampled_images, + atomic_counters, + push_constant_buffers, + separate_images, + separate_samplers, + }) + } + } + + pub fn rename_interface_variable( + &self, + resources: &[spirv::Resource], + location: u32, + new_name: &str, + ) -> Result<(), ErrorCode> { + unsafe { + let mut resources_names = Vec::new(); + for resource in resources.iter() { + match CString::new(&*resource.name) { + Ok(rn) => resources_names.push(rn), + Err(_) => return Err(ErrorCode::Unhandled), + } + } + + let new_name = CString::new(new_name).map_err(|_| ErrorCode::Unhandled)?; + let new_name_ptr = new_name.as_ptr(); + let resources = resources + .iter() + .enumerate() + .map(|(i, r)| br::ScResource { + id: r.id, + type_id: r.type_id, + base_type_id: r.base_type_id, + name: resources_names[i].as_ptr() as _, + }) + .collect::>(); + let resources_ptr = resources.as_ptr(); + + check!(br::sc_internal_compiler_rename_interface_variable( + self.sc_compiler, + resources_ptr, + resources_names.len(), + location, + new_name_ptr, + )); + + Ok(()) + } + } + + pub fn get_work_group_size_specialization_constants( + &self, + ) -> Result { + let mut constants_raw = ptr::null_mut(); + + unsafe { + check!( + br::sc_internal_compiler_get_work_group_size_specialization_constants( + self.sc_compiler, + &mut constants_raw, + ) + ); + + let x = read_from_ptr::(constants_raw.offset(0)); + let y = read_from_ptr::(constants_raw.offset(1)); + let z = read_from_ptr::(constants_raw.offset(2)); + + let constants = spirv::WorkGroupSizeSpecializationConstants { + x: spirv::SpecializationConstant { + id: x.id, + constant_id: x.constant_id, + }, + y: spirv::SpecializationConstant { + id: y.id, + constant_id: y.constant_id, + }, + z: spirv::SpecializationConstant { + id: z.id, + constant_id: z.constant_id, + }, + }; + + check!(br::sc_internal_free_pointer(constants_raw as *mut c_void)); + + Ok(constants) + } + } +} + +impl Drop for Compiler { + fn drop(&mut self) { + unsafe { + br::sc_internal_compiler_delete(self.sc_compiler); + } + } +} diff --git a/third_party/rust/spirv_cross/src/emscripten.rs b/third_party/rust/spirv_cross/src/emscripten.rs new file mode 100644 index 000000000000..3eadc04cb3a7 --- /dev/null +++ b/third_party/rust/spirv_cross/src/emscripten.rs @@ -0,0 +1,162 @@ + + + +use crate::{bindings, ErrorCode}; +use js_sys::{global, Object, Reflect, Uint32Array, Uint8Array}; +use wasm_bindgen::prelude::*; + +#[wasm_bindgen] +extern "C" { + + #[wasm_bindgen(js_namespace = sc_internal)] + fn _malloc(size: u32) -> u32; + + #[wasm_bindgen(js_namespace = sc_internal)] + fn _free(offset: u32); +} + +pub fn get_module() -> Module { + const MODULE_NAME: &'static str = "sc_internal"; + let module = Reflect::get(&global(), &JsValue::from_str(MODULE_NAME)) + .unwrap() + .into(); + 
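    // `global()["sc_internal"]` is the Emscripten Module object set up by the
    // JS glue; it exposes the HEAPU8/HEAPU32 views and the _malloc/_free
    // exports that all of the heap marshalling in this module relies on.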
Module { module } +} + +const U32_SIZE: u32 = std::mem::size_of::() as u32; + +fn get_value(object: &Object, key: &str) -> T +where + T: std::convert::From, +{ + Reflect::get(object, &JsValue::from_str(key)) + .unwrap() + .into() +} + +/// An Emscripten pointer. +/// Internally stores an offset to a location on the Emscripten `u8` heap. +#[derive(Clone, Copy)] +pub struct Pointer { + offset: u32, +} + +impl Pointer { + pub fn from_offset(offset: u32) -> Self { + Pointer { offset } + } + + pub fn as_offset(&self) -> u32 { + self.offset + } +} + +pub struct Module { + module: Object, +} + +impl Module { + /// Allocate memory on the heap. + pub unsafe fn allocate(&self, byte_len: u32) -> Pointer { + Pointer { + offset: _malloc(byte_len), + } + } + + pub unsafe fn free(&self, pointer: Pointer) { + _free(pointer.as_offset()) + } + + // Read a `u32` value from the heap. + pub unsafe fn get_u32(&self, pointer: Pointer) -> u32 { + let offset = &JsValue::from_f64((pointer.offset / U32_SIZE) as f64); + // TODO: Remove Reflect + Reflect::get(&self.heap_u32(), offset) + .unwrap() + .as_f64() + .unwrap() as u32 + } + + /// Set memory on the heap to `bytes`. + pub unsafe fn set_from_u8_typed_array(&self, pointer: Pointer, bytes: Uint8Array) { + let buffer: JsValue = self.heap_u8().buffer().into(); + let memory = + Uint8Array::new_with_byte_offset_and_length(&buffer, pointer.offset, bytes.length()); + memory.set(&bytes, 0); + } + + /// Set memory on the heap to `bytes`. + pub unsafe fn set_from_u8_slice(&self, pointer: Pointer, bytes: &[u8]) { + self.set_from_u8_typed_array(pointer, Uint8Array::view(bytes)); + } + + fn heap_u8(&self) -> Uint8Array { + const HEAP_U8: &'static str = "HEAPU8"; + get_value(&self.module, HEAP_U8) + } + + fn heap_u32(&self) -> Uint32Array { + const HEAP_U32: &'static str = "HEAPU32"; + get_value(&self.module, HEAP_U32) + } + + + + pub unsafe fn read_bytes_into_vec_while( + &self, + pointer: Pointer, + should_continue: F, + include_last_byte: bool, + ) -> Vec + where + F: Fn(u8, usize) -> bool, + { + let mut bytes = Vec::new(); + let heap = &self.heap_u8(); + let start_offset = pointer.offset as usize; + loop { + let bytes_read = bytes.len(); + let offset = &JsValue::from_f64((start_offset + bytes_read) as f64); + let byte = Reflect::get(heap, offset).unwrap().as_f64().unwrap() as u8; + if should_continue(byte, bytes_read) { + bytes.push(byte); + continue; + } + if include_last_byte { + bytes.push(byte); + } + break; + } + bytes + } + + + + + pub unsafe fn read_bytes_into_pointer_while( + &self, + pointer: Pointer, + should_continue: F, + include_last_byte: bool, + into_pointer: *mut u8, + ) where + F: Fn(u8, usize) -> bool, + { + let heap = &self.heap_u8(); + let start_offset = pointer.offset as usize; + let mut bytes_read = 0; + loop { + let offset = &JsValue::from_f64((start_offset + bytes_read) as f64); + let byte = Reflect::get(heap, offset).unwrap().as_f64().unwrap() as u8; + if should_continue(byte, bytes_read) { + *into_pointer.offset(bytes_read as isize) = byte; + bytes_read += 1; + continue; + } + if include_last_byte { + *into_pointer.offset(bytes_read as isize) = byte; + } + break; + } + } +} diff --git a/third_party/rust/spirv_cross/src/glsl.rs b/third_party/rust/spirv_cross/src/glsl.rs new file mode 100644 index 000000000000..60d5abe35196 --- /dev/null +++ b/third_party/rust/spirv_cross/src/glsl.rs @@ -0,0 +1,190 @@ +use crate::bindings as br; +use crate::ptr_util::read_into_vec_from_ptr; +use crate::{compiler, spirv, ErrorCode}; +use std::marker::PhantomData; 
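    // End-to-end sketch of this GLSL backend (assumes SPIR-V words in
    // `words: &[u32]` and `spirv::Module::from_words` from this crate's
    // public API; error handling elided):
    //
    //     let module = spirv::Module::from_words(words);
    //     let mut ast = spirv::Ast::<Target>::parse(&module)?;
    //     ast.set_compiler_options(&CompilerOptions {
    //         version: Version::V3_00Es,
    //         vertex: CompilerVertexOptions::default(),
    //     })?;
    //     let glsl_source = ast.compile()?; // builds combined image samplers first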
+use std::ptr; + + +#[derive(Debug, Clone)] +pub enum Target {} + +pub struct TargetData { + combined_image_samplers_built: bool, +} + +impl spirv::Target for Target { + type Data = TargetData; +} + +#[allow(non_snake_case, non_camel_case_types)] +#[derive(Copy, Clone, Debug, Hash, Eq, PartialEq)] +pub enum Version { + V1_10, + V1_20, + V1_30, + V1_40, + V1_50, + V3_30, + V4_00, + V4_10, + V4_20, + V4_30, + V4_40, + V4_50, + V4_60, + V1_00Es, + V3_00Es, +} + +#[derive(Debug, Clone)] +pub struct CompilerVertexOptions { + pub invert_y: bool, + pub transform_clip_space: bool, +} + +impl Default for CompilerVertexOptions { + fn default() -> CompilerVertexOptions { + CompilerVertexOptions { + invert_y: false, + transform_clip_space: false, + } + } +} + + +#[derive(Debug, Clone)] +pub struct CompilerOptions { + pub version: Version, + pub vertex: CompilerVertexOptions, +} + +impl CompilerOptions { + fn as_raw(&self) -> br::ScGlslCompilerOptions { + use self::Version::*; + let (version, es) = match self.version { + V1_10 => (1_10, false), + V1_20 => (1_20, false), + V1_30 => (1_30, false), + V1_40 => (1_40, false), + V1_50 => (1_50, false), + V3_30 => (3_30, false), + V4_00 => (4_00, false), + V4_10 => (4_10, false), + V4_20 => (4_20, false), + V4_30 => (4_30, false), + V4_40 => (4_40, false), + V4_50 => (4_50, false), + V4_60 => (4_60, false), + V1_00Es => (1_00, true), + V3_00Es => (3_00, true), + }; + br::ScGlslCompilerOptions { + vertex_invert_y: self.vertex.invert_y, + vertex_transform_clip_space: self.vertex.transform_clip_space, + version, + es, + } + } +} + +impl Default for CompilerOptions { + fn default() -> CompilerOptions { + CompilerOptions { + version: Version::V4_50, + vertex: CompilerVertexOptions::default(), + } + } +} + +impl spirv::Parse for spirv::Ast { + fn parse(module: &spirv::Module) -> Result { + let compiler = { + let mut compiler = ptr::null_mut(); + unsafe { + check!(br::sc_internal_compiler_glsl_new( + &mut compiler, + module.words.as_ptr() as *const u32, + module.words.len() as usize, + )); + } + + compiler::Compiler { + sc_compiler: compiler, + target_data: TargetData { + combined_image_samplers_built: false, + }, + has_been_compiled: false, + } + }; + + Ok(spirv::Ast { + compiler, + target_type: PhantomData, + }) + } +} + +impl spirv::Compile for spirv::Ast { + type CompilerOptions = CompilerOptions; + + + fn set_compiler_options(&mut self, options: &CompilerOptions) -> Result<(), ErrorCode> { + let raw_options = options.as_raw(); + unsafe { + check!(br::sc_internal_compiler_glsl_set_options( + self.compiler.sc_compiler, + &raw_options, + )); + } + + Ok(()) + } + + + fn compile(&mut self) -> Result { + self.build_combined_image_samplers()?; + self.compiler.compile() + } +} + +impl spirv::Ast { + pub fn build_combined_image_samplers(&mut self) -> Result<(), ErrorCode> { + unsafe { + if !self.compiler.target_data.combined_image_samplers_built { + check!(br::sc_internal_compiler_glsl_build_combined_image_samplers( + self.compiler.sc_compiler + )); + self.compiler.target_data.combined_image_samplers_built = true + } + } + + Ok(()) + } + + pub fn get_combined_image_samplers( + &mut self, + ) -> Result, ErrorCode> { + self.build_combined_image_samplers()?; + unsafe { + let mut samplers_raw: *const br::ScCombinedImageSampler = std::ptr::null(); + let mut samplers_raw_length: usize = 0; + + check!(br::sc_internal_compiler_glsl_get_combined_image_samplers( + self.compiler.sc_compiler, + &mut samplers_raw as _, + &mut samplers_raw_length as _, + )); + + let samplers = 
read_into_vec_from_ptr(samplers_raw, samplers_raw_length) + .iter() + .map(|sc| spirv::CombinedImageSampler { + combined_id: sc.combined_id, + image_id: sc.image_id, + sampler_id: sc.sampler_id, + }) + .collect(); + + Ok(samplers) + } + } +} diff --git a/third_party/rust/spirv_cross/src/hlsl.rs b/third_party/rust/spirv_cross/src/hlsl.rs new file mode 100644 index 000000000000..9a3f9523fd5d --- /dev/null +++ b/third_party/rust/spirv_cross/src/hlsl.rs @@ -0,0 +1,159 @@ +use crate::bindings as br; +use crate::{compiler, spirv, ErrorCode}; +use std::marker::PhantomData; +use std::ptr; + +pub use crate::bindings::root::ScHlslRootConstant as RootConstant; + + +#[derive(Debug, Clone)] +pub enum Target {} + +impl spirv::Target for Target { + type Data = (); +} + + +#[allow(non_snake_case, non_camel_case_types)] +#[derive(Copy, Clone, Debug, Hash, Eq, PartialEq)] +pub enum ShaderModel { + V3_0, + V4_0, + V4_0L9_0, + V4_0L9_1, + V4_0L9_3, + V4_1, + V5_0, + V5_1, + V6_0, +} + +#[allow(non_snake_case, non_camel_case_types)] +impl ShaderModel { + fn as_raw(self) -> i32 { + use self::ShaderModel::*; + match self { + V3_0 => 30, + V4_0 => 40, + V4_0L9_0 => 40, + V4_0L9_1 => 40, + V4_0L9_3 => 40, + V4_1 => 41, + V5_0 => 50, + V5_1 => 51, + V6_0 => 60, + } + } +} + +#[derive(Debug, Clone)] +pub struct CompilerVertexOptions { + pub invert_y: bool, + pub transform_clip_space: bool, +} + +impl Default for CompilerVertexOptions { + fn default() -> CompilerVertexOptions { + CompilerVertexOptions { + invert_y: false, + transform_clip_space: false, + } + } +} + + +#[derive(Debug, Clone)] +pub struct CompilerOptions { + pub shader_model: ShaderModel, + + pub point_size_compat: bool, + + pub point_coord_compat: bool, + pub vertex: CompilerVertexOptions, +} + +impl CompilerOptions { + fn as_raw(&self) -> br::ScHlslCompilerOptions { + br::ScHlslCompilerOptions { + shader_model: self.shader_model.as_raw(), + point_size_compat: self.point_size_compat, + point_coord_compat: self.point_coord_compat, + vertex_invert_y: self.vertex.invert_y, + vertex_transform_clip_space: self.vertex.transform_clip_space, + } + } +} + +impl Default for CompilerOptions { + fn default() -> CompilerOptions { + CompilerOptions { + shader_model: ShaderModel::V3_0, + point_size_compat: false, + point_coord_compat: false, + vertex: CompilerVertexOptions::default(), + } + } +} + +impl spirv::Parse for spirv::Ast { + fn parse(module: &spirv::Module) -> Result { + let compiler = { + let mut compiler = ptr::null_mut(); + unsafe { + check!(br::sc_internal_compiler_hlsl_new( + &mut compiler, + module.words.as_ptr() as *const u32, + module.words.len() as usize, + )); + } + + compiler::Compiler { + sc_compiler: compiler, + target_data: (), + has_been_compiled: false, + } + }; + + Ok(spirv::Ast { + compiler, + target_type: PhantomData, + }) + } +} + +impl spirv::Compile for spirv::Ast { + type CompilerOptions = CompilerOptions; + + + fn set_compiler_options(&mut self, options: &CompilerOptions) -> Result<(), ErrorCode> { + let raw_options = options.as_raw(); + unsafe { + check!(br::sc_internal_compiler_hlsl_set_options( + self.compiler.sc_compiler, + &raw_options, + )); + } + + Ok(()) + } + + + fn compile(&mut self) -> Result { + self.compiler.compile() + } +} + +impl spirv::Ast { + + pub fn set_root_constant_layout(&mut self, layout: Vec) -> Result<(), ErrorCode> { + unsafe { + check!(br::sc_internal_compiler_hlsl_set_root_constant_layout( + self.compiler.sc_compiler, + layout.as_ptr(), + layout.len() as _, + )); + } + + Ok(()) + } +} diff --git 
a/third_party/rust/spirv_cross/src/lib.rs b/third_party/rust/spirv_cross/src/lib.rs new file mode 100644 index 000000000000..d060459679ea --- /dev/null +++ b/third_party/rust/spirv_cross/src/lib.rs @@ -0,0 +1,87 @@ +#[cfg(target_arch = "wasm32")] +macro_rules! check { + ($check:expr) => {{ + $check + }}; +} + +#[cfg(not(target_arch = "wasm32"))] +macro_rules! check { + ($check:expr) => {{ + let result = $check; + if br::ScInternalResult::Success != result { + if br::ScInternalResult::CompilationError == result { + let mut message_ptr = ptr::null(); + + if br::ScInternalResult::Success + != br::sc_internal_get_latest_exception_message(&mut message_ptr) + { + return Err(ErrorCode::Unhandled); + } + + let message = match std::ffi::CStr::from_ptr(message_ptr) + .to_owned() + .into_string() + { + Err(_) => return Err(ErrorCode::Unhandled), + Ok(v) => v, + }; + + if br::ScInternalResult::Success + != br::sc_internal_free_pointer(message_ptr as *mut std::os::raw::c_void) + { + return Err(ErrorCode::Unhandled); + } + + return Err(ErrorCode::CompilationError(message)); + } + + return Err(ErrorCode::Unhandled); + } + }}; +} + +mod compiler; + +#[cfg(feature = "glsl")] +pub mod glsl; +#[cfg(all(feature = "hlsl", not(target_arch = "wasm32")))] +pub mod hlsl; +#[cfg(all(feature = "msl", not(target_arch = "wasm32")))] +pub mod msl; + +pub mod spirv; + +#[cfg(target_arch = "wasm32")] +pub(crate) mod emscripten; +pub(crate) mod ptr_util; + +#[cfg(target_arch = "wasm32")] +mod bindings_wasm_functions; + +#[cfg(target_arch = "wasm32")] +mod bindings { + #![allow(dead_code)] + #![allow(non_upper_case_globals)] + #![allow(non_camel_case_types)] + #![allow(non_snake_case)] + include!(concat!("bindings_wasm.rs")); + pub use crate::bindings_wasm_functions::*; + pub use root::*; +} + +#[cfg(not(target_arch = "wasm32"))] +mod bindings { + #![allow(dead_code)] + #![allow(non_upper_case_globals)] + #![allow(non_camel_case_types)] + #![allow(non_snake_case)] + include!(concat!("bindings_native.rs")); + pub use root::*; +} + +#[derive(Clone, Debug, Hash, Eq, PartialEq)] +pub enum ErrorCode { + Unhandled, + CompilationError(String), +} diff --git a/third_party/rust/spirv_cross/src/msl.rs b/third_party/rust/spirv_cross/src/msl.rs new file mode 100644 index 000000000000..cf2af8a4ff9e --- /dev/null +++ b/third_party/rust/spirv_cross/src/msl.rs @@ -0,0 +1,463 @@ +use crate::bindings as br; +use crate::{compiler, spirv, ErrorCode}; + +use std::collections::BTreeMap; +use std::ffi::CStr; +use std::marker::PhantomData; +use std::ptr; +use std::u8; + + +#[derive(Debug, Clone)] +pub enum Target {} + +pub struct TargetData { + vertex_attribute_overrides: Vec, + resource_binding_overrides: Vec, + const_samplers: Vec, +} + +impl spirv::Target for Target { + type Data = TargetData; +} + + +#[derive(Debug, Clone, Hash, Eq, PartialEq, Ord, PartialOrd)] +pub struct VertexAttributeLocation(pub u32); + + +#[derive(Debug, Clone, Hash, Eq, PartialEq, Ord, PartialOrd)] +pub enum Format { + Other, + Uint8, + Uint16, +} + +impl Format { + fn as_raw(&self) -> br::SPIRV_CROSS_NAMESPACE::MSLVertexFormat { + use self::Format::*; + use crate::bindings::root::SPIRV_CROSS_NAMESPACE::MSLVertexFormat as R; + match self { + Other => R::MSL_VERTEX_FORMAT_OTHER, + Uint8 => R::MSL_VERTEX_FORMAT_UINT8, + Uint16 => R::MSL_VERTEX_FORMAT_UINT16, + } + } +} + + +#[derive(Debug, Clone, Hash, Eq, PartialEq)] +pub struct VertexAttribute { + pub buffer_id: u32, + pub offset: u32, + pub stride: u32, + pub step: spirv::VertexAttributeStep, + pub format: Format, + 
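+    // Each map entry remaps one shader vertex attribute, keyed by its SPIR-V
+    // `location`, onto an explicit Metal buffer index, byte offset, stride,
+    // and per-vertex/per-instance step (see the MSLVertexAttr translation in
+    // set_compiler_options below); `built_in` optionally tags the attribute
+    // as a SPIR-V builtin rather than a user location.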
pub built_in: Option, +} + + +#[derive(Debug, Clone, Hash, Eq, PartialEq, Ord, PartialOrd)] +pub struct ResourceBindingLocation { + pub stage: spirv::ExecutionModel, + pub desc_set: u32, + pub binding: u32, +} + + +#[derive(Debug, Clone, Hash, Eq, PartialEq)] +pub struct ResourceBinding { + pub buffer_id: u32, + pub texture_id: u32, + pub sampler_id: u32, +} + + +#[derive(Debug, Clone, Hash, Eq, PartialEq, Ord, PartialOrd)] +pub struct SamplerLocation { + pub desc_set: u32, + pub binding: u32, +} + +#[repr(C)] +#[derive(Debug, Clone, Copy, Hash, Eq, PartialEq)] +pub enum SamplerCoord { + Normalized = 0, + Pixel = 1, +} + +#[repr(C)] +#[derive(Debug, Clone, Copy, Hash, Eq, PartialEq)] +pub enum SamplerFilter { + Nearest = 0, + Linear = 1, +} + +#[repr(C)] +#[derive(Debug, Clone, Copy, Hash, Eq, PartialEq)] +pub enum SamplerMipFilter { + None = 0, + Nearest = 1, + Linear = 2, +} + +#[repr(C)] +#[derive(Debug, Clone, Copy, Hash, Eq, PartialEq)] +pub enum SamplerAddress { + ClampToZero = 0, + ClampToEdge = 1, + ClampToBorder = 2, + Repeat = 3, + MirroredRepeat = 4, +} + +#[repr(C)] +#[derive(Debug, Clone, Copy, Hash, Eq, PartialEq)] +pub enum SamplerCompareFunc { + Never = 0, + Less = 1, + LessEqual = 2, + Greater = 3, + GreaterEqual = 4, + Equal = 5, + NotEqual = 6, + Always = 7, +} + +#[repr(C)] +#[derive(Debug, Clone, Copy, Hash, Eq, PartialEq)] +pub enum SamplerBorderColor { + TransparentBlack = 0, + OpaqueBlack = 1, + OpaqueWhite = 2, +} + +#[repr(transparent)] +#[derive(Debug, Clone, Copy, Hash, Eq, PartialEq)] +pub struct LodBase16(u8); + +impl LodBase16 { + pub const ZERO: Self = LodBase16(0); + pub const MAX: Self = LodBase16(!0); +} +impl From for LodBase16 { + fn from(v: f32) -> Self { + LodBase16((v * 16.0).max(0.0).min(u8::MAX as f32) as u8) + } +} +impl Into for LodBase16 { + fn into(self) -> f32 { + self.0 as f32 / 16.0 + } +} + + +#[derive(Debug, Clone, Hash, Eq, PartialEq)] +pub struct SamplerData { + pub coord: SamplerCoord, + pub min_filter: SamplerFilter, + pub mag_filter: SamplerFilter, + pub mip_filter: SamplerMipFilter, + pub s_address: SamplerAddress, + pub t_address: SamplerAddress, + pub r_address: SamplerAddress, + pub compare_func: SamplerCompareFunc, + pub border_color: SamplerBorderColor, + pub lod_clamp_min: LodBase16, + pub lod_clamp_max: LodBase16, + pub max_anisotropy: i32, +} + + +#[repr(u8)] +#[allow(non_snake_case, non_camel_case_types)] +#[derive(Copy, Clone, Debug, Hash, Eq, PartialEq)] +pub enum Platform { + iOS = 0, + macOS = 1, +} + + +#[derive(Copy, Clone, Debug, Hash, Eq, PartialEq)] +pub enum Version { + V1_0, + V1_1, + V1_2, + V2_0, + V2_1, + V2_2, +} + +impl Version { + fn as_raw(self) -> u32 { + use self::Version::*; + match self { + V1_0 => 10000, + V1_1 => 10100, + V1_2 => 10200, + V2_0 => 20000, + V2_1 => 20100, + V2_2 => 20200, + } + } +} + +#[derive(Debug, Clone, Hash, Eq, PartialEq)] +pub struct CompilerVertexOptions { + pub invert_y: bool, + pub transform_clip_space: bool, +} + +impl Default for CompilerVertexOptions { + fn default() -> Self { + CompilerVertexOptions { + invert_y: false, + transform_clip_space: false, + } + } +} + + +#[derive(Debug, Clone, Hash, Eq, PartialEq)] +pub struct CompilerOptions { + + pub platform: Platform, + + pub version: Version, + + pub vertex: CompilerVertexOptions, + + pub swizzle_buffer_index: u32, + + pub indirect_params_buffer_index: u32, + + pub output_buffer_index: u32, + + pub patch_output_buffer_index: u32, + + pub tessellation_factor_buffer_index: u32, + + pub buffer_size_buffer_index: u32, + + pub 
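+    // LodBase16 above is unsigned 4.4 fixed point: one step is 1/16 of a LOD
+    // level. For example, 1.5f32 stores as 1.5 * 16 = 24 and reads back as
+    // 24.0 / 16.0 = 1.5; inputs clamp to the representable range
+    // 0.0..=15.9375 (255/16). The option fields below translate one-to-one
+    // into ScMslCompilerOptions in as_raw(), with enable_rasterization
+    // inverted into disable_rasterization.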
enable_point_size_builtin: bool, + + pub enable_rasterization: bool, + + pub capture_output_to_buffer: bool, + + pub swizzle_texture_samples: bool, + + pub tessellation_domain_origin_lower_left: bool, + + pub enable_argument_buffers: bool, + + pub pad_fragment_output_components: bool, + + pub resource_binding_overrides: BTreeMap, + + pub vertex_attribute_overrides: BTreeMap, + + pub const_samplers: BTreeMap, +} + +impl CompilerOptions { + fn as_raw(&self) -> br::ScMslCompilerOptions { + br::ScMslCompilerOptions { + vertex_invert_y: self.vertex.invert_y, + vertex_transform_clip_space: self.vertex.transform_clip_space, + platform: self.platform as _, + version: self.version.as_raw(), + enable_point_size_builtin: self.enable_point_size_builtin, + disable_rasterization: !self.enable_rasterization, + swizzle_buffer_index: self.swizzle_buffer_index, + indirect_params_buffer_index: self.indirect_params_buffer_index, + shader_output_buffer_index: self.output_buffer_index, + shader_patch_output_buffer_index: self.patch_output_buffer_index, + shader_tess_factor_buffer_index: self.tessellation_factor_buffer_index, + buffer_size_buffer_index: self.buffer_size_buffer_index, + capture_output_to_buffer: self.capture_output_to_buffer, + swizzle_texture_samples: self.swizzle_texture_samples, + tess_domain_origin_lower_left: self.tessellation_domain_origin_lower_left, + argument_buffers: self.enable_argument_buffers, + pad_fragment_output_components: self.pad_fragment_output_components, + } + } +} + +impl Default for CompilerOptions { + fn default() -> Self { + CompilerOptions { + platform: Platform::macOS, + version: Version::V1_2, + vertex: CompilerVertexOptions::default(), + swizzle_buffer_index: 30, + indirect_params_buffer_index: 29, + output_buffer_index: 28, + patch_output_buffer_index: 27, + tessellation_factor_buffer_index: 26, + buffer_size_buffer_index: 25, + enable_point_size_builtin: true, + enable_rasterization: true, + capture_output_to_buffer: false, + swizzle_texture_samples: false, + tessellation_domain_origin_lower_left: false, + enable_argument_buffers: false, + pad_fragment_output_components: false, + resource_binding_overrides: Default::default(), + vertex_attribute_overrides: Default::default(), + const_samplers: Default::default(), + } + } +} + +impl<'a> spirv::Parse for spirv::Ast { + fn parse(module: &spirv::Module) -> Result { + let mut sc_compiler = ptr::null_mut(); + unsafe { + check!(br::sc_internal_compiler_msl_new( + &mut sc_compiler, + module.words.as_ptr(), + module.words.len(), + )); + } + + Ok(spirv::Ast { + compiler: compiler::Compiler { + sc_compiler, + target_data: TargetData { + resource_binding_overrides: Vec::new(), + vertex_attribute_overrides: Vec::new(), + const_samplers: Vec::new(), + }, + has_been_compiled: false, + }, + target_type: PhantomData, + }) + } +} + +impl spirv::Compile for spirv::Ast { + type CompilerOptions = CompilerOptions; + + + fn set_compiler_options(&mut self, options: &CompilerOptions) -> Result<(), ErrorCode> { + let raw_options = options.as_raw(); + unsafe { + check!(br::sc_internal_compiler_msl_set_options( + self.compiler.sc_compiler, + &raw_options, + )); + } + + self.compiler.target_data.resource_binding_overrides.clear(); + self.compiler.target_data.resource_binding_overrides.extend( + options.resource_binding_overrides.iter().map(|(loc, res)| { + br::SPIRV_CROSS_NAMESPACE::MSLResourceBinding { + stage: loc.stage.as_raw(), + desc_set: loc.desc_set, + binding: loc.binding, + msl_buffer: res.buffer_id, + msl_texture: res.texture_id, + 
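+                        // One MSLResourceBinding per override: the (stage,
+                        // desc_set, binding) key identifies the SPIR-V
+                        // resource, and buffer_id/texture_id/sampler_id give
+                        // the Metal slot used for each resource class.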
msl_sampler: res.sampler_id, + } + }), + ); + + self.compiler.target_data.vertex_attribute_overrides.clear(); + self.compiler.target_data.vertex_attribute_overrides.extend( + options.vertex_attribute_overrides.iter().map(|(loc, vat)| { + br::SPIRV_CROSS_NAMESPACE::MSLVertexAttr { + location: loc.0, + msl_buffer: vat.buffer_id, + msl_offset: vat.offset, + msl_stride: vat.stride, + per_instance: match vat.step { + spirv::VertexAttributeStep::Vertex => false, + spirv::VertexAttributeStep::Instance => true, + }, + format: vat.format.as_raw(), + builtin: spirv::built_in_as_raw(vat.built_in), + } + }), + ); + + self.compiler.target_data.const_samplers.clear(); + self.compiler.target_data.const_samplers.extend( + options.const_samplers.iter().map(|(loc, data)| unsafe { + use std::mem::transmute; + br::MslConstSamplerMapping { + desc_set: loc.desc_set, + binding: loc.binding, + sampler: br::SPIRV_CROSS_NAMESPACE::MSLConstexprSampler { + coord: transmute(data.coord), + min_filter: transmute(data.min_filter), + mag_filter: transmute(data.mag_filter), + mip_filter: transmute(data.mip_filter), + s_address: transmute(data.s_address), + t_address: transmute(data.t_address), + r_address: transmute(data.r_address), + compare_func: transmute(data.compare_func), + border_color: transmute(data.border_color), + lod_clamp_min: data.lod_clamp_min.into(), + lod_clamp_max: data.lod_clamp_max.into(), + max_anisotropy: data.max_anisotropy, + compare_enable: data.compare_func != SamplerCompareFunc::Always, + lod_clamp_enable: data.lod_clamp_min != LodBase16::ZERO || + data.lod_clamp_max != LodBase16::MAX, + anisotropy_enable: data.max_anisotropy != 0, + }, + } + }), + ); + + Ok(()) + } + + + fn compile(&mut self) -> Result { + self.compile_internal() + } +} + +impl spirv::Ast { + fn compile_internal(&self) -> Result { + let vat_overrides = &self.compiler.target_data.vertex_attribute_overrides; + let res_overrides = &self.compiler.target_data.resource_binding_overrides; + let const_samplers = &self.compiler.target_data.const_samplers; + unsafe { + let mut shader_ptr = ptr::null(); + check!(br::sc_internal_compiler_msl_compile( + self.compiler.sc_compiler, + &mut shader_ptr, + vat_overrides.as_ptr(), + vat_overrides.len(), + res_overrides.as_ptr(), + res_overrides.len(), + const_samplers.as_ptr(), + const_samplers.len(), + )); + let shader = match CStr::from_ptr(shader_ptr).to_str() { + Ok(v) => v.to_owned(), + Err(_) => return Err(ErrorCode::Unhandled), + }; + check!(br::sc_internal_free_pointer( + shader_ptr as *mut std::os::raw::c_void + )); + Ok(shader) + } + } + + pub fn is_rasterization_enabled(&self) -> Result { + unsafe { + let mut is_disabled = false; + check!(br::sc_internal_compiler_msl_get_is_rasterization_disabled( + self.compiler.sc_compiler, + &mut is_disabled + )); + Ok(!is_disabled) + } + } +} + + +pub const ARGUMENT_BUFFER_BINDING: u32 = !3; diff --git a/third_party/rust/spirv_cross/src/ptr_util.rs b/third_party/rust/spirv_cross/src/ptr_util.rs new file mode 100644 index 000000000000..07794f44d890 --- /dev/null +++ b/third_party/rust/spirv_cross/src/ptr_util.rs @@ -0,0 +1,58 @@ + + + +use crate::ErrorCode; +use std::ffi::CStr; +use std::slice; + +#[cfg(target_arch = "wasm32")] +use crate::emscripten; + +pub unsafe fn read_string_from_ptr(ptr: *const std::os::raw::c_char) -> Result { + #[cfg(not(target_arch = "wasm32"))] + let string = CStr::from_ptr(ptr) + .to_owned() + .into_string() + .map_err(|_| ErrorCode::Unhandled); + #[cfg(target_arch = "wasm32")] + let string = { + let bytes = 
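+        // wasm32 path: `ptr` is an offset into the emscripten module's heap
+        // rather than a host address, so the bytes are read out one at a time
+        // until the first NUL terminator (the `0 != byte` predicate) and
+        // then validated as UTF-8.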
emscripten::get_module().read_bytes_into_vec_while( + emscripten::Pointer::from_offset(ptr as u32), + |byte, _| 0 != byte, + false, + ); + String::from_utf8(bytes).map_err(|_| ErrorCode::Unhandled) + }; + string +} + +pub unsafe fn read_from_ptr(ptr: *const T) -> T { + #[cfg(not(target_arch = "wasm32"))] + let value = ptr.read(); + #[cfg(target_arch = "wasm32")] + let value = { + let num_bytes_to_read = std::mem::size_of::(); + let mut t_val: T = std::mem::uninitialized(); + let t_ptr = &mut t_val as *mut T as *mut u8; + let bytes = emscripten::get_module().read_bytes_into_vec_while( + emscripten::Pointer::from_offset(ptr as u32), + |_, bytes_read| bytes_read < num_bytes_to_read, + false, + ); + for (offset, byte) in bytes.iter().enumerate() { + *t_ptr.offset(offset as isize) = *byte; + } + t_val + }; + value +} + +pub unsafe fn read_into_vec_from_ptr(ptr: *const T, size: usize) -> Vec { + #[cfg(not(target_arch = "wasm32"))] + let values = slice::from_raw_parts(ptr, size).to_vec(); + #[cfg(target_arch = "wasm32")] + let values = (0..size) + .map(|offset| read_from_ptr(ptr.add(offset))) + .collect(); + values +} diff --git a/third_party/rust/spirv_cross/src/spirv.rs b/third_party/rust/spirv_cross/src/spirv.rs new file mode 100644 index 000000000000..636f0862d0fd --- /dev/null +++ b/third_party/rust/spirv_cross/src/spirv.rs @@ -0,0 +1,604 @@ +use crate::{compiler, ErrorCode}; +use std::marker::PhantomData; + + +#[derive(Copy, Clone, Debug, Hash, Eq, PartialEq)] +pub struct CombinedImageSampler { + pub combined_id: u32, + pub image_id: u32, + pub sampler_id: u32, +} + + +#[derive(Copy, Clone, Debug, Hash, Eq, PartialEq, Ord, PartialOrd)] +pub enum ExecutionModel { + Vertex, + TessellationControl, + TessellationEvaluation, + Geometry, + Fragment, + GlCompute, + Kernel, +} + + +#[derive(Copy, Clone, Debug, Hash, Eq, PartialEq)] +pub enum Decoration { + RelaxedPrecision, + SpecId, + Block, + BufferBlock, + RowMajor, + ColMajor, + ArrayStride, + MatrixStride, + GlslShared, + GlslPacked, + CPacked, + BuiltIn, + NoPerspective, + Flat, + Patch, + Centroid, + Sample, + Invariant, + Restrict, + Aliased, + Volatile, + Constant, + Coherent, + NonWritable, + NonReadable, + Uniform, + SaturatedConversion, + Stream, + Location, + Component, + Index, + Binding, + DescriptorSet, + Offset, + XfbBuffer, + XfbStride, + FuncParamAttr, + FpRoundingMode, + FpFastMathMode, + LinkageAttributes, + NoContraction, + InputAttachmentIndex, + Alignment, + OverrideCoverageNv, + PassthroughNv, + ViewportRelativeNv, + SecondaryViewportRelativeNv, +} + +#[derive(Copy, Clone, Debug, Hash, Eq, PartialEq)] +pub enum VertexAttributeStep { + Vertex, + Instance, +} + +#[derive(Copy, Clone, Debug, Hash, Eq, PartialEq)] +pub enum BuiltIn { + Position, + PointSize, + ClipDistance, + CullDistance, + VertexId, + InstanceId, + PrimitiveId, + InvocationId, + Layer, + ViewportIndex, + TessLevelOuter, + TessLevelInner, + TessCoord, + PatchVertices, + FragCoord, + PointCoord, + FrontFacing, + SampleId, + SamplePosition, + SampleMask, + FragDepth, + HelperInvocation, + NumWorkgroups, + WorkgroupSize, + WorkgroupId, + LocalInvocationId, + GlobalInvocationId, + LocalInvocationIndex, + WorkDim, + GlobalSize, + EnqueuedWorkgroupSize, + GlobalOffset, + GlobalLinearId, + SubgroupSize, + SubgroupMaxSize, + NumSubgroups, + NumEnqueuedSubgroups, + SubgroupId, + SubgroupLocalInvocationId, + VertexIndex, + InstanceIndex, + SubgroupEqMask, + SubgroupGeMask, + SubgroupGtMask, + SubgroupLeMask, + SubgroupLtMask, + BaseVertex, + BaseInstance, + DrawIndex, + 
DeviceIndex, + ViewIndex, + BaryCoordNoPerspAmd, + BaryCoordNoPerspCentroidAmd, + BaryCoordNoPerspSampleAmd, + BaryCoordSmoothAmd, + BaryCoordSmoothCentroidAmd, + BaryCoordSmoothSampleAmd, + BaryCoordPullModelAmd, + FragStencilRefExt, + ViewportMaskNv, + SecondaryPositionNv, + SecondaryViewportMaskNv, + PositionPerViewNv, + ViewportMaskPerViewNv, + FullyCoveredExt, + TaskCountNv, + PrimitiveCountNv, + PrimitiveIndicesNv, + ClipDistancePerViewNv, + CullDistancePerViewNv, + LayerPerViewNv, + MeshViewCountNv, + MeshViewIndicesNv, + BaryCoordNv, + BaryCoordNoPerspNv, + FragSizeExt, + FragInvocationCountExt, + LaunchIdNv, + LaunchSizeNv, + WorldRayOriginNv, + WorldRayDirectionNv, + ObjectRayOriginNv, + ObjectRayDirectionNv, + RayTminNv, + RayTmaxNv, + InstanceCustomIndexNv, + ObjectToWorldNv, + WorldToObjectNv, + HitTNv, + HitKindNv, + IncomingRayFlagsNv, +} + +#[cfg(feature = "msl")] +pub(crate) fn built_in_as_raw(built_in: Option) -> crate::bindings::spv::BuiltIn { + use BuiltIn::*; + use crate::bindings as br; + match built_in { + None => br::spv::BuiltIn::BuiltInMax, + Some(Position) => br::spv::BuiltIn::BuiltInPosition, + Some(PointSize) => br::spv::BuiltIn::BuiltInPointSize, + Some(ClipDistance) => br::spv::BuiltIn::BuiltInClipDistance, + Some(CullDistance) => br::spv::BuiltIn::BuiltInCullDistance, + Some(VertexId) => br::spv::BuiltIn::BuiltInVertexId, + Some(InstanceId) => br::spv::BuiltIn::BuiltInInstanceId, + Some(PrimitiveId) => br::spv::BuiltIn::BuiltInPrimitiveId, + Some(InvocationId) => br::spv::BuiltIn::BuiltInInvocationId, + Some(Layer) => br::spv::BuiltIn::BuiltInLayer, + Some(ViewportIndex) => br::spv::BuiltIn::BuiltInViewportIndex, + Some(TessLevelOuter) => br::spv::BuiltIn::BuiltInTessLevelOuter, + Some(TessLevelInner) => br::spv::BuiltIn::BuiltInTessLevelInner, + Some(TessCoord) => br::spv::BuiltIn::BuiltInTessCoord, + Some(PatchVertices) => br::spv::BuiltIn::BuiltInPatchVertices, + Some(FragCoord) => br::spv::BuiltIn::BuiltInFragCoord, + Some(PointCoord) => br::spv::BuiltIn::BuiltInPointCoord, + Some(FrontFacing) => br::spv::BuiltIn::BuiltInFrontFacing, + Some(SampleId) => br::spv::BuiltIn::BuiltInSampleId, + Some(SamplePosition) => br::spv::BuiltIn::BuiltInSamplePosition, + Some(SampleMask) => br::spv::BuiltIn::BuiltInSampleMask, + Some(FragDepth) => br::spv::BuiltIn::BuiltInFragDepth, + Some(HelperInvocation) => br::spv::BuiltIn::BuiltInHelperInvocation, + Some(NumWorkgroups) => br::spv::BuiltIn::BuiltInNumWorkgroups, + Some(WorkgroupSize) => br::spv::BuiltIn::BuiltInWorkgroupSize, + Some(WorkgroupId) => br::spv::BuiltIn::BuiltInWorkgroupId, + Some(LocalInvocationId) => br::spv::BuiltIn::BuiltInLocalInvocationId, + Some(GlobalInvocationId) => br::spv::BuiltIn::BuiltInGlobalInvocationId, + Some(LocalInvocationIndex) => br::spv::BuiltIn::BuiltInLocalInvocationIndex, + Some(WorkDim) => br::spv::BuiltIn::BuiltInWorkDim, + Some(GlobalSize) => br::spv::BuiltIn::BuiltInGlobalSize, + Some(EnqueuedWorkgroupSize) => br::spv::BuiltIn::BuiltInEnqueuedWorkgroupSize, + Some(GlobalOffset) => br::spv::BuiltIn::BuiltInGlobalOffset, + Some(GlobalLinearId) => br::spv::BuiltIn::BuiltInGlobalLinearId, + Some(SubgroupSize) => br::spv::BuiltIn::BuiltInSubgroupSize, + Some(SubgroupMaxSize) => br::spv::BuiltIn::BuiltInSubgroupMaxSize, + Some(NumSubgroups) => br::spv::BuiltIn::BuiltInNumSubgroups, + Some(NumEnqueuedSubgroups) => br::spv::BuiltIn::BuiltInNumEnqueuedSubgroups, + Some(SubgroupId) => br::spv::BuiltIn::BuiltInSubgroupId, + Some(SubgroupLocalInvocationId) => 
br::spv::BuiltIn::BuiltInSubgroupLocalInvocationId, + Some(VertexIndex) => br::spv::BuiltIn::BuiltInVertexIndex, + Some(InstanceIndex) => br::spv::BuiltIn::BuiltInInstanceIndex, + Some(SubgroupEqMask) => br::spv::BuiltIn::BuiltInSubgroupEqMask, + Some(SubgroupGeMask) => br::spv::BuiltIn::BuiltInSubgroupGeMask, + Some(SubgroupGtMask) => br::spv::BuiltIn::BuiltInSubgroupGtMask, + Some(SubgroupLeMask) => br::spv::BuiltIn::BuiltInSubgroupLeMask, + Some(SubgroupLtMask) => br::spv::BuiltIn::BuiltInSubgroupLtMask, + Some(BaseVertex) => br::spv::BuiltIn::BuiltInBaseVertex, + Some(BaseInstance) => br::spv::BuiltIn::BuiltInBaseInstance, + Some(DrawIndex) => br::spv::BuiltIn::BuiltInDrawIndex, + Some(DeviceIndex) => br::spv::BuiltIn::BuiltInDeviceIndex, + Some(ViewIndex) => br::spv::BuiltIn::BuiltInViewIndex, + Some(BaryCoordNoPerspAmd) => br::spv::BuiltIn::BuiltInBaryCoordNoPerspAMD, + Some(BaryCoordNoPerspCentroidAmd) => br::spv::BuiltIn::BuiltInBaryCoordNoPerspCentroidAMD, + Some(BaryCoordNoPerspSampleAmd) => br::spv::BuiltIn::BuiltInBaryCoordNoPerspSampleAMD, + Some(BaryCoordSmoothAmd) => br::spv::BuiltIn::BuiltInBaryCoordSmoothAMD, + Some(BaryCoordSmoothCentroidAmd) => br::spv::BuiltIn::BuiltInBaryCoordSmoothCentroidAMD, + Some(BaryCoordSmoothSampleAmd) => br::spv::BuiltIn::BuiltInBaryCoordSmoothSampleAMD, + Some(BaryCoordPullModelAmd) => br::spv::BuiltIn::BuiltInBaryCoordPullModelAMD, + Some(FragStencilRefExt) => br::spv::BuiltIn::BuiltInFragStencilRefEXT, + Some(ViewportMaskNv) => br::spv::BuiltIn::BuiltInViewportMaskNV, + Some(SecondaryPositionNv) => br::spv::BuiltIn::BuiltInSecondaryPositionNV, + Some(SecondaryViewportMaskNv) => br::spv::BuiltIn::BuiltInSecondaryViewportMaskNV, + Some(PositionPerViewNv) => br::spv::BuiltIn::BuiltInPositionPerViewNV, + Some(ViewportMaskPerViewNv) => br::spv::BuiltIn::BuiltInViewportMaskPerViewNV, + Some(FullyCoveredExt) => br::spv::BuiltIn::BuiltInFullyCoveredEXT, + Some(TaskCountNv) => br::spv::BuiltIn::BuiltInTaskCountNV, + Some(PrimitiveCountNv) => br::spv::BuiltIn::BuiltInPrimitiveCountNV, + Some(PrimitiveIndicesNv) => br::spv::BuiltIn::BuiltInPrimitiveIndicesNV, + Some(ClipDistancePerViewNv) => br::spv::BuiltIn::BuiltInClipDistancePerViewNV, + Some(CullDistancePerViewNv) => br::spv::BuiltIn::BuiltInCullDistancePerViewNV, + Some(LayerPerViewNv) => br::spv::BuiltIn::BuiltInLayerPerViewNV, + Some(MeshViewCountNv) => br::spv::BuiltIn::BuiltInMeshViewCountNV, + Some(MeshViewIndicesNv) => br::spv::BuiltIn::BuiltInMeshViewIndicesNV, + Some(BaryCoordNv) => br::spv::BuiltIn::BuiltInBaryCoordNV, + Some(BaryCoordNoPerspNv) => br::spv::BuiltIn::BuiltInBaryCoordNoPerspNV, + Some(FragSizeExt) => br::spv::BuiltIn::BuiltInFragSizeEXT, + Some(FragInvocationCountExt) => br::spv::BuiltIn::BuiltInFragInvocationCountEXT, + Some(LaunchIdNv) => br::spv::BuiltIn::BuiltInLaunchIdNV, + Some(LaunchSizeNv) => br::spv::BuiltIn::BuiltInLaunchSizeNV, + Some(WorldRayOriginNv) => br::spv::BuiltIn::BuiltInWorldRayOriginNV, + Some(WorldRayDirectionNv) => br::spv::BuiltIn::BuiltInWorldRayDirectionNV, + Some(ObjectRayOriginNv) => br::spv::BuiltIn::BuiltInObjectRayOriginNV, + Some(ObjectRayDirectionNv) => br::spv::BuiltIn::BuiltInObjectRayDirectionNV, + Some(RayTminNv) => br::spv::BuiltIn::BuiltInRayTminNV, + Some(RayTmaxNv) => br::spv::BuiltIn::BuiltInRayTmaxNV, + Some(InstanceCustomIndexNv) => br::spv::BuiltIn::BuiltInInstanceCustomIndexNV, + Some(ObjectToWorldNv) => br::spv::BuiltIn::BuiltInObjectToWorldNV, + Some(WorldToObjectNv) => br::spv::BuiltIn::BuiltInWorldToObjectNV, + 
Some(HitTNv) => br::spv::BuiltIn::BuiltInHitTNV, + Some(HitKindNv) => br::spv::BuiltIn::BuiltInHitKindNV, + Some(IncomingRayFlagsNv) => br::spv::BuiltIn::BuiltInIncomingRayFlagsNV, + } +} + + +#[derive(Copy, Clone, Debug, Hash, Eq, PartialEq)] +pub struct WorkGroupSize { + pub x: u32, + pub y: u32, + pub z: u32, +} + + +#[derive(Clone, Debug, Hash, Eq, PartialEq)] +pub struct EntryPoint { + pub name: String, + pub execution_model: ExecutionModel, + pub work_group_size: WorkGroupSize, +} + + +#[derive(Clone, Debug, Hash, Eq, PartialEq)] +pub struct BufferRange { + + pub index: u32, + + pub offset: usize, + + pub range: usize, +} + + +#[derive(Clone, Debug, Hash, Eq, PartialEq)] +pub struct Resource { + pub id: u32, + pub type_id: u32, + pub base_type_id: u32, + pub name: String, +} + + +#[derive(Copy, Clone, Debug, Hash, Eq, PartialEq)] +pub struct SpecializationConstant { + pub id: u32, + pub constant_id: u32, +} + + +#[derive(Copy, Clone, Debug, Hash, Eq, PartialEq)] +pub struct WorkGroupSizeSpecializationConstants { + pub x: SpecializationConstant, + pub y: SpecializationConstant, + pub z: SpecializationConstant, +} + + +#[derive(Debug, Clone)] +pub struct ShaderResources { + pub uniform_buffers: Vec, + pub storage_buffers: Vec, + pub stage_inputs: Vec, + pub stage_outputs: Vec, + pub subpass_inputs: Vec, + pub storage_images: Vec, + pub sampled_images: Vec, + pub atomic_counters: Vec, + pub push_constant_buffers: Vec, + pub separate_images: Vec, + pub separate_samplers: Vec, +} + +#[derive(Debug, Clone)] +pub enum Type { + + Unknown, + Void, + Boolean { + array: Vec, + }, + Char { + array: Vec, + }, + Int { + array: Vec, + }, + UInt { + array: Vec, + }, + Int64 { + array: Vec, + }, + UInt64 { + array: Vec, + }, + AtomicCounter { + array: Vec, + }, + Half { + array: Vec, + }, + Float { + array: Vec, + }, + Double { + array: Vec, + }, + Struct { + member_types: Vec, + array: Vec, + }, + Image { + array: Vec, + }, + SampledImage { + array: Vec, + }, + Sampler { + array: Vec, + }, + SByte { + array: Vec, + }, + UByte { + array: Vec, + }, + Short { + array: Vec, + }, + UShort { + array: Vec, + }, + ControlPointArray, + AccelerationStructureNv, +} + + +#[derive(Debug, Clone)] +pub struct Module<'a> { + pub(crate) words: &'a [u32], +} + +impl<'a> Module<'a> { + + pub fn from_words(words: &[u32]) -> Module { + Module { words } + } +} + +pub trait Target { + type Data; +} + + +pub struct Ast +where + TTarget: Target, +{ + pub(crate) compiler: compiler::Compiler, + pub(crate) target_type: PhantomData, +} + +pub trait Parse: Sized { + fn parse(module: &Module) -> Result; +} + +pub trait Compile { + type CompilerOptions; + + fn set_compiler_options( + &mut self, + compiler_options: &Self::CompilerOptions, + ) -> Result<(), ErrorCode>; + fn compile(&mut self) -> Result; +} + +impl Ast +where + Self: Parse + Compile, + TTarget: Target, +{ + + pub fn get_decoration(&self, id: u32, decoration: Decoration) -> Result { + self.compiler.get_decoration(id, decoration) + } + + + pub fn get_name(&mut self, id: u32) -> Result { + self.compiler.get_name(id) + } + + + pub fn set_name(&mut self, id: u32, name: &str) -> Result<(), ErrorCode> { + self.compiler.set_name(id, name) + } + + + pub fn unset_decoration(&mut self, id: u32, decoration: Decoration) -> Result<(), ErrorCode> { + self.compiler.unset_decoration(id, decoration) + } + + + pub fn set_decoration( + &mut self, + id: u32, + decoration: Decoration, + argument: u32, + ) -> Result<(), ErrorCode> { + self.compiler.set_decoration(id, decoration, argument) 
+ } + + + pub fn get_entry_points(&self) -> Result, ErrorCode> { + self.compiler.get_entry_points() + } + + + pub fn get_cleansed_entry_point_name( + &self, + entry_point_name: &str, + execution_model: ExecutionModel, + ) -> Result { + if self.compiler.has_been_compiled { + self.compiler + .get_cleansed_entry_point_name(entry_point_name, execution_model) + } else { + Err(ErrorCode::CompilationError(String::from( + "`compile` must be called first", + ))) + } + } + + + pub fn get_active_buffer_ranges(&self, id: u32) -> Result, ErrorCode> { + self.compiler.get_active_buffer_ranges(id) + } + + + pub fn get_specialization_constants(&self) -> Result, ErrorCode> { + self.compiler.get_specialization_constants() + } + + + + + pub fn set_scalar_constant(&mut self, id: u32, value: u64) -> Result<(), ErrorCode> { + self.compiler.set_scalar_constant(id, value) + } + + + pub fn get_shader_resources(&self) -> Result { + self.compiler.get_shader_resources() + } + + + pub fn get_type(&self, id: u32) -> Result { + self.compiler.get_type(id) + } + + + pub fn get_member_name(&self, id: u32, index: u32) -> Result { + self.compiler.get_member_name(id, index) + } + + + pub fn get_member_decoration( + &self, + id: u32, + index: u32, + decoration: Decoration, + ) -> Result { + self.compiler.get_member_decoration(id, index, decoration) + } + + + pub fn set_member_decoration( + &mut self, + id: u32, + index: u32, + decoration: Decoration, + argument: u32, + ) -> Result<(), ErrorCode> { + self.compiler + .set_member_decoration(id, index, decoration, argument) + } + + + pub fn get_declared_struct_size(&self, id: u32) -> Result { + self.compiler.get_declared_struct_size(id) + } + + + pub fn get_declared_struct_member_size(&self, id: u32, index: u32) -> Result { + self.compiler.get_declared_struct_member_size(id, index) + } + + + pub fn rename_interface_variable( + &mut self, + resources: &[Resource], + location: u32, + name: &str, + ) -> Result<(), ErrorCode> { + self.compiler + .rename_interface_variable(resources, location, name) + } + + + pub fn get_work_group_size_specialization_constants( + &self, + ) -> Result { + self.compiler.get_work_group_size_specialization_constants() + } + + + pub fn parse(module: &Module) -> Result { + Parse::::parse(&module) + } + + + pub fn set_compiler_options( + &mut self, + options: &>::CompilerOptions, + ) -> Result<(), ErrorCode> { + Compile::::set_compiler_options(self, options) + } + + + pub fn compile(&mut self) -> Result { + self.compiler.has_been_compiled = true; + Compile::::compile(self) + } +} diff --git a/third_party/rust/spirv_cross/src/vendor/SPIRV-Cross/.clang-format b/third_party/rust/spirv_cross/src/vendor/SPIRV-Cross/.clang-format new file mode 100644 index 000000000000..443f90b774da --- /dev/null +++ b/third_party/rust/spirv_cross/src/vendor/SPIRV-Cross/.clang-format @@ -0,0 +1,167 @@ +# The style used for all options not specifically set in the configuration. +BasedOnStyle: LLVM + +# The extra indent or outdent of access modifiers, e.g. public:. +AccessModifierOffset: -4 + +# If true, aligns escaped newlines as far left as possible. Otherwise puts them into the right-most column. +AlignEscapedNewlinesLeft: true + +# If true, aligns trailing comments. +AlignTrailingComments: false + +# Allow putting all parameters of a function declaration onto the next line even if BinPackParameters is false. +AllowAllParametersOfDeclarationOnNextLine: false + +# Allows contracting simple braced statements to a single line. 
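The `Ast` methods above form the reflection surface shared by every backend. A hedged sketch of walking a module's uniform buffers through it, again assuming the published crate's generics:

```rust
use spirv_cross::{glsl, spirv, ErrorCode};

/// Sketch only: print descriptor set/binding for each uniform buffer.
fn dump_uniform_buffers(words: &[u32]) -> Result<(), ErrorCode> {
    let module = spirv::Module::from_words(words);
    let ast = spirv::Ast::<glsl::Target>::parse(&module)?;
    for buf in ast.get_shader_resources()?.uniform_buffers {
        // DescriptorSet/Binding decorations carry the layout qualifiers.
        let set = ast.get_decoration(buf.id, spirv::Decoration::DescriptorSet)?;
        let binding = ast.get_decoration(buf.id, spirv::Decoration::Binding)?;
        println!("{}: set={}, binding={}", buf.name, set, binding);
    }
    Ok(())
}
```

Note the guard above: `get_cleansed_entry_point_name` deliberately errors unless `compile()` has already run, since the cleansed names only exist after compilation.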
+AllowShortBlocksOnASingleLine: false + +# If true, short case labels will be contracted to a single line. +AllowShortCaseLabelsOnASingleLine: false + +# Dependent on the value, int f() { return 0; } can be put on a single line. Possible values: None, Inline, All. +AllowShortFunctionsOnASingleLine: None + +# If true, if (a) return; can be put on a single line. +AllowShortIfStatementsOnASingleLine: false + +# If true, while (true) continue; can be put on a single line. +AllowShortLoopsOnASingleLine: false + +# If true, always break after function definition return types. +AlwaysBreakAfterDefinitionReturnType: false + +# If true, always break before multiline string literals. +AlwaysBreakBeforeMultilineStrings: false + +# If true, always break after the template<...> of a template declaration. +AlwaysBreakTemplateDeclarations: true + +# If false, a function call's arguments will either be all on the same line or will have one line each. +BinPackArguments: true + +# If false, a function declaration's or function definition's parameters will either all be on the same line +# or will have one line each. +BinPackParameters: true + +# The way to wrap binary operators. Possible values: None, NonAssignment, All. +BreakBeforeBinaryOperators: None + +# The brace breaking style to use. Possible values: Attach, Linux, Stroustrup, Allman, GNU. +BreakBeforeBraces: Allman + +# If true, ternary operators will be placed after line breaks. +BreakBeforeTernaryOperators: false + +# Always break constructor initializers before commas and align the commas with the colon. +BreakConstructorInitializersBeforeComma: true + +# The column limit. A column limit of 0 means that there is no column limit. +ColumnLimit: 120 + +# A regular expression that describes comments with special meaning, which should not be split into lines or otherwise changed. +CommentPragmas: '^ *' + +# If the constructor initializers don't fit on a line, put each initializer on its own line. +ConstructorInitializerAllOnOneLineOrOnePerLine: false + +# The number of characters to use for indentation of constructor initializer lists. +ConstructorInitializerIndentWidth: 4 + +# Indent width for line continuations. +ContinuationIndentWidth: 4 + +# If true, format braced lists as best suited for C++11 braced lists. +Cpp11BracedListStyle: false + +# Disables formatting at all. +DisableFormat: false + +# A vector of macros that should be interpreted as foreach loops instead of as function calls. +#ForEachMacros: '' + +# Indent case labels one level from the switch statement. +# When false, use the same indentation level as for the switch statement. +# Switch statement body is always indented one level more than case labels. +IndentCaseLabels: false + +# The number of columns to use for indentation. +IndentWidth: 4 + +# Indent if a function definition or declaration is wrapped after the type. +IndentWrappedFunctionNames: false + +# If true, empty lines at the start of blocks are kept. +KeepEmptyLinesAtTheStartOfBlocks: true + +# Language, this format style is targeted at. Possible values: None, Cpp, Java, JavaScript, Proto. +Language: Cpp + +# The maximum number of consecutive empty lines to keep. +MaxEmptyLinesToKeep: 1 + +# The indentation used for namespaces. Possible values: None, Inner, All. +NamespaceIndentation: None + +# The penalty for breaking a function call after "call(". +PenaltyBreakBeforeFirstCallParameter: 19 + +# The penalty for each line break introduced inside a comment. 
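+# Penalty values are weights in clang-format's layout optimizer: it chooses
+# the set of line breaks with the lowest total penalty, so a larger value
+# makes the corresponding kind of break less likely.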
+PenaltyBreakComment: 300 + +# The penalty for breaking before the first <<. +PenaltyBreakFirstLessLess: 120 + +# The penalty for each line break introduced inside a string literal. +PenaltyBreakString: 1000 + +# The penalty for each character outside of the column limit. +PenaltyExcessCharacter: 1000000 + +# Penalty for putting the return type of a function onto its own line. +PenaltyReturnTypeOnItsOwnLine: 1000000000 + +# Pointer and reference alignment style. Possible values: Left, Right, Middle. +PointerAlignment: Right + +# If true, a space may be inserted after C style casts. +SpaceAfterCStyleCast: false + +# If false, spaces will be removed before assignment operators. +SpaceBeforeAssignmentOperators: true + +# Defines in which cases to put a space before opening parentheses. Possible values: Never, ControlStatements, Always. +SpaceBeforeParens: ControlStatements + +# If true, spaces may be inserted into '()'. +SpaceInEmptyParentheses: false + +# The number of spaces before trailing line comments (// - comments). +SpacesBeforeTrailingComments: 1 + +# If true, spaces will be inserted after '<' and before '>' in template argument lists. +SpacesInAngles: false + +# If true, spaces may be inserted into C style casts. +SpacesInCStyleCastParentheses: false + +# If true, spaces are inserted inside container literals (e.g. ObjC and Javascript array and dict literals). +SpacesInContainerLiterals: false + +# If true, spaces will be inserted after '(' and before ')'. +SpacesInParentheses: false + +# If true, spaces will be inserted after '[' and befor']'. +SpacesInSquareBrackets: false + +# Format compatible with this standard, e.g. use A > instead of A> for LS_Cpp03. Possible values: Cpp03, Cpp11, Auto. +Standard: Cpp11 + +# The number of columns used for tab stops. +TabWidth: 4 + +# The way to use tab characters in the resulting file. Possible values: Never, ForIndentation, Always. +UseTab: ForIndentation + +# Do not reflow comments +ReflowComments: false diff --git a/third_party/rust/spirv_cross/src/vendor/SPIRV-Cross/.gitignore b/third_party/rust/spirv_cross/src/vendor/SPIRV-Cross/.gitignore new file mode 100644 index 000000000000..abd718958389 --- /dev/null +++ b/third_party/rust/spirv_cross/src/vendor/SPIRV-Cross/.gitignore @@ -0,0 +1,20 @@ +*.o +*.d +*.txt +/test +/spirv-cross +/obj +/msvc/x64 +/msvc/Debug +/msvc/Release +*.suo +*.sdf +*.opensdf +*.shader +*.a +*.bc +/external +.vs/ +*.vcxproj.user + +!CMakeLists.txt diff --git a/third_party/rust/spirv_cross/src/vendor/SPIRV-Cross/CMakeLists.txt b/third_party/rust/spirv_cross/src/vendor/SPIRV-Cross/CMakeLists.txt new file mode 100644 index 000000000000..c926f5c0063f --- /dev/null +++ b/third_party/rust/spirv_cross/src/vendor/SPIRV-Cross/CMakeLists.txt @@ -0,0 +1,584 @@ +# Copyright 2016 Google Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
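+# The build below is organized as small per-backend libraries:
+# spirv-cross-core holds the parser and IR, spirv-cross-glsl layers the GLSL
+# backend on top of it, and the HLSL/MSL/C++/reflect backends all link
+# against the GLSL library, which is why enabling any of them with GLSL
+# support disabled is a hard FATAL_ERROR further down.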
+ +cmake_minimum_required(VERSION 2.8) +set(CMAKE_CXX_STANDARD 11) +project(SPIRV-Cross LANGUAGES CXX C) +enable_testing() + +option(SPIRV_CROSS_EXCEPTIONS_TO_ASSERTIONS "Instead of throwing exceptions assert" OFF) +option(SPIRV_CROSS_SHARED "Build the C API as a single shared library." OFF) +option(SPIRV_CROSS_STATIC "Build the C and C++ API as static libraries." ON) +option(SPIRV_CROSS_CLI "Build the CLI binary. Requires SPIRV_CROSS_STATIC." ON) +option(SPIRV_CROSS_ENABLE_TESTS "Enable SPIRV-Cross tests." ON) + +option(SPIRV_CROSS_ENABLE_GLSL "Enable GLSL support." ON) +option(SPIRV_CROSS_ENABLE_HLSL "Enable HLSL target support." ON) +option(SPIRV_CROSS_ENABLE_MSL "Enable MSL target support." ON) +option(SPIRV_CROSS_ENABLE_CPP "Enable C++ target support." ON) +option(SPIRV_CROSS_ENABLE_REFLECT "Enable JSON reflection target support." ON) +option(SPIRV_CROSS_ENABLE_C_API "Enable C API wrapper support in static library." ON) +option(SPIRV_CROSS_ENABLE_UTIL "Enable util module support." ON) + +option(SPIRV_CROSS_SANITIZE_ADDRESS "Sanitize address" OFF) +option(SPIRV_CROSS_SANITIZE_MEMORY "Sanitize memory" OFF) +option(SPIRV_CROSS_SANITIZE_THREADS "Sanitize threads" OFF) +option(SPIRV_CROSS_SANITIZE_UNDEFINED "Sanitize undefined" OFF) + +option(SPIRV_CROSS_NAMESPACE_OVERRIDE "" "Override the namespace used in the C++ API.") +option(SPIRV_CROSS_FORCE_STL_TYPES "Force use of STL types instead of STL replacements in certain places. Might reduce performance." OFF) + +option(SPIRV_CROSS_SKIP_INSTALL "Skips installation targets." OFF) + +option(SPIRV_CROSS_WERROR "Fail build on warnings." OFF) +option(SPIRV_CROSS_MISC_WARNINGS "Misc warnings useful for Travis runs." OFF) + +option(SPIRV_CROSS_FORCE_PIC "Force position-independent code for all targets." OFF) + +if(${CMAKE_GENERATOR} MATCHES "Makefile") + if(${CMAKE_CURRENT_SOURCE_DIR} STREQUAL ${CMAKE_CURRENT_BINARY_DIR}) + message(FATAL_ERROR "Build out of tree to avoid overwriting Makefile") + endif() +endif() + +set(spirv-compiler-options "") +set(spirv-compiler-defines "") +set(spirv-cross-link-flags "") + +message(STATUS "Finding Git version for SPIRV-Cross.") +set(spirv-cross-build-version "unknown") +find_package(Git) +if (GIT_FOUND) + execute_process( + COMMAND ${GIT_EXECUTABLE} rev-parse --short HEAD + WORKING_DIRECTORY ${CMAKE_CURRENT_SOURCE_DIR} + OUTPUT_VARIABLE spirv-cross-build-version + ERROR_QUIET + OUTPUT_STRIP_TRAILING_WHITESPACE + ) + message(STATUS "Git hash: ${spirv-cross-build-version}") +else() + message(STATUS "Git not found, using unknown build version.") +endif() + +string(TIMESTAMP spirv-cross-timestamp) +configure_file(${CMAKE_CURRENT_SOURCE_DIR}/cmake/gitversion.in.h ${CMAKE_CURRENT_BINARY_DIR}/gitversion.h @ONLY) + +if(SPIRV_CROSS_EXCEPTIONS_TO_ASSERTIONS) + set(spirv-compiler-defines ${spirv-compiler-defines} SPIRV_CROSS_EXCEPTIONS_TO_ASSERTIONS) +endif() + +if(SPIRV_CROSS_FORCE_STL_TYPES) + set(spirv-compiler-defines ${spirv-compiler-defines} SPIRV_CROSS_FORCE_STL_TYPES) +endif() + +if (CMAKE_COMPILER_IS_GNUCXX OR (${CMAKE_CXX_COMPILER_ID} MATCHES "Clang")) + set(spirv-compiler-options ${spirv-compiler-options} -Wall -Wextra -Wshadow) + if (SPIRV_CROSS_MISC_WARNINGS) + if (${CMAKE_CXX_COMPILER_ID} MATCHES "Clang") + set(spirv-compiler-options ${spirv-compiler-options} -Wshorten-64-to-32) + endif() + endif() + if (SPIRV_CROSS_WERROR) + set(spirv-compiler-options ${spirv-compiler-options} -Werror) + endif() + + if (SPIRV_CROSS_EXCEPTIONS_TO_ASSERTIONS) + set(spirv-compiler-options ${spirv-compiler-options} 
-fno-exceptions) + endif() + + if (SPIRV_CROSS_SANITIZE_ADDRESS) + set(spirv-compiler-options ${spirv-compiler-options} -fsanitize=address) + set(spirv-cross-link-flags "${spirv-cross-link-flags} -fsanitize=address") + endif() + + if (SPIRV_CROSS_SANITIZE_UNDEFINED) + set(spirv-compiler-options ${spirv-compiler-options} -fsanitize=undefined) + set(spirv-cross-link-flags "${spirv-cross-link-flags} -fsanitize=undefined") + endif() + + if (SPIRV_CROSS_SANITIZE_MEMORY) + set(spirv-compiler-options ${spirv-compiler-options} -fsanitize=memory) + set(spirv-cross-link-flags "${spirv-cross-link-flags} -fsanitize=memory") + endif() + + if (SPIRV_CROSS_SANITIZE_THREADS) + set(spirv-compiler-options ${spirv-compiler-options} -fsanitize=thread) + set(spirv-cross-link-flags "${spirv-cross-link-flags} -fsanitize=thread") + endif() +elseif (MSVC) + set(spirv-compiler-options ${spirv-compiler-options} /wd4267 /wd4996) +endif() + +macro(extract_headers out_abs file_list) + set(${out_abs}) # absolute paths + foreach(_a ${file_list}) + # get_filename_component only returns the longest extension, so use a regex + string(REGEX REPLACE ".*\\.(h|hpp)" "\\1" ext ${_a}) + + # For shared library, we are only interested in the C header. + if (SPIRV_CROSS_STATIC) + if(("${ext}" STREQUAL "h") OR ("${ext}" STREQUAL "hpp")) + list(APPEND ${out_abs} "${_a}") + endif() + else() + if("${ext}" STREQUAL "h") + list(APPEND ${out_abs} "${_a}") + endif() + endif() + endforeach() +endmacro() + +macro(spirv_cross_add_library name config_name library_type) + add_library(${name} ${library_type} ${ARGN}) + extract_headers(hdrs "${ARGN}") + target_include_directories(${name} PUBLIC + $ + $) + set_target_properties(${name} PROPERTIES + PUBLIC_HEADERS "${hdrs}") + if (SPIRV_CROSS_FORCE_PIC) + set_target_properties(${name} PROPERTIES POSITION_INDEPENDENT_CODE ON) + endif() + target_compile_options(${name} PRIVATE ${spirv-compiler-options}) + target_compile_definitions(${name} PRIVATE ${spirv-compiler-defines}) + if (SPIRV_CROSS_NAMESPACE_OVERRIDE) + if (${library_type} MATCHES "STATIC") + target_compile_definitions(${name} PUBLIC SPIRV_CROSS_NAMESPACE_OVERRIDE=${SPIRV_CROSS_NAMESPACE_OVERRIDE}) + else() + target_compile_definitions(${name} PRIVATE SPIRV_CROSS_NAMESPACE_OVERRIDE=${SPIRV_CROSS_NAMESPACE_OVERRIDE}) + endif() + endif() + + if (NOT SPIRV_CROSS_SKIP_INSTALL) + install(TARGETS ${name} + EXPORT ${config_name}Config + RUNTIME DESTINATION bin + LIBRARY DESTINATION lib + ARCHIVE DESTINATION lib + PUBLIC_HEADER DESTINATION include/spirv_cross) + install(FILES ${hdrs} DESTINATION include/spirv_cross) + install(EXPORT ${config_name}Config DESTINATION share/${config_name}/cmake) + export(TARGETS ${name} FILE ${config_name}Config.cmake) + endif() +endmacro() + +set(spirv-cross-core-sources + ${CMAKE_CURRENT_SOURCE_DIR}/GLSL.std.450.h + ${CMAKE_CURRENT_SOURCE_DIR}/spirv_common.hpp + ${CMAKE_CURRENT_SOURCE_DIR}/spirv_cross_containers.hpp + ${CMAKE_CURRENT_SOURCE_DIR}/spirv_cross_error_handling.hpp + ${CMAKE_CURRENT_SOURCE_DIR}/spirv.hpp + ${CMAKE_CURRENT_SOURCE_DIR}/spirv_cross.hpp + ${CMAKE_CURRENT_SOURCE_DIR}/spirv_cross.cpp + ${CMAKE_CURRENT_SOURCE_DIR}/spirv_parser.hpp + ${CMAKE_CURRENT_SOURCE_DIR}/spirv_parser.cpp + ${CMAKE_CURRENT_SOURCE_DIR}/spirv_cross_parsed_ir.hpp + ${CMAKE_CURRENT_SOURCE_DIR}/spirv_cross_parsed_ir.cpp + ${CMAKE_CURRENT_SOURCE_DIR}/spirv_cfg.hpp + ${CMAKE_CURRENT_SOURCE_DIR}/spirv_cfg.cpp) + +set(spirv-cross-c-sources + spirv.h + ${CMAKE_CURRENT_SOURCE_DIR}/spirv_cross_c.cpp + 
${CMAKE_CURRENT_SOURCE_DIR}/spirv_cross_c.h) + +set(spirv-cross-glsl-sources + ${CMAKE_CURRENT_SOURCE_DIR}/spirv_glsl.cpp + ${CMAKE_CURRENT_SOURCE_DIR}/spirv_glsl.hpp) + +set(spirv-cross-cpp-sources + ${CMAKE_CURRENT_SOURCE_DIR}/spirv_cpp.cpp + ${CMAKE_CURRENT_SOURCE_DIR}/spirv_cpp.hpp) + +set(spirv-cross-msl-sources + ${CMAKE_CURRENT_SOURCE_DIR}/spirv_msl.cpp + ${CMAKE_CURRENT_SOURCE_DIR}/spirv_msl.hpp) + +set(spirv-cross-hlsl-sources + ${CMAKE_CURRENT_SOURCE_DIR}/spirv_hlsl.cpp + ${CMAKE_CURRENT_SOURCE_DIR}/spirv_hlsl.hpp) + +set(spirv-cross-reflect-sources + ${CMAKE_CURRENT_SOURCE_DIR}/spirv_reflect.cpp + ${CMAKE_CURRENT_SOURCE_DIR}/spirv_reflect.hpp) + +set(spirv-cross-util-sources + ${CMAKE_CURRENT_SOURCE_DIR}/spirv_cross_util.cpp + ${CMAKE_CURRENT_SOURCE_DIR}/spirv_cross_util.hpp) + +if (SPIRV_CROSS_STATIC) + spirv_cross_add_library(spirv-cross-core spirv_cross_core STATIC + ${spirv-cross-core-sources}) + + if (SPIRV_CROSS_ENABLE_GLSL) + spirv_cross_add_library(spirv-cross-glsl spirv_cross_glsl STATIC + ${spirv-cross-glsl-sources}) + target_link_libraries(spirv-cross-glsl PRIVATE spirv-cross-core) + endif() + + if (SPIRV_CROSS_ENABLE_CPP) + spirv_cross_add_library(spirv-cross-cpp spirv_cross_cpp STATIC + ${spirv-cross-cpp-sources}) + + if (SPIRV_CROSS_ENABLE_GLSL) + target_link_libraries(spirv-cross-cpp PRIVATE spirv-cross-glsl) + else() + message(FATAL_ERROR "Must enable GLSL support to enable C++ support.") + endif() + endif() + + if (SPIRV_CROSS_ENABLE_REFLECT) + if (SPIRV_CROSS_ENABLE_GLSL) + spirv_cross_add_library(spirv-cross-reflect spirv_cross_reflect STATIC + ${spirv-cross-reflect-sources}) + else() + message(FATAL_ERROR "Must enable GLSL support to enable JSON reflection support.") + endif() + endif() + + if (SPIRV_CROSS_ENABLE_MSL) + spirv_cross_add_library(spirv-cross-msl spirv_cross_msl STATIC + ${spirv-cross-msl-sources}) + if (SPIRV_CROSS_ENABLE_GLSL) + target_link_libraries(spirv-cross-msl PRIVATE spirv-cross-glsl) + else() + message(FATAL_ERROR "Must enable GLSL support to enable MSL support.") + endif() + endif() + + if (SPIRV_CROSS_ENABLE_HLSL) + spirv_cross_add_library(spirv-cross-hlsl spirv_cross_hlsl STATIC + ${spirv-cross-hlsl-sources}) + if (SPIRV_CROSS_ENABLE_GLSL) + target_link_libraries(spirv-cross-hlsl PRIVATE spirv-cross-glsl) + else() + message(FATAL_ERROR "Must enable GLSL support to enable HLSL support.") + endif() + endif() + + if (SPIRV_CROSS_ENABLE_UTIL) + spirv_cross_add_library(spirv-cross-util spirv_cross_util STATIC + ${spirv-cross-util-sources}) + target_link_libraries(spirv-cross-util PRIVATE spirv-cross-core) + endif() + + if (SPIRV_CROSS_ENABLE_C_API) + spirv_cross_add_library(spirv-cross-c spirv_cross_c STATIC + ${spirv-cross-c-sources}) + target_include_directories(spirv-cross-c PRIVATE ${CMAKE_CURRENT_BINARY_DIR}) + target_compile_definitions(spirv-cross-c PRIVATE HAVE_SPIRV_CROSS_GIT_VERSION) + + if (SPIRV_CROSS_ENABLE_GLSL) + target_link_libraries(spirv-cross-c PRIVATE spirv-cross-glsl) + target_compile_definitions(spirv-cross-c PRIVATE SPIRV_CROSS_C_API_GLSL=1) + endif() + + if (SPIRV_CROSS_ENABLE_HLSL) + target_link_libraries(spirv-cross-c PRIVATE spirv-cross-hlsl) + target_compile_definitions(spirv-cross-c PRIVATE SPIRV_CROSS_C_API_HLSL=1) + endif() + + if (SPIRV_CROSS_ENABLE_MSL) + target_link_libraries(spirv-cross-c PRIVATE spirv-cross-msl) + target_compile_definitions(spirv-cross-c PRIVATE SPIRV_CROSS_C_API_MSL=1) + endif() + + if (SPIRV_CROSS_ENABLE_CPP) + target_link_libraries(spirv-cross-c PRIVATE spirv-cross-cpp) + 
target_compile_definitions(spirv-cross-c PRIVATE SPIRV_CROSS_C_API_CPP=1) + endif() + + if (SPIRV_CROSS_ENABLE_REFLECT) + target_link_libraries(spirv-cross-c PRIVATE spirv-cross-reflect) + target_compile_definitions(spirv-cross-c PRIVATE SPIRV_CROSS_C_API_REFLECT=1) + endif() + endif() +endif() + +set(spirv-cross-abi-major 0) +set(spirv-cross-abi-minor 19) +set(spirv-cross-abi-patch 0) + +if (SPIRV_CROSS_SHARED) + set(SPIRV_CROSS_VERSION ${spirv-cross-abi-major}.${spirv-cross-abi-minor}.${spirv-cross-abi-patch}) + set(SPIRV_CROSS_INSTALL_LIB_DIR ${CMAKE_INSTALL_PREFIX}/lib) + set(SPIRV_CROSS_INSTALL_INC_DIR ${CMAKE_INSTALL_PREFIX}/include/spirv_cross) + + if (NOT SPIRV_CROSS_SKIP_INSTALL) + configure_file( + ${CMAKE_CURRENT_SOURCE_DIR}/pkg-config/spirv-cross-c-shared.pc.in + ${CMAKE_CURRENT_BINARY_DIR}/spirv-cross-c-shared.pc @ONLY) + install(FILES ${CMAKE_CURRENT_BINARY_DIR}/spirv-cross-c-shared.pc DESTINATION ${CMAKE_INSTALL_PREFIX}/share/pkgconfig) + endif() + + spirv_cross_add_library(spirv-cross-c-shared spirv_cross_c_shared SHARED + ${spirv-cross-core-sources} + ${spirv-cross-c-sources}) + + target_include_directories(spirv-cross-c-shared PRIVATE ${CMAKE_CURRENT_BINARY_DIR}) + target_compile_definitions(spirv-cross-c-shared PRIVATE HAVE_SPIRV_CROSS_GIT_VERSION) + + if (SPIRV_CROSS_ENABLE_GLSL) + target_sources(spirv-cross-c-shared PRIVATE ${spirv-cross-glsl-sources}) + target_compile_definitions(spirv-cross-c-shared PRIVATE SPIRV_CROSS_C_API_GLSL=1) + endif() + + if (SPIRV_CROSS_ENABLE_HLSL) + if (SPIRV_CROSS_ENABLE_GLSL) + target_sources(spirv-cross-c-shared PRIVATE ${spirv-cross-hlsl-sources}) + else() + message(FATAL_ERROR "Must enable GLSL support to enable HLSL support.") + endif() + target_compile_definitions(spirv-cross-c-shared PRIVATE SPIRV_CROSS_C_API_HLSL=1) + endif() + + if (SPIRV_CROSS_ENABLE_MSL) + if (SPIRV_CROSS_ENABLE_GLSL) + target_sources(spirv-cross-c-shared PRIVATE ${spirv-cross-msl-sources}) + else() + message(FATAL_ERROR "Must enable GLSL support to enable MSL support.") + endif() + target_compile_definitions(spirv-cross-c-shared PRIVATE SPIRV_CROSS_C_API_MSL=1) + endif() + + if (SPIRV_CROSS_ENABLE_CPP) + if (SPIRV_CROSS_ENABLE_GLSL) + target_sources(spirv-cross-c-shared PRIVATE ${spirv-cross-cpp-sources}) + else() + message(FATAL_ERROR "Must enable GLSL support to enable C++ support.") + endif() + target_compile_definitions(spirv-cross-c-shared PRIVATE SPIRV_CROSS_C_API_CPP=1) + endif() + + if (SPIRV_CROSS_ENABLE_REFLECT) + if (SPIRV_CROSS_ENABLE_GLSL) + target_sources(spirv-cross-c-shared PRIVATE ${spirv-cross-reflect-sources}) + else() + message(FATAL_ERROR "Must enable GLSL support to enable JSON reflection support.") + endif() + target_compile_definitions(spirv-cross-c-shared PRIVATE SPIRV_CROSS_C_API_REFLECT=1) + endif() + + if (CMAKE_COMPILER_IS_GNUCXX OR (${CMAKE_CXX_COMPILER_ID} MATCHES "Clang")) + # Only export the C API. 
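+        # -fvisibility=hidden makes every symbol private by default; the C
+        # entry points are then re-exported selectively through the
+        # SPVC_EXPORT_SYMBOLS define set a few lines below, so the shared
+        # library exposes only the C ABI.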
+ target_compile_options(spirv-cross-c-shared PRIVATE -fvisibility=hidden) + if (NOT APPLE) + set_target_properties(spirv-cross-c-shared PROPERTIES LINK_FLAGS "${spirv-cross-link-flags}") + endif() + endif() + + target_compile_definitions(spirv-cross-c-shared PRIVATE SPVC_EXPORT_SYMBOLS) + + set_target_properties(spirv-cross-c-shared PROPERTIES + VERSION ${SPIRV_CROSS_VERSION} + SOVERSION ${spirv-cross-abi-major}) +endif() + +if (SPIRV_CROSS_CLI) + if (NOT SPIRV_CROSS_ENABLE_GLSL) + message(FATAL_ERROR "Must enable GLSL if building CLI.") + endif() + + if (NOT SPIRV_CROSS_ENABLE_HLSL) + message(FATAL_ERROR "Must enable HLSL if building CLI.") + endif() + + if (NOT SPIRV_CROSS_ENABLE_MSL) + message(FATAL_ERROR "Must enable MSL if building CLI.") + endif() + + if (NOT SPIRV_CROSS_ENABLE_CPP) + message(FATAL_ERROR "Must enable C++ if building CLI.") + endif() + + if (NOT SPIRV_CROSS_ENABLE_REFLECT) + message(FATAL_ERROR "Must enable reflection if building CLI.") + endif() + + if (NOT SPIRV_CROSS_ENABLE_UTIL) + message(FATAL_ERROR "Must enable utils if building CLI.") + endif() + + if (NOT SPIRV_CROSS_STATIC) + message(FATAL_ERROR "Must build static libraries if building CLI.") + endif() + add_executable(spirv-cross main.cpp) + target_compile_options(spirv-cross PRIVATE ${spirv-compiler-options}) + target_include_directories(spirv-cross PRIVATE ${CMAKE_CURRENT_BINARY_DIR}) + target_compile_definitions(spirv-cross PRIVATE ${spirv-compiler-defines} HAVE_SPIRV_CROSS_GIT_VERSION) + set_target_properties(spirv-cross PROPERTIES LINK_FLAGS "${spirv-cross-link-flags}") + if (NOT SPIRV_CROSS_SKIP_INSTALL) + install(TARGETS spirv-cross RUNTIME DESTINATION bin) + endif() + target_link_libraries(spirv-cross PRIVATE + spirv-cross-glsl + spirv-cross-hlsl + spirv-cross-cpp + spirv-cross-reflect + spirv-cross-msl + spirv-cross-util + spirv-cross-core) + + if (SPIRV_CROSS_ENABLE_TESTS) + # Set up tests, using only the simplest modes of the test_shaders + # script. You have to invoke the script manually to: + # - Update the reference files + # - Get cycle counts from malisc + # - Keep failing outputs + find_package(PythonInterp) + find_program(spirv-cross-glslang NAMES glslangValidator + PATHS ${CMAKE_CURRENT_SOURCE_DIR}/external/glslang-build/output/bin + NO_DEFAULT_PATH) + find_program(spirv-cross-spirv-as NAMES spirv-as + PATHS ${CMAKE_CURRENT_SOURCE_DIR}/external/spirv-tools-build/output/bin + NO_DEFAULT_PATH) + find_program(spirv-cross-spirv-val NAMES spirv-val + PATHS ${CMAKE_CURRENT_SOURCE_DIR}/external/spirv-tools-build/output/bin + NO_DEFAULT_PATH) + find_program(spirv-cross-spirv-opt NAMES spirv-opt + PATHS ${CMAKE_CURRENT_SOURCE_DIR}/external/spirv-tools-build/output/bin + NO_DEFAULT_PATH) + + if ((${spirv-cross-glslang} MATCHES "NOTFOUND") OR (${spirv-cross-spirv-as} MATCHES "NOTFOUND") OR (${spirv-cross-spirv-val} MATCHES "NOTFOUND") OR (${spirv-cross-spirv-opt} MATCHES "NOTFOUND")) + set(SPIRV_CROSS_ENABLE_TESTS OFF) + message("Could not find glslang or SPIRV-Tools build under external/. Run ./checkout_glslang_spirv_tools.sh and ./build_glslang_spirv_tools.sh. Testing will be disabled.") + else() + set(SPIRV_CROSS_ENABLE_TESTS ON) + message("Found glslang and SPIRV-Tools. 
Enabling test suite.") + message("Found glslangValidator in: ${spirv-cross-glslang}.") + message("Found spirv-as in: ${spirv-cross-spirv-as}.") + message("Found spirv-val in: ${spirv-cross-spirv-val}.") + message("Found spirv-opt in: ${spirv-cross-spirv-opt}.") + endif() + + set(spirv-cross-externals + --glslang "${spirv-cross-glslang}" + --spirv-as "${spirv-cross-spirv-as}" + --spirv-opt "${spirv-cross-spirv-opt}" + --spirv-val "${spirv-cross-spirv-val}") + + if (${PYTHONINTERP_FOUND} AND SPIRV_CROSS_ENABLE_TESTS) + if (${PYTHON_VERSION_MAJOR} GREATER 2) + add_executable(spirv-cross-c-api-test tests-other/c_api_test.c) + target_link_libraries(spirv-cross-c-api-test spirv-cross-c) + set_target_properties(spirv-cross-c-api-test PROPERTIES LINK_FLAGS "${spirv-cross-link-flags}") + + add_executable(spirv-cross-small-vector-test tests-other/small_vector.cpp) + target_link_libraries(spirv-cross-small-vector-test spirv-cross-core) + set_target_properties(spirv-cross-small-vector-test PROPERTIES LINK_FLAGS "${spirv-cross-link-flags}") + + add_executable(spirv-cross-msl-constexpr-test tests-other/msl_constexpr_test.cpp) + target_link_libraries(spirv-cross-msl-constexpr-test spirv-cross-c) + set_target_properties(spirv-cross-msl-constexpr-test PROPERTIES LINK_FLAGS "${spirv-cross-link-flags}") + + add_executable(spirv-cross-msl-resource-binding-test tests-other/msl_resource_bindings.cpp) + target_link_libraries(spirv-cross-msl-resource-binding-test spirv-cross-c) + set_target_properties(spirv-cross-msl-resource-binding-test PROPERTIES LINK_FLAGS "${spirv-cross-link-flags}") + + add_executable(spirv-cross-msl-ycbcr-conversion-test tests-other/msl_ycbcr_conversion_test.cpp) + target_link_libraries(spirv-cross-msl-ycbcr-conversion-test spirv-cross-c) + set_target_properties(spirv-cross-msl-ycbcr-conversion-test PROPERTIES LINK_FLAGS "${spirv-cross-link-flags}") + + add_executable(spirv-cross-typed-id-test tests-other/typed_id_test.cpp) + target_link_libraries(spirv-cross-typed-id-test spirv-cross-core) + set_target_properties(spirv-cross-typed-id-test PROPERTIES LINK_FLAGS "${spirv-cross-link-flags}") + + if (CMAKE_COMPILER_IS_GNUCXX OR (${CMAKE_CXX_COMPILER_ID} MATCHES "Clang")) + target_compile_options(spirv-cross-c-api-test PRIVATE -std=c89 -Wall -Wextra) + endif() + add_test(NAME spirv-cross-c-api-test + COMMAND $ ${CMAKE_CURRENT_SOURCE_DIR}/tests-other/c_api_test.spv + ${spirv-cross-abi-major} + ${spirv-cross-abi-minor} + ${spirv-cross-abi-patch}) + add_test(NAME spirv-cross-small-vector-test + COMMAND $) + add_test(NAME spirv-cross-msl-constexpr-test + COMMAND $ ${CMAKE_CURRENT_SOURCE_DIR}/tests-other/msl_constexpr_test.spv) + add_test(NAME spirv-cross-msl-resource-binding-test + COMMAND $ ${CMAKE_CURRENT_SOURCE_DIR}/tests-other/msl_resource_binding.spv) + add_test(NAME spirv-cross-msl-ycbcr-conversion-test + COMMAND $ ${CMAKE_CURRENT_SOURCE_DIR}/tests-other/msl_ycbcr_conversion_test.spv) + add_test(NAME spirv-cross-msl-ycbcr-conversion-test-2 + COMMAND $ ${CMAKE_CURRENT_SOURCE_DIR}/tests-other/msl_ycbcr_conversion_test_2.spv) + add_test(NAME spirv-cross-typed-id-test + COMMAND $) + add_test(NAME spirv-cross-test + COMMAND ${PYTHON_EXECUTABLE} ${CMAKE_CURRENT_SOURCE_DIR}/test_shaders.py --parallel + ${spirv-cross-externals} + ${CMAKE_CURRENT_SOURCE_DIR}/shaders + WORKING_DIRECTORY $) + add_test(NAME spirv-cross-test-no-opt + COMMAND ${PYTHON_EXECUTABLE} ${CMAKE_CURRENT_SOURCE_DIR}/test_shaders.py --parallel + ${spirv-cross-externals} + ${CMAKE_CURRENT_SOURCE_DIR}/shaders-no-opt + 
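+            # Each add_test below re-runs test_shaders.py against one
+            # reference folder (shaders, shaders-msl, shaders-hlsl, ...),
+            # with --metal/--hlsl/--reflect selecting the backend and --opt
+            # selecting the spirv-opt-processed variants.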
WORKING_DIRECTORY $) + add_test(NAME spirv-cross-test-metal + COMMAND ${PYTHON_EXECUTABLE} ${CMAKE_CURRENT_SOURCE_DIR}/test_shaders.py --metal --parallel + ${spirv-cross-externals} + ${CMAKE_CURRENT_SOURCE_DIR}/shaders-msl + WORKING_DIRECTORY $) + add_test(NAME spirv-cross-test-metal-no-opt + COMMAND ${PYTHON_EXECUTABLE} ${CMAKE_CURRENT_SOURCE_DIR}/test_shaders.py --metal --parallel + ${spirv-cross-externals} + ${CMAKE_CURRENT_SOURCE_DIR}/shaders-msl-no-opt + WORKING_DIRECTORY $) + add_test(NAME spirv-cross-test-hlsl + COMMAND ${PYTHON_EXECUTABLE} ${CMAKE_CURRENT_SOURCE_DIR}/test_shaders.py --hlsl --parallel + ${spirv-cross-externals} + ${CMAKE_CURRENT_SOURCE_DIR}/shaders-hlsl + WORKING_DIRECTORY $) + add_test(NAME spirv-cross-test-hlsl-no-opt + COMMAND ${PYTHON_EXECUTABLE} ${CMAKE_CURRENT_SOURCE_DIR}/test_shaders.py --hlsl --parallel + ${spirv-cross-externals} + ${CMAKE_CURRENT_SOURCE_DIR}/shaders-hlsl-no-opt + WORKING_DIRECTORY $) + add_test(NAME spirv-cross-test-opt + COMMAND ${PYTHON_EXECUTABLE} ${CMAKE_CURRENT_SOURCE_DIR}/test_shaders.py --opt --parallel + ${spirv-cross-externals} + ${CMAKE_CURRENT_SOURCE_DIR}/shaders + WORKING_DIRECTORY $) + add_test(NAME spirv-cross-test-metal-opt + COMMAND ${PYTHON_EXECUTABLE} ${CMAKE_CURRENT_SOURCE_DIR}/test_shaders.py --metal --opt --parallel + ${spirv-cross-externals} + ${CMAKE_CURRENT_SOURCE_DIR}/shaders-msl + WORKING_DIRECTORY $) + add_test(NAME spirv-cross-test-hlsl-opt + COMMAND ${PYTHON_EXECUTABLE} ${CMAKE_CURRENT_SOURCE_DIR}/test_shaders.py --hlsl --opt --parallel + ${spirv-cross-externals} + ${CMAKE_CURRENT_SOURCE_DIR}/shaders-hlsl + WORKING_DIRECTORY $) + add_test(NAME spirv-cross-test-reflection + COMMAND ${PYTHON_EXECUTABLE} ${CMAKE_CURRENT_SOURCE_DIR}/test_shaders.py --reflect --parallel + ${spirv-cross-externals} + ${CMAKE_CURRENT_SOURCE_DIR}/shaders-reflection + WORKING_DIRECTORY $) + add_test(NAME spirv-cross-test-ue4 + COMMAND ${PYTHON_EXECUTABLE} ${CMAKE_CURRENT_SOURCE_DIR}/test_shaders.py --msl --parallel + ${spirv-cross-externals} + ${CMAKE_CURRENT_SOURCE_DIR}/shaders-ue4 + WORKING_DIRECTORY $) + add_test(NAME spirv-cross-test-ue4-opt + COMMAND ${PYTHON_EXECUTABLE} ${CMAKE_CURRENT_SOURCE_DIR}/test_shaders.py --msl --opt --parallel + ${spirv-cross-externals} + ${CMAKE_CURRENT_SOURCE_DIR}/shaders-ue4 + WORKING_DIRECTORY $) + endif() + elseif(NOT ${PYTHONINTERP_FOUND}) + message(WARNING "Testing disabled. Could not find python3. 
If you have python3 installed try running " + "cmake with -DPYTHON_EXECUTABLE:FILEPATH=/path/to/python3 to help it find the executable") + endif() + endif() +endif() diff --git a/third_party/rust/spirv_cross/src/vendor/SPIRV-Cross/GLSL.std.450.h b/third_party/rust/spirv_cross/src/vendor/SPIRV-Cross/GLSL.std.450.h new file mode 100644 index 000000000000..be8be7c03ffc --- /dev/null +++ b/third_party/rust/spirv_cross/src/vendor/SPIRV-Cross/GLSL.std.450.h @@ -0,0 +1,131 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + +#ifndef GLSLstd450_H +#define GLSLstd450_H + +static const int GLSLstd450Version = 100; +static const int GLSLstd450Revision = 3; + +enum GLSLstd450 { + GLSLstd450Bad = 0, + + GLSLstd450Round = 1, + GLSLstd450RoundEven = 2, + GLSLstd450Trunc = 3, + GLSLstd450FAbs = 4, + GLSLstd450SAbs = 5, + GLSLstd450FSign = 6, + GLSLstd450SSign = 7, + GLSLstd450Floor = 8, + GLSLstd450Ceil = 9, + GLSLstd450Fract = 10, + + GLSLstd450Radians = 11, + GLSLstd450Degrees = 12, + GLSLstd450Sin = 13, + GLSLstd450Cos = 14, + GLSLstd450Tan = 15, + GLSLstd450Asin = 16, + GLSLstd450Acos = 17, + GLSLstd450Atan = 18, + GLSLstd450Sinh = 19, + GLSLstd450Cosh = 20, + GLSLstd450Tanh = 21, + GLSLstd450Asinh = 22, + GLSLstd450Acosh = 23, + GLSLstd450Atanh = 24, + GLSLstd450Atan2 = 25, + + GLSLstd450Pow = 26, + GLSLstd450Exp = 27, + GLSLstd450Log = 28, + GLSLstd450Exp2 = 29, + GLSLstd450Log2 = 30, + GLSLstd450Sqrt = 31, + GLSLstd450InverseSqrt = 32, + + GLSLstd450Determinant = 33, + GLSLstd450MatrixInverse = 34, + + GLSLstd450Modf = 35, + GLSLstd450ModfStruct = 36, + GLSLstd450FMin = 37, + GLSLstd450UMin = 38, + GLSLstd450SMin = 39, + GLSLstd450FMax = 40, + GLSLstd450UMax = 41, + GLSLstd450SMax = 42, + GLSLstd450FClamp = 43, + GLSLstd450UClamp = 44, + GLSLstd450SClamp = 45, + GLSLstd450FMix = 46, + GLSLstd450IMix = 47, + GLSLstd450Step = 48, + GLSLstd450SmoothStep = 49, + + GLSLstd450Fma = 50, + GLSLstd450Frexp = 51, + GLSLstd450FrexpStruct = 52, + GLSLstd450Ldexp = 53, + + GLSLstd450PackSnorm4x8 = 54, + GLSLstd450PackUnorm4x8 = 55, + GLSLstd450PackSnorm2x16 = 56, + GLSLstd450PackUnorm2x16 = 57, + GLSLstd450PackHalf2x16 = 58, + GLSLstd450PackDouble2x32 = 59, + GLSLstd450UnpackSnorm2x16 = 60, + GLSLstd450UnpackUnorm2x16 = 61, + GLSLstd450UnpackHalf2x16 = 62, + GLSLstd450UnpackSnorm4x8 = 63, + GLSLstd450UnpackUnorm4x8 = 64, + GLSLstd450UnpackDouble2x32 = 65, + + GLSLstd450Length = 66, + GLSLstd450Distance = 67, + GLSLstd450Cross = 68, + GLSLstd450Normalize = 69, + GLSLstd450FaceForward = 70, + GLSLstd450Reflect = 71, + GLSLstd450Refract = 72, + + GLSLstd450FindILsb = 73, + GLSLstd450FindSMsb = 74, + GLSLstd450FindUMsb = 75, + + GLSLstd450InterpolateAtCentroid = 76, + GLSLstd450InterpolateAtSample = 77, + GLSLstd450InterpolateAtOffset = 78, + + GLSLstd450NMin = 79, + GLSLstd450NMax = 80, + GLSLstd450NClamp = 81, + + GLSLstd450Count +}; + +#endif diff --git a/third_party/rust/spirv_cross/src/vendor/SPIRV-Cross/LICENSE b/third_party/rust/spirv_cross/src/vendor/SPIRV-Cross/LICENSE new file mode 100644 index 000000000000..d64569567334 --- /dev/null +++ b/third_party/rust/spirv_cross/src/vendor/SPIRV-Cross/LICENSE @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. 
+ + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. 
This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
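For orientation while reading the vendored sources that follow: SPIRV-Cross is consumed by handing it a SPIR-V binary, instantiating a backend compiler, setting target options, and calling compile(). A minimal sketch against the public API (illustrative only, not part of this patch; the helper name `to_glsl` and the `words` buffer are assumptions, the caller is expected to have loaded a valid SPIR-V module):

#include "spirv_glsl.hpp"

#include <cstdint>
#include <string>
#include <utility>
#include <vector>

// Illustrative sketch: cross-compile a SPIR-V module to GLSL ES 3.10.
static std::string to_glsl(std::vector<uint32_t> words)
{
    spirv_cross::CompilerGLSL compiler(std::move(words)); // parses the module

    spirv_cross::CompilerGLSL::Options opts;
    opts.version = 310; // target GLSL version
    opts.es = true;     // emit GLSL ES rather than desktop GLSL
    compiler.set_common_options(opts);

    // With SPIRV_CROSS_EXCEPTIONS_TO_ASSERTIONS defined (as the gn config
    // below builds it), errors abort instead of throwing.
    return compiler.compile();
}

The Makefile, gn, and CMake builds below all produce this same library surface; only the packaging differs.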
diff --git a/third_party/rust/spirv_cross/src/vendor/SPIRV-Cross/Makefile b/third_party/rust/spirv_cross/src/vendor/SPIRV-Cross/Makefile new file mode 100644 index 000000000000..a006e81faa71 --- /dev/null +++ b/third_party/rust/spirv_cross/src/vendor/SPIRV-Cross/Makefile @@ -0,0 +1,41 @@ +TARGET := spirv-cross + +SOURCES := $(wildcard spirv_*.cpp) +CLI_SOURCES := main.cpp + +OBJECTS := $(SOURCES:.cpp=.o) +CLI_OBJECTS := $(CLI_SOURCES:.cpp=.o) + +STATIC_LIB := lib$(TARGET).a + +DEPS := $(OBJECTS:.o=.d) $(CLI_OBJECTS:.o=.d) + +CXXFLAGS += -std=c++11 -Wall -Wextra -Wshadow + +ifeq ($(DEBUG), 1) + CXXFLAGS += -O0 -g +else + CXXFLAGS += -O2 -DNDEBUG +endif + +ifeq ($(SPIRV_CROSS_EXCEPTIONS_TO_ASSERTIONS), 1) + CXXFLAGS += -DSPIRV_CROSS_EXCEPTIONS_TO_ASSERTIONS -fno-exceptions +endif + +all: $(TARGET) + +-include $(DEPS) + +$(TARGET): $(CLI_OBJECTS) $(STATIC_LIB) + $(CXX) -o $@ $(CLI_OBJECTS) $(STATIC_LIB) $(LDFLAGS) + +$(STATIC_LIB): $(OBJECTS) + $(AR) rcs $@ $(OBJECTS) + +%.o: %.cpp + $(CXX) -c -o $@ $< $(CXXFLAGS) -MMD + +clean: + rm -f $(TARGET) $(OBJECTS) $(CLI_OBJECTS) $(STATIC_LIB) $(DEPS) + +.PHONY: clean diff --git a/third_party/rust/spirv_cross/src/vendor/SPIRV-Cross/cmake/gitversion.in.h b/third_party/rust/spirv_cross/src/vendor/SPIRV-Cross/cmake/gitversion.in.h new file mode 100644 index 000000000000..7135e283b23d --- /dev/null +++ b/third_party/rust/spirv_cross/src/vendor/SPIRV-Cross/cmake/gitversion.in.h @@ -0,0 +1,6 @@ +#ifndef SPIRV_CROSS_GIT_VERSION_H_ +#define SPIRV_CROSS_GIT_VERSION_H_ + +#define SPIRV_CROSS_GIT_REVISION "Git commit: @spirv-cross-build-version@ Timestamp: @spirv-cross-timestamp@" + +#endif diff --git a/third_party/rust/spirv_cross/src/vendor/SPIRV-Cross/format_all.sh b/third_party/rust/spirv_cross/src/vendor/SPIRV-Cross/format_all.sh new file mode 100644 index 000000000000..fcfffc57f864 --- /dev/null +++ b/third_party/rust/spirv_cross/src/vendor/SPIRV-Cross/format_all.sh @@ -0,0 +1,8 @@ +#!/bin/bash + +#for file in spirv_*.{cpp,hpp} include/spirv_cross/*.{hpp,h} samples/cpp/*.cpp main.cpp +for file in spirv_*.{cpp,hpp} main.cpp +do + echo "Formatting file: $file ..." + clang-format -style=file -i $file +done diff --git a/third_party/rust/spirv_cross/src/vendor/SPIRV-Cross/gn/BUILD.gn b/third_party/rust/spirv_cross/src/vendor/SPIRV-Cross/gn/BUILD.gn new file mode 100644 index 000000000000..8458c1a70390 --- /dev/null +++ b/third_party/rust/spirv_cross/src/vendor/SPIRV-Cross/gn/BUILD.gn @@ -0,0 +1,63 @@ +# Copyright (C) 2019 Google, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +config("spirv_cross_public") { + include_dirs = [ ".." 
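+ # SPIRV_CROSS_EXCEPTIONS_TO_ASSERTIONS (defined below) converts SPIRV-Cross throw sites into assertions/aborts, which is what allows the sources to build with the -fno-exceptions cflag set further down.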
] + + defines = [ "SPIRV_CROSS_EXCEPTIONS_TO_ASSERTIONS" ] +} + +source_set("spirv_cross_sources") { + public_configs = [ ":spirv_cross_public" ] + + sources = [ + "../GLSL.std.450.h", + "../spirv.hpp", + "../spirv_cfg.cpp", + "../spirv_cfg.hpp", + "../spirv_common.hpp", + "../spirv_cross.cpp", + "../spirv_cross.hpp", + "../spirv_cross_containers.hpp", + "../spirv_cross_error_handling.hpp", + "../spirv_cross_parsed_ir.cpp", + "../spirv_cross_parsed_ir.hpp", + "../spirv_cross_util.cpp", + "../spirv_cross_util.hpp", + "../spirv_glsl.cpp", + "../spirv_glsl.hpp", + "../spirv_msl.cpp", + "../spirv_msl.hpp", + "../spirv_parser.cpp", + "../spirv_parser.hpp", + "../spirv_reflect.cpp", + "../spirv_reflect.hpp", + ] + + cflags = [ "-fno-exceptions" ] + + if (is_clang) { + cflags_cc = [ + "-Wno-extra-semi", + "-Wno-ignored-qualifiers", + "-Wno-implicit-fallthrough", + "-Wno-inconsistent-missing-override", + "-Wno-missing-field-initializers", + "-Wno-newline-eof", + "-Wno-sign-compare", + "-Wno-unused-variable", + ] + } +} diff --git a/third_party/rust/spirv_cross/src/vendor/SPIRV-Cross/include/spirv_cross/barrier.hpp b/third_party/rust/spirv_cross/src/vendor/SPIRV-Cross/include/spirv_cross/barrier.hpp new file mode 100644 index 000000000000..5e0456019b6d --- /dev/null +++ b/third_party/rust/spirv_cross/src/vendor/SPIRV-Cross/include/spirv_cross/barrier.hpp @@ -0,0 +1,79 @@ + + + + + + + + + + + + + + + + +#ifndef SPIRV_CROSS_BARRIER_HPP +#define SPIRV_CROSS_BARRIER_HPP + +#include <atomic> +#include <thread> + +namespace spirv_cross +{ +class Barrier +{ +public: + Barrier() + { + count.store(0); + iteration.store(0); + } + + void set_release_divisor(unsigned divisor) + { + this->divisor = divisor; + } + + static inline void memoryBarrier() + { + std::atomic_thread_fence(std::memory_order_seq_cst); + } + + void reset_counter() + { + count.store(0); + iteration.store(0); + } + + void wait() + { + unsigned target_iteration = iteration.load(std::memory_order_relaxed) + 1; + + unsigned target_count = divisor * target_iteration; + + + + unsigned c = count.fetch_add(1u, std::memory_order_relaxed); + + if (c + 1 == target_count) + { + iteration.store(target_iteration, std::memory_order_relaxed); + } + else + { + + while (iteration.load(std::memory_order_relaxed) != target_iteration) + std::this_thread::yield(); + } + } + +private: + unsigned divisor = 1; + std::atomic<unsigned> count; + std::atomic<unsigned> iteration; +}; +} + +#endif diff --git a/third_party/rust/spirv_cross/src/vendor/SPIRV-Cross/include/spirv_cross/external_interface.h b/third_party/rust/spirv_cross/src/vendor/SPIRV-Cross/include/spirv_cross/external_interface.h new file mode 100644 index 000000000000..aa8ed905b047 --- /dev/null +++ b/third_party/rust/spirv_cross/src/vendor/SPIRV-Cross/include/spirv_cross/external_interface.h @@ -0,0 +1,126 @@ + + + + + + + + + + + + + + + + +#ifndef SPIRV_CROSS_EXTERNAL_INTERFACE_H +#define SPIRV_CROSS_EXTERNAL_INTERFACE_H + +#ifdef __cplusplus +extern "C" { +#endif + +#include <stddef.h> + +typedef struct spirv_cross_shader spirv_cross_shader_t; + +struct spirv_cross_interface +{ + spirv_cross_shader_t *(*construct)(void); + void (*destruct)(spirv_cross_shader_t *thiz); + void (*invoke)(spirv_cross_shader_t *thiz); +}; + +void spirv_cross_set_stage_input(spirv_cross_shader_t *thiz, unsigned location, void *data, size_t size); + +void spirv_cross_set_stage_output(spirv_cross_shader_t *thiz, unsigned location, void *data, size_t size); + +void spirv_cross_set_push_constant(spirv_cross_shader_t *thiz, void *data, size_t size); + +void 
spirv_cross_set_uniform_constant(spirv_cross_shader_t *thiz, unsigned location, void *data, size_t size); + +void spirv_cross_set_resource(spirv_cross_shader_t *thiz, unsigned set, unsigned binding, void **data, size_t size); + +const struct spirv_cross_interface *spirv_cross_get_interface(void); + +typedef enum spirv_cross_builtin { + SPIRV_CROSS_BUILTIN_POSITION = 0, + SPIRV_CROSS_BUILTIN_FRAG_COORD = 1, + SPIRV_CROSS_BUILTIN_WORK_GROUP_ID = 2, + SPIRV_CROSS_BUILTIN_NUM_WORK_GROUPS = 3, + SPIRV_CROSS_NUM_BUILTINS +} spirv_cross_builtin; + +void spirv_cross_set_builtin(spirv_cross_shader_t *thiz, spirv_cross_builtin builtin, void *data, size_t size); + +#define SPIRV_CROSS_NUM_DESCRIPTOR_SETS 4 +#define SPIRV_CROSS_NUM_DESCRIPTOR_BINDINGS 16 +#define SPIRV_CROSS_NUM_STAGE_INPUTS 16 +#define SPIRV_CROSS_NUM_STAGE_OUTPUTS 16 +#define SPIRV_CROSS_NUM_UNIFORM_CONSTANTS 32 + +enum spirv_cross_format +{ + SPIRV_CROSS_FORMAT_R8_UNORM = 0, + SPIRV_CROSS_FORMAT_R8G8_UNORM = 1, + SPIRV_CROSS_FORMAT_R8G8B8_UNORM = 2, + SPIRV_CROSS_FORMAT_R8G8B8A8_UNORM = 3, + + SPIRV_CROSS_NUM_FORMATS +}; + +enum spirv_cross_wrap +{ + SPIRV_CROSS_WRAP_CLAMP_TO_EDGE = 0, + SPIRV_CROSS_WRAP_REPEAT = 1, + + SPIRV_CROSS_NUM_WRAP +}; + +enum spirv_cross_filter +{ + SPIRV_CROSS_FILTER_NEAREST = 0, + SPIRV_CROSS_FILTER_LINEAR = 1, + + SPIRV_CROSS_NUM_FILTER +}; + +enum spirv_cross_mipfilter +{ + SPIRV_CROSS_MIPFILTER_BASE = 0, + SPIRV_CROSS_MIPFILTER_NEAREST = 1, + SPIRV_CROSS_MIPFILTER_LINEAR = 2, + + SPIRV_CROSS_NUM_MIPFILTER +}; + +struct spirv_cross_miplevel +{ + const void *data; + unsigned width, height; + size_t stride; +}; + +struct spirv_cross_sampler_info +{ + const struct spirv_cross_miplevel *mipmaps; + unsigned num_mipmaps; + + enum spirv_cross_format format; + enum spirv_cross_wrap wrap_s; + enum spirv_cross_wrap wrap_t; + enum spirv_cross_filter min_filter; + enum spirv_cross_filter mag_filter; + enum spirv_cross_mipfilter mip_filter; +}; + +typedef struct spirv_cross_sampler_2d spirv_cross_sampler_2d_t; +spirv_cross_sampler_2d_t *spirv_cross_create_sampler_2d(const struct spirv_cross_sampler_info *info); +void spirv_cross_destroy_sampler_2d(spirv_cross_sampler_2d_t *samp); + +#ifdef __cplusplus +} +#endif + +#endif diff --git a/third_party/rust/spirv_cross/src/vendor/SPIRV-Cross/include/spirv_cross/image.hpp b/third_party/rust/spirv_cross/src/vendor/SPIRV-Cross/include/spirv_cross/image.hpp new file mode 100644 index 000000000000..948a75d0c915 --- /dev/null +++ b/third_party/rust/spirv_cross/src/vendor/SPIRV-Cross/include/spirv_cross/image.hpp @@ -0,0 +1,62 @@ + + + + + + + + + + + + + + + + +#ifndef SPIRV_CROSS_IMAGE_HPP +#define SPIRV_CROSS_IMAGE_HPP + +#ifndef GLM_SWIZZLE +#define GLM_SWIZZLE +#endif + +#ifndef GLM_FORCE_RADIANS +#define GLM_FORCE_RADIANS +#endif + +#include <glm/glm.hpp> + +namespace spirv_cross +{ +template <typename T> +struct image2DBase +{ + virtual ~image2DBase() = default; + inline virtual T load(glm::ivec2 coord) const + { + return T(0, 0, 0, 1); + } + inline virtual void store(glm::ivec2 coord, const T &v) + { + } +}; + +typedef image2DBase<glm::vec4> image2D; +typedef image2DBase<glm::ivec4> iimage2D; +typedef image2DBase<glm::uvec4> uimage2D; + +template <typename T> +inline T imageLoad(const image2DBase<T> &image, glm::ivec2 coord) +{ + return image.load(coord); +} + +template <typename T> +void imageStore(image2DBase<T> &image, glm::ivec2 coord, const T &value) +{ + image.store(coord, value); +} +} + +#endif diff --git a/third_party/rust/spirv_cross/src/vendor/SPIRV-Cross/include/spirv_cross/internal_interface.hpp 
b/third_party/rust/spirv_cross/src/vendor/SPIRV-Cross/include/spirv_cross/internal_interface.hpp new file mode 100644 index 000000000000..bf525e63ca1e --- /dev/null +++ b/third_party/rust/spirv_cross/src/vendor/SPIRV-Cross/include/spirv_cross/internal_interface.hpp @@ -0,0 +1,603 @@ + + + + + + + + + + + + + + + + +#ifndef SPIRV_CROSS_INTERNAL_INTERFACE_HPP +#define SPIRV_CROSS_INTERNAL_INTERFACE_HPP + + + +#ifndef GLM_FORCE_SWIZZLE +#define GLM_FORCE_SWIZZLE +#endif + +#ifndef GLM_FORCE_RADIANS +#define GLM_FORCE_RADIANS +#endif + +#include + +#include "barrier.hpp" +#include "external_interface.h" +#include "image.hpp" +#include "sampler.hpp" +#include "thread_group.hpp" +#include +#include + +namespace internal +{ + + + + +template +struct Interface +{ + enum + { + ArraySize = 1, + Size = sizeof(T) + }; + + Interface() + : ptr(0) + { + } + T &get() + { + assert(ptr); + return *ptr; + } + + T *ptr; +}; + + +template +struct Interface +{ + enum + { + ArraySize = U, + Size = U * sizeof(T) + }; + + Interface() + : ptr(0) + { + } + T *get() + { + assert(ptr); + return ptr; + } + + T *ptr; +}; + + +template +struct PointerInterface +{ + enum + { + ArraySize = 1, + Size = sizeof(T *) + }; + enum + { + PreDereference = true + }; + + PointerInterface() + : ptr(0) + { + } + + T &get() + { + assert(ptr); + return *ptr; + } + + T *ptr; +}; + + +template +struct DereferenceAdaptor +{ + DereferenceAdaptor(T **ptr) + : ptr(ptr) + { + } + T &operator[](unsigned index) const + { + return *(ptr[index]); + } + T **ptr; +}; + + + +template +struct PointerInterface +{ + enum + { + ArraySize = U, + Size = sizeof(T *) * U + }; + enum + { + PreDereference = false + }; + PointerInterface() + : ptr(0) + { + } + + DereferenceAdaptor get() + { + assert(ptr); + return DereferenceAdaptor(ptr); + } + + T **ptr; +}; + + + +template +struct Resource : PointerInterface +{ +}; + + +template +struct UniformConstant : Interface +{ +}; +template +struct StageInput : Interface +{ +}; +template +struct StageOutput : Interface +{ +}; +template +struct PushConstant : Interface +{ +}; +} + +struct spirv_cross_shader +{ + struct PPSize + { + PPSize() + : ptr(0) + , size(0) + { + } + void **ptr; + size_t size; + }; + + struct PPSizeResource + { + PPSizeResource() + : ptr(0) + , size(0) + , pre_dereference(false) + { + } + void **ptr; + size_t size; + bool pre_dereference; + }; + + PPSizeResource resources[SPIRV_CROSS_NUM_DESCRIPTOR_SETS][SPIRV_CROSS_NUM_DESCRIPTOR_BINDINGS]; + PPSize stage_inputs[SPIRV_CROSS_NUM_STAGE_INPUTS]; + PPSize stage_outputs[SPIRV_CROSS_NUM_STAGE_OUTPUTS]; + PPSize uniform_constants[SPIRV_CROSS_NUM_UNIFORM_CONSTANTS]; + PPSize push_constant; + PPSize builtins[SPIRV_CROSS_NUM_BUILTINS]; + + template + void register_builtin(spirv_cross_builtin builtin, const U &value) + { + assert(!builtins[builtin].ptr); + + builtins[builtin].ptr = (void **)&value.ptr; + builtins[builtin].size = sizeof(*value.ptr) * U::ArraySize; + } + + void set_builtin(spirv_cross_builtin builtin, void *data, size_t size) + { + assert(builtins[builtin].ptr); + assert(size >= builtins[builtin].size); + + *builtins[builtin].ptr = data; + } + + template + void register_resource(const internal::Resource &value, unsigned set, unsigned binding) + { + assert(set < SPIRV_CROSS_NUM_DESCRIPTOR_SETS); + assert(binding < SPIRV_CROSS_NUM_DESCRIPTOR_BINDINGS); + assert(!resources[set][binding].ptr); + + resources[set][binding].ptr = (void **)&value.ptr; + resources[set][binding].size = internal::Resource::Size; + resources[set][binding].pre_dereference 
= internal::Resource::PreDereference; + } + + template + void register_stage_input(const internal::StageInput &value, unsigned location) + { + assert(location < SPIRV_CROSS_NUM_STAGE_INPUTS); + assert(!stage_inputs[location].ptr); + + stage_inputs[location].ptr = (void **)&value.ptr; + stage_inputs[location].size = internal::StageInput::Size; + } + + template + void register_stage_output(const internal::StageOutput &value, unsigned location) + { + assert(location < SPIRV_CROSS_NUM_STAGE_OUTPUTS); + assert(!stage_outputs[location].ptr); + + stage_outputs[location].ptr = (void **)&value.ptr; + stage_outputs[location].size = internal::StageOutput::Size; + } + + template + void register_uniform_constant(const internal::UniformConstant &value, unsigned location) + { + assert(location < SPIRV_CROSS_NUM_UNIFORM_CONSTANTS); + assert(!uniform_constants[location].ptr); + + uniform_constants[location].ptr = (void **)&value.ptr; + uniform_constants[location].size = internal::UniformConstant::Size; + } + + template + void register_push_constant(const internal::PushConstant &value) + { + assert(!push_constant.ptr); + + push_constant.ptr = (void **)&value.ptr; + push_constant.size = internal::PushConstant::Size; + } + + void set_stage_input(unsigned location, void *data, size_t size) + { + assert(location < SPIRV_CROSS_NUM_STAGE_INPUTS); + assert(stage_inputs[location].ptr); + assert(size >= stage_inputs[location].size); + + *stage_inputs[location].ptr = data; + } + + void set_stage_output(unsigned location, void *data, size_t size) + { + assert(location < SPIRV_CROSS_NUM_STAGE_OUTPUTS); + assert(stage_outputs[location].ptr); + assert(size >= stage_outputs[location].size); + + *stage_outputs[location].ptr = data; + } + + void set_uniform_constant(unsigned location, void *data, size_t size) + { + assert(location < SPIRV_CROSS_NUM_UNIFORM_CONSTANTS); + assert(uniform_constants[location].ptr); + assert(size >= uniform_constants[location].size); + + *uniform_constants[location].ptr = data; + } + + void set_push_constant(void *data, size_t size) + { + assert(push_constant.ptr); + assert(size >= push_constant.size); + + *push_constant.ptr = data; + } + + void set_resource(unsigned set, unsigned binding, void **data, size_t size) + { + assert(set < SPIRV_CROSS_NUM_DESCRIPTOR_SETS); + assert(binding < SPIRV_CROSS_NUM_DESCRIPTOR_BINDINGS); + assert(resources[set][binding].ptr); + assert(size >= resources[set][binding].size); + + + if (resources[set][binding].pre_dereference) + *resources[set][binding].ptr = *data; + else + *resources[set][binding].ptr = data; + } +}; + +namespace spirv_cross +{ +template +struct BaseShader : spirv_cross_shader +{ + void invoke() + { + static_cast(this)->main(); + } +}; + +struct FragmentResources +{ + internal::StageOutput gl_FragCoord; + void init(spirv_cross_shader &s) + { + s.register_builtin(SPIRV_CROSS_BUILTIN_FRAG_COORD, gl_FragCoord); + } +#define gl_FragCoord __res->gl_FragCoord.get() +}; + +template +struct FragmentShader : BaseShader> +{ + inline void main() + { + impl.main(); + } + + FragmentShader() + { + resources.init(*this); + impl.__res = &resources; + } + + T impl; + Res resources; +}; + +struct VertexResources +{ + internal::StageOutput gl_Position; + void init(spirv_cross_shader &s) + { + s.register_builtin(SPIRV_CROSS_BUILTIN_POSITION, gl_Position); + } +#define gl_Position __res->gl_Position.get() +}; + +template +struct VertexShader : BaseShader> +{ + inline void main() + { + impl.main(); + } + + VertexShader() + { + resources.init(*this); + impl.__res = 
&resources; + } + + T impl; + Res resources; +}; + +struct TessEvaluationResources +{ + inline void init(spirv_cross_shader &) + { + } +}; + +template +struct TessEvaluationShader : BaseShader> +{ + inline void main() + { + impl.main(); + } + + TessEvaluationShader() + { + resources.init(*this); + impl.__res = &resources; + } + + T impl; + Res resources; +}; + +struct TessControlResources +{ + inline void init(spirv_cross_shader &) + { + } +}; + +template +struct TessControlShader : BaseShader> +{ + inline void main() + { + impl.main(); + } + + TessControlShader() + { + resources.init(*this); + impl.__res = &resources; + } + + T impl; + Res resources; +}; + +struct GeometryResources +{ + inline void init(spirv_cross_shader &) + { + } +}; + +template +struct GeometryShader : BaseShader> +{ + inline void main() + { + impl.main(); + } + + GeometryShader() + { + resources.init(*this); + impl.__res = &resources; + } + + T impl; + Res resources; +}; + +struct ComputeResources +{ + internal::StageInput gl_WorkGroupID__; + internal::StageInput gl_NumWorkGroups__; + void init(spirv_cross_shader &s) + { + s.register_builtin(SPIRV_CROSS_BUILTIN_WORK_GROUP_ID, gl_WorkGroupID__); + s.register_builtin(SPIRV_CROSS_BUILTIN_NUM_WORK_GROUPS, gl_NumWorkGroups__); + } +#define gl_WorkGroupID __res->gl_WorkGroupID__.get() +#define gl_NumWorkGroups __res->gl_NumWorkGroups__.get() + + Barrier barrier__; +#define barrier() __res->barrier__.wait() +}; + +struct ComputePrivateResources +{ + uint32_t gl_LocalInvocationIndex__; +#define gl_LocalInvocationIndex __priv_res.gl_LocalInvocationIndex__ + glm::uvec3 gl_LocalInvocationID__; +#define gl_LocalInvocationID __priv_res.gl_LocalInvocationID__ + glm::uvec3 gl_GlobalInvocationID__; +#define gl_GlobalInvocationID __priv_res.gl_GlobalInvocationID__ +}; + +template +struct ComputeShader : BaseShader> +{ + inline void main() + { + resources.barrier__.reset_counter(); + + for (unsigned z = 0; z < WorkGroupZ; z++) + for (unsigned y = 0; y < WorkGroupY; y++) + for (unsigned x = 0; x < WorkGroupX; x++) + impl[z][y][x].__priv_res.gl_GlobalInvocationID__ = + glm::uvec3(WorkGroupX, WorkGroupY, WorkGroupZ) * resources.gl_WorkGroupID__.get() + + glm::uvec3(x, y, z); + + group.run(); + group.wait(); + } + + ComputeShader() + : group(&impl[0][0][0]) + { + resources.init(*this); + resources.barrier__.set_release_divisor(WorkGroupX * WorkGroupY * WorkGroupZ); + + unsigned i = 0; + for (unsigned z = 0; z < WorkGroupZ; z++) + { + for (unsigned y = 0; y < WorkGroupY; y++) + { + for (unsigned x = 0; x < WorkGroupX; x++) + { + impl[z][y][x].__priv_res.gl_LocalInvocationID__ = glm::uvec3(x, y, z); + impl[z][y][x].__priv_res.gl_LocalInvocationIndex__ = i++; + impl[z][y][x].__res = &resources; + } + } + } + } + + T impl[WorkGroupZ][WorkGroupY][WorkGroupX]; + ThreadGroup group; + Res resources; +}; + +inline void memoryBarrierShared() +{ + Barrier::memoryBarrier(); +} +inline void memoryBarrier() +{ + Barrier::memoryBarrier(); +} + + + +template +inline T atomicAdd(T &v, T a) +{ + static_assert(sizeof(std::atomic) == sizeof(T), "Cannot cast properly to std::atomic."); + + + + return std::atomic_fetch_add_explicit(reinterpret_cast *>(&v), a, std::memory_order_relaxed); +} +} + +void spirv_cross_set_stage_input(spirv_cross_shader_t *shader, unsigned location, void *data, size_t size) +{ + shader->set_stage_input(location, data, size); +} + +void spirv_cross_set_stage_output(spirv_cross_shader_t *shader, unsigned location, void *data, size_t size) +{ + shader->set_stage_output(location, data, 
size); +} + +void spirv_cross_set_uniform_constant(spirv_cross_shader_t *shader, unsigned location, void *data, size_t size) +{ + shader->set_uniform_constant(location, data, size); +} + +void spirv_cross_set_resource(spirv_cross_shader_t *shader, unsigned set, unsigned binding, void **data, size_t size) +{ + shader->set_resource(set, binding, data, size); +} + +void spirv_cross_set_push_constant(spirv_cross_shader_t *shader, void *data, size_t size) +{ + shader->set_push_constant(data, size); +} + +void spirv_cross_set_builtin(spirv_cross_shader_t *shader, spirv_cross_builtin builtin, void *data, size_t size) +{ + shader->set_builtin(builtin, data, size); +} + +#endif diff --git a/third_party/rust/spirv_cross/src/vendor/SPIRV-Cross/include/spirv_cross/sampler.hpp b/third_party/rust/spirv_cross/src/vendor/SPIRV-Cross/include/spirv_cross/sampler.hpp new file mode 100644 index 000000000000..732507ac07aa --- /dev/null +++ b/third_party/rust/spirv_cross/src/vendor/SPIRV-Cross/include/spirv_cross/sampler.hpp @@ -0,0 +1,105 @@ + + + + + + + + + + + + + + + + +#ifndef SPIRV_CROSS_SAMPLER_HPP +#define SPIRV_CROSS_SAMPLER_HPP + +#include <vector> + +namespace spirv_cross +{ +struct spirv_cross_sampler_2d +{ + inline virtual ~spirv_cross_sampler_2d() + { + } +}; + +template <typename T> +struct sampler2DBase : spirv_cross_sampler_2d +{ + sampler2DBase(const spirv_cross_sampler_info *info) + { + mips.insert(mips.end(), info->mipmaps, info->mipmaps + info->num_mipmaps); + format = info->format; + wrap_s = info->wrap_s; + wrap_t = info->wrap_t; + min_filter = info->min_filter; + mag_filter = info->mag_filter; + mip_filter = info->mip_filter; + } + + inline virtual T sample(glm::vec2 uv, float bias) + { + return sampleLod(uv, bias); + } + + inline virtual T sampleLod(glm::vec2 uv, float lod) + { + if (mag_filter == SPIRV_CROSS_FILTER_NEAREST) + { + uv.x = wrap(uv.x, wrap_s, mips[0].width); + uv.y = wrap(uv.y, wrap_t, mips[0].height); + glm::vec2 uv_full = uv * glm::vec2(mips[0].width, mips[0].height); + + int x = int(uv_full.x); + int y = int(uv_full.y); + return sample(x, y, 0); + } + else + { + return T(0, 0, 0, 1); + } + } + + inline float wrap(float v, spirv_cross_wrap wrap, unsigned size) + { + switch (wrap) + { + case SPIRV_CROSS_WRAP_REPEAT: + return v - glm::floor(v); + case SPIRV_CROSS_WRAP_CLAMP_TO_EDGE: + { + float half = 0.5f / size; + return glm::clamp(v, half, 1.0f - half); + } + + default: + return 0.0f; + } + } + + std::vector<spirv_cross_miplevel> mips; + spirv_cross_format format; + spirv_cross_wrap wrap_s; + spirv_cross_wrap wrap_t; + spirv_cross_filter min_filter; + spirv_cross_filter mag_filter; + spirv_cross_mipfilter mip_filter; +}; + +typedef sampler2DBase<glm::vec4> sampler2D; +typedef sampler2DBase<glm::ivec4> isampler2D; +typedef sampler2DBase<glm::uvec4> usampler2D; + +template <typename T> +inline T texture(const sampler2DBase<T> &samp, const glm::vec2 &uv, float bias = 0.0f) +{ + return samp.sample(uv, bias); +} +} + +#endif diff --git a/third_party/rust/spirv_cross/src/vendor/SPIRV-Cross/include/spirv_cross/thread_group.hpp b/third_party/rust/spirv_cross/src/vendor/SPIRV-Cross/include/spirv_cross/thread_group.hpp new file mode 100644 index 000000000000..c8abf1c20548 --- /dev/null +++ b/third_party/rust/spirv_cross/src/vendor/SPIRV-Cross/include/spirv_cross/thread_group.hpp @@ -0,0 +1,113 @@ + + + + + + + + + + + + + + + + +#ifndef SPIRV_CROSS_THREAD_GROUP_HPP +#define SPIRV_CROSS_THREAD_GROUP_HPP + +#include <condition_variable> +#include <mutex> +#include <thread> + +namespace spirv_cross +{ +template <typename T, unsigned Size> +class ThreadGroup +{ +public: + ThreadGroup(T *impl) + { + for (unsigned i = 0; i < Size; i++) + 
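+ // Each worker below wraps a std::thread parked on a condition variable: start() launches it in the Idle state, run() flips it to Running, and ~Thread() signals Dying before joining.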
workers[i].start(&impl[i]); + } + + void run() + { + for (auto &worker : workers) + worker.run(); + } + + void wait() + { + for (auto &worker : workers) + worker.wait(); + } + +private: + struct Thread + { + enum State + { + Idle, + Running, + Dying + }; + State state = Idle; + + void start(T *impl) + { + worker = std::thread([impl, this] { + for (;;) + { + { + std::unique_lock<std::mutex> l{ lock }; + cond.wait(l, [this] { return state != Idle; }); + if (state == Dying) + break; + } + + impl->main(); + + std::lock_guard<std::mutex> l{ lock }; + state = Idle; + cond.notify_one(); + } + }); + } + + void wait() + { + std::unique_lock<std::mutex> l{ lock }; + cond.wait(l, [this] { return state == Idle; }); + } + + void run() + { + std::lock_guard<std::mutex> l{ lock }; + state = Running; + cond.notify_one(); + } + + ~Thread() + { + if (worker.joinable()) + { + { + std::lock_guard<std::mutex> l{ lock }; + state = Dying; + cond.notify_one(); + } + worker.join(); + } + } + std::thread worker; + std::condition_variable cond; + std::mutex lock; + }; + Thread workers[Size]; +}; +} + +#endif diff --git a/third_party/rust/spirv_cross/src/vendor/SPIRV-Cross/main.cpp b/third_party/rust/spirv_cross/src/vendor/SPIRV-Cross/main.cpp new file mode 100644 index 000000000000..b254553ae8b5 --- /dev/null +++ b/third_party/rust/spirv_cross/src/vendor/SPIRV-Cross/main.cpp @@ -0,0 +1,1259 @@ + + + + + + + + + + + + + + + + +#include "spirv_cpp.hpp" +#include "spirv_cross_util.hpp" +#include "spirv_glsl.hpp" +#include "spirv_hlsl.hpp" +#include "spirv_msl.hpp" +#include "spirv_parser.hpp" +#include "spirv_reflect.hpp" +#include <algorithm> +#include <cstdio> +#include <cstring> +#include <functional> +#include <limits> +#include <memory> +#include <stdexcept> +#include <unordered_map> +#include <unordered_set> + +#ifdef HAVE_SPIRV_CROSS_GIT_VERSION +#include "gitversion.h" +#endif + +#ifdef _MSC_VER +#pragma warning(disable : 4996) +#endif + +using namespace spv; +using namespace SPIRV_CROSS_NAMESPACE; +using namespace std; + +#ifdef SPIRV_CROSS_EXCEPTIONS_TO_ASSERTIONS +static inline void THROW(const char *str) +{ + fprintf(stderr, "SPIRV-Cross will abort: %s\n", str); + fflush(stderr); + abort(); +} +#else +#define THROW(x) throw runtime_error(x) +#endif + +struct CLIParser; +struct CLICallbacks +{ + void add(const char *cli, const function<void(CLIParser &)> &func) + { + callbacks[cli] = func; + } + unordered_map<string, function<void(CLIParser &)>> callbacks; + function<void()> error_handler; + function<void(const char *)> default_handler; +}; + +struct CLIParser +{ + CLIParser(CLICallbacks cbs_, int argc_, char *argv_[]) + : cbs(move(cbs_)) + , argc(argc_) + , argv(argv_) + { + } + + bool parse() + { +#ifndef SPIRV_CROSS_EXCEPTIONS_TO_ASSERTIONS + try +#endif + { + while (argc && !ended_state) + { + const char *next = *argv++; + argc--; + + if (*next != '-' && cbs.default_handler) + { + cbs.default_handler(next); + } + else + { + auto itr = cbs.callbacks.find(next); + if (itr == ::end(cbs.callbacks)) + { + THROW("Invalid argument"); + } + + itr->second(*this); + } + } + + return true; + } +#ifndef SPIRV_CROSS_EXCEPTIONS_TO_ASSERTIONS + catch (...) 
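+ // When exceptions are enabled, a THROW() from any argument callback unwinds to here; parse() reports it through the optional error_handler and returns false instead of crashing.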
+ { + if (cbs.error_handler) + { + cbs.error_handler(); + } + return false; + } +#endif + } + + void end() + { + ended_state = true; + } + + uint32_t next_uint() + { + if (!argc) + { + THROW("Tried to parse uint, but nothing left in arguments"); + } + + uint64_t val = stoul(*argv); + if (val > numeric_limits::max()) + { + THROW("next_uint() out of range"); + } + + argc--; + argv++; + + return uint32_t(val); + } + + double next_double() + { + if (!argc) + { + THROW("Tried to parse double, but nothing left in arguments"); + } + + double val = stod(*argv); + + argc--; + argv++; + + return val; + } + + + const char *next_value_string(const char *default_value) + { + if (!argc) + { + return default_value; + } + + if (0 == strncmp("--", *argv, 2)) + { + return default_value; + } + + return next_string(); + } + + const char *next_string() + { + if (!argc) + { + THROW("Tried to parse string, but nothing left in arguments"); + } + + const char *ret = *argv; + argc--; + argv++; + return ret; + } + + CLICallbacks cbs; + int argc; + char **argv; + bool ended_state = false; +}; + +static vector read_spirv_file(const char *path) +{ + FILE *file = fopen(path, "rb"); + if (!file) + { + fprintf(stderr, "Failed to open SPIR-V file: %s\n", path); + return {}; + } + + fseek(file, 0, SEEK_END); + long len = ftell(file) / sizeof(uint32_t); + rewind(file); + + vector spirv(len); + if (fread(spirv.data(), sizeof(uint32_t), len, file) != size_t(len)) + spirv.clear(); + + fclose(file); + return spirv; +} + +static bool write_string_to_file(const char *path, const char *string) +{ + FILE *file = fopen(path, "w"); + if (!file) + { + fprintf(stderr, "Failed to write file: %s\n", path); + return false; + } + + fprintf(file, "%s", string); + fclose(file); + return true; +} + +static void print_resources(const Compiler &compiler, const char *tag, const SmallVector &resources) +{ + fprintf(stderr, "%s\n", tag); + fprintf(stderr, "=============\n\n"); + bool print_ssbo = !strcmp(tag, "ssbos"); + + for (auto &res : resources) + { + auto &type = compiler.get_type(res.type_id); + + if (print_ssbo && compiler.buffer_is_hlsl_counter_buffer(res.id)) + continue; + + + + + bool is_push_constant = compiler.get_storage_class(res.id) == StorageClassPushConstant; + bool is_block = compiler.get_decoration_bitset(type.self).get(DecorationBlock) || + compiler.get_decoration_bitset(type.self).get(DecorationBufferBlock); + bool is_sized_block = is_block && (compiler.get_storage_class(res.id) == StorageClassUniform || + compiler.get_storage_class(res.id) == StorageClassUniformConstant); + ID fallback_id = !is_push_constant && is_block ? ID(res.base_type_id) : ID(res.id); + + uint32_t block_size = 0; + uint32_t runtime_array_stride = 0; + if (is_sized_block) + { + auto &base_type = compiler.get_type(res.base_type_id); + block_size = uint32_t(compiler.get_declared_struct_size(base_type)); + runtime_array_stride = uint32_t(compiler.get_declared_struct_size_runtime_array(base_type, 1) - + compiler.get_declared_struct_size_runtime_array(base_type, 0)); + } + + Bitset mask; + if (print_ssbo) + mask = compiler.get_buffer_block_flags(res.id); + else + mask = compiler.get_decoration_bitset(res.id); + + string array; + for (auto arr : type.array) + array = join("[", arr ? convert_to_string(arr) : "", "]") + array; + + fprintf(stderr, " ID %03u : %s%s", uint32_t(res.id), + !res.name.empty() ? 
res.name.c_str() : compiler.get_fallback_name(fallback_id).c_str(), array.c_str()); + + if (mask.get(DecorationLocation)) + fprintf(stderr, " (Location : %u)", compiler.get_decoration(res.id, DecorationLocation)); + if (mask.get(DecorationDescriptorSet)) + fprintf(stderr, " (Set : %u)", compiler.get_decoration(res.id, DecorationDescriptorSet)); + if (mask.get(DecorationBinding)) + fprintf(stderr, " (Binding : %u)", compiler.get_decoration(res.id, DecorationBinding)); + if (mask.get(DecorationInputAttachmentIndex)) + fprintf(stderr, " (Attachment : %u)", compiler.get_decoration(res.id, DecorationInputAttachmentIndex)); + if (mask.get(DecorationNonReadable)) + fprintf(stderr, " writeonly"); + if (mask.get(DecorationNonWritable)) + fprintf(stderr, " readonly"); + if (is_sized_block) + { + fprintf(stderr, " (BlockSize : %u bytes)", block_size); + if (runtime_array_stride) + fprintf(stderr, " (Unsized array stride: %u bytes)", runtime_array_stride); + } + + uint32_t counter_id = 0; + if (print_ssbo && compiler.buffer_get_hlsl_counter_buffer(res.id, counter_id)) + fprintf(stderr, " (HLSL counter buffer ID: %u)", counter_id); + fprintf(stderr, "\n"); + } + fprintf(stderr, "=============\n\n"); +} + +static const char *execution_model_to_str(spv::ExecutionModel model) +{ + switch (model) + { + case spv::ExecutionModelVertex: + return "vertex"; + case spv::ExecutionModelTessellationControl: + return "tessellation control"; + case ExecutionModelTessellationEvaluation: + return "tessellation evaluation"; + case ExecutionModelGeometry: + return "geometry"; + case ExecutionModelFragment: + return "fragment"; + case ExecutionModelGLCompute: + return "compute"; + case ExecutionModelRayGenerationNV: + return "raygenNV"; + case ExecutionModelIntersectionNV: + return "intersectionNV"; + case ExecutionModelCallableNV: + return "callableNV"; + case ExecutionModelAnyHitNV: + return "anyhitNV"; + case ExecutionModelClosestHitNV: + return "closesthitNV"; + case ExecutionModelMissNV: + return "missNV"; + default: + return "???"; + } +} + +static void print_resources(const Compiler &compiler, const ShaderResources &res) +{ + auto &modes = compiler.get_execution_mode_bitset(); + + fprintf(stderr, "Entry points:\n"); + auto entry_points = compiler.get_entry_points_and_stages(); + for (auto &e : entry_points) + fprintf(stderr, " %s (%s)\n", e.name.c_str(), execution_model_to_str(e.execution_model)); + fprintf(stderr, "\n"); + + fprintf(stderr, "Execution modes:\n"); + modes.for_each_bit([&](uint32_t i) { + auto mode = static_cast(i); + uint32_t arg0 = compiler.get_execution_mode_argument(mode, 0); + uint32_t arg1 = compiler.get_execution_mode_argument(mode, 1); + uint32_t arg2 = compiler.get_execution_mode_argument(mode, 2); + + switch (static_cast(i)) + { + case ExecutionModeInvocations: + fprintf(stderr, " Invocations: %u\n", arg0); + break; + + case ExecutionModeLocalSize: + fprintf(stderr, " LocalSize: (%u, %u, %u)\n", arg0, arg1, arg2); + break; + + case ExecutionModeOutputVertices: + fprintf(stderr, " OutputVertices: %u\n", arg0); + break; + +#define CHECK_MODE(m) \ + case ExecutionMode##m: \ + fprintf(stderr, " %s\n", #m); \ + break + CHECK_MODE(SpacingEqual); + CHECK_MODE(SpacingFractionalEven); + CHECK_MODE(SpacingFractionalOdd); + CHECK_MODE(VertexOrderCw); + CHECK_MODE(VertexOrderCcw); + CHECK_MODE(PixelCenterInteger); + CHECK_MODE(OriginUpperLeft); + CHECK_MODE(OriginLowerLeft); + CHECK_MODE(EarlyFragmentTests); + CHECK_MODE(PointMode); + CHECK_MODE(Xfb); + CHECK_MODE(DepthReplacing); + 
CHECK_MODE(DepthGreater); + CHECK_MODE(DepthLess); + CHECK_MODE(DepthUnchanged); + CHECK_MODE(LocalSizeHint); + CHECK_MODE(InputPoints); + CHECK_MODE(InputLines); + CHECK_MODE(InputLinesAdjacency); + CHECK_MODE(Triangles); + CHECK_MODE(InputTrianglesAdjacency); + CHECK_MODE(Quads); + CHECK_MODE(Isolines); + CHECK_MODE(OutputPoints); + CHECK_MODE(OutputLineStrip); + CHECK_MODE(OutputTriangleStrip); + CHECK_MODE(VecTypeHint); + CHECK_MODE(ContractionOff); + + default: + break; + } + }); + fprintf(stderr, "\n"); + + print_resources(compiler, "subpass inputs", res.subpass_inputs); + print_resources(compiler, "inputs", res.stage_inputs); + print_resources(compiler, "outputs", res.stage_outputs); + print_resources(compiler, "textures", res.sampled_images); + print_resources(compiler, "separate images", res.separate_images); + print_resources(compiler, "separate samplers", res.separate_samplers); + print_resources(compiler, "images", res.storage_images); + print_resources(compiler, "ssbos", res.storage_buffers); + print_resources(compiler, "ubos", res.uniform_buffers); + print_resources(compiler, "push", res.push_constant_buffers); + print_resources(compiler, "counters", res.atomic_counters); + print_resources(compiler, "acceleration structures", res.acceleration_structures); +} + +static void print_push_constant_resources(const Compiler &compiler, const SmallVector &res) +{ + for (auto &block : res) + { + auto ranges = compiler.get_active_buffer_ranges(block.id); + fprintf(stderr, "Active members in buffer: %s\n", + !block.name.empty() ? block.name.c_str() : compiler.get_fallback_name(block.id).c_str()); + + fprintf(stderr, "==================\n\n"); + for (auto &range : ranges) + { + const auto &name = compiler.get_member_name(block.base_type_id, range.index); + + fprintf(stderr, "Member #%3u (%s): Offset: %4u, Range: %4u\n", range.index, + !name.empty() ? 
name.c_str() : compiler.get_fallback_member_name(range.index).c_str(), + unsigned(range.offset), unsigned(range.range)); + } + fprintf(stderr, "==================\n\n"); + } +} + +static void print_spec_constants(const Compiler &compiler) +{ + auto spec_constants = compiler.get_specialization_constants(); + fprintf(stderr, "Specialization constants\n"); + fprintf(stderr, "==================\n\n"); + for (auto &c : spec_constants) + fprintf(stderr, "ID: %u, Spec ID: %u\n", uint32_t(c.id), c.constant_id); + fprintf(stderr, "==================\n\n"); +} + +static void print_capabilities_and_extensions(const Compiler &compiler) +{ + fprintf(stderr, "Capabilities\n"); + fprintf(stderr, "============\n"); + for (auto &capability : compiler.get_declared_capabilities()) + fprintf(stderr, "Capability: %u\n", static_cast(capability)); + fprintf(stderr, "============\n\n"); + + fprintf(stderr, "Extensions\n"); + fprintf(stderr, "============\n"); + for (auto &ext : compiler.get_declared_extensions()) + fprintf(stderr, "Extension: %s\n", ext.c_str()); + fprintf(stderr, "============\n\n"); +} + +struct PLSArg +{ + PlsFormat format; + string name; +}; + +struct Remap +{ + string src_name; + string dst_name; + unsigned components; +}; + +struct VariableTypeRemap +{ + string variable_name; + string new_variable_type; +}; + +struct InterfaceVariableRename +{ + StorageClass storageClass; + uint32_t location; + string variable_name; +}; + +struct CLIArguments +{ + const char *input = nullptr; + const char *output = nullptr; + const char *cpp_interface_name = nullptr; + uint32_t version = 0; + uint32_t shader_model = 0; + uint32_t msl_version = 0; + bool es = false; + bool set_version = false; + bool set_shader_model = false; + bool set_msl_version = false; + bool set_es = false; + bool dump_resources = false; + bool force_temporary = false; + bool flatten_ubo = false; + bool fixup = false; + bool yflip = false; + bool sso = false; + bool support_nonzero_baseinstance = true; + bool msl_capture_output_to_buffer = false; + bool msl_swizzle_texture_samples = false; + bool msl_ios = false; + bool msl_pad_fragment_output = false; + bool msl_domain_lower_left = false; + bool msl_argument_buffers = false; + bool msl_texture_buffer_native = false; + bool msl_framebuffer_fetch = false; + bool msl_invariant_float_math = false; + bool msl_emulate_cube_array = false; + bool msl_multiview = false; + bool msl_view_index_from_device_index = false; + bool msl_dispatch_base = false; + bool glsl_emit_push_constant_as_ubo = false; + bool glsl_emit_ubo_as_plain_uniforms = false; + bool vulkan_glsl_disable_ext_samplerless_texture_functions = false; + bool emit_line_directives = false; + SmallVector msl_discrete_descriptor_sets; + SmallVector msl_device_argument_buffers; + SmallVector> msl_dynamic_buffers; + SmallVector pls_in; + SmallVector pls_out; + SmallVector remaps; + SmallVector extensions; + SmallVector variable_type_remaps; + SmallVector interface_variable_renames; + SmallVector hlsl_attr_remap; + string entry; + string entry_stage; + + struct Rename + { + string old_name; + string new_name; + ExecutionModel execution_model; + }; + SmallVector entry_point_rename; + + uint32_t iterations = 1; + bool cpp = false; + string reflect; + bool msl = false; + bool hlsl = false; + bool hlsl_compat = false; + bool hlsl_support_nonzero_base = false; + bool vulkan_semantics = false; + bool flatten_multidimensional_arrays = false; + bool use_420pack_extension = true; + bool remove_unused = false; + bool 
combined_samplers_inherit_bindings = false; +}; + +static void print_version() +{ +#ifdef HAVE_SPIRV_CROSS_GIT_VERSION + fprintf(stderr, "%s\n", SPIRV_CROSS_GIT_REVISION); +#else + fprintf(stderr, "Git revision unknown. Build with CMake to create timestamp and revision info.\n"); +#endif +} + +static void print_help() +{ + print_version(); + + fprintf(stderr, "Usage: spirv-cross\n" + "\t[--output ]\n" + "\t[SPIR-V file]\n" + "\t[--es]\n" + "\t[--no-es]\n" + "\t[--version ]\n" + "\t[--dump-resources]\n" + "\t[--help]\n" + "\t[--revision]\n" + "\t[--force-temporary]\n" + "\t[--vulkan-semantics]\n" + "\t[--flatten-ubo]\n" + "\t[--fixup-clipspace]\n" + "\t[--flip-vert-y]\n" + "\t[--iterations iter]\n" + "\t[--cpp]\n" + "\t[--cpp-interface-name ]\n" + "\t[--glsl-emit-push-constant-as-ubo]\n" + "\t[--glsl-emit-ubo-as-plain-uniforms]\n" + "\t[--vulkan-glsl-disable-ext-samplerless-texture-functions]\n" + "\t[--msl]\n" + "\t[--msl-version ]\n" + "\t[--msl-capture-output]\n" + "\t[--msl-swizzle-texture-samples]\n" + "\t[--msl-ios]\n" + "\t[--msl-pad-fragment-output]\n" + "\t[--msl-domain-lower-left]\n" + "\t[--msl-argument-buffers]\n" + "\t[--msl-texture-buffer-native]\n" + "\t[--msl-framebuffer-fetch]\n" + "\t[--msl-emulate-cube-array]\n" + "\t[--msl-discrete-descriptor-set ]\n" + "\t[--msl-device-argument-buffer ]\n" + "\t[--msl-multiview]\n" + "\t[--msl-view-index-from-device-index]\n" + "\t[--msl-dispatch-base]\n" + "\t[--msl-dynamic-buffer ]\n" + "\t[--hlsl]\n" + "\t[--reflect]\n" + "\t[--shader-model]\n" + "\t[--hlsl-enable-compat]\n" + "\t[--hlsl-support-nonzero-basevertex-baseinstance]\n" + "\t[--separate-shader-objects]\n" + "\t[--pls-in format input-name]\n" + "\t[--pls-out format output-name]\n" + "\t[--remap source_name target_name components]\n" + "\t[--extension ext]\n" + "\t[--entry name]\n" + "\t[--stage ]\n" + "\t[--remove-unused-variables]\n" + "\t[--flatten-multidimensional-arrays]\n" + "\t[--no-420pack-extension]\n" + "\t[--remap-variable-type ]\n" + "\t[--rename-interface-variable ]\n" + "\t[--set-hlsl-vertex-input-semantic ]\n" + "\t[--rename-entry-point ]\n" + "\t[--combined-samplers-inherit-bindings]\n" + "\t[--no-support-nonzero-baseinstance]\n" + "\t[--emit-line-directives]\n" + "\n"); +} + +static bool remap_generic(Compiler &compiler, const SmallVector &resources, const Remap &remap) +{ + auto itr = + find_if(begin(resources), end(resources), [&remap](const Resource &res) { return res.name == remap.src_name; }); + + if (itr != end(resources)) + { + compiler.set_remapped_variable_state(itr->id, true); + compiler.set_name(itr->id, remap.dst_name); + compiler.set_subpass_input_remapped_components(itr->id, remap.components); + return true; + } + else + return false; +} + +static vector remap_pls(const SmallVector &pls_variables, const SmallVector &resources, + const SmallVector *secondary_resources) +{ + vector ret; + + for (auto &pls : pls_variables) + { + bool found = false; + for (auto &res : resources) + { + if (res.name == pls.name) + { + ret.push_back({ res.id, pls.format }); + found = true; + break; + } + } + + if (!found && secondary_resources) + { + for (auto &res : *secondary_resources) + { + if (res.name == pls.name) + { + ret.push_back({ res.id, pls.format }); + found = true; + break; + } + } + } + + if (!found) + fprintf(stderr, "Did not find stage input/output/target with name \"%s\".\n", pls.name.c_str()); + } + + return ret; +} + +static PlsFormat pls_format(const char *str) +{ + if (!strcmp(str, "r11f_g11f_b10f")) + return PlsR11FG11FB10F; + else if 
(!strcmp(str, "r32f")) + return PlsR32F; + else if (!strcmp(str, "rg16f")) + return PlsRG16F; + else if (!strcmp(str, "rg16")) + return PlsRG16; + else if (!strcmp(str, "rgb10_a2")) + return PlsRGB10A2; + else if (!strcmp(str, "rgba8")) + return PlsRGBA8; + else if (!strcmp(str, "rgba8i")) + return PlsRGBA8I; + else if (!strcmp(str, "rgba8ui")) + return PlsRGBA8UI; + else if (!strcmp(str, "rg16i")) + return PlsRG16I; + else if (!strcmp(str, "rgb10_a2ui")) + return PlsRGB10A2UI; + else if (!strcmp(str, "rg16ui")) + return PlsRG16UI; + else if (!strcmp(str, "r32ui")) + return PlsR32UI; + else + return PlsNone; +} + +static ExecutionModel stage_to_execution_model(const std::string &stage) +{ + if (stage == "vert") + return ExecutionModelVertex; + else if (stage == "frag") + return ExecutionModelFragment; + else if (stage == "comp") + return ExecutionModelGLCompute; + else if (stage == "tesc") + return ExecutionModelTessellationControl; + else if (stage == "tese") + return ExecutionModelTessellationEvaluation; + else if (stage == "geom") + return ExecutionModelGeometry; + else + SPIRV_CROSS_THROW("Invalid stage."); +} + +static string compile_iteration(const CLIArguments &args, std::vector spirv_file) +{ + Parser spirv_parser(move(spirv_file)); + spirv_parser.parse(); + + unique_ptr compiler; + bool combined_image_samplers = false; + bool build_dummy_sampler = false; + + if (args.cpp) + { + compiler.reset(new CompilerCPP(move(spirv_parser.get_parsed_ir()))); + if (args.cpp_interface_name) + static_cast(compiler.get())->set_interface_name(args.cpp_interface_name); + } + else if (args.msl) + { + compiler.reset(new CompilerMSL(move(spirv_parser.get_parsed_ir()))); + + auto *msl_comp = static_cast(compiler.get()); + auto msl_opts = msl_comp->get_msl_options(); + if (args.set_msl_version) + msl_opts.msl_version = args.msl_version; + msl_opts.capture_output_to_buffer = args.msl_capture_output_to_buffer; + msl_opts.swizzle_texture_samples = args.msl_swizzle_texture_samples; + msl_opts.invariant_float_math = args.msl_invariant_float_math; + if (args.msl_ios) + { + msl_opts.platform = CompilerMSL::Options::iOS; + msl_opts.ios_use_framebuffer_fetch_subpasses = args.msl_framebuffer_fetch; + msl_opts.emulate_cube_array = args.msl_emulate_cube_array; + } + msl_opts.pad_fragment_output_components = args.msl_pad_fragment_output; + msl_opts.tess_domain_origin_lower_left = args.msl_domain_lower_left; + msl_opts.argument_buffers = args.msl_argument_buffers; + msl_opts.texture_buffer_native = args.msl_texture_buffer_native; + msl_opts.multiview = args.msl_multiview; + msl_opts.view_index_from_device_index = args.msl_view_index_from_device_index; + msl_opts.dispatch_base = args.msl_dispatch_base; + msl_comp->set_msl_options(msl_opts); + for (auto &v : args.msl_discrete_descriptor_sets) + msl_comp->add_discrete_descriptor_set(v); + for (auto &v : args.msl_device_argument_buffers) + msl_comp->set_argument_buffer_device_address_space(v, true); + uint32_t i = 0; + for (auto &v : args.msl_dynamic_buffers) + msl_comp->add_dynamic_buffer(v.first, v.second, i++); + } + else if (args.hlsl) + compiler.reset(new CompilerHLSL(move(spirv_parser.get_parsed_ir()))); + else + { + combined_image_samplers = !args.vulkan_semantics; + if (!args.vulkan_semantics || args.vulkan_glsl_disable_ext_samplerless_texture_functions) + build_dummy_sampler = true; + compiler.reset(new CompilerGLSL(move(spirv_parser.get_parsed_ir()))); + } + + if (!args.variable_type_remaps.empty()) + { + auto remap_cb = [&](const SPIRType &, const string &name, 
+ if (!args.variable_type_remaps.empty()) + { + auto remap_cb = [&](const SPIRType &, const string &name, string &out) -> void { + for (const VariableTypeRemap &remap : args.variable_type_remaps) + if (name == remap.variable_name) + out = remap.new_variable_type; + }; + + compiler->set_variable_type_remap_callback(move(remap_cb)); + } + + for (auto &rename : args.entry_point_rename) + compiler->rename_entry_point(rename.old_name, rename.new_name, rename.execution_model); + + auto entry_points = compiler->get_entry_points_and_stages(); + auto entry_point = args.entry; + ExecutionModel model = ExecutionModelMax; + + if (!args.entry_stage.empty()) + { + model = stage_to_execution_model(args.entry_stage); + if (entry_point.empty()) + { + + for (auto &e : entry_points) + { + if (e.execution_model == model) + { + entry_point = e.name; + break; + } + } + + if (entry_point.empty()) + { + fprintf(stderr, "Could not find an entry point with stage: %s\n", args.entry_stage.c_str()); + exit(EXIT_FAILURE); + } + } + else + { + + bool exists = false; + for (auto &e : entry_points) + { + if (e.execution_model == model && e.name == entry_point) + { + exists = true; + break; + } + } + + if (!exists) + { + fprintf(stderr, "Could not find an entry point %s with stage: %s\n", entry_point.c_str(), + args.entry_stage.c_str()); + exit(EXIT_FAILURE); + } + } + } + else if (!entry_point.empty()) + { + + + uint32_t stage_count = 0; + for (auto &e : entry_points) + { + if (e.name == entry_point) + { + stage_count++; + model = e.execution_model; + } + } + + if (stage_count == 0) + { + fprintf(stderr, "There is no entry point with name: %s\n", entry_point.c_str()); + exit(EXIT_FAILURE); + } + else if (stage_count > 1) + { + fprintf(stderr, "There is more than one entry point with name: %s. Use --stage.\n", entry_point.c_str()); + exit(EXIT_FAILURE); + } + } + + if (!entry_point.empty()) + compiler->set_entry_point(entry_point, model); + + if (!args.set_version && !compiler->get_common_options().version) + { + fprintf(stderr, "Didn't specify GLSL version and SPIR-V did not specify language.\n"); + print_help(); + exit(EXIT_FAILURE); + } + + CompilerGLSL::Options opts = compiler->get_common_options(); + if (args.set_version) + opts.version = args.version; + if (args.set_es) + opts.es = args.es; + opts.force_temporary = args.force_temporary; + opts.separate_shader_objects = args.sso; + opts.flatten_multidimensional_arrays = args.flatten_multidimensional_arrays; + opts.enable_420pack_extension = args.use_420pack_extension; + opts.vulkan_semantics = args.vulkan_semantics; + opts.vertex.fixup_clipspace = args.fixup; + opts.vertex.flip_vert_y = args.yflip; + opts.vertex.support_nonzero_base_instance = args.support_nonzero_baseinstance; + opts.emit_push_constant_as_uniform_buffer = args.glsl_emit_push_constant_as_ubo; + opts.emit_uniform_buffer_as_plain_uniforms = args.glsl_emit_ubo_as_plain_uniforms; + opts.emit_line_directives = args.emit_line_directives; + compiler->set_common_options(opts); + + + if (args.hlsl) + { + auto *hlsl = static_cast<CompilerHLSL *>(compiler.get()); + auto hlsl_opts = hlsl->get_hlsl_options(); + if (args.set_shader_model) + { + if (args.shader_model < 30) + { + fprintf(stderr, "Shader model earlier than 30 (3.0) not supported.\n"); + exit(EXIT_FAILURE); + } + + hlsl_opts.shader_model = args.shader_model; + } + + if (args.hlsl_compat) + { + + hlsl_opts.point_size_compat = true; + hlsl_opts.point_coord_compat = true; + } + + if (hlsl_opts.shader_model <= 30) + { + combined_image_samplers = true; + build_dummy_sampler = true; + } + + hlsl_opts.support_nonzero_base_vertex_base_instance = args.hlsl_support_nonzero_base; + hlsl->set_hlsl_options(hlsl_opts); + }
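+ // Illustrative note, not upstream code: the common-options block and the HLSL
+ // block above share the same copy-modify-write pattern:
+ //   auto o = compiler->get_common_options(); // copy out
+ //   o.version = 450;                         // mutate the copy
+ //   compiler->set_common_options(o);         // commit back in one step
+ // so the compiler never observes a half-edited options struct.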
+ + if (build_dummy_sampler) + { + uint32_t sampler = compiler->build_dummy_sampler_for_combined_images(); + if (sampler != 0) + { + + compiler->set_decoration(sampler, DecorationDescriptorSet, 0); + compiler->set_decoration(sampler, DecorationBinding, 0); + } + } + + ShaderResources res; + if (args.remove_unused) + { + auto active = compiler->get_active_interface_variables(); + res = compiler->get_shader_resources(active); + compiler->set_enabled_interface_variables(move(active)); + } + else + res = compiler->get_shader_resources(); + + if (args.flatten_ubo) + { + for (auto &ubo : res.uniform_buffers) + compiler->flatten_buffer_block(ubo.id); + for (auto &ubo : res.push_constant_buffers) + compiler->flatten_buffer_block(ubo.id); + } + + auto pls_inputs = remap_pls(args.pls_in, res.stage_inputs, &res.subpass_inputs); + auto pls_outputs = remap_pls(args.pls_out, res.stage_outputs, nullptr); + compiler->remap_pixel_local_storage(move(pls_inputs), move(pls_outputs)); + + for (auto &ext : args.extensions) + compiler->require_extension(ext); + + for (auto &remap : args.remaps) + { + if (remap_generic(*compiler, res.stage_inputs, remap)) + continue; + if (remap_generic(*compiler, res.stage_outputs, remap)) + continue; + if (remap_generic(*compiler, res.subpass_inputs, remap)) + continue; + } + + for (auto &rename : args.interface_variable_renames) + { + if (rename.storageClass == StorageClassInput) + spirv_cross_util::rename_interface_variable(*compiler, res.stage_inputs, rename.location, + rename.variable_name); + else if (rename.storageClass == StorageClassOutput) + spirv_cross_util::rename_interface_variable(*compiler, res.stage_outputs, rename.location, + rename.variable_name); + else + { + fprintf(stderr, "error at --rename-interface-variable <in|out> ...\n"); + exit(EXIT_FAILURE); + } + } + + if (args.dump_resources) + { + print_resources(*compiler, res); + print_push_constant_resources(*compiler, res.push_constant_buffers); + print_spec_constants(*compiler); + print_capabilities_and_extensions(*compiler); + } + + if (combined_image_samplers) + { + compiler->build_combined_image_samplers(); + if (args.combined_samplers_inherit_bindings) + spirv_cross_util::inherit_combined_sampler_bindings(*compiler); + + + for (auto &remap : compiler->get_combined_image_samplers()) + { + compiler->set_name(remap.combined_id, join("SPIRV_Cross_Combined", compiler->get_name(remap.image_id), + compiler->get_name(remap.sampler_id))); + } + } + + if (args.hlsl) + { + auto *hlsl_compiler = static_cast<CompilerHLSL *>(compiler.get()); + uint32_t new_builtin = hlsl_compiler->remap_num_workgroups_builtin(); + if (new_builtin) + { + hlsl_compiler->set_decoration(new_builtin, DecorationDescriptorSet, 0); + hlsl_compiler->set_decoration(new_builtin, DecorationBinding, 0); + } + } + + if (args.hlsl) + { + for (auto &remap : args.hlsl_attr_remap) + static_cast<CompilerHLSL *>(compiler.get())->add_vertex_attribute_remap(remap); + } + + return compiler->compile(); +} + +static int main_inner(int argc, char *argv[]) +{ + CLIArguments args; + CLICallbacks cbs; + + cbs.add("--help", [](CLIParser &parser) { + print_help(); + parser.end(); + }); + cbs.add("--revision", [](CLIParser &parser) { + print_version(); + parser.end(); + }); + cbs.add("--output", [&args](CLIParser &parser) { args.output = parser.next_string(); }); + cbs.add("--es", [&args](CLIParser &) { + args.es = true; + args.set_es = true; + }); + cbs.add("--no-es", [&args](CLIParser &) { + args.es = false; + args.set_es = true; + }); + cbs.add("--version", [&args](CLIParser &parser) { + args.version = parser.next_uint(); + args.set_version = true; + });
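+ // Illustrative note, not upstream code: every option is a name paired with a
+ // lambda, so a hypothetical new flag would follow the same shape, pulling any
+ // operands off the parser:
+ //   cbs.add("--example-flag", [&args](CLIParser &parser) {
+ //       uint32_t value = parser.next_uint(); // or parser.next_string()
+ //       /* record value on args */
+ //   });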
+ cbs.add("--dump-resources", [&args](CLIParser &) { args.dump_resources = true; }); + cbs.add("--force-temporary", [&args](CLIParser &) { args.force_temporary = true; }); + cbs.add("--flatten-ubo", [&args](CLIParser &) { args.flatten_ubo = true; }); + cbs.add("--fixup-clipspace", [&args](CLIParser &) { args.fixup = true; }); + cbs.add("--flip-vert-y", [&args](CLIParser &) { args.yflip = true; }); + cbs.add("--iterations", [&args](CLIParser &parser) { args.iterations = parser.next_uint(); }); + cbs.add("--cpp", [&args](CLIParser &) { args.cpp = true; }); + cbs.add("--reflect", [&args](CLIParser &parser) { args.reflect = parser.next_value_string("json"); }); + cbs.add("--cpp-interface-name", [&args](CLIParser &parser) { args.cpp_interface_name = parser.next_string(); }); + cbs.add("--metal", [&args](CLIParser &) { args.msl = true; }); + cbs.add("--glsl-emit-push-constant-as-ubo", [&args](CLIParser &) { args.glsl_emit_push_constant_as_ubo = true; }); + cbs.add("--glsl-emit-ubo-as-plain-uniforms", [&args](CLIParser &) { args.glsl_emit_ubo_as_plain_uniforms = true; }); + cbs.add("--vulkan-glsl-disable-ext-samplerless-texture-functions", + [&args](CLIParser &) { args.vulkan_glsl_disable_ext_samplerless_texture_functions = true; }); + cbs.add("--msl", [&args](CLIParser &) { args.msl = true; }); + cbs.add("--hlsl", [&args](CLIParser &) { args.hlsl = true; }); + cbs.add("--hlsl-enable-compat", [&args](CLIParser &) { args.hlsl_compat = true; }); + cbs.add("--hlsl-support-nonzero-basevertex-baseinstance", + [&args](CLIParser &) { args.hlsl_support_nonzero_base = true; }); + cbs.add("--vulkan-semantics", [&args](CLIParser &) { args.vulkan_semantics = true; }); + cbs.add("--flatten-multidimensional-arrays", [&args](CLIParser &) { args.flatten_multidimensional_arrays = true; }); + cbs.add("--no-420pack-extension", [&args](CLIParser &) { args.use_420pack_extension = false; }); + cbs.add("--msl-capture-output", [&args](CLIParser &) { args.msl_capture_output_to_buffer = true; }); + cbs.add("--msl-swizzle-texture-samples", [&args](CLIParser &) { args.msl_swizzle_texture_samples = true; }); + cbs.add("--msl-ios", [&args](CLIParser &) { args.msl_ios = true; }); + cbs.add("--msl-pad-fragment-output", [&args](CLIParser &) { args.msl_pad_fragment_output = true; }); + cbs.add("--msl-domain-lower-left", [&args](CLIParser &) { args.msl_domain_lower_left = true; }); + cbs.add("--msl-argument-buffers", [&args](CLIParser &) { args.msl_argument_buffers = true; }); + cbs.add("--msl-discrete-descriptor-set", + [&args](CLIParser &parser) { args.msl_discrete_descriptor_sets.push_back(parser.next_uint()); }); + cbs.add("--msl-device-argument-buffer", + [&args](CLIParser &parser) { args.msl_device_argument_buffers.push_back(parser.next_uint()); }); + cbs.add("--msl-texture-buffer-native", [&args](CLIParser &) { args.msl_texture_buffer_native = true; }); + cbs.add("--msl-framebuffer-fetch", [&args](CLIParser &) { args.msl_framebuffer_fetch = true; }); + cbs.add("--msl-invariant-float-math", [&args](CLIParser &) { args.msl_invariant_float_math = true; }); + cbs.add("--msl-emulate-cube-array", [&args](CLIParser &) { args.msl_emulate_cube_array = true; }); + cbs.add("--msl-multiview", [&args](CLIParser &) { args.msl_multiview = true; }); + cbs.add("--msl-view-index-from-device-index", + [&args](CLIParser &) { args.msl_view_index_from_device_index = true; }); + cbs.add("--msl-dispatch-base", [&args](CLIParser &) { 
args.msl_dispatch_base = true; }); + cbs.add("--msl-dynamic-buffer", [&args](CLIParser &parser) { + args.msl_argument_buffers = true; + + uint32_t desc_set = parser.next_uint(); + uint32_t binding = parser.next_uint(); + args.msl_dynamic_buffers.push_back(make_pair(desc_set, binding)); + }); + cbs.add("--extension", [&args](CLIParser &parser) { args.extensions.push_back(parser.next_string()); }); + cbs.add("--rename-entry-point", [&args](CLIParser &parser) { + auto old_name = parser.next_string(); + auto new_name = parser.next_string(); + auto model = stage_to_execution_model(parser.next_string()); + args.entry_point_rename.push_back({ old_name, new_name, move(model) }); + }); + cbs.add("--entry", [&args](CLIParser &parser) { args.entry = parser.next_string(); }); + cbs.add("--stage", [&args](CLIParser &parser) { args.entry_stage = parser.next_string(); }); + cbs.add("--separate-shader-objects", [&args](CLIParser &) { args.sso = true; }); + cbs.add("--set-hlsl-vertex-input-semantic", [&args](CLIParser &parser) { + HLSLVertexAttributeRemap remap; + remap.location = parser.next_uint(); + remap.semantic = parser.next_string(); + args.hlsl_attr_remap.push_back(move(remap)); + }); + + cbs.add("--remap", [&args](CLIParser &parser) { + string src = parser.next_string(); + string dst = parser.next_string(); + uint32_t components = parser.next_uint(); + args.remaps.push_back({ move(src), move(dst), components }); + }); + + cbs.add("--remap-variable-type", [&args](CLIParser &parser) { + string var_name = parser.next_string(); + string new_type = parser.next_string(); + args.variable_type_remaps.push_back({ move(var_name), move(new_type) }); + }); + + cbs.add("--rename-interface-variable", [&args](CLIParser &parser) { + StorageClass cls = StorageClassMax; + string clsStr = parser.next_string(); + if (clsStr == "in") + cls = StorageClassInput; + else if (clsStr == "out") + cls = StorageClassOutput; + + uint32_t loc = parser.next_uint(); + string var_name = parser.next_string(); + args.interface_variable_renames.push_back({ cls, loc, move(var_name) }); + }); + + cbs.add("--pls-in", [&args](CLIParser &parser) { + auto fmt = pls_format(parser.next_string()); + auto name = parser.next_string(); + args.pls_in.push_back({ move(fmt), move(name) }); + }); + cbs.add("--pls-out", [&args](CLIParser &parser) { + auto fmt = pls_format(parser.next_string()); + auto name = parser.next_string(); + args.pls_out.push_back({ move(fmt), move(name) }); + }); + cbs.add("--shader-model", [&args](CLIParser &parser) { + args.shader_model = parser.next_uint(); + args.set_shader_model = true; + }); + cbs.add("--msl-version", [&args](CLIParser &parser) { + args.msl_version = parser.next_uint(); + args.set_msl_version = true; + }); + + cbs.add("--remove-unused-variables", [&args](CLIParser &) { args.remove_unused = true; }); + cbs.add("--combined-samplers-inherit-bindings", + [&args](CLIParser &) { args.combined_samplers_inherit_bindings = true; }); + + cbs.add("--no-support-nonzero-baseinstance", [&](CLIParser &) { args.support_nonzero_baseinstance = false; }); + cbs.add("--emit-line-directives", [&args](CLIParser &) { args.emit_line_directives = true; }); + + cbs.default_handler = [&args](const char *value) { args.input = value; }; + cbs.error_handler = [] { print_help(); }; + + CLIParser parser{ move(cbs), argc - 1, argv + 1 }; + if (!parser.parse()) + return EXIT_FAILURE; + else if (parser.ended_state) + return EXIT_SUCCESS; + + if (!args.input) + { + fprintf(stderr, "Didn't specify input file.\n"); + print_help(); + return 
EXIT_FAILURE; + } + + auto spirv_file = read_spirv_file(args.input); + if (spirv_file.empty()) + return EXIT_FAILURE; + + + if (!args.reflect.empty()) + { + Parser spirv_parser(move(spirv_file)); + spirv_parser.parse(); + + CompilerReflection compiler(move(spirv_parser.get_parsed_ir())); + compiler.set_format(args.reflect); + auto json = compiler.compile(); + if (args.output) + write_string_to_file(args.output, json.c_str()); + else + printf("%s", json.c_str()); + return EXIT_SUCCESS; + } + + string compiled_output; + + if (args.iterations == 1) + compiled_output = compile_iteration(args, move(spirv_file)); + else + { + for (unsigned i = 0; i < args.iterations; i++) + compiled_output = compile_iteration(args, spirv_file); + } + + if (args.output) + write_string_to_file(args.output, compiled_output.c_str()); + else + printf("%s", compiled_output.c_str()); + + return EXIT_SUCCESS; +} + +int main(int argc, char *argv[]) +{ +#ifdef SPIRV_CROSS_EXCEPTIONS_TO_ASSERTIONS + return main_inner(argc, argv); +#else + + try + { + return main_inner(argc, argv); + } + catch (const std::exception &e) + { + fprintf(stderr, "SPIRV-Cross threw an exception: %s\n", e.what()); + return EXIT_FAILURE; + } +#endif +} diff --git a/third_party/rust/spirv_cross/src/vendor/SPIRV-Cross/pkg-config/spirv-cross-c-shared.pc.in b/third_party/rust/spirv_cross/src/vendor/SPIRV-Cross/pkg-config/spirv-cross-c-shared.pc.in new file mode 100644 index 000000000000..823e4ce48bb7 --- /dev/null +++ b/third_party/rust/spirv_cross/src/vendor/SPIRV-Cross/pkg-config/spirv-cross-c-shared.pc.in @@ -0,0 +1,13 @@ +prefix=@CMAKE_INSTALL_PREFIX@ +exec_prefix=@CMAKE_INSTALL_PREFIX@ +libdir=@SPIRV_CROSS_INSTALL_LIB_DIR@ +sharedlibdir=@SPIRV_CROSS_INSTALL_LIB_DIR@ +includedir=@SPIRV_CROSS_INSTALL_INC_DIR@ + +Name: spirv-cross-c-shared +Description: C API for SPIRV-Cross +Version: @SPIRV_CROSS_VERSION@ + +Requires: +Libs: -L${libdir} -L${sharedlibdir} -lspirv-cross-c-shared +Cflags: -I${includedir} diff --git a/third_party/rust/spirv_cross/src/vendor/SPIRV-Cross/spirv.h b/third_party/rust/spirv_cross/src/vendor/SPIRV-Cross/spirv.h new file mode 100644 index 000000000000..4added27c953 --- /dev/null +++ b/third_party/rust/spirv_cross/src/vendor/SPIRV-Cross/spirv.h @@ -0,0 +1,1972 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +#ifndef spirv_H +#define spirv_H + +typedef unsigned int SpvId; + +#define SPV_VERSION 0x10500 +#define SPV_REVISION 1 + +static const unsigned int SpvMagicNumber = 0x07230203; +static const unsigned int SpvVersion = 0x00010400; +static const unsigned int SpvRevision = 1; +static const unsigned int SpvOpCodeMask = 0xffff; +static const unsigned int SpvWordCountShift = 16; + +typedef enum SpvSourceLanguage_ { + SpvSourceLanguageUnknown = 0, + SpvSourceLanguageESSL = 1, + SpvSourceLanguageGLSL = 2, + SpvSourceLanguageOpenCL_C = 3, + SpvSourceLanguageOpenCL_CPP = 4, + SpvSourceLanguageHLSL = 5, + SpvSourceLanguageMax = 0x7fffffff, +} SpvSourceLanguage; + +typedef enum SpvExecutionModel_ { + SpvExecutionModelVertex = 0, + SpvExecutionModelTessellationControl = 1, + SpvExecutionModelTessellationEvaluation = 2, + SpvExecutionModelGeometry = 3, + SpvExecutionModelFragment = 4, + SpvExecutionModelGLCompute = 5, + SpvExecutionModelKernel = 6, + SpvExecutionModelTaskNV = 5267, + SpvExecutionModelMeshNV = 5268, + SpvExecutionModelRayGenerationNV = 5313, + SpvExecutionModelIntersectionNV = 5314, + SpvExecutionModelAnyHitNV = 5315, + SpvExecutionModelClosestHitNV = 5316, + 
SpvExecutionModelMissNV = 5317, + SpvExecutionModelCallableNV = 5318, + SpvExecutionModelMax = 0x7fffffff, +} SpvExecutionModel; + +typedef enum SpvAddressingModel_ { + SpvAddressingModelLogical = 0, + SpvAddressingModelPhysical32 = 1, + SpvAddressingModelPhysical64 = 2, + SpvAddressingModelPhysicalStorageBuffer64 = 5348, + SpvAddressingModelPhysicalStorageBuffer64EXT = 5348, + SpvAddressingModelMax = 0x7fffffff, +} SpvAddressingModel; + +typedef enum SpvMemoryModel_ { + SpvMemoryModelSimple = 0, + SpvMemoryModelGLSL450 = 1, + SpvMemoryModelOpenCL = 2, + SpvMemoryModelVulkan = 3, + SpvMemoryModelVulkanKHR = 3, + SpvMemoryModelMax = 0x7fffffff, +} SpvMemoryModel; + +typedef enum SpvExecutionMode_ { + SpvExecutionModeInvocations = 0, + SpvExecutionModeSpacingEqual = 1, + SpvExecutionModeSpacingFractionalEven = 2, + SpvExecutionModeSpacingFractionalOdd = 3, + SpvExecutionModeVertexOrderCw = 4, + SpvExecutionModeVertexOrderCcw = 5, + SpvExecutionModePixelCenterInteger = 6, + SpvExecutionModeOriginUpperLeft = 7, + SpvExecutionModeOriginLowerLeft = 8, + SpvExecutionModeEarlyFragmentTests = 9, + SpvExecutionModePointMode = 10, + SpvExecutionModeXfb = 11, + SpvExecutionModeDepthReplacing = 12, + SpvExecutionModeDepthGreater = 14, + SpvExecutionModeDepthLess = 15, + SpvExecutionModeDepthUnchanged = 16, + SpvExecutionModeLocalSize = 17, + SpvExecutionModeLocalSizeHint = 18, + SpvExecutionModeInputPoints = 19, + SpvExecutionModeInputLines = 20, + SpvExecutionModeInputLinesAdjacency = 21, + SpvExecutionModeTriangles = 22, + SpvExecutionModeInputTrianglesAdjacency = 23, + SpvExecutionModeQuads = 24, + SpvExecutionModeIsolines = 25, + SpvExecutionModeOutputVertices = 26, + SpvExecutionModeOutputPoints = 27, + SpvExecutionModeOutputLineStrip = 28, + SpvExecutionModeOutputTriangleStrip = 29, + SpvExecutionModeVecTypeHint = 30, + SpvExecutionModeContractionOff = 31, + SpvExecutionModeInitializer = 33, + SpvExecutionModeFinalizer = 34, + SpvExecutionModeSubgroupSize = 35, + SpvExecutionModeSubgroupsPerWorkgroup = 36, + SpvExecutionModeSubgroupsPerWorkgroupId = 37, + SpvExecutionModeLocalSizeId = 38, + SpvExecutionModeLocalSizeHintId = 39, + SpvExecutionModePostDepthCoverage = 4446, + SpvExecutionModeDenormPreserve = 4459, + SpvExecutionModeDenormFlushToZero = 4460, + SpvExecutionModeSignedZeroInfNanPreserve = 4461, + SpvExecutionModeRoundingModeRTE = 4462, + SpvExecutionModeRoundingModeRTZ = 4463, + SpvExecutionModeStencilRefReplacingEXT = 5027, + SpvExecutionModeOutputLinesNV = 5269, + SpvExecutionModeOutputPrimitivesNV = 5270, + SpvExecutionModeDerivativeGroupQuadsNV = 5289, + SpvExecutionModeDerivativeGroupLinearNV = 5290, + SpvExecutionModeOutputTrianglesNV = 5298, + SpvExecutionModePixelInterlockOrderedEXT = 5366, + SpvExecutionModePixelInterlockUnorderedEXT = 5367, + SpvExecutionModeSampleInterlockOrderedEXT = 5368, + SpvExecutionModeSampleInterlockUnorderedEXT = 5369, + SpvExecutionModeShadingRateInterlockOrderedEXT = 5370, + SpvExecutionModeShadingRateInterlockUnorderedEXT = 5371, + SpvExecutionModeMax = 0x7fffffff, +} SpvExecutionMode; + +typedef enum SpvStorageClass_ { + SpvStorageClassUniformConstant = 0, + SpvStorageClassInput = 1, + SpvStorageClassUniform = 2, + SpvStorageClassOutput = 3, + SpvStorageClassWorkgroup = 4, + SpvStorageClassCrossWorkgroup = 5, + SpvStorageClassPrivate = 6, + SpvStorageClassFunction = 7, + SpvStorageClassGeneric = 8, + SpvStorageClassPushConstant = 9, + SpvStorageClassAtomicCounter = 10, + SpvStorageClassImage = 11, + SpvStorageClassStorageBuffer = 12, + 
SpvStorageClassCallableDataNV = 5328, + SpvStorageClassIncomingCallableDataNV = 5329, + SpvStorageClassRayPayloadNV = 5338, + SpvStorageClassHitAttributeNV = 5339, + SpvStorageClassIncomingRayPayloadNV = 5342, + SpvStorageClassShaderRecordBufferNV = 5343, + SpvStorageClassPhysicalStorageBuffer = 5349, + SpvStorageClassPhysicalStorageBufferEXT = 5349, + SpvStorageClassMax = 0x7fffffff, +} SpvStorageClass; + +typedef enum SpvDim_ { + SpvDim1D = 0, + SpvDim2D = 1, + SpvDim3D = 2, + SpvDimCube = 3, + SpvDimRect = 4, + SpvDimBuffer = 5, + SpvDimSubpassData = 6, + SpvDimMax = 0x7fffffff, +} SpvDim; + +typedef enum SpvSamplerAddressingMode_ { + SpvSamplerAddressingModeNone = 0, + SpvSamplerAddressingModeClampToEdge = 1, + SpvSamplerAddressingModeClamp = 2, + SpvSamplerAddressingModeRepeat = 3, + SpvSamplerAddressingModeRepeatMirrored = 4, + SpvSamplerAddressingModeMax = 0x7fffffff, +} SpvSamplerAddressingMode; + +typedef enum SpvSamplerFilterMode_ { + SpvSamplerFilterModeNearest = 0, + SpvSamplerFilterModeLinear = 1, + SpvSamplerFilterModeMax = 0x7fffffff, +} SpvSamplerFilterMode; + +typedef enum SpvImageFormat_ { + SpvImageFormatUnknown = 0, + SpvImageFormatRgba32f = 1, + SpvImageFormatRgba16f = 2, + SpvImageFormatR32f = 3, + SpvImageFormatRgba8 = 4, + SpvImageFormatRgba8Snorm = 5, + SpvImageFormatRg32f = 6, + SpvImageFormatRg16f = 7, + SpvImageFormatR11fG11fB10f = 8, + SpvImageFormatR16f = 9, + SpvImageFormatRgba16 = 10, + SpvImageFormatRgb10A2 = 11, + SpvImageFormatRg16 = 12, + SpvImageFormatRg8 = 13, + SpvImageFormatR16 = 14, + SpvImageFormatR8 = 15, + SpvImageFormatRgba16Snorm = 16, + SpvImageFormatRg16Snorm = 17, + SpvImageFormatRg8Snorm = 18, + SpvImageFormatR16Snorm = 19, + SpvImageFormatR8Snorm = 20, + SpvImageFormatRgba32i = 21, + SpvImageFormatRgba16i = 22, + SpvImageFormatRgba8i = 23, + SpvImageFormatR32i = 24, + SpvImageFormatRg32i = 25, + SpvImageFormatRg16i = 26, + SpvImageFormatRg8i = 27, + SpvImageFormatR16i = 28, + SpvImageFormatR8i = 29, + SpvImageFormatRgba32ui = 30, + SpvImageFormatRgba16ui = 31, + SpvImageFormatRgba8ui = 32, + SpvImageFormatR32ui = 33, + SpvImageFormatRgb10a2ui = 34, + SpvImageFormatRg32ui = 35, + SpvImageFormatRg16ui = 36, + SpvImageFormatRg8ui = 37, + SpvImageFormatR16ui = 38, + SpvImageFormatR8ui = 39, + SpvImageFormatMax = 0x7fffffff, +} SpvImageFormat; + +typedef enum SpvImageChannelOrder_ { + SpvImageChannelOrderR = 0, + SpvImageChannelOrderA = 1, + SpvImageChannelOrderRG = 2, + SpvImageChannelOrderRA = 3, + SpvImageChannelOrderRGB = 4, + SpvImageChannelOrderRGBA = 5, + SpvImageChannelOrderBGRA = 6, + SpvImageChannelOrderARGB = 7, + SpvImageChannelOrderIntensity = 8, + SpvImageChannelOrderLuminance = 9, + SpvImageChannelOrderRx = 10, + SpvImageChannelOrderRGx = 11, + SpvImageChannelOrderRGBx = 12, + SpvImageChannelOrderDepth = 13, + SpvImageChannelOrderDepthStencil = 14, + SpvImageChannelOrdersRGB = 15, + SpvImageChannelOrdersRGBx = 16, + SpvImageChannelOrdersRGBA = 17, + SpvImageChannelOrdersBGRA = 18, + SpvImageChannelOrderABGR = 19, + SpvImageChannelOrderMax = 0x7fffffff, +} SpvImageChannelOrder; + +typedef enum SpvImageChannelDataType_ { + SpvImageChannelDataTypeSnormInt8 = 0, + SpvImageChannelDataTypeSnormInt16 = 1, + SpvImageChannelDataTypeUnormInt8 = 2, + SpvImageChannelDataTypeUnormInt16 = 3, + SpvImageChannelDataTypeUnormShort565 = 4, + SpvImageChannelDataTypeUnormShort555 = 5, + SpvImageChannelDataTypeUnormInt101010 = 6, + SpvImageChannelDataTypeSignedInt8 = 7, + SpvImageChannelDataTypeSignedInt16 = 8, + SpvImageChannelDataTypeSignedInt32 = 
9, + SpvImageChannelDataTypeUnsignedInt8 = 10, + SpvImageChannelDataTypeUnsignedInt16 = 11, + SpvImageChannelDataTypeUnsignedInt32 = 12, + SpvImageChannelDataTypeHalfFloat = 13, + SpvImageChannelDataTypeFloat = 14, + SpvImageChannelDataTypeUnormInt24 = 15, + SpvImageChannelDataTypeUnormInt101010_2 = 16, + SpvImageChannelDataTypeMax = 0x7fffffff, +} SpvImageChannelDataType; + +typedef enum SpvImageOperandsShift_ { + SpvImageOperandsBiasShift = 0, + SpvImageOperandsLodShift = 1, + SpvImageOperandsGradShift = 2, + SpvImageOperandsConstOffsetShift = 3, + SpvImageOperandsOffsetShift = 4, + SpvImageOperandsConstOffsetsShift = 5, + SpvImageOperandsSampleShift = 6, + SpvImageOperandsMinLodShift = 7, + SpvImageOperandsMakeTexelAvailableShift = 8, + SpvImageOperandsMakeTexelAvailableKHRShift = 8, + SpvImageOperandsMakeTexelVisibleShift = 9, + SpvImageOperandsMakeTexelVisibleKHRShift = 9, + SpvImageOperandsNonPrivateTexelShift = 10, + SpvImageOperandsNonPrivateTexelKHRShift = 10, + SpvImageOperandsVolatileTexelShift = 11, + SpvImageOperandsVolatileTexelKHRShift = 11, + SpvImageOperandsSignExtendShift = 12, + SpvImageOperandsZeroExtendShift = 13, + SpvImageOperandsMax = 0x7fffffff, +} SpvImageOperandsShift; + +typedef enum SpvImageOperandsMask_ { + SpvImageOperandsMaskNone = 0, + SpvImageOperandsBiasMask = 0x00000001, + SpvImageOperandsLodMask = 0x00000002, + SpvImageOperandsGradMask = 0x00000004, + SpvImageOperandsConstOffsetMask = 0x00000008, + SpvImageOperandsOffsetMask = 0x00000010, + SpvImageOperandsConstOffsetsMask = 0x00000020, + SpvImageOperandsSampleMask = 0x00000040, + SpvImageOperandsMinLodMask = 0x00000080, + SpvImageOperandsMakeTexelAvailableMask = 0x00000100, + SpvImageOperandsMakeTexelAvailableKHRMask = 0x00000100, + SpvImageOperandsMakeTexelVisibleMask = 0x00000200, + SpvImageOperandsMakeTexelVisibleKHRMask = 0x00000200, + SpvImageOperandsNonPrivateTexelMask = 0x00000400, + SpvImageOperandsNonPrivateTexelKHRMask = 0x00000400, + SpvImageOperandsVolatileTexelMask = 0x00000800, + SpvImageOperandsVolatileTexelKHRMask = 0x00000800, + SpvImageOperandsSignExtendMask = 0x00001000, + SpvImageOperandsZeroExtendMask = 0x00002000, +} SpvImageOperandsMask; + +typedef enum SpvFPFastMathModeShift_ { + SpvFPFastMathModeNotNaNShift = 0, + SpvFPFastMathModeNotInfShift = 1, + SpvFPFastMathModeNSZShift = 2, + SpvFPFastMathModeAllowRecipShift = 3, + SpvFPFastMathModeFastShift = 4, + SpvFPFastMathModeMax = 0x7fffffff, +} SpvFPFastMathModeShift; + +typedef enum SpvFPFastMathModeMask_ { + SpvFPFastMathModeMaskNone = 0, + SpvFPFastMathModeNotNaNMask = 0x00000001, + SpvFPFastMathModeNotInfMask = 0x00000002, + SpvFPFastMathModeNSZMask = 0x00000004, + SpvFPFastMathModeAllowRecipMask = 0x00000008, + SpvFPFastMathModeFastMask = 0x00000010, +} SpvFPFastMathModeMask; + +typedef enum SpvFPRoundingMode_ { + SpvFPRoundingModeRTE = 0, + SpvFPRoundingModeRTZ = 1, + SpvFPRoundingModeRTP = 2, + SpvFPRoundingModeRTN = 3, + SpvFPRoundingModeMax = 0x7fffffff, +} SpvFPRoundingMode; + +typedef enum SpvLinkageType_ { + SpvLinkageTypeExport = 0, + SpvLinkageTypeImport = 1, + SpvLinkageTypeMax = 0x7fffffff, +} SpvLinkageType; + +typedef enum SpvAccessQualifier_ { + SpvAccessQualifierReadOnly = 0, + SpvAccessQualifierWriteOnly = 1, + SpvAccessQualifierReadWrite = 2, + SpvAccessQualifierMax = 0x7fffffff, +} SpvAccessQualifier; + +typedef enum SpvFunctionParameterAttribute_ { + SpvFunctionParameterAttributeZext = 0, + SpvFunctionParameterAttributeSext = 1, + SpvFunctionParameterAttributeByVal = 2, + SpvFunctionParameterAttributeSret = 
3, + SpvFunctionParameterAttributeNoAlias = 4, + SpvFunctionParameterAttributeNoCapture = 5, + SpvFunctionParameterAttributeNoWrite = 6, + SpvFunctionParameterAttributeNoReadWrite = 7, + SpvFunctionParameterAttributeMax = 0x7fffffff, +} SpvFunctionParameterAttribute; + +typedef enum SpvDecoration_ { + SpvDecorationRelaxedPrecision = 0, + SpvDecorationSpecId = 1, + SpvDecorationBlock = 2, + SpvDecorationBufferBlock = 3, + SpvDecorationRowMajor = 4, + SpvDecorationColMajor = 5, + SpvDecorationArrayStride = 6, + SpvDecorationMatrixStride = 7, + SpvDecorationGLSLShared = 8, + SpvDecorationGLSLPacked = 9, + SpvDecorationCPacked = 10, + SpvDecorationBuiltIn = 11, + SpvDecorationNoPerspective = 13, + SpvDecorationFlat = 14, + SpvDecorationPatch = 15, + SpvDecorationCentroid = 16, + SpvDecorationSample = 17, + SpvDecorationInvariant = 18, + SpvDecorationRestrict = 19, + SpvDecorationAliased = 20, + SpvDecorationVolatile = 21, + SpvDecorationConstant = 22, + SpvDecorationCoherent = 23, + SpvDecorationNonWritable = 24, + SpvDecorationNonReadable = 25, + SpvDecorationUniform = 26, + SpvDecorationUniformId = 27, + SpvDecorationSaturatedConversion = 28, + SpvDecorationStream = 29, + SpvDecorationLocation = 30, + SpvDecorationComponent = 31, + SpvDecorationIndex = 32, + SpvDecorationBinding = 33, + SpvDecorationDescriptorSet = 34, + SpvDecorationOffset = 35, + SpvDecorationXfbBuffer = 36, + SpvDecorationXfbStride = 37, + SpvDecorationFuncParamAttr = 38, + SpvDecorationFPRoundingMode = 39, + SpvDecorationFPFastMathMode = 40, + SpvDecorationLinkageAttributes = 41, + SpvDecorationNoContraction = 42, + SpvDecorationInputAttachmentIndex = 43, + SpvDecorationAlignment = 44, + SpvDecorationMaxByteOffset = 45, + SpvDecorationAlignmentId = 46, + SpvDecorationMaxByteOffsetId = 47, + SpvDecorationNoSignedWrap = 4469, + SpvDecorationNoUnsignedWrap = 4470, + SpvDecorationExplicitInterpAMD = 4999, + SpvDecorationOverrideCoverageNV = 5248, + SpvDecorationPassthroughNV = 5250, + SpvDecorationViewportRelativeNV = 5252, + SpvDecorationSecondaryViewportRelativeNV = 5256, + SpvDecorationPerPrimitiveNV = 5271, + SpvDecorationPerViewNV = 5272, + SpvDecorationPerTaskNV = 5273, + SpvDecorationPerVertexNV = 5285, + SpvDecorationNonUniform = 5300, + SpvDecorationNonUniformEXT = 5300, + SpvDecorationRestrictPointer = 5355, + SpvDecorationRestrictPointerEXT = 5355, + SpvDecorationAliasedPointer = 5356, + SpvDecorationAliasedPointerEXT = 5356, + SpvDecorationCounterBuffer = 5634, + SpvDecorationHlslCounterBufferGOOGLE = 5634, + SpvDecorationHlslSemanticGOOGLE = 5635, + SpvDecorationUserSemantic = 5635, + SpvDecorationUserTypeGOOGLE = 5636, + SpvDecorationMax = 0x7fffffff, +} SpvDecoration; + +typedef enum SpvBuiltIn_ { + SpvBuiltInPosition = 0, + SpvBuiltInPointSize = 1, + SpvBuiltInClipDistance = 3, + SpvBuiltInCullDistance = 4, + SpvBuiltInVertexId = 5, + SpvBuiltInInstanceId = 6, + SpvBuiltInPrimitiveId = 7, + SpvBuiltInInvocationId = 8, + SpvBuiltInLayer = 9, + SpvBuiltInViewportIndex = 10, + SpvBuiltInTessLevelOuter = 11, + SpvBuiltInTessLevelInner = 12, + SpvBuiltInTessCoord = 13, + SpvBuiltInPatchVertices = 14, + SpvBuiltInFragCoord = 15, + SpvBuiltInPointCoord = 16, + SpvBuiltInFrontFacing = 17, + SpvBuiltInSampleId = 18, + SpvBuiltInSamplePosition = 19, + SpvBuiltInSampleMask = 20, + SpvBuiltInFragDepth = 22, + SpvBuiltInHelperInvocation = 23, + SpvBuiltInNumWorkgroups = 24, + SpvBuiltInWorkgroupSize = 25, + SpvBuiltInWorkgroupId = 26, + SpvBuiltInLocalInvocationId = 27, + SpvBuiltInGlobalInvocationId = 28, + 
SpvBuiltInLocalInvocationIndex = 29, + SpvBuiltInWorkDim = 30, + SpvBuiltInGlobalSize = 31, + SpvBuiltInEnqueuedWorkgroupSize = 32, + SpvBuiltInGlobalOffset = 33, + SpvBuiltInGlobalLinearId = 34, + SpvBuiltInSubgroupSize = 36, + SpvBuiltInSubgroupMaxSize = 37, + SpvBuiltInNumSubgroups = 38, + SpvBuiltInNumEnqueuedSubgroups = 39, + SpvBuiltInSubgroupId = 40, + SpvBuiltInSubgroupLocalInvocationId = 41, + SpvBuiltInVertexIndex = 42, + SpvBuiltInInstanceIndex = 43, + SpvBuiltInSubgroupEqMask = 4416, + SpvBuiltInSubgroupEqMaskKHR = 4416, + SpvBuiltInSubgroupGeMask = 4417, + SpvBuiltInSubgroupGeMaskKHR = 4417, + SpvBuiltInSubgroupGtMask = 4418, + SpvBuiltInSubgroupGtMaskKHR = 4418, + SpvBuiltInSubgroupLeMask = 4419, + SpvBuiltInSubgroupLeMaskKHR = 4419, + SpvBuiltInSubgroupLtMask = 4420, + SpvBuiltInSubgroupLtMaskKHR = 4420, + SpvBuiltInBaseVertex = 4424, + SpvBuiltInBaseInstance = 4425, + SpvBuiltInDrawIndex = 4426, + SpvBuiltInDeviceIndex = 4438, + SpvBuiltInViewIndex = 4440, + SpvBuiltInBaryCoordNoPerspAMD = 4992, + SpvBuiltInBaryCoordNoPerspCentroidAMD = 4993, + SpvBuiltInBaryCoordNoPerspSampleAMD = 4994, + SpvBuiltInBaryCoordSmoothAMD = 4995, + SpvBuiltInBaryCoordSmoothCentroidAMD = 4996, + SpvBuiltInBaryCoordSmoothSampleAMD = 4997, + SpvBuiltInBaryCoordPullModelAMD = 4998, + SpvBuiltInFragStencilRefEXT = 5014, + SpvBuiltInViewportMaskNV = 5253, + SpvBuiltInSecondaryPositionNV = 5257, + SpvBuiltInSecondaryViewportMaskNV = 5258, + SpvBuiltInPositionPerViewNV = 5261, + SpvBuiltInViewportMaskPerViewNV = 5262, + SpvBuiltInFullyCoveredEXT = 5264, + SpvBuiltInTaskCountNV = 5274, + SpvBuiltInPrimitiveCountNV = 5275, + SpvBuiltInPrimitiveIndicesNV = 5276, + SpvBuiltInClipDistancePerViewNV = 5277, + SpvBuiltInCullDistancePerViewNV = 5278, + SpvBuiltInLayerPerViewNV = 5279, + SpvBuiltInMeshViewCountNV = 5280, + SpvBuiltInMeshViewIndicesNV = 5281, + SpvBuiltInBaryCoordNV = 5286, + SpvBuiltInBaryCoordNoPerspNV = 5287, + SpvBuiltInFragSizeEXT = 5292, + SpvBuiltInFragmentSizeNV = 5292, + SpvBuiltInFragInvocationCountEXT = 5293, + SpvBuiltInInvocationsPerPixelNV = 5293, + SpvBuiltInLaunchIdNV = 5319, + SpvBuiltInLaunchSizeNV = 5320, + SpvBuiltInWorldRayOriginNV = 5321, + SpvBuiltInWorldRayDirectionNV = 5322, + SpvBuiltInObjectRayOriginNV = 5323, + SpvBuiltInObjectRayDirectionNV = 5324, + SpvBuiltInRayTminNV = 5325, + SpvBuiltInRayTmaxNV = 5326, + SpvBuiltInInstanceCustomIndexNV = 5327, + SpvBuiltInObjectToWorldNV = 5330, + SpvBuiltInWorldToObjectNV = 5331, + SpvBuiltInHitTNV = 5332, + SpvBuiltInHitKindNV = 5333, + SpvBuiltInIncomingRayFlagsNV = 5351, + SpvBuiltInWarpsPerSMNV = 5374, + SpvBuiltInSMCountNV = 5375, + SpvBuiltInWarpIDNV = 5376, + SpvBuiltInSMIDNV = 5377, + SpvBuiltInMax = 0x7fffffff, +} SpvBuiltIn; + +typedef enum SpvSelectionControlShift_ { + SpvSelectionControlFlattenShift = 0, + SpvSelectionControlDontFlattenShift = 1, + SpvSelectionControlMax = 0x7fffffff, +} SpvSelectionControlShift; + +typedef enum SpvSelectionControlMask_ { + SpvSelectionControlMaskNone = 0, + SpvSelectionControlFlattenMask = 0x00000001, + SpvSelectionControlDontFlattenMask = 0x00000002, +} SpvSelectionControlMask; + +typedef enum SpvLoopControlShift_ { + SpvLoopControlUnrollShift = 0, + SpvLoopControlDontUnrollShift = 1, + SpvLoopControlDependencyInfiniteShift = 2, + SpvLoopControlDependencyLengthShift = 3, + SpvLoopControlMinIterationsShift = 4, + SpvLoopControlMaxIterationsShift = 5, + SpvLoopControlIterationMultipleShift = 6, + SpvLoopControlPeelCountShift = 7, + SpvLoopControlPartialCountShift = 8, + 
SpvLoopControlMax = 0x7fffffff, +} SpvLoopControlShift; + +typedef enum SpvLoopControlMask_ { + SpvLoopControlMaskNone = 0, + SpvLoopControlUnrollMask = 0x00000001, + SpvLoopControlDontUnrollMask = 0x00000002, + SpvLoopControlDependencyInfiniteMask = 0x00000004, + SpvLoopControlDependencyLengthMask = 0x00000008, + SpvLoopControlMinIterationsMask = 0x00000010, + SpvLoopControlMaxIterationsMask = 0x00000020, + SpvLoopControlIterationMultipleMask = 0x00000040, + SpvLoopControlPeelCountMask = 0x00000080, + SpvLoopControlPartialCountMask = 0x00000100, +} SpvLoopControlMask; + +typedef enum SpvFunctionControlShift_ { + SpvFunctionControlInlineShift = 0, + SpvFunctionControlDontInlineShift = 1, + SpvFunctionControlPureShift = 2, + SpvFunctionControlConstShift = 3, + SpvFunctionControlMax = 0x7fffffff, +} SpvFunctionControlShift; + +typedef enum SpvFunctionControlMask_ { + SpvFunctionControlMaskNone = 0, + SpvFunctionControlInlineMask = 0x00000001, + SpvFunctionControlDontInlineMask = 0x00000002, + SpvFunctionControlPureMask = 0x00000004, + SpvFunctionControlConstMask = 0x00000008, +} SpvFunctionControlMask; + +typedef enum SpvMemorySemanticsShift_ { + SpvMemorySemanticsAcquireShift = 1, + SpvMemorySemanticsReleaseShift = 2, + SpvMemorySemanticsAcquireReleaseShift = 3, + SpvMemorySemanticsSequentiallyConsistentShift = 4, + SpvMemorySemanticsUniformMemoryShift = 6, + SpvMemorySemanticsSubgroupMemoryShift = 7, + SpvMemorySemanticsWorkgroupMemoryShift = 8, + SpvMemorySemanticsCrossWorkgroupMemoryShift = 9, + SpvMemorySemanticsAtomicCounterMemoryShift = 10, + SpvMemorySemanticsImageMemoryShift = 11, + SpvMemorySemanticsOutputMemoryShift = 12, + SpvMemorySemanticsOutputMemoryKHRShift = 12, + SpvMemorySemanticsMakeAvailableShift = 13, + SpvMemorySemanticsMakeAvailableKHRShift = 13, + SpvMemorySemanticsMakeVisibleShift = 14, + SpvMemorySemanticsMakeVisibleKHRShift = 14, + SpvMemorySemanticsVolatileShift = 15, + SpvMemorySemanticsMax = 0x7fffffff, +} SpvMemorySemanticsShift; + +typedef enum SpvMemorySemanticsMask_ { + SpvMemorySemanticsMaskNone = 0, + SpvMemorySemanticsAcquireMask = 0x00000002, + SpvMemorySemanticsReleaseMask = 0x00000004, + SpvMemorySemanticsAcquireReleaseMask = 0x00000008, + SpvMemorySemanticsSequentiallyConsistentMask = 0x00000010, + SpvMemorySemanticsUniformMemoryMask = 0x00000040, + SpvMemorySemanticsSubgroupMemoryMask = 0x00000080, + SpvMemorySemanticsWorkgroupMemoryMask = 0x00000100, + SpvMemorySemanticsCrossWorkgroupMemoryMask = 0x00000200, + SpvMemorySemanticsAtomicCounterMemoryMask = 0x00000400, + SpvMemorySemanticsImageMemoryMask = 0x00000800, + SpvMemorySemanticsOutputMemoryMask = 0x00001000, + SpvMemorySemanticsOutputMemoryKHRMask = 0x00001000, + SpvMemorySemanticsMakeAvailableMask = 0x00002000, + SpvMemorySemanticsMakeAvailableKHRMask = 0x00002000, + SpvMemorySemanticsMakeVisibleMask = 0x00004000, + SpvMemorySemanticsMakeVisibleKHRMask = 0x00004000, + SpvMemorySemanticsVolatileMask = 0x00008000, +} SpvMemorySemanticsMask; + +typedef enum SpvMemoryAccessShift_ { + SpvMemoryAccessVolatileShift = 0, + SpvMemoryAccessAlignedShift = 1, + SpvMemoryAccessNontemporalShift = 2, + SpvMemoryAccessMakePointerAvailableShift = 3, + SpvMemoryAccessMakePointerAvailableKHRShift = 3, + SpvMemoryAccessMakePointerVisibleShift = 4, + SpvMemoryAccessMakePointerVisibleKHRShift = 4, + SpvMemoryAccessNonPrivatePointerShift = 5, + SpvMemoryAccessNonPrivatePointerKHRShift = 5, + SpvMemoryAccessMax = 0x7fffffff, +} SpvMemoryAccessShift; + +typedef enum SpvMemoryAccessMask_ { + SpvMemoryAccessMaskNone = 
0, + SpvMemoryAccessVolatileMask = 0x00000001, + SpvMemoryAccessAlignedMask = 0x00000002, + SpvMemoryAccessNontemporalMask = 0x00000004, + SpvMemoryAccessMakePointerAvailableMask = 0x00000008, + SpvMemoryAccessMakePointerAvailableKHRMask = 0x00000008, + SpvMemoryAccessMakePointerVisibleMask = 0x00000010, + SpvMemoryAccessMakePointerVisibleKHRMask = 0x00000010, + SpvMemoryAccessNonPrivatePointerMask = 0x00000020, + SpvMemoryAccessNonPrivatePointerKHRMask = 0x00000020, +} SpvMemoryAccessMask; + +typedef enum SpvScope_ { + SpvScopeCrossDevice = 0, + SpvScopeDevice = 1, + SpvScopeWorkgroup = 2, + SpvScopeSubgroup = 3, + SpvScopeInvocation = 4, + SpvScopeQueueFamily = 5, + SpvScopeQueueFamilyKHR = 5, + SpvScopeMax = 0x7fffffff, +} SpvScope; + +typedef enum SpvGroupOperation_ { + SpvGroupOperationReduce = 0, + SpvGroupOperationInclusiveScan = 1, + SpvGroupOperationExclusiveScan = 2, + SpvGroupOperationClusteredReduce = 3, + SpvGroupOperationPartitionedReduceNV = 6, + SpvGroupOperationPartitionedInclusiveScanNV = 7, + SpvGroupOperationPartitionedExclusiveScanNV = 8, + SpvGroupOperationMax = 0x7fffffff, +} SpvGroupOperation; + +typedef enum SpvKernelEnqueueFlags_ { + SpvKernelEnqueueFlagsNoWait = 0, + SpvKernelEnqueueFlagsWaitKernel = 1, + SpvKernelEnqueueFlagsWaitWorkGroup = 2, + SpvKernelEnqueueFlagsMax = 0x7fffffff, +} SpvKernelEnqueueFlags; + +typedef enum SpvKernelProfilingInfoShift_ { + SpvKernelProfilingInfoCmdExecTimeShift = 0, + SpvKernelProfilingInfoMax = 0x7fffffff, +} SpvKernelProfilingInfoShift; + +typedef enum SpvKernelProfilingInfoMask_ { + SpvKernelProfilingInfoMaskNone = 0, + SpvKernelProfilingInfoCmdExecTimeMask = 0x00000001, +} SpvKernelProfilingInfoMask; + +typedef enum SpvCapability_ { + SpvCapabilityMatrix = 0, + SpvCapabilityShader = 1, + SpvCapabilityGeometry = 2, + SpvCapabilityTessellation = 3, + SpvCapabilityAddresses = 4, + SpvCapabilityLinkage = 5, + SpvCapabilityKernel = 6, + SpvCapabilityVector16 = 7, + SpvCapabilityFloat16Buffer = 8, + SpvCapabilityFloat16 = 9, + SpvCapabilityFloat64 = 10, + SpvCapabilityInt64 = 11, + SpvCapabilityInt64Atomics = 12, + SpvCapabilityImageBasic = 13, + SpvCapabilityImageReadWrite = 14, + SpvCapabilityImageMipmap = 15, + SpvCapabilityPipes = 17, + SpvCapabilityGroups = 18, + SpvCapabilityDeviceEnqueue = 19, + SpvCapabilityLiteralSampler = 20, + SpvCapabilityAtomicStorage = 21, + SpvCapabilityInt16 = 22, + SpvCapabilityTessellationPointSize = 23, + SpvCapabilityGeometryPointSize = 24, + SpvCapabilityImageGatherExtended = 25, + SpvCapabilityStorageImageMultisample = 27, + SpvCapabilityUniformBufferArrayDynamicIndexing = 28, + SpvCapabilitySampledImageArrayDynamicIndexing = 29, + SpvCapabilityStorageBufferArrayDynamicIndexing = 30, + SpvCapabilityStorageImageArrayDynamicIndexing = 31, + SpvCapabilityClipDistance = 32, + SpvCapabilityCullDistance = 33, + SpvCapabilityImageCubeArray = 34, + SpvCapabilitySampleRateShading = 35, + SpvCapabilityImageRect = 36, + SpvCapabilitySampledRect = 37, + SpvCapabilityGenericPointer = 38, + SpvCapabilityInt8 = 39, + SpvCapabilityInputAttachment = 40, + SpvCapabilitySparseResidency = 41, + SpvCapabilityMinLod = 42, + SpvCapabilitySampled1D = 43, + SpvCapabilityImage1D = 44, + SpvCapabilitySampledCubeArray = 45, + SpvCapabilitySampledBuffer = 46, + SpvCapabilityImageBuffer = 47, + SpvCapabilityImageMSArray = 48, + SpvCapabilityStorageImageExtendedFormats = 49, + SpvCapabilityImageQuery = 50, + SpvCapabilityDerivativeControl = 51, + SpvCapabilityInterpolationFunction = 52, + SpvCapabilityTransformFeedback = 
53, + SpvCapabilityGeometryStreams = 54, + SpvCapabilityStorageImageReadWithoutFormat = 55, + SpvCapabilityStorageImageWriteWithoutFormat = 56, + SpvCapabilityMultiViewport = 57, + SpvCapabilitySubgroupDispatch = 58, + SpvCapabilityNamedBarrier = 59, + SpvCapabilityPipeStorage = 60, + SpvCapabilityGroupNonUniform = 61, + SpvCapabilityGroupNonUniformVote = 62, + SpvCapabilityGroupNonUniformArithmetic = 63, + SpvCapabilityGroupNonUniformBallot = 64, + SpvCapabilityGroupNonUniformShuffle = 65, + SpvCapabilityGroupNonUniformShuffleRelative = 66, + SpvCapabilityGroupNonUniformClustered = 67, + SpvCapabilityGroupNonUniformQuad = 68, + SpvCapabilityShaderLayer = 69, + SpvCapabilityShaderViewportIndex = 70, + SpvCapabilitySubgroupBallotKHR = 4423, + SpvCapabilityDrawParameters = 4427, + SpvCapabilitySubgroupVoteKHR = 4431, + SpvCapabilityStorageBuffer16BitAccess = 4433, + SpvCapabilityStorageUniformBufferBlock16 = 4433, + SpvCapabilityStorageUniform16 = 4434, + SpvCapabilityUniformAndStorageBuffer16BitAccess = 4434, + SpvCapabilityStoragePushConstant16 = 4435, + SpvCapabilityStorageInputOutput16 = 4436, + SpvCapabilityDeviceGroup = 4437, + SpvCapabilityMultiView = 4439, + SpvCapabilityVariablePointersStorageBuffer = 4441, + SpvCapabilityVariablePointers = 4442, + SpvCapabilityAtomicStorageOps = 4445, + SpvCapabilitySampleMaskPostDepthCoverage = 4447, + SpvCapabilityStorageBuffer8BitAccess = 4448, + SpvCapabilityUniformAndStorageBuffer8BitAccess = 4449, + SpvCapabilityStoragePushConstant8 = 4450, + SpvCapabilityDenormPreserve = 4464, + SpvCapabilityDenormFlushToZero = 4465, + SpvCapabilitySignedZeroInfNanPreserve = 4466, + SpvCapabilityRoundingModeRTE = 4467, + SpvCapabilityRoundingModeRTZ = 4468, + SpvCapabilityFloat16ImageAMD = 5008, + SpvCapabilityImageGatherBiasLodAMD = 5009, + SpvCapabilityFragmentMaskAMD = 5010, + SpvCapabilityStencilExportEXT = 5013, + SpvCapabilityImageReadWriteLodAMD = 5015, + SpvCapabilityShaderClockKHR = 5055, + SpvCapabilitySampleMaskOverrideCoverageNV = 5249, + SpvCapabilityGeometryShaderPassthroughNV = 5251, + SpvCapabilityShaderViewportIndexLayerEXT = 5254, + SpvCapabilityShaderViewportIndexLayerNV = 5254, + SpvCapabilityShaderViewportMaskNV = 5255, + SpvCapabilityShaderStereoViewNV = 5259, + SpvCapabilityPerViewAttributesNV = 5260, + SpvCapabilityFragmentFullyCoveredEXT = 5265, + SpvCapabilityMeshShadingNV = 5266, + SpvCapabilityImageFootprintNV = 5282, + SpvCapabilityFragmentBarycentricNV = 5284, + SpvCapabilityComputeDerivativeGroupQuadsNV = 5288, + SpvCapabilityFragmentDensityEXT = 5291, + SpvCapabilityShadingRateNV = 5291, + SpvCapabilityGroupNonUniformPartitionedNV = 5297, + SpvCapabilityShaderNonUniform = 5301, + SpvCapabilityShaderNonUniformEXT = 5301, + SpvCapabilityRuntimeDescriptorArray = 5302, + SpvCapabilityRuntimeDescriptorArrayEXT = 5302, + SpvCapabilityInputAttachmentArrayDynamicIndexing = 5303, + SpvCapabilityInputAttachmentArrayDynamicIndexingEXT = 5303, + SpvCapabilityUniformTexelBufferArrayDynamicIndexing = 5304, + SpvCapabilityUniformTexelBufferArrayDynamicIndexingEXT = 5304, + SpvCapabilityStorageTexelBufferArrayDynamicIndexing = 5305, + SpvCapabilityStorageTexelBufferArrayDynamicIndexingEXT = 5305, + SpvCapabilityUniformBufferArrayNonUniformIndexing = 5306, + SpvCapabilityUniformBufferArrayNonUniformIndexingEXT = 5306, + SpvCapabilitySampledImageArrayNonUniformIndexing = 5307, + SpvCapabilitySampledImageArrayNonUniformIndexingEXT = 5307, + SpvCapabilityStorageBufferArrayNonUniformIndexing = 5308, + 
SpvCapabilityStorageBufferArrayNonUniformIndexingEXT = 5308, + SpvCapabilityStorageImageArrayNonUniformIndexing = 5309, + SpvCapabilityStorageImageArrayNonUniformIndexingEXT = 5309, + SpvCapabilityInputAttachmentArrayNonUniformIndexing = 5310, + SpvCapabilityInputAttachmentArrayNonUniformIndexingEXT = 5310, + SpvCapabilityUniformTexelBufferArrayNonUniformIndexing = 5311, + SpvCapabilityUniformTexelBufferArrayNonUniformIndexingEXT = 5311, + SpvCapabilityStorageTexelBufferArrayNonUniformIndexing = 5312, + SpvCapabilityStorageTexelBufferArrayNonUniformIndexingEXT = 5312, + SpvCapabilityRayTracingNV = 5340, + SpvCapabilityVulkanMemoryModel = 5345, + SpvCapabilityVulkanMemoryModelKHR = 5345, + SpvCapabilityVulkanMemoryModelDeviceScope = 5346, + SpvCapabilityVulkanMemoryModelDeviceScopeKHR = 5346, + SpvCapabilityPhysicalStorageBufferAddresses = 5347, + SpvCapabilityPhysicalStorageBufferAddressesEXT = 5347, + SpvCapabilityComputeDerivativeGroupLinearNV = 5350, + SpvCapabilityCooperativeMatrixNV = 5357, + SpvCapabilityFragmentShaderSampleInterlockEXT = 5363, + SpvCapabilityFragmentShaderShadingRateInterlockEXT = 5372, + SpvCapabilityShaderSMBuiltinsNV = 5373, + SpvCapabilityFragmentShaderPixelInterlockEXT = 5378, + SpvCapabilityDemoteToHelperInvocationEXT = 5379, + SpvCapabilitySubgroupShuffleINTEL = 5568, + SpvCapabilitySubgroupBufferBlockIOINTEL = 5569, + SpvCapabilitySubgroupImageBlockIOINTEL = 5570, + SpvCapabilitySubgroupImageMediaBlockIOINTEL = 5579, + SpvCapabilityIntegerFunctions2INTEL = 5584, + SpvCapabilitySubgroupAvcMotionEstimationINTEL = 5696, + SpvCapabilitySubgroupAvcMotionEstimationIntraINTEL = 5697, + SpvCapabilitySubgroupAvcMotionEstimationChromaINTEL = 5698, + SpvCapabilityMax = 0x7fffffff, +} SpvCapability; + +typedef enum SpvOp_ { + SpvOpNop = 0, + SpvOpUndef = 1, + SpvOpSourceContinued = 2, + SpvOpSource = 3, + SpvOpSourceExtension = 4, + SpvOpName = 5, + SpvOpMemberName = 6, + SpvOpString = 7, + SpvOpLine = 8, + SpvOpExtension = 10, + SpvOpExtInstImport = 11, + SpvOpExtInst = 12, + SpvOpMemoryModel = 14, + SpvOpEntryPoint = 15, + SpvOpExecutionMode = 16, + SpvOpCapability = 17, + SpvOpTypeVoid = 19, + SpvOpTypeBool = 20, + SpvOpTypeInt = 21, + SpvOpTypeFloat = 22, + SpvOpTypeVector = 23, + SpvOpTypeMatrix = 24, + SpvOpTypeImage = 25, + SpvOpTypeSampler = 26, + SpvOpTypeSampledImage = 27, + SpvOpTypeArray = 28, + SpvOpTypeRuntimeArray = 29, + SpvOpTypeStruct = 30, + SpvOpTypeOpaque = 31, + SpvOpTypePointer = 32, + SpvOpTypeFunction = 33, + SpvOpTypeEvent = 34, + SpvOpTypeDeviceEvent = 35, + SpvOpTypeReserveId = 36, + SpvOpTypeQueue = 37, + SpvOpTypePipe = 38, + SpvOpTypeForwardPointer = 39, + SpvOpConstantTrue = 41, + SpvOpConstantFalse = 42, + SpvOpConstant = 43, + SpvOpConstantComposite = 44, + SpvOpConstantSampler = 45, + SpvOpConstantNull = 46, + SpvOpSpecConstantTrue = 48, + SpvOpSpecConstantFalse = 49, + SpvOpSpecConstant = 50, + SpvOpSpecConstantComposite = 51, + SpvOpSpecConstantOp = 52, + SpvOpFunction = 54, + SpvOpFunctionParameter = 55, + SpvOpFunctionEnd = 56, + SpvOpFunctionCall = 57, + SpvOpVariable = 59, + SpvOpImageTexelPointer = 60, + SpvOpLoad = 61, + SpvOpStore = 62, + SpvOpCopyMemory = 63, + SpvOpCopyMemorySized = 64, + SpvOpAccessChain = 65, + SpvOpInBoundsAccessChain = 66, + SpvOpPtrAccessChain = 67, + SpvOpArrayLength = 68, + SpvOpGenericPtrMemSemantics = 69, + SpvOpInBoundsPtrAccessChain = 70, + SpvOpDecorate = 71, + SpvOpMemberDecorate = 72, + SpvOpDecorationGroup = 73, + SpvOpGroupDecorate = 74, + SpvOpGroupMemberDecorate = 75, + 
SpvOpVectorExtractDynamic = 77, + SpvOpVectorInsertDynamic = 78, + SpvOpVectorShuffle = 79, + SpvOpCompositeConstruct = 80, + SpvOpCompositeExtract = 81, + SpvOpCompositeInsert = 82, + SpvOpCopyObject = 83, + SpvOpTranspose = 84, + SpvOpSampledImage = 86, + SpvOpImageSampleImplicitLod = 87, + SpvOpImageSampleExplicitLod = 88, + SpvOpImageSampleDrefImplicitLod = 89, + SpvOpImageSampleDrefExplicitLod = 90, + SpvOpImageSampleProjImplicitLod = 91, + SpvOpImageSampleProjExplicitLod = 92, + SpvOpImageSampleProjDrefImplicitLod = 93, + SpvOpImageSampleProjDrefExplicitLod = 94, + SpvOpImageFetch = 95, + SpvOpImageGather = 96, + SpvOpImageDrefGather = 97, + SpvOpImageRead = 98, + SpvOpImageWrite = 99, + SpvOpImage = 100, + SpvOpImageQueryFormat = 101, + SpvOpImageQueryOrder = 102, + SpvOpImageQuerySizeLod = 103, + SpvOpImageQuerySize = 104, + SpvOpImageQueryLod = 105, + SpvOpImageQueryLevels = 106, + SpvOpImageQuerySamples = 107, + SpvOpConvertFToU = 109, + SpvOpConvertFToS = 110, + SpvOpConvertSToF = 111, + SpvOpConvertUToF = 112, + SpvOpUConvert = 113, + SpvOpSConvert = 114, + SpvOpFConvert = 115, + SpvOpQuantizeToF16 = 116, + SpvOpConvertPtrToU = 117, + SpvOpSatConvertSToU = 118, + SpvOpSatConvertUToS = 119, + SpvOpConvertUToPtr = 120, + SpvOpPtrCastToGeneric = 121, + SpvOpGenericCastToPtr = 122, + SpvOpGenericCastToPtrExplicit = 123, + SpvOpBitcast = 124, + SpvOpSNegate = 126, + SpvOpFNegate = 127, + SpvOpIAdd = 128, + SpvOpFAdd = 129, + SpvOpISub = 130, + SpvOpFSub = 131, + SpvOpIMul = 132, + SpvOpFMul = 133, + SpvOpUDiv = 134, + SpvOpSDiv = 135, + SpvOpFDiv = 136, + SpvOpUMod = 137, + SpvOpSRem = 138, + SpvOpSMod = 139, + SpvOpFRem = 140, + SpvOpFMod = 141, + SpvOpVectorTimesScalar = 142, + SpvOpMatrixTimesScalar = 143, + SpvOpVectorTimesMatrix = 144, + SpvOpMatrixTimesVector = 145, + SpvOpMatrixTimesMatrix = 146, + SpvOpOuterProduct = 147, + SpvOpDot = 148, + SpvOpIAddCarry = 149, + SpvOpISubBorrow = 150, + SpvOpUMulExtended = 151, + SpvOpSMulExtended = 152, + SpvOpAny = 154, + SpvOpAll = 155, + SpvOpIsNan = 156, + SpvOpIsInf = 157, + SpvOpIsFinite = 158, + SpvOpIsNormal = 159, + SpvOpSignBitSet = 160, + SpvOpLessOrGreater = 161, + SpvOpOrdered = 162, + SpvOpUnordered = 163, + SpvOpLogicalEqual = 164, + SpvOpLogicalNotEqual = 165, + SpvOpLogicalOr = 166, + SpvOpLogicalAnd = 167, + SpvOpLogicalNot = 168, + SpvOpSelect = 169, + SpvOpIEqual = 170, + SpvOpINotEqual = 171, + SpvOpUGreaterThan = 172, + SpvOpSGreaterThan = 173, + SpvOpUGreaterThanEqual = 174, + SpvOpSGreaterThanEqual = 175, + SpvOpULessThan = 176, + SpvOpSLessThan = 177, + SpvOpULessThanEqual = 178, + SpvOpSLessThanEqual = 179, + SpvOpFOrdEqual = 180, + SpvOpFUnordEqual = 181, + SpvOpFOrdNotEqual = 182, + SpvOpFUnordNotEqual = 183, + SpvOpFOrdLessThan = 184, + SpvOpFUnordLessThan = 185, + SpvOpFOrdGreaterThan = 186, + SpvOpFUnordGreaterThan = 187, + SpvOpFOrdLessThanEqual = 188, + SpvOpFUnordLessThanEqual = 189, + SpvOpFOrdGreaterThanEqual = 190, + SpvOpFUnordGreaterThanEqual = 191, + SpvOpShiftRightLogical = 194, + SpvOpShiftRightArithmetic = 195, + SpvOpShiftLeftLogical = 196, + SpvOpBitwiseOr = 197, + SpvOpBitwiseXor = 198, + SpvOpBitwiseAnd = 199, + SpvOpNot = 200, + SpvOpBitFieldInsert = 201, + SpvOpBitFieldSExtract = 202, + SpvOpBitFieldUExtract = 203, + SpvOpBitReverse = 204, + SpvOpBitCount = 205, + SpvOpDPdx = 207, + SpvOpDPdy = 208, + SpvOpFwidth = 209, + SpvOpDPdxFine = 210, + SpvOpDPdyFine = 211, + SpvOpFwidthFine = 212, + SpvOpDPdxCoarse = 213, + SpvOpDPdyCoarse = 214, + SpvOpFwidthCoarse = 215, + SpvOpEmitVertex = 218, 
+ SpvOpEndPrimitive = 219, + SpvOpEmitStreamVertex = 220, + SpvOpEndStreamPrimitive = 221, + SpvOpControlBarrier = 224, + SpvOpMemoryBarrier = 225, + SpvOpAtomicLoad = 227, + SpvOpAtomicStore = 228, + SpvOpAtomicExchange = 229, + SpvOpAtomicCompareExchange = 230, + SpvOpAtomicCompareExchangeWeak = 231, + SpvOpAtomicIIncrement = 232, + SpvOpAtomicIDecrement = 233, + SpvOpAtomicIAdd = 234, + SpvOpAtomicISub = 235, + SpvOpAtomicSMin = 236, + SpvOpAtomicUMin = 237, + SpvOpAtomicSMax = 238, + SpvOpAtomicUMax = 239, + SpvOpAtomicAnd = 240, + SpvOpAtomicOr = 241, + SpvOpAtomicXor = 242, + SpvOpPhi = 245, + SpvOpLoopMerge = 246, + SpvOpSelectionMerge = 247, + SpvOpLabel = 248, + SpvOpBranch = 249, + SpvOpBranchConditional = 250, + SpvOpSwitch = 251, + SpvOpKill = 252, + SpvOpReturn = 253, + SpvOpReturnValue = 254, + SpvOpUnreachable = 255, + SpvOpLifetimeStart = 256, + SpvOpLifetimeStop = 257, + SpvOpGroupAsyncCopy = 259, + SpvOpGroupWaitEvents = 260, + SpvOpGroupAll = 261, + SpvOpGroupAny = 262, + SpvOpGroupBroadcast = 263, + SpvOpGroupIAdd = 264, + SpvOpGroupFAdd = 265, + SpvOpGroupFMin = 266, + SpvOpGroupUMin = 267, + SpvOpGroupSMin = 268, + SpvOpGroupFMax = 269, + SpvOpGroupUMax = 270, + SpvOpGroupSMax = 271, + SpvOpReadPipe = 274, + SpvOpWritePipe = 275, + SpvOpReservedReadPipe = 276, + SpvOpReservedWritePipe = 277, + SpvOpReserveReadPipePackets = 278, + SpvOpReserveWritePipePackets = 279, + SpvOpCommitReadPipe = 280, + SpvOpCommitWritePipe = 281, + SpvOpIsValidReserveId = 282, + SpvOpGetNumPipePackets = 283, + SpvOpGetMaxPipePackets = 284, + SpvOpGroupReserveReadPipePackets = 285, + SpvOpGroupReserveWritePipePackets = 286, + SpvOpGroupCommitReadPipe = 287, + SpvOpGroupCommitWritePipe = 288, + SpvOpEnqueueMarker = 291, + SpvOpEnqueueKernel = 292, + SpvOpGetKernelNDrangeSubGroupCount = 293, + SpvOpGetKernelNDrangeMaxSubGroupSize = 294, + SpvOpGetKernelWorkGroupSize = 295, + SpvOpGetKernelPreferredWorkGroupSizeMultiple = 296, + SpvOpRetainEvent = 297, + SpvOpReleaseEvent = 298, + SpvOpCreateUserEvent = 299, + SpvOpIsValidEvent = 300, + SpvOpSetUserEventStatus = 301, + SpvOpCaptureEventProfilingInfo = 302, + SpvOpGetDefaultQueue = 303, + SpvOpBuildNDRange = 304, + SpvOpImageSparseSampleImplicitLod = 305, + SpvOpImageSparseSampleExplicitLod = 306, + SpvOpImageSparseSampleDrefImplicitLod = 307, + SpvOpImageSparseSampleDrefExplicitLod = 308, + SpvOpImageSparseSampleProjImplicitLod = 309, + SpvOpImageSparseSampleProjExplicitLod = 310, + SpvOpImageSparseSampleProjDrefImplicitLod = 311, + SpvOpImageSparseSampleProjDrefExplicitLod = 312, + SpvOpImageSparseFetch = 313, + SpvOpImageSparseGather = 314, + SpvOpImageSparseDrefGather = 315, + SpvOpImageSparseTexelsResident = 316, + SpvOpNoLine = 317, + SpvOpAtomicFlagTestAndSet = 318, + SpvOpAtomicFlagClear = 319, + SpvOpImageSparseRead = 320, + SpvOpSizeOf = 321, + SpvOpTypePipeStorage = 322, + SpvOpConstantPipeStorage = 323, + SpvOpCreatePipeFromPipeStorage = 324, + SpvOpGetKernelLocalSizeForSubgroupCount = 325, + SpvOpGetKernelMaxNumSubgroups = 326, + SpvOpTypeNamedBarrier = 327, + SpvOpNamedBarrierInitialize = 328, + SpvOpMemoryNamedBarrier = 329, + SpvOpModuleProcessed = 330, + SpvOpExecutionModeId = 331, + SpvOpDecorateId = 332, + SpvOpGroupNonUniformElect = 333, + SpvOpGroupNonUniformAll = 334, + SpvOpGroupNonUniformAny = 335, + SpvOpGroupNonUniformAllEqual = 336, + SpvOpGroupNonUniformBroadcast = 337, + SpvOpGroupNonUniformBroadcastFirst = 338, + SpvOpGroupNonUniformBallot = 339, + SpvOpGroupNonUniformInverseBallot = 340, + 
SpvOpGroupNonUniformBallotBitExtract = 341, + SpvOpGroupNonUniformBallotBitCount = 342, + SpvOpGroupNonUniformBallotFindLSB = 343, + SpvOpGroupNonUniformBallotFindMSB = 344, + SpvOpGroupNonUniformShuffle = 345, + SpvOpGroupNonUniformShuffleXor = 346, + SpvOpGroupNonUniformShuffleUp = 347, + SpvOpGroupNonUniformShuffleDown = 348, + SpvOpGroupNonUniformIAdd = 349, + SpvOpGroupNonUniformFAdd = 350, + SpvOpGroupNonUniformIMul = 351, + SpvOpGroupNonUniformFMul = 352, + SpvOpGroupNonUniformSMin = 353, + SpvOpGroupNonUniformUMin = 354, + SpvOpGroupNonUniformFMin = 355, + SpvOpGroupNonUniformSMax = 356, + SpvOpGroupNonUniformUMax = 357, + SpvOpGroupNonUniformFMax = 358, + SpvOpGroupNonUniformBitwiseAnd = 359, + SpvOpGroupNonUniformBitwiseOr = 360, + SpvOpGroupNonUniformBitwiseXor = 361, + SpvOpGroupNonUniformLogicalAnd = 362, + SpvOpGroupNonUniformLogicalOr = 363, + SpvOpGroupNonUniformLogicalXor = 364, + SpvOpGroupNonUniformQuadBroadcast = 365, + SpvOpGroupNonUniformQuadSwap = 366, + SpvOpCopyLogical = 400, + SpvOpPtrEqual = 401, + SpvOpPtrNotEqual = 402, + SpvOpPtrDiff = 403, + SpvOpSubgroupBallotKHR = 4421, + SpvOpSubgroupFirstInvocationKHR = 4422, + SpvOpSubgroupAllKHR = 4428, + SpvOpSubgroupAnyKHR = 4429, + SpvOpSubgroupAllEqualKHR = 4430, + SpvOpSubgroupReadInvocationKHR = 4432, + SpvOpGroupIAddNonUniformAMD = 5000, + SpvOpGroupFAddNonUniformAMD = 5001, + SpvOpGroupFMinNonUniformAMD = 5002, + SpvOpGroupUMinNonUniformAMD = 5003, + SpvOpGroupSMinNonUniformAMD = 5004, + SpvOpGroupFMaxNonUniformAMD = 5005, + SpvOpGroupUMaxNonUniformAMD = 5006, + SpvOpGroupSMaxNonUniformAMD = 5007, + SpvOpFragmentMaskFetchAMD = 5011, + SpvOpFragmentFetchAMD = 5012, + SpvOpReadClockKHR = 5056, + SpvOpImageSampleFootprintNV = 5283, + SpvOpGroupNonUniformPartitionNV = 5296, + SpvOpWritePackedPrimitiveIndices4x8NV = 5299, + SpvOpReportIntersectionNV = 5334, + SpvOpIgnoreIntersectionNV = 5335, + SpvOpTerminateRayNV = 5336, + SpvOpTraceNV = 5337, + SpvOpTypeAccelerationStructureNV = 5341, + SpvOpExecuteCallableNV = 5344, + SpvOpTypeCooperativeMatrixNV = 5358, + SpvOpCooperativeMatrixLoadNV = 5359, + SpvOpCooperativeMatrixStoreNV = 5360, + SpvOpCooperativeMatrixMulAddNV = 5361, + SpvOpCooperativeMatrixLengthNV = 5362, + SpvOpBeginInvocationInterlockEXT = 5364, + SpvOpEndInvocationInterlockEXT = 5365, + SpvOpDemoteToHelperInvocationEXT = 5380, + SpvOpIsHelperInvocationEXT = 5381, + SpvOpSubgroupShuffleINTEL = 5571, + SpvOpSubgroupShuffleDownINTEL = 5572, + SpvOpSubgroupShuffleUpINTEL = 5573, + SpvOpSubgroupShuffleXorINTEL = 5574, + SpvOpSubgroupBlockReadINTEL = 5575, + SpvOpSubgroupBlockWriteINTEL = 5576, + SpvOpSubgroupImageBlockReadINTEL = 5577, + SpvOpSubgroupImageBlockWriteINTEL = 5578, + SpvOpSubgroupImageMediaBlockReadINTEL = 5580, + SpvOpSubgroupImageMediaBlockWriteINTEL = 5581, + SpvOpUCountLeadingZerosINTEL = 5585, + SpvOpUCountTrailingZerosINTEL = 5586, + SpvOpAbsISubINTEL = 5587, + SpvOpAbsUSubINTEL = 5588, + SpvOpIAddSatINTEL = 5589, + SpvOpUAddSatINTEL = 5590, + SpvOpIAverageINTEL = 5591, + SpvOpUAverageINTEL = 5592, + SpvOpIAverageRoundedINTEL = 5593, + SpvOpUAverageRoundedINTEL = 5594, + SpvOpISubSatINTEL = 5595, + SpvOpUSubSatINTEL = 5596, + SpvOpIMul32x16INTEL = 5597, + SpvOpUMul32x16INTEL = 5598, + SpvOpDecorateString = 5632, + SpvOpDecorateStringGOOGLE = 5632, + SpvOpMemberDecorateString = 5633, + SpvOpMemberDecorateStringGOOGLE = 5633, + SpvOpVmeImageINTEL = 5699, + SpvOpTypeVmeImageINTEL = 5700, + SpvOpTypeAvcImePayloadINTEL = 5701, + SpvOpTypeAvcRefPayloadINTEL = 5702, + SpvOpTypeAvcSicPayloadINTEL 
= 5703, + SpvOpTypeAvcMcePayloadINTEL = 5704, + SpvOpTypeAvcMceResultINTEL = 5705, + SpvOpTypeAvcImeResultINTEL = 5706, + SpvOpTypeAvcImeResultSingleReferenceStreamoutINTEL = 5707, + SpvOpTypeAvcImeResultDualReferenceStreamoutINTEL = 5708, + SpvOpTypeAvcImeSingleReferenceStreaminINTEL = 5709, + SpvOpTypeAvcImeDualReferenceStreaminINTEL = 5710, + SpvOpTypeAvcRefResultINTEL = 5711, + SpvOpTypeAvcSicResultINTEL = 5712, + SpvOpSubgroupAvcMceGetDefaultInterBaseMultiReferencePenaltyINTEL = 5713, + SpvOpSubgroupAvcMceSetInterBaseMultiReferencePenaltyINTEL = 5714, + SpvOpSubgroupAvcMceGetDefaultInterShapePenaltyINTEL = 5715, + SpvOpSubgroupAvcMceSetInterShapePenaltyINTEL = 5716, + SpvOpSubgroupAvcMceGetDefaultInterDirectionPenaltyINTEL = 5717, + SpvOpSubgroupAvcMceSetInterDirectionPenaltyINTEL = 5718, + SpvOpSubgroupAvcMceGetDefaultIntraLumaShapePenaltyINTEL = 5719, + SpvOpSubgroupAvcMceGetDefaultInterMotionVectorCostTableINTEL = 5720, + SpvOpSubgroupAvcMceGetDefaultHighPenaltyCostTableINTEL = 5721, + SpvOpSubgroupAvcMceGetDefaultMediumPenaltyCostTableINTEL = 5722, + SpvOpSubgroupAvcMceGetDefaultLowPenaltyCostTableINTEL = 5723, + SpvOpSubgroupAvcMceSetMotionVectorCostFunctionINTEL = 5724, + SpvOpSubgroupAvcMceGetDefaultIntraLumaModePenaltyINTEL = 5725, + SpvOpSubgroupAvcMceGetDefaultNonDcLumaIntraPenaltyINTEL = 5726, + SpvOpSubgroupAvcMceGetDefaultIntraChromaModeBasePenaltyINTEL = 5727, + SpvOpSubgroupAvcMceSetAcOnlyHaarINTEL = 5728, + SpvOpSubgroupAvcMceSetSourceInterlacedFieldPolarityINTEL = 5729, + SpvOpSubgroupAvcMceSetSingleReferenceInterlacedFieldPolarityINTEL = 5730, + SpvOpSubgroupAvcMceSetDualReferenceInterlacedFieldPolaritiesINTEL = 5731, + SpvOpSubgroupAvcMceConvertToImePayloadINTEL = 5732, + SpvOpSubgroupAvcMceConvertToImeResultINTEL = 5733, + SpvOpSubgroupAvcMceConvertToRefPayloadINTEL = 5734, + SpvOpSubgroupAvcMceConvertToRefResultINTEL = 5735, + SpvOpSubgroupAvcMceConvertToSicPayloadINTEL = 5736, + SpvOpSubgroupAvcMceConvertToSicResultINTEL = 5737, + SpvOpSubgroupAvcMceGetMotionVectorsINTEL = 5738, + SpvOpSubgroupAvcMceGetInterDistortionsINTEL = 5739, + SpvOpSubgroupAvcMceGetBestInterDistortionsINTEL = 5740, + SpvOpSubgroupAvcMceGetInterMajorShapeINTEL = 5741, + SpvOpSubgroupAvcMceGetInterMinorShapeINTEL = 5742, + SpvOpSubgroupAvcMceGetInterDirectionsINTEL = 5743, + SpvOpSubgroupAvcMceGetInterMotionVectorCountINTEL = 5744, + SpvOpSubgroupAvcMceGetInterReferenceIdsINTEL = 5745, + SpvOpSubgroupAvcMceGetInterReferenceInterlacedFieldPolaritiesINTEL = 5746, + SpvOpSubgroupAvcImeInitializeINTEL = 5747, + SpvOpSubgroupAvcImeSetSingleReferenceINTEL = 5748, + SpvOpSubgroupAvcImeSetDualReferenceINTEL = 5749, + SpvOpSubgroupAvcImeRefWindowSizeINTEL = 5750, + SpvOpSubgroupAvcImeAdjustRefOffsetINTEL = 5751, + SpvOpSubgroupAvcImeConvertToMcePayloadINTEL = 5752, + SpvOpSubgroupAvcImeSetMaxMotionVectorCountINTEL = 5753, + SpvOpSubgroupAvcImeSetUnidirectionalMixDisableINTEL = 5754, + SpvOpSubgroupAvcImeSetEarlySearchTerminationThresholdINTEL = 5755, + SpvOpSubgroupAvcImeSetWeightedSadINTEL = 5756, + SpvOpSubgroupAvcImeEvaluateWithSingleReferenceINTEL = 5757, + SpvOpSubgroupAvcImeEvaluateWithDualReferenceINTEL = 5758, + SpvOpSubgroupAvcImeEvaluateWithSingleReferenceStreaminINTEL = 5759, + SpvOpSubgroupAvcImeEvaluateWithDualReferenceStreaminINTEL = 5760, + SpvOpSubgroupAvcImeEvaluateWithSingleReferenceStreamoutINTEL = 5761, + SpvOpSubgroupAvcImeEvaluateWithDualReferenceStreamoutINTEL = 5762, + SpvOpSubgroupAvcImeEvaluateWithSingleReferenceStreaminoutINTEL = 5763, + 
SpvOpSubgroupAvcImeEvaluateWithDualReferenceStreaminoutINTEL = 5764, + SpvOpSubgroupAvcImeConvertToMceResultINTEL = 5765, + SpvOpSubgroupAvcImeGetSingleReferenceStreaminINTEL = 5766, + SpvOpSubgroupAvcImeGetDualReferenceStreaminINTEL = 5767, + SpvOpSubgroupAvcImeStripSingleReferenceStreamoutINTEL = 5768, + SpvOpSubgroupAvcImeStripDualReferenceStreamoutINTEL = 5769, + SpvOpSubgroupAvcImeGetStreamoutSingleReferenceMajorShapeMotionVectorsINTEL = 5770, + SpvOpSubgroupAvcImeGetStreamoutSingleReferenceMajorShapeDistortionsINTEL = 5771, + SpvOpSubgroupAvcImeGetStreamoutSingleReferenceMajorShapeReferenceIdsINTEL = 5772, + SpvOpSubgroupAvcImeGetStreamoutDualReferenceMajorShapeMotionVectorsINTEL = 5773, + SpvOpSubgroupAvcImeGetStreamoutDualReferenceMajorShapeDistortionsINTEL = 5774, + SpvOpSubgroupAvcImeGetStreamoutDualReferenceMajorShapeReferenceIdsINTEL = 5775, + SpvOpSubgroupAvcImeGetBorderReachedINTEL = 5776, + SpvOpSubgroupAvcImeGetTruncatedSearchIndicationINTEL = 5777, + SpvOpSubgroupAvcImeGetUnidirectionalEarlySearchTerminationINTEL = 5778, + SpvOpSubgroupAvcImeGetWeightingPatternMinimumMotionVectorINTEL = 5779, + SpvOpSubgroupAvcImeGetWeightingPatternMinimumDistortionINTEL = 5780, + SpvOpSubgroupAvcFmeInitializeINTEL = 5781, + SpvOpSubgroupAvcBmeInitializeINTEL = 5782, + SpvOpSubgroupAvcRefConvertToMcePayloadINTEL = 5783, + SpvOpSubgroupAvcRefSetBidirectionalMixDisableINTEL = 5784, + SpvOpSubgroupAvcRefSetBilinearFilterEnableINTEL = 5785, + SpvOpSubgroupAvcRefEvaluateWithSingleReferenceINTEL = 5786, + SpvOpSubgroupAvcRefEvaluateWithDualReferenceINTEL = 5787, + SpvOpSubgroupAvcRefEvaluateWithMultiReferenceINTEL = 5788, + SpvOpSubgroupAvcRefEvaluateWithMultiReferenceInterlacedINTEL = 5789, + SpvOpSubgroupAvcRefConvertToMceResultINTEL = 5790, + SpvOpSubgroupAvcSicInitializeINTEL = 5791, + SpvOpSubgroupAvcSicConfigureSkcINTEL = 5792, + SpvOpSubgroupAvcSicConfigureIpeLumaINTEL = 5793, + SpvOpSubgroupAvcSicConfigureIpeLumaChromaINTEL = 5794, + SpvOpSubgroupAvcSicGetMotionVectorMaskINTEL = 5795, + SpvOpSubgroupAvcSicConvertToMcePayloadINTEL = 5796, + SpvOpSubgroupAvcSicSetIntraLumaShapePenaltyINTEL = 5797, + SpvOpSubgroupAvcSicSetIntraLumaModeCostFunctionINTEL = 5798, + SpvOpSubgroupAvcSicSetIntraChromaModeCostFunctionINTEL = 5799, + SpvOpSubgroupAvcSicSetBilinearFilterEnableINTEL = 5800, + SpvOpSubgroupAvcSicSetSkcForwardTransformEnableINTEL = 5801, + SpvOpSubgroupAvcSicSetBlockBasedRawSkipSadINTEL = 5802, + SpvOpSubgroupAvcSicEvaluateIpeINTEL = 5803, + SpvOpSubgroupAvcSicEvaluateWithSingleReferenceINTEL = 5804, + SpvOpSubgroupAvcSicEvaluateWithDualReferenceINTEL = 5805, + SpvOpSubgroupAvcSicEvaluateWithMultiReferenceINTEL = 5806, + SpvOpSubgroupAvcSicEvaluateWithMultiReferenceInterlacedINTEL = 5807, + SpvOpSubgroupAvcSicConvertToMceResultINTEL = 5808, + SpvOpSubgroupAvcSicGetIpeLumaShapeINTEL = 5809, + SpvOpSubgroupAvcSicGetBestIpeLumaDistortionINTEL = 5810, + SpvOpSubgroupAvcSicGetBestIpeChromaDistortionINTEL = 5811, + SpvOpSubgroupAvcSicGetPackedIpeLumaModesINTEL = 5812, + SpvOpSubgroupAvcSicGetIpeChromaModeINTEL = 5813, + SpvOpSubgroupAvcSicGetPackedSkcLumaCountThresholdINTEL = 5814, + SpvOpSubgroupAvcSicGetPackedSkcLumaSumThresholdINTEL = 5815, + SpvOpSubgroupAvcSicGetInterRawSadsINTEL = 5816, + SpvOpMax = 0x7fffffff, +} SpvOp; + +#ifdef SPV_ENABLE_UTILITY_CODE +inline void SpvHasResultAndType(SpvOp opcode, bool *hasResult, bool *hasResultType) { + *hasResult = *hasResultType = false; + switch (opcode) { + default: break; + case SpvOpNop: *hasResult = false; *hasResultType = false; break; + 
case SpvOpUndef: *hasResult = true; *hasResultType = true; break; + case SpvOpSourceContinued: *hasResult = false; *hasResultType = false; break; + case SpvOpSource: *hasResult = false; *hasResultType = false; break; + case SpvOpSourceExtension: *hasResult = false; *hasResultType = false; break; + case SpvOpName: *hasResult = false; *hasResultType = false; break; + case SpvOpMemberName: *hasResult = false; *hasResultType = false; break; + case SpvOpString: *hasResult = true; *hasResultType = false; break; + case SpvOpLine: *hasResult = false; *hasResultType = false; break; + case SpvOpExtension: *hasResult = false; *hasResultType = false; break; + case SpvOpExtInstImport: *hasResult = true; *hasResultType = false; break; + case SpvOpExtInst: *hasResult = true; *hasResultType = true; break; + case SpvOpMemoryModel: *hasResult = false; *hasResultType = false; break; + case SpvOpEntryPoint: *hasResult = false; *hasResultType = false; break; + case SpvOpExecutionMode: *hasResult = false; *hasResultType = false; break; + case SpvOpCapability: *hasResult = false; *hasResultType = false; break; + case SpvOpTypeVoid: *hasResult = true; *hasResultType = false; break; + case SpvOpTypeBool: *hasResult = true; *hasResultType = false; break; + case SpvOpTypeInt: *hasResult = true; *hasResultType = false; break; + case SpvOpTypeFloat: *hasResult = true; *hasResultType = false; break; + case SpvOpTypeVector: *hasResult = true; *hasResultType = false; break; + case SpvOpTypeMatrix: *hasResult = true; *hasResultType = false; break; + case SpvOpTypeImage: *hasResult = true; *hasResultType = false; break; + case SpvOpTypeSampler: *hasResult = true; *hasResultType = false; break; + case SpvOpTypeSampledImage: *hasResult = true; *hasResultType = false; break; + case SpvOpTypeArray: *hasResult = true; *hasResultType = false; break; + case SpvOpTypeRuntimeArray: *hasResult = true; *hasResultType = false; break; + case SpvOpTypeStruct: *hasResult = true; *hasResultType = false; break; + case SpvOpTypeOpaque: *hasResult = true; *hasResultType = false; break; + case SpvOpTypePointer: *hasResult = true; *hasResultType = false; break; + case SpvOpTypeFunction: *hasResult = true; *hasResultType = false; break; + case SpvOpTypeEvent: *hasResult = true; *hasResultType = false; break; + case SpvOpTypeDeviceEvent: *hasResult = true; *hasResultType = false; break; + case SpvOpTypeReserveId: *hasResult = true; *hasResultType = false; break; + case SpvOpTypeQueue: *hasResult = true; *hasResultType = false; break; + case SpvOpTypePipe: *hasResult = true; *hasResultType = false; break; + case SpvOpTypeForwardPointer: *hasResult = false; *hasResultType = false; break; + case SpvOpConstantTrue: *hasResult = true; *hasResultType = true; break; + case SpvOpConstantFalse: *hasResult = true; *hasResultType = true; break; + case SpvOpConstant: *hasResult = true; *hasResultType = true; break; + case SpvOpConstantComposite: *hasResult = true; *hasResultType = true; break; + case SpvOpConstantSampler: *hasResult = true; *hasResultType = true; break; + case SpvOpConstantNull: *hasResult = true; *hasResultType = true; break; + case SpvOpSpecConstantTrue: *hasResult = true; *hasResultType = true; break; + case SpvOpSpecConstantFalse: *hasResult = true; *hasResultType = true; break; + case SpvOpSpecConstant: *hasResult = true; *hasResultType = true; break; + case SpvOpSpecConstantComposite: *hasResult = true; *hasResultType = true; break; + case SpvOpSpecConstantOp: *hasResult = true; *hasResultType = true; break; + case SpvOpFunction: 
*hasResult = true; *hasResultType = true; break; + case SpvOpFunctionParameter: *hasResult = true; *hasResultType = true; break; + case SpvOpFunctionEnd: *hasResult = false; *hasResultType = false; break; + case SpvOpFunctionCall: *hasResult = true; *hasResultType = true; break; + case SpvOpVariable: *hasResult = true; *hasResultType = true; break; + case SpvOpImageTexelPointer: *hasResult = true; *hasResultType = true; break; + case SpvOpLoad: *hasResult = true; *hasResultType = true; break; + case SpvOpStore: *hasResult = false; *hasResultType = false; break; + case SpvOpCopyMemory: *hasResult = false; *hasResultType = false; break; + case SpvOpCopyMemorySized: *hasResult = false; *hasResultType = false; break; + case SpvOpAccessChain: *hasResult = true; *hasResultType = true; break; + case SpvOpInBoundsAccessChain: *hasResult = true; *hasResultType = true; break; + case SpvOpPtrAccessChain: *hasResult = true; *hasResultType = true; break; + case SpvOpArrayLength: *hasResult = true; *hasResultType = true; break; + case SpvOpGenericPtrMemSemantics: *hasResult = true; *hasResultType = true; break; + case SpvOpInBoundsPtrAccessChain: *hasResult = true; *hasResultType = true; break; + case SpvOpDecorate: *hasResult = false; *hasResultType = false; break; + case SpvOpMemberDecorate: *hasResult = false; *hasResultType = false; break; + case SpvOpDecorationGroup: *hasResult = true; *hasResultType = false; break; + case SpvOpGroupDecorate: *hasResult = false; *hasResultType = false; break; + case SpvOpGroupMemberDecorate: *hasResult = false; *hasResultType = false; break; + case SpvOpVectorExtractDynamic: *hasResult = true; *hasResultType = true; break; + case SpvOpVectorInsertDynamic: *hasResult = true; *hasResultType = true; break; + case SpvOpVectorShuffle: *hasResult = true; *hasResultType = true; break; + case SpvOpCompositeConstruct: *hasResult = true; *hasResultType = true; break; + case SpvOpCompositeExtract: *hasResult = true; *hasResultType = true; break; + case SpvOpCompositeInsert: *hasResult = true; *hasResultType = true; break; + case SpvOpCopyObject: *hasResult = true; *hasResultType = true; break; + case SpvOpTranspose: *hasResult = true; *hasResultType = true; break; + case SpvOpSampledImage: *hasResult = true; *hasResultType = true; break; + case SpvOpImageSampleImplicitLod: *hasResult = true; *hasResultType = true; break; + case SpvOpImageSampleExplicitLod: *hasResult = true; *hasResultType = true; break; + case SpvOpImageSampleDrefImplicitLod: *hasResult = true; *hasResultType = true; break; + case SpvOpImageSampleDrefExplicitLod: *hasResult = true; *hasResultType = true; break; + case SpvOpImageSampleProjImplicitLod: *hasResult = true; *hasResultType = true; break; + case SpvOpImageSampleProjExplicitLod: *hasResult = true; *hasResultType = true; break; + case SpvOpImageSampleProjDrefImplicitLod: *hasResult = true; *hasResultType = true; break; + case SpvOpImageSampleProjDrefExplicitLod: *hasResult = true; *hasResultType = true; break; + case SpvOpImageFetch: *hasResult = true; *hasResultType = true; break; + case SpvOpImageGather: *hasResult = true; *hasResultType = true; break; + case SpvOpImageDrefGather: *hasResult = true; *hasResultType = true; break; + case SpvOpImageRead: *hasResult = true; *hasResultType = true; break; + case SpvOpImageWrite: *hasResult = false; *hasResultType = false; break; + case SpvOpImage: *hasResult = true; *hasResultType = true; break; + case SpvOpImageQueryFormat: *hasResult = true; *hasResultType = true; break; + case SpvOpImageQueryOrder: 
*hasResult = true; *hasResultType = true; break; + case SpvOpImageQuerySizeLod: *hasResult = true; *hasResultType = true; break; + case SpvOpImageQuerySize: *hasResult = true; *hasResultType = true; break; + case SpvOpImageQueryLod: *hasResult = true; *hasResultType = true; break; + case SpvOpImageQueryLevels: *hasResult = true; *hasResultType = true; break; + case SpvOpImageQuerySamples: *hasResult = true; *hasResultType = true; break; + case SpvOpConvertFToU: *hasResult = true; *hasResultType = true; break; + case SpvOpConvertFToS: *hasResult = true; *hasResultType = true; break; + case SpvOpConvertSToF: *hasResult = true; *hasResultType = true; break; + case SpvOpConvertUToF: *hasResult = true; *hasResultType = true; break; + case SpvOpUConvert: *hasResult = true; *hasResultType = true; break; + case SpvOpSConvert: *hasResult = true; *hasResultType = true; break; + case SpvOpFConvert: *hasResult = true; *hasResultType = true; break; + case SpvOpQuantizeToF16: *hasResult = true; *hasResultType = true; break; + case SpvOpConvertPtrToU: *hasResult = true; *hasResultType = true; break; + case SpvOpSatConvertSToU: *hasResult = true; *hasResultType = true; break; + case SpvOpSatConvertUToS: *hasResult = true; *hasResultType = true; break; + case SpvOpConvertUToPtr: *hasResult = true; *hasResultType = true; break; + case SpvOpPtrCastToGeneric: *hasResult = true; *hasResultType = true; break; + case SpvOpGenericCastToPtr: *hasResult = true; *hasResultType = true; break; + case SpvOpGenericCastToPtrExplicit: *hasResult = true; *hasResultType = true; break; + case SpvOpBitcast: *hasResult = true; *hasResultType = true; break; + case SpvOpSNegate: *hasResult = true; *hasResultType = true; break; + case SpvOpFNegate: *hasResult = true; *hasResultType = true; break; + case SpvOpIAdd: *hasResult = true; *hasResultType = true; break; + case SpvOpFAdd: *hasResult = true; *hasResultType = true; break; + case SpvOpISub: *hasResult = true; *hasResultType = true; break; + case SpvOpFSub: *hasResult = true; *hasResultType = true; break; + case SpvOpIMul: *hasResult = true; *hasResultType = true; break; + case SpvOpFMul: *hasResult = true; *hasResultType = true; break; + case SpvOpUDiv: *hasResult = true; *hasResultType = true; break; + case SpvOpSDiv: *hasResult = true; *hasResultType = true; break; + case SpvOpFDiv: *hasResult = true; *hasResultType = true; break; + case SpvOpUMod: *hasResult = true; *hasResultType = true; break; + case SpvOpSRem: *hasResult = true; *hasResultType = true; break; + case SpvOpSMod: *hasResult = true; *hasResultType = true; break; + case SpvOpFRem: *hasResult = true; *hasResultType = true; break; + case SpvOpFMod: *hasResult = true; *hasResultType = true; break; + case SpvOpVectorTimesScalar: *hasResult = true; *hasResultType = true; break; + case SpvOpMatrixTimesScalar: *hasResult = true; *hasResultType = true; break; + case SpvOpVectorTimesMatrix: *hasResult = true; *hasResultType = true; break; + case SpvOpMatrixTimesVector: *hasResult = true; *hasResultType = true; break; + case SpvOpMatrixTimesMatrix: *hasResult = true; *hasResultType = true; break; + case SpvOpOuterProduct: *hasResult = true; *hasResultType = true; break; + case SpvOpDot: *hasResult = true; *hasResultType = true; break; + case SpvOpIAddCarry: *hasResult = true; *hasResultType = true; break; + case SpvOpISubBorrow: *hasResult = true; *hasResultType = true; break; + case SpvOpUMulExtended: *hasResult = true; *hasResultType = true; break; + case SpvOpSMulExtended: *hasResult = true; *hasResultType = true; 
break; + case SpvOpAny: *hasResult = true; *hasResultType = true; break; + case SpvOpAll: *hasResult = true; *hasResultType = true; break; + case SpvOpIsNan: *hasResult = true; *hasResultType = true; break; + case SpvOpIsInf: *hasResult = true; *hasResultType = true; break; + case SpvOpIsFinite: *hasResult = true; *hasResultType = true; break; + case SpvOpIsNormal: *hasResult = true; *hasResultType = true; break; + case SpvOpSignBitSet: *hasResult = true; *hasResultType = true; break; + case SpvOpLessOrGreater: *hasResult = true; *hasResultType = true; break; + case SpvOpOrdered: *hasResult = true; *hasResultType = true; break; + case SpvOpUnordered: *hasResult = true; *hasResultType = true; break; + case SpvOpLogicalEqual: *hasResult = true; *hasResultType = true; break; + case SpvOpLogicalNotEqual: *hasResult = true; *hasResultType = true; break; + case SpvOpLogicalOr: *hasResult = true; *hasResultType = true; break; + case SpvOpLogicalAnd: *hasResult = true; *hasResultType = true; break; + case SpvOpLogicalNot: *hasResult = true; *hasResultType = true; break; + case SpvOpSelect: *hasResult = true; *hasResultType = true; break; + case SpvOpIEqual: *hasResult = true; *hasResultType = true; break; + case SpvOpINotEqual: *hasResult = true; *hasResultType = true; break; + case SpvOpUGreaterThan: *hasResult = true; *hasResultType = true; break; + case SpvOpSGreaterThan: *hasResult = true; *hasResultType = true; break; + case SpvOpUGreaterThanEqual: *hasResult = true; *hasResultType = true; break; + case SpvOpSGreaterThanEqual: *hasResult = true; *hasResultType = true; break; + case SpvOpULessThan: *hasResult = true; *hasResultType = true; break; + case SpvOpSLessThan: *hasResult = true; *hasResultType = true; break; + case SpvOpULessThanEqual: *hasResult = true; *hasResultType = true; break; + case SpvOpSLessThanEqual: *hasResult = true; *hasResultType = true; break; + case SpvOpFOrdEqual: *hasResult = true; *hasResultType = true; break; + case SpvOpFUnordEqual: *hasResult = true; *hasResultType = true; break; + case SpvOpFOrdNotEqual: *hasResult = true; *hasResultType = true; break; + case SpvOpFUnordNotEqual: *hasResult = true; *hasResultType = true; break; + case SpvOpFOrdLessThan: *hasResult = true; *hasResultType = true; break; + case SpvOpFUnordLessThan: *hasResult = true; *hasResultType = true; break; + case SpvOpFOrdGreaterThan: *hasResult = true; *hasResultType = true; break; + case SpvOpFUnordGreaterThan: *hasResult = true; *hasResultType = true; break; + case SpvOpFOrdLessThanEqual: *hasResult = true; *hasResultType = true; break; + case SpvOpFUnordLessThanEqual: *hasResult = true; *hasResultType = true; break; + case SpvOpFOrdGreaterThanEqual: *hasResult = true; *hasResultType = true; break; + case SpvOpFUnordGreaterThanEqual: *hasResult = true; *hasResultType = true; break; + case SpvOpShiftRightLogical: *hasResult = true; *hasResultType = true; break; + case SpvOpShiftRightArithmetic: *hasResult = true; *hasResultType = true; break; + case SpvOpShiftLeftLogical: *hasResult = true; *hasResultType = true; break; + case SpvOpBitwiseOr: *hasResult = true; *hasResultType = true; break; + case SpvOpBitwiseXor: *hasResult = true; *hasResultType = true; break; + case SpvOpBitwiseAnd: *hasResult = true; *hasResultType = true; break; + case SpvOpNot: *hasResult = true; *hasResultType = true; break; + case SpvOpBitFieldInsert: *hasResult = true; *hasResultType = true; break; + case SpvOpBitFieldSExtract: *hasResult = true; *hasResultType = true; break; + case SpvOpBitFieldUExtract: 
*hasResult = true; *hasResultType = true; break; + case SpvOpBitReverse: *hasResult = true; *hasResultType = true; break; + case SpvOpBitCount: *hasResult = true; *hasResultType = true; break; + case SpvOpDPdx: *hasResult = true; *hasResultType = true; break; + case SpvOpDPdy: *hasResult = true; *hasResultType = true; break; + case SpvOpFwidth: *hasResult = true; *hasResultType = true; break; + case SpvOpDPdxFine: *hasResult = true; *hasResultType = true; break; + case SpvOpDPdyFine: *hasResult = true; *hasResultType = true; break; + case SpvOpFwidthFine: *hasResult = true; *hasResultType = true; break; + case SpvOpDPdxCoarse: *hasResult = true; *hasResultType = true; break; + case SpvOpDPdyCoarse: *hasResult = true; *hasResultType = true; break; + case SpvOpFwidthCoarse: *hasResult = true; *hasResultType = true; break; + case SpvOpEmitVertex: *hasResult = false; *hasResultType = false; break; + case SpvOpEndPrimitive: *hasResult = false; *hasResultType = false; break; + case SpvOpEmitStreamVertex: *hasResult = false; *hasResultType = false; break; + case SpvOpEndStreamPrimitive: *hasResult = false; *hasResultType = false; break; + case SpvOpControlBarrier: *hasResult = false; *hasResultType = false; break; + case SpvOpMemoryBarrier: *hasResult = false; *hasResultType = false; break; + case SpvOpAtomicLoad: *hasResult = true; *hasResultType = true; break; + case SpvOpAtomicStore: *hasResult = false; *hasResultType = false; break; + case SpvOpAtomicExchange: *hasResult = true; *hasResultType = true; break; + case SpvOpAtomicCompareExchange: *hasResult = true; *hasResultType = true; break; + case SpvOpAtomicCompareExchangeWeak: *hasResult = true; *hasResultType = true; break; + case SpvOpAtomicIIncrement: *hasResult = true; *hasResultType = true; break; + case SpvOpAtomicIDecrement: *hasResult = true; *hasResultType = true; break; + case SpvOpAtomicIAdd: *hasResult = true; *hasResultType = true; break; + case SpvOpAtomicISub: *hasResult = true; *hasResultType = true; break; + case SpvOpAtomicSMin: *hasResult = true; *hasResultType = true; break; + case SpvOpAtomicUMin: *hasResult = true; *hasResultType = true; break; + case SpvOpAtomicSMax: *hasResult = true; *hasResultType = true; break; + case SpvOpAtomicUMax: *hasResult = true; *hasResultType = true; break; + case SpvOpAtomicAnd: *hasResult = true; *hasResultType = true; break; + case SpvOpAtomicOr: *hasResult = true; *hasResultType = true; break; + case SpvOpAtomicXor: *hasResult = true; *hasResultType = true; break; + case SpvOpPhi: *hasResult = true; *hasResultType = true; break; + case SpvOpLoopMerge: *hasResult = false; *hasResultType = false; break; + case SpvOpSelectionMerge: *hasResult = false; *hasResultType = false; break; + case SpvOpLabel: *hasResult = true; *hasResultType = false; break; + case SpvOpBranch: *hasResult = false; *hasResultType = false; break; + case SpvOpBranchConditional: *hasResult = false; *hasResultType = false; break; + case SpvOpSwitch: *hasResult = false; *hasResultType = false; break; + case SpvOpKill: *hasResult = false; *hasResultType = false; break; + case SpvOpReturn: *hasResult = false; *hasResultType = false; break; + case SpvOpReturnValue: *hasResult = false; *hasResultType = false; break; + case SpvOpUnreachable: *hasResult = false; *hasResultType = false; break; + case SpvOpLifetimeStart: *hasResult = false; *hasResultType = false; break; + case SpvOpLifetimeStop: *hasResult = false; *hasResultType = false; break; + case SpvOpGroupAsyncCopy: *hasResult = true; *hasResultType = true; break; + 
case SpvOpGroupWaitEvents: *hasResult = false; *hasResultType = false; break; + case SpvOpGroupAll: *hasResult = true; *hasResultType = true; break; + case SpvOpGroupAny: *hasResult = true; *hasResultType = true; break; + case SpvOpGroupBroadcast: *hasResult = true; *hasResultType = true; break; + case SpvOpGroupIAdd: *hasResult = true; *hasResultType = true; break; + case SpvOpGroupFAdd: *hasResult = true; *hasResultType = true; break; + case SpvOpGroupFMin: *hasResult = true; *hasResultType = true; break; + case SpvOpGroupUMin: *hasResult = true; *hasResultType = true; break; + case SpvOpGroupSMin: *hasResult = true; *hasResultType = true; break; + case SpvOpGroupFMax: *hasResult = true; *hasResultType = true; break; + case SpvOpGroupUMax: *hasResult = true; *hasResultType = true; break; + case SpvOpGroupSMax: *hasResult = true; *hasResultType = true; break; + case SpvOpReadPipe: *hasResult = true; *hasResultType = true; break; + case SpvOpWritePipe: *hasResult = true; *hasResultType = true; break; + case SpvOpReservedReadPipe: *hasResult = true; *hasResultType = true; break; + case SpvOpReservedWritePipe: *hasResult = true; *hasResultType = true; break; + case SpvOpReserveReadPipePackets: *hasResult = true; *hasResultType = true; break; + case SpvOpReserveWritePipePackets: *hasResult = true; *hasResultType = true; break; + case SpvOpCommitReadPipe: *hasResult = false; *hasResultType = false; break; + case SpvOpCommitWritePipe: *hasResult = false; *hasResultType = false; break; + case SpvOpIsValidReserveId: *hasResult = true; *hasResultType = true; break; + case SpvOpGetNumPipePackets: *hasResult = true; *hasResultType = true; break; + case SpvOpGetMaxPipePackets: *hasResult = true; *hasResultType = true; break; + case SpvOpGroupReserveReadPipePackets: *hasResult = true; *hasResultType = true; break; + case SpvOpGroupReserveWritePipePackets: *hasResult = true; *hasResultType = true; break; + case SpvOpGroupCommitReadPipe: *hasResult = false; *hasResultType = false; break; + case SpvOpGroupCommitWritePipe: *hasResult = false; *hasResultType = false; break; + case SpvOpEnqueueMarker: *hasResult = true; *hasResultType = true; break; + case SpvOpEnqueueKernel: *hasResult = true; *hasResultType = true; break; + case SpvOpGetKernelNDrangeSubGroupCount: *hasResult = true; *hasResultType = true; break; + case SpvOpGetKernelNDrangeMaxSubGroupSize: *hasResult = true; *hasResultType = true; break; + case SpvOpGetKernelWorkGroupSize: *hasResult = true; *hasResultType = true; break; + case SpvOpGetKernelPreferredWorkGroupSizeMultiple: *hasResult = true; *hasResultType = true; break; + case SpvOpRetainEvent: *hasResult = false; *hasResultType = false; break; + case SpvOpReleaseEvent: *hasResult = false; *hasResultType = false; break; + case SpvOpCreateUserEvent: *hasResult = true; *hasResultType = true; break; + case SpvOpIsValidEvent: *hasResult = true; *hasResultType = true; break; + case SpvOpSetUserEventStatus: *hasResult = false; *hasResultType = false; break; + case SpvOpCaptureEventProfilingInfo: *hasResult = false; *hasResultType = false; break; + case SpvOpGetDefaultQueue: *hasResult = true; *hasResultType = true; break; + case SpvOpBuildNDRange: *hasResult = true; *hasResultType = true; break; + case SpvOpImageSparseSampleImplicitLod: *hasResult = true; *hasResultType = true; break; + case SpvOpImageSparseSampleExplicitLod: *hasResult = true; *hasResultType = true; break; + case SpvOpImageSparseSampleDrefImplicitLod: *hasResult = true; *hasResultType = true; break; + case 
SpvOpImageSparseSampleDrefExplicitLod: *hasResult = true; *hasResultType = true; break; + case SpvOpImageSparseSampleProjImplicitLod: *hasResult = true; *hasResultType = true; break; + case SpvOpImageSparseSampleProjExplicitLod: *hasResult = true; *hasResultType = true; break; + case SpvOpImageSparseSampleProjDrefImplicitLod: *hasResult = true; *hasResultType = true; break; + case SpvOpImageSparseSampleProjDrefExplicitLod: *hasResult = true; *hasResultType = true; break; + case SpvOpImageSparseFetch: *hasResult = true; *hasResultType = true; break; + case SpvOpImageSparseGather: *hasResult = true; *hasResultType = true; break; + case SpvOpImageSparseDrefGather: *hasResult = true; *hasResultType = true; break; + case SpvOpImageSparseTexelsResident: *hasResult = true; *hasResultType = true; break; + case SpvOpNoLine: *hasResult = false; *hasResultType = false; break; + case SpvOpAtomicFlagTestAndSet: *hasResult = true; *hasResultType = true; break; + case SpvOpAtomicFlagClear: *hasResult = false; *hasResultType = false; break; + case SpvOpImageSparseRead: *hasResult = true; *hasResultType = true; break; + case SpvOpSizeOf: *hasResult = true; *hasResultType = true; break; + case SpvOpTypePipeStorage: *hasResult = true; *hasResultType = false; break; + case SpvOpConstantPipeStorage: *hasResult = true; *hasResultType = true; break; + case SpvOpCreatePipeFromPipeStorage: *hasResult = true; *hasResultType = true; break; + case SpvOpGetKernelLocalSizeForSubgroupCount: *hasResult = true; *hasResultType = true; break; + case SpvOpGetKernelMaxNumSubgroups: *hasResult = true; *hasResultType = true; break; + case SpvOpTypeNamedBarrier: *hasResult = true; *hasResultType = false; break; + case SpvOpNamedBarrierInitialize: *hasResult = true; *hasResultType = true; break; + case SpvOpMemoryNamedBarrier: *hasResult = false; *hasResultType = false; break; + case SpvOpModuleProcessed: *hasResult = false; *hasResultType = false; break; + case SpvOpExecutionModeId: *hasResult = false; *hasResultType = false; break; + case SpvOpDecorateId: *hasResult = false; *hasResultType = false; break; + case SpvOpGroupNonUniformElect: *hasResult = true; *hasResultType = true; break; + case SpvOpGroupNonUniformAll: *hasResult = true; *hasResultType = true; break; + case SpvOpGroupNonUniformAny: *hasResult = true; *hasResultType = true; break; + case SpvOpGroupNonUniformAllEqual: *hasResult = true; *hasResultType = true; break; + case SpvOpGroupNonUniformBroadcast: *hasResult = true; *hasResultType = true; break; + case SpvOpGroupNonUniformBroadcastFirst: *hasResult = true; *hasResultType = true; break; + case SpvOpGroupNonUniformBallot: *hasResult = true; *hasResultType = true; break; + case SpvOpGroupNonUniformInverseBallot: *hasResult = true; *hasResultType = true; break; + case SpvOpGroupNonUniformBallotBitExtract: *hasResult = true; *hasResultType = true; break; + case SpvOpGroupNonUniformBallotBitCount: *hasResult = true; *hasResultType = true; break; + case SpvOpGroupNonUniformBallotFindLSB: *hasResult = true; *hasResultType = true; break; + case SpvOpGroupNonUniformBallotFindMSB: *hasResult = true; *hasResultType = true; break; + case SpvOpGroupNonUniformShuffle: *hasResult = true; *hasResultType = true; break; + case SpvOpGroupNonUniformShuffleXor: *hasResult = true; *hasResultType = true; break; + case SpvOpGroupNonUniformShuffleUp: *hasResult = true; *hasResultType = true; break; + case SpvOpGroupNonUniformShuffleDown: *hasResult = true; *hasResultType = true; break; + case SpvOpGroupNonUniformIAdd: *hasResult = 
true; *hasResultType = true; break; + case SpvOpGroupNonUniformFAdd: *hasResult = true; *hasResultType = true; break; + case SpvOpGroupNonUniformIMul: *hasResult = true; *hasResultType = true; break; + case SpvOpGroupNonUniformFMul: *hasResult = true; *hasResultType = true; break; + case SpvOpGroupNonUniformSMin: *hasResult = true; *hasResultType = true; break; + case SpvOpGroupNonUniformUMin: *hasResult = true; *hasResultType = true; break; + case SpvOpGroupNonUniformFMin: *hasResult = true; *hasResultType = true; break; + case SpvOpGroupNonUniformSMax: *hasResult = true; *hasResultType = true; break; + case SpvOpGroupNonUniformUMax: *hasResult = true; *hasResultType = true; break; + case SpvOpGroupNonUniformFMax: *hasResult = true; *hasResultType = true; break; + case SpvOpGroupNonUniformBitwiseAnd: *hasResult = true; *hasResultType = true; break; + case SpvOpGroupNonUniformBitwiseOr: *hasResult = true; *hasResultType = true; break; + case SpvOpGroupNonUniformBitwiseXor: *hasResult = true; *hasResultType = true; break; + case SpvOpGroupNonUniformLogicalAnd: *hasResult = true; *hasResultType = true; break; + case SpvOpGroupNonUniformLogicalOr: *hasResult = true; *hasResultType = true; break; + case SpvOpGroupNonUniformLogicalXor: *hasResult = true; *hasResultType = true; break; + case SpvOpGroupNonUniformQuadBroadcast: *hasResult = true; *hasResultType = true; break; + case SpvOpGroupNonUniformQuadSwap: *hasResult = true; *hasResultType = true; break; + case SpvOpCopyLogical: *hasResult = true; *hasResultType = true; break; + case SpvOpPtrEqual: *hasResult = true; *hasResultType = true; break; + case SpvOpPtrNotEqual: *hasResult = true; *hasResultType = true; break; + case SpvOpPtrDiff: *hasResult = true; *hasResultType = true; break; + case SpvOpSubgroupBallotKHR: *hasResult = true; *hasResultType = true; break; + case SpvOpSubgroupFirstInvocationKHR: *hasResult = true; *hasResultType = true; break; + case SpvOpSubgroupAllKHR: *hasResult = true; *hasResultType = true; break; + case SpvOpSubgroupAnyKHR: *hasResult = true; *hasResultType = true; break; + case SpvOpSubgroupAllEqualKHR: *hasResult = true; *hasResultType = true; break; + case SpvOpSubgroupReadInvocationKHR: *hasResult = true; *hasResultType = true; break; + case SpvOpGroupIAddNonUniformAMD: *hasResult = true; *hasResultType = true; break; + case SpvOpGroupFAddNonUniformAMD: *hasResult = true; *hasResultType = true; break; + case SpvOpGroupFMinNonUniformAMD: *hasResult = true; *hasResultType = true; break; + case SpvOpGroupUMinNonUniformAMD: *hasResult = true; *hasResultType = true; break; + case SpvOpGroupSMinNonUniformAMD: *hasResult = true; *hasResultType = true; break; + case SpvOpGroupFMaxNonUniformAMD: *hasResult = true; *hasResultType = true; break; + case SpvOpGroupUMaxNonUniformAMD: *hasResult = true; *hasResultType = true; break; + case SpvOpGroupSMaxNonUniformAMD: *hasResult = true; *hasResultType = true; break; + case SpvOpFragmentMaskFetchAMD: *hasResult = true; *hasResultType = true; break; + case SpvOpFragmentFetchAMD: *hasResult = true; *hasResultType = true; break; + case SpvOpReadClockKHR: *hasResult = true; *hasResultType = true; break; + case SpvOpImageSampleFootprintNV: *hasResult = true; *hasResultType = true; break; + case SpvOpGroupNonUniformPartitionNV: *hasResult = true; *hasResultType = true; break; + case SpvOpWritePackedPrimitiveIndices4x8NV: *hasResult = false; *hasResultType = false; break; + case SpvOpReportIntersectionNV: *hasResult = true; *hasResultType = true; break; + case 
SpvOpIgnoreIntersectionNV: *hasResult = false; *hasResultType = false; break; + case SpvOpTerminateRayNV: *hasResult = false; *hasResultType = false; break; + case SpvOpTraceNV: *hasResult = false; *hasResultType = false; break; + case SpvOpTypeAccelerationStructureNV: *hasResult = true; *hasResultType = false; break; + case SpvOpExecuteCallableNV: *hasResult = false; *hasResultType = false; break; + case SpvOpTypeCooperativeMatrixNV: *hasResult = true; *hasResultType = false; break; + case SpvOpCooperativeMatrixLoadNV: *hasResult = true; *hasResultType = true; break; + case SpvOpCooperativeMatrixStoreNV: *hasResult = false; *hasResultType = false; break; + case SpvOpCooperativeMatrixMulAddNV: *hasResult = true; *hasResultType = true; break; + case SpvOpCooperativeMatrixLengthNV: *hasResult = true; *hasResultType = true; break; + case SpvOpBeginInvocationInterlockEXT: *hasResult = false; *hasResultType = false; break; + case SpvOpEndInvocationInterlockEXT: *hasResult = false; *hasResultType = false; break; + case SpvOpDemoteToHelperInvocationEXT: *hasResult = false; *hasResultType = false; break; + case SpvOpIsHelperInvocationEXT: *hasResult = true; *hasResultType = true; break; + case SpvOpSubgroupShuffleINTEL: *hasResult = true; *hasResultType = true; break; + case SpvOpSubgroupShuffleDownINTEL: *hasResult = true; *hasResultType = true; break; + case SpvOpSubgroupShuffleUpINTEL: *hasResult = true; *hasResultType = true; break; + case SpvOpSubgroupShuffleXorINTEL: *hasResult = true; *hasResultType = true; break; + case SpvOpSubgroupBlockReadINTEL: *hasResult = true; *hasResultType = true; break; + case SpvOpSubgroupBlockWriteINTEL: *hasResult = false; *hasResultType = false; break; + case SpvOpSubgroupImageBlockReadINTEL: *hasResult = true; *hasResultType = true; break; + case SpvOpSubgroupImageBlockWriteINTEL: *hasResult = false; *hasResultType = false; break; + case SpvOpSubgroupImageMediaBlockReadINTEL: *hasResult = true; *hasResultType = true; break; + case SpvOpSubgroupImageMediaBlockWriteINTEL: *hasResult = false; *hasResultType = false; break; + case SpvOpUCountLeadingZerosINTEL: *hasResult = true; *hasResultType = true; break; + case SpvOpUCountTrailingZerosINTEL: *hasResult = true; *hasResultType = true; break; + case SpvOpAbsISubINTEL: *hasResult = true; *hasResultType = true; break; + case SpvOpAbsUSubINTEL: *hasResult = true; *hasResultType = true; break; + case SpvOpIAddSatINTEL: *hasResult = true; *hasResultType = true; break; + case SpvOpUAddSatINTEL: *hasResult = true; *hasResultType = true; break; + case SpvOpIAverageINTEL: *hasResult = true; *hasResultType = true; break; + case SpvOpUAverageINTEL: *hasResult = true; *hasResultType = true; break; + case SpvOpIAverageRoundedINTEL: *hasResult = true; *hasResultType = true; break; + case SpvOpUAverageRoundedINTEL: *hasResult = true; *hasResultType = true; break; + case SpvOpISubSatINTEL: *hasResult = true; *hasResultType = true; break; + case SpvOpUSubSatINTEL: *hasResult = true; *hasResultType = true; break; + case SpvOpIMul32x16INTEL: *hasResult = true; *hasResultType = true; break; + case SpvOpUMul32x16INTEL: *hasResult = true; *hasResultType = true; break; + case SpvOpDecorateString: *hasResult = false; *hasResultType = false; break; + case SpvOpMemberDecorateString: *hasResult = false; *hasResultType = false; break; + case SpvOpVmeImageINTEL: *hasResult = true; *hasResultType = true; break; + case SpvOpTypeVmeImageINTEL: *hasResult = true; *hasResultType = false; break; + case SpvOpTypeAvcImePayloadINTEL: *hasResult = 
true; *hasResultType = false; break; + case SpvOpTypeAvcRefPayloadINTEL: *hasResult = true; *hasResultType = false; break; + case SpvOpTypeAvcSicPayloadINTEL: *hasResult = true; *hasResultType = false; break; + case SpvOpTypeAvcMcePayloadINTEL: *hasResult = true; *hasResultType = false; break; + case SpvOpTypeAvcMceResultINTEL: *hasResult = true; *hasResultType = false; break; + case SpvOpTypeAvcImeResultINTEL: *hasResult = true; *hasResultType = false; break; + case SpvOpTypeAvcImeResultSingleReferenceStreamoutINTEL: *hasResult = true; *hasResultType = false; break; + case SpvOpTypeAvcImeResultDualReferenceStreamoutINTEL: *hasResult = true; *hasResultType = false; break; + case SpvOpTypeAvcImeSingleReferenceStreaminINTEL: *hasResult = true; *hasResultType = false; break; + case SpvOpTypeAvcImeDualReferenceStreaminINTEL: *hasResult = true; *hasResultType = false; break; + case SpvOpTypeAvcRefResultINTEL: *hasResult = true; *hasResultType = false; break; + case SpvOpTypeAvcSicResultINTEL: *hasResult = true; *hasResultType = false; break; + case SpvOpSubgroupAvcMceGetDefaultInterBaseMultiReferencePenaltyINTEL: *hasResult = true; *hasResultType = true; break; + case SpvOpSubgroupAvcMceSetInterBaseMultiReferencePenaltyINTEL: *hasResult = true; *hasResultType = true; break; + case SpvOpSubgroupAvcMceGetDefaultInterShapePenaltyINTEL: *hasResult = true; *hasResultType = true; break; + case SpvOpSubgroupAvcMceSetInterShapePenaltyINTEL: *hasResult = true; *hasResultType = true; break; + case SpvOpSubgroupAvcMceGetDefaultInterDirectionPenaltyINTEL: *hasResult = true; *hasResultType = true; break; + case SpvOpSubgroupAvcMceSetInterDirectionPenaltyINTEL: *hasResult = true; *hasResultType = true; break; + case SpvOpSubgroupAvcMceGetDefaultIntraLumaShapePenaltyINTEL: *hasResult = true; *hasResultType = true; break; + case SpvOpSubgroupAvcMceGetDefaultInterMotionVectorCostTableINTEL: *hasResult = true; *hasResultType = true; break; + case SpvOpSubgroupAvcMceGetDefaultHighPenaltyCostTableINTEL: *hasResult = true; *hasResultType = true; break; + case SpvOpSubgroupAvcMceGetDefaultMediumPenaltyCostTableINTEL: *hasResult = true; *hasResultType = true; break; + case SpvOpSubgroupAvcMceGetDefaultLowPenaltyCostTableINTEL: *hasResult = true; *hasResultType = true; break; + case SpvOpSubgroupAvcMceSetMotionVectorCostFunctionINTEL: *hasResult = true; *hasResultType = true; break; + case SpvOpSubgroupAvcMceGetDefaultIntraLumaModePenaltyINTEL: *hasResult = true; *hasResultType = true; break; + case SpvOpSubgroupAvcMceGetDefaultNonDcLumaIntraPenaltyINTEL: *hasResult = true; *hasResultType = true; break; + case SpvOpSubgroupAvcMceGetDefaultIntraChromaModeBasePenaltyINTEL: *hasResult = true; *hasResultType = true; break; + case SpvOpSubgroupAvcMceSetAcOnlyHaarINTEL: *hasResult = true; *hasResultType = true; break; + case SpvOpSubgroupAvcMceSetSourceInterlacedFieldPolarityINTEL: *hasResult = true; *hasResultType = true; break; + case SpvOpSubgroupAvcMceSetSingleReferenceInterlacedFieldPolarityINTEL: *hasResult = true; *hasResultType = true; break; + case SpvOpSubgroupAvcMceSetDualReferenceInterlacedFieldPolaritiesINTEL: *hasResult = true; *hasResultType = true; break; + case SpvOpSubgroupAvcMceConvertToImePayloadINTEL: *hasResult = true; *hasResultType = true; break; + case SpvOpSubgroupAvcMceConvertToImeResultINTEL: *hasResult = true; *hasResultType = true; break; + case SpvOpSubgroupAvcMceConvertToRefPayloadINTEL: *hasResult = true; *hasResultType = true; break; + case SpvOpSubgroupAvcMceConvertToRefResultINTEL: 
*hasResult = true; *hasResultType = true; break; + case SpvOpSubgroupAvcMceConvertToSicPayloadINTEL: *hasResult = true; *hasResultType = true; break; + case SpvOpSubgroupAvcMceConvertToSicResultINTEL: *hasResult = true; *hasResultType = true; break; + case SpvOpSubgroupAvcMceGetMotionVectorsINTEL: *hasResult = true; *hasResultType = true; break; + case SpvOpSubgroupAvcMceGetInterDistortionsINTEL: *hasResult = true; *hasResultType = true; break; + case SpvOpSubgroupAvcMceGetBestInterDistortionsINTEL: *hasResult = true; *hasResultType = true; break; + case SpvOpSubgroupAvcMceGetInterMajorShapeINTEL: *hasResult = true; *hasResultType = true; break; + case SpvOpSubgroupAvcMceGetInterMinorShapeINTEL: *hasResult = true; *hasResultType = true; break; + case SpvOpSubgroupAvcMceGetInterDirectionsINTEL: *hasResult = true; *hasResultType = true; break; + case SpvOpSubgroupAvcMceGetInterMotionVectorCountINTEL: *hasResult = true; *hasResultType = true; break; + case SpvOpSubgroupAvcMceGetInterReferenceIdsINTEL: *hasResult = true; *hasResultType = true; break; + case SpvOpSubgroupAvcMceGetInterReferenceInterlacedFieldPolaritiesINTEL: *hasResult = true; *hasResultType = true; break; + case SpvOpSubgroupAvcImeInitializeINTEL: *hasResult = true; *hasResultType = true; break; + case SpvOpSubgroupAvcImeSetSingleReferenceINTEL: *hasResult = true; *hasResultType = true; break; + case SpvOpSubgroupAvcImeSetDualReferenceINTEL: *hasResult = true; *hasResultType = true; break; + case SpvOpSubgroupAvcImeRefWindowSizeINTEL: *hasResult = true; *hasResultType = true; break; + case SpvOpSubgroupAvcImeAdjustRefOffsetINTEL: *hasResult = true; *hasResultType = true; break; + case SpvOpSubgroupAvcImeConvertToMcePayloadINTEL: *hasResult = true; *hasResultType = true; break; + case SpvOpSubgroupAvcImeSetMaxMotionVectorCountINTEL: *hasResult = true; *hasResultType = true; break; + case SpvOpSubgroupAvcImeSetUnidirectionalMixDisableINTEL: *hasResult = true; *hasResultType = true; break; + case SpvOpSubgroupAvcImeSetEarlySearchTerminationThresholdINTEL: *hasResult = true; *hasResultType = true; break; + case SpvOpSubgroupAvcImeSetWeightedSadINTEL: *hasResult = true; *hasResultType = true; break; + case SpvOpSubgroupAvcImeEvaluateWithSingleReferenceINTEL: *hasResult = true; *hasResultType = true; break; + case SpvOpSubgroupAvcImeEvaluateWithDualReferenceINTEL: *hasResult = true; *hasResultType = true; break; + case SpvOpSubgroupAvcImeEvaluateWithSingleReferenceStreaminINTEL: *hasResult = true; *hasResultType = true; break; + case SpvOpSubgroupAvcImeEvaluateWithDualReferenceStreaminINTEL: *hasResult = true; *hasResultType = true; break; + case SpvOpSubgroupAvcImeEvaluateWithSingleReferenceStreamoutINTEL: *hasResult = true; *hasResultType = true; break; + case SpvOpSubgroupAvcImeEvaluateWithDualReferenceStreamoutINTEL: *hasResult = true; *hasResultType = true; break; + case SpvOpSubgroupAvcImeEvaluateWithSingleReferenceStreaminoutINTEL: *hasResult = true; *hasResultType = true; break; + case SpvOpSubgroupAvcImeEvaluateWithDualReferenceStreaminoutINTEL: *hasResult = true; *hasResultType = true; break; + case SpvOpSubgroupAvcImeConvertToMceResultINTEL: *hasResult = true; *hasResultType = true; break; + case SpvOpSubgroupAvcImeGetSingleReferenceStreaminINTEL: *hasResult = true; *hasResultType = true; break; + case SpvOpSubgroupAvcImeGetDualReferenceStreaminINTEL: *hasResult = true; *hasResultType = true; break; + case SpvOpSubgroupAvcImeStripSingleReferenceStreamoutINTEL: *hasResult = true; *hasResultType = true; break; + case 
SpvOpSubgroupAvcImeStripDualReferenceStreamoutINTEL: *hasResult = true; *hasResultType = true; break; + case SpvOpSubgroupAvcImeGetStreamoutSingleReferenceMajorShapeMotionVectorsINTEL: *hasResult = true; *hasResultType = true; break; + case SpvOpSubgroupAvcImeGetStreamoutSingleReferenceMajorShapeDistortionsINTEL: *hasResult = true; *hasResultType = true; break; + case SpvOpSubgroupAvcImeGetStreamoutSingleReferenceMajorShapeReferenceIdsINTEL: *hasResult = true; *hasResultType = true; break; + case SpvOpSubgroupAvcImeGetStreamoutDualReferenceMajorShapeMotionVectorsINTEL: *hasResult = true; *hasResultType = true; break; + case SpvOpSubgroupAvcImeGetStreamoutDualReferenceMajorShapeDistortionsINTEL: *hasResult = true; *hasResultType = true; break; + case SpvOpSubgroupAvcImeGetStreamoutDualReferenceMajorShapeReferenceIdsINTEL: *hasResult = true; *hasResultType = true; break; + case SpvOpSubgroupAvcImeGetBorderReachedINTEL: *hasResult = true; *hasResultType = true; break; + case SpvOpSubgroupAvcImeGetTruncatedSearchIndicationINTEL: *hasResult = true; *hasResultType = true; break; + case SpvOpSubgroupAvcImeGetUnidirectionalEarlySearchTerminationINTEL: *hasResult = true; *hasResultType = true; break; + case SpvOpSubgroupAvcImeGetWeightingPatternMinimumMotionVectorINTEL: *hasResult = true; *hasResultType = true; break; + case SpvOpSubgroupAvcImeGetWeightingPatternMinimumDistortionINTEL: *hasResult = true; *hasResultType = true; break; + case SpvOpSubgroupAvcFmeInitializeINTEL: *hasResult = true; *hasResultType = true; break; + case SpvOpSubgroupAvcBmeInitializeINTEL: *hasResult = true; *hasResultType = true; break; + case SpvOpSubgroupAvcRefConvertToMcePayloadINTEL: *hasResult = true; *hasResultType = true; break; + case SpvOpSubgroupAvcRefSetBidirectionalMixDisableINTEL: *hasResult = true; *hasResultType = true; break; + case SpvOpSubgroupAvcRefSetBilinearFilterEnableINTEL: *hasResult = true; *hasResultType = true; break; + case SpvOpSubgroupAvcRefEvaluateWithSingleReferenceINTEL: *hasResult = true; *hasResultType = true; break; + case SpvOpSubgroupAvcRefEvaluateWithDualReferenceINTEL: *hasResult = true; *hasResultType = true; break; + case SpvOpSubgroupAvcRefEvaluateWithMultiReferenceINTEL: *hasResult = true; *hasResultType = true; break; + case SpvOpSubgroupAvcRefEvaluateWithMultiReferenceInterlacedINTEL: *hasResult = true; *hasResultType = true; break; + case SpvOpSubgroupAvcRefConvertToMceResultINTEL: *hasResult = true; *hasResultType = true; break; + case SpvOpSubgroupAvcSicInitializeINTEL: *hasResult = true; *hasResultType = true; break; + case SpvOpSubgroupAvcSicConfigureSkcINTEL: *hasResult = true; *hasResultType = true; break; + case SpvOpSubgroupAvcSicConfigureIpeLumaINTEL: *hasResult = true; *hasResultType = true; break; + case SpvOpSubgroupAvcSicConfigureIpeLumaChromaINTEL: *hasResult = true; *hasResultType = true; break; + case SpvOpSubgroupAvcSicGetMotionVectorMaskINTEL: *hasResult = true; *hasResultType = true; break; + case SpvOpSubgroupAvcSicConvertToMcePayloadINTEL: *hasResult = true; *hasResultType = true; break; + case SpvOpSubgroupAvcSicSetIntraLumaShapePenaltyINTEL: *hasResult = true; *hasResultType = true; break; + case SpvOpSubgroupAvcSicSetIntraLumaModeCostFunctionINTEL: *hasResult = true; *hasResultType = true; break; + case SpvOpSubgroupAvcSicSetIntraChromaModeCostFunctionINTEL: *hasResult = true; *hasResultType = true; break; + case SpvOpSubgroupAvcSicSetBilinearFilterEnableINTEL: *hasResult = true; *hasResultType = true; break; + case 
SpvOpSubgroupAvcSicSetSkcForwardTransformEnableINTEL: *hasResult = true; *hasResultType = true; break; + case SpvOpSubgroupAvcSicSetBlockBasedRawSkipSadINTEL: *hasResult = true; *hasResultType = true; break; + case SpvOpSubgroupAvcSicEvaluateIpeINTEL: *hasResult = true; *hasResultType = true; break; + case SpvOpSubgroupAvcSicEvaluateWithSingleReferenceINTEL: *hasResult = true; *hasResultType = true; break; + case SpvOpSubgroupAvcSicEvaluateWithDualReferenceINTEL: *hasResult = true; *hasResultType = true; break; + case SpvOpSubgroupAvcSicEvaluateWithMultiReferenceINTEL: *hasResult = true; *hasResultType = true; break; + case SpvOpSubgroupAvcSicEvaluateWithMultiReferenceInterlacedINTEL: *hasResult = true; *hasResultType = true; break; + case SpvOpSubgroupAvcSicConvertToMceResultINTEL: *hasResult = true; *hasResultType = true; break; + case SpvOpSubgroupAvcSicGetIpeLumaShapeINTEL: *hasResult = true; *hasResultType = true; break; + case SpvOpSubgroupAvcSicGetBestIpeLumaDistortionINTEL: *hasResult = true; *hasResultType = true; break; + case SpvOpSubgroupAvcSicGetBestIpeChromaDistortionINTEL: *hasResult = true; *hasResultType = true; break; + case SpvOpSubgroupAvcSicGetPackedIpeLumaModesINTEL: *hasResult = true; *hasResultType = true; break; + case SpvOpSubgroupAvcSicGetIpeChromaModeINTEL: *hasResult = true; *hasResultType = true; break; + case SpvOpSubgroupAvcSicGetPackedSkcLumaCountThresholdINTEL: *hasResult = true; *hasResultType = true; break; + case SpvOpSubgroupAvcSicGetPackedSkcLumaSumThresholdINTEL: *hasResult = true; *hasResultType = true; break; + case SpvOpSubgroupAvcSicGetInterRawSadsINTEL: *hasResult = true; *hasResultType = true; break; + } +} +#endif + +#endif +
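The `SpvHasResultAndType` table that closes spirv.h above is only compiled when `SPV_ENABLE_UTILITY_CODE` is defined before the header is included. As a rough sketch of how a consumer could use it, the fragment below walks a SPIR-V word stream and prints each instruction's result ID; `dump_result_ids` is a hypothetical helper, and the `SpvMagicNumber`, `SpvWordCountShift`, and `SpvOpCodeMask` constants are assumed to come from the same generated header (they mirror the `MagicNumber`/`WordCountShift`/`OpCodeMask` values visible in the spirv.hpp hunk that follows). It leans on the standard SPIR-V encoding: the first word of each instruction packs the word count in its high 16 bits and the opcode in its low 16, and the result ID, when present, follows the optional result-type ID.

#define SPV_ENABLE_UTILITY_CODE
#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>
#include "spirv.h"

/* Walk a complete SPIR-V module (5-word header + instructions) and
   print the result ID of every instruction that defines one. */
static void dump_result_ids(const uint32_t *words, size_t count) {
  if (count < 5 || words[0] != SpvMagicNumber) return;
  size_t i = 5;
  while (i < count) {
    uint32_t wordCount = words[i] >> SpvWordCountShift;
    if (wordCount == 0 || i + wordCount > count) break; /* malformed stream */
    SpvOp opcode = (SpvOp)(words[i] & SpvOpCodeMask);
    bool hasResult, hasResultType;
    SpvHasResultAndType(opcode, &hasResult, &hasResultType);
    if (hasResult) /* result ID sits after the result-type ID, if any */
      printf("opcode %u -> %%%u\n", (unsigned)opcode,
             words[i + (hasResultType ? 2 : 1)]);
    i += wordCount;
  }
}

The spirv.hpp introduced next carries the same enumerations inside `namespace spv`, so equivalent code can be written against the C++ names.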
diff --git a/third_party/rust/spirv_cross/src/vendor/SPIRV-Cross/spirv.hpp b/third_party/rust/spirv_cross/src/vendor/SPIRV-Cross/spirv.hpp new file mode 100644 index 000000000000..127e334c24c9 --- /dev/null +++ b/third_party/rust/spirv_cross/src/vendor/SPIRV-Cross/spirv.hpp @@ -0,0 +1,1981 @@ + [Khronos copyright/permission comment block elided by the extraction] +#ifndef spirv_HPP +#define spirv_HPP + +namespace spv { + +typedef unsigned int Id; + +#define SPV_VERSION 0x10500 +#define SPV_REVISION 1 + +static const unsigned int MagicNumber = 0x07230203; +static const unsigned int Version = 0x00010400; +static const unsigned int Revision = 1; +static const unsigned int OpCodeMask = 0xffff; +static const unsigned int WordCountShift = 16; + +enum SourceLanguage { + SourceLanguageUnknown = 0, + SourceLanguageESSL = 1, + SourceLanguageGLSL = 2, + SourceLanguageOpenCL_C = 3, + SourceLanguageOpenCL_CPP = 4, + SourceLanguageHLSL = 5, + SourceLanguageMax = 0x7fffffff, +}; + +enum ExecutionModel { + ExecutionModelVertex = 0, + ExecutionModelTessellationControl = 1, + ExecutionModelTessellationEvaluation = 2, + ExecutionModelGeometry = 3, + ExecutionModelFragment = 4, + ExecutionModelGLCompute = 5, + ExecutionModelKernel = 6, + ExecutionModelTaskNV = 5267, + ExecutionModelMeshNV = 5268, + ExecutionModelRayGenerationNV = 5313, + ExecutionModelIntersectionNV = 5314, + ExecutionModelAnyHitNV = 5315, + ExecutionModelClosestHitNV = 5316, + ExecutionModelMissNV = 5317, + ExecutionModelCallableNV = 5318, + ExecutionModelMax = 0x7fffffff, +}; + +enum AddressingModel { + AddressingModelLogical = 0, + AddressingModelPhysical32 = 1, + AddressingModelPhysical64 = 2, + AddressingModelPhysicalStorageBuffer64 = 5348, + AddressingModelPhysicalStorageBuffer64EXT = 5348, + AddressingModelMax = 0x7fffffff, +}; + +enum MemoryModel { + MemoryModelSimple = 0, + MemoryModelGLSL450 = 1, + MemoryModelOpenCL = 2, + MemoryModelVulkan = 3, + MemoryModelVulkanKHR = 3, + MemoryModelMax = 0x7fffffff, +}; + +enum ExecutionMode { + ExecutionModeInvocations = 0, + ExecutionModeSpacingEqual = 1, + ExecutionModeSpacingFractionalEven = 2, + ExecutionModeSpacingFractionalOdd = 3, + ExecutionModeVertexOrderCw = 4, + ExecutionModeVertexOrderCcw = 5, + ExecutionModePixelCenterInteger = 6, + ExecutionModeOriginUpperLeft = 7, + ExecutionModeOriginLowerLeft = 8, + ExecutionModeEarlyFragmentTests = 9, + ExecutionModePointMode = 10, + ExecutionModeXfb = 11, + ExecutionModeDepthReplacing = 12, + ExecutionModeDepthGreater = 14, + ExecutionModeDepthLess = 15, + ExecutionModeDepthUnchanged = 16, + ExecutionModeLocalSize = 17, + ExecutionModeLocalSizeHint = 18, + ExecutionModeInputPoints = 19, + ExecutionModeInputLines = 20, + ExecutionModeInputLinesAdjacency = 21, + ExecutionModeTriangles = 22, + ExecutionModeInputTrianglesAdjacency = 23, + ExecutionModeQuads = 24, + ExecutionModeIsolines = 25, + ExecutionModeOutputVertices = 26, + ExecutionModeOutputPoints = 27, + ExecutionModeOutputLineStrip = 28, + ExecutionModeOutputTriangleStrip = 29, + ExecutionModeVecTypeHint = 30, + ExecutionModeContractionOff = 31, + ExecutionModeInitializer = 33, + ExecutionModeFinalizer = 34, + ExecutionModeSubgroupSize = 35, + ExecutionModeSubgroupsPerWorkgroup = 36, + ExecutionModeSubgroupsPerWorkgroupId = 37, + ExecutionModeLocalSizeId = 38, + ExecutionModeLocalSizeHintId = 39, + ExecutionModePostDepthCoverage = 4446, + ExecutionModeDenormPreserve = 4459, + ExecutionModeDenormFlushToZero = 4460, + ExecutionModeSignedZeroInfNanPreserve = 4461, + ExecutionModeRoundingModeRTE = 4462, + ExecutionModeRoundingModeRTZ = 4463, + ExecutionModeStencilRefReplacingEXT = 5027, + ExecutionModeOutputLinesNV = 5269, + ExecutionModeOutputPrimitivesNV = 5270, + ExecutionModeDerivativeGroupQuadsNV = 5289, + ExecutionModeDerivativeGroupLinearNV = 5290, + ExecutionModeOutputTrianglesNV = 5298, + ExecutionModePixelInterlockOrderedEXT = 5366, + ExecutionModePixelInterlockUnorderedEXT = 5367, + ExecutionModeSampleInterlockOrderedEXT = 5368, + ExecutionModeSampleInterlockUnorderedEXT = 5369, + ExecutionModeShadingRateInterlockOrderedEXT = 5370, + ExecutionModeShadingRateInterlockUnorderedEXT = 5371, + ExecutionModeMax = 0x7fffffff, +}; + +enum StorageClass { + StorageClassUniformConstant = 0, + StorageClassInput = 1, + StorageClassUniform = 2, + StorageClassOutput = 3, + StorageClassWorkgroup = 4, + StorageClassCrossWorkgroup = 5, + StorageClassPrivate = 6, + StorageClassFunction = 7, + StorageClassGeneric = 8, + StorageClassPushConstant = 9, + StorageClassAtomicCounter = 10, + StorageClassImage = 11, + StorageClassStorageBuffer = 12, + StorageClassCallableDataNV = 5328, + StorageClassIncomingCallableDataNV = 5329, + StorageClassRayPayloadNV = 5338, + StorageClassHitAttributeNV = 5339, + StorageClassIncomingRayPayloadNV = 5342, + StorageClassShaderRecordBufferNV = 5343, + StorageClassPhysicalStorageBuffer = 5349, + StorageClassPhysicalStorageBufferEXT = 5349, + StorageClassMax = 0x7fffffff, +}; + +enum Dim { + Dim1D = 0, + Dim2D = 1, + Dim3D = 2, + DimCube = 3, + DimRect = 4, + DimBuffer = 5, + DimSubpassData = 6, + DimMax = 0x7fffffff, +}; + +enum SamplerAddressingMode { + SamplerAddressingModeNone = 0, + SamplerAddressingModeClampToEdge = 1, + SamplerAddressingModeClamp = 2, + SamplerAddressingModeRepeat = 3, + SamplerAddressingModeRepeatMirrored = 4, + SamplerAddressingModeMax = 0x7fffffff,
+}; + +enum SamplerFilterMode { + SamplerFilterModeNearest = 0, + SamplerFilterModeLinear = 1, + SamplerFilterModeMax = 0x7fffffff, +}; + +enum ImageFormat { + ImageFormatUnknown = 0, + ImageFormatRgba32f = 1, + ImageFormatRgba16f = 2, + ImageFormatR32f = 3, + ImageFormatRgba8 = 4, + ImageFormatRgba8Snorm = 5, + ImageFormatRg32f = 6, + ImageFormatRg16f = 7, + ImageFormatR11fG11fB10f = 8, + ImageFormatR16f = 9, + ImageFormatRgba16 = 10, + ImageFormatRgb10A2 = 11, + ImageFormatRg16 = 12, + ImageFormatRg8 = 13, + ImageFormatR16 = 14, + ImageFormatR8 = 15, + ImageFormatRgba16Snorm = 16, + ImageFormatRg16Snorm = 17, + ImageFormatRg8Snorm = 18, + ImageFormatR16Snorm = 19, + ImageFormatR8Snorm = 20, + ImageFormatRgba32i = 21, + ImageFormatRgba16i = 22, + ImageFormatRgba8i = 23, + ImageFormatR32i = 24, + ImageFormatRg32i = 25, + ImageFormatRg16i = 26, + ImageFormatRg8i = 27, + ImageFormatR16i = 28, + ImageFormatR8i = 29, + ImageFormatRgba32ui = 30, + ImageFormatRgba16ui = 31, + ImageFormatRgba8ui = 32, + ImageFormatR32ui = 33, + ImageFormatRgb10a2ui = 34, + ImageFormatRg32ui = 35, + ImageFormatRg16ui = 36, + ImageFormatRg8ui = 37, + ImageFormatR16ui = 38, + ImageFormatR8ui = 39, + ImageFormatMax = 0x7fffffff, +}; + +enum ImageChannelOrder { + ImageChannelOrderR = 0, + ImageChannelOrderA = 1, + ImageChannelOrderRG = 2, + ImageChannelOrderRA = 3, + ImageChannelOrderRGB = 4, + ImageChannelOrderRGBA = 5, + ImageChannelOrderBGRA = 6, + ImageChannelOrderARGB = 7, + ImageChannelOrderIntensity = 8, + ImageChannelOrderLuminance = 9, + ImageChannelOrderRx = 10, + ImageChannelOrderRGx = 11, + ImageChannelOrderRGBx = 12, + ImageChannelOrderDepth = 13, + ImageChannelOrderDepthStencil = 14, + ImageChannelOrdersRGB = 15, + ImageChannelOrdersRGBx = 16, + ImageChannelOrdersRGBA = 17, + ImageChannelOrdersBGRA = 18, + ImageChannelOrderABGR = 19, + ImageChannelOrderMax = 0x7fffffff, +}; + +enum ImageChannelDataType { + ImageChannelDataTypeSnormInt8 = 0, + ImageChannelDataTypeSnormInt16 = 1, + ImageChannelDataTypeUnormInt8 = 2, + ImageChannelDataTypeUnormInt16 = 3, + ImageChannelDataTypeUnormShort565 = 4, + ImageChannelDataTypeUnormShort555 = 5, + ImageChannelDataTypeUnormInt101010 = 6, + ImageChannelDataTypeSignedInt8 = 7, + ImageChannelDataTypeSignedInt16 = 8, + ImageChannelDataTypeSignedInt32 = 9, + ImageChannelDataTypeUnsignedInt8 = 10, + ImageChannelDataTypeUnsignedInt16 = 11, + ImageChannelDataTypeUnsignedInt32 = 12, + ImageChannelDataTypeHalfFloat = 13, + ImageChannelDataTypeFloat = 14, + ImageChannelDataTypeUnormInt24 = 15, + ImageChannelDataTypeUnormInt101010_2 = 16, + ImageChannelDataTypeMax = 0x7fffffff, +}; + +enum ImageOperandsShift { + ImageOperandsBiasShift = 0, + ImageOperandsLodShift = 1, + ImageOperandsGradShift = 2, + ImageOperandsConstOffsetShift = 3, + ImageOperandsOffsetShift = 4, + ImageOperandsConstOffsetsShift = 5, + ImageOperandsSampleShift = 6, + ImageOperandsMinLodShift = 7, + ImageOperandsMakeTexelAvailableShift = 8, + ImageOperandsMakeTexelAvailableKHRShift = 8, + ImageOperandsMakeTexelVisibleShift = 9, + ImageOperandsMakeTexelVisibleKHRShift = 9, + ImageOperandsNonPrivateTexelShift = 10, + ImageOperandsNonPrivateTexelKHRShift = 10, + ImageOperandsVolatileTexelShift = 11, + ImageOperandsVolatileTexelKHRShift = 11, + ImageOperandsSignExtendShift = 12, + ImageOperandsZeroExtendShift = 13, + ImageOperandsMax = 0x7fffffff, +}; + +enum ImageOperandsMask { + ImageOperandsMaskNone = 0, + ImageOperandsBiasMask = 0x00000001, + ImageOperandsLodMask = 0x00000002, + ImageOperandsGradMask = 
0x00000004, + ImageOperandsConstOffsetMask = 0x00000008, + ImageOperandsOffsetMask = 0x00000010, + ImageOperandsConstOffsetsMask = 0x00000020, + ImageOperandsSampleMask = 0x00000040, + ImageOperandsMinLodMask = 0x00000080, + ImageOperandsMakeTexelAvailableMask = 0x00000100, + ImageOperandsMakeTexelAvailableKHRMask = 0x00000100, + ImageOperandsMakeTexelVisibleMask = 0x00000200, + ImageOperandsMakeTexelVisibleKHRMask = 0x00000200, + ImageOperandsNonPrivateTexelMask = 0x00000400, + ImageOperandsNonPrivateTexelKHRMask = 0x00000400, + ImageOperandsVolatileTexelMask = 0x00000800, + ImageOperandsVolatileTexelKHRMask = 0x00000800, + ImageOperandsSignExtendMask = 0x00001000, + ImageOperandsZeroExtendMask = 0x00002000, +}; + +enum FPFastMathModeShift { + FPFastMathModeNotNaNShift = 0, + FPFastMathModeNotInfShift = 1, + FPFastMathModeNSZShift = 2, + FPFastMathModeAllowRecipShift = 3, + FPFastMathModeFastShift = 4, + FPFastMathModeMax = 0x7fffffff, +}; + +enum FPFastMathModeMask { + FPFastMathModeMaskNone = 0, + FPFastMathModeNotNaNMask = 0x00000001, + FPFastMathModeNotInfMask = 0x00000002, + FPFastMathModeNSZMask = 0x00000004, + FPFastMathModeAllowRecipMask = 0x00000008, + FPFastMathModeFastMask = 0x00000010, +}; + +enum FPRoundingMode { + FPRoundingModeRTE = 0, + FPRoundingModeRTZ = 1, + FPRoundingModeRTP = 2, + FPRoundingModeRTN = 3, + FPRoundingModeMax = 0x7fffffff, +}; + +enum LinkageType { + LinkageTypeExport = 0, + LinkageTypeImport = 1, + LinkageTypeMax = 0x7fffffff, +}; + +enum AccessQualifier { + AccessQualifierReadOnly = 0, + AccessQualifierWriteOnly = 1, + AccessQualifierReadWrite = 2, + AccessQualifierMax = 0x7fffffff, +}; + +enum FunctionParameterAttribute { + FunctionParameterAttributeZext = 0, + FunctionParameterAttributeSext = 1, + FunctionParameterAttributeByVal = 2, + FunctionParameterAttributeSret = 3, + FunctionParameterAttributeNoAlias = 4, + FunctionParameterAttributeNoCapture = 5, + FunctionParameterAttributeNoWrite = 6, + FunctionParameterAttributeNoReadWrite = 7, + FunctionParameterAttributeMax = 0x7fffffff, +}; + +enum Decoration { + DecorationRelaxedPrecision = 0, + DecorationSpecId = 1, + DecorationBlock = 2, + DecorationBufferBlock = 3, + DecorationRowMajor = 4, + DecorationColMajor = 5, + DecorationArrayStride = 6, + DecorationMatrixStride = 7, + DecorationGLSLShared = 8, + DecorationGLSLPacked = 9, + DecorationCPacked = 10, + DecorationBuiltIn = 11, + DecorationNoPerspective = 13, + DecorationFlat = 14, + DecorationPatch = 15, + DecorationCentroid = 16, + DecorationSample = 17, + DecorationInvariant = 18, + DecorationRestrict = 19, + DecorationAliased = 20, + DecorationVolatile = 21, + DecorationConstant = 22, + DecorationCoherent = 23, + DecorationNonWritable = 24, + DecorationNonReadable = 25, + DecorationUniform = 26, + DecorationUniformId = 27, + DecorationSaturatedConversion = 28, + DecorationStream = 29, + DecorationLocation = 30, + DecorationComponent = 31, + DecorationIndex = 32, + DecorationBinding = 33, + DecorationDescriptorSet = 34, + DecorationOffset = 35, + DecorationXfbBuffer = 36, + DecorationXfbStride = 37, + DecorationFuncParamAttr = 38, + DecorationFPRoundingMode = 39, + DecorationFPFastMathMode = 40, + DecorationLinkageAttributes = 41, + DecorationNoContraction = 42, + DecorationInputAttachmentIndex = 43, + DecorationAlignment = 44, + DecorationMaxByteOffset = 45, + DecorationAlignmentId = 46, + DecorationMaxByteOffsetId = 47, + DecorationNoSignedWrap = 4469, + DecorationNoUnsignedWrap = 4470, + DecorationExplicitInterpAMD = 4999, + 
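+// Decoration values outside the core 0-47 range are extension and vendor
+// additions whose numbers are assigned by the Khronos SPIR-V registry.
+// Names that share a value (e.g. DecorationNonUniform /
+// DecorationNonUniformEXT, both 5300) keep the original extension spelling
+// as an alias after promotion to core.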
DecorationOverrideCoverageNV = 5248, + DecorationPassthroughNV = 5250, + DecorationViewportRelativeNV = 5252, + DecorationSecondaryViewportRelativeNV = 5256, + DecorationPerPrimitiveNV = 5271, + DecorationPerViewNV = 5272, + DecorationPerTaskNV = 5273, + DecorationPerVertexNV = 5285, + DecorationNonUniform = 5300, + DecorationNonUniformEXT = 5300, + DecorationRestrictPointer = 5355, + DecorationRestrictPointerEXT = 5355, + DecorationAliasedPointer = 5356, + DecorationAliasedPointerEXT = 5356, + DecorationCounterBuffer = 5634, + DecorationHlslCounterBufferGOOGLE = 5634, + DecorationHlslSemanticGOOGLE = 5635, + DecorationUserSemantic = 5635, + DecorationUserTypeGOOGLE = 5636, + DecorationMax = 0x7fffffff, +}; + +enum BuiltIn { + BuiltInPosition = 0, + BuiltInPointSize = 1, + BuiltInClipDistance = 3, + BuiltInCullDistance = 4, + BuiltInVertexId = 5, + BuiltInInstanceId = 6, + BuiltInPrimitiveId = 7, + BuiltInInvocationId = 8, + BuiltInLayer = 9, + BuiltInViewportIndex = 10, + BuiltInTessLevelOuter = 11, + BuiltInTessLevelInner = 12, + BuiltInTessCoord = 13, + BuiltInPatchVertices = 14, + BuiltInFragCoord = 15, + BuiltInPointCoord = 16, + BuiltInFrontFacing = 17, + BuiltInSampleId = 18, + BuiltInSamplePosition = 19, + BuiltInSampleMask = 20, + BuiltInFragDepth = 22, + BuiltInHelperInvocation = 23, + BuiltInNumWorkgroups = 24, + BuiltInWorkgroupSize = 25, + BuiltInWorkgroupId = 26, + BuiltInLocalInvocationId = 27, + BuiltInGlobalInvocationId = 28, + BuiltInLocalInvocationIndex = 29, + BuiltInWorkDim = 30, + BuiltInGlobalSize = 31, + BuiltInEnqueuedWorkgroupSize = 32, + BuiltInGlobalOffset = 33, + BuiltInGlobalLinearId = 34, + BuiltInSubgroupSize = 36, + BuiltInSubgroupMaxSize = 37, + BuiltInNumSubgroups = 38, + BuiltInNumEnqueuedSubgroups = 39, + BuiltInSubgroupId = 40, + BuiltInSubgroupLocalInvocationId = 41, + BuiltInVertexIndex = 42, + BuiltInInstanceIndex = 43, + BuiltInSubgroupEqMask = 4416, + BuiltInSubgroupEqMaskKHR = 4416, + BuiltInSubgroupGeMask = 4417, + BuiltInSubgroupGeMaskKHR = 4417, + BuiltInSubgroupGtMask = 4418, + BuiltInSubgroupGtMaskKHR = 4418, + BuiltInSubgroupLeMask = 4419, + BuiltInSubgroupLeMaskKHR = 4419, + BuiltInSubgroupLtMask = 4420, + BuiltInSubgroupLtMaskKHR = 4420, + BuiltInBaseVertex = 4424, + BuiltInBaseInstance = 4425, + BuiltInDrawIndex = 4426, + BuiltInDeviceIndex = 4438, + BuiltInViewIndex = 4440, + BuiltInBaryCoordNoPerspAMD = 4992, + BuiltInBaryCoordNoPerspCentroidAMD = 4993, + BuiltInBaryCoordNoPerspSampleAMD = 4994, + BuiltInBaryCoordSmoothAMD = 4995, + BuiltInBaryCoordSmoothCentroidAMD = 4996, + BuiltInBaryCoordSmoothSampleAMD = 4997, + BuiltInBaryCoordPullModelAMD = 4998, + BuiltInFragStencilRefEXT = 5014, + BuiltInViewportMaskNV = 5253, + BuiltInSecondaryPositionNV = 5257, + BuiltInSecondaryViewportMaskNV = 5258, + BuiltInPositionPerViewNV = 5261, + BuiltInViewportMaskPerViewNV = 5262, + BuiltInFullyCoveredEXT = 5264, + BuiltInTaskCountNV = 5274, + BuiltInPrimitiveCountNV = 5275, + BuiltInPrimitiveIndicesNV = 5276, + BuiltInClipDistancePerViewNV = 5277, + BuiltInCullDistancePerViewNV = 5278, + BuiltInLayerPerViewNV = 5279, + BuiltInMeshViewCountNV = 5280, + BuiltInMeshViewIndicesNV = 5281, + BuiltInBaryCoordNV = 5286, + BuiltInBaryCoordNoPerspNV = 5287, + BuiltInFragSizeEXT = 5292, + BuiltInFragmentSizeNV = 5292, + BuiltInFragInvocationCountEXT = 5293, + BuiltInInvocationsPerPixelNV = 5293, + BuiltInLaunchIdNV = 5319, + BuiltInLaunchSizeNV = 5320, + BuiltInWorldRayOriginNV = 5321, + BuiltInWorldRayDirectionNV = 5322, + BuiltInObjectRayOriginNV = 5323, + 
BuiltInObjectRayDirectionNV = 5324, + BuiltInRayTminNV = 5325, + BuiltInRayTmaxNV = 5326, + BuiltInInstanceCustomIndexNV = 5327, + BuiltInObjectToWorldNV = 5330, + BuiltInWorldToObjectNV = 5331, + BuiltInHitTNV = 5332, + BuiltInHitKindNV = 5333, + BuiltInIncomingRayFlagsNV = 5351, + BuiltInWarpsPerSMNV = 5374, + BuiltInSMCountNV = 5375, + BuiltInWarpIDNV = 5376, + BuiltInSMIDNV = 5377, + BuiltInMax = 0x7fffffff, +}; + +enum SelectionControlShift { + SelectionControlFlattenShift = 0, + SelectionControlDontFlattenShift = 1, + SelectionControlMax = 0x7fffffff, +}; + +enum SelectionControlMask { + SelectionControlMaskNone = 0, + SelectionControlFlattenMask = 0x00000001, + SelectionControlDontFlattenMask = 0x00000002, +}; + +enum LoopControlShift { + LoopControlUnrollShift = 0, + LoopControlDontUnrollShift = 1, + LoopControlDependencyInfiniteShift = 2, + LoopControlDependencyLengthShift = 3, + LoopControlMinIterationsShift = 4, + LoopControlMaxIterationsShift = 5, + LoopControlIterationMultipleShift = 6, + LoopControlPeelCountShift = 7, + LoopControlPartialCountShift = 8, + LoopControlMax = 0x7fffffff, +}; + +enum LoopControlMask { + LoopControlMaskNone = 0, + LoopControlUnrollMask = 0x00000001, + LoopControlDontUnrollMask = 0x00000002, + LoopControlDependencyInfiniteMask = 0x00000004, + LoopControlDependencyLengthMask = 0x00000008, + LoopControlMinIterationsMask = 0x00000010, + LoopControlMaxIterationsMask = 0x00000020, + LoopControlIterationMultipleMask = 0x00000040, + LoopControlPeelCountMask = 0x00000080, + LoopControlPartialCountMask = 0x00000100, +}; + +enum FunctionControlShift { + FunctionControlInlineShift = 0, + FunctionControlDontInlineShift = 1, + FunctionControlPureShift = 2, + FunctionControlConstShift = 3, + FunctionControlMax = 0x7fffffff, +}; + +enum FunctionControlMask { + FunctionControlMaskNone = 0, + FunctionControlInlineMask = 0x00000001, + FunctionControlDontInlineMask = 0x00000002, + FunctionControlPureMask = 0x00000004, + FunctionControlConstMask = 0x00000008, +}; + +enum MemorySemanticsShift { + MemorySemanticsAcquireShift = 1, + MemorySemanticsReleaseShift = 2, + MemorySemanticsAcquireReleaseShift = 3, + MemorySemanticsSequentiallyConsistentShift = 4, + MemorySemanticsUniformMemoryShift = 6, + MemorySemanticsSubgroupMemoryShift = 7, + MemorySemanticsWorkgroupMemoryShift = 8, + MemorySemanticsCrossWorkgroupMemoryShift = 9, + MemorySemanticsAtomicCounterMemoryShift = 10, + MemorySemanticsImageMemoryShift = 11, + MemorySemanticsOutputMemoryShift = 12, + MemorySemanticsOutputMemoryKHRShift = 12, + MemorySemanticsMakeAvailableShift = 13, + MemorySemanticsMakeAvailableKHRShift = 13, + MemorySemanticsMakeVisibleShift = 14, + MemorySemanticsMakeVisibleKHRShift = 14, + MemorySemanticsVolatileShift = 15, + MemorySemanticsMax = 0x7fffffff, +}; + +enum MemorySemanticsMask { + MemorySemanticsMaskNone = 0, + MemorySemanticsAcquireMask = 0x00000002, + MemorySemanticsReleaseMask = 0x00000004, + MemorySemanticsAcquireReleaseMask = 0x00000008, + MemorySemanticsSequentiallyConsistentMask = 0x00000010, + MemorySemanticsUniformMemoryMask = 0x00000040, + MemorySemanticsSubgroupMemoryMask = 0x00000080, + MemorySemanticsWorkgroupMemoryMask = 0x00000100, + MemorySemanticsCrossWorkgroupMemoryMask = 0x00000200, + MemorySemanticsAtomicCounterMemoryMask = 0x00000400, + MemorySemanticsImageMemoryMask = 0x00000800, + MemorySemanticsOutputMemoryMask = 0x00001000, + MemorySemanticsOutputMemoryKHRMask = 0x00001000, + MemorySemanticsMakeAvailableMask = 0x00002000, + MemorySemanticsMakeAvailableKHRMask 
= 0x00002000, + MemorySemanticsMakeVisibleMask = 0x00004000, + MemorySemanticsMakeVisibleKHRMask = 0x00004000, + MemorySemanticsVolatileMask = 0x00008000, +}; + +enum MemoryAccessShift { + MemoryAccessVolatileShift = 0, + MemoryAccessAlignedShift = 1, + MemoryAccessNontemporalShift = 2, + MemoryAccessMakePointerAvailableShift = 3, + MemoryAccessMakePointerAvailableKHRShift = 3, + MemoryAccessMakePointerVisibleShift = 4, + MemoryAccessMakePointerVisibleKHRShift = 4, + MemoryAccessNonPrivatePointerShift = 5, + MemoryAccessNonPrivatePointerKHRShift = 5, + MemoryAccessMax = 0x7fffffff, +}; + +enum MemoryAccessMask { + MemoryAccessMaskNone = 0, + MemoryAccessVolatileMask = 0x00000001, + MemoryAccessAlignedMask = 0x00000002, + MemoryAccessNontemporalMask = 0x00000004, + MemoryAccessMakePointerAvailableMask = 0x00000008, + MemoryAccessMakePointerAvailableKHRMask = 0x00000008, + MemoryAccessMakePointerVisibleMask = 0x00000010, + MemoryAccessMakePointerVisibleKHRMask = 0x00000010, + MemoryAccessNonPrivatePointerMask = 0x00000020, + MemoryAccessNonPrivatePointerKHRMask = 0x00000020, +}; + +enum Scope { + ScopeCrossDevice = 0, + ScopeDevice = 1, + ScopeWorkgroup = 2, + ScopeSubgroup = 3, + ScopeInvocation = 4, + ScopeQueueFamily = 5, + ScopeQueueFamilyKHR = 5, + ScopeMax = 0x7fffffff, +}; + +enum GroupOperation { + GroupOperationReduce = 0, + GroupOperationInclusiveScan = 1, + GroupOperationExclusiveScan = 2, + GroupOperationClusteredReduce = 3, + GroupOperationPartitionedReduceNV = 6, + GroupOperationPartitionedInclusiveScanNV = 7, + GroupOperationPartitionedExclusiveScanNV = 8, + GroupOperationMax = 0x7fffffff, +}; + +enum KernelEnqueueFlags { + KernelEnqueueFlagsNoWait = 0, + KernelEnqueueFlagsWaitKernel = 1, + KernelEnqueueFlagsWaitWorkGroup = 2, + KernelEnqueueFlagsMax = 0x7fffffff, +}; + +enum KernelProfilingInfoShift { + KernelProfilingInfoCmdExecTimeShift = 0, + KernelProfilingInfoMax = 0x7fffffff, +}; + +enum KernelProfilingInfoMask { + KernelProfilingInfoMaskNone = 0, + KernelProfilingInfoCmdExecTimeMask = 0x00000001, +}; + +enum Capability { + CapabilityMatrix = 0, + CapabilityShader = 1, + CapabilityGeometry = 2, + CapabilityTessellation = 3, + CapabilityAddresses = 4, + CapabilityLinkage = 5, + CapabilityKernel = 6, + CapabilityVector16 = 7, + CapabilityFloat16Buffer = 8, + CapabilityFloat16 = 9, + CapabilityFloat64 = 10, + CapabilityInt64 = 11, + CapabilityInt64Atomics = 12, + CapabilityImageBasic = 13, + CapabilityImageReadWrite = 14, + CapabilityImageMipmap = 15, + CapabilityPipes = 17, + CapabilityGroups = 18, + CapabilityDeviceEnqueue = 19, + CapabilityLiteralSampler = 20, + CapabilityAtomicStorage = 21, + CapabilityInt16 = 22, + CapabilityTessellationPointSize = 23, + CapabilityGeometryPointSize = 24, + CapabilityImageGatherExtended = 25, + CapabilityStorageImageMultisample = 27, + CapabilityUniformBufferArrayDynamicIndexing = 28, + CapabilitySampledImageArrayDynamicIndexing = 29, + CapabilityStorageBufferArrayDynamicIndexing = 30, + CapabilityStorageImageArrayDynamicIndexing = 31, + CapabilityClipDistance = 32, + CapabilityCullDistance = 33, + CapabilityImageCubeArray = 34, + CapabilitySampleRateShading = 35, + CapabilityImageRect = 36, + CapabilitySampledRect = 37, + CapabilityGenericPointer = 38, + CapabilityInt8 = 39, + CapabilityInputAttachment = 40, + CapabilitySparseResidency = 41, + CapabilityMinLod = 42, + CapabilitySampled1D = 43, + CapabilityImage1D = 44, + CapabilitySampledCubeArray = 45, + CapabilitySampledBuffer = 46, + CapabilityImageBuffer = 47, + 
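+// Capability values likewise mirror the SPIR-V registry: entries up to 70
+// are core, while 4423 and above were introduced by extensions. Pairs such
+// as CapabilityStorageBuffer16BitAccess /
+// CapabilityStorageUniformBufferBlock16 (both 4433) are the core name plus
+// the original extension spelling retained for source compatibility.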
CapabilityImageMSArray = 48, + CapabilityStorageImageExtendedFormats = 49, + CapabilityImageQuery = 50, + CapabilityDerivativeControl = 51, + CapabilityInterpolationFunction = 52, + CapabilityTransformFeedback = 53, + CapabilityGeometryStreams = 54, + CapabilityStorageImageReadWithoutFormat = 55, + CapabilityStorageImageWriteWithoutFormat = 56, + CapabilityMultiViewport = 57, + CapabilitySubgroupDispatch = 58, + CapabilityNamedBarrier = 59, + CapabilityPipeStorage = 60, + CapabilityGroupNonUniform = 61, + CapabilityGroupNonUniformVote = 62, + CapabilityGroupNonUniformArithmetic = 63, + CapabilityGroupNonUniformBallot = 64, + CapabilityGroupNonUniformShuffle = 65, + CapabilityGroupNonUniformShuffleRelative = 66, + CapabilityGroupNonUniformClustered = 67, + CapabilityGroupNonUniformQuad = 68, + CapabilityShaderLayer = 69, + CapabilityShaderViewportIndex = 70, + CapabilitySubgroupBallotKHR = 4423, + CapabilityDrawParameters = 4427, + CapabilitySubgroupVoteKHR = 4431, + CapabilityStorageBuffer16BitAccess = 4433, + CapabilityStorageUniformBufferBlock16 = 4433, + CapabilityStorageUniform16 = 4434, + CapabilityUniformAndStorageBuffer16BitAccess = 4434, + CapabilityStoragePushConstant16 = 4435, + CapabilityStorageInputOutput16 = 4436, + CapabilityDeviceGroup = 4437, + CapabilityMultiView = 4439, + CapabilityVariablePointersStorageBuffer = 4441, + CapabilityVariablePointers = 4442, + CapabilityAtomicStorageOps = 4445, + CapabilitySampleMaskPostDepthCoverage = 4447, + CapabilityStorageBuffer8BitAccess = 4448, + CapabilityUniformAndStorageBuffer8BitAccess = 4449, + CapabilityStoragePushConstant8 = 4450, + CapabilityDenormPreserve = 4464, + CapabilityDenormFlushToZero = 4465, + CapabilitySignedZeroInfNanPreserve = 4466, + CapabilityRoundingModeRTE = 4467, + CapabilityRoundingModeRTZ = 4468, + CapabilityFloat16ImageAMD = 5008, + CapabilityImageGatherBiasLodAMD = 5009, + CapabilityFragmentMaskAMD = 5010, + CapabilityStencilExportEXT = 5013, + CapabilityImageReadWriteLodAMD = 5015, + CapabilityShaderClockKHR = 5055, + CapabilitySampleMaskOverrideCoverageNV = 5249, + CapabilityGeometryShaderPassthroughNV = 5251, + CapabilityShaderViewportIndexLayerEXT = 5254, + CapabilityShaderViewportIndexLayerNV = 5254, + CapabilityShaderViewportMaskNV = 5255, + CapabilityShaderStereoViewNV = 5259, + CapabilityPerViewAttributesNV = 5260, + CapabilityFragmentFullyCoveredEXT = 5265, + CapabilityMeshShadingNV = 5266, + CapabilityImageFootprintNV = 5282, + CapabilityFragmentBarycentricNV = 5284, + CapabilityComputeDerivativeGroupQuadsNV = 5288, + CapabilityFragmentDensityEXT = 5291, + CapabilityShadingRateNV = 5291, + CapabilityGroupNonUniformPartitionedNV = 5297, + CapabilityShaderNonUniform = 5301, + CapabilityShaderNonUniformEXT = 5301, + CapabilityRuntimeDescriptorArray = 5302, + CapabilityRuntimeDescriptorArrayEXT = 5302, + CapabilityInputAttachmentArrayDynamicIndexing = 5303, + CapabilityInputAttachmentArrayDynamicIndexingEXT = 5303, + CapabilityUniformTexelBufferArrayDynamicIndexing = 5304, + CapabilityUniformTexelBufferArrayDynamicIndexingEXT = 5304, + CapabilityStorageTexelBufferArrayDynamicIndexing = 5305, + CapabilityStorageTexelBufferArrayDynamicIndexingEXT = 5305, + CapabilityUniformBufferArrayNonUniformIndexing = 5306, + CapabilityUniformBufferArrayNonUniformIndexingEXT = 5306, + CapabilitySampledImageArrayNonUniformIndexing = 5307, + CapabilitySampledImageArrayNonUniformIndexingEXT = 5307, + CapabilityStorageBufferArrayNonUniformIndexing = 5308, + CapabilityStorageBufferArrayNonUniformIndexingEXT = 5308, + 
CapabilityStorageImageArrayNonUniformIndexing = 5309, + CapabilityStorageImageArrayNonUniformIndexingEXT = 5309, + CapabilityInputAttachmentArrayNonUniformIndexing = 5310, + CapabilityInputAttachmentArrayNonUniformIndexingEXT = 5310, + CapabilityUniformTexelBufferArrayNonUniformIndexing = 5311, + CapabilityUniformTexelBufferArrayNonUniformIndexingEXT = 5311, + CapabilityStorageTexelBufferArrayNonUniformIndexing = 5312, + CapabilityStorageTexelBufferArrayNonUniformIndexingEXT = 5312, + CapabilityRayTracingNV = 5340, + CapabilityVulkanMemoryModel = 5345, + CapabilityVulkanMemoryModelKHR = 5345, + CapabilityVulkanMemoryModelDeviceScope = 5346, + CapabilityVulkanMemoryModelDeviceScopeKHR = 5346, + CapabilityPhysicalStorageBufferAddresses = 5347, + CapabilityPhysicalStorageBufferAddressesEXT = 5347, + CapabilityComputeDerivativeGroupLinearNV = 5350, + CapabilityCooperativeMatrixNV = 5357, + CapabilityFragmentShaderSampleInterlockEXT = 5363, + CapabilityFragmentShaderShadingRateInterlockEXT = 5372, + CapabilityShaderSMBuiltinsNV = 5373, + CapabilityFragmentShaderPixelInterlockEXT = 5378, + CapabilityDemoteToHelperInvocationEXT = 5379, + CapabilitySubgroupShuffleINTEL = 5568, + CapabilitySubgroupBufferBlockIOINTEL = 5569, + CapabilitySubgroupImageBlockIOINTEL = 5570, + CapabilitySubgroupImageMediaBlockIOINTEL = 5579, + CapabilityIntegerFunctions2INTEL = 5584, + CapabilitySubgroupAvcMotionEstimationINTEL = 5696, + CapabilitySubgroupAvcMotionEstimationIntraINTEL = 5697, + CapabilitySubgroupAvcMotionEstimationChromaINTEL = 5698, + CapabilityMax = 0x7fffffff, +}; + +enum Op { + OpNop = 0, + OpUndef = 1, + OpSourceContinued = 2, + OpSource = 3, + OpSourceExtension = 4, + OpName = 5, + OpMemberName = 6, + OpString = 7, + OpLine = 8, + OpExtension = 10, + OpExtInstImport = 11, + OpExtInst = 12, + OpMemoryModel = 14, + OpEntryPoint = 15, + OpExecutionMode = 16, + OpCapability = 17, + OpTypeVoid = 19, + OpTypeBool = 20, + OpTypeInt = 21, + OpTypeFloat = 22, + OpTypeVector = 23, + OpTypeMatrix = 24, + OpTypeImage = 25, + OpTypeSampler = 26, + OpTypeSampledImage = 27, + OpTypeArray = 28, + OpTypeRuntimeArray = 29, + OpTypeStruct = 30, + OpTypeOpaque = 31, + OpTypePointer = 32, + OpTypeFunction = 33, + OpTypeEvent = 34, + OpTypeDeviceEvent = 35, + OpTypeReserveId = 36, + OpTypeQueue = 37, + OpTypePipe = 38, + OpTypeForwardPointer = 39, + OpConstantTrue = 41, + OpConstantFalse = 42, + OpConstant = 43, + OpConstantComposite = 44, + OpConstantSampler = 45, + OpConstantNull = 46, + OpSpecConstantTrue = 48, + OpSpecConstantFalse = 49, + OpSpecConstant = 50, + OpSpecConstantComposite = 51, + OpSpecConstantOp = 52, + OpFunction = 54, + OpFunctionParameter = 55, + OpFunctionEnd = 56, + OpFunctionCall = 57, + OpVariable = 59, + OpImageTexelPointer = 60, + OpLoad = 61, + OpStore = 62, + OpCopyMemory = 63, + OpCopyMemorySized = 64, + OpAccessChain = 65, + OpInBoundsAccessChain = 66, + OpPtrAccessChain = 67, + OpArrayLength = 68, + OpGenericPtrMemSemantics = 69, + OpInBoundsPtrAccessChain = 70, + OpDecorate = 71, + OpMemberDecorate = 72, + OpDecorationGroup = 73, + OpGroupDecorate = 74, + OpGroupMemberDecorate = 75, + OpVectorExtractDynamic = 77, + OpVectorInsertDynamic = 78, + OpVectorShuffle = 79, + OpCompositeConstruct = 80, + OpCompositeExtract = 81, + OpCompositeInsert = 82, + OpCopyObject = 83, + OpTranspose = 84, + OpSampledImage = 86, + OpImageSampleImplicitLod = 87, + OpImageSampleExplicitLod = 88, + OpImageSampleDrefImplicitLod = 89, + OpImageSampleDrefExplicitLod = 90, + OpImageSampleProjImplicitLod = 91, + 
OpImageSampleProjExplicitLod = 92, + OpImageSampleProjDrefImplicitLod = 93, + OpImageSampleProjDrefExplicitLod = 94, + OpImageFetch = 95, + OpImageGather = 96, + OpImageDrefGather = 97, + OpImageRead = 98, + OpImageWrite = 99, + OpImage = 100, + OpImageQueryFormat = 101, + OpImageQueryOrder = 102, + OpImageQuerySizeLod = 103, + OpImageQuerySize = 104, + OpImageQueryLod = 105, + OpImageQueryLevels = 106, + OpImageQuerySamples = 107, + OpConvertFToU = 109, + OpConvertFToS = 110, + OpConvertSToF = 111, + OpConvertUToF = 112, + OpUConvert = 113, + OpSConvert = 114, + OpFConvert = 115, + OpQuantizeToF16 = 116, + OpConvertPtrToU = 117, + OpSatConvertSToU = 118, + OpSatConvertUToS = 119, + OpConvertUToPtr = 120, + OpPtrCastToGeneric = 121, + OpGenericCastToPtr = 122, + OpGenericCastToPtrExplicit = 123, + OpBitcast = 124, + OpSNegate = 126, + OpFNegate = 127, + OpIAdd = 128, + OpFAdd = 129, + OpISub = 130, + OpFSub = 131, + OpIMul = 132, + OpFMul = 133, + OpUDiv = 134, + OpSDiv = 135, + OpFDiv = 136, + OpUMod = 137, + OpSRem = 138, + OpSMod = 139, + OpFRem = 140, + OpFMod = 141, + OpVectorTimesScalar = 142, + OpMatrixTimesScalar = 143, + OpVectorTimesMatrix = 144, + OpMatrixTimesVector = 145, + OpMatrixTimesMatrix = 146, + OpOuterProduct = 147, + OpDot = 148, + OpIAddCarry = 149, + OpISubBorrow = 150, + OpUMulExtended = 151, + OpSMulExtended = 152, + OpAny = 154, + OpAll = 155, + OpIsNan = 156, + OpIsInf = 157, + OpIsFinite = 158, + OpIsNormal = 159, + OpSignBitSet = 160, + OpLessOrGreater = 161, + OpOrdered = 162, + OpUnordered = 163, + OpLogicalEqual = 164, + OpLogicalNotEqual = 165, + OpLogicalOr = 166, + OpLogicalAnd = 167, + OpLogicalNot = 168, + OpSelect = 169, + OpIEqual = 170, + OpINotEqual = 171, + OpUGreaterThan = 172, + OpSGreaterThan = 173, + OpUGreaterThanEqual = 174, + OpSGreaterThanEqual = 175, + OpULessThan = 176, + OpSLessThan = 177, + OpULessThanEqual = 178, + OpSLessThanEqual = 179, + OpFOrdEqual = 180, + OpFUnordEqual = 181, + OpFOrdNotEqual = 182, + OpFUnordNotEqual = 183, + OpFOrdLessThan = 184, + OpFUnordLessThan = 185, + OpFOrdGreaterThan = 186, + OpFUnordGreaterThan = 187, + OpFOrdLessThanEqual = 188, + OpFUnordLessThanEqual = 189, + OpFOrdGreaterThanEqual = 190, + OpFUnordGreaterThanEqual = 191, + OpShiftRightLogical = 194, + OpShiftRightArithmetic = 195, + OpShiftLeftLogical = 196, + OpBitwiseOr = 197, + OpBitwiseXor = 198, + OpBitwiseAnd = 199, + OpNot = 200, + OpBitFieldInsert = 201, + OpBitFieldSExtract = 202, + OpBitFieldUExtract = 203, + OpBitReverse = 204, + OpBitCount = 205, + OpDPdx = 207, + OpDPdy = 208, + OpFwidth = 209, + OpDPdxFine = 210, + OpDPdyFine = 211, + OpFwidthFine = 212, + OpDPdxCoarse = 213, + OpDPdyCoarse = 214, + OpFwidthCoarse = 215, + OpEmitVertex = 218, + OpEndPrimitive = 219, + OpEmitStreamVertex = 220, + OpEndStreamPrimitive = 221, + OpControlBarrier = 224, + OpMemoryBarrier = 225, + OpAtomicLoad = 227, + OpAtomicStore = 228, + OpAtomicExchange = 229, + OpAtomicCompareExchange = 230, + OpAtomicCompareExchangeWeak = 231, + OpAtomicIIncrement = 232, + OpAtomicIDecrement = 233, + OpAtomicIAdd = 234, + OpAtomicISub = 235, + OpAtomicSMin = 236, + OpAtomicUMin = 237, + OpAtomicSMax = 238, + OpAtomicUMax = 239, + OpAtomicAnd = 240, + OpAtomicOr = 241, + OpAtomicXor = 242, + OpPhi = 245, + OpLoopMerge = 246, + OpSelectionMerge = 247, + OpLabel = 248, + OpBranch = 249, + OpBranchConditional = 250, + OpSwitch = 251, + OpKill = 252, + OpReturn = 253, + OpReturnValue = 254, + OpUnreachable = 255, + OpLifetimeStart = 256, + OpLifetimeStop = 257, + 
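+// Opcode numbers are fixed by the SPIR-V registry, so unassigned values are
+// simply skipped (258, 272, and 273 in the block below, for example). The
+// 259-304 block that follows holds the OpenCL-oriented group, pipe, and
+// device-side-enqueue instructions.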
OpGroupAsyncCopy = 259, + OpGroupWaitEvents = 260, + OpGroupAll = 261, + OpGroupAny = 262, + OpGroupBroadcast = 263, + OpGroupIAdd = 264, + OpGroupFAdd = 265, + OpGroupFMin = 266, + OpGroupUMin = 267, + OpGroupSMin = 268, + OpGroupFMax = 269, + OpGroupUMax = 270, + OpGroupSMax = 271, + OpReadPipe = 274, + OpWritePipe = 275, + OpReservedReadPipe = 276, + OpReservedWritePipe = 277, + OpReserveReadPipePackets = 278, + OpReserveWritePipePackets = 279, + OpCommitReadPipe = 280, + OpCommitWritePipe = 281, + OpIsValidReserveId = 282, + OpGetNumPipePackets = 283, + OpGetMaxPipePackets = 284, + OpGroupReserveReadPipePackets = 285, + OpGroupReserveWritePipePackets = 286, + OpGroupCommitReadPipe = 287, + OpGroupCommitWritePipe = 288, + OpEnqueueMarker = 291, + OpEnqueueKernel = 292, + OpGetKernelNDrangeSubGroupCount = 293, + OpGetKernelNDrangeMaxSubGroupSize = 294, + OpGetKernelWorkGroupSize = 295, + OpGetKernelPreferredWorkGroupSizeMultiple = 296, + OpRetainEvent = 297, + OpReleaseEvent = 298, + OpCreateUserEvent = 299, + OpIsValidEvent = 300, + OpSetUserEventStatus = 301, + OpCaptureEventProfilingInfo = 302, + OpGetDefaultQueue = 303, + OpBuildNDRange = 304, + OpImageSparseSampleImplicitLod = 305, + OpImageSparseSampleExplicitLod = 306, + OpImageSparseSampleDrefImplicitLod = 307, + OpImageSparseSampleDrefExplicitLod = 308, + OpImageSparseSampleProjImplicitLod = 309, + OpImageSparseSampleProjExplicitLod = 310, + OpImageSparseSampleProjDrefImplicitLod = 311, + OpImageSparseSampleProjDrefExplicitLod = 312, + OpImageSparseFetch = 313, + OpImageSparseGather = 314, + OpImageSparseDrefGather = 315, + OpImageSparseTexelsResident = 316, + OpNoLine = 317, + OpAtomicFlagTestAndSet = 318, + OpAtomicFlagClear = 319, + OpImageSparseRead = 320, + OpSizeOf = 321, + OpTypePipeStorage = 322, + OpConstantPipeStorage = 323, + OpCreatePipeFromPipeStorage = 324, + OpGetKernelLocalSizeForSubgroupCount = 325, + OpGetKernelMaxNumSubgroups = 326, + OpTypeNamedBarrier = 327, + OpNamedBarrierInitialize = 328, + OpMemoryNamedBarrier = 329, + OpModuleProcessed = 330, + OpExecutionModeId = 331, + OpDecorateId = 332, + OpGroupNonUniformElect = 333, + OpGroupNonUniformAll = 334, + OpGroupNonUniformAny = 335, + OpGroupNonUniformAllEqual = 336, + OpGroupNonUniformBroadcast = 337, + OpGroupNonUniformBroadcastFirst = 338, + OpGroupNonUniformBallot = 339, + OpGroupNonUniformInverseBallot = 340, + OpGroupNonUniformBallotBitExtract = 341, + OpGroupNonUniformBallotBitCount = 342, + OpGroupNonUniformBallotFindLSB = 343, + OpGroupNonUniformBallotFindMSB = 344, + OpGroupNonUniformShuffle = 345, + OpGroupNonUniformShuffleXor = 346, + OpGroupNonUniformShuffleUp = 347, + OpGroupNonUniformShuffleDown = 348, + OpGroupNonUniformIAdd = 349, + OpGroupNonUniformFAdd = 350, + OpGroupNonUniformIMul = 351, + OpGroupNonUniformFMul = 352, + OpGroupNonUniformSMin = 353, + OpGroupNonUniformUMin = 354, + OpGroupNonUniformFMin = 355, + OpGroupNonUniformSMax = 356, + OpGroupNonUniformUMax = 357, + OpGroupNonUniformFMax = 358, + OpGroupNonUniformBitwiseAnd = 359, + OpGroupNonUniformBitwiseOr = 360, + OpGroupNonUniformBitwiseXor = 361, + OpGroupNonUniformLogicalAnd = 362, + OpGroupNonUniformLogicalOr = 363, + OpGroupNonUniformLogicalXor = 364, + OpGroupNonUniformQuadBroadcast = 365, + OpGroupNonUniformQuadSwap = 366, + OpCopyLogical = 400, + OpPtrEqual = 401, + OpPtrNotEqual = 402, + OpPtrDiff = 403, + OpSubgroupBallotKHR = 4421, + OpSubgroupFirstInvocationKHR = 4422, + OpSubgroupAllKHR = 4428, + OpSubgroupAnyKHR = 4429, + OpSubgroupAllEqualKHR = 4430, + 
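+// The OpSubgroup*KHR opcodes in the 4421-4432 range come from the
+// SPV_KHR_shader_ballot and SPV_KHR_subgroup_vote extensions.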
OpSubgroupReadInvocationKHR = 4432, + OpGroupIAddNonUniformAMD = 5000, + OpGroupFAddNonUniformAMD = 5001, + OpGroupFMinNonUniformAMD = 5002, + OpGroupUMinNonUniformAMD = 5003, + OpGroupSMinNonUniformAMD = 5004, + OpGroupFMaxNonUniformAMD = 5005, + OpGroupUMaxNonUniformAMD = 5006, + OpGroupSMaxNonUniformAMD = 5007, + OpFragmentMaskFetchAMD = 5011, + OpFragmentFetchAMD = 5012, + OpReadClockKHR = 5056, + OpImageSampleFootprintNV = 5283, + OpGroupNonUniformPartitionNV = 5296, + OpWritePackedPrimitiveIndices4x8NV = 5299, + OpReportIntersectionNV = 5334, + OpIgnoreIntersectionNV = 5335, + OpTerminateRayNV = 5336, + OpTraceNV = 5337, + OpTypeAccelerationStructureNV = 5341, + OpExecuteCallableNV = 5344, + OpTypeCooperativeMatrixNV = 5358, + OpCooperativeMatrixLoadNV = 5359, + OpCooperativeMatrixStoreNV = 5360, + OpCooperativeMatrixMulAddNV = 5361, + OpCooperativeMatrixLengthNV = 5362, + OpBeginInvocationInterlockEXT = 5364, + OpEndInvocationInterlockEXT = 5365, + OpDemoteToHelperInvocationEXT = 5380, + OpIsHelperInvocationEXT = 5381, + OpSubgroupShuffleINTEL = 5571, + OpSubgroupShuffleDownINTEL = 5572, + OpSubgroupShuffleUpINTEL = 5573, + OpSubgroupShuffleXorINTEL = 5574, + OpSubgroupBlockReadINTEL = 5575, + OpSubgroupBlockWriteINTEL = 5576, + OpSubgroupImageBlockReadINTEL = 5577, + OpSubgroupImageBlockWriteINTEL = 5578, + OpSubgroupImageMediaBlockReadINTEL = 5580, + OpSubgroupImageMediaBlockWriteINTEL = 5581, + OpUCountLeadingZerosINTEL = 5585, + OpUCountTrailingZerosINTEL = 5586, + OpAbsISubINTEL = 5587, + OpAbsUSubINTEL = 5588, + OpIAddSatINTEL = 5589, + OpUAddSatINTEL = 5590, + OpIAverageINTEL = 5591, + OpUAverageINTEL = 5592, + OpIAverageRoundedINTEL = 5593, + OpUAverageRoundedINTEL = 5594, + OpISubSatINTEL = 5595, + OpUSubSatINTEL = 5596, + OpIMul32x16INTEL = 5597, + OpUMul32x16INTEL = 5598, + OpDecorateString = 5632, + OpDecorateStringGOOGLE = 5632, + OpMemberDecorateString = 5633, + OpMemberDecorateStringGOOGLE = 5633, + OpVmeImageINTEL = 5699, + OpTypeVmeImageINTEL = 5700, + OpTypeAvcImePayloadINTEL = 5701, + OpTypeAvcRefPayloadINTEL = 5702, + OpTypeAvcSicPayloadINTEL = 5703, + OpTypeAvcMcePayloadINTEL = 5704, + OpTypeAvcMceResultINTEL = 5705, + OpTypeAvcImeResultINTEL = 5706, + OpTypeAvcImeResultSingleReferenceStreamoutINTEL = 5707, + OpTypeAvcImeResultDualReferenceStreamoutINTEL = 5708, + OpTypeAvcImeSingleReferenceStreaminINTEL = 5709, + OpTypeAvcImeDualReferenceStreaminINTEL = 5710, + OpTypeAvcRefResultINTEL = 5711, + OpTypeAvcSicResultINTEL = 5712, + OpSubgroupAvcMceGetDefaultInterBaseMultiReferencePenaltyINTEL = 5713, + OpSubgroupAvcMceSetInterBaseMultiReferencePenaltyINTEL = 5714, + OpSubgroupAvcMceGetDefaultInterShapePenaltyINTEL = 5715, + OpSubgroupAvcMceSetInterShapePenaltyINTEL = 5716, + OpSubgroupAvcMceGetDefaultInterDirectionPenaltyINTEL = 5717, + OpSubgroupAvcMceSetInterDirectionPenaltyINTEL = 5718, + OpSubgroupAvcMceGetDefaultIntraLumaShapePenaltyINTEL = 5719, + OpSubgroupAvcMceGetDefaultInterMotionVectorCostTableINTEL = 5720, + OpSubgroupAvcMceGetDefaultHighPenaltyCostTableINTEL = 5721, + OpSubgroupAvcMceGetDefaultMediumPenaltyCostTableINTEL = 5722, + OpSubgroupAvcMceGetDefaultLowPenaltyCostTableINTEL = 5723, + OpSubgroupAvcMceSetMotionVectorCostFunctionINTEL = 5724, + OpSubgroupAvcMceGetDefaultIntraLumaModePenaltyINTEL = 5725, + OpSubgroupAvcMceGetDefaultNonDcLumaIntraPenaltyINTEL = 5726, + OpSubgroupAvcMceGetDefaultIntraChromaModeBasePenaltyINTEL = 5727, + OpSubgroupAvcMceSetAcOnlyHaarINTEL = 5728, + OpSubgroupAvcMceSetSourceInterlacedFieldPolarityINTEL = 5729, + 
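+// Opcodes 5699-5816 form the instruction set of
+// SPV_INTEL_device_side_avc_motion_estimation.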
OpSubgroupAvcMceSetSingleReferenceInterlacedFieldPolarityINTEL = 5730, + OpSubgroupAvcMceSetDualReferenceInterlacedFieldPolaritiesINTEL = 5731, + OpSubgroupAvcMceConvertToImePayloadINTEL = 5732, + OpSubgroupAvcMceConvertToImeResultINTEL = 5733, + OpSubgroupAvcMceConvertToRefPayloadINTEL = 5734, + OpSubgroupAvcMceConvertToRefResultINTEL = 5735, + OpSubgroupAvcMceConvertToSicPayloadINTEL = 5736, + OpSubgroupAvcMceConvertToSicResultINTEL = 5737, + OpSubgroupAvcMceGetMotionVectorsINTEL = 5738, + OpSubgroupAvcMceGetInterDistortionsINTEL = 5739, + OpSubgroupAvcMceGetBestInterDistortionsINTEL = 5740, + OpSubgroupAvcMceGetInterMajorShapeINTEL = 5741, + OpSubgroupAvcMceGetInterMinorShapeINTEL = 5742, + OpSubgroupAvcMceGetInterDirectionsINTEL = 5743, + OpSubgroupAvcMceGetInterMotionVectorCountINTEL = 5744, + OpSubgroupAvcMceGetInterReferenceIdsINTEL = 5745, + OpSubgroupAvcMceGetInterReferenceInterlacedFieldPolaritiesINTEL = 5746, + OpSubgroupAvcImeInitializeINTEL = 5747, + OpSubgroupAvcImeSetSingleReferenceINTEL = 5748, + OpSubgroupAvcImeSetDualReferenceINTEL = 5749, + OpSubgroupAvcImeRefWindowSizeINTEL = 5750, + OpSubgroupAvcImeAdjustRefOffsetINTEL = 5751, + OpSubgroupAvcImeConvertToMcePayloadINTEL = 5752, + OpSubgroupAvcImeSetMaxMotionVectorCountINTEL = 5753, + OpSubgroupAvcImeSetUnidirectionalMixDisableINTEL = 5754, + OpSubgroupAvcImeSetEarlySearchTerminationThresholdINTEL = 5755, + OpSubgroupAvcImeSetWeightedSadINTEL = 5756, + OpSubgroupAvcImeEvaluateWithSingleReferenceINTEL = 5757, + OpSubgroupAvcImeEvaluateWithDualReferenceINTEL = 5758, + OpSubgroupAvcImeEvaluateWithSingleReferenceStreaminINTEL = 5759, + OpSubgroupAvcImeEvaluateWithDualReferenceStreaminINTEL = 5760, + OpSubgroupAvcImeEvaluateWithSingleReferenceStreamoutINTEL = 5761, + OpSubgroupAvcImeEvaluateWithDualReferenceStreamoutINTEL = 5762, + OpSubgroupAvcImeEvaluateWithSingleReferenceStreaminoutINTEL = 5763, + OpSubgroupAvcImeEvaluateWithDualReferenceStreaminoutINTEL = 5764, + OpSubgroupAvcImeConvertToMceResultINTEL = 5765, + OpSubgroupAvcImeGetSingleReferenceStreaminINTEL = 5766, + OpSubgroupAvcImeGetDualReferenceStreaminINTEL = 5767, + OpSubgroupAvcImeStripSingleReferenceStreamoutINTEL = 5768, + OpSubgroupAvcImeStripDualReferenceStreamoutINTEL = 5769, + OpSubgroupAvcImeGetStreamoutSingleReferenceMajorShapeMotionVectorsINTEL = 5770, + OpSubgroupAvcImeGetStreamoutSingleReferenceMajorShapeDistortionsINTEL = 5771, + OpSubgroupAvcImeGetStreamoutSingleReferenceMajorShapeReferenceIdsINTEL = 5772, + OpSubgroupAvcImeGetStreamoutDualReferenceMajorShapeMotionVectorsINTEL = 5773, + OpSubgroupAvcImeGetStreamoutDualReferenceMajorShapeDistortionsINTEL = 5774, + OpSubgroupAvcImeGetStreamoutDualReferenceMajorShapeReferenceIdsINTEL = 5775, + OpSubgroupAvcImeGetBorderReachedINTEL = 5776, + OpSubgroupAvcImeGetTruncatedSearchIndicationINTEL = 5777, + OpSubgroupAvcImeGetUnidirectionalEarlySearchTerminationINTEL = 5778, + OpSubgroupAvcImeGetWeightingPatternMinimumMotionVectorINTEL = 5779, + OpSubgroupAvcImeGetWeightingPatternMinimumDistortionINTEL = 5780, + OpSubgroupAvcFmeInitializeINTEL = 5781, + OpSubgroupAvcBmeInitializeINTEL = 5782, + OpSubgroupAvcRefConvertToMcePayloadINTEL = 5783, + OpSubgroupAvcRefSetBidirectionalMixDisableINTEL = 5784, + OpSubgroupAvcRefSetBilinearFilterEnableINTEL = 5785, + OpSubgroupAvcRefEvaluateWithSingleReferenceINTEL = 5786, + OpSubgroupAvcRefEvaluateWithDualReferenceINTEL = 5787, + OpSubgroupAvcRefEvaluateWithMultiReferenceINTEL = 5788, + OpSubgroupAvcRefEvaluateWithMultiReferenceInterlacedINTEL = 5789, + 
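+// The HasResultAndType helper below (compiled only when
+// SPV_ENABLE_UTILITY_CODE is defined before this header is included) reports
+// whether an opcode produces a result id and a result-type id. A minimal
+// decoding sketch, assuming `inst` points at an instruction in the standard
+// SPIR-V word layout (word 0 packs the word count in the high 16 bits and
+// the opcode in the low 16 bits):
+//
+//   bool hasResult = false, hasResultType = false;
+//   HasResultAndType(static_cast<Op>(inst[0] & 0xffffu),
+//                    &hasResult, &hasResultType);
+//   // When both are true, word 1 is the result-type id and word 2 the
+//   // result id; with a result but no type, word 1 is the result id.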
OpSubgroupAvcRefConvertToMceResultINTEL = 5790, + OpSubgroupAvcSicInitializeINTEL = 5791, + OpSubgroupAvcSicConfigureSkcINTEL = 5792, + OpSubgroupAvcSicConfigureIpeLumaINTEL = 5793, + OpSubgroupAvcSicConfigureIpeLumaChromaINTEL = 5794, + OpSubgroupAvcSicGetMotionVectorMaskINTEL = 5795, + OpSubgroupAvcSicConvertToMcePayloadINTEL = 5796, + OpSubgroupAvcSicSetIntraLumaShapePenaltyINTEL = 5797, + OpSubgroupAvcSicSetIntraLumaModeCostFunctionINTEL = 5798, + OpSubgroupAvcSicSetIntraChromaModeCostFunctionINTEL = 5799, + OpSubgroupAvcSicSetBilinearFilterEnableINTEL = 5800, + OpSubgroupAvcSicSetSkcForwardTransformEnableINTEL = 5801, + OpSubgroupAvcSicSetBlockBasedRawSkipSadINTEL = 5802, + OpSubgroupAvcSicEvaluateIpeINTEL = 5803, + OpSubgroupAvcSicEvaluateWithSingleReferenceINTEL = 5804, + OpSubgroupAvcSicEvaluateWithDualReferenceINTEL = 5805, + OpSubgroupAvcSicEvaluateWithMultiReferenceINTEL = 5806, + OpSubgroupAvcSicEvaluateWithMultiReferenceInterlacedINTEL = 5807, + OpSubgroupAvcSicConvertToMceResultINTEL = 5808, + OpSubgroupAvcSicGetIpeLumaShapeINTEL = 5809, + OpSubgroupAvcSicGetBestIpeLumaDistortionINTEL = 5810, + OpSubgroupAvcSicGetBestIpeChromaDistortionINTEL = 5811, + OpSubgroupAvcSicGetPackedIpeLumaModesINTEL = 5812, + OpSubgroupAvcSicGetIpeChromaModeINTEL = 5813, + OpSubgroupAvcSicGetPackedSkcLumaCountThresholdINTEL = 5814, + OpSubgroupAvcSicGetPackedSkcLumaSumThresholdINTEL = 5815, + OpSubgroupAvcSicGetInterRawSadsINTEL = 5816, + OpMax = 0x7fffffff, +}; + +#ifdef SPV_ENABLE_UTILITY_CODE +inline void HasResultAndType(Op opcode, bool *hasResult, bool *hasResultType) { + *hasResult = *hasResultType = false; + switch (opcode) { + default: break; + case OpNop: *hasResult = false; *hasResultType = false; break; + case OpUndef: *hasResult = true; *hasResultType = true; break; + case OpSourceContinued: *hasResult = false; *hasResultType = false; break; + case OpSource: *hasResult = false; *hasResultType = false; break; + case OpSourceExtension: *hasResult = false; *hasResultType = false; break; + case OpName: *hasResult = false; *hasResultType = false; break; + case OpMemberName: *hasResult = false; *hasResultType = false; break; + case OpString: *hasResult = true; *hasResultType = false; break; + case OpLine: *hasResult = false; *hasResultType = false; break; + case OpExtension: *hasResult = false; *hasResultType = false; break; + case OpExtInstImport: *hasResult = true; *hasResultType = false; break; + case OpExtInst: *hasResult = true; *hasResultType = true; break; + case OpMemoryModel: *hasResult = false; *hasResultType = false; break; + case OpEntryPoint: *hasResult = false; *hasResultType = false; break; + case OpExecutionMode: *hasResult = false; *hasResultType = false; break; + case OpCapability: *hasResult = false; *hasResultType = false; break; + case OpTypeVoid: *hasResult = true; *hasResultType = false; break; + case OpTypeBool: *hasResult = true; *hasResultType = false; break; + case OpTypeInt: *hasResult = true; *hasResultType = false; break; + case OpTypeFloat: *hasResult = true; *hasResultType = false; break; + case OpTypeVector: *hasResult = true; *hasResultType = false; break; + case OpTypeMatrix: *hasResult = true; *hasResultType = false; break; + case OpTypeImage: *hasResult = true; *hasResultType = false; break; + case OpTypeSampler: *hasResult = true; *hasResultType = false; break; + case OpTypeSampledImage: *hasResult = true; *hasResultType = false; break; + case OpTypeArray: *hasResult = true; *hasResultType = false; break; + case OpTypeRuntimeArray: *hasResult = true; 
*hasResultType = false; break; + case OpTypeStruct: *hasResult = true; *hasResultType = false; break; + case OpTypeOpaque: *hasResult = true; *hasResultType = false; break; + case OpTypePointer: *hasResult = true; *hasResultType = false; break; + case OpTypeFunction: *hasResult = true; *hasResultType = false; break; + case OpTypeEvent: *hasResult = true; *hasResultType = false; break; + case OpTypeDeviceEvent: *hasResult = true; *hasResultType = false; break; + case OpTypeReserveId: *hasResult = true; *hasResultType = false; break; + case OpTypeQueue: *hasResult = true; *hasResultType = false; break; + case OpTypePipe: *hasResult = true; *hasResultType = false; break; + case OpTypeForwardPointer: *hasResult = false; *hasResultType = false; break; + case OpConstantTrue: *hasResult = true; *hasResultType = true; break; + case OpConstantFalse: *hasResult = true; *hasResultType = true; break; + case OpConstant: *hasResult = true; *hasResultType = true; break; + case OpConstantComposite: *hasResult = true; *hasResultType = true; break; + case OpConstantSampler: *hasResult = true; *hasResultType = true; break; + case OpConstantNull: *hasResult = true; *hasResultType = true; break; + case OpSpecConstantTrue: *hasResult = true; *hasResultType = true; break; + case OpSpecConstantFalse: *hasResult = true; *hasResultType = true; break; + case OpSpecConstant: *hasResult = true; *hasResultType = true; break; + case OpSpecConstantComposite: *hasResult = true; *hasResultType = true; break; + case OpSpecConstantOp: *hasResult = true; *hasResultType = true; break; + case OpFunction: *hasResult = true; *hasResultType = true; break; + case OpFunctionParameter: *hasResult = true; *hasResultType = true; break; + case OpFunctionEnd: *hasResult = false; *hasResultType = false; break; + case OpFunctionCall: *hasResult = true; *hasResultType = true; break; + case OpVariable: *hasResult = true; *hasResultType = true; break; + case OpImageTexelPointer: *hasResult = true; *hasResultType = true; break; + case OpLoad: *hasResult = true; *hasResultType = true; break; + case OpStore: *hasResult = false; *hasResultType = false; break; + case OpCopyMemory: *hasResult = false; *hasResultType = false; break; + case OpCopyMemorySized: *hasResult = false; *hasResultType = false; break; + case OpAccessChain: *hasResult = true; *hasResultType = true; break; + case OpInBoundsAccessChain: *hasResult = true; *hasResultType = true; break; + case OpPtrAccessChain: *hasResult = true; *hasResultType = true; break; + case OpArrayLength: *hasResult = true; *hasResultType = true; break; + case OpGenericPtrMemSemantics: *hasResult = true; *hasResultType = true; break; + case OpInBoundsPtrAccessChain: *hasResult = true; *hasResultType = true; break; + case OpDecorate: *hasResult = false; *hasResultType = false; break; + case OpMemberDecorate: *hasResult = false; *hasResultType = false; break; + case OpDecorationGroup: *hasResult = true; *hasResultType = false; break; + case OpGroupDecorate: *hasResult = false; *hasResultType = false; break; + case OpGroupMemberDecorate: *hasResult = false; *hasResultType = false; break; + case OpVectorExtractDynamic: *hasResult = true; *hasResultType = true; break; + case OpVectorInsertDynamic: *hasResult = true; *hasResultType = true; break; + case OpVectorShuffle: *hasResult = true; *hasResultType = true; break; + case OpCompositeConstruct: *hasResult = true; *hasResultType = true; break; + case OpCompositeExtract: *hasResult = true; *hasResultType = true; break; + case OpCompositeInsert: *hasResult = 
true; *hasResultType = true; break; + case OpCopyObject: *hasResult = true; *hasResultType = true; break; + case OpTranspose: *hasResult = true; *hasResultType = true; break; + case OpSampledImage: *hasResult = true; *hasResultType = true; break; + case OpImageSampleImplicitLod: *hasResult = true; *hasResultType = true; break; + case OpImageSampleExplicitLod: *hasResult = true; *hasResultType = true; break; + case OpImageSampleDrefImplicitLod: *hasResult = true; *hasResultType = true; break; + case OpImageSampleDrefExplicitLod: *hasResult = true; *hasResultType = true; break; + case OpImageSampleProjImplicitLod: *hasResult = true; *hasResultType = true; break; + case OpImageSampleProjExplicitLod: *hasResult = true; *hasResultType = true; break; + case OpImageSampleProjDrefImplicitLod: *hasResult = true; *hasResultType = true; break; + case OpImageSampleProjDrefExplicitLod: *hasResult = true; *hasResultType = true; break; + case OpImageFetch: *hasResult = true; *hasResultType = true; break; + case OpImageGather: *hasResult = true; *hasResultType = true; break; + case OpImageDrefGather: *hasResult = true; *hasResultType = true; break; + case OpImageRead: *hasResult = true; *hasResultType = true; break; + case OpImageWrite: *hasResult = false; *hasResultType = false; break; + case OpImage: *hasResult = true; *hasResultType = true; break; + case OpImageQueryFormat: *hasResult = true; *hasResultType = true; break; + case OpImageQueryOrder: *hasResult = true; *hasResultType = true; break; + case OpImageQuerySizeLod: *hasResult = true; *hasResultType = true; break; + case OpImageQuerySize: *hasResult = true; *hasResultType = true; break; + case OpImageQueryLod: *hasResult = true; *hasResultType = true; break; + case OpImageQueryLevels: *hasResult = true; *hasResultType = true; break; + case OpImageQuerySamples: *hasResult = true; *hasResultType = true; break; + case OpConvertFToU: *hasResult = true; *hasResultType = true; break; + case OpConvertFToS: *hasResult = true; *hasResultType = true; break; + case OpConvertSToF: *hasResult = true; *hasResultType = true; break; + case OpConvertUToF: *hasResult = true; *hasResultType = true; break; + case OpUConvert: *hasResult = true; *hasResultType = true; break; + case OpSConvert: *hasResult = true; *hasResultType = true; break; + case OpFConvert: *hasResult = true; *hasResultType = true; break; + case OpQuantizeToF16: *hasResult = true; *hasResultType = true; break; + case OpConvertPtrToU: *hasResult = true; *hasResultType = true; break; + case OpSatConvertSToU: *hasResult = true; *hasResultType = true; break; + case OpSatConvertUToS: *hasResult = true; *hasResultType = true; break; + case OpConvertUToPtr: *hasResult = true; *hasResultType = true; break; + case OpPtrCastToGeneric: *hasResult = true; *hasResultType = true; break; + case OpGenericCastToPtr: *hasResult = true; *hasResultType = true; break; + case OpGenericCastToPtrExplicit: *hasResult = true; *hasResultType = true; break; + case OpBitcast: *hasResult = true; *hasResultType = true; break; + case OpSNegate: *hasResult = true; *hasResultType = true; break; + case OpFNegate: *hasResult = true; *hasResultType = true; break; + case OpIAdd: *hasResult = true; *hasResultType = true; break; + case OpFAdd: *hasResult = true; *hasResultType = true; break; + case OpISub: *hasResult = true; *hasResultType = true; break; + case OpFSub: *hasResult = true; *hasResultType = true; break; + case OpIMul: *hasResult = true; *hasResultType = true; break; + case OpFMul: *hasResult = true; *hasResultType = true; 
break; + case OpUDiv: *hasResult = true; *hasResultType = true; break; + case OpSDiv: *hasResult = true; *hasResultType = true; break; + case OpFDiv: *hasResult = true; *hasResultType = true; break; + case OpUMod: *hasResult = true; *hasResultType = true; break; + case OpSRem: *hasResult = true; *hasResultType = true; break; + case OpSMod: *hasResult = true; *hasResultType = true; break; + case OpFRem: *hasResult = true; *hasResultType = true; break; + case OpFMod: *hasResult = true; *hasResultType = true; break; + case OpVectorTimesScalar: *hasResult = true; *hasResultType = true; break; + case OpMatrixTimesScalar: *hasResult = true; *hasResultType = true; break; + case OpVectorTimesMatrix: *hasResult = true; *hasResultType = true; break; + case OpMatrixTimesVector: *hasResult = true; *hasResultType = true; break; + case OpMatrixTimesMatrix: *hasResult = true; *hasResultType = true; break; + case OpOuterProduct: *hasResult = true; *hasResultType = true; break; + case OpDot: *hasResult = true; *hasResultType = true; break; + case OpIAddCarry: *hasResult = true; *hasResultType = true; break; + case OpISubBorrow: *hasResult = true; *hasResultType = true; break; + case OpUMulExtended: *hasResult = true; *hasResultType = true; break; + case OpSMulExtended: *hasResult = true; *hasResultType = true; break; + case OpAny: *hasResult = true; *hasResultType = true; break; + case OpAll: *hasResult = true; *hasResultType = true; break; + case OpIsNan: *hasResult = true; *hasResultType = true; break; + case OpIsInf: *hasResult = true; *hasResultType = true; break; + case OpIsFinite: *hasResult = true; *hasResultType = true; break; + case OpIsNormal: *hasResult = true; *hasResultType = true; break; + case OpSignBitSet: *hasResult = true; *hasResultType = true; break; + case OpLessOrGreater: *hasResult = true; *hasResultType = true; break; + case OpOrdered: *hasResult = true; *hasResultType = true; break; + case OpUnordered: *hasResult = true; *hasResultType = true; break; + case OpLogicalEqual: *hasResult = true; *hasResultType = true; break; + case OpLogicalNotEqual: *hasResult = true; *hasResultType = true; break; + case OpLogicalOr: *hasResult = true; *hasResultType = true; break; + case OpLogicalAnd: *hasResult = true; *hasResultType = true; break; + case OpLogicalNot: *hasResult = true; *hasResultType = true; break; + case OpSelect: *hasResult = true; *hasResultType = true; break; + case OpIEqual: *hasResult = true; *hasResultType = true; break; + case OpINotEqual: *hasResult = true; *hasResultType = true; break; + case OpUGreaterThan: *hasResult = true; *hasResultType = true; break; + case OpSGreaterThan: *hasResult = true; *hasResultType = true; break; + case OpUGreaterThanEqual: *hasResult = true; *hasResultType = true; break; + case OpSGreaterThanEqual: *hasResult = true; *hasResultType = true; break; + case OpULessThan: *hasResult = true; *hasResultType = true; break; + case OpSLessThan: *hasResult = true; *hasResultType = true; break; + case OpULessThanEqual: *hasResult = true; *hasResultType = true; break; + case OpSLessThanEqual: *hasResult = true; *hasResultType = true; break; + case OpFOrdEqual: *hasResult = true; *hasResultType = true; break; + case OpFUnordEqual: *hasResult = true; *hasResultType = true; break; + case OpFOrdNotEqual: *hasResult = true; *hasResultType = true; break; + case OpFUnordNotEqual: *hasResult = true; *hasResultType = true; break; + case OpFOrdLessThan: *hasResult = true; *hasResultType = true; break; + case OpFUnordLessThan: *hasResult = true; *hasResultType = 
true; break;
+    case OpFOrdGreaterThan: *hasResult = true; *hasResultType = true; break;
+    case OpFUnordGreaterThan: *hasResult = true; *hasResultType = true; break;
+    case OpFOrdLessThanEqual: *hasResult = true; *hasResultType = true; break;
+    case OpFUnordLessThanEqual: *hasResult = true; *hasResultType = true; break;
+    case OpFOrdGreaterThanEqual: *hasResult = true; *hasResultType = true; break;
+    case OpFUnordGreaterThanEqual: *hasResult = true; *hasResultType = true; break;
+    case OpShiftRightLogical: *hasResult = true; *hasResultType = true; break;
+    case OpShiftRightArithmetic: *hasResult = true; *hasResultType = true; break;
+    case OpShiftLeftLogical: *hasResult = true; *hasResultType = true; break;
+    case OpBitwiseOr: *hasResult = true; *hasResultType = true; break;
+    case OpBitwiseXor: *hasResult = true; *hasResultType = true; break;
+    case OpBitwiseAnd: *hasResult = true; *hasResultType = true; break;
+    case OpNot: *hasResult = true; *hasResultType = true; break;
+    case OpBitFieldInsert: *hasResult = true; *hasResultType = true; break;
+    case OpBitFieldSExtract: *hasResult = true; *hasResultType = true; break;
+    case OpBitFieldUExtract: *hasResult = true; *hasResultType = true; break;
+    case OpBitReverse: *hasResult = true; *hasResultType = true; break;
+    case OpBitCount: *hasResult = true; *hasResultType = true; break;
+    case OpDPdx: *hasResult = true; *hasResultType = true; break;
+    case OpDPdy: *hasResult = true; *hasResultType = true; break;
+    case OpFwidth: *hasResult = true; *hasResultType = true; break;
+    case OpDPdxFine: *hasResult = true; *hasResultType = true; break;
+    case OpDPdyFine: *hasResult = true; *hasResultType = true; break;
+    case OpFwidthFine: *hasResult = true; *hasResultType = true; break;
+    case OpDPdxCoarse: *hasResult = true; *hasResultType = true; break;
+    case OpDPdyCoarse: *hasResult = true; *hasResultType = true; break;
+    case OpFwidthCoarse: *hasResult = true; *hasResultType = true; break;
+    case OpEmitVertex: *hasResult = false; *hasResultType = false; break;
+    case OpEndPrimitive: *hasResult = false; *hasResultType = false; break;
+    case OpEmitStreamVertex: *hasResult = false; *hasResultType = false; break;
+    case OpEndStreamPrimitive: *hasResult = false; *hasResultType = false; break;
+    case OpControlBarrier: *hasResult = false; *hasResultType = false; break;
+    case OpMemoryBarrier: *hasResult = false; *hasResultType = false; break;
+    case OpAtomicLoad: *hasResult = true; *hasResultType = true; break;
+    case OpAtomicStore: *hasResult = false; *hasResultType = false; break;
+    case OpAtomicExchange: *hasResult = true; *hasResultType = true; break;
+    case OpAtomicCompareExchange: *hasResult = true; *hasResultType = true; break;
+    case OpAtomicCompareExchangeWeak: *hasResult = true; *hasResultType = true; break;
+    case OpAtomicIIncrement: *hasResult = true; *hasResultType = true; break;
+    case OpAtomicIDecrement: *hasResult = true; *hasResultType = true; break;
+    case OpAtomicIAdd: *hasResult = true; *hasResultType = true; break;
+    case OpAtomicISub: *hasResult = true; *hasResultType = true; break;
+    case OpAtomicSMin: *hasResult = true; *hasResultType = true; break;
+    case OpAtomicUMin: *hasResult = true; *hasResultType = true; break;
+    case OpAtomicSMax: *hasResult = true; *hasResultType = true; break;
+    case OpAtomicUMax: *hasResult = true; *hasResultType = true; break;
+    case OpAtomicAnd: *hasResult = true; *hasResultType = true; break;
+    case OpAtomicOr: *hasResult = true; *hasResultType = true; break;
+    case OpAtomicXor: *hasResult = true; *hasResultType = true; break;
+    case OpPhi: *hasResult = true; *hasResultType = true; break;
+    case OpLoopMerge: *hasResult = false; *hasResultType = false; break;
+    case OpSelectionMerge: *hasResult = false; *hasResultType = false; break;
+    case OpLabel: *hasResult = true; *hasResultType = false; break;
+    case OpBranch: *hasResult = false; *hasResultType = false; break;
+    case OpBranchConditional: *hasResult = false; *hasResultType = false; break;
+    case OpSwitch: *hasResult = false; *hasResultType = false; break;
+    case OpKill: *hasResult = false; *hasResultType = false; break;
+    case OpReturn: *hasResult = false; *hasResultType = false; break;
+    case OpReturnValue: *hasResult = false; *hasResultType = false; break;
+    case OpUnreachable: *hasResult = false; *hasResultType = false; break;
+    case OpLifetimeStart: *hasResult = false; *hasResultType = false; break;
+    case OpLifetimeStop: *hasResult = false; *hasResultType = false; break;
+    case OpGroupAsyncCopy: *hasResult = true; *hasResultType = true; break;
+    case OpGroupWaitEvents: *hasResult = false; *hasResultType = false; break;
+    case OpGroupAll: *hasResult = true; *hasResultType = true; break;
+    case OpGroupAny: *hasResult = true; *hasResultType = true; break;
+    case OpGroupBroadcast: *hasResult = true; *hasResultType = true; break;
+    case OpGroupIAdd: *hasResult = true; *hasResultType = true; break;
+    case OpGroupFAdd: *hasResult = true; *hasResultType = true; break;
+    case OpGroupFMin: *hasResult = true; *hasResultType = true; break;
+    case OpGroupUMin: *hasResult = true; *hasResultType = true; break;
+    case OpGroupSMin: *hasResult = true; *hasResultType = true; break;
+    case OpGroupFMax: *hasResult = true; *hasResultType = true; break;
+    case OpGroupUMax: *hasResult = true; *hasResultType = true; break;
+    case OpGroupSMax: *hasResult = true; *hasResultType = true; break;
+    case OpReadPipe: *hasResult = true; *hasResultType = true; break;
+    case OpWritePipe: *hasResult = true; *hasResultType = true; break;
+    case OpReservedReadPipe: *hasResult = true; *hasResultType = true; break;
+    case OpReservedWritePipe: *hasResult = true; *hasResultType = true; break;
+    case OpReserveReadPipePackets: *hasResult = true; *hasResultType = true; break;
+    case OpReserveWritePipePackets: *hasResult = true; *hasResultType = true; break;
+    case OpCommitReadPipe: *hasResult = false; *hasResultType = false; break;
+    case OpCommitWritePipe: *hasResult = false; *hasResultType = false; break;
+    case OpIsValidReserveId: *hasResult = true; *hasResultType = true; break;
+    case OpGetNumPipePackets: *hasResult = true; *hasResultType = true; break;
+    case OpGetMaxPipePackets: *hasResult = true; *hasResultType = true; break;
+    case OpGroupReserveReadPipePackets: *hasResult = true; *hasResultType = true; break;
+    case OpGroupReserveWritePipePackets: *hasResult = true; *hasResultType = true; break;
+    case OpGroupCommitReadPipe: *hasResult = false; *hasResultType = false; break;
+    case OpGroupCommitWritePipe: *hasResult = false; *hasResultType = false; break;
+    case OpEnqueueMarker: *hasResult = true; *hasResultType = true; break;
+    case OpEnqueueKernel: *hasResult = true; *hasResultType = true; break;
+    case OpGetKernelNDrangeSubGroupCount: *hasResult = true; *hasResultType = true; break;
+    case OpGetKernelNDrangeMaxSubGroupSize: *hasResult = true; *hasResultType = true; break;
+    case OpGetKernelWorkGroupSize: *hasResult = true; *hasResultType = true; break;
+    case OpGetKernelPreferredWorkGroupSizeMultiple: *hasResult = true; *hasResultType = true; break;
+    case OpRetainEvent: *hasResult = false; *hasResultType = false; break;
+    case OpReleaseEvent: *hasResult = false; *hasResultType = false; break;
+    case OpCreateUserEvent: *hasResult = true; *hasResultType = true; break;
+    case OpIsValidEvent: *hasResult = true; *hasResultType = true; break;
+    case OpSetUserEventStatus: *hasResult = false; *hasResultType = false; break;
+    case OpCaptureEventProfilingInfo: *hasResult = false; *hasResultType = false; break;
+    case OpGetDefaultQueue: *hasResult = true; *hasResultType = true; break;
+    case OpBuildNDRange: *hasResult = true; *hasResultType = true; break;
+    case OpImageSparseSampleImplicitLod: *hasResult = true; *hasResultType = true; break;
+    case OpImageSparseSampleExplicitLod: *hasResult = true; *hasResultType = true; break;
+    case OpImageSparseSampleDrefImplicitLod: *hasResult = true; *hasResultType = true; break;
+    case OpImageSparseSampleDrefExplicitLod: *hasResult = true; *hasResultType = true; break;
+    case OpImageSparseSampleProjImplicitLod: *hasResult = true; *hasResultType = true; break;
+    case OpImageSparseSampleProjExplicitLod: *hasResult = true; *hasResultType = true; break;
+    case OpImageSparseSampleProjDrefImplicitLod: *hasResult = true; *hasResultType = true; break;
+    case OpImageSparseSampleProjDrefExplicitLod: *hasResult = true; *hasResultType = true; break;
+    case OpImageSparseFetch: *hasResult = true; *hasResultType = true; break;
+    case OpImageSparseGather: *hasResult = true; *hasResultType = true; break;
+    case OpImageSparseDrefGather: *hasResult = true; *hasResultType = true; break;
+    case OpImageSparseTexelsResident: *hasResult = true; *hasResultType = true; break;
+    case OpNoLine: *hasResult = false; *hasResultType = false; break;
+    case OpAtomicFlagTestAndSet: *hasResult = true; *hasResultType = true; break;
+    case OpAtomicFlagClear: *hasResult = false; *hasResultType = false; break;
+    case OpImageSparseRead: *hasResult = true; *hasResultType = true; break;
+    case OpSizeOf: *hasResult = true; *hasResultType = true; break;
+    case OpTypePipeStorage: *hasResult = true; *hasResultType = false; break;
+    case OpConstantPipeStorage: *hasResult = true; *hasResultType = true; break;
+    case OpCreatePipeFromPipeStorage: *hasResult = true; *hasResultType = true; break;
+    case OpGetKernelLocalSizeForSubgroupCount: *hasResult = true; *hasResultType = true; break;
+    case OpGetKernelMaxNumSubgroups: *hasResult = true; *hasResultType = true; break;
+    case OpTypeNamedBarrier: *hasResult = true; *hasResultType = false; break;
+    case OpNamedBarrierInitialize: *hasResult = true; *hasResultType = true; break;
+    case OpMemoryNamedBarrier: *hasResult = false; *hasResultType = false; break;
+    case OpModuleProcessed: *hasResult = false; *hasResultType = false; break;
+    case OpExecutionModeId: *hasResult = false; *hasResultType = false; break;
+    case OpDecorateId: *hasResult = false; *hasResultType = false; break;
+    case OpGroupNonUniformElect: *hasResult = true; *hasResultType = true; break;
+    case OpGroupNonUniformAll: *hasResult = true; *hasResultType = true; break;
+    case OpGroupNonUniformAny: *hasResult = true; *hasResultType = true; break;
+    case OpGroupNonUniformAllEqual: *hasResult = true; *hasResultType = true; break;
+    case OpGroupNonUniformBroadcast: *hasResult = true; *hasResultType = true; break;
+    case OpGroupNonUniformBroadcastFirst: *hasResult = true; *hasResultType = true; break;
+    case OpGroupNonUniformBallot: *hasResult = true; *hasResultType = true; break;
+    case OpGroupNonUniformInverseBallot: *hasResult = true; *hasResultType = true; break;
+    case OpGroupNonUniformBallotBitExtract: *hasResult = true; *hasResultType = true; break;
+    case OpGroupNonUniformBallotBitCount: *hasResult = true; *hasResultType = true; break;
+    case OpGroupNonUniformBallotFindLSB: *hasResult = true; *hasResultType = true; break;
+    case OpGroupNonUniformBallotFindMSB: *hasResult = true; *hasResultType = true; break;
+    case OpGroupNonUniformShuffle: *hasResult = true; *hasResultType = true; break;
+    case OpGroupNonUniformShuffleXor: *hasResult = true; *hasResultType = true; break;
+    case OpGroupNonUniformShuffleUp: *hasResult = true; *hasResultType = true; break;
+    case OpGroupNonUniformShuffleDown: *hasResult = true; *hasResultType = true; break;
+    case OpGroupNonUniformIAdd: *hasResult = true; *hasResultType = true; break;
+    case OpGroupNonUniformFAdd: *hasResult = true; *hasResultType = true; break;
+    case OpGroupNonUniformIMul: *hasResult = true; *hasResultType = true; break;
+    case OpGroupNonUniformFMul: *hasResult = true; *hasResultType = true; break;
+    case OpGroupNonUniformSMin: *hasResult = true; *hasResultType = true; break;
+    case OpGroupNonUniformUMin: *hasResult = true; *hasResultType = true; break;
+    case OpGroupNonUniformFMin: *hasResult = true; *hasResultType = true; break;
+    case OpGroupNonUniformSMax: *hasResult = true; *hasResultType = true; break;
+    case OpGroupNonUniformUMax: *hasResult = true; *hasResultType = true; break;
+    case OpGroupNonUniformFMax: *hasResult = true; *hasResultType = true; break;
+    case OpGroupNonUniformBitwiseAnd: *hasResult = true; *hasResultType = true; break;
+    case OpGroupNonUniformBitwiseOr: *hasResult = true; *hasResultType = true; break;
+    case OpGroupNonUniformBitwiseXor: *hasResult = true; *hasResultType = true; break;
+    case OpGroupNonUniformLogicalAnd: *hasResult = true; *hasResultType = true; break;
+    case OpGroupNonUniformLogicalOr: *hasResult = true; *hasResultType = true; break;
+    case OpGroupNonUniformLogicalXor: *hasResult = true; *hasResultType = true; break;
+    case OpGroupNonUniformQuadBroadcast: *hasResult = true; *hasResultType = true; break;
+    case OpGroupNonUniformQuadSwap: *hasResult = true; *hasResultType = true; break;
+    case OpCopyLogical: *hasResult = true; *hasResultType = true; break;
+    case OpPtrEqual: *hasResult = true; *hasResultType = true; break;
+    case OpPtrNotEqual: *hasResult = true; *hasResultType = true; break;
+    case OpPtrDiff: *hasResult = true; *hasResultType = true; break;
+    case OpSubgroupBallotKHR: *hasResult = true; *hasResultType = true; break;
+    case OpSubgroupFirstInvocationKHR: *hasResult = true; *hasResultType = true; break;
+    case OpSubgroupAllKHR: *hasResult = true; *hasResultType = true; break;
+    case OpSubgroupAnyKHR: *hasResult = true; *hasResultType = true; break;
+    case OpSubgroupAllEqualKHR: *hasResult = true; *hasResultType = true; break;
+    case OpSubgroupReadInvocationKHR: *hasResult = true; *hasResultType = true; break;
+    case OpGroupIAddNonUniformAMD: *hasResult = true; *hasResultType = true; break;
+    case OpGroupFAddNonUniformAMD: *hasResult = true; *hasResultType = true; break;
+    case OpGroupFMinNonUniformAMD: *hasResult = true; *hasResultType = true; break;
+    case OpGroupUMinNonUniformAMD: *hasResult = true; *hasResultType = true; break;
+    case OpGroupSMinNonUniformAMD: *hasResult = true; *hasResultType = true; break;
+    case OpGroupFMaxNonUniformAMD: *hasResult = true; *hasResultType = true; break;
+    case OpGroupUMaxNonUniformAMD: *hasResult = true; *hasResultType = true; break;
+    case OpGroupSMaxNonUniformAMD: *hasResult = true; *hasResultType = true; break;
+    case OpFragmentMaskFetchAMD: *hasResult = true; *hasResultType = true; break;
+    case OpFragmentFetchAMD: *hasResult = true; *hasResultType = true; break;
+    case OpReadClockKHR: *hasResult = true; *hasResultType = true; break;
+    case OpImageSampleFootprintNV: *hasResult = true; *hasResultType = true; break;
+    case OpGroupNonUniformPartitionNV: *hasResult = true; *hasResultType = true; break;
+    case OpWritePackedPrimitiveIndices4x8NV: *hasResult = false; *hasResultType = false; break;
+    case OpReportIntersectionNV: *hasResult = true; *hasResultType = true; break;
+    case OpIgnoreIntersectionNV: *hasResult = false; *hasResultType = false; break;
+    case OpTerminateRayNV: *hasResult = false; *hasResultType = false; break;
+    case OpTraceNV: *hasResult = false; *hasResultType = false; break;
+    case OpTypeAccelerationStructureNV: *hasResult = true; *hasResultType = false; break;
+    case OpExecuteCallableNV: *hasResult = false; *hasResultType = false; break;
+    case OpTypeCooperativeMatrixNV: *hasResult = true; *hasResultType = false; break;
+    case OpCooperativeMatrixLoadNV: *hasResult = true; *hasResultType = true; break;
+    case OpCooperativeMatrixStoreNV: *hasResult = false; *hasResultType = false; break;
+    case OpCooperativeMatrixMulAddNV: *hasResult = true; *hasResultType = true; break;
+    case OpCooperativeMatrixLengthNV: *hasResult = true; *hasResultType = true; break;
+    case OpBeginInvocationInterlockEXT: *hasResult = false; *hasResultType = false; break;
+    case OpEndInvocationInterlockEXT: *hasResult = false; *hasResultType = false; break;
+    case OpDemoteToHelperInvocationEXT: *hasResult = false; *hasResultType = false; break;
+    case OpIsHelperInvocationEXT: *hasResult = true; *hasResultType = true; break;
+    case OpSubgroupShuffleINTEL: *hasResult = true; *hasResultType = true; break;
+    case OpSubgroupShuffleDownINTEL: *hasResult = true; *hasResultType = true; break;
+    case OpSubgroupShuffleUpINTEL: *hasResult = true; *hasResultType = true; break;
+    case OpSubgroupShuffleXorINTEL: *hasResult = true; *hasResultType = true; break;
+    case OpSubgroupBlockReadINTEL: *hasResult = true; *hasResultType = true; break;
+    case OpSubgroupBlockWriteINTEL: *hasResult = false; *hasResultType = false; break;
+    case OpSubgroupImageBlockReadINTEL: *hasResult = true; *hasResultType = true; break;
+    case OpSubgroupImageBlockWriteINTEL: *hasResult = false; *hasResultType = false; break;
+    case OpSubgroupImageMediaBlockReadINTEL: *hasResult = true; *hasResultType = true; break;
+    case OpSubgroupImageMediaBlockWriteINTEL: *hasResult = false; *hasResultType = false; break;
+    case OpUCountLeadingZerosINTEL: *hasResult = true; *hasResultType = true; break;
+    case OpUCountTrailingZerosINTEL: *hasResult = true; *hasResultType = true; break;
+    case OpAbsISubINTEL: *hasResult = true; *hasResultType = true; break;
+    case OpAbsUSubINTEL: *hasResult = true; *hasResultType = true; break;
+    case OpIAddSatINTEL: *hasResult = true; *hasResultType = true; break;
+    case OpUAddSatINTEL: *hasResult = true; *hasResultType = true; break;
+    case OpIAverageINTEL: *hasResult = true; *hasResultType = true; break;
+    case OpUAverageINTEL: *hasResult = true; *hasResultType = true; break;
+    case OpIAverageRoundedINTEL: *hasResult = true; *hasResultType = true; break;
+    case OpUAverageRoundedINTEL: *hasResult = true; *hasResultType = true; break;
+    case OpISubSatINTEL: *hasResult = true; *hasResultType = true; break;
+    case OpUSubSatINTEL: *hasResult = true; *hasResultType = true; break;
+    case OpIMul32x16INTEL: *hasResult = true; *hasResultType = true; break;
+    case OpUMul32x16INTEL: *hasResult = true; *hasResultType = true; break;
+    case OpDecorateString: *hasResult = false; *hasResultType = false; break;
+    case OpMemberDecorateString: *hasResult = false; *hasResultType = false; break;
+    case OpVmeImageINTEL: *hasResult = true; *hasResultType = true; break;
+    case OpTypeVmeImageINTEL: *hasResult = true; *hasResultType = false; break;
+    case OpTypeAvcImePayloadINTEL: *hasResult = true; *hasResultType = false; break;
+    case OpTypeAvcRefPayloadINTEL: *hasResult = true; *hasResultType = false; break;
+    case OpTypeAvcSicPayloadINTEL: *hasResult = true; *hasResultType = false; break;
+    case OpTypeAvcMcePayloadINTEL: *hasResult = true; *hasResultType = false; break;
+    case OpTypeAvcMceResultINTEL: *hasResult = true; *hasResultType = false; break;
+    case OpTypeAvcImeResultINTEL: *hasResult = true; *hasResultType = false; break;
+    case OpTypeAvcImeResultSingleReferenceStreamoutINTEL: *hasResult = true; *hasResultType = false; break;
+    case OpTypeAvcImeResultDualReferenceStreamoutINTEL: *hasResult = true; *hasResultType = false; break;
+    case OpTypeAvcImeSingleReferenceStreaminINTEL: *hasResult = true; *hasResultType = false; break;
+    case OpTypeAvcImeDualReferenceStreaminINTEL: *hasResult = true; *hasResultType = false; break;
+    case OpTypeAvcRefResultINTEL: *hasResult = true; *hasResultType = false; break;
+    case OpTypeAvcSicResultINTEL: *hasResult = true; *hasResultType = false; break;
+    case OpSubgroupAvcMceGetDefaultInterBaseMultiReferencePenaltyINTEL: *hasResult = true; *hasResultType = true; break;
+    case OpSubgroupAvcMceSetInterBaseMultiReferencePenaltyINTEL: *hasResult = true; *hasResultType = true; break;
+    case OpSubgroupAvcMceGetDefaultInterShapePenaltyINTEL: *hasResult = true; *hasResultType = true; break;
+    case OpSubgroupAvcMceSetInterShapePenaltyINTEL: *hasResult = true; *hasResultType = true; break;
+    case OpSubgroupAvcMceGetDefaultInterDirectionPenaltyINTEL: *hasResult = true; *hasResultType = true; break;
+    case OpSubgroupAvcMceSetInterDirectionPenaltyINTEL: *hasResult = true; *hasResultType = true; break;
+    case OpSubgroupAvcMceGetDefaultIntraLumaShapePenaltyINTEL: *hasResult = true; *hasResultType = true; break;
+    case OpSubgroupAvcMceGetDefaultInterMotionVectorCostTableINTEL: *hasResult = true; *hasResultType = true; break;
+    case OpSubgroupAvcMceGetDefaultHighPenaltyCostTableINTEL: *hasResult = true; *hasResultType = true; break;
+    case OpSubgroupAvcMceGetDefaultMediumPenaltyCostTableINTEL: *hasResult = true; *hasResultType = true; break;
+    case OpSubgroupAvcMceGetDefaultLowPenaltyCostTableINTEL: *hasResult = true; *hasResultType = true; break;
+    case OpSubgroupAvcMceSetMotionVectorCostFunctionINTEL: *hasResult = true; *hasResultType = true; break;
+    case OpSubgroupAvcMceGetDefaultIntraLumaModePenaltyINTEL: *hasResult = true; *hasResultType = true; break;
+    case OpSubgroupAvcMceGetDefaultNonDcLumaIntraPenaltyINTEL: *hasResult = true; *hasResultType = true; break;
+    case OpSubgroupAvcMceGetDefaultIntraChromaModeBasePenaltyINTEL: *hasResult = true; *hasResultType = true; break;
+    case OpSubgroupAvcMceSetAcOnlyHaarINTEL: *hasResult = true; *hasResultType = true; break;
+    case OpSubgroupAvcMceSetSourceInterlacedFieldPolarityINTEL: *hasResult = true; *hasResultType = true; break;
+    case OpSubgroupAvcMceSetSingleReferenceInterlacedFieldPolarityINTEL: *hasResult = true; *hasResultType = true; break;
+    case OpSubgroupAvcMceSetDualReferenceInterlacedFieldPolaritiesINTEL: *hasResult = true; *hasResultType = true; break;
+    case OpSubgroupAvcMceConvertToImePayloadINTEL: *hasResult = true; *hasResultType = true; break;
+    case OpSubgroupAvcMceConvertToImeResultINTEL: *hasResult = true; *hasResultType = true; break;
+    case OpSubgroupAvcMceConvertToRefPayloadINTEL: *hasResult = true; *hasResultType = true; break;
+    case OpSubgroupAvcMceConvertToRefResultINTEL: *hasResult = true; *hasResultType = true; break;
+    case OpSubgroupAvcMceConvertToSicPayloadINTEL: *hasResult = true; *hasResultType = true; break;
+    case OpSubgroupAvcMceConvertToSicResultINTEL: *hasResult = true; *hasResultType = true; break;
+    case OpSubgroupAvcMceGetMotionVectorsINTEL: *hasResult = true; *hasResultType = true; break;
+    case OpSubgroupAvcMceGetInterDistortionsINTEL: *hasResult = true; *hasResultType = true; break;
+    case OpSubgroupAvcMceGetBestInterDistortionsINTEL: *hasResult = true; *hasResultType = true; break;
+    case OpSubgroupAvcMceGetInterMajorShapeINTEL: *hasResult = true; *hasResultType = true; break;
+    case OpSubgroupAvcMceGetInterMinorShapeINTEL: *hasResult = true; *hasResultType = true; break;
+    case OpSubgroupAvcMceGetInterDirectionsINTEL: *hasResult = true; *hasResultType = true; break;
+    case OpSubgroupAvcMceGetInterMotionVectorCountINTEL: *hasResult = true; *hasResultType = true; break;
+    case OpSubgroupAvcMceGetInterReferenceIdsINTEL: *hasResult = true; *hasResultType = true; break;
+    case OpSubgroupAvcMceGetInterReferenceInterlacedFieldPolaritiesINTEL: *hasResult = true; *hasResultType = true; break;
+    case OpSubgroupAvcImeInitializeINTEL: *hasResult = true; *hasResultType = true; break;
+    case OpSubgroupAvcImeSetSingleReferenceINTEL: *hasResult = true; *hasResultType = true; break;
+    case OpSubgroupAvcImeSetDualReferenceINTEL: *hasResult = true; *hasResultType = true; break;
+    case OpSubgroupAvcImeRefWindowSizeINTEL: *hasResult = true; *hasResultType = true; break;
+    case OpSubgroupAvcImeAdjustRefOffsetINTEL: *hasResult = true; *hasResultType = true; break;
+    case OpSubgroupAvcImeConvertToMcePayloadINTEL: *hasResult = true; *hasResultType = true; break;
+    case OpSubgroupAvcImeSetMaxMotionVectorCountINTEL: *hasResult = true; *hasResultType = true; break;
+    case OpSubgroupAvcImeSetUnidirectionalMixDisableINTEL: *hasResult = true; *hasResultType = true; break;
+    case OpSubgroupAvcImeSetEarlySearchTerminationThresholdINTEL: *hasResult = true; *hasResultType = true; break;
+    case OpSubgroupAvcImeSetWeightedSadINTEL: *hasResult = true; *hasResultType = true; break;
+    case OpSubgroupAvcImeEvaluateWithSingleReferenceINTEL: *hasResult = true; *hasResultType = true; break;
+    case OpSubgroupAvcImeEvaluateWithDualReferenceINTEL: *hasResult = true; *hasResultType = true; break;
+    case OpSubgroupAvcImeEvaluateWithSingleReferenceStreaminINTEL: *hasResult = true; *hasResultType = true; break;
+    case OpSubgroupAvcImeEvaluateWithDualReferenceStreaminINTEL: *hasResult = true; *hasResultType = true; break;
+    case OpSubgroupAvcImeEvaluateWithSingleReferenceStreamoutINTEL: *hasResult = true; *hasResultType = true; break;
+    case OpSubgroupAvcImeEvaluateWithDualReferenceStreamoutINTEL: *hasResult = true; *hasResultType = true; break;
+    case OpSubgroupAvcImeEvaluateWithSingleReferenceStreaminoutINTEL: *hasResult = true; *hasResultType = true; break;
+    case OpSubgroupAvcImeEvaluateWithDualReferenceStreaminoutINTEL: *hasResult = true; *hasResultType = true; break;
+    case OpSubgroupAvcImeConvertToMceResultINTEL: *hasResult = true; *hasResultType = true; break;
+    case OpSubgroupAvcImeGetSingleReferenceStreaminINTEL: *hasResult = true; *hasResultType = true; break;
+    case OpSubgroupAvcImeGetDualReferenceStreaminINTEL: *hasResult = true; *hasResultType = true; break;
+    case OpSubgroupAvcImeStripSingleReferenceStreamoutINTEL: *hasResult = true; *hasResultType = true; break;
+    case OpSubgroupAvcImeStripDualReferenceStreamoutINTEL: *hasResult = true; *hasResultType = true; break;
+    case OpSubgroupAvcImeGetStreamoutSingleReferenceMajorShapeMotionVectorsINTEL: *hasResult = true; *hasResultType = true; break;
+    case OpSubgroupAvcImeGetStreamoutSingleReferenceMajorShapeDistortionsINTEL: *hasResult = true; *hasResultType = true; break;
+    case OpSubgroupAvcImeGetStreamoutSingleReferenceMajorShapeReferenceIdsINTEL: *hasResult = true; *hasResultType = true; break;
+    case OpSubgroupAvcImeGetStreamoutDualReferenceMajorShapeMotionVectorsINTEL: *hasResult = true; *hasResultType = true; break;
+    case OpSubgroupAvcImeGetStreamoutDualReferenceMajorShapeDistortionsINTEL: *hasResult = true; *hasResultType = true; break;
+    case OpSubgroupAvcImeGetStreamoutDualReferenceMajorShapeReferenceIdsINTEL: *hasResult = true; *hasResultType = true; break;
+    case OpSubgroupAvcImeGetBorderReachedINTEL: *hasResult = true; *hasResultType = true; break;
+    case OpSubgroupAvcImeGetTruncatedSearchIndicationINTEL: *hasResult = true; *hasResultType = true; break;
+    case OpSubgroupAvcImeGetUnidirectionalEarlySearchTerminationINTEL: *hasResult = true; *hasResultType = true; break;
+    case OpSubgroupAvcImeGetWeightingPatternMinimumMotionVectorINTEL: *hasResult = true; *hasResultType = true; break;
+    case OpSubgroupAvcImeGetWeightingPatternMinimumDistortionINTEL: *hasResult = true; *hasResultType = true; break;
+    case OpSubgroupAvcFmeInitializeINTEL: *hasResult = true; *hasResultType = true; break;
+    case OpSubgroupAvcBmeInitializeINTEL: *hasResult = true; *hasResultType = true; break;
+    case OpSubgroupAvcRefConvertToMcePayloadINTEL: *hasResult = true; *hasResultType = true; break;
+    case OpSubgroupAvcRefSetBidirectionalMixDisableINTEL: *hasResult = true; *hasResultType = true; break;
+    case OpSubgroupAvcRefSetBilinearFilterEnableINTEL: *hasResult = true; *hasResultType = true; break;
+    case OpSubgroupAvcRefEvaluateWithSingleReferenceINTEL: *hasResult = true; *hasResultType = true; break;
+    case OpSubgroupAvcRefEvaluateWithDualReferenceINTEL: *hasResult = true; *hasResultType = true; break;
+    case OpSubgroupAvcRefEvaluateWithMultiReferenceINTEL: *hasResult = true; *hasResultType = true; break;
+    case OpSubgroupAvcRefEvaluateWithMultiReferenceInterlacedINTEL: *hasResult = true; *hasResultType = true; break;
+    case OpSubgroupAvcRefConvertToMceResultINTEL: *hasResult = true; *hasResultType = true; break;
+    case OpSubgroupAvcSicInitializeINTEL: *hasResult = true; *hasResultType = true; break;
+    case OpSubgroupAvcSicConfigureSkcINTEL: *hasResult = true; *hasResultType = true; break;
+    case OpSubgroupAvcSicConfigureIpeLumaINTEL: *hasResult = true; *hasResultType = true; break;
+    case OpSubgroupAvcSicConfigureIpeLumaChromaINTEL: *hasResult = true; *hasResultType = true; break;
+    case OpSubgroupAvcSicGetMotionVectorMaskINTEL: *hasResult = true; *hasResultType = true; break;
+    case OpSubgroupAvcSicConvertToMcePayloadINTEL: *hasResult = true; *hasResultType = true; break;
+    case OpSubgroupAvcSicSetIntraLumaShapePenaltyINTEL: *hasResult = true; *hasResultType = true; break;
+    case OpSubgroupAvcSicSetIntraLumaModeCostFunctionINTEL: *hasResult = true; *hasResultType = true; break;
+    case OpSubgroupAvcSicSetIntraChromaModeCostFunctionINTEL: *hasResult = true; *hasResultType = true; break;
+    case OpSubgroupAvcSicSetBilinearFilterEnableINTEL: *hasResult = true; *hasResultType = true; break;
+    case OpSubgroupAvcSicSetSkcForwardTransformEnableINTEL: *hasResult = true; *hasResultType = true; break;
+    case OpSubgroupAvcSicSetBlockBasedRawSkipSadINTEL: *hasResult = true; *hasResultType = true; break;
+    case OpSubgroupAvcSicEvaluateIpeINTEL: *hasResult = true; *hasResultType = true; break;
+    case OpSubgroupAvcSicEvaluateWithSingleReferenceINTEL: *hasResult = true; *hasResultType = true; break;
+    case OpSubgroupAvcSicEvaluateWithDualReferenceINTEL: *hasResult = true; *hasResultType = true; break;
+    case OpSubgroupAvcSicEvaluateWithMultiReferenceINTEL: *hasResult = true; *hasResultType = true; break;
+    case OpSubgroupAvcSicEvaluateWithMultiReferenceInterlacedINTEL: *hasResult = true; *hasResultType = true; break;
+    case OpSubgroupAvcSicConvertToMceResultINTEL: *hasResult = true; *hasResultType = true; break;
+    case OpSubgroupAvcSicGetIpeLumaShapeINTEL: *hasResult = true; *hasResultType = true; break;
+    case OpSubgroupAvcSicGetBestIpeLumaDistortionINTEL: *hasResult = true; *hasResultType = true; break;
+    case OpSubgroupAvcSicGetBestIpeChromaDistortionINTEL: *hasResult = true; *hasResultType = true; break;
+    case OpSubgroupAvcSicGetPackedIpeLumaModesINTEL: *hasResult = true; *hasResultType = true; break;
+    case OpSubgroupAvcSicGetIpeChromaModeINTEL: *hasResult = true; *hasResultType = true; break;
+    case OpSubgroupAvcSicGetPackedSkcLumaCountThresholdINTEL: *hasResult = true; *hasResultType = true; break;
+    case OpSubgroupAvcSicGetPackedSkcLumaSumThresholdINTEL: *hasResult = true; *hasResultType = true; break;
+    case OpSubgroupAvcSicGetInterRawSadsINTEL: *hasResult = true; *hasResultType = true; break;
+    }
+}
+#endif
+
+inline ImageOperandsMask operator|(ImageOperandsMask a, ImageOperandsMask b) { return ImageOperandsMask(unsigned(a) | unsigned(b)); }
+inline FPFastMathModeMask operator|(FPFastMathModeMask a, FPFastMathModeMask b) { return FPFastMathModeMask(unsigned(a) | unsigned(b)); }
+inline SelectionControlMask operator|(SelectionControlMask a, SelectionControlMask b) { return SelectionControlMask(unsigned(a) | unsigned(b)); }
+inline LoopControlMask operator|(LoopControlMask a, LoopControlMask b) { return LoopControlMask(unsigned(a) | unsigned(b)); }
+inline FunctionControlMask operator|(FunctionControlMask a, FunctionControlMask b) { return FunctionControlMask(unsigned(a) | unsigned(b)); }
+inline MemorySemanticsMask operator|(MemorySemanticsMask a, MemorySemanticsMask b) { return MemorySemanticsMask(unsigned(a) | unsigned(b)); }
+inline MemoryAccessMask operator|(MemoryAccessMask a, MemoryAccessMask b) { return MemoryAccessMask(unsigned(a) | unsigned(b)); }
+inline KernelProfilingInfoMask operator|(KernelProfilingInfoMask a, KernelProfilingInfoMask b) { return KernelProfilingInfoMask(unsigned(a) | unsigned(b)); }
+
+}
+
+#endif
diff --git a/third_party/rust/spirv_cross/src/vendor/SPIRV-Cross/spirv_cfg.cpp b/third_party/rust/spirv_cross/src/vendor/SPIRV-Cross/spirv_cfg.cpp
new file mode 100644
index 000000000000..894af0a6504e
--- /dev/null
+++ b/third_party/rust/spirv_cross/src/vendor/SPIRV-Cross/spirv_cfg.cpp
@@ -0,0 +1,397 @@
+
+
+#include "spirv_cfg.hpp"
+#include "spirv_cross.hpp"
+#include <algorithm>
+#include <assert.h>
+
+using namespace std;
+
+namespace SPIRV_CROSS_NAMESPACE
+{
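+// The CFG is built per-function in two passes: post_order_visit() records
+// forward edges and a post-order numbering over the reachable blocks, and
+// build_immediate_dominators() then derives immediate dominators from that
+// numbering, as implemented below.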
+CFG::CFG(Compiler &compiler_, const SPIRFunction &func_)
+    : compiler(compiler_)
+    , func(func_)
+{
+    build_post_order_visit_order();
+    build_immediate_dominators();
+}
+
+uint32_t CFG::find_common_dominator(uint32_t a, uint32_t b) const
+{
+    while (a != b)
+    {
+        if (get_visit_order(a) < get_visit_order(b))
+            a = get_immediate_dominator(a);
+        else
+            b = get_immediate_dominator(b);
+    }
+    return a;
+}
+
+void CFG::build_immediate_dominators()
+{
+    // Traverse the blocks in reverse post-order and intersect the dominators
+    // of all predecessors (the classic iterative dominator construction).
+    immediate_dominators.clear();
+    immediate_dominators[func.entry_block] = func.entry_block;
+
+    for (auto i = post_order.size(); i; i--)
+    {
+        uint32_t block = post_order[i - 1];
+        auto &pred = preceding_edges[block];
+        if (pred.empty()) // If we have no predecessors, we cannot have a dominator.
+            continue;
+
+        for (auto &edge : pred)
+        {
+            if (immediate_dominators[block])
+            {
+                assert(immediate_dominators[edge]);
+                immediate_dominators[block] = find_common_dominator(immediate_dominators[block], edge);
+            }
+            else
+                immediate_dominators[block] = edge;
+        }
+    }
+}
+
+bool CFG::is_back_edge(uint32_t to) const
+{
+    // Back edges target a block which is currently being visited;
+    // such blocks carry the temporary visit-order value 0.
+    auto itr = visit_order.find(to);
+    return itr != end(visit_order) && itr->second.get() == 0;
+}
+
+bool CFG::has_visited_forward_edge(uint32_t to) const
+{
+    // Fully visited blocks have received a final, positive visit order.
+    auto itr = visit_order.find(to);
+    return itr != end(visit_order) && itr->second.get() > 0;
+}
+
+bool CFG::post_order_visit(uint32_t block_id)
+{
+    // If we have already visited this block via a forward edge, record the
+    // crossing edge but do not recurse. Back edges are not recorded at all.
+    if (has_visited_forward_edge(block_id))
+        return true;
+    else if (is_back_edge(block_id))
+        return false;
+
+    // Block back-edges from recursively revisiting ourselves.
+    visit_order[block_id].get() = 0;
+
+    auto &block = compiler.get<SPIRBlock>(block_id);
+
+    // For loop headers, add an implied branch to the merge target so the
+    // merge block always has a path from the header.
+    if (block.merge == SPIRBlock::MergeLoop && post_order_visit(block.merge_block))
+        add_branch(block_id, block.merge_block);
+
+    // First visit our branch targets.
+    switch (block.terminator)
+    {
+    case SPIRBlock::Direct:
+        if (post_order_visit(block.next_block))
+            add_branch(block_id, block.next_block);
+        break;
+
+    case SPIRBlock::Select:
+        if (post_order_visit(block.true_block))
+            add_branch(block_id, block.true_block);
+        if (post_order_visit(block.false_block))
+            add_branch(block_id, block.false_block);
+        break;
+
+    case SPIRBlock::MultiSelect:
+        for (auto &target : block.cases)
+        {
+            if (post_order_visit(target.block))
+                add_branch(block_id, target.block);
+        }
+        if (block.default_block && post_order_visit(block.default_block))
+            add_branch(block_id, block.default_block);
+        break;
+
+    default:
+        break;
+    }
+
+    // For selection merges, add an implied branch to the merge target, but
+    // only when it does not distort the dominance relation of the merge block.
+    if (block.merge == SPIRBlock::MergeSelection && post_order_visit(block.next_block))
+    {
+        auto pred_itr = preceding_edges.find(block.next_block);
+        if (pred_itr != end(preceding_edges))
+        {
+            auto &pred = pred_itr->second;
+            auto succ_itr = succeeding_edges.find(block_id);
+            size_t num_succeeding_edges = 0;
+            if (succ_itr != end(succeeding_edges))
+                num_succeeding_edges = succ_itr->second.size();
+
+            if (block.terminator == SPIRBlock::MultiSelect && num_succeeding_edges == 1)
+            {
+                // Multiple switch cases branch directly to the merge block;
+                // keep the header as a predecessor only if someone reaches it.
+                if (!pred.empty())
+                    add_branch(block_id, block.next_block);
+            }
+            else
+            {
+                if (pred.size() == 1 && *pred.begin() != block_id)
+                    add_branch(block_id, block.next_block);
+            }
+        }
+        else
+        {
+            // If the merge block has no other predecessors, the implied
+            // branch is the only way to reach it.
+            add_branch(block_id, block.next_block);
+        }
+    }
+
+    // Then visit ourselves. Start counting at one, so 0 can serve as the
+    // magic "currently being visited" value above.
+    visit_order[block_id].get() = ++visit_count;
+    post_order.push_back(block_id);
+    return true;
+}
+
+void CFG::build_post_order_visit_order()
+{
+    uint32_t block = func.entry_block;
+    visit_count = 0;
+    visit_order.clear();
+    post_order.clear();
+    post_order_visit(block);
+}
+
+void CFG::add_branch(uint32_t from, uint32_t to)
+{
+    const auto add_unique = [](SmallVector<uint32_t> &l, uint32_t value) {
+        auto itr = find(begin(l), end(l), value);
+        if (itr == end(l))
+            l.push_back(value);
+    };
+    add_unique(preceding_edges[to], from);
+    add_unique(succeeding_edges[from], to);
+}
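+// Illustrative sketch (not part of the original source): once the graph is
+// built, dominators can be queried for any reachable block `block` like so:
+//
+//   uint32_t idom = cfg.get_immediate_dominator(block);   // 0 if unknown
+//   uint32_t common = cfg.find_common_dominator(a, b);    // walks idom chains
+//
+// find_common_dominator() relies on visit_order being a post-order numbering:
+// the block with the smaller order lies deeper in the CFG, so its immediate
+// dominator chain is walked first until both sides meet.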
+uint32_t CFG::find_loop_dominator(uint32_t block_id) const
+{
+    while (block_id != SPIRBlock::NoDominator)
+    {
+        auto itr = preceding_edges.find(block_id);
+        if (itr == end(preceding_edges))
+            return SPIRBlock::NoDominator;
+        if (itr->second.empty())
+            return SPIRBlock::NoDominator;
+
+        uint32_t pred_block_id = SPIRBlock::NoDominator;
+        bool ignore_loop_header = false;
+
+        // If we are a merge block, jump directly to the corresponding header,
+        // but do not treat that header itself as the loop dominator.
+        for (auto &pred : itr->second)
+        {
+            auto &pred_block = compiler.get<SPIRBlock>(pred);
+            if (pred_block.merge == SPIRBlock::MergeLoop && pred_block.merge_block == ID(block_id))
+            {
+                pred_block_id = pred;
+                ignore_loop_header = true;
+                break;
+            }
+            else if (pred_block.merge == SPIRBlock::MergeSelection && pred_block.next_block == ID(block_id))
+            {
+                pred_block_id = pred;
+                break;
+            }
+        }
+
+        // No merge predecessor means any edge will do; all paths lead up
+        // towards the dominating loop header anyway.
+        if (pred_block_id == SPIRBlock::NoDominator)
+            pred_block_id = itr->second.front();
+
+        block_id = pred_block_id;
+
+        if (!ignore_loop_header && block_id)
+        {
+            auto &block = compiler.get<SPIRBlock>(block_id);
+            if (block.merge == SPIRBlock::MergeLoop)
+                return block_id;
+        }
+    }
+
+    return block_id;
+}
+
+bool CFG::node_terminates_control_flow_in_sub_graph(BlockID from, BlockID to) const
+{
+    // Walk backwards from "to" towards "from": every step must pass through a
+    // dominator whose control flow provably continues towards "to", otherwise
+    // some path escapes the sub-graph.
+    auto &from_block = compiler.get<SPIRBlock>(from);
+    BlockID ignore_block_id = 0;
+    if (from_block.merge == SPIRBlock::MergeLoop)
+        ignore_block_id = from_block.merge_block;
+
+    while (to != from)
+    {
+        auto pred_itr = preceding_edges.find(to);
+        if (pred_itr == end(preceding_edges))
+            return false;
+
+        DominatorBuilder builder(*this);
+        for (auto &edge : pred_itr->second)
+            builder.add_block(edge);
+
+        uint32_t dominator = builder.get_dominator();
+        if (dominator == 0)
+            return false;
+
+        auto &dom = compiler.get<SPIRBlock>(dominator);
+
+        bool true_path_ignore = false;
+        bool false_path_ignore = false;
+        if (ignore_block_id && dom.terminator == SPIRBlock::Select)
+        {
+            auto &true_block = compiler.get<SPIRBlock>(dom.true_block);
+            auto &false_block = compiler.get<SPIRBlock>(dom.false_block);
+            auto &ignore_block = compiler.get<SPIRBlock>(ignore_block_id);
+            true_path_ignore = compiler.execution_is_branchless(true_block, ignore_block);
+            false_path_ignore = compiler.execution_is_branchless(false_block, ignore_block);
+        }
+
+        if ((dom.merge == SPIRBlock::MergeSelection && dom.next_block == to) ||
+            (dom.merge == SPIRBlock::MergeLoop && dom.merge_block == to) ||
+            (dom.terminator == SPIRBlock::Direct && dom.next_block == to) ||
+            (dom.terminator == SPIRBlock::Select && dom.true_block == to && false_path_ignore) ||
+            (dom.terminator == SPIRBlock::Select && dom.false_block == to && true_path_ignore))
+        {
+            // Still inside the sub-graph; keep walking up through the dominator.
+            to = dominator;
+        }
+        else
+            return false;
+    }
+
+    return true;
+}
+
+DominatorBuilder::DominatorBuilder(const CFG &cfg_)
+    : cfg(cfg_)
+{
+}
+
+void DominatorBuilder::add_block(uint32_t block)
+{
+    if (!cfg.get_immediate_dominator(block))
+    {
+        // Unreachable block; ignore.
+        return;
+    }
+
+    if (!dominator)
+    {
+        dominator = block;
+        return;
+    }
+
+    if (block != dominator)
+        dominator = cfg.find_common_dominator(block, dominator);
+}
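+// Typical use (as in node_terminates_control_flow_in_sub_graph above): feed
+// every predecessor of a block into the builder and read back their common
+// dominator, for example:
+//
+//   DominatorBuilder builder(cfg);
+//   for (auto &edge : cfg.get_preceding_edges(block))
+//       builder.add_block(edge);
+//   uint32_t dom = builder.get_dominator(); // 0 if the block is unreachable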
+void DominatorBuilder::lift_continue_block_dominator()
+{
+    // A continue block can end up dominating the loop header. Since variables
+    // cannot safely be declared inside a continue block, lift the dominator to
+    // the entry block whenever the candidate branches backwards.
+    if (!dominator)
+        return;
+
+    auto &block = cfg.get_compiler().get<SPIRBlock>(dominator);
+    auto post_order = cfg.get_visit_order(dominator);
+
+    // A branch target with a higher visit order than ourselves is a back edge.
+    bool back_edge_dominator = false;
+    switch (block.terminator)
+    {
+    case SPIRBlock::Direct:
+        if (cfg.get_visit_order(block.next_block) > post_order)
+            back_edge_dominator = true;
+        break;
+
+    case SPIRBlock::Select:
+        if (cfg.get_visit_order(block.true_block) > post_order)
+            back_edge_dominator = true;
+        if (cfg.get_visit_order(block.false_block) > post_order)
+            back_edge_dominator = true;
+        break;
+
+    case SPIRBlock::MultiSelect:
+        for (auto &target : block.cases)
+        {
+            if (cfg.get_visit_order(target.block) > post_order)
+                back_edge_dominator = true;
+        }
+        if (block.default_block && cfg.get_visit_order(block.default_block) > post_order)
+            back_edge_dominator = true;
+        break;
+
+    default:
+        break;
+    }
+
+    if (back_edge_dominator)
+        dominator = cfg.get_function().entry_block;
+}
+}
diff --git a/third_party/rust/spirv_cross/src/vendor/SPIRV-Cross/spirv_cfg.hpp b/third_party/rust/spirv_cross/src/vendor/SPIRV-Cross/spirv_cfg.hpp
new file mode 100644
index 000000000000..12aeee563b88
--- /dev/null
+++ b/third_party/rust/spirv_cross/src/vendor/SPIRV-Cross/spirv_cfg.hpp
@@ -0,0 +1,156 @@
+
+
+#ifndef SPIRV_CROSS_CFG_HPP
+#define SPIRV_CROSS_CFG_HPP
+
+#include "spirv_common.hpp"
+#include <assert.h>
+
+namespace SPIRV_CROSS_NAMESPACE
+{
+class Compiler;
+class CFG
+{
+public:
+    CFG(Compiler &compiler, const SPIRFunction &function);
+
+    Compiler &get_compiler()
+    {
+        return compiler;
+    }
+
+    const Compiler &get_compiler() const
+    {
+        return compiler;
+    }
+
+    const SPIRFunction &get_function() const
+    {
+        return func;
+    }
+
+    uint32_t get_immediate_dominator(uint32_t block) const
+    {
+        auto itr = immediate_dominators.find(block);
+        if (itr != std::end(immediate_dominators))
+            return itr->second;
+        else
+            return 0;
+    }
+
+    uint32_t get_visit_order(uint32_t block) const
+    {
+        auto itr = visit_order.find(block);
+        assert(itr != std::end(visit_order));
+        int v = itr->second.get();
+        assert(v > 0);
+        return uint32_t(v);
+    }
+
+    uint32_t find_common_dominator(uint32_t a, uint32_t b) const;
+
+    const SmallVector<uint32_t> &get_preceding_edges(uint32_t block) const
+    {
+        auto itr = preceding_edges.find(block);
+        if (itr != std::end(preceding_edges))
+            return itr->second;
+        else
+            return empty_vector;
+    }
+
+    const SmallVector<uint32_t> &get_succeeding_edges(uint32_t block) const
+    {
+        auto itr = succeeding_edges.find(block);
+        if (itr != std::end(succeeding_edges))
+            return itr->second;
+        else
+            return empty_vector;
+    }
+
+    template <typename Op>
+    void walk_from(std::unordered_set<uint32_t> &seen_blocks, uint32_t block, const Op &op) const
+    {
+        if (seen_blocks.count(block))
+            return;
+        seen_blocks.insert(block);
+
+        if (op(block))
+        {
+            for (auto b : get_succeeding_edges(block))
+                walk_from(seen_blocks, b, op);
+        }
+    }
+
+    uint32_t find_loop_dominator(uint32_t block) const;
+
+    bool node_terminates_control_flow_in_sub_graph(BlockID from, BlockID to) const;
+
+private:
+    struct VisitOrder
+    {
+        int &get()
+        {
+            return v;
+        }
+
+        const int &get() const
+        {
+            return v;
+        }
+
+        int v = -1;
+    };
+
+    Compiler &compiler;
+    const SPIRFunction &func;
+    std::unordered_map<uint32_t, SmallVector<uint32_t>> preceding_edges;
+    std::unordered_map<uint32_t, SmallVector<uint32_t>> succeeding_edges;
+    std::unordered_map<uint32_t, uint32_t> immediate_dominators;
+    std::unordered_map<uint32_t, VisitOrder> visit_order;
+    SmallVector<uint32_t> post_order;
+    SmallVector<uint32_t> empty_vector;
+
+    void add_branch(uint32_t from, uint32_t to);
+    void build_post_order_visit_order();
+    void build_immediate_dominators();
+    bool post_order_visit(uint32_t block);
+    uint32_t visit_count = 0;
+
+    bool is_back_edge(uint32_t to) const;
+    bool has_visited_forward_edge(uint32_t to) const;
+};
+
+class DominatorBuilder
+{
+public:
+    DominatorBuilder(const CFG &cfg);
+
+    void add_block(uint32_t block);
+    uint32_t get_dominator() const
+    {
+        return dominator;
+    }
+
+    void lift_continue_block_dominator();
+
+private:
+    const CFG &cfg;
+    uint32_t dominator = 0;
+};
+}
+
+#endif
diff --git a/third_party/rust/spirv_cross/src/vendor/SPIRV-Cross/spirv_common.hpp
b/third_party/rust/spirv_cross/src/vendor/SPIRV-Cross/spirv_common.hpp new file mode 100644 index 000000000000..c4887275783f --- /dev/null +++ b/third_party/rust/spirv_cross/src/vendor/SPIRV-Cross/spirv_common.hpp @@ -0,0 +1,1712 @@ + + + + + + + + + + + + + + + + +#ifndef SPIRV_CROSS_COMMON_HPP +#define SPIRV_CROSS_COMMON_HPP + +#include "spirv.hpp" +#include "spirv_cross_containers.hpp" +#include "spirv_cross_error_handling.hpp" +#include + + + + + + + + + + + +#ifdef SPIRV_CROSS_NAMESPACE_OVERRIDE +#define SPIRV_CROSS_NAMESPACE SPIRV_CROSS_NAMESPACE_OVERRIDE +#else +#define SPIRV_CROSS_NAMESPACE spirv_cross +#endif + +namespace SPIRV_CROSS_NAMESPACE +{ +namespace inner +{ +template +void join_helper(StringStream<> &stream, T &&t) +{ + stream << std::forward(t); +} + +template +void join_helper(StringStream<> &stream, T &&t, Ts &&... ts) +{ + stream << std::forward(t); + join_helper(stream, std::forward(ts)...); +} +} + +class Bitset +{ +public: + Bitset() = default; + explicit inline Bitset(uint64_t lower_) + : lower(lower_) + { + } + + inline bool get(uint32_t bit) const + { + if (bit < 64) + return (lower & (1ull << bit)) != 0; + else + return higher.count(bit) != 0; + } + + inline void set(uint32_t bit) + { + if (bit < 64) + lower |= 1ull << bit; + else + higher.insert(bit); + } + + inline void clear(uint32_t bit) + { + if (bit < 64) + lower &= ~(1ull << bit); + else + higher.erase(bit); + } + + inline uint64_t get_lower() const + { + return lower; + } + + inline void reset() + { + lower = 0; + higher.clear(); + } + + inline void merge_and(const Bitset &other) + { + lower &= other.lower; + std::unordered_set tmp_set; + for (auto &v : higher) + if (other.higher.count(v) != 0) + tmp_set.insert(v); + higher = std::move(tmp_set); + } + + inline void merge_or(const Bitset &other) + { + lower |= other.lower; + for (auto &v : other.higher) + higher.insert(v); + } + + inline bool operator==(const Bitset &other) const + { + if (lower != other.lower) + return false; + + if (higher.size() != other.higher.size()) + return false; + + for (auto &v : higher) + if (other.higher.count(v) == 0) + return false; + + return true; + } + + inline bool operator!=(const Bitset &other) const + { + return !(*this == other); + } + + template + void for_each_bit(const Op &op) const + { + + for (uint32_t i = 0; i < 64; i++) + { + if (lower & (1ull << i)) + op(i); + } + + if (higher.empty()) + return; + + + + SmallVector bits; + bits.reserve(higher.size()); + for (auto &v : higher) + bits.push_back(v); + std::sort(std::begin(bits), std::end(bits)); + + for (auto &v : bits) + op(v); + } + + inline bool empty() const + { + return lower == 0 && higher.empty(); + } + +private: + + + + uint64_t lower = 0; + std::unordered_set higher; +}; + + +template +std::string join(Ts &&... 
ts) +{ + StringStream<> stream; + inner::join_helper(stream, std::forward(ts)...); + return stream.str(); +} + +inline std::string merge(const SmallVector &list, const char *between = ", ") +{ + StringStream<> stream; + for (auto &elem : list) + { + stream << elem; + if (&elem != &list.back()) + stream << between; + } + return stream.str(); +} + + + +template ::value, int>::type = 0> +inline std::string convert_to_string(const T &t) +{ + return std::to_string(t); +} + + +#ifndef SPIRV_CROSS_FLT_FMT +#define SPIRV_CROSS_FLT_FMT "%.32g" +#endif + +#ifdef _MSC_VER + + +#pragma warning(push) +#pragma warning(disable : 4996) +#endif + +static inline void fixup_radix_point(char *str, char radix_point) +{ + + + if (radix_point != '.') + { + while (*str != '\0') + { + if (*str == radix_point) + *str = '.'; + str++; + } + } +} + +inline std::string convert_to_string(float t, char locale_radix_point) +{ + + + char buf[64]; + sprintf(buf, SPIRV_CROSS_FLT_FMT, t); + fixup_radix_point(buf, locale_radix_point); + + + if (!strchr(buf, '.') && !strchr(buf, 'e')) + strcat(buf, ".0"); + return buf; +} + +inline std::string convert_to_string(double t, char locale_radix_point) +{ + + + char buf[64]; + sprintf(buf, SPIRV_CROSS_FLT_FMT, t); + fixup_radix_point(buf, locale_radix_point); + + + if (!strchr(buf, '.') && !strchr(buf, 'e')) + strcat(buf, ".0"); + return buf; +} + +#ifdef _MSC_VER +#pragma warning(pop) +#endif + +struct Instruction +{ + uint16_t op = 0; + uint16_t count = 0; + uint32_t offset = 0; + uint32_t length = 0; +}; + +enum Types +{ + TypeNone, + TypeType, + TypeVariable, + TypeConstant, + TypeFunction, + TypeFunctionPrototype, + TypeBlock, + TypeExtension, + TypeExpression, + TypeConstantOp, + TypeCombinedImageSampler, + TypeAccessChain, + TypeUndef, + TypeString, + TypeCount +}; + +template +class TypedID; + +template <> +class TypedID +{ +public: + TypedID() = default; + TypedID(uint32_t id_) + : id(id_) + { + } + + template + TypedID(const TypedID &other) + { + *this = other; + } + + template + TypedID &operator=(const TypedID &other) + { + id = uint32_t(other); + return *this; + } + + + + operator uint32_t() const + { + return id; + } + + template + operator TypedID() const + { + return TypedID(*this); + } + + bool operator==(const TypedID &other) const + { + return id == other.id; + } + + bool operator!=(const TypedID &other) const + { + return id != other.id; + } + + template + bool operator==(const TypedID &other) const + { + return id == uint32_t(other); + } + + template + bool operator!=(const TypedID &other) const + { + return id != uint32_t(other); + } + +private: + uint32_t id = 0; +}; + +template +class TypedID +{ +public: + TypedID() = default; + TypedID(uint32_t id_) + : id(id_) + { + } + + explicit TypedID(const TypedID &other) + : id(uint32_t(other)) + { + } + + operator uint32_t() const + { + return id; + } + + bool operator==(const TypedID &other) const + { + return id == other.id; + } + + bool operator!=(const TypedID &other) const + { + return id != other.id; + } + + bool operator==(const TypedID &other) const + { + return id == uint32_t(other); + } + + bool operator!=(const TypedID &other) const + { + return id != uint32_t(other); + } + +private: + uint32_t id = 0; +}; + +using VariableID = TypedID; +using TypeID = TypedID; +using ConstantID = TypedID; +using FunctionID = TypedID; +using BlockID = TypedID; +using ID = TypedID; + + +struct IVariant +{ + virtual ~IVariant() = default; + virtual IVariant *clone(ObjectPoolBase *pool) = 0; + ID self = 0; +}; + +#define 
SPIRV_CROSS_DECLARE_CLONE(T) \ + IVariant *clone(ObjectPoolBase *pool) override \ + { \ + return static_cast *>(pool)->allocate(*this); \ + } + +struct SPIRUndef : IVariant +{ + enum + { + type = TypeUndef + }; + + explicit SPIRUndef(TypeID basetype_) + : basetype(basetype_) + { + } + TypeID basetype; + + SPIRV_CROSS_DECLARE_CLONE(SPIRUndef) +}; + +struct SPIRString : IVariant +{ + enum + { + type = TypeString + }; + + explicit SPIRString(std::string str_) + : str(std::move(str_)) + { + } + + std::string str; + + SPIRV_CROSS_DECLARE_CLONE(SPIRString) +}; + + + +struct SPIRCombinedImageSampler : IVariant +{ + enum + { + type = TypeCombinedImageSampler + }; + SPIRCombinedImageSampler(TypeID type_, VariableID image_, VariableID sampler_) + : combined_type(type_) + , image(image_) + , sampler(sampler_) + { + } + TypeID combined_type; + VariableID image; + VariableID sampler; + + SPIRV_CROSS_DECLARE_CLONE(SPIRCombinedImageSampler) +}; + +struct SPIRConstantOp : IVariant +{ + enum + { + type = TypeConstantOp + }; + + SPIRConstantOp(TypeID result_type, spv::Op op, const uint32_t *args, uint32_t length) + : opcode(op) + , basetype(result_type) + { + arguments.reserve(length); + for (uint32_t i = 0; i < length; i++) + arguments.push_back(args[i]); + } + + spv::Op opcode; + SmallVector arguments; + TypeID basetype; + + SPIRV_CROSS_DECLARE_CLONE(SPIRConstantOp) +}; + +struct SPIRType : IVariant +{ + enum + { + type = TypeType + }; + + enum BaseType + { + Unknown, + Void, + Boolean, + SByte, + UByte, + Short, + UShort, + Int, + UInt, + Int64, + UInt64, + AtomicCounter, + Half, + Float, + Double, + Struct, + Image, + SampledImage, + Sampler, + AccelerationStructureNV, + + + ControlPointArray, + Char + }; + + + BaseType basetype = Unknown; + uint32_t width = 0; + uint32_t vecsize = 1; + uint32_t columns = 1; + + + SmallVector array; + + + + + + + SmallVector array_size_literal; + + + + uint32_t pointer_depth = 0; + bool pointer = false; + + spv::StorageClass storage = spv::StorageClassGeneric; + + SmallVector member_types; + + struct ImageType + { + TypeID type; + spv::Dim dim; + bool depth; + bool arrayed; + bool ms; + uint32_t sampled; + spv::ImageFormat format; + spv::AccessQualifier access; + } image; + + + + + TypeID type_alias = 0; + + + + TypeID parent_type = 0; + + + std::unordered_set member_name_cache; + + SPIRV_CROSS_DECLARE_CLONE(SPIRType) +}; + +struct SPIRExtension : IVariant +{ + enum + { + type = TypeExtension + }; + + enum Extension + { + Unsupported, + GLSL, + SPV_debug_info, + SPV_AMD_shader_ballot, + SPV_AMD_shader_explicit_vertex_parameter, + SPV_AMD_shader_trinary_minmax, + SPV_AMD_gcn_shader + }; + + explicit SPIRExtension(Extension ext_) + : ext(ext_) + { + } + + Extension ext; + SPIRV_CROSS_DECLARE_CLONE(SPIRExtension) +}; + + + +struct SPIREntryPoint +{ + SPIREntryPoint(FunctionID self_, spv::ExecutionModel execution_model, const std::string &entry_name) + : self(self_) + , name(entry_name) + , orig_name(entry_name) + , model(execution_model) + { + } + SPIREntryPoint() = default; + + FunctionID self = 0; + std::string name; + std::string orig_name; + SmallVector interface_variables; + + Bitset flags; + struct + { + uint32_t x = 0, y = 0, z = 0; + uint32_t constant = 0; + } workgroup_size; + uint32_t invocations = 0; + uint32_t output_vertices = 0; + spv::ExecutionModel model = spv::ExecutionModelMax; +}; + +struct SPIRExpression : IVariant +{ + enum + { + type = TypeExpression + }; + + + SPIRExpression(std::string expr, TypeID expression_type_, bool immutable_) + : 
expression(move(expr)) + , expression_type(expression_type_) + , immutable(immutable_) + { + } + + + + + ID base_expression = 0; + + std::string expression; + TypeID expression_type = 0; + + + + ID loaded_from = 0; + + + + + + bool immutable = false; + + + + bool need_transpose = false; + + + bool access_chain = false; + + + SmallVector expression_dependencies; + + + + SmallVector implied_read_expressions; + + SPIRV_CROSS_DECLARE_CLONE(SPIRExpression) +}; + +struct SPIRFunctionPrototype : IVariant +{ + enum + { + type = TypeFunctionPrototype + }; + + explicit SPIRFunctionPrototype(TypeID return_type_) + : return_type(return_type_) + { + } + + TypeID return_type; + SmallVector parameter_types; + + SPIRV_CROSS_DECLARE_CLONE(SPIRFunctionPrototype) +}; + +struct SPIRBlock : IVariant +{ + enum + { + type = TypeBlock + }; + + enum Terminator + { + Unknown, + Direct, + + Select, + MultiSelect, + + Return, + Unreachable, + Kill + }; + + enum Merge + { + MergeNone, + MergeLoop, + MergeSelection + }; + + enum Hints + { + HintNone, + HintUnroll, + HintDontUnroll, + HintFlatten, + HintDontFlatten + }; + + enum Method + { + MergeToSelectForLoop, + MergeToDirectForLoop, + MergeToSelectContinueForLoop + }; + + enum ContinueBlockType + { + ContinueNone, + + + ForLoop, + + + WhileLoop, + + + DoWhileLoop, + + + + ComplexLoop + }; + + enum + { + NoDominator = 0xffffffffu + }; + + Terminator terminator = Unknown; + Merge merge = MergeNone; + Hints hint = HintNone; + BlockID next_block = 0; + BlockID merge_block = 0; + BlockID continue_block = 0; + + ID return_value = 0; + ID condition = 0; + BlockID true_block = 0; + BlockID false_block = 0; + BlockID default_block = 0; + + SmallVector ops; + + struct Phi + { + ID local_variable; + BlockID parent; + VariableID function_variable; + }; + + + SmallVector phi_variables; + + + + SmallVector> declare_temporary; + + + + SmallVector> potential_declare_temporary; + + struct Case + { + uint32_t value; + BlockID block; + }; + SmallVector cases; + + + + bool disable_block_optimization = false; + + + bool complex_continue = false; + + + bool need_ladder_break = false; + + + + BlockID ignore_phi_from_block = 0; + + + + BlockID loop_dominator = 0; + + + + SmallVector dominated_variables; + + + + + SmallVector loop_variables; + + + + + SmallVector invalidate_expressions; + + SPIRV_CROSS_DECLARE_CLONE(SPIRBlock) +}; + +struct SPIRFunction : IVariant +{ + enum + { + type = TypeFunction + }; + + SPIRFunction(TypeID return_type_, TypeID function_type_) + : return_type(return_type_) + , function_type(function_type_) + { + } + + struct Parameter + { + TypeID type; + ID id; + uint32_t read_count; + uint32_t write_count; + + + + + + + + bool alias_global_variable; + }; + + + + + + + + + struct CombinedImageSamplerParameter + { + VariableID id; + VariableID image_id; + VariableID sampler_id; + bool global_image; + bool global_sampler; + bool depth; + }; + + TypeID return_type; + TypeID function_type; + SmallVector arguments; + + + + + SmallVector shadow_arguments; + SmallVector local_variables; + BlockID entry_block = 0; + SmallVector blocks; + SmallVector combined_parameters; + + struct EntryLine + { + uint32_t file_id = 0; + uint32_t line_literal = 0; + }; + EntryLine entry_line; + + void add_local_variable(VariableID id) + { + local_variables.push_back(id); + } + + void add_parameter(TypeID parameter_type, ID id, bool alias_global_variable = false) + { + + arguments.push_back({ parameter_type, id, 0u, 0u, alias_global_variable }); + } + + + + + + Vector> fixup_hooks_out; + + + + + 
+ Vector> fixup_hooks_in; + + bool active = false; + bool flush_undeclared = true; + bool do_combined_parameters = true; + + SPIRV_CROSS_DECLARE_CLONE(SPIRFunction) +}; + +struct SPIRAccessChain : IVariant +{ + enum + { + type = TypeAccessChain + }; + + SPIRAccessChain(TypeID basetype_, spv::StorageClass storage_, std::string base_, std::string dynamic_index_, + int32_t static_index_) + : basetype(basetype_) + , storage(storage_) + , base(std::move(base_)) + , dynamic_index(std::move(dynamic_index_)) + , static_index(static_index_) + { + } + + + + + + + TypeID basetype; + spv::StorageClass storage; + std::string base; + std::string dynamic_index; + int32_t static_index; + + VariableID loaded_from = 0; + uint32_t matrix_stride = 0; + bool row_major_matrix = false; + bool immutable = false; + + + + SmallVector implied_read_expressions; + + SPIRV_CROSS_DECLARE_CLONE(SPIRAccessChain) +}; + +struct SPIRVariable : IVariant +{ + enum + { + type = TypeVariable + }; + + SPIRVariable() = default; + SPIRVariable(TypeID basetype_, spv::StorageClass storage_, ID initializer_ = 0, VariableID basevariable_ = 0) + : basetype(basetype_) + , storage(storage_) + , initializer(initializer_) + , basevariable(basevariable_) + { + } + + TypeID basetype = 0; + spv::StorageClass storage = spv::StorageClassGeneric; + uint32_t decoration = 0; + ID initializer = 0; + VariableID basevariable = 0; + + SmallVector dereference_chain; + bool compat_builtin = false; + + + + + + bool statically_assigned = false; + ID static_expression = 0; + + + SmallVector dependees; + bool forwardable = true; + + bool deferred_declaration = false; + bool phi_variable = false; + + + bool allocate_temporary_copy = false; + + bool remapped_variable = false; + uint32_t remapped_components = 0; + + + BlockID dominator = 0; + + + + bool loop_variable = false; + + bool loop_variable_enable = false; + + SPIRFunction::Parameter *parameter = nullptr; + + SPIRV_CROSS_DECLARE_CLONE(SPIRVariable) +}; + +struct SPIRConstant : IVariant +{ + enum + { + type = TypeConstant + }; + + union Constant + { + uint32_t u32; + int32_t i32; + float f32; + + uint64_t u64; + int64_t i64; + double f64; + }; + + struct ConstantVector + { + Constant r[4]; + + ID id[4]; + uint32_t vecsize = 1; + + ConstantVector() + { + memset(r, 0, sizeof(r)); + } + }; + + struct ConstantMatrix + { + ConstantVector c[4]; + + ID id[4]; + uint32_t columns = 1; + }; + + static inline float f16_to_f32(uint16_t u16_value) + { + + int s = (u16_value >> 15) & 0x1; + int e = (u16_value >> 10) & 0x1f; + int m = (u16_value >> 0) & 0x3ff; + + union + { + float f32; + uint32_t u32; + } u; + + if (e == 0) + { + if (m == 0) + { + u.u32 = uint32_t(s) << 31; + return u.f32; + } + else + { + while ((m & 0x400) == 0) + { + m <<= 1; + e--; + } + + e++; + m &= ~0x400; + } + } + else if (e == 31) + { + if (m == 0) + { + u.u32 = (uint32_t(s) << 31) | 0x7f800000u; + return u.f32; + } + else + { + u.u32 = (uint32_t(s) << 31) | 0x7f800000u | (m << 13); + return u.f32; + } + } + + e += 127 - 15; + m <<= 13; + u.u32 = (uint32_t(s) << 31) | (e << 23) | m; + return u.f32; + } + + inline uint32_t specialization_constant_id(uint32_t col, uint32_t row) const + { + return m.c[col].id[row]; + } + + inline uint32_t specialization_constant_id(uint32_t col) const + { + return m.id[col]; + } + + inline uint32_t scalar(uint32_t col = 0, uint32_t row = 0) const + { + return m.c[col].r[row].u32; + } + + inline int16_t scalar_i16(uint32_t col = 0, uint32_t row = 0) const + { + return int16_t(m.c[col].r[row].u32 & 0xffffu); + } 
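+// SPIRConstant stores every scalar in 32/64-bit unions; the scalar_*() helpers
+// above and below mask or reinterpret the stored bits per type. Half floats
+// have no dedicated storage: scalar_f16() decodes the low 16 bits through
+// f16_to_f32(), which renormalizes denormals, maps exponent 31 to Inf/NaN,
+// and rebiases the exponent by 127 - 15 for the binary32 layout.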
+ + inline uint16_t scalar_u16(uint32_t col = 0, uint32_t row = 0) const + { + return uint16_t(m.c[col].r[row].u32 & 0xffffu); + } + + inline int8_t scalar_i8(uint32_t col = 0, uint32_t row = 0) const + { + return int8_t(m.c[col].r[row].u32 & 0xffu); + } + + inline uint8_t scalar_u8(uint32_t col = 0, uint32_t row = 0) const + { + return uint8_t(m.c[col].r[row].u32 & 0xffu); + } + + inline float scalar_f16(uint32_t col = 0, uint32_t row = 0) const + { + return f16_to_f32(scalar_u16(col, row)); + } + + inline float scalar_f32(uint32_t col = 0, uint32_t row = 0) const + { + return m.c[col].r[row].f32; + } + + inline int32_t scalar_i32(uint32_t col = 0, uint32_t row = 0) const + { + return m.c[col].r[row].i32; + } + + inline double scalar_f64(uint32_t col = 0, uint32_t row = 0) const + { + return m.c[col].r[row].f64; + } + + inline int64_t scalar_i64(uint32_t col = 0, uint32_t row = 0) const + { + return m.c[col].r[row].i64; + } + + inline uint64_t scalar_u64(uint32_t col = 0, uint32_t row = 0) const + { + return m.c[col].r[row].u64; + } + + inline const ConstantVector &vector() const + { + return m.c[0]; + } + + inline uint32_t vector_size() const + { + return m.c[0].vecsize; + } + + inline uint32_t columns() const + { + return m.columns; + } + + inline void make_null(const SPIRType &constant_type_) + { + m = {}; + m.columns = constant_type_.columns; + for (auto &c : m.c) + c.vecsize = constant_type_.vecsize; + } + + inline bool constant_is_null() const + { + if (specialization) + return false; + if (!subconstants.empty()) + return false; + + for (uint32_t col = 0; col < columns(); col++) + for (uint32_t row = 0; row < vector_size(); row++) + if (scalar_u64(col, row) != 0) + return false; + + return true; + } + + explicit SPIRConstant(uint32_t constant_type_) + : constant_type(constant_type_) + { + } + + SPIRConstant() = default; + + SPIRConstant(TypeID constant_type_, const uint32_t *elements, uint32_t num_elements, bool specialized) + : constant_type(constant_type_) + , specialization(specialized) + { + subconstants.reserve(num_elements); + for (uint32_t i = 0; i < num_elements; i++) + subconstants.push_back(elements[i]); + specialization = specialized; + } + + + SPIRConstant(TypeID constant_type_, uint32_t v0, bool specialized) + : constant_type(constant_type_) + , specialization(specialized) + { + m.c[0].r[0].u32 = v0; + m.c[0].vecsize = 1; + m.columns = 1; + } + + + SPIRConstant(TypeID constant_type_, uint64_t v0, bool specialized) + : constant_type(constant_type_) + , specialization(specialized) + { + m.c[0].r[0].u64 = v0; + m.c[0].vecsize = 1; + m.columns = 1; + } + + + SPIRConstant(TypeID constant_type_, const SPIRConstant *const *vector_elements, uint32_t num_elements, + bool specialized) + : constant_type(constant_type_) + , specialization(specialized) + { + bool matrix = vector_elements[0]->m.c[0].vecsize > 1; + + if (matrix) + { + m.columns = num_elements; + + for (uint32_t i = 0; i < num_elements; i++) + { + m.c[i] = vector_elements[i]->m.c[0]; + if (vector_elements[i]->specialization) + m.id[i] = vector_elements[i]->self; + } + } + else + { + m.c[0].vecsize = num_elements; + m.columns = 1; + + for (uint32_t i = 0; i < num_elements; i++) + { + m.c[0].r[i] = vector_elements[i]->m.c[0].r[0]; + if (vector_elements[i]->specialization) + m.c[0].id[i] = vector_elements[i]->self; + } + } + } + + TypeID constant_type = 0; + ConstantMatrix m; + + + bool specialization = false; + + bool is_used_as_array_length = false; + + + bool is_used_as_lut = false; + + + SmallVector subconstants; + + 
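+// For composite constants (arrays and structs), the element constants are not
+// stored inline: subconstants above holds the IDs of the member SPIRConstants,
+// as populated by the array/struct constructor.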
+
+	// Non-Vulkan GLSL emits a preprocessor define per specialization constant
+	// so its value can still be overridden; this is that macro's name.
+	std::string specialization_constant_macro_name;
+
+	SPIRV_CROSS_DECLARE_CLONE(SPIRConstant)
+};
+
+// One object pool per IVariant type, so a Variant can return its payload
+// to the pool it was allocated from.
+struct ObjectPoolGroup
+{
+	std::unique_ptr<ObjectPoolBase> pools[TypeCount];
+};
+
+class Variant
+{
+public:
+	explicit Variant(ObjectPoolGroup *group_)
+	    : group(group_)
+	{
+	}
+
+	~Variant()
+	{
+		if (holder)
+			group->pools[type]->free_opaque(holder);
+	}
+
+	// Marking the move constructor noexcept matters for container growth.
+	Variant(Variant &&other) SPIRV_CROSS_NOEXCEPT
+	{
+		*this = std::move(other);
+	}
+
+	// Cannot copy-construct: a Variant needs its own pool group, so copies
+	// must go through the explicit copy assignment below.
+	Variant(const Variant &variant) = delete;
+
+	Variant &operator=(Variant &&other) SPIRV_CROSS_NOEXCEPT
+	{
+		if (this != &other)
+		{
+			if (holder)
+				group->pools[type]->free_opaque(holder);
+			holder = other.holder;
+			group = other.group;
+			type = other.type;
+			allow_type_rewrite = other.allow_type_rewrite;
+
+			other.holder = nullptr;
+			other.type = TypeNone;
+		}
+		return *this;
+	}
+
+	// Copying clones the held object into our own pool; this is only expected
+	// to happen while constructing a Compiler, never during ::compile().
+	Variant &operator=(const Variant &other)
+	{
+#ifdef SPIRV_CROSS_COPY_CONSTRUCTOR_SANITIZE
+		abort();
+#endif
+		if (this != &other)
+		{
+			if (holder)
+				group->pools[type]->free_opaque(holder);
+
+			if (other.holder)
+				holder = other.holder->clone(group->pools[other.type].get());
+			else
+				holder = nullptr;
+
+			type = other.type;
+			allow_type_rewrite = other.allow_type_rewrite;
+		}
+		return *this;
+	}
+
+	void set(IVariant *val, Types new_type)
+	{
+		if (holder)
+			group->pools[type]->free_opaque(holder);
+		holder = nullptr;
+
+		if (!allow_type_rewrite && type != TypeNone && type != new_type)
+		{
+			if (val)
+				group->pools[new_type]->free_opaque(val);
+			SPIRV_CROSS_THROW("Overwriting a variant with new type.");
+		}
+
+		holder = val;
+		type = new_type;
+		allow_type_rewrite = false;
+	}
+
+	template <typename T, typename... Ts>
+	T *allocate_and_set(Types new_type, Ts &&... ts)
+	{
+		T *val = static_cast<ObjectPool<T> &>(*group->pools[new_type]).allocate(std::forward<Ts>(ts)...);
+		set(val, new_type);
+		return val;
+	}
+
+	template <typename T>
+	T &get()
+	{
+		if (!holder)
+			SPIRV_CROSS_THROW("nullptr");
+		if (static_cast<Types>(T::type) != type)
+			SPIRV_CROSS_THROW("Bad cast");
+		return *static_cast<T *>(holder);
+	}
+
+	template <typename T>
+	const T &get() const
+	{
+		if (!holder)
+			SPIRV_CROSS_THROW("nullptr");
+		if (static_cast<Types>(T::type) != type)
+			SPIRV_CROSS_THROW("Bad cast");
+		return *static_cast<const T *>(holder);
+	}
+
+	Types get_type() const
+	{
+		return type;
+	}
+
+	ID get_id() const
+	{
+		return holder ? holder->self : ID(0);
+	}
+
+	bool empty() const
+	{
+		return !holder;
+	}
+
+	void reset()
+	{
+		if (holder)
+			group->pools[type]->free_opaque(holder);
+		holder = nullptr;
+		type = TypeNone;
+	}
+
+	void set_allow_type_rewrite()
+	{
+		allow_type_rewrite = true;
+	}
+
+private:
+	ObjectPoolGroup *group = nullptr;
+	IVariant *holder = nullptr;
+	Types type = TypeNone;
+	bool allow_type_rewrite = false;
+};
+
+template <typename T>
+T &variant_get(Variant &var)
+{
+	return var.get<T>();
+}
+
+template <typename T>
+const T &variant_get(const Variant &var)
+{
+	return var.get<T>();
+}
+
+template <typename T, typename... P>
+T &variant_set(Variant &var, P &&... args)
+{
+	auto *ptr = var.allocate_and_set<T>(static_cast<Types>(T::type), std::forward<P>(args)...);
+	return *ptr;
+}
+
+struct AccessChainMeta
+{
+	uint32_t storage_physical_type = 0;
+	bool need_transpose = false;
+	bool storage_is_packed = false;
+	bool storage_is_invariant = false;
+};
+
+enum ExtendedDecorations
+{
+	// Marks a buffer block as re-packed, i.e. member declarations might be
+	// subject to PhysicalTypeID remapping and padding.
+	SPIRVCrossDecorationBufferBlockRepacked = 0,
+
+	// A type in a buffer block might be declared with a different physical
+	// type than the logical type; this points to the physical type used.
+	SPIRVCrossDecorationPhysicalTypeID,
+
+	// Marks the physical type as tightly packed (e.g. packed_floatN in MSL).
+	SPIRVCrossDecorationPhysicalTypePacked,
+
+	SPIRVCrossDecorationPaddingTarget,
+
+	SPIRVCrossDecorationInterfaceMemberIndex,
+	SPIRVCrossDecorationInterfaceOrigID,
+	SPIRVCrossDecorationResourceIndexPrimary,
+	// Resource index of the sampler half of a combined image-sampler.
+	SPIRVCrossDecorationResourceIndexSecondary,
+	// Resource indices for multiplanar images in combined image-samplers.
+	SPIRVCrossDecorationResourceIndexTertiary,
+	SPIRVCrossDecorationResourceIndexQuaternary,
+
+	// Marks a buffer block for using explicit offsets.
+	SPIRVCrossDecorationExplicitOffset,
+
+	// Marks an Input variable as holding the base group passed to
+	// vkCmdDispatchBase(), used to adjust WorkgroupId/GlobalInvocationId.
+	SPIRVCrossDecorationBuiltInDispatchBase,
+
+	// Marks a function parameter as a "dynamic" combined image-sampler, i.e.
+	// one that may carry an attached sampler Y'CbCr conversion.
+	SPIRVCrossDecorationDynamicImageSampler,
+
+	SPIRVCrossDecorationCount
+};
+
+struct Meta
+{
+	struct Decoration
+	{
+		std::string alias;
+		std::string qualified_alias;
+		std::string hlsl_semantic;
+		Bitset decoration_flags;
+		spv::BuiltIn builtin_type = spv::BuiltInMax;
+		uint32_t location = 0;
+		uint32_t component = 0;
+		uint32_t set = 0;
+		uint32_t binding = 0;
+		uint32_t offset = 0;
+		uint32_t array_stride = 0;
+		uint32_t matrix_stride = 0;
+		uint32_t input_attachment = 0;
+		uint32_t spec_id = 0;
+		uint32_t index = 0;
+		spv::FPRoundingMode fp_rounding_mode = spv::FPRoundingModeMax;
+		bool builtin = false;
+
+		struct Extended
+		{
+			Extended()
+			{
+				// Zero-initialize in a loop rather than with = {}.
+				for (auto &v : values)
+					v = 0;
+			}
+
+			Bitset flags;
+			uint32_t values[SPIRVCrossDecorationCount];
+		} extended;
+	};
+
+	Decoration decoration;
+
+	// Intentionally not a SmallVector. Decoration is large and somewhat rare.
+	Vector<Decoration> members;
+
+	std::unordered_map<uint32_t, uint32_t> decoration_word_offset;
+
+	// For SPV_GOOGLE_hlsl_functionality1.
+	bool hlsl_is_magic_counter_buffer = false;
+	// ID for the sibling counter buffer.
+	uint32_t hlsl_magic_counter_buffer = 0;
+};
+
+// A user callback that remaps the type of any variable.
+// var_name is the declared name of the variable.
+// name_of_type is the textual name of the type which will be used in the code
+// unless written to by the callback.
+using VariableTypeRemapCallback =
+    std::function<void(const SPIRType &type, const std::string &var_name, std::string &name_of_type)>;
+
+// FNV-1-style 64-bit hashing of 32-bit words.
+class Hasher
+{
+public:
+	inline void u32(uint32_t value)
+	{
+		h = (h * 0x100000001b3ull) ^ value;
+	}
+
+	inline uint64_t get() const
+	{
+		return h;
+	}
+
+private:
+	uint64_t h = 0xcbf29ce484222325ull;
+};
+
+static inline bool type_is_floating_point(const SPIRType &type)
+{
+	return type.basetype == SPIRType::Half || type.basetype == SPIRType::Float || type.basetype == SPIRType::Double;
+}
+
+static inline bool type_is_integral(const SPIRType &type)
+{
+	return type.basetype == SPIRType::SByte || type.basetype == SPIRType::UByte || type.basetype == SPIRType::Short ||
+	       type.basetype == SPIRType::UShort || type.basetype == SPIRType::Int || type.basetype == SPIRType::UInt ||
+	       type.basetype == SPIRType::Int64 || type.basetype == SPIRType::UInt64;
+}
+
+static inline SPIRType::BaseType to_signed_basetype(uint32_t width)
+{
+	switch (width)
+	{
+	case 8:
+		return SPIRType::SByte;
+	case 16:
+		return SPIRType::Short;
+	case 32:
+		return SPIRType::Int;
+	case 64:
+		return SPIRType::Int64;
+	default:
+		SPIRV_CROSS_THROW("Invalid bit width.");
+	}
+}
+
+static inline SPIRType::BaseType to_unsigned_basetype(uint32_t width)
+{
+	switch (width)
+	{
+	case 8:
+		return SPIRType::UByte;
+	case 16:
+		return SPIRType::UShort;
+	case 32:
+		return SPIRType::UInt;
+	case 64:
+		return SPIRType::UInt64;
+	default:
+		SPIRV_CROSS_THROW("Invalid bit width.");
+	}
+}
+
+// Returns true for integer ops whose result does not depend on the
+// signedness of the operands.
+static inline bool opcode_is_sign_invariant(spv::Op opcode)
+{
+	switch (opcode)
+	{
+	case spv::OpIEqual:
+	case spv::OpINotEqual:
+	case spv::OpISub:
+	case spv::OpIAdd:
+	case spv::OpIMul:
+	case spv::OpShiftLeftLogical:
+	case spv::OpBitwiseOr:
+	case spv::OpBitwiseXor:
+	case spv::OpBitwiseAnd:
+		return true;
+
+	default:
+		return false;
+	}
+}
+} // namespace SPIRV_CROSS_NAMESPACE
+
+namespace std
+{
+template <SPIRV_CROSS_NAMESPACE::Types type>
+struct hash<SPIRV_CROSS_NAMESPACE::TypedID<type>>
+{
+	size_t operator()(const SPIRV_CROSS_NAMESPACE::TypedID<type> &value) const
+	{
+		return std::hash<uint32_t>()(value);
+	}
+};
+} // namespace std
+
+#endif
diff --git a/third_party/rust/spirv_cross/src/vendor/SPIRV-Cross/spirv_cpp.cpp b/third_party/rust/spirv_cross/src/vendor/SPIRV-Cross/spirv_cpp.cpp
new file mode 100644
index 000000000000..3753cae06eb1
--- /dev/null
+++ b/third_party/rust/spirv_cross/src/vendor/SPIRV-Cross/spirv_cpp.cpp
@@ -0,0 +1,549 @@
+/*
+ * Copyright 2015-2019 Arm Limited
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "spirv_cpp.hpp"
+
+using namespace spv;
+using namespace SPIRV_CROSS_NAMESPACE;
+using namespace std;
+
+void CompilerCPP::emit_buffer_block(const SPIRVariable &var)
+{
+	add_resource_name(var.self);
+
+	auto &type = get<SPIRType>(var.basetype);
+	auto instance_name = to_name(var.self);
+
+	uint32_t descriptor_set = ir.meta[var.self].decoration.set;
+	uint32_t binding = ir.meta[var.self].decoration.binding;
+
+	emit_block_struct(type);
+	auto buffer_name = to_name(type.self);
+
+	statement("internal::Resource<", buffer_name, type_to_array_glsl(type), "> ", instance_name, "__;");
+	statement_no_indent("#define ", instance_name, " __res->", instance_name, "__.get()");
+	resource_registrations.push_back(
+	    join("s.register_resource(", instance_name, "__", ", ", descriptor_set, ", ", binding, ");"));
+	statement("");
+}
+
+void CompilerCPP::emit_interface_block(const SPIRVariable &var)
+{
+	add_resource_name(var.self);
+
+	auto &type = get<SPIRType>(var.basetype);
+
+	const char *qual = var.storage == StorageClassInput ? "StageInput" : "StageOutput";
+	const char *lowerqual = var.storage == StorageClassInput ? "stage_input" : "stage_output";
+	auto instance_name = to_name(var.self);
+	uint32_t location = ir.meta[var.self].decoration.location;
+
+	string buffer_name;
+	auto flags = ir.meta[type.self].decoration.decoration_flags;
+	if (flags.get(DecorationBlock))
+	{
+		emit_block_struct(type);
+		buffer_name = to_name(type.self);
+	}
+	else
+		buffer_name = type_to_glsl(type);
+
+	statement("internal::", qual, "<", buffer_name, type_to_array_glsl(type), "> ", instance_name, "__;");
+	statement_no_indent("#define ", instance_name, " __res->", instance_name, "__.get()");
+	resource_registrations.push_back(join("s.register_", lowerqual, "(", instance_name, "__", ", ", location, ");"));
+	statement("");
+}
+
+void CompilerCPP::emit_shared(const SPIRVariable &var)
+{
+	add_resource_name(var.self);
+
+	auto instance_name = to_name(var.self);
+	statement(CompilerGLSL::variable_decl(var), ";");
+	statement_no_indent("#define ", instance_name, " __res->", instance_name);
+}
+
+void CompilerCPP::emit_uniform(const SPIRVariable &var)
+{
+	add_resource_name(var.self);
+
+	auto &type = get<SPIRType>(var.basetype);
+	auto instance_name = to_name(var.self);
+
+	uint32_t descriptor_set = ir.meta[var.self].decoration.set;
+	uint32_t binding = ir.meta[var.self].decoration.binding;
+	uint32_t location = ir.meta[var.self].decoration.location;
+
+	string type_name = type_to_glsl(type);
+	remap_variable_type_name(type, instance_name, type_name);
+
+	if (type.basetype == SPIRType::Image || type.basetype == SPIRType::SampledImage ||
+	    type.basetype == SPIRType::AtomicCounter)
+	{
+		statement("internal::Resource<", type_name, type_to_array_glsl(type), "> ", instance_name, "__;");
+		statement_no_indent("#define ", instance_name, " __res->", instance_name, "__.get()");
+		resource_registrations.push_back(
+		    join("s.register_resource(", instance_name, "__", ", ", descriptor_set, ", ", binding, ");"));
+	}
+	else
+	{
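+		// At this point we are emitting a plain uniform constant (images,
+		// samplers and atomic counters took the branch above). The generated
+		// C++ looks roughly like:
+		//   internal::UniformConstant<T> name__;
+		//   #define name __res->name__.get()
+		// and is registered through s.register_uniform_constant(name__, location)
+		// as emitted just below.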
statement("internal::UniformConstant<", type_name, type_to_array_glsl(type), "> ", instance_name, "__;"); + statement_no_indent("#define ", instance_name, " __res->", instance_name, "__.get()"); + resource_registrations.push_back( + join("s.register_uniform_constant(", instance_name, "__", ", ", location, ");")); + } + + statement(""); +} + +void CompilerCPP::emit_push_constant_block(const SPIRVariable &var) +{ + add_resource_name(var.self); + + auto &type = get(var.basetype); + auto &flags = ir.meta[var.self].decoration.decoration_flags; + if (flags.get(DecorationBinding) || flags.get(DecorationDescriptorSet)) + SPIRV_CROSS_THROW("Push constant blocks cannot be compiled to GLSL with Binding or Set syntax. " + "Remap to location with reflection API first or disable these decorations."); + + emit_block_struct(type); + auto buffer_name = to_name(type.self); + auto instance_name = to_name(var.self); + + statement("internal::PushConstant<", buffer_name, type_to_array_glsl(type), "> ", instance_name, ";"); + statement_no_indent("#define ", instance_name, " __res->", instance_name, ".get()"); + resource_registrations.push_back(join("s.register_push_constant(", instance_name, "__", ");")); + statement(""); +} + +void CompilerCPP::emit_block_struct(SPIRType &type) +{ + + + + + + auto &self = get(type.self); + self.type_alias = 0; + emit_struct(self); +} + +void CompilerCPP::emit_resources() +{ + for (auto &id : ir.ids) + { + if (id.get_type() == TypeConstant) + { + auto &c = id.get(); + + bool needs_declaration = c.specialization || c.is_used_as_lut; + + if (needs_declaration) + { + if (!options.vulkan_semantics && c.specialization) + { + c.specialization_constant_macro_name = + constant_value_macro_name(get_decoration(c.self, DecorationSpecId)); + } + emit_constant(c); + } + } + else if (id.get_type() == TypeConstantOp) + { + emit_specialization_constant_op(id.get()); + } + } + + + + for (auto &id : ir.ids) + { + if (id.get_type() == TypeType) + { + auto &type = id.get(); + if (type.basetype == SPIRType::Struct && type.array.empty() && !type.pointer && + (!ir.meta[type.self].decoration.decoration_flags.get(DecorationBlock) && + !ir.meta[type.self].decoration.decoration_flags.get(DecorationBufferBlock))) + { + emit_struct(type); + } + } + } + + statement("struct Resources : ", resource_type); + begin_scope(); + + + for (auto &id : ir.ids) + { + if (id.get_type() == TypeVariable) + { + auto &var = id.get(); + auto &type = get(var.basetype); + + if (var.storage != StorageClassFunction && type.pointer && type.storage == StorageClassUniform && + !is_hidden_variable(var) && + (ir.meta[type.self].decoration.decoration_flags.get(DecorationBlock) || + ir.meta[type.self].decoration.decoration_flags.get(DecorationBufferBlock))) + { + emit_buffer_block(var); + } + } + } + + + for (auto &id : ir.ids) + { + if (id.get_type() == TypeVariable) + { + auto &var = id.get(); + auto &type = get(var.basetype); + if (!is_hidden_variable(var) && var.storage != StorageClassFunction && type.pointer && + type.storage == StorageClassPushConstant) + { + emit_push_constant_block(var); + } + } + } + + + for (auto &id : ir.ids) + { + if (id.get_type() == TypeVariable) + { + auto &var = id.get(); + auto &type = get(var.basetype); + + if (var.storage != StorageClassFunction && !is_hidden_variable(var) && type.pointer && + (var.storage == StorageClassInput || var.storage == StorageClassOutput) && + interface_variable_exists_in_entry_point(var.self)) + { + emit_interface_block(var); + } + } + } + + + for (auto &id : ir.ids) + { + 
if (id.get_type() == TypeVariable) + { + auto &var = id.get(); + auto &type = get(var.basetype); + + if (var.storage != StorageClassFunction && !is_hidden_variable(var) && type.pointer && + (type.storage == StorageClassUniformConstant || type.storage == StorageClassAtomicCounter)) + { + emit_uniform(var); + } + } + } + + + bool emitted = false; + for (auto global : global_variables) + { + auto &var = get(global); + if (var.storage == StorageClassWorkgroup) + { + emit_shared(var); + emitted = true; + } + } + + if (emitted) + statement(""); + + declare_undefined_values(); + + statement("inline void init(spirv_cross_shader& s)"); + begin_scope(); + statement(resource_type, "::init(s);"); + for (auto ® : resource_registrations) + statement(reg); + end_scope(); + resource_registrations.clear(); + + end_scope_decl(); + + statement(""); + statement("Resources* __res;"); + if (get_entry_point().model == ExecutionModelGLCompute) + statement("ComputePrivateResources __priv_res;"); + statement(""); + + + emitted = false; + for (auto global : global_variables) + { + auto &var = get(global); + if (var.storage == StorageClassPrivate) + { + if (var.storage == StorageClassWorkgroup) + emit_shared(var); + else + statement(CompilerGLSL::variable_decl(var), ";"); + emitted = true; + } + } + + if (emitted) + statement(""); +} + +string CompilerCPP::compile() +{ + + options.es = false; + options.version = 450; + backend.float_literal_suffix = true; + backend.double_literal_suffix = false; + backend.long_long_literal_suffix = true; + backend.uint32_t_literal_suffix = true; + backend.basic_int_type = "int32_t"; + backend.basic_uint_type = "uint32_t"; + backend.swizzle_is_function = true; + backend.shared_is_implied = true; + backend.unsized_array_supported = false; + backend.explicit_struct_type = true; + backend.use_initializer_list = true; + + fixup_type_alias(); + reorder_type_alias(); + build_function_control_flow_graphs_and_analyze(); + update_active_builtins(); + + uint32_t pass_count = 0; + do + { + if (pass_count >= 3) + SPIRV_CROSS_THROW("Over 3 compilation loops detected. Must be a bug!"); + + resource_registrations.clear(); + reset(); + + + buffer.reset(); + + emit_header(); + emit_resources(); + + emit_function(get(ir.default_entry_point), Bitset()); + + pass_count++; + } while (is_forcing_recompilation()); + + + end_scope_decl(); + + end_scope(); + + + emit_c_linkage(); + + + get_entry_point().name = "main"; + + return buffer.str(); +} + +void CompilerCPP::emit_c_linkage() +{ + statement(""); + + statement("spirv_cross_shader_t *spirv_cross_construct(void)"); + begin_scope(); + statement("return new ", impl_type, "();"); + end_scope(); + + statement(""); + statement("void spirv_cross_destruct(spirv_cross_shader_t *shader)"); + begin_scope(); + statement("delete static_cast<", impl_type, "*>(shader);"); + end_scope(); + + statement(""); + statement("void spirv_cross_invoke(spirv_cross_shader_t *shader)"); + begin_scope(); + statement("static_cast<", impl_type, "*>(shader)->invoke();"); + end_scope(); + + statement(""); + statement("static const struct spirv_cross_interface vtable ="); + begin_scope(); + statement("spirv_cross_construct,"); + statement("spirv_cross_destruct,"); + statement("spirv_cross_invoke,"); + end_scope_decl(); + + statement(""); + statement("const struct spirv_cross_interface *", + interface_name.empty() ? 
string("spirv_cross_get_interface") : interface_name, "(void)"); + begin_scope(); + statement("return &vtable;"); + end_scope(); +} + +void CompilerCPP::emit_function_prototype(SPIRFunction &func, const Bitset &) +{ + if (func.self != ir.default_entry_point) + add_function_overload(func); + + local_variable_names = resource_names; + string decl; + + auto &type = get(func.return_type); + decl += "inline "; + decl += type_to_glsl(type); + decl += " "; + + if (func.self == ir.default_entry_point) + { + decl += "main"; + processing_entry_point = true; + } + else + decl += to_name(func.self); + + decl += "("; + for (auto &arg : func.arguments) + { + add_local_variable_name(arg.id); + + decl += argument_decl(arg); + if (&arg != &func.arguments.back()) + decl += ", "; + + + auto *var = maybe_get(arg.id); + if (var) + var->parameter = &arg; + } + + decl += ")"; + statement(decl); +} + +string CompilerCPP::argument_decl(const SPIRFunction::Parameter &arg) +{ + auto &type = expression_type(arg.id); + bool constref = !type.pointer || arg.write_count == 0; + + auto &var = get(arg.id); + + string base = type_to_glsl(type); + string variable_name = to_name(var.self); + remap_variable_type_name(type, variable_name, base); + + for (uint32_t i = 0; i < type.array.size(); i++) + base = join("std::array<", base, ", ", to_array_size(type, i), ">"); + + return join(constref ? "const " : "", base, " &", variable_name); +} + +string CompilerCPP::variable_decl(const SPIRType &type, const string &name, uint32_t ) +{ + string base = type_to_glsl(type); + remap_variable_type_name(type, name, base); + bool runtime = false; + + for (uint32_t i = 0; i < type.array.size(); i++) + { + auto &array = type.array[i]; + if (!array && type.array_size_literal[i]) + { + + + runtime = true; + } + else + base = join("std::array<", base, ", ", to_array_size(type, i), ">"); + } + base += ' '; + return base + name + (runtime ? 
"[1]" : ""); +} + +void CompilerCPP::emit_header() +{ + auto &execution = get_entry_point(); + + statement("// This C++ shader is autogenerated by spirv-cross."); + statement("#include \"spirv_cross/internal_interface.hpp\""); + statement("#include \"spirv_cross/external_interface.h\""); + + statement("#include "); + statement("#include "); + statement(""); + statement("using namespace spirv_cross;"); + statement("using namespace glm;"); + statement(""); + + statement("namespace Impl"); + begin_scope(); + + switch (execution.model) + { + case ExecutionModelGeometry: + case ExecutionModelTessellationControl: + case ExecutionModelTessellationEvaluation: + case ExecutionModelGLCompute: + case ExecutionModelFragment: + case ExecutionModelVertex: + statement("struct Shader"); + begin_scope(); + break; + + default: + SPIRV_CROSS_THROW("Unsupported execution model."); + } + + switch (execution.model) + { + case ExecutionModelGeometry: + impl_type = "GeometryShader"; + resource_type = "GeometryResources"; + break; + + case ExecutionModelVertex: + impl_type = "VertexShader"; + resource_type = "VertexResources"; + break; + + case ExecutionModelFragment: + impl_type = "FragmentShader"; + resource_type = "FragmentResources"; + break; + + case ExecutionModelGLCompute: + impl_type = join("ComputeShader"); + resource_type = "ComputeResources"; + break; + + case ExecutionModelTessellationControl: + impl_type = "TessControlShader"; + resource_type = "TessControlResources"; + break; + + case ExecutionModelTessellationEvaluation: + impl_type = "TessEvaluationShader"; + resource_type = "TessEvaluationResources"; + break; + + default: + SPIRV_CROSS_THROW("Unsupported execution model."); + } +} diff --git a/third_party/rust/spirv_cross/src/vendor/SPIRV-Cross/spirv_cpp.hpp b/third_party/rust/spirv_cross/src/vendor/SPIRV-Cross/spirv_cpp.hpp new file mode 100644 index 000000000000..d2addc1a2165 --- /dev/null +++ b/third_party/rust/spirv_cross/src/vendor/SPIRV-Cross/spirv_cpp.hpp @@ -0,0 +1,86 @@ + + + + + + + + + + + + + + + + +#ifndef SPIRV_CROSS_CPP_HPP +#define SPIRV_CROSS_CPP_HPP + +#include "spirv_glsl.hpp" +#include + +namespace SPIRV_CROSS_NAMESPACE +{ +class CompilerCPP : public CompilerGLSL +{ +public: + explicit CompilerCPP(std::vector spirv_) + : CompilerGLSL(std::move(spirv_)) + { + } + + CompilerCPP(const uint32_t *ir_, size_t word_count) + : CompilerGLSL(ir_, word_count) + { + } + + explicit CompilerCPP(const ParsedIR &ir_) + : CompilerGLSL(ir_) + { + } + + explicit CompilerCPP(ParsedIR &&ir_) + : CompilerGLSL(std::move(ir_)) + { + } + + std::string compile() override; + + + + + + + void set_interface_name(std::string name) + { + interface_name = std::move(name); + } + +private: + void emit_header() override; + void emit_c_linkage(); + void emit_function_prototype(SPIRFunction &func, const Bitset &return_flags) override; + + void emit_resources(); + void emit_buffer_block(const SPIRVariable &type) override; + void emit_push_constant_block(const SPIRVariable &var) override; + void emit_interface_block(const SPIRVariable &type); + void emit_block_chain(SPIRBlock &block); + void emit_uniform(const SPIRVariable &var) override; + void emit_shared(const SPIRVariable &var); + void emit_block_struct(SPIRType &type); + std::string variable_decl(const SPIRType &type, const std::string &name, uint32_t id) override; + + std::string argument_decl(const SPIRFunction::Parameter &arg); + + SmallVector resource_registrations; + std::string impl_type; + std::string resource_type; + uint32_t shared_counter = 0; + + 
std::string interface_name; +}; +} + +#endif diff --git a/third_party/rust/spirv_cross/src/vendor/SPIRV-Cross/spirv_cross.cpp b/third_party/rust/spirv_cross/src/vendor/SPIRV-Cross/spirv_cross.cpp new file mode 100644 index 000000000000..7d1fc6388a13 --- /dev/null +++ b/third_party/rust/spirv_cross/src/vendor/SPIRV-Cross/spirv_cross.cpp @@ -0,0 +1,4639 @@ + + + + + + + + + + + + + + + + +#include "spirv_cross.hpp" +#include "GLSL.std.450.h" +#include "spirv_cfg.hpp" +#include "spirv_common.hpp" +#include "spirv_parser.hpp" +#include +#include +#include + +using namespace std; +using namespace spv; +using namespace SPIRV_CROSS_NAMESPACE; + +Compiler::Compiler(vector ir_) +{ + Parser parser(move(ir_)); + parser.parse(); + set_ir(move(parser.get_parsed_ir())); +} + +Compiler::Compiler(const uint32_t *ir_, size_t word_count) +{ + Parser parser(ir_, word_count); + parser.parse(); + set_ir(move(parser.get_parsed_ir())); +} + +Compiler::Compiler(const ParsedIR &ir_) +{ + set_ir(ir_); +} + +Compiler::Compiler(ParsedIR &&ir_) +{ + set_ir(move(ir_)); +} + +void Compiler::set_ir(ParsedIR &&ir_) +{ + ir = move(ir_); + parse_fixup(); +} + +void Compiler::set_ir(const ParsedIR &ir_) +{ + ir = ir_; + parse_fixup(); +} + +string Compiler::compile() +{ + return ""; +} + +bool Compiler::variable_storage_is_aliased(const SPIRVariable &v) +{ + auto &type = get(v.basetype); + bool ssbo = v.storage == StorageClassStorageBuffer || + ir.meta[type.self].decoration.decoration_flags.get(DecorationBufferBlock); + bool image = type.basetype == SPIRType::Image; + bool counter = type.basetype == SPIRType::AtomicCounter; + bool buffer_reference = type.storage == StorageClassPhysicalStorageBufferEXT; + + bool is_restrict; + if (ssbo) + is_restrict = ir.get_buffer_block_flags(v).get(DecorationRestrict); + else + is_restrict = has_decoration(v.self, DecorationRestrict); + + return !is_restrict && (ssbo || image || counter || buffer_reference); +} + +bool Compiler::block_is_pure(const SPIRBlock &block) +{ + + if (block.terminator == SPIRBlock::Kill) + return false; + + for (auto &i : block.ops) + { + auto ops = stream(i); + auto op = static_cast(i.op); + + switch (op) + { + case OpFunctionCall: + { + uint32_t func = ops[2]; + if (!function_is_pure(get(func))) + return false; + break; + } + + case OpCopyMemory: + case OpStore: + { + auto &type = expression_type(ops[0]); + if (type.storage != StorageClassFunction) + return false; + break; + } + + case OpImageWrite: + return false; + + + case OpAtomicLoad: + case OpAtomicStore: + case OpAtomicExchange: + case OpAtomicCompareExchange: + case OpAtomicCompareExchangeWeak: + case OpAtomicIIncrement: + case OpAtomicIDecrement: + case OpAtomicIAdd: + case OpAtomicISub: + case OpAtomicSMin: + case OpAtomicUMin: + case OpAtomicSMax: + case OpAtomicUMax: + case OpAtomicAnd: + case OpAtomicOr: + case OpAtomicXor: + return false; + + + case OpEndPrimitive: + case OpEmitStreamVertex: + case OpEndStreamPrimitive: + case OpEmitVertex: + return false; + + + case OpControlBarrier: + case OpMemoryBarrier: + return false; + + + case OpReportIntersectionNV: + case OpIgnoreIntersectionNV: + case OpTerminateRayNV: + case OpTraceNV: + case OpExecuteCallableNV: + return false; + + + + case OpDemoteToHelperInvocationEXT: + + return false; + + default: + break; + } + } + + return true; +} + +string Compiler::to_name(uint32_t id, bool allow_alias) const +{ + if (allow_alias && ir.ids[id].get_type() == TypeType) + { + + + + + auto &type = get(id); + if (type.type_alias) + { + + + if 
(!has_extended_decoration(type.type_alias, SPIRVCrossDecorationBufferBlockRepacked)) + return to_name(type.type_alias); + } + } + + auto &alias = ir.get_name(id); + if (alias.empty()) + return join("_", id); + else + return alias; +} + +bool Compiler::function_is_pure(const SPIRFunction &func) +{ + for (auto block : func.blocks) + { + if (!block_is_pure(get(block))) + { + + return false; + } + } + + + return true; +} + +void Compiler::register_global_read_dependencies(const SPIRBlock &block, uint32_t id) +{ + for (auto &i : block.ops) + { + auto ops = stream(i); + auto op = static_cast(i.op); + + switch (op) + { + case OpFunctionCall: + { + uint32_t func = ops[2]; + register_global_read_dependencies(get(func), id); + break; + } + + case OpLoad: + case OpImageRead: + { + + auto *var = maybe_get_backing_variable(ops[2]); + if (var && var->storage != StorageClassFunction) + { + auto &type = get(var->basetype); + + + if (type.basetype != SPIRType::Image && type.image.dim != DimSubpassData) + var->dependees.push_back(id); + } + break; + } + + default: + break; + } + } +} + +void Compiler::register_global_read_dependencies(const SPIRFunction &func, uint32_t id) +{ + for (auto block : func.blocks) + register_global_read_dependencies(get(block), id); +} + +SPIRVariable *Compiler::maybe_get_backing_variable(uint32_t chain) +{ + auto *var = maybe_get(chain); + if (!var) + { + auto *cexpr = maybe_get(chain); + if (cexpr) + var = maybe_get(cexpr->loaded_from); + + auto *access_chain = maybe_get(chain); + if (access_chain) + var = maybe_get(access_chain->loaded_from); + } + + return var; +} + +StorageClass Compiler::get_backing_variable_storage(uint32_t ptr) +{ + auto *var = maybe_get_backing_variable(ptr); + if (var) + return var->storage; + else + return expression_type(ptr).storage; +} + +void Compiler::register_read(uint32_t expr, uint32_t chain, bool forwarded) +{ + auto &e = get(expr); + auto *var = maybe_get_backing_variable(chain); + + if (var) + { + e.loaded_from = var->self; + + + if (forwarded && !is_immutable(var->self)) + var->dependees.push_back(e.self); + + + + if (var && var->parameter) + var->parameter->read_count++; + } +} + +void Compiler::register_write(uint32_t chain) +{ + auto *var = maybe_get(chain); + if (!var) + { + + auto *expr = maybe_get(chain); + if (expr && expr->loaded_from) + var = maybe_get(expr->loaded_from); + + auto *access_chain = maybe_get(chain); + if (access_chain && access_chain->loaded_from) + var = maybe_get(access_chain->loaded_from); + } + + if (var) + { + bool check_argument_storage_qualifier = true; + auto &type = expression_type(chain); + + + + + if (get_variable_data_type(*var).pointer) + { + flush_all_active_variables(); + + if (type.pointer_depth == 1) + { + + + + + + + + + + + + + check_argument_storage_qualifier = false; + } + } + + if (type.storage == StorageClassPhysicalStorageBufferEXT || variable_storage_is_aliased(*var)) + flush_all_aliased_variables(); + else if (var) + flush_dependees(*var); + + + if (check_argument_storage_qualifier && var->parameter && var->parameter->write_count == 0) + { + var->parameter->write_count++; + force_recompile(); + } + } + else + { + + + + + + flush_all_active_variables(); + } +} + +void Compiler::flush_dependees(SPIRVariable &var) +{ + for (auto expr : var.dependees) + invalid_expressions.insert(expr); + var.dependees.clear(); +} + +void Compiler::flush_all_aliased_variables() +{ + for (auto aliased : aliased_variables) + flush_dependees(get(aliased)); +} + +void Compiler::flush_all_atomic_capable_variables() 
+{ + for (auto global : global_variables) + flush_dependees(get(global)); + flush_all_aliased_variables(); +} + +void Compiler::flush_control_dependent_expressions(uint32_t block_id) +{ + auto &block = get(block_id); + for (auto &expr : block.invalidate_expressions) + invalid_expressions.insert(expr); + block.invalidate_expressions.clear(); +} + +void Compiler::flush_all_active_variables() +{ + + + for (auto &v : current_function->local_variables) + flush_dependees(get(v)); + for (auto &arg : current_function->arguments) + flush_dependees(get(arg.id)); + for (auto global : global_variables) + flush_dependees(get(global)); + + flush_all_aliased_variables(); +} + +uint32_t Compiler::expression_type_id(uint32_t id) const +{ + switch (ir.ids[id].get_type()) + { + case TypeVariable: + return get(id).basetype; + + case TypeExpression: + return get(id).expression_type; + + case TypeConstant: + return get(id).constant_type; + + case TypeConstantOp: + return get(id).basetype; + + case TypeUndef: + return get(id).basetype; + + case TypeCombinedImageSampler: + return get(id).combined_type; + + case TypeAccessChain: + return get(id).basetype; + + default: + SPIRV_CROSS_THROW("Cannot resolve expression type."); + } +} + +const SPIRType &Compiler::expression_type(uint32_t id) const +{ + return get(expression_type_id(id)); +} + +bool Compiler::expression_is_lvalue(uint32_t id) const +{ + auto &type = expression_type(id); + switch (type.basetype) + { + case SPIRType::SampledImage: + case SPIRType::Image: + case SPIRType::Sampler: + return false; + + default: + return true; + } +} + +bool Compiler::is_immutable(uint32_t id) const +{ + if (ir.ids[id].get_type() == TypeVariable) + { + auto &var = get(id); + + + bool pointer_to_const = var.storage == StorageClassUniformConstant; + return pointer_to_const || var.phi_variable || !expression_is_lvalue(id); + } + else if (ir.ids[id].get_type() == TypeAccessChain) + return get(id).immutable; + else if (ir.ids[id].get_type() == TypeExpression) + return get(id).immutable; + else if (ir.ids[id].get_type() == TypeConstant || ir.ids[id].get_type() == TypeConstantOp || + ir.ids[id].get_type() == TypeUndef) + return true; + else + return false; +} + +static inline bool storage_class_is_interface(spv::StorageClass storage) +{ + switch (storage) + { + case StorageClassInput: + case StorageClassOutput: + case StorageClassUniform: + case StorageClassUniformConstant: + case StorageClassAtomicCounter: + case StorageClassPushConstant: + case StorageClassStorageBuffer: + return true; + + default: + return false; + } +} + +bool Compiler::is_hidden_variable(const SPIRVariable &var, bool include_builtins) const +{ + if ((is_builtin_variable(var) && !include_builtins) || var.remapped_variable) + return true; + + + if (find_if(begin(combined_image_samplers), end(combined_image_samplers), [&var](const CombinedImageSampler &samp) { + return samp.combined_id == var.self; + }) != end(combined_image_samplers)) + { + return false; + } + + bool hidden = false; + if (check_active_interface_variables && storage_class_is_interface(var.storage)) + hidden = active_interface_variables.find(var.self) == end(active_interface_variables); + return hidden; +} + +bool Compiler::is_builtin_type(const SPIRType &type) const +{ + auto *type_meta = ir.find_meta(type.self); + + + if (type_meta) + for (auto &m : type_meta->members) + if (m.builtin) + return true; + + return false; +} + +bool Compiler::is_builtin_variable(const SPIRVariable &var) const +{ + auto *m = ir.find_meta(var.self); + + if 
(var.compat_builtin || (m && m->decoration.builtin)) + return true; + else + return is_builtin_type(get(var.basetype)); +} + +bool Compiler::is_member_builtin(const SPIRType &type, uint32_t index, BuiltIn *builtin) const +{ + auto *type_meta = ir.find_meta(type.self); + + if (type_meta) + { + auto &memb = type_meta->members; + if (index < memb.size() && memb[index].builtin) + { + if (builtin) + *builtin = memb[index].builtin_type; + return true; + } + } + + return false; +} + +bool Compiler::is_scalar(const SPIRType &type) const +{ + return type.basetype != SPIRType::Struct && type.vecsize == 1 && type.columns == 1; +} + +bool Compiler::is_vector(const SPIRType &type) const +{ + return type.vecsize > 1 && type.columns == 1; +} + +bool Compiler::is_matrix(const SPIRType &type) const +{ + return type.vecsize > 1 && type.columns > 1; +} + +bool Compiler::is_array(const SPIRType &type) const +{ + return !type.array.empty(); +} + +ShaderResources Compiler::get_shader_resources() const +{ + return get_shader_resources(nullptr); +} + +ShaderResources Compiler::get_shader_resources(const unordered_set &active_variables) const +{ + return get_shader_resources(&active_variables); +} + +bool Compiler::InterfaceVariableAccessHandler::handle(Op opcode, const uint32_t *args, uint32_t length) +{ + uint32_t variable = 0; + switch (opcode) + { + + default: + break; + + case OpFunctionCall: + { + + if (length < 3) + return false; + + uint32_t count = length - 3; + args += 3; + for (uint32_t i = 0; i < count; i++) + { + auto *var = compiler.maybe_get(args[i]); + if (var && storage_class_is_interface(var->storage)) + variables.insert(args[i]); + } + break; + } + + case OpSelect: + { + + if (length < 5) + return false; + + uint32_t count = length - 3; + args += 3; + for (uint32_t i = 0; i < count; i++) + { + auto *var = compiler.maybe_get(args[i]); + if (var && storage_class_is_interface(var->storage)) + variables.insert(args[i]); + } + break; + } + + case OpPhi: + { + + if (length < 2) + return false; + + uint32_t count = length - 2; + args += 2; + for (uint32_t i = 0; i < count; i += 2) + { + auto *var = compiler.maybe_get(args[i]); + if (var && storage_class_is_interface(var->storage)) + variables.insert(args[i]); + } + break; + } + + case OpAtomicStore: + case OpStore: + + if (length < 1) + return false; + variable = args[0]; + break; + + case OpCopyMemory: + { + if (length < 2) + return false; + + auto *var = compiler.maybe_get(args[0]); + if (var && storage_class_is_interface(var->storage)) + variables.insert(args[0]); + + var = compiler.maybe_get(args[1]); + if (var && storage_class_is_interface(var->storage)) + variables.insert(args[1]); + break; + } + + case OpExtInst: + { + if (length < 5) + return false; + uint32_t extension_set = args[2]; + if (compiler.get(extension_set).ext == SPIRExtension::SPV_AMD_shader_explicit_vertex_parameter) + { + enum AMDShaderExplicitVertexParameter + { + InterpolateAtVertexAMD = 1 + }; + + auto op = static_cast(args[3]); + + switch (op) + { + case InterpolateAtVertexAMD: + { + auto *var = compiler.maybe_get(args[4]); + if (var && storage_class_is_interface(var->storage)) + variables.insert(args[4]); + break; + } + + default: + break; + } + } + break; + } + + case OpAccessChain: + case OpInBoundsAccessChain: + case OpPtrAccessChain: + case OpLoad: + case OpCopyObject: + case OpImageTexelPointer: + case OpAtomicLoad: + case OpAtomicExchange: + case OpAtomicCompareExchange: + case OpAtomicCompareExchangeWeak: + case OpAtomicIIncrement: + case OpAtomicIDecrement: + case 
OpAtomicIAdd: + case OpAtomicISub: + case OpAtomicSMin: + case OpAtomicUMin: + case OpAtomicSMax: + case OpAtomicUMax: + case OpAtomicAnd: + case OpAtomicOr: + case OpAtomicXor: + case OpArrayLength: + + if (length < 3) + return false; + variable = args[2]; + break; + } + + if (variable) + { + auto *var = compiler.maybe_get(variable); + if (var && storage_class_is_interface(var->storage)) + variables.insert(variable); + } + return true; +} + +unordered_set Compiler::get_active_interface_variables() const +{ + + unordered_set variables; + InterfaceVariableAccessHandler handler(*this, variables); + traverse_all_reachable_opcodes(get(ir.default_entry_point), handler); + + + ir.for_each_typed_id([&](uint32_t, const SPIRVariable &var) { + if (var.storage == StorageClassOutput && var.initializer != ID(0)) + variables.insert(var.self); + }); + + + if (dummy_sampler_id) + variables.insert(dummy_sampler_id); + + return variables; +} + +void Compiler::set_enabled_interface_variables(std::unordered_set active_variables) +{ + active_interface_variables = move(active_variables); + check_active_interface_variables = true; +} + +ShaderResources Compiler::get_shader_resources(const unordered_set *active_variables) const +{ + ShaderResources res; + + bool ssbo_instance_name = reflection_ssbo_instance_name_is_significant(); + + ir.for_each_typed_id([&](uint32_t, const SPIRVariable &var) { + auto &type = this->get(var.basetype); + + + + if (var.storage == StorageClassFunction || !type.pointer || is_builtin_variable(var)) + return; + + if (active_variables && active_variables->find(var.self) == end(*active_variables)) + return; + + + if (var.storage == StorageClassInput && interface_variable_exists_in_entry_point(var.self)) + { + if (has_decoration(type.self, DecorationBlock)) + { + res.stage_inputs.push_back( + { var.self, var.basetype, type.self, get_remapped_declared_block_name(var.self, false) }); + } + else + res.stage_inputs.push_back({ var.self, var.basetype, type.self, get_name(var.self) }); + } + + else if (var.storage == StorageClassUniformConstant && type.image.dim == DimSubpassData) + { + res.subpass_inputs.push_back({ var.self, var.basetype, type.self, get_name(var.self) }); + } + + else if (var.storage == StorageClassOutput && interface_variable_exists_in_entry_point(var.self)) + { + if (has_decoration(type.self, DecorationBlock)) + { + res.stage_outputs.push_back( + { var.self, var.basetype, type.self, get_remapped_declared_block_name(var.self, false) }); + } + else + res.stage_outputs.push_back({ var.self, var.basetype, type.self, get_name(var.self) }); + } + + else if (type.storage == StorageClassUniform && has_decoration(type.self, DecorationBlock)) + { + res.uniform_buffers.push_back( + { var.self, var.basetype, type.self, get_remapped_declared_block_name(var.self, false) }); + } + + else if (type.storage == StorageClassUniform && has_decoration(type.self, DecorationBufferBlock)) + { + res.storage_buffers.push_back( + { var.self, var.basetype, type.self, get_remapped_declared_block_name(var.self, ssbo_instance_name) }); + } + + else if (type.storage == StorageClassStorageBuffer) + { + res.storage_buffers.push_back( + { var.self, var.basetype, type.self, get_remapped_declared_block_name(var.self, ssbo_instance_name) }); + } + + else if (type.storage == StorageClassPushConstant) + { + + + res.push_constant_buffers.push_back({ var.self, var.basetype, type.self, get_name(var.self) }); + } + + else if (type.storage == StorageClassUniformConstant && type.basetype == SPIRType::Image && + 
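+		         // type.image.sampled == 2 marks a storage image, == 1 a
+		         // sampled image; the two branches below split on this field.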
type.image.sampled == 2) + { + res.storage_images.push_back({ var.self, var.basetype, type.self, get_name(var.self) }); + } + + else if (type.storage == StorageClassUniformConstant && type.basetype == SPIRType::Image && + type.image.sampled == 1) + { + res.separate_images.push_back({ var.self, var.basetype, type.self, get_name(var.self) }); + } + + else if (type.storage == StorageClassUniformConstant && type.basetype == SPIRType::Sampler) + { + res.separate_samplers.push_back({ var.self, var.basetype, type.self, get_name(var.self) }); + } + + else if (type.storage == StorageClassUniformConstant && type.basetype == SPIRType::SampledImage) + { + res.sampled_images.push_back({ var.self, var.basetype, type.self, get_name(var.self) }); + } + + else if (type.storage == StorageClassAtomicCounter) + { + res.atomic_counters.push_back({ var.self, var.basetype, type.self, get_name(var.self) }); + } + + else if (type.storage == StorageClassUniformConstant && type.basetype == SPIRType::AccelerationStructureNV) + { + res.acceleration_structures.push_back({ var.self, var.basetype, type.self, get_name(var.self) }); + } + }); + + return res; +} + +bool Compiler::type_is_block_like(const SPIRType &type) const +{ + if (type.basetype != SPIRType::Struct) + return false; + + if (has_decoration(type.self, DecorationBlock) || has_decoration(type.self, DecorationBufferBlock)) + { + return true; + } + + + for (uint32_t i = 0; i < uint32_t(type.member_types.size()); i++) + if (has_member_decoration(type.self, i, DecorationOffset)) + return true; + + return false; +} + +void Compiler::parse_fixup() +{ + + for (auto id_ : ir.ids_for_constant_or_variable) + { + auto &id = ir.ids[id_]; + + if (id.get_type() == TypeConstant) + { + auto &c = id.get(); + if (ir.meta[c.self].decoration.builtin && ir.meta[c.self].decoration.builtin_type == BuiltInWorkgroupSize) + { + + + for (auto &entry : ir.entry_points) + { + entry.second.workgroup_size.constant = c.self; + entry.second.workgroup_size.x = c.scalar(0, 0); + entry.second.workgroup_size.y = c.scalar(0, 1); + entry.second.workgroup_size.z = c.scalar(0, 2); + } + } + } + else if (id.get_type() == TypeVariable) + { + auto &var = id.get(); + if (var.storage == StorageClassPrivate || var.storage == StorageClassWorkgroup || + var.storage == StorageClassOutput) + global_variables.push_back(var.self); + if (variable_storage_is_aliased(var)) + aliased_variables.push_back(var.self); + } + } +} + +void Compiler::update_name_cache(unordered_set &cache_primary, const unordered_set &cache_secondary, + string &name) +{ + if (name.empty()) + return; + + const auto find_name = [&](const string &n) -> bool { + if (cache_primary.find(n) != end(cache_primary)) + return true; + + if (&cache_primary != &cache_secondary) + if (cache_secondary.find(n) != end(cache_secondary)) + return true; + + return false; + }; + + const auto insert_name = [&](const string &n) { cache_primary.insert(n); }; + + if (!find_name(name)) + { + insert_name(name); + return; + } + + uint32_t counter = 0; + auto tmpname = name; + + bool use_linked_underscore = true; + + if (tmpname == "_") + { + + + tmpname += "0"; + } + else if (tmpname.back() == '_') + { + + + use_linked_underscore = false; + } + + + + do + { + counter++; + name = tmpname + (use_linked_underscore ? 
"_" : "") + convert_to_string(counter); + } while (find_name(name)); + insert_name(name); +} + +void Compiler::update_name_cache(unordered_set &cache, string &name) +{ + update_name_cache(cache, cache, name); +} + +void Compiler::set_name(ID id, const std::string &name) +{ + ir.set_name(id, name); +} + +const SPIRType &Compiler::get_type(TypeID id) const +{ + return get(id); +} + +const SPIRType &Compiler::get_type_from_variable(VariableID id) const +{ + return get(get(id).basetype); +} + +uint32_t Compiler::get_pointee_type_id(uint32_t type_id) const +{ + auto *p_type = &get(type_id); + if (p_type->pointer) + { + assert(p_type->parent_type); + type_id = p_type->parent_type; + } + return type_id; +} + +const SPIRType &Compiler::get_pointee_type(const SPIRType &type) const +{ + auto *p_type = &type; + if (p_type->pointer) + { + assert(p_type->parent_type); + p_type = &get(p_type->parent_type); + } + return *p_type; +} + +const SPIRType &Compiler::get_pointee_type(uint32_t type_id) const +{ + return get_pointee_type(get(type_id)); +} + +uint32_t Compiler::get_variable_data_type_id(const SPIRVariable &var) const +{ + if (var.phi_variable) + return var.basetype; + return get_pointee_type_id(var.basetype); +} + +SPIRType &Compiler::get_variable_data_type(const SPIRVariable &var) +{ + return get(get_variable_data_type_id(var)); +} + +const SPIRType &Compiler::get_variable_data_type(const SPIRVariable &var) const +{ + return get(get_variable_data_type_id(var)); +} + +SPIRType &Compiler::get_variable_element_type(const SPIRVariable &var) +{ + SPIRType *type = &get_variable_data_type(var); + if (is_array(*type)) + type = &get(type->parent_type); + return *type; +} + +const SPIRType &Compiler::get_variable_element_type(const SPIRVariable &var) const +{ + const SPIRType *type = &get_variable_data_type(var); + if (is_array(*type)) + type = &get(type->parent_type); + return *type; +} + +bool Compiler::is_sampled_image_type(const SPIRType &type) +{ + return (type.basetype == SPIRType::Image || type.basetype == SPIRType::SampledImage) && type.image.sampled == 1 && + type.image.dim != DimBuffer; +} + +void Compiler::set_member_decoration_string(TypeID id, uint32_t index, spv::Decoration decoration, + const std::string &argument) +{ + ir.set_member_decoration_string(id, index, decoration, argument); +} + +void Compiler::set_member_decoration(TypeID id, uint32_t index, Decoration decoration, uint32_t argument) +{ + ir.set_member_decoration(id, index, decoration, argument); +} + +void Compiler::set_member_name(TypeID id, uint32_t index, const std::string &name) +{ + ir.set_member_name(id, index, name); +} + +const std::string &Compiler::get_member_name(TypeID id, uint32_t index) const +{ + return ir.get_member_name(id, index); +} + +void Compiler::set_qualified_name(uint32_t id, const string &name) +{ + ir.meta[id].decoration.qualified_alias = name; +} + +void Compiler::set_member_qualified_name(uint32_t type_id, uint32_t index, const std::string &name) +{ + ir.meta[type_id].members.resize(max(ir.meta[type_id].members.size(), size_t(index) + 1)); + ir.meta[type_id].members[index].qualified_alias = name; +} + +const string &Compiler::get_member_qualified_name(TypeID type_id, uint32_t index) const +{ + auto *m = ir.find_meta(type_id); + if (m && index < m->members.size()) + return m->members[index].qualified_alias; + else + return ir.get_empty_string(); +} + +uint32_t Compiler::get_member_decoration(TypeID id, uint32_t index, Decoration decoration) const +{ + return ir.get_member_decoration(id, index, 
decoration); +} + +const Bitset &Compiler::get_member_decoration_bitset(TypeID id, uint32_t index) const +{ + return ir.get_member_decoration_bitset(id, index); +} + +bool Compiler::has_member_decoration(TypeID id, uint32_t index, Decoration decoration) const +{ + return ir.has_member_decoration(id, index, decoration); +} + +void Compiler::unset_member_decoration(TypeID id, uint32_t index, Decoration decoration) +{ + ir.unset_member_decoration(id, index, decoration); +} + +void Compiler::set_decoration_string(ID id, spv::Decoration decoration, const std::string &argument) +{ + ir.set_decoration_string(id, decoration, argument); +} + +void Compiler::set_decoration(ID id, Decoration decoration, uint32_t argument) +{ + ir.set_decoration(id, decoration, argument); +} + +void Compiler::set_extended_decoration(uint32_t id, ExtendedDecorations decoration, uint32_t value) +{ + auto &dec = ir.meta[id].decoration; + dec.extended.flags.set(decoration); + dec.extended.values[decoration] = value; +} + +void Compiler::set_extended_member_decoration(uint32_t type, uint32_t index, ExtendedDecorations decoration, + uint32_t value) +{ + ir.meta[type].members.resize(max(ir.meta[type].members.size(), size_t(index) + 1)); + auto &dec = ir.meta[type].members[index]; + dec.extended.flags.set(decoration); + dec.extended.values[decoration] = value; +} + +static uint32_t get_default_extended_decoration(ExtendedDecorations decoration) +{ + switch (decoration) + { + case SPIRVCrossDecorationResourceIndexPrimary: + case SPIRVCrossDecorationResourceIndexSecondary: + case SPIRVCrossDecorationResourceIndexTertiary: + case SPIRVCrossDecorationResourceIndexQuaternary: + case SPIRVCrossDecorationInterfaceMemberIndex: + return ~(0u); + + default: + return 0; + } +} + +uint32_t Compiler::get_extended_decoration(uint32_t id, ExtendedDecorations decoration) const +{ + auto *m = ir.find_meta(id); + if (!m) + return 0; + + auto &dec = m->decoration; + + if (!dec.extended.flags.get(decoration)) + return get_default_extended_decoration(decoration); + + return dec.extended.values[decoration]; +} + +uint32_t Compiler::get_extended_member_decoration(uint32_t type, uint32_t index, ExtendedDecorations decoration) const +{ + auto *m = ir.find_meta(type); + if (!m) + return 0; + + if (index >= m->members.size()) + return 0; + + auto &dec = m->members[index]; + if (!dec.extended.flags.get(decoration)) + return get_default_extended_decoration(decoration); + return dec.extended.values[decoration]; +} + +bool Compiler::has_extended_decoration(uint32_t id, ExtendedDecorations decoration) const +{ + auto *m = ir.find_meta(id); + if (!m) + return false; + + auto &dec = m->decoration; + return dec.extended.flags.get(decoration); +} + +bool Compiler::has_extended_member_decoration(uint32_t type, uint32_t index, ExtendedDecorations decoration) const +{ + auto *m = ir.find_meta(type); + if (!m) + return false; + + if (index >= m->members.size()) + return false; + + auto &dec = m->members[index]; + return dec.extended.flags.get(decoration); +} + +void Compiler::unset_extended_decoration(uint32_t id, ExtendedDecorations decoration) +{ + auto &dec = ir.meta[id].decoration; + dec.extended.flags.clear(decoration); + dec.extended.values[decoration] = 0; +} + +void Compiler::unset_extended_member_decoration(uint32_t type, uint32_t index, ExtendedDecorations decoration) +{ + ir.meta[type].members.resize(max(ir.meta[type].members.size(), size_t(index) + 1)); + auto &dec = ir.meta[type].members[index]; + dec.extended.flags.clear(decoration); + 
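+	// Clearing the stored word as well avoids stale data; once the flag bit is
+	// cleared, lookups already fall back to get_default_extended_decoration().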
dec.extended.values[decoration] = 0; +} + +StorageClass Compiler::get_storage_class(VariableID id) const +{ + return get(id).storage; +} + +const std::string &Compiler::get_name(ID id) const +{ + return ir.get_name(id); +} + +const std::string Compiler::get_fallback_name(ID id) const +{ + return join("_", id); +} + +const std::string Compiler::get_block_fallback_name(VariableID id) const +{ + auto &var = get(id); + if (get_name(id).empty()) + return join("_", get(var.basetype).self, "_", id); + else + return get_name(id); +} + +const Bitset &Compiler::get_decoration_bitset(ID id) const +{ + return ir.get_decoration_bitset(id); +} + +bool Compiler::has_decoration(ID id, Decoration decoration) const +{ + return ir.has_decoration(id, decoration); +} + +const string &Compiler::get_decoration_string(ID id, Decoration decoration) const +{ + return ir.get_decoration_string(id, decoration); +} + +const string &Compiler::get_member_decoration_string(TypeID id, uint32_t index, Decoration decoration) const +{ + return ir.get_member_decoration_string(id, index, decoration); +} + +uint32_t Compiler::get_decoration(ID id, Decoration decoration) const +{ + return ir.get_decoration(id, decoration); +} + +void Compiler::unset_decoration(ID id, Decoration decoration) +{ + ir.unset_decoration(id, decoration); +} + +bool Compiler::get_binary_offset_for_decoration(VariableID id, spv::Decoration decoration, uint32_t &word_offset) const +{ + auto *m = ir.find_meta(id); + if (!m) + return false; + + auto &word_offsets = m->decoration_word_offset; + auto itr = word_offsets.find(decoration); + if (itr == end(word_offsets)) + return false; + + word_offset = itr->second; + return true; +} + +bool Compiler::block_is_loop_candidate(const SPIRBlock &block, SPIRBlock::Method method) const +{ + + if (block.disable_block_optimization || block.complex_continue) + return false; + + if (method == SPIRBlock::MergeToSelectForLoop || method == SPIRBlock::MergeToSelectContinueForLoop) + { + + + + + const auto *false_block = maybe_get(block.false_block); + const auto *true_block = maybe_get(block.true_block); + const auto *merge_block = maybe_get(block.merge_block); + + bool false_block_is_merge = block.false_block == block.merge_block || + (false_block && merge_block && execution_is_noop(*false_block, *merge_block)); + + bool true_block_is_merge = block.true_block == block.merge_block || + (true_block && merge_block && execution_is_noop(*true_block, *merge_block)); + + bool positive_candidate = + block.true_block != block.merge_block && block.true_block != block.self && false_block_is_merge; + + bool negative_candidate = + block.false_block != block.merge_block && block.false_block != block.self && true_block_is_merge; + + bool ret = block.terminator == SPIRBlock::Select && block.merge == SPIRBlock::MergeLoop && + (positive_candidate || negative_candidate); + + if (ret && positive_candidate && method == SPIRBlock::MergeToSelectContinueForLoop) + ret = block.true_block == block.continue_block; + else if (ret && negative_candidate && method == SPIRBlock::MergeToSelectContinueForLoop) + ret = block.false_block == block.continue_block; + + + + + if (ret) + { + for (auto &phi : block.phi_variables) + if (phi.parent == block.self) + return false; + + auto *merge = maybe_get(block.merge_block); + if (merge) + for (auto &phi : merge->phi_variables) + if (phi.parent == block.self) + return false; + } + return ret; + } + else if (method == SPIRBlock::MergeToDirectForLoop) + { + + + bool ret = block.terminator == SPIRBlock::Direct && 
block.merge == SPIRBlock::MergeLoop && block.ops.empty(); + + if (!ret) + return false; + + auto &child = get(block.next_block); + + const auto *false_block = maybe_get(child.false_block); + const auto *true_block = maybe_get(child.true_block); + const auto *merge_block = maybe_get(block.merge_block); + + bool false_block_is_merge = child.false_block == block.merge_block || + (false_block && merge_block && execution_is_noop(*false_block, *merge_block)); + + bool true_block_is_merge = child.true_block == block.merge_block || + (true_block && merge_block && execution_is_noop(*true_block, *merge_block)); + + bool positive_candidate = + child.true_block != block.merge_block && child.true_block != block.self && false_block_is_merge; + + bool negative_candidate = + child.false_block != block.merge_block && child.false_block != block.self && true_block_is_merge; + + ret = child.terminator == SPIRBlock::Select && child.merge == SPIRBlock::MergeNone && + (positive_candidate || negative_candidate); + + + + + if (ret) + { + for (auto &phi : block.phi_variables) + if (phi.parent == block.self || phi.parent == child.self) + return false; + + for (auto &phi : child.phi_variables) + if (phi.parent == block.self) + return false; + + auto *merge = maybe_get(block.merge_block); + if (merge) + for (auto &phi : merge->phi_variables) + if (phi.parent == block.self || phi.parent == child.false_block) + return false; + } + + return ret; + } + else + return false; +} + +bool Compiler::execution_is_noop(const SPIRBlock &from, const SPIRBlock &to) const +{ + if (!execution_is_branchless(from, to)) + return false; + + auto *start = &from; + for (;;) + { + if (start->self == to.self) + return true; + + if (!start->ops.empty()) + return false; + + auto &next = get(start->next_block); + + for (auto &phi : next.phi_variables) + if (phi.parent == start->self) + return false; + + start = &next; + } +} + +bool Compiler::execution_is_branchless(const SPIRBlock &from, const SPIRBlock &to) const +{ + auto *start = &from; + for (;;) + { + if (start->self == to.self) + return true; + + if (start->terminator == SPIRBlock::Direct && start->merge == SPIRBlock::MergeNone) + start = &get(start->next_block); + else + return false; + } +} + +bool Compiler::execution_is_direct_branch(const SPIRBlock &from, const SPIRBlock &to) const +{ + return from.terminator == SPIRBlock::Direct && from.merge == SPIRBlock::MergeNone && from.next_block == to.self; +} + +SPIRBlock::ContinueBlockType Compiler::continue_block_type(const SPIRBlock &block) const +{ + + if (block.complex_continue) + return SPIRBlock::ComplexLoop; + + + + if (block.merge == SPIRBlock::MergeLoop) + return SPIRBlock::WhileLoop; + + if (block.loop_dominator == BlockID(SPIRBlock::NoDominator)) + { + + return SPIRBlock::ComplexLoop; + } + + auto &dominator = get(block.loop_dominator); + + if (execution_is_noop(block, dominator)) + return SPIRBlock::WhileLoop; + else if (execution_is_branchless(block, dominator)) + return SPIRBlock::ForLoop; + else + { + const auto *false_block = maybe_get(block.false_block); + const auto *true_block = maybe_get(block.true_block); + const auto *merge_block = maybe_get(dominator.merge_block); + + + bool flush_phi_to_false = false_block && flush_phi_required(block.self, block.false_block); + bool flush_phi_to_true = true_block && flush_phi_required(block.self, block.true_block); + if (flush_phi_to_false || flush_phi_to_true) + return SPIRBlock::ComplexLoop; + + bool positive_do_while = block.true_block == dominator.self && + (block.false_block == 
dominator.merge_block || + (false_block && merge_block && execution_is_noop(*false_block, *merge_block))); + + bool negative_do_while = block.false_block == dominator.self && + (block.true_block == dominator.merge_block || + (true_block && merge_block && execution_is_noop(*true_block, *merge_block))); + + if (block.merge == SPIRBlock::MergeNone && block.terminator == SPIRBlock::Select && + (positive_do_while || negative_do_while)) + { + return SPIRBlock::DoWhileLoop; + } + else + return SPIRBlock::ComplexLoop; + } +} + +bool Compiler::traverse_all_reachable_opcodes(const SPIRBlock &block, OpcodeHandler &handler) const +{ + handler.set_current_block(block); + handler.rearm_current_block(block); + + + + + for (auto &i : block.ops) + { + auto ops = stream(i); + auto op = static_cast(i.op); + + if (!handler.handle(op, ops, i.length)) + return false; + + if (op == OpFunctionCall) + { + auto &func = get(ops[2]); + if (handler.follow_function_call(func)) + { + if (!handler.begin_function_scope(ops, i.length)) + return false; + if (!traverse_all_reachable_opcodes(get(ops[2]), handler)) + return false; + if (!handler.end_function_scope(ops, i.length)) + return false; + + handler.rearm_current_block(block); + } + } + } + + return true; +} + +bool Compiler::traverse_all_reachable_opcodes(const SPIRFunction &func, OpcodeHandler &handler) const +{ + for (auto block : func.blocks) + if (!traverse_all_reachable_opcodes(get(block), handler)) + return false; + + return true; +} + +uint32_t Compiler::type_struct_member_offset(const SPIRType &type, uint32_t index) const +{ + auto *type_meta = ir.find_meta(type.self); + if (type_meta) + { + + auto &dec = type_meta->members[index]; + if (dec.decoration_flags.get(DecorationOffset)) + return dec.offset; + else + SPIRV_CROSS_THROW("Struct member does not have Offset set."); + } + else + SPIRV_CROSS_THROW("Struct member does not have Offset set."); +} + +uint32_t Compiler::type_struct_member_array_stride(const SPIRType &type, uint32_t index) const +{ + auto *type_meta = ir.find_meta(type.member_types[index]); + if (type_meta) + { + + + auto &dec = type_meta->decoration; + if (dec.decoration_flags.get(DecorationArrayStride)) + return dec.array_stride; + else + SPIRV_CROSS_THROW("Struct member does not have ArrayStride set."); + } + else + SPIRV_CROSS_THROW("Struct member does not have ArrayStride set."); +} + +uint32_t Compiler::type_struct_member_matrix_stride(const SPIRType &type, uint32_t index) const +{ + auto *type_meta = ir.find_meta(type.self); + if (type_meta) + { + + + auto &dec = type_meta->members[index]; + if (dec.decoration_flags.get(DecorationMatrixStride)) + return dec.matrix_stride; + else + SPIRV_CROSS_THROW("Struct member does not have MatrixStride set."); + } + else + SPIRV_CROSS_THROW("Struct member does not have MatrixStride set."); +} + +size_t Compiler::get_declared_struct_size(const SPIRType &type) const +{ + if (type.member_types.empty()) + SPIRV_CROSS_THROW("Declared struct in block cannot be empty."); + + uint32_t last = uint32_t(type.member_types.size() - 1); + size_t offset = type_struct_member_offset(type, last); + size_t size = get_declared_struct_member_size(type, last); + return offset + size; +} + +size_t Compiler::get_declared_struct_size_runtime_array(const SPIRType &type, size_t array_size) const +{ + if (type.member_types.empty()) + SPIRV_CROSS_THROW("Declared struct in block cannot be empty."); + + size_t size = get_declared_struct_size(type); + auto &last_type = get(type.member_types.back()); + if (!last_type.array.empty() && 
last_type.array_size_literal[0] && last_type.array[0] == 0) + size += array_size * type_struct_member_array_stride(type, uint32_t(type.member_types.size() - 1)); + + return size; +} + +size_t Compiler::get_declared_struct_member_size(const SPIRType &struct_type, uint32_t index) const +{ + if (struct_type.member_types.empty()) + SPIRV_CROSS_THROW("Declared struct in block cannot be empty."); + + auto &flags = get_member_decoration_bitset(struct_type.self, index); + auto &type = get(struct_type.member_types[index]); + + switch (type.basetype) + { + case SPIRType::Unknown: + case SPIRType::Void: + case SPIRType::Boolean: + case SPIRType::AtomicCounter: + case SPIRType::Image: + case SPIRType::SampledImage: + case SPIRType::Sampler: + SPIRV_CROSS_THROW("Querying size for object with opaque size."); + + default: + break; + } + + if (!type.array.empty()) + { + + bool array_size_literal = type.array_size_literal.back(); + uint32_t array_size = array_size_literal ? type.array.back() : get(type.array.back()).scalar(); + return type_struct_member_array_stride(struct_type, index) * array_size; + } + else if (type.basetype == SPIRType::Struct) + { + return get_declared_struct_size(type); + } + else + { + unsigned vecsize = type.vecsize; + unsigned columns = type.columns; + + + if (columns == 1) + { + size_t component_size = type.width / 8; + return vecsize * component_size; + } + else + { + uint32_t matrix_stride = type_struct_member_matrix_stride(struct_type, index); + + + if (flags.get(DecorationRowMajor)) + return matrix_stride * vecsize; + else if (flags.get(DecorationColMajor)) + return matrix_stride * columns; + else + SPIRV_CROSS_THROW("Either row-major or column-major must be declared for matrices."); + } + } +} + +bool Compiler::BufferAccessHandler::handle(Op opcode, const uint32_t *args, uint32_t length) +{ + if (opcode != OpAccessChain && opcode != OpInBoundsAccessChain && opcode != OpPtrAccessChain) + return true; + + bool ptr_chain = (opcode == OpPtrAccessChain); + + + if (length < (ptr_chain ? 5u : 4u)) + return false; + + if (args[2] != id) + return true; + + + + uint32_t index = compiler.get(args[ptr_chain ? 
4 : 3]).scalar(); + + + if (seen.find(index) != end(seen)) + return true; + seen.insert(index); + + auto &type = compiler.expression_type(id); + uint32_t offset = compiler.type_struct_member_offset(type, index); + + size_t range; + + + + + + if (index + 1 < type.member_types.size()) + { + range = compiler.type_struct_member_offset(type, index + 1) - offset; + } + else + { + + range = compiler.get_declared_struct_member_size(type, index); + } + + ranges.push_back({ index, offset, range }); + return true; +} + +SmallVector Compiler::get_active_buffer_ranges(VariableID id) const +{ + SmallVector ranges; + BufferAccessHandler handler(*this, ranges, id); + traverse_all_reachable_opcodes(get(ir.default_entry_point), handler); + return ranges; +} + +bool Compiler::types_are_logically_equivalent(const SPIRType &a, const SPIRType &b) const +{ + if (a.basetype != b.basetype) + return false; + if (a.width != b.width) + return false; + if (a.vecsize != b.vecsize) + return false; + if (a.columns != b.columns) + return false; + if (a.array.size() != b.array.size()) + return false; + + size_t array_count = a.array.size(); + if (array_count && memcmp(a.array.data(), b.array.data(), array_count * sizeof(uint32_t)) != 0) + return false; + + if (a.basetype == SPIRType::Image || a.basetype == SPIRType::SampledImage) + { + if (memcmp(&a.image, &b.image, sizeof(SPIRType::Image)) != 0) + return false; + } + + if (a.member_types.size() != b.member_types.size()) + return false; + + size_t member_types = a.member_types.size(); + for (size_t i = 0; i < member_types; i++) + { + if (!types_are_logically_equivalent(get(a.member_types[i]), get(b.member_types[i]))) + return false; + } + + return true; +} + +const Bitset &Compiler::get_execution_mode_bitset() const +{ + return get_entry_point().flags; +} + +void Compiler::set_execution_mode(ExecutionMode mode, uint32_t arg0, uint32_t arg1, uint32_t arg2) +{ + auto &execution = get_entry_point(); + + execution.flags.set(mode); + switch (mode) + { + case ExecutionModeLocalSize: + execution.workgroup_size.x = arg0; + execution.workgroup_size.y = arg1; + execution.workgroup_size.z = arg2; + break; + + case ExecutionModeInvocations: + execution.invocations = arg0; + break; + + case ExecutionModeOutputVertices: + execution.output_vertices = arg0; + break; + + default: + break; + } +} + +void Compiler::unset_execution_mode(ExecutionMode mode) +{ + auto &execution = get_entry_point(); + execution.flags.clear(mode); +} + +uint32_t Compiler::get_work_group_size_specialization_constants(SpecializationConstant &x, SpecializationConstant &y, + SpecializationConstant &z) const +{ + auto &execution = get_entry_point(); + x = { 0, 0 }; + y = { 0, 0 }; + z = { 0, 0 }; + + if (execution.workgroup_size.constant != 0) + { + auto &c = get(execution.workgroup_size.constant); + + if (c.m.c[0].id[0] != ID(0)) + { + x.id = c.m.c[0].id[0]; + x.constant_id = get_decoration(c.m.c[0].id[0], DecorationSpecId); + } + + if (c.m.c[0].id[1] != ID(0)) + { + y.id = c.m.c[0].id[1]; + y.constant_id = get_decoration(c.m.c[0].id[1], DecorationSpecId); + } + + if (c.m.c[0].id[2] != ID(0)) + { + z.id = c.m.c[0].id[2]; + z.constant_id = get_decoration(c.m.c[0].id[2], DecorationSpecId); + } + } + + return execution.workgroup_size.constant; +} + +uint32_t Compiler::get_execution_mode_argument(spv::ExecutionMode mode, uint32_t index) const +{ + auto &execution = get_entry_point(); + switch (mode) + { + case ExecutionModeLocalSize: + switch (index) + { + case 0: + return execution.workgroup_size.x; + case 1: + return 
execution.workgroup_size.y;
+		case 2:
+			return execution.workgroup_size.z;
+		default:
+			return 0;
+		}
+
+	case ExecutionModeInvocations:
+		return execution.invocations;
+
+	case ExecutionModeOutputVertices:
+		return execution.output_vertices;
+
+	default:
+		return 0;
+	}
+}
+
+ExecutionModel Compiler::get_execution_model() const
+{
+	auto &execution = get_entry_point();
+	return execution.model;
+}
+
+bool Compiler::is_tessellation_shader(ExecutionModel model)
+{
+	return model == ExecutionModelTessellationControl || model == ExecutionModelTessellationEvaluation;
+}
+
+bool Compiler::is_tessellation_shader() const
+{
+	return is_tessellation_shader(get_execution_model());
+}
+
+void Compiler::set_remapped_variable_state(VariableID id, bool remap_enable)
+{
+	get<SPIRVariable>(id).remapped_variable = remap_enable;
+}
+
+bool Compiler::get_remapped_variable_state(VariableID id) const
+{
+	return get<SPIRVariable>(id).remapped_variable;
+}
+
+void Compiler::set_subpass_input_remapped_components(VariableID id, uint32_t components)
+{
+	get<SPIRVariable>(id).remapped_components = components;
+}
+
+uint32_t Compiler::get_subpass_input_remapped_components(VariableID id) const
+{
+	return get<SPIRVariable>(id).remapped_components;
+}
+
+void Compiler::add_implied_read_expression(SPIRExpression &e, uint32_t source)
+{
+	auto itr = find(begin(e.implied_read_expressions), end(e.implied_read_expressions), ID(source));
+	if (itr == end(e.implied_read_expressions))
+		e.implied_read_expressions.push_back(source);
+}
+
+void Compiler::add_implied_read_expression(SPIRAccessChain &e, uint32_t source)
+{
+	auto itr = find(begin(e.implied_read_expressions), end(e.implied_read_expressions), ID(source));
+	if (itr == end(e.implied_read_expressions))
+		e.implied_read_expressions.push_back(source);
+}
+
+void Compiler::inherit_expression_dependencies(uint32_t dst, uint32_t source_expression)
+{
+	// Don't inherit any expression dependencies if the expression in dst
+	// is not a forwarded temporary.
+	if (forwarded_temporaries.find(dst) == end(forwarded_temporaries) ||
+	    forced_temporaries.find(dst) != end(forced_temporaries))
+	{
+		return;
+	}
+
+	auto &e = get<SPIRExpression>(dst);
+	auto *phi = maybe_get<SPIRVariable>(source_expression);
+	if (phi && phi->phi_variable)
+	{
+		// We have used a phi variable, which can change at the end of the block,
+		// so make sure we take a dependency on this phi variable.
+		phi->dependees.push_back(dst);
+	}
+
+	auto *s = maybe_get<SPIRExpression>(source_expression);
+	if (!s)
+		return;
+
+	auto &e_deps = e.expression_dependencies;
+	auto &s_deps = s->expression_dependencies;
+
+	// If we depend on an expression, we also depend on all sub-dependencies from source.
+	e_deps.push_back(source_expression);
+	e_deps.insert(end(e_deps), begin(s_deps), end(s_deps));
+
+	// Eliminate duplicated dependencies.
+	sort(begin(e_deps), end(e_deps));
+	e_deps.erase(unique(begin(e_deps), end(e_deps)), end(e_deps));
+}
+
+SmallVector<EntryPoint> Compiler::get_entry_points_and_stages() const
+{
+	SmallVector<EntryPoint> entries;
+	for (auto &entry : ir.entry_points)
+		entries.push_back({ entry.second.orig_name, entry.second.model });
+	return entries;
+}
+
+void Compiler::rename_entry_point(const std::string &old_name, const std::string &new_name, spv::ExecutionModel model)
+{
+	auto &entry = get_entry_point(old_name, model);
+	entry.orig_name = new_name;
+	entry.name = new_name;
+}
+
+void Compiler::set_entry_point(const std::string &name, spv::ExecutionModel model)
+{
+	auto &entry = get_entry_point(name, model);
+	ir.default_entry_point = entry.self;
+}
+
+SPIREntryPoint &Compiler::get_first_entry_point(const std::string &name)
+{
+	auto itr = find_if(
+	    begin(ir.entry_points), end(ir.entry_points),
+	    [&](const std::pair<uint32_t, SPIREntryPoint> &entry) -> bool { return entry.second.orig_name == name; });
+
+	if (itr == end(ir.entry_points))
+		SPIRV_CROSS_THROW("Entry point does not exist.");
+
+	return itr->second;
+}
+
+const SPIREntryPoint &Compiler::get_first_entry_point(const
std::string &name) const +{ + auto itr = find_if( + begin(ir.entry_points), end(ir.entry_points), + [&](const std::pair &entry) -> bool { return entry.second.orig_name == name; }); + + if (itr == end(ir.entry_points)) + SPIRV_CROSS_THROW("Entry point does not exist."); + + return itr->second; +} + +SPIREntryPoint &Compiler::get_entry_point(const std::string &name, ExecutionModel model) +{ + auto itr = find_if(begin(ir.entry_points), end(ir.entry_points), + [&](const std::pair &entry) -> bool { + return entry.second.orig_name == name && entry.second.model == model; + }); + + if (itr == end(ir.entry_points)) + SPIRV_CROSS_THROW("Entry point does not exist."); + + return itr->second; +} + +const SPIREntryPoint &Compiler::get_entry_point(const std::string &name, ExecutionModel model) const +{ + auto itr = find_if(begin(ir.entry_points), end(ir.entry_points), + [&](const std::pair &entry) -> bool { + return entry.second.orig_name == name && entry.second.model == model; + }); + + if (itr == end(ir.entry_points)) + SPIRV_CROSS_THROW("Entry point does not exist."); + + return itr->second; +} + +const string &Compiler::get_cleansed_entry_point_name(const std::string &name, ExecutionModel model) const +{ + return get_entry_point(name, model).name; +} + +const SPIREntryPoint &Compiler::get_entry_point() const +{ + return ir.entry_points.find(ir.default_entry_point)->second; +} + +SPIREntryPoint &Compiler::get_entry_point() +{ + return ir.entry_points.find(ir.default_entry_point)->second; +} + +bool Compiler::interface_variable_exists_in_entry_point(uint32_t id) const +{ + auto &var = get(id); + if (var.storage != StorageClassInput && var.storage != StorageClassOutput && + var.storage != StorageClassUniformConstant) + SPIRV_CROSS_THROW("Only Input, Output variables and Uniform constants are part of a shader linking interface."); + + + + + + if (ir.entry_points.size() <= 1) + return true; + + auto &execution = get_entry_point(); + return find(begin(execution.interface_variables), end(execution.interface_variables), VariableID(id)) != + end(execution.interface_variables); +} + +void Compiler::CombinedImageSamplerHandler::push_remap_parameters(const SPIRFunction &func, const uint32_t *args, + uint32_t length) +{ + + + unordered_map remapping; + for (uint32_t i = 0; i < length; i++) + remapping[func.arguments[i].id] = remap_parameter(args[i]); + parameter_remapping.push(move(remapping)); +} + +void Compiler::CombinedImageSamplerHandler::pop_remap_parameters() +{ + parameter_remapping.pop(); +} + +uint32_t Compiler::CombinedImageSamplerHandler::remap_parameter(uint32_t id) +{ + auto *var = compiler.maybe_get_backing_variable(id); + if (var) + id = var->self; + + if (parameter_remapping.empty()) + return id; + + auto &remapping = parameter_remapping.top(); + auto itr = remapping.find(id); + if (itr != end(remapping)) + return itr->second; + else + return id; +} + +bool Compiler::CombinedImageSamplerHandler::begin_function_scope(const uint32_t *args, uint32_t length) +{ + if (length < 3) + return false; + + auto &callee = compiler.get(args[2]); + args += 3; + length -= 3; + push_remap_parameters(callee, args, length); + functions.push(&callee); + return true; +} + +bool Compiler::CombinedImageSamplerHandler::end_function_scope(const uint32_t *args, uint32_t length) +{ + if (length < 3) + return false; + + auto &callee = compiler.get(args[2]); + args += 3; + + + + + + + + pop_remap_parameters(); + + + + callee.do_combined_parameters = false; + + auto ¶ms = functions.top()->combined_parameters; + 
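+	// The callee's combined image/sampler parameters are captured above, before the
+	// pop below switches functions.top() over to the caller; the loop that follows
+	// remaps those image/sampler pairs into the caller's argument space.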
functions.pop(); + if (functions.empty()) + return true; + + auto &caller = *functions.top(); + if (caller.do_combined_parameters) + { + for (auto ¶m : params) + { + VariableID image_id = param.global_image ? param.image_id : VariableID(args[param.image_id]); + VariableID sampler_id = param.global_sampler ? param.sampler_id : VariableID(args[param.sampler_id]); + + auto *i = compiler.maybe_get_backing_variable(image_id); + auto *s = compiler.maybe_get_backing_variable(sampler_id); + if (i) + image_id = i->self; + if (s) + sampler_id = s->self; + + register_combined_image_sampler(caller, 0, image_id, sampler_id, param.depth); + } + } + + return true; +} + +void Compiler::CombinedImageSamplerHandler::register_combined_image_sampler(SPIRFunction &caller, + VariableID combined_module_id, + VariableID image_id, VariableID sampler_id, + bool depth) +{ + + + + SPIRFunction::CombinedImageSamplerParameter param = { + 0u, image_id, sampler_id, true, true, depth, + }; + + auto texture_itr = find_if(begin(caller.arguments), end(caller.arguments), + [image_id](const SPIRFunction::Parameter &p) { return p.id == image_id; }); + auto sampler_itr = find_if(begin(caller.arguments), end(caller.arguments), + [sampler_id](const SPIRFunction::Parameter &p) { return p.id == sampler_id; }); + + if (texture_itr != end(caller.arguments)) + { + param.global_image = false; + param.image_id = uint32_t(texture_itr - begin(caller.arguments)); + } + + if (sampler_itr != end(caller.arguments)) + { + param.global_sampler = false; + param.sampler_id = uint32_t(sampler_itr - begin(caller.arguments)); + } + + if (param.global_image && param.global_sampler) + return; + + auto itr = find_if(begin(caller.combined_parameters), end(caller.combined_parameters), + [¶m](const SPIRFunction::CombinedImageSamplerParameter &p) { + return param.image_id == p.image_id && param.sampler_id == p.sampler_id && + param.global_image == p.global_image && param.global_sampler == p.global_sampler; + }); + + if (itr == end(caller.combined_parameters)) + { + uint32_t id = compiler.ir.increase_bound_by(3); + auto type_id = id + 0; + auto ptr_type_id = id + 1; + auto combined_id = id + 2; + auto &base = compiler.expression_type(image_id); + auto &type = compiler.set(type_id); + auto &ptr_type = compiler.set(ptr_type_id); + + type = base; + type.self = type_id; + type.basetype = SPIRType::SampledImage; + type.pointer = false; + type.storage = StorageClassGeneric; + type.image.depth = depth; + + ptr_type = type; + ptr_type.pointer = true; + ptr_type.storage = StorageClassUniformConstant; + ptr_type.parent_type = type_id; + + + compiler.set(combined_id, ptr_type_id, StorageClassFunction, 0); + + + + bool relaxed_precision = + compiler.has_decoration(sampler_id, DecorationRelaxedPrecision) || + compiler.has_decoration(image_id, DecorationRelaxedPrecision) || + (combined_module_id && compiler.has_decoration(combined_module_id, DecorationRelaxedPrecision)); + + if (relaxed_precision) + compiler.set_decoration(combined_id, DecorationRelaxedPrecision); + + param.id = combined_id; + + compiler.set_name(combined_id, + join("SPIRV_Cross_Combined", compiler.to_name(image_id), compiler.to_name(sampler_id))); + + caller.combined_parameters.push_back(param); + caller.shadow_arguments.push_back({ ptr_type_id, combined_id, 0u, 0u, true }); + } +} + +bool Compiler::DummySamplerForCombinedImageHandler::handle(Op opcode, const uint32_t *args, uint32_t length) +{ + if (need_dummy_sampler) + { + + return false; + } + + switch (opcode) + { + case OpLoad: + { + if (length < 
3) + return false; + + uint32_t result_type = args[0]; + + auto &type = compiler.get(result_type); + bool separate_image = + type.basetype == SPIRType::Image && type.image.sampled == 1 && type.image.dim != DimBuffer; + + + if (!separate_image) + return true; + + uint32_t id = args[1]; + uint32_t ptr = args[2]; + compiler.set(id, "", result_type, true); + compiler.register_read(id, ptr, true); + break; + } + + case OpImageFetch: + case OpImageQuerySizeLod: + case OpImageQuerySize: + case OpImageQueryLevels: + case OpImageQuerySamples: + { + + auto *var = compiler.maybe_get_backing_variable(args[2]); + if (var) + { + auto &type = compiler.get(var->basetype); + if (type.basetype == SPIRType::Image && type.image.sampled == 1 && type.image.dim != DimBuffer) + need_dummy_sampler = true; + } + + break; + } + + case OpInBoundsAccessChain: + case OpAccessChain: + case OpPtrAccessChain: + { + if (length < 3) + return false; + + uint32_t result_type = args[0]; + auto &type = compiler.get(result_type); + bool separate_image = + type.basetype == SPIRType::Image && type.image.sampled == 1 && type.image.dim != DimBuffer; + if (!separate_image) + return true; + + uint32_t id = args[1]; + uint32_t ptr = args[2]; + compiler.set(id, "", result_type, true); + compiler.register_read(id, ptr, true); + + + compiler.ir.ids[id].set_allow_type_rewrite(); + break; + } + + default: + break; + } + + return true; +} + +bool Compiler::CombinedImageSamplerHandler::handle(Op opcode, const uint32_t *args, uint32_t length) +{ + + bool is_fetch = false; + + switch (opcode) + { + case OpLoad: + { + if (length < 3) + return false; + + uint32_t result_type = args[0]; + + auto &type = compiler.get(result_type); + bool separate_image = type.basetype == SPIRType::Image && type.image.sampled == 1; + bool separate_sampler = type.basetype == SPIRType::Sampler; + + + if (!separate_image && !separate_sampler) + return true; + + uint32_t id = args[1]; + uint32_t ptr = args[2]; + compiler.set(id, "", result_type, true); + compiler.register_read(id, ptr, true); + return true; + } + + case OpInBoundsAccessChain: + case OpAccessChain: + case OpPtrAccessChain: + { + if (length < 3) + return false; + + + + + + + + uint32_t result_type = args[0]; + + auto &type = compiler.get(result_type); + bool separate_image = type.basetype == SPIRType::Image && type.image.sampled == 1; + bool separate_sampler = type.basetype == SPIRType::Sampler; + if (separate_sampler) + SPIRV_CROSS_THROW( + "Attempting to use arrays or structs of separate samplers. 
This is not possible to statically " + "remap to plain GLSL."); + + if (separate_image) + { + uint32_t id = args[1]; + uint32_t ptr = args[2]; + compiler.set(id, "", result_type, true); + compiler.register_read(id, ptr, true); + } + return true; + } + + case OpImageFetch: + case OpImageQuerySizeLod: + case OpImageQuerySize: + case OpImageQueryLevels: + case OpImageQuerySamples: + { + + auto *var = compiler.maybe_get_backing_variable(args[2]); + if (!var) + return true; + + auto &type = compiler.get(var->basetype); + if (type.basetype == SPIRType::Image && type.image.sampled == 1 && type.image.dim != DimBuffer) + { + if (compiler.dummy_sampler_id == 0) + SPIRV_CROSS_THROW("texelFetch without sampler was found, but no dummy sampler has been created with " + "build_dummy_sampler_for_combined_images()."); + + + is_fetch = true; + break; + } + + return true; + } + + case OpSampledImage: + + break; + + default: + return true; + } + + + + if (!functions.empty()) + { + auto &callee = *functions.top(); + if (callee.do_combined_parameters) + { + uint32_t image_id = args[2]; + + auto *image = compiler.maybe_get_backing_variable(image_id); + if (image) + image_id = image->self; + + uint32_t sampler_id = is_fetch ? compiler.dummy_sampler_id : args[3]; + auto *sampler = compiler.maybe_get_backing_variable(sampler_id); + if (sampler) + sampler_id = sampler->self; + + uint32_t combined_id = args[1]; + + auto &combined_type = compiler.get(args[0]); + register_combined_image_sampler(callee, combined_id, image_id, sampler_id, combined_type.image.depth); + } + } + + + + + + VariableID image_id = remap_parameter(args[2]); + VariableID sampler_id = is_fetch ? compiler.dummy_sampler_id : remap_parameter(args[3]); + + auto itr = find_if(begin(compiler.combined_image_samplers), end(compiler.combined_image_samplers), + [image_id, sampler_id](const CombinedImageSampler &combined) { + return combined.image_id == image_id && combined.sampler_id == sampler_id; + }); + + if (itr == end(compiler.combined_image_samplers)) + { + uint32_t sampled_type; + uint32_t combined_module_id; + if (is_fetch) + { + + sampled_type = compiler.ir.increase_bound_by(1); + auto &type = compiler.set(sampled_type); + type = compiler.expression_type(args[2]); + type.self = sampled_type; + type.basetype = SPIRType::SampledImage; + type.image.depth = false; + combined_module_id = 0; + } + else + { + sampled_type = args[0]; + combined_module_id = args[1]; + } + + auto id = compiler.ir.increase_bound_by(2); + auto type_id = id + 0; + auto combined_id = id + 1; + + + + auto &type = compiler.set(type_id); + auto &base = compiler.get(sampled_type); + type = base; + type.pointer = true; + type.storage = StorageClassUniformConstant; + type.parent_type = type_id; + + + compiler.set(combined_id, type_id, StorageClassUniformConstant, 0); + + + + bool relaxed_precision = + (sampler_id && compiler.has_decoration(sampler_id, DecorationRelaxedPrecision)) || + (image_id && compiler.has_decoration(image_id, DecorationRelaxedPrecision)) || + (combined_module_id && compiler.has_decoration(combined_module_id, DecorationRelaxedPrecision)); + + if (relaxed_precision) + compiler.set_decoration(combined_id, DecorationRelaxedPrecision); + + + auto *var = compiler.maybe_get_backing_variable(image_id); + if (var) + { + auto &parent_type = compiler.get(var->basetype); + type.array = parent_type.array; + type.array_size_literal = parent_type.array_size_literal; + } + + compiler.combined_image_samplers.push_back({ combined_id, image_id, sampler_id }); + } + + return true; 
+} + +VariableID Compiler::build_dummy_sampler_for_combined_images() +{ + DummySamplerForCombinedImageHandler handler(*this); + traverse_all_reachable_opcodes(get(ir.default_entry_point), handler); + if (handler.need_dummy_sampler) + { + uint32_t offset = ir.increase_bound_by(3); + auto type_id = offset + 0; + auto ptr_type_id = offset + 1; + auto var_id = offset + 2; + + SPIRType sampler_type; + auto &sampler = set(type_id); + sampler.basetype = SPIRType::Sampler; + + auto &ptr_sampler = set(ptr_type_id); + ptr_sampler = sampler; + ptr_sampler.self = type_id; + ptr_sampler.storage = StorageClassUniformConstant; + ptr_sampler.pointer = true; + ptr_sampler.parent_type = type_id; + + set(var_id, ptr_type_id, StorageClassUniformConstant, 0); + set_name(var_id, "SPIRV_Cross_DummySampler"); + dummy_sampler_id = var_id; + return var_id; + } + else + return 0; +} + +void Compiler::build_combined_image_samplers() +{ + ir.for_each_typed_id([&](uint32_t, SPIRFunction &func) { + func.combined_parameters.clear(); + func.shadow_arguments.clear(); + func.do_combined_parameters = true; + }); + + combined_image_samplers.clear(); + CombinedImageSamplerHandler handler(*this); + traverse_all_reachable_opcodes(get(ir.default_entry_point), handler); +} + +SmallVector Compiler::get_specialization_constants() const +{ + SmallVector spec_consts; + ir.for_each_typed_id([&](uint32_t, const SPIRConstant &c) { + if (c.specialization && has_decoration(c.self, DecorationSpecId)) + spec_consts.push_back({ c.self, get_decoration(c.self, DecorationSpecId) }); + }); + return spec_consts; +} + +SPIRConstant &Compiler::get_constant(ConstantID id) +{ + return get(id); +} + +const SPIRConstant &Compiler::get_constant(ConstantID id) const +{ + return get(id); +} + +static bool exists_unaccessed_path_to_return(const CFG &cfg, uint32_t block, const unordered_set &blocks) +{ + + if (blocks.find(block) != end(blocks)) + return false; + + + if (cfg.get_succeeding_edges(block).empty()) + return true; + + + for (auto &succ : cfg.get_succeeding_edges(block)) + if (exists_unaccessed_path_to_return(cfg, succ, blocks)) + return true; + + return false; +} + +void Compiler::analyze_parameter_preservation( + SPIRFunction &entry, const CFG &cfg, const unordered_map> &variable_to_blocks, + const unordered_map> &complete_write_blocks) +{ + for (auto &arg : entry.arguments) + { + + auto &type = get(arg.type); + if (!type.pointer) + continue; + + + bool potential_preserve; + switch (type.basetype) + { + case SPIRType::Sampler: + case SPIRType::Image: + case SPIRType::SampledImage: + case SPIRType::AtomicCounter: + potential_preserve = false; + break; + + default: + potential_preserve = true; + break; + } + + if (!potential_preserve) + continue; + + auto itr = variable_to_blocks.find(arg.id); + if (itr == end(variable_to_blocks)) + { + + continue; + } + + + + itr = complete_write_blocks.find(arg.id); + if (itr == end(complete_write_blocks)) + { + arg.read_count++; + continue; + } + + + + + + + + if (exists_unaccessed_path_to_return(cfg, entry.entry_block, itr->second)) + arg.read_count++; + } +} + +Compiler::AnalyzeVariableScopeAccessHandler::AnalyzeVariableScopeAccessHandler(Compiler &compiler_, + SPIRFunction &entry_) + : compiler(compiler_) + , entry(entry_) +{ +} + +bool Compiler::AnalyzeVariableScopeAccessHandler::follow_function_call(const SPIRFunction &) +{ + + return false; +} + +void Compiler::AnalyzeVariableScopeAccessHandler::set_current_block(const SPIRBlock &block) +{ + current_block = █ + + + + + + const auto test_phi = [this, 
&block](uint32_t to) { + auto &next = compiler.get(to); + for (auto &phi : next.phi_variables) + { + if (phi.parent == block.self) + { + accessed_variables_to_block[phi.function_variable].insert(block.self); + + accessed_variables_to_block[phi.function_variable].insert(next.self); + + notify_variable_access(phi.local_variable, block.self); + } + } + }; + + switch (block.terminator) + { + case SPIRBlock::Direct: + notify_variable_access(block.condition, block.self); + test_phi(block.next_block); + break; + + case SPIRBlock::Select: + notify_variable_access(block.condition, block.self); + test_phi(block.true_block); + test_phi(block.false_block); + break; + + case SPIRBlock::MultiSelect: + notify_variable_access(block.condition, block.self); + for (auto &target : block.cases) + test_phi(target.block); + if (block.default_block) + test_phi(block.default_block); + break; + + default: + break; + } +} + +void Compiler::AnalyzeVariableScopeAccessHandler::notify_variable_access(uint32_t id, uint32_t block) +{ + if (id == 0) + return; + + + auto itr = access_chain_children.find(id); + if (itr != end(access_chain_children)) + for (auto child_id : itr->second) + notify_variable_access(child_id, block); + + if (id_is_phi_variable(id)) + accessed_variables_to_block[id].insert(block); + else if (id_is_potential_temporary(id)) + accessed_temporaries_to_block[id].insert(block); +} + +bool Compiler::AnalyzeVariableScopeAccessHandler::id_is_phi_variable(uint32_t id) const +{ + if (id >= compiler.get_current_id_bound()) + return false; + auto *var = compiler.maybe_get(id); + return var && var->phi_variable; +} + +bool Compiler::AnalyzeVariableScopeAccessHandler::id_is_potential_temporary(uint32_t id) const +{ + if (id >= compiler.get_current_id_bound()) + return false; + + + return compiler.ir.ids[id].empty() || (compiler.ir.ids[id].get_type() == TypeExpression); +} + +bool Compiler::AnalyzeVariableScopeAccessHandler::handle(spv::Op op, const uint32_t *args, uint32_t length) +{ + + uint32_t result_type, result_id; + if (compiler.instruction_to_result_type(result_type, result_id, op, args, length)) + result_id_to_type[result_id] = result_type; + + switch (op) + { + case OpStore: + { + if (length < 2) + return false; + + ID ptr = args[0]; + auto *var = compiler.maybe_get_backing_variable(ptr); + + + if (var) + { + accessed_variables_to_block[var->self].insert(current_block->self); + if (var->self == ptr) + complete_write_variables_to_block[var->self].insert(current_block->self); + else + partial_write_variables_to_block[var->self].insert(current_block->self); + } + + + notify_variable_access(args[0], current_block->self); + + notify_variable_access(args[1], current_block->self); + break; + } + + case OpAccessChain: + case OpInBoundsAccessChain: + case OpPtrAccessChain: + { + if (length < 3) + return false; + + + uint32_t ptr = args[2]; + auto *var = compiler.maybe_get(ptr); + if (var) + { + accessed_variables_to_block[var->self].insert(current_block->self); + access_chain_children[args[1]].insert(var->self); + } + + + for (uint32_t i = 2; i < length; i++) + { + notify_variable_access(args[i], current_block->self); + access_chain_children[args[1]].insert(args[i]); + } + + + + + + notify_variable_access(args[1], current_block->self); + + + auto &e = compiler.set(args[1], "", args[0], true); + auto *backing_variable = compiler.maybe_get_backing_variable(ptr); + e.loaded_from = backing_variable ? 
VariableID(backing_variable->self) : VariableID(0); + + + compiler.ir.ids[args[1]].set_allow_type_rewrite(); + access_chain_expressions.insert(args[1]); + break; + } + + case OpCopyMemory: + { + if (length < 2) + return false; + + ID lhs = args[0]; + ID rhs = args[1]; + auto *var = compiler.maybe_get_backing_variable(lhs); + + + if (var) + { + accessed_variables_to_block[var->self].insert(current_block->self); + if (var->self == lhs) + complete_write_variables_to_block[var->self].insert(current_block->self); + else + partial_write_variables_to_block[var->self].insert(current_block->self); + } + + + for (uint32_t i = 0; i < 2; i++) + notify_variable_access(args[i], current_block->self); + + var = compiler.maybe_get_backing_variable(rhs); + if (var) + accessed_variables_to_block[var->self].insert(current_block->self); + break; + } + + case OpCopyObject: + { + if (length < 3) + return false; + + auto *var = compiler.maybe_get_backing_variable(args[2]); + if (var) + accessed_variables_to_block[var->self].insert(current_block->self); + + + notify_variable_access(args[1], current_block->self); + if (access_chain_expressions.count(args[2])) + access_chain_expressions.insert(args[1]); + + + notify_variable_access(args[2], current_block->self); + break; + } + + case OpLoad: + { + if (length < 3) + return false; + uint32_t ptr = args[2]; + auto *var = compiler.maybe_get_backing_variable(ptr); + if (var) + accessed_variables_to_block[var->self].insert(current_block->self); + + + notify_variable_access(args[1], current_block->self); + + + notify_variable_access(args[2], current_block->self); + break; + } + + case OpFunctionCall: + { + if (length < 3) + return false; + + + if (compiler.get_type(args[0]).basetype != SPIRType::Void) + notify_variable_access(args[1], current_block->self); + + length -= 3; + args += 3; + + for (uint32_t i = 0; i < length; i++) + { + auto *var = compiler.maybe_get_backing_variable(args[i]); + if (var) + { + accessed_variables_to_block[var->self].insert(current_block->self); + + partial_write_variables_to_block[var->self].insert(current_block->self); + } + + + + + + + notify_variable_access(args[i], current_block->self); + } + break; + } + + case OpExtInst: + { + for (uint32_t i = 4; i < length; i++) + notify_variable_access(args[i], current_block->self); + notify_variable_access(args[1], current_block->self); + break; + } + + case OpArrayLength: + case OpLine: + case OpNoLine: + + break; + + + + + case OpCompositeInsert: + case OpVectorShuffle: + + for (uint32_t i = 1; i < 4; i++) + notify_variable_access(args[i], current_block->self); + break; + + case OpCompositeExtract: + + for (uint32_t i = 1; i < 3; i++) + notify_variable_access(args[i], current_block->self); + break; + + case OpImageWrite: + for (uint32_t i = 0; i < length; i++) + { + + if (i != 3) + notify_variable_access(args[i], current_block->self); + } + break; + + case OpImageSampleImplicitLod: + case OpImageSampleExplicitLod: + case OpImageSparseSampleImplicitLod: + case OpImageSparseSampleExplicitLod: + case OpImageSampleProjImplicitLod: + case OpImageSampleProjExplicitLod: + case OpImageSparseSampleProjImplicitLod: + case OpImageSparseSampleProjExplicitLod: + case OpImageFetch: + case OpImageSparseFetch: + case OpImageRead: + case OpImageSparseRead: + for (uint32_t i = 1; i < length; i++) + { + + if (i != 4) + notify_variable_access(args[i], current_block->self); + } + break; + + case OpImageSampleDrefImplicitLod: + case OpImageSampleDrefExplicitLod: + case OpImageSparseSampleDrefImplicitLod: + case 
OpImageSparseSampleDrefExplicitLod: + case OpImageSampleProjDrefImplicitLod: + case OpImageSampleProjDrefExplicitLod: + case OpImageSparseSampleProjDrefImplicitLod: + case OpImageSparseSampleProjDrefExplicitLod: + case OpImageGather: + case OpImageSparseGather: + case OpImageDrefGather: + case OpImageSparseDrefGather: + for (uint32_t i = 1; i < length; i++) + { + + if (i != 5) + notify_variable_access(args[i], current_block->self); + } + break; + + default: + { + + + + + + + for (uint32_t i = 0; i < length; i++) + notify_variable_access(args[i], current_block->self); + break; + } + } + return true; +} + +Compiler::StaticExpressionAccessHandler::StaticExpressionAccessHandler(Compiler &compiler_, uint32_t variable_id_) + : compiler(compiler_) + , variable_id(variable_id_) +{ +} + +bool Compiler::StaticExpressionAccessHandler::follow_function_call(const SPIRFunction &) +{ + return false; +} + +bool Compiler::StaticExpressionAccessHandler::handle(spv::Op op, const uint32_t *args, uint32_t length) +{ + switch (op) + { + case OpStore: + if (length < 2) + return false; + if (args[0] == variable_id) + { + static_expression = args[1]; + write_count++; + } + break; + + case OpLoad: + if (length < 3) + return false; + if (args[2] == variable_id && static_expression == 0) + return false; + break; + + case OpAccessChain: + case OpInBoundsAccessChain: + case OpPtrAccessChain: + if (length < 3) + return false; + if (args[2] == variable_id) + return false; + break; + + default: + break; + } + + return true; +} + +void Compiler::find_function_local_luts(SPIRFunction &entry, const AnalyzeVariableScopeAccessHandler &handler, + bool single_function) +{ + auto &cfg = *function_cfgs.find(entry.self)->second; + + + for (auto &accessed_var : handler.accessed_variables_to_block) + { + auto &blocks = accessed_var.second; + auto &var = get(accessed_var.first); + auto &type = expression_type(accessed_var.first); + + + + + bool allow_lut = var.storage == StorageClassFunction || (single_function && var.storage == StorageClassPrivate); + if (!allow_lut) + continue; + + + if (var.phi_variable) + continue; + + + if (type.array.empty()) + continue; + + + uint32_t static_constant_expression = 0; + if (var.initializer) + { + if (ir.ids[var.initializer].get_type() != TypeConstant) + continue; + static_constant_expression = var.initializer; + + + if (handler.complete_write_variables_to_block.count(var.self) != 0 || + handler.partial_write_variables_to_block.count(var.self) != 0) + continue; + } + else + { + + + + if (handler.partial_write_variables_to_block.count(var.self) != 0) + continue; + + auto itr = handler.complete_write_variables_to_block.find(var.self); + + + if (itr == end(handler.complete_write_variables_to_block)) + continue; + + + auto &write_blocks = itr->second; + if (write_blocks.size() != 1) + continue; + + + DominatorBuilder builder(cfg); + for (auto &block : blocks) + builder.add_block(block); + uint32_t dominator = builder.get_dominator(); + + + if (write_blocks.count(dominator) == 0) + continue; + + + StaticExpressionAccessHandler static_expression_handler(*this, var.self); + traverse_all_reachable_opcodes(get(dominator), static_expression_handler); + + + if (static_expression_handler.write_count != 1 || static_expression_handler.static_expression == 0) + continue; + + + if (ir.ids[static_expression_handler.static_expression].get_type() != TypeConstant) + continue; + + + static_constant_expression = static_expression_handler.static_expression; + } + + get(static_constant_expression).is_used_as_lut = true; 
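+		// At this point the variable is provably written exactly once, with a
+		// constant expression, in the block dominating all accesses, so it can be
+		// promoted to a lookup table (LUT) instead of a mutable local array.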
+ var.static_expression = static_constant_expression; + var.statically_assigned = true; + var.remapped_variable = true; + } +} + +void Compiler::analyze_variable_scope(SPIRFunction &entry, AnalyzeVariableScopeAccessHandler &handler) +{ + + + traverse_all_reachable_opcodes(entry, handler); + + auto &cfg = *function_cfgs.find(entry.self)->second; + + + analyze_parameter_preservation(entry, cfg, handler.accessed_variables_to_block, + handler.complete_write_variables_to_block); + + unordered_map potential_loop_variables; + + + for (auto &block_id : entry.blocks) + { + auto &block = get(block_id); + + auto itr = ir.continue_block_to_loop_header.find(block_id); + if (itr != end(ir.continue_block_to_loop_header) && itr->second != block_id) + { + + + block.loop_dominator = itr->second; + } + else + { + uint32_t loop_dominator = cfg.find_loop_dominator(block_id); + if (loop_dominator != block_id) + block.loop_dominator = loop_dominator; + else + block.loop_dominator = SPIRBlock::NoDominator; + } + } + + + for (auto &var : handler.accessed_variables_to_block) + { + + if (find(begin(entry.local_variables), end(entry.local_variables), VariableID(var.first)) == + end(entry.local_variables)) + continue; + + DominatorBuilder builder(cfg); + auto &blocks = var.second; + auto &type = expression_type(var.first); + + + for (auto &block : blocks) + { + + + if (is_continue(block)) + { + + + + + + + builder.add_block(ir.continue_block_to_loop_header[block]); + + + if (type.vecsize == 1 && type.columns == 1 && type.basetype != SPIRType::Struct && type.array.empty()) + { + + + auto &potential = potential_loop_variables[var.first]; + + if (potential == 0) + potential = block; + else + potential = ~(0u); + } + } + builder.add_block(block); + } + + builder.lift_continue_block_dominator(); + + + BlockID dominating_block = builder.get_dominator(); + + + + + + + + if (dominating_block) + { + auto &variable = get(var.first); + if (!variable.phi_variable) + { + auto *block = &get(dominating_block); + bool preserve = may_read_undefined_variable_in_block(*block, var.first); + if (preserve) + { + + while (block->loop_dominator != BlockID(SPIRBlock::NoDominator)) + block = &get(block->loop_dominator); + + if (block->self != dominating_block) + { + builder.add_block(block->self); + dominating_block = builder.get_dominator(); + } + } + } + } + + + + if (dominating_block) + { + auto &block = get(dominating_block); + block.dominated_variables.push_back(var.first); + get(var.first).dominator = dominating_block; + } + } + + for (auto &var : handler.accessed_temporaries_to_block) + { + auto itr = handler.result_id_to_type.find(var.first); + + if (itr == end(handler.result_id_to_type)) + { + + + continue; + } + + + auto &type = get(itr->second); + if (type_is_opaque_value(type)) + continue; + + DominatorBuilder builder(cfg); + bool force_temporary = false; + bool used_in_header_hoisted_continue_block = false; + + + auto &blocks = var.second; + for (auto &block : blocks) + { + builder.add_block(block); + + if (blocks.size() != 1 && is_continue(block)) + { + + + + auto &loop_header_block = get(ir.continue_block_to_loop_header[block]); + assert(loop_header_block.merge == SPIRBlock::MergeLoop); + builder.add_block(loop_header_block.self); + used_in_header_hoisted_continue_block = true; + } + } + + uint32_t dominating_block = builder.get_dominator(); + + if (blocks.size() != 1 && is_single_block_loop(dominating_block)) + { + + + force_temporary = true; + } + + if (dominating_block) + { + + + bool first_use_is_dominator = 
blocks.count(dominating_block) != 0; + + if (!first_use_is_dominator || force_temporary) + { + if (handler.access_chain_expressions.count(var.first)) + { + + + + + + + if (used_in_header_hoisted_continue_block) + { + + + + + + + auto &loop_header_block = get(dominating_block); + assert(loop_header_block.merge == SPIRBlock::MergeLoop); + loop_header_block.complex_continue = true; + } + } + else + { + + + + hoisted_temporaries.insert(var.first); + forced_temporaries.insert(var.first); + + auto &block_temporaries = get(dominating_block).declare_temporary; + block_temporaries.emplace_back(handler.result_id_to_type[var.first], var.first); + } + } + else if (blocks.size() > 1) + { + + + + + + auto &block_temporaries = get(dominating_block).potential_declare_temporary; + block_temporaries.emplace_back(handler.result_id_to_type[var.first], var.first); + } + } + } + + unordered_set seen_blocks; + + + for (auto &loop_variable : potential_loop_variables) + { + auto &var = get(loop_variable.first); + auto dominator = var.dominator; + BlockID block = loop_variable.second; + + + if (block == BlockID(~(0u)) || block == BlockID(0)) + continue; + + + if (dominator == ID(0)) + continue; + + BlockID header = 0; + + + { + auto itr = ir.continue_block_to_loop_header.find(block); + if (itr != end(ir.continue_block_to_loop_header)) + { + header = itr->second; + } + else if (get(block).continue_block == block) + { + + header = block; + } + } + + assert(header); + auto &header_block = get(header); + auto &blocks = handler.accessed_variables_to_block[loop_variable.first]; + + + bool has_accessed_variable = blocks.count(header) != 0; + + + + + + + + bool static_loop_init = true; + while (dominator != header) + { + if (blocks.count(dominator) != 0) + has_accessed_variable = true; + + auto &succ = cfg.get_succeeding_edges(dominator); + if (succ.size() != 1) + { + static_loop_init = false; + break; + } + + auto &pred = cfg.get_preceding_edges(succ.front()); + if (pred.size() != 1 || pred.front() != dominator) + { + static_loop_init = false; + break; + } + + dominator = succ.front(); + } + + if (!static_loop_init || !has_accessed_variable) + continue; + + + + + seen_blocks.clear(); + cfg.walk_from(seen_blocks, header_block.merge_block, [&](uint32_t walk_block) -> bool { + + if (blocks.find(walk_block) != end(blocks)) + static_loop_init = false; + return true; + }); + + if (!static_loop_init) + continue; + + + header_block.loop_variables.push_back(loop_variable.first); + + + sort(begin(header_block.loop_variables), end(header_block.loop_variables)); + get(loop_variable.first).loop_variable = true; + } +} + +bool Compiler::may_read_undefined_variable_in_block(const SPIRBlock &block, uint32_t var) +{ + for (auto &op : block.ops) + { + auto *ops = stream(op); + switch (op.op) + { + case OpStore: + case OpCopyMemory: + if (ops[0] == var) + return false; + break; + + case OpAccessChain: + case OpInBoundsAccessChain: + case OpPtrAccessChain: + + + + if (ops[2] == var) + return true; + break; + + case OpSelect: + + + if (ops[3] == var || ops[4] == var) + return true; + break; + + case OpPhi: + { + + + if (op.length < 2) + break; + + uint32_t count = op.length - 2; + for (uint32_t i = 0; i < count; i += 2) + if (ops[i + 2] == var) + return true; + break; + } + + case OpCopyObject: + case OpLoad: + if (ops[2] == var) + return true; + break; + + case OpFunctionCall: + { + if (op.length < 3) + break; + + + uint32_t count = op.length - 3; + for (uint32_t i = 0; i < count; i++) + if (ops[i + 3] == var) + return true; + break; + } + + 
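+		// Any other opcode proves neither a complete write nor a read of the
+		// variable in this block; keep scanning the remaining instructions and
+		// stay conservative if nothing definitive is found.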
default: + break; + } + } + + + + return true; +} + +Bitset Compiler::get_buffer_block_flags(VariableID id) const +{ + return ir.get_buffer_block_flags(get(id)); +} + +bool Compiler::get_common_basic_type(const SPIRType &type, SPIRType::BaseType &base_type) +{ + if (type.basetype == SPIRType::Struct) + { + base_type = SPIRType::Unknown; + for (auto &member_type : type.member_types) + { + SPIRType::BaseType member_base; + if (!get_common_basic_type(get(member_type), member_base)) + return false; + + if (base_type == SPIRType::Unknown) + base_type = member_base; + else if (base_type != member_base) + return false; + } + return true; + } + else + { + base_type = type.basetype; + return true; + } +} + +void Compiler::ActiveBuiltinHandler::handle_builtin(const SPIRType &type, BuiltIn builtin, + const Bitset &decoration_flags) +{ + + + if (builtin == BuiltInClipDistance) + { + if (!type.array_size_literal[0]) + SPIRV_CROSS_THROW("Array size for ClipDistance must be a literal."); + uint32_t array_size = type.array[0]; + if (array_size == 0) + SPIRV_CROSS_THROW("Array size for ClipDistance must not be unsized."); + compiler.clip_distance_count = array_size; + } + else if (builtin == BuiltInCullDistance) + { + if (!type.array_size_literal[0]) + SPIRV_CROSS_THROW("Array size for CullDistance must be a literal."); + uint32_t array_size = type.array[0]; + if (array_size == 0) + SPIRV_CROSS_THROW("Array size for CullDistance must not be unsized."); + compiler.cull_distance_count = array_size; + } + else if (builtin == BuiltInPosition) + { + if (decoration_flags.get(DecorationInvariant)) + compiler.position_invariant = true; + } +} + +bool Compiler::ActiveBuiltinHandler::handle(spv::Op opcode, const uint32_t *args, uint32_t length) +{ + const auto add_if_builtin = [&](uint32_t id) { + + + auto *var = compiler.maybe_get(id); + auto &decorations = compiler.ir.meta[id].decoration; + if (var && decorations.builtin) + { + auto &type = compiler.get(var->basetype); + auto &flags = + type.storage == StorageClassInput ? compiler.active_input_builtins : compiler.active_output_builtins; + flags.set(decorations.builtin_type); + handle_builtin(type, decorations.builtin_type, decorations.decoration_flags); + } + }; + + switch (opcode) + { + case OpStore: + if (length < 1) + return false; + + add_if_builtin(args[0]); + break; + + case OpCopyMemory: + if (length < 2) + return false; + + add_if_builtin(args[0]); + add_if_builtin(args[1]); + break; + + case OpCopyObject: + case OpLoad: + if (length < 3) + return false; + + add_if_builtin(args[2]); + break; + + case OpSelect: + if (length < 5) + return false; + + add_if_builtin(args[3]); + add_if_builtin(args[4]); + break; + + case OpPhi: + { + if (length < 2) + return false; + + uint32_t count = length - 2; + args += 2; + for (uint32_t i = 0; i < count; i += 2) + add_if_builtin(args[i]); + break; + } + + case OpFunctionCall: + { + if (length < 3) + return false; + + uint32_t count = length - 3; + args += 3; + for (uint32_t i = 0; i < count; i++) + add_if_builtin(args[i]); + break; + } + + case OpAccessChain: + case OpInBoundsAccessChain: + case OpPtrAccessChain: + { + if (length < 4) + return false; + + + + auto *var = compiler.maybe_get(args[2]); + if (!var) + break; + + + add_if_builtin(args[2]); + + + auto *type = &compiler.get_variable_data_type(*var); + + auto &flags = + var->storage == StorageClassInput ? 
compiler.active_input_builtins : compiler.active_output_builtins; + + uint32_t count = length - 3; + args += 3; + for (uint32_t i = 0; i < count; i++) + { + + if (opcode == OpPtrAccessChain && i == 0) + { + type = &compiler.get(type->parent_type); + continue; + } + + + if (!type->array.empty()) + { + type = &compiler.get(type->parent_type); + } + + else if (type->basetype == SPIRType::Struct) + { + uint32_t index = compiler.get(args[i]).scalar(); + + if (index < uint32_t(compiler.ir.meta[type->self].members.size())) + { + auto &decorations = compiler.ir.meta[type->self].members[index]; + if (decorations.builtin) + { + flags.set(decorations.builtin_type); + handle_builtin(compiler.get(type->member_types[index]), decorations.builtin_type, + decorations.decoration_flags); + } + } + + type = &compiler.get(type->member_types[index]); + } + else + { + + break; + } + } + break; + } + + default: + break; + } + + return true; +} + +void Compiler::update_active_builtins() +{ + active_input_builtins.reset(); + active_output_builtins.reset(); + cull_distance_count = 0; + clip_distance_count = 0; + ActiveBuiltinHandler handler(*this); + traverse_all_reachable_opcodes(get(ir.default_entry_point), handler); +} + + +bool Compiler::has_active_builtin(BuiltIn builtin, StorageClass storage) +{ + const Bitset *flags; + switch (storage) + { + case StorageClassInput: + flags = &active_input_builtins; + break; + case StorageClassOutput: + flags = &active_output_builtins; + break; + + default: + return false; + } + return flags->get(builtin); +} + +void Compiler::analyze_image_and_sampler_usage() +{ + CombinedImageSamplerDrefHandler dref_handler(*this); + traverse_all_reachable_opcodes(get(ir.default_entry_point), dref_handler); + + CombinedImageSamplerUsageHandler handler(*this, dref_handler.dref_combined_samplers); + traverse_all_reachable_opcodes(get(ir.default_entry_point), handler); + + + + + handler.dependency_hierarchy.clear(); + traverse_all_reachable_opcodes(get(ir.default_entry_point), handler); + + comparison_ids = move(handler.comparison_ids); + need_subpass_input = handler.need_subpass_input; + + + for (auto &combined : combined_image_samplers) + if (comparison_ids.count(combined.sampler_id)) + comparison_ids.insert(combined.combined_id); +} + +bool Compiler::CombinedImageSamplerDrefHandler::handle(spv::Op opcode, const uint32_t *args, uint32_t) +{ + + switch (opcode) + { + case OpImageSampleDrefExplicitLod: + case OpImageSampleDrefImplicitLod: + case OpImageSampleProjDrefExplicitLod: + case OpImageSampleProjDrefImplicitLod: + case OpImageSparseSampleProjDrefImplicitLod: + case OpImageSparseSampleDrefImplicitLod: + case OpImageSparseSampleProjDrefExplicitLod: + case OpImageSparseSampleDrefExplicitLod: + case OpImageDrefGather: + case OpImageSparseDrefGather: + dref_combined_samplers.insert(args[2]); + return true; + + default: + break; + } + + return true; +} + +const CFG &Compiler::get_cfg_for_current_function() const +{ + assert(current_function); + return get_cfg_for_function(current_function->self); +} + +const CFG &Compiler::get_cfg_for_function(uint32_t id) const +{ + auto cfg_itr = function_cfgs.find(id); + assert(cfg_itr != end(function_cfgs)); + assert(cfg_itr->second); + return *cfg_itr->second; +} + +void Compiler::build_function_control_flow_graphs_and_analyze() +{ + CFGBuilder handler(*this); + handler.function_cfgs[ir.default_entry_point].reset(new CFG(*this, get(ir.default_entry_point))); + traverse_all_reachable_opcodes(get(ir.default_entry_point), handler); + function_cfgs = 
move(handler.function_cfgs); + bool single_function = function_cfgs.size() <= 1; + + for (auto &f : function_cfgs) + { + auto &func = get(f.first); + AnalyzeVariableScopeAccessHandler scope_handler(*this, func); + analyze_variable_scope(func, scope_handler); + find_function_local_luts(func, scope_handler, single_function); + + + + for (auto block : func.blocks) + { + auto &b = get(block); + if (b.loop_variables.size() < 2) + continue; + + auto &flags = get_decoration_bitset(b.loop_variables.front()); + uint32_t type = get(b.loop_variables.front()).basetype; + bool invalid_initializers = false; + for (auto loop_variable : b.loop_variables) + { + if (flags != get_decoration_bitset(loop_variable) || + type != get(b.loop_variables.front()).basetype) + { + invalid_initializers = true; + break; + } + } + + if (invalid_initializers) + { + for (auto loop_variable : b.loop_variables) + get(loop_variable).loop_variable = false; + b.loop_variables.clear(); + } + } + } +} + +Compiler::CFGBuilder::CFGBuilder(Compiler &compiler_) + : compiler(compiler_) +{ +} + +bool Compiler::CFGBuilder::handle(spv::Op, const uint32_t *, uint32_t) +{ + return true; +} + +bool Compiler::CFGBuilder::follow_function_call(const SPIRFunction &func) +{ + if (function_cfgs.find(func.self) == end(function_cfgs)) + { + function_cfgs[func.self].reset(new CFG(compiler, func)); + return true; + } + else + return false; +} + +void Compiler::CombinedImageSamplerUsageHandler::add_dependency(uint32_t dst, uint32_t src) +{ + dependency_hierarchy[dst].insert(src); + + if (comparison_ids.count(src)) + comparison_ids.insert(dst); +} + +bool Compiler::CombinedImageSamplerUsageHandler::begin_function_scope(const uint32_t *args, uint32_t length) +{ + if (length < 3) + return false; + + auto &func = compiler.get(args[2]); + const auto *arg = &args[3]; + length -= 3; + + for (uint32_t i = 0; i < length; i++) + { + auto &argument = func.arguments[i]; + add_dependency(argument.id, arg[i]); + } + + return true; +} + +void Compiler::CombinedImageSamplerUsageHandler::add_hierarchy_to_comparison_ids(uint32_t id) +{ + + comparison_ids.insert(id); + + for (auto &dep_id : dependency_hierarchy[id]) + add_hierarchy_to_comparison_ids(dep_id); +} + +bool Compiler::CombinedImageSamplerUsageHandler::handle(Op opcode, const uint32_t *args, uint32_t length) +{ + switch (opcode) + { + case OpAccessChain: + case OpInBoundsAccessChain: + case OpPtrAccessChain: + case OpLoad: + { + if (length < 3) + return false; + + add_dependency(args[1], args[2]); + + + + auto &type = compiler.get(args[0]); + if (type.image.dim == DimSubpassData) + need_subpass_input = true; + + + if (dref_combined_samplers.count(args[1]) != 0) + add_hierarchy_to_comparison_ids(args[1]); + break; + } + + case OpSampledImage: + { + if (length < 4) + return false; + + uint32_t result_type = args[0]; + uint32_t result_id = args[1]; + auto &type = compiler.get(result_type); + + + + uint32_t image = args[2]; + uint32_t sampler = args[3]; + + if (type.image.depth || dref_combined_samplers.count(result_id) != 0) + { + add_hierarchy_to_comparison_ids(image); + + + add_hierarchy_to_comparison_ids(sampler); + + + comparison_ids.insert(result_id); + } + return true; + } + + default: + break; + } + + return true; +} + +bool Compiler::buffer_is_hlsl_counter_buffer(VariableID id) const +{ + auto *m = ir.find_meta(id); + return m && m->hlsl_is_magic_counter_buffer; +} + +bool Compiler::buffer_get_hlsl_counter_buffer(VariableID id, uint32_t &counter_id) const +{ + auto *m = ir.find_meta(id); + + + if (m && 
m->hlsl_magic_counter_buffer != 0) + { + counter_id = m->hlsl_magic_counter_buffer; + return true; + } + else + return false; +} + +void Compiler::make_constant_null(uint32_t id, uint32_t type) +{ + auto &constant_type = get(type); + + if (constant_type.pointer) + { + auto &constant = set(id, type); + constant.make_null(constant_type); + } + else if (!constant_type.array.empty()) + { + assert(constant_type.parent_type); + uint32_t parent_id = ir.increase_bound_by(1); + make_constant_null(parent_id, constant_type.parent_type); + + if (!constant_type.array_size_literal.back()) + SPIRV_CROSS_THROW("Array size of OpConstantNull must be a literal."); + + SmallVector elements(constant_type.array.back()); + for (uint32_t i = 0; i < constant_type.array.back(); i++) + elements[i] = parent_id; + set(id, type, elements.data(), uint32_t(elements.size()), false); + } + else if (!constant_type.member_types.empty()) + { + uint32_t member_ids = ir.increase_bound_by(uint32_t(constant_type.member_types.size())); + SmallVector elements(constant_type.member_types.size()); + for (uint32_t i = 0; i < constant_type.member_types.size(); i++) + { + make_constant_null(member_ids + i, constant_type.member_types[i]); + elements[i] = member_ids + i; + } + set(id, type, elements.data(), uint32_t(elements.size()), false); + } + else + { + auto &constant = set(id, type); + constant.make_null(constant_type); + } +} + +const SmallVector &Compiler::get_declared_capabilities() const +{ + return ir.declared_capabilities; +} + +const SmallVector &Compiler::get_declared_extensions() const +{ + return ir.declared_extensions; +} + +std::string Compiler::get_remapped_declared_block_name(VariableID id) const +{ + return get_remapped_declared_block_name(id, false); +} + +std::string Compiler::get_remapped_declared_block_name(uint32_t id, bool fallback_prefer_instance_name) const +{ + auto itr = declared_block_names.find(id); + if (itr != end(declared_block_names)) + { + return itr->second; + } + else + { + auto &var = get(id); + + if (fallback_prefer_instance_name) + { + return to_name(var.self); + } + else + { + auto &type = get(var.basetype); + auto *type_meta = ir.find_meta(type.self); + auto *block_name = type_meta ? &type_meta->decoration.alias : nullptr; + return (!block_name || block_name->empty()) ? 
get_block_fallback_name(id) : *block_name;
+		}
+	}
+}
+
+bool Compiler::reflection_ssbo_instance_name_is_significant() const
+{
+	if (ir.source.known)
+	{
+		// With a known source language, only HLSL routinely reuses one block type
+		// across distinct SSBOs, so only there is the instance name significant.
+		return ir.source.hlsl;
+	}
+
+	unordered_set<uint32_t> ssbo_type_ids;
+	bool aliased_ssbo_types = false;
+
+	// Without source language information, fall back to a heuristic:
+	// look for distinct SSBO variables which share the same block type.
+	ir.for_each_typed_id<SPIRVariable>([&](uint32_t, const SPIRVariable &var) {
+		auto &type = this->get<SPIRType>(var.basetype);
+		if (!type.pointer || var.storage == StorageClassFunction)
+			return;
+
+		bool ssbo = var.storage == StorageClassStorageBuffer ||
+		            (var.storage == StorageClassUniform && has_decoration(type.self, DecorationBufferBlock));
+
+		if (ssbo)
+		{
+			if (ssbo_type_ids.count(type.self))
+				aliased_ssbo_types = true;
+			else
+				ssbo_type_ids.insert(type.self);
+		}
+	});
+
+	// If the same block type is aliased by multiple SSBOs, instance names matter.
+	return aliased_ssbo_types;
+}
+
+bool Compiler::instruction_to_result_type(uint32_t &result_type, uint32_t &result_id, spv::Op op, const uint32_t *args,
+                                          uint32_t length)
+{
+	// Most instructions follow the pattern of <result-type> <result-id> <arguments>;
+	// opcodes with no result are listed explicitly.
+	switch (op)
+	{
+	case OpStore:
+	case OpCopyMemory:
+	case OpCopyMemorySized:
+	case OpImageWrite:
+	case OpAtomicStore:
+	case OpAtomicFlagClear:
+	case OpEmitStreamVertex:
+	case OpEndStreamPrimitive:
+	case OpControlBarrier:
+	case OpMemoryBarrier:
+	case OpGroupWaitEvents:
+	case OpRetainEvent:
+	case OpReleaseEvent:
+	case OpSetUserEventStatus:
+	case OpCaptureEventProfilingInfo:
+	case OpCommitReadPipe:
+	case OpCommitWritePipe:
+	case OpGroupCommitReadPipe:
+	case OpGroupCommitWritePipe:
+	case OpLine:
+	case OpNoLine:
+		return false;
+
+	default:
+		if (length > 1 && maybe_get<SPIRType>(args[0]) != nullptr)
+		{
+			result_type = args[0];
+			result_id = args[1];
+			return true;
+		}
+		else
+			return false;
+	}
+}
+
+Bitset Compiler::combined_decoration_for_member(const SPIRType &type, uint32_t index) const
+{
+	Bitset flags;
+	auto *type_meta = ir.find_meta(type.self);
+
+	if (type_meta)
+	{
+		auto &memb = type_meta->members;
+		if (index >= memb.size())
+			return flags;
+		auto &dec = memb[index];
+
+		flags.merge_or(dec.decoration_flags);
+
+		// If the member is itself a struct, merge in decorations recursively.
+		for (uint32_t i = 0; i < type.member_types.size(); i++)
+		{
+			auto &memb_type = get<SPIRType>(type.member_types[i]);
+			if (!memb_type.pointer)
+				flags.merge_or(combined_decoration_for_member(memb_type, i));
+		}
+	}
+
+	return flags;
+}
+
+bool Compiler::is_desktop_only_format(spv::ImageFormat format)
+{
+	switch (format)
+	{
+	// Desktop-only formats.
+	case ImageFormatR11fG11fB10f:
+	case ImageFormatR16f:
+	case ImageFormatRgb10A2:
+	case ImageFormatR8:
+	case ImageFormatRg8:
+	case ImageFormatR16:
+	case ImageFormatRg16:
+	case ImageFormatRgba16:
+	case ImageFormatR16Snorm:
+	case ImageFormatRg16Snorm:
+	case ImageFormatRgba16Snorm:
+	case ImageFormatR8Snorm:
+	case ImageFormatRg8Snorm:
+	case ImageFormatR8ui:
+	case ImageFormatRg8ui:
+	case ImageFormatR16ui:
+	case ImageFormatRgb10a2ui:
+	case ImageFormatR8i:
+	case ImageFormatRg8i:
+	case ImageFormatR16i:
+		return true;
+	default:
+		break;
+	}
+
+	return false;
+}
+
+bool Compiler::image_is_comparison(const SPIRType &type, uint32_t id) const
+{
+	return type.image.depth || (comparison_ids.count(id) != 0);
+}
+
+bool Compiler::type_is_opaque_value(const SPIRType &type) const
+{
+	return !type.pointer && (type.basetype == SPIRType::SampledImage || type.basetype == SPIRType::Image ||
+	                         type.basetype == SPIRType::Sampler);
+}
+
+// A backend discovered a case which requires the whole shader to be emitted again.
+void Compiler::force_recompile()
+{
+	is_force_recompile = true;
+}
+
+bool Compiler::is_forcing_recompilation() const
+{
+	return is_force_recompile;
+}
+
+void Compiler::clear_force_recompile()
+{
+	is_force_recompile = false;
+}
+
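+// A minimal sketch of how the recompile flag above is typically consumed by a
+// backend's compile loop (illustrative only; the real loop lives in subclasses
+// such as CompilerGLSL, and emit_shader_pass() is a hypothetical name here):
+//
+//   std::string compile_with_retries()
+//   {
+//       uint32_t pass_count = 0;
+//       std::string text;
+//       do
+//       {
+//           if (pass_count++ >= 3)
+//               SPIRV_CROSS_THROW("Over 3 compilation loops detected. Must be a bug!");
+//           clear_force_recompile();
+//           text = emit_shader_pass(); // may call force_recompile() along the way
+//       } while (is_forcing_recompilation());
+//       return text;
+//   }
+//
+// Emitting code can discover late facts (e.g. a continue block turning out to be
+// complex) that invalidate text emitted earlier, so the entire pass is rerun.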
+Compiler::PhysicalStorageBufferPointerHandler::PhysicalStorageBufferPointerHandler(Compiler &compiler_)
+    : compiler(compiler_)
+{
+}
+
+bool Compiler::PhysicalStorageBufferPointerHandler::handle(Op op, const uint32_t *args, uint32_t)
+{
+	if (op == OpConvertUToPtr || op == OpBitcast)
+	{
+		auto &type = compiler.get<SPIRType>(args[0]);
+		if (type.storage == StorageClassPhysicalStorageBufferEXT && type.pointer && type.pointer_depth == 1)
+		{
+			// Collect single-depth physical pointer types which are not blocks;
+			// backends may need to synthesize a wrapper block for these.
+			if (type.basetype != SPIRType::Struct)
+				types.insert(args[0]);
+		}
+	}
+
+	return true;
+}
+
+void Compiler::analyze_non_block_pointer_types()
+{
+	PhysicalStorageBufferPointerHandler handler(*this);
+	traverse_all_reachable_opcodes(get<SPIRFunction>(ir.default_entry_point), handler);
+	physical_storage_non_block_pointer_types.reserve(handler.types.size());
+	for (auto type : handler.types)
+		physical_storage_non_block_pointer_types.push_back(type);
+	sort(begin(physical_storage_non_block_pointer_types), end(physical_storage_non_block_pointer_types));
+}
+
+bool Compiler::InterlockedResourceAccessPrepassHandler::handle(Op op, const uint32_t *, uint32_t)
+{
+	if (op == OpBeginInvocationInterlockEXT || op == OpEndInvocationInterlockEXT)
+	{
+		if (interlock_function_id != 0 && interlock_function_id != call_stack.back())
+		{
+			// Interlock instructions are spread across more than one function;
+			// take the fully conservative path and bail out early.
+			split_function_case = true;
+			return false;
+		}
+		else
+		{
+			interlock_function_id = call_stack.back();
+			// If the interlock is placed inside control flow, we have a problem.
+			auto &cfg = compiler.get_cfg_for_function(interlock_function_id);
+
+			uint32_t from_block_id = compiler.get<SPIRFunction>(interlock_function_id).entry_block;
+			bool outside_control_flow = cfg.node_terminates_control_flow_in_sub_graph(from_block_id, current_block_id);
+			if (!outside_control_flow)
+				control_flow_interlock = true;
+		}
+	}
+	return true;
+}
+
+void Compiler::InterlockedResourceAccessPrepassHandler::rearm_current_block(const SPIRBlock &block)
+{
+	current_block_id = block.self;
+}
+
+bool Compiler::InterlockedResourceAccessPrepassHandler::begin_function_scope(const uint32_t *args, uint32_t length)
+{
+	if (length < 3)
+		return false;
+	call_stack.push_back(args[2]);
+	return true;
+}
+
+bool Compiler::InterlockedResourceAccessPrepassHandler::end_function_scope(const uint32_t *, uint32_t)
+{
+	call_stack.pop_back();
+	return true;
+}
+
+bool Compiler::InterlockedResourceAccessHandler::begin_function_scope(const uint32_t *args, uint32_t length)
+{
+	if (length < 3)
+		return false;
+
+	if (args[2] == interlock_function_id)
+		call_stack_is_interlocked = true;
+
+	call_stack.push_back(args[2]);
+	return true;
+}
+
+bool Compiler::InterlockedResourceAccessHandler::end_function_scope(const uint32_t *, uint32_t)
+{
+	if (call_stack.back() == interlock_function_id)
+		call_stack_is_interlocked = false;
+
+	call_stack.pop_back();
+	return true;
+}
+
+void Compiler::InterlockedResourceAccessHandler::access_potential_resource(uint32_t id)
+{
+	if ((use_critical_section && in_crit_sec) || (control_flow_interlock && call_stack_is_interlocked) ||
+	    split_function_case)
+	{
+		compiler.interlocked_resources.insert(id);
+	}
+}
+
+bool Compiler::InterlockedResourceAccessHandler::handle(Op opcode, const uint32_t *args, uint32_t length)
+{
+	// Only track the critical section in the simple single-function case.
+	if (use_critical_section)
+	{
+		if (opcode == OpBeginInvocationInterlockEXT)
+		{
+			in_crit_sec = true;
+			return true;
+		}
+
+		if (opcode == OpEndInvocationInterlockEXT)
+		{
+			// End of critical section; nothing more to track.
+			return false;
+		}
+	}
+
+	// Find out which resources the interlocked region can possibly touch.
+	switch (opcode)
+	{
+	case OpLoad:
+	{
+		if (length < 3)
+			return false;
+
+		uint32_t ptr = args[2];
+		auto *var = compiler.maybe_get_backing_variable(ptr);
+
+		// Only buffer and image memory is of interest here.
+		if (!var)
+			break;
+
+		switch (var->storage)
+		{
+		default:
+ break; + + case StorageClassUniformConstant: + { + uint32_t result_type = args[0]; + uint32_t id = args[1]; + compiler.set(id, "", result_type, true); + compiler.register_read(id, ptr, true); + break; + } + + case StorageClassUniform: + + if (!compiler.has_decoration(compiler.get(var->basetype).self, DecorationBufferBlock)) + break; + + case StorageClassStorageBuffer: + access_potential_resource(var->self); + break; + } + break; + } + + case OpInBoundsAccessChain: + case OpAccessChain: + case OpPtrAccessChain: + { + if (length < 3) + return false; + + uint32_t result_type = args[0]; + + auto &type = compiler.get(result_type); + if (type.storage == StorageClassUniform || type.storage == StorageClassUniformConstant || + type.storage == StorageClassStorageBuffer) + { + uint32_t id = args[1]; + uint32_t ptr = args[2]; + compiler.set(id, "", result_type, true); + compiler.register_read(id, ptr, true); + compiler.ir.ids[id].set_allow_type_rewrite(); + } + break; + } + + case OpImageTexelPointer: + { + if (length < 3) + return false; + + uint32_t result_type = args[0]; + uint32_t id = args[1]; + uint32_t ptr = args[2]; + auto &e = compiler.set(id, "", result_type, true); + auto *var = compiler.maybe_get_backing_variable(ptr); + if (var) + e.loaded_from = var->self; + break; + } + + case OpStore: + case OpImageWrite: + case OpAtomicStore: + { + if (length < 1) + return false; + + uint32_t ptr = args[0]; + auto *var = compiler.maybe_get_backing_variable(ptr); + if (var && (var->storage == StorageClassUniform || var->storage == StorageClassUniformConstant || + var->storage == StorageClassStorageBuffer)) + { + access_potential_resource(var->self); + } + + break; + } + + case OpCopyMemory: + { + if (length < 2) + return false; + + uint32_t dst = args[0]; + uint32_t src = args[1]; + auto *dst_var = compiler.maybe_get_backing_variable(dst); + auto *src_var = compiler.maybe_get_backing_variable(src); + + if (dst_var && (dst_var->storage == StorageClassUniform || dst_var->storage == StorageClassStorageBuffer)) + access_potential_resource(dst_var->self); + + if (src_var) + { + if (src_var->storage != StorageClassUniform && src_var->storage != StorageClassStorageBuffer) + break; + + if (src_var->storage == StorageClassUniform && + !compiler.has_decoration(compiler.get(src_var->basetype).self, DecorationBufferBlock)) + { + break; + } + + access_potential_resource(src_var->self); + } + + break; + } + + case OpImageRead: + case OpAtomicLoad: + { + if (length < 3) + return false; + + uint32_t ptr = args[2]; + auto *var = compiler.maybe_get_backing_variable(ptr); + + + if (!var) + break; + + switch (var->storage) + { + default: + break; + + case StorageClassUniform: + + if (!compiler.has_decoration(compiler.get(var->basetype).self, DecorationBufferBlock)) + break; + + case StorageClassUniformConstant: + case StorageClassStorageBuffer: + access_potential_resource(var->self); + break; + } + break; + } + + case OpAtomicExchange: + case OpAtomicCompareExchange: + case OpAtomicIIncrement: + case OpAtomicIDecrement: + case OpAtomicIAdd: + case OpAtomicISub: + case OpAtomicSMin: + case OpAtomicUMin: + case OpAtomicSMax: + case OpAtomicUMax: + case OpAtomicAnd: + case OpAtomicOr: + case OpAtomicXor: + { + if (length < 3) + return false; + + uint32_t ptr = args[2]; + auto *var = compiler.maybe_get_backing_variable(ptr); + if (var && (var->storage == StorageClassUniform || var->storage == StorageClassUniformConstant || + var->storage == StorageClassStorageBuffer)) + { + access_potential_resource(var->self); + } + + 
break; + } + + default: + break; + } + + return true; +} + +void Compiler::analyze_interlocked_resource_usage() +{ + if (get_execution_model() == ExecutionModelFragment && + (get_entry_point().flags.get(ExecutionModePixelInterlockOrderedEXT) || + get_entry_point().flags.get(ExecutionModePixelInterlockUnorderedEXT) || + get_entry_point().flags.get(ExecutionModeSampleInterlockOrderedEXT) || + get_entry_point().flags.get(ExecutionModeSampleInterlockUnorderedEXT))) + { + InterlockedResourceAccessPrepassHandler prepass_handler(*this, ir.default_entry_point); + traverse_all_reachable_opcodes(get(ir.default_entry_point), prepass_handler); + + InterlockedResourceAccessHandler handler(*this, ir.default_entry_point); + handler.interlock_function_id = prepass_handler.interlock_function_id; + handler.split_function_case = prepass_handler.split_function_case; + handler.control_flow_interlock = prepass_handler.control_flow_interlock; + handler.use_critical_section = !handler.split_function_case && !handler.control_flow_interlock; + + traverse_all_reachable_opcodes(get(ir.default_entry_point), handler); + + + interlocked_is_complex = + !handler.use_critical_section || handler.interlock_function_id != ir.default_entry_point; + } +} + +bool Compiler::type_is_array_of_pointers(const SPIRType &type) const +{ + if (!type.pointer) + return false; + + + return type.pointer_depth == get(type.parent_type).pointer_depth; +} + +bool Compiler::flush_phi_required(BlockID from, BlockID to) const +{ + auto &child = get(to); + for (auto &phi : child.phi_variables) + if (phi.parent == from) + return true; + return false; +} diff --git a/third_party/rust/spirv_cross/src/vendor/SPIRV-Cross/spirv_cross.hpp b/third_party/rust/spirv_cross/src/vendor/SPIRV-Cross/spirv_cross.hpp new file mode 100644 index 000000000000..ee8f72aeaa2b --- /dev/null +++ b/third_party/rust/spirv_cross/src/vendor/SPIRV-Cross/spirv_cross.hpp @@ -0,0 +1,1055 @@ + + + + + + + + + + + + + + + + +#ifndef SPIRV_CROSS_HPP +#define SPIRV_CROSS_HPP + +#include "spirv.hpp" +#include "spirv_cfg.hpp" +#include "spirv_cross_parsed_ir.hpp" + +namespace SPIRV_CROSS_NAMESPACE +{ +struct Resource +{ + + + ID id; + + + + + TypeID type_id; + + + + + + TypeID base_type_id; + + + + + + + + + + + std::string name; +}; + +struct ShaderResources +{ + SmallVector uniform_buffers; + SmallVector storage_buffers; + SmallVector stage_inputs; + SmallVector stage_outputs; + SmallVector subpass_inputs; + SmallVector storage_images; + SmallVector sampled_images; + SmallVector atomic_counters; + SmallVector acceleration_structures; + + + + SmallVector push_constant_buffers; + + + + SmallVector separate_images; + SmallVector separate_samplers; +}; + +struct CombinedImageSampler +{ + + VariableID combined_id; + + VariableID image_id; + + VariableID sampler_id; +}; + +struct SpecializationConstant +{ + + ConstantID id; + + uint32_t constant_id; +}; + +struct BufferRange +{ + unsigned index; + size_t offset; + size_t range; +}; + +enum BufferPackingStandard +{ + BufferPackingStd140, + BufferPackingStd430, + BufferPackingStd140EnhancedLayout, + BufferPackingStd430EnhancedLayout, + BufferPackingHLSLCbuffer, + BufferPackingHLSLCbufferPackOffset, + BufferPackingScalar, + BufferPackingScalarEnhancedLayout +}; + +struct EntryPoint +{ + std::string name; + spv::ExecutionModel execution_model; +}; + +class Compiler +{ +public: + friend class CFG; + friend class DominatorBuilder; + + + + + explicit Compiler(std::vector ir); + Compiler(const uint32_t *ir, size_t word_count); + + + + explicit 
Compiler(const ParsedIR &ir); + explicit Compiler(ParsedIR &&ir); + + virtual ~Compiler() = default; + + + + + virtual std::string compile(); + + + const std::string &get_name(ID id) const; + + + void set_decoration(ID id, spv::Decoration decoration, uint32_t argument = 0); + void set_decoration_string(ID id, spv::Decoration decoration, const std::string &argument); + + + + + void set_name(ID id, const std::string &name); + + + + const Bitset &get_decoration_bitset(ID id) const; + + + bool has_decoration(ID id, spv::Decoration decoration) const; + + + + + + + uint32_t get_decoration(ID id, spv::Decoration decoration) const; + const std::string &get_decoration_string(ID id, spv::Decoration decoration) const; + + + void unset_decoration(ID id, spv::Decoration decoration); + + + + const SPIRType &get_type(TypeID id) const; + + + const SPIRType &get_type_from_variable(VariableID id) const; + + + spv::StorageClass get_storage_class(VariableID id) const; + + + + virtual const std::string get_fallback_name(ID id) const; + + + + virtual const std::string get_block_fallback_name(VariableID id) const; + + + + const std::string &get_member_name(TypeID id, uint32_t index) const; + + + uint32_t get_member_decoration(TypeID id, uint32_t index, spv::Decoration decoration) const; + const std::string &get_member_decoration_string(TypeID id, uint32_t index, spv::Decoration decoration) const; + + + void set_member_name(TypeID id, uint32_t index, const std::string &name); + + + + const std::string &get_member_qualified_name(TypeID type_id, uint32_t index) const; + + + const Bitset &get_member_decoration_bitset(TypeID id, uint32_t index) const; + + + bool has_member_decoration(TypeID id, uint32_t index, spv::Decoration decoration) const; + + + void set_member_decoration(TypeID id, uint32_t index, spv::Decoration decoration, uint32_t argument = 0); + void set_member_decoration_string(TypeID id, uint32_t index, spv::Decoration decoration, + const std::string &argument); + + + void unset_member_decoration(TypeID id, uint32_t index, spv::Decoration decoration); + + + virtual const std::string get_fallback_member_name(uint32_t index) const + { + return join("_", index); + } + + + + + + SmallVector get_active_buffer_ranges(VariableID id) const; + + + size_t get_declared_struct_size(const SPIRType &struct_type) const; + + + + + + + + + + + + size_t get_declared_struct_size_runtime_array(const SPIRType &struct_type, size_t array_size) const; + + + size_t get_declared_struct_member_size(const SPIRType &struct_type, uint32_t index) const; + + + + + + + + + + std::unordered_set get_active_interface_variables() const; + + + + + void set_enabled_interface_variables(std::unordered_set active_variables); + + + ShaderResources get_shader_resources() const; + + + + + ShaderResources get_shader_resources(const std::unordered_set &active_variables) const; + + + + + void set_remapped_variable_state(VariableID id, bool remap_enable); + bool get_remapped_variable_state(VariableID id) const; + + + + + void set_subpass_input_remapped_components(VariableID id, uint32_t components); + uint32_t get_subpass_input_remapped_components(VariableID id) const; + + + + + + + + + + + + + + + + + + + + SmallVector get_entry_points_and_stages() const; + void set_entry_point(const std::string &entry, spv::ExecutionModel execution_model); + + + + + + void rename_entry_point(const std::string &old_name, const std::string &new_name, + spv::ExecutionModel execution_model); + const SPIREntryPoint &get_entry_point(const std::string &name, 
spv::ExecutionModel execution_model) const; + SPIREntryPoint &get_entry_point(const std::string &name, spv::ExecutionModel execution_model); + const std::string &get_cleansed_entry_point_name(const std::string &name, + spv::ExecutionModel execution_model) const; + + + const Bitset &get_execution_mode_bitset() const; + + void unset_execution_mode(spv::ExecutionMode mode); + void set_execution_mode(spv::ExecutionMode mode, uint32_t arg0 = 0, uint32_t arg1 = 0, uint32_t arg2 = 0); + + + + + uint32_t get_execution_mode_argument(spv::ExecutionMode mode, uint32_t index = 0) const; + spv::ExecutionModel get_execution_model() const; + + bool is_tessellation_shader() const; + + + + + + + + + + + + + + + + + + uint32_t get_work_group_size_specialization_constants(SpecializationConstant &x, SpecializationConstant &y, + SpecializationConstant &z) const; + + + + + + + + + + + + + + + + VariableID build_dummy_sampler_for_combined_images(); + + + + + + + + + + + + + + + + + + + + void build_combined_image_samplers(); + + + const SmallVector &get_combined_image_samplers() const + { + return combined_image_samplers; + } + + + + + + + + + + + void set_variable_type_remap_callback(VariableTypeRemapCallback cb) + { + variable_remap_callback = std::move(cb); + } + + + + + + + + SmallVector get_specialization_constants() const; + SPIRConstant &get_constant(ConstantID id); + const SPIRConstant &get_constant(ConstantID id) const; + + uint32_t get_current_id_bound() const + { + return uint32_t(ir.ids.size()); + } + + + + + + + + uint32_t type_struct_member_offset(const SPIRType &type, uint32_t index) const; + uint32_t type_struct_member_array_stride(const SPIRType &type, uint32_t index) const; + uint32_t type_struct_member_matrix_stride(const SPIRType &type, uint32_t index) const; + + + + + + + + bool get_binary_offset_for_decoration(VariableID id, spv::Decoration decoration, uint32_t &word_offset) const; + + + + + + + + + + + + + + + bool buffer_is_hlsl_counter_buffer(VariableID id) const; + + + + + + + + bool buffer_get_hlsl_counter_buffer(VariableID id, uint32_t &counter_id) const; + + + const SmallVector &get_declared_capabilities() const; + + + const SmallVector &get_declared_extensions() const; + + + + + + + + + + + + + + + std::string get_remapped_declared_block_name(VariableID id) const; + + + + + + Bitset get_buffer_block_flags(VariableID id) const; + +protected: + const uint32_t *stream(const Instruction &instr) const + { + + + + if (!instr.length) + return nullptr; + + if (instr.offset + instr.length > ir.spirv.size()) + SPIRV_CROSS_THROW("Compiler::stream() out of range."); + return &ir.spirv[instr.offset]; + } + + ParsedIR ir; + + + SmallVector global_variables; + SmallVector aliased_variables; + + SPIRFunction *current_function = nullptr; + SPIRBlock *current_block = nullptr; + std::unordered_set active_interface_variables; + bool check_active_interface_variables = false; + + + + template + T &set(uint32_t id, P &&... args) + { + ir.add_typed_id(static_cast(T::type), id); + auto &var = variant_set(ir.ids[id], std::forward
<P>
(args)...); + var.self = id; + return var; + } + + template + T &get(uint32_t id) + { + return variant_get(ir.ids[id]); + } + + template + T *maybe_get(uint32_t id) + { + if (id >= ir.ids.size()) + return nullptr; + else if (ir.ids[id].get_type() == static_cast(T::type)) + return &get(id); + else + return nullptr; + } + + template + const T &get(uint32_t id) const + { + return variant_get(ir.ids[id]); + } + + template + const T *maybe_get(uint32_t id) const + { + if (id >= ir.ids.size()) + return nullptr; + else if (ir.ids[id].get_type() == static_cast(T::type)) + return &get(id); + else + return nullptr; + } + + + uint32_t get_pointee_type_id(uint32_t type_id) const; + + + const SPIRType &get_pointee_type(const SPIRType &type) const; + + + const SPIRType &get_pointee_type(uint32_t type_id) const; + + + uint32_t get_variable_data_type_id(const SPIRVariable &var) const; + + + SPIRType &get_variable_data_type(const SPIRVariable &var); + + + const SPIRType &get_variable_data_type(const SPIRVariable &var) const; + + + SPIRType &get_variable_element_type(const SPIRVariable &var); + + + const SPIRType &get_variable_element_type(const SPIRVariable &var) const; + + + void set_member_qualified_name(uint32_t type_id, uint32_t index, const std::string &name); + void set_qualified_name(uint32_t id, const std::string &name); + + + bool is_sampled_image_type(const SPIRType &type); + + const SPIREntryPoint &get_entry_point() const; + SPIREntryPoint &get_entry_point(); + static bool is_tessellation_shader(spv::ExecutionModel model); + + virtual std::string to_name(uint32_t id, bool allow_alias = true) const; + bool is_builtin_variable(const SPIRVariable &var) const; + bool is_builtin_type(const SPIRType &type) const; + bool is_hidden_variable(const SPIRVariable &var, bool include_builtins = false) const; + bool is_immutable(uint32_t id) const; + bool is_member_builtin(const SPIRType &type, uint32_t index, spv::BuiltIn *builtin) const; + bool is_scalar(const SPIRType &type) const; + bool is_vector(const SPIRType &type) const; + bool is_matrix(const SPIRType &type) const; + bool is_array(const SPIRType &type) const; + uint32_t expression_type_id(uint32_t id) const; + const SPIRType &expression_type(uint32_t id) const; + bool expression_is_lvalue(uint32_t id) const; + bool variable_storage_is_aliased(const SPIRVariable &var); + SPIRVariable *maybe_get_backing_variable(uint32_t chain); + spv::StorageClass get_backing_variable_storage(uint32_t ptr); + + void register_read(uint32_t expr, uint32_t chain, bool forwarded); + void register_write(uint32_t chain); + + inline bool is_continue(uint32_t next) const + { + return (ir.block_meta[next] & ParsedIR::BLOCK_META_CONTINUE_BIT) != 0; + } + + inline bool is_single_block_loop(uint32_t next) const + { + auto &block = get(next); + return block.merge == SPIRBlock::MergeLoop && block.continue_block == ID(next); + } + + inline bool is_break(uint32_t next) const + { + return (ir.block_meta[next] & + (ParsedIR::BLOCK_META_LOOP_MERGE_BIT | ParsedIR::BLOCK_META_MULTISELECT_MERGE_BIT)) != 0; + } + + inline bool is_loop_break(uint32_t next) const + { + return (ir.block_meta[next] & ParsedIR::BLOCK_META_LOOP_MERGE_BIT) != 0; + } + + inline bool is_conditional(uint32_t next) const + { + return (ir.block_meta[next] & + (ParsedIR::BLOCK_META_SELECTION_MERGE_BIT | ParsedIR::BLOCK_META_MULTISELECT_MERGE_BIT)) != 0; + } + + + void flush_dependees(SPIRVariable &var); + void flush_all_active_variables(); + void flush_control_dependent_expressions(uint32_t block); + void 
flush_all_atomic_capable_variables(); + void flush_all_aliased_variables(); + void register_global_read_dependencies(const SPIRBlock &func, uint32_t id); + void register_global_read_dependencies(const SPIRFunction &func, uint32_t id); + std::unordered_set invalid_expressions; + + void update_name_cache(std::unordered_set &cache, std::string &name); + + + + + void update_name_cache(std::unordered_set &cache_primary, + const std::unordered_set &cache_secondary, std::string &name); + + bool function_is_pure(const SPIRFunction &func); + bool block_is_pure(const SPIRBlock &block); + + bool execution_is_branchless(const SPIRBlock &from, const SPIRBlock &to) const; + bool execution_is_direct_branch(const SPIRBlock &from, const SPIRBlock &to) const; + bool execution_is_noop(const SPIRBlock &from, const SPIRBlock &to) const; + SPIRBlock::ContinueBlockType continue_block_type(const SPIRBlock &continue_block) const; + + void force_recompile(); + void clear_force_recompile(); + bool is_forcing_recompilation() const; + bool is_force_recompile = false; + + bool block_is_loop_candidate(const SPIRBlock &block, SPIRBlock::Method method) const; + + bool types_are_logically_equivalent(const SPIRType &a, const SPIRType &b) const; + void inherit_expression_dependencies(uint32_t dst, uint32_t source); + void add_implied_read_expression(SPIRExpression &e, uint32_t source); + void add_implied_read_expression(SPIRAccessChain &e, uint32_t source); + + + + bool interface_variable_exists_in_entry_point(uint32_t id) const; + + SmallVector combined_image_samplers; + + void remap_variable_type_name(const SPIRType &type, const std::string &var_name, std::string &type_name) const + { + if (variable_remap_callback) + variable_remap_callback(type, var_name, type_name); + } + + void set_ir(const ParsedIR &parsed); + void set_ir(ParsedIR &&parsed); + void parse_fixup(); + + + struct OpcodeHandler + { + virtual ~OpcodeHandler() = default; + + + + virtual bool handle(spv::Op opcode, const uint32_t *args, uint32_t length) = 0; + + virtual bool follow_function_call(const SPIRFunction &) + { + return true; + } + + virtual void set_current_block(const SPIRBlock &) + { + } + + + + + virtual void rearm_current_block(const SPIRBlock &) + { + } + + virtual bool begin_function_scope(const uint32_t *, uint32_t) + { + return true; + } + + virtual bool end_function_scope(const uint32_t *, uint32_t) + { + return true; + } + }; + + struct BufferAccessHandler : OpcodeHandler + { + BufferAccessHandler(const Compiler &compiler_, SmallVector &ranges_, uint32_t id_) + : compiler(compiler_) + , ranges(ranges_) + , id(id_) + { + } + + bool handle(spv::Op opcode, const uint32_t *args, uint32_t length) override; + + const Compiler &compiler; + SmallVector &ranges; + uint32_t id; + + std::unordered_set seen; + }; + + struct InterfaceVariableAccessHandler : OpcodeHandler + { + InterfaceVariableAccessHandler(const Compiler &compiler_, std::unordered_set &variables_) + : compiler(compiler_) + , variables(variables_) + { + } + + bool handle(spv::Op opcode, const uint32_t *args, uint32_t length) override; + + const Compiler &compiler; + std::unordered_set &variables; + }; + + struct CombinedImageSamplerHandler : OpcodeHandler + { + CombinedImageSamplerHandler(Compiler &compiler_) + : compiler(compiler_) + { + } + bool handle(spv::Op opcode, const uint32_t *args, uint32_t length) override; + bool begin_function_scope(const uint32_t *args, uint32_t length) override; + bool end_function_scope(const uint32_t *args, uint32_t length) override; + + Compiler 
&compiler; + + + std::stack> parameter_remapping; + std::stack functions; + + uint32_t remap_parameter(uint32_t id); + void push_remap_parameters(const SPIRFunction &func, const uint32_t *args, uint32_t length); + void pop_remap_parameters(); + void register_combined_image_sampler(SPIRFunction &caller, VariableID combined_id, VariableID texture_id, + VariableID sampler_id, bool depth); + }; + + struct DummySamplerForCombinedImageHandler : OpcodeHandler + { + DummySamplerForCombinedImageHandler(Compiler &compiler_) + : compiler(compiler_) + { + } + bool handle(spv::Op opcode, const uint32_t *args, uint32_t length) override; + + Compiler &compiler; + bool need_dummy_sampler = false; + }; + + struct ActiveBuiltinHandler : OpcodeHandler + { + ActiveBuiltinHandler(Compiler &compiler_) + : compiler(compiler_) + { + } + + bool handle(spv::Op opcode, const uint32_t *args, uint32_t length) override; + Compiler &compiler; + + void handle_builtin(const SPIRType &type, spv::BuiltIn builtin, const Bitset &decoration_flags); + }; + + bool traverse_all_reachable_opcodes(const SPIRBlock &block, OpcodeHandler &handler) const; + bool traverse_all_reachable_opcodes(const SPIRFunction &block, OpcodeHandler &handler) const; + + SmallVector global_struct_cache; + + ShaderResources get_shader_resources(const std::unordered_set *active_variables) const; + + VariableTypeRemapCallback variable_remap_callback; + + bool get_common_basic_type(const SPIRType &type, SPIRType::BaseType &base_type); + + std::unordered_set forced_temporaries; + std::unordered_set forwarded_temporaries; + std::unordered_set suppressed_usage_tracking; + std::unordered_set hoisted_temporaries; + std::unordered_set forced_invariant_temporaries; + + Bitset active_input_builtins; + Bitset active_output_builtins; + uint32_t clip_distance_count = 0; + uint32_t cull_distance_count = 0; + bool position_invariant = false; + + + void update_active_builtins(); + bool has_active_builtin(spv::BuiltIn builtin, spv::StorageClass storage); + + void analyze_parameter_preservation( + SPIRFunction &entry, const CFG &cfg, + const std::unordered_map> &variable_to_blocks, + const std::unordered_map> &complete_write_blocks); + + + + + + + std::unordered_set comparison_ids; + bool need_subpass_input = false; + + + + + uint32_t dummy_sampler_id = 0; + + void analyze_image_and_sampler_usage(); + + struct CombinedImageSamplerDrefHandler : OpcodeHandler + { + CombinedImageSamplerDrefHandler(Compiler &compiler_) + : compiler(compiler_) + { + } + bool handle(spv::Op opcode, const uint32_t *args, uint32_t length) override; + + Compiler &compiler; + std::unordered_set dref_combined_samplers; + }; + + struct CombinedImageSamplerUsageHandler : OpcodeHandler + { + CombinedImageSamplerUsageHandler(Compiler &compiler_, + const std::unordered_set &dref_combined_samplers_) + : compiler(compiler_) + , dref_combined_samplers(dref_combined_samplers_) + { + } + + bool begin_function_scope(const uint32_t *args, uint32_t length) override; + bool handle(spv::Op opcode, const uint32_t *args, uint32_t length) override; + Compiler &compiler; + const std::unordered_set &dref_combined_samplers; + + std::unordered_map> dependency_hierarchy; + std::unordered_set comparison_ids; + + void add_hierarchy_to_comparison_ids(uint32_t ids); + bool need_subpass_input = false; + void add_dependency(uint32_t dst, uint32_t src); + }; + + void build_function_control_flow_graphs_and_analyze(); + std::unordered_map> function_cfgs; + const CFG &get_cfg_for_current_function() const; + const CFG 
&get_cfg_for_function(uint32_t id) const; + + struct CFGBuilder : OpcodeHandler + { + explicit CFGBuilder(Compiler &compiler_); + + bool follow_function_call(const SPIRFunction &func) override; + bool handle(spv::Op op, const uint32_t *args, uint32_t length) override; + Compiler &compiler; + std::unordered_map> function_cfgs; + }; + + struct AnalyzeVariableScopeAccessHandler : OpcodeHandler + { + AnalyzeVariableScopeAccessHandler(Compiler &compiler_, SPIRFunction &entry_); + + bool follow_function_call(const SPIRFunction &) override; + void set_current_block(const SPIRBlock &block) override; + + void notify_variable_access(uint32_t id, uint32_t block); + bool id_is_phi_variable(uint32_t id) const; + bool id_is_potential_temporary(uint32_t id) const; + bool handle(spv::Op op, const uint32_t *args, uint32_t length) override; + + Compiler &compiler; + SPIRFunction &entry; + std::unordered_map> accessed_variables_to_block; + std::unordered_map> accessed_temporaries_to_block; + std::unordered_map result_id_to_type; + std::unordered_map> complete_write_variables_to_block; + std::unordered_map> partial_write_variables_to_block; + std::unordered_set access_chain_expressions; + + std::unordered_map> access_chain_children; + const SPIRBlock *current_block = nullptr; + }; + + struct StaticExpressionAccessHandler : OpcodeHandler + { + StaticExpressionAccessHandler(Compiler &compiler_, uint32_t variable_id_); + bool follow_function_call(const SPIRFunction &) override; + bool handle(spv::Op op, const uint32_t *args, uint32_t length) override; + + Compiler &compiler; + uint32_t variable_id; + uint32_t static_expression = 0; + uint32_t write_count = 0; + }; + + struct PhysicalStorageBufferPointerHandler : OpcodeHandler + { + explicit PhysicalStorageBufferPointerHandler(Compiler &compiler_); + bool handle(spv::Op op, const uint32_t *args, uint32_t length) override; + Compiler &compiler; + std::unordered_set types; + }; + void analyze_non_block_pointer_types(); + SmallVector physical_storage_non_block_pointer_types; + + void analyze_variable_scope(SPIRFunction &function, AnalyzeVariableScopeAccessHandler &handler); + void find_function_local_luts(SPIRFunction &function, const AnalyzeVariableScopeAccessHandler &handler, + bool single_function); + bool may_read_undefined_variable_in_block(const SPIRBlock &block, uint32_t var); + + + + + + struct InterlockedResourceAccessHandler : OpcodeHandler + { + InterlockedResourceAccessHandler(Compiler &compiler_, uint32_t entry_point_id) + : compiler(compiler_) + { + call_stack.push_back(entry_point_id); + } + + bool handle(spv::Op op, const uint32_t *args, uint32_t length) override; + bool begin_function_scope(const uint32_t *args, uint32_t length) override; + bool end_function_scope(const uint32_t *args, uint32_t length) override; + + Compiler &compiler; + bool in_crit_sec = false; + + uint32_t interlock_function_id = 0; + bool split_function_case = false; + bool control_flow_interlock = false; + bool use_critical_section = false; + bool call_stack_is_interlocked = false; + SmallVector call_stack; + + void access_potential_resource(uint32_t id); + }; + + struct InterlockedResourceAccessPrepassHandler : OpcodeHandler + { + InterlockedResourceAccessPrepassHandler(Compiler &compiler_, uint32_t entry_point_id) + : compiler(compiler_) + { + call_stack.push_back(entry_point_id); + } + + void rearm_current_block(const SPIRBlock &block) override; + bool handle(spv::Op op, const uint32_t *args, uint32_t length) override; + bool begin_function_scope(const uint32_t *args, 
uint32_t length) override; + bool end_function_scope(const uint32_t *args, uint32_t length) override; + + Compiler &compiler; + uint32_t interlock_function_id = 0; + uint32_t current_block_id = 0; + bool split_function_case = false; + bool control_flow_interlock = false; + SmallVector call_stack; + }; + + void analyze_interlocked_resource_usage(); + + std::unordered_set interlocked_resources; + bool interlocked_is_complex = false; + + void make_constant_null(uint32_t id, uint32_t type); + + std::unordered_map declared_block_names; + + bool instruction_to_result_type(uint32_t &result_type, uint32_t &result_id, spv::Op op, const uint32_t *args, + uint32_t length); + + Bitset combined_decoration_for_member(const SPIRType &type, uint32_t index) const; + static bool is_desktop_only_format(spv::ImageFormat format); + + bool image_is_comparison(const SPIRType &type, uint32_t id) const; + + void set_extended_decoration(uint32_t id, ExtendedDecorations decoration, uint32_t value = 0); + uint32_t get_extended_decoration(uint32_t id, ExtendedDecorations decoration) const; + bool has_extended_decoration(uint32_t id, ExtendedDecorations decoration) const; + void unset_extended_decoration(uint32_t id, ExtendedDecorations decoration); + + void set_extended_member_decoration(uint32_t type, uint32_t index, ExtendedDecorations decoration, + uint32_t value = 0); + uint32_t get_extended_member_decoration(uint32_t type, uint32_t index, ExtendedDecorations decoration) const; + bool has_extended_member_decoration(uint32_t type, uint32_t index, ExtendedDecorations decoration) const; + void unset_extended_member_decoration(uint32_t type, uint32_t index, ExtendedDecorations decoration); + + bool type_is_array_of_pointers(const SPIRType &type) const; + bool type_is_block_like(const SPIRType &type) const; + bool type_is_opaque_value(const SPIRType &type) const; + + bool reflection_ssbo_instance_name_is_significant() const; + std::string get_remapped_declared_block_name(uint32_t id, bool fallback_prefer_instance_name) const; + + bool flush_phi_required(BlockID from, BlockID to) const; + +private: + + const SPIREntryPoint &get_first_entry_point(const std::string &name) const; + SPIREntryPoint &get_first_entry_point(const std::string &name); +}; +} + +#endif diff --git a/third_party/rust/spirv_cross/src/vendor/SPIRV-Cross/spirv_cross_c.cpp b/third_party/rust/spirv_cross/src/vendor/SPIRV-Cross/spirv_cross_c.cpp new file mode 100644 index 000000000000..f88efa032c09 --- /dev/null +++ b/third_party/rust/spirv_cross/src/vendor/SPIRV-Cross/spirv_cross_c.cpp @@ -0,0 +1,2176 @@ + + + + + + + + + + + + + + + + +#include "spirv_cross_c.h" + +#if SPIRV_CROSS_C_API_CPP +#include "spirv_cpp.hpp" +#endif +#if SPIRV_CROSS_C_API_GLSL +#include "spirv_glsl.hpp" +#else +#include "spirv_cross.hpp" +#endif +#if SPIRV_CROSS_C_API_HLSL +#include "spirv_hlsl.hpp" +#endif +#if SPIRV_CROSS_C_API_MSL +#include "spirv_msl.hpp" +#endif +#if SPIRV_CROSS_C_API_REFLECT +#include "spirv_reflect.hpp" +#endif + +#ifdef HAVE_SPIRV_CROSS_GIT_VERSION +#include "gitversion.h" +#endif + +#include "spirv_parser.hpp" +#include +#include +#include + + + +#ifdef _MSC_VER +#pragma warning(push) +#pragma warning(disable : 4996) +#endif + +#ifndef SPIRV_CROSS_EXCEPTIONS_TO_ASSERTIONS +#define SPVC_BEGIN_SAFE_SCOPE try +#else +#define SPVC_BEGIN_SAFE_SCOPE +#endif + +#ifndef SPIRV_CROSS_EXCEPTIONS_TO_ASSERTIONS +#define SPVC_END_SAFE_SCOPE(context, error) \ + catch (const std::exception &e) \ + { \ + (context)->report_error(e.what()); \ + return (error); \ + } 
+#else +#define SPVC_END_SAFE_SCOPE(context, error) +#endif + +using namespace std; +using namespace SPIRV_CROSS_NAMESPACE; + +struct ScratchMemoryAllocation +{ + virtual ~ScratchMemoryAllocation() = default; +}; + +struct StringAllocation : ScratchMemoryAllocation +{ + explicit StringAllocation(const char *name) + : str(name) + { + } + + explicit StringAllocation(std::string name) + : str(std::move(name)) + { + } + + std::string str; +}; + +template +struct TemporaryBuffer : ScratchMemoryAllocation +{ + SmallVector buffer; +}; + +template +static inline std::unique_ptr spvc_allocate(Ts &&... ts) +{ + return std::unique_ptr(new T(std::forward(ts)...)); +} + +struct spvc_context_s +{ + string last_error; + SmallVector> allocations; + const char *allocate_name(const std::string &name); + + spvc_error_callback callback = nullptr; + void *callback_userdata = nullptr; + void report_error(std::string msg); +}; + +void spvc_context_s::report_error(std::string msg) +{ + last_error = std::move(msg); + if (callback) + callback(callback_userdata, last_error.c_str()); +} + +const char *spvc_context_s::allocate_name(const std::string &name) +{ + SPVC_BEGIN_SAFE_SCOPE + { + auto alloc = spvc_allocate(name); + auto *ret = alloc->str.c_str(); + allocations.emplace_back(std::move(alloc)); + return ret; + } + SPVC_END_SAFE_SCOPE(this, nullptr) +} + +struct spvc_parsed_ir_s : ScratchMemoryAllocation +{ + spvc_context context = nullptr; + ParsedIR parsed; +}; + +struct spvc_compiler_s : ScratchMemoryAllocation +{ + spvc_context context = nullptr; + unique_ptr compiler; + spvc_backend backend = SPVC_BACKEND_NONE; +}; + +struct spvc_compiler_options_s : ScratchMemoryAllocation +{ + spvc_context context = nullptr; + uint32_t backend_flags = 0; +#if SPIRV_CROSS_C_API_GLSL + CompilerGLSL::Options glsl; +#endif +#if SPIRV_CROSS_C_API_MSL + CompilerMSL::Options msl; +#endif +#if SPIRV_CROSS_C_API_HLSL + CompilerHLSL::Options hlsl; +#endif +}; + +struct spvc_set_s : ScratchMemoryAllocation +{ + std::unordered_set set; +}; + + + +struct spvc_type_s : SPIRType +{ +}; + +struct spvc_constant_s : SPIRConstant +{ +}; + +struct spvc_resources_s : ScratchMemoryAllocation +{ + spvc_context context = nullptr; + SmallVector uniform_buffers; + SmallVector storage_buffers; + SmallVector stage_inputs; + SmallVector stage_outputs; + SmallVector subpass_inputs; + SmallVector storage_images; + SmallVector sampled_images; + SmallVector atomic_counters; + SmallVector push_constant_buffers; + SmallVector separate_images; + SmallVector separate_samplers; + SmallVector acceleration_structures; + + bool copy_resources(SmallVector &outputs, const SmallVector &inputs); + bool copy_resources(const ShaderResources &resources); +}; + +spvc_result spvc_context_create(spvc_context *context) +{ + auto *ctx = new (std::nothrow) spvc_context_s; + if (!ctx) + return SPVC_ERROR_OUT_OF_MEMORY; + + *context = ctx; + return SPVC_SUCCESS; +} + +void spvc_context_destroy(spvc_context context) +{ + delete context; +} + +void spvc_context_release_allocations(spvc_context context) +{ + context->allocations.clear(); +} + +const char *spvc_context_get_last_error_string(spvc_context context) +{ + return context->last_error.c_str(); +} + +SPVC_PUBLIC_API void spvc_context_set_error_callback(spvc_context context, spvc_error_callback cb, void *userdata) +{ + context->callback = cb; + context->callback_userdata = userdata; +} + +spvc_result spvc_context_parse_spirv(spvc_context context, const SpvId *spirv, size_t word_count, + spvc_parsed_ir *parsed_ir) +{ + 
SPVC_BEGIN_SAFE_SCOPE + { + std::unique_ptr pir(new (std::nothrow) spvc_parsed_ir_s); + if (!pir) + { + context->report_error("Out of memory."); + return SPVC_ERROR_OUT_OF_MEMORY; + } + + pir->context = context; + Parser parser(spirv, word_count); + parser.parse(); + pir->parsed = move(parser.get_parsed_ir()); + *parsed_ir = pir.get(); + context->allocations.push_back(std::move(pir)); + } + SPVC_END_SAFE_SCOPE(context, SPVC_ERROR_INVALID_SPIRV) + return SPVC_SUCCESS; +} + +spvc_result spvc_context_create_compiler(spvc_context context, spvc_backend backend, spvc_parsed_ir parsed_ir, + spvc_capture_mode mode, spvc_compiler *compiler) +{ + SPVC_BEGIN_SAFE_SCOPE + { + std::unique_ptr comp(new (std::nothrow) spvc_compiler_s); + if (!comp) + { + context->report_error("Out of memory."); + return SPVC_ERROR_OUT_OF_MEMORY; + } + comp->backend = backend; + comp->context = context; + + if (mode != SPVC_CAPTURE_MODE_COPY && mode != SPVC_CAPTURE_MODE_TAKE_OWNERSHIP) + { + context->report_error("Invalid argument for capture mode."); + return SPVC_ERROR_INVALID_ARGUMENT; + } + + switch (backend) + { + case SPVC_BACKEND_NONE: + if (mode == SPVC_CAPTURE_MODE_TAKE_OWNERSHIP) + comp->compiler.reset(new Compiler(move(parsed_ir->parsed))); + else if (mode == SPVC_CAPTURE_MODE_COPY) + comp->compiler.reset(new Compiler(parsed_ir->parsed)); + break; + +#if SPIRV_CROSS_C_API_GLSL + case SPVC_BACKEND_GLSL: + if (mode == SPVC_CAPTURE_MODE_TAKE_OWNERSHIP) + comp->compiler.reset(new CompilerGLSL(move(parsed_ir->parsed))); + else if (mode == SPVC_CAPTURE_MODE_COPY) + comp->compiler.reset(new CompilerGLSL(parsed_ir->parsed)); + break; +#endif + +#if SPIRV_CROSS_C_API_HLSL + case SPVC_BACKEND_HLSL: + if (mode == SPVC_CAPTURE_MODE_TAKE_OWNERSHIP) + comp->compiler.reset(new CompilerHLSL(move(parsed_ir->parsed))); + else if (mode == SPVC_CAPTURE_MODE_COPY) + comp->compiler.reset(new CompilerHLSL(parsed_ir->parsed)); + break; +#endif + +#if SPIRV_CROSS_C_API_MSL + case SPVC_BACKEND_MSL: + if (mode == SPVC_CAPTURE_MODE_TAKE_OWNERSHIP) + comp->compiler.reset(new CompilerMSL(move(parsed_ir->parsed))); + else if (mode == SPVC_CAPTURE_MODE_COPY) + comp->compiler.reset(new CompilerMSL(parsed_ir->parsed)); + break; +#endif + +#if SPIRV_CROSS_C_API_CPP + case SPVC_BACKEND_CPP: + if (mode == SPVC_CAPTURE_MODE_TAKE_OWNERSHIP) + comp->compiler.reset(new CompilerCPP(move(parsed_ir->parsed))); + else if (mode == SPVC_CAPTURE_MODE_COPY) + comp->compiler.reset(new CompilerCPP(parsed_ir->parsed)); + break; +#endif + +#if SPIRV_CROSS_C_API_REFLECT + case SPVC_BACKEND_JSON: + if (mode == SPVC_CAPTURE_MODE_TAKE_OWNERSHIP) + comp->compiler.reset(new CompilerReflection(move(parsed_ir->parsed))); + else if (mode == SPVC_CAPTURE_MODE_COPY) + comp->compiler.reset(new CompilerReflection(parsed_ir->parsed)); + break; +#endif + + default: + context->report_error("Invalid backend."); + return SPVC_ERROR_INVALID_ARGUMENT; + } + + *compiler = comp.get(); + context->allocations.push_back(std::move(comp)); + } + SPVC_END_SAFE_SCOPE(context, SPVC_ERROR_OUT_OF_MEMORY) + return SPVC_SUCCESS; +} + +spvc_result spvc_compiler_create_compiler_options(spvc_compiler compiler, spvc_compiler_options *options) +{ + SPVC_BEGIN_SAFE_SCOPE + { + std::unique_ptr opt(new (std::nothrow) spvc_compiler_options_s); + if (!opt) + { + compiler->context->report_error("Out of memory."); + return SPVC_ERROR_OUT_OF_MEMORY; + } + + opt->context = compiler->context; + opt->backend_flags = 0; + switch (compiler->backend) + { +#if SPIRV_CROSS_C_API_MSL + case SPVC_BACKEND_MSL: + 
opt->backend_flags |= SPVC_COMPILER_OPTION_MSL_BIT | SPVC_COMPILER_OPTION_COMMON_BIT; + opt->glsl = static_cast(compiler->compiler.get())->get_common_options(); + opt->msl = static_cast(compiler->compiler.get())->get_msl_options(); + break; +#endif + +#if SPIRV_CROSS_C_API_HLSL + case SPVC_BACKEND_HLSL: + opt->backend_flags |= SPVC_COMPILER_OPTION_HLSL_BIT | SPVC_COMPILER_OPTION_COMMON_BIT; + opt->glsl = static_cast(compiler->compiler.get())->get_common_options(); + opt->hlsl = static_cast(compiler->compiler.get())->get_hlsl_options(); + break; +#endif + +#if SPIRV_CROSS_C_API_GLSL + case SPVC_BACKEND_GLSL: + opt->backend_flags |= SPVC_COMPILER_OPTION_GLSL_BIT | SPVC_COMPILER_OPTION_COMMON_BIT; + opt->glsl = static_cast(compiler->compiler.get())->get_common_options(); + break; +#endif + + default: + break; + } + + *options = opt.get(); + compiler->context->allocations.push_back(std::move(opt)); + } + SPVC_END_SAFE_SCOPE(compiler->context, SPVC_ERROR_OUT_OF_MEMORY) + return SPVC_SUCCESS; +} + +spvc_result spvc_compiler_options_set_bool(spvc_compiler_options options, spvc_compiler_option option, + spvc_bool value) +{ + return spvc_compiler_options_set_uint(options, option, value ? 1 : 0); +} + +spvc_result spvc_compiler_options_set_uint(spvc_compiler_options options, spvc_compiler_option option, unsigned value) +{ + (void)value; + (void)option; + uint32_t supported_mask = options->backend_flags; + uint32_t required_mask = option & SPVC_COMPILER_OPTION_LANG_BITS; + if ((required_mask | supported_mask) != supported_mask) + { + options->context->report_error("Option is not supported by current backend."); + return SPVC_ERROR_INVALID_ARGUMENT; + } + + switch (option) + { +#if SPIRV_CROSS_C_API_GLSL + case SPVC_COMPILER_OPTION_FORCE_TEMPORARY: + options->glsl.force_temporary = value != 0; + break; + case SPVC_COMPILER_OPTION_FLATTEN_MULTIDIMENSIONAL_ARRAYS: + options->glsl.flatten_multidimensional_arrays = value != 0; + break; + case SPVC_COMPILER_OPTION_FIXUP_DEPTH_CONVENTION: + options->glsl.vertex.fixup_clipspace = value != 0; + break; + case SPVC_COMPILER_OPTION_FLIP_VERTEX_Y: + options->glsl.vertex.flip_vert_y = value != 0; + break; + case SPVC_COMPILER_OPTION_EMIT_LINE_DIRECTIVES: + options->glsl.emit_line_directives = value != 0; + break; + + case SPVC_COMPILER_OPTION_GLSL_SUPPORT_NONZERO_BASE_INSTANCE: + options->glsl.vertex.support_nonzero_base_instance = value != 0; + break; + case SPVC_COMPILER_OPTION_GLSL_SEPARATE_SHADER_OBJECTS: + options->glsl.separate_shader_objects = value != 0; + break; + case SPVC_COMPILER_OPTION_GLSL_ENABLE_420PACK_EXTENSION: + options->glsl.enable_420pack_extension = value != 0; + break; + case SPVC_COMPILER_OPTION_GLSL_VERSION: + options->glsl.version = value; + break; + case SPVC_COMPILER_OPTION_GLSL_ES: + options->glsl.es = value != 0; + break; + case SPVC_COMPILER_OPTION_GLSL_VULKAN_SEMANTICS: + options->glsl.vulkan_semantics = value != 0; + break; + case SPVC_COMPILER_OPTION_GLSL_ES_DEFAULT_FLOAT_PRECISION_HIGHP: + options->glsl.fragment.default_float_precision = + value != 0 ? CompilerGLSL::Options::Precision::Highp : CompilerGLSL::Options::Precision::Mediump; + break; + case SPVC_COMPILER_OPTION_GLSL_ES_DEFAULT_INT_PRECISION_HIGHP: + options->glsl.fragment.default_int_precision = + value != 0 ? 
CompilerGLSL::Options::Precision::Highp : CompilerGLSL::Options::Precision::Mediump; + break; + case SPVC_COMPILER_OPTION_GLSL_EMIT_PUSH_CONSTANT_AS_UNIFORM_BUFFER: + options->glsl.emit_push_constant_as_uniform_buffer = value != 0; + break; + case SPVC_COMPILER_OPTION_GLSL_EMIT_UNIFORM_BUFFER_AS_PLAIN_UNIFORMS: + options->glsl.emit_uniform_buffer_as_plain_uniforms = value != 0; + break; +#endif + +#if SPIRV_CROSS_C_API_HLSL + case SPVC_COMPILER_OPTION_HLSL_SHADER_MODEL: + options->hlsl.shader_model = value; + break; + + case SPVC_COMPILER_OPTION_HLSL_POINT_SIZE_COMPAT: + options->hlsl.point_size_compat = value != 0; + break; + + case SPVC_COMPILER_OPTION_HLSL_POINT_COORD_COMPAT: + options->hlsl.point_coord_compat = value != 0; + break; + + case SPVC_COMPILER_OPTION_HLSL_SUPPORT_NONZERO_BASE_VERTEX_BASE_INSTANCE: + options->hlsl.support_nonzero_base_vertex_base_instance = value != 0; + break; +#endif + +#if SPIRV_CROSS_C_API_MSL + case SPVC_COMPILER_OPTION_MSL_VERSION: + options->msl.msl_version = value; + break; + + case SPVC_COMPILER_OPTION_MSL_TEXEL_BUFFER_TEXTURE_WIDTH: + options->msl.texel_buffer_texture_width = value; + break; + + case SPVC_COMPILER_OPTION_MSL_SWIZZLE_BUFFER_INDEX: + options->msl.swizzle_buffer_index = value; + break; + + case SPVC_COMPILER_OPTION_MSL_INDIRECT_PARAMS_BUFFER_INDEX: + options->msl.indirect_params_buffer_index = value; + break; + + case SPVC_COMPILER_OPTION_MSL_SHADER_OUTPUT_BUFFER_INDEX: + options->msl.shader_output_buffer_index = value; + break; + + case SPVC_COMPILER_OPTION_MSL_SHADER_PATCH_OUTPUT_BUFFER_INDEX: + options->msl.shader_patch_output_buffer_index = value; + break; + + case SPVC_COMPILER_OPTION_MSL_SHADER_TESS_FACTOR_OUTPUT_BUFFER_INDEX: + options->msl.shader_tess_factor_buffer_index = value; + break; + + case SPVC_COMPILER_OPTION_MSL_SHADER_INPUT_WORKGROUP_INDEX: + options->msl.shader_input_wg_index = value; + break; + + case SPVC_COMPILER_OPTION_MSL_ENABLE_POINT_SIZE_BUILTIN: + options->msl.enable_point_size_builtin = value != 0; + break; + + case SPVC_COMPILER_OPTION_MSL_DISABLE_RASTERIZATION: + options->msl.disable_rasterization = value != 0; + break; + + case SPVC_COMPILER_OPTION_MSL_CAPTURE_OUTPUT_TO_BUFFER: + options->msl.capture_output_to_buffer = value != 0; + break; + + case SPVC_COMPILER_OPTION_MSL_SWIZZLE_TEXTURE_SAMPLES: + options->msl.swizzle_texture_samples = value != 0; + break; + + case SPVC_COMPILER_OPTION_MSL_PAD_FRAGMENT_OUTPUT_COMPONENTS: + options->msl.pad_fragment_output_components = value != 0; + break; + + case SPVC_COMPILER_OPTION_MSL_TESS_DOMAIN_ORIGIN_LOWER_LEFT: + options->msl.tess_domain_origin_lower_left = value != 0; + break; + + case SPVC_COMPILER_OPTION_MSL_PLATFORM: + options->msl.platform = static_cast(value); + break; + + case SPVC_COMPILER_OPTION_MSL_ARGUMENT_BUFFERS: + options->msl.argument_buffers = value != 0; + break; + + case SPVC_COMPILER_OPTION_MSL_TEXTURE_BUFFER_NATIVE: + options->msl.texture_buffer_native = value != 0; + break; + + case SPVC_COMPILER_OPTION_MSL_BUFFER_SIZE_BUFFER_INDEX: + options->msl.buffer_size_buffer_index = value; + break; + + case SPVC_COMPILER_OPTION_MSL_MULTIVIEW: + options->msl.multiview = value != 0; + break; + + case SPVC_COMPILER_OPTION_MSL_VIEW_MASK_BUFFER_INDEX: + options->msl.view_mask_buffer_index = value; + break; + + case SPVC_COMPILER_OPTION_MSL_DEVICE_INDEX: + options->msl.device_index = value; + break; + + case SPVC_COMPILER_OPTION_MSL_VIEW_INDEX_FROM_DEVICE_INDEX: + options->msl.view_index_from_device_index = value != 0; + break; + + case 
SPVC_COMPILER_OPTION_MSL_DISPATCH_BASE: + options->msl.dispatch_base = value != 0; + break; + + case SPVC_COMPILER_OPTION_MSL_DYNAMIC_OFFSETS_BUFFER_INDEX: + options->msl.dynamic_offsets_buffer_index = value; + break; +#endif + + default: + options->context->report_error("Unknown option."); + return SPVC_ERROR_INVALID_ARGUMENT; + } + + return SPVC_SUCCESS; +} + +spvc_result spvc_compiler_install_compiler_options(spvc_compiler compiler, spvc_compiler_options options) +{ + (void)options; + switch (compiler->backend) + { +#if SPIRV_CROSS_C_API_GLSL + case SPVC_BACKEND_GLSL: + static_cast(*compiler->compiler).set_common_options(options->glsl); + break; +#endif + +#if SPIRV_CROSS_C_API_HLSL + case SPVC_BACKEND_HLSL: + static_cast(*compiler->compiler).set_common_options(options->glsl); + static_cast(*compiler->compiler).set_hlsl_options(options->hlsl); + break; +#endif + +#if SPIRV_CROSS_C_API_MSL + case SPVC_BACKEND_MSL: + static_cast(*compiler->compiler).set_common_options(options->glsl); + static_cast(*compiler->compiler).set_msl_options(options->msl); + break; +#endif + + default: + break; + } + + return SPVC_SUCCESS; +} + +spvc_result spvc_compiler_add_header_line(spvc_compiler compiler, const char *line) +{ +#if SPIRV_CROSS_C_API_GLSL + if (compiler->backend == SPVC_BACKEND_NONE) + { + compiler->context->report_error("Cross-compilation related option used on NONE backend which only supports reflection."); + return SPVC_ERROR_INVALID_ARGUMENT; + } + + static_cast(compiler->compiler.get())->add_header_line(line); + return SPVC_SUCCESS; +#else + (void)line; + compiler->context->report_error("Cross-compilation related option used on NONE backend which only supports reflection."); + return SPVC_ERROR_INVALID_ARGUMENT; +#endif +} + +spvc_result spvc_compiler_require_extension(spvc_compiler compiler, const char *line) +{ +#if SPIRV_CROSS_C_API_GLSL + if (compiler->backend == SPVC_BACKEND_NONE) + { + compiler->context->report_error("Cross-compilation related option used on NONE backend which only supports reflection."); + return SPVC_ERROR_INVALID_ARGUMENT; + } + + static_cast(compiler->compiler.get())->require_extension(line); + return SPVC_SUCCESS; +#else + (void)line; + compiler->context->report_error("Cross-compilation related option used on NONE backend which only supports reflection."); + return SPVC_ERROR_INVALID_ARGUMENT; +#endif +} + +spvc_result spvc_compiler_flatten_buffer_block(spvc_compiler compiler, spvc_variable_id id) +{ +#if SPIRV_CROSS_C_API_GLSL + if (compiler->backend == SPVC_BACKEND_NONE) + { + compiler->context->report_error("Cross-compilation related option used on NONE backend which only supports reflection."); + return SPVC_ERROR_INVALID_ARGUMENT; + } + + static_cast(compiler->compiler.get())->flatten_buffer_block(id); + return SPVC_SUCCESS; +#else + (void)id; + compiler->context->report_error("Cross-compilation related option used on NONE backend which only supports reflection."); + return SPVC_ERROR_INVALID_ARGUMENT; +#endif +} + +spvc_result spvc_compiler_hlsl_set_root_constants_layout(spvc_compiler compiler, + const spvc_hlsl_root_constants *constant_info, + size_t count) +{ +#if SPIRV_CROSS_C_API_HLSL + if (compiler->backend != SPVC_BACKEND_HLSL) + { + compiler->context->report_error("HLSL function used on a non-HLSL backend."); + return SPVC_ERROR_INVALID_ARGUMENT; + } + + auto &hlsl = *static_cast(compiler->compiler.get()); + vector roots; + roots.reserve(count); + for (size_t i = 0; i < count; i++) + { + RootConstants root; + root.binding = 
constant_info[i].binding; + root.space = constant_info[i].space; + root.start = constant_info[i].start; + root.end = constant_info[i].end; + roots.push_back(root); + } + + hlsl.set_root_constant_layouts(std::move(roots)); + return SPVC_SUCCESS; +#else + (void)constant_info; + (void)count; + compiler->context->report_error("HLSL function used on a non-HLSL backend."); + return SPVC_ERROR_INVALID_ARGUMENT; +#endif +} + +spvc_result spvc_compiler_hlsl_add_vertex_attribute_remap(spvc_compiler compiler, + const spvc_hlsl_vertex_attribute_remap *remap, + size_t count) +{ +#if SPIRV_CROSS_C_API_HLSL + if (compiler->backend != SPVC_BACKEND_HLSL) + { + compiler->context->report_error("HLSL function used on a non-HLSL backend."); + return SPVC_ERROR_INVALID_ARGUMENT; + } + + HLSLVertexAttributeRemap re; + auto &hlsl = *static_cast(compiler->compiler.get()); + for (size_t i = 0; i < count; i++) + { + re.location = remap[i].location; + re.semantic = remap[i].semantic; + hlsl.add_vertex_attribute_remap(re); + } + + return SPVC_SUCCESS; +#else + (void)remap; + (void)count; + compiler->context->report_error("HLSL function used on a non-HLSL backend."); + return SPVC_ERROR_INVALID_ARGUMENT; +#endif +} + +spvc_variable_id spvc_compiler_hlsl_remap_num_workgroups_builtin(spvc_compiler compiler) +{ +#if SPIRV_CROSS_C_API_HLSL + if (compiler->backend != SPVC_BACKEND_HLSL) + { + compiler->context->report_error("HLSL function used on a non-HLSL backend."); + return 0; + } + + auto &hlsl = *static_cast(compiler->compiler.get()); + return hlsl.remap_num_workgroups_builtin(); +#else + compiler->context->report_error("HLSL function used on a non-HLSL backend."); + return 0; +#endif +} + +spvc_bool spvc_compiler_msl_is_rasterization_disabled(spvc_compiler compiler) +{ +#if SPIRV_CROSS_C_API_MSL + if (compiler->backend != SPVC_BACKEND_MSL) + { + compiler->context->report_error("MSL function used on a non-MSL backend."); + return SPVC_FALSE; + } + + auto &msl = *static_cast(compiler->compiler.get()); + return msl.get_is_rasterization_disabled() ? SPVC_TRUE : SPVC_FALSE; +#else + compiler->context->report_error("MSL function used on a non-MSL backend."); + return SPVC_FALSE; +#endif +} + +spvc_bool spvc_compiler_msl_needs_swizzle_buffer(spvc_compiler compiler) +{ +#if SPIRV_CROSS_C_API_MSL + if (compiler->backend != SPVC_BACKEND_MSL) + { + compiler->context->report_error("MSL function used on a non-MSL backend."); + return SPVC_FALSE; + } + + auto &msl = *static_cast(compiler->compiler.get()); + return msl.needs_swizzle_buffer() ? SPVC_TRUE : SPVC_FALSE; +#else + compiler->context->report_error("MSL function used on a non-MSL backend."); + return SPVC_FALSE; +#endif +} + +spvc_bool spvc_compiler_msl_needs_buffer_size_buffer(spvc_compiler compiler) +{ +#if SPIRV_CROSS_C_API_MSL + if (compiler->backend != SPVC_BACKEND_MSL) + { + compiler->context->report_error("MSL function used on a non-MSL backend."); + return SPVC_FALSE; + } + + auto &msl = *static_cast(compiler->compiler.get()); + return msl.needs_buffer_size_buffer() ? 
SPVC_TRUE : SPVC_FALSE; +#else + compiler->context->report_error("MSL function used on a non-MSL backend."); + return SPVC_FALSE; +#endif +} + +spvc_bool spvc_compiler_msl_needs_aux_buffer(spvc_compiler compiler) +{ + return spvc_compiler_msl_needs_swizzle_buffer(compiler); +} + +spvc_bool spvc_compiler_msl_needs_output_buffer(spvc_compiler compiler) +{ +#if SPIRV_CROSS_C_API_MSL + if (compiler->backend != SPVC_BACKEND_MSL) + { + compiler->context->report_error("MSL function used on a non-MSL backend."); + return SPVC_FALSE; + } + + auto &msl = *static_cast(compiler->compiler.get()); + return msl.needs_output_buffer() ? SPVC_TRUE : SPVC_FALSE; +#else + compiler->context->report_error("MSL function used on a non-MSL backend."); + return SPVC_FALSE; +#endif +} + +spvc_bool spvc_compiler_msl_needs_patch_output_buffer(spvc_compiler compiler) +{ +#if SPIRV_CROSS_C_API_MSL + if (compiler->backend != SPVC_BACKEND_MSL) + { + compiler->context->report_error("MSL function used on a non-MSL backend."); + return SPVC_FALSE; + } + + auto &msl = *static_cast(compiler->compiler.get()); + return msl.needs_patch_output_buffer() ? SPVC_TRUE : SPVC_FALSE; +#else + compiler->context->report_error("MSL function used on a non-MSL backend."); + return SPVC_FALSE; +#endif +} + +spvc_bool spvc_compiler_msl_needs_input_threadgroup_mem(spvc_compiler compiler) +{ +#if SPIRV_CROSS_C_API_MSL + if (compiler->backend != SPVC_BACKEND_MSL) + { + compiler->context->report_error("MSL function used on a non-MSL backend."); + return SPVC_FALSE; + } + + auto &msl = *static_cast(compiler->compiler.get()); + return msl.needs_input_threadgroup_mem() ? SPVC_TRUE : SPVC_FALSE; +#else + compiler->context->report_error("MSL function used on a non-MSL backend."); + return SPVC_FALSE; +#endif +} + +spvc_result spvc_compiler_msl_add_vertex_attribute(spvc_compiler compiler, const spvc_msl_vertex_attribute *va) +{ +#if SPIRV_CROSS_C_API_MSL + if (compiler->backend != SPVC_BACKEND_MSL) + { + compiler->context->report_error("MSL function used on a non-MSL backend."); + return SPVC_ERROR_INVALID_ARGUMENT; + } + + auto &msl = *static_cast(compiler->compiler.get()); + MSLVertexAttr attr; + attr.location = va->location; + attr.msl_buffer = va->msl_buffer; + attr.msl_offset = va->msl_offset; + attr.msl_stride = va->msl_stride; + attr.format = static_cast(va->format); + attr.builtin = static_cast(va->builtin); + attr.per_instance = va->per_instance != 0; + msl.add_msl_vertex_attribute(attr); + return SPVC_SUCCESS; +#else + (void)va; + compiler->context->report_error("MSL function used on a non-MSL backend."); + return SPVC_ERROR_INVALID_ARGUMENT; +#endif +} + +spvc_result spvc_compiler_msl_add_resource_binding(spvc_compiler compiler, + const spvc_msl_resource_binding *binding) +{ +#if SPIRV_CROSS_C_API_MSL + if (compiler->backend != SPVC_BACKEND_MSL) + { + compiler->context->report_error("MSL function used on a non-MSL backend."); + return SPVC_ERROR_INVALID_ARGUMENT; + } + + auto &msl = *static_cast(compiler->compiler.get()); + MSLResourceBinding bind; + bind.binding = binding->binding; + bind.desc_set = binding->desc_set; + bind.stage = static_cast(binding->stage); + bind.msl_buffer = binding->msl_buffer; + bind.msl_texture = binding->msl_texture; + bind.msl_sampler = binding->msl_sampler; + msl.add_msl_resource_binding(bind); + return SPVC_SUCCESS; +#else + (void)binding; + compiler->context->report_error("MSL function used on a non-MSL backend."); + return SPVC_ERROR_INVALID_ARGUMENT; +#endif +} + +spvc_result 
+
+spvc_result spvc_compiler_msl_add_dynamic_buffer(spvc_compiler compiler, unsigned desc_set, unsigned binding, unsigned index)
+{
+#if SPIRV_CROSS_C_API_MSL
+    if (compiler->backend != SPVC_BACKEND_MSL)
+    {
+        compiler->context->report_error("MSL function used on a non-MSL backend.");
+        return SPVC_ERROR_INVALID_ARGUMENT;
+    }
+
+    auto &msl = *static_cast<CompilerMSL *>(compiler->compiler.get());
+    msl.add_dynamic_buffer(desc_set, binding, index);
+    return SPVC_SUCCESS;
+#else
+    (void)binding;
+    (void)desc_set;
+    (void)index;
+    compiler->context->report_error("MSL function used on a non-MSL backend.");
+    return SPVC_ERROR_INVALID_ARGUMENT;
+#endif
+}
+
+spvc_result spvc_compiler_msl_add_discrete_descriptor_set(spvc_compiler compiler, unsigned desc_set)
+{
+#if SPIRV_CROSS_C_API_MSL
+    if (compiler->backend != SPVC_BACKEND_MSL)
+    {
+        compiler->context->report_error("MSL function used on a non-MSL backend.");
+        return SPVC_ERROR_INVALID_ARGUMENT;
+    }
+
+    auto &msl = *static_cast<CompilerMSL *>(compiler->compiler.get());
+    msl.add_discrete_descriptor_set(desc_set);
+    return SPVC_SUCCESS;
+#else
+    (void)desc_set;
+    compiler->context->report_error("MSL function used on a non-MSL backend.");
+    return SPVC_ERROR_INVALID_ARGUMENT;
+#endif
+}
+
+spvc_result spvc_compiler_msl_set_argument_buffer_device_address_space(spvc_compiler compiler, unsigned desc_set, spvc_bool device_address)
+{
+#if SPIRV_CROSS_C_API_MSL
+    if (compiler->backend != SPVC_BACKEND_MSL)
+    {
+        compiler->context->report_error("MSL function used on a non-MSL backend.");
+        return SPVC_ERROR_INVALID_ARGUMENT;
+    }
+
+    auto &msl = *static_cast<CompilerMSL *>(compiler->compiler.get());
+    msl.set_argument_buffer_device_address_space(desc_set, bool(device_address));
+    return SPVC_SUCCESS;
+#else
+    (void)desc_set;
+    (void)device_address;
+    compiler->context->report_error("MSL function used on a non-MSL backend.");
+    return SPVC_ERROR_INVALID_ARGUMENT;
+#endif
+}
+
+spvc_bool spvc_compiler_msl_is_vertex_attribute_used(spvc_compiler compiler, unsigned location)
+{
+#if SPIRV_CROSS_C_API_MSL
+    if (compiler->backend != SPVC_BACKEND_MSL)
+    {
+        compiler->context->report_error("MSL function used on a non-MSL backend.");
+        return SPVC_FALSE;
+    }
+
+    auto &msl = *static_cast<CompilerMSL *>(compiler->compiler.get());
+    return msl.is_msl_vertex_attribute_used(location) ? SPVC_TRUE : SPVC_FALSE;
+#else
+    (void)location;
+    compiler->context->report_error("MSL function used on a non-MSL backend.");
+    return SPVC_FALSE;
+#endif
+}
+
+spvc_bool spvc_compiler_msl_is_resource_used(spvc_compiler compiler, SpvExecutionModel model, unsigned set,
+                                             unsigned binding)
+{
+#if SPIRV_CROSS_C_API_MSL
+    if (compiler->backend != SPVC_BACKEND_MSL)
+    {
+        compiler->context->report_error("MSL function used on a non-MSL backend.");
+        return SPVC_FALSE;
+    }
+
+    auto &msl = *static_cast<CompilerMSL *>(compiler->compiler.get());
+    return msl.is_msl_resource_binding_used(static_cast<spv::ExecutionModel>(model), set, binding) ? SPVC_TRUE :
+                                                                                                     SPVC_FALSE;
+#else
+    (void)model;
+    (void)set;
+    (void)binding;
+    compiler->context->report_error("MSL function used on a non-MSL backend.");
+    return SPVC_FALSE;
+#endif
+}
+
+#if SPIRV_CROSS_C_API_MSL
+static void spvc_convert_msl_sampler(MSLConstexprSampler &samp, const spvc_msl_constexpr_sampler *sampler)
+{
+    samp.s_address = static_cast<MSLSamplerAddress>(sampler->s_address);
+    samp.t_address = static_cast<MSLSamplerAddress>(sampler->t_address);
+    samp.r_address = static_cast<MSLSamplerAddress>(sampler->r_address);
+    samp.lod_clamp_min = sampler->lod_clamp_min;
+    samp.lod_clamp_max = sampler->lod_clamp_max;
+    samp.lod_clamp_enable = sampler->lod_clamp_enable != 0;
+    samp.min_filter = static_cast<MSLSamplerFilter>(sampler->min_filter);
+    samp.mag_filter = static_cast<MSLSamplerFilter>(sampler->mag_filter);
+    samp.mip_filter = static_cast<MSLSamplerMipFilter>(sampler->mip_filter);
+    samp.compare_enable = sampler->compare_enable != 0;
+    samp.anisotropy_enable = sampler->anisotropy_enable != 0;
+    samp.max_anisotropy = sampler->max_anisotropy;
+    samp.compare_func = static_cast<MSLSamplerCompareFunc>(sampler->compare_func);
+    samp.coord = static_cast<MSLSamplerCoord>(sampler->coord);
+    samp.border_color = static_cast<MSLSamplerBorderColor>(sampler->border_color);
+}
+
+static void spvc_convert_msl_sampler_ycbcr_conversion(MSLConstexprSampler &samp, const spvc_msl_sampler_ycbcr_conversion *conv)
+{
+    samp.ycbcr_conversion_enable = conv != nullptr;
+    if (conv == nullptr)
+        return;
+    samp.planes = conv->planes;
+    samp.resolution = static_cast<MSLFormatResolution>(conv->resolution);
+    samp.chroma_filter = static_cast<MSLSamplerFilter>(conv->chroma_filter);
+    samp.x_chroma_offset = static_cast<MSLChromaLocation>(conv->x_chroma_offset);
+    samp.y_chroma_offset = static_cast<MSLChromaLocation>(conv->y_chroma_offset);
+    for (int i = 0; i < 4; i++)
+        samp.swizzle[i] = static_cast<MSLComponentSwizzle>(conv->swizzle[i]);
+    samp.ycbcr_model = static_cast<MSLSamplerYCbCrModelConversion>(conv->ycbcr_model);
+    samp.ycbcr_range = static_cast<MSLSamplerYCbCrRange>(conv->ycbcr_range);
+    samp.bpc = conv->bpc;
+}
+#endif
+
+spvc_result spvc_compiler_msl_remap_constexpr_sampler(spvc_compiler compiler, spvc_variable_id id,
+                                                      const spvc_msl_constexpr_sampler *sampler)
+{
+#if SPIRV_CROSS_C_API_MSL
+    if (compiler->backend != SPVC_BACKEND_MSL)
+    {
+        compiler->context->report_error("MSL function used on a non-MSL backend.");
+        return SPVC_ERROR_INVALID_ARGUMENT;
+    }
+
+    auto &msl = *static_cast<CompilerMSL *>(compiler->compiler.get());
+    MSLConstexprSampler samp;
+    spvc_convert_msl_sampler(samp, sampler);
+    msl.remap_constexpr_sampler(id, samp);
+    return SPVC_SUCCESS;
+#else
+    (void)id;
+    (void)sampler;
+    compiler->context->report_error("MSL function used on a non-MSL backend.");
+    return SPVC_ERROR_INVALID_ARGUMENT;
+#endif
+}
+
+spvc_result spvc_compiler_msl_remap_constexpr_sampler_by_binding(spvc_compiler compiler,
+                                                                 unsigned desc_set, unsigned binding,
+                                                                 const spvc_msl_constexpr_sampler *sampler)
+{
+#if SPIRV_CROSS_C_API_MSL
+    if (compiler->backend != SPVC_BACKEND_MSL)
+    {
+        compiler->context->report_error("MSL function used on a non-MSL backend.");
+        return SPVC_ERROR_INVALID_ARGUMENT;
+    }
+
+    auto &msl = *static_cast<CompilerMSL *>(compiler->compiler.get());
+    MSLConstexprSampler samp;
+    spvc_convert_msl_sampler(samp, sampler);
+    msl.remap_constexpr_sampler_by_binding(desc_set, binding, samp);
+    return SPVC_SUCCESS;
+#else
+    (void)desc_set;
+    (void)binding;
+    (void)sampler;
+    compiler->context->report_error("MSL function used on a non-MSL backend.");
+    return SPVC_ERROR_INVALID_ARGUMENT;
+#endif
+}
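For reference, constexpr samplers are baked into the generated MSL by filling the C struct and remapping either by variable ID or by set/binding, as the two functions above show. A minimal sketch, assuming an MSL-backend `compiler`; set 0 and binding 3 are hypothetical:

    spvc_msl_constexpr_sampler samp;
    spvc_msl_constexpr_sampler_init(&samp);           /* backend defaults */
    samp.min_filter = SPVC_MSL_SAMPLER_FILTER_LINEAR;
    samp.mag_filter = SPVC_MSL_SAMPLER_FILTER_LINEAR;
    samp.s_address = SPVC_MSL_SAMPLER_ADDRESS_REPEAT;
    samp.t_address = SPVC_MSL_SAMPLER_ADDRESS_REPEAT;
    spvc_compiler_msl_remap_constexpr_sampler_by_binding(compiler, 0, 3, &samp);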
+
+spvc_result spvc_compiler_msl_remap_constexpr_sampler_ycbcr(spvc_compiler compiler, spvc_variable_id id,
+                                                            const spvc_msl_constexpr_sampler *sampler,
+                                                            const spvc_msl_sampler_ycbcr_conversion *conv)
+{
+#if SPIRV_CROSS_C_API_MSL
+    if (compiler->backend != SPVC_BACKEND_MSL)
+    {
+        compiler->context->report_error("MSL function used on a non-MSL backend.");
+        return SPVC_ERROR_INVALID_ARGUMENT;
+    }
+
+    auto &msl = *static_cast<CompilerMSL *>(compiler->compiler.get());
+    MSLConstexprSampler samp;
+    spvc_convert_msl_sampler(samp, sampler);
+    spvc_convert_msl_sampler_ycbcr_conversion(samp, conv);
+    msl.remap_constexpr_sampler(id, samp);
+    return SPVC_SUCCESS;
+#else
+    (void)id;
+    (void)sampler;
+    (void)conv;
+    compiler->context->report_error("MSL function used on a non-MSL backend.");
+    return SPVC_ERROR_INVALID_ARGUMENT;
+#endif
+}
+
+spvc_result spvc_compiler_msl_remap_constexpr_sampler_by_binding_ycbcr(spvc_compiler compiler,
+                                                                       unsigned desc_set, unsigned binding,
+                                                                       const spvc_msl_constexpr_sampler *sampler,
+                                                                       const spvc_msl_sampler_ycbcr_conversion *conv)
+{
+#if SPIRV_CROSS_C_API_MSL
+    if (compiler->backend != SPVC_BACKEND_MSL)
+    {
+        compiler->context->report_error("MSL function used on a non-MSL backend.");
+        return SPVC_ERROR_INVALID_ARGUMENT;
+    }
+
+    auto &msl = *static_cast<CompilerMSL *>(compiler->compiler.get());
+    MSLConstexprSampler samp;
+    spvc_convert_msl_sampler(samp, sampler);
+    spvc_convert_msl_sampler_ycbcr_conversion(samp, conv);
+    msl.remap_constexpr_sampler_by_binding(desc_set, binding, samp);
+    return SPVC_SUCCESS;
+#else
+    (void)desc_set;
+    (void)binding;
+    (void)sampler;
+    (void)conv;
+    compiler->context->report_error("MSL function used on a non-MSL backend.");
+    return SPVC_ERROR_INVALID_ARGUMENT;
+#endif
+}
+
+spvc_result spvc_compiler_msl_set_fragment_output_components(spvc_compiler compiler, unsigned location,
+                                                             unsigned components)
+{
+#if SPIRV_CROSS_C_API_MSL
+    if (compiler->backend != SPVC_BACKEND_MSL)
+    {
+        compiler->context->report_error("MSL function used on a non-MSL backend.");
+        return SPVC_ERROR_INVALID_ARGUMENT;
+    }
+
+    auto &msl = *static_cast<CompilerMSL *>(compiler->compiler.get());
+    msl.set_fragment_output_components(location, components);
+    return SPVC_SUCCESS;
+#else
+    (void)location;
+    (void)components;
+    compiler->context->report_error("MSL function used on a non-MSL backend.");
+    return SPVC_ERROR_INVALID_ARGUMENT;
+#endif
+}
+
+unsigned spvc_compiler_msl_get_automatic_resource_binding(spvc_compiler compiler, spvc_variable_id id)
+{
+#if SPIRV_CROSS_C_API_MSL
+    if (compiler->backend != SPVC_BACKEND_MSL)
+    {
+        compiler->context->report_error("MSL function used on a non-MSL backend.");
+        return uint32_t(-1);
+    }
+
+    auto &msl = *static_cast<CompilerMSL *>(compiler->compiler.get());
+    return msl.get_automatic_msl_resource_binding(id);
+#else
+    (void)id;
+    compiler->context->report_error("MSL function used on a non-MSL backend.");
+    return uint32_t(-1);
+#endif
+}
+
+unsigned spvc_compiler_msl_get_automatic_resource_binding_secondary(spvc_compiler compiler, spvc_variable_id id)
+{
+#if SPIRV_CROSS_C_API_MSL
+    if (compiler->backend != SPVC_BACKEND_MSL)
+    {
+        compiler->context->report_error("MSL function used on a non-MSL backend.");
+        return uint32_t(-1);
+    }
+
+    auto &msl = *static_cast<CompilerMSL *>(compiler->compiler.get());
+    return msl.get_automatic_msl_resource_binding_secondary(id);
+#else
+    (void)id;
+    compiler->context->report_error("MSL function used on a non-MSL backend.");
+    return uint32_t(-1);
+#endif
+}
+
+spvc_result spvc_compiler_compile(spvc_compiler compiler, const char **source)
+{
+    SPVC_BEGIN_SAFE_SCOPE
+    {
+        auto result = compiler->compiler->compile();
+        if (result.empty())
+        {
+            compiler->context->report_error("Unsupported SPIR-V.");
+            return SPVC_ERROR_UNSUPPORTED_SPIRV;
+        }
+
+        *source = compiler->context->allocate_name(result);
+        if (!*source)
+        {
+            compiler->context->report_error("Out of memory.");
+            return SPVC_ERROR_OUT_OF_MEMORY;
+        }
+        return SPVC_SUCCESS;
+    }
+    SPVC_END_SAFE_SCOPE(compiler->context, SPVC_ERROR_UNSUPPORTED_SPIRV)
+}
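For reference, `spvc_compiler_compile` hands back a context-owned string, and on failure the detailed message comes from the context rather than the return code. A minimal sketch, assuming a `context` and `compiler` set up as in the header's API below:

    const char *source = NULL;
    if (spvc_compiler_compile(compiler, &source) != SPVC_SUCCESS)
        fprintf(stderr, "compile failed: %s\n",
                spvc_context_get_last_error_string(context));
    else
        puts(source); /* owned by the context; freed when the context is destroyed */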
+
+bool spvc_resources_s::copy_resources(SmallVector<spvc_reflected_resource> &outputs,
+                                      const SmallVector<Resource> &inputs)
+{
+    for (auto &i : inputs)
+    {
+        spvc_reflected_resource r;
+        r.base_type_id = i.base_type_id;
+        r.type_id = i.type_id;
+        r.id = i.id;
+        r.name = context->allocate_name(i.name);
+        if (!r.name)
+            return false;
+
+        outputs.push_back(r);
+    }
+
+    return true;
+}
+
+bool spvc_resources_s::copy_resources(const ShaderResources &resources)
+{
+    if (!copy_resources(uniform_buffers, resources.uniform_buffers))
+        return false;
+    if (!copy_resources(storage_buffers, resources.storage_buffers))
+        return false;
+    if (!copy_resources(stage_inputs, resources.stage_inputs))
+        return false;
+    if (!copy_resources(stage_outputs, resources.stage_outputs))
+        return false;
+    if (!copy_resources(subpass_inputs, resources.subpass_inputs))
+        return false;
+    if (!copy_resources(storage_images, resources.storage_images))
+        return false;
+    if (!copy_resources(sampled_images, resources.sampled_images))
+        return false;
+    if (!copy_resources(atomic_counters, resources.atomic_counters))
+        return false;
+    if (!copy_resources(push_constant_buffers, resources.push_constant_buffers))
+        return false;
+    if (!copy_resources(separate_images, resources.separate_images))
+        return false;
+    if (!copy_resources(separate_samplers, resources.separate_samplers))
+        return false;
+    if (!copy_resources(acceleration_structures, resources.acceleration_structures))
+        return false;
+
+    return true;
+}
+
+spvc_result spvc_compiler_get_active_interface_variables(spvc_compiler compiler, spvc_set *set)
+{
+    SPVC_BEGIN_SAFE_SCOPE
+    {
+        std::unique_ptr<spvc_set_s> ptr(new (std::nothrow) spvc_set_s);
+        if (!ptr)
+        {
+            compiler->context->report_error("Out of memory.");
+            return SPVC_ERROR_OUT_OF_MEMORY;
+        }
+
+        auto active = compiler->compiler->get_active_interface_variables();
+        ptr->set = std::move(active);
+        *set = ptr.get();
+        compiler->context->allocations.push_back(std::move(ptr));
+    }
+    SPVC_END_SAFE_SCOPE(compiler->context, SPVC_ERROR_INVALID_ARGUMENT)
+    return SPVC_SUCCESS;
+}
+
+spvc_result spvc_compiler_set_enabled_interface_variables(spvc_compiler compiler, spvc_set set)
+{
+    SPVC_BEGIN_SAFE_SCOPE
+    {
+        compiler->compiler->set_enabled_interface_variables(set->set);
+    }
+    SPVC_END_SAFE_SCOPE(compiler->context, SPVC_ERROR_INVALID_ARGUMENT)
+    return SPVC_SUCCESS;
+}
+
+spvc_result spvc_compiler_create_shader_resources_for_active_variables(spvc_compiler compiler, spvc_resources *resources,
+                                                                       spvc_set set)
+{
+    SPVC_BEGIN_SAFE_SCOPE
+    {
+        std::unique_ptr<spvc_resources_s> res(new (std::nothrow) spvc_resources_s);
+        if (!res)
+        {
+            compiler->context->report_error("Out of memory.");
+            return SPVC_ERROR_OUT_OF_MEMORY;
+        }
+
+        res->context = compiler->context;
+        auto accessed_resources = compiler->compiler->get_shader_resources(set->set);
+
+        if (!res->copy_resources(accessed_resources))
+        {
+            res->context->report_error("Out of memory.");
+            return SPVC_ERROR_OUT_OF_MEMORY;
+        }
+        *resources = res.get();
+        compiler->context->allocations.push_back(std::move(res));
+    }
+    SPVC_END_SAFE_SCOPE(compiler->context, SPVC_ERROR_OUT_OF_MEMORY)
+    return SPVC_SUCCESS;
+}
+
+spvc_result spvc_compiler_create_shader_resources(spvc_compiler compiler, spvc_resources *resources)
+{
+    SPVC_BEGIN_SAFE_SCOPE
+    {
+        std::unique_ptr<spvc_resources_s> res(new (std::nothrow) spvc_resources_s);
+        if (!res)
+        {
compiler->context->report_error("Out of memory."); + return SPVC_ERROR_OUT_OF_MEMORY; + } + + res->context = compiler->context; + auto accessed_resources = compiler->compiler->get_shader_resources(); + + if (!res->copy_resources(accessed_resources)) + { + res->context->report_error("Out of memory."); + return SPVC_ERROR_OUT_OF_MEMORY; + } + + *resources = res.get(); + compiler->context->allocations.push_back(std::move(res)); + } + SPVC_END_SAFE_SCOPE(compiler->context, SPVC_ERROR_OUT_OF_MEMORY) + return SPVC_SUCCESS; +} + +spvc_result spvc_resources_get_resource_list_for_type(spvc_resources resources, spvc_resource_type type, + const spvc_reflected_resource **resource_list, + size_t *resource_size) +{ + const SmallVector *list = nullptr; + switch (type) + { + case SPVC_RESOURCE_TYPE_UNIFORM_BUFFER: + list = &resources->uniform_buffers; + break; + + case SPVC_RESOURCE_TYPE_STORAGE_BUFFER: + list = &resources->storage_buffers; + break; + + case SPVC_RESOURCE_TYPE_STAGE_INPUT: + list = &resources->stage_inputs; + break; + + case SPVC_RESOURCE_TYPE_STAGE_OUTPUT: + list = &resources->stage_outputs; + break; + + case SPVC_RESOURCE_TYPE_SUBPASS_INPUT: + list = &resources->subpass_inputs; + break; + + case SPVC_RESOURCE_TYPE_STORAGE_IMAGE: + list = &resources->storage_images; + break; + + case SPVC_RESOURCE_TYPE_SAMPLED_IMAGE: + list = &resources->sampled_images; + break; + + case SPVC_RESOURCE_TYPE_ATOMIC_COUNTER: + list = &resources->atomic_counters; + break; + + case SPVC_RESOURCE_TYPE_PUSH_CONSTANT: + list = &resources->push_constant_buffers; + break; + + case SPVC_RESOURCE_TYPE_SEPARATE_IMAGE: + list = &resources->separate_images; + break; + + case SPVC_RESOURCE_TYPE_SEPARATE_SAMPLERS: + list = &resources->separate_samplers; + break; + + case SPVC_RESOURCE_TYPE_ACCELERATION_STRUCTURE: + list = &resources->acceleration_structures; + break; + + default: + break; + } + + if (!list) + { + resources->context->report_error("Invalid argument."); + return SPVC_ERROR_INVALID_ARGUMENT; + } + + *resource_size = list->size(); + *resource_list = list->data(); + return SPVC_SUCCESS; +} + +void spvc_compiler_set_decoration(spvc_compiler compiler, SpvId id, SpvDecoration decoration, unsigned argument) +{ + compiler->compiler->set_decoration(id, static_cast(decoration), argument); +} + +void spvc_compiler_set_decoration_string(spvc_compiler compiler, SpvId id, SpvDecoration decoration, + const char *argument) +{ + compiler->compiler->set_decoration_string(id, static_cast(decoration), argument); +} + +void spvc_compiler_set_name(spvc_compiler compiler, SpvId id, const char *argument) +{ + compiler->compiler->set_name(id, argument); +} + +void spvc_compiler_set_member_decoration(spvc_compiler compiler, spvc_type_id id, unsigned member_index, + SpvDecoration decoration, unsigned argument) +{ + compiler->compiler->set_member_decoration(id, member_index, static_cast(decoration), argument); +} + +void spvc_compiler_set_member_decoration_string(spvc_compiler compiler, spvc_type_id id, unsigned member_index, + SpvDecoration decoration, const char *argument) +{ + compiler->compiler->set_member_decoration_string(id, member_index, static_cast(decoration), + argument); +} + +void spvc_compiler_set_member_name(spvc_compiler compiler, spvc_type_id id, unsigned member_index, const char *argument) +{ + compiler->compiler->set_member_name(id, member_index, argument); +} + +void spvc_compiler_unset_decoration(spvc_compiler compiler, SpvId id, SpvDecoration decoration) +{ + compiler->compiler->unset_decoration(id, 
static_cast(decoration)); +} + +void spvc_compiler_unset_member_decoration(spvc_compiler compiler, spvc_type_id id, unsigned member_index, + SpvDecoration decoration) +{ + compiler->compiler->unset_member_decoration(id, member_index, static_cast(decoration)); +} + +spvc_bool spvc_compiler_has_decoration(spvc_compiler compiler, SpvId id, SpvDecoration decoration) +{ + return compiler->compiler->has_decoration(id, static_cast(decoration)) ? SPVC_TRUE : SPVC_FALSE; +} + +spvc_bool spvc_compiler_has_member_decoration(spvc_compiler compiler, spvc_type_id id, unsigned member_index, + SpvDecoration decoration) +{ + return compiler->compiler->has_member_decoration(id, member_index, static_cast(decoration)) ? + SPVC_TRUE : + SPVC_FALSE; +} + +const char *spvc_compiler_get_name(spvc_compiler compiler, SpvId id) +{ + return compiler->compiler->get_name(id).c_str(); +} + +unsigned spvc_compiler_get_decoration(spvc_compiler compiler, SpvId id, SpvDecoration decoration) +{ + return compiler->compiler->get_decoration(id, static_cast(decoration)); +} + +const char *spvc_compiler_get_decoration_string(spvc_compiler compiler, SpvId id, SpvDecoration decoration) +{ + return compiler->compiler->get_decoration_string(id, static_cast(decoration)).c_str(); +} + +unsigned spvc_compiler_get_member_decoration(spvc_compiler compiler, spvc_type_id id, unsigned member_index, + SpvDecoration decoration) +{ + return compiler->compiler->get_member_decoration(id, member_index, static_cast(decoration)); +} + +const char *spvc_compiler_get_member_decoration_string(spvc_compiler compiler, spvc_type_id id, unsigned member_index, + SpvDecoration decoration) +{ + return compiler->compiler->get_member_decoration_string(id, member_index, static_cast(decoration)) + .c_str(); +} + +const char *spvc_compiler_get_member_name(spvc_compiler compiler, spvc_type_id id, unsigned member_index) +{ + return compiler->compiler->get_member_name(id, member_index).c_str(); +} + +spvc_result spvc_compiler_get_entry_points(spvc_compiler compiler, const spvc_entry_point **entry_points, + size_t *num_entry_points) +{ + SPVC_BEGIN_SAFE_SCOPE + { + auto entries = compiler->compiler->get_entry_points_and_stages(); + SmallVector translated; + translated.reserve(entries.size()); + + for (auto &entry : entries) + { + spvc_entry_point new_entry; + new_entry.execution_model = static_cast(entry.execution_model); + new_entry.name = compiler->context->allocate_name(entry.name); + if (!new_entry.name) + { + compiler->context->report_error("Out of memory."); + return SPVC_ERROR_OUT_OF_MEMORY; + } + translated.push_back(new_entry); + } + + auto ptr = spvc_allocate>(); + ptr->buffer = std::move(translated); + *entry_points = ptr->buffer.data(); + *num_entry_points = ptr->buffer.size(); + compiler->context->allocations.push_back(std::move(ptr)); + } + SPVC_END_SAFE_SCOPE(compiler->context, SPVC_ERROR_OUT_OF_MEMORY) + return SPVC_SUCCESS; +} + +spvc_result spvc_compiler_set_entry_point(spvc_compiler compiler, const char *name, SpvExecutionModel model) +{ + compiler->compiler->set_entry_point(name, static_cast(model)); + return SPVC_SUCCESS; +} + +spvc_result spvc_compiler_rename_entry_point(spvc_compiler compiler, const char *old_name, const char *new_name, + SpvExecutionModel model) +{ + SPVC_BEGIN_SAFE_SCOPE + { + compiler->compiler->rename_entry_point(old_name, new_name, static_cast(model)); + } + SPVC_END_SAFE_SCOPE(compiler->context, SPVC_ERROR_INVALID_ARGUMENT) + return SPVC_SUCCESS; +} + +const char *spvc_compiler_get_cleansed_entry_point_name(spvc_compiler 
compiler, const char *name, + SpvExecutionModel model) +{ + SPVC_BEGIN_SAFE_SCOPE + { + auto cleansed_name = + compiler->compiler->get_cleansed_entry_point_name(name, static_cast(model)); + return compiler->context->allocate_name(cleansed_name); + } + SPVC_END_SAFE_SCOPE(compiler->context, nullptr) +} + +void spvc_compiler_set_execution_mode(spvc_compiler compiler, SpvExecutionMode mode) +{ + compiler->compiler->set_execution_mode(static_cast(mode)); +} + +void spvc_compiler_set_execution_mode_with_arguments(spvc_compiler compiler, SpvExecutionMode mode, unsigned arg0, + unsigned arg1, + unsigned arg2) +{ + compiler->compiler->set_execution_mode(static_cast(mode), arg0, arg1, arg2); +} + +void spvc_compiler_unset_execution_mode(spvc_compiler compiler, SpvExecutionMode mode) +{ + compiler->compiler->unset_execution_mode(static_cast(mode)); +} + +spvc_result spvc_compiler_get_execution_modes(spvc_compiler compiler, const SpvExecutionMode **modes, size_t *num_modes) +{ + SPVC_BEGIN_SAFE_SCOPE + { + auto ptr = spvc_allocate>(); + + compiler->compiler->get_execution_mode_bitset().for_each_bit( + [&](uint32_t bit) { ptr->buffer.push_back(static_cast(bit)); }); + + *modes = ptr->buffer.data(); + *num_modes = ptr->buffer.size(); + compiler->context->allocations.push_back(std::move(ptr)); + } + SPVC_END_SAFE_SCOPE(compiler->context, SPVC_ERROR_OUT_OF_MEMORY) + return SPVC_SUCCESS; +} + +unsigned spvc_compiler_get_execution_mode_argument(spvc_compiler compiler, SpvExecutionMode mode) +{ + return compiler->compiler->get_execution_mode_argument(static_cast(mode)); +} + +unsigned spvc_compiler_get_execution_mode_argument_by_index(spvc_compiler compiler, SpvExecutionMode mode, + unsigned index) +{ + return compiler->compiler->get_execution_mode_argument(static_cast(mode), index); +} + +SpvExecutionModel spvc_compiler_get_execution_model(spvc_compiler compiler) +{ + return static_cast(compiler->compiler->get_execution_model()); +} + +spvc_type spvc_compiler_get_type_handle(spvc_compiler compiler, spvc_type_id id) +{ + + SPVC_BEGIN_SAFE_SCOPE + { + return static_cast(&compiler->compiler->get_type(id)); + } + SPVC_END_SAFE_SCOPE(compiler->context, nullptr) +} + +static spvc_basetype convert_basetype(SPIRType::BaseType type) +{ + + return static_cast(type); +} + +spvc_basetype spvc_type_get_basetype(spvc_type type) +{ + return convert_basetype(type->basetype); +} + +unsigned spvc_type_get_bit_width(spvc_type type) +{ + return type->width; +} + +unsigned spvc_type_get_vector_size(spvc_type type) +{ + return type->vecsize; +} + +unsigned spvc_type_get_columns(spvc_type type) +{ + return type->columns; +} + +unsigned spvc_type_get_num_array_dimensions(spvc_type type) +{ + return unsigned(type->array.size()); +} + +spvc_bool spvc_type_array_dimension_is_literal(spvc_type type, unsigned dimension) +{ + return type->array_size_literal[dimension] ? 
SPVC_TRUE : SPVC_FALSE; +} + +SpvId spvc_type_get_array_dimension(spvc_type type, unsigned dimension) +{ + return type->array[dimension]; +} + +unsigned spvc_type_get_num_member_types(spvc_type type) +{ + return unsigned(type->member_types.size()); +} + +spvc_type_id spvc_type_get_member_type(spvc_type type, unsigned index) +{ + return type->member_types[index]; +} + +SpvStorageClass spvc_type_get_storage_class(spvc_type type) +{ + return static_cast(type->storage); +} + + +spvc_type_id spvc_type_get_image_sampled_type(spvc_type type) +{ + return type->image.type; +} + +SpvDim spvc_type_get_image_dimension(spvc_type type) +{ + return static_cast(type->image.dim); +} + +spvc_bool spvc_type_get_image_is_depth(spvc_type type) +{ + return type->image.depth ? SPVC_TRUE : SPVC_FALSE; +} + +spvc_bool spvc_type_get_image_arrayed(spvc_type type) +{ + return type->image.arrayed ? SPVC_TRUE : SPVC_FALSE; +} + +spvc_bool spvc_type_get_image_multisampled(spvc_type type) +{ + return type->image.ms ? SPVC_TRUE : SPVC_FALSE; +} + +spvc_bool spvc_type_get_image_is_storage(spvc_type type) +{ + return type->image.sampled == 2 ? SPVC_TRUE : SPVC_FALSE; +} + +SpvImageFormat spvc_type_get_image_storage_format(spvc_type type) +{ + return static_cast(static_cast(type)->image.format); +} + +SpvAccessQualifier spvc_type_get_image_access_qualifier(spvc_type type) +{ + return static_cast(static_cast(type)->image.access); +} + +spvc_result spvc_compiler_get_declared_struct_size(spvc_compiler compiler, spvc_type struct_type, size_t *size) +{ + SPVC_BEGIN_SAFE_SCOPE + { + *size = compiler->compiler->get_declared_struct_size(*static_cast(struct_type)); + } + SPVC_END_SAFE_SCOPE(compiler->context, SPVC_ERROR_INVALID_ARGUMENT) + return SPVC_SUCCESS; +} + +spvc_result spvc_compiler_get_declared_struct_size_runtime_array(spvc_compiler compiler, spvc_type struct_type, + size_t array_size, size_t *size) +{ + SPVC_BEGIN_SAFE_SCOPE + { + *size = compiler->compiler->get_declared_struct_size_runtime_array(*static_cast(struct_type), + array_size); + } + SPVC_END_SAFE_SCOPE(compiler->context, SPVC_ERROR_INVALID_ARGUMENT) + return SPVC_SUCCESS; +} + +spvc_result spvc_compiler_get_declared_struct_member_size(spvc_compiler compiler, spvc_type struct_type, unsigned index, size_t *size) +{ + SPVC_BEGIN_SAFE_SCOPE + { + *size = compiler->compiler->get_declared_struct_member_size(*static_cast(struct_type), index); + } + SPVC_END_SAFE_SCOPE(compiler->context, SPVC_ERROR_INVALID_ARGUMENT) + return SPVC_SUCCESS; +} + +spvc_result spvc_compiler_type_struct_member_offset(spvc_compiler compiler, spvc_type type, unsigned index, unsigned *offset) +{ + SPVC_BEGIN_SAFE_SCOPE + { + *offset = compiler->compiler->type_struct_member_offset(*static_cast(type), index); + } + SPVC_END_SAFE_SCOPE(compiler->context, SPVC_ERROR_INVALID_ARGUMENT) + return SPVC_SUCCESS; +} + +spvc_result spvc_compiler_type_struct_member_array_stride(spvc_compiler compiler, spvc_type type, unsigned index, unsigned *stride) +{ + SPVC_BEGIN_SAFE_SCOPE + { + *stride = compiler->compiler->type_struct_member_array_stride(*static_cast(type), index); + } + SPVC_END_SAFE_SCOPE(compiler->context, SPVC_ERROR_INVALID_ARGUMENT) + return SPVC_SUCCESS; +} + +spvc_result spvc_compiler_type_struct_member_matrix_stride(spvc_compiler compiler, spvc_type type, unsigned index, unsigned *stride) +{ + SPVC_BEGIN_SAFE_SCOPE + { + *stride = compiler->compiler->type_struct_member_matrix_stride(*static_cast(type), index); + } + SPVC_END_SAFE_SCOPE(compiler->context, SPVC_ERROR_INVALID_ARGUMENT) + return 
SPVC_SUCCESS; +} + +spvc_result spvc_compiler_build_dummy_sampler_for_combined_images(spvc_compiler compiler, spvc_variable_id *id) +{ + SPVC_BEGIN_SAFE_SCOPE + { + *id = compiler->compiler->build_dummy_sampler_for_combined_images(); + } + SPVC_END_SAFE_SCOPE(compiler->context, SPVC_ERROR_INVALID_ARGUMENT) + return SPVC_SUCCESS; +} + +spvc_result spvc_compiler_build_combined_image_samplers(spvc_compiler compiler) +{ + SPVC_BEGIN_SAFE_SCOPE + { + compiler->compiler->build_combined_image_samplers(); + } + SPVC_END_SAFE_SCOPE(compiler->context, SPVC_ERROR_UNSUPPORTED_SPIRV) + return SPVC_SUCCESS; +} + +spvc_result spvc_compiler_get_combined_image_samplers(spvc_compiler compiler, + const spvc_combined_image_sampler **samplers, + size_t *num_samplers) +{ + SPVC_BEGIN_SAFE_SCOPE + { + auto combined = compiler->compiler->get_combined_image_samplers(); + SmallVector translated; + translated.reserve(combined.size()); + for (auto &c : combined) + { + spvc_combined_image_sampler trans = { c.combined_id, c.image_id, c.sampler_id }; + translated.push_back(trans); + } + + auto ptr = spvc_allocate>(); + ptr->buffer = std::move(translated); + *samplers = ptr->buffer.data(); + *num_samplers = ptr->buffer.size(); + compiler->context->allocations.push_back(std::move(ptr)); + } + SPVC_END_SAFE_SCOPE(compiler->context, SPVC_ERROR_OUT_OF_MEMORY) + return SPVC_SUCCESS; +} + +spvc_result spvc_compiler_get_specialization_constants(spvc_compiler compiler, + const spvc_specialization_constant **constants, + size_t *num_constants) +{ + SPVC_BEGIN_SAFE_SCOPE + { + auto spec_constants = compiler->compiler->get_specialization_constants(); + SmallVector translated; + translated.reserve(spec_constants.size()); + for (auto &c : spec_constants) + { + spvc_specialization_constant trans = { c.id, c.constant_id }; + translated.push_back(trans); + } + + auto ptr = spvc_allocate>(); + ptr->buffer = std::move(translated); + *constants = ptr->buffer.data(); + *num_constants = ptr->buffer.size(); + compiler->context->allocations.push_back(std::move(ptr)); + } + SPVC_END_SAFE_SCOPE(compiler->context, SPVC_ERROR_OUT_OF_MEMORY) + return SPVC_SUCCESS; +} + +spvc_constant spvc_compiler_get_constant_handle(spvc_compiler compiler, spvc_variable_id id) +{ + SPVC_BEGIN_SAFE_SCOPE + { + return static_cast(&compiler->compiler->get_constant(id)); + } + SPVC_END_SAFE_SCOPE(compiler->context, nullptr) +} + +spvc_constant_id spvc_compiler_get_work_group_size_specialization_constants(spvc_compiler compiler, + spvc_specialization_constant *x, + spvc_specialization_constant *y, + spvc_specialization_constant *z) +{ + SpecializationConstant tmpx; + SpecializationConstant tmpy; + SpecializationConstant tmpz; + spvc_constant_id ret = compiler->compiler->get_work_group_size_specialization_constants(tmpx, tmpy, tmpz); + x->id = tmpx.id; + x->constant_id = tmpx.constant_id; + y->id = tmpy.id; + y->constant_id = tmpy.constant_id; + z->id = tmpz.id; + z->constant_id = tmpz.constant_id; + return ret; +} + +spvc_result spvc_compiler_get_active_buffer_ranges(spvc_compiler compiler, + spvc_variable_id id, + const spvc_buffer_range **ranges, + size_t *num_ranges) +{ + SPVC_BEGIN_SAFE_SCOPE + { + auto active_ranges = compiler->compiler->get_active_buffer_ranges(id); + SmallVector translated; + translated.reserve(active_ranges.size()); + for (auto &r : active_ranges) + { + spvc_buffer_range trans = { r.index, r.offset, r.range }; + translated.push_back(trans); + } + + auto ptr = spvc_allocate>(); + ptr->buffer = std::move(translated); + *ranges = 
ptr->buffer.data(); + *num_ranges = ptr->buffer.size(); + compiler->context->allocations.push_back(std::move(ptr)); + } + SPVC_END_SAFE_SCOPE(compiler->context, SPVC_ERROR_OUT_OF_MEMORY) + return SPVC_SUCCESS; +} + +float spvc_constant_get_scalar_fp16(spvc_constant constant, unsigned column, unsigned row) +{ + return constant->scalar_f16(column, row); +} + +float spvc_constant_get_scalar_fp32(spvc_constant constant, unsigned column, unsigned row) +{ + return constant->scalar_f32(column, row); +} + +double spvc_constant_get_scalar_fp64(spvc_constant constant, unsigned column, unsigned row) +{ + return constant->scalar_f64(column, row); +} + +unsigned spvc_constant_get_scalar_u32(spvc_constant constant, unsigned column, unsigned row) +{ + return constant->scalar(column, row); +} + +int spvc_constant_get_scalar_i32(spvc_constant constant, unsigned column, unsigned row) +{ + return constant->scalar_i32(column, row); +} + +unsigned spvc_constant_get_scalar_u16(spvc_constant constant, unsigned column, unsigned row) +{ + return constant->scalar_u16(column, row); +} + +int spvc_constant_get_scalar_i16(spvc_constant constant, unsigned column, unsigned row) +{ + return constant->scalar_i16(column, row); +} + +unsigned spvc_constant_get_scalar_u8(spvc_constant constant, unsigned column, unsigned row) +{ + return constant->scalar_u8(column, row); +} + +int spvc_constant_get_scalar_i8(spvc_constant constant, unsigned column, unsigned row) +{ + return constant->scalar_i8(column, row); +} + +void spvc_constant_get_subconstants(spvc_constant constant, const spvc_constant_id **constituents, size_t *count) +{ + static_assert(sizeof(spvc_constant_id) == sizeof(constant->subconstants.front()), "ID size is not consistent."); + *constituents = reinterpret_cast(constant->subconstants.data()); + *count = constant->subconstants.size(); +} + +spvc_type_id spvc_constant_get_type(spvc_constant constant) +{ + return constant->constant_type; +} + +spvc_bool spvc_compiler_get_binary_offset_for_decoration(spvc_compiler compiler, spvc_variable_id id, + SpvDecoration decoration, + unsigned *word_offset) +{ + uint32_t off = 0; + bool ret = compiler->compiler->get_binary_offset_for_decoration(id, static_cast(decoration), off); + if (ret) + { + *word_offset = off; + return SPVC_TRUE; + } + else + return SPVC_FALSE; +} + +spvc_bool spvc_compiler_buffer_is_hlsl_counter_buffer(spvc_compiler compiler, spvc_variable_id id) +{ + return compiler->compiler->buffer_is_hlsl_counter_buffer(id) ? 
SPVC_TRUE : SPVC_FALSE;
+}
+
+spvc_bool spvc_compiler_buffer_get_hlsl_counter_buffer(spvc_compiler compiler, spvc_variable_id id,
+                                                       spvc_variable_id *counter_id)
+{
+    uint32_t buffer;
+    bool ret = compiler->compiler->buffer_get_hlsl_counter_buffer(id, buffer);
+    if (ret)
+    {
+        *counter_id = buffer;
+        return SPVC_TRUE;
+    }
+    else
+        return SPVC_FALSE;
+}
+
+spvc_result spvc_compiler_get_declared_capabilities(spvc_compiler compiler, const SpvCapability **capabilities,
+                                                    size_t *num_capabilities)
+{
+    auto &caps = compiler->compiler->get_declared_capabilities();
+    static_assert(sizeof(SpvCapability) == sizeof(spv::Capability), "Enum size mismatch.");
+    *capabilities = reinterpret_cast<const SpvCapability *>(caps.data());
+    *num_capabilities = caps.size();
+    return SPVC_SUCCESS;
+}
+
+spvc_result spvc_compiler_get_declared_extensions(spvc_compiler compiler, const char ***extensions,
+                                                  size_t *num_extensions)
+{
+    SPVC_BEGIN_SAFE_SCOPE
+    {
+        auto &exts = compiler->compiler->get_declared_extensions();
+        SmallVector<const char *> duped;
+        duped.reserve(exts.size());
+        for (auto &ext : exts)
+            duped.push_back(compiler->context->allocate_name(ext));
+
+        auto ptr = spvc_allocate<TemporaryBuffer<const char *>>();
+        ptr->buffer = std::move(duped);
+        *extensions = ptr->buffer.data();
+        *num_extensions = ptr->buffer.size();
+        compiler->context->allocations.push_back(std::move(ptr));
+    }
+    SPVC_END_SAFE_SCOPE(compiler->context, SPVC_ERROR_OUT_OF_MEMORY)
+    return SPVC_SUCCESS;
+}
+
+const char *spvc_compiler_get_remapped_declared_block_name(spvc_compiler compiler, spvc_variable_id id)
+{
+    SPVC_BEGIN_SAFE_SCOPE
+    {
+        auto name = compiler->compiler->get_remapped_declared_block_name(id);
+        return compiler->context->allocate_name(name);
+    }
+    SPVC_END_SAFE_SCOPE(compiler->context, nullptr)
+}
+
+spvc_result spvc_compiler_get_buffer_block_decorations(spvc_compiler compiler, spvc_variable_id id,
+                                                       const SpvDecoration **decorations, size_t *num_decorations)
+{
+    SPVC_BEGIN_SAFE_SCOPE
+    {
+        auto flags = compiler->compiler->get_buffer_block_flags(id);
+        auto bitset = spvc_allocate<TemporaryBuffer<SpvDecoration>>();
+
+        flags.for_each_bit([&](uint32_t bit) { bitset->buffer.push_back(static_cast<SpvDecoration>(bit)); });
+
+        *decorations = bitset->buffer.data();
+        *num_decorations = bitset->buffer.size();
+        compiler->context->allocations.push_back(std::move(bitset));
+    }
+    SPVC_END_SAFE_SCOPE(compiler->context, SPVC_ERROR_INVALID_ARGUMENT)
+    return SPVC_SUCCESS;
+}
+
+unsigned spvc_msl_get_aux_buffer_struct_version(void)
+{
+    return SPVC_MSL_AUX_BUFFER_STRUCT_VERSION;
+}
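For reference, the `spvc_msl_*_init` helpers that follow exist so C callers pick up the C++ defaults instead of zero-initializing the structs. A minimal sketch, assuming an MSL-backend `compiler`; the location, buffer, and stride values are hypothetical:

    spvc_msl_vertex_attribute attr;
    spvc_msl_vertex_attribute_init(&attr); /* pulls defaults from MSLVertexAttr */
    attr.location = 0;                     /* hypothetical shader input location */
    attr.msl_buffer = 0;                   /* becomes [[buffer(0)]] */
    attr.msl_stride = 16;                  /* hypothetical vertex stride in bytes */
    spvc_compiler_msl_add_vertex_attribute(compiler, &attr);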
+
+void spvc_msl_vertex_attribute_init(spvc_msl_vertex_attribute *attr)
+{
+#if SPIRV_CROSS_C_API_MSL
+    MSLVertexAttr attr_default;
+    attr->location = attr_default.location;
+    attr->per_instance = attr_default.per_instance ? SPVC_TRUE : SPVC_FALSE;
+    attr->format = static_cast<spvc_msl_vertex_format>(attr_default.format);
+    attr->builtin = static_cast<SpvBuiltIn>(attr_default.builtin);
+    attr->msl_buffer = attr_default.msl_buffer;
+    attr->msl_offset = attr_default.msl_offset;
+    attr->msl_stride = attr_default.msl_stride;
+#else
+    memset(attr, 0, sizeof(*attr));
+#endif
+}
+
+void spvc_msl_resource_binding_init(spvc_msl_resource_binding *binding)
+{
+#if SPIRV_CROSS_C_API_MSL
+    MSLResourceBinding binding_default;
+    binding->desc_set = binding_default.desc_set;
+    binding->binding = binding_default.binding;
+    binding->msl_buffer = binding_default.msl_buffer;
+    binding->msl_texture = binding_default.msl_texture;
+    binding->msl_sampler = binding_default.msl_sampler;
+    binding->stage = static_cast<SpvExecutionModel>(binding_default.stage);
+#else
+    memset(binding, 0, sizeof(*binding));
+#endif
+}
+
+void spvc_msl_constexpr_sampler_init(spvc_msl_constexpr_sampler *sampler)
+{
+#if SPIRV_CROSS_C_API_MSL
+    MSLConstexprSampler defaults;
+    sampler->anisotropy_enable = defaults.anisotropy_enable ? SPVC_TRUE : SPVC_FALSE;
+    sampler->border_color = static_cast<spvc_msl_sampler_border_color>(defaults.border_color);
+    sampler->compare_enable = defaults.compare_enable ? SPVC_TRUE : SPVC_FALSE;
+    sampler->coord = static_cast<spvc_msl_sampler_coord>(defaults.coord);
+    sampler->compare_func = static_cast<spvc_msl_sampler_compare_func>(defaults.compare_func);
+    sampler->lod_clamp_enable = defaults.lod_clamp_enable ? SPVC_TRUE : SPVC_FALSE;
+    sampler->lod_clamp_max = defaults.lod_clamp_max;
+    sampler->lod_clamp_min = defaults.lod_clamp_min;
+    sampler->mag_filter = static_cast<spvc_msl_sampler_filter>(defaults.mag_filter);
+    sampler->min_filter = static_cast<spvc_msl_sampler_filter>(defaults.min_filter);
+    sampler->mip_filter = static_cast<spvc_msl_sampler_mip_filter>(defaults.mip_filter);
+    sampler->max_anisotropy = defaults.max_anisotropy;
+    sampler->s_address = static_cast<spvc_msl_sampler_address>(defaults.s_address);
+    sampler->t_address = static_cast<spvc_msl_sampler_address>(defaults.t_address);
+    sampler->r_address = static_cast<spvc_msl_sampler_address>(defaults.r_address);
+#else
+    memset(sampler, 0, sizeof(*sampler));
+#endif
+}
+
+void spvc_msl_sampler_ycbcr_conversion_init(spvc_msl_sampler_ycbcr_conversion *conv)
+{
+#if SPIRV_CROSS_C_API_MSL
+    MSLConstexprSampler defaults;
+    conv->planes = defaults.planes;
+    conv->resolution = static_cast<spvc_msl_format_resolution>(defaults.resolution);
+    conv->chroma_filter = static_cast<spvc_msl_sampler_filter>(defaults.chroma_filter);
+    conv->x_chroma_offset = static_cast<spvc_msl_chroma_location>(defaults.x_chroma_offset);
+    conv->y_chroma_offset = static_cast<spvc_msl_chroma_location>(defaults.y_chroma_offset);
+    for (int i = 0; i < 4; i++)
+        conv->swizzle[i] = static_cast<spvc_msl_component_swizzle>(defaults.swizzle[i]);
+    conv->ycbcr_model = static_cast<spvc_msl_sampler_ycbcr_model_conversion>(defaults.ycbcr_model);
+    conv->ycbcr_range = static_cast<spvc_msl_sampler_ycbcr_range>(defaults.ycbcr_range);
+#else
+    memset(conv, 0, sizeof(*conv));
+#endif
+}
+
+unsigned spvc_compiler_get_current_id_bound(spvc_compiler compiler)
+{
+    return compiler->compiler->get_current_id_bound();
+}
+
+void spvc_get_version(unsigned *major, unsigned *minor, unsigned *patch)
+{
+    *major = SPVC_C_API_VERSION_MAJOR;
+    *minor = SPVC_C_API_VERSION_MINOR;
+    *patch = SPVC_C_API_VERSION_PATCH;
+}
+
+const char *spvc_get_commit_revision_and_timestamp(void)
+{
+#ifdef HAVE_SPIRV_CROSS_GIT_VERSION
+    return SPIRV_CROSS_GIT_REVISION;
+#else
+    return "";
+#endif
+}
+
+#ifdef _MSC_VER
+#pragma warning(pop)
+#endif
diff --git a/third_party/rust/spirv_cross/src/vendor/SPIRV-Cross/spirv_cross_c.h b/third_party/rust/spirv_cross/src/vendor/SPIRV-Cross/spirv_cross_c.h
new file mode 100644
index 000000000000..67f0bd2c6e44
--- /dev/null
+++ b/third_party/rust/spirv_cross/src/vendor/SPIRV-Cross/spirv_cross_c.h
@@ -0,0 +1,832 @@
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+#ifndef SPIRV_CROSS_C_API_H
+#define SPIRV_CROSS_C_API_H
+
+#include <stddef.h>
+#include "spirv.h"
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#define SPVC_C_API_VERSION_MAJOR 0
+
+#define SPVC_C_API_VERSION_MINOR 19
+
+#define SPVC_C_API_VERSION_PATCH 0
+
+#if !defined(SPVC_PUBLIC_API)
+#if defined(SPVC_EXPORT_SYMBOLS)
+
+#if defined(__GNUC__)
+#define SPVC_PUBLIC_API __attribute__((visibility("default")))
+#elif defined(_MSC_VER)
+#define SPVC_PUBLIC_API __declspec(dllexport)
+#else
+#define SPVC_PUBLIC_API
+#endif
+#else
+#define SPVC_PUBLIC_API
+#endif
+#endif
+
+SPVC_PUBLIC_API void spvc_get_version(unsigned *major, unsigned *minor, unsigned *patch);
+
+SPVC_PUBLIC_API const char *spvc_get_commit_revision_and_timestamp(void);
+
+typedef struct spvc_context_s *spvc_context;
+typedef struct spvc_parsed_ir_s *spvc_parsed_ir;
+typedef struct spvc_compiler_s *spvc_compiler;
+typedef struct spvc_compiler_options_s *spvc_compiler_options;
+typedef struct spvc_resources_s *spvc_resources;
+struct spvc_type_s;
+typedef const struct spvc_type_s *spvc_type;
+typedef struct spvc_constant_s *spvc_constant;
+struct spvc_set_s;
+typedef const struct spvc_set_s *spvc_set;
+
+typedef SpvId spvc_type_id;
+
+typedef SpvId spvc_variable_id;
+
+typedef SpvId spvc_constant_id;
+
+typedef struct spvc_reflected_resource
+{
+    spvc_variable_id id;
+    spvc_type_id base_type_id;
+    spvc_type_id type_id;
+    const char *name;
+} spvc_reflected_resource;
+
+typedef struct spvc_entry_point
+{
+    SpvExecutionModel execution_model;
+    const char *name;
+} spvc_entry_point;
+
+typedef struct spvc_combined_image_sampler
+{
+    spvc_variable_id combined_id;
+    spvc_variable_id image_id;
+    spvc_variable_id sampler_id;
+} spvc_combined_image_sampler;
+
+typedef struct spvc_specialization_constant
+{
+    spvc_constant_id id;
+    unsigned constant_id;
+} spvc_specialization_constant;
+
+typedef struct spvc_buffer_range
+{
+    unsigned index;
+    size_t offset;
+    size_t range;
+} spvc_buffer_range;
+
+typedef struct spvc_hlsl_root_constants
+{
+    unsigned start;
+    unsigned end;
+    unsigned binding;
+    unsigned space;
+} spvc_hlsl_root_constants;
+
+typedef struct spvc_hlsl_vertex_attribute_remap
+{
+    unsigned location;
+    const char *semantic;
+} spvc_hlsl_vertex_attribute_remap;
+
+typedef unsigned char spvc_bool;
+#define SPVC_TRUE ((spvc_bool)1)
+#define SPVC_FALSE ((spvc_bool)0)
+
+typedef enum spvc_result
+{
+    SPVC_SUCCESS = 0,
+
+    SPVC_ERROR_INVALID_SPIRV = -1,
+
+    SPVC_ERROR_UNSUPPORTED_SPIRV = -2,
+
+    SPVC_ERROR_OUT_OF_MEMORY = -3,
+
+    SPVC_ERROR_INVALID_ARGUMENT = -4,
+
+    SPVC_ERROR_INT_MAX = 0x7fffffff
+} spvc_result;
+
+typedef enum spvc_capture_mode
+{
+    SPVC_CAPTURE_MODE_COPY = 0,
+
+    SPVC_CAPTURE_MODE_TAKE_OWNERSHIP = 1,
+
+    SPVC_CAPTURE_MODE_INT_MAX = 0x7fffffff
+} spvc_capture_mode;
+
+typedef enum spvc_backend
+{
+    SPVC_BACKEND_NONE = 0,
+    SPVC_BACKEND_GLSL = 1,
+    SPVC_BACKEND_HLSL = 2,
+    SPVC_BACKEND_MSL = 3,
+    SPVC_BACKEND_CPP = 4,
+    SPVC_BACKEND_JSON = 5,
+    SPVC_BACKEND_INT_MAX = 0x7fffffff
+} spvc_backend;
+
+typedef enum spvc_resource_type
+{
+    SPVC_RESOURCE_TYPE_UNKNOWN = 0,
+    SPVC_RESOURCE_TYPE_UNIFORM_BUFFER = 1,
+    SPVC_RESOURCE_TYPE_STORAGE_BUFFER = 2,
+    SPVC_RESOURCE_TYPE_STAGE_INPUT = 3,
+    SPVC_RESOURCE_TYPE_STAGE_OUTPUT = 4,
+    SPVC_RESOURCE_TYPE_SUBPASS_INPUT = 5,
+    SPVC_RESOURCE_TYPE_STORAGE_IMAGE = 6,
+    SPVC_RESOURCE_TYPE_SAMPLED_IMAGE = 7,
+    SPVC_RESOURCE_TYPE_ATOMIC_COUNTER = 8,
+    SPVC_RESOURCE_TYPE_PUSH_CONSTANT = 9,
+    SPVC_RESOURCE_TYPE_SEPARATE_IMAGE = 10,
SPVC_RESOURCE_TYPE_SEPARATE_SAMPLERS = 11, + SPVC_RESOURCE_TYPE_ACCELERATION_STRUCTURE = 12, + SPVC_RESOURCE_TYPE_INT_MAX = 0x7fffffff +} spvc_resource_type; + + +typedef enum spvc_basetype +{ + SPVC_BASETYPE_UNKNOWN = 0, + SPVC_BASETYPE_VOID = 1, + SPVC_BASETYPE_BOOLEAN = 2, + SPVC_BASETYPE_INT8 = 3, + SPVC_BASETYPE_UINT8 = 4, + SPVC_BASETYPE_INT16 = 5, + SPVC_BASETYPE_UINT16 = 6, + SPVC_BASETYPE_INT32 = 7, + SPVC_BASETYPE_UINT32 = 8, + SPVC_BASETYPE_INT64 = 9, + SPVC_BASETYPE_UINT64 = 10, + SPVC_BASETYPE_ATOMIC_COUNTER = 11, + SPVC_BASETYPE_FP16 = 12, + SPVC_BASETYPE_FP32 = 13, + SPVC_BASETYPE_FP64 = 14, + SPVC_BASETYPE_STRUCT = 15, + SPVC_BASETYPE_IMAGE = 16, + SPVC_BASETYPE_SAMPLED_IMAGE = 17, + SPVC_BASETYPE_SAMPLER = 18, + SPVC_BASETYPE_ACCELERATION_STRUCTURE = 19, + + SPVC_BASETYPE_INT_MAX = 0x7fffffff +} spvc_basetype; + +#define SPVC_COMPILER_OPTION_COMMON_BIT 0x1000000 +#define SPVC_COMPILER_OPTION_GLSL_BIT 0x2000000 +#define SPVC_COMPILER_OPTION_HLSL_BIT 0x4000000 +#define SPVC_COMPILER_OPTION_MSL_BIT 0x8000000 +#define SPVC_COMPILER_OPTION_LANG_BITS 0x0f000000 +#define SPVC_COMPILER_OPTION_ENUM_BITS 0xffffff + +#define SPVC_MAKE_MSL_VERSION(major, minor, patch) ((major) * 10000 + (minor) * 100 + (patch)) + + +typedef enum spvc_msl_platform +{ + SPVC_MSL_PLATFORM_IOS = 0, + SPVC_MSL_PLATFORM_MACOS = 1, + SPVC_MSL_PLATFORM_MAX_INT = 0x7fffffff +} spvc_msl_platform; + + +typedef enum spvc_msl_vertex_format +{ + SPVC_MSL_VERTEX_FORMAT_OTHER = 0, + SPVC_MSL_VERTEX_FORMAT_UINT8 = 1, + SPVC_MSL_VERTEX_FORMAT_UINT16 = 2 +} spvc_msl_vertex_format; + + +typedef struct spvc_msl_vertex_attribute +{ + unsigned location; + unsigned msl_buffer; + unsigned msl_offset; + unsigned msl_stride; + spvc_bool per_instance; + spvc_msl_vertex_format format; + SpvBuiltIn builtin; +} spvc_msl_vertex_attribute; + + + + +SPVC_PUBLIC_API void spvc_msl_vertex_attribute_init(spvc_msl_vertex_attribute *attr); + + +typedef struct spvc_msl_resource_binding +{ + SpvExecutionModel stage; + unsigned desc_set; + unsigned binding; + unsigned msl_buffer; + unsigned msl_texture; + unsigned msl_sampler; +} spvc_msl_resource_binding; + + + + + +SPVC_PUBLIC_API void spvc_msl_resource_binding_init(spvc_msl_resource_binding *binding); + +#define SPVC_MSL_PUSH_CONSTANT_DESC_SET (~(0u)) +#define SPVC_MSL_PUSH_CONSTANT_BINDING (0) +#define SPVC_MSL_SWIZZLE_BUFFER_BINDING (~(1u)) +#define SPVC_MSL_BUFFER_SIZE_BUFFER_BINDING (~(2u)) +#define SPVC_MSL_ARGUMENT_BUFFER_BINDING (~(3u)) + + +#define SPVC_MSL_AUX_BUFFER_STRUCT_VERSION 1 + + +SPVC_PUBLIC_API unsigned spvc_msl_get_aux_buffer_struct_version(void); + + +typedef enum spvc_msl_sampler_coord +{ + SPVC_MSL_SAMPLER_COORD_NORMALIZED = 0, + SPVC_MSL_SAMPLER_COORD_PIXEL = 1, + SPVC_MSL_SAMPLER_INT_MAX = 0x7fffffff +} spvc_msl_sampler_coord; + + +typedef enum spvc_msl_sampler_filter +{ + SPVC_MSL_SAMPLER_FILTER_NEAREST = 0, + SPVC_MSL_SAMPLER_FILTER_LINEAR = 1, + SPVC_MSL_SAMPLER_FILTER_INT_MAX = 0x7fffffff +} spvc_msl_sampler_filter; + + +typedef enum spvc_msl_sampler_mip_filter +{ + SPVC_MSL_SAMPLER_MIP_FILTER_NONE = 0, + SPVC_MSL_SAMPLER_MIP_FILTER_NEAREST = 1, + SPVC_MSL_SAMPLER_MIP_FILTER_LINEAR = 2, + SPVC_MSL_SAMPLER_MIP_FILTER_INT_MAX = 0x7fffffff +} spvc_msl_sampler_mip_filter; + + +typedef enum spvc_msl_sampler_address +{ + SPVC_MSL_SAMPLER_ADDRESS_CLAMP_TO_ZERO = 0, + SPVC_MSL_SAMPLER_ADDRESS_CLAMP_TO_EDGE = 1, + SPVC_MSL_SAMPLER_ADDRESS_CLAMP_TO_BORDER = 2, + SPVC_MSL_SAMPLER_ADDRESS_REPEAT = 3, + SPVC_MSL_SAMPLER_ADDRESS_MIRRORED_REPEAT = 4, + 
SPVC_MSL_SAMPLER_ADDRESS_INT_MAX = 0x7fffffff +} spvc_msl_sampler_address; + + +typedef enum spvc_msl_sampler_compare_func +{ + SPVC_MSL_SAMPLER_COMPARE_FUNC_NEVER = 0, + SPVC_MSL_SAMPLER_COMPARE_FUNC_LESS = 1, + SPVC_MSL_SAMPLER_COMPARE_FUNC_LESS_EQUAL = 2, + SPVC_MSL_SAMPLER_COMPARE_FUNC_GREATER = 3, + SPVC_MSL_SAMPLER_COMPARE_FUNC_GREATER_EQUAL = 4, + SPVC_MSL_SAMPLER_COMPARE_FUNC_EQUAL = 5, + SPVC_MSL_SAMPLER_COMPARE_FUNC_NOT_EQUAL = 6, + SPVC_MSL_SAMPLER_COMPARE_FUNC_ALWAYS = 7, + SPVC_MSL_SAMPLER_COMPARE_FUNC_INT_MAX = 0x7fffffff +} spvc_msl_sampler_compare_func; + + +typedef enum spvc_msl_sampler_border_color +{ + SPVC_MSL_SAMPLER_BORDER_COLOR_TRANSPARENT_BLACK = 0, + SPVC_MSL_SAMPLER_BORDER_COLOR_OPAQUE_BLACK = 1, + SPVC_MSL_SAMPLER_BORDER_COLOR_OPAQUE_WHITE = 2, + SPVC_MSL_SAMPLER_BORDER_COLOR_INT_MAX = 0x7fffffff +} spvc_msl_sampler_border_color; + + +typedef enum spvc_msl_format_resolution +{ + SPVC_MSL_FORMAT_RESOLUTION_444 = 0, + SPVC_MSL_FORMAT_RESOLUTION_422, + SPVC_MSL_FORMAT_RESOLUTION_420, + SPVC_MSL_FORMAT_RESOLUTION_INT_MAX = 0x7fffffff +} spvc_msl_format_resolution; + + +typedef enum spvc_msl_chroma_location +{ + SPVC_MSL_CHROMA_LOCATION_COSITED_EVEN = 0, + SPVC_MSL_CHROMA_LOCATION_MIDPOINT, + SPVC_MSL_CHROMA_LOCATION_INT_MAX = 0x7fffffff +} spvc_msl_chroma_location; + + +typedef enum spvc_msl_component_swizzle +{ + SPVC_MSL_COMPONENT_SWIZZLE_IDENTITY = 0, + SPVC_MSL_COMPONENT_SWIZZLE_ZERO, + SPVC_MSL_COMPONENT_SWIZZLE_ONE, + SPVC_MSL_COMPONENT_SWIZZLE_R, + SPVC_MSL_COMPONENT_SWIZZLE_G, + SPVC_MSL_COMPONENT_SWIZZLE_B, + SPVC_MSL_COMPONENT_SWIZZLE_A, + SPVC_MSL_COMPONENT_SWIZZLE_INT_MAX = 0x7fffffff +} spvc_msl_component_swizzle; + + +typedef enum spvc_msl_sampler_ycbcr_model_conversion +{ + SPVC_MSL_SAMPLER_YCBCR_MODEL_CONVERSION_RGB_IDENTITY = 0, + SPVC_MSL_SAMPLER_YCBCR_MODEL_CONVERSION_YCBCR_IDENTITY, + SPVC_MSL_SAMPLER_YCBCR_MODEL_CONVERSION_YCBCR_BT_709, + SPVC_MSL_SAMPLER_YCBCR_MODEL_CONVERSION_YCBCR_BT_601, + SPVC_MSL_SAMPLER_YCBCR_MODEL_CONVERSION_YCBCR_BT_2020, + SPVC_MSL_SAMPLER_YCBCR_MODEL_CONVERSION_INT_MAX = 0x7fffffff +} spvc_msl_sampler_ycbcr_model_conversion; + + +typedef enum spvc_msl_sampler_ycbcr_range +{ + SPVC_MSL_SAMPLER_YCBCR_RANGE_ITU_FULL = 0, + SPVC_MSL_SAMPLER_YCBCR_RANGE_ITU_NARROW, + SPVC_MSL_SAMPLER_YCBCR_RANGE_INT_MAX = 0x7fffffff +} spvc_msl_sampler_ycbcr_range; + + +typedef struct spvc_msl_constexpr_sampler +{ + spvc_msl_sampler_coord coord; + spvc_msl_sampler_filter min_filter; + spvc_msl_sampler_filter mag_filter; + spvc_msl_sampler_mip_filter mip_filter; + spvc_msl_sampler_address s_address; + spvc_msl_sampler_address t_address; + spvc_msl_sampler_address r_address; + spvc_msl_sampler_compare_func compare_func; + spvc_msl_sampler_border_color border_color; + float lod_clamp_min; + float lod_clamp_max; + int max_anisotropy; + + spvc_bool compare_enable; + spvc_bool lod_clamp_enable; + spvc_bool anisotropy_enable; +} spvc_msl_constexpr_sampler; + + + + + +SPVC_PUBLIC_API void spvc_msl_constexpr_sampler_init(spvc_msl_constexpr_sampler *sampler); + + +typedef struct spvc_msl_sampler_ycbcr_conversion +{ + unsigned planes; + spvc_msl_format_resolution resolution; + spvc_msl_sampler_filter chroma_filter; + spvc_msl_chroma_location x_chroma_offset; + spvc_msl_chroma_location y_chroma_offset; + spvc_msl_component_swizzle swizzle[4]; + spvc_msl_sampler_ycbcr_model_conversion ycbcr_model; + spvc_msl_sampler_ycbcr_range ycbcr_range; + unsigned bpc; +} spvc_msl_sampler_ycbcr_conversion; + + + + + +SPVC_PUBLIC_API void 
spvc_msl_sampler_ycbcr_conversion_init(spvc_msl_sampler_ycbcr_conversion *conv); + + +typedef enum spvc_compiler_option +{ + SPVC_COMPILER_OPTION_UNKNOWN = 0, + + SPVC_COMPILER_OPTION_FORCE_TEMPORARY = 1 | SPVC_COMPILER_OPTION_COMMON_BIT, + SPVC_COMPILER_OPTION_FLATTEN_MULTIDIMENSIONAL_ARRAYS = 2 | SPVC_COMPILER_OPTION_COMMON_BIT, + SPVC_COMPILER_OPTION_FIXUP_DEPTH_CONVENTION = 3 | SPVC_COMPILER_OPTION_COMMON_BIT, + SPVC_COMPILER_OPTION_FLIP_VERTEX_Y = 4 | SPVC_COMPILER_OPTION_COMMON_BIT, + + SPVC_COMPILER_OPTION_GLSL_SUPPORT_NONZERO_BASE_INSTANCE = 5 | SPVC_COMPILER_OPTION_GLSL_BIT, + SPVC_COMPILER_OPTION_GLSL_SEPARATE_SHADER_OBJECTS = 6 | SPVC_COMPILER_OPTION_GLSL_BIT, + SPVC_COMPILER_OPTION_GLSL_ENABLE_420PACK_EXTENSION = 7 | SPVC_COMPILER_OPTION_GLSL_BIT, + SPVC_COMPILER_OPTION_GLSL_VERSION = 8 | SPVC_COMPILER_OPTION_GLSL_BIT, + SPVC_COMPILER_OPTION_GLSL_ES = 9 | SPVC_COMPILER_OPTION_GLSL_BIT, + SPVC_COMPILER_OPTION_GLSL_VULKAN_SEMANTICS = 10 | SPVC_COMPILER_OPTION_GLSL_BIT, + SPVC_COMPILER_OPTION_GLSL_ES_DEFAULT_FLOAT_PRECISION_HIGHP = 11 | SPVC_COMPILER_OPTION_GLSL_BIT, + SPVC_COMPILER_OPTION_GLSL_ES_DEFAULT_INT_PRECISION_HIGHP = 12 | SPVC_COMPILER_OPTION_GLSL_BIT, + + SPVC_COMPILER_OPTION_HLSL_SHADER_MODEL = 13 | SPVC_COMPILER_OPTION_HLSL_BIT, + SPVC_COMPILER_OPTION_HLSL_POINT_SIZE_COMPAT = 14 | SPVC_COMPILER_OPTION_HLSL_BIT, + SPVC_COMPILER_OPTION_HLSL_POINT_COORD_COMPAT = 15 | SPVC_COMPILER_OPTION_HLSL_BIT, + SPVC_COMPILER_OPTION_HLSL_SUPPORT_NONZERO_BASE_VERTEX_BASE_INSTANCE = 16 | SPVC_COMPILER_OPTION_HLSL_BIT, + + SPVC_COMPILER_OPTION_MSL_VERSION = 17 | SPVC_COMPILER_OPTION_MSL_BIT, + SPVC_COMPILER_OPTION_MSL_TEXEL_BUFFER_TEXTURE_WIDTH = 18 | SPVC_COMPILER_OPTION_MSL_BIT, + + + SPVC_COMPILER_OPTION_MSL_AUX_BUFFER_INDEX = 19 | SPVC_COMPILER_OPTION_MSL_BIT, + SPVC_COMPILER_OPTION_MSL_SWIZZLE_BUFFER_INDEX = 19 | SPVC_COMPILER_OPTION_MSL_BIT, + + SPVC_COMPILER_OPTION_MSL_INDIRECT_PARAMS_BUFFER_INDEX = 20 | SPVC_COMPILER_OPTION_MSL_BIT, + SPVC_COMPILER_OPTION_MSL_SHADER_OUTPUT_BUFFER_INDEX = 21 | SPVC_COMPILER_OPTION_MSL_BIT, + SPVC_COMPILER_OPTION_MSL_SHADER_PATCH_OUTPUT_BUFFER_INDEX = 22 | SPVC_COMPILER_OPTION_MSL_BIT, + SPVC_COMPILER_OPTION_MSL_SHADER_TESS_FACTOR_OUTPUT_BUFFER_INDEX = 23 | SPVC_COMPILER_OPTION_MSL_BIT, + SPVC_COMPILER_OPTION_MSL_SHADER_INPUT_WORKGROUP_INDEX = 24 | SPVC_COMPILER_OPTION_MSL_BIT, + SPVC_COMPILER_OPTION_MSL_ENABLE_POINT_SIZE_BUILTIN = 25 | SPVC_COMPILER_OPTION_MSL_BIT, + SPVC_COMPILER_OPTION_MSL_DISABLE_RASTERIZATION = 26 | SPVC_COMPILER_OPTION_MSL_BIT, + SPVC_COMPILER_OPTION_MSL_CAPTURE_OUTPUT_TO_BUFFER = 27 | SPVC_COMPILER_OPTION_MSL_BIT, + SPVC_COMPILER_OPTION_MSL_SWIZZLE_TEXTURE_SAMPLES = 28 | SPVC_COMPILER_OPTION_MSL_BIT, + SPVC_COMPILER_OPTION_MSL_PAD_FRAGMENT_OUTPUT_COMPONENTS = 29 | SPVC_COMPILER_OPTION_MSL_BIT, + SPVC_COMPILER_OPTION_MSL_TESS_DOMAIN_ORIGIN_LOWER_LEFT = 30 | SPVC_COMPILER_OPTION_MSL_BIT, + SPVC_COMPILER_OPTION_MSL_PLATFORM = 31 | SPVC_COMPILER_OPTION_MSL_BIT, + SPVC_COMPILER_OPTION_MSL_ARGUMENT_BUFFERS = 32 | SPVC_COMPILER_OPTION_MSL_BIT, + + SPVC_COMPILER_OPTION_GLSL_EMIT_PUSH_CONSTANT_AS_UNIFORM_BUFFER = 33 | SPVC_COMPILER_OPTION_GLSL_BIT, + + SPVC_COMPILER_OPTION_MSL_TEXTURE_BUFFER_NATIVE = 34 | SPVC_COMPILER_OPTION_MSL_BIT, + + SPVC_COMPILER_OPTION_GLSL_EMIT_UNIFORM_BUFFER_AS_PLAIN_UNIFORMS = 35 | SPVC_COMPILER_OPTION_GLSL_BIT, + + SPVC_COMPILER_OPTION_MSL_BUFFER_SIZE_BUFFER_INDEX = 36 | SPVC_COMPILER_OPTION_MSL_BIT, + + SPVC_COMPILER_OPTION_EMIT_LINE_DIRECTIVES = 37 | SPVC_COMPILER_OPTION_COMMON_BIT, + + 
SPVC_COMPILER_OPTION_MSL_MULTIVIEW = 38 | SPVC_COMPILER_OPTION_MSL_BIT, + SPVC_COMPILER_OPTION_MSL_VIEW_MASK_BUFFER_INDEX = 39 | SPVC_COMPILER_OPTION_MSL_BIT, + SPVC_COMPILER_OPTION_MSL_DEVICE_INDEX = 40 | SPVC_COMPILER_OPTION_MSL_BIT, + SPVC_COMPILER_OPTION_MSL_VIEW_INDEX_FROM_DEVICE_INDEX = 41 | SPVC_COMPILER_OPTION_MSL_BIT, + SPVC_COMPILER_OPTION_MSL_DISPATCH_BASE = 42 | SPVC_COMPILER_OPTION_MSL_BIT, + SPVC_COMPILER_OPTION_MSL_DYNAMIC_OFFSETS_BUFFER_INDEX = 43 | SPVC_COMPILER_OPTION_MSL_BIT, + + SPVC_COMPILER_OPTION_INT_MAX = 0x7fffffff +} spvc_compiler_option; + + + + + + + + +SPVC_PUBLIC_API spvc_result spvc_context_create(spvc_context *context); + + +SPVC_PUBLIC_API void spvc_context_destroy(spvc_context context); + + +SPVC_PUBLIC_API void spvc_context_release_allocations(spvc_context context); + + +SPVC_PUBLIC_API const char *spvc_context_get_last_error_string(spvc_context context); + + +typedef void (*spvc_error_callback)(void *userdata, const char *error); +SPVC_PUBLIC_API void spvc_context_set_error_callback(spvc_context context, spvc_error_callback cb, void *userdata); + + +SPVC_PUBLIC_API spvc_result spvc_context_parse_spirv(spvc_context context, const SpvId *spirv, size_t word_count, + spvc_parsed_ir *parsed_ir); + + + + + +SPVC_PUBLIC_API spvc_result spvc_context_create_compiler(spvc_context context, spvc_backend backend, + spvc_parsed_ir parsed_ir, spvc_capture_mode mode, + spvc_compiler *compiler); + + +SPVC_PUBLIC_API unsigned spvc_compiler_get_current_id_bound(spvc_compiler compiler); + + +SPVC_PUBLIC_API spvc_result spvc_compiler_create_compiler_options(spvc_compiler compiler, + spvc_compiler_options *options); + +SPVC_PUBLIC_API spvc_result spvc_compiler_options_set_bool(spvc_compiler_options options, + spvc_compiler_option option, spvc_bool value); +SPVC_PUBLIC_API spvc_result spvc_compiler_options_set_uint(spvc_compiler_options options, + spvc_compiler_option option, unsigned value); + +SPVC_PUBLIC_API spvc_result spvc_compiler_install_compiler_options(spvc_compiler compiler, + spvc_compiler_options options); + + +SPVC_PUBLIC_API spvc_result spvc_compiler_compile(spvc_compiler compiler, const char **source); + + +SPVC_PUBLIC_API spvc_result spvc_compiler_add_header_line(spvc_compiler compiler, const char *line); +SPVC_PUBLIC_API spvc_result spvc_compiler_require_extension(spvc_compiler compiler, const char *ext); +SPVC_PUBLIC_API spvc_result spvc_compiler_flatten_buffer_block(spvc_compiler compiler, spvc_variable_id id); + + + + + +SPVC_PUBLIC_API spvc_result spvc_compiler_hlsl_set_root_constants_layout(spvc_compiler compiler, + const spvc_hlsl_root_constants *constant_info, + size_t count); +SPVC_PUBLIC_API spvc_result spvc_compiler_hlsl_add_vertex_attribute_remap(spvc_compiler compiler, + const spvc_hlsl_vertex_attribute_remap *remap, + size_t remaps); +SPVC_PUBLIC_API spvc_variable_id spvc_compiler_hlsl_remap_num_workgroups_builtin(spvc_compiler compiler); + + + + + +SPVC_PUBLIC_API spvc_bool spvc_compiler_msl_is_rasterization_disabled(spvc_compiler compiler); + + +SPVC_PUBLIC_API spvc_bool spvc_compiler_msl_needs_aux_buffer(spvc_compiler compiler); +SPVC_PUBLIC_API spvc_bool spvc_compiler_msl_needs_swizzle_buffer(spvc_compiler compiler); +SPVC_PUBLIC_API spvc_bool spvc_compiler_msl_needs_buffer_size_buffer(spvc_compiler compiler); + +SPVC_PUBLIC_API spvc_bool spvc_compiler_msl_needs_output_buffer(spvc_compiler compiler); +SPVC_PUBLIC_API spvc_bool spvc_compiler_msl_needs_patch_output_buffer(spvc_compiler compiler); +SPVC_PUBLIC_API spvc_bool 
spvc_compiler_msl_needs_input_threadgroup_mem(spvc_compiler compiler); +SPVC_PUBLIC_API spvc_result spvc_compiler_msl_add_vertex_attribute(spvc_compiler compiler, + const spvc_msl_vertex_attribute *attrs); +SPVC_PUBLIC_API spvc_result spvc_compiler_msl_add_resource_binding(spvc_compiler compiler, + const spvc_msl_resource_binding *binding); +SPVC_PUBLIC_API spvc_result spvc_compiler_msl_add_discrete_descriptor_set(spvc_compiler compiler, unsigned desc_set); +SPVC_PUBLIC_API spvc_result spvc_compiler_msl_set_argument_buffer_device_address_space(spvc_compiler compiler, unsigned desc_set, spvc_bool device_address); +SPVC_PUBLIC_API spvc_bool spvc_compiler_msl_is_vertex_attribute_used(spvc_compiler compiler, unsigned location); +SPVC_PUBLIC_API spvc_bool spvc_compiler_msl_is_resource_used(spvc_compiler compiler, + SpvExecutionModel model, + unsigned set, + unsigned binding); +SPVC_PUBLIC_API spvc_result spvc_compiler_msl_remap_constexpr_sampler(spvc_compiler compiler, spvc_variable_id id, const spvc_msl_constexpr_sampler *sampler); +SPVC_PUBLIC_API spvc_result spvc_compiler_msl_remap_constexpr_sampler_by_binding(spvc_compiler compiler, unsigned desc_set, unsigned binding, const spvc_msl_constexpr_sampler *sampler); +SPVC_PUBLIC_API spvc_result spvc_compiler_msl_remap_constexpr_sampler_ycbcr(spvc_compiler compiler, spvc_variable_id id, const spvc_msl_constexpr_sampler *sampler, const spvc_msl_sampler_ycbcr_conversion *conv); +SPVC_PUBLIC_API spvc_result spvc_compiler_msl_remap_constexpr_sampler_by_binding_ycbcr(spvc_compiler compiler, unsigned desc_set, unsigned binding, const spvc_msl_constexpr_sampler *sampler, const spvc_msl_sampler_ycbcr_conversion *conv); +SPVC_PUBLIC_API spvc_result spvc_compiler_msl_set_fragment_output_components(spvc_compiler compiler, unsigned location, unsigned components); + +SPVC_PUBLIC_API unsigned spvc_compiler_msl_get_automatic_resource_binding(spvc_compiler compiler, spvc_variable_id id); +SPVC_PUBLIC_API unsigned spvc_compiler_msl_get_automatic_resource_binding_secondary(spvc_compiler compiler, spvc_variable_id id); + +SPVC_PUBLIC_API spvc_result spvc_compiler_msl_add_dynamic_buffer(spvc_compiler compiler, unsigned desc_set, unsigned binding, unsigned index); + + + + + +SPVC_PUBLIC_API spvc_result spvc_compiler_get_active_interface_variables(spvc_compiler compiler, spvc_set *set); +SPVC_PUBLIC_API spvc_result spvc_compiler_set_enabled_interface_variables(spvc_compiler compiler, spvc_set set); +SPVC_PUBLIC_API spvc_result spvc_compiler_create_shader_resources(spvc_compiler compiler, spvc_resources *resources); +SPVC_PUBLIC_API spvc_result spvc_compiler_create_shader_resources_for_active_variables(spvc_compiler compiler, + spvc_resources *resources, + spvc_set active); +SPVC_PUBLIC_API spvc_result spvc_resources_get_resource_list_for_type(spvc_resources resources, spvc_resource_type type, + const spvc_reflected_resource **resource_list, + size_t *resource_size); + + + + + +SPVC_PUBLIC_API void spvc_compiler_set_decoration(spvc_compiler compiler, SpvId id, SpvDecoration decoration, + unsigned argument); +SPVC_PUBLIC_API void spvc_compiler_set_decoration_string(spvc_compiler compiler, SpvId id, SpvDecoration decoration, + const char *argument); +SPVC_PUBLIC_API void spvc_compiler_set_name(spvc_compiler compiler, SpvId id, const char *argument); +SPVC_PUBLIC_API void spvc_compiler_set_member_decoration(spvc_compiler compiler, spvc_type_id id, unsigned member_index, + SpvDecoration decoration, unsigned argument); +SPVC_PUBLIC_API void 
spvc_compiler_set_member_decoration_string(spvc_compiler compiler, spvc_type_id id, + unsigned member_index, SpvDecoration decoration, + const char *argument); +SPVC_PUBLIC_API void spvc_compiler_set_member_name(spvc_compiler compiler, spvc_type_id id, unsigned member_index, + const char *argument); +SPVC_PUBLIC_API void spvc_compiler_unset_decoration(spvc_compiler compiler, SpvId id, SpvDecoration decoration); +SPVC_PUBLIC_API void spvc_compiler_unset_member_decoration(spvc_compiler compiler, spvc_type_id id, + unsigned member_index, SpvDecoration decoration); + +SPVC_PUBLIC_API spvc_bool spvc_compiler_has_decoration(spvc_compiler compiler, SpvId id, SpvDecoration decoration); +SPVC_PUBLIC_API spvc_bool spvc_compiler_has_member_decoration(spvc_compiler compiler, spvc_type_id id, + unsigned member_index, SpvDecoration decoration); +SPVC_PUBLIC_API const char *spvc_compiler_get_name(spvc_compiler compiler, SpvId id); +SPVC_PUBLIC_API unsigned spvc_compiler_get_decoration(spvc_compiler compiler, SpvId id, SpvDecoration decoration); +SPVC_PUBLIC_API const char *spvc_compiler_get_decoration_string(spvc_compiler compiler, SpvId id, + SpvDecoration decoration); +SPVC_PUBLIC_API unsigned spvc_compiler_get_member_decoration(spvc_compiler compiler, spvc_type_id id, + unsigned member_index, SpvDecoration decoration); +SPVC_PUBLIC_API const char *spvc_compiler_get_member_decoration_string(spvc_compiler compiler, spvc_type_id id, + unsigned member_index, SpvDecoration decoration); +SPVC_PUBLIC_API const char *spvc_compiler_get_member_name(spvc_compiler compiler, spvc_type_id id, unsigned member_index); + + + + + +SPVC_PUBLIC_API spvc_result spvc_compiler_get_entry_points(spvc_compiler compiler, + const spvc_entry_point **entry_points, + size_t *num_entry_points); +SPVC_PUBLIC_API spvc_result spvc_compiler_set_entry_point(spvc_compiler compiler, const char *name, + SpvExecutionModel model); +SPVC_PUBLIC_API spvc_result spvc_compiler_rename_entry_point(spvc_compiler compiler, const char *old_name, + const char *new_name, SpvExecutionModel model); +SPVC_PUBLIC_API const char *spvc_compiler_get_cleansed_entry_point_name(spvc_compiler compiler, const char *name, + SpvExecutionModel model); +SPVC_PUBLIC_API void spvc_compiler_set_execution_mode(spvc_compiler compiler, SpvExecutionMode mode); +SPVC_PUBLIC_API void spvc_compiler_unset_execution_mode(spvc_compiler compiler, SpvExecutionMode mode); +SPVC_PUBLIC_API void spvc_compiler_set_execution_mode_with_arguments(spvc_compiler compiler, SpvExecutionMode mode, + unsigned arg0, unsigned arg1, unsigned arg2); +SPVC_PUBLIC_API spvc_result spvc_compiler_get_execution_modes(spvc_compiler compiler, const SpvExecutionMode **modes, + size_t *num_modes); +SPVC_PUBLIC_API unsigned spvc_compiler_get_execution_mode_argument(spvc_compiler compiler, SpvExecutionMode mode); +SPVC_PUBLIC_API unsigned spvc_compiler_get_execution_mode_argument_by_index(spvc_compiler compiler, + SpvExecutionMode mode, unsigned index); +SPVC_PUBLIC_API SpvExecutionModel spvc_compiler_get_execution_model(spvc_compiler compiler); + + + + + +SPVC_PUBLIC_API spvc_type spvc_compiler_get_type_handle(spvc_compiler compiler, spvc_type_id id); + +SPVC_PUBLIC_API spvc_basetype spvc_type_get_basetype(spvc_type type); +SPVC_PUBLIC_API unsigned spvc_type_get_bit_width(spvc_type type); +SPVC_PUBLIC_API unsigned spvc_type_get_vector_size(spvc_type type); +SPVC_PUBLIC_API unsigned spvc_type_get_columns(spvc_type type); +SPVC_PUBLIC_API unsigned spvc_type_get_num_array_dimensions(spvc_type type); 
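/* Typical call sequence for the C API declared in this header, as a sketch.
   Error checking is omitted; SPVC_BACKEND_GLSL and
   SPVC_CAPTURE_MODE_TAKE_OWNERSHIP are enum values assumed to be defined
   earlier in this header, and `words`/`word_count` hold a SPIR-V module:

     spvc_context context = NULL;
     spvc_parsed_ir ir = NULL;
     spvc_compiler compiler = NULL;
     spvc_compiler_options options = NULL;
     const char *source = NULL;

     spvc_context_create(&context);
     spvc_context_parse_spirv(context, words, word_count, &ir);
     spvc_context_create_compiler(context, SPVC_BACKEND_GLSL, ir,
                                  SPVC_CAPTURE_MODE_TAKE_OWNERSHIP, &compiler);
     spvc_compiler_create_compiler_options(compiler, &options);
     spvc_compiler_options_set_uint(options, SPVC_COMPILER_OPTION_GLSL_VERSION, 330);
     spvc_compiler_install_compiler_options(compiler, options);
     spvc_compiler_compile(compiler, &source);
     spvc_context_destroy(context);

   Every object handed out through the context (parsed IR, compilers,
   compiled source strings) is owned by the context, so the single
   spvc_context_destroy() call at the end releases all of them. */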
+SPVC_PUBLIC_API spvc_bool spvc_type_array_dimension_is_literal(spvc_type type, unsigned dimension); +SPVC_PUBLIC_API SpvId spvc_type_get_array_dimension(spvc_type type, unsigned dimension); +SPVC_PUBLIC_API unsigned spvc_type_get_num_member_types(spvc_type type); +SPVC_PUBLIC_API spvc_type_id spvc_type_get_member_type(spvc_type type, unsigned index); +SPVC_PUBLIC_API SpvStorageClass spvc_type_get_storage_class(spvc_type type); + + +SPVC_PUBLIC_API spvc_type_id spvc_type_get_image_sampled_type(spvc_type type); +SPVC_PUBLIC_API SpvDim spvc_type_get_image_dimension(spvc_type type); +SPVC_PUBLIC_API spvc_bool spvc_type_get_image_is_depth(spvc_type type); +SPVC_PUBLIC_API spvc_bool spvc_type_get_image_arrayed(spvc_type type); +SPVC_PUBLIC_API spvc_bool spvc_type_get_image_multisampled(spvc_type type); +SPVC_PUBLIC_API spvc_bool spvc_type_get_image_is_storage(spvc_type type); +SPVC_PUBLIC_API SpvImageFormat spvc_type_get_image_storage_format(spvc_type type); +SPVC_PUBLIC_API SpvAccessQualifier spvc_type_get_image_access_qualifier(spvc_type type); + + + + + +SPVC_PUBLIC_API spvc_result spvc_compiler_get_declared_struct_size(spvc_compiler compiler, spvc_type struct_type, size_t *size); +SPVC_PUBLIC_API spvc_result spvc_compiler_get_declared_struct_size_runtime_array(spvc_compiler compiler, + spvc_type struct_type, size_t array_size, size_t *size); +SPVC_PUBLIC_API spvc_result spvc_compiler_get_declared_struct_member_size(spvc_compiler compiler, spvc_type type, unsigned index, size_t *size); + +SPVC_PUBLIC_API spvc_result spvc_compiler_type_struct_member_offset(spvc_compiler compiler, + spvc_type type, unsigned index, unsigned *offset); +SPVC_PUBLIC_API spvc_result spvc_compiler_type_struct_member_array_stride(spvc_compiler compiler, + spvc_type type, unsigned index, unsigned *stride); +SPVC_PUBLIC_API spvc_result spvc_compiler_type_struct_member_matrix_stride(spvc_compiler compiler, + spvc_type type, unsigned index, unsigned *stride); + + + + + +SPVC_PUBLIC_API spvc_result spvc_compiler_build_dummy_sampler_for_combined_images(spvc_compiler compiler, spvc_variable_id *id); +SPVC_PUBLIC_API spvc_result spvc_compiler_build_combined_image_samplers(spvc_compiler compiler); +SPVC_PUBLIC_API spvc_result spvc_compiler_get_combined_image_samplers(spvc_compiler compiler, + const spvc_combined_image_sampler **samplers, + size_t *num_samplers); + + + + + +SPVC_PUBLIC_API spvc_result spvc_compiler_get_specialization_constants(spvc_compiler compiler, + const spvc_specialization_constant **constants, + size_t *num_constants); +SPVC_PUBLIC_API spvc_constant spvc_compiler_get_constant_handle(spvc_compiler compiler, + spvc_constant_id id); + +SPVC_PUBLIC_API spvc_constant_id spvc_compiler_get_work_group_size_specialization_constants(spvc_compiler compiler, + spvc_specialization_constant *x, + spvc_specialization_constant *y, + spvc_specialization_constant *z); + + + + + +SPVC_PUBLIC_API spvc_result spvc_compiler_get_active_buffer_ranges(spvc_compiler compiler, + spvc_variable_id id, + const spvc_buffer_range **ranges, + size_t *num_ranges); + + + + + + + +SPVC_PUBLIC_API float spvc_constant_get_scalar_fp16(spvc_constant constant, unsigned column, unsigned row); +SPVC_PUBLIC_API float spvc_constant_get_scalar_fp32(spvc_constant constant, unsigned column, unsigned row); +SPVC_PUBLIC_API double spvc_constant_get_scalar_fp64(spvc_constant constant, unsigned column, unsigned row); +SPVC_PUBLIC_API unsigned spvc_constant_get_scalar_u32(spvc_constant constant, unsigned column, unsigned row); +SPVC_PUBLIC_API int 
spvc_constant_get_scalar_i32(spvc_constant constant, unsigned column, unsigned row); +SPVC_PUBLIC_API unsigned spvc_constant_get_scalar_u16(spvc_constant constant, unsigned column, unsigned row); +SPVC_PUBLIC_API int spvc_constant_get_scalar_i16(spvc_constant constant, unsigned column, unsigned row); +SPVC_PUBLIC_API unsigned spvc_constant_get_scalar_u8(spvc_constant constant, unsigned column, unsigned row); +SPVC_PUBLIC_API int spvc_constant_get_scalar_i8(spvc_constant constant, unsigned column, unsigned row); +SPVC_PUBLIC_API void spvc_constant_get_subconstants(spvc_constant constant, const spvc_constant_id **constituents, size_t *count); +SPVC_PUBLIC_API spvc_type_id spvc_constant_get_type(spvc_constant constant); + + + + + +SPVC_PUBLIC_API spvc_bool spvc_compiler_get_binary_offset_for_decoration(spvc_compiler compiler, + spvc_variable_id id, + SpvDecoration decoration, + unsigned *word_offset); + +SPVC_PUBLIC_API spvc_bool spvc_compiler_buffer_is_hlsl_counter_buffer(spvc_compiler compiler, spvc_variable_id id); +SPVC_PUBLIC_API spvc_bool spvc_compiler_buffer_get_hlsl_counter_buffer(spvc_compiler compiler, spvc_variable_id id, + spvc_variable_id *counter_id); + +SPVC_PUBLIC_API spvc_result spvc_compiler_get_declared_capabilities(spvc_compiler compiler, + const SpvCapability **capabilities, + size_t *num_capabilities); +SPVC_PUBLIC_API spvc_result spvc_compiler_get_declared_extensions(spvc_compiler compiler, const char ***extensions, + size_t *num_extensions); + +SPVC_PUBLIC_API const char *spvc_compiler_get_remapped_declared_block_name(spvc_compiler compiler, spvc_variable_id id); +SPVC_PUBLIC_API spvc_result spvc_compiler_get_buffer_block_decorations(spvc_compiler compiler, spvc_variable_id id, + const SpvDecoration **decorations, + size_t *num_decorations); + +#ifdef __cplusplus +} +#endif +#endif diff --git a/third_party/rust/spirv_cross/src/vendor/SPIRV-Cross/spirv_cross_containers.hpp b/third_party/rust/spirv_cross/src/vendor/SPIRV-Cross/spirv_cross_containers.hpp new file mode 100644 index 000000000000..fe48e00b017a --- /dev/null +++ b/third_party/rust/spirv_cross/src/vendor/SPIRV-Cross/spirv_cross_containers.hpp @@ -0,0 +1,721 @@ + + + + + + + + + + + + + + + + +#ifndef SPIRV_CROSS_CONTAINERS_HPP +#define SPIRV_CROSS_CONTAINERS_HPP + +#include "spirv_cross_error_handling.hpp" +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#ifdef SPIRV_CROSS_NAMESPACE_OVERRIDE +#define SPIRV_CROSS_NAMESPACE SPIRV_CROSS_NAMESPACE_OVERRIDE +#else +#define SPIRV_CROSS_NAMESPACE spirv_cross +#endif + +namespace SPIRV_CROSS_NAMESPACE +{ +#ifndef SPIRV_CROSS_FORCE_STL_TYPES + +template +class AlignedBuffer +{ +public: + T *data() + { +#if defined(_MSC_VER) && _MSC_VER < 1900 + + + + return reinterpret_cast(u.aligned_char); +#else + return reinterpret_cast(aligned_char); +#endif + } + +private: +#if defined(_MSC_VER) && _MSC_VER < 1900 + + union + { + char aligned_char[sizeof(T) * N]; + double dummy_aligner; + } u; +#else + alignas(T) char aligned_char[sizeof(T) * N]; +#endif +}; + +template +class AlignedBuffer +{ +public: + T *data() + { + return nullptr; + } +}; + + +template +class VectorView +{ +public: + T &operator[](size_t i) + { + return ptr[i]; + } + + const T &operator[](size_t i) const + { + return ptr[i]; + } + + bool empty() const + { + return buffer_size == 0; + } + + size_t size() const + { + return buffer_size; + } + + T *data() + { + return ptr; + } + + const T *data() const + { + return ptr; + } + + T 
*begin() + { + return ptr; + } + + T *end() + { + return ptr + buffer_size; + } + + const T *begin() const + { + return ptr; + } + + const T *end() const + { + return ptr + buffer_size; + } + + T &front() + { + return ptr[0]; + } + + const T &front() const + { + return ptr[0]; + } + + T &back() + { + return ptr[buffer_size - 1]; + } + + const T &back() const + { + return ptr[buffer_size - 1]; + } + + +#if defined(_MSC_VER) && _MSC_VER < 1900 + explicit operator std::vector() const + { + + return std::vector(ptr, ptr + buffer_size); + } +#else + + explicit operator std::vector() const & + { + return std::vector(ptr, ptr + buffer_size); + } + + + explicit operator std::vector() && + { + return std::vector(std::make_move_iterator(ptr), std::make_move_iterator(ptr + buffer_size)); + } +#endif + + + VectorView(const VectorView &) = delete; + void operator=(const VectorView &) = delete; + +protected: + VectorView() = default; + T *ptr = nullptr; + size_t buffer_size = 0; +}; + + + + + +template +class SmallVector : public VectorView +{ +public: + SmallVector() + { + this->ptr = stack_storage.data(); + buffer_capacity = N; + } + + SmallVector(const T *arg_list_begin, const T *arg_list_end) + : SmallVector() + { + auto count = size_t(arg_list_end - arg_list_begin); + reserve(count); + for (size_t i = 0; i < count; i++, arg_list_begin++) + new (&this->ptr[i]) T(*arg_list_begin); + this->buffer_size = count; + } + + SmallVector(SmallVector &&other) SPIRV_CROSS_NOEXCEPT : SmallVector() + { + *this = std::move(other); + } + + SmallVector &operator=(SmallVector &&other) SPIRV_CROSS_NOEXCEPT + { + clear(); + if (other.ptr != other.stack_storage.data()) + { + + if (this->ptr != stack_storage.data()) + free(this->ptr); + this->ptr = other.ptr; + this->buffer_size = other.buffer_size; + buffer_capacity = other.buffer_capacity; + other.ptr = nullptr; + other.buffer_size = 0; + other.buffer_capacity = 0; + } + else + { + + reserve(other.buffer_size); + for (size_t i = 0; i < other.buffer_size; i++) + { + new (&this->ptr[i]) T(std::move(other.ptr[i])); + other.ptr[i].~T(); + } + this->buffer_size = other.buffer_size; + other.buffer_size = 0; + } + return *this; + } + + SmallVector(const SmallVector &other) + : SmallVector() + { + *this = other; + } + + SmallVector &operator=(const SmallVector &other) + { + clear(); + reserve(other.buffer_size); + for (size_t i = 0; i < other.buffer_size; i++) + new (&this->ptr[i]) T(other.ptr[i]); + this->buffer_size = other.buffer_size; + return *this; + } + + explicit SmallVector(size_t count) + : SmallVector() + { + resize(count); + } + + ~SmallVector() + { + clear(); + if (this->ptr != stack_storage.data()) + free(this->ptr); + } + + void clear() + { + for (size_t i = 0; i < this->buffer_size; i++) + this->ptr[i].~T(); + this->buffer_size = 0; + } + + void push_back(const T &t) + { + reserve(this->buffer_size + 1); + new (&this->ptr[this->buffer_size]) T(t); + this->buffer_size++; + } + + void push_back(T &&t) + { + reserve(this->buffer_size + 1); + new (&this->ptr[this->buffer_size]) T(std::move(t)); + this->buffer_size++; + } + + void pop_back() + { + + + if (!this->empty()) + resize(this->buffer_size - 1); + } + + template + void emplace_back(Ts &&... 
ts) + { + reserve(this->buffer_size + 1); + new (&this->ptr[this->buffer_size]) T(std::forward(ts)...); + this->buffer_size++; + } + + void reserve(size_t count) + { + if (count > buffer_capacity) + { + size_t target_capacity = buffer_capacity; + if (target_capacity == 0) + target_capacity = 1; + if (target_capacity < N) + target_capacity = N; + + while (target_capacity < count) + target_capacity <<= 1u; + + T *new_buffer = + target_capacity > N ? static_cast(malloc(target_capacity * sizeof(T))) : stack_storage.data(); + + if (!new_buffer) + SPIRV_CROSS_THROW("Out of memory."); + + + if (new_buffer != this->ptr) + { + + for (size_t i = 0; i < this->buffer_size; i++) + { + new (&new_buffer[i]) T(std::move(this->ptr[i])); + this->ptr[i].~T(); + } + } + + if (this->ptr != stack_storage.data()) + free(this->ptr); + this->ptr = new_buffer; + buffer_capacity = target_capacity; + } + } + + void insert(T *itr, const T *insert_begin, const T *insert_end) + { + auto count = size_t(insert_end - insert_begin); + if (itr == this->end()) + { + reserve(this->buffer_size + count); + for (size_t i = 0; i < count; i++, insert_begin++) + new (&this->ptr[this->buffer_size + i]) T(*insert_begin); + this->buffer_size += count; + } + else + { + if (this->buffer_size + count > buffer_capacity) + { + auto target_capacity = this->buffer_size + count; + if (target_capacity == 0) + target_capacity = 1; + if (target_capacity < N) + target_capacity = N; + + while (target_capacity < count) + target_capacity <<= 1u; + + + T *new_buffer = + target_capacity > N ? static_cast(malloc(target_capacity * sizeof(T))) : stack_storage.data(); + if (!new_buffer) + SPIRV_CROSS_THROW("Out of memory."); + + + + auto *target_itr = new_buffer; + auto *original_source_itr = this->begin(); + + if (new_buffer != this->ptr) + { + while (original_source_itr != itr) + { + new (target_itr) T(std::move(*original_source_itr)); + original_source_itr->~T(); + ++original_source_itr; + ++target_itr; + } + } + + + for (auto *source_itr = insert_begin; source_itr != insert_end; ++source_itr, ++target_itr) + new (target_itr) T(*source_itr); + + + if (new_buffer != this->ptr || insert_begin != insert_end) + { + while (original_source_itr != this->end()) + { + new (target_itr) T(std::move(*original_source_itr)); + original_source_itr->~T(); + ++original_source_itr; + ++target_itr; + } + } + + if (this->ptr != stack_storage.data()) + free(this->ptr); + this->ptr = new_buffer; + buffer_capacity = target_capacity; + } + else + { + + + auto *target_itr = this->end() + count; + auto *source_itr = this->end(); + while (target_itr != this->end() && source_itr != itr) + { + --target_itr; + --source_itr; + new (target_itr) T(std::move(*source_itr)); + } + + + std::move_backward(itr, source_itr, target_itr); + + + while (itr != this->end() && insert_begin != insert_end) + *itr++ = *insert_begin++; + + + while (insert_begin != insert_end) + { + new (itr) T(*insert_begin); + ++itr; + ++insert_begin; + } + } + + this->buffer_size += count; + } + } + + void insert(T *itr, const T &value) + { + insert(itr, &value, &value + 1); + } + + T *erase(T *itr) + { + std::move(itr + 1, this->end(), itr); + this->ptr[--this->buffer_size].~T(); + return itr; + } + + void erase(T *start_erase, T *end_erase) + { + if (end_erase == this->end()) + { + resize(size_t(start_erase - this->begin())); + } + else + { + auto new_size = this->buffer_size - (end_erase - start_erase); + std::move(end_erase, this->end(), start_erase); + resize(new_size); + } + } + + void resize(size_t new_size) + 
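// Shrinking destroys the trailing elements in place; growing goes through
// reserve() (power-of-two capacity growth, with the inline stack storage of
// N elements as the floor) and then default-constructs the new slots.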
{ + if (new_size < this->buffer_size) + { + for (size_t i = new_size; i < this->buffer_size; i++) + this->ptr[i].~T(); + } + else if (new_size > this->buffer_size) + { + reserve(new_size); + for (size_t i = this->buffer_size; i < new_size; i++) + new (&this->ptr[i]) T(); + } + + this->buffer_size = new_size; + } + +private: + size_t buffer_capacity = 0; + AlignedBuffer stack_storage; +}; + + + + +template +using Vector = SmallVector; + +#else + +template +using SmallVector = std::vector; +template +using Vector = std::vector; +template +using VectorView = std::vector; + +#endif + + + + +class ObjectPoolBase +{ +public: + virtual ~ObjectPoolBase() = default; + virtual void free_opaque(void *ptr) = 0; +}; + +template +class ObjectPool : public ObjectPoolBase +{ +public: + explicit ObjectPool(unsigned start_object_count_ = 16) + : start_object_count(start_object_count_) + { + } + + template + T *allocate(P &&... p) + { + if (vacants.empty()) + { + unsigned num_objects = start_object_count << memory.size(); + T *ptr = static_cast(malloc(num_objects * sizeof(T))); + if (!ptr) + return nullptr; + + for (unsigned i = 0; i < num_objects; i++) + vacants.push_back(&ptr[i]); + + memory.emplace_back(ptr); + } + + T *ptr = vacants.back(); + vacants.pop_back(); + new (ptr) T(std::forward
<P>
(p)...); + return ptr; + } + + void free(T *ptr) + { + ptr->~T(); + vacants.push_back(ptr); + } + + void free_opaque(void *ptr) override + { + free(static_cast(ptr)); + } + + void clear() + { + vacants.clear(); + memory.clear(); + } + +protected: + Vector vacants; + + struct MallocDeleter + { + void operator()(T *ptr) + { + ::free(ptr); + } + }; + + SmallVector> memory; + unsigned start_object_count; +}; + +template +class StringStream +{ +public: + StringStream() + { + reset(); + } + + ~StringStream() + { + reset(); + } + + + StringStream(const StringStream &) = delete; + void operator=(const StringStream &) = delete; + + template ::value, int>::type = 0> + StringStream &operator<<(const T &t) + { + auto s = std::to_string(t); + append(s.data(), s.size()); + return *this; + } + + + StringStream &operator<<(uint32_t v) + { + auto s = std::to_string(v); + append(s.data(), s.size()); + return *this; + } + + StringStream &operator<<(char c) + { + append(&c, 1); + return *this; + } + + StringStream &operator<<(const std::string &s) + { + append(s.data(), s.size()); + return *this; + } + + StringStream &operator<<(const char *s) + { + append(s, strlen(s)); + return *this; + } + + template + StringStream &operator<<(const char (&s)[N]) + { + append(s, strlen(s)); + return *this; + } + + std::string str() const + { + std::string ret; + size_t target_size = 0; + for (auto &saved : saved_buffers) + target_size += saved.offset; + target_size += current_buffer.offset; + ret.reserve(target_size); + + for (auto &saved : saved_buffers) + ret.insert(ret.end(), saved.buffer, saved.buffer + saved.offset); + ret.insert(ret.end(), current_buffer.buffer, current_buffer.buffer + current_buffer.offset); + return ret; + } + + void reset() + { + for (auto &saved : saved_buffers) + if (saved.buffer != stack_buffer) + free(saved.buffer); + if (current_buffer.buffer != stack_buffer) + free(current_buffer.buffer); + + saved_buffers.clear(); + current_buffer.buffer = stack_buffer; + current_buffer.offset = 0; + current_buffer.size = sizeof(stack_buffer); + } + +private: + struct Buffer + { + char *buffer = nullptr; + size_t offset = 0; + size_t size = 0; + }; + Buffer current_buffer; + char stack_buffer[StackSize]; + SmallVector saved_buffers; + + void append(const char *s, size_t len) + { + size_t avail = current_buffer.size - current_buffer.offset; + if (avail < len) + { + if (avail > 0) + { + memcpy(current_buffer.buffer + current_buffer.offset, s, avail); + s += avail; + len -= avail; + current_buffer.offset += avail; + } + + saved_buffers.push_back(current_buffer); + size_t target_size = len > BlockSize ? 
len : BlockSize; + current_buffer.buffer = static_cast(malloc(target_size)); + if (!current_buffer.buffer) + SPIRV_CROSS_THROW("Out of memory."); + + memcpy(current_buffer.buffer, s, len); + current_buffer.offset = len; + current_buffer.size = target_size; + } + else + { + memcpy(current_buffer.buffer + current_buffer.offset, s, len); + current_buffer.offset += len; + } + } +}; + +} + +#endif diff --git a/third_party/rust/spirv_cross/src/vendor/SPIRV-Cross/spirv_cross_error_handling.hpp b/third_party/rust/spirv_cross/src/vendor/SPIRV-Cross/spirv_cross_error_handling.hpp new file mode 100644 index 000000000000..0c7a1d59267a --- /dev/null +++ b/third_party/rust/spirv_cross/src/vendor/SPIRV-Cross/spirv_cross_error_handling.hpp @@ -0,0 +1,85 @@ + + + + + + + + + + + + + + + + +#ifndef SPIRV_CROSS_ERROR_HANDLING +#define SPIRV_CROSS_ERROR_HANDLING + +#include +#include +#include +#ifndef SPIRV_CROSS_EXCEPTIONS_TO_ASSERTIONS +#include +#endif + +#ifdef SPIRV_CROSS_NAMESPACE_OVERRIDE +#define SPIRV_CROSS_NAMESPACE SPIRV_CROSS_NAMESPACE_OVERRIDE +#else +#define SPIRV_CROSS_NAMESPACE spirv_cross +#endif + +namespace SPIRV_CROSS_NAMESPACE +{ +#ifdef SPIRV_CROSS_EXCEPTIONS_TO_ASSERTIONS +#if !defined(_MSC_VER) || defined(__clang__) +[[noreturn]] +#endif +inline void +report_and_abort(const std::string &msg) +{ +#ifdef NDEBUG + (void)msg; +#else + fprintf(stderr, "There was a compiler error: %s\n", msg.c_str()); +#endif + fflush(stderr); + abort(); +} + +#define SPIRV_CROSS_THROW(x) report_and_abort(x) +#else +class CompilerError : public std::runtime_error +{ +public: + explicit CompilerError(const std::string &str) + : std::runtime_error(str) + { + } +}; + +#define SPIRV_CROSS_THROW(x) throw CompilerError(x) +#endif + + + + +#if defined(_MSC_VER) && _MSC_VER < 1900 +#define SPIRV_CROSS_NOEXCEPT +#else +#define SPIRV_CROSS_NOEXCEPT noexcept +#endif + +#if __cplusplus >= 201402l +#define SPIRV_CROSS_DEPRECATED(reason) [[deprecated(reason)]] +#elif defined(__GNUC__) +#define SPIRV_CROSS_DEPRECATED(reason) __attribute__((deprecated)) +#elif defined(_MSC_VER) +#define SPIRV_CROSS_DEPRECATED(reason) __declspec(deprecated(reason)) +#else +#define SPIRV_CROSS_DEPRECATED(reason) +#endif +} + +#endif diff --git a/third_party/rust/spirv_cross/src/vendor/SPIRV-Cross/spirv_cross_parsed_ir.cpp b/third_party/rust/spirv_cross/src/vendor/SPIRV-Cross/spirv_cross_parsed_ir.cpp new file mode 100644 index 000000000000..af335e96b9f4 --- /dev/null +++ b/third_party/rust/spirv_cross/src/vendor/SPIRV-Cross/spirv_cross_parsed_ir.cpp @@ -0,0 +1,806 @@ + + + + + + + + + + + + + + + + +#include "spirv_cross_parsed_ir.hpp" +#include +#include + +using namespace std; +using namespace spv; + +namespace SPIRV_CROSS_NAMESPACE +{ +ParsedIR::ParsedIR() +{ + + + pool_group.reset(new ObjectPoolGroup); + + pool_group->pools[TypeType].reset(new ObjectPool); + pool_group->pools[TypeVariable].reset(new ObjectPool); + pool_group->pools[TypeConstant].reset(new ObjectPool); + pool_group->pools[TypeFunction].reset(new ObjectPool); + pool_group->pools[TypeFunctionPrototype].reset(new ObjectPool); + pool_group->pools[TypeBlock].reset(new ObjectPool); + pool_group->pools[TypeExtension].reset(new ObjectPool); + pool_group->pools[TypeExpression].reset(new ObjectPool); + pool_group->pools[TypeConstantOp].reset(new ObjectPool); + pool_group->pools[TypeCombinedImageSampler].reset(new ObjectPool); + pool_group->pools[TypeAccessChain].reset(new ObjectPool); + pool_group->pools[TypeUndef].reset(new ObjectPool); + pool_group->pools[TypeString].reset(new 
ObjectPool); +} + + +ParsedIR::ParsedIR(ParsedIR &&other) SPIRV_CROSS_NOEXCEPT +{ + *this = move(other); +} + +ParsedIR &ParsedIR::operator=(ParsedIR &&other) SPIRV_CROSS_NOEXCEPT +{ + if (this != &other) + { + pool_group = move(other.pool_group); + spirv = move(other.spirv); + meta = move(other.meta); + for (int i = 0; i < TypeCount; i++) + ids_for_type[i] = move(other.ids_for_type[i]); + ids_for_constant_or_type = move(other.ids_for_constant_or_type); + ids_for_constant_or_variable = move(other.ids_for_constant_or_variable); + declared_capabilities = move(other.declared_capabilities); + declared_extensions = move(other.declared_extensions); + block_meta = move(other.block_meta); + continue_block_to_loop_header = move(other.continue_block_to_loop_header); + entry_points = move(other.entry_points); + ids = move(other.ids); + addressing_model = other.addressing_model; + memory_model = other.memory_model; + + default_entry_point = other.default_entry_point; + source = other.source; + loop_iteration_depth_hard = other.loop_iteration_depth_hard; + loop_iteration_depth_soft = other.loop_iteration_depth_soft; + } + return *this; +} + +ParsedIR::ParsedIR(const ParsedIR &other) + : ParsedIR() +{ + *this = other; +} + +ParsedIR &ParsedIR::operator=(const ParsedIR &other) +{ + if (this != &other) + { + spirv = other.spirv; + meta = other.meta; + for (int i = 0; i < TypeCount; i++) + ids_for_type[i] = other.ids_for_type[i]; + ids_for_constant_or_type = other.ids_for_constant_or_type; + ids_for_constant_or_variable = other.ids_for_constant_or_variable; + declared_capabilities = other.declared_capabilities; + declared_extensions = other.declared_extensions; + block_meta = other.block_meta; + continue_block_to_loop_header = other.continue_block_to_loop_header; + entry_points = other.entry_points; + default_entry_point = other.default_entry_point; + source = other.source; + loop_iteration_depth_hard = other.loop_iteration_depth_hard; + loop_iteration_depth_soft = other.loop_iteration_depth_soft; + addressing_model = other.addressing_model; + memory_model = other.memory_model; + + + + ids.clear(); + ids.reserve(other.ids.size()); + for (size_t i = 0; i < other.ids.size(); i++) + { + ids.emplace_back(pool_group.get()); + ids.back() = other.ids[i]; + } + } + return *this; +} + +void ParsedIR::set_id_bounds(uint32_t bounds) +{ + ids.reserve(bounds); + while (ids.size() < bounds) + ids.emplace_back(pool_group.get()); + + block_meta.resize(bounds); +} + +static string ensure_valid_identifier(const string &name, bool member) +{ + + + auto str = name.substr(0, name.find('(')); + + for (uint32_t i = 0; i < str.size(); i++) + { + auto &c = str[i]; + + if (member) + { + + + if (i == 0) + c = isalpha(c) ? c : '_'; + else if (i == 2 && str[0] == '_' && str[1] == 'm') + c = isalpha(c) ? c : '_'; + else + c = isalnum(c) ? c : '_'; + } + else + { + + + if (i == 0 || (str[0] == '_' && i == 1)) + c = isalpha(c) ? c : '_'; + else + c = isalnum(c) ? 
c : '_'; + } + } + return str; +} + +const string &ParsedIR::get_name(ID id) const +{ + auto *m = find_meta(id); + if (m) + return m->decoration.alias; + else + return empty_string; +} + +const string &ParsedIR::get_member_name(TypeID id, uint32_t index) const +{ + auto *m = find_meta(id); + if (m) + { + if (index >= m->members.size()) + return empty_string; + return m->members[index].alias; + } + else + return empty_string; +} + +void ParsedIR::set_name(ID id, const string &name) +{ + auto &str = meta[id].decoration.alias; + str.clear(); + + if (name.empty()) + return; + + + if (name[0] == '_' && name.size() >= 2 && isdigit(name[1])) + return; + + str = ensure_valid_identifier(name, false); +} + +void ParsedIR::set_member_name(TypeID id, uint32_t index, const string &name) +{ + meta[id].members.resize(max(meta[id].members.size(), size_t(index) + 1)); + + auto &str = meta[id].members[index].alias; + str.clear(); + if (name.empty()) + return; + + + if (name[0] == '_' && name.size() >= 3 && name[1] == 'm' && isdigit(name[2])) + return; + + str = ensure_valid_identifier(name, true); +} + +void ParsedIR::set_decoration_string(ID id, Decoration decoration, const string &argument) +{ + auto &dec = meta[id].decoration; + dec.decoration_flags.set(decoration); + + switch (decoration) + { + case DecorationHlslSemanticGOOGLE: + dec.hlsl_semantic = argument; + break; + + default: + break; + } +} + +void ParsedIR::set_decoration(ID id, Decoration decoration, uint32_t argument) +{ + auto &dec = meta[id].decoration; + dec.decoration_flags.set(decoration); + + switch (decoration) + { + case DecorationBuiltIn: + dec.builtin = true; + dec.builtin_type = static_cast(argument); + break; + + case DecorationLocation: + dec.location = argument; + break; + + case DecorationComponent: + dec.component = argument; + break; + + case DecorationOffset: + dec.offset = argument; + break; + + case DecorationArrayStride: + dec.array_stride = argument; + break; + + case DecorationMatrixStride: + dec.matrix_stride = argument; + break; + + case DecorationBinding: + dec.binding = argument; + break; + + case DecorationDescriptorSet: + dec.set = argument; + break; + + case DecorationInputAttachmentIndex: + dec.input_attachment = argument; + break; + + case DecorationSpecId: + dec.spec_id = argument; + break; + + case DecorationIndex: + dec.index = argument; + break; + + case DecorationHlslCounterBufferGOOGLE: + meta[id].hlsl_magic_counter_buffer = argument; + meta[argument].hlsl_is_magic_counter_buffer = true; + break; + + case DecorationFPRoundingMode: + dec.fp_rounding_mode = static_cast(argument); + break; + + default: + break; + } +} + +void ParsedIR::set_member_decoration(TypeID id, uint32_t index, Decoration decoration, uint32_t argument) +{ + meta[id].members.resize(max(meta[id].members.size(), size_t(index) + 1)); + auto &dec = meta[id].members[index]; + dec.decoration_flags.set(decoration); + + switch (decoration) + { + case DecorationBuiltIn: + dec.builtin = true; + dec.builtin_type = static_cast(argument); + break; + + case DecorationLocation: + dec.location = argument; + break; + + case DecorationComponent: + dec.component = argument; + break; + + case DecorationBinding: + dec.binding = argument; + break; + + case DecorationOffset: + dec.offset = argument; + break; + + case DecorationSpecId: + dec.spec_id = argument; + break; + + case DecorationMatrixStride: + dec.matrix_stride = argument; + break; + + case DecorationIndex: + dec.index = argument; + break; + + default: + break; + } +} + + + +void 
ParsedIR::mark_used_as_array_length(ID id) +{ + switch (ids[id].get_type()) + { + case TypeConstant: + get(id).is_used_as_array_length = true; + break; + + case TypeConstantOp: + { + auto &cop = get(id); + if (cop.opcode == OpCompositeExtract) + mark_used_as_array_length(cop.arguments[0]); + else if (cop.opcode == OpCompositeInsert) + { + mark_used_as_array_length(cop.arguments[0]); + mark_used_as_array_length(cop.arguments[1]); + } + else + for (uint32_t arg_id : cop.arguments) + mark_used_as_array_length(arg_id); + break; + } + + case TypeUndef: + break; + + default: + assert(0); + } +} + +Bitset ParsedIR::get_buffer_block_flags(const SPIRVariable &var) const +{ + auto &type = get(var.basetype); + assert(type.basetype == SPIRType::Struct); + + + + + Bitset base_flags; + auto *m = find_meta(var.self); + if (m) + base_flags = m->decoration.decoration_flags; + + if (type.member_types.empty()) + return base_flags; + + Bitset all_members_flags = get_member_decoration_bitset(type.self, 0); + for (uint32_t i = 1; i < uint32_t(type.member_types.size()); i++) + all_members_flags.merge_and(get_member_decoration_bitset(type.self, i)); + + base_flags.merge_or(all_members_flags); + return base_flags; +} + +const Bitset &ParsedIR::get_member_decoration_bitset(TypeID id, uint32_t index) const +{ + auto *m = find_meta(id); + if (m) + { + if (index >= m->members.size()) + return cleared_bitset; + return m->members[index].decoration_flags; + } + else + return cleared_bitset; +} + +bool ParsedIR::has_decoration(ID id, Decoration decoration) const +{ + return get_decoration_bitset(id).get(decoration); +} + +uint32_t ParsedIR::get_decoration(ID id, Decoration decoration) const +{ + auto *m = find_meta(id); + if (!m) + return 0; + + auto &dec = m->decoration; + if (!dec.decoration_flags.get(decoration)) + return 0; + + switch (decoration) + { + case DecorationBuiltIn: + return dec.builtin_type; + case DecorationLocation: + return dec.location; + case DecorationComponent: + return dec.component; + case DecorationOffset: + return dec.offset; + case DecorationBinding: + return dec.binding; + case DecorationDescriptorSet: + return dec.set; + case DecorationInputAttachmentIndex: + return dec.input_attachment; + case DecorationSpecId: + return dec.spec_id; + case DecorationArrayStride: + return dec.array_stride; + case DecorationMatrixStride: + return dec.matrix_stride; + case DecorationIndex: + return dec.index; + case DecorationFPRoundingMode: + return dec.fp_rounding_mode; + default: + return 1; + } +} + +const string &ParsedIR::get_decoration_string(ID id, Decoration decoration) const +{ + auto *m = find_meta(id); + if (!m) + return empty_string; + + auto &dec = m->decoration; + + if (!dec.decoration_flags.get(decoration)) + return empty_string; + + switch (decoration) + { + case DecorationHlslSemanticGOOGLE: + return dec.hlsl_semantic; + + default: + return empty_string; + } +} + +void ParsedIR::unset_decoration(ID id, Decoration decoration) +{ + auto &dec = meta[id].decoration; + dec.decoration_flags.clear(decoration); + switch (decoration) + { + case DecorationBuiltIn: + dec.builtin = false; + break; + + case DecorationLocation: + dec.location = 0; + break; + + case DecorationComponent: + dec.component = 0; + break; + + case DecorationOffset: + dec.offset = 0; + break; + + case DecorationBinding: + dec.binding = 0; + break; + + case DecorationDescriptorSet: + dec.set = 0; + break; + + case DecorationInputAttachmentIndex: + dec.input_attachment = 0; + break; + + case DecorationSpecId: + dec.spec_id = 0; + 
break; + + case DecorationHlslSemanticGOOGLE: + dec.hlsl_semantic.clear(); + break; + + case DecorationFPRoundingMode: + dec.fp_rounding_mode = FPRoundingModeMax; + break; + + case DecorationHlslCounterBufferGOOGLE: + { + auto &counter = meta[id].hlsl_magic_counter_buffer; + if (counter) + { + meta[counter].hlsl_is_magic_counter_buffer = false; + counter = 0; + } + break; + } + + default: + break; + } +} + +bool ParsedIR::has_member_decoration(TypeID id, uint32_t index, Decoration decoration) const +{ + return get_member_decoration_bitset(id, index).get(decoration); +} + +uint32_t ParsedIR::get_member_decoration(TypeID id, uint32_t index, Decoration decoration) const +{ + auto *m = find_meta(id); + if (!m) + return 0; + + if (index >= m->members.size()) + return 0; + + auto &dec = m->members[index]; + if (!dec.decoration_flags.get(decoration)) + return 0; + + switch (decoration) + { + case DecorationBuiltIn: + return dec.builtin_type; + case DecorationLocation: + return dec.location; + case DecorationComponent: + return dec.component; + case DecorationBinding: + return dec.binding; + case DecorationOffset: + return dec.offset; + case DecorationSpecId: + return dec.spec_id; + case DecorationIndex: + return dec.index; + default: + return 1; + } +} + +const Bitset &ParsedIR::get_decoration_bitset(ID id) const +{ + auto *m = find_meta(id); + if (m) + { + auto &dec = m->decoration; + return dec.decoration_flags; + } + else + return cleared_bitset; +} + +void ParsedIR::set_member_decoration_string(TypeID id, uint32_t index, Decoration decoration, const string &argument) +{ + meta[id].members.resize(max(meta[id].members.size(), size_t(index) + 1)); + auto &dec = meta[id].members[index]; + dec.decoration_flags.set(decoration); + + switch (decoration) + { + case DecorationHlslSemanticGOOGLE: + dec.hlsl_semantic = argument; + break; + + default: + break; + } +} + +const string &ParsedIR::get_member_decoration_string(TypeID id, uint32_t index, Decoration decoration) const +{ + auto *m = find_meta(id); + if (m) + { + if (!has_member_decoration(id, index, decoration)) + return empty_string; + + auto &dec = m->members[index]; + + switch (decoration) + { + case DecorationHlslSemanticGOOGLE: + return dec.hlsl_semantic; + + default: + return empty_string; + } + } + else + return empty_string; +} + +void ParsedIR::unset_member_decoration(TypeID id, uint32_t index, Decoration decoration) +{ + auto &m = meta[id]; + if (index >= m.members.size()) + return; + + auto &dec = m.members[index]; + + dec.decoration_flags.clear(decoration); + switch (decoration) + { + case DecorationBuiltIn: + dec.builtin = false; + break; + + case DecorationLocation: + dec.location = 0; + break; + + case DecorationComponent: + dec.component = 0; + break; + + case DecorationOffset: + dec.offset = 0; + break; + + case DecorationSpecId: + dec.spec_id = 0; + break; + + case DecorationHlslSemanticGOOGLE: + dec.hlsl_semantic.clear(); + break; + + default: + break; + } +} + +uint32_t ParsedIR::increase_bound_by(uint32_t incr_amount) +{ + auto curr_bound = ids.size(); + auto new_bound = curr_bound + incr_amount; + + ids.reserve(ids.size() + incr_amount); + for (uint32_t i = 0; i < incr_amount; i++) + ids.emplace_back(pool_group.get()); + + block_meta.resize(new_bound); + return uint32_t(curr_bound); +} + +void ParsedIR::remove_typed_id(Types type, ID id) +{ + auto &type_ids = ids_for_type[type]; + type_ids.erase(remove(begin(type_ids), end(type_ids), id), end(type_ids)); +} + +void ParsedIR::reset_all_of_type(Types type) +{ + for (auto &id : 
ids_for_type[type]) + if (ids[id].get_type() == type) + ids[id].reset(); + + ids_for_type[type].clear(); +} + +void ParsedIR::add_typed_id(Types type, ID id) +{ + if (loop_iteration_depth_hard != 0) + SPIRV_CROSS_THROW("Cannot add typed ID while looping over it."); + + if (loop_iteration_depth_soft != 0) + { + if (!ids[id].empty()) + SPIRV_CROSS_THROW("Cannot override IDs when loop is soft locked."); + return; + } + + if (ids[id].empty() || ids[id].get_type() != type) + { + switch (type) + { + case TypeConstant: + ids_for_constant_or_variable.push_back(id); + ids_for_constant_or_type.push_back(id); + break; + + case TypeVariable: + ids_for_constant_or_variable.push_back(id); + break; + + case TypeType: + case TypeConstantOp: + ids_for_constant_or_type.push_back(id); + break; + + default: + break; + } + } + + if (ids[id].empty()) + { + ids_for_type[type].push_back(id); + } + else if (ids[id].get_type() != type) + { + remove_typed_id(ids[id].get_type(), id); + ids_for_type[type].push_back(id); + } +} + +const Meta *ParsedIR::find_meta(ID id) const +{ + auto itr = meta.find(id); + if (itr != end(meta)) + return &itr->second; + else + return nullptr; +} + +Meta *ParsedIR::find_meta(ID id) +{ + auto itr = meta.find(id); + if (itr != end(meta)) + return &itr->second; + else + return nullptr; +} + +ParsedIR::LoopLock ParsedIR::create_loop_hard_lock() const +{ + return ParsedIR::LoopLock(&loop_iteration_depth_hard); +} + +ParsedIR::LoopLock ParsedIR::create_loop_soft_lock() const +{ + return ParsedIR::LoopLock(&loop_iteration_depth_soft); +} + +ParsedIR::LoopLock::~LoopLock() +{ + if (lock) + (*lock)--; +} + +ParsedIR::LoopLock::LoopLock(uint32_t *lock_) + : lock(lock_) +{ + if (lock) + (*lock)++; +} + +ParsedIR::LoopLock::LoopLock(LoopLock &&other) SPIRV_CROSS_NOEXCEPT +{ + *this = move(other); +} + +ParsedIR::LoopLock &ParsedIR::LoopLock::operator=(LoopLock &&other) SPIRV_CROSS_NOEXCEPT +{ + if (lock) + (*lock)--; + lock = other.lock; + other.lock = nullptr; + return *this; +} + +} diff --git a/third_party/rust/spirv_cross/src/vendor/SPIRV-Cross/spirv_cross_parsed_ir.hpp b/third_party/rust/spirv_cross/src/vendor/SPIRV-Cross/spirv_cross_parsed_ir.hpp new file mode 100644 index 000000000000..5e436f64029a --- /dev/null +++ b/third_party/rust/spirv_cross/src/vendor/SPIRV-Cross/spirv_cross_parsed_ir.hpp @@ -0,0 +1,229 @@ + + + + + + + + + + + + + + + + +#ifndef SPIRV_CROSS_PARSED_IR_HPP +#define SPIRV_CROSS_PARSED_IR_HPP + +#include "spirv_common.hpp" +#include +#include + +namespace SPIRV_CROSS_NAMESPACE +{ + + + + + + +class ParsedIR +{ +private: + + std::unique_ptr pool_group; + +public: + ParsedIR(); + + + ParsedIR(const ParsedIR &other); + ParsedIR &operator=(const ParsedIR &other); + + + + ParsedIR(ParsedIR &&other) SPIRV_CROSS_NOEXCEPT; + ParsedIR &operator=(ParsedIR &&other) SPIRV_CROSS_NOEXCEPT; + + + void set_id_bounds(uint32_t bounds); + + + std::vector spirv; + + + SmallVector ids; + + + std::unordered_map meta; + + + + + SmallVector ids_for_type[TypeCount]; + + + + + + SmallVector ids_for_constant_or_type; + SmallVector ids_for_constant_or_variable; + + + + SmallVector declared_capabilities; + SmallVector declared_extensions; + + + + enum BlockMetaFlagBits + { + BLOCK_META_LOOP_HEADER_BIT = 1 << 0, + BLOCK_META_CONTINUE_BIT = 1 << 1, + BLOCK_META_LOOP_MERGE_BIT = 1 << 2, + BLOCK_META_SELECTION_MERGE_BIT = 1 << 3, + BLOCK_META_MULTISELECT_MERGE_BIT = 1 << 4 + }; + using BlockMetaFlags = uint8_t; + SmallVector block_meta; + std::unordered_map continue_block_to_loop_header; + + + + 
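// Entry points are keyed by the OpEntryPoint function <id>; unless a caller
// overrides it via Compiler::set_entry_point(), reflection and compilation
// operate on default_entry_point, declared just below.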
std::unordered_map entry_points; + FunctionID default_entry_point = 0; + + struct Source + { + uint32_t version = 0; + bool es = false; + bool known = false; + bool hlsl = false; + + Source() = default; + }; + + Source source; + + spv::AddressingModel addressing_model = spv::AddressingModelMax; + spv::MemoryModel memory_model = spv::MemoryModelMax; + + + + + + void set_name(ID id, const std::string &name); + const std::string &get_name(ID id) const; + void set_decoration(ID id, spv::Decoration decoration, uint32_t argument = 0); + void set_decoration_string(ID id, spv::Decoration decoration, const std::string &argument); + bool has_decoration(ID id, spv::Decoration decoration) const; + uint32_t get_decoration(ID id, spv::Decoration decoration) const; + const std::string &get_decoration_string(ID id, spv::Decoration decoration) const; + const Bitset &get_decoration_bitset(ID id) const; + void unset_decoration(ID id, spv::Decoration decoration); + + + void set_member_name(TypeID id, uint32_t index, const std::string &name); + const std::string &get_member_name(TypeID id, uint32_t index) const; + void set_member_decoration(TypeID id, uint32_t index, spv::Decoration decoration, uint32_t argument = 0); + void set_member_decoration_string(TypeID id, uint32_t index, spv::Decoration decoration, + const std::string &argument); + uint32_t get_member_decoration(TypeID id, uint32_t index, spv::Decoration decoration) const; + const std::string &get_member_decoration_string(TypeID id, uint32_t index, spv::Decoration decoration) const; + bool has_member_decoration(TypeID id, uint32_t index, spv::Decoration decoration) const; + const Bitset &get_member_decoration_bitset(TypeID id, uint32_t index) const; + void unset_member_decoration(TypeID id, uint32_t index, spv::Decoration decoration); + + void mark_used_as_array_length(ID id); + uint32_t increase_bound_by(uint32_t count); + Bitset get_buffer_block_flags(const SPIRVariable &var) const; + + void add_typed_id(Types type, ID id); + void remove_typed_id(Types type, ID id); + + class LoopLock + { + public: + explicit LoopLock(uint32_t *counter); + LoopLock(const LoopLock &) = delete; + void operator=(const LoopLock &) = delete; + LoopLock(LoopLock &&other) SPIRV_CROSS_NOEXCEPT; + LoopLock &operator=(LoopLock &&other) SPIRV_CROSS_NOEXCEPT; + ~LoopLock(); + + private: + uint32_t *lock; + }; + + + + + + + + + + LoopLock create_loop_hard_lock() const; + LoopLock create_loop_soft_lock() const; + + template + void for_each_typed_id(const Op &op) + { + auto loop_lock = create_loop_hard_lock(); + for (auto &id : ids_for_type[T::type]) + { + if (ids[id].get_type() == static_cast(T::type)) + op(id, get(id)); + } + } + + template + void for_each_typed_id(const Op &op) const + { + auto loop_lock = create_loop_hard_lock(); + for (auto &id : ids_for_type[T::type]) + { + if (ids[id].get_type() == static_cast(T::type)) + op(id, get(id)); + } + } + + template + void reset_all_of_type() + { + reset_all_of_type(static_cast(T::type)); + } + + void reset_all_of_type(Types type); + + Meta *find_meta(ID id); + const Meta *find_meta(ID id) const; + + const std::string &get_empty_string() const + { + return empty_string; + } + +private: + template + T &get(uint32_t id) + { + return variant_get(ids[id]); + } + + template + const T &get(uint32_t id) const + { + return variant_get(ids[id]); + } + + mutable uint32_t loop_iteration_depth_hard = 0; + mutable uint32_t loop_iteration_depth_soft = 0; + std::string empty_string; + Bitset cleared_bitset; +}; +} + +#endif diff --git 
a/third_party/rust/spirv_cross/src/vendor/SPIRV-Cross/spirv_cross_util.cpp b/third_party/rust/spirv_cross/src/vendor/SPIRV-Cross/spirv_cross_util.cpp new file mode 100644 index 000000000000..c47e073571fc --- /dev/null +++ b/third_party/rust/spirv_cross/src/vendor/SPIRV-Cross/spirv_cross_util.cpp @@ -0,0 +1,70 @@ + + + + + + + + + + + + + + + + +#include "spirv_cross_util.hpp" +#include "spirv_common.hpp" + +using namespace spv; +using namespace SPIRV_CROSS_NAMESPACE; + +namespace spirv_cross_util +{ +void rename_interface_variable(Compiler &compiler, const SmallVector &resources, uint32_t location, + const std::string &name) +{ + for (auto &v : resources) + { + if (!compiler.has_decoration(v.id, spv::DecorationLocation)) + continue; + + auto loc = compiler.get_decoration(v.id, spv::DecorationLocation); + if (loc != location) + continue; + + auto &type = compiler.get_type(v.base_type_id); + + + + if (type.basetype == SPIRType::Struct) + { + compiler.set_name(v.base_type_id, join("SPIRV_Cross_Interface_Location", location)); + for (uint32_t i = 0; i < uint32_t(type.member_types.size()); i++) + compiler.set_member_name(v.base_type_id, i, join("InterfaceMember", i)); + } + + compiler.set_name(v.id, name); + } +} + +void inherit_combined_sampler_bindings(Compiler &compiler) +{ + auto &samplers = compiler.get_combined_image_samplers(); + for (auto &s : samplers) + { + if (compiler.has_decoration(s.image_id, spv::DecorationDescriptorSet)) + { + uint32_t set = compiler.get_decoration(s.image_id, spv::DecorationDescriptorSet); + compiler.set_decoration(s.combined_id, spv::DecorationDescriptorSet, set); + } + + if (compiler.has_decoration(s.image_id, spv::DecorationBinding)) + { + uint32_t binding = compiler.get_decoration(s.image_id, spv::DecorationBinding); + compiler.set_decoration(s.combined_id, spv::DecorationBinding, binding); + } + } +} +} diff --git a/third_party/rust/spirv_cross/src/vendor/SPIRV-Cross/spirv_cross_util.hpp b/third_party/rust/spirv_cross/src/vendor/SPIRV-Cross/spirv_cross_util.hpp new file mode 100644 index 000000000000..d99126ef9a91 --- /dev/null +++ b/third_party/rust/spirv_cross/src/vendor/SPIRV-Cross/spirv_cross_util.hpp @@ -0,0 +1,30 @@ + + + + + + + + + + + + + + + + +#ifndef SPIRV_CROSS_UTIL_HPP +#define SPIRV_CROSS_UTIL_HPP + +#include "spirv_cross.hpp" + +namespace spirv_cross_util +{ +void rename_interface_variable(SPIRV_CROSS_NAMESPACE::Compiler &compiler, + const SPIRV_CROSS_NAMESPACE::SmallVector &resources, + uint32_t location, const std::string &name); +void inherit_combined_sampler_bindings(SPIRV_CROSS_NAMESPACE::Compiler &compiler); +} + +#endif diff --git a/third_party/rust/spirv_cross/src/vendor/SPIRV-Cross/spirv_glsl.cpp b/third_party/rust/spirv_cross/src/vendor/SPIRV-Cross/spirv_glsl.cpp new file mode 100644 index 000000000000..83e4f7b53f19 --- /dev/null +++ b/third_party/rust/spirv_cross/src/vendor/SPIRV-Cross/spirv_glsl.cpp @@ -0,0 +1,13044 @@ + + + + + + + + + + + + + + + + +#include "spirv_glsl.hpp" +#include "GLSL.std.450.h" +#include "spirv_common.hpp" +#include +#include +#include +#include +#include +#include + +#ifndef _WIN32 +#include +#endif +#include + +using namespace spv; +using namespace SPIRV_CROSS_NAMESPACE; +using namespace std; + +static bool is_unsigned_opcode(Op op) +{ + + switch (op) + { + case OpShiftRightLogical: + case OpUGreaterThan: + case OpUGreaterThanEqual: + case OpULessThan: + case OpULessThanEqual: + case OpUConvert: + case OpUDiv: + case OpUMod: + case OpUMulExtended: + case OpConvertUToF: + case OpConvertFToU: + return 
true; + + default: + return false; + } +} + +static bool is_unsigned_glsl_opcode(GLSLstd450 op) +{ + + switch (op) + { + case GLSLstd450UClamp: + case GLSLstd450UMin: + case GLSLstd450UMax: + case GLSLstd450FindUMsb: + return true; + + default: + return false; + } +} + +static bool packing_is_vec4_padded(BufferPackingStandard packing) +{ + switch (packing) + { + case BufferPackingHLSLCbuffer: + case BufferPackingHLSLCbufferPackOffset: + case BufferPackingStd140: + case BufferPackingStd140EnhancedLayout: + return true; + + default: + return false; + } +} + +static bool packing_is_hlsl(BufferPackingStandard packing) +{ + switch (packing) + { + case BufferPackingHLSLCbuffer: + case BufferPackingHLSLCbufferPackOffset: + return true; + + default: + return false; + } +} + +static bool packing_has_flexible_offset(BufferPackingStandard packing) +{ + switch (packing) + { + case BufferPackingStd140: + case BufferPackingStd430: + case BufferPackingScalar: + case BufferPackingHLSLCbuffer: + return false; + + default: + return true; + } +} + +static bool packing_is_scalar(BufferPackingStandard packing) +{ + switch (packing) + { + case BufferPackingScalar: + case BufferPackingScalarEnhancedLayout: + return true; + + default: + return false; + } +} + +static BufferPackingStandard packing_to_substruct_packing(BufferPackingStandard packing) +{ + switch (packing) + { + case BufferPackingStd140EnhancedLayout: + return BufferPackingStd140; + case BufferPackingStd430EnhancedLayout: + return BufferPackingStd430; + case BufferPackingHLSLCbufferPackOffset: + return BufferPackingHLSLCbuffer; + case BufferPackingScalarEnhancedLayout: + return BufferPackingScalar; + default: + return packing; + } +} + + +string CompilerGLSL::sanitize_underscores(const string &str) +{ + string res; + res.reserve(str.size()); + + bool last_underscore = false; + for (auto c : str) + { + if (c == '_') + { + if (last_underscore) + continue; + + res += c; + last_underscore = true; + } + else + { + res += c; + last_underscore = false; + } + } + return res; +} + +void CompilerGLSL::init() +{ + if (ir.source.known) + { + options.es = ir.source.es; + options.version = ir.source.version; + } + + + + + +#ifdef _WIN32 + + const struct lconv *conv = localeconv(); + if (conv && conv->decimal_point) + current_locale_radix_character = *conv->decimal_point; +#elif defined(__ANDROID__) && __ANDROID_API__ < 26 + + const struct lconv *conv = localeconv(); + if (conv && conv->decimal_point) + current_locale_radix_character = *conv->decimal_point; +#else + + const char *decimal_point = nl_langinfo(RADIXCHAR); + if (decimal_point && *decimal_point != '\0') + current_locale_radix_character = *decimal_point; +#endif +} + +static const char *to_pls_layout(PlsFormat format) +{ + switch (format) + { + case PlsR11FG11FB10F: + return "layout(r11f_g11f_b10f) "; + case PlsR32F: + return "layout(r32f) "; + case PlsRG16F: + return "layout(rg16f) "; + case PlsRGB10A2: + return "layout(rgb10_a2) "; + case PlsRGBA8: + return "layout(rgba8) "; + case PlsRG16: + return "layout(rg16) "; + case PlsRGBA8I: + return "layout(rgba8i)"; + case PlsRG16I: + return "layout(rg16i) "; + case PlsRGB10A2UI: + return "layout(rgb10_a2ui) "; + case PlsRGBA8UI: + return "layout(rgba8ui) "; + case PlsRG16UI: + return "layout(rg16ui) "; + case PlsR32UI: + return "layout(r32ui) "; + default: + return ""; + } +} + +static SPIRType::BaseType pls_format_to_basetype(PlsFormat format) +{ + switch (format) + { + default: + case PlsR11FG11FB10F: + case PlsR32F: + case PlsRG16F: + case PlsRGB10A2: + 
case PlsRGBA8: + case PlsRG16: + return SPIRType::Float; + + case PlsRGBA8I: + case PlsRG16I: + return SPIRType::Int; + + case PlsRGB10A2UI: + case PlsRGBA8UI: + case PlsRG16UI: + case PlsR32UI: + return SPIRType::UInt; + } +} + +static uint32_t pls_format_to_components(PlsFormat format) +{ + switch (format) + { + default: + case PlsR32F: + case PlsR32UI: + return 1; + + case PlsRG16F: + case PlsRG16: + case PlsRG16UI: + case PlsRG16I: + return 2; + + case PlsR11FG11FB10F: + return 3; + + case PlsRGB10A2: + case PlsRGBA8: + case PlsRGBA8I: + case PlsRGB10A2UI: + case PlsRGBA8UI: + return 4; + } +} + +static const char *vector_swizzle(int vecsize, int index) +{ + static const char *const swizzle[4][4] = { + { ".x", ".y", ".z", ".w" }, + { ".xy", ".yz", ".zw", nullptr }, + { ".xyz", ".yzw", nullptr, nullptr }, +#if defined(__GNUC__) && (__GNUC__ == 9) + + + { "", nullptr, nullptr, "$" }, +#else + { "", nullptr, nullptr, nullptr }, +#endif + }; + + assert(vecsize >= 1 && vecsize <= 4); + assert(index >= 0 && index < 4); + assert(swizzle[vecsize - 1][index]); + + return swizzle[vecsize - 1][index]; +} + +void CompilerGLSL::reset() +{ + + + + clear_force_recompile(); + + + invalid_expressions.clear(); + current_function = nullptr; + + + expression_usage_counts.clear(); + forwarded_temporaries.clear(); + suppressed_usage_tracking.clear(); + + + flushed_phi_variables.clear(); + + reset_name_caches(); + + ir.for_each_typed_id([&](uint32_t, SPIRFunction &func) { + func.active = false; + func.flush_undeclared = true; + }); + + ir.for_each_typed_id([&](uint32_t, SPIRVariable &var) { var.dependees.clear(); }); + + ir.reset_all_of_type(); + ir.reset_all_of_type(); + + statement_count = 0; + indent = 0; +} + +void CompilerGLSL::remap_pls_variables() +{ + for (auto &input : pls_inputs) + { + auto &var = get(input.id); + + bool input_is_target = false; + if (var.storage == StorageClassUniformConstant) + { + auto &type = get(var.basetype); + input_is_target = type.image.dim == DimSubpassData; + } + + if (var.storage != StorageClassInput && !input_is_target) + SPIRV_CROSS_THROW("Can only use in and target variables for PLS inputs."); + var.remapped_variable = true; + } + + for (auto &output : pls_outputs) + { + auto &var = get(output.id); + if (var.storage != StorageClassOutput) + SPIRV_CROSS_THROW("Can only use out variables for PLS outputs."); + var.remapped_variable = true; + } +} + +void CompilerGLSL::find_static_extensions() +{ + ir.for_each_typed_id([&](uint32_t, const SPIRType &type) { + if (type.basetype == SPIRType::Double) + { + if (options.es) + SPIRV_CROSS_THROW("FP64 not supported in ES profile."); + if (!options.es && options.version < 400) + require_extension_internal("GL_ARB_gpu_shader_fp64"); + } + else if (type.basetype == SPIRType::Int64 || type.basetype == SPIRType::UInt64) + { + if (options.es) + SPIRV_CROSS_THROW("64-bit integers not supported in ES profile."); + if (!options.es) + require_extension_internal("GL_ARB_gpu_shader_int64"); + } + else if (type.basetype == SPIRType::Half) + { + require_extension_internal("GL_EXT_shader_explicit_arithmetic_types_float16"); + if (options.vulkan_semantics) + require_extension_internal("GL_EXT_shader_16bit_storage"); + } + else if (type.basetype == SPIRType::SByte || type.basetype == SPIRType::UByte) + { + require_extension_internal("GL_EXT_shader_explicit_arithmetic_types_int8"); + if (options.vulkan_semantics) + require_extension_internal("GL_EXT_shader_8bit_storage"); + } + else if (type.basetype == SPIRType::Short || type.basetype == 
SPIRType::UShort) + { + require_extension_internal("GL_EXT_shader_explicit_arithmetic_types_int16"); + if (options.vulkan_semantics) + require_extension_internal("GL_EXT_shader_16bit_storage"); + } + }); + + auto &execution = get_entry_point(); + switch (execution.model) + { + case ExecutionModelGLCompute: + if (!options.es && options.version < 430) + require_extension_internal("GL_ARB_compute_shader"); + if (options.es && options.version < 310) + SPIRV_CROSS_THROW("At least ESSL 3.10 required for compute shaders."); + break; + + case ExecutionModelGeometry: + if (options.es && options.version < 320) + require_extension_internal("GL_EXT_geometry_shader"); + if (!options.es && options.version < 150) + require_extension_internal("GL_ARB_geometry_shader4"); + + if (execution.flags.get(ExecutionModeInvocations) && execution.invocations != 1) + { + + if (!options.es && options.version < 400) + require_extension_internal("GL_ARB_gpu_shader5"); + } + break; + + case ExecutionModelTessellationEvaluation: + case ExecutionModelTessellationControl: + if (options.es && options.version < 320) + require_extension_internal("GL_EXT_tessellation_shader"); + if (!options.es && options.version < 400) + require_extension_internal("GL_ARB_tessellation_shader"); + break; + + case ExecutionModelRayGenerationNV: + case ExecutionModelIntersectionNV: + case ExecutionModelAnyHitNV: + case ExecutionModelClosestHitNV: + case ExecutionModelMissNV: + case ExecutionModelCallableNV: + if (options.es || options.version < 460) + SPIRV_CROSS_THROW("Ray tracing shaders require non-es profile with version 460 or above."); + require_extension_internal("GL_NV_ray_tracing"); + break; + + default: + break; + } + + if (!pls_inputs.empty() || !pls_outputs.empty()) + require_extension_internal("GL_EXT_shader_pixel_local_storage"); + + if (options.separate_shader_objects && !options.es && options.version < 410) + require_extension_internal("GL_ARB_separate_shader_objects"); + + if (ir.addressing_model == AddressingModelPhysicalStorageBuffer64EXT) + { + if (!options.vulkan_semantics) + SPIRV_CROSS_THROW("GL_EXT_buffer_reference is only supported in Vulkan GLSL."); + if (options.es && options.version < 320) + SPIRV_CROSS_THROW("GL_EXT_buffer_reference requires ESSL 320."); + else if (!options.es && options.version < 450) + SPIRV_CROSS_THROW("GL_EXT_buffer_reference requires GLSL 450."); + require_extension_internal("GL_EXT_buffer_reference"); + } + else if (ir.addressing_model != AddressingModelLogical) + { + SPIRV_CROSS_THROW("Only Logical and PhysicalStorageBuffer64EXT addressing models are supported."); + } + + + + for (auto &cap : ir.declared_capabilities) + { + bool nonuniform_indexing = false; + switch (cap) + { + case CapabilityShaderNonUniformEXT: + case CapabilityRuntimeDescriptorArrayEXT: + if (!options.vulkan_semantics) + SPIRV_CROSS_THROW("GL_EXT_nonuniform_qualifier is only supported in Vulkan GLSL."); + require_extension_internal("GL_EXT_nonuniform_qualifier"); + nonuniform_indexing = true; + break; + + default: + break; + } + + if (nonuniform_indexing) + break; + } +} + +string CompilerGLSL::compile() +{ + if (options.vulkan_semantics) + backend.allow_precision_qualifiers = true; + backend.force_gl_in_out_block = true; + backend.supports_extensions = true; + backend.use_array_constructor = true; + + + fixup_type_alias(); + reorder_type_alias(); + build_function_control_flow_graphs_and_analyze(); + find_static_extensions(); + fixup_image_load_store_access(); + update_active_builtins(); + analyze_image_and_sampler_usage(); 
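+ // Illustrative sketch of how a client typically drives this compiler,
+ // assuming a std::vector<uint32_t> named `spirv_words` holding the SPIR-V
+ // binary (the name is an assumption, not from this patch):
+ //
+ // spirv_cross::CompilerGLSL glsl(std::move(spirv_words));
+ // spirv_cross::CompilerGLSL::Options opts = glsl.get_common_options();
+ // opts.es = true;
+ // opts.version = 310;
+ // glsl.set_common_options(opts);
+ // std::string source = glsl.compile(); // may run several internal passes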
+ analyze_interlocked_resource_usage();
+
+ if (ir.addressing_model == AddressingModelPhysicalStorageBuffer64EXT)
+ analyze_non_block_pointer_types();
+
+ uint32_t pass_count = 0;
+ do
+ {
+ if (pass_count >= 3)
+ SPIRV_CROSS_THROW("Over 3 compilation loops detected. Must be a bug!");
+
+ reset();
+
+ // The output stream is rewound every pass; only the final pass survives.
+ buffer.reset();
+
+ emit_header();
+ emit_resources();
+
+ emit_function(get<SPIRFunction>(ir.default_entry_point), Bitset());
+
+ pass_count++;
+ } while (is_forcing_recompilation());
+
+ if (interlocked_is_complex)
+ {
+ statement("void main()");
+ begin_scope();
+ statement("// Interlocks were used in a way not compatible with GLSL, this is very slow.");
+ if (options.es)
+ statement("beginInvocationInterlockNV();");
+ else
+ statement("beginInvocationInterlockARB();");
+ statement("spvMainInterlockedBody();");
+ if (options.es)
+ statement("endInvocationInterlockNV();");
+ else
+ statement("endInvocationInterlockARB();");
+ end_scope();
+ }
+
+ // The entry point is always renamed to main() in emitted GLSL.
+ get_entry_point().name = "main";
+
+ return buffer.str();
+}
+
+std::string CompilerGLSL::get_partial_source()
+{
+ return buffer.str();
+}
+
+void CompilerGLSL::build_workgroup_size(SmallVector<string> &arguments, const SpecializationConstant &wg_x,
+ const SpecializationConstant &wg_y, const SpecializationConstant &wg_z)
+{
+ auto &execution = get_entry_point();
+
+ if (wg_x.id)
+ {
+ if (options.vulkan_semantics)
+ arguments.push_back(join("local_size_x_id = ", wg_x.constant_id));
+ else
+ arguments.push_back(join("local_size_x = ", get<SPIRConstant>(wg_x.id).specialization_constant_macro_name));
+ }
+ else
+ arguments.push_back(join("local_size_x = ", execution.workgroup_size.x));
+
+ if (wg_y.id)
+ {
+ if (options.vulkan_semantics)
+ arguments.push_back(join("local_size_y_id = ", wg_y.constant_id));
+ else
+ arguments.push_back(join("local_size_y = ", get<SPIRConstant>(wg_y.id).specialization_constant_macro_name));
+ }
+ else
+ arguments.push_back(join("local_size_y = ", execution.workgroup_size.y));
+
+ if (wg_z.id)
+ {
+ if (options.vulkan_semantics)
+ arguments.push_back(join("local_size_z_id = ", wg_z.constant_id));
+ else
+ arguments.push_back(join("local_size_z = ", get<SPIRConstant>(wg_z.id).specialization_constant_macro_name));
+ }
+ else
+ arguments.push_back(join("local_size_z = ", execution.workgroup_size.z));
+}
+
+void CompilerGLSL::emit_header()
+{
+ auto &execution = get_entry_point();
+ statement("#version ", options.version, options.es && options.version > 100 ?
" es" : ""); + + if (!options.es && options.version < 420) + { + + if (options.enable_420pack_extension) + { + statement("#ifdef GL_ARB_shading_language_420pack"); + statement("#extension GL_ARB_shading_language_420pack : require"); + statement("#endif"); + } + + if (execution.flags.get(ExecutionModeEarlyFragmentTests)) + require_extension_internal("GL_ARB_shader_image_load_store"); + } + + + if (execution.flags.get(ExecutionModePostDepthCoverage)) + require_extension_internal("GL_ARB_post_depth_coverage"); + + + if (execution.flags.get(ExecutionModePixelInterlockOrderedEXT) || + execution.flags.get(ExecutionModePixelInterlockUnorderedEXT) || + execution.flags.get(ExecutionModeSampleInterlockOrderedEXT) || + execution.flags.get(ExecutionModeSampleInterlockUnorderedEXT)) + { + if (options.es) + { + if (options.version < 310) + SPIRV_CROSS_THROW("At least ESSL 3.10 required for fragment shader interlock."); + require_extension_internal("GL_NV_fragment_shader_interlock"); + } + else + { + if (options.version < 420) + require_extension_internal("GL_ARB_shader_image_load_store"); + require_extension_internal("GL_ARB_fragment_shader_interlock"); + } + } + + for (auto &ext : forced_extensions) + { + if (ext == "GL_EXT_shader_explicit_arithmetic_types_float16") + { + + + statement("#if defined(GL_AMD_gpu_shader_half_float)"); + statement("#extension GL_AMD_gpu_shader_half_float : require"); + if (!options.vulkan_semantics) + { + statement("#elif defined(GL_NV_gpu_shader5)"); + statement("#extension GL_NV_gpu_shader5 : require"); + } + else + { + statement("#elif defined(GL_EXT_shader_explicit_arithmetic_types_float16)"); + statement("#extension GL_EXT_shader_explicit_arithmetic_types_float16 : require"); + } + statement("#else"); + statement("#error No extension available for FP16."); + statement("#endif"); + } + else if (ext == "GL_EXT_shader_explicit_arithmetic_types_int16") + { + if (options.vulkan_semantics) + statement("#extension GL_EXT_shader_explicit_arithmetic_types_int16 : require"); + else + { + statement("#if defined(GL_AMD_gpu_shader_int16)"); + statement("#extension GL_AMD_gpu_shader_int16 : require"); + statement("#else"); + statement("#error No extension available for Int16."); + statement("#endif"); + } + } + else if (ext == "GL_ARB_post_depth_coverage") + { + if (options.es) + statement("#extension GL_EXT_post_depth_coverage : require"); + else + { + statement("#if defined(GL_ARB_post_depth_coverge)"); + statement("#extension GL_ARB_post_depth_coverage : require"); + statement("#else"); + statement("#extension GL_EXT_post_depth_coverage : require"); + statement("#endif"); + } + } + else + statement("#extension ", ext, " : require"); + } + + for (auto &header : header_lines) + statement(header); + + SmallVector inputs; + SmallVector outputs; + + switch (execution.model) + { + case ExecutionModelGeometry: + outputs.push_back(join("max_vertices = ", execution.output_vertices)); + if ((execution.flags.get(ExecutionModeInvocations)) && execution.invocations != 1) + inputs.push_back(join("invocations = ", execution.invocations)); + if (execution.flags.get(ExecutionModeInputPoints)) + inputs.push_back("points"); + if (execution.flags.get(ExecutionModeInputLines)) + inputs.push_back("lines"); + if (execution.flags.get(ExecutionModeInputLinesAdjacency)) + inputs.push_back("lines_adjacency"); + if (execution.flags.get(ExecutionModeTriangles)) + inputs.push_back("triangles"); + if (execution.flags.get(ExecutionModeInputTrianglesAdjacency)) + inputs.push_back("triangles_adjacency"); + if 
(execution.flags.get(ExecutionModeOutputTriangleStrip)) + outputs.push_back("triangle_strip"); + if (execution.flags.get(ExecutionModeOutputPoints)) + outputs.push_back("points"); + if (execution.flags.get(ExecutionModeOutputLineStrip)) + outputs.push_back("line_strip"); + break; + + case ExecutionModelTessellationControl: + if (execution.flags.get(ExecutionModeOutputVertices)) + outputs.push_back(join("vertices = ", execution.output_vertices)); + break; + + case ExecutionModelTessellationEvaluation: + if (execution.flags.get(ExecutionModeQuads)) + inputs.push_back("quads"); + if (execution.flags.get(ExecutionModeTriangles)) + inputs.push_back("triangles"); + if (execution.flags.get(ExecutionModeIsolines)) + inputs.push_back("isolines"); + if (execution.flags.get(ExecutionModePointMode)) + inputs.push_back("point_mode"); + + if (!execution.flags.get(ExecutionModeIsolines)) + { + if (execution.flags.get(ExecutionModeVertexOrderCw)) + inputs.push_back("cw"); + if (execution.flags.get(ExecutionModeVertexOrderCcw)) + inputs.push_back("ccw"); + } + + if (execution.flags.get(ExecutionModeSpacingFractionalEven)) + inputs.push_back("fractional_even_spacing"); + if (execution.flags.get(ExecutionModeSpacingFractionalOdd)) + inputs.push_back("fractional_odd_spacing"); + if (execution.flags.get(ExecutionModeSpacingEqual)) + inputs.push_back("equal_spacing"); + break; + + case ExecutionModelGLCompute: + { + if (execution.workgroup_size.constant != 0) + { + SpecializationConstant wg_x, wg_y, wg_z; + get_work_group_size_specialization_constants(wg_x, wg_y, wg_z); + + + + if (options.vulkan_semantics || + ((wg_x.id == ConstantID(0)) && (wg_y.id == ConstantID(0)) && (wg_z.id == ConstantID(0)))) + build_workgroup_size(inputs, wg_x, wg_y, wg_z); + } + else + { + inputs.push_back(join("local_size_x = ", execution.workgroup_size.x)); + inputs.push_back(join("local_size_y = ", execution.workgroup_size.y)); + inputs.push_back(join("local_size_z = ", execution.workgroup_size.z)); + } + break; + } + + case ExecutionModelFragment: + if (options.es) + { + switch (options.fragment.default_float_precision) + { + case Options::Lowp: + statement("precision lowp float;"); + break; + + case Options::Mediump: + statement("precision mediump float;"); + break; + + case Options::Highp: + statement("precision highp float;"); + break; + + default: + break; + } + + switch (options.fragment.default_int_precision) + { + case Options::Lowp: + statement("precision lowp int;"); + break; + + case Options::Mediump: + statement("precision mediump int;"); + break; + + case Options::Highp: + statement("precision highp int;"); + break; + + default: + break; + } + } + + if (execution.flags.get(ExecutionModeEarlyFragmentTests)) + inputs.push_back("early_fragment_tests"); + if (execution.flags.get(ExecutionModePostDepthCoverage)) + inputs.push_back("post_depth_coverage"); + + if (execution.flags.get(ExecutionModePixelInterlockOrderedEXT)) + inputs.push_back("pixel_interlock_ordered"); + else if (execution.flags.get(ExecutionModePixelInterlockUnorderedEXT)) + inputs.push_back("pixel_interlock_unordered"); + else if (execution.flags.get(ExecutionModeSampleInterlockOrderedEXT)) + inputs.push_back("sample_interlock_ordered"); + else if (execution.flags.get(ExecutionModeSampleInterlockUnorderedEXT)) + inputs.push_back("sample_interlock_unordered"); + + if (!options.es && execution.flags.get(ExecutionModeDepthGreater)) + statement("layout(depth_greater) out float gl_FragDepth;"); + else if (!options.es && 
execution.flags.get(ExecutionModeDepthLess)) + statement("layout(depth_less) out float gl_FragDepth;"); + + break; + + default: + break; + } + + if (!inputs.empty()) + statement("layout(", merge(inputs), ") in;"); + if (!outputs.empty()) + statement("layout(", merge(outputs), ") out;"); + + statement(""); +} + +bool CompilerGLSL::type_is_empty(const SPIRType &type) +{ + return type.basetype == SPIRType::Struct && type.member_types.empty(); +} + +void CompilerGLSL::emit_struct(SPIRType &type) +{ + + + + + + if (type.type_alias != TypeID(0) && + !has_extended_decoration(type.type_alias, SPIRVCrossDecorationBufferBlockRepacked)) + return; + + add_resource_name(type.self); + auto name = type_to_glsl(type); + + statement(!backend.explicit_struct_type ? "struct " : "", name); + begin_scope(); + + type.member_name_cache.clear(); + + uint32_t i = 0; + bool emitted = false; + for (auto &member : type.member_types) + { + add_member_name(type, i); + emit_struct_member(type, member, i); + i++; + emitted = true; + } + + + if (type_is_empty(type) && !backend.supports_empty_struct) + { + statement("int empty_struct_member;"); + emitted = true; + } + + if (has_extended_decoration(type.self, SPIRVCrossDecorationPaddingTarget)) + emit_struct_padding_target(type); + + end_scope_decl(); + + if (emitted) + statement(""); +} + +string CompilerGLSL::to_interpolation_qualifiers(const Bitset &flags) +{ + string res; + + + if (flags.get(DecorationFlat)) + res += "flat "; + if (flags.get(DecorationNoPerspective)) + res += "noperspective "; + if (flags.get(DecorationCentroid)) + res += "centroid "; + if (flags.get(DecorationPatch)) + res += "patch "; + if (flags.get(DecorationSample)) + res += "sample "; + if (flags.get(DecorationInvariant)) + res += "invariant "; + if (flags.get(DecorationExplicitInterpAMD)) + res += "__explicitInterpAMD "; + + return res; +} + +string CompilerGLSL::layout_for_member(const SPIRType &type, uint32_t index) +{ + if (is_legacy()) + return ""; + + bool is_block = ir.meta[type.self].decoration.decoration_flags.get(DecorationBlock) || + ir.meta[type.self].decoration.decoration_flags.get(DecorationBufferBlock); + if (!is_block) + return ""; + + auto &memb = ir.meta[type.self].members; + if (index >= memb.size()) + return ""; + auto &dec = memb[index]; + + SmallVector attr; + + + + + + + + + + + + + + + + auto flags = combined_decoration_for_member(type, index); + + if (flags.get(DecorationRowMajor)) + attr.push_back("row_major"); + + + + + if (dec.decoration_flags.get(DecorationLocation) && can_use_io_location(type.storage, true)) + attr.push_back(join("location = ", dec.location)); + + + if (dec.decoration_flags.get(DecorationComponent) && can_use_io_location(type.storage, true)) + { + if (!options.es) + { + if (options.version < 440 && options.version >= 140) + require_extension_internal("GL_ARB_enhanced_layouts"); + else if (options.version < 140) + SPIRV_CROSS_THROW("Component decoration is not supported in targets below GLSL 1.40."); + attr.push_back(join("component = ", dec.component)); + } + else + SPIRV_CROSS_THROW("Component decoration is not supported in ES targets."); + } + + + + if (has_extended_decoration(type.self, SPIRVCrossDecorationExplicitOffset) && + dec.decoration_flags.get(DecorationOffset)) + attr.push_back(join("offset = ", dec.offset)); + + if (attr.empty()) + return ""; + + string res = "layout("; + res += merge(attr); + res += ") "; + return res; +} + +const char *CompilerGLSL::format_to_glsl(spv::ImageFormat format) +{ + if (options.es && 
is_desktop_only_format(format)) + SPIRV_CROSS_THROW("Attempting to use image format not supported in ES profile."); + + switch (format) + { + case ImageFormatRgba32f: + return "rgba32f"; + case ImageFormatRgba16f: + return "rgba16f"; + case ImageFormatR32f: + return "r32f"; + case ImageFormatRgba8: + return "rgba8"; + case ImageFormatRgba8Snorm: + return "rgba8_snorm"; + case ImageFormatRg32f: + return "rg32f"; + case ImageFormatRg16f: + return "rg16f"; + case ImageFormatRgba32i: + return "rgba32i"; + case ImageFormatRgba16i: + return "rgba16i"; + case ImageFormatR32i: + return "r32i"; + case ImageFormatRgba8i: + return "rgba8i"; + case ImageFormatRg32i: + return "rg32i"; + case ImageFormatRg16i: + return "rg16i"; + case ImageFormatRgba32ui: + return "rgba32ui"; + case ImageFormatRgba16ui: + return "rgba16ui"; + case ImageFormatR32ui: + return "r32ui"; + case ImageFormatRgba8ui: + return "rgba8ui"; + case ImageFormatRg32ui: + return "rg32ui"; + case ImageFormatRg16ui: + return "rg16ui"; + case ImageFormatR11fG11fB10f: + return "r11f_g11f_b10f"; + case ImageFormatR16f: + return "r16f"; + case ImageFormatRgb10A2: + return "rgb10_a2"; + case ImageFormatR8: + return "r8"; + case ImageFormatRg8: + return "rg8"; + case ImageFormatR16: + return "r16"; + case ImageFormatRg16: + return "rg16"; + case ImageFormatRgba16: + return "rgba16"; + case ImageFormatR16Snorm: + return "r16_snorm"; + case ImageFormatRg16Snorm: + return "rg16_snorm"; + case ImageFormatRgba16Snorm: + return "rgba16_snorm"; + case ImageFormatR8Snorm: + return "r8_snorm"; + case ImageFormatRg8Snorm: + return "rg8_snorm"; + case ImageFormatR8ui: + return "r8ui"; + case ImageFormatRg8ui: + return "rg8ui"; + case ImageFormatR16ui: + return "r16ui"; + case ImageFormatRgb10a2ui: + return "rgb10_a2ui"; + case ImageFormatR8i: + return "r8i"; + case ImageFormatRg8i: + return "rg8i"; + case ImageFormatR16i: + return "r16i"; + default: + case ImageFormatUnknown: + return nullptr; + } +} + +uint32_t CompilerGLSL::type_to_packed_base_size(const SPIRType &type, BufferPackingStandard) +{ + switch (type.basetype) + { + case SPIRType::Double: + case SPIRType::Int64: + case SPIRType::UInt64: + return 8; + case SPIRType::Float: + case SPIRType::Int: + case SPIRType::UInt: + return 4; + case SPIRType::Half: + case SPIRType::Short: + case SPIRType::UShort: + return 2; + case SPIRType::SByte: + case SPIRType::UByte: + return 1; + + default: + SPIRV_CROSS_THROW("Unrecognized type in type_to_packed_base_size."); + } +} + +uint32_t CompilerGLSL::type_to_packed_alignment(const SPIRType &type, const Bitset &flags, + BufferPackingStandard packing) +{ + + + if (type.storage == StorageClassPhysicalStorageBufferEXT) + { + if (!type.pointer) + SPIRV_CROSS_THROW("Types in PhysicalStorageBufferEXT must be pointers."); + + if (ir.addressing_model == AddressingModelPhysicalStorageBuffer64EXT) + { + if (packing_is_vec4_padded(packing) && type_is_array_of_pointers(type)) + return 16; + else + return 8; + } + else + SPIRV_CROSS_THROW("AddressingModelPhysicalStorageBuffer64EXT must be used for PhysicalStorageBufferEXT."); + } + + if (!type.array.empty()) + { + uint32_t minimum_alignment = 1; + if (packing_is_vec4_padded(packing)) + minimum_alignment = 16; + + auto *tmp = &get(type.parent_type); + while (!tmp->array.empty()) + tmp = &get(tmp->parent_type); + + + return max(minimum_alignment, type_to_packed_alignment(*tmp, flags, packing)); + } + + if (type.basetype == SPIRType::Struct) + { + + uint32_t alignment = 1; + for (uint32_t i = 0; i < type.member_types.size(); 
i++) + { + auto member_flags = ir.meta[type.self].members[i].decoration_flags; + alignment = + max(alignment, type_to_packed_alignment(get(type.member_types[i]), member_flags, packing)); + } + + + if (packing_is_vec4_padded(packing)) + alignment = max(alignment, 16u); + + return alignment; + } + else + { + const uint32_t base_alignment = type_to_packed_base_size(type, packing); + + + if (packing_is_scalar(packing)) + return base_alignment; + + + + if (type.columns == 1 && packing_is_hlsl(packing)) + return base_alignment; + + + + if (type.vecsize == 1 && type.columns == 1) + return base_alignment; + + + if ((type.vecsize == 2 || type.vecsize == 4) && type.columns == 1) + return type.vecsize * base_alignment; + + + if (type.vecsize == 3 && type.columns == 1) + return 4 * base_alignment; + + + + + + if (flags.get(DecorationColMajor) && type.columns > 1) + { + if (packing_is_vec4_padded(packing)) + return 4 * base_alignment; + else if (type.vecsize == 3) + return 4 * base_alignment; + else + return type.vecsize * base_alignment; + } + + + + + if (flags.get(DecorationRowMajor) && type.vecsize > 1) + { + if (packing_is_vec4_padded(packing)) + return 4 * base_alignment; + else if (type.columns == 3) + return 4 * base_alignment; + else + return type.columns * base_alignment; + } + + + } + + SPIRV_CROSS_THROW("Did not find suitable rule for type. Bogus decorations?"); +} + +uint32_t CompilerGLSL::type_to_packed_array_stride(const SPIRType &type, const Bitset &flags, + BufferPackingStandard packing) +{ + + uint32_t parent = type.parent_type; + assert(parent); + + auto &tmp = get(parent); + + uint32_t size = type_to_packed_size(tmp, flags, packing); + if (tmp.array.empty()) + { + uint32_t alignment = type_to_packed_alignment(type, flags, packing); + return (size + alignment - 1) & ~(alignment - 1); + } + else + { + + + return size; + } +} + +uint32_t CompilerGLSL::type_to_packed_size(const SPIRType &type, const Bitset &flags, BufferPackingStandard packing) +{ + if (!type.array.empty()) + { + return to_array_size_literal(type) * type_to_packed_array_stride(type, flags, packing); + } + + + + if (type.storage == StorageClassPhysicalStorageBufferEXT) + { + if (!type.pointer) + SPIRV_CROSS_THROW("Types in PhysicalStorageBufferEXT must be pointers."); + + if (ir.addressing_model == AddressingModelPhysicalStorageBuffer64EXT) + return 8; + else + SPIRV_CROSS_THROW("AddressingModelPhysicalStorageBuffer64EXT must be used for PhysicalStorageBufferEXT."); + } + + uint32_t size = 0; + + if (type.basetype == SPIRType::Struct) + { + uint32_t pad_alignment = 1; + + for (uint32_t i = 0; i < type.member_types.size(); i++) + { + auto member_flags = ir.meta[type.self].members[i].decoration_flags; + auto &member_type = get(type.member_types[i]); + + uint32_t packed_alignment = type_to_packed_alignment(member_type, member_flags, packing); + uint32_t alignment = max(packed_alignment, pad_alignment); + + + + if (member_type.basetype == SPIRType::Struct) + pad_alignment = packed_alignment; + else + pad_alignment = 1; + + size = (size + alignment - 1) & ~(alignment - 1); + size += type_to_packed_size(member_type, member_flags, packing); + } + } + else + { + const uint32_t base_alignment = type_to_packed_base_size(type, packing); + + if (packing_is_scalar(packing)) + { + size = type.vecsize * type.columns * base_alignment; + } + else + { + if (type.columns == 1) + size = type.vecsize * base_alignment; + + if (flags.get(DecorationColMajor) && type.columns > 1) + { + if (packing_is_vec4_padded(packing)) + size = type.columns * 
4 * base_alignment; + else if (type.vecsize == 3) + size = type.columns * 4 * base_alignment; + else + size = type.columns * type.vecsize * base_alignment; + } + + if (flags.get(DecorationRowMajor) && type.vecsize > 1) + { + if (packing_is_vec4_padded(packing)) + size = type.vecsize * 4 * base_alignment; + else if (type.columns == 3) + size = type.vecsize * 4 * base_alignment; + else + size = type.vecsize * type.columns * base_alignment; + } + } + } + + return size; +} + +bool CompilerGLSL::buffer_is_packing_standard(const SPIRType &type, BufferPackingStandard packing, + uint32_t start_offset, uint32_t end_offset) +{ + + + + + + + + + + + + + + uint32_t offset = 0; + uint32_t pad_alignment = 1; + + bool is_top_level_block = + has_decoration(type.self, DecorationBlock) || has_decoration(type.self, DecorationBufferBlock); + + for (uint32_t i = 0; i < type.member_types.size(); i++) + { + auto &memb_type = get(type.member_types[i]); + auto member_flags = ir.meta[type.self].members[i].decoration_flags; + + + uint32_t packed_alignment = type_to_packed_alignment(memb_type, member_flags, packing); + + + + + + + + + + + + + bool member_can_be_unsized = + is_top_level_block && size_t(i + 1) == type.member_types.size() && !memb_type.array.empty(); + + uint32_t packed_size = 0; + if (!member_can_be_unsized) + packed_size = type_to_packed_size(memb_type, member_flags, packing); + + + if (packing_is_hlsl(packing)) + { + + uint32_t begin_word = offset / 16; + uint32_t end_word = (offset + packed_size - 1) / 16; + if (begin_word != end_word) + packed_alignment = max(packed_alignment, 16u); + } + + uint32_t alignment = max(packed_alignment, pad_alignment); + offset = (offset + alignment - 1) & ~(alignment - 1); + + + if (offset >= end_offset) + break; + + + + if (memb_type.basetype == SPIRType::Struct && !memb_type.pointer) + pad_alignment = packed_alignment; + else + pad_alignment = 1; + + + if (offset >= start_offset) + { + uint32_t actual_offset = type_struct_member_offset(type, i); + + + + if (!packing_has_flexible_offset(packing)) + { + if (actual_offset != offset) + return false; + } + else if ((actual_offset & (alignment - 1)) != 0) + { + + return false; + } + + + if (!memb_type.array.empty() && type_to_packed_array_stride(memb_type, member_flags, packing) != + type_struct_member_array_stride(type, i)) + return false; + + + + auto substruct_packing = packing_to_substruct_packing(packing); + + if (!memb_type.pointer && !memb_type.member_types.empty() && + !buffer_is_packing_standard(memb_type, substruct_packing)) + { + return false; + } + } + + + offset += packed_size; + } + + return true; +} + +bool CompilerGLSL::can_use_io_location(StorageClass storage, bool block) +{ + + + if ((get_execution_model() != ExecutionModelVertex && storage == StorageClassInput) || + (get_execution_model() != ExecutionModelFragment && storage == StorageClassOutput)) + { + uint32_t minimum_desktop_version = block ? 
440 : 410; + + + if (!options.es && options.version < minimum_desktop_version && !options.separate_shader_objects) + return false; + else if (options.es && options.version < 310) + return false; + } + + if ((get_execution_model() == ExecutionModelVertex && storage == StorageClassInput) || + (get_execution_model() == ExecutionModelFragment && storage == StorageClassOutput)) + { + if (options.es && options.version < 300) + return false; + else if (!options.es && options.version < 330) + return false; + } + + if (storage == StorageClassUniform || storage == StorageClassUniformConstant || storage == StorageClassPushConstant) + { + if (options.es && options.version < 310) + return false; + else if (!options.es && options.version < 430) + return false; + } + + return true; +} + +string CompilerGLSL::layout_for_variable(const SPIRVariable &var) +{ + + + + + if (is_legacy()) + return ""; + + SmallVector attr; + + auto &dec = ir.meta[var.self].decoration; + auto &type = get(var.basetype); + auto &flags = dec.decoration_flags; + auto typeflags = ir.meta[type.self].decoration.decoration_flags; + + if (options.vulkan_semantics && var.storage == StorageClassPushConstant) + attr.push_back("push_constant"); + else if (var.storage == StorageClassShaderRecordBufferNV) + attr.push_back("shaderRecordNV"); + + if (flags.get(DecorationRowMajor)) + attr.push_back("row_major"); + if (flags.get(DecorationColMajor)) + attr.push_back("column_major"); + + if (options.vulkan_semantics) + { + if (flags.get(DecorationInputAttachmentIndex)) + attr.push_back(join("input_attachment_index = ", dec.input_attachment)); + } + + bool is_block = has_decoration(type.self, DecorationBlock); + if (flags.get(DecorationLocation) && can_use_io_location(var.storage, is_block)) + { + Bitset combined_decoration; + for (uint32_t i = 0; i < ir.meta[type.self].members.size(); i++) + combined_decoration.merge_or(combined_decoration_for_member(type, i)); + + + + if (!combined_decoration.get(DecorationLocation)) + attr.push_back(join("location = ", dec.location)); + } + + + if (flags.get(DecorationComponent) && can_use_io_location(var.storage, is_block)) + { + if (!options.es) + { + if (options.version < 440 && options.version >= 140) + require_extension_internal("GL_ARB_enhanced_layouts"); + else if (options.version < 140) + SPIRV_CROSS_THROW("Component decoration is not supported in targets below GLSL 1.40."); + attr.push_back(join("component = ", dec.component)); + } + else + SPIRV_CROSS_THROW("Component decoration is not supported in ES targets."); + } + + if (flags.get(DecorationIndex)) + attr.push_back(join("index = ", dec.index)); + + + + if (var.storage != StorageClassPushConstant && var.storage != StorageClassShaderRecordBufferNV) + { + if (flags.get(DecorationDescriptorSet) && options.vulkan_semantics) + attr.push_back(join("set = ", dec.set)); + } + + bool push_constant_block = options.vulkan_semantics && var.storage == StorageClassPushConstant; + bool ssbo_block = var.storage == StorageClassStorageBuffer || var.storage == StorageClassShaderRecordBufferNV || + (var.storage == StorageClassUniform && typeflags.get(DecorationBufferBlock)); + bool emulated_ubo = var.storage == StorageClassPushConstant && options.emit_push_constant_as_uniform_buffer; + bool ubo_block = var.storage == StorageClassUniform && typeflags.get(DecorationBlock); + + + bool can_use_buffer_blocks = (options.es && options.version >= 300) || (!options.es && options.version >= 140); + + + if (ubo_block && options.emit_uniform_buffer_as_plain_uniforms) + 
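+ // Plain-uniform emission has no block syntax, so the std140/std430 layout
+ // qualifiers (and, below, binding points for uniforms) are skipped for it.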
can_use_buffer_blocks = false; + + bool can_use_binding; + if (options.es) + can_use_binding = options.version >= 310; + else + can_use_binding = options.enable_420pack_extension || (options.version >= 420); + + + if (!can_use_buffer_blocks && var.storage == StorageClassUniform) + can_use_binding = false; + + if (var.storage == StorageClassShaderRecordBufferNV) + can_use_binding = false; + + if (can_use_binding && flags.get(DecorationBinding)) + attr.push_back(join("binding = ", dec.binding)); + + if (flags.get(DecorationOffset)) + attr.push_back(join("offset = ", dec.offset)); + + + + if (can_use_buffer_blocks && (ubo_block || emulated_ubo)) + { + attr.push_back(buffer_to_packing_standard(type, false)); + } + else if (can_use_buffer_blocks && (push_constant_block || ssbo_block)) + { + attr.push_back(buffer_to_packing_standard(type, true)); + } + + + + if (type.basetype == SPIRType::Image && type.image.sampled == 2) + { + const char *fmt = format_to_glsl(type.image.format); + if (fmt) + attr.push_back(fmt); + } + + if (attr.empty()) + return ""; + + string res = "layout("; + res += merge(attr); + res += ") "; + return res; +} + +string CompilerGLSL::buffer_to_packing_standard(const SPIRType &type, bool support_std430_without_scalar_layout) +{ + if (support_std430_without_scalar_layout && buffer_is_packing_standard(type, BufferPackingStd430)) + return "std430"; + else if (buffer_is_packing_standard(type, BufferPackingStd140)) + return "std140"; + else if (options.vulkan_semantics && buffer_is_packing_standard(type, BufferPackingScalar)) + { + require_extension_internal("GL_EXT_scalar_block_layout"); + return "scalar"; + } + else if (support_std430_without_scalar_layout && + buffer_is_packing_standard(type, BufferPackingStd430EnhancedLayout)) + { + if (options.es && !options.vulkan_semantics) + SPIRV_CROSS_THROW("Push constant block cannot be expressed as neither std430 nor std140. ES-targets do " + "not support GL_ARB_enhanced_layouts."); + if (!options.es && !options.vulkan_semantics && options.version < 440) + require_extension_internal("GL_ARB_enhanced_layouts"); + + set_extended_decoration(type.self, SPIRVCrossDecorationExplicitOffset); + return "std430"; + } + else if (buffer_is_packing_standard(type, BufferPackingStd140EnhancedLayout)) + { + + + + if (options.es && !options.vulkan_semantics) + SPIRV_CROSS_THROW("Push constant block cannot be expressed as neither std430 nor std140. 
ES-targets do " + "not support GL_ARB_enhanced_layouts."); + if (!options.es && !options.vulkan_semantics && options.version < 440) + require_extension_internal("GL_ARB_enhanced_layouts"); + + set_extended_decoration(type.self, SPIRVCrossDecorationExplicitOffset); + return "std140"; + } + else if (options.vulkan_semantics && buffer_is_packing_standard(type, BufferPackingScalarEnhancedLayout)) + { + set_extended_decoration(type.self, SPIRVCrossDecorationExplicitOffset); + require_extension_internal("GL_EXT_scalar_block_layout"); + return "scalar"; + } + else if (!support_std430_without_scalar_layout && options.vulkan_semantics && + buffer_is_packing_standard(type, BufferPackingStd430)) + { + + require_extension_internal("GL_EXT_scalar_block_layout"); + return "std430"; + } + else if (!support_std430_without_scalar_layout && options.vulkan_semantics && + buffer_is_packing_standard(type, BufferPackingStd430EnhancedLayout)) + { + + set_extended_decoration(type.self, SPIRVCrossDecorationExplicitOffset); + require_extension_internal("GL_EXT_scalar_block_layout"); + return "std430"; + } + else + { + SPIRV_CROSS_THROW("Buffer block cannot be expressed as any of std430, std140, scalar, even with enhanced " + "layouts. You can try flattening this block to support a more flexible layout."); + } +} + +void CompilerGLSL::emit_push_constant_block(const SPIRVariable &var) +{ + if (flattened_buffer_blocks.count(var.self)) + emit_buffer_block_flattened(var); + else if (options.vulkan_semantics) + emit_push_constant_block_vulkan(var); + else if (options.emit_push_constant_as_uniform_buffer) + emit_buffer_block_native(var); + else + emit_push_constant_block_glsl(var); +} + +void CompilerGLSL::emit_push_constant_block_vulkan(const SPIRVariable &var) +{ + emit_buffer_block(var); +} + +void CompilerGLSL::emit_push_constant_block_glsl(const SPIRVariable &var) +{ + + auto &type = get(var.basetype); + + auto &flags = ir.meta[var.self].decoration.decoration_flags; + flags.clear(DecorationBinding); + flags.clear(DecorationDescriptorSet); + +#if 0 + if (flags & ((1ull << DecorationBinding) | (1ull << DecorationDescriptorSet))) + SPIRV_CROSS_THROW("Push constant blocks cannot be compiled to GLSL with Binding or Set syntax. 
" + "Remap to location with reflection API first or disable these decorations."); +#endif + + + + auto &block_flags = ir.meta[type.self].decoration.decoration_flags; + bool block_flag = block_flags.get(DecorationBlock); + block_flags.clear(DecorationBlock); + + emit_struct(type); + + if (block_flag) + block_flags.set(DecorationBlock); + + emit_uniform(var); + statement(""); +} + +void CompilerGLSL::emit_buffer_block(const SPIRVariable &var) +{ + auto &type = get(var.basetype); + bool ubo_block = var.storage == StorageClassUniform && has_decoration(type.self, DecorationBlock); + + if (flattened_buffer_blocks.count(var.self)) + emit_buffer_block_flattened(var); + else if (is_legacy() || (!options.es && options.version == 130) || + (ubo_block && options.emit_uniform_buffer_as_plain_uniforms)) + emit_buffer_block_legacy(var); + else + emit_buffer_block_native(var); +} + +void CompilerGLSL::emit_buffer_block_legacy(const SPIRVariable &var) +{ + auto &type = get(var.basetype); + bool ssbo = var.storage == StorageClassStorageBuffer || + ir.meta[type.self].decoration.decoration_flags.get(DecorationBufferBlock); + if (ssbo) + SPIRV_CROSS_THROW("SSBOs not supported in legacy targets."); + + + + auto &block_flags = ir.meta[type.self].decoration.decoration_flags; + bool block_flag = block_flags.get(DecorationBlock); + block_flags.clear(DecorationBlock); + emit_struct(type); + if (block_flag) + block_flags.set(DecorationBlock); + emit_uniform(var); + statement(""); +} + +void CompilerGLSL::emit_buffer_reference_block(SPIRType &type, bool forward_declaration) +{ + string buffer_name; + + if (forward_declaration) + { + + + + buffer_name = to_name(type.self, false); + + + + + if (ir.meta[type.self].decoration.alias.empty() || + block_ssbo_names.find(buffer_name) != end(block_ssbo_names) || + resource_names.find(buffer_name) != end(resource_names)) + { + buffer_name = join("_", type.self); + } + + + + add_variable(block_ssbo_names, resource_names, buffer_name); + + + + + if (buffer_name.empty()) + buffer_name = join("_", type.self); + + block_names.insert(buffer_name); + block_ssbo_names.insert(buffer_name); + } + else if (type.basetype != SPIRType::Struct) + buffer_name = type_to_glsl(type); + else + buffer_name = to_name(type.self, false); + + if (!forward_declaration) + { + if (type.basetype == SPIRType::Struct) + statement("layout(buffer_reference, ", buffer_to_packing_standard(type, true), ") buffer ", buffer_name); + else + statement("layout(buffer_reference) buffer ", buffer_name); + + begin_scope(); + + if (type.basetype == SPIRType::Struct) + { + type.member_name_cache.clear(); + + uint32_t i = 0; + for (auto &member : type.member_types) + { + add_member_name(type, i); + emit_struct_member(type, member, i); + i++; + } + } + else + { + auto &pointee_type = get_pointee_type(type); + statement(type_to_glsl(pointee_type), " value", type_to_array_glsl(pointee_type), ";"); + } + + end_scope_decl(); + statement(""); + } + else + { + statement("layout(buffer_reference) buffer ", buffer_name, ";"); + } +} + +void CompilerGLSL::emit_buffer_block_native(const SPIRVariable &var) +{ + auto &type = get(var.basetype); + + Bitset flags = ir.get_buffer_block_flags(var); + bool ssbo = var.storage == StorageClassStorageBuffer || var.storage == StorageClassShaderRecordBufferNV || + ir.meta[type.self].decoration.decoration_flags.get(DecorationBufferBlock); + bool is_restrict = ssbo && flags.get(DecorationRestrict); + bool is_writeonly = ssbo && flags.get(DecorationNonReadable); + bool is_readonly = ssbo && 
flags.get(DecorationNonWritable); + bool is_coherent = ssbo && flags.get(DecorationCoherent); + + + auto buffer_name = to_name(type.self, false); + + auto &block_namespace = ssbo ? block_ssbo_names : block_ubo_names; + + + + + if (ir.meta[type.self].decoration.alias.empty() || block_namespace.find(buffer_name) != end(block_namespace) || + resource_names.find(buffer_name) != end(resource_names)) + { + buffer_name = get_block_fallback_name(var.self); + } + + + + add_variable(block_namespace, resource_names, buffer_name); + + + + + if (buffer_name.empty()) + buffer_name = join("_", get(var.basetype).self, "_", var.self); + + block_names.insert(buffer_name); + block_namespace.insert(buffer_name); + + + declared_block_names[var.self] = buffer_name; + + statement(layout_for_variable(var), is_coherent ? "coherent " : "", is_restrict ? "restrict " : "", + is_writeonly ? "writeonly " : "", is_readonly ? "readonly " : "", ssbo ? "buffer " : "uniform ", + buffer_name); + + begin_scope(); + + type.member_name_cache.clear(); + + uint32_t i = 0; + for (auto &member : type.member_types) + { + add_member_name(type, i); + emit_struct_member(type, member, i); + i++; + } + + + + + preserve_alias_on_reset(var.self); + add_resource_name(var.self); + end_scope_decl(to_name(var.self) + type_to_array_glsl(type)); + statement(""); +} + +void CompilerGLSL::emit_buffer_block_flattened(const SPIRVariable &var) +{ + auto &type = get(var.basetype); + + + auto buffer_name = to_name(type.self, false); + size_t buffer_size = (get_declared_struct_size(type) + 15) / 16; + + SPIRType::BaseType basic_type; + if (get_common_basic_type(type, basic_type)) + { + SPIRType tmp; + tmp.basetype = basic_type; + tmp.vecsize = 4; + if (basic_type != SPIRType::Float && basic_type != SPIRType::Int && basic_type != SPIRType::UInt) + SPIRV_CROSS_THROW("Basic types in a flattened UBO must be float, int or uint."); + + auto flags = ir.get_buffer_block_flags(var); + statement("uniform ", flags_to_qualifiers_glsl(tmp, flags), type_to_glsl(tmp), " ", buffer_name, "[", + buffer_size, "];"); + } + else + SPIRV_CROSS_THROW("All basic types in a flattened block must be the same."); +} + +const char *CompilerGLSL::to_storage_qualifiers_glsl(const SPIRVariable &var) +{ + auto &execution = get_entry_point(); + + if (var.storage == StorageClassInput || var.storage == StorageClassOutput) + { + if (is_legacy() && execution.model == ExecutionModelVertex) + return var.storage == StorageClassInput ? "attribute " : "varying "; + else if (is_legacy() && execution.model == ExecutionModelFragment) + return "varying "; + else + return var.storage == StorageClassInput ? 
"in " : "out "; + } + else if (var.storage == StorageClassUniformConstant || var.storage == StorageClassUniform || + var.storage == StorageClassPushConstant) + { + return "uniform "; + } + else if (var.storage == StorageClassRayPayloadNV) + { + return "rayPayloadNV "; + } + else if (var.storage == StorageClassIncomingRayPayloadNV) + { + return "rayPayloadInNV "; + } + else if (var.storage == StorageClassHitAttributeNV) + { + return "hitAttributeNV "; + } + else if (var.storage == StorageClassCallableDataNV) + { + return "callableDataNV "; + } + else if (var.storage == StorageClassIncomingCallableDataNV) + { + return "callableDataInNV "; + } + + return ""; +} + +void CompilerGLSL::emit_flattened_io_block(const SPIRVariable &var, const char *qual) +{ + auto &type = get(var.basetype); + if (!type.array.empty()) + SPIRV_CROSS_THROW("Array of varying structs cannot be flattened to legacy-compatible varyings."); + + auto old_flags = ir.meta[type.self].decoration.decoration_flags; + + ir.meta[type.self].decoration.decoration_flags.set(DecorationBlock); + + type.member_name_cache.clear(); + + uint32_t i = 0; + for (auto &member : type.member_types) + { + add_member_name(type, i); + auto &membertype = get(member); + + if (membertype.basetype == SPIRType::Struct) + SPIRV_CROSS_THROW("Cannot flatten struct inside structs in I/O variables."); + + + + + + auto backup_name = get_member_name(type.self, i); + auto member_name = to_member_name(type, i); + set_member_name(type.self, i, sanitize_underscores(join(to_name(var.self), "_", member_name))); + emit_struct_member(type, member, i, qual); + + set_member_name(type.self, i, member_name); + i++; + } + + ir.meta[type.self].decoration.decoration_flags = old_flags; + + + flattened_structs.insert(var.self); +} + +void CompilerGLSL::emit_interface_block(const SPIRVariable &var) +{ + auto &type = get(var.basetype); + + + bool block = ir.meta[type.self].decoration.decoration_flags.get(DecorationBlock); + const char *qual = to_storage_qualifiers_glsl(var); + + if (block) + { + + + + if ((options.es && options.version < 310) || (!options.es && options.version < 150)) + { + + + emit_flattened_io_block(var, qual); + } + else + { + if (options.es && options.version < 320) + { + + if (!has_extension("GL_EXT_geometry_shader") && !has_extension("GL_EXT_tessellation_shader")) + require_extension_internal("GL_EXT_shader_io_blocks"); + } + + + auto block_name = to_name(type.self, false); + + + auto &block_namespace = type.storage == StorageClassInput ? 
block_input_names : block_output_names; + + + + if (block_name.empty() || block_namespace.find(block_name) != end(block_namespace)) + block_name = get_fallback_name(type.self); + else + block_namespace.insert(block_name); + + + + if (block_name.empty()) + block_name = join("_", get(var.basetype).self, "_", var.self); + + + resource_names.insert(block_name); + + statement(layout_for_variable(var), qual, block_name); + begin_scope(); + + type.member_name_cache.clear(); + + uint32_t i = 0; + for (auto &member : type.member_types) + { + add_member_name(type, i); + emit_struct_member(type, member, i); + i++; + } + + add_resource_name(var.self); + end_scope_decl(join(to_name(var.self), type_to_array_glsl(type))); + statement(""); + } + } + else + { + + + + if (type.basetype == SPIRType::Struct && + ((options.es && options.version < 310) || (!options.es && options.version < 150))) + { + emit_flattened_io_block(var, qual); + } + else + { + add_resource_name(var.self); + statement(layout_for_variable(var), to_qualifiers_glsl(var.self), + variable_decl(type, to_name(var.self), var.self), ";"); + + + if (var.storage == StorageClassOutput && var.initializer) + { + auto &entry_func = this->get(ir.default_entry_point); + entry_func.fixup_hooks_in.push_back( + [&]() { statement(to_name(var.self), " = ", to_expression(var.initializer), ";"); }); + } + } + } +} + +void CompilerGLSL::emit_uniform(const SPIRVariable &var) +{ + auto &type = get(var.basetype); + if (type.basetype == SPIRType::Image && type.image.sampled == 2) + { + if (!options.es && options.version < 420) + require_extension_internal("GL_ARB_shader_image_load_store"); + else if (options.es && options.version < 310) + SPIRV_CROSS_THROW("At least ESSL 3.10 required for shader image load store."); + } + + add_resource_name(var.self); + statement(layout_for_variable(var), variable_decl(var), ";"); +} + +string CompilerGLSL::constant_value_macro_name(uint32_t id) +{ + return join("SPIRV_CROSS_CONSTANT_ID_", id); +} + +void CompilerGLSL::emit_specialization_constant_op(const SPIRConstantOp &constant) +{ + auto &type = get(constant.basetype); + auto name = to_name(constant.self); + statement("const ", variable_decl(type, name), " = ", constant_op_expression(constant), ";"); +} + +void CompilerGLSL::emit_constant(const SPIRConstant &constant) +{ + auto &type = get(constant.constant_type); + auto name = to_name(constant.self); + + SpecializationConstant wg_x, wg_y, wg_z; + ID workgroup_size_id = get_work_group_size_specialization_constants(wg_x, wg_y, wg_z); + + + if (constant.self == workgroup_size_id) + return; + + + + + bool is_workgroup_size_constant = ConstantID(constant.self) == wg_x.id || ConstantID(constant.self) == wg_y.id || + ConstantID(constant.self) == wg_z.id; + + if (options.vulkan_semantics && is_workgroup_size_constant) + { + + return; + } + else if (!options.vulkan_semantics && is_workgroup_size_constant && + !has_decoration(constant.self, DecorationSpecId)) + { + + return; + } + + + if (has_decoration(constant.self, DecorationSpecId)) + { + if (options.vulkan_semantics) + { + statement("layout(constant_id = ", get_decoration(constant.self, DecorationSpecId), ") const ", + variable_decl(type, name), " = ", constant_expression(constant), ";"); + } + else + { + const string ¯o_name = constant.specialization_constant_macro_name; + statement("#ifndef ", macro_name); + statement("#define ", macro_name, " ", constant_expression(constant)); + statement("#endif"); + + + if (!is_workgroup_size_constant) + statement("const ", 
variable_decl(type, name), " = ", macro_name, ";"); + } + } + else + { + statement("const ", variable_decl(type, name), " = ", constant_expression(constant), ";"); + } +} + +void CompilerGLSL::emit_entry_point_declarations() +{ +} + +void CompilerGLSL::replace_illegal_names() +{ + + static const unordered_set keywords = { + "abs", "acos", "acosh", "all", "any", "asin", "asinh", "atan", "atanh", + "atomicAdd", "atomicCompSwap", "atomicCounter", "atomicCounterDecrement", "atomicCounterIncrement", + "atomicExchange", "atomicMax", "atomicMin", "atomicOr", "atomicXor", + "bitCount", "bitfieldExtract", "bitfieldInsert", "bitfieldReverse", + "ceil", "cos", "cosh", "cross", "degrees", + "dFdx", "dFdxCoarse", "dFdxFine", + "dFdy", "dFdyCoarse", "dFdyFine", + "distance", "dot", "EmitStreamVertex", "EmitVertex", "EndPrimitive", "EndStreamPrimitive", "equal", "exp", "exp2", + "faceforward", "findLSB", "findMSB", "float16BitsToInt16", "float16BitsToUint16", "floatBitsToInt", "floatBitsToUint", "floor", "fma", "fract", + "frexp", "fwidth", "fwidthCoarse", "fwidthFine", + "greaterThan", "greaterThanEqual", "groupMemoryBarrier", + "imageAtomicAdd", "imageAtomicAnd", "imageAtomicCompSwap", "imageAtomicExchange", "imageAtomicMax", "imageAtomicMin", "imageAtomicOr", "imageAtomicXor", + "imageLoad", "imageSamples", "imageSize", "imageStore", "imulExtended", "int16BitsToFloat16", "intBitsToFloat", "interpolateAtOffset", "interpolateAtCentroid", "interpolateAtSample", + "inverse", "inversesqrt", "isinf", "isnan", "ldexp", "length", "lessThan", "lessThanEqual", "log", "log2", + "matrixCompMult", "max", "memoryBarrier", "memoryBarrierAtomicCounter", "memoryBarrierBuffer", "memoryBarrierImage", "memoryBarrierShared", + "min", "mix", "mod", "modf", "noise", "noise1", "noise2", "noise3", "noise4", "normalize", "not", "notEqual", + "outerProduct", "packDouble2x32", "packHalf2x16", "packInt2x16", "packInt4x16", "packSnorm2x16", "packSnorm4x8", + "packUint2x16", "packUint4x16", "packUnorm2x16", "packUnorm4x8", "pow", + "radians", "reflect", "refract", "round", "roundEven", "sign", "sin", "sinh", "smoothstep", "sqrt", "step", + "tan", "tanh", "texelFetch", "texelFetchOffset", "texture", "textureGather", "textureGatherOffset", "textureGatherOffsets", + "textureGrad", "textureGradOffset", "textureLod", "textureLodOffset", "textureOffset", "textureProj", "textureProjGrad", + "textureProjGradOffset", "textureProjLod", "textureProjLodOffset", "textureProjOffset", "textureQueryLevels", "textureQueryLod", "textureSamples", "textureSize", + "transpose", "trunc", "uaddCarry", "uint16BitsToFloat16", "uintBitsToFloat", "umulExtended", "unpackDouble2x32", "unpackHalf2x16", "unpackInt2x16", "unpackInt4x16", + "unpackSnorm2x16", "unpackSnorm4x8", "unpackUint2x16", "unpackUint4x16", "unpackUnorm2x16", "unpackUnorm4x8", "usubBorrow", + + "active", "asm", "atomic_uint", "attribute", "bool", "break", "buffer", + "bvec2", "bvec3", "bvec4", "case", "cast", "centroid", "class", "coherent", "common", "const", "continue", "default", "discard", + "dmat2", "dmat2x2", "dmat2x3", "dmat2x4", "dmat3", "dmat3x2", "dmat3x3", "dmat3x4", "dmat4", "dmat4x2", "dmat4x3", "dmat4x4", + "do", "double", "dvec2", "dvec3", "dvec4", "else", "enum", "extern", "external", "false", "filter", "fixed", "flat", "float", + "for", "fvec2", "fvec3", "fvec4", "goto", "half", "highp", "hvec2", "hvec3", "hvec4", "if", "iimage1D", "iimage1DArray", + "iimage2D", "iimage2DArray", "iimage2DMS", "iimage2DMSArray", "iimage2DRect", "iimage3D", "iimageBuffer", "iimageCube", + 
"iimageCubeArray", "image1D", "image1DArray", "image2D", "image2DArray", "image2DMS", "image2DMSArray", "image2DRect", + "image3D", "imageBuffer", "imageCube", "imageCubeArray", "in", "inline", "inout", "input", "int", "interface", "invariant", + "isampler1D", "isampler1DArray", "isampler2D", "isampler2DArray", "isampler2DMS", "isampler2DMSArray", "isampler2DRect", + "isampler3D", "isamplerBuffer", "isamplerCube", "isamplerCubeArray", "ivec2", "ivec3", "ivec4", "layout", "long", "lowp", + "mat2", "mat2x2", "mat2x3", "mat2x4", "mat3", "mat3x2", "mat3x3", "mat3x4", "mat4", "mat4x2", "mat4x3", "mat4x4", "mediump", + "namespace", "noinline", "noperspective", "out", "output", "packed", "partition", "patch", "precise", "precision", "public", "readonly", + "resource", "restrict", "return", "sample", "sampler1D", "sampler1DArray", "sampler1DArrayShadow", + "sampler1DShadow", "sampler2D", "sampler2DArray", "sampler2DArrayShadow", "sampler2DMS", "sampler2DMSArray", + "sampler2DRect", "sampler2DRectShadow", "sampler2DShadow", "sampler3D", "sampler3DRect", "samplerBuffer", + "samplerCube", "samplerCubeArray", "samplerCubeArrayShadow", "samplerCubeShadow", "shared", "short", "sizeof", "smooth", "static", + "struct", "subroutine", "superp", "switch", "template", "this", "true", "typedef", "uimage1D", "uimage1DArray", "uimage2D", + "uimage2DArray", "uimage2DMS", "uimage2DMSArray", "uimage2DRect", "uimage3D", "uimageBuffer", "uimageCube", + "uimageCubeArray", "uint", "uniform", "union", "unsigned", "usampler1D", "usampler1DArray", "usampler2D", "usampler2DArray", + "usampler2DMS", "usampler2DMSArray", "usampler2DRect", "usampler3D", "usamplerBuffer", "usamplerCube", + "usamplerCubeArray", "using", "uvec2", "uvec3", "uvec4", "varying", "vec2", "vec3", "vec4", "void", "volatile", + "while", "writeonly", + }; + + + ir.for_each_typed_id([&](uint32_t, const SPIRVariable &var) { + if (!is_hidden_variable(var)) + { + auto &m = ir.meta[var.self].decoration; + if (m.alias.compare(0, 3, "gl_") == 0 || keywords.find(m.alias) != end(keywords)) + m.alias = join("_", m.alias); + } + }); +} + +void CompilerGLSL::replace_fragment_output(SPIRVariable &var) +{ + auto &m = ir.meta[var.self].decoration; + uint32_t location = 0; + if (m.decoration_flags.get(DecorationLocation)) + location = m.location; + + + + auto &type = get(var.basetype); + + if (type.array.empty()) + { + + m.alias = join("gl_FragData[", location, "]"); + + if (is_legacy_es() && location != 0) + require_extension_internal("GL_EXT_draw_buffers"); + } + else if (type.array.size() == 1) + { + + + + m.alias = "gl_FragData"; + if (location != 0) + SPIRV_CROSS_THROW("Arrayed output variable used, but location is not 0. " + "This is unimplemented in SPIRV-Cross."); + + if (is_legacy_es()) + require_extension_internal("GL_EXT_draw_buffers"); + } + else + SPIRV_CROSS_THROW("Array-of-array output variable used. 
This cannot be implemented in legacy GLSL."); + + var.compat_builtin = true; +} + +void CompilerGLSL::replace_fragment_outputs() +{ + ir.for_each_typed_id([&](uint32_t, SPIRVariable &var) { + auto &type = this->get(var.basetype); + + if (!is_builtin_variable(var) && !var.remapped_variable && type.pointer && var.storage == StorageClassOutput) + replace_fragment_output(var); + }); +} + +string CompilerGLSL::remap_swizzle(const SPIRType &out_type, uint32_t input_components, const string &expr) +{ + if (out_type.vecsize == input_components) + return expr; + else if (input_components == 1 && !backend.can_swizzle_scalar) + return join(type_to_glsl(out_type), "(", expr, ")"); + else + { + + auto e = enclose_expression(expr) + "."; + + for (uint32_t c = 0; c < out_type.vecsize; c++) + e += index_to_swizzle(min(c, input_components - 1)); + if (backend.swizzle_is_function && out_type.vecsize > 1) + e += "()"; + + remove_duplicate_swizzle(e); + return e; + } +} + +void CompilerGLSL::emit_pls() +{ + auto &execution = get_entry_point(); + if (execution.model != ExecutionModelFragment) + SPIRV_CROSS_THROW("Pixel local storage only supported in fragment shaders."); + + if (!options.es) + SPIRV_CROSS_THROW("Pixel local storage only supported in OpenGL ES."); + + if (options.version < 300) + SPIRV_CROSS_THROW("Pixel local storage only supported in ESSL 3.0 and above."); + + if (!pls_inputs.empty()) + { + statement("__pixel_local_inEXT _PLSIn"); + begin_scope(); + for (auto &input : pls_inputs) + statement(pls_decl(input), ";"); + end_scope_decl(); + statement(""); + } + + if (!pls_outputs.empty()) + { + statement("__pixel_local_outEXT _PLSOut"); + begin_scope(); + for (auto &output : pls_outputs) + statement(pls_decl(output), ";"); + end_scope_decl(); + statement(""); + } +} + +void CompilerGLSL::fixup_image_load_store_access() +{ + ir.for_each_typed_id([&](uint32_t var, const SPIRVariable &) { + auto &vartype = expression_type(var); + if (vartype.basetype == SPIRType::Image) + { + + + + + auto &flags = ir.meta[var].decoration.decoration_flags; + if (!flags.get(DecorationNonWritable) && !flags.get(DecorationNonReadable)) + { + flags.set(DecorationNonWritable); + flags.set(DecorationNonReadable); + } + } + }); +} + +void CompilerGLSL::emit_declared_builtin_block(StorageClass storage, ExecutionModel model) +{ + Bitset emitted_builtins; + Bitset global_builtins; + const SPIRVariable *block_var = nullptr; + bool emitted_block = false; + bool builtin_array = false; + + + + uint32_t cull_distance_size = 0; + uint32_t clip_distance_size = 0; + + ir.for_each_typed_id([&](uint32_t, SPIRVariable &var) { + auto &type = this->get(var.basetype); + bool block = has_decoration(type.self, DecorationBlock); + Bitset builtins; + + if (var.storage == storage && block && is_builtin_variable(var)) + { + uint32_t index = 0; + for (auto &m : ir.meta[type.self].members) + { + if (m.builtin) + { + builtins.set(m.builtin_type); + if (m.builtin_type == BuiltInCullDistance) + cull_distance_size = this->get(type.member_types[index]).array.front(); + else if (m.builtin_type == BuiltInClipDistance) + clip_distance_size = this->get(type.member_types[index]).array.front(); + } + index++; + } + } + else if (var.storage == storage && !block && is_builtin_variable(var)) + { + + auto &m = ir.meta[var.self].decoration; + if (m.builtin) + { + global_builtins.set(m.builtin_type); + if (m.builtin_type == BuiltInCullDistance) + cull_distance_size = type.array.front(); + else if (m.builtin_type == BuiltInClipDistance) + clip_distance_size = 
type.array.front(); + } + } + + if (builtins.empty()) + return; + + if (emitted_block) + SPIRV_CROSS_THROW("Cannot use more than one builtin I/O block."); + + emitted_builtins = builtins; + emitted_block = true; + builtin_array = !type.array.empty(); + block_var = &var; + }); + + global_builtins = + Bitset(global_builtins.get_lower() & ((1ull << BuiltInPosition) | (1ull << BuiltInPointSize) | + (1ull << BuiltInClipDistance) | (1ull << BuiltInCullDistance))); + + + if (!emitted_block) + emitted_builtins = global_builtins; + + + if (emitted_builtins.empty()) + return; + + if (storage == StorageClassOutput) + statement("out gl_PerVertex"); + else + statement("in gl_PerVertex"); + + begin_scope(); + if (emitted_builtins.get(BuiltInPosition)) + statement("vec4 gl_Position;"); + if (emitted_builtins.get(BuiltInPointSize)) + statement("float gl_PointSize;"); + if (emitted_builtins.get(BuiltInClipDistance)) + statement("float gl_ClipDistance[", clip_distance_size, "];"); + if (emitted_builtins.get(BuiltInCullDistance)) + statement("float gl_CullDistance[", cull_distance_size, "];"); + + bool tessellation = model == ExecutionModelTessellationEvaluation || model == ExecutionModelTessellationControl; + if (builtin_array) + { + + if (storage == StorageClassOutput) + set_name(block_var->self, "gl_out"); + else if (storage == StorageClassInput) + set_name(block_var->self, "gl_in"); + + if (model == ExecutionModelTessellationControl && storage == StorageClassOutput) + end_scope_decl(join(to_name(block_var->self), "[", get_entry_point().output_vertices, "]")); + else + end_scope_decl(join(to_name(block_var->self), tessellation ? "[gl_MaxPatchVertices]" : "[]")); + } + else + end_scope_decl(); + statement(""); +} + +void CompilerGLSL::declare_undefined_values() +{ + bool emitted = false; + ir.for_each_typed_id([&](uint32_t, const SPIRUndef &undef) { + statement(variable_decl(this->get(undef.basetype), to_name(undef.self), undef.self), ";"); + emitted = true; + }); + + if (emitted) + statement(""); +} + +bool CompilerGLSL::variable_is_lut(const SPIRVariable &var) const +{ + bool statically_assigned = var.statically_assigned && var.static_expression != ID(0) && var.remapped_variable; + + if (statically_assigned) + { + auto *constant = maybe_get(var.static_expression); + if (constant && constant->is_used_as_lut) + return true; + } + + return false; +} + +void CompilerGLSL::emit_resources() +{ + auto &execution = get_entry_point(); + + replace_illegal_names(); + + + + if (execution.model == ExecutionModelFragment && is_legacy()) + replace_fragment_outputs(); + + + if (!pls_inputs.empty() || !pls_outputs.empty()) + emit_pls(); + + + if (options.separate_shader_objects && !options.es && execution.model != ExecutionModelFragment) + { + switch (execution.model) + { + case ExecutionModelGeometry: + case ExecutionModelTessellationControl: + case ExecutionModelTessellationEvaluation: + emit_declared_builtin_block(StorageClassInput, execution.model); + emit_declared_builtin_block(StorageClassOutput, execution.model); + break; + + case ExecutionModelVertex: + emit_declared_builtin_block(StorageClassOutput, execution.model); + break; + + default: + break; + } + } + else + { + + + const char *storage = execution.model == ExecutionModelFragment ? 
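+ // Without separate shader objects there is no redeclared gl_PerVertex
+ // block; plain globals are emitted instead, e.g. for a vertex shader with
+ // clip_distance_count == 2:
+ //
+ //   out float gl_ClipDistance[2];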
"in" : "out"; + if (clip_distance_count != 0) + statement(storage, " float gl_ClipDistance[", clip_distance_count, "];"); + if (cull_distance_count != 0) + statement(storage, " float gl_CullDistance[", cull_distance_count, "];"); + if (clip_distance_count != 0 || cull_distance_count != 0) + statement(""); + } + + if (position_invariant) + { + statement("invariant gl_Position;"); + statement(""); + } + + bool emitted = false; + + + + + + { + auto loop_lock = ir.create_loop_hard_lock(); + for (auto &id_ : ir.ids_for_constant_or_type) + { + auto &id = ir.ids[id_]; + + if (id.get_type() == TypeConstant) + { + auto &c = id.get(); + + bool needs_declaration = c.specialization || c.is_used_as_lut; + + if (needs_declaration) + { + if (!options.vulkan_semantics && c.specialization) + { + c.specialization_constant_macro_name = + constant_value_macro_name(get_decoration(c.self, DecorationSpecId)); + } + emit_constant(c); + emitted = true; + } + } + else if (id.get_type() == TypeConstantOp) + { + emit_specialization_constant_op(id.get()); + emitted = true; + } + else if (id.get_type() == TypeType) + { + auto &type = id.get(); + if (type.basetype == SPIRType::Struct && type.array.empty() && !type.pointer && + (!ir.meta[type.self].decoration.decoration_flags.get(DecorationBlock) && + !ir.meta[type.self].decoration.decoration_flags.get(DecorationBufferBlock))) + { + if (emitted) + statement(""); + emitted = false; + + emit_struct(type); + } + } + } + } + + if (emitted) + statement(""); + + + + + if (execution.model == ExecutionModelGLCompute && !options.vulkan_semantics && + execution.workgroup_size.constant != 0) + { + SpecializationConstant wg_x, wg_y, wg_z; + get_work_group_size_specialization_constants(wg_x, wg_y, wg_z); + + if ((wg_x.id != ConstantID(0)) || (wg_y.id != ConstantID(0)) || (wg_z.id != ConstantID(0))) + { + SmallVector inputs; + build_workgroup_size(inputs, wg_x, wg_y, wg_z); + statement("layout(", merge(inputs), ") in;"); + statement(""); + } + } + + emitted = false; + + if (ir.addressing_model == AddressingModelPhysicalStorageBuffer64EXT) + { + for (auto type : physical_storage_non_block_pointer_types) + { + emit_buffer_reference_block(get(type), false); + } + + + + + + ir.for_each_typed_id([&](uint32_t, SPIRType &type) { + bool has_block_flags = has_decoration(type.self, DecorationBlock); + if (has_block_flags && type.pointer && type.pointer_depth == 1 && !type_is_array_of_pointers(type) && + type.storage == StorageClassPhysicalStorageBufferEXT) + { + emit_buffer_reference_block(type, true); + } + }); + + ir.for_each_typed_id([&](uint32_t, SPIRType &type) { + bool has_block_flags = has_decoration(type.self, DecorationBlock); + if (has_block_flags && type.pointer && type.pointer_depth == 1 && !type_is_array_of_pointers(type) && + type.storage == StorageClassPhysicalStorageBufferEXT) + { + emit_buffer_reference_block(type, false); + } + }); + } + + + ir.for_each_typed_id([&](uint32_t, SPIRVariable &var) { + auto &type = this->get(var.basetype); + + bool is_block_storage = type.storage == StorageClassStorageBuffer || type.storage == StorageClassUniform || + type.storage == StorageClassShaderRecordBufferNV; + bool has_block_flags = ir.meta[type.self].decoration.decoration_flags.get(DecorationBlock) || + ir.meta[type.self].decoration.decoration_flags.get(DecorationBufferBlock); + + if (var.storage != StorageClassFunction && type.pointer && is_block_storage && !is_hidden_variable(var) && + has_block_flags) + { + emit_buffer_block(var); + } + }); + + + ir.for_each_typed_id([&](uint32_t, 
SPIRVariable &var) { + auto &type = this->get(var.basetype); + if (var.storage != StorageClassFunction && type.pointer && type.storage == StorageClassPushConstant && + !is_hidden_variable(var)) + { + emit_push_constant_block(var); + } + }); + + bool skip_separate_image_sampler = !combined_image_samplers.empty() || !options.vulkan_semantics; + + + ir.for_each_typed_id([&](uint32_t, SPIRVariable &var) { + auto &type = this->get(var.basetype); + + + if (skip_separate_image_sampler) + { + + bool sampler_buffer = type.basetype == SPIRType::Image && type.image.dim == DimBuffer; + bool separate_image = type.basetype == SPIRType::Image && type.image.sampled == 1; + bool separate_sampler = type.basetype == SPIRType::Sampler; + if (!sampler_buffer && (separate_image || separate_sampler)) + return; + } + + if (var.storage != StorageClassFunction && type.pointer && + (type.storage == StorageClassUniformConstant || type.storage == StorageClassAtomicCounter || + type.storage == StorageClassRayPayloadNV || type.storage == StorageClassIncomingRayPayloadNV || + type.storage == StorageClassCallableDataNV || type.storage == StorageClassIncomingCallableDataNV || + type.storage == StorageClassHitAttributeNV) && + !is_hidden_variable(var)) + { + emit_uniform(var); + emitted = true; + } + }); + + if (emitted) + statement(""); + emitted = false; + + + ir.for_each_typed_id([&](uint32_t, SPIRVariable &var) { + auto &type = this->get(var.basetype); + + if (var.storage != StorageClassFunction && type.pointer && + (var.storage == StorageClassInput || var.storage == StorageClassOutput) && + interface_variable_exists_in_entry_point(var.self) && !is_hidden_variable(var)) + { + emit_interface_block(var); + emitted = true; + } + else if (is_builtin_variable(var)) + { + + + if (options.vertex.support_nonzero_base_instance && + ir.meta[var.self].decoration.builtin_type == BuiltInInstanceIndex && !options.vulkan_semantics) + { + statement("uniform int SPIRV_Cross_BaseInstance;"); + emitted = true; + } + } + }); + + + for (auto global : global_variables) + { + auto &var = get(global); + if (var.storage != StorageClassOutput) + { + if (!variable_is_lut(var)) + { + add_resource_name(var.self); + statement(variable_decl(var), ";"); + emitted = true; + } + } + } + + if (emitted) + statement(""); + + declare_undefined_values(); +} + + + + +string CompilerGLSL::to_func_call_arg(const SPIRFunction::Parameter &, uint32_t id) +{ + + uint32_t name_id = id; + auto *var = maybe_get(id); + if (var && var->basevariable) + name_id = var->basevariable; + return to_expression(name_id); +} + +void CompilerGLSL::handle_invalid_expression(uint32_t id) +{ + + + forced_temporaries.insert(id); + force_recompile(); +} + + + + + +string CompilerGLSL::unpack_expression_type(string expr_str, const SPIRType &, uint32_t, bool, bool) +{ + return expr_str; +} + + +void CompilerGLSL::strip_enclosed_expression(string &expr) +{ + if (expr.size() < 2 || expr.front() != '(' || expr.back() != ')') + return; + + + uint32_t paren_count = 0; + for (auto &c : expr) + { + if (c == '(') + paren_count++; + else if (c == ')') + { + paren_count--; + + + + if (paren_count == 0 && &c != &expr.back()) + return; + } + } + expr.erase(expr.size() - 1, 1); + expr.erase(begin(expr)); +} + +string CompilerGLSL::enclose_expression(const string &expr) +{ + bool need_parens = false; + + + + if (!expr.empty()) + { + auto c = expr.front(); + if (c == '-' || c == '+' || c == '!' 
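+ // The check below is precedence safety: a leading unary operator, or any
+ // top-level space, means the expression must be wrapped before it can be
+ // spliced into a larger one. Illustration:
+ //   enclose_expression("a + b")   -> "(a + b)"
+ //   enclose_expression("texel.x") -> "texel.x" (unchanged)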
|| c == '~' || c == '&' || c == '*') + need_parens = true; + } + + if (!need_parens) + { + uint32_t paren_count = 0; + for (auto c : expr) + { + if (c == '(' || c == '[') + paren_count++; + else if (c == ')' || c == ']') + { + assert(paren_count); + paren_count--; + } + else if (c == ' ' && paren_count == 0) + { + need_parens = true; + break; + } + } + assert(paren_count == 0); + } + + + + + if (need_parens) + return join('(', expr, ')'); + else + return expr; +} + +string CompilerGLSL::dereference_expression(const SPIRType &expr_type, const std::string &expr) +{ + + + + if (expr.front() == '&') + return expr.substr(1); + else if (backend.native_pointers) + return join('*', expr); + else if (expr_type.storage == StorageClassPhysicalStorageBufferEXT && expr_type.basetype != SPIRType::Struct && + expr_type.pointer_depth == 1) + { + return join(enclose_expression(expr), ".value"); + } + else + return expr; +} + +string CompilerGLSL::address_of_expression(const std::string &expr) +{ + if (expr.size() > 3 && expr[0] == '(' && expr[1] == '*' && expr.back() == ')') + { + + + + + return enclose_expression(expr.substr(2, expr.size() - 3)); + } + else if (expr.front() == '*') + { + + + return expr.substr(1); + } + else + return join('&', enclose_expression(expr)); +} + + +string CompilerGLSL::to_enclosed_expression(uint32_t id, bool register_expression_read) +{ + return enclose_expression(to_expression(id, register_expression_read)); +} + + + +string CompilerGLSL::to_unpacked_row_major_matrix_expression(uint32_t id) +{ + return unpack_expression_type(to_expression(id), expression_type(id), + get_extended_decoration(id, SPIRVCrossDecorationPhysicalTypeID), + has_extended_decoration(id, SPIRVCrossDecorationPhysicalTypePacked), true); +} + +string CompilerGLSL::to_unpacked_expression(uint32_t id, bool register_expression_read) +{ + + auto *e = maybe_get(id); + bool need_transpose = e && e->need_transpose; + bool is_remapped = has_extended_decoration(id, SPIRVCrossDecorationPhysicalTypeID); + bool is_packed = has_extended_decoration(id, SPIRVCrossDecorationPhysicalTypePacked); + + if (!need_transpose && (is_remapped || is_packed)) + { + return unpack_expression_type(to_expression(id, register_expression_read), + get_pointee_type(expression_type_id(id)), + get_extended_decoration(id, SPIRVCrossDecorationPhysicalTypeID), + has_extended_decoration(id, SPIRVCrossDecorationPhysicalTypePacked), false); + } + else + return to_expression(id, register_expression_read); +} + +string CompilerGLSL::to_enclosed_unpacked_expression(uint32_t id, bool register_expression_read) +{ + + auto *e = maybe_get(id); + bool need_transpose = e && e->need_transpose; + bool is_remapped = has_extended_decoration(id, SPIRVCrossDecorationPhysicalTypeID); + bool is_packed = has_extended_decoration(id, SPIRVCrossDecorationPhysicalTypePacked); + if (!need_transpose && (is_remapped || is_packed)) + { + return unpack_expression_type(to_expression(id, register_expression_read), expression_type(id), + get_extended_decoration(id, SPIRVCrossDecorationPhysicalTypeID), + has_extended_decoration(id, SPIRVCrossDecorationPhysicalTypePacked), false); + } + else + return to_enclosed_expression(id, register_expression_read); +} + +string CompilerGLSL::to_dereferenced_expression(uint32_t id, bool register_expression_read) +{ + auto &type = expression_type(id); + if (type.pointer && should_dereference(id)) + return dereference_expression(type, to_enclosed_expression(id, register_expression_read)); + else + return to_expression(id, 
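+ // The to_*_expression() helpers layer three orthogonal concerns on top of
+ // plain to_expression(): enclosing (precedence), unpacking (physical
+ // storage layout back to the logical type), and pointer handling
+ // (dereference here, address-of in to_pointer_expression() below).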
register_expression_read); +} + +string CompilerGLSL::to_pointer_expression(uint32_t id, bool register_expression_read) +{ + auto &type = expression_type(id); + if (type.pointer && expression_is_lvalue(id) && !should_dereference(id)) + return address_of_expression(to_enclosed_expression(id, register_expression_read)); + else + return to_unpacked_expression(id, register_expression_read); +} + +string CompilerGLSL::to_enclosed_pointer_expression(uint32_t id, bool register_expression_read) +{ + auto &type = expression_type(id); + if (type.pointer && expression_is_lvalue(id) && !should_dereference(id)) + return address_of_expression(to_enclosed_expression(id, register_expression_read)); + else + return to_enclosed_unpacked_expression(id, register_expression_read); +} + +string CompilerGLSL::to_extract_component_expression(uint32_t id, uint32_t index) +{ + auto expr = to_enclosed_expression(id); + if (has_extended_decoration(id, SPIRVCrossDecorationPhysicalTypePacked)) + return join(expr, "[", index, "]"); + else + return join(expr, ".", index_to_swizzle(index)); +} + +string CompilerGLSL::to_rerolled_array_expression(const string &base_expr, const SPIRType &type) +{ + uint32_t size = to_array_size_literal(type); + auto &parent = get(type.parent_type); + string expr = "{ "; + + for (uint32_t i = 0; i < size; i++) + { + auto subexpr = join(base_expr, "[", convert_to_string(i), "]"); + if (parent.array.empty()) + expr += subexpr; + else + expr += to_rerolled_array_expression(subexpr, parent); + + if (i + 1 < size) + expr += ", "; + } + + expr += " }"; + return expr; +} + +string CompilerGLSL::to_composite_constructor_expression(uint32_t id) +{ + auto &type = expression_type(id); + if (!backend.array_is_value_type && !type.array.empty()) + { + + + + + + + + + + + return to_rerolled_array_expression(to_enclosed_expression(id), type); + } + else + return to_unpacked_expression(id); +} + +string CompilerGLSL::to_expression(uint32_t id, bool register_expression_read) +{ + auto itr = invalid_expressions.find(id); + if (itr != end(invalid_expressions)) + handle_invalid_expression(id); + + if (ir.ids[id].get_type() == TypeExpression) + { + + + + + + + + + + + + auto &expr = get(id); + for (uint32_t dep : expr.expression_dependencies) + if (invalid_expressions.find(dep) != end(invalid_expressions)) + handle_invalid_expression(dep); + } + + if (register_expression_read) + track_expression_read(id); + + switch (ir.ids[id].get_type()) + { + case TypeExpression: + { + auto &e = get(id); + if (e.base_expression) + return to_enclosed_expression(e.base_expression) + e.expression; + else if (e.need_transpose) + { + + + uint32_t physical_type_id = get_extended_decoration(id, SPIRVCrossDecorationPhysicalTypeID); + bool is_packed = has_extended_decoration(id, SPIRVCrossDecorationPhysicalTypePacked); + return convert_row_major_matrix(e.expression, get(e.expression_type), physical_type_id, + is_packed); + } + else + { + if (is_forcing_recompilation()) + { + + + + return "_"; + } + else + return e.expression; + } + } + + case TypeConstant: + { + auto &c = get(id); + auto &type = get(c.constant_type); + + + auto &dec = ir.meta[c.self].decoration; + if (dec.builtin) + return builtin_to_glsl(dec.builtin_type, StorageClassGeneric); + else if (c.specialization) + return to_name(id); + else if (c.is_used_as_lut) + return to_name(id); + else if (type.basetype == SPIRType::Struct && !backend.can_declare_struct_inline) + return to_name(id); + else if (!type.array.empty() && !backend.can_declare_arrays_inline) + return 
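+ // A constant is referenced by name whenever it cannot be spelled inline:
+ // builtins map to their GLSL names, specialization constants and LUTs use
+ // to_name(), and struct/array constants do too on backends without inline
+ // struct/array literals (this branch); everything else becomes a literal
+ // via constant_expression().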
to_name(id); + else + return constant_expression(c); + } + + case TypeConstantOp: + return to_name(id); + + case TypeVariable: + { + auto &var = get(id); + + + if (var.statically_assigned || (var.loop_variable && !var.loop_variable_enable)) + return to_expression(var.static_expression); + else if (var.deferred_declaration) + { + var.deferred_declaration = false; + return variable_decl(var); + } + else if (flattened_structs.count(id)) + { + return load_flattened_struct(var); + } + else + { + auto &dec = ir.meta[var.self].decoration; + if (dec.builtin) + return builtin_to_glsl(dec.builtin_type, var.storage); + else + return to_name(id); + } + } + + case TypeCombinedImageSampler: + + + + + + SPIRV_CROSS_THROW("Combined image samplers have no default expression representation."); + + case TypeAccessChain: + + SPIRV_CROSS_THROW("Access chains have no default expression representation."); + + default: + return to_name(id); + } +} + +string CompilerGLSL::constant_op_expression(const SPIRConstantOp &cop) +{ + auto &type = get(cop.basetype); + bool binary = false; + bool unary = false; + string op; + + if (is_legacy() && is_unsigned_opcode(cop.opcode)) + SPIRV_CROSS_THROW("Unsigned integers are not supported on legacy targets."); + + + switch (cop.opcode) + { + case OpSConvert: + case OpUConvert: + case OpFConvert: + op = type_to_glsl_constructor(type); + break; + +#define GLSL_BOP(opname, x) \ + case Op##opname: \ + binary = true; \ + op = x; \ + break + +#define GLSL_UOP(opname, x) \ + case Op##opname: \ + unary = true; \ + op = x; \ + break + + GLSL_UOP(SNegate, "-"); + GLSL_UOP(Not, "~"); + GLSL_BOP(IAdd, "+"); + GLSL_BOP(ISub, "-"); + GLSL_BOP(IMul, "*"); + GLSL_BOP(SDiv, "/"); + GLSL_BOP(UDiv, "/"); + GLSL_BOP(UMod, "%"); + GLSL_BOP(SMod, "%"); + GLSL_BOP(ShiftRightLogical, ">>"); + GLSL_BOP(ShiftRightArithmetic, ">>"); + GLSL_BOP(ShiftLeftLogical, "<<"); + GLSL_BOP(BitwiseOr, "|"); + GLSL_BOP(BitwiseXor, "^"); + GLSL_BOP(BitwiseAnd, "&"); + GLSL_BOP(LogicalOr, "||"); + GLSL_BOP(LogicalAnd, "&&"); + GLSL_UOP(LogicalNot, "!"); + GLSL_BOP(LogicalEqual, "=="); + GLSL_BOP(LogicalNotEqual, "!="); + GLSL_BOP(IEqual, "=="); + GLSL_BOP(INotEqual, "!="); + GLSL_BOP(ULessThan, "<"); + GLSL_BOP(SLessThan, "<"); + GLSL_BOP(ULessThanEqual, "<="); + GLSL_BOP(SLessThanEqual, "<="); + GLSL_BOP(UGreaterThan, ">"); + GLSL_BOP(SGreaterThan, ">"); + GLSL_BOP(UGreaterThanEqual, ">="); + GLSL_BOP(SGreaterThanEqual, ">="); + + case OpSelect: + { + if (cop.arguments.size() < 3) + SPIRV_CROSS_THROW("Not enough arguments to OpSpecConstantOp."); + + + + + + + if (to_trivial_mix_op(type, op, cop.arguments[2], cop.arguments[1], cop.arguments[0])) + { + + } + else + { + + return to_ternary_expression(type, cop.arguments[0], cop.arguments[1], cop.arguments[2]); + } + break; + } + + case OpVectorShuffle: + { + string expr = type_to_glsl_constructor(type); + expr += "("; + + uint32_t left_components = expression_type(cop.arguments[0]).vecsize; + string left_arg = to_enclosed_expression(cop.arguments[0]); + string right_arg = to_enclosed_expression(cop.arguments[1]); + + for (uint32_t i = 2; i < uint32_t(cop.arguments.size()); i++) + { + uint32_t index = cop.arguments[i]; + if (index >= left_components) + expr += right_arg + "." + "xyzw"[index - left_components]; + else + expr += left_arg + "." 
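+ // Rough example (hypothetical operands): shuffling vec2 a with vec2 b
+ // using indices {0, 3} yields "vec2(a.x, b.y)" -- indices below
+ // left_components select from the first vector, larger ones are rebased
+ // onto the second.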
+ "xyzw"[index]; + + if (i + 1 < uint32_t(cop.arguments.size())) + expr += ", "; + } + + expr += ")"; + return expr; + } + + case OpCompositeExtract: + { + auto expr = access_chain_internal(cop.arguments[0], &cop.arguments[1], uint32_t(cop.arguments.size() - 1), + ACCESS_CHAIN_INDEX_IS_LITERAL_BIT, nullptr); + return expr; + } + + case OpCompositeInsert: + SPIRV_CROSS_THROW("OpCompositeInsert spec constant op is not supported."); + + default: + + SPIRV_CROSS_THROW("Unimplemented spec constant op."); + } + + uint32_t bit_width = 0; + if (unary || binary || cop.opcode == OpSConvert || cop.opcode == OpUConvert) + bit_width = expression_type(cop.arguments[0]).width; + + SPIRType::BaseType input_type; + bool skip_cast_if_equal_type = opcode_is_sign_invariant(cop.opcode); + + switch (cop.opcode) + { + case OpIEqual: + case OpINotEqual: + input_type = to_signed_basetype(bit_width); + break; + + case OpSLessThan: + case OpSLessThanEqual: + case OpSGreaterThan: + case OpSGreaterThanEqual: + case OpSMod: + case OpSDiv: + case OpShiftRightArithmetic: + case OpSConvert: + case OpSNegate: + input_type = to_signed_basetype(bit_width); + break; + + case OpULessThan: + case OpULessThanEqual: + case OpUGreaterThan: + case OpUGreaterThanEqual: + case OpUMod: + case OpUDiv: + case OpShiftRightLogical: + case OpUConvert: + input_type = to_unsigned_basetype(bit_width); + break; + + default: + input_type = type.basetype; + break; + } + +#undef GLSL_BOP +#undef GLSL_UOP + if (binary) + { + if (cop.arguments.size() < 2) + SPIRV_CROSS_THROW("Not enough arguments to OpSpecConstantOp."); + + string cast_op0; + string cast_op1; + auto expected_type = binary_op_bitcast_helper(cast_op0, cast_op1, input_type, cop.arguments[0], + cop.arguments[1], skip_cast_if_equal_type); + + if (type.basetype != input_type && type.basetype != SPIRType::Boolean) + { + expected_type.basetype = input_type; + auto expr = bitcast_glsl_op(type, expected_type); + expr += '('; + expr += join(cast_op0, " ", op, " ", cast_op1); + expr += ')'; + return expr; + } + else + return join("(", cast_op0, " ", op, " ", cast_op1, ")"); + } + else if (unary) + { + if (cop.arguments.size() < 1) + SPIRV_CROSS_THROW("Not enough arguments to OpSpecConstantOp."); + + + + return join("(", op, bitcast_glsl(type, cop.arguments[0]), ")"); + } + else if (cop.opcode == OpSConvert || cop.opcode == OpUConvert) + { + if (cop.arguments.size() < 1) + SPIRV_CROSS_THROW("Not enough arguments to OpSpecConstantOp."); + + auto &arg_type = expression_type(cop.arguments[0]); + if (arg_type.width < type.width && input_type != arg_type.basetype) + { + auto expected = arg_type; + expected.basetype = input_type; + return join(op, "(", bitcast_glsl(expected, cop.arguments[0]), ")"); + } + else + return join(op, "(", to_expression(cop.arguments[0]), ")"); + } + else + { + if (cop.arguments.size() < 1) + SPIRV_CROSS_THROW("Not enough arguments to OpSpecConstantOp."); + return join(op, "(", to_expression(cop.arguments[0]), ")"); + } +} + +string CompilerGLSL::constant_expression(const SPIRConstant &c) +{ + auto &type = get(c.constant_type); + + if (type.pointer) + { + return backend.null_pointer_literal; + } + else if (!c.subconstants.empty()) + { + + string res; + + + bool needs_trailing_tracket = false; + if (backend.use_initializer_list && backend.use_typed_initializer_list && type.basetype == SPIRType::Struct && + type.array.empty()) + { + res = type_to_glsl_constructor(type) + "{ "; + } + else if (backend.use_initializer_list && backend.use_typed_initializer_list && 
!type.array.empty()) + { + res = type_to_glsl_constructor(type) + "({ "; + needs_trailing_tracket = true; + } + else if (backend.use_initializer_list) + { + res = "{ "; + } + else + { + res = type_to_glsl_constructor(type) + "("; + } + + for (auto &elem : c.subconstants) + { + auto &subc = get(elem); + if (subc.specialization) + res += to_name(elem); + else + res += constant_expression(subc); + + if (&elem != &c.subconstants.back()) + res += ", "; + } + + res += backend.use_initializer_list ? " }" : ")"; + if (needs_trailing_tracket) + res += ")"; + + return res; + } + else if (type.basetype == SPIRType::Struct && type.member_types.size() == 0) + { + + if (backend.supports_empty_struct) + return "{ }"; + else if (backend.use_typed_initializer_list) + return join(type_to_glsl(get(c.constant_type)), "{ 0 }"); + else if (backend.use_initializer_list) + return "{ 0 }"; + else + return join(type_to_glsl(get(c.constant_type)), "(0)"); + } + else if (c.columns() == 1) + { + return constant_expression_vector(c, 0); + } + else + { + string res = type_to_glsl(get(c.constant_type)) + "("; + for (uint32_t col = 0; col < c.columns(); col++) + { + if (c.specialization_constant_id(col) != 0) + res += to_name(c.specialization_constant_id(col)); + else + res += constant_expression_vector(c, col); + + if (col + 1 < c.columns()) + res += ", "; + } + res += ")"; + return res; + } +} + +#ifdef _MSC_VER + + +#pragma warning(push) +#pragma warning(disable : 4996) +#endif + +string CompilerGLSL::convert_half_to_string(const SPIRConstant &c, uint32_t col, uint32_t row) +{ + string res; + float float_value = c.scalar_f16(col, row); + + + + if (std::isnan(float_value) || std::isinf(float_value)) + { + SPIRType type; + type.basetype = SPIRType::Half; + type.vecsize = 1; + type.columns = 1; + + if (float_value == numeric_limits::infinity()) + res = join(type_to_glsl(type), "(1.0 / 0.0)"); + else if (float_value == -numeric_limits::infinity()) + res = join(type_to_glsl(type), "(-1.0 / 0.0)"); + else if (std::isnan(float_value)) + res = join(type_to_glsl(type), "(0.0 / 0.0)"); + else + SPIRV_CROSS_THROW("Cannot represent non-finite floating point constant."); + } + else + { + SPIRType type; + type.basetype = SPIRType::Half; + type.vecsize = 1; + type.columns = 1; + res = join(type_to_glsl(type), "(", convert_to_string(float_value, current_locale_radix_character), ")"); + } + + return res; +} + +string CompilerGLSL::convert_float_to_string(const SPIRConstant &c, uint32_t col, uint32_t row) +{ + string res; + float float_value = c.scalar_f32(col, row); + + if (std::isnan(float_value) || std::isinf(float_value)) + { + + if (!is_legacy()) + { + SPIRType out_type; + SPIRType in_type; + out_type.basetype = SPIRType::Float; + in_type.basetype = SPIRType::UInt; + out_type.vecsize = 1; + in_type.vecsize = 1; + out_type.width = 32; + in_type.width = 32; + + char print_buffer[32]; + sprintf(print_buffer, "0x%xu", c.scalar(col, row)); + res = join(bitcast_glsl_op(out_type, in_type), "(", print_buffer, ")"); + } + else + { + if (float_value == numeric_limits::infinity()) + { + if (backend.float_literal_suffix) + res = "(1.0f / 0.0f)"; + else + res = "(1.0 / 0.0)"; + } + else if (float_value == -numeric_limits::infinity()) + { + if (backend.float_literal_suffix) + res = "(-1.0f / 0.0f)"; + else + res = "(-1.0 / 0.0)"; + } + else if (std::isnan(float_value)) + { + if (backend.float_literal_suffix) + res = "(0.0f / 0.0f)"; + else + res = "(0.0 / 0.0)"; + } + else + SPIRV_CROSS_THROW("Cannot represent non-finite floating point 
constant."); + } + } + else + { + res = convert_to_string(float_value, current_locale_radix_character); + if (backend.float_literal_suffix) + res += "f"; + } + + return res; +} + +std::string CompilerGLSL::convert_double_to_string(const SPIRConstant &c, uint32_t col, uint32_t row) +{ + string res; + double double_value = c.scalar_f64(col, row); + + if (std::isnan(double_value) || std::isinf(double_value)) + { + + if (!is_legacy()) + { + SPIRType out_type; + SPIRType in_type; + out_type.basetype = SPIRType::Double; + in_type.basetype = SPIRType::UInt64; + out_type.vecsize = 1; + in_type.vecsize = 1; + out_type.width = 64; + in_type.width = 64; + + uint64_t u64_value = c.scalar_u64(col, row); + + if (options.es) + SPIRV_CROSS_THROW("64-bit integers/float not supported in ES profile."); + require_extension_internal("GL_ARB_gpu_shader_int64"); + + char print_buffer[64]; + sprintf(print_buffer, "0x%llx%s", static_cast(u64_value), + backend.long_long_literal_suffix ? "ull" : "ul"); + res = join(bitcast_glsl_op(out_type, in_type), "(", print_buffer, ")"); + } + else + { + if (options.es) + SPIRV_CROSS_THROW("FP64 not supported in ES profile."); + if (options.version < 400) + require_extension_internal("GL_ARB_gpu_shader_fp64"); + + if (double_value == numeric_limits::infinity()) + { + if (backend.double_literal_suffix) + res = "(1.0lf / 0.0lf)"; + else + res = "(1.0 / 0.0)"; + } + else if (double_value == -numeric_limits::infinity()) + { + if (backend.double_literal_suffix) + res = "(-1.0lf / 0.0lf)"; + else + res = "(-1.0 / 0.0)"; + } + else if (std::isnan(double_value)) + { + if (backend.double_literal_suffix) + res = "(0.0lf / 0.0lf)"; + else + res = "(0.0 / 0.0)"; + } + else + SPIRV_CROSS_THROW("Cannot represent non-finite floating point constant."); + } + } + else + { + res = convert_to_string(double_value, current_locale_radix_character); + if (backend.double_literal_suffix) + res += "lf"; + } + + return res; +} + +#ifdef _MSC_VER +#pragma warning(pop) +#endif + +string CompilerGLSL::constant_expression_vector(const SPIRConstant &c, uint32_t vector) +{ + auto type = get(c.constant_type); + type.columns = 1; + + auto scalar_type = type; + scalar_type.vecsize = 1; + + string res; + bool splat = backend.use_constructor_splatting && c.vector_size() > 1; + bool swizzle_splat = backend.can_swizzle_scalar && c.vector_size() > 1; + + if (!type_is_floating_point(type)) + { + + swizzle_splat = false; + } + + if (splat || swizzle_splat) + { + + for (uint32_t i = 0; i < c.vector_size(); i++) + { + if (c.specialization_constant_id(vector, i) != 0) + { + splat = false; + swizzle_splat = false; + break; + } + } + } + + if (splat || swizzle_splat) + { + if (type.width == 64) + { + uint64_t ident = c.scalar_u64(vector, 0); + for (uint32_t i = 1; i < c.vector_size(); i++) + { + if (ident != c.scalar_u64(vector, i)) + { + splat = false; + swizzle_splat = false; + break; + } + } + } + else + { + uint32_t ident = c.scalar(vector, 0); + for (uint32_t i = 1; i < c.vector_size(); i++) + { + if (ident != c.scalar(vector, i)) + { + splat = false; + swizzle_splat = false; + } + } + } + } + + if (c.vector_size() > 1 && !swizzle_splat) + res += type_to_glsl(type) + "("; + + switch (type.basetype) + { + case SPIRType::Half: + if (splat || swizzle_splat) + { + res += convert_half_to_string(c, vector, 0); + if (swizzle_splat) + res = remap_swizzle(get(c.constant_type), 1, res); + } + else + { + for (uint32_t i = 0; i < c.vector_size(); i++) + { + if (c.vector_size() > 1 && c.specialization_constant_id(vector, i) != 0) + 
res += to_name(c.specialization_constant_id(vector, i)); + else + res += convert_half_to_string(c, vector, i); + + if (i + 1 < c.vector_size()) + res += ", "; + } + } + break; + + case SPIRType::Float: + if (splat || swizzle_splat) + { + res += convert_float_to_string(c, vector, 0); + if (swizzle_splat) + res = remap_swizzle(get(c.constant_type), 1, res); + } + else + { + for (uint32_t i = 0; i < c.vector_size(); i++) + { + if (c.vector_size() > 1 && c.specialization_constant_id(vector, i) != 0) + res += to_name(c.specialization_constant_id(vector, i)); + else + res += convert_float_to_string(c, vector, i); + + if (i + 1 < c.vector_size()) + res += ", "; + } + } + break; + + case SPIRType::Double: + if (splat || swizzle_splat) + { + res += convert_double_to_string(c, vector, 0); + if (swizzle_splat) + res = remap_swizzle(get(c.constant_type), 1, res); + } + else + { + for (uint32_t i = 0; i < c.vector_size(); i++) + { + if (c.vector_size() > 1 && c.specialization_constant_id(vector, i) != 0) + res += to_name(c.specialization_constant_id(vector, i)); + else + res += convert_double_to_string(c, vector, i); + + if (i + 1 < c.vector_size()) + res += ", "; + } + } + break; + + case SPIRType::Int64: + if (splat) + { + res += convert_to_string(c.scalar_i64(vector, 0)); + if (backend.long_long_literal_suffix) + res += "ll"; + else + res += "l"; + } + else + { + for (uint32_t i = 0; i < c.vector_size(); i++) + { + if (c.vector_size() > 1 && c.specialization_constant_id(vector, i) != 0) + res += to_name(c.specialization_constant_id(vector, i)); + else + { + res += convert_to_string(c.scalar_i64(vector, i)); + if (backend.long_long_literal_suffix) + res += "ll"; + else + res += "l"; + } + + if (i + 1 < c.vector_size()) + res += ", "; + } + } + break; + + case SPIRType::UInt64: + if (splat) + { + res += convert_to_string(c.scalar_u64(vector, 0)); + if (backend.long_long_literal_suffix) + res += "ull"; + else + res += "ul"; + } + else + { + for (uint32_t i = 0; i < c.vector_size(); i++) + { + if (c.vector_size() > 1 && c.specialization_constant_id(vector, i) != 0) + res += to_name(c.specialization_constant_id(vector, i)); + else + { + res += convert_to_string(c.scalar_u64(vector, i)); + if (backend.long_long_literal_suffix) + res += "ull"; + else + res += "ul"; + } + + if (i + 1 < c.vector_size()) + res += ", "; + } + } + break; + + case SPIRType::UInt: + if (splat) + { + res += convert_to_string(c.scalar(vector, 0)); + if (is_legacy()) + { + + + if (c.scalar_i32(vector, 0) < 0) + SPIRV_CROSS_THROW("Tried to convert uint literal into int, but this made the literal negative."); + } + else if (backend.uint32_t_literal_suffix) + res += "u"; + } + else + { + for (uint32_t i = 0; i < c.vector_size(); i++) + { + if (c.vector_size() > 1 && c.specialization_constant_id(vector, i) != 0) + res += to_name(c.specialization_constant_id(vector, i)); + else + { + res += convert_to_string(c.scalar(vector, i)); + if (is_legacy()) + { + + + if (c.scalar_i32(vector, i) < 0) + SPIRV_CROSS_THROW( + "Tried to convert uint literal into int, but this made the literal negative."); + } + else if (backend.uint32_t_literal_suffix) + res += "u"; + } + + if (i + 1 < c.vector_size()) + res += ", "; + } + } + break; + + case SPIRType::Int: + if (splat) + res += convert_to_string(c.scalar_i32(vector, 0)); + else + { + for (uint32_t i = 0; i < c.vector_size(); i++) + { + if (c.vector_size() > 1 && c.specialization_constant_id(vector, i) != 0) + res += to_name(c.specialization_constant_id(vector, i)); + else + res += 
convert_to_string(c.scalar_i32(vector, i)); + if (i + 1 < c.vector_size()) + res += ", "; + } + } + break; + + case SPIRType::UShort: + if (splat) + { + res += convert_to_string(c.scalar(vector, 0)); + } + else + { + for (uint32_t i = 0; i < c.vector_size(); i++) + { + if (c.vector_size() > 1 && c.specialization_constant_id(vector, i) != 0) + res += to_name(c.specialization_constant_id(vector, i)); + else + { + if (*backend.uint16_t_literal_suffix) + { + res += convert_to_string(c.scalar_u16(vector, i)); + res += backend.uint16_t_literal_suffix; + } + else + { + + res += type_to_glsl(scalar_type); + res += "("; + res += convert_to_string(c.scalar_u16(vector, i)); + res += ")"; + } + } + + if (i + 1 < c.vector_size()) + res += ", "; + } + } + break; + + case SPIRType::Short: + if (splat) + { + res += convert_to_string(c.scalar_i16(vector, 0)); + } + else + { + for (uint32_t i = 0; i < c.vector_size(); i++) + { + if (c.vector_size() > 1 && c.specialization_constant_id(vector, i) != 0) + res += to_name(c.specialization_constant_id(vector, i)); + else + { + if (*backend.int16_t_literal_suffix) + { + res += convert_to_string(c.scalar_i16(vector, i)); + res += backend.int16_t_literal_suffix; + } + else + { + + res += type_to_glsl(scalar_type); + res += "("; + res += convert_to_string(c.scalar_i16(vector, i)); + res += ")"; + } + } + + if (i + 1 < c.vector_size()) + res += ", "; + } + } + break; + + case SPIRType::UByte: + if (splat) + { + res += convert_to_string(c.scalar_u8(vector, 0)); + } + else + { + for (uint32_t i = 0; i < c.vector_size(); i++) + { + if (c.vector_size() > 1 && c.specialization_constant_id(vector, i) != 0) + res += to_name(c.specialization_constant_id(vector, i)); + else + { + res += type_to_glsl(scalar_type); + res += "("; + res += convert_to_string(c.scalar_u8(vector, i)); + res += ")"; + } + + if (i + 1 < c.vector_size()) + res += ", "; + } + } + break; + + case SPIRType::SByte: + if (splat) + { + res += convert_to_string(c.scalar_i8(vector, 0)); + } + else + { + for (uint32_t i = 0; i < c.vector_size(); i++) + { + if (c.vector_size() > 1 && c.specialization_constant_id(vector, i) != 0) + res += to_name(c.specialization_constant_id(vector, i)); + else + { + res += type_to_glsl(scalar_type); + res += "("; + res += convert_to_string(c.scalar_i8(vector, i)); + res += ")"; + } + + if (i + 1 < c.vector_size()) + res += ", "; + } + } + break; + + case SPIRType::Boolean: + if (splat) + res += c.scalar(vector, 0) ? "true" : "false"; + else + { + for (uint32_t i = 0; i < c.vector_size(); i++) + { + if (c.vector_size() > 1 && c.specialization_constant_id(vector, i) != 0) + res += to_name(c.specialization_constant_id(vector, i)); + else + res += c.scalar(vector, i) ? 
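+ // Booleans simply print as true/false; the integer cases above deal with
+ // literal suffixes instead ("u", "l"/"ll", "ul"/"ull"), and legacy targets
+ // reject uint literals that do not fit in a signed int rather than let the
+ // value silently flip sign.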
"true" : "false"; + + if (i + 1 < c.vector_size()) + res += ", "; + } + } + break; + + default: + SPIRV_CROSS_THROW("Invalid constant expression basetype."); + } + + if (c.vector_size() > 1 && !swizzle_splat) + res += ")"; + + return res; +} + +SPIRExpression &CompilerGLSL::emit_uninitialized_temporary_expression(uint32_t type, uint32_t id) +{ + forced_temporaries.insert(id); + emit_uninitialized_temporary(type, id); + return set(id, to_name(id), type, true); +} + +void CompilerGLSL::emit_uninitialized_temporary(uint32_t result_type, uint32_t result_id) +{ + + + if (current_continue_block && !hoisted_temporaries.count(result_id)) + { + auto &header = get(current_continue_block->loop_dominator); + if (find_if(begin(header.declare_temporary), end(header.declare_temporary), + [result_type, result_id](const pair &tmp) { + return tmp.first == result_type && tmp.second == result_id; + }) == end(header.declare_temporary)) + { + header.declare_temporary.emplace_back(result_type, result_id); + hoisted_temporaries.insert(result_id); + force_recompile(); + } + } + else if (hoisted_temporaries.count(result_id) == 0) + { + auto &type = get(result_type); + auto &flags = ir.meta[result_id].decoration.decoration_flags; + + + add_local_variable_name(result_id); + statement(flags_to_qualifiers_glsl(type, flags), variable_decl(type, to_name(result_id)), ";"); + } +} + +string CompilerGLSL::declare_temporary(uint32_t result_type, uint32_t result_id) +{ + auto &type = get(result_type); + auto &flags = ir.meta[result_id].decoration.decoration_flags; + + + + if (current_continue_block && !hoisted_temporaries.count(result_id)) + { + auto &header = get(current_continue_block->loop_dominator); + if (find_if(begin(header.declare_temporary), end(header.declare_temporary), + [result_type, result_id](const pair &tmp) { + return tmp.first == result_type && tmp.second == result_id; + }) == end(header.declare_temporary)) + { + header.declare_temporary.emplace_back(result_type, result_id); + hoisted_temporaries.insert(result_id); + force_recompile(); + } + + return join(to_name(result_id), " = "); + } + else if (hoisted_temporaries.count(result_id)) + { + + return join(to_name(result_id), " = "); + } + else + { + + add_local_variable_name(result_id); + return join(flags_to_qualifiers_glsl(type, flags), variable_decl(type, to_name(result_id)), " = "); + } +} + +bool CompilerGLSL::expression_is_forwarded(uint32_t id) const +{ + return forwarded_temporaries.count(id) != 0; +} + +bool CompilerGLSL::expression_suppresses_usage_tracking(uint32_t id) const +{ + return suppressed_usage_tracking.count(id) != 0; +} + +SPIRExpression &CompilerGLSL::emit_op(uint32_t result_type, uint32_t result_id, const string &rhs, bool forwarding, + bool suppress_usage_tracking) +{ + if (forwarding && (forced_temporaries.find(result_id) == end(forced_temporaries))) + { + + + forwarded_temporaries.insert(result_id); + if (suppress_usage_tracking) + suppressed_usage_tracking.insert(result_id); + + return set(result_id, rhs, result_type, true); + } + else + { + + statement(declare_temporary(result_type, result_id), rhs, ";"); + return set(result_id, to_name(result_id), result_type, true); + } +} + +void CompilerGLSL::emit_unary_op(uint32_t result_type, uint32_t result_id, uint32_t op0, const char *op) +{ + bool forward = should_forward(op0); + emit_op(result_type, result_id, join(op, to_enclosed_unpacked_expression(op0)), forward); + inherit_expression_dependencies(result_id, op0); +} + +void CompilerGLSL::emit_binary_op(uint32_t result_type, uint32_t 
result_id, uint32_t op0, uint32_t op1, const char *op) +{ + bool forward = should_forward(op0) && should_forward(op1); + emit_op(result_type, result_id, + join(to_enclosed_unpacked_expression(op0), " ", op, " ", to_enclosed_unpacked_expression(op1)), forward); + + inherit_expression_dependencies(result_id, op0); + inherit_expression_dependencies(result_id, op1); +} + +void CompilerGLSL::emit_unrolled_unary_op(uint32_t result_type, uint32_t result_id, uint32_t operand, const char *op) +{ + auto &type = get(result_type); + auto expr = type_to_glsl_constructor(type); + expr += '('; + for (uint32_t i = 0; i < type.vecsize; i++) + { + + + expr += op; + expr += to_extract_component_expression(operand, i); + + if (i + 1 < type.vecsize) + expr += ", "; + } + expr += ')'; + emit_op(result_type, result_id, expr, should_forward(operand)); + + inherit_expression_dependencies(result_id, operand); +} + +void CompilerGLSL::emit_unrolled_binary_op(uint32_t result_type, uint32_t result_id, uint32_t op0, uint32_t op1, + const char *op, bool negate, SPIRType::BaseType expected_type) +{ + auto &type0 = expression_type(op0); + auto &type1 = expression_type(op1); + + SPIRType target_type0 = type0; + SPIRType target_type1 = type1; + target_type0.basetype = expected_type; + target_type1.basetype = expected_type; + target_type0.vecsize = 1; + target_type1.vecsize = 1; + + auto &type = get(result_type); + auto expr = type_to_glsl_constructor(type); + expr += '('; + for (uint32_t i = 0; i < type.vecsize; i++) + { + + + if (negate) + expr += "!("; + + if (expected_type != SPIRType::Unknown && type0.basetype != expected_type) + expr += bitcast_expression(target_type0, type0.basetype, to_extract_component_expression(op0, i)); + else + expr += to_extract_component_expression(op0, i); + + expr += ' '; + expr += op; + expr += ' '; + + if (expected_type != SPIRType::Unknown && type1.basetype != expected_type) + expr += bitcast_expression(target_type1, type1.basetype, to_extract_component_expression(op1, i)); + else + expr += to_extract_component_expression(op1, i); + + if (negate) + expr += ")"; + + if (i + 1 < type.vecsize) + expr += ", "; + } + expr += ')'; + emit_op(result_type, result_id, expr, should_forward(op0) && should_forward(op1)); + + inherit_expression_dependencies(result_id, op0); + inherit_expression_dependencies(result_id, op1); +} + +SPIRType CompilerGLSL::binary_op_bitcast_helper(string &cast_op0, string &cast_op1, SPIRType::BaseType &input_type, + uint32_t op0, uint32_t op1, bool skip_cast_if_equal_type) +{ + auto &type0 = expression_type(op0); + auto &type1 = expression_type(op1); + + + + + bool cast = (type0.basetype != type1.basetype) || (!skip_cast_if_equal_type && type0.basetype != input_type); + + + + SPIRType expected_type; + expected_type.basetype = input_type; + expected_type.vecsize = type0.vecsize; + expected_type.columns = type0.columns; + expected_type.width = type0.width; + + if (cast) + { + cast_op0 = bitcast_glsl(expected_type, op0); + cast_op1 = bitcast_glsl(expected_type, op1); + } + else + { + + cast_op0 = to_enclosed_unpacked_expression(op0); + cast_op1 = to_enclosed_unpacked_expression(op1); + input_type = type0.basetype; + } + + return expected_type; +} + +void CompilerGLSL::emit_binary_op_cast(uint32_t result_type, uint32_t result_id, uint32_t op0, uint32_t op1, + const char *op, SPIRType::BaseType input_type, bool skip_cast_if_equal_type) +{ + string cast_op0, cast_op1; + auto expected_type = binary_op_bitcast_helper(cast_op0, cast_op1, input_type, op0, op1, 
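+ // binary_op_bitcast_helper() rewrites both operands into input_type when
+ // their types disagree (or the opcode is not sign-invariant); roughly, a
+ // signed compare on mixed int/uint operands becomes "(int(a) < int(b))",
+ // with a final bitcast back to the result type added below when needed.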
skip_cast_if_equal_type); + auto &out_type = get(result_type); + + + + + string expr; + if (out_type.basetype != input_type && out_type.basetype != SPIRType::Boolean) + { + expected_type.basetype = input_type; + expr = bitcast_glsl_op(out_type, expected_type); + expr += '('; + expr += join(cast_op0, " ", op, " ", cast_op1); + expr += ')'; + } + else + expr += join(cast_op0, " ", op, " ", cast_op1); + + emit_op(result_type, result_id, expr, should_forward(op0) && should_forward(op1)); + inherit_expression_dependencies(result_id, op0); + inherit_expression_dependencies(result_id, op1); +} + +void CompilerGLSL::emit_unary_func_op(uint32_t result_type, uint32_t result_id, uint32_t op0, const char *op) +{ + bool forward = should_forward(op0); + emit_op(result_type, result_id, join(op, "(", to_unpacked_expression(op0), ")"), forward); + inherit_expression_dependencies(result_id, op0); +} + +void CompilerGLSL::emit_binary_func_op(uint32_t result_type, uint32_t result_id, uint32_t op0, uint32_t op1, + const char *op) +{ + bool forward = should_forward(op0) && should_forward(op1); + emit_op(result_type, result_id, join(op, "(", to_unpacked_expression(op0), ", ", to_unpacked_expression(op1), ")"), + forward); + inherit_expression_dependencies(result_id, op0); + inherit_expression_dependencies(result_id, op1); +} + +void CompilerGLSL::emit_unary_func_op_cast(uint32_t result_type, uint32_t result_id, uint32_t op0, const char *op, + SPIRType::BaseType input_type, SPIRType::BaseType expected_result_type) +{ + auto &out_type = get(result_type); + auto &expr_type = expression_type(op0); + auto expected_type = out_type; + + + expected_type.basetype = input_type; + expected_type.width = expr_type.width; + string cast_op = expr_type.basetype != input_type ? bitcast_glsl(expected_type, op0) : to_unpacked_expression(op0); + + string expr; + if (out_type.basetype != expected_result_type) + { + expected_type.basetype = expected_result_type; + expected_type.width = out_type.width; + expr = bitcast_glsl_op(out_type, expected_type); + expr += '('; + expr += join(op, "(", cast_op, ")"); + expr += ')'; + } + else + { + expr += join(op, "(", cast_op, ")"); + } + + emit_op(result_type, result_id, expr, should_forward(op0)); + inherit_expression_dependencies(result_id, op0); +} + + + +void CompilerGLSL::emit_trinary_func_op_bitextract(uint32_t result_type, uint32_t result_id, uint32_t op0, uint32_t op1, + uint32_t op2, const char *op, + SPIRType::BaseType expected_result_type, + SPIRType::BaseType input_type0, SPIRType::BaseType input_type1, + SPIRType::BaseType input_type2) +{ + auto &out_type = get(result_type); + auto expected_type = out_type; + expected_type.basetype = input_type0; + + string cast_op0 = + expression_type(op0).basetype != input_type0 ? bitcast_glsl(expected_type, op0) : to_unpacked_expression(op0); + + auto op1_expr = to_unpacked_expression(op1); + auto op2_expr = to_unpacked_expression(op2); + + + expected_type.basetype = input_type1; + expected_type.vecsize = 1; + string cast_op1 = expression_type(op1).basetype != input_type1 ? + join(type_to_glsl_constructor(expected_type), "(", op1_expr, ")") : + op1_expr; + + expected_type.basetype = input_type2; + expected_type.vecsize = 1; + string cast_op2 = expression_type(op2).basetype != input_type2 ? 
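+ // Unlike the value operand, which is bitcast, the offset/count operands of
+ // the bit-extract builtins must be scalar (u)ints, so a mismatched type
+ // just gets a constructor cast here, e.g. "int(offset_expr)".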
+ join(type_to_glsl_constructor(expected_type), "(", op2_expr, ")") : + op2_expr; + + string expr; + if (out_type.basetype != expected_result_type) + { + expected_type.vecsize = out_type.vecsize; + expected_type.basetype = expected_result_type; + expr = bitcast_glsl_op(out_type, expected_type); + expr += '('; + expr += join(op, "(", cast_op0, ", ", cast_op1, ", ", cast_op2, ")"); + expr += ')'; + } + else + { + expr += join(op, "(", cast_op0, ", ", cast_op1, ", ", cast_op2, ")"); + } + + emit_op(result_type, result_id, expr, should_forward(op0) && should_forward(op1) && should_forward(op2)); + inherit_expression_dependencies(result_id, op0); + inherit_expression_dependencies(result_id, op1); + inherit_expression_dependencies(result_id, op2); +} + +void CompilerGLSL::emit_trinary_func_op_cast(uint32_t result_type, uint32_t result_id, uint32_t op0, uint32_t op1, + uint32_t op2, const char *op, SPIRType::BaseType input_type) +{ + auto &out_type = get(result_type); + auto expected_type = out_type; + expected_type.basetype = input_type; + string cast_op0 = + expression_type(op0).basetype != input_type ? bitcast_glsl(expected_type, op0) : to_unpacked_expression(op0); + string cast_op1 = + expression_type(op1).basetype != input_type ? bitcast_glsl(expected_type, op1) : to_unpacked_expression(op1); + string cast_op2 = + expression_type(op2).basetype != input_type ? bitcast_glsl(expected_type, op2) : to_unpacked_expression(op2); + + string expr; + if (out_type.basetype != input_type) + { + expr = bitcast_glsl_op(out_type, expected_type); + expr += '('; + expr += join(op, "(", cast_op0, ", ", cast_op1, ", ", cast_op2, ")"); + expr += ')'; + } + else + { + expr += join(op, "(", cast_op0, ", ", cast_op1, ", ", cast_op2, ")"); + } + + emit_op(result_type, result_id, expr, should_forward(op0) && should_forward(op1) && should_forward(op2)); + inherit_expression_dependencies(result_id, op0); + inherit_expression_dependencies(result_id, op1); + inherit_expression_dependencies(result_id, op2); +} + +void CompilerGLSL::emit_binary_func_op_cast(uint32_t result_type, uint32_t result_id, uint32_t op0, uint32_t op1, + const char *op, SPIRType::BaseType input_type, bool skip_cast_if_equal_type) +{ + string cast_op0, cast_op1; + auto expected_type = binary_op_bitcast_helper(cast_op0, cast_op1, input_type, op0, op1, skip_cast_if_equal_type); + auto &out_type = get(result_type); + + + string expr; + if (out_type.basetype != input_type && out_type.basetype != SPIRType::Boolean) + { + expected_type.basetype = input_type; + expr = bitcast_glsl_op(out_type, expected_type); + expr += '('; + expr += join(op, "(", cast_op0, ", ", cast_op1, ")"); + expr += ')'; + } + else + { + expr += join(op, "(", cast_op0, ", ", cast_op1, ")"); + } + + emit_op(result_type, result_id, expr, should_forward(op0) && should_forward(op1)); + inherit_expression_dependencies(result_id, op0); + inherit_expression_dependencies(result_id, op1); +} + +void CompilerGLSL::emit_trinary_func_op(uint32_t result_type, uint32_t result_id, uint32_t op0, uint32_t op1, + uint32_t op2, const char *op) +{ + bool forward = should_forward(op0) && should_forward(op1) && should_forward(op2); + emit_op(result_type, result_id, + join(op, "(", to_unpacked_expression(op0), ", ", to_unpacked_expression(op1), ", ", + to_unpacked_expression(op2), ")"), + forward); + + inherit_expression_dependencies(result_id, op0); + inherit_expression_dependencies(result_id, op1); + inherit_expression_dependencies(result_id, op2); +} + +void 
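+ // Same pattern as the other n-ary emitters: forwardability is the AND of
+ // all operand forwardabilities, and each operand registers an expression
+ // dependency so invalidating any input re-emits this expression too.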
CompilerGLSL::emit_quaternary_func_op(uint32_t result_type, uint32_t result_id, uint32_t op0, uint32_t op1, + uint32_t op2, uint32_t op3, const char *op) +{ + bool forward = should_forward(op0) && should_forward(op1) && should_forward(op2) && should_forward(op3); + emit_op(result_type, result_id, + join(op, "(", to_unpacked_expression(op0), ", ", to_unpacked_expression(op1), ", ", + to_unpacked_expression(op2), ", ", to_unpacked_expression(op3), ")"), + forward); + + inherit_expression_dependencies(result_id, op0); + inherit_expression_dependencies(result_id, op1); + inherit_expression_dependencies(result_id, op2); + inherit_expression_dependencies(result_id, op3); +} + +void CompilerGLSL::emit_bitfield_insert_op(uint32_t result_type, uint32_t result_id, uint32_t op0, uint32_t op1, + uint32_t op2, uint32_t op3, const char *op, + SPIRType::BaseType offset_count_type) +{ + + + bool forward = should_forward(op0) && should_forward(op1) && should_forward(op2) && should_forward(op3); + + auto op0_expr = to_unpacked_expression(op0); + auto op1_expr = to_unpacked_expression(op1); + auto op2_expr = to_unpacked_expression(op2); + auto op3_expr = to_unpacked_expression(op3); + + SPIRType target_type; + target_type.vecsize = 1; + target_type.basetype = offset_count_type; + + if (expression_type(op2).basetype != offset_count_type) + { + + op2_expr = join(type_to_glsl_constructor(target_type), "(", op2_expr, ")"); + } + + if (expression_type(op3).basetype != offset_count_type) + { + + op3_expr = join(type_to_glsl_constructor(target_type), "(", op3_expr, ")"); + } + + emit_op(result_type, result_id, join(op, "(", op0_expr, ", ", op1_expr, ", ", op2_expr, ", ", op3_expr, ")"), + forward); + + inherit_expression_dependencies(result_id, op0); + inherit_expression_dependencies(result_id, op1); + inherit_expression_dependencies(result_id, op2); + inherit_expression_dependencies(result_id, op3); +} + + + + + +bool CompilerGLSL::check_explicit_lod_allowed(uint32_t lod) +{ + auto &execution = get_entry_point(); + bool allowed = !is_legacy_es() || execution.model == ExecutionModelFragment; + if (!allowed && lod != 0) + { + auto *lod_constant = maybe_get(lod); + if (!lod_constant || lod_constant->scalar_f32() != 0.0f) + { + SPIRV_CROSS_THROW("Explicit lod not allowed in legacy ES non-fragment shaders."); + } + } + return allowed; +} + +string CompilerGLSL::legacy_tex_op(const std::string &op, const SPIRType &imgtype, uint32_t lod, uint32_t tex) +{ + const char *type; + switch (imgtype.image.dim) + { + case spv::Dim1D: + type = (imgtype.image.arrayed && !options.es) ? "1DArray" : "1D"; + break; + case spv::Dim2D: + type = (imgtype.image.arrayed && !options.es) ? 
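+ // legacy_tex_op() maps modern texture() builtins back to pre-1.30 names;
+ // e.g. a plain 2D sample becomes "texture2D(...)", and a projected depth
+ // sample on legacy ES becomes "shadow2DProjEXT(...)" (with
+ // GL_EXT_shadow_samplers required further down).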
"2DArray" : "2D"; + break; + case spv::Dim3D: + type = "3D"; + break; + case spv::DimCube: + type = "Cube"; + break; + case spv::DimRect: + type = "2DRect"; + break; + case spv::DimBuffer: + type = "Buffer"; + break; + case spv::DimSubpassData: + type = "2D"; + break; + default: + type = ""; + break; + } + + bool use_explicit_lod = check_explicit_lod_allowed(lod); + + if (op == "textureLod" || op == "textureProjLod" || op == "textureGrad" || op == "textureProjGrad") + { + if (is_legacy_es()) + { + if (use_explicit_lod) + require_extension_internal("GL_EXT_shader_texture_lod"); + } + else if (is_legacy()) + require_extension_internal("GL_ARB_shader_texture_lod"); + } + + if (op == "textureLodOffset" || op == "textureProjLodOffset") + { + if (is_legacy_es()) + SPIRV_CROSS_THROW(join(op, " not allowed in legacy ES")); + + require_extension_internal("GL_EXT_gpu_shader4"); + } + + + + + if (image_is_comparison(imgtype, tex) && is_legacy_es()) + { + if (op == "texture" || op == "textureProj") + require_extension_internal("GL_EXT_shadow_samplers"); + else + SPIRV_CROSS_THROW(join(op, " not allowed on depth samplers in legacy ES")); + } + + bool is_es_and_depth = is_legacy_es() && image_is_comparison(imgtype, tex); + std::string type_prefix = image_is_comparison(imgtype, tex) ? "shadow" : "texture"; + + if (op == "texture") + return is_es_and_depth ? join(type_prefix, type, "EXT") : join(type_prefix, type); + else if (op == "textureLod") + { + if (use_explicit_lod) + return join(type_prefix, type, is_legacy_es() ? "LodEXT" : "Lod"); + else + return join(type_prefix, type); + } + else if (op == "textureProj") + return join(type_prefix, type, is_es_and_depth ? "ProjEXT" : "Proj"); + else if (op == "textureGrad") + return join(type_prefix, type, is_legacy_es() ? "GradEXT" : is_legacy_desktop() ? "GradARB" : "Grad"); + else if (op == "textureProjLod") + { + if (use_explicit_lod) + return join(type_prefix, type, is_legacy_es() ? "ProjLodEXT" : "ProjLod"); + else + return join(type_prefix, type, "Proj"); + } + else if (op == "textureLodOffset") + { + if (use_explicit_lod) + return join(type_prefix, type, "LodOffset"); + else + return join(type_prefix, type); + } + else if (op == "textureProjGrad") + return join(type_prefix, type, + is_legacy_es() ? "ProjGradEXT" : is_legacy_desktop() ? 
"ProjGradARB" : "ProjGrad"); + else if (op == "textureProjLodOffset") + { + if (use_explicit_lod) + return join(type_prefix, type, "ProjLodOffset"); + else + return join(type_prefix, type, "ProjOffset"); + } + else + { + SPIRV_CROSS_THROW(join("Unsupported legacy texture op: ", op)); + } +} + +bool CompilerGLSL::to_trivial_mix_op(const SPIRType &type, string &op, uint32_t left, uint32_t right, uint32_t lerp) +{ + auto *cleft = maybe_get(left); + auto *cright = maybe_get(right); + auto &lerptype = expression_type(lerp); + + + if (!cleft || !cright) + return false; + + + if (cleft->specialization || cright->specialization) + return false; + + + + if (lerptype.basetype != SPIRType::Boolean || lerptype.vecsize > 1) + return false; + + + bool ret = false; + switch (type.basetype) + { + case SPIRType::Short: + case SPIRType::UShort: + ret = cleft->scalar_u16() == 0 && cright->scalar_u16() == 1; + break; + + case SPIRType::Int: + case SPIRType::UInt: + ret = cleft->scalar() == 0 && cright->scalar() == 1; + break; + + case SPIRType::Half: + ret = cleft->scalar_f16() == 0.0f && cright->scalar_f16() == 1.0f; + break; + + case SPIRType::Float: + ret = cleft->scalar_f32() == 0.0f && cright->scalar_f32() == 1.0f; + break; + + case SPIRType::Double: + ret = cleft->scalar_f64() == 0.0 && cright->scalar_f64() == 1.0; + break; + + case SPIRType::Int64: + case SPIRType::UInt64: + ret = cleft->scalar_u64() == 0 && cright->scalar_u64() == 1; + break; + + default: + break; + } + + if (ret) + op = type_to_glsl_constructor(type); + return ret; +} + +string CompilerGLSL::to_ternary_expression(const SPIRType &restype, uint32_t select, uint32_t true_value, + uint32_t false_value) +{ + string expr; + auto &lerptype = expression_type(select); + + if (lerptype.vecsize == 1) + expr = join(to_enclosed_expression(select), " ? ", to_enclosed_pointer_expression(true_value), " : ", + to_enclosed_pointer_expression(false_value)); + else + { + auto swiz = [this](uint32_t expression, uint32_t i) { return to_extract_component_expression(expression, i); }; + + expr = type_to_glsl_constructor(restype); + expr += "("; + for (uint32_t i = 0; i < restype.vecsize; i++) + { + expr += swiz(select, i); + expr += " ? 
"; + expr += swiz(true_value, i); + expr += " : "; + expr += swiz(false_value, i); + if (i + 1 < restype.vecsize) + expr += ", "; + } + expr += ")"; + } + + return expr; +} + +void CompilerGLSL::emit_mix_op(uint32_t result_type, uint32_t id, uint32_t left, uint32_t right, uint32_t lerp) +{ + auto &lerptype = expression_type(lerp); + auto &restype = get(result_type); + + + if (restype.pointer) + { + register_write(left); + register_write(right); + } + + string mix_op; + bool has_boolean_mix = *backend.boolean_mix_function && + ((options.es && options.version >= 310) || (!options.es && options.version >= 450)); + bool trivial_mix = to_trivial_mix_op(restype, mix_op, left, right, lerp); + + + + if (lerptype.vecsize == 1) + has_boolean_mix = false; + + + + + if (trivial_mix) + { + emit_unary_func_op(result_type, id, lerp, mix_op.c_str()); + } + else if (!has_boolean_mix && lerptype.basetype == SPIRType::Boolean) + { + + + + + + + auto expr = to_ternary_expression(get(result_type), lerp, right, left); + emit_op(result_type, id, expr, should_forward(left) && should_forward(right) && should_forward(lerp)); + inherit_expression_dependencies(id, left); + inherit_expression_dependencies(id, right); + inherit_expression_dependencies(id, lerp); + } + else if (lerptype.basetype == SPIRType::Boolean) + emit_trinary_func_op(result_type, id, left, right, lerp, backend.boolean_mix_function); + else + emit_trinary_func_op(result_type, id, left, right, lerp, "mix"); +} + +string CompilerGLSL::to_combined_image_sampler(VariableID image_id, VariableID samp_id) +{ + + + auto image_expr = to_expression(image_id); + string array_expr; + auto array_index = image_expr.find_first_of('['); + if (array_index != string::npos) + array_expr = image_expr.substr(array_index, string::npos); + + auto &args = current_function->arguments; + + + + auto *image = maybe_get_backing_variable(image_id); + auto *samp = maybe_get_backing_variable(samp_id); + if (image) + image_id = image->self; + if (samp) + samp_id = samp->self; + + auto image_itr = find_if(begin(args), end(args), + [image_id](const SPIRFunction::Parameter ¶m) { return image_id == param.id; }); + + auto sampler_itr = find_if(begin(args), end(args), + [samp_id](const SPIRFunction::Parameter ¶m) { return samp_id == param.id; }); + + if (image_itr != end(args) || sampler_itr != end(args)) + { + + bool global_image = image_itr == end(args); + bool global_sampler = sampler_itr == end(args); + VariableID iid = global_image ? image_id : VariableID(uint32_t(image_itr - begin(args))); + VariableID sid = global_sampler ? 
samp_id : VariableID(uint32_t(sampler_itr - begin(args))); + + auto &combined = current_function->combined_parameters; + auto itr = find_if(begin(combined), end(combined), [=](const SPIRFunction::CombinedImageSamplerParameter &p) { + return p.global_image == global_image && p.global_sampler == global_sampler && p.image_id == iid && + p.sampler_id == sid; + }); + + if (itr != end(combined)) + return to_expression(itr->id) + array_expr; + else + { + SPIRV_CROSS_THROW( + "Cannot find mapping for combined sampler parameter, was build_combined_image_samplers() used " + "before compile() was called?"); + } + } + else + { + + auto &mapping = combined_image_samplers; + auto itr = find_if(begin(mapping), end(mapping), [image_id, samp_id](const CombinedImageSampler &combined) { + return combined.image_id == image_id && combined.sampler_id == samp_id; + }); + + if (itr != end(combined_image_samplers)) + return to_expression(itr->combined_id) + array_expr; + else + { + SPIRV_CROSS_THROW("Cannot find mapping for combined sampler, was build_combined_image_samplers() used " + "before compile() was called?"); + } + } +} + +void CompilerGLSL::emit_sampled_image_op(uint32_t result_type, uint32_t result_id, uint32_t image_id, uint32_t samp_id) +{ + if (options.vulkan_semantics && combined_image_samplers.empty()) + { + emit_binary_func_op(result_type, result_id, image_id, samp_id, + type_to_glsl(get(result_type), result_id).c_str()); + } + else + { + + emit_op(result_type, result_id, to_combined_image_sampler(image_id, samp_id), true, true); + } + + + + forwarded_temporaries.erase(result_id); +} + +static inline bool image_opcode_is_sample_no_dref(Op op) +{ + switch (op) + { + case OpImageSampleExplicitLod: + case OpImageSampleImplicitLod: + case OpImageSampleProjExplicitLod: + case OpImageSampleProjImplicitLod: + case OpImageFetch: + case OpImageRead: + case OpImageSparseSampleExplicitLod: + case OpImageSparseSampleImplicitLod: + case OpImageSparseSampleProjExplicitLod: + case OpImageSparseSampleProjImplicitLod: + case OpImageSparseFetch: + case OpImageSparseRead: + return true; + + default: + return false; + } +} + +void CompilerGLSL::emit_texture_op(const Instruction &i) +{ + auto *ops = stream(i); + auto op = static_cast(i.op); + + SmallVector inherited_expressions; + + uint32_t result_type_id = ops[0]; + uint32_t id = ops[1]; + + bool forward = false; + string expr = to_texture_op(i, &forward, inherited_expressions); + emit_op(result_type_id, id, expr, forward); + for (auto &inherit : inherited_expressions) + inherit_expression_dependencies(id, inherit); + + switch (op) + { + case OpImageSampleDrefImplicitLod: + case OpImageSampleImplicitLod: + case OpImageSampleProjImplicitLod: + case OpImageSampleProjDrefImplicitLod: + register_control_dependent_expression(id); + break; + + default: + break; + } +} + +std::string CompilerGLSL::to_texture_op(const Instruction &i, bool *forward, + SmallVector &inherited_expressions) +{ + auto *ops = stream(i); + auto op = static_cast(i.op); + uint32_t length = i.length; + + uint32_t result_type_id = ops[0]; + VariableID img = ops[2]; + uint32_t coord = ops[3]; + uint32_t dref = 0; + uint32_t comp = 0; + bool gather = false; + bool proj = false; + bool fetch = false; + const uint32_t *opt = nullptr; + + auto &result_type = get(result_type_id); + + inherited_expressions.push_back(coord); + + + if (has_decoration(img, DecorationNonUniformEXT)) + propagate_nonuniform_qualifier(img); + + switch (op) + { + case OpImageSampleDrefImplicitLod: + case OpImageSampleDrefExplicitLod: 
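+	// Operand layout for the Dref sampling opcodes handled here, using one
+	// hypothetical instruction as a sketch:
+	//   %r = OpImageSampleDrefImplicitLod %float %img %coord %dref ...
+	// maps to ops[0] = result type, ops[1] = result id, ops[2] = sampled image,
+	// ops[3] = coordinate, ops[4] = depth reference, with any optional image
+	// operands starting at ops[5] -- hence the offsets used in the cases below.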
+ dref = ops[4]; + opt = &ops[5]; + length -= 5; + break; + + case OpImageSampleProjDrefImplicitLod: + case OpImageSampleProjDrefExplicitLod: + dref = ops[4]; + opt = &ops[5]; + length -= 5; + proj = true; + break; + + case OpImageDrefGather: + dref = ops[4]; + opt = &ops[5]; + length -= 5; + gather = true; + break; + + case OpImageGather: + comp = ops[4]; + opt = &ops[5]; + length -= 5; + gather = true; + break; + + case OpImageFetch: + case OpImageRead: + opt = &ops[4]; + length -= 4; + fetch = true; + break; + + case OpImageSampleProjImplicitLod: + case OpImageSampleProjExplicitLod: + opt = &ops[4]; + length -= 4; + proj = true; + break; + + default: + opt = &ops[4]; + length -= 4; + break; + } + + + auto &type = expression_type(img); + auto &imgtype = get(type.self); + + uint32_t coord_components = 0; + switch (imgtype.image.dim) + { + case spv::Dim1D: + coord_components = 1; + break; + case spv::Dim2D: + coord_components = 2; + break; + case spv::Dim3D: + coord_components = 3; + break; + case spv::DimCube: + coord_components = 3; + break; + case spv::DimBuffer: + coord_components = 1; + break; + default: + coord_components = 2; + break; + } + + if (dref) + inherited_expressions.push_back(dref); + + if (proj) + coord_components++; + if (imgtype.image.arrayed) + coord_components++; + + uint32_t bias = 0; + uint32_t lod = 0; + uint32_t grad_x = 0; + uint32_t grad_y = 0; + uint32_t coffset = 0; + uint32_t offset = 0; + uint32_t coffsets = 0; + uint32_t sample = 0; + uint32_t minlod = 0; + uint32_t flags = 0; + + if (length) + { + flags = *opt++; + length--; + } + + auto test = [&](uint32_t &v, uint32_t flag) { + if (length && (flags & flag)) + { + v = *opt++; + inherited_expressions.push_back(v); + length--; + } + }; + + test(bias, ImageOperandsBiasMask); + test(lod, ImageOperandsLodMask); + test(grad_x, ImageOperandsGradMask); + test(grad_y, ImageOperandsGradMask); + test(coffset, ImageOperandsConstOffsetMask); + test(offset, ImageOperandsOffsetMask); + test(coffsets, ImageOperandsConstOffsetsMask); + test(sample, ImageOperandsSampleMask); + test(minlod, ImageOperandsMinLodMask); + + string expr; + expr += to_function_name(img, imgtype, !!fetch, !!gather, !!proj, !!coffsets, (!!coffset || !!offset), + (!!grad_x || !!grad_y), !!dref, lod, minlod); + expr += "("; + expr += to_function_args(img, imgtype, fetch, gather, proj, coord, coord_components, dref, grad_x, grad_y, lod, + coffset, offset, bias, comp, sample, minlod, forward); + expr += ")"; + + + if (is_legacy() && image_is_comparison(imgtype, img)) + expr += ".r"; + + + + if (backend.comparison_image_samples_scalar && image_opcode_is_sample_no_dref(op)) + { + bool image_is_depth = false; + const auto *combined = maybe_get(img); + VariableID image_id = combined ? 
combined->image : img; + + if (combined && image_is_comparison(imgtype, combined->image)) + image_is_depth = true; + else if (image_is_comparison(imgtype, img)) + image_is_depth = true; + + + + + auto *image_variable = maybe_get_backing_variable(image_id); + if (image_variable && image_is_comparison(get(image_variable->basetype), image_variable->self)) + image_is_depth = true; + + if (image_is_depth) + expr = remap_swizzle(result_type, 1, expr); + } + + if (!backend.support_small_type_sampling_result && result_type.width < 32) + { + + + expr = join(type_to_glsl_constructor(result_type), "(", expr, ")"); + } + + + if (op == OpImageRead) + expr = remap_swizzle(result_type, 4, expr); + + return expr; +} + +bool CompilerGLSL::expression_is_constant_null(uint32_t id) const +{ + auto *c = maybe_get(id); + if (!c) + return false; + return c->constant_is_null(); +} + + + +string CompilerGLSL::to_function_name(VariableID tex, const SPIRType &imgtype, bool is_fetch, bool is_gather, + bool is_proj, bool has_array_offsets, bool has_offset, bool has_grad, bool, + uint32_t lod, uint32_t minlod) +{ + if (minlod != 0) + SPIRV_CROSS_THROW("Sparse texturing not yet supported."); + + string fname; + + + + + + bool workaround_lod_array_shadow_as_grad = false; + if (((imgtype.image.arrayed && imgtype.image.dim == Dim2D) || imgtype.image.dim == DimCube) && + image_is_comparison(imgtype, tex) && lod) + { + if (!expression_is_constant_null(lod)) + { + SPIRV_CROSS_THROW( + "textureLod on sampler2DArrayShadow is not constant 0.0. This cannot be expressed in GLSL."); + } + workaround_lod_array_shadow_as_grad = true; + } + + if (is_fetch) + fname += "texelFetch"; + else + { + fname += "texture"; + + if (is_gather) + fname += "Gather"; + if (has_array_offsets) + fname += "Offsets"; + if (is_proj) + fname += "Proj"; + if (has_grad || workaround_lod_array_shadow_as_grad) + fname += "Grad"; + if (!!lod && !workaround_lod_array_shadow_as_grad) + fname += "Lod"; + } + + if (has_offset) + fname += "Offset"; + + return is_legacy() ? legacy_tex_op(fname, imgtype, lod, tex) : fname; +} + +std::string CompilerGLSL::convert_separate_image_to_expression(uint32_t id) +{ + auto *var = maybe_get_backing_variable(id); + + + + if (var) + { + auto &type = get(var->basetype); + if (type.basetype == SPIRType::Image && type.image.sampled == 1 && type.image.dim != DimBuffer) + { + if (options.vulkan_semantics) + { + if (dummy_sampler_id) + { + + auto sampled_type = type; + sampled_type.basetype = SPIRType::SampledImage; + return join(type_to_glsl(sampled_type), "(", to_expression(id), ", ", + to_expression(dummy_sampler_id), ")"); + } + else + { + + require_extension_internal("GL_EXT_samplerless_texture_functions"); + } + } + else + { + if (!dummy_sampler_id) + SPIRV_CROSS_THROW( + "Cannot find dummy sampler ID. 
Was build_dummy_sampler_for_combined_images() called?"); + + return to_combined_image_sampler(id, dummy_sampler_id); + } + } + } + + return to_expression(id); +} + + +string CompilerGLSL::to_function_args(VariableID img, const SPIRType &imgtype, bool is_fetch, bool is_gather, + bool is_proj, uint32_t coord, uint32_t coord_components, uint32_t dref, + uint32_t grad_x, uint32_t grad_y, uint32_t lod, uint32_t coffset, uint32_t offset, + uint32_t bias, uint32_t comp, uint32_t sample, uint32_t , + bool *p_forward) +{ + string farg_str; + if (is_fetch) + farg_str = convert_separate_image_to_expression(img); + else + farg_str = to_expression(img); + + bool swizz_func = backend.swizzle_is_function; + auto swizzle = [swizz_func](uint32_t comps, uint32_t in_comps) -> const char * { + if (comps == in_comps) + return ""; + + switch (comps) + { + case 1: + return ".x"; + case 2: + return swizz_func ? ".xy()" : ".xy"; + case 3: + return swizz_func ? ".xyz()" : ".xyz"; + default: + return ""; + } + }; + + bool forward = should_forward(coord); + + + auto swizzle_expr = swizzle(coord_components, expression_type(coord).vecsize); + + auto coord_expr = (*swizzle_expr == '\0') ? to_expression(coord) : (to_enclosed_expression(coord) + swizzle_expr); + + + auto &coord_type = expression_type(coord); + if (coord_type.basetype == SPIRType::UInt) + { + auto expected_type = coord_type; + expected_type.vecsize = coord_components; + expected_type.basetype = SPIRType::Int; + coord_expr = bitcast_expression(expected_type, coord_type.basetype, coord_expr); + } + + + + + + bool workaround_lod_array_shadow_as_grad = + ((imgtype.image.arrayed && imgtype.image.dim == Dim2D) || imgtype.image.dim == DimCube) && + image_is_comparison(imgtype, img) && lod; + + if (dref) + { + forward = forward && should_forward(dref); + + + if (is_gather || coord_components == 4) + { + farg_str += ", "; + farg_str += to_expression(coord); + farg_str += ", "; + farg_str += to_expression(dref); + } + else if (is_proj) + { + + + + farg_str += ", vec4("; + + if (imgtype.image.dim == Dim1D) + { + + farg_str += to_enclosed_expression(coord) + ".x"; + farg_str += ", "; + farg_str += "0.0, "; + farg_str += to_expression(dref); + farg_str += ", "; + farg_str += to_enclosed_expression(coord) + ".y)"; + } + else if (imgtype.image.dim == Dim2D) + { + + farg_str += to_enclosed_expression(coord) + (swizz_func ? 
".xy()" : ".xy"); + farg_str += ", "; + farg_str += to_expression(dref); + farg_str += ", "; + farg_str += to_enclosed_expression(coord) + ".z)"; + } + else + SPIRV_CROSS_THROW("Invalid type for textureProj with shadow."); + } + else + { + + auto type = expression_type(coord); + type.vecsize = coord_components + 1; + farg_str += ", "; + farg_str += type_to_glsl_constructor(type); + farg_str += "("; + farg_str += coord_expr; + farg_str += ", "; + farg_str += to_expression(dref); + farg_str += ")"; + } + } + else + { + farg_str += ", "; + farg_str += coord_expr; + } + + if (grad_x || grad_y) + { + forward = forward && should_forward(grad_x); + forward = forward && should_forward(grad_y); + farg_str += ", "; + farg_str += to_expression(grad_x); + farg_str += ", "; + farg_str += to_expression(grad_y); + } + + if (lod) + { + if (workaround_lod_array_shadow_as_grad) + { + + + if (imgtype.image.dim == Dim2D) + farg_str += ", vec2(0.0), vec2(0.0)"; + else if (imgtype.image.dim == DimCube) + farg_str += ", vec3(0.0), vec3(0.0)"; + } + else + { + if (check_explicit_lod_allowed(lod)) + { + forward = forward && should_forward(lod); + farg_str += ", "; + + auto &lod_expr_type = expression_type(lod); + + + if (is_fetch && imgtype.image.dim != DimBuffer && !imgtype.image.ms && + lod_expr_type.basetype != SPIRType::Int) + { + farg_str += join("int(", to_expression(lod), ")"); + } + else + { + farg_str += to_expression(lod); + } + } + } + } + else if (is_fetch && imgtype.image.dim != DimBuffer && !imgtype.image.ms) + { + + farg_str += ", 0"; + } + + if (coffset) + { + forward = forward && should_forward(coffset); + farg_str += ", "; + farg_str += to_expression(coffset); + } + else if (offset) + { + forward = forward && should_forward(offset); + farg_str += ", "; + farg_str += to_expression(offset); + } + + if (bias) + { + forward = forward && should_forward(bias); + farg_str += ", "; + farg_str += to_expression(bias); + } + + if (comp) + { + forward = forward && should_forward(comp); + farg_str += ", "; + farg_str += to_expression(comp); + } + + if (sample) + { + farg_str += ", "; + farg_str += to_expression(sample); + } + + *p_forward = forward; + + return farg_str; +} + +void CompilerGLSL::emit_glsl_op(uint32_t result_type, uint32_t id, uint32_t eop, const uint32_t *args, uint32_t length) +{ + auto op = static_cast(eop); + + if (is_legacy() && is_unsigned_glsl_opcode(op)) + SPIRV_CROSS_THROW("Unsigned integers are not supported on legacy GLSL targets."); + + + uint32_t integer_width = get_integer_width_for_glsl_instruction(op, args, length); + auto int_type = to_signed_basetype(integer_width); + auto uint_type = to_unsigned_basetype(integer_width); + + switch (op) + { + + case GLSLstd450Round: + emit_unary_func_op(result_type, id, args[0], "round"); + break; + + case GLSLstd450RoundEven: + if ((options.es && options.version >= 300) || (!options.es && options.version >= 130)) + emit_unary_func_op(result_type, id, args[0], "roundEven"); + else + SPIRV_CROSS_THROW("roundEven supported only in ESSL 300 and GLSL 130 and up."); + break; + + case GLSLstd450Trunc: + emit_unary_func_op(result_type, id, args[0], "trunc"); + break; + case GLSLstd450SAbs: + emit_unary_func_op_cast(result_type, id, args[0], "abs", int_type, int_type); + break; + case GLSLstd450FAbs: + emit_unary_func_op(result_type, id, args[0], "abs"); + break; + case GLSLstd450SSign: + emit_unary_func_op_cast(result_type, id, args[0], "sign", int_type, int_type); + break; + case GLSLstd450FSign: + emit_unary_func_op(result_type, id, args[0], 
"sign"); + break; + case GLSLstd450Floor: + emit_unary_func_op(result_type, id, args[0], "floor"); + break; + case GLSLstd450Ceil: + emit_unary_func_op(result_type, id, args[0], "ceil"); + break; + case GLSLstd450Fract: + emit_unary_func_op(result_type, id, args[0], "fract"); + break; + case GLSLstd450Radians: + emit_unary_func_op(result_type, id, args[0], "radians"); + break; + case GLSLstd450Degrees: + emit_unary_func_op(result_type, id, args[0], "degrees"); + break; + case GLSLstd450Fma: + if ((!options.es && options.version < 400) || (options.es && options.version < 320)) + { + auto expr = join(to_enclosed_expression(args[0]), " * ", to_enclosed_expression(args[1]), " + ", + to_enclosed_expression(args[2])); + + emit_op(result_type, id, expr, + should_forward(args[0]) && should_forward(args[1]) && should_forward(args[2])); + for (uint32_t i = 0; i < 3; i++) + inherit_expression_dependencies(id, args[i]); + } + else + emit_trinary_func_op(result_type, id, args[0], args[1], args[2], "fma"); + break; + case GLSLstd450Modf: + register_call_out_argument(args[1]); + forced_temporaries.insert(id); + emit_binary_func_op(result_type, id, args[0], args[1], "modf"); + break; + + case GLSLstd450ModfStruct: + { + auto &type = get(result_type); + emit_uninitialized_temporary_expression(result_type, id); + statement(to_expression(id), ".", to_member_name(type, 0), " = ", "modf(", to_expression(args[0]), ", ", + to_expression(id), ".", to_member_name(type, 1), ");"); + break; + } + + + case GLSLstd450UMin: + emit_binary_func_op_cast(result_type, id, args[0], args[1], "min", uint_type, false); + break; + + case GLSLstd450SMin: + emit_binary_func_op_cast(result_type, id, args[0], args[1], "min", int_type, false); + break; + + case GLSLstd450FMin: + emit_binary_func_op(result_type, id, args[0], args[1], "min"); + break; + + case GLSLstd450FMax: + emit_binary_func_op(result_type, id, args[0], args[1], "max"); + break; + + case GLSLstd450UMax: + emit_binary_func_op_cast(result_type, id, args[0], args[1], "max", uint_type, false); + break; + + case GLSLstd450SMax: + emit_binary_func_op_cast(result_type, id, args[0], args[1], "max", int_type, false); + break; + + case GLSLstd450FClamp: + emit_trinary_func_op(result_type, id, args[0], args[1], args[2], "clamp"); + break; + + case GLSLstd450UClamp: + emit_trinary_func_op_cast(result_type, id, args[0], args[1], args[2], "clamp", uint_type); + break; + + case GLSLstd450SClamp: + emit_trinary_func_op_cast(result_type, id, args[0], args[1], args[2], "clamp", int_type); + break; + + + case GLSLstd450Sin: + emit_unary_func_op(result_type, id, args[0], "sin"); + break; + case GLSLstd450Cos: + emit_unary_func_op(result_type, id, args[0], "cos"); + break; + case GLSLstd450Tan: + emit_unary_func_op(result_type, id, args[0], "tan"); + break; + case GLSLstd450Asin: + emit_unary_func_op(result_type, id, args[0], "asin"); + break; + case GLSLstd450Acos: + emit_unary_func_op(result_type, id, args[0], "acos"); + break; + case GLSLstd450Atan: + emit_unary_func_op(result_type, id, args[0], "atan"); + break; + case GLSLstd450Sinh: + emit_unary_func_op(result_type, id, args[0], "sinh"); + break; + case GLSLstd450Cosh: + emit_unary_func_op(result_type, id, args[0], "cosh"); + break; + case GLSLstd450Tanh: + emit_unary_func_op(result_type, id, args[0], "tanh"); + break; + case GLSLstd450Asinh: + emit_unary_func_op(result_type, id, args[0], "asinh"); + break; + case GLSLstd450Acosh: + emit_unary_func_op(result_type, id, args[0], "acosh"); + break; + case GLSLstd450Atanh: + 
emit_unary_func_op(result_type, id, args[0], "atanh"); + break; + case GLSLstd450Atan2: + emit_binary_func_op(result_type, id, args[0], args[1], "atan"); + break; + + + case GLSLstd450Pow: + emit_binary_func_op(result_type, id, args[0], args[1], "pow"); + break; + case GLSLstd450Exp: + emit_unary_func_op(result_type, id, args[0], "exp"); + break; + case GLSLstd450Log: + emit_unary_func_op(result_type, id, args[0], "log"); + break; + case GLSLstd450Exp2: + emit_unary_func_op(result_type, id, args[0], "exp2"); + break; + case GLSLstd450Log2: + emit_unary_func_op(result_type, id, args[0], "log2"); + break; + case GLSLstd450Sqrt: + emit_unary_func_op(result_type, id, args[0], "sqrt"); + break; + case GLSLstd450InverseSqrt: + emit_unary_func_op(result_type, id, args[0], "inversesqrt"); + break; + + + case GLSLstd450Determinant: + emit_unary_func_op(result_type, id, args[0], "determinant"); + break; + case GLSLstd450MatrixInverse: + emit_unary_func_op(result_type, id, args[0], "inverse"); + break; + + + case GLSLstd450FMix: + case GLSLstd450IMix: + { + emit_mix_op(result_type, id, args[0], args[1], args[2]); + break; + } + case GLSLstd450Step: + emit_binary_func_op(result_type, id, args[0], args[1], "step"); + break; + case GLSLstd450SmoothStep: + emit_trinary_func_op(result_type, id, args[0], args[1], args[2], "smoothstep"); + break; + + + case GLSLstd450Frexp: + register_call_out_argument(args[1]); + forced_temporaries.insert(id); + emit_binary_func_op(result_type, id, args[0], args[1], "frexp"); + break; + + case GLSLstd450FrexpStruct: + { + auto &type = get(result_type); + emit_uninitialized_temporary_expression(result_type, id); + statement(to_expression(id), ".", to_member_name(type, 0), " = ", "frexp(", to_expression(args[0]), ", ", + to_expression(id), ".", to_member_name(type, 1), ");"); + break; + } + + case GLSLstd450Ldexp: + { + bool forward = should_forward(args[0]) && should_forward(args[1]); + + auto op0 = to_unpacked_expression(args[0]); + auto op1 = to_unpacked_expression(args[1]); + auto &op1_type = expression_type(args[1]); + if (op1_type.basetype != SPIRType::Int) + { + + auto target_type = op1_type; + target_type.basetype = SPIRType::Int; + op1 = join(type_to_glsl_constructor(target_type), "(", op1, ")"); + } + + auto expr = join("ldexp(", op0, ", ", op1, ")"); + + emit_op(result_type, id, expr, forward); + inherit_expression_dependencies(id, args[0]); + inherit_expression_dependencies(id, args[1]); + break; + } + + case GLSLstd450PackSnorm4x8: + emit_unary_func_op(result_type, id, args[0], "packSnorm4x8"); + break; + case GLSLstd450PackUnorm4x8: + emit_unary_func_op(result_type, id, args[0], "packUnorm4x8"); + break; + case GLSLstd450PackSnorm2x16: + emit_unary_func_op(result_type, id, args[0], "packSnorm2x16"); + break; + case GLSLstd450PackUnorm2x16: + emit_unary_func_op(result_type, id, args[0], "packUnorm2x16"); + break; + case GLSLstd450PackHalf2x16: + emit_unary_func_op(result_type, id, args[0], "packHalf2x16"); + break; + case GLSLstd450UnpackSnorm4x8: + emit_unary_func_op(result_type, id, args[0], "unpackSnorm4x8"); + break; + case GLSLstd450UnpackUnorm4x8: + emit_unary_func_op(result_type, id, args[0], "unpackUnorm4x8"); + break; + case GLSLstd450UnpackSnorm2x16: + emit_unary_func_op(result_type, id, args[0], "unpackSnorm2x16"); + break; + case GLSLstd450UnpackUnorm2x16: + emit_unary_func_op(result_type, id, args[0], "unpackUnorm2x16"); + break; + case GLSLstd450UnpackHalf2x16: + emit_unary_func_op(result_type, id, args[0], "unpackHalf2x16"); + break; + + case 
GLSLstd450PackDouble2x32: + emit_unary_func_op(result_type, id, args[0], "packDouble2x32"); + break; + case GLSLstd450UnpackDouble2x32: + emit_unary_func_op(result_type, id, args[0], "unpackDouble2x32"); + break; + + + case GLSLstd450Length: + emit_unary_func_op(result_type, id, args[0], "length"); + break; + case GLSLstd450Distance: + emit_binary_func_op(result_type, id, args[0], args[1], "distance"); + break; + case GLSLstd450Cross: + emit_binary_func_op(result_type, id, args[0], args[1], "cross"); + break; + case GLSLstd450Normalize: + emit_unary_func_op(result_type, id, args[0], "normalize"); + break; + case GLSLstd450FaceForward: + emit_trinary_func_op(result_type, id, args[0], args[1], args[2], "faceforward"); + break; + case GLSLstd450Reflect: + emit_binary_func_op(result_type, id, args[0], args[1], "reflect"); + break; + case GLSLstd450Refract: + emit_trinary_func_op(result_type, id, args[0], args[1], args[2], "refract"); + break; + + + case GLSLstd450FindILsb: + + emit_unary_func_op_cast(result_type, id, args[0], "findLSB", expression_type(args[0]).basetype, int_type); + break; + + case GLSLstd450FindSMsb: + emit_unary_func_op_cast(result_type, id, args[0], "findMSB", int_type, int_type); + break; + + case GLSLstd450FindUMsb: + emit_unary_func_op_cast(result_type, id, args[0], "findMSB", uint_type, + int_type); + break; + + + case GLSLstd450InterpolateAtCentroid: + emit_unary_func_op(result_type, id, args[0], "interpolateAtCentroid"); + break; + case GLSLstd450InterpolateAtSample: + emit_binary_func_op(result_type, id, args[0], args[1], "interpolateAtSample"); + break; + case GLSLstd450InterpolateAtOffset: + emit_binary_func_op(result_type, id, args[0], args[1], "interpolateAtOffset"); + break; + + case GLSLstd450NMin: + case GLSLstd450NMax: + { + emit_nminmax_op(result_type, id, args[0], args[1], op); + break; + } + + case GLSLstd450NClamp: + { + + + uint32_t &max_id = extra_sub_expressions[id | 0x80000000u]; + if (!max_id) + max_id = ir.increase_bound_by(1); + + + ir.meta[max_id] = ir.meta[id]; + + emit_nminmax_op(result_type, max_id, args[0], args[1], GLSLstd450NMax); + emit_nminmax_op(result_type, id, max_id, args[2], GLSLstd450NMin); + break; + } + + default: + statement("// unimplemented GLSL op ", eop); + break; + } +} + +void CompilerGLSL::emit_nminmax_op(uint32_t result_type, uint32_t id, uint32_t op0, uint32_t op1, GLSLstd450 op) +{ + + uint32_t &ids = extra_sub_expressions[id]; + if (!ids) + { + ids = ir.increase_bound_by(5); + auto btype = get(result_type); + btype.basetype = SPIRType::Boolean; + set(ids, btype); + } + + uint32_t btype_id = ids + 0; + uint32_t left_nan_id = ids + 1; + uint32_t right_nan_id = ids + 2; + uint32_t tmp_id = ids + 3; + uint32_t mixed_first_id = ids + 4; + + + ir.meta[tmp_id] = ir.meta[id]; + ir.meta[mixed_first_id] = ir.meta[id]; + + emit_unary_func_op(btype_id, left_nan_id, op0, "isnan"); + emit_unary_func_op(btype_id, right_nan_id, op1, "isnan"); + emit_binary_func_op(result_type, tmp_id, op0, op1, op == GLSLstd450NMin ? 
"min" : "max"); + emit_mix_op(result_type, mixed_first_id, tmp_id, op1, left_nan_id); + emit_mix_op(result_type, id, mixed_first_id, op0, right_nan_id); +} + +void CompilerGLSL::emit_spv_amd_shader_ballot_op(uint32_t result_type, uint32_t id, uint32_t eop, const uint32_t *args, + uint32_t) +{ + require_extension_internal("GL_AMD_shader_ballot"); + + enum AMDShaderBallot + { + SwizzleInvocationsAMD = 1, + SwizzleInvocationsMaskedAMD = 2, + WriteInvocationAMD = 3, + MbcntAMD = 4 + }; + + auto op = static_cast(eop); + + switch (op) + { + case SwizzleInvocationsAMD: + emit_binary_func_op(result_type, id, args[0], args[1], "swizzleInvocationsAMD"); + register_control_dependent_expression(id); + break; + + case SwizzleInvocationsMaskedAMD: + emit_binary_func_op(result_type, id, args[0], args[1], "swizzleInvocationsMaskedAMD"); + register_control_dependent_expression(id); + break; + + case WriteInvocationAMD: + emit_trinary_func_op(result_type, id, args[0], args[1], args[2], "writeInvocationAMD"); + register_control_dependent_expression(id); + break; + + case MbcntAMD: + emit_unary_func_op(result_type, id, args[0], "mbcntAMD"); + register_control_dependent_expression(id); + break; + + default: + statement("// unimplemented SPV AMD shader ballot op ", eop); + break; + } +} + +void CompilerGLSL::emit_spv_amd_shader_explicit_vertex_parameter_op(uint32_t result_type, uint32_t id, uint32_t eop, + const uint32_t *args, uint32_t) +{ + require_extension_internal("GL_AMD_shader_explicit_vertex_parameter"); + + enum AMDShaderExplicitVertexParameter + { + InterpolateAtVertexAMD = 1 + }; + + auto op = static_cast(eop); + + switch (op) + { + case InterpolateAtVertexAMD: + emit_binary_func_op(result_type, id, args[0], args[1], "interpolateAtVertexAMD"); + break; + + default: + statement("// unimplemented SPV AMD shader explicit vertex parameter op ", eop); + break; + } +} + +void CompilerGLSL::emit_spv_amd_shader_trinary_minmax_op(uint32_t result_type, uint32_t id, uint32_t eop, + const uint32_t *args, uint32_t) +{ + require_extension_internal("GL_AMD_shader_trinary_minmax"); + + enum AMDShaderTrinaryMinMax + { + FMin3AMD = 1, + UMin3AMD = 2, + SMin3AMD = 3, + FMax3AMD = 4, + UMax3AMD = 5, + SMax3AMD = 6, + FMid3AMD = 7, + UMid3AMD = 8, + SMid3AMD = 9 + }; + + auto op = static_cast(eop); + + switch (op) + { + case FMin3AMD: + case UMin3AMD: + case SMin3AMD: + emit_trinary_func_op(result_type, id, args[0], args[1], args[2], "min3"); + break; + + case FMax3AMD: + case UMax3AMD: + case SMax3AMD: + emit_trinary_func_op(result_type, id, args[0], args[1], args[2], "max3"); + break; + + case FMid3AMD: + case UMid3AMD: + case SMid3AMD: + emit_trinary_func_op(result_type, id, args[0], args[1], args[2], "mid3"); + break; + + default: + statement("// unimplemented SPV AMD shader trinary minmax op ", eop); + break; + } +} + +void CompilerGLSL::emit_spv_amd_gcn_shader_op(uint32_t result_type, uint32_t id, uint32_t eop, const uint32_t *args, + uint32_t) +{ + require_extension_internal("GL_AMD_gcn_shader"); + + enum AMDGCNShader + { + CubeFaceIndexAMD = 1, + CubeFaceCoordAMD = 2, + TimeAMD = 3 + }; + + auto op = static_cast(eop); + + switch (op) + { + case CubeFaceIndexAMD: + emit_unary_func_op(result_type, id, args[0], "cubeFaceIndexAMD"); + break; + case CubeFaceCoordAMD: + emit_unary_func_op(result_type, id, args[0], "cubeFaceCoordAMD"); + break; + case TimeAMD: + { + string expr = "timeAMD()"; + emit_op(result_type, id, expr, true); + register_control_dependent_expression(id); + break; + } + + default: + statement("// 
unimplemented SPV AMD gcn shader op ", eop); + break; + } +} + +void CompilerGLSL::emit_subgroup_op(const Instruction &i) +{ + const uint32_t *ops = stream(i); + auto op = static_cast(i.op); + + if (!options.vulkan_semantics) + SPIRV_CROSS_THROW("Can only use subgroup operations in Vulkan semantics."); + + switch (op) + { + case OpGroupNonUniformElect: + require_extension_internal("GL_KHR_shader_subgroup_basic"); + break; + + case OpGroupNonUniformBroadcast: + case OpGroupNonUniformBroadcastFirst: + case OpGroupNonUniformBallot: + case OpGroupNonUniformInverseBallot: + case OpGroupNonUniformBallotBitExtract: + case OpGroupNonUniformBallotBitCount: + case OpGroupNonUniformBallotFindLSB: + case OpGroupNonUniformBallotFindMSB: + require_extension_internal("GL_KHR_shader_subgroup_ballot"); + break; + + case OpGroupNonUniformShuffle: + case OpGroupNonUniformShuffleXor: + require_extension_internal("GL_KHR_shader_subgroup_shuffle"); + break; + + case OpGroupNonUniformShuffleUp: + case OpGroupNonUniformShuffleDown: + require_extension_internal("GL_KHR_shader_subgroup_shuffle_relative"); + break; + + case OpGroupNonUniformAll: + case OpGroupNonUniformAny: + case OpGroupNonUniformAllEqual: + require_extension_internal("GL_KHR_shader_subgroup_vote"); + break; + + case OpGroupNonUniformFAdd: + case OpGroupNonUniformFMul: + case OpGroupNonUniformFMin: + case OpGroupNonUniformFMax: + case OpGroupNonUniformIAdd: + case OpGroupNonUniformIMul: + case OpGroupNonUniformSMin: + case OpGroupNonUniformSMax: + case OpGroupNonUniformUMin: + case OpGroupNonUniformUMax: + case OpGroupNonUniformBitwiseAnd: + case OpGroupNonUniformBitwiseOr: + case OpGroupNonUniformBitwiseXor: + { + auto operation = static_cast(ops[3]); + if (operation == GroupOperationClusteredReduce) + { + require_extension_internal("GL_KHR_shader_subgroup_clustered"); + } + else if (operation == GroupOperationExclusiveScan || operation == GroupOperationInclusiveScan || + operation == GroupOperationReduce) + { + require_extension_internal("GL_KHR_shader_subgroup_arithmetic"); + } + else + SPIRV_CROSS_THROW("Invalid group operation."); + break; + } + + case OpGroupNonUniformQuadSwap: + case OpGroupNonUniformQuadBroadcast: + require_extension_internal("GL_KHR_shader_subgroup_quad"); + break; + + default: + SPIRV_CROSS_THROW("Invalid opcode for subgroup."); + } + + uint32_t result_type = ops[0]; + uint32_t id = ops[1]; + + auto scope = static_cast(get(ops[2]).scalar()); + if (scope != ScopeSubgroup) + SPIRV_CROSS_THROW("Only subgroup scope is supported."); + + switch (op) + { + case OpGroupNonUniformElect: + emit_op(result_type, id, "subgroupElect()", true); + break; + + case OpGroupNonUniformBroadcast: + emit_binary_func_op(result_type, id, ops[3], ops[4], "subgroupBroadcast"); + break; + + case OpGroupNonUniformBroadcastFirst: + emit_unary_func_op(result_type, id, ops[3], "subgroupBroadcastFirst"); + break; + + case OpGroupNonUniformBallot: + emit_unary_func_op(result_type, id, ops[3], "subgroupBallot"); + break; + + case OpGroupNonUniformInverseBallot: + emit_unary_func_op(result_type, id, ops[3], "subgroupInverseBallot"); + break; + + case OpGroupNonUniformBallotBitExtract: + emit_binary_func_op(result_type, id, ops[3], ops[4], "subgroupBallotBitExtract"); + break; + + case OpGroupNonUniformBallotFindLSB: + emit_unary_func_op(result_type, id, ops[3], "subgroupBallotFindLSB"); + break; + + case OpGroupNonUniformBallotFindMSB: + emit_unary_func_op(result_type, id, ops[3], "subgroupBallotFindMSB"); + break; + + case OpGroupNonUniformBallotBitCount: 
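+	// The GroupOperation literal in ops[3] picks the counting flavour; sketch of
+	// the mapping implemented below (the ballot value is ops[4]):
+	//   Reduce        -> subgroupBallotBitCount(ballot)
+	//   InclusiveScan -> subgroupBallotInclusiveBitCount(ballot)
+	//   ExclusiveScan -> subgroupBallotExclusiveBitCount(ballot)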
+	{
+		auto operation = static_cast<GroupOperation>(ops[3]);
+		if (operation == GroupOperationReduce)
+			emit_unary_func_op(result_type, id, ops[4], "subgroupBallotBitCount");
+		else if (operation == GroupOperationInclusiveScan)
+			emit_unary_func_op(result_type, id, ops[4], "subgroupBallotInclusiveBitCount");
+		else if (operation == GroupOperationExclusiveScan)
+			emit_unary_func_op(result_type, id, ops[4], "subgroupBallotExclusiveBitCount");
+		else
+			SPIRV_CROSS_THROW("Invalid BitCount operation.");
+		break;
+	}
+
+	case OpGroupNonUniformShuffle:
+		emit_binary_func_op(result_type, id, ops[3], ops[4], "subgroupShuffle");
+		break;
+
+	case OpGroupNonUniformShuffleXor:
+		emit_binary_func_op(result_type, id, ops[3], ops[4], "subgroupShuffleXor");
+		break;
+
+	case OpGroupNonUniformShuffleUp:
+		emit_binary_func_op(result_type, id, ops[3], ops[4], "subgroupShuffleUp");
+		break;
+
+	case OpGroupNonUniformShuffleDown:
+		emit_binary_func_op(result_type, id, ops[3], ops[4], "subgroupShuffleDown");
+		break;
+
+	case OpGroupNonUniformAll:
+		emit_unary_func_op(result_type, id, ops[3], "subgroupAll");
+		break;
+
+	case OpGroupNonUniformAny:
+		emit_unary_func_op(result_type, id, ops[3], "subgroupAny");
+		break;
+
+	case OpGroupNonUniformAllEqual:
+		emit_unary_func_op(result_type, id, ops[3], "subgroupAllEqual");
+		break;
+
+#define GLSL_GROUP_OP(op, glsl_op) \
+	case OpGroupNonUniform##op: \
+	{ \
+		auto operation = static_cast<GroupOperation>(ops[3]); \
+		if (operation == GroupOperationReduce) \
+			emit_unary_func_op(result_type, id, ops[4], "subgroup" #glsl_op); \
+		else if (operation == GroupOperationInclusiveScan) \
+			emit_unary_func_op(result_type, id, ops[4], "subgroupInclusive" #glsl_op); \
+		else if (operation == GroupOperationExclusiveScan) \
+			emit_unary_func_op(result_type, id, ops[4], "subgroupExclusive" #glsl_op); \
+		else if (operation == GroupOperationClusteredReduce) \
+			emit_binary_func_op(result_type, id, ops[4], ops[5], "subgroupClustered" #glsl_op); \
+		else \
+			SPIRV_CROSS_THROW("Invalid group operation."); \
+		break; \
+	}
+	GLSL_GROUP_OP(FAdd, Add)
+	GLSL_GROUP_OP(FMul, Mul)
+	GLSL_GROUP_OP(FMin, Min)
+	GLSL_GROUP_OP(FMax, Max)
+	GLSL_GROUP_OP(IAdd, Add)
+	GLSL_GROUP_OP(IMul, Mul)
+	GLSL_GROUP_OP(SMin, Min)
+	GLSL_GROUP_OP(SMax, Max)
+	GLSL_GROUP_OP(UMin, Min)
+	GLSL_GROUP_OP(UMax, Max)
+	GLSL_GROUP_OP(BitwiseAnd, And)
+	GLSL_GROUP_OP(BitwiseOr, Or)
+	GLSL_GROUP_OP(BitwiseXor, Xor)
+#undef GLSL_GROUP_OP
+
+	case OpGroupNonUniformQuadSwap:
+	{
+		uint32_t direction = get<SPIRConstant>(ops[4]).scalar();
+		if (direction == 0)
+			emit_unary_func_op(result_type, id, ops[3], "subgroupQuadSwapHorizontal");
+		else if (direction == 1)
+			emit_unary_func_op(result_type, id, ops[3], "subgroupQuadSwapVertical");
+		else if (direction == 2)
+			emit_unary_func_op(result_type, id, ops[3], "subgroupQuadSwapDiagonal");
+		else
+			SPIRV_CROSS_THROW("Invalid quad swap direction.");
+		break;
+	}
+
+	case OpGroupNonUniformQuadBroadcast:
+	{
+		emit_binary_func_op(result_type, id, ops[3], ops[4], "subgroupQuadBroadcast");
+		break;
+	}
+
+	default:
+		SPIRV_CROSS_THROW("Invalid opcode for subgroup.");
+	}
+
+	register_control_dependent_expression(id);
+}
+
+string CompilerGLSL::bitcast_glsl_op(const SPIRType &out_type, const SPIRType &in_type)
+{
+	// OpBitcast can deal with pointers.
+	if (out_type.pointer || in_type.pointer)
+		return type_to_glsl(out_type);
+
+	if (out_type.basetype == in_type.basetype)
+		return "";
+
+	assert(out_type.basetype != SPIRType::Boolean);
+	assert(in_type.basetype != SPIRType::Boolean);
+
+	bool integral_cast = type_is_integral(out_type) &&
type_is_integral(in_type); + bool same_size_cast = out_type.width == in_type.width; + + + if (integral_cast && same_size_cast) + return type_to_glsl(out_type); + + + if (out_type.width == 8 && in_type.width >= 16 && integral_cast && in_type.vecsize == 1) + return "unpack8"; + else if (in_type.width == 8 && out_type.width == 16 && integral_cast && out_type.vecsize == 1) + return "pack16"; + else if (in_type.width == 8 && out_type.width == 32 && integral_cast && out_type.vecsize == 1) + return "pack32"; + + + + if (out_type.basetype == SPIRType::UInt && in_type.basetype == SPIRType::Float) + { + if (is_legacy_es()) + SPIRV_CROSS_THROW("Float -> Uint bitcast not supported on legacy ESSL."); + else if (!options.es && options.version < 330) + require_extension_internal("GL_ARB_shader_bit_encoding"); + return "floatBitsToUint"; + } + else if (out_type.basetype == SPIRType::Int && in_type.basetype == SPIRType::Float) + { + if (is_legacy_es()) + SPIRV_CROSS_THROW("Float -> Int bitcast not supported on legacy ESSL."); + else if (!options.es && options.version < 330) + require_extension_internal("GL_ARB_shader_bit_encoding"); + return "floatBitsToInt"; + } + else if (out_type.basetype == SPIRType::Float && in_type.basetype == SPIRType::UInt) + { + if (is_legacy_es()) + SPIRV_CROSS_THROW("Uint -> Float bitcast not supported on legacy ESSL."); + else if (!options.es && options.version < 330) + require_extension_internal("GL_ARB_shader_bit_encoding"); + return "uintBitsToFloat"; + } + else if (out_type.basetype == SPIRType::Float && in_type.basetype == SPIRType::Int) + { + if (is_legacy_es()) + SPIRV_CROSS_THROW("Int -> Float bitcast not supported on legacy ESSL."); + else if (!options.es && options.version < 330) + require_extension_internal("GL_ARB_shader_bit_encoding"); + return "intBitsToFloat"; + } + + else if (out_type.basetype == SPIRType::Int64 && in_type.basetype == SPIRType::Double) + return "doubleBitsToInt64"; + else if (out_type.basetype == SPIRType::UInt64 && in_type.basetype == SPIRType::Double) + return "doubleBitsToUint64"; + else if (out_type.basetype == SPIRType::Double && in_type.basetype == SPIRType::Int64) + return "int64BitsToDouble"; + else if (out_type.basetype == SPIRType::Double && in_type.basetype == SPIRType::UInt64) + return "uint64BitsToDouble"; + else if (out_type.basetype == SPIRType::Short && in_type.basetype == SPIRType::Half) + return "float16BitsToInt16"; + else if (out_type.basetype == SPIRType::UShort && in_type.basetype == SPIRType::Half) + return "float16BitsToUint16"; + else if (out_type.basetype == SPIRType::Half && in_type.basetype == SPIRType::Short) + return "int16BitsToFloat16"; + else if (out_type.basetype == SPIRType::Half && in_type.basetype == SPIRType::UShort) + return "uint16BitsToFloat16"; + + + if (out_type.basetype == SPIRType::UInt64 && in_type.basetype == SPIRType::UInt && in_type.vecsize == 2) + return "packUint2x32"; + else if (out_type.basetype == SPIRType::Half && in_type.basetype == SPIRType::UInt && in_type.vecsize == 1) + return "unpackFloat2x16"; + else if (out_type.basetype == SPIRType::UInt && in_type.basetype == SPIRType::Half && in_type.vecsize == 2) + return "packFloat2x16"; + else if (out_type.basetype == SPIRType::Int && in_type.basetype == SPIRType::Short && in_type.vecsize == 2) + return "packInt2x16"; + else if (out_type.basetype == SPIRType::Short && in_type.basetype == SPIRType::Int && in_type.vecsize == 1) + return "unpackInt2x16"; + else if (out_type.basetype == SPIRType::UInt && in_type.basetype == SPIRType::UShort && 
in_type.vecsize == 2) + return "packUint2x16"; + else if (out_type.basetype == SPIRType::UShort && in_type.basetype == SPIRType::UInt && in_type.vecsize == 1) + return "unpackUint2x16"; + else if (out_type.basetype == SPIRType::Int64 && in_type.basetype == SPIRType::Short && in_type.vecsize == 4) + return "packInt4x16"; + else if (out_type.basetype == SPIRType::Short && in_type.basetype == SPIRType::Int64 && in_type.vecsize == 1) + return "unpackInt4x16"; + else if (out_type.basetype == SPIRType::UInt64 && in_type.basetype == SPIRType::UShort && in_type.vecsize == 4) + return "packUint4x16"; + else if (out_type.basetype == SPIRType::UShort && in_type.basetype == SPIRType::UInt64 && in_type.vecsize == 1) + return "unpackUint4x16"; + + return ""; +} + +string CompilerGLSL::bitcast_glsl(const SPIRType &result_type, uint32_t argument) +{ + auto op = bitcast_glsl_op(result_type, expression_type(argument)); + if (op.empty()) + return to_enclosed_unpacked_expression(argument); + else + return join(op, "(", to_unpacked_expression(argument), ")"); +} + +std::string CompilerGLSL::bitcast_expression(SPIRType::BaseType target_type, uint32_t arg) +{ + auto expr = to_expression(arg); + auto &src_type = expression_type(arg); + if (src_type.basetype != target_type) + { + auto target = src_type; + target.basetype = target_type; + expr = join(bitcast_glsl_op(target, src_type), "(", expr, ")"); + } + + return expr; +} + +std::string CompilerGLSL::bitcast_expression(const SPIRType &target_type, SPIRType::BaseType expr_type, + const std::string &expr) +{ + if (target_type.basetype == expr_type) + return expr; + + auto src_type = target_type; + src_type.basetype = expr_type; + return join(bitcast_glsl_op(target_type, src_type), "(", expr, ")"); +} + +string CompilerGLSL::builtin_to_glsl(BuiltIn builtin, StorageClass storage) +{ + switch (builtin) + { + case BuiltInPosition: + return "gl_Position"; + case BuiltInPointSize: + return "gl_PointSize"; + case BuiltInClipDistance: + return "gl_ClipDistance"; + case BuiltInCullDistance: + return "gl_CullDistance"; + case BuiltInVertexId: + if (options.vulkan_semantics) + SPIRV_CROSS_THROW( + "Cannot implement gl_VertexID in Vulkan GLSL. This shader was created with GL semantics."); + return "gl_VertexID"; + case BuiltInInstanceId: + if (options.vulkan_semantics) + SPIRV_CROSS_THROW( + "Cannot implement gl_InstanceID in Vulkan GLSL. 
This shader was created with GL semantics."); + return "gl_InstanceID"; + case BuiltInVertexIndex: + if (options.vulkan_semantics) + return "gl_VertexIndex"; + else + return "gl_VertexID"; + case BuiltInInstanceIndex: + if (options.vulkan_semantics) + return "gl_InstanceIndex"; + else if (options.vertex.support_nonzero_base_instance) + return "(gl_InstanceID + SPIRV_Cross_BaseInstance)"; + else + return "gl_InstanceID"; + case BuiltInPrimitiveId: + if (storage == StorageClassInput && get_entry_point().model == ExecutionModelGeometry) + return "gl_PrimitiveIDIn"; + else + return "gl_PrimitiveID"; + case BuiltInInvocationId: + return "gl_InvocationID"; + case BuiltInLayer: + return "gl_Layer"; + case BuiltInViewportIndex: + return "gl_ViewportIndex"; + case BuiltInTessLevelOuter: + return "gl_TessLevelOuter"; + case BuiltInTessLevelInner: + return "gl_TessLevelInner"; + case BuiltInTessCoord: + return "gl_TessCoord"; + case BuiltInFragCoord: + return "gl_FragCoord"; + case BuiltInPointCoord: + return "gl_PointCoord"; + case BuiltInFrontFacing: + return "gl_FrontFacing"; + case BuiltInFragDepth: + return "gl_FragDepth"; + case BuiltInNumWorkgroups: + return "gl_NumWorkGroups"; + case BuiltInWorkgroupSize: + return "gl_WorkGroupSize"; + case BuiltInWorkgroupId: + return "gl_WorkGroupID"; + case BuiltInLocalInvocationId: + return "gl_LocalInvocationID"; + case BuiltInGlobalInvocationId: + return "gl_GlobalInvocationID"; + case BuiltInLocalInvocationIndex: + return "gl_LocalInvocationIndex"; + case BuiltInHelperInvocation: + return "gl_HelperInvocation"; + case BuiltInBaseVertex: + if (options.es) + SPIRV_CROSS_THROW("BaseVertex not supported in ES profile."); + if (options.version < 460) + { + require_extension_internal("GL_ARB_shader_draw_parameters"); + return "gl_BaseVertexARB"; + } + return "gl_BaseVertex"; + case BuiltInBaseInstance: + if (options.es) + SPIRV_CROSS_THROW("BaseInstance not supported in ES profile."); + if (options.version < 460) + { + require_extension_internal("GL_ARB_shader_draw_parameters"); + return "gl_BaseInstanceARB"; + } + return "gl_BaseInstance"; + case BuiltInDrawIndex: + if (options.es) + SPIRV_CROSS_THROW("DrawIndex not supported in ES profile."); + if (options.version < 460) + { + require_extension_internal("GL_ARB_shader_draw_parameters"); + return "gl_DrawIDARB"; + } + return "gl_DrawID"; + + case BuiltInSampleId: + if (options.es && options.version < 320) + require_extension_internal("GL_OES_sample_variables"); + if (!options.es && options.version < 400) + SPIRV_CROSS_THROW("gl_SampleID not supported before GLSL 400."); + return "gl_SampleID"; + + case BuiltInSampleMask: + if (options.es && options.version < 320) + require_extension_internal("GL_OES_sample_variables"); + if (!options.es && options.version < 400) + SPIRV_CROSS_THROW("gl_SampleMask/gl_SampleMaskIn not supported before GLSL 400."); + + if (storage == StorageClassInput) + return "gl_SampleMaskIn"; + else + return "gl_SampleMask"; + + case BuiltInSamplePosition: + if (options.es && options.version < 320) + require_extension_internal("GL_OES_sample_variables"); + if (!options.es && options.version < 400) + SPIRV_CROSS_THROW("gl_SamplePosition not supported before GLSL 400."); + return "gl_SamplePosition"; + + case BuiltInViewIndex: + if (options.vulkan_semantics) + { + require_extension_internal("GL_EXT_multiview"); + return "gl_ViewIndex"; + } + else + { + require_extension_internal("GL_OVR_multiview2"); + return "gl_ViewID_OVR"; + } + + case BuiltInNumSubgroups: + if (!options.vulkan_semantics) 
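+		// gl_NumSubgroups and the other gl_Subgroup* builtins below exist only in
+		// Vulkan-flavoured GLSL; a shader using them must declare e.g.
+		//   #extension GL_KHR_shader_subgroup_basic : require
+		// which is what require_extension_internal() arranges for each case.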
+ SPIRV_CROSS_THROW("Need Vulkan semantics for subgroup."); + require_extension_internal("GL_KHR_shader_subgroup_basic"); + return "gl_NumSubgroups"; + + case BuiltInSubgroupId: + if (!options.vulkan_semantics) + SPIRV_CROSS_THROW("Need Vulkan semantics for subgroup."); + require_extension_internal("GL_KHR_shader_subgroup_basic"); + return "gl_SubgroupID"; + + case BuiltInSubgroupSize: + if (!options.vulkan_semantics) + SPIRV_CROSS_THROW("Need Vulkan semantics for subgroup."); + require_extension_internal("GL_KHR_shader_subgroup_basic"); + return "gl_SubgroupSize"; + + case BuiltInSubgroupLocalInvocationId: + if (!options.vulkan_semantics) + SPIRV_CROSS_THROW("Need Vulkan semantics for subgroup."); + require_extension_internal("GL_KHR_shader_subgroup_basic"); + return "gl_SubgroupInvocationID"; + + case BuiltInSubgroupEqMask: + if (!options.vulkan_semantics) + SPIRV_CROSS_THROW("Need Vulkan semantics for subgroup."); + require_extension_internal("GL_KHR_shader_subgroup_ballot"); + return "gl_SubgroupEqMask"; + + case BuiltInSubgroupGeMask: + if (!options.vulkan_semantics) + SPIRV_CROSS_THROW("Need Vulkan semantics for subgroup."); + require_extension_internal("GL_KHR_shader_subgroup_ballot"); + return "gl_SubgroupGeMask"; + + case BuiltInSubgroupGtMask: + if (!options.vulkan_semantics) + SPIRV_CROSS_THROW("Need Vulkan semantics for subgroup."); + require_extension_internal("GL_KHR_shader_subgroup_ballot"); + return "gl_SubgroupGtMask"; + + case BuiltInSubgroupLeMask: + if (!options.vulkan_semantics) + SPIRV_CROSS_THROW("Need Vulkan semantics for subgroup."); + require_extension_internal("GL_KHR_shader_subgroup_ballot"); + return "gl_SubgroupLeMask"; + + case BuiltInSubgroupLtMask: + if (!options.vulkan_semantics) + SPIRV_CROSS_THROW("Need Vulkan semantics for subgroup."); + require_extension_internal("GL_KHR_shader_subgroup_ballot"); + return "gl_SubgroupLtMask"; + + case BuiltInLaunchIdNV: + return "gl_LaunchIDNV"; + case BuiltInLaunchSizeNV: + return "gl_LaunchSizeNV"; + case BuiltInWorldRayOriginNV: + return "gl_WorldRayOriginNV"; + case BuiltInWorldRayDirectionNV: + return "gl_WorldRayDirectionNV"; + case BuiltInObjectRayOriginNV: + return "gl_ObjectRayOriginNV"; + case BuiltInObjectRayDirectionNV: + return "gl_ObjectRayDirectionNV"; + case BuiltInRayTminNV: + return "gl_RayTminNV"; + case BuiltInRayTmaxNV: + return "gl_RayTmaxNV"; + case BuiltInInstanceCustomIndexNV: + return "gl_InstanceCustomIndexNV"; + case BuiltInObjectToWorldNV: + return "gl_ObjectToWorldNV"; + case BuiltInWorldToObjectNV: + return "gl_WorldToObjectNV"; + case BuiltInHitTNV: + return "gl_HitTNV"; + case BuiltInHitKindNV: + return "gl_HitKindNV"; + case BuiltInIncomingRayFlagsNV: + return "gl_IncomingRayFlagsNV"; + + case BuiltInBaryCoordNV: + { + if (options.es && options.version < 320) + SPIRV_CROSS_THROW("gl_BaryCoordNV requires ESSL 320."); + else if (!options.es && options.version < 450) + SPIRV_CROSS_THROW("gl_BaryCoordNV requires GLSL 450."); + require_extension_internal("GL_NV_fragment_shader_barycentric"); + return "gl_BaryCoordNV"; + } + + case BuiltInBaryCoordNoPerspNV: + { + if (options.es && options.version < 320) + SPIRV_CROSS_THROW("gl_BaryCoordNoPerspNV requires ESSL 320."); + else if (!options.es && options.version < 450) + SPIRV_CROSS_THROW("gl_BaryCoordNoPerspNV requires GLSL 450."); + require_extension_internal("GL_NV_fragment_shader_barycentric"); + return "gl_BaryCoordNoPerspNV"; + } + + case BuiltInFragStencilRefEXT: + { + if (!options.es) + { + 
require_extension_internal("GL_ARB_shader_stencil_export"); + return "gl_FragStencilRefARB"; + } + else + SPIRV_CROSS_THROW("Stencil export not supported in GLES."); + } + + case BuiltInDeviceIndex: + if (!options.vulkan_semantics) + SPIRV_CROSS_THROW("Need Vulkan semantics for device group support."); + require_extension_internal("GL_EXT_device_group"); + return "gl_DeviceIndex"; + + default: + return join("gl_BuiltIn_", convert_to_string(builtin)); + } +} + +const char *CompilerGLSL::index_to_swizzle(uint32_t index) +{ + switch (index) + { + case 0: + return "x"; + case 1: + return "y"; + case 2: + return "z"; + case 3: + return "w"; + default: + SPIRV_CROSS_THROW("Swizzle index out of range"); + } +} + +void CompilerGLSL::access_chain_internal_append_index(std::string &expr, uint32_t , const SPIRType *type, + AccessChainFlags flags, bool & , + uint32_t index) +{ + bool index_is_literal = (flags & ACCESS_CHAIN_INDEX_IS_LITERAL_BIT) != 0; + bool register_expression_read = (flags & ACCESS_CHAIN_SKIP_REGISTER_EXPRESSION_READ_BIT) == 0; + + expr += "["; + + + bool nonuniform_index = + has_decoration(index, DecorationNonUniformEXT) && + (has_decoration(type->self, DecorationBlock) || has_decoration(type->self, DecorationBufferBlock)); + if (nonuniform_index) + { + expr += backend.nonuniform_qualifier; + expr += "("; + } + + if (index_is_literal) + expr += convert_to_string(index); + else + expr += to_expression(index, register_expression_read); + + if (nonuniform_index) + expr += ")"; + + expr += "]"; +} + +string CompilerGLSL::access_chain_internal(uint32_t base, const uint32_t *indices, uint32_t count, + AccessChainFlags flags, AccessChainMeta *meta) +{ + string expr; + + bool index_is_literal = (flags & ACCESS_CHAIN_INDEX_IS_LITERAL_BIT) != 0; + bool chain_only = (flags & ACCESS_CHAIN_CHAIN_ONLY_BIT) != 0; + bool ptr_chain = (flags & ACCESS_CHAIN_PTR_CHAIN_BIT) != 0; + bool register_expression_read = (flags & ACCESS_CHAIN_SKIP_REGISTER_EXPRESSION_READ_BIT) == 0; + + if (!chain_only) + { + + auto *e = maybe_get(base); + bool old_transpose = e && e->need_transpose; + if (e) + e->need_transpose = false; + expr = to_enclosed_expression(base, register_expression_read); + if (e) + e->need_transpose = old_transpose; + } + + + + uint32_t type_id = expression_type_id(base); + + if (!backend.native_pointers) + { + if (ptr_chain) + SPIRV_CROSS_THROW("Backend does not support native pointers and does not support OpPtrAccessChain."); + + + + if (should_dereference(base)) + { + auto &type = get(type_id); + expr = dereference_expression(type, expr); + } + } + + const auto *type = &get_pointee_type(type_id); + + bool access_chain_is_arrayed = expr.find_first_of('[') != string::npos; + bool row_major_matrix_needs_conversion = is_non_native_row_major_matrix(base); + bool is_packed = has_extended_decoration(base, SPIRVCrossDecorationPhysicalTypePacked); + uint32_t physical_type = get_extended_decoration(base, SPIRVCrossDecorationPhysicalTypeID); + bool is_invariant = has_decoration(base, DecorationInvariant); + bool pending_array_enclose = false; + bool dimension_flatten = false; + + const auto append_index = [&](uint32_t index) { + access_chain_internal_append_index(expr, base, type, flags, access_chain_is_arrayed, index); + }; + + for (uint32_t i = 0; i < count; i++) + { + uint32_t index = indices[i]; + + + if (ptr_chain && i == 0) + { + + + if (options.flatten_multidimensional_arrays) + { + dimension_flatten = type->array.size() >= 1; + pending_array_enclose = dimension_flatten; + if 
(pending_array_enclose)
+					expr += "[";
+			}
+
+			if (options.flatten_multidimensional_arrays && dimension_flatten)
+			{
+				// When flattening multidimensional arrays, do manual stride computation.
+				if (index_is_literal)
+					expr += convert_to_string(index);
+				else
+					expr += to_enclosed_expression(index, register_expression_read);
+
+				for (auto j = uint32_t(type->array.size()); j; j--)
+				{
+					expr += " * ";
+					expr += enclose_expression(to_array_size(*type, j - 1));
+				}
+
+				if (type->array.empty())
+					pending_array_enclose = false;
+				else
+					expr += " + ";
+
+				if (!pending_array_enclose)
+					expr += "]";
+			}
+			else
+			{
+				append_index(index);
+			}
+
+			if (type->basetype == SPIRType::ControlPointArray)
+			{
+				type_id = type->parent_type;
+				type = &get<SPIRType>(type_id);
+			}
+
+			access_chain_is_arrayed = true;
+		}
+		// Arrays.
+		else if (!type->array.empty())
+		{
+			// When flattening multidimensional arrays, only open the bracket on the
+			// first array index.
+			if (options.flatten_multidimensional_arrays && !pending_array_enclose)
+			{
+				dimension_flatten = type->array.size() > 1;
+				pending_array_enclose = dimension_flatten;
+				if (pending_array_enclose)
+					expr += "[";
+			}
+
+			assert(type->parent_type);
+
+			auto *var = maybe_get<SPIRVariable>(base);
+			if (backend.force_gl_in_out_block && i == 0 && var && is_builtin_variable(*var) &&
+			    !has_decoration(type->self, DecorationBlock))
+			{
+				// Backends that force gl_in/gl_out blocks expect these builtins to be
+				// accessed through the block arrays rather than as plain arrayed variables,
+				// so redirect the access accordingly.
+				auto builtin = ir.meta[base].decoration.builtin_type;
+				switch (builtin)
+				{
+				case BuiltInPosition:
+				case BuiltInPointSize:
+					if (var->storage == StorageClassInput)
+						expr = join("gl_in[", to_expression(index, register_expression_read), "].", expr);
+					else if (var->storage == StorageClassOutput)
+						expr = join("gl_out[", to_expression(index, register_expression_read), "].", expr);
+					else
+						append_index(index);
+					break;
+
+				default:
+					append_index(index);
+					break;
+				}
+			}
+			else if (options.flatten_multidimensional_arrays && dimension_flatten)
+			{
+				// When flattening multidimensional arrays, do manual stride computation.
+				auto &parent_type = get<SPIRType>(type->parent_type);
+
+				if (index_is_literal)
+					expr += convert_to_string(index);
+				else
+					expr += to_enclosed_expression(index, register_expression_read);
+
+				for (auto j = uint32_t(parent_type.array.size()); j; j--)
+				{
+					expr += " * ";
+					expr += enclose_expression(to_array_size(parent_type, j - 1));
+				}
+
+				if (parent_type.array.empty())
+					pending_array_enclose = false;
+				else
+					expr += " + ";
+
+				if (!pending_array_enclose)
+					expr += "]";
+			}
+			// Some builtins are arrays in SPIR-V but not in the target language.
+			else if (!builtin_translates_to_nonarray(BuiltIn(get_decoration(base, DecorationBuiltIn))))
+			{
+				append_index(index);
+			}
+
+			type_id = type->parent_type;
+			type = &get<SPIRType>(type_id);
+
+			access_chain_is_arrayed = true;
+		}
+		// For structs, the index refers to a constant, which indexes into the members.
+		// If the member is a builtin, the entire expression is replaced with the builtin one.
+		else if (type->basetype == SPIRType::Struct)
+		{
+			if (!index_is_literal)
+				index = get<SPIRConstant>(index).scalar();
+
+			if (index >= type->member_types.size())
+				SPIRV_CROSS_THROW("Member index is out of bounds!");
+
+			BuiltIn builtin;
+			if (is_member_builtin(*type, index, &builtin))
+			{
+				if (access_chain_is_arrayed)
+				{
+					expr += ".";
+					expr += builtin_to_glsl(builtin, type->storage);
+				}
+				else
+					expr = builtin_to_glsl(builtin, type->storage);
+			}
+			else
+			{
+				// If the member has a qualified name, use it as the entire chain.
+				string qual_mbr_name = get_member_qualified_name(type_id, index);
+				if (!qual_mbr_name.empty())
+					expr = qual_mbr_name;
+				else
+					expr += to_member_reference(base, *type, index, ptr_chain);
+			}
+
+			if (has_member_decoration(type->self, index, DecorationInvariant))
+				is_invariant = true;
+
+			is_packed = member_is_packed_physical_type(*type, index);
+			if (member_is_remapped_physical_type(*type, index))
+				physical_type = get_extended_member_decoration(type->self, index, SPIRVCrossDecorationPhysicalTypeID);
+			else
+				physical_type = 0;
+
+			row_major_matrix_needs_conversion = member_is_non_native_row_major_matrix(*type, index);
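+			// One struct level has been consumed at this point. As a sketch with a
+			// hypothetical block `struct Data { vec4 a; mat4 b; };`, an access chain
+			// base -> member 1 -> column 2 has rewritten `data` into `data.b` here,
+			// and the matrix/vector branches below append the rest to yield `data.b[2]`.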
+ type = &get<SPIRType>(type->member_types[index]); + } + + else if (type->columns > 1) + { + + + + + expr += "["; + if (index_is_literal) + expr += convert_to_string(index); + else + expr += to_expression(index, register_expression_read); + expr += "]"; + + type_id = type->parent_type; + type = &get<SPIRType>(type_id); + } + + else if (type->vecsize > 1) + { + string deferred_index; + if (row_major_matrix_needs_conversion) + { + + auto column_index = expr.find_last_of('['); + if (column_index != string::npos) + { + deferred_index = expr.substr(column_index); + expr.resize(column_index); + } + } + + if (index_is_literal && !is_packed && !row_major_matrix_needs_conversion) + { + expr += "."; + expr += index_to_swizzle(index); + } + else if (ir.ids[index].get_type() == TypeConstant && !is_packed && !row_major_matrix_needs_conversion) + { + auto &c = get<SPIRConstant>(index); + if (c.specialization) + { + + expr += join("[", to_expression(index), "]"); + } + else + { + expr += "."; + expr += index_to_swizzle(c.scalar()); + } + } + else if (index_is_literal) + { + + expr += join("[", index, "]"); + } + else + { + expr += "["; + expr += to_expression(index, register_expression_read); + expr += "]"; + } + + expr += deferred_index; + row_major_matrix_needs_conversion = false; + + is_packed = false; + physical_type = 0; + type_id = type->parent_type; + type = &get<SPIRType>(type_id); + } + else if (!backend.allow_truncated_access_chain) + SPIRV_CROSS_THROW("Cannot subdivide a scalar value!"); + } + + if (pending_array_enclose) + { + SPIRV_CROSS_THROW("Flattening of multidimensional arrays was enabled, " + "but the access chain was terminated in the middle of a multidimensional array. " + "This is not supported."); + } + + if (meta) + { + meta->need_transpose = row_major_matrix_needs_conversion; + meta->storage_is_packed = is_packed; + meta->storage_is_invariant = is_invariant; + meta->storage_physical_type = physical_type; + } + + return expr; +} + +string CompilerGLSL::to_flattened_struct_member(const SPIRVariable &var, uint32_t index) +{ + auto &type = get<SPIRType>(var.basetype); + return sanitize_underscores(join(to_name(var.self), "_", to_member_name(type, index))); +} + +string CompilerGLSL::access_chain(uint32_t base, const uint32_t *indices, uint32_t count, const SPIRType &target_type, + AccessChainMeta *meta, bool ptr_chain) +{ + if (flattened_buffer_blocks.count(base)) + { + uint32_t matrix_stride = 0; + bool need_transpose = false; + flattened_access_chain_offset(expression_type(base), indices, count, 0, 16, &need_transpose, &matrix_stride, + ptr_chain); + + if (meta) + { + meta->need_transpose = target_type.columns > 1 && need_transpose; + meta->storage_is_packed = false; + } + + return flattened_access_chain(base, indices, count, target_type, 0, matrix_stride, need_transpose); + } + else if (flattened_structs.count(base) && count > 0) + { + AccessChainFlags flags = ACCESS_CHAIN_CHAIN_ONLY_BIT | ACCESS_CHAIN_SKIP_REGISTER_EXPRESSION_READ_BIT; + if (ptr_chain) + flags |= ACCESS_CHAIN_PTR_CHAIN_BIT; + + auto chain = access_chain_internal(base, indices, count, flags, nullptr).substr(1); + if (meta) + { + meta->need_transpose = false; + meta->storage_is_packed = false; + } + return sanitize_underscores(join(to_name(base), "_", chain)); + } + else + { + AccessChainFlags flags = ACCESS_CHAIN_SKIP_REGISTER_EXPRESSION_READ_BIT; + if (ptr_chain) + flags |= ACCESS_CHAIN_PTR_CHAIN_BIT; + return access_chain_internal(base, indices, count, flags, meta); + } +}
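+ // [editor's illustration, not part of the upstream patch] Example of the
+ // dispatch above, assuming a hypothetical flattened struct variable
+ // `vout` with a member `color`: the flattened_structs path strips the
+ // leading '.' from the chain and emits the single flat name
+ // "vout_color", while flattened buffer blocks instead go through
+ // flattened_access_chain() with a 16-byte (vec4) word stride.
+
+string CompilerGLSL::load_flattened_struct(SPIRVariable &var) +{ + auto expr = 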
type_to_glsl_constructor(get(var.basetype)); + expr += '('; + + auto &type = get(var.basetype); + for (uint32_t i = 0; i < uint32_t(type.member_types.size()); i++) + { + if (i) + expr += ", "; + + + + expr += to_flattened_struct_member(var, i); + } + expr += ')'; + return expr; +} + +void CompilerGLSL::store_flattened_struct(SPIRVariable &var, uint32_t value) +{ + + + auto rhs = to_expression(value); + + + + + begin_scope(); + statement(variable_decl_function_local(var), " = ", rhs, ";"); + + auto &type = get(var.basetype); + for (uint32_t i = 0; i < uint32_t(type.member_types.size()); i++) + { + + + + auto lhs = sanitize_underscores(join(to_name(var.self), "_", to_member_name(type, i))); + rhs = join(to_name(var.self), ".", to_member_name(type, i)); + statement(lhs, " = ", rhs, ";"); + } + end_scope(); +} + +std::string CompilerGLSL::flattened_access_chain(uint32_t base, const uint32_t *indices, uint32_t count, + const SPIRType &target_type, uint32_t offset, uint32_t matrix_stride, + bool need_transpose) +{ + if (!target_type.array.empty()) + SPIRV_CROSS_THROW("Access chains that result in an array can not be flattened"); + else if (target_type.basetype == SPIRType::Struct) + return flattened_access_chain_struct(base, indices, count, target_type, offset); + else if (target_type.columns > 1) + return flattened_access_chain_matrix(base, indices, count, target_type, offset, matrix_stride, need_transpose); + else + return flattened_access_chain_vector(base, indices, count, target_type, offset, matrix_stride, need_transpose); +} + +std::string CompilerGLSL::flattened_access_chain_struct(uint32_t base, const uint32_t *indices, uint32_t count, + const SPIRType &target_type, uint32_t offset) +{ + std::string expr; + + expr += type_to_glsl_constructor(target_type); + expr += "("; + + for (uint32_t i = 0; i < uint32_t(target_type.member_types.size()); ++i) + { + if (i != 0) + expr += ", "; + + const SPIRType &member_type = get(target_type.member_types[i]); + uint32_t member_offset = type_struct_member_offset(target_type, i); + + + + bool need_transpose = false; + uint32_t matrix_stride = 0; + if (member_type.columns > 1) + { + need_transpose = combined_decoration_for_member(target_type, i).get(DecorationRowMajor); + matrix_stride = type_struct_member_matrix_stride(target_type, i); + } + + auto tmp = flattened_access_chain(base, indices, count, member_type, offset + member_offset, matrix_stride, + need_transpose); + + + if (need_transpose) + expr += convert_row_major_matrix(tmp, member_type, 0, false); + else + expr += tmp; + } + + expr += ")"; + + return expr; +} + +std::string CompilerGLSL::flattened_access_chain_matrix(uint32_t base, const uint32_t *indices, uint32_t count, + const SPIRType &target_type, uint32_t offset, + uint32_t matrix_stride, bool need_transpose) +{ + assert(matrix_stride); + SPIRType tmp_type = target_type; + if (need_transpose) + swap(tmp_type.vecsize, tmp_type.columns); + + std::string expr; + + expr += type_to_glsl_constructor(tmp_type); + expr += "("; + + for (uint32_t i = 0; i < tmp_type.columns; i++) + { + if (i != 0) + expr += ", "; + + expr += flattened_access_chain_vector(base, indices, count, tmp_type, offset + i * matrix_stride, matrix_stride, + false); + } + + expr += ")"; + + return expr; +} + +std::string CompilerGLSL::flattened_access_chain_vector(uint32_t base, const uint32_t *indices, uint32_t count, + const SPIRType &target_type, uint32_t offset, + uint32_t matrix_stride, bool need_transpose) +{ + auto result = 
flattened_access_chain_offset(expression_type(base), indices, count, offset, 16); + + auto buffer_name = to_name(expression_type(base).self); + + if (need_transpose) + { + std::string expr; + + if (target_type.vecsize > 1) + { + expr += type_to_glsl_constructor(target_type); + expr += "("; + } + + for (uint32_t i = 0; i < target_type.vecsize; ++i) + { + if (i != 0) + expr += ", "; + + uint32_t component_offset = result.second + i * matrix_stride; + + assert(component_offset % (target_type.width / 8) == 0); + uint32_t index = component_offset / (target_type.width / 8); + + expr += buffer_name; + expr += "["; + expr += result.first; + expr += convert_to_string(index / 4); + expr += "]"; + + expr += vector_swizzle(1, index % 4); + } + + if (target_type.vecsize > 1) + { + expr += ")"; + } + + return expr; + } + else + { + assert(result.second % (target_type.width / 8) == 0); + uint32_t index = result.second / (target_type.width / 8); + + std::string expr; + + expr += buffer_name; + expr += "["; + expr += result.first; + expr += convert_to_string(index / 4); + expr += "]"; + + expr += vector_swizzle(target_type.vecsize, index % 4); + + return expr; + } +} + +std::pair<std::string, uint32_t> CompilerGLSL::flattened_access_chain_offset( + const SPIRType &basetype, const uint32_t *indices, uint32_t count, uint32_t offset, uint32_t word_stride, + bool *need_transpose, uint32_t *out_matrix_stride, bool ptr_chain) +{ + + const auto *type = &get_pointee_type(basetype); + + + + + + + assert(type->basetype == SPIRType::Struct); + uint32_t type_id = 0; + + std::string expr; + + + bool row_major_matrix_needs_conversion = need_transpose ? *need_transpose : false; + uint32_t matrix_stride = out_matrix_stride ? *out_matrix_stride : 0; + + for (uint32_t i = 0; i < count; i++) + { + uint32_t index = indices[i]; + + + if (ptr_chain && i == 0) + { + + uint32_t array_stride = get_decoration(basetype.self, DecorationArrayStride); + if (!array_stride) + SPIRV_CROSS_THROW("SPIR-V does not define ArrayStride for buffer block."); + + auto *constant = maybe_get<SPIRConstant>(index); + if (constant) + { + + offset += constant->scalar() * array_stride; + } + else + { + + if (array_stride % word_stride) + { + SPIRV_CROSS_THROW( + "Array stride for dynamic indexing must be divisible by the size of a 4-component vector. " + "Likely culprit here is a float or vec2 array inside a push constant block which is std430. " + "This cannot be flattened. Try using std140 layout instead."); + } + + expr += to_enclosed_expression(index); + expr += " * "; + expr += convert_to_string(array_stride / word_stride); + expr += " + "; + } + + } + 
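+ // [editor's illustration, not part of the upstream patch] Worked example
+ // of the stride checks in this function, with the usual word_stride of
+ // 16 bytes (one vec4): a std140 `float a[8]` has ArrayStride 16, so a
+ // dynamic index emits "i * 1 + " in vec4 words, while a std430
+ // `float a[8]` has ArrayStride 4, and since 4 % 16 != 0 flattening is
+ // rejected with the std140 suggestion below.
+ else if (!type->array.empty()) + { + + uint32_t array_stride = get_decoration(type_id, DecorationArrayStride); + if (!array_stride) + SPIRV_CROSS_THROW("SPIR-V does not define ArrayStride for buffer block."); + + auto *constant = maybe_get<SPIRConstant>(index); + if (constant) + { + + offset += constant->scalar() * array_stride; + } + else + { + + if (array_stride % word_stride) + { + SPIRV_CROSS_THROW( + "Array stride for dynamic indexing must be divisible by the size of a 4-component vector. " + "Likely culprit here is a float or vec2 array inside a push constant block which is std430. " + "This cannot be flattened. 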
Try using std140 layout instead."); + } + + expr += to_enclosed_expression(index, false); + expr += " * "; + expr += convert_to_string(array_stride / word_stride); + expr += " + "; + } + + uint32_t parent_type = type->parent_type; + type = &get(parent_type); + type_id = parent_type; + + + } + + + else if (type->basetype == SPIRType::Struct) + { + index = get(index).scalar(); + + if (index >= type->member_types.size()) + SPIRV_CROSS_THROW("Member index is out of bounds!"); + + offset += type_struct_member_offset(*type, index); + type_id = type->member_types[index]; + + auto &struct_type = *type; + type = &get(type->member_types[index]); + + if (type->columns > 1) + { + matrix_stride = type_struct_member_matrix_stride(struct_type, index); + row_major_matrix_needs_conversion = + combined_decoration_for_member(struct_type, index).get(DecorationRowMajor); + } + else + row_major_matrix_needs_conversion = false; + } + + else if (type->columns > 1) + { + auto *constant = maybe_get(index); + if (constant) + { + index = get(index).scalar(); + offset += index * (row_major_matrix_needs_conversion ? (type->width / 8) : matrix_stride); + } + else + { + uint32_t indexing_stride = row_major_matrix_needs_conversion ? (type->width / 8) : matrix_stride; + + if (indexing_stride % word_stride) + { + SPIRV_CROSS_THROW( + "Matrix stride for dynamic indexing must be divisible by the size of a 4-component vector. " + "Likely culprit here is a row-major matrix being accessed dynamically. " + "This cannot be flattened. Try using std140 layout instead."); + } + + expr += to_enclosed_expression(index, false); + expr += " * "; + expr += convert_to_string(indexing_stride / word_stride); + expr += " + "; + } + + uint32_t parent_type = type->parent_type; + type = &get(type->parent_type); + type_id = parent_type; + } + + else if (type->vecsize > 1) + { + auto *constant = maybe_get(index); + if (constant) + { + index = get(index).scalar(); + offset += index * (row_major_matrix_needs_conversion ? matrix_stride : (type->width / 8)); + } + else + { + uint32_t indexing_stride = row_major_matrix_needs_conversion ? matrix_stride : (type->width / 8); + + + if (indexing_stride % word_stride) + { + SPIRV_CROSS_THROW( + "Stride for dynamic vector indexing must be divisible by the size of a 4-component vector. 
" + "This cannot be flattened in legacy targets."); + } + + expr += to_enclosed_expression(index, false); + expr += " * "; + expr += convert_to_string(indexing_stride / word_stride); + expr += " + "; + } + + uint32_t parent_type = type->parent_type; + type = &get(type->parent_type); + type_id = parent_type; + } + else + SPIRV_CROSS_THROW("Cannot subdivide a scalar value!"); + } + + if (need_transpose) + *need_transpose = row_major_matrix_needs_conversion; + if (out_matrix_stride) + *out_matrix_stride = matrix_stride; + + return std::make_pair(expr, offset); +} + +bool CompilerGLSL::should_dereference(uint32_t id) +{ + const auto &type = expression_type(id); + + if (!type.pointer) + return false; + + + if (!expression_is_lvalue(id)) + return false; + + + if (auto *var = maybe_get(id)) + return var->phi_variable; + + + if (auto *expr = maybe_get(id)) + return !expr->access_chain; + + + return true; +} + +bool CompilerGLSL::should_forward(uint32_t id) const +{ + + + auto *var = maybe_get(id); + if (var && var->forwardable) + return true; + + + if (options.force_temporary) + return false; + + + if (is_immutable(id)) + return true; + + return false; +} + +bool CompilerGLSL::should_suppress_usage_tracking(uint32_t id) const +{ + + return !expression_is_forwarded(id) || expression_suppresses_usage_tracking(id); +} + +void CompilerGLSL::track_expression_read(uint32_t id) +{ + switch (ir.ids[id].get_type()) + { + case TypeExpression: + { + auto &e = get(id); + for (auto implied_read : e.implied_read_expressions) + track_expression_read(implied_read); + break; + } + + case TypeAccessChain: + { + auto &e = get(id); + for (auto implied_read : e.implied_read_expressions) + track_expression_read(implied_read); + break; + } + + default: + break; + } + + + + if (expression_is_forwarded(id) && !expression_suppresses_usage_tracking(id)) + { + auto &v = expression_usage_counts[id]; + v++; + + if (v >= 2) + { + + + + forced_temporaries.insert(id); + + force_recompile(); + } + } +} + +bool CompilerGLSL::args_will_forward(uint32_t id, const uint32_t *args, uint32_t num_args, bool pure) +{ + if (forced_temporaries.find(id) != end(forced_temporaries)) + return false; + + for (uint32_t i = 0; i < num_args; i++) + if (!should_forward(args[i])) + return false; + + + if (!pure) + { + for (auto global : global_variables) + if (!should_forward(global)) + return false; + for (auto aliased : aliased_variables) + if (!should_forward(aliased)) + return false; + } + + return true; +} + +void CompilerGLSL::register_impure_function_call() +{ + + for (auto global : global_variables) + flush_dependees(get(global)); + for (auto aliased : aliased_variables) + flush_dependees(get(aliased)); +} + +void CompilerGLSL::register_call_out_argument(uint32_t id) +{ + register_write(id); + + auto *var = maybe_get(id); + if (var) + flush_variable_declaration(var->self); +} + +string CompilerGLSL::variable_decl_function_local(SPIRVariable &var) +{ + + + + + auto old_storage = var.storage; + var.storage = StorageClassFunction; + auto expr = variable_decl(var); + var.storage = old_storage; + return expr; +} + +void CompilerGLSL::emit_variable_temporary_copies(const SPIRVariable &var) +{ + + if (var.allocate_temporary_copy && !flushed_phi_variables.count(var.self)) + { + auto &type = get(var.basetype); + auto &flags = get_decoration_bitset(var.self); + statement(flags_to_qualifiers_glsl(type, flags), variable_decl(type, join("_", var.self, "_copy")), ";"); + flushed_phi_variables.insert(var.self); + } +} + +void 
CompilerGLSL::flush_variable_declaration(uint32_t id) +{ + + auto *var = maybe_get(id); + if (var && var->deferred_declaration) + { + statement(variable_decl_function_local(*var), ";"); + var->deferred_declaration = false; + } + if (var) + { + emit_variable_temporary_copies(*var); + } +} + +bool CompilerGLSL::remove_duplicate_swizzle(string &op) +{ + auto pos = op.find_last_of('.'); + if (pos == string::npos || pos == 0) + return false; + + string final_swiz = op.substr(pos + 1, string::npos); + + if (backend.swizzle_is_function) + { + if (final_swiz.size() < 2) + return false; + + if (final_swiz.substr(final_swiz.size() - 2, string::npos) == "()") + final_swiz.erase(final_swiz.size() - 2, string::npos); + else + return false; + } + + + + + for (uint32_t i = 0; i < final_swiz.size(); i++) + { + static const char expected[] = { 'x', 'y', 'z', 'w' }; + if (i >= 4 || final_swiz[i] != expected[i]) + return false; + } + + auto prevpos = op.find_last_of('.', pos - 1); + if (prevpos == string::npos) + return false; + + prevpos++; + + + for (auto i = prevpos; i < pos; i++) + { + if (op[i] < 'w' || op[i] > 'z') + { + + if (backend.swizzle_is_function && i + 2 == pos && op[i] == '(' && op[i + 1] == ')') + break; + return false; + } + } + + + + if (pos - prevpos >= final_swiz.size()) + { + op.erase(prevpos + final_swiz.size(), string::npos); + + + if (backend.swizzle_is_function) + op += "()"; + } + return true; +} + + + + + +bool CompilerGLSL::remove_unity_swizzle(uint32_t base, string &op) +{ + auto pos = op.find_last_of('.'); + if (pos == string::npos || pos == 0) + return false; + + string final_swiz = op.substr(pos + 1, string::npos); + + if (backend.swizzle_is_function) + { + if (final_swiz.size() < 2) + return false; + + if (final_swiz.substr(final_swiz.size() - 2, string::npos) == "()") + final_swiz.erase(final_swiz.size() - 2, string::npos); + else + return false; + } + + + + + for (uint32_t i = 0; i < final_swiz.size(); i++) + { + static const char expected[] = { 'x', 'y', 'z', 'w' }; + if (i >= 4 || final_swiz[i] != expected[i]) + return false; + } + + auto &type = expression_type(base); + + + assert(type.columns == 1 && type.array.empty()); + + if (type.vecsize == final_swiz.size()) + op.erase(pos, string::npos); + return true; +} + +string CompilerGLSL::build_composite_combiner(uint32_t return_type, const uint32_t *elems, uint32_t length) +{ + ID base = 0; + string op; + string subop; + + + auto &type = get(return_type); + bool can_apply_swizzle_opt = type.basetype != SPIRType::Struct && type.array.empty() && type.columns == 1; + bool swizzle_optimization = false; + + for (uint32_t i = 0; i < length; i++) + { + auto *e = maybe_get(elems[i]); + + + + if (can_apply_swizzle_opt && e && e->base_expression && e->base_expression == base) + { + + assert(!e->expression.empty() && e->expression.front() == '.'); + subop += e->expression.substr(1, string::npos); + swizzle_optimization = true; + } + else + { + + + + + + + if (swizzle_optimization) + { + if (backend.swizzle_is_function) + subop += "()"; + + + + + + + + + + + + if (!remove_duplicate_swizzle(subop)) + remove_unity_swizzle(base, subop); + + + strip_enclosed_expression(subop); + swizzle_optimization = false; + op += subop; + } + else + op += subop; + + if (i) + op += ", "; + subop = to_composite_constructor_expression(elems[i]); + } + + base = e ? 
e->base_expression : ID(0); + } + + if (swizzle_optimization) + { + if (backend.swizzle_is_function) + subop += "()"; + + if (!remove_duplicate_swizzle(subop)) + remove_unity_swizzle(base, subop); + + strip_enclosed_expression(subop); + } + + op += subop; + return op; +} + +bool CompilerGLSL::skip_argument(uint32_t id) const +{ + if (!combined_image_samplers.empty() || !options.vulkan_semantics) + { + auto &type = expression_type(id); + if (type.basetype == SPIRType::Sampler || (type.basetype == SPIRType::Image && type.image.sampled == 1)) + return true; + } + return false; +} + +bool CompilerGLSL::optimize_read_modify_write(const SPIRType &type, const string &lhs, const string &rhs) +{ + + + if (rhs.size() < lhs.size() + 3) + return false; + + + + if (type.vecsize > 1 && type.columns > 1) + return false; + + auto index = rhs.find(lhs); + if (index != 0) + return false; + + + auto op = rhs.find_first_of("+-/*%|&^", lhs.size() + 1); + if (op != lhs.size() + 1) + return false; + + + if (rhs[op + 1] != ' ') + return false; + + char bop = rhs[op]; + auto expr = rhs.substr(lhs.size() + 3); + + + if ((bop == '+' || bop == '-') && (expr == "1" || expr == "uint(1)" || expr == "1u" || expr == "int(1u)")) + statement(lhs, bop, bop, ";"); + else + statement(lhs, " ", bop, "= ", expr, ";"); + return true; +} + +void CompilerGLSL::register_control_dependent_expression(uint32_t expr) +{ + if (forwarded_temporaries.find(expr) == end(forwarded_temporaries)) + return; + + assert(current_emitting_block); + current_emitting_block->invalidate_expressions.push_back(expr); +} + +void CompilerGLSL::emit_block_instructions(SPIRBlock &block) +{ + current_emitting_block = █ + for (auto &op : block.ops) + emit_instruction(op); + current_emitting_block = nullptr; +} + +void CompilerGLSL::disallow_forwarding_in_expression_chain(const SPIRExpression &expr) +{ + + + + if (expression_is_forwarded(expr.self) && !expression_suppresses_usage_tracking(expr.self) && + forced_invariant_temporaries.count(expr.self) == 0) + { + forced_temporaries.insert(expr.self); + forced_invariant_temporaries.insert(expr.self); + force_recompile(); + + for (auto &dependent : expr.expression_dependencies) + disallow_forwarding_in_expression_chain(get(dependent)); + } +} + +void CompilerGLSL::handle_store_to_invariant_variable(uint32_t store_id, uint32_t value_id) +{ + + + + + + + + if (!has_decoration(store_id, DecorationInvariant)) + return; + + auto *expr = maybe_get(value_id); + if (!expr) + return; + + disallow_forwarding_in_expression_chain(*expr); +} + +void CompilerGLSL::emit_store_statement(uint32_t lhs_expression, uint32_t rhs_expression) +{ + auto rhs = to_pointer_expression(rhs_expression); + + + if (!rhs.empty()) + { + handle_store_to_invariant_variable(lhs_expression, rhs_expression); + + auto lhs = to_dereferenced_expression(lhs_expression); + + + bitcast_to_builtin_store(lhs_expression, rhs, expression_type(rhs_expression)); + + + + + + if (!optimize_read_modify_write(expression_type(rhs_expression), lhs, rhs)) + statement(lhs, " = ", rhs, ";"); + register_write(lhs_expression); + } +} + +uint32_t CompilerGLSL::get_integer_width_for_instruction(const Instruction &instr) const +{ + if (instr.length < 3) + return 32; + + auto *ops = stream(instr); + + switch (instr.op) + { + case OpSConvert: + case OpConvertSToF: + case OpUConvert: + case OpConvertUToF: + case OpIEqual: + case OpINotEqual: + case OpSLessThan: + case OpSLessThanEqual: + case OpSGreaterThan: + case OpSGreaterThanEqual: + case OpULessThan: + case OpULessThanEqual: + 
case OpUGreaterThan: + case OpUGreaterThanEqual: + return expression_type(ops[2]).width; + + default: + { + + auto *type = maybe_get(ops[0]); + if (type && type_is_integral(*type)) + return type->width; + else + return 32; + } + } +} + +uint32_t CompilerGLSL::get_integer_width_for_glsl_instruction(GLSLstd450 op, const uint32_t *ops, uint32_t length) const +{ + if (length < 1) + return 32; + + switch (op) + { + case GLSLstd450SAbs: + case GLSLstd450SSign: + case GLSLstd450UMin: + case GLSLstd450SMin: + case GLSLstd450UMax: + case GLSLstd450SMax: + case GLSLstd450UClamp: + case GLSLstd450SClamp: + case GLSLstd450FindSMsb: + case GLSLstd450FindUMsb: + return expression_type(ops[0]).width; + + default: + { + + return 32; + } + } +} + +void CompilerGLSL::emit_instruction(const Instruction &instruction) +{ + auto ops = stream(instruction); + auto opcode = static_cast(instruction.op); + uint32_t length = instruction.length; + +#define GLSL_BOP(op) emit_binary_op(ops[0], ops[1], ops[2], ops[3], #op) +#define GLSL_BOP_CAST(op, type) \ + emit_binary_op_cast(ops[0], ops[1], ops[2], ops[3], #op, type, opcode_is_sign_invariant(opcode)) +#define GLSL_UOP(op) emit_unary_op(ops[0], ops[1], ops[2], #op) +#define GLSL_QFOP(op) emit_quaternary_func_op(ops[0], ops[1], ops[2], ops[3], ops[4], ops[5], #op) +#define GLSL_TFOP(op) emit_trinary_func_op(ops[0], ops[1], ops[2], ops[3], ops[4], #op) +#define GLSL_BFOP(op) emit_binary_func_op(ops[0], ops[1], ops[2], ops[3], #op) +#define GLSL_BFOP_CAST(op, type) \ + emit_binary_func_op_cast(ops[0], ops[1], ops[2], ops[3], #op, type, opcode_is_sign_invariant(opcode)) +#define GLSL_BFOP(op) emit_binary_func_op(ops[0], ops[1], ops[2], ops[3], #op) +#define GLSL_UFOP(op) emit_unary_func_op(ops[0], ops[1], ops[2], #op) + + + uint32_t integer_width = get_integer_width_for_instruction(instruction); + auto int_type = to_signed_basetype(integer_width); + auto uint_type = to_unsigned_basetype(integer_width); + + switch (opcode) + { + + case OpLoad: + { + uint32_t result_type = ops[0]; + uint32_t id = ops[1]; + uint32_t ptr = ops[2]; + + flush_variable_declaration(ptr); + + + + + bool forward = should_forward(ptr) && forced_temporaries.find(id) == end(forced_temporaries); + + + bool need_transpose = false; + bool old_need_transpose = false; + + auto *ptr_expression = maybe_get(ptr); + + if (forward) + { + + + if (ptr_expression && ptr_expression->need_transpose) + { + old_need_transpose = true; + ptr_expression->need_transpose = false; + need_transpose = true; + } + else if (is_non_native_row_major_matrix(ptr)) + need_transpose = true; + } + + + + + string expr; + + bool is_packed = has_extended_decoration(ptr, SPIRVCrossDecorationPhysicalTypePacked); + bool is_remapped = has_extended_decoration(ptr, SPIRVCrossDecorationPhysicalTypeID); + if (forward || (!is_packed && !is_remapped)) + { + + expr = to_dereferenced_expression(ptr, false); + } + else + { + + + expr = to_unpacked_expression(ptr); + } + + + bitcast_from_builtin_load(ptr, expr, get(result_type)); + + + + + unroll_array_from_complex_load(id, ptr, expr); + + auto &type = get(result_type); + + + if (has_decoration(id, DecorationNonUniformEXT) || has_decoration(ptr, DecorationNonUniformEXT)) + { + propagate_nonuniform_qualifier(ptr); + convert_non_uniform_expression(type, expr); + } + + if (forward && ptr_expression) + ptr_expression->need_transpose = old_need_transpose; + + + + + bool usage_tracking = ptr_expression && flattened_buffer_blocks.count(ptr_expression->loaded_from) != 0 && + (type.basetype == SPIRType::Struct 
|| (type.columns > 1)); + + SPIRExpression *e = nullptr; + if (!backend.array_is_value_type && !type.array.empty() && !forward) + { + + + + e = &emit_uninitialized_temporary_expression(result_type, id); + emit_array_copy(to_expression(id), ptr, StorageClassFunction, get_backing_variable_storage(ptr)); + } + else + e = &emit_op(result_type, id, expr, forward, !usage_tracking); + + e->need_transpose = need_transpose; + register_read(id, ptr, forward); + + if (forward) + { + + if (has_extended_decoration(ptr, SPIRVCrossDecorationPhysicalTypePacked)) + set_extended_decoration(id, SPIRVCrossDecorationPhysicalTypePacked); + if (has_extended_decoration(ptr, SPIRVCrossDecorationPhysicalTypeID)) + { + set_extended_decoration(id, SPIRVCrossDecorationPhysicalTypeID, + get_extended_decoration(ptr, SPIRVCrossDecorationPhysicalTypeID)); + } + } + else + { + + unset_extended_decoration(id, SPIRVCrossDecorationPhysicalTypePacked); + unset_extended_decoration(id, SPIRVCrossDecorationPhysicalTypeID); + } + + inherit_expression_dependencies(id, ptr); + if (forward) + add_implied_read_expression(*e, ptr); + break; + } + + case OpInBoundsAccessChain: + case OpAccessChain: + case OpPtrAccessChain: + { + auto *var = maybe_get(ops[2]); + if (var) + flush_variable_declaration(var->self); + + + + AccessChainMeta meta; + bool ptr_chain = opcode == OpPtrAccessChain; + auto e = access_chain(ops[2], &ops[3], length - 3, get(ops[0]), &meta, ptr_chain); + + auto &expr = set(ops[1], move(e), ops[0], should_forward(ops[2])); + + auto *backing_variable = maybe_get_backing_variable(ops[2]); + expr.loaded_from = backing_variable ? backing_variable->self : ID(ops[2]); + expr.need_transpose = meta.need_transpose; + expr.access_chain = true; + + + if (meta.storage_is_packed) + set_extended_decoration(ops[1], SPIRVCrossDecorationPhysicalTypePacked); + if (meta.storage_physical_type != 0) + set_extended_decoration(ops[1], SPIRVCrossDecorationPhysicalTypeID, meta.storage_physical_type); + if (meta.storage_is_invariant) + set_decoration(ops[1], DecorationInvariant); + + + + + forwarded_temporaries.insert(ops[1]); + + suppressed_usage_tracking.insert(ops[1]); + + for (uint32_t i = 2; i < length; i++) + { + inherit_expression_dependencies(ops[1], ops[i]); + add_implied_read_expression(expr, ops[i]); + } + + + + if (expr.expression_dependencies.empty()) + forwarded_temporaries.erase(ops[1]); + + break; + } + + case OpStore: + { + auto *var = maybe_get(ops[0]); + + if (has_decoration(ops[0], DecorationNonUniformEXT)) + propagate_nonuniform_qualifier(ops[0]); + + if (var && var->statically_assigned) + var->static_expression = ops[1]; + else if (var && var->loop_variable && !var->loop_variable_enable) + var->static_expression = ops[1]; + else if (var && var->remapped_variable) + { + + } + else if (var && flattened_structs.count(ops[0])) + { + store_flattened_struct(*var, ops[1]); + register_write(ops[0]); + } + else + { + emit_store_statement(ops[0], ops[1]); + } + + + + if (expression_type(ops[1]).pointer) + register_write(ops[1]); + break; + } + + case OpArrayLength: + { + uint32_t result_type = ops[0]; + uint32_t id = ops[1]; + auto e = access_chain_internal(ops[2], &ops[3], length - 3, ACCESS_CHAIN_INDEX_IS_LITERAL_BIT, nullptr); + set(id, join(type_to_glsl(get(result_type)), "(", e, ".length())"), result_type, + true); + break; + } + + + case OpFunctionCall: + { + uint32_t result_type = ops[0]; + uint32_t id = ops[1]; + uint32_t func = ops[2]; + const auto *arg = &ops[3]; + length -= 3; + + auto &callee = get(func); + auto 
&return_type = get(callee.return_type); + bool pure = function_is_pure(callee); + + bool callee_has_out_variables = false; + bool emit_return_value_as_argument = false; + + + for (uint32_t i = 0; i < length; i++) + { + if (callee.arguments[i].write_count) + { + register_call_out_argument(arg[i]); + callee_has_out_variables = true; + } + + flush_variable_declaration(arg[i]); + } + + if (!return_type.array.empty() && !backend.can_return_array) + { + callee_has_out_variables = true; + emit_return_value_as_argument = true; + } + + if (!pure) + register_impure_function_call(); + + string funexpr; + SmallVector arglist; + funexpr += to_name(func) + "("; + + if (emit_return_value_as_argument) + { + statement(type_to_glsl(return_type), " ", to_name(id), type_to_array_glsl(return_type), ";"); + arglist.push_back(to_name(id)); + } + + for (uint32_t i = 0; i < length; i++) + { + + + if (skip_argument(arg[i])) + continue; + + arglist.push_back(to_func_call_arg(callee.arguments[i], arg[i])); + } + + for (auto &combined : callee.combined_parameters) + { + auto image_id = combined.global_image ? combined.image_id : VariableID(arg[combined.image_id]); + auto sampler_id = combined.global_sampler ? combined.sampler_id : VariableID(arg[combined.sampler_id]); + arglist.push_back(to_combined_image_sampler(image_id, sampler_id)); + } + + append_global_func_args(callee, length, arglist); + + funexpr += merge(arglist); + funexpr += ")"; + + + check_function_call_constraints(arg, length); + + if (return_type.basetype != SPIRType::Void) + { + + + + + + + + bool forward = args_will_forward(id, arg, length, pure) && !callee_has_out_variables && pure && + (forced_temporaries.find(id) == end(forced_temporaries)); + + if (emit_return_value_as_argument) + { + statement(funexpr, ";"); + set(id, to_name(id), result_type, true); + } + else + emit_op(result_type, id, funexpr, forward); + + + + for (uint32_t i = 0; i < length; i++) + register_read(id, arg[i], forward); + + + + if (forward) + register_global_read_dependencies(callee, id); + } + else + statement(funexpr, ";"); + + break; + } + + + case OpCompositeConstruct: + { + uint32_t result_type = ops[0]; + uint32_t id = ops[1]; + const auto *const elems = &ops[2]; + length -= 2; + + bool forward = true; + for (uint32_t i = 0; i < length; i++) + forward = forward && should_forward(elems[i]); + + auto &out_type = get(result_type); + auto *in_type = length > 0 ? 
&expression_type(elems[0]) : nullptr; + + + + bool composite = !out_type.array.empty() || out_type.basetype == SPIRType::Struct; + + bool splat = false; + bool swizzle_splat = false; + + if (in_type) + { + splat = in_type->vecsize == 1 && in_type->columns == 1 && !composite && backend.use_constructor_splatting; + swizzle_splat = in_type->vecsize == 1 && in_type->columns == 1 && backend.can_swizzle_scalar; + + if (ir.ids[elems[0]].get_type() == TypeConstant && !type_is_floating_point(*in_type)) + { + + swizzle_splat = false; + } + } + + if (splat || swizzle_splat) + { + uint32_t input = elems[0]; + for (uint32_t i = 0; i < length; i++) + { + if (input != elems[i]) + { + splat = false; + swizzle_splat = false; + } + } + } + + if (out_type.basetype == SPIRType::Struct && !backend.can_declare_struct_inline) + forward = false; + if (!out_type.array.empty() && !backend.can_declare_arrays_inline) + forward = false; + if (type_is_empty(out_type) && !backend.supports_empty_struct) + forward = false; + + string constructor_op; + if (backend.use_initializer_list && composite) + { + bool needs_trailing_tracket = false; + + + if (backend.use_typed_initializer_list && out_type.basetype == SPIRType::Struct && out_type.array.empty()) + constructor_op += type_to_glsl_constructor(get(result_type)); + else if (backend.use_typed_initializer_list && !out_type.array.empty()) + { + + constructor_op += type_to_glsl_constructor(get(result_type)) + "("; + needs_trailing_tracket = true; + } + constructor_op += "{ "; + + if (type_is_empty(out_type) && !backend.supports_empty_struct) + constructor_op += "0"; + else if (splat) + constructor_op += to_unpacked_expression(elems[0]); + else + constructor_op += build_composite_combiner(result_type, elems, length); + constructor_op += " }"; + if (needs_trailing_tracket) + constructor_op += ")"; + } + else if (swizzle_splat && !composite) + { + constructor_op = remap_swizzle(get(result_type), 1, to_unpacked_expression(elems[0])); + } + else + { + constructor_op = type_to_glsl_constructor(get(result_type)) + "("; + if (type_is_empty(out_type) && !backend.supports_empty_struct) + constructor_op += "0"; + else if (splat) + constructor_op += to_unpacked_expression(elems[0]); + else + constructor_op += build_composite_combiner(result_type, elems, length); + constructor_op += ")"; + } + + if (!constructor_op.empty()) + { + emit_op(result_type, id, constructor_op, forward); + for (uint32_t i = 0; i < length; i++) + inherit_expression_dependencies(id, elems[i]); + } + break; + } + + case OpVectorInsertDynamic: + { + uint32_t result_type = ops[0]; + uint32_t id = ops[1]; + uint32_t vec = ops[2]; + uint32_t comp = ops[3]; + uint32_t index = ops[4]; + + flush_variable_declaration(vec); + + + statement(declare_temporary(result_type, id), to_expression(vec), ";"); + set(id, to_name(id), result_type, true); + auto chain = access_chain_internal(id, &index, 1, 0, nullptr); + statement(chain, " = ", to_expression(comp), ";"); + break; + } + + case OpVectorExtractDynamic: + { + uint32_t result_type = ops[0]; + uint32_t id = ops[1]; + + auto expr = access_chain_internal(ops[2], &ops[3], 1, 0, nullptr); + emit_op(result_type, id, expr, should_forward(ops[2])); + inherit_expression_dependencies(id, ops[2]); + inherit_expression_dependencies(id, ops[3]); + break; + } + + case OpCompositeExtract: + { + uint32_t result_type = ops[0]; + uint32_t id = ops[1]; + length -= 3; + + auto &type = get(result_type); + + + bool allow_base_expression = forced_temporaries.find(id) == end(forced_temporaries); + + 
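+ // [editor's illustration, not part of the upstream patch] "Base
+ // expression" forwarding means a scalar extract such as
+ //   %x = OpCompositeExtract %float %v 1
+ // is emitted as just the swizzle ".y" with a base_expression link back
+ // to %v instead of a temporary; build_composite_combiner() above can
+ // then merge such swizzles back together. The checks below disable this
+ // for struct/array composites, packed scalars and row-major matrices,
+ // where a plain swizzle would be wrong.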
+ auto &composite_type = expression_type(ops[2]); + if (composite_type.basetype == SPIRType::Struct || !composite_type.array.empty()) + allow_base_expression = false; + + + if (has_extended_decoration(ops[2], SPIRVCrossDecorationPhysicalTypePacked)) + allow_base_expression = false; + + + + if (is_non_native_row_major_matrix(ops[2])) + allow_base_expression = false; + + AccessChainMeta meta; + SPIRExpression *e = nullptr; + + + if (allow_base_expression && should_forward(ops[2]) && type.vecsize == 1 && type.columns == 1 && length == 1) + { + + + + + + + + + + + + + auto expr = access_chain_internal(ops[2], &ops[3], length, + ACCESS_CHAIN_INDEX_IS_LITERAL_BIT | ACCESS_CHAIN_CHAIN_ONLY_BIT, &meta); + e = &emit_op(result_type, id, expr, true, should_suppress_usage_tracking(ops[2])); + inherit_expression_dependencies(id, ops[2]); + e->base_expression = ops[2]; + } + else + { + auto expr = access_chain_internal(ops[2], &ops[3], length, ACCESS_CHAIN_INDEX_IS_LITERAL_BIT, &meta); + e = &emit_op(result_type, id, expr, should_forward(ops[2]), should_suppress_usage_tracking(ops[2])); + inherit_expression_dependencies(id, ops[2]); + } + + + + + e->need_transpose = meta.need_transpose; + if (meta.storage_is_packed) + set_extended_decoration(id, SPIRVCrossDecorationPhysicalTypePacked); + if (meta.storage_physical_type != 0) + set_extended_decoration(id, SPIRVCrossDecorationPhysicalTypeID, meta.storage_physical_type); + if (meta.storage_is_invariant) + set_decoration(id, DecorationInvariant); + + break; + } + + case OpCompositeInsert: + { + uint32_t result_type = ops[0]; + uint32_t id = ops[1]; + uint32_t obj = ops[2]; + uint32_t composite = ops[3]; + const auto *elems = &ops[4]; + length -= 4; + + flush_variable_declaration(composite); + + + statement(declare_temporary(result_type, id), to_expression(composite), ";"); + set(id, to_name(id), result_type, true); + auto chain = access_chain_internal(id, elems, length, ACCESS_CHAIN_INDEX_IS_LITERAL_BIT, nullptr); + statement(chain, " = ", to_expression(obj), ";"); + + break; + } + + case OpCopyMemory: + { + uint32_t lhs = ops[0]; + uint32_t rhs = ops[1]; + if (lhs != rhs) + { + flush_variable_declaration(lhs); + flush_variable_declaration(rhs); + statement(to_expression(lhs), " = ", to_expression(rhs), ";"); + register_write(lhs); + } + break; + } + + case OpCopyObject: + { + uint32_t result_type = ops[0]; + uint32_t id = ops[1]; + uint32_t rhs = ops[2]; + bool pointer = get(result_type).pointer; + + auto *chain = maybe_get(rhs); + if (chain) + { + + auto &e = set(id, *chain); + e.self = id; + } + else if (expression_is_lvalue(rhs) && !pointer) + { + + + statement(declare_temporary(result_type, id), to_unpacked_expression(rhs), ";"); + set(id, to_name(id), result_type, true); + } + else + { + + + + auto &e = set(id, to_expression(rhs), result_type, true); + if (pointer) + { + auto *var = maybe_get_backing_variable(rhs); + e.loaded_from = var ? 
var->self : ID(0); + } + + + auto *rhs_expr = maybe_get(rhs); + if (rhs_expr) + { + e.implied_read_expressions = rhs_expr->implied_read_expressions; + e.expression_dependencies = rhs_expr->expression_dependencies; + } + } + break; + } + + case OpVectorShuffle: + { + uint32_t result_type = ops[0]; + uint32_t id = ops[1]; + uint32_t vec0 = ops[2]; + uint32_t vec1 = ops[3]; + const auto *elems = &ops[4]; + length -= 4; + + auto &type0 = expression_type(vec0); + + + + bool shuffle = false; + for (uint32_t i = 0; i < length; i++) + if (elems[i] >= type0.vecsize || elems[i] == 0xffffffffu) + shuffle = true; + + + if (!shuffle && has_extended_decoration(vec0, SPIRVCrossDecorationPhysicalTypePacked)) + shuffle = true; + + string expr; + bool should_fwd, trivial_forward; + + if (shuffle) + { + should_fwd = should_forward(vec0) && should_forward(vec1); + trivial_forward = should_suppress_usage_tracking(vec0) && should_suppress_usage_tracking(vec1); + + + SmallVector args; + for (uint32_t i = 0; i < length; i++) + { + if (elems[i] == 0xffffffffu) + { + + + + SPIRConstant c; + c.constant_type = type0.parent_type; + assert(type0.parent_type != ID(0)); + args.push_back(constant_expression(c)); + } + else if (elems[i] >= type0.vecsize) + args.push_back(to_extract_component_expression(vec1, elems[i] - type0.vecsize)); + else + args.push_back(to_extract_component_expression(vec0, elems[i])); + } + expr += join(type_to_glsl_constructor(get(result_type)), "(", merge(args), ")"); + } + else + { + should_fwd = should_forward(vec0); + trivial_forward = should_suppress_usage_tracking(vec0); + + + + expr += to_enclosed_unpacked_expression(vec0); + expr += "."; + for (uint32_t i = 0; i < length; i++) + { + assert(elems[i] != 0xffffffffu); + expr += index_to_swizzle(elems[i]); + } + + if (backend.swizzle_is_function && length > 1) + expr += "()"; + } + + + + + emit_op(result_type, id, expr, should_fwd, trivial_forward); + + inherit_expression_dependencies(id, vec0); + if (vec0 != vec1) + inherit_expression_dependencies(id, vec1); + break; + } + + + case OpIsNan: + GLSL_UFOP(isnan); + break; + + case OpIsInf: + GLSL_UFOP(isinf); + break; + + case OpSNegate: + case OpFNegate: + GLSL_UOP(-); + break; + + case OpIAdd: + { + + auto type = get(ops[0]).basetype; + GLSL_BOP_CAST(+, type); + break; + } + + case OpFAdd: + GLSL_BOP(+); + break; + + case OpISub: + { + auto type = get(ops[0]).basetype; + GLSL_BOP_CAST(-, type); + break; + } + + case OpFSub: + GLSL_BOP(-); + break; + + case OpIMul: + { + auto type = get(ops[0]).basetype; + GLSL_BOP_CAST(*, type); + break; + } + + case OpVectorTimesMatrix: + case OpMatrixTimesVector: + { + + auto *e = maybe_get(ops[opcode == OpMatrixTimesVector ? 
2 : 3]); + if (e && e->need_transpose) + { + e->need_transpose = false; + string expr; + + if (opcode == OpMatrixTimesVector) + expr = join(to_enclosed_unpacked_expression(ops[3]), " * ", + enclose_expression(to_unpacked_row_major_matrix_expression(ops[2]))); + else + expr = join(enclose_expression(to_unpacked_row_major_matrix_expression(ops[3])), " * ", + to_enclosed_unpacked_expression(ops[2])); + + bool forward = should_forward(ops[2]) && should_forward(ops[3]); + emit_op(ops[0], ops[1], expr, forward); + e->need_transpose = true; + inherit_expression_dependencies(ops[1], ops[2]); + inherit_expression_dependencies(ops[1], ops[3]); + } + else + GLSL_BOP(*); + break; + } + + case OpMatrixTimesMatrix: + { + auto *a = maybe_get(ops[2]); + auto *b = maybe_get(ops[3]); + + + + if (a && b && a->need_transpose && b->need_transpose) + { + a->need_transpose = false; + b->need_transpose = false; + auto expr = join(enclose_expression(to_unpacked_row_major_matrix_expression(ops[3])), " * ", + enclose_expression(to_unpacked_row_major_matrix_expression(ops[2]))); + bool forward = should_forward(ops[2]) && should_forward(ops[3]); + auto &e = emit_op(ops[0], ops[1], expr, forward); + e.need_transpose = true; + a->need_transpose = true; + b->need_transpose = true; + inherit_expression_dependencies(ops[1], ops[2]); + inherit_expression_dependencies(ops[1], ops[3]); + } + else + GLSL_BOP(*); + + break; + } + + case OpFMul: + case OpMatrixTimesScalar: + case OpVectorTimesScalar: + GLSL_BOP(*); + break; + + case OpOuterProduct: + GLSL_BFOP(outerProduct); + break; + + case OpDot: + GLSL_BFOP(dot); + break; + + case OpTranspose: + GLSL_UFOP(transpose); + break; + + case OpSRem: + { + uint32_t result_type = ops[0]; + uint32_t result_id = ops[1]; + uint32_t op0 = ops[2]; + uint32_t op1 = ops[3]; + + + bool forward = should_forward(op0) && should_forward(op1); + auto expr = join(to_enclosed_expression(op0), " - ", to_enclosed_expression(op1), " * ", "(", + to_enclosed_expression(op0), " / ", to_enclosed_expression(op1), ")"); + + emit_op(result_type, result_id, expr, forward); + inherit_expression_dependencies(result_id, op0); + inherit_expression_dependencies(result_id, op1); + break; + } + + case OpSDiv: + GLSL_BOP_CAST(/, int_type); + break; + + case OpUDiv: + GLSL_BOP_CAST(/, uint_type); + break; + + case OpIAddCarry: + case OpISubBorrow: + { + if (options.es && options.version < 310) + SPIRV_CROSS_THROW("Extended arithmetic is only available from ESSL 310."); + else if (!options.es && options.version < 400) + SPIRV_CROSS_THROW("Extended arithmetic is only available from GLSL 400."); + + uint32_t result_type = ops[0]; + uint32_t result_id = ops[1]; + uint32_t op0 = ops[2]; + uint32_t op1 = ops[3]; + auto &type = get(result_type); + emit_uninitialized_temporary_expression(result_type, result_id); + const char *op = opcode == OpIAddCarry ? 
"uaddCarry" : "usubBorrow"; + + statement(to_expression(result_id), ".", to_member_name(type, 0), " = ", op, "(", to_expression(op0), ", ", + to_expression(op1), ", ", to_expression(result_id), ".", to_member_name(type, 1), ");"); + break; + } + + case OpUMulExtended: + case OpSMulExtended: + { + if (options.es && options.version < 310) + SPIRV_CROSS_THROW("Extended arithmetic is only available from ESSL 310."); + else if (!options.es && options.version < 400) + SPIRV_CROSS_THROW("Extended arithmetic is only available from GLSL 4000."); + + uint32_t result_type = ops[0]; + uint32_t result_id = ops[1]; + uint32_t op0 = ops[2]; + uint32_t op1 = ops[3]; + auto &type = get(result_type); + emit_uninitialized_temporary_expression(result_type, result_id); + const char *op = opcode == OpUMulExtended ? "umulExtended" : "imulExtended"; + + statement(op, "(", to_expression(op0), ", ", to_expression(op1), ", ", to_expression(result_id), ".", + to_member_name(type, 1), ", ", to_expression(result_id), ".", to_member_name(type, 0), ");"); + break; + } + + case OpFDiv: + GLSL_BOP(/); + break; + + case OpShiftRightLogical: + GLSL_BOP_CAST(>>, uint_type); + break; + + case OpShiftRightArithmetic: + GLSL_BOP_CAST(>>, int_type); + break; + + case OpShiftLeftLogical: + { + auto type = get(ops[0]).basetype; + GLSL_BOP_CAST(<<, type); + break; + } + + case OpBitwiseOr: + { + auto type = get(ops[0]).basetype; + GLSL_BOP_CAST(|, type); + break; + } + + case OpBitwiseXor: + { + auto type = get(ops[0]).basetype; + GLSL_BOP_CAST(^, type); + break; + } + + case OpBitwiseAnd: + { + auto type = get(ops[0]).basetype; + GLSL_BOP_CAST(&, type); + break; + } + + case OpNot: + GLSL_UOP(~); + break; + + case OpUMod: + GLSL_BOP_CAST(%, uint_type); + break; + + case OpSMod: + GLSL_BOP_CAST(%, int_type); + break; + + case OpFMod: + GLSL_BFOP(mod); + break; + + case OpFRem: + { + if (is_legacy()) + SPIRV_CROSS_THROW("OpFRem requires trunc() and is only supported on non-legacy targets. 
A workaround is " + "needed for legacy."); + + uint32_t result_type = ops[0]; + uint32_t result_id = ops[1]; + uint32_t op0 = ops[2]; + uint32_t op1 = ops[3]; + + + bool forward = should_forward(op0) && should_forward(op1); + auto expr = join(to_enclosed_expression(op0), " - ", to_enclosed_expression(op1), " * ", "trunc(", + to_enclosed_expression(op0), " / ", to_enclosed_expression(op1), ")"); + + emit_op(result_type, result_id, expr, forward); + inherit_expression_dependencies(result_id, op0); + inherit_expression_dependencies(result_id, op1); + break; + } + + + case OpAny: + GLSL_UFOP(any); + break; + + case OpAll: + GLSL_UFOP(all); + break; + + case OpSelect: + emit_mix_op(ops[0], ops[1], ops[4], ops[3], ops[2]); + break; + + case OpLogicalOr: + { + + auto result_type = ops[0]; + auto id = ops[1]; + auto &type = get(result_type); + + if (type.vecsize > 1) + emit_unrolled_binary_op(result_type, id, ops[2], ops[3], "||", false, SPIRType::Unknown); + else + GLSL_BOP(||); + break; + } + + case OpLogicalAnd: + { + + auto result_type = ops[0]; + auto id = ops[1]; + auto &type = get(result_type); + + if (type.vecsize > 1) + emit_unrolled_binary_op(result_type, id, ops[2], ops[3], "&&", false, SPIRType::Unknown); + else + GLSL_BOP(&&); + break; + } + + case OpLogicalNot: + { + auto &type = get(ops[0]); + if (type.vecsize > 1) + GLSL_UFOP(not); + else + GLSL_UOP(!); + break; + } + + case OpIEqual: + { + if (expression_type(ops[2]).vecsize > 1) + GLSL_BFOP_CAST(equal, int_type); + else + GLSL_BOP_CAST(==, int_type); + break; + } + + case OpLogicalEqual: + case OpFOrdEqual: + { + if (expression_type(ops[2]).vecsize > 1) + GLSL_BFOP(equal); + else + GLSL_BOP(==); + break; + } + + case OpINotEqual: + { + if (expression_type(ops[2]).vecsize > 1) + GLSL_BFOP_CAST(notEqual, int_type); + else + GLSL_BOP_CAST(!=, int_type); + break; + } + + case OpLogicalNotEqual: + case OpFOrdNotEqual: + { + if (expression_type(ops[2]).vecsize > 1) + GLSL_BFOP(notEqual); + else + GLSL_BOP(!=); + break; + } + + case OpUGreaterThan: + case OpSGreaterThan: + { + auto type = opcode == OpUGreaterThan ? uint_type : int_type; + if (expression_type(ops[2]).vecsize > 1) + GLSL_BFOP_CAST(greaterThan, type); + else + GLSL_BOP_CAST(>, type); + break; + } + + case OpFOrdGreaterThan: + { + if (expression_type(ops[2]).vecsize > 1) + GLSL_BFOP(greaterThan); + else + GLSL_BOP(>); + break; + } + + case OpUGreaterThanEqual: + case OpSGreaterThanEqual: + { + auto type = opcode == OpUGreaterThanEqual ? uint_type : int_type; + if (expression_type(ops[2]).vecsize > 1) + GLSL_BFOP_CAST(greaterThanEqual, type); + else + GLSL_BOP_CAST(>=, type); + break; + } + + case OpFOrdGreaterThanEqual: + { + if (expression_type(ops[2]).vecsize > 1) + GLSL_BFOP(greaterThanEqual); + else + GLSL_BOP(>=); + break; + } + + case OpULessThan: + case OpSLessThan: + { + auto type = opcode == OpULessThan ? uint_type : int_type; + if (expression_type(ops[2]).vecsize > 1) + GLSL_BFOP_CAST(lessThan, type); + else + GLSL_BOP_CAST(<, type); + break; + } + + case OpFOrdLessThan: + { + if (expression_type(ops[2]).vecsize > 1) + GLSL_BFOP(lessThan); + else + GLSL_BOP(<); + break; + } + + case OpULessThanEqual: + case OpSLessThanEqual: + { + auto type = opcode == OpULessThanEqual ? 
uint_type : int_type; + if (expression_type(ops[2]).vecsize > 1) + GLSL_BFOP_CAST(lessThanEqual, type); + else + GLSL_BOP_CAST(<=, type); + break; + } + + case OpFOrdLessThanEqual: + { + if (expression_type(ops[2]).vecsize > 1) + GLSL_BFOP(lessThanEqual); + else + GLSL_BOP(<=); + break; + } + + + case OpSConvert: + case OpConvertSToF: + case OpUConvert: + case OpConvertUToF: + { + auto input_type = opcode == OpSConvert || opcode == OpConvertSToF ? int_type : uint_type; + uint32_t result_type = ops[0]; + uint32_t id = ops[1]; + + auto &type = get(result_type); + auto &arg_type = expression_type(ops[2]); + auto func = type_to_glsl_constructor(type); + + + + if (arg_type.width < type.width) + emit_unary_func_op_cast(result_type, id, ops[2], func.c_str(), input_type, type.basetype); + else + emit_unary_func_op(result_type, id, ops[2], func.c_str()); + break; + } + + case OpConvertFToU: + case OpConvertFToS: + { + + uint32_t result_type = ops[0]; + uint32_t id = ops[1]; + auto &type = get(result_type); + auto expected_type = type; + auto &float_type = expression_type(ops[2]); + expected_type.basetype = + opcode == OpConvertFToS ? to_signed_basetype(type.width) : to_unsigned_basetype(type.width); + + auto func = type_to_glsl_constructor(expected_type); + emit_unary_func_op_cast(result_type, id, ops[2], func.c_str(), float_type.basetype, expected_type.basetype); + break; + } + + case OpFConvert: + { + uint32_t result_type = ops[0]; + uint32_t id = ops[1]; + + auto func = type_to_glsl_constructor(get(result_type)); + emit_unary_func_op(result_type, id, ops[2], func.c_str()); + break; + } + + case OpBitcast: + { + uint32_t result_type = ops[0]; + uint32_t id = ops[1]; + uint32_t arg = ops[2]; + + auto op = bitcast_glsl_op(get(result_type), expression_type(arg)); + emit_unary_func_op(result_type, id, arg, op.c_str()); + break; + } + + case OpQuantizeToF16: + { + uint32_t result_type = ops[0]; + uint32_t id = ops[1]; + uint32_t arg = ops[2]; + + string op; + auto &type = get(result_type); + + switch (type.vecsize) + { + case 1: + op = join("unpackHalf2x16(packHalf2x16(vec2(", to_expression(arg), "))).x"); + break; + case 2: + op = join("unpackHalf2x16(packHalf2x16(", to_expression(arg), "))"); + break; + case 3: + { + auto op0 = join("unpackHalf2x16(packHalf2x16(", to_expression(arg), ".xy))"); + auto op1 = join("unpackHalf2x16(packHalf2x16(", to_expression(arg), ".zz)).x"); + op = join("vec3(", op0, ", ", op1, ")"); + break; + } + case 4: + { + auto op0 = join("unpackHalf2x16(packHalf2x16(", to_expression(arg), ".xy))"); + auto op1 = join("unpackHalf2x16(packHalf2x16(", to_expression(arg), ".zw))"); + op = join("vec4(", op0, ", ", op1, ")"); + break; + } + default: + SPIRV_CROSS_THROW("Illegal argument to OpQuantizeToF16."); + } + + emit_op(result_type, id, op, should_forward(arg)); + inherit_expression_dependencies(id, arg); + break; + } + + + case OpDPdx: + GLSL_UFOP(dFdx); + if (is_legacy_es()) + require_extension_internal("GL_OES_standard_derivatives"); + register_control_dependent_expression(ops[1]); + break; + + case OpDPdy: + GLSL_UFOP(dFdy); + if (is_legacy_es()) + require_extension_internal("GL_OES_standard_derivatives"); + register_control_dependent_expression(ops[1]); + break; + + case OpDPdxFine: + GLSL_UFOP(dFdxFine); + if (options.es) + { + SPIRV_CROSS_THROW("GL_ARB_derivative_control is unavailable in OpenGL ES."); + } + if (options.version < 450) + require_extension_internal("GL_ARB_derivative_control"); + register_control_dependent_expression(ops[1]); + break; + + case OpDPdyFine: + 
GLSL_UFOP(dFdyFine); + if (options.es) + { + SPIRV_CROSS_THROW("GL_ARB_derivative_control is unavailable in OpenGL ES."); + } + if (options.version < 450) + require_extension_internal("GL_ARB_derivative_control"); + register_control_dependent_expression(ops[1]); + break; + + case OpDPdxCoarse: + if (options.es) + { + SPIRV_CROSS_THROW("GL_ARB_derivative_control is unavailable in OpenGL ES."); + } + GLSL_UFOP(dFdxCoarse); + if (options.version < 450) + require_extension_internal("GL_ARB_derivative_control"); + register_control_dependent_expression(ops[1]); + break; + + case OpDPdyCoarse: + GLSL_UFOP(dFdyCoarse); + if (options.es) + { + SPIRV_CROSS_THROW("GL_ARB_derivative_control is unavailable in OpenGL ES."); + } + if (options.version < 450) + require_extension_internal("GL_ARB_derivative_control"); + register_control_dependent_expression(ops[1]); + break; + + case OpFwidth: + GLSL_UFOP(fwidth); + if (is_legacy_es()) + require_extension_internal("GL_OES_standard_derivatives"); + register_control_dependent_expression(ops[1]); + break; + + case OpFwidthCoarse: + GLSL_UFOP(fwidthCoarse); + if (options.es) + { + SPIRV_CROSS_THROW("GL_ARB_derivative_control is unavailable in OpenGL ES."); + } + if (options.version < 450) + require_extension_internal("GL_ARB_derivative_control"); + register_control_dependent_expression(ops[1]); + break; + + case OpFwidthFine: + GLSL_UFOP(fwidthFine); + if (options.es) + { + SPIRV_CROSS_THROW("GL_ARB_derivative_control is unavailable in OpenGL ES."); + } + if (options.version < 450) + require_extension_internal("GL_ARB_derivative_control"); + register_control_dependent_expression(ops[1]); + break; + + + case OpBitFieldInsert: + { + emit_bitfield_insert_op(ops[0], ops[1], ops[2], ops[3], ops[4], ops[5], "bitfieldInsert", SPIRType::Int); + break; + } + + case OpBitFieldSExtract: + { + emit_trinary_func_op_bitextract(ops[0], ops[1], ops[2], ops[3], ops[4], "bitfieldExtract", int_type, int_type, + SPIRType::Int, SPIRType::Int); + break; + } + + case OpBitFieldUExtract: + { + emit_trinary_func_op_bitextract(ops[0], ops[1], ops[2], ops[3], ops[4], "bitfieldExtract", uint_type, uint_type, + SPIRType::Int, SPIRType::Int); + break; + } + + case OpBitReverse: + + GLSL_UFOP(bitfieldReverse); + break; + + case OpBitCount: + { + auto basetype = expression_type(ops[2]).basetype; + emit_unary_func_op_cast(ops[0], ops[1], ops[2], "bitCount", basetype, int_type); + break; + } + + + case OpAtomicExchange: + { + uint32_t result_type = ops[0]; + uint32_t id = ops[1]; + uint32_t ptr = ops[2]; + + uint32_t val = ops[5]; + const char *op = check_atomic_image(ptr) ? "imageAtomicExchange" : "atomicExchange"; + forced_temporaries.insert(id); + emit_binary_func_op(result_type, id, ptr, val, op); + flush_all_atomic_capable_variables(); + break; + } + + case OpAtomicCompareExchange: + { + uint32_t result_type = ops[0]; + uint32_t id = ops[1]; + uint32_t ptr = ops[2]; + uint32_t val = ops[6]; + uint32_t comp = ops[7]; + const char *op = check_atomic_image(ptr) ? 
"imageAtomicCompSwap" : "atomicCompSwap"; + + forced_temporaries.insert(id); + emit_trinary_func_op(result_type, id, ptr, comp, val, op); + flush_all_atomic_capable_variables(); + break; + } + + case OpAtomicLoad: + flush_all_atomic_capable_variables(); + + + forced_temporaries.insert(ops[1]); + GLSL_UFOP(atomicCounter); + break; + + case OpAtomicStore: + SPIRV_CROSS_THROW("Unsupported opcode OpAtomicStore."); + + case OpAtomicIIncrement: + case OpAtomicIDecrement: + { + forced_temporaries.insert(ops[1]); + auto &type = expression_type(ops[2]); + if (type.storage == StorageClassAtomicCounter) + { + + if (opcode == OpAtomicIIncrement) + GLSL_UFOP(atomicCounterIncrement); + else + GLSL_UFOP(atomicCounterDecrement); + } + else + { + bool atomic_image = check_atomic_image(ops[2]); + bool unsigned_type = (type.basetype == SPIRType::UInt) || + (atomic_image && get(type.image.type).basetype == SPIRType::UInt); + const char *op = atomic_image ? "imageAtomicAdd" : "atomicAdd"; + + const char *increment = nullptr; + if (opcode == OpAtomicIIncrement && unsigned_type) + increment = "1u"; + else if (opcode == OpAtomicIIncrement) + increment = "1"; + else if (unsigned_type) + increment = "uint(-1)"; + else + increment = "-1"; + + emit_op(ops[0], ops[1], join(op, "(", to_expression(ops[2]), ", ", increment, ")"), false); + } + + flush_all_atomic_capable_variables(); + break; + } + + case OpAtomicIAdd: + { + const char *op = check_atomic_image(ops[2]) ? "imageAtomicAdd" : "atomicAdd"; + forced_temporaries.insert(ops[1]); + emit_binary_func_op(ops[0], ops[1], ops[2], ops[5], op); + flush_all_atomic_capable_variables(); + break; + } + + case OpAtomicISub: + { + const char *op = check_atomic_image(ops[2]) ? "imageAtomicAdd" : "atomicAdd"; + forced_temporaries.insert(ops[1]); + auto expr = join(op, "(", to_expression(ops[2]), ", -", to_enclosed_expression(ops[5]), ")"); + emit_op(ops[0], ops[1], expr, should_forward(ops[2]) && should_forward(ops[5])); + flush_all_atomic_capable_variables(); + break; + } + + case OpAtomicSMin: + case OpAtomicUMin: + { + const char *op = check_atomic_image(ops[2]) ? "imageAtomicMin" : "atomicMin"; + forced_temporaries.insert(ops[1]); + emit_binary_func_op(ops[0], ops[1], ops[2], ops[5], op); + flush_all_atomic_capable_variables(); + break; + } + + case OpAtomicSMax: + case OpAtomicUMax: + { + const char *op = check_atomic_image(ops[2]) ? "imageAtomicMax" : "atomicMax"; + forced_temporaries.insert(ops[1]); + emit_binary_func_op(ops[0], ops[1], ops[2], ops[5], op); + flush_all_atomic_capable_variables(); + break; + } + + case OpAtomicAnd: + { + const char *op = check_atomic_image(ops[2]) ? "imageAtomicAnd" : "atomicAnd"; + forced_temporaries.insert(ops[1]); + emit_binary_func_op(ops[0], ops[1], ops[2], ops[5], op); + flush_all_atomic_capable_variables(); + break; + } + + case OpAtomicOr: + { + const char *op = check_atomic_image(ops[2]) ? "imageAtomicOr" : "atomicOr"; + forced_temporaries.insert(ops[1]); + emit_binary_func_op(ops[0], ops[1], ops[2], ops[5], op); + flush_all_atomic_capable_variables(); + break; + } + + case OpAtomicXor: + { + const char *op = check_atomic_image(ops[2]) ? 
"imageAtomicXor" : "atomicXor"; + forced_temporaries.insert(ops[1]); + emit_binary_func_op(ops[0], ops[1], ops[2], ops[5], op); + flush_all_atomic_capable_variables(); + break; + } + + + case OpEmitVertex: + statement("EmitVertex();"); + break; + + case OpEndPrimitive: + statement("EndPrimitive();"); + break; + + case OpEmitStreamVertex: + statement("EmitStreamVertex();"); + break; + + case OpEndStreamPrimitive: + statement("EndStreamPrimitive();"); + break; + + + case OpImageSampleExplicitLod: + case OpImageSampleProjExplicitLod: + case OpImageSampleDrefExplicitLod: + case OpImageSampleProjDrefExplicitLod: + case OpImageSampleImplicitLod: + case OpImageSampleProjImplicitLod: + case OpImageSampleDrefImplicitLod: + case OpImageSampleProjDrefImplicitLod: + case OpImageFetch: + case OpImageGather: + case OpImageDrefGather: + + emit_texture_op(instruction); + break; + + case OpImage: + { + uint32_t result_type = ops[0]; + uint32_t id = ops[1]; + + + auto &e = emit_op(result_type, id, to_expression(ops[2]), true, true); + + + auto *var = maybe_get_backing_variable(ops[2]); + e.loaded_from = var ? var->self : ID(0); + break; + } + + case OpImageQueryLod: + { + if (!options.es && options.version < 400) + { + require_extension_internal("GL_ARB_texture_query_lod"); + + GLSL_BFOP(textureQueryLOD); + } + else if (options.es) + SPIRV_CROSS_THROW("textureQueryLod not supported in ES profile."); + else + GLSL_BFOP(textureQueryLod); + register_control_dependent_expression(ops[1]); + break; + } + + case OpImageQueryLevels: + { + uint32_t result_type = ops[0]; + uint32_t id = ops[1]; + + if (!options.es && options.version < 430) + require_extension_internal("GL_ARB_texture_query_levels"); + if (options.es) + SPIRV_CROSS_THROW("textureQueryLevels not supported in ES profile."); + + auto expr = join("textureQueryLevels(", convert_separate_image_to_expression(ops[2]), ")"); + auto &restype = get(ops[0]); + expr = bitcast_expression(restype, SPIRType::Int, expr); + emit_op(result_type, id, expr, true); + break; + } + + case OpImageQuerySamples: + { + auto &type = expression_type(ops[2]); + uint32_t result_type = ops[0]; + uint32_t id = ops[1]; + + string expr; + if (type.image.sampled == 2) + expr = join("imageSamples(", to_expression(ops[2]), ")"); + else + expr = join("textureSamples(", convert_separate_image_to_expression(ops[2]), ")"); + + auto &restype = get(ops[0]); + expr = bitcast_expression(restype, SPIRType::Int, expr); + emit_op(result_type, id, expr, true); + break; + } + + case OpSampledImage: + { + uint32_t result_type = ops[0]; + uint32_t id = ops[1]; + emit_sampled_image_op(result_type, id, ops[2], ops[3]); + inherit_expression_dependencies(id, ops[2]); + inherit_expression_dependencies(id, ops[3]); + break; + } + + case OpImageQuerySizeLod: + { + uint32_t result_type = ops[0]; + uint32_t id = ops[1]; + + auto expr = join("textureSize(", convert_separate_image_to_expression(ops[2]), ", ", + bitcast_expression(SPIRType::Int, ops[3]), ")"); + auto &restype = get(ops[0]); + expr = bitcast_expression(restype, SPIRType::Int, expr); + emit_op(result_type, id, expr, true); + break; + } + + + case OpImageRead: + { + + + + auto *var = maybe_get_backing_variable(ops[2]); + if (var) + { + auto &flags = ir.meta[var->self].decoration.decoration_flags; + if (flags.get(DecorationNonReadable)) + { + flags.clear(DecorationNonReadable); + force_recompile(); + } + } + + uint32_t result_type = ops[0]; + uint32_t id = ops[1]; + + bool pure; + string imgexpr; + auto &type = expression_type(ops[2]); + + if (var && 
var->remapped_variable) + { + if (type.image.ms) + SPIRV_CROSS_THROW("Trying to remap multisampled image to variable, this is not possible."); + + auto itr = + find_if(begin(pls_inputs), end(pls_inputs), [var](const PlsRemap &pls) { return pls.id == var->self; }); + + if (itr == end(pls_inputs)) + { + + + if (!var->remapped_components) + SPIRV_CROSS_THROW("subpassInput was remapped, but remap_components is not set correctly."); + imgexpr = remap_swizzle(get(result_type), var->remapped_components, to_expression(ops[2])); + } + else + { + + + uint32_t components = pls_format_to_components(itr->format); + imgexpr = remap_swizzle(get(result_type), components, to_expression(ops[2])); + } + pure = true; + } + else if (type.image.dim == DimSubpassData) + { + if (options.vulkan_semantics) + { + + if (type.image.ms) + { + uint32_t operands = ops[4]; + if (operands != ImageOperandsSampleMask || length != 6) + SPIRV_CROSS_THROW( + "Multisampled image used in OpImageRead, but unexpected operand mask was used."); + + uint32_t samples = ops[5]; + imgexpr = join("subpassLoad(", to_expression(ops[2]), ", ", to_expression(samples), ")"); + } + else + imgexpr = join("subpassLoad(", to_expression(ops[2]), ")"); + } + else + { + if (type.image.ms) + { + uint32_t operands = ops[4]; + if (operands != ImageOperandsSampleMask || length != 6) + SPIRV_CROSS_THROW( + "Multisampled image used in OpImageRead, but unexpected operand mask was used."); + + uint32_t samples = ops[5]; + imgexpr = join("texelFetch(", to_expression(ops[2]), ", ivec2(gl_FragCoord.xy), ", + to_expression(samples), ")"); + } + else + { + + imgexpr = join("texelFetch(", to_expression(ops[2]), ", ivec2(gl_FragCoord.xy), 0)"); + } + } + imgexpr = remap_swizzle(get(result_type), 4, imgexpr); + pure = true; + } + else + { + + auto coord_expr = to_expression(ops[3]); + auto target_coord_type = expression_type(ops[3]); + target_coord_type.basetype = SPIRType::Int; + coord_expr = bitcast_expression(target_coord_type, expression_type(ops[3]).basetype, coord_expr); + + + if (type.image.ms) + { + uint32_t operands = ops[4]; + if (operands != ImageOperandsSampleMask || length != 6) + SPIRV_CROSS_THROW("Multisampled image used in OpImageRead, but unexpected operand mask was used."); + + uint32_t samples = ops[5]; + imgexpr = + join("imageLoad(", to_expression(ops[2]), ", ", coord_expr, ", ", to_expression(samples), ")"); + } + else + imgexpr = join("imageLoad(", to_expression(ops[2]), ", ", coord_expr, ")"); + + imgexpr = remap_swizzle(get(result_type), 4, imgexpr); + pure = false; + } + + if (var && var->forwardable) + { + bool forward = forced_temporaries.find(id) == end(forced_temporaries); + auto &e = emit_op(result_type, id, imgexpr, forward); + + + if (!pure) + { + e.loaded_from = var->self; + if (forward) + var->dependees.push_back(id); + } + } + else + emit_op(result_type, id, imgexpr, false); + + inherit_expression_dependencies(id, ops[2]); + if (type.image.ms) + inherit_expression_dependencies(id, ops[5]); + break; + } + + case OpImageTexelPointer: + { + uint32_t result_type = ops[0]; + uint32_t id = ops[1]; + + auto coord_expr = to_expression(ops[3]); + auto target_coord_type = expression_type(ops[3]); + target_coord_type.basetype = SPIRType::Int; + coord_expr = bitcast_expression(target_coord_type, expression_type(ops[3]).basetype, coord_expr); + + auto &e = set(id, join(to_expression(ops[2]), ", ", coord_expr), result_type, true); + + + auto *var = maybe_get_backing_variable(ops[2]); + e.loaded_from = var ? 
var->self : ID(0); + inherit_expression_dependencies(id, ops[3]); + break; + } + + case OpImageWrite: + { + + + + auto *var = maybe_get_backing_variable(ops[0]); + if (var) + { + auto &flags = ir.meta[var->self].decoration.decoration_flags; + if (flags.get(DecorationNonWritable)) + { + flags.clear(DecorationNonWritable); + force_recompile(); + } + } + + auto &type = expression_type(ops[0]); + auto &value_type = expression_type(ops[2]); + auto store_type = value_type; + store_type.vecsize = 4; + + + auto coord_expr = to_expression(ops[1]); + auto target_coord_type = expression_type(ops[1]); + target_coord_type.basetype = SPIRType::Int; + coord_expr = bitcast_expression(target_coord_type, expression_type(ops[1]).basetype, coord_expr); + + if (type.image.ms) + { + uint32_t operands = ops[3]; + if (operands != ImageOperandsSampleMask || length != 5) + SPIRV_CROSS_THROW("Multisampled image used in OpImageWrite, but unexpected operand mask was used."); + uint32_t samples = ops[4]; + statement("imageStore(", to_expression(ops[0]), ", ", coord_expr, ", ", to_expression(samples), ", ", + remap_swizzle(store_type, value_type.vecsize, to_expression(ops[2])), ");"); + } + else + statement("imageStore(", to_expression(ops[0]), ", ", coord_expr, ", ", + remap_swizzle(store_type, value_type.vecsize, to_expression(ops[2])), ");"); + + if (var && variable_storage_is_aliased(*var)) + flush_all_aliased_variables(); + break; + } + + case OpImageQuerySize: + { + auto &type = expression_type(ops[2]); + uint32_t result_type = ops[0]; + uint32_t id = ops[1]; + + if (type.basetype == SPIRType::Image) + { + string expr; + if (type.image.sampled == 2) + { + + expr = join("imageSize(", to_expression(ops[2]), ")"); + } + else + { + + expr = join("textureSize(", convert_separate_image_to_expression(ops[2]), ")"); + } + + auto &restype = get(ops[0]); + expr = bitcast_expression(restype, SPIRType::Int, expr); + emit_op(result_type, id, expr, true); + } + else + SPIRV_CROSS_THROW("Invalid type for OpImageQuerySize."); + break; + } + + + case OpControlBarrier: + case OpMemoryBarrier: + { + uint32_t execution_scope = 0; + uint32_t memory; + uint32_t semantics; + + if (opcode == OpMemoryBarrier) + { + memory = get(ops[0]).scalar(); + semantics = get(ops[1]).scalar(); + } + else + { + execution_scope = get(ops[0]).scalar(); + memory = get(ops[1]).scalar(); + semantics = get(ops[2]).scalar(); + } + + if (execution_scope == ScopeSubgroup || memory == ScopeSubgroup) + { + if (!options.vulkan_semantics) + SPIRV_CROSS_THROW("Can only use subgroup operations in Vulkan semantics."); + require_extension_internal("GL_KHR_shader_subgroup_basic"); + } + + if (execution_scope != ScopeSubgroup && get_entry_point().model == ExecutionModelTessellationControl) + { + + if (opcode == OpControlBarrier) + statement("barrier();"); + break; + } + + + semantics = mask_relevant_memory_semantics(semantics); + + if (opcode == OpMemoryBarrier) + { + + + const Instruction *next = get_next_instruction_in_block(instruction); + if (next && next->op == OpControlBarrier) + { + auto *next_ops = stream(*next); + uint32_t next_memory = get(next_ops[1]).scalar(); + uint32_t next_semantics = get(next_ops[2]).scalar(); + next_semantics = mask_relevant_memory_semantics(next_semantics); + + bool memory_scope_covered = false; + if (next_memory == memory) + memory_scope_covered = true; + else if (next_semantics == MemorySemanticsWorkgroupMemoryMask) + { + + + if ((next_memory == ScopeDevice || next_memory == ScopeWorkgroup) && + (memory == ScopeDevice || memory == 
ScopeWorkgroup)) + { + memory_scope_covered = true; + } + } + else if (memory == ScopeWorkgroup && next_memory == ScopeDevice) + { + + memory_scope_covered = true; + } + + + if (memory_scope_covered && (semantics & next_semantics) == semantics) + break; + } + } + + + + if (semantics || opcode == OpControlBarrier) + { + assert(current_emitting_block); + flush_control_dependent_expressions(current_emitting_block->self); + flush_all_active_variables(); + } + + if (memory == ScopeWorkgroup) + { + if (semantics == MemorySemanticsWorkgroupMemoryMask) + statement("memoryBarrierShared();"); + else if (semantics != 0) + statement("groupMemoryBarrier();"); + } + else if (memory == ScopeSubgroup) + { + const uint32_t all_barriers = + MemorySemanticsWorkgroupMemoryMask | MemorySemanticsUniformMemoryMask | MemorySemanticsImageMemoryMask; + + if (semantics & (MemorySemanticsCrossWorkgroupMemoryMask | MemorySemanticsSubgroupMemoryMask)) + { + + + statement("subgroupMemoryBarrier();"); + } + else if ((semantics & all_barriers) == all_barriers) + { + + statement("subgroupMemoryBarrier();"); + } + else + { + + if (semantics & MemorySemanticsWorkgroupMemoryMask) + statement("subgroupMemoryBarrierShared();"); + if (semantics & MemorySemanticsUniformMemoryMask) + statement("subgroupMemoryBarrierBuffer();"); + if (semantics & MemorySemanticsImageMemoryMask) + statement("subgroupMemoryBarrierImage();"); + } + } + else + { + const uint32_t all_barriers = MemorySemanticsWorkgroupMemoryMask | MemorySemanticsUniformMemoryMask | + MemorySemanticsImageMemoryMask | MemorySemanticsAtomicCounterMemoryMask; + + if (semantics & (MemorySemanticsCrossWorkgroupMemoryMask | MemorySemanticsSubgroupMemoryMask)) + { + + + statement("memoryBarrier();"); + } + else if ((semantics & all_barriers) == all_barriers) + { + + statement("memoryBarrier();"); + } + else + { + + if (semantics & MemorySemanticsWorkgroupMemoryMask) + statement("memoryBarrierShared();"); + if (semantics & MemorySemanticsUniformMemoryMask) + statement("memoryBarrierBuffer();"); + if (semantics & MemorySemanticsImageMemoryMask) + statement("memoryBarrierImage();"); + if (semantics & MemorySemanticsAtomicCounterMemoryMask) + statement("memoryBarrierAtomicCounter();"); + } + } + + if (opcode == OpControlBarrier) + { + if (execution_scope == ScopeSubgroup) + statement("subgroupBarrier();"); + else + statement("barrier();"); + } + break; + } + + case OpExtInst: + { + uint32_t extension_set = ops[2]; + + if (get(extension_set).ext == SPIRExtension::GLSL) + { + emit_glsl_op(ops[0], ops[1], ops[3], &ops[4], length - 4); + } + else if (get(extension_set).ext == SPIRExtension::SPV_AMD_shader_ballot) + { + emit_spv_amd_shader_ballot_op(ops[0], ops[1], ops[3], &ops[4], length - 4); + } + else if (get(extension_set).ext == SPIRExtension::SPV_AMD_shader_explicit_vertex_parameter) + { + emit_spv_amd_shader_explicit_vertex_parameter_op(ops[0], ops[1], ops[3], &ops[4], length - 4); + } + else if (get(extension_set).ext == SPIRExtension::SPV_AMD_shader_trinary_minmax) + { + emit_spv_amd_shader_trinary_minmax_op(ops[0], ops[1], ops[3], &ops[4], length - 4); + } + else if (get(extension_set).ext == SPIRExtension::SPV_AMD_gcn_shader) + { + emit_spv_amd_gcn_shader_op(ops[0], ops[1], ops[3], &ops[4], length - 4); + } + else if (get(extension_set).ext == SPIRExtension::SPV_debug_info) + { + break; + } + else + { + statement("// unimplemented ext op ", instruction.op); + break; + } + + break; + } + + + case OpSubgroupBallotKHR: + { + uint32_t result_type = ops[0]; + uint32_t id = 
ops[1]; + string expr; + expr = join("uvec4(unpackUint2x32(ballotARB(" + to_expression(ops[2]) + ")), 0u, 0u)"); + emit_op(result_type, id, expr, should_forward(ops[2])); + + require_extension_internal("GL_ARB_shader_ballot"); + inherit_expression_dependencies(id, ops[2]); + register_control_dependent_expression(ops[1]); + break; + } + + case OpSubgroupFirstInvocationKHR: + { + uint32_t result_type = ops[0]; + uint32_t id = ops[1]; + emit_unary_func_op(result_type, id, ops[2], "readFirstInvocationARB"); + + require_extension_internal("GL_ARB_shader_ballot"); + register_control_dependent_expression(ops[1]); + break; + } + + case OpSubgroupReadInvocationKHR: + { + uint32_t result_type = ops[0]; + uint32_t id = ops[1]; + emit_binary_func_op(result_type, id, ops[2], ops[3], "readInvocationARB"); + + require_extension_internal("GL_ARB_shader_ballot"); + register_control_dependent_expression(ops[1]); + break; + } + + case OpSubgroupAllKHR: + { + uint32_t result_type = ops[0]; + uint32_t id = ops[1]; + emit_unary_func_op(result_type, id, ops[2], "allInvocationsARB"); + + require_extension_internal("GL_ARB_shader_group_vote"); + register_control_dependent_expression(ops[1]); + break; + } + + case OpSubgroupAnyKHR: + { + uint32_t result_type = ops[0]; + uint32_t id = ops[1]; + emit_unary_func_op(result_type, id, ops[2], "anyInvocationARB"); + + require_extension_internal("GL_ARB_shader_group_vote"); + register_control_dependent_expression(ops[1]); + break; + } + + case OpSubgroupAllEqualKHR: + { + uint32_t result_type = ops[0]; + uint32_t id = ops[1]; + emit_unary_func_op(result_type, id, ops[2], "allInvocationsEqualARB"); + + require_extension_internal("GL_ARB_shader_group_vote"); + register_control_dependent_expression(ops[1]); + break; + } + + case OpGroupIAddNonUniformAMD: + case OpGroupFAddNonUniformAMD: + { + uint32_t result_type = ops[0]; + uint32_t id = ops[1]; + emit_unary_func_op(result_type, id, ops[4], "addInvocationsNonUniformAMD"); + + require_extension_internal("GL_AMD_shader_ballot"); + register_control_dependent_expression(ops[1]); + break; + } + + case OpGroupFMinNonUniformAMD: + case OpGroupUMinNonUniformAMD: + case OpGroupSMinNonUniformAMD: + { + uint32_t result_type = ops[0]; + uint32_t id = ops[1]; + emit_unary_func_op(result_type, id, ops[4], "minInvocationsNonUniformAMD"); + + require_extension_internal("GL_AMD_shader_ballot"); + register_control_dependent_expression(ops[1]); + break; + } + + case OpGroupFMaxNonUniformAMD: + case OpGroupUMaxNonUniformAMD: + case OpGroupSMaxNonUniformAMD: + { + uint32_t result_type = ops[0]; + uint32_t id = ops[1]; + emit_unary_func_op(result_type, id, ops[4], "maxInvocationsNonUniformAMD"); + + require_extension_internal("GL_AMD_shader_ballot"); + register_control_dependent_expression(ops[1]); + break; + } + + case OpFragmentMaskFetchAMD: + { + auto &type = expression_type(ops[2]); + uint32_t result_type = ops[0]; + uint32_t id = ops[1]; + + if (type.image.dim == spv::DimSubpassData) + { + emit_unary_func_op(result_type, id, ops[2], "fragmentMaskFetchAMD"); + } + else + { + emit_binary_func_op(result_type, id, ops[2], ops[3], "fragmentMaskFetchAMD"); + } + + require_extension_internal("GL_AMD_shader_fragment_mask"); + break; + } + + case OpFragmentFetchAMD: + { + auto &type = expression_type(ops[2]); + uint32_t result_type = ops[0]; + uint32_t id = ops[1]; + + if (type.image.dim == spv::DimSubpassData) + { + emit_binary_func_op(result_type, id, ops[2], ops[4], "fragmentFetchAMD"); + } + else + { + emit_trinary_func_op(result_type, id, 
ops[2], ops[3], ops[4], "fragmentFetchAMD"); + } + + require_extension_internal("GL_AMD_shader_fragment_mask"); + break; + } + + + case OpGroupNonUniformElect: + case OpGroupNonUniformBroadcast: + case OpGroupNonUniformBroadcastFirst: + case OpGroupNonUniformBallot: + case OpGroupNonUniformInverseBallot: + case OpGroupNonUniformBallotBitExtract: + case OpGroupNonUniformBallotBitCount: + case OpGroupNonUniformBallotFindLSB: + case OpGroupNonUniformBallotFindMSB: + case OpGroupNonUniformShuffle: + case OpGroupNonUniformShuffleXor: + case OpGroupNonUniformShuffleUp: + case OpGroupNonUniformShuffleDown: + case OpGroupNonUniformAll: + case OpGroupNonUniformAny: + case OpGroupNonUniformAllEqual: + case OpGroupNonUniformFAdd: + case OpGroupNonUniformIAdd: + case OpGroupNonUniformFMul: + case OpGroupNonUniformIMul: + case OpGroupNonUniformFMin: + case OpGroupNonUniformFMax: + case OpGroupNonUniformSMin: + case OpGroupNonUniformSMax: + case OpGroupNonUniformUMin: + case OpGroupNonUniformUMax: + case OpGroupNonUniformBitwiseAnd: + case OpGroupNonUniformBitwiseOr: + case OpGroupNonUniformBitwiseXor: + case OpGroupNonUniformQuadSwap: + case OpGroupNonUniformQuadBroadcast: + emit_subgroup_op(instruction); + break; + + case OpFUnordEqual: + case OpFUnordNotEqual: + case OpFUnordLessThan: + case OpFUnordGreaterThan: + case OpFUnordLessThanEqual: + case OpFUnordGreaterThanEqual: + { + + + + + uint32_t op0 = ops[2]; + uint32_t op1 = ops[3]; + + string expr; + if (expression_type(op0).vecsize > 1) + { + const char *comp_op = nullptr; + switch (opcode) + { + case OpFUnordEqual: + comp_op = "notEqual"; + break; + + case OpFUnordNotEqual: + comp_op = "equal"; + break; + + case OpFUnordLessThan: + comp_op = "greaterThanEqual"; + break; + + case OpFUnordLessThanEqual: + comp_op = "greaterThan"; + break; + + case OpFUnordGreaterThan: + comp_op = "lessThanEqual"; + break; + + case OpFUnordGreaterThanEqual: + comp_op = "lessThan"; + break; + + default: + assert(0); + break; + } + + expr = join("not(", comp_op, "(", to_unpacked_expression(op0), ", ", to_unpacked_expression(op1), "))"); + } + else + { + const char *comp_op = nullptr; + switch (opcode) + { + case OpFUnordEqual: + comp_op = " != "; + break; + + case OpFUnordNotEqual: + comp_op = " == "; + break; + + case OpFUnordLessThan: + comp_op = " >= "; + break; + + case OpFUnordLessThanEqual: + comp_op = " > "; + break; + + case OpFUnordGreaterThan: + comp_op = " <= "; + break; + + case OpFUnordGreaterThanEqual: + comp_op = " < "; + break; + + default: + assert(0); + break; + } + + expr = join("!(", to_enclosed_unpacked_expression(op0), comp_op, to_enclosed_unpacked_expression(op1), ")"); + } + + emit_op(ops[0], ops[1], expr, should_forward(op0) && should_forward(op1)); + inherit_expression_dependencies(ops[1], op0); + inherit_expression_dependencies(ops[1], op1); + break; + } + + case OpReportIntersectionNV: + statement("reportIntersectionNV(", to_expression(ops[0]), ", ", to_expression(ops[1]), ");"); + break; + case OpIgnoreIntersectionNV: + statement("ignoreIntersectionNV();"); + break; + case OpTerminateRayNV: + statement("terminateRayNV();"); + break; + case OpTraceNV: + statement("traceNV(", to_expression(ops[0]), ", ", to_expression(ops[1]), ", ", to_expression(ops[2]), ", ", + to_expression(ops[3]), ", ", to_expression(ops[4]), ", ", to_expression(ops[5]), ", ", + to_expression(ops[6]), ", ", to_expression(ops[7]), ", ", to_expression(ops[8]), ", ", + to_expression(ops[9]), ", ", to_expression(ops[10]), ");"); + break; + case OpExecuteCallableNV: + 
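+ // [annotation] Like the other GL_NV_ray_tracing opcodes above, this is a
+ // direct 1:1 lowering; e.g. an OpExecuteCallableNV on SBT record 0 with a
+ // callable-data id would come out roughly as "executeCallableNV(0u, 1);".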
statement("executeCallableNV(", to_expression(ops[0]), ", ", to_expression(ops[1]), ");"); + break; + + case OpConvertUToPtr: + { + auto &type = get(ops[0]); + if (type.storage != StorageClassPhysicalStorageBufferEXT) + SPIRV_CROSS_THROW("Only StorageClassPhysicalStorageBufferEXT is supported by OpConvertUToPtr."); + + auto op = type_to_glsl(type); + emit_unary_func_op(ops[0], ops[1], ops[2], op.c_str()); + break; + } + + case OpConvertPtrToU: + { + auto &type = get(ops[0]); + auto &ptr_type = expression_type(ops[2]); + if (ptr_type.storage != StorageClassPhysicalStorageBufferEXT) + SPIRV_CROSS_THROW("Only StorageClassPhysicalStorageBufferEXT is supported by OpConvertPtrToU."); + + auto op = type_to_glsl(type); + emit_unary_func_op(ops[0], ops[1], ops[2], op.c_str()); + break; + } + + case OpUndef: + + break; + + case OpLine: + { + emit_line_directive(ops[0], ops[1]); + break; + } + + case OpNoLine: + break; + + case OpDemoteToHelperInvocationEXT: + if (!options.vulkan_semantics) + SPIRV_CROSS_THROW("GL_EXT_demote_to_helper_invocation is only supported in Vulkan GLSL."); + require_extension_internal("GL_EXT_demote_to_helper_invocation"); + statement(backend.demote_literal, ";"); + break; + + case OpIsHelperInvocationEXT: + if (!options.vulkan_semantics) + SPIRV_CROSS_THROW("GL_EXT_demote_to_helper_invocation is only supported in Vulkan GLSL."); + require_extension_internal("GL_EXT_demote_to_helper_invocation"); + emit_op(ops[0], ops[1], "helperInvocationEXT()", false); + break; + + case OpBeginInvocationInterlockEXT: + + if (!interlocked_is_complex) + { + if (options.es) + statement("beginInvocationInterlockNV();"); + else + statement("beginInvocationInterlockARB();"); + + flush_all_active_variables(); + + } + break; + + case OpEndInvocationInterlockEXT: + + if (!interlocked_is_complex) + { + if (options.es) + statement("endInvocationInterlockNV();"); + else + statement("endInvocationInterlockARB();"); + + flush_all_active_variables(); + + } + break; + + default: + statement("// unimplemented op ", instruction.op); + break; + } +} + + + + + + + + + +void CompilerGLSL::append_global_func_args(const SPIRFunction &func, uint32_t index, SmallVector &arglist) +{ + auto &args = func.arguments; + uint32_t arg_cnt = uint32_t(args.size()); + for (uint32_t arg_idx = index; arg_idx < arg_cnt; arg_idx++) + { + auto &arg = args[arg_idx]; + assert(arg.alias_global_variable); + + + + uint32_t var_id = get(arg.id).basevariable; + if (var_id) + flush_variable_declaration(var_id); + + arglist.push_back(to_func_call_arg(arg, arg.id)); + } +} + +string CompilerGLSL::to_member_name(const SPIRType &type, uint32_t index) +{ + if (type.type_alias != TypeID(0) && + !has_extended_decoration(type.type_alias, SPIRVCrossDecorationBufferBlockRepacked)) + { + return to_member_name(get(type.type_alias), index); + } + + auto &memb = ir.meta[type.self].members; + if (index < memb.size() && !memb[index].alias.empty()) + return memb[index].alias; + else + return join("_m", index); +} + +string CompilerGLSL::to_member_reference(uint32_t, const SPIRType &type, uint32_t index, bool) +{ + return join(".", to_member_name(type, index)); +} + +void CompilerGLSL::add_member_name(SPIRType &type, uint32_t index) +{ + auto &memb = ir.meta[type.self].members; + if (index < memb.size() && !memb[index].alias.empty()) + { + auto &name = memb[index].alias; + if (name.empty()) + return; + + + if (name[0] == '_' && name.size() >= 2 && isdigit(name[1])) + { + name.clear(); + return; + } + + update_name_cache(type.member_name_cache, name); + } 
+} + + +bool CompilerGLSL::is_non_native_row_major_matrix(uint32_t id) +{ + + + if (backend.native_row_major_matrix && !is_legacy()) + return false; + + + if (!has_decoration(id, DecorationRowMajor)) + return false; + + + + + const auto type = expression_type(id); + if (type.columns != type.vecsize) + SPIRV_CROSS_THROW("Row-major matrices must be square on this platform."); + + return true; +} + + +bool CompilerGLSL::member_is_non_native_row_major_matrix(const SPIRType &type, uint32_t index) +{ + + if (backend.native_row_major_matrix && !is_legacy()) + return false; + + + if (!has_member_decoration(type.self, index, DecorationRowMajor)) + return false; + + + + + const auto mbr_type = get(type.member_types[index]); + if (mbr_type.columns != mbr_type.vecsize) + SPIRV_CROSS_THROW("Row-major matrices must be square on this platform."); + + return true; +} + + +bool CompilerGLSL::member_is_remapped_physical_type(const SPIRType &type, uint32_t index) const +{ + return has_extended_member_decoration(type.self, index, SPIRVCrossDecorationPhysicalTypeID); +} + + +bool CompilerGLSL::member_is_packed_physical_type(const SPIRType &type, uint32_t index) const +{ + return has_extended_member_decoration(type.self, index, SPIRVCrossDecorationPhysicalTypePacked); +} + + + + + +string CompilerGLSL::convert_row_major_matrix(string exp_str, const SPIRType &exp_type, uint32_t , + bool ) +{ + strip_enclosed_expression(exp_str); + if (!is_matrix(exp_type)) + { + auto column_index = exp_str.find_last_of('['); + if (column_index == string::npos) + return exp_str; + + auto column_expr = exp_str.substr(column_index); + exp_str.resize(column_index); + + auto transposed_expr = type_to_glsl_constructor(exp_type) + "("; + + + for (uint32_t c = 0; c < exp_type.vecsize; c++) + { + transposed_expr += join(exp_str, '[', c, ']', column_expr); + if (c + 1 < exp_type.vecsize) + transposed_expr += ", "; + } + + transposed_expr += ")"; + return transposed_expr; + } + else + return join("transpose(", exp_str, ")"); +} + +string CompilerGLSL::variable_decl(const SPIRType &type, const string &name, uint32_t id) +{ + string type_name = type_to_glsl(type, id); + remap_variable_type_name(type, name, type_name); + return join(type_name, " ", name, type_to_array_glsl(type)); +} + + + +void CompilerGLSL::emit_struct_member(const SPIRType &type, uint32_t member_type_id, uint32_t index, + const string &qualifier, uint32_t) +{ + auto &membertype = get(member_type_id); + + Bitset memberflags; + auto &memb = ir.meta[type.self].members; + if (index < memb.size()) + memberflags = memb[index].decoration_flags; + + string qualifiers; + bool is_block = ir.meta[type.self].decoration.decoration_flags.get(DecorationBlock) || + ir.meta[type.self].decoration.decoration_flags.get(DecorationBufferBlock); + + if (is_block) + qualifiers = to_interpolation_qualifiers(memberflags); + + statement(layout_for_member(type, index), qualifiers, qualifier, flags_to_qualifiers_glsl(membertype, memberflags), + variable_decl(membertype, to_member_name(type, index)), ";"); +} + +void CompilerGLSL::emit_struct_padding_target(const SPIRType &) +{ +} + +const char *CompilerGLSL::flags_to_qualifiers_glsl(const SPIRType &type, const Bitset &flags) +{ + + if (flags.get(DecorationRestrictPointerEXT)) + return "restrict "; + + + if (type.basetype != SPIRType::Float && type.basetype != SPIRType::Int && type.basetype != SPIRType::UInt && + type.basetype != SPIRType::Image && type.basetype != SPIRType::SampledImage && + type.basetype != SPIRType::Sampler) + return ""; + + if 
(options.es) + { + auto &execution = get_entry_point(); + + if (flags.get(DecorationRelaxedPrecision)) + { + bool implied_fmediump = type.basetype == SPIRType::Float && + options.fragment.default_float_precision == Options::Mediump && + execution.model == ExecutionModelFragment; + + bool implied_imediump = (type.basetype == SPIRType::Int || type.basetype == SPIRType::UInt) && + options.fragment.default_int_precision == Options::Mediump && + execution.model == ExecutionModelFragment; + + return implied_fmediump || implied_imediump ? "" : "mediump "; + } + else + { + bool implied_fhighp = + type.basetype == SPIRType::Float && ((options.fragment.default_float_precision == Options::Highp && + execution.model == ExecutionModelFragment) || + (execution.model != ExecutionModelFragment)); + + bool implied_ihighp = (type.basetype == SPIRType::Int || type.basetype == SPIRType::UInt) && + ((options.fragment.default_int_precision == Options::Highp && + execution.model == ExecutionModelFragment) || + (execution.model != ExecutionModelFragment)); + + return implied_fhighp || implied_ihighp ? "" : "highp "; + } + } + else if (backend.allow_precision_qualifiers) + { + + + if (flags.get(DecorationRelaxedPrecision)) + return "mediump "; + else + return ""; + } + else + return ""; +} + +const char *CompilerGLSL::to_precision_qualifiers_glsl(uint32_t id) +{ + auto &type = expression_type(id); + bool use_precision_qualifiers = backend.allow_precision_qualifiers || options.es; + if (use_precision_qualifiers && (type.basetype == SPIRType::Image || type.basetype == SPIRType::SampledImage)) + { + + auto &result_type = get(type.image.type); + if (result_type.width < 32) + return "mediump "; + } + return flags_to_qualifiers_glsl(type, ir.meta[id].decoration.decoration_flags); +} + +string CompilerGLSL::to_qualifiers_glsl(uint32_t id) +{ + auto &flags = ir.meta[id].decoration.decoration_flags; + string res; + + auto *var = maybe_get(id); + + if (var && var->storage == StorageClassWorkgroup && !backend.shared_is_implied) + res += "shared "; + + res += to_interpolation_qualifiers(flags); + if (var) + res += to_storage_qualifiers_glsl(*var); + + auto &type = expression_type(id); + if (type.image.dim != DimSubpassData && type.image.sampled == 2) + { + if (flags.get(DecorationCoherent)) + res += "coherent "; + if (flags.get(DecorationRestrict)) + res += "restrict "; + if (flags.get(DecorationNonWritable)) + res += "readonly "; + if (flags.get(DecorationNonReadable)) + res += "writeonly "; + } + + res += to_precision_qualifiers_glsl(id); + + return res; +} + +string CompilerGLSL::argument_decl(const SPIRFunction::Parameter &arg) +{ + + auto &type = expression_type(arg.id); + const char *direction = ""; + + if (type.pointer) + { + if (arg.write_count && arg.read_count) + direction = "inout "; + else if (arg.write_count) + direction = "out "; + } + + return join(direction, to_qualifiers_glsl(arg.id), variable_decl(type, to_name(arg.id), arg.id)); +} + +string CompilerGLSL::to_initializer_expression(const SPIRVariable &var) +{ + return to_expression(var.initializer); +} + +string CompilerGLSL::variable_decl(const SPIRVariable &variable) +{ + + auto &type = get_variable_data_type(variable); + + if (type.pointer_depth > 1) + SPIRV_CROSS_THROW("Cannot declare pointer-to-pointer types."); + + auto res = join(to_qualifiers_glsl(variable.self), variable_decl(type, to_name(variable.self), variable.self)); + + if (variable.loop_variable && variable.static_expression) + { + uint32_t expr = variable.static_expression; + if 
(ir.ids[expr].get_type() != TypeUndef) + res += join(" = ", to_expression(variable.static_expression)); + } + else if (variable.initializer) + { + uint32_t expr = variable.initializer; + if (ir.ids[expr].get_type() != TypeUndef) + res += join(" = ", to_initializer_expression(variable)); + } + return res; +} + +const char *CompilerGLSL::to_pls_qualifiers_glsl(const SPIRVariable &variable) +{ + auto &flags = ir.meta[variable.self].decoration.decoration_flags; + if (flags.get(DecorationRelaxedPrecision)) + return "mediump "; + else + return "highp "; +} + +string CompilerGLSL::pls_decl(const PlsRemap &var) +{ + auto &variable = get(var.id); + + SPIRType type; + type.vecsize = pls_format_to_components(var.format); + type.basetype = pls_format_to_basetype(var.format); + + return join(to_pls_layout(var.format), to_pls_qualifiers_glsl(variable), type_to_glsl(type), " ", + to_name(variable.self)); +} + +uint32_t CompilerGLSL::to_array_size_literal(const SPIRType &type) const +{ + return to_array_size_literal(type, uint32_t(type.array.size() - 1)); +} + +uint32_t CompilerGLSL::to_array_size_literal(const SPIRType &type, uint32_t index) const +{ + assert(type.array.size() == type.array_size_literal.size()); + + if (type.array_size_literal[index]) + { + return type.array[index]; + } + else + { + + + uint32_t array_size_id = type.array[index]; + + + if (ir.ids[array_size_id].get_type() == TypeConstantOp) + SPIRV_CROSS_THROW("An array size was found to be an OpSpecConstantOp. This is not supported since " + "SPIRV-Cross cannot deduce the actual size here."); + + uint32_t array_size = get(array_size_id).scalar(); + return array_size; + } +} + +string CompilerGLSL::to_array_size(const SPIRType &type, uint32_t index) +{ + assert(type.array.size() == type.array_size_literal.size()); + + + + if (type.storage == StorageClassInput && (get_entry_point().model == ExecutionModelTessellationControl || + get_entry_point().model == ExecutionModelTessellationEvaluation)) + return ""; + + auto &size = type.array[index]; + if (!type.array_size_literal[index]) + return to_expression(size); + else if (size) + return convert_to_string(size); + else if (!backend.unsized_array_supported) + { + + + + + + + return "1"; + } + else + return ""; +} + +string CompilerGLSL::type_to_array_glsl(const SPIRType &type) +{ + if (type.pointer && type.storage == StorageClassPhysicalStorageBufferEXT && type.basetype != SPIRType::Struct) + { + + return ""; + } + + if (type.array.empty()) + return ""; + + if (options.flatten_multidimensional_arrays) + { + string res; + res += "["; + for (auto i = uint32_t(type.array.size()); i; i--) + { + res += enclose_expression(to_array_size(type, i - 1)); + if (i > 1) + res += " * "; + } + res += "]"; + return res; + } + else + { + if (type.array.size() > 1) + { + if (!options.es && options.version < 430) + require_extension_internal("GL_ARB_arrays_of_arrays"); + else if (options.es && options.version < 310) + SPIRV_CROSS_THROW("Arrays of arrays not supported before ESSL version 310. 
" + "Try using --flatten-multidimensional-arrays or set " + "options.flatten_multidimensional_arrays to true."); + } + + string res; + for (auto i = uint32_t(type.array.size()); i; i--) + { + res += "["; + res += to_array_size(type, i - 1); + res += "]"; + } + return res; + } +} + +string CompilerGLSL::image_type_glsl(const SPIRType &type, uint32_t id) +{ + auto &imagetype = get(type.image.type); + string res; + + switch (imagetype.basetype) + { + case SPIRType::Int: + case SPIRType::Short: + case SPIRType::SByte: + res = "i"; + break; + case SPIRType::UInt: + case SPIRType::UShort: + case SPIRType::UByte: + res = "u"; + break; + default: + break; + } + + + + + if (type.basetype == SPIRType::Image && type.image.dim == DimSubpassData && options.vulkan_semantics) + return res + "subpassInput" + (type.image.ms ? "MS" : ""); + + + + if (type.basetype == SPIRType::Image && type.image.dim != DimSubpassData) + { + + if (type.image.dim == DimBuffer && type.image.sampled == 1) + res += "sampler"; + else + res += type.image.sampled == 2 ? "image" : "texture"; + } + else + res += "sampler"; + + switch (type.image.dim) + { + case Dim1D: + res += "1D"; + break; + case Dim2D: + res += "2D"; + break; + case Dim3D: + res += "3D"; + break; + case DimCube: + res += "Cube"; + break; + case DimRect: + if (options.es) + SPIRV_CROSS_THROW("Rectangle textures are not supported on OpenGL ES."); + + if (is_legacy_desktop()) + require_extension_internal("GL_ARB_texture_rectangle"); + + res += "2DRect"; + break; + + case DimBuffer: + if (options.es && options.version < 320) + require_extension_internal("GL_OES_texture_buffer"); + else if (!options.es && options.version < 300) + require_extension_internal("GL_EXT_texture_buffer_object"); + res += "Buffer"; + break; + + case DimSubpassData: + res += "2D"; + break; + default: + SPIRV_CROSS_THROW("Only 1D, 2D, 2DRect, 3D, Buffer, InputTarget and Cube textures supported."); + } + + if (type.image.ms) + res += "MS"; + if (type.image.arrayed) + { + if (is_legacy_desktop()) + require_extension_internal("GL_EXT_texture_array"); + res += "Array"; + } + + + if (((type.basetype == SPIRType::SampledImage) || (type.basetype == SPIRType::Sampler)) && + image_is_comparison(type, id)) + { + res += "Shadow"; + } + + return res; +} + +string CompilerGLSL::type_to_glsl_constructor(const SPIRType &type) +{ + if (backend.use_array_constructor && type.array.size() > 1) + { + if (options.flatten_multidimensional_arrays) + SPIRV_CROSS_THROW("Cannot flatten constructors of multidimensional array constructors, e.g. 
float[][]()."); + else if (!options.es && options.version < 430) + require_extension_internal("GL_ARB_arrays_of_arrays"); + else if (options.es && options.version < 310) + SPIRV_CROSS_THROW("Arrays of arrays not supported before ESSL version 310."); + } + + auto e = type_to_glsl(type); + if (backend.use_array_constructor) + { + for (uint32_t i = 0; i < type.array.size(); i++) + e += "[]"; + } + return e; +} + + + + +string CompilerGLSL::type_to_glsl(const SPIRType &type, uint32_t id) +{ + if (type.pointer && type.storage == StorageClassPhysicalStorageBufferEXT && type.basetype != SPIRType::Struct) + { + + string name = type_to_glsl(get_pointee_type(type)); + for (size_t i = 0; i < type.array.size(); i++) + { + if (type.array_size_literal[i]) + name += join(type.array[i], "_"); + else + name += join("id", type.array[i], "_"); + } + name += "Pointer"; + return name; + } + + switch (type.basetype) + { + case SPIRType::Struct: + + if (backend.explicit_struct_type) + return join("struct ", to_name(type.self)); + else + return to_name(type.self); + + case SPIRType::Image: + case SPIRType::SampledImage: + return image_type_glsl(type, id); + + case SPIRType::Sampler: + + + return comparison_ids.count(id) ? "samplerShadow" : "sampler"; + + case SPIRType::AccelerationStructureNV: + return "accelerationStructureNV"; + + case SPIRType::Void: + return "void"; + + default: + break; + } + + if (type.basetype == SPIRType::UInt && is_legacy()) + SPIRV_CROSS_THROW("Unsigned integers are not supported on legacy targets."); + + if (type.vecsize == 1 && type.columns == 1) + { + switch (type.basetype) + { + case SPIRType::Boolean: + return "bool"; + case SPIRType::SByte: + return backend.basic_int8_type; + case SPIRType::UByte: + return backend.basic_uint8_type; + case SPIRType::Short: + return backend.basic_int16_type; + case SPIRType::UShort: + return backend.basic_uint16_type; + case SPIRType::Int: + return backend.basic_int_type; + case SPIRType::UInt: + return backend.basic_uint_type; + case SPIRType::AtomicCounter: + return "atomic_uint"; + case SPIRType::Half: + return "float16_t"; + case SPIRType::Float: + return "float"; + case SPIRType::Double: + return "double"; + case SPIRType::Int64: + return "int64_t"; + case SPIRType::UInt64: + return "uint64_t"; + default: + return "???"; + } + } + else if (type.vecsize > 1 && type.columns == 1) + { + switch (type.basetype) + { + case SPIRType::Boolean: + return join("bvec", type.vecsize); + case SPIRType::SByte: + return join("i8vec", type.vecsize); + case SPIRType::UByte: + return join("u8vec", type.vecsize); + case SPIRType::Short: + return join("i16vec", type.vecsize); + case SPIRType::UShort: + return join("u16vec", type.vecsize); + case SPIRType::Int: + return join("ivec", type.vecsize); + case SPIRType::UInt: + return join("uvec", type.vecsize); + case SPIRType::Half: + return join("f16vec", type.vecsize); + case SPIRType::Float: + return join("vec", type.vecsize); + case SPIRType::Double: + return join("dvec", type.vecsize); + case SPIRType::Int64: + return join("i64vec", type.vecsize); + case SPIRType::UInt64: + return join("u64vec", type.vecsize); + default: + return "???"; + } + } + else if (type.vecsize == type.columns) + { + switch (type.basetype) + { + case SPIRType::Boolean: + return join("bmat", type.vecsize); + case SPIRType::Int: + return join("imat", type.vecsize); + case SPIRType::UInt: + return join("umat", type.vecsize); + case SPIRType::Half: + return join("f16mat", type.vecsize); + case SPIRType::Float: + return join("mat", type.vecsize); 
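+ // [annotation] Square matrices use the short GLSL spellings, e.g. a
+ // 3x3 float matrix becomes "mat3" here; the non-square branch below
+ // spells out "mat<columns>x<rows>", e.g. "mat4x3".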
+ case SPIRType::Double: + return join("dmat", type.vecsize); + + default: + return "???"; + } + } + else + { + switch (type.basetype) + { + case SPIRType::Boolean: + return join("bmat", type.columns, "x", type.vecsize); + case SPIRType::Int: + return join("imat", type.columns, "x", type.vecsize); + case SPIRType::UInt: + return join("umat", type.columns, "x", type.vecsize); + case SPIRType::Half: + return join("f16mat", type.columns, "x", type.vecsize); + case SPIRType::Float: + return join("mat", type.columns, "x", type.vecsize); + case SPIRType::Double: + return join("dmat", type.columns, "x", type.vecsize); + + default: + return "???"; + } + } +} + +void CompilerGLSL::add_variable(unordered_set &variables_primary, + const unordered_set &variables_secondary, string &name) +{ + if (name.empty()) + return; + + + if (name[0] == '_' && name.size() >= 2 && isdigit(name[1])) + { + name.clear(); + return; + } + + + name = sanitize_underscores(name); + + update_name_cache(variables_primary, variables_secondary, name); +} + +void CompilerGLSL::add_local_variable_name(uint32_t id) +{ + add_variable(local_variable_names, block_names, ir.meta[id].decoration.alias); +} + +void CompilerGLSL::add_resource_name(uint32_t id) +{ + add_variable(resource_names, block_names, ir.meta[id].decoration.alias); +} + +void CompilerGLSL::add_header_line(const std::string &line) +{ + header_lines.push_back(line); +} + +bool CompilerGLSL::has_extension(const std::string &ext) const +{ + auto itr = find(begin(forced_extensions), end(forced_extensions), ext); + return itr != end(forced_extensions); +} + +void CompilerGLSL::require_extension(const std::string &ext) +{ + if (!has_extension(ext)) + forced_extensions.push_back(ext); +} + +void CompilerGLSL::require_extension_internal(const string &ext) +{ + if (backend.supports_extensions && !has_extension(ext)) + { + forced_extensions.push_back(ext); + force_recompile(); + } +} + +void CompilerGLSL::flatten_buffer_block(VariableID id) +{ + auto &var = get(id); + auto &type = get(var.basetype); + auto name = to_name(type.self, false); + auto &flags = ir.meta[type.self].decoration.decoration_flags; + + if (!type.array.empty()) + SPIRV_CROSS_THROW(name + " is an array of UBOs."); + if (type.basetype != SPIRType::Struct) + SPIRV_CROSS_THROW(name + " is not a struct."); + if (!flags.get(DecorationBlock)) + SPIRV_CROSS_THROW(name + " is not a block."); + if (type.member_types.empty()) + SPIRV_CROSS_THROW(name + " is an empty struct."); + + flattened_buffer_blocks.insert(id); +} + +bool CompilerGLSL::builtin_translates_to_nonarray(spv::BuiltIn ) const +{ + return false; +} + +bool CompilerGLSL::check_atomic_image(uint32_t id) +{ + auto &type = expression_type(id); + if (type.storage == StorageClassImage) + { + if (options.es && options.version < 320) + require_extension_internal("GL_OES_shader_image_atomic"); + + auto *var = maybe_get_backing_variable(id); + if (var) + { + auto &flags = ir.meta[var->self].decoration.decoration_flags; + if (flags.get(DecorationNonWritable) || flags.get(DecorationNonReadable)) + { + flags.clear(DecorationNonWritable); + flags.clear(DecorationNonReadable); + force_recompile(); + } + } + return true; + } + else + return false; +} + +void CompilerGLSL::add_function_overload(const SPIRFunction &func) +{ + Hasher hasher; + for (auto &arg : func.arguments) + { + + + + uint32_t type_id = get_pointee_type_id(arg.type); + auto &type = get(type_id); + + if (!combined_image_samplers.empty()) + { + + + + + if (type.basetype == SPIRType::SampledImage || + 
(type.basetype == SPIRType::Image && type.image.sampled == 1) || type.basetype == SPIRType::Sampler) + { + continue; + } + } + + hasher.u32(type_id); + } + uint64_t types_hash = hasher.get(); + + auto function_name = to_name(func.self); + auto itr = function_overloads.find(function_name); + if (itr != end(function_overloads)) + { + + auto &overloads = itr->second; + if (overloads.count(types_hash) != 0) + { + + add_resource_name(func.self); + function_overloads[to_name(func.self)].insert(types_hash); + } + else + { + + overloads.insert(types_hash); + } + } + else + { + + add_resource_name(func.self); + function_overloads[to_name(func.self)].insert(types_hash); + } +} + +void CompilerGLSL::emit_function_prototype(SPIRFunction &func, const Bitset &return_flags) +{ + if (func.self != ir.default_entry_point) + add_function_overload(func); + + + local_variable_names = resource_names; + + string decl; + + auto &type = get(func.return_type); + decl += flags_to_qualifiers_glsl(type, return_flags); + decl += type_to_glsl(type); + decl += type_to_array_glsl(type); + decl += " "; + + if (func.self == ir.default_entry_point) + { + + + if (interlocked_is_complex) + decl += "spvMainInterlockedBody"; + else + decl += "main"; + + processing_entry_point = true; + } + else + decl += to_name(func.self); + + decl += "("; + SmallVector arglist; + for (auto &arg : func.arguments) + { + + + if (skip_argument(arg.id)) + continue; + + + + + + add_local_variable_name(arg.id); + + arglist.push_back(argument_decl(arg)); + + + auto *var = maybe_get(arg.id); + if (var) + var->parameter = &arg; + } + + for (auto &arg : func.shadow_arguments) + { + + + + + add_local_variable_name(arg.id); + + arglist.push_back(argument_decl(arg)); + + + auto *var = maybe_get(arg.id); + if (var) + var->parameter = &arg; + } + + decl += merge(arglist); + decl += ")"; + statement(decl); +} + +void CompilerGLSL::emit_function(SPIRFunction &func, const Bitset &return_flags) +{ + + if (func.active) + return; + func.active = true; + + + for (auto block : func.blocks) + { + auto &b = get(block); + for (auto &i : b.ops) + { + auto ops = stream(i); + auto op = static_cast(i.op); + + if (op == OpFunctionCall) + { + + uint32_t id = ops[2]; + emit_function(get(id), ir.meta[ops[1]].decoration.decoration_flags); + } + } + } + + if (func.entry_line.file_id != 0) + emit_line_directive(func.entry_line.file_id, func.entry_line.line_literal); + emit_function_prototype(func, return_flags); + begin_scope(); + + if (func.self == ir.default_entry_point) + emit_entry_point_declarations(); + + current_function = &func; + auto &entry_block = get(func.entry_block); + + for (auto &v : func.local_variables) + { + auto &var = get(v); + var.deferred_declaration = false; + + if (var.storage == StorageClassWorkgroup) + { + + + + add_local_variable_name(var.self); + statement(variable_decl(var), ";"); + var.deferred_declaration = false; + } + else if (var.storage == StorageClassPrivate) + { + + + + + + + add_local_variable_name(var.self); + auto &dominated = entry_block.dominated_variables; + if (find(begin(dominated), end(dominated), var.self) == end(dominated)) + entry_block.dominated_variables.push_back(var.self); + var.deferred_declaration = true; + } + else if (var.storage == StorageClassFunction && var.remapped_variable && var.static_expression) + { + + var.deferred_declaration = false; + } + else if (expression_is_lvalue(v)) + { + add_local_variable_name(var.self); + + if (var.initializer) + statement(variable_decl_function_local(var), ";"); + else + { + + + + 
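+ // [annotation] Declaration is deferred to the first actual use, which
+ // keeps never-used locals out of the emitted GLSL;
+ // flush_variable_declaration() will emit the declaration on demand.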
var.deferred_declaration = true; + } + } + else + { + + + + + + var.statically_assigned = true; + } + + var.loop_variable_enable = false; + + + if (var.loop_variable) + var.deferred_declaration = false; + } + + + for (auto &block_id : func.blocks) + { + auto &block = get(block_id); + sort(begin(block.dominated_variables), end(block.dominated_variables)); + } + + for (auto &line : current_function->fixup_hooks_in) + line(); + + emit_block_chain(entry_block); + + end_scope(); + processing_entry_point = false; + statement(""); + + + + for (auto &v : func.local_variables) + { + auto &var = get(v); + var.deferred_declaration = false; + } +} + +void CompilerGLSL::emit_fixup() +{ + auto &execution = get_entry_point(); + if (execution.model == ExecutionModelVertex) + { + if (options.vertex.fixup_clipspace) + { + const char *suffix = backend.float_literal_suffix ? "f" : ""; + statement("gl_Position.z = 2.0", suffix, " * gl_Position.z - gl_Position.w;"); + } + + if (options.vertex.flip_vert_y) + statement("gl_Position.y = -gl_Position.y;"); + } +} + +void CompilerGLSL::flush_phi(BlockID from, BlockID to) +{ + auto &child = get(to); + if (child.ignore_phi_from_block == from) + return; + + unordered_set temporary_phi_variables; + + for (auto itr = begin(child.phi_variables); itr != end(child.phi_variables); ++itr) + { + auto &phi = *itr; + + if (phi.parent == from) + { + auto &var = get(phi.function_variable); + + + if (var.loop_variable && !var.loop_variable_enable) + var.static_expression = phi.local_variable; + else + { + flush_variable_declaration(phi.function_variable); + + + + + + bool need_saved_temporary = + find_if(itr + 1, end(child.phi_variables), [&](const SPIRBlock::Phi &future_phi) -> bool { + return future_phi.local_variable == ID(phi.function_variable) && future_phi.parent == from; + }) != end(child.phi_variables); + + if (need_saved_temporary) + { + + + if (!var.allocate_temporary_copy) + { + var.allocate_temporary_copy = true; + force_recompile(); + } + statement("_", phi.function_variable, "_copy", " = ", to_name(phi.function_variable), ";"); + temporary_phi_variables.insert(phi.function_variable); + } + + + + auto lhs = to_expression(phi.function_variable); + + string rhs; + if (temporary_phi_variables.count(phi.local_variable)) + rhs = join("_", phi.local_variable, "_copy"); + else + rhs = to_pointer_expression(phi.local_variable); + + if (!optimize_read_modify_write(get(var.basetype), lhs, rhs)) + statement(lhs, " = ", rhs, ";"); + } + + register_write(phi.function_variable); + } + } +} + +void CompilerGLSL::branch_to_continue(BlockID from, BlockID to) +{ + auto &to_block = get(to); + if (from == to) + return; + + assert(is_continue(to)); + if (to_block.complex_continue) + { + + auto usage_counts = expression_usage_counts; + + emit_block_chain(to_block); + + + expression_usage_counts = usage_counts; + } + else + { + auto &from_block = get(from); + bool outside_control_flow = false; + uint32_t loop_dominator = 0; + + + if (from_block.merge_block) + { + + + loop_dominator = from; + } + else if (from_block.loop_dominator != BlockID(SPIRBlock::NoDominator)) + { + loop_dominator = from_block.loop_dominator; + } + + if (loop_dominator != 0) + { + auto &cfg = get_cfg_for_current_function(); + + + + outside_control_flow = cfg.node_terminates_control_flow_in_sub_graph(loop_dominator, from); + } + + + + + + + if (!outside_control_flow) + statement("continue;"); + } +} + +void CompilerGLSL::branch(BlockID from, BlockID to) +{ + flush_phi(from, to); + 
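+ // [annotation] PHI copies for the target block are flushed as plain
+ // assignments before the branch itself is emitted, so a back-edge that
+ // becomes "continue;" below already carries its updated loop values.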
flush_control_dependent_expressions(from); + + bool to_is_continue = is_continue(to); + + + if ((ir.block_meta[to] & ParsedIR::BLOCK_META_LOOP_HEADER_BIT) != 0 && get(from).loop_dominator == to) + { + + + + statement("continue;"); + } + else if (is_break(to)) + { + + + + + + if (current_emitting_switch && is_loop_break(to) && + current_emitting_switch->loop_dominator != BlockID(SPIRBlock::NoDominator) && + get(current_emitting_switch->loop_dominator).merge_block == to) + { + if (!current_emitting_switch->need_ladder_break) + { + force_recompile(); + current_emitting_switch->need_ladder_break = true; + } + + statement("_", current_emitting_switch->self, "_ladder_break = true;"); + } + statement("break;"); + } + else if (to_is_continue || from == to) + { + + + + + + + + + + + + auto &block_meta = ir.block_meta[to]; + bool branching_to_merge = + (block_meta & (ParsedIR::BLOCK_META_SELECTION_MERGE_BIT | ParsedIR::BLOCK_META_MULTISELECT_MERGE_BIT | + ParsedIR::BLOCK_META_LOOP_MERGE_BIT)) != 0; + if (!to_is_continue || !branching_to_merge) + branch_to_continue(from, to); + } + else if (!is_conditional(to)) + emit_block_chain(get(to)); + + + + + +} + +void CompilerGLSL::branch(BlockID from, uint32_t cond, BlockID true_block, BlockID false_block) +{ + auto &from_block = get(from); + BlockID merge_block = from_block.merge == SPIRBlock::MergeSelection ? from_block.next_block : BlockID(0); + + + + bool true_sub = !is_conditional(true_block); + bool false_sub = !is_conditional(false_block); + + bool true_block_is_selection_merge = true_block == merge_block; + bool false_block_is_selection_merge = false_block == merge_block; + + if (true_sub) + { + emit_block_hints(get(from)); + statement("if (", to_expression(cond), ")"); + begin_scope(); + branch(from, true_block); + end_scope(); + + + + + + if (false_sub || (!false_block_is_selection_merge && is_continue(false_block)) || is_break(false_block)) + { + statement("else"); + begin_scope(); + branch(from, false_block); + end_scope(); + } + else if (flush_phi_required(from, false_block)) + { + statement("else"); + begin_scope(); + flush_phi(from, false_block); + end_scope(); + } + } + else if (false_sub) + { + + emit_block_hints(get(from)); + statement("if (!", to_enclosed_expression(cond), ")"); + begin_scope(); + branch(from, false_block); + end_scope(); + + if ((!true_block_is_selection_merge && is_continue(true_block)) || is_break(true_block)) + { + statement("else"); + begin_scope(); + branch(from, true_block); + end_scope(); + } + else if (flush_phi_required(from, true_block)) + { + statement("else"); + begin_scope(); + flush_phi(from, true_block); + end_scope(); + } + } +} + + + + +string CompilerGLSL::emit_continue_block(uint32_t continue_block, bool follow_true_block, bool follow_false_block) +{ + auto *block = &get(continue_block); + + + + current_continue_block = block; + + SmallVector statements; + + + auto *old = redirect_statement; + redirect_statement = &statements; + + + while ((ir.block_meta[block->self] & ParsedIR::BLOCK_META_LOOP_HEADER_BIT) == 0) + { + + emit_block_instructions(*block); + + + if (block->next_block) + { + flush_phi(continue_block, block->next_block); + block = &get(block->next_block); + } + + else if (block->true_block && follow_true_block) + { + flush_phi(continue_block, block->true_block); + block = &get(block->true_block); + } + else if (block->false_block && follow_false_block) + { + flush_phi(continue_block, block->false_block); + block = &get(block->false_block); + } + else + { + SPIRV_CROSS_THROW("Invalid continue 
block detected!"); + } + } + + + redirect_statement = old; + + + + for (auto &s : statements) + { + if (!s.empty() && s.back() == ';') + s.erase(s.size() - 1, 1); + } + + current_continue_block = nullptr; + return merge(statements); +} + +void CompilerGLSL::emit_while_loop_initializers(const SPIRBlock &block) +{ + + for (auto &loop_var : block.loop_variables) + { + auto &var = get(loop_var); + statement(variable_decl(var), ";"); + } +} + +string CompilerGLSL::emit_for_loop_initializers(const SPIRBlock &block) +{ + if (block.loop_variables.empty()) + return ""; + + bool same_types = for_loop_initializers_are_same_type(block); + + + + + uint32_t missing_initializers = 0; + for (auto &variable : block.loop_variables) + { + uint32_t expr = get(variable).static_expression; + + + + if (expr == 0 || ir.ids[expr].get_type() == TypeUndef) + missing_initializers++; + } + + if (block.loop_variables.size() == 1 && missing_initializers == 0) + { + return variable_decl(get(block.loop_variables.front())); + } + else if (!same_types || missing_initializers == uint32_t(block.loop_variables.size())) + { + for (auto &loop_var : block.loop_variables) + statement(variable_decl(get(loop_var)), ";"); + return ""; + } + else + { + + + string expr; + + for (auto &loop_var : block.loop_variables) + { + uint32_t static_expr = get(loop_var).static_expression; + if (static_expr == 0 || ir.ids[static_expr].get_type() == TypeUndef) + { + statement(variable_decl(get(loop_var)), ";"); + } + else + { + auto &var = get(loop_var); + auto &type = get_variable_data_type(var); + if (expr.empty()) + { + + expr = join(to_qualifiers_glsl(var.self), type_to_glsl(type), " "); + } + else + { + expr += ", "; + + + if (type.pointer) + expr += "* "; + } + + expr += join(to_name(loop_var), " = ", to_pointer_expression(var.static_expression)); + } + } + return expr; + } +} + +bool CompilerGLSL::for_loop_initializers_are_same_type(const SPIRBlock &block) +{ + if (block.loop_variables.size() <= 1) + return true; + + uint32_t expected = 0; + Bitset expected_flags; + for (auto &var : block.loop_variables) + { + + uint32_t expr = get(var).static_expression; + if (expr == 0 || ir.ids[expr].get_type() == TypeUndef) + continue; + + if (expected == 0) + { + expected = get(var).basetype; + expected_flags = get_decoration_bitset(var); + } + else if (expected != get(var).basetype) + return false; + + + if (expected_flags != get_decoration_bitset(var)) + return false; + } + + return true; +} + +bool CompilerGLSL::attempt_emit_loop_header(SPIRBlock &block, SPIRBlock::Method method) +{ + SPIRBlock::ContinueBlockType continue_type = continue_block_type(get(block.continue_block)); + + if (method == SPIRBlock::MergeToSelectForLoop || method == SPIRBlock::MergeToSelectContinueForLoop) + { + uint32_t current_count = statement_count; + + + + emit_block_instructions(block); + + bool condition_is_temporary = forced_temporaries.find(block.condition) == end(forced_temporaries); + + + if (current_count == statement_count && condition_is_temporary) + { + switch (continue_type) + { + case SPIRBlock::ForLoop: + { + + flush_undeclared_variables(block); + + + + auto initializer = emit_for_loop_initializers(block); + auto condition = to_expression(block.condition); + + + if (execution_is_noop(get(block.true_block), get(block.merge_block))) + condition = join("!", enclose_expression(condition)); + + emit_block_hints(block); + if (method != SPIRBlock::MergeToSelectContinueForLoop) + { + auto continue_block = emit_continue_block(block.continue_block, false, false); + 
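+ // [annotation] When the block matches the for-loop pattern, initializer,
+ // condition and continue block collapse into a single GLSL header, e.g.
+ // "for (int i = 0; i < n; i++)"; non-matching loops take the
+ // disable_block_optimization path below and are re-emitted as "for (;;)"
+ // with explicit breaks.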
statement("for (", initializer, "; ", condition, "; ", continue_block, ")"); + } + else + statement("for (", initializer, "; ", condition, "; )"); + break; + } + + case SPIRBlock::WhileLoop: + { + + flush_undeclared_variables(block); + emit_while_loop_initializers(block); + emit_block_hints(block); + + auto condition = to_expression(block.condition); + + if (execution_is_noop(get(block.true_block), get(block.merge_block))) + condition = join("!", enclose_expression(condition)); + + statement("while (", condition, ")"); + break; + } + + default: + block.disable_block_optimization = true; + force_recompile(); + begin_scope(); + return false; + } + + begin_scope(); + return true; + } + else + { + block.disable_block_optimization = true; + force_recompile(); + begin_scope(); + return false; + } + } + else if (method == SPIRBlock::MergeToDirectForLoop) + { + auto &child = get(block.next_block); + + + flush_undeclared_variables(child); + + uint32_t current_count = statement_count; + + + + + emit_block_instructions(child); + + bool condition_is_temporary = forced_temporaries.find(child.condition) == end(forced_temporaries); + + if (current_count == statement_count && condition_is_temporary) + { + uint32_t target_block = child.true_block; + + switch (continue_type) + { + case SPIRBlock::ForLoop: + { + + + auto initializer = emit_for_loop_initializers(block); + auto condition = to_expression(child.condition); + + + if (execution_is_noop(get(child.true_block), get(block.merge_block))) + { + condition = join("!", enclose_expression(condition)); + target_block = child.false_block; + } + + auto continue_block = emit_continue_block(block.continue_block, false, false); + emit_block_hints(block); + statement("for (", initializer, "; ", condition, "; ", continue_block, ")"); + break; + } + + case SPIRBlock::WhileLoop: + { + emit_while_loop_initializers(block); + emit_block_hints(block); + + auto condition = to_expression(child.condition); + + if (execution_is_noop(get(child.true_block), get(block.merge_block))) + { + condition = join("!", enclose_expression(condition)); + target_block = child.false_block; + } + + statement("while (", condition, ")"); + break; + } + + default: + block.disable_block_optimization = true; + force_recompile(); + begin_scope(); + return false; + } + + begin_scope(); + branch(child.self, target_block); + return true; + } + else + { + block.disable_block_optimization = true; + force_recompile(); + begin_scope(); + return false; + } + } + else + return false; +} + +void CompilerGLSL::flush_undeclared_variables(SPIRBlock &block) +{ + for (auto &v : block.dominated_variables) + flush_variable_declaration(v); +} + +void CompilerGLSL::emit_hoisted_temporaries(SmallVector> &temporaries) +{ + + + sort(begin(temporaries), end(temporaries), + [](const pair &a, const pair &b) { return a.second < b.second; }); + + for (auto &tmp : temporaries) + { + add_local_variable_name(tmp.second); + auto &flags = ir.meta[tmp.second].decoration.decoration_flags; + auto &type = get(tmp.first); + statement(flags_to_qualifiers_glsl(type, flags), variable_decl(type, to_name(tmp.second)), ";"); + + hoisted_temporaries.insert(tmp.second); + forced_temporaries.insert(tmp.second); + + + set(tmp.second, to_name(tmp.second), tmp.first, true); + } +} + +void CompilerGLSL::emit_block_chain(SPIRBlock &block) +{ + bool select_branch_to_true_block = false; + bool select_branch_to_false_block = false; + bool skip_direct_branch = false; + bool emitted_loop_header_variables = false; + bool force_complex_continue_block = 
false; + + emit_hoisted_temporaries(block.declare_temporary); + + SPIRBlock::ContinueBlockType continue_type = SPIRBlock::ContinueNone; + if (block.continue_block) + continue_type = continue_block_type(get(block.continue_block)); + + + for (auto var_id : block.loop_variables) + { + auto &var = get(var_id); + var.loop_variable_enable = true; + + emit_variable_temporary_copies(var); + } + + + SmallVector rearm_dominated_variables(block.dominated_variables.size()); + for (size_t i = 0; i < block.dominated_variables.size(); i++) + { + uint32_t var_id = block.dominated_variables[i]; + auto &var = get(var_id); + rearm_dominated_variables[i] = var.deferred_declaration; + } + + + + + + if (!is_legacy_es() && block_is_loop_candidate(block, SPIRBlock::MergeToSelectContinueForLoop)) + { + flush_undeclared_variables(block); + if (attempt_emit_loop_header(block, SPIRBlock::MergeToSelectContinueForLoop)) + { + if (execution_is_noop(get(block.true_block), get(block.merge_block))) + select_branch_to_false_block = true; + else + select_branch_to_true_block = true; + + emitted_loop_header_variables = true; + force_complex_continue_block = true; + } + } + + else if (block_is_loop_candidate(block, SPIRBlock::MergeToSelectForLoop)) + { + flush_undeclared_variables(block); + if (attempt_emit_loop_header(block, SPIRBlock::MergeToSelectForLoop)) + { + + if (execution_is_noop(get(block.true_block), get(block.merge_block))) + select_branch_to_false_block = true; + else + select_branch_to_true_block = true; + + emitted_loop_header_variables = true; + } + } + + + else if (block_is_loop_candidate(block, SPIRBlock::MergeToDirectForLoop)) + { + flush_undeclared_variables(block); + if (attempt_emit_loop_header(block, SPIRBlock::MergeToDirectForLoop)) + { + skip_direct_branch = true; + emitted_loop_header_variables = true; + } + } + else if (continue_type == SPIRBlock::DoWhileLoop) + { + flush_undeclared_variables(block); + emit_while_loop_initializers(block); + emitted_loop_header_variables = true; + + + + + emit_hoisted_temporaries(block.potential_declare_temporary); + statement("do"); + begin_scope(); + + emit_block_instructions(block); + } + else if (block.merge == SPIRBlock::MergeLoop) + { + flush_undeclared_variables(block); + emit_while_loop_initializers(block); + emitted_loop_header_variables = true; + + + get(block.continue_block).complex_continue = true; + continue_type = SPIRBlock::ComplexLoop; + + + + + + emit_hoisted_temporaries(block.potential_declare_temporary); + statement("for (;;)"); + begin_scope(); + + emit_block_instructions(block); + } + else + { + emit_block_instructions(block); + } + + + + if (!emitted_loop_header_variables && !block.loop_variables.empty()) + { + force_recompile(); + for (auto var : block.loop_variables) + get(var).loop_variable = false; + block.loop_variables.clear(); + } + + flush_undeclared_variables(block); + bool emit_next_block = true; + + + switch (block.terminator) + { + case SPIRBlock::Direct: + + if (block.loop_dominator == block.next_block) + { + branch(block.self, block.next_block); + emit_next_block = false; + } + + else if (skip_direct_branch) + emit_next_block = false; + else if (is_continue(block.next_block) || is_break(block.next_block) || is_conditional(block.next_block)) + { + branch(block.self, block.next_block); + emit_next_block = false; + } + break; + + case SPIRBlock::Select: + + if (select_branch_to_true_block) + { + if (force_complex_continue_block) + { + assert(block.true_block == block.continue_block); + + + auto &complex_continue = 
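emit_hoisted_temporaries, defined just above and invoked from emit_block_chain here, declares temporaries before the loop construct is opened. The reason is scoping: a value defined in the loop body but consumed past the merge block needs a declaration that dominates both uses. A plain C++ illustration of that constraint:

```cpp
// Why emit_hoisted_temporaries runs before the loop construct opens: a value
// defined inside the loop body but read after the merge block needs a
// declaration whose scope dominates both uses.
static int hoisting_example(int n)
{
	int t = 0; // hoisted declaration, emitted ahead of the loop
	for (int i = 0; i < n; i++)
	{
		t = i * i; // defined inside the loop
		if (t > 20)
			break;
	}
	return t + 1; // consumed after the merge: needs the hoisted scope
}
```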
get(block.continue_block).complex_continue; + bool old_complex = complex_continue; + complex_continue = true; + branch(block.self, block.true_block); + complex_continue = old_complex; + } + else + branch(block.self, block.true_block); + } + else if (select_branch_to_false_block) + { + if (force_complex_continue_block) + { + assert(block.false_block == block.continue_block); + + + auto &complex_continue = get(block.continue_block).complex_continue; + bool old_complex = complex_continue; + complex_continue = true; + branch(block.self, block.false_block); + complex_continue = old_complex; + } + else + branch(block.self, block.false_block); + } + else + branch(block.self, block.condition, block.true_block, block.false_block); + break; + + case SPIRBlock::MultiSelect: + { + auto &type = expression_type(block.condition); + bool unsigned_case = + type.basetype == SPIRType::UInt || type.basetype == SPIRType::UShort || type.basetype == SPIRType::UByte; + + if (block.merge == SPIRBlock::MergeNone) + SPIRV_CROSS_THROW("Switch statement is not structured"); + + if (type.basetype == SPIRType::UInt64 || type.basetype == SPIRType::Int64) + { + + SPIRV_CROSS_THROW("Cannot use 64-bit switch selectors."); + } + + const char *label_suffix = ""; + if (type.basetype == SPIRType::UInt && backend.uint32_t_literal_suffix) + label_suffix = "u"; + else if (type.basetype == SPIRType::UShort) + label_suffix = backend.uint16_t_literal_suffix; + else if (type.basetype == SPIRType::Short) + label_suffix = backend.int16_t_literal_suffix; + + SPIRBlock *old_emitting_switch = current_emitting_switch; + current_emitting_switch = █ + + if (block.need_ladder_break) + statement("bool _", block.self, "_ladder_break = false;"); + + + unordered_map> case_constructs; + SmallVector block_declaration_order; + SmallVector literals_to_merge; + + + + + + for (auto &c : block.cases) + { + if (c.block != block.next_block && c.block != block.default_block) + { + if (!case_constructs.count(c.block)) + block_declaration_order.push_back(c.block); + case_constructs[c.block].push_back(c.value); + } + else if (c.block == block.next_block && block.default_block != block.next_block) + { + + + literals_to_merge.push_back(c.value); + } + } + + + if (block.default_block != block.next_block) + { + auto &default_block = get(block.default_block); + + + + + size_t num_blocks = block_declaration_order.size(); + bool injected_block = false; + + for (size_t i = 0; i < num_blocks; i++) + { + auto &case_block = get(block_declaration_order[i]); + if (execution_is_direct_branch(case_block, default_block)) + { + + block_declaration_order.insert(begin(block_declaration_order) + i + 1, block.default_block); + injected_block = true; + break; + } + else if (execution_is_direct_branch(default_block, case_block)) + { + + block_declaration_order.insert(begin(block_declaration_order) + i, block.default_block); + injected_block = true; + break; + } + } + + + if (!injected_block) + block_declaration_order.push_back(block.default_block); + + case_constructs[block.default_block] = {}; + } + + size_t num_blocks = block_declaration_order.size(); + + const auto to_case_label = [](uint32_t literal, bool is_unsigned_case) -> string { + return is_unsigned_case ? 
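The case_constructs grouping above exists because SPIR-V stores one (literal, target-block) pair per case, while C-style switches can stack several labels over one body; the default block is additionally spliced into the declaration order when fallthrough demands it. A sketch of the resulting output shape, with values invented for illustration:

```cpp
// Output shape produced by the case_constructs grouping above: several
// SPIR-V (literal, block) pairs that share a target become stacked labels
// over a single body; the default block is one more entry in the same
// declaration order.
static const char *describe(int selector)
{
	switch (selector)
	{
	case 1:
	case 2:
	case 3: // three SPIR-V cases, one target block
		return "small";
	default: // block.default_block, placed by the injection logic above
		return "other";
	}
}
```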
convert_to_string(literal) : convert_to_string(int32_t(literal)); + }; + + + + + + + for (size_t i = 1; i < num_blocks; i++) + { + if (flush_phi_required(block.self, block_declaration_order[i]) && + flush_phi_required(block_declaration_order[i - 1], block_declaration_order[i])) + { + uint32_t target_block = block_declaration_order[i]; + + + get(target_block).ignore_phi_from_block = 0; + + auto &literals = case_constructs[target_block]; + + if (literals.empty()) + { + + + + SmallVector conditions; + for (size_t j = 0; j < num_blocks; j++) + { + auto &negative_literals = case_constructs[block_declaration_order[j]]; + for (auto &case_label : negative_literals) + conditions.push_back(join(to_enclosed_expression(block.condition), + " != ", to_case_label(case_label, unsigned_case))); + } + + statement("if (", merge(conditions, " && "), ")"); + begin_scope(); + flush_phi(block.self, target_block); + end_scope(); + } + else + { + SmallVector conditions; + conditions.reserve(literals.size()); + for (auto &case_label : literals) + conditions.push_back(join(to_enclosed_expression(block.condition), + " == ", to_case_label(case_label, unsigned_case))); + statement("if (", merge(conditions, " || "), ")"); + begin_scope(); + flush_phi(block.self, target_block); + end_scope(); + } + + + get(target_block).ignore_phi_from_block = block.self; + } + } + + emit_block_hints(block); + statement("switch (", to_expression(block.condition), ")"); + begin_scope(); + + for (size_t i = 0; i < num_blocks; i++) + { + uint32_t target_block = block_declaration_order[i]; + auto &literals = case_constructs[target_block]; + + if (literals.empty()) + { + + statement("default:"); + } + else + { + for (auto &case_literal : literals) + { + + statement("case ", to_case_label(case_literal, unsigned_case), label_suffix, ":"); + } + } + + auto &case_block = get(target_block); + if (backend.support_case_fallthrough && i + 1 < num_blocks && + execution_is_direct_branch(case_block, get(block_declaration_order[i + 1]))) + { + + + + + current_emitting_switch_fallthrough = true; + } + else + current_emitting_switch_fallthrough = false; + + begin_scope(); + branch(block.self, target_block); + end_scope(); + + current_emitting_switch_fallthrough = false; + } + + + if (flush_phi_required(block.self, block.next_block)) + { + if (block.default_block == block.next_block || !literals_to_merge.empty()) + { + for (auto &case_literal : literals_to_merge) + statement("case ", to_case_label(case_literal, unsigned_case), label_suffix, ":"); + + if (block.default_block == block.next_block) + statement("default:"); + + begin_scope(); + flush_phi(block.self, block.next_block); + statement("break;"); + end_scope(); + } + } + + end_scope(); + + if (block.need_ladder_break) + { + statement("if (_", block.self, "_ladder_break)"); + begin_scope(); + statement("break;"); + end_scope(); + } + + current_emitting_switch = old_emitting_switch; + break; + } + + case SPIRBlock::Return: + { + for (auto &line : current_function->fixup_hooks_out) + line(); + + if (processing_entry_point) + emit_fixup(); + + auto &cfg = get_cfg_for_current_function(); + + if (block.return_value) + { + auto &type = expression_type(block.return_value); + if (!type.array.empty() && !backend.can_return_array) + { + + + if (ir.ids[block.return_value].get_type() != TypeUndef) + { + emit_array_copy("SPIRV_Cross_return_value", block.return_value, StorageClassFunction, + get_backing_variable_storage(block.return_value)); + } + + if 
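The to_case_label lambda completed above prints raw 32-bit case literals according to the selector's signedness. A standalone restatement, using std::to_string in place of convert_to_string:

```cpp
#include <cstdint>
#include <string>

// Standalone restatement of the to_case_label lambda above: a raw 32-bit
// literal prints unchanged for unsigned selectors, but is reinterpreted for
// signed ones, so 0xFFFFFFFFu labels as "-1" rather than "4294967295".
static std::string to_case_label(uint32_t literal, bool is_unsigned_case)
{
	return is_unsigned_case ? std::to_string(literal)
	                        : std::to_string(int32_t(literal));
}
```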
(!cfg.node_terminates_control_flow_in_sub_graph(current_function->entry_block, block.self) || + block.loop_dominator != BlockID(SPIRBlock::NoDominator)) + { + statement("return;"); + } + } + else + { + + if (ir.ids[block.return_value].get_type() != TypeUndef) + statement("return ", to_expression(block.return_value), ";"); + } + } + else if (!cfg.node_terminates_control_flow_in_sub_graph(current_function->entry_block, block.self) || + block.loop_dominator != BlockID(SPIRBlock::NoDominator)) + { + + + + + statement("return;"); + } + break; + } + + case SPIRBlock::Kill: + statement(backend.discard_literal, ";"); + break; + + case SPIRBlock::Unreachable: + emit_next_block = false; + break; + + default: + SPIRV_CROSS_THROW("Unimplemented block terminator."); + } + + if (block.next_block && emit_next_block) + { + + + if (block.merge != SPIRBlock::MergeSelection) + flush_phi(block.self, block.next_block); + + + if (!current_emitting_switch_fallthrough) + { + + + + if (is_loop_break(block.next_block)) + { + + assert(block.merge == SPIRBlock::MergeSelection); + statement("break;"); + } + else if (is_continue(block.next_block)) + { + assert(block.merge == SPIRBlock::MergeSelection); + branch_to_continue(block.self, block.next_block); + } + else if (BlockID(block.self) != block.next_block) + emit_block_chain(get(block.next_block)); + } + } + + if (block.merge == SPIRBlock::MergeLoop) + { + if (continue_type == SPIRBlock::DoWhileLoop) + { + + + + const auto &continue_block = get(block.continue_block); + bool positive_test = execution_is_noop(get(continue_block.true_block), + get(continue_block.loop_dominator)); + + uint32_t current_count = statement_count; + auto statements = emit_continue_block(block.continue_block, positive_test, !positive_test); + if (statement_count != current_count) + { + + get(block.continue_block).complex_continue = true; + force_recompile(); + } + + + auto condition = to_expression(continue_block.condition); + if (!positive_test) + condition = join("!", enclose_expression(condition)); + + end_scope_decl(join("while (", condition, ")")); + } + else + end_scope(); + + + + + if (is_continue(block.merge_block)) + branch_to_continue(block.self, block.merge_block); + else + emit_block_chain(get(block.merge_block)); + } + + + block.invalidate_expressions.clear(); + + + + assert(rearm_dominated_variables.size() == block.dominated_variables.size()); + for (size_t i = 0; i < block.dominated_variables.size(); i++) + { + uint32_t var = block.dominated_variables[i]; + get(var).deferred_declaration = rearm_dominated_variables[i]; + } + + + + for (auto &var_id : block.loop_variables) + get(var_id).loop_variable_enable = false; +} + +void CompilerGLSL::begin_scope() +{ + statement("{"); + indent++; +} + +void CompilerGLSL::end_scope() +{ + if (!indent) + SPIRV_CROSS_THROW("Popping empty indent stack."); + indent--; + statement("}"); +} + +void CompilerGLSL::end_scope(const string &trailer) +{ + if (!indent) + SPIRV_CROSS_THROW("Popping empty indent stack."); + indent--; + statement("}", trailer); +} + +void CompilerGLSL::end_scope_decl() +{ + if (!indent) + SPIRV_CROSS_THROW("Popping empty indent stack."); + indent--; + statement("};"); +} + +void CompilerGLSL::end_scope_decl(const string &decl) +{ + if (!indent) + SPIRV_CROSS_THROW("Popping empty indent stack."); + indent--; + statement("} ", decl, ";"); +} + +void CompilerGLSL::check_function_call_constraints(const uint32_t *args, uint32_t length) +{ + + + + + for (uint32_t i = 0; i < length; i++) + { + auto *var = maybe_get(args[i]); + if 
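The begin_scope/end_scope_decl pair defined above is how a do-while loop gets closed: the indent is dropped first and the trailing "while (cond)" rides on the closing brace. A reduced, runnable model of that bookkeeping (struct name invented):

```cpp
#include <cstdio>
#include <string>

// Reduced model of the scope bookkeeping defined above, showing how a
// do-while loop closes: end_scope_decl dedents, then hangs the trailing
// "while (cond)" off the closing brace.
struct ScopedEmitter
{
	unsigned indent = 0;

	void statement(const std::string &s)
	{
		printf("%*s%s\n", int(indent * 4), "", s.c_str());
	}
	void begin_scope()
	{
		statement("{");
		indent++;
	}
	void end_scope_decl(const std::string &decl)
	{
		indent--;
		statement("} " + decl + ";"); // e.g. "} while (i < n);"
	}
};
```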
(!var || !var->remapped_variable) + continue; + + auto &type = get(var->basetype); + if (type.basetype == SPIRType::Image && type.image.dim == DimSubpassData) + { + SPIRV_CROSS_THROW("Tried passing a remapped subpassInput variable to a function. " + "This will not work correctly because type-remapping information is lost. " + "To workaround, please consider not passing the subpass input as a function parameter, " + "or use in/out variables instead which do not need type remapping information."); + } + } +} + +const Instruction *CompilerGLSL::get_next_instruction_in_block(const Instruction &instr) +{ + + auto offset = uint32_t(&instr - current_emitting_block->ops.data()); + if ((offset + 1) < current_emitting_block->ops.size()) + return &current_emitting_block->ops[offset + 1]; + else + return nullptr; +} + +uint32_t CompilerGLSL::mask_relevant_memory_semantics(uint32_t semantics) +{ + return semantics & (MemorySemanticsAtomicCounterMemoryMask | MemorySemanticsImageMemoryMask | + MemorySemanticsWorkgroupMemoryMask | MemorySemanticsUniformMemoryMask | + MemorySemanticsCrossWorkgroupMemoryMask | MemorySemanticsSubgroupMemoryMask); +} + +void CompilerGLSL::emit_array_copy(const string &lhs, uint32_t rhs_id, StorageClass, StorageClass) +{ + statement(lhs, " = ", to_expression(rhs_id), ";"); +} + +void CompilerGLSL::unroll_array_from_complex_load(uint32_t target_id, uint32_t source_id, std::string &expr) +{ + if (!backend.force_gl_in_out_block) + return; + + + auto *var = maybe_get(source_id); + if (!var) + return; + + if (var->storage != StorageClassInput) + return; + + auto &type = get_variable_data_type(*var); + if (type.array.empty()) + return; + + auto builtin = BuiltIn(get_decoration(var->self, DecorationBuiltIn)); + bool is_builtin = is_builtin_variable(*var) && (builtin == BuiltInPointSize || builtin == BuiltInPosition); + bool is_tess = is_tessellation_shader(); + + + + + + + if (is_builtin || is_tess) + { + auto new_expr = join("_", target_id, "_unrolled"); + statement(variable_decl(type, new_expr, target_id), ";"); + string array_expr; + if (type.array_size_literal.front()) + { + array_expr = convert_to_string(type.array.front()); + if (type.array.front() == 0) + SPIRV_CROSS_THROW("Cannot unroll an array copy from unsized array."); + } + else + array_expr = to_expression(type.array.front()); + + + statement("for (int i = 0; i < int(", array_expr, "); i++)"); + begin_scope(); + if (is_builtin) + statement(new_expr, "[i] = gl_in[i].", expr, ";"); + else + statement(new_expr, "[i] = ", expr, "[i];"); + end_scope(); + + expr = move(new_expr); + } +} + +void CompilerGLSL::bitcast_from_builtin_load(uint32_t source_id, std::string &expr, const SPIRType &expr_type) +{ + auto *var = maybe_get_backing_variable(source_id); + if (var) + source_id = var->self; + + + if (!has_decoration(source_id, DecorationBuiltIn)) + return; + + auto builtin = static_cast(get_decoration(source_id, DecorationBuiltIn)); + auto expected_type = expr_type.basetype; + + + switch (builtin) + { + case BuiltInLayer: + case BuiltInPrimitiveId: + case BuiltInViewportIndex: + case BuiltInInstanceId: + case BuiltInInstanceIndex: + case BuiltInVertexId: + case BuiltInVertexIndex: + case BuiltInSampleId: + case BuiltInBaseVertex: + case BuiltInBaseInstance: + case BuiltInDrawIndex: + case BuiltInFragStencilRefEXT: + expected_type = SPIRType::Int; + break; + + case BuiltInGlobalInvocationId: + case BuiltInLocalInvocationId: + case BuiltInWorkgroupId: + case BuiltInLocalInvocationIndex: + case BuiltInWorkgroupSize: + case 
BuiltInNumWorkgroups: + expected_type = SPIRType::UInt; + break; + + default: + break; + } + + if (expected_type != expr_type.basetype) + expr = bitcast_expression(expr_type, expected_type, expr); +} + +void CompilerGLSL::bitcast_to_builtin_store(uint32_t target_id, std::string &expr, const SPIRType &expr_type) +{ + + if (!has_decoration(target_id, DecorationBuiltIn)) + return; + + auto builtin = static_cast(get_decoration(target_id, DecorationBuiltIn)); + auto expected_type = expr_type.basetype; + + + switch (builtin) + { + case BuiltInLayer: + case BuiltInPrimitiveId: + case BuiltInViewportIndex: + case BuiltInFragStencilRefEXT: + expected_type = SPIRType::Int; + break; + + default: + break; + } + + if (expected_type != expr_type.basetype) + { + auto type = expr_type; + type.basetype = expected_type; + expr = bitcast_expression(type, expr_type.basetype, expr); + } +} + +void CompilerGLSL::convert_non_uniform_expression(const SPIRType &type, std::string &expr) +{ + if (*backend.nonuniform_qualifier == '\0') + return; + + + if (type.basetype == SPIRType::Sampler || type.basetype == SPIRType::SampledImage || + type.basetype == SPIRType::Image) + { + + + + + + + + + auto start_array_index = expr.find_first_of('['); + auto end_array_index = expr.find_last_of(']'); + + + if (start_array_index == string::npos || end_array_index == string::npos || end_array_index < start_array_index) + return; + + start_array_index++; + + expr = join(expr.substr(0, start_array_index), backend.nonuniform_qualifier, "(", + expr.substr(start_array_index, end_array_index - start_array_index), ")", + expr.substr(end_array_index, string::npos)); + } +} + +void CompilerGLSL::emit_block_hints(const SPIRBlock &) +{ +} + +void CompilerGLSL::preserve_alias_on_reset(uint32_t id) +{ + preserved_aliases[id] = get_name(id); +} + +void CompilerGLSL::reset_name_caches() +{ + for (auto &preserved : preserved_aliases) + set_name(preserved.first, preserved.second); + + preserved_aliases.clear(); + resource_names.clear(); + block_input_names.clear(); + block_output_names.clear(); + block_ubo_names.clear(); + block_ssbo_names.clear(); + block_names.clear(); + function_overloads.clear(); +} + +void CompilerGLSL::fixup_type_alias() +{ + + + + ir.for_each_typed_id([&](uint32_t self, SPIRType &type) { + if (type.type_alias && type_is_block_like(type)) + { + + ir.for_each_typed_id([&](uint32_t other_id, SPIRType &other_type) { + if (other_id == type.self) + return; + + if (other_type.type_alias == type.type_alias) + other_type.type_alias = type.self; + }); + + this->get(type.type_alias).type_alias = self; + type.type_alias = 0; + } + }); + + ir.for_each_typed_id([&](uint32_t, SPIRType &type) { + if (type.type_alias && type_is_block_like(type)) + { + + type.type_alias = 0; + } + else if (type.type_alias && !type_is_block_like(this->get(type.type_alias))) + { + + + + + type.type_alias = 0; + } + }); +} + +void CompilerGLSL::reorder_type_alias() +{ + + + + auto loop_lock = ir.create_loop_hard_lock(); + + auto &type_ids = ir.ids_for_type[TypeType]; + for (auto alias_itr = begin(type_ids); alias_itr != end(type_ids); ++alias_itr) + { + auto &type = get(*alias_itr); + if (type.type_alias != TypeID(0) && + !has_extended_decoration(type.type_alias, SPIRVCrossDecorationBufferBlockRepacked)) + { + + auto master_itr = find(begin(type_ids), end(type_ids), ID(type.type_alias)); + assert(master_itr != end(type_ids)); + + if (alias_itr < master_itr) + { + + auto &joined_types = ir.ids_for_constant_or_type; + auto alt_alias_itr = 
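convert_non_uniform_expression above does pure string surgery: it locates the outermost subscript of a descriptor-array access and wraps it in the backend's non-uniform qualifier. A standalone mirror of that substr arithmetic:

```cpp
#include <string>

// Standalone mirror of the substr arithmetic above: the outermost subscript
// of an arrayed resource access is wrapped in the non-uniform qualifier.
static std::string wrap_nonuniform(const std::string &expr,
                                   const std::string &qualifier = "nonuniformEXT")
{
	auto start = expr.find_first_of('[');
	auto end = expr.find_last_of(']');
	if (start == std::string::npos || end == std::string::npos || end < start)
		return expr; // no subscript, nothing to qualify

	start++; // keep the opening '['
	return expr.substr(0, start) + qualifier + "(" +
	       expr.substr(start, end - start) + ")" + expr.substr(end);
}
// wrap_nonuniform("textures[index]") == "textures[nonuniformEXT(index)]"
```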
find(begin(joined_types), end(joined_types), *alias_itr); + auto alt_master_itr = find(begin(joined_types), end(joined_types), *master_itr); + assert(alt_alias_itr != end(joined_types)); + assert(alt_master_itr != end(joined_types)); + + swap(*alias_itr, *master_itr); + swap(*alt_alias_itr, *alt_master_itr); + } + } + } +} + +void CompilerGLSL::emit_line_directive(uint32_t file_id, uint32_t line_literal) +{ + + + if (redirect_statement) + return; + + if (options.emit_line_directives) + { + require_extension_internal("GL_GOOGLE_cpp_style_line_directive"); + statement_no_indent("#line ", line_literal, " \"", get(file_id).str, "\""); + } +} + +void CompilerGLSL::propagate_nonuniform_qualifier(uint32_t id) +{ + + + + + if (!has_decoration(id, DecorationNonUniformEXT)) + { + set_decoration(id, DecorationNonUniformEXT); + force_recompile(); + } + + auto *e = maybe_get(id); + auto *combined = maybe_get(id); + auto *chain = maybe_get(id); + if (e) + { + for (auto &expr : e->expression_dependencies) + propagate_nonuniform_qualifier(expr); + for (auto &expr : e->implied_read_expressions) + propagate_nonuniform_qualifier(expr); + } + else if (combined) + { + propagate_nonuniform_qualifier(combined->image); + propagate_nonuniform_qualifier(combined->sampler); + } + else if (chain) + { + for (auto &expr : chain->implied_read_expressions) + propagate_nonuniform_qualifier(expr); + } +} diff --git a/third_party/rust/spirv_cross/src/vendor/SPIRV-Cross/spirv_glsl.hpp b/third_party/rust/spirv_cross/src/vendor/SPIRV-Cross/spirv_glsl.hpp new file mode 100644 index 000000000000..80afcb266665 --- /dev/null +++ b/third_party/rust/spirv_cross/src/vendor/SPIRV-Cross/spirv_glsl.hpp @@ -0,0 +1,712 @@ + + + + + + + + + + + + + + + + +#ifndef SPIRV_CROSS_GLSL_HPP +#define SPIRV_CROSS_GLSL_HPP + +#include "GLSL.std.450.h" +#include "spirv_cross.hpp" +#include +#include +#include + +namespace SPIRV_CROSS_NAMESPACE +{ +enum PlsFormat +{ + PlsNone = 0, + + PlsR11FG11FB10F, + PlsR32F, + PlsRG16F, + PlsRGB10A2, + PlsRGBA8, + PlsRG16, + + PlsRGBA8I, + PlsRG16I, + + PlsRGB10A2UI, + PlsRGBA8UI, + PlsRG16UI, + PlsR32UI +}; + +struct PlsRemap +{ + uint32_t id; + PlsFormat format; +}; + +enum AccessChainFlagBits +{ + ACCESS_CHAIN_INDEX_IS_LITERAL_BIT = 1 << 0, + ACCESS_CHAIN_CHAIN_ONLY_BIT = 1 << 1, + ACCESS_CHAIN_PTR_CHAIN_BIT = 1 << 2, + ACCESS_CHAIN_SKIP_REGISTER_EXPRESSION_READ_BIT = 1 << 3 +}; +typedef uint32_t AccessChainFlags; + +class CompilerGLSL : public Compiler +{ +public: + struct Options + { + + uint32_t version = 450; + + + bool es = false; + + + bool force_temporary = false; + + + + bool vulkan_semantics = false; + + + + + + bool separate_shader_objects = false; + + + + + + bool flatten_multidimensional_arrays = false; + + + + + + bool enable_420pack_extension = true; + + + bool emit_push_constant_as_uniform_buffer = false; + + + + bool emit_uniform_buffer_as_plain_uniforms = false; + + + + bool emit_line_directives = false; + + enum Precision + { + DontCare, + Lowp, + Mediump, + Highp + }; + + struct + { + + + + bool fixup_clipspace = false; + + + bool flip_vert_y = false; + + + + + + bool support_nonzero_base_instance = true; + } vertex; + + struct + { + + + Precision default_float_precision = Mediump; + Precision default_int_precision = Highp; + } fragment; + }; + + void remap_pixel_local_storage(std::vector inputs, std::vector outputs) + { + pls_inputs = std::move(inputs); + pls_outputs = std::move(outputs); + remap_pls_variables(); + } + + explicit CompilerGLSL(std::vector spirv_) + : 
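The Options block declared above is the public tuning surface of CompilerGLSL. A hypothetical usage sketch follows; the method and field names come from this header, while the vector's element type (uint32_t) and the default spirv_cross namespace are assumptions:

```cpp
#include <cstdint>
#include <string>
#include <utility>
#include <vector>

#include "spirv_glsl.hpp"

// Hypothetical driver for the Options block above. Method and field names
// come from this header; std::vector<uint32_t> as the word type and the
// default spirv_cross namespace are assumptions.
std::string compile_to_essl(std::vector<uint32_t> spirv_words)
{
	spirv_cross::CompilerGLSL compiler(std::move(spirv_words));

	auto opts = compiler.get_common_options();
	opts.es = true;     // target ESSL rather than desktop GLSL
	opts.version = 300; // "#version 300 es"; is_legacy_es() is then false
	opts.vulkan_semantics = false;
	compiler.set_common_options(opts);

	return compiler.compile();
}
```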
Compiler(std::move(spirv_)) + { + init(); + } + + CompilerGLSL(const uint32_t *ir_, size_t word_count) + : Compiler(ir_, word_count) + { + init(); + } + + explicit CompilerGLSL(const ParsedIR &ir_) + : Compiler(ir_) + { + init(); + } + + explicit CompilerGLSL(ParsedIR &&ir_) + : Compiler(std::move(ir_)) + { + init(); + } + + const Options &get_common_options() const + { + return options; + } + + void set_common_options(const Options &opts) + { + options = opts; + } + + std::string compile() override; + + + + std::string get_partial_source(); + + + + + + + + + + + + void add_header_line(const std::string &str); + + + + void require_extension(const std::string &ext); + + + + + + + void flatten_buffer_block(VariableID id); + +protected: + void reset(); + void emit_function(SPIRFunction &func, const Bitset &return_flags); + + bool has_extension(const std::string &ext) const; + void require_extension_internal(const std::string &ext); + + + virtual void emit_function_prototype(SPIRFunction &func, const Bitset &return_flags); + + SPIRBlock *current_emitting_block = nullptr; + SPIRBlock *current_emitting_switch = nullptr; + bool current_emitting_switch_fallthrough = false; + + virtual void emit_instruction(const Instruction &instr); + void emit_block_instructions(SPIRBlock &block); + virtual void emit_glsl_op(uint32_t result_type, uint32_t result_id, uint32_t op, const uint32_t *args, + uint32_t count); + virtual void emit_spv_amd_shader_ballot_op(uint32_t result_type, uint32_t result_id, uint32_t op, + const uint32_t *args, uint32_t count); + virtual void emit_spv_amd_shader_explicit_vertex_parameter_op(uint32_t result_type, uint32_t result_id, uint32_t op, + const uint32_t *args, uint32_t count); + virtual void emit_spv_amd_shader_trinary_minmax_op(uint32_t result_type, uint32_t result_id, uint32_t op, + const uint32_t *args, uint32_t count); + virtual void emit_spv_amd_gcn_shader_op(uint32_t result_type, uint32_t result_id, uint32_t op, const uint32_t *args, + uint32_t count); + virtual void emit_header(); + void emit_line_directive(uint32_t file_id, uint32_t line_literal); + void build_workgroup_size(SmallVector &arguments, const SpecializationConstant &x, + const SpecializationConstant &y, const SpecializationConstant &z); + + virtual void emit_sampled_image_op(uint32_t result_type, uint32_t result_id, uint32_t image_id, uint32_t samp_id); + virtual void emit_texture_op(const Instruction &i); + virtual std::string to_texture_op(const Instruction &i, bool *forward, + SmallVector &inherited_expressions); + virtual void emit_subgroup_op(const Instruction &i); + virtual std::string type_to_glsl(const SPIRType &type, uint32_t id = 0); + virtual std::string builtin_to_glsl(spv::BuiltIn builtin, spv::StorageClass storage); + virtual void emit_struct_member(const SPIRType &type, uint32_t member_type_id, uint32_t index, + const std::string &qualifier = "", uint32_t base_offset = 0); + virtual void emit_struct_padding_target(const SPIRType &type); + virtual std::string image_type_glsl(const SPIRType &type, uint32_t id = 0); + std::string constant_expression(const SPIRConstant &c); + std::string constant_op_expression(const SPIRConstantOp &cop); + virtual std::string constant_expression_vector(const SPIRConstant &c, uint32_t vector); + virtual void emit_fixup(); + virtual std::string variable_decl(const SPIRType &type, const std::string &name, uint32_t id = 0); + virtual std::string to_func_call_arg(const SPIRFunction::Parameter &arg, uint32_t id); + virtual std::string to_function_name(VariableID img, 
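The statement() template above routes output one of three ways: dropped (except for counting) while a recompile is pending, captured into redirect_statement while a continue block is being materialized, or appended to the main buffer with indentation. A reduced model of the capture mechanism, with the recompile path omitted and the struct name invented:

```cpp
#include <sstream>
#include <string>
#include <vector>

// Reduced model of the routing in statement() above: lines are captured
// into a side vector while a continue block is being materialized, and
// otherwise written to the main buffer with indentation.
struct StatementSink
{
	std::ostringstream buffer;
	std::vector<std::string> *redirect_statement = nullptr;
	unsigned indent = 0;

	void statement(const std::string &line)
	{
		if (redirect_statement)
		{
			redirect_statement->push_back(line); // captured, not printed
			return;
		}
		for (unsigned i = 0; i < indent; i++)
			buffer << "    ";
		buffer << line << '\n';
	}
};
```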
const SPIRType &imgtype, bool is_fetch, bool is_gather, + bool is_proj, bool has_array_offsets, bool has_offset, bool has_grad, + bool has_dref, uint32_t lod, uint32_t minlod); + virtual std::string to_function_args(VariableID img, const SPIRType &imgtype, bool is_fetch, bool is_gather, + bool is_proj, uint32_t coord, uint32_t coord_components, uint32_t dref, + uint32_t grad_x, uint32_t grad_y, uint32_t lod, uint32_t coffset, + uint32_t offset, uint32_t bias, uint32_t comp, uint32_t sample, + uint32_t minlod, bool *p_forward); + virtual void emit_buffer_block(const SPIRVariable &type); + virtual void emit_push_constant_block(const SPIRVariable &var); + virtual void emit_uniform(const SPIRVariable &var); + virtual std::string unpack_expression_type(std::string expr_str, const SPIRType &type, uint32_t physical_type_id, + bool packed_type, bool row_major); + + virtual bool builtin_translates_to_nonarray(spv::BuiltIn builtin) const; + + StringStream<> buffer; + + template + inline void statement_inner(T &&t) + { + buffer << std::forward(t); + statement_count++; + } + + template + inline void statement_inner(T &&t, Ts &&... ts) + { + buffer << std::forward(t); + statement_count++; + statement_inner(std::forward(ts)...); + } + + template + inline void statement(Ts &&... ts) + { + if (is_forcing_recompilation()) + { + + + statement_count++; + return; + } + + if (redirect_statement) + { + redirect_statement->push_back(join(std::forward(ts)...)); + statement_count++; + } + else + { + for (uint32_t i = 0; i < indent; i++) + buffer << " "; + statement_inner(std::forward(ts)...); + buffer << '\n'; + } + } + + template + inline void statement_no_indent(Ts &&... ts) + { + auto old_indent = indent; + indent = 0; + statement(std::forward(ts)...); + indent = old_indent; + } + + + + + SmallVector *redirect_statement = nullptr; + const SPIRBlock *current_continue_block = nullptr; + + void begin_scope(); + void end_scope(); + void end_scope(const std::string &trailer); + void end_scope_decl(); + void end_scope_decl(const std::string &decl); + + Options options; + + virtual std::string type_to_array_glsl( + const SPIRType &type); + std::string to_array_size(const SPIRType &type, uint32_t index); + uint32_t to_array_size_literal(const SPIRType &type, uint32_t index) const; + uint32_t to_array_size_literal(const SPIRType &type) const; + virtual std::string variable_decl(const SPIRVariable &variable); + std::string variable_decl_function_local(SPIRVariable &variable); + + void add_local_variable_name(uint32_t id); + void add_resource_name(uint32_t id); + void add_member_name(SPIRType &type, uint32_t name); + void add_function_overload(const SPIRFunction &func); + + virtual bool is_non_native_row_major_matrix(uint32_t id); + virtual bool member_is_non_native_row_major_matrix(const SPIRType &type, uint32_t index); + bool member_is_remapped_physical_type(const SPIRType &type, uint32_t index) const; + bool member_is_packed_physical_type(const SPIRType &type, uint32_t index) const; + virtual std::string convert_row_major_matrix(std::string exp_str, const SPIRType &exp_type, + uint32_t physical_type_id, bool is_packed); + + std::unordered_set local_variable_names; + std::unordered_set resource_names; + std::unordered_set block_input_names; + std::unordered_set block_output_names; + std::unordered_set block_ubo_names; + std::unordered_set block_ssbo_names; + std::unordered_set block_names; + std::unordered_map> function_overloads; + std::unordered_map preserved_aliases; + void preserve_alias_on_reset(uint32_t id); + 
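preserve_alias_on_reset and reset_name_caches, declared just below and defined earlier in this hunk, let compilation run multiple passes under force_recompile: per-pass name caches are dropped, while aliases explicitly preserved are re-applied instead of being regenerated. A reduced model of that contract:

```cpp
#include <cstdint>
#include <string>
#include <unordered_map>

// Reduced model of the preserve/reset contract: per-pass name caches are
// dropped on recompile, while explicitly preserved aliases are re-applied.
struct NameCaches
{
	std::unordered_map<uint32_t, std::string> names;
	std::unordered_map<uint32_t, std::string> preserved;

	void preserve_alias_on_reset(uint32_t id)
	{
		preserved[id] = names[id];
	}
	void reset_name_caches()
	{
		names.clear();
		for (auto &p : preserved)
			names[p.first] = p.second; // survives the recompile pass
		preserved.clear();
		// the real method also clears resource/block name sets and overloads
	}
};
```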
void reset_name_caches(); + + bool processing_entry_point = false; + + + + struct BackendVariations + { + std::string discard_literal = "discard"; + std::string demote_literal = "demote"; + std::string null_pointer_literal = ""; + bool float_literal_suffix = false; + bool double_literal_suffix = true; + bool uint32_t_literal_suffix = true; + bool long_long_literal_suffix = false; + const char *basic_int_type = "int"; + const char *basic_uint_type = "uint"; + const char *basic_int8_type = "int8_t"; + const char *basic_uint8_type = "uint8_t"; + const char *basic_int16_type = "int16_t"; + const char *basic_uint16_type = "uint16_t"; + const char *int16_t_literal_suffix = "s"; + const char *uint16_t_literal_suffix = "us"; + const char *nonuniform_qualifier = "nonuniformEXT"; + const char *boolean_mix_function = "mix"; + bool swizzle_is_function = false; + bool shared_is_implied = false; + bool unsized_array_supported = true; + bool explicit_struct_type = false; + bool use_initializer_list = false; + bool use_typed_initializer_list = false; + bool can_declare_struct_inline = true; + bool can_declare_arrays_inline = true; + bool native_row_major_matrix = true; + bool use_constructor_splatting = true; + bool allow_precision_qualifiers = false; + bool can_swizzle_scalar = false; + bool force_gl_in_out_block = false; + bool can_return_array = true; + bool allow_truncated_access_chain = false; + bool supports_extensions = false; + bool supports_empty_struct = false; + bool array_is_value_type = true; + bool comparison_image_samples_scalar = false; + bool native_pointers = false; + bool support_small_type_sampling_result = false; + bool support_case_fallthrough = true; + bool use_array_constructor = false; + } backend; + + void emit_struct(SPIRType &type); + void emit_resources(); + void emit_buffer_block_native(const SPIRVariable &var); + void emit_buffer_reference_block(SPIRType &type, bool forward_declaration); + void emit_buffer_block_legacy(const SPIRVariable &var); + void emit_buffer_block_flattened(const SPIRVariable &type); + void emit_declared_builtin_block(spv::StorageClass storage, spv::ExecutionModel model); + void emit_push_constant_block_vulkan(const SPIRVariable &var); + void emit_push_constant_block_glsl(const SPIRVariable &var); + void emit_interface_block(const SPIRVariable &type); + void emit_flattened_io_block(const SPIRVariable &var, const char *qual); + void emit_block_chain(SPIRBlock &block); + void emit_hoisted_temporaries(SmallVector> &temporaries); + std::string constant_value_macro_name(uint32_t id); + void emit_constant(const SPIRConstant &constant); + void emit_specialization_constant_op(const SPIRConstantOp &constant); + std::string emit_continue_block(uint32_t continue_block, bool follow_true_block, bool follow_false_block); + bool attempt_emit_loop_header(SPIRBlock &block, SPIRBlock::Method method); + + void branch(BlockID from, BlockID to); + void branch_to_continue(BlockID from, BlockID to); + void branch(BlockID from, uint32_t cond, BlockID true_block, BlockID false_block); + void flush_phi(BlockID from, BlockID to); + void flush_variable_declaration(uint32_t id); + void flush_undeclared_variables(SPIRBlock &block); + void emit_variable_temporary_copies(const SPIRVariable &var); + + bool should_dereference(uint32_t id); + bool should_forward(uint32_t id) const; + bool should_suppress_usage_tracking(uint32_t id) const; + void emit_mix_op(uint32_t result_type, uint32_t id, uint32_t left, uint32_t right, uint32_t lerp); + void emit_nminmax_op(uint32_t result_type, 
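The BackendVariations block above is the main specialization point for subclasses: derived compilers flip these knobs in their constructors rather than overriding every emit function. A self-contained sketch with a hypothetical backend; the field names are copied from the struct, but the subclass and its chosen values are purely illustrative, not any real backend's settings:

```cpp
#include <string>

// Illustrative specialization of a few BackendVariations knobs. The field
// names match the struct above; the values are hypothetical.
struct BackendKnobs
{
	std::string discard_literal = "discard";
	bool uint32_t_literal_suffix = true;
	bool explicit_struct_type = false;
};

struct HypotheticalBackend
{
	BackendKnobs backend;

	HypotheticalBackend()
	{
		backend.discard_literal = "discard_fragment()"; // text used for OpKill
		backend.uint32_t_literal_suffix = false;        // emit "1", not "1u"
		backend.explicit_struct_type = true;            // "struct Foo" in decls
	}
};
```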
uint32_t id, uint32_t op0, uint32_t op1, GLSLstd450 op); + bool to_trivial_mix_op(const SPIRType &type, std::string &op, uint32_t left, uint32_t right, uint32_t lerp); + void emit_quaternary_func_op(uint32_t result_type, uint32_t result_id, uint32_t op0, uint32_t op1, uint32_t op2, + uint32_t op3, const char *op); + void emit_trinary_func_op(uint32_t result_type, uint32_t result_id, uint32_t op0, uint32_t op1, uint32_t op2, + const char *op); + void emit_binary_func_op(uint32_t result_type, uint32_t result_id, uint32_t op0, uint32_t op1, const char *op); + + void emit_unary_func_op_cast(uint32_t result_type, uint32_t result_id, uint32_t op0, const char *op, + SPIRType::BaseType input_type, SPIRType::BaseType expected_result_type); + void emit_binary_func_op_cast(uint32_t result_type, uint32_t result_id, uint32_t op0, uint32_t op1, const char *op, + SPIRType::BaseType input_type, bool skip_cast_if_equal_type); + void emit_trinary_func_op_cast(uint32_t result_type, uint32_t result_id, uint32_t op0, uint32_t op1, uint32_t op2, + const char *op, SPIRType::BaseType input_type); + void emit_trinary_func_op_bitextract(uint32_t result_type, uint32_t result_id, uint32_t op0, uint32_t op1, + uint32_t op2, const char *op, SPIRType::BaseType expected_result_type, + SPIRType::BaseType input_type0, SPIRType::BaseType input_type1, + SPIRType::BaseType input_type2); + void emit_bitfield_insert_op(uint32_t result_type, uint32_t result_id, uint32_t op0, uint32_t op1, uint32_t op2, + uint32_t op3, const char *op, SPIRType::BaseType offset_count_type); + + void emit_unary_func_op(uint32_t result_type, uint32_t result_id, uint32_t op0, const char *op); + void emit_unrolled_unary_op(uint32_t result_type, uint32_t result_id, uint32_t operand, const char *op); + void emit_binary_op(uint32_t result_type, uint32_t result_id, uint32_t op0, uint32_t op1, const char *op); + void emit_unrolled_binary_op(uint32_t result_type, uint32_t result_id, uint32_t op0, uint32_t op1, const char *op, + bool negate, SPIRType::BaseType expected_type); + void emit_binary_op_cast(uint32_t result_type, uint32_t result_id, uint32_t op0, uint32_t op1, const char *op, + SPIRType::BaseType input_type, bool skip_cast_if_equal_type); + + SPIRType binary_op_bitcast_helper(std::string &cast_op0, std::string &cast_op1, SPIRType::BaseType &input_type, + uint32_t op0, uint32_t op1, bool skip_cast_if_equal_type); + + std::string to_ternary_expression(const SPIRType &result_type, uint32_t select, uint32_t true_value, + uint32_t false_value); + + void emit_unary_op(uint32_t result_type, uint32_t result_id, uint32_t op0, const char *op); + bool expression_is_forwarded(uint32_t id) const; + bool expression_suppresses_usage_tracking(uint32_t id) const; + SPIRExpression &emit_op(uint32_t result_type, uint32_t result_id, const std::string &rhs, bool forward_rhs, + bool suppress_usage_tracking = false); + + void access_chain_internal_append_index(std::string &expr, uint32_t base, const SPIRType *type, + AccessChainFlags flags, bool &access_chain_is_arrayed, uint32_t index); + + std::string access_chain_internal(uint32_t base, const uint32_t *indices, uint32_t count, AccessChainFlags flags, + AccessChainMeta *meta); + + std::string access_chain(uint32_t base, const uint32_t *indices, uint32_t count, const SPIRType &target_type, + AccessChainMeta *meta = nullptr, bool ptr_chain = false); + + std::string flattened_access_chain(uint32_t base, const uint32_t *indices, uint32_t count, + const SPIRType &target_type, uint32_t offset, uint32_t matrix_stride, + bool 
need_transpose); + std::string flattened_access_chain_struct(uint32_t base, const uint32_t *indices, uint32_t count, + const SPIRType &target_type, uint32_t offset); + std::string flattened_access_chain_matrix(uint32_t base, const uint32_t *indices, uint32_t count, + const SPIRType &target_type, uint32_t offset, uint32_t matrix_stride, + bool need_transpose); + std::string flattened_access_chain_vector(uint32_t base, const uint32_t *indices, uint32_t count, + const SPIRType &target_type, uint32_t offset, uint32_t matrix_stride, + bool need_transpose); + std::pair flattened_access_chain_offset(const SPIRType &basetype, const uint32_t *indices, + uint32_t count, uint32_t offset, + uint32_t word_stride, bool *need_transpose = nullptr, + uint32_t *matrix_stride = nullptr, + bool ptr_chain = false); + + const char *index_to_swizzle(uint32_t index); + std::string remap_swizzle(const SPIRType &result_type, uint32_t input_components, const std::string &expr); + std::string declare_temporary(uint32_t type, uint32_t id); + void emit_uninitialized_temporary(uint32_t type, uint32_t id); + SPIRExpression &emit_uninitialized_temporary_expression(uint32_t type, uint32_t id); + void append_global_func_args(const SPIRFunction &func, uint32_t index, SmallVector &arglist); + std::string to_expression(uint32_t id, bool register_expression_read = true); + std::string to_composite_constructor_expression(uint32_t id); + std::string to_rerolled_array_expression(const std::string &expr, const SPIRType &type); + std::string to_enclosed_expression(uint32_t id, bool register_expression_read = true); + std::string to_unpacked_expression(uint32_t id, bool register_expression_read = true); + std::string to_unpacked_row_major_matrix_expression(uint32_t id); + std::string to_enclosed_unpacked_expression(uint32_t id, bool register_expression_read = true); + std::string to_dereferenced_expression(uint32_t id, bool register_expression_read = true); + std::string to_pointer_expression(uint32_t id, bool register_expression_read = true); + std::string to_enclosed_pointer_expression(uint32_t id, bool register_expression_read = true); + std::string to_extract_component_expression(uint32_t id, uint32_t index); + std::string enclose_expression(const std::string &expr); + std::string dereference_expression(const SPIRType &expression_type, const std::string &expr); + std::string address_of_expression(const std::string &expr); + void strip_enclosed_expression(std::string &expr); + std::string to_member_name(const SPIRType &type, uint32_t index); + virtual std::string to_member_reference(uint32_t base, const SPIRType &type, uint32_t index, bool ptr_chain); + std::string type_to_glsl_constructor(const SPIRType &type); + std::string argument_decl(const SPIRFunction::Parameter &arg); + virtual std::string to_qualifiers_glsl(uint32_t id); + const char *to_precision_qualifiers_glsl(uint32_t id); + virtual const char *to_storage_qualifiers_glsl(const SPIRVariable &var); + const char *flags_to_qualifiers_glsl(const SPIRType &type, const Bitset &flags); + const char *format_to_glsl(spv::ImageFormat format); + virtual std::string layout_for_member(const SPIRType &type, uint32_t index); + virtual std::string to_interpolation_qualifiers(const Bitset &flags); + std::string layout_for_variable(const SPIRVariable &variable); + std::string to_combined_image_sampler(VariableID image_id, VariableID samp_id); + virtual bool skip_argument(uint32_t id) const; + virtual void emit_array_copy(const std::string &lhs, uint32_t rhs_id, spv::StorageClass 
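index_to_swizzle, declared above, backs to_extract_component_expression: extracting component 2 of vector v prints as v.z. A sketch of the implied contract (the out-of-range behavior here is an assumption; the definition is not part of this hunk):

```cpp
#include <cstdint>
#include <stdexcept>

// Sketch of the index_to_swizzle contract declared above, as used by
// to_extract_component_expression: component 2 of "v" prints as "v.z".
static const char *index_to_swizzle(uint32_t index)
{
	switch (index)
	{
	case 0: return "x";
	case 1: return "y";
	case 2: return "z";
	case 3: return "w";
	default: throw std::out_of_range("Swizzle index out of range.");
	}
}
```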
lhs_storage, + spv::StorageClass rhs_storage); + virtual void emit_block_hints(const SPIRBlock &block); + virtual std::string to_initializer_expression(const SPIRVariable &var); + + bool buffer_is_packing_standard(const SPIRType &type, BufferPackingStandard packing, uint32_t start_offset = 0, + uint32_t end_offset = ~(0u)); + std::string buffer_to_packing_standard(const SPIRType &type, bool support_std430_without_scalar_layout); + + uint32_t type_to_packed_base_size(const SPIRType &type, BufferPackingStandard packing); + uint32_t type_to_packed_alignment(const SPIRType &type, const Bitset &flags, BufferPackingStandard packing); + uint32_t type_to_packed_array_stride(const SPIRType &type, const Bitset &flags, BufferPackingStandard packing); + uint32_t type_to_packed_size(const SPIRType &type, const Bitset &flags, BufferPackingStandard packing); + + std::string bitcast_glsl(const SPIRType &result_type, uint32_t arg); + virtual std::string bitcast_glsl_op(const SPIRType &result_type, const SPIRType &argument_type); + + std::string bitcast_expression(SPIRType::BaseType target_type, uint32_t arg); + std::string bitcast_expression(const SPIRType &target_type, SPIRType::BaseType expr_type, const std::string &expr); + + std::string build_composite_combiner(uint32_t result_type, const uint32_t *elems, uint32_t length); + bool remove_duplicate_swizzle(std::string &op); + bool remove_unity_swizzle(uint32_t base, std::string &op); + + + + bool check_atomic_image(uint32_t id); + + virtual void replace_illegal_names(); + virtual void emit_entry_point_declarations(); + + void replace_fragment_output(SPIRVariable &var); + void replace_fragment_outputs(); + bool check_explicit_lod_allowed(uint32_t lod); + std::string legacy_tex_op(const std::string &op, const SPIRType &imgtype, uint32_t lod, uint32_t id); + + uint32_t indent = 0; + + std::unordered_set emitted_functions; + + + std::unordered_set flushed_phi_variables; + + std::unordered_set flattened_buffer_blocks; + std::unordered_set flattened_structs; + + std::string load_flattened_struct(SPIRVariable &var); + std::string to_flattened_struct_member(const SPIRVariable &var, uint32_t index); + void store_flattened_struct(SPIRVariable &var, uint32_t value); + + + + std::unordered_map expression_usage_counts; + void track_expression_read(uint32_t id); + + SmallVector forced_extensions; + SmallVector header_lines; + + + + + std::unordered_map extra_sub_expressions; + + uint32_t statement_count = 0; + + inline bool is_legacy() const + { + return (options.es && options.version < 300) || (!options.es && options.version < 130); + } + + inline bool is_legacy_es() const + { + return options.es && options.version < 300; + } + + inline bool is_legacy_desktop() const + { + return !options.es && options.version < 130; + } + + bool args_will_forward(uint32_t id, const uint32_t *args, uint32_t num_args, bool pure); + void register_call_out_argument(uint32_t id); + void register_impure_function_call(); + void register_control_dependent_expression(uint32_t expr); + + + std::vector pls_inputs; + std::vector pls_outputs; + std::string pls_decl(const PlsRemap &variable); + const char *to_pls_qualifiers_glsl(const SPIRVariable &variable); + void emit_pls(); + void remap_pls_variables(); + + + + + void add_variable(std::unordered_set &variables_primary, + const std::unordered_set &variables_secondary, std::string &name); + + void check_function_call_constraints(const uint32_t *args, uint32_t length); + void handle_invalid_expression(uint32_t id); + void 
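The three is_legacy* helpers defined inline above gate much of the emitter: targets predating ESSL 300 or desktop GLSL 130 need attribute/varying style interfaces instead of in/out. The same predicate, restated standalone with worked cases:

```cpp
#include <cstdint>

// The inline version gates above, restated standalone: "legacy" profiles
// predate in/out interface syntax (ESSL below 300, desktop GLSL below 130).
static bool is_legacy(bool es, uint32_t version)
{
	return es ? version < 300 : version < 130;
}
// is_legacy(true, 100)  == true   (#version 100 es, OpenGL ES 2.0)
// is_legacy(true, 300)  == false  (#version 300 es)
// is_legacy(false, 120) == true   (desktop #version 120)
// is_legacy(false, 450) == false  (desktop #version 450)
```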
find_static_extensions(); + + std::string emit_for_loop_initializers(const SPIRBlock &block); + void emit_while_loop_initializers(const SPIRBlock &block); + bool for_loop_initializers_are_same_type(const SPIRBlock &block); + bool optimize_read_modify_write(const SPIRType &type, const std::string &lhs, const std::string &rhs); + void fixup_image_load_store_access(); + + bool type_is_empty(const SPIRType &type); + + virtual void declare_undefined_values(); + + static std::string sanitize_underscores(const std::string &str); + + bool can_use_io_location(spv::StorageClass storage, bool block); + const Instruction *get_next_instruction_in_block(const Instruction &instr); + static uint32_t mask_relevant_memory_semantics(uint32_t semantics); + + std::string convert_half_to_string(const SPIRConstant &value, uint32_t col, uint32_t row); + std::string convert_float_to_string(const SPIRConstant &value, uint32_t col, uint32_t row); + std::string convert_double_to_string(const SPIRConstant &value, uint32_t col, uint32_t row); + + std::string convert_separate_image_to_expression(uint32_t id); + + + + + virtual void bitcast_to_builtin_store(uint32_t target_id, std::string &expr, const SPIRType &expr_type); + virtual void bitcast_from_builtin_load(uint32_t source_id, std::string &expr, const SPIRType &expr_type); + void unroll_array_from_complex_load(uint32_t target_id, uint32_t source_id, std::string &expr); + void convert_non_uniform_expression(const SPIRType &type, std::string &expr); + + void handle_store_to_invariant_variable(uint32_t store_id, uint32_t value_id); + void disallow_forwarding_in_expression_chain(const SPIRExpression &expr); + + bool expression_is_constant_null(uint32_t id) const; + virtual void emit_store_statement(uint32_t lhs_expression, uint32_t rhs_expression); + + uint32_t get_integer_width_for_instruction(const Instruction &instr) const; + uint32_t get_integer_width_for_glsl_instruction(GLSLstd450 op, const uint32_t *arguments, uint32_t length) const; + + bool variable_is_lut(const SPIRVariable &var) const; + + char current_locale_radix_character = '.'; + + void fixup_type_alias(); + void reorder_type_alias(); + + void propagate_nonuniform_qualifier(uint32_t id); + +private: + void init(); +}; +} + +#endif diff --git a/third_party/rust/spirv_cross/src/vendor/SPIRV-Cross/spirv_hlsl.cpp b/third_party/rust/spirv_cross/src/vendor/SPIRV-Cross/spirv_hlsl.cpp new file mode 100644 index 000000000000..eca64eb13013 --- /dev/null +++ b/third_party/rust/spirv_cross/src/vendor/SPIRV-Cross/spirv_hlsl.cpp @@ -0,0 +1,4989 @@ + + + + + + + + + + + + + + + + +#include "spirv_hlsl.hpp" +#include "GLSL.std.450.h" +#include +#include + +using namespace spv; +using namespace SPIRV_CROSS_NAMESPACE; +using namespace std; + +static unsigned image_format_to_components(ImageFormat fmt) +{ + switch (fmt) + { + case ImageFormatR8: + case ImageFormatR16: + case ImageFormatR8Snorm: + case ImageFormatR16Snorm: + case ImageFormatR16f: + case ImageFormatR32f: + case ImageFormatR8i: + case ImageFormatR16i: + case ImageFormatR32i: + case ImageFormatR8ui: + case ImageFormatR16ui: + case ImageFormatR32ui: + return 1; + + case ImageFormatRg8: + case ImageFormatRg16: + case ImageFormatRg8Snorm: + case ImageFormatRg16Snorm: + case ImageFormatRg16f: + case ImageFormatRg32f: + case ImageFormatRg8i: + case ImageFormatRg16i: + case ImageFormatRg32i: + case ImageFormatRg8ui: + case ImageFormatRg16ui: + case ImageFormatRg32ui: + return 2; + + case ImageFormatR11fG11fB10f: + return 3; + + case ImageFormatRgba8: + case 
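image_format_to_components, beginning above, derives a channel count purely from the format's channel letters, with Unknown conservatively reporting 4. A self-contained restatement over a stand-in enum (spv::ImageFormat is reduced to a few representatives so the sketch compiles on its own):

```cpp
#include <cassert>

// Restatement of the channel-count mapping implemented above.
enum class Fmt { R32f, Rg16ui, R11fG11fB10f, Rgba8, Unknown };

static unsigned components(Fmt fmt)
{
	switch (fmt)
	{
	case Fmt::R32f:         return 1; // all R* formats
	case Fmt::Rg16ui:       return 2; // all Rg* formats
	case Fmt::R11fG11fB10f: return 3; // the only three-channel entry
	default:                return 4; // Rgba*, Rgb10A2*, Unknown
	}
}

int main()
{
	assert(components(Fmt::R32f) == 1);
	assert(components(Fmt::Unknown) == 4);
	return 0;
}
```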
ImageFormatRgba16: + case ImageFormatRgb10A2: + case ImageFormatRgba8Snorm: + case ImageFormatRgba16Snorm: + case ImageFormatRgba16f: + case ImageFormatRgba32f: + case ImageFormatRgba8i: + case ImageFormatRgba16i: + case ImageFormatRgba32i: + case ImageFormatRgba8ui: + case ImageFormatRgba16ui: + case ImageFormatRgba32ui: + case ImageFormatRgb10a2ui: + return 4; + + case ImageFormatUnknown: + return 4; + + default: + SPIRV_CROSS_THROW("Unrecognized typed image format."); + } +} + +static string image_format_to_type(ImageFormat fmt, SPIRType::BaseType basetype) +{ + switch (fmt) + { + case ImageFormatR8: + case ImageFormatR16: + if (basetype != SPIRType::Float) + SPIRV_CROSS_THROW("Mismatch in image type and base type of image."); + return "unorm float"; + case ImageFormatRg8: + case ImageFormatRg16: + if (basetype != SPIRType::Float) + SPIRV_CROSS_THROW("Mismatch in image type and base type of image."); + return "unorm float2"; + case ImageFormatRgba8: + case ImageFormatRgba16: + if (basetype != SPIRType::Float) + SPIRV_CROSS_THROW("Mismatch in image type and base type of image."); + return "unorm float4"; + case ImageFormatRgb10A2: + if (basetype != SPIRType::Float) + SPIRV_CROSS_THROW("Mismatch in image type and base type of image."); + return "unorm float4"; + + case ImageFormatR8Snorm: + case ImageFormatR16Snorm: + if (basetype != SPIRType::Float) + SPIRV_CROSS_THROW("Mismatch in image type and base type of image."); + return "snorm float"; + case ImageFormatRg8Snorm: + case ImageFormatRg16Snorm: + if (basetype != SPIRType::Float) + SPIRV_CROSS_THROW("Mismatch in image type and base type of image."); + return "snorm float2"; + case ImageFormatRgba8Snorm: + case ImageFormatRgba16Snorm: + if (basetype != SPIRType::Float) + SPIRV_CROSS_THROW("Mismatch in image type and base type of image."); + return "snorm float4"; + + case ImageFormatR16f: + case ImageFormatR32f: + if (basetype != SPIRType::Float) + SPIRV_CROSS_THROW("Mismatch in image type and base type of image."); + return "float"; + case ImageFormatRg16f: + case ImageFormatRg32f: + if (basetype != SPIRType::Float) + SPIRV_CROSS_THROW("Mismatch in image type and base type of image."); + return "float2"; + case ImageFormatRgba16f: + case ImageFormatRgba32f: + if (basetype != SPIRType::Float) + SPIRV_CROSS_THROW("Mismatch in image type and base type of image."); + return "float4"; + + case ImageFormatR11fG11fB10f: + if (basetype != SPIRType::Float) + SPIRV_CROSS_THROW("Mismatch in image type and base type of image."); + return "float3"; + + case ImageFormatR8i: + case ImageFormatR16i: + case ImageFormatR32i: + if (basetype != SPIRType::Int) + SPIRV_CROSS_THROW("Mismatch in image type and base type of image."); + return "int"; + case ImageFormatRg8i: + case ImageFormatRg16i: + case ImageFormatRg32i: + if (basetype != SPIRType::Int) + SPIRV_CROSS_THROW("Mismatch in image type and base type of image."); + return "int2"; + case ImageFormatRgba8i: + case ImageFormatRgba16i: + case ImageFormatRgba32i: + if (basetype != SPIRType::Int) + SPIRV_CROSS_THROW("Mismatch in image type and base type of image."); + return "int4"; + + case ImageFormatR8ui: + case ImageFormatR16ui: + case ImageFormatR32ui: + if (basetype != SPIRType::UInt) + SPIRV_CROSS_THROW("Mismatch in image type and base type of image."); + return "uint"; + case ImageFormatRg8ui: + case ImageFormatRg16ui: + case ImageFormatRg32ui: + if (basetype != SPIRType::UInt) + SPIRV_CROSS_THROW("Mismatch in image type and base type of image."); + return "uint2"; + case ImageFormatRgba8ui: + 
case ImageFormatRgba16ui: + case ImageFormatRgba32ui: + if (basetype != SPIRType::UInt) + SPIRV_CROSS_THROW("Mismatch in image type and base type of image."); + return "uint4"; + case ImageFormatRgb10a2ui: + if (basetype != SPIRType::UInt) + SPIRV_CROSS_THROW("Mismatch in image type and base type of image."); + return "uint4"; + + case ImageFormatUnknown: + switch (basetype) + { + case SPIRType::Float: + return "float4"; + case SPIRType::Int: + return "int4"; + case SPIRType::UInt: + return "uint4"; + default: + SPIRV_CROSS_THROW("Unsupported base type for image."); + } + + default: + SPIRV_CROSS_THROW("Unrecognized typed image format."); + } +} + +string CompilerHLSL::image_type_hlsl_modern(const SPIRType &type, uint32_t id) +{ + auto &imagetype = get(type.image.type); + const char *dim = nullptr; + bool typed_load = false; + uint32_t components = 4; + + switch (type.image.dim) + { + case Dim1D: + typed_load = type.image.sampled == 2; + dim = "1D"; + break; + case Dim2D: + typed_load = type.image.sampled == 2; + dim = "2D"; + break; + case Dim3D: + typed_load = type.image.sampled == 2; + dim = "3D"; + break; + case DimCube: + if (type.image.sampled == 2) + SPIRV_CROSS_THROW("RWTextureCube does not exist in HLSL."); + dim = "Cube"; + break; + case DimRect: + SPIRV_CROSS_THROW("Rectangle texture support is not yet implemented for HLSL."); + case DimBuffer: + if (type.image.sampled == 1) + return join("Buffer<", type_to_glsl(imagetype), components, ">"); + else if (type.image.sampled == 2) + { + if (interlocked_resources.count(id)) + return join("RasterizerOrderedBuffer<", image_format_to_type(type.image.format, imagetype.basetype), + ">"); + return join("RWBuffer<", image_format_to_type(type.image.format, imagetype.basetype), ">"); + } + else + SPIRV_CROSS_THROW("Sampler buffers must be either sampled or unsampled. Cannot deduce in runtime."); + case DimSubpassData: + dim = "2D"; + typed_load = false; + break; + default: + SPIRV_CROSS_THROW("Invalid dimension."); + } + const char *arrayed = type.image.arrayed ? "Array" : ""; + const char *ms = type.image.ms ? "MS" : ""; + const char *rw = typed_load ? "RW" : ""; + if (typed_load && interlocked_resources.count(id)) + rw = "RasterizerOrdered"; + return join(rw, "Texture", dim, ms, arrayed, "<", + typed_load ? image_format_to_type(type.image.format, imagetype.basetype) : + join(type_to_glsl(imagetype), components), + ">"); +} + +string CompilerHLSL::image_type_hlsl_legacy(const SPIRType &type, uint32_t id) +{ + auto &imagetype = get(type.image.type); + string res; + + switch (imagetype.basetype) + { + case SPIRType::Int: + res = "i"; + break; + case SPIRType::UInt: + res = "u"; + break; + default: + break; + } + + if (type.basetype == SPIRType::Image && type.image.dim == DimSubpassData) + return res + "subpassInput" + (type.image.ms ? "MS" : ""); + + + + if (type.basetype == SPIRType::Image && type.image.dim != DimSubpassData) + { + + if (type.image.dim == DimBuffer && type.image.sampled == 1) + res += "sampler"; + else + res += type.image.sampled == 2 ? 
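image_type_hlsl_modern above assembles the HLSL declaration by straight concatenation: an optional RW or RasterizerOrdered prefix, the dimensionality, MS/Array suffixes, and the element type in angle brackets. A reduced model of that join, with worked examples:

```cpp
#include <string>

// Reduced model of the declaration assembly above, mirroring
// join(rw, "Texture", dim, ms, arrayed, "<", ..., ">").
static std::string hlsl_texture_type(bool storage, bool rasterizer_ordered,
                                     const std::string &dim, bool ms, bool arrayed,
                                     const std::string &element)
{
	std::string rw = storage ? (rasterizer_ordered ? "RasterizerOrdered" : "RW") : "";
	return rw + "Texture" + dim + (ms ? "MS" : "") + (arrayed ? "Array" : "") +
	       "<" + element + ">";
}
// hlsl_texture_type(false, false, "2D", false, false, "float4")
//   == "Texture2D<float4>"
// hlsl_texture_type(true, false, "2D", false, false, "unorm float4")
//   == "RWTexture2D<unorm float4>"
```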
"image" : "texture"; + } + else + res += "sampler"; + + switch (type.image.dim) + { + case Dim1D: + res += "1D"; + break; + case Dim2D: + res += "2D"; + break; + case Dim3D: + res += "3D"; + break; + case DimCube: + res += "CUBE"; + break; + + case DimBuffer: + res += "Buffer"; + break; + + case DimSubpassData: + res += "2D"; + break; + default: + SPIRV_CROSS_THROW("Only 1D, 2D, 3D, Buffer, InputTarget and Cube textures supported."); + } + + if (type.image.ms) + res += "MS"; + if (type.image.arrayed) + res += "Array"; + if (image_is_comparison(type, id)) + res += "Shadow"; + + return res; +} + +string CompilerHLSL::image_type_hlsl(const SPIRType &type, uint32_t id) +{ + if (hlsl_options.shader_model <= 30) + return image_type_hlsl_legacy(type, id); + else + return image_type_hlsl_modern(type, id); +} + + + + +string CompilerHLSL::type_to_glsl(const SPIRType &type, uint32_t id) +{ + + + switch (type.basetype) + { + case SPIRType::Struct: + + if (backend.explicit_struct_type) + return join("struct ", to_name(type.self)); + else + return to_name(type.self); + + case SPIRType::Image: + case SPIRType::SampledImage: + return image_type_hlsl(type, id); + + case SPIRType::Sampler: + return comparison_ids.count(id) ? "SamplerComparisonState" : "SamplerState"; + + case SPIRType::Void: + return "void"; + + default: + break; + } + + if (type.vecsize == 1 && type.columns == 1) + { + switch (type.basetype) + { + case SPIRType::Boolean: + return "bool"; + case SPIRType::Int: + return backend.basic_int_type; + case SPIRType::UInt: + return backend.basic_uint_type; + case SPIRType::AtomicCounter: + return "atomic_uint"; + case SPIRType::Half: + return "min16float"; + case SPIRType::Float: + return "float"; + case SPIRType::Double: + return "double"; + case SPIRType::Int64: + return "int64_t"; + case SPIRType::UInt64: + return "uint64_t"; + default: + return "???"; + } + } + else if (type.vecsize > 1 && type.columns == 1) + { + switch (type.basetype) + { + case SPIRType::Boolean: + return join("bool", type.vecsize); + case SPIRType::Int: + return join("int", type.vecsize); + case SPIRType::UInt: + return join("uint", type.vecsize); + case SPIRType::Half: + return join("min16float", type.vecsize); + case SPIRType::Float: + return join("float", type.vecsize); + case SPIRType::Double: + return join("double", type.vecsize); + case SPIRType::Int64: + return join("i64vec", type.vecsize); + case SPIRType::UInt64: + return join("u64vec", type.vecsize); + default: + return "???"; + } + } + else + { + switch (type.basetype) + { + case SPIRType::Boolean: + return join("bool", type.columns, "x", type.vecsize); + case SPIRType::Int: + return join("int", type.columns, "x", type.vecsize); + case SPIRType::UInt: + return join("uint", type.columns, "x", type.vecsize); + case SPIRType::Half: + return join("min16float", type.columns, "x", type.vecsize); + case SPIRType::Float: + return join("float", type.columns, "x", type.vecsize); + case SPIRType::Double: + return join("double", type.columns, "x", type.vecsize); + + default: + return "???"; + } + } +} + +void CompilerHLSL::emit_header() +{ + for (auto &header : header_lines) + statement(header); + + if (header_lines.size() > 0) + { + statement(""); + } +} + +void CompilerHLSL::emit_interface_block_globally(const SPIRVariable &var) +{ + add_resource_name(var.self); + + + + auto &flags = ir.meta[var.self].decoration.decoration_flags; + auto old_flags = flags; + flags.reset(); + statement("static ", variable_decl(var), ";"); + flags = old_flags; +} + +const char 
*CompilerHLSL::to_storage_qualifiers_glsl(const SPIRVariable &var) +{ + + + if (var.storage == StorageClassUniformConstant || var.storage == StorageClassUniform || + var.storage == StorageClassPushConstant) + { + return "uniform "; + } + + return ""; +} + +void CompilerHLSL::emit_builtin_outputs_in_struct() +{ + auto &execution = get_entry_point(); + + bool legacy = hlsl_options.shader_model <= 30; + active_output_builtins.for_each_bit([&](uint32_t i) { + const char *type = nullptr; + const char *semantic = nullptr; + auto builtin = static_cast<BuiltIn>(i); + switch (builtin) + { + case BuiltInPosition: + type = "float4"; + semantic = legacy ? "POSITION" : "SV_Position"; + break; + + case BuiltInFragDepth: + type = "float"; + if (legacy) + { + semantic = "DEPTH"; + } + else + { + if (hlsl_options.shader_model >= 50 && execution.flags.get(ExecutionModeDepthGreater)) + semantic = "SV_DepthGreaterEqual"; + else if (hlsl_options.shader_model >= 50 && execution.flags.get(ExecutionModeDepthLess)) + semantic = "SV_DepthLessEqual"; + else + semantic = "SV_Depth"; + } + break; + + case BuiltInClipDistance: + + for (uint32_t clip = 0; clip < clip_distance_count; clip += 4) + { + uint32_t to_declare = clip_distance_count - clip; + if (to_declare > 4) + to_declare = 4; + + uint32_t semantic_index = clip / 4; + + static const char *types[] = { "float", "float2", "float3", "float4" }; + statement(types[to_declare - 1], " ", builtin_to_glsl(builtin, StorageClassOutput), semantic_index, + " : SV_ClipDistance", semantic_index, ";"); + } + break; + + case BuiltInCullDistance: + + for (uint32_t cull = 0; cull < cull_distance_count; cull += 4) + { + uint32_t to_declare = cull_distance_count - cull; + if (to_declare > 4) + to_declare = 4; + + uint32_t semantic_index = cull / 4; + + static const char *types[] = { "float", "float2", "float3", "float4" }; + statement(types[to_declare - 1], " ", builtin_to_glsl(builtin, StorageClassOutput), semantic_index, + " : SV_CullDistance", semantic_index, ";"); + } + break; + + case BuiltInPointSize: + + + + if (hlsl_options.point_size_compat) + break; + else + SPIRV_CROSS_THROW("Unsupported builtin in HLSL."); + + default: + SPIRV_CROSS_THROW("Unsupported builtin in HLSL."); + break; + } + + if (type && semantic) + statement(type, " ", builtin_to_glsl(builtin, StorageClassOutput), " : ", semantic, ";"); + }); +} + +void CompilerHLSL::emit_builtin_inputs_in_struct() +{ + bool legacy = hlsl_options.shader_model <= 30; + active_input_builtins.for_each_bit([&](uint32_t i) { + const char *type = nullptr; + const char *semantic = nullptr; + auto builtin = static_cast<BuiltIn>(i); + switch (builtin) + { + case BuiltInFragCoord: + type = "float4"; + semantic = legacy ?
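/* SM 3.0 exposes the pixel position as VPOS; SM 4.0+ uses SV_Position */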
"VPOS" : "SV_Position"; + break; + + case BuiltInVertexId: + case BuiltInVertexIndex: + if (legacy) + SPIRV_CROSS_THROW("Vertex index not supported in SM 3.0 or lower."); + type = "uint"; + semantic = "SV_VertexID"; + break; + + case BuiltInInstanceId: + case BuiltInInstanceIndex: + if (legacy) + SPIRV_CROSS_THROW("Instance index not supported in SM 3.0 or lower."); + type = "uint"; + semantic = "SV_InstanceID"; + break; + + case BuiltInSampleId: + if (legacy) + SPIRV_CROSS_THROW("Sample ID not supported in SM 3.0 or lower."); + type = "uint"; + semantic = "SV_SampleIndex"; + break; + + case BuiltInGlobalInvocationId: + type = "uint3"; + semantic = "SV_DispatchThreadID"; + break; + + case BuiltInLocalInvocationId: + type = "uint3"; + semantic = "SV_GroupThreadID"; + break; + + case BuiltInLocalInvocationIndex: + type = "uint"; + semantic = "SV_GroupIndex"; + break; + + case BuiltInWorkgroupId: + type = "uint3"; + semantic = "SV_GroupID"; + break; + + case BuiltInFrontFacing: + type = "bool"; + semantic = "SV_IsFrontFace"; + break; + + case BuiltInNumWorkgroups: + case BuiltInSubgroupSize: + case BuiltInSubgroupLocalInvocationId: + case BuiltInSubgroupEqMask: + case BuiltInSubgroupLtMask: + case BuiltInSubgroupLeMask: + case BuiltInSubgroupGtMask: + case BuiltInSubgroupGeMask: + + break; + + case BuiltInClipDistance: + + for (uint32_t clip = 0; clip < clip_distance_count; clip += 4) + { + uint32_t to_declare = clip_distance_count - clip; + if (to_declare > 4) + to_declare = 4; + + uint32_t semantic_index = clip / 4; + + static const char *types[] = { "float", "float2", "float3", "float4" }; + statement(types[to_declare - 1], " ", builtin_to_glsl(builtin, StorageClassInput), semantic_index, + " : SV_ClipDistance", semantic_index, ";"); + } + break; + + case BuiltInCullDistance: + + for (uint32_t cull = 0; cull < cull_distance_count; cull += 4) + { + uint32_t to_declare = cull_distance_count - cull; + if (to_declare > 4) + to_declare = 4; + + uint32_t semantic_index = cull / 4; + + static const char *types[] = { "float", "float2", "float3", "float4" }; + statement(types[to_declare - 1], " ", builtin_to_glsl(builtin, StorageClassInput), semantic_index, + " : SV_CullDistance", semantic_index, ";"); + } + break; + + case BuiltInPointCoord: + + if (hlsl_options.point_coord_compat) + break; + else + SPIRV_CROSS_THROW("Unsupported builtin in HLSL."); + + default: + SPIRV_CROSS_THROW("Unsupported builtin in HLSL."); + break; + } + + if (type && semantic) + statement(type, " ", builtin_to_glsl(builtin, StorageClassInput), " : ", semantic, ";"); + }); +} + +uint32_t CompilerHLSL::type_to_consumed_locations(const SPIRType &type) const +{ + + uint32_t elements = 0; + + if (type.basetype == SPIRType::Struct) + { + for (uint32_t i = 0; i < uint32_t(type.member_types.size()); i++) + elements += type_to_consumed_locations(get(type.member_types[i])); + } + else + { + uint32_t array_multiplier = 1; + for (uint32_t i = 0; i < uint32_t(type.array.size()); i++) + { + if (type.array_size_literal[i]) + array_multiplier *= type.array[i]; + else + array_multiplier *= get(type.array[i]).scalar(); + } + elements += array_multiplier * type.columns; + } + return elements; +} + +string CompilerHLSL::to_interpolation_qualifiers(const Bitset &flags) +{ + string res; + + + if (flags.get(DecorationFlat)) + res += "nointerpolation "; + if (flags.get(DecorationNoPerspective)) + res += "noperspective "; + if (flags.get(DecorationCentroid)) + res += "centroid "; + if (flags.get(DecorationPatch)) + res += "patch "; + if 
(flags.get(DecorationSample)) + res += "sample "; + if (flags.get(DecorationInvariant)) + res += "invariant "; + + return res; +} + +std::string CompilerHLSL::to_semantic(uint32_t location, ExecutionModel em, StorageClass sc) +{ + if (em == ExecutionModelVertex && sc == StorageClassInput) + { + + + for (auto &attribute : remap_vertex_attributes) + if (attribute.location == location) + return attribute.semantic; + } + + + return join("TEXCOORD", location); +} + +void CompilerHLSL::emit_io_block(const SPIRVariable &var) +{ + auto &execution = get_entry_point(); + + auto &type = get(var.basetype); + add_resource_name(type.self); + + statement("struct ", to_name(type.self)); + begin_scope(); + type.member_name_cache.clear(); + + uint32_t base_location = get_decoration(var.self, DecorationLocation); + + for (uint32_t i = 0; i < uint32_t(type.member_types.size()); i++) + { + string semantic; + if (has_member_decoration(type.self, i, DecorationLocation)) + { + uint32_t location = get_member_decoration(type.self, i, DecorationLocation); + semantic = join(" : ", to_semantic(location, execution.model, var.storage)); + } + else + { + + + + uint32_t location = base_location + i; + semantic = join(" : ", to_semantic(location, execution.model, var.storage)); + } + + add_member_name(type, i); + + auto &membertype = get(type.member_types[i]); + statement(to_interpolation_qualifiers(get_member_decoration_bitset(type.self, i)), + variable_decl(membertype, to_member_name(type, i)), semantic, ";"); + } + + end_scope_decl(); + statement(""); + + statement("static ", variable_decl(var), ";"); + statement(""); +} + +void CompilerHLSL::emit_interface_block_in_struct(const SPIRVariable &var, unordered_set &active_locations) +{ + auto &execution = get_entry_point(); + auto type = get(var.basetype); + + string binding; + bool use_location_number = true; + bool legacy = hlsl_options.shader_model <= 30; + if (execution.model == ExecutionModelFragment && var.storage == StorageClassOutput) + { + + uint32_t index = get_decoration(var.self, DecorationIndex); + uint32_t location = get_decoration(var.self, DecorationLocation); + + if (index != 0 && location != 0) + SPIRV_CROSS_THROW("Dual-source blending is only supported on MRT #0 in HLSL."); + + binding = join(legacy ? "COLOR" : "SV_Target", location + index); + use_location_number = false; + if (legacy) + type.vecsize = 4; + } + + const auto get_vacant_location = [&]() -> uint32_t { + for (uint32_t i = 0; i < 64; i++) + if (!active_locations.count(i)) + return i; + SPIRV_CROSS_THROW("All locations from 0 to 63 are exhausted."); + }; + + bool need_matrix_unroll = var.storage == StorageClassInput && execution.model == ExecutionModelVertex; + + auto &m = ir.meta[var.self].decoration; + auto name = to_name(var.self); + if (use_location_number) + { + uint32_t location_number; + + + + if (m.decoration_flags.get(DecorationLocation)) + location_number = m.location; + else + location_number = get_vacant_location(); + + + auto semantic = to_semantic(location_number, execution.model, var.storage); + + if (need_matrix_unroll && type.columns > 1) + { + if (!type.array.empty()) + SPIRV_CROSS_THROW("Arrays of matrices used as input/output. 
This is not supported."); + + + for (uint32_t i = 0; i < type.columns; i++) + { + SPIRType newtype = type; + newtype.columns = 1; + statement(to_interpolation_qualifiers(get_decoration_bitset(var.self)), + variable_decl(newtype, join(name, "_", i)), " : ", semantic, "_", i, ";"); + active_locations.insert(location_number++); + } + } + else + { + statement(to_interpolation_qualifiers(get_decoration_bitset(var.self)), variable_decl(type, name), " : ", + semantic, ";"); + + + uint32_t consumed_locations = type_to_consumed_locations(type); + for (uint32_t i = 0; i < consumed_locations; i++) + active_locations.insert(location_number + i); + } + } + else + statement(variable_decl(type, name), " : ", binding, ";"); +} + +std::string CompilerHLSL::builtin_to_glsl(spv::BuiltIn builtin, spv::StorageClass storage) +{ + switch (builtin) + { + case BuiltInVertexId: + return "gl_VertexID"; + case BuiltInInstanceId: + return "gl_InstanceID"; + case BuiltInNumWorkgroups: + { + if (!num_workgroups_builtin) + SPIRV_CROSS_THROW("NumWorkgroups builtin is used, but remap_num_workgroups_builtin() was not called. " + "Cannot emit code for this builtin."); + + auto &var = get(num_workgroups_builtin); + auto &type = get(var.basetype); + return sanitize_underscores(join(to_name(num_workgroups_builtin), "_", get_member_name(type.self, 0))); + } + case BuiltInPointCoord: + + return "float2(0.5f, 0.5f)"; + case BuiltInSubgroupLocalInvocationId: + return "WaveGetLaneIndex()"; + case BuiltInSubgroupSize: + return "WaveGetLaneCount()"; + + default: + return CompilerGLSL::builtin_to_glsl(builtin, storage); + } +} + +void CompilerHLSL::emit_builtin_variables() +{ + Bitset builtins = active_input_builtins; + builtins.merge_or(active_output_builtins); + + bool need_base_vertex_info = false; + + + builtins.for_each_bit([&](uint32_t i) { + const char *type = nullptr; + auto builtin = static_cast(i); + uint32_t array_size = 0; + + switch (builtin) + { + case BuiltInFragCoord: + case BuiltInPosition: + type = "float4"; + break; + + case BuiltInFragDepth: + type = "float"; + break; + + case BuiltInVertexId: + case BuiltInVertexIndex: + case BuiltInInstanceIndex: + type = "int"; + if (hlsl_options.support_nonzero_base_vertex_base_instance) + need_base_vertex_info = true; + break; + + case BuiltInInstanceId: + case BuiltInSampleId: + type = "int"; + break; + + case BuiltInPointSize: + if (hlsl_options.point_size_compat) + { + + type = "float"; + break; + } + else + SPIRV_CROSS_THROW(join("Unsupported builtin in HLSL: ", unsigned(builtin))); + + case BuiltInGlobalInvocationId: + case BuiltInLocalInvocationId: + case BuiltInWorkgroupId: + type = "uint3"; + break; + + case BuiltInLocalInvocationIndex: + type = "uint"; + break; + + case BuiltInFrontFacing: + type = "bool"; + break; + + case BuiltInNumWorkgroups: + case BuiltInPointCoord: + + break; + + case BuiltInSubgroupLocalInvocationId: + case BuiltInSubgroupSize: + if (hlsl_options.shader_model < 60) + SPIRV_CROSS_THROW("Need SM 6.0 for Wave ops."); + break; + + case BuiltInSubgroupEqMask: + case BuiltInSubgroupLtMask: + case BuiltInSubgroupLeMask: + case BuiltInSubgroupGtMask: + case BuiltInSubgroupGeMask: + if (hlsl_options.shader_model < 60) + SPIRV_CROSS_THROW("Need SM 6.0 for Wave ops."); + type = "uint4"; + break; + + case BuiltInClipDistance: + array_size = clip_distance_count; + type = "float"; + break; + + case BuiltInCullDistance: + array_size = cull_distance_count; + type = "float"; + break; + + default: + SPIRV_CROSS_THROW(join("Unsupported builtin in HLSL: ", 
unsigned(builtin))); + } + + StorageClass storage = active_input_builtins.get(i) ? StorageClassInput : StorageClassOutput; + + + + if (type) + { + if (array_size) + statement("static ", type, " ", builtin_to_glsl(builtin, storage), "[", array_size, "];"); + else + statement("static ", type, " ", builtin_to_glsl(builtin, storage), ";"); + } + }); + + if (need_base_vertex_info) + { + statement("cbuffer SPIRV_Cross_VertexInfo"); + begin_scope(); + statement("int SPIRV_Cross_BaseVertex;"); + statement("int SPIRV_Cross_BaseInstance;"); + end_scope_decl(); + statement(""); + } +} + +void CompilerHLSL::emit_composite_constants() +{ + + + bool emitted = false; + + ir.for_each_typed_id([&](uint32_t, SPIRConstant &c) { + if (c.specialization) + return; + + auto &type = this->get(c.constant_type); + if (type.basetype == SPIRType::Struct || !type.array.empty()) + { + auto name = to_name(c.self); + statement("static const ", variable_decl(type, name), " = ", constant_expression(c), ";"); + emitted = true; + } + }); + + if (emitted) + statement(""); +} + +void CompilerHLSL::emit_specialization_constants_and_structs() +{ + bool emitted = false; + SpecializationConstant wg_x, wg_y, wg_z; + ID workgroup_size_id = get_work_group_size_specialization_constants(wg_x, wg_y, wg_z); + + auto loop_lock = ir.create_loop_hard_lock(); + for (auto &id_ : ir.ids_for_constant_or_type) + { + auto &id = ir.ids[id_]; + + if (id.get_type() == TypeConstant) + { + auto &c = id.get(); + + if (c.self == workgroup_size_id) + { + statement("static const uint3 gl_WorkGroupSize = ", + constant_expression(get(workgroup_size_id)), ";"); + emitted = true; + } + else if (c.specialization) + { + auto &type = get(c.constant_type); + auto name = to_name(c.self); + + + c.specialization_constant_macro_name = + constant_value_macro_name(get_decoration(c.self, DecorationSpecId)); + + statement("#ifndef ", c.specialization_constant_macro_name); + statement("#define ", c.specialization_constant_macro_name, " ", constant_expression(c)); + statement("#endif"); + statement("static const ", variable_decl(type, name), " = ", c.specialization_constant_macro_name, ";"); + emitted = true; + } + } + else if (id.get_type() == TypeConstantOp) + { + auto &c = id.get(); + auto &type = get(c.basetype); + auto name = to_name(c.self); + statement("static const ", variable_decl(type, name), " = ", constant_op_expression(c), ";"); + emitted = true; + } + else if (id.get_type() == TypeType) + { + auto &type = id.get(); + if (type.basetype == SPIRType::Struct && type.array.empty() && !type.pointer && + (!ir.meta[type.self].decoration.decoration_flags.get(DecorationBlock) && + !ir.meta[type.self].decoration.decoration_flags.get(DecorationBufferBlock))) + { + if (emitted) + statement(""); + emitted = false; + + emit_struct(type); + } + } + } + + if (emitted) + statement(""); +} + +void CompilerHLSL::replace_illegal_names() +{ + static const unordered_set keywords = { + + "line", "linear", "matrix", "point", "row_major", "sampler", + }; + + ir.for_each_typed_id([&](uint32_t, SPIRVariable &var) { + if (!is_hidden_variable(var)) + { + auto &m = ir.meta[var.self].decoration; + if (keywords.find(m.alias) != end(keywords)) + m.alias = join("_", m.alias); + } + }); + + CompilerGLSL::replace_illegal_names(); +} + +void CompilerHLSL::emit_resources() +{ + auto &execution = get_entry_point(); + + replace_illegal_names(); + + emit_specialization_constants_and_structs(); + emit_composite_constants(); + + bool emitted = false; + + + ir.for_each_typed_id([&](uint32_t, 
SPIRVariable &var) { + auto &type = this->get(var.basetype); + + bool is_block_storage = type.storage == StorageClassStorageBuffer || type.storage == StorageClassUniform; + bool has_block_flags = ir.meta[type.self].decoration.decoration_flags.get(DecorationBlock) || + ir.meta[type.self].decoration.decoration_flags.get(DecorationBufferBlock); + + if (var.storage != StorageClassFunction && type.pointer && is_block_storage && !is_hidden_variable(var) && + has_block_flags) + { + emit_buffer_block(var); + emitted = true; + } + }); + + + ir.for_each_typed_id([&](uint32_t, SPIRVariable &var) { + auto &type = this->get(var.basetype); + if (var.storage != StorageClassFunction && type.pointer && type.storage == StorageClassPushConstant && + !is_hidden_variable(var)) + { + emit_push_constant_block(var); + emitted = true; + } + }); + + if (execution.model == ExecutionModelVertex && hlsl_options.shader_model <= 30) + { + statement("uniform float4 gl_HalfPixel;"); + emitted = true; + } + + bool skip_separate_image_sampler = !combined_image_samplers.empty() || hlsl_options.shader_model <= 30; + + + ir.for_each_typed_id([&](uint32_t, SPIRVariable &var) { + auto &type = this->get(var.basetype); + + + if (skip_separate_image_sampler) + { + + bool sampler_buffer = type.basetype == SPIRType::Image && type.image.dim == DimBuffer; + bool separate_image = type.basetype == SPIRType::Image && type.image.sampled == 1; + bool separate_sampler = type.basetype == SPIRType::Sampler; + if (!sampler_buffer && (separate_image || separate_sampler)) + return; + } + + if (var.storage != StorageClassFunction && !is_builtin_variable(var) && !var.remapped_variable && + type.pointer && (type.storage == StorageClassUniformConstant || type.storage == StorageClassAtomicCounter)) + { + emit_uniform(var); + emitted = true; + } + }); + + if (emitted) + statement(""); + emitted = false; + + + emit_builtin_variables(); + + ir.for_each_typed_id([&](uint32_t, SPIRVariable &var) { + auto &type = this->get(var.basetype); + bool block = ir.meta[type.self].decoration.decoration_flags.get(DecorationBlock); + + + + + if (!block && var.storage != StorageClassFunction && !var.remapped_variable && type.pointer && + (var.storage == StorageClassInput || var.storage == StorageClassOutput) && !is_builtin_variable(var) && + interface_variable_exists_in_entry_point(var.self)) + { + + emit_interface_block_globally(var); + emitted = true; + } + }); + + if (emitted) + statement(""); + emitted = false; + + require_input = false; + require_output = false; + unordered_set active_inputs; + unordered_set active_outputs; + SmallVector input_variables; + SmallVector output_variables; + ir.for_each_typed_id([&](uint32_t, SPIRVariable &var) { + auto &type = this->get(var.basetype); + bool block = ir.meta[type.self].decoration.decoration_flags.get(DecorationBlock); + + if (var.storage != StorageClassInput && var.storage != StorageClassOutput) + return; + + + + + if (!block && !var.remapped_variable && type.pointer && !is_builtin_variable(var) && + interface_variable_exists_in_entry_point(var.self)) + { + if (var.storage == StorageClassInput) + input_variables.push_back(&var); + else + output_variables.push_back(&var); + } + + + if (block && !is_builtin_variable(var) && interface_variable_exists_in_entry_point(var.self)) + { + auto &active = var.storage == StorageClassInput ? 
active_inputs : active_outputs; + for (uint32_t i = 0; i < uint32_t(type.member_types.size()); i++) + { + if (has_member_decoration(type.self, i, DecorationLocation)) + { + uint32_t location = get_member_decoration(type.self, i, DecorationLocation); + active.insert(location); + } + } + + + emit_io_block(var); + } + }); + + const auto variable_compare = [&](const SPIRVariable *a, const SPIRVariable *b) -> bool { + + + + + + + bool has_location_a = has_decoration(a->self, DecorationLocation); + bool has_location_b = has_decoration(b->self, DecorationLocation); + + if (has_location_a && has_location_b) + { + return get_decoration(a->self, DecorationLocation) < get_decoration(b->self, DecorationLocation); + } + else if (has_location_a && !has_location_b) + return true; + else if (!has_location_a && has_location_b) + return false; + + const auto &name1 = to_name(a->self); + const auto &name2 = to_name(b->self); + + if (name1.empty() && name2.empty()) + return a->self < b->self; + else if (name1.empty()) + return true; + else if (name2.empty()) + return false; + + return name1.compare(name2) < 0; + }; + + auto input_builtins = active_input_builtins; + input_builtins.clear(BuiltInNumWorkgroups); + input_builtins.clear(BuiltInPointCoord); + input_builtins.clear(BuiltInSubgroupSize); + input_builtins.clear(BuiltInSubgroupLocalInvocationId); + input_builtins.clear(BuiltInSubgroupEqMask); + input_builtins.clear(BuiltInSubgroupLtMask); + input_builtins.clear(BuiltInSubgroupLeMask); + input_builtins.clear(BuiltInSubgroupGtMask); + input_builtins.clear(BuiltInSubgroupGeMask); + + if (!input_variables.empty() || !input_builtins.empty()) + { + require_input = true; + statement("struct SPIRV_Cross_Input"); + + begin_scope(); + sort(input_variables.begin(), input_variables.end(), variable_compare); + for (auto var : input_variables) + emit_interface_block_in_struct(*var, active_inputs); + emit_builtin_inputs_in_struct(); + end_scope_decl(); + statement(""); + } + + if (!output_variables.empty() || !active_output_builtins.empty()) + { + require_output = true; + statement("struct SPIRV_Cross_Output"); + + begin_scope(); + + sort(output_variables.begin(), output_variables.end(), variable_compare); + for (auto var : output_variables) + emit_interface_block_in_struct(*var, active_outputs); + emit_builtin_outputs_in_struct(); + end_scope_decl(); + statement(""); + } + + + for (auto global : global_variables) + { + auto &var = get(global); + if (var.storage != StorageClassOutput) + { + if (!variable_is_lut(var)) + { + add_resource_name(var.self); + + const char *storage = nullptr; + switch (var.storage) + { + case StorageClassWorkgroup: + storage = "groupshared"; + break; + + default: + storage = "static"; + break; + } + statement(storage, " ", variable_decl(var), ";"); + emitted = true; + } + } + } + + if (emitted) + statement(""); + + declare_undefined_values(); + + if (requires_op_fmod) + { + static const char *types[] = { + "float", + "float2", + "float3", + "float4", + }; + + for (auto &type : types) + { + statement(type, " mod(", type, " x, ", type, " y)"); + begin_scope(); + statement("return x - y * floor(x / y);"); + end_scope(); + statement(""); + } + } + + if (required_textureSizeVariants != 0) + { + static const char *types[QueryTypeCount] = { "float4", "int4", "uint4" }; + static const char *dims[QueryDimCount] = { "Texture1D", "Texture1DArray", "Texture2D", "Texture2DArray", + "Texture3D", "Buffer", "TextureCube", "TextureCubeArray", + "Texture2DMS", "Texture2DMSArray" }; + + static const bool 
has_lod[QueryDimCount] = { true, true, true, true, true, false, true, true, false, false }; + + static const char *ret_types[QueryDimCount] = { + "uint", "uint2", "uint2", "uint3", "uint3", "uint", "uint2", "uint3", "uint2", "uint3", + }; + + static const uint32_t return_arguments[QueryDimCount] = { + 1, 2, 2, 3, 3, 1, 2, 3, 2, 3, + }; + + for (uint32_t index = 0; index < QueryDimCount; index++) + { + for (uint32_t type_index = 0; type_index < QueryTypeCount; type_index++) + { + uint32_t bit = 16 * type_index + index; + uint64_t mask = 1ull << bit; + + if ((required_textureSizeVariants & mask) == 0) + continue; + + statement(ret_types[index], " SPIRV_Cross_textureSize(", dims[index], "<", types[type_index], + "> Tex, uint Level, out uint Param)"); + begin_scope(); + statement(ret_types[index], " ret;"); + switch (return_arguments[index]) + { + case 1: + if (has_lod[index]) + statement("Tex.GetDimensions(Level, ret.x, Param);"); + else + { + statement("Tex.GetDimensions(ret.x);"); + statement("Param = 0u;"); + } + break; + case 2: + if (has_lod[index]) + statement("Tex.GetDimensions(Level, ret.x, ret.y, Param);"); + else + statement("Tex.GetDimensions(ret.x, ret.y, Param);"); + break; + case 3: + if (has_lod[index]) + statement("Tex.GetDimensions(Level, ret.x, ret.y, ret.z, Param);"); + else + statement("Tex.GetDimensions(ret.x, ret.y, ret.z, Param);"); + break; + } + + statement("return ret;"); + end_scope(); + statement(""); + } + } + } + + if (requires_fp16_packing) + { + + statement("uint SPIRV_Cross_packHalf2x16(float2 value)"); + begin_scope(); + statement("uint2 Packed = f32tof16(value);"); + statement("return Packed.x | (Packed.y << 16);"); + end_scope(); + statement(""); + + statement("float2 SPIRV_Cross_unpackHalf2x16(uint value)"); + begin_scope(); + statement("return f16tof32(uint2(value & 0xffff, value >> 16));"); + end_scope(); + statement(""); + } + + if (requires_explicit_fp16_packing) + { + + statement("uint SPIRV_Cross_packFloat2x16(min16float2 value)"); + begin_scope(); + statement("uint2 Packed = f32tof16(value);"); + statement("return Packed.x | (Packed.y << 16);"); + end_scope(); + statement(""); + + statement("min16float2 SPIRV_Cross_unpackFloat2x16(uint value)"); + begin_scope(); + statement("return min16float2(f16tof32(uint2(value & 0xffff, value >> 16)));"); + end_scope(); + statement(""); + } + + + if (requires_unorm8_packing) + { + statement("uint SPIRV_Cross_packUnorm4x8(float4 value)"); + begin_scope(); + statement("uint4 Packed = uint4(round(saturate(value) * 255.0));"); + statement("return Packed.x | (Packed.y << 8) | (Packed.z << 16) | (Packed.w << 24);"); + end_scope(); + statement(""); + + statement("float4 SPIRV_Cross_unpackUnorm4x8(uint value)"); + begin_scope(); + statement("uint4 Packed = uint4(value & 0xff, (value >> 8) & 0xff, (value >> 16) & 0xff, value >> 24);"); + statement("return float4(Packed) / 255.0;"); + end_scope(); + statement(""); + } + + if (requires_snorm8_packing) + { + statement("uint SPIRV_Cross_packSnorm4x8(float4 value)"); + begin_scope(); + statement("int4 Packed = int4(round(clamp(value, -1.0, 1.0) * 127.0)) & 0xff;"); + statement("return uint(Packed.x | (Packed.y << 8) | (Packed.z << 16) | (Packed.w << 24));"); + end_scope(); + statement(""); + + statement("float4 SPIRV_Cross_unpackSnorm4x8(uint value)"); + begin_scope(); + statement("int SignedValue = int(value);"); + statement("int4 Packed = int4(SignedValue << 24, SignedValue << 16, SignedValue << 8, SignedValue) >> 24;"); + statement("return clamp(float4(Packed) / 127.0, 
-1.0, 1.0);"); + end_scope(); + statement(""); + } + + if (requires_unorm16_packing) + { + statement("uint SPIRV_Cross_packUnorm2x16(float2 value)"); + begin_scope(); + statement("uint2 Packed = uint2(round(saturate(value) * 65535.0));"); + statement("return Packed.x | (Packed.y << 16);"); + end_scope(); + statement(""); + + statement("float2 SPIRV_Cross_unpackUnorm2x16(uint value)"); + begin_scope(); + statement("uint2 Packed = uint2(value & 0xffff, value >> 16);"); + statement("return float2(Packed) / 65535.0;"); + end_scope(); + statement(""); + } + + if (requires_snorm16_packing) + { + statement("uint SPIRV_Cross_packSnorm2x16(float2 value)"); + begin_scope(); + statement("int2 Packed = int2(round(clamp(value, -1.0, 1.0) * 32767.0)) & 0xffff;"); + statement("return uint(Packed.x | (Packed.y << 16));"); + end_scope(); + statement(""); + + statement("float2 SPIRV_Cross_unpackSnorm2x16(uint value)"); + begin_scope(); + statement("int SignedValue = int(value);"); + statement("int2 Packed = int2(SignedValue << 16, SignedValue) >> 16;"); + statement("return clamp(float2(Packed) / 32767.0, -1.0, 1.0);"); + end_scope(); + statement(""); + } + + if (requires_bitfield_insert) + { + static const char *types[] = { "uint", "uint2", "uint3", "uint4" }; + for (auto &type : types) + { + statement(type, " SPIRV_Cross_bitfieldInsert(", type, " Base, ", type, " Insert, uint Offset, uint Count)"); + begin_scope(); + statement("uint Mask = Count == 32 ? 0xffffffff : (((1u << Count) - 1) << (Offset & 31));"); + statement("return (Base & ~Mask) | ((Insert << Offset) & Mask);"); + end_scope(); + statement(""); + } + } + + if (requires_bitfield_extract) + { + static const char *unsigned_types[] = { "uint", "uint2", "uint3", "uint4" }; + for (auto &type : unsigned_types) + { + statement(type, " SPIRV_Cross_bitfieldUExtract(", type, " Base, uint Offset, uint Count)"); + begin_scope(); + statement("uint Mask = Count == 32 ? 0xffffffff : ((1 << Count) - 1);"); + statement("return (Base >> Offset) & Mask;"); + end_scope(); + statement(""); + } + + + static const char *signed_types[] = { "int", "int2", "int3", "int4" }; + for (auto &type : signed_types) + { + statement(type, " SPIRV_Cross_bitfieldSExtract(", type, " Base, int Offset, int Count)"); + begin_scope(); + statement("int Mask = Count == 32 ? -1 : ((1 << Count) - 1);"); + statement(type, " Masked = (Base >> Offset) & Mask;"); + statement("int ExtendShift = (32 - Count) & 31;"); + statement("return (Masked << ExtendShift) >> ExtendShift;"); + end_scope(); + statement(""); + } + } + + if (requires_inverse_2x2) + { + statement("// Returns the inverse of a matrix, by using the algorithm of calculating the classical"); + statement("// adjoint and dividing by the determinant. 
The contents of the matrix are changed."); + statement("float2x2 SPIRV_Cross_Inverse(float2x2 m)"); + begin_scope(); + statement("float2x2 adj; // The adjoint matrix (inverse after dividing by determinant)"); + statement_no_indent(""); + statement("// Create the transpose of the cofactors, as the classical adjoint of the matrix."); + statement("adj[0][0] = m[1][1];"); + statement("adj[0][1] = -m[0][1];"); + statement_no_indent(""); + statement("adj[1][0] = -m[1][0];"); + statement("adj[1][1] = m[0][0];"); + statement_no_indent(""); + statement("// Calculate the determinant as a combination of the cofactors of the first row."); + statement("float det = (adj[0][0] * m[0][0]) + (adj[0][1] * m[1][0]);"); + statement_no_indent(""); + statement("// Divide the classical adjoint matrix by the determinant."); + statement("// If determinant is zero, matrix is not invertable, so leave it unchanged."); + statement("return (det != 0.0f) ? (adj * (1.0f / det)) : m;"); + end_scope(); + statement(""); + } + + if (requires_inverse_3x3) + { + statement("// Returns the determinant of a 2x2 matrix."); + statement("float SPIRV_Cross_Det2x2(float a1, float a2, float b1, float b2)"); + begin_scope(); + statement("return a1 * b2 - b1 * a2;"); + end_scope(); + statement_no_indent(""); + statement("// Returns the inverse of a matrix, by using the algorithm of calculating the classical"); + statement("// adjoint and dividing by the determinant. The contents of the matrix are changed."); + statement("float3x3 SPIRV_Cross_Inverse(float3x3 m)"); + begin_scope(); + statement("float3x3 adj; // The adjoint matrix (inverse after dividing by determinant)"); + statement_no_indent(""); + statement("// Create the transpose of the cofactors, as the classical adjoint of the matrix."); + statement("adj[0][0] = SPIRV_Cross_Det2x2(m[1][1], m[1][2], m[2][1], m[2][2]);"); + statement("adj[0][1] = -SPIRV_Cross_Det2x2(m[0][1], m[0][2], m[2][1], m[2][2]);"); + statement("adj[0][2] = SPIRV_Cross_Det2x2(m[0][1], m[0][2], m[1][1], m[1][2]);"); + statement_no_indent(""); + statement("adj[1][0] = -SPIRV_Cross_Det2x2(m[1][0], m[1][2], m[2][0], m[2][2]);"); + statement("adj[1][1] = SPIRV_Cross_Det2x2(m[0][0], m[0][2], m[2][0], m[2][2]);"); + statement("adj[1][2] = -SPIRV_Cross_Det2x2(m[0][0], m[0][2], m[1][0], m[1][2]);"); + statement_no_indent(""); + statement("adj[2][0] = SPIRV_Cross_Det2x2(m[1][0], m[1][1], m[2][0], m[2][1]);"); + statement("adj[2][1] = -SPIRV_Cross_Det2x2(m[0][0], m[0][1], m[2][0], m[2][1]);"); + statement("adj[2][2] = SPIRV_Cross_Det2x2(m[0][0], m[0][1], m[1][0], m[1][1]);"); + statement_no_indent(""); + statement("// Calculate the determinant as a combination of the cofactors of the first row."); + statement("float det = (adj[0][0] * m[0][0]) + (adj[0][1] * m[1][0]) + (adj[0][2] * m[2][0]);"); + statement_no_indent(""); + statement("// Divide the classical adjoint matrix by the determinant."); + statement("// If determinant is zero, matrix is not invertable, so leave it unchanged."); + statement("return (det != 0.0f) ? 
(adj * (1.0f / det)) : m;"); + end_scope(); + statement(""); + } + + if (requires_inverse_4x4) + { + if (!requires_inverse_3x3) + { + statement("// Returns the determinant of a 2x2 matrix."); + statement("float SPIRV_Cross_Det2x2(float a1, float a2, float b1, float b2)"); + begin_scope(); + statement("return a1 * b2 - b1 * a2;"); + end_scope(); + statement(""); + } + + statement("// Returns the determinant of a 3x3 matrix."); + statement("float SPIRV_Cross_Det3x3(float a1, float a2, float a3, float b1, float b2, float b3, float c1, " + "float c2, float c3)"); + begin_scope(); + statement("return a1 * SPIRV_Cross_Det2x2(b2, b3, c2, c3) - b1 * SPIRV_Cross_Det2x2(a2, a3, c2, c3) + c1 * " + "SPIRV_Cross_Det2x2(a2, a3, " + "b2, b3);"); + end_scope(); + statement_no_indent(""); + statement("// Returns the inverse of a matrix, by using the algorithm of calculating the classical"); + statement("// adjoint and dividing by the determinant. The contents of the matrix are changed."); + statement("float4x4 SPIRV_Cross_Inverse(float4x4 m)"); + begin_scope(); + statement("float4x4 adj; // The adjoint matrix (inverse after dividing by determinant)"); + statement_no_indent(""); + statement("// Create the transpose of the cofactors, as the classical adjoint of the matrix."); + statement( + "adj[0][0] = SPIRV_Cross_Det3x3(m[1][1], m[1][2], m[1][3], m[2][1], m[2][2], m[2][3], m[3][1], m[3][2], " + "m[3][3]);"); + statement( + "adj[0][1] = -SPIRV_Cross_Det3x3(m[0][1], m[0][2], m[0][3], m[2][1], m[2][2], m[2][3], m[3][1], m[3][2], " + "m[3][3]);"); + statement( + "adj[0][2] = SPIRV_Cross_Det3x3(m[0][1], m[0][2], m[0][3], m[1][1], m[1][2], m[1][3], m[3][1], m[3][2], " + "m[3][3]);"); + statement( + "adj[0][3] = -SPIRV_Cross_Det3x3(m[0][1], m[0][2], m[0][3], m[1][1], m[1][2], m[1][3], m[2][1], m[2][2], " + "m[2][3]);"); + statement_no_indent(""); + statement( + "adj[1][0] = -SPIRV_Cross_Det3x3(m[1][0], m[1][2], m[1][3], m[2][0], m[2][2], m[2][3], m[3][0], m[3][2], " + "m[3][3]);"); + statement( + "adj[1][1] = SPIRV_Cross_Det3x3(m[0][0], m[0][2], m[0][3], m[2][0], m[2][2], m[2][3], m[3][0], m[3][2], " + "m[3][3]);"); + statement( + "adj[1][2] = -SPIRV_Cross_Det3x3(m[0][0], m[0][2], m[0][3], m[1][0], m[1][2], m[1][3], m[3][0], m[3][2], " + "m[3][3]);"); + statement( + "adj[1][3] = SPIRV_Cross_Det3x3(m[0][0], m[0][2], m[0][3], m[1][0], m[1][2], m[1][3], m[2][0], m[2][2], " + "m[2][3]);"); + statement_no_indent(""); + statement( + "adj[2][0] = SPIRV_Cross_Det3x3(m[1][0], m[1][1], m[1][3], m[2][0], m[2][1], m[2][3], m[3][0], m[3][1], " + "m[3][3]);"); + statement( + "adj[2][1] = -SPIRV_Cross_Det3x3(m[0][0], m[0][1], m[0][3], m[2][0], m[2][1], m[2][3], m[3][0], m[3][1], " + "m[3][3]);"); + statement( + "adj[2][2] = SPIRV_Cross_Det3x3(m[0][0], m[0][1], m[0][3], m[1][0], m[1][1], m[1][3], m[3][0], m[3][1], " + "m[3][3]);"); + statement( + "adj[2][3] = -SPIRV_Cross_Det3x3(m[0][0], m[0][1], m[0][3], m[1][0], m[1][1], m[1][3], m[2][0], m[2][1], " + "m[2][3]);"); + statement_no_indent(""); + statement( + "adj[3][0] = -SPIRV_Cross_Det3x3(m[1][0], m[1][1], m[1][2], m[2][0], m[2][1], m[2][2], m[3][0], m[3][1], " + "m[3][2]);"); + statement( + "adj[3][1] = SPIRV_Cross_Det3x3(m[0][0], m[0][1], m[0][2], m[2][0], m[2][1], m[2][2], m[3][0], m[3][1], " + "m[3][2]);"); + statement( + "adj[3][2] = -SPIRV_Cross_Det3x3(m[0][0], m[0][1], m[0][2], m[1][0], m[1][1], m[1][2], m[3][0], m[3][1], " + "m[3][2]);"); + statement( + "adj[3][3] = SPIRV_Cross_Det3x3(m[0][0], m[0][1], m[0][2], m[1][0], m[1][1], m[1][2], m[2][0], m[2][1], " + 
"m[2][2]);"); + statement_no_indent(""); + statement("// Calculate the determinant as a combination of the cofactors of the first row."); + statement("float det = (adj[0][0] * m[0][0]) + (adj[0][1] * m[1][0]) + (adj[0][2] * m[2][0]) + (adj[0][3] " + "* m[3][0]);"); + statement_no_indent(""); + statement("// Divide the classical adjoint matrix by the determinant."); + statement("// If determinant is zero, matrix is not invertable, so leave it unchanged."); + statement("return (det != 0.0f) ? (adj * (1.0f / det)) : m;"); + end_scope(); + statement(""); + } + + if (requires_scalar_reflect) + { + + statement("float SPIRV_Cross_Reflect(float i, float n)"); + begin_scope(); + statement("return i - 2.0 * dot(n, i) * n;"); + end_scope(); + statement(""); + } + + if (requires_scalar_refract) + { + + statement("float SPIRV_Cross_Refract(float i, float n, float eta)"); + begin_scope(); + statement("float NoI = n * i;"); + statement("float NoI2 = NoI * NoI;"); + statement("float k = 1.0 - eta * eta * (1.0 - NoI2);"); + statement("if (k < 0.0)"); + begin_scope(); + statement("return 0.0;"); + end_scope(); + statement("else"); + begin_scope(); + statement("return eta * i - (eta * NoI + sqrt(k)) * n;"); + end_scope(); + end_scope(); + statement(""); + } + + if (requires_scalar_faceforward) + { + + statement("float SPIRV_Cross_FaceForward(float n, float i, float nref)"); + begin_scope(); + statement("return i * nref < 0.0 ? n : -n;"); + end_scope(); + statement(""); + } +} + +string CompilerHLSL::layout_for_member(const SPIRType &type, uint32_t index) +{ + auto &flags = get_member_decoration_bitset(type.self, index); + + + + + + + if (flags.get(DecorationColMajor)) + return "row_major "; + else if (flags.get(DecorationRowMajor)) + return "column_major "; + + return ""; +} + +void CompilerHLSL::emit_struct_member(const SPIRType &type, uint32_t member_type_id, uint32_t index, + const string &qualifier, uint32_t base_offset) +{ + auto &membertype = get(member_type_id); + + Bitset memberflags; + auto &memb = ir.meta[type.self].members; + if (index < memb.size()) + memberflags = memb[index].decoration_flags; + + string qualifiers; + bool is_block = ir.meta[type.self].decoration.decoration_flags.get(DecorationBlock) || + ir.meta[type.self].decoration.decoration_flags.get(DecorationBufferBlock); + + if (is_block) + qualifiers = to_interpolation_qualifiers(memberflags); + + string packing_offset; + bool is_push_constant = type.storage == StorageClassPushConstant; + + if ((has_extended_decoration(type.self, SPIRVCrossDecorationExplicitOffset) || is_push_constant) && + has_member_decoration(type.self, index, DecorationOffset)) + { + uint32_t offset = memb[index].offset - base_offset; + if (offset & 3) + SPIRV_CROSS_THROW("Cannot pack on tighter bounds than 4 bytes in HLSL."); + + static const char *packing_swizzle[] = { "", ".y", ".z", ".w" }; + packing_offset = join(" : packoffset(c", offset / 16, packing_swizzle[(offset & 15) >> 2], ")"); + } + + statement(layout_for_member(type, index), qualifiers, qualifier, + variable_decl(membertype, to_member_name(type, index)), packing_offset, ";"); +} + +void CompilerHLSL::emit_buffer_block(const SPIRVariable &var) +{ + auto &type = get(var.basetype); + + bool is_uav = var.storage == StorageClassStorageBuffer || has_decoration(type.self, DecorationBufferBlock); + + if (is_uav) + { + Bitset flags = ir.get_buffer_block_flags(var); + bool is_readonly = flags.get(DecorationNonWritable); + bool is_coherent = flags.get(DecorationCoherent); + bool is_interlocked = 
interlocked_resources.count(var.self) > 0; + const char *type_name = "ByteAddressBuffer "; + if (!is_readonly) + type_name = is_interlocked ? "RasterizerOrderedByteAddressBuffer " : "RWByteAddressBuffer "; + add_resource_name(var.self); + statement(is_coherent ? "globallycoherent " : "", type_name, to_name(var.self), type_to_array_glsl(type), + to_resource_binding(var), ";"); + } + else + { + if (type.array.empty()) + { + if (buffer_is_packing_standard(type, BufferPackingHLSLCbufferPackOffset)) + set_extended_decoration(type.self, SPIRVCrossDecorationExplicitOffset); + else + SPIRV_CROSS_THROW("cbuffer cannot be expressed with either HLSL packing layout or packoffset."); + + + + flattened_structs.insert(var.self); + + + auto buffer_name = to_name(type.self, false); + if (ir.meta[type.self].decoration.alias.empty() || + resource_names.find(buffer_name) != end(resource_names) || + block_names.find(buffer_name) != end(block_names)) + { + buffer_name = get_block_fallback_name(var.self); + } + + add_variable(block_names, resource_names, buffer_name); + + + + if (buffer_name.empty()) + buffer_name = join("_", get(var.basetype).self, "_", var.self); + + block_names.insert(buffer_name); + + + declared_block_names[var.self] = buffer_name; + + type.member_name_cache.clear(); + + + + preserve_alias_on_reset(var.self); + add_resource_name(var.self); + statement("cbuffer ", buffer_name, to_resource_binding(var)); + begin_scope(); + + uint32_t i = 0; + for (auto &member : type.member_types) + { + add_member_name(type, i); + auto backup_name = get_member_name(type.self, i); + auto member_name = to_member_name(type, i); + set_member_name(type.self, i, sanitize_underscores(join(to_name(var.self), "_", member_name))); + emit_struct_member(type, member, i, ""); + set_member_name(type.self, i, backup_name); + i++; + } + + end_scope_decl(); + statement(""); + } + else + { + if (hlsl_options.shader_model < 51) + SPIRV_CROSS_THROW( + "Need ConstantBuffer to use arrays of UBOs, but this is only supported in SM 5.1."); + + + if (!buffer_is_packing_standard(type, BufferPackingHLSLCbuffer)) + SPIRV_CROSS_THROW("HLSL ConstantBuffer cannot be expressed with normal HLSL packing rules."); + + add_resource_name(type.self); + add_resource_name(var.self); + + emit_struct(get(type.self)); + statement("ConstantBuffer<", to_name(type.self), "> ", to_name(var.self), type_to_array_glsl(type), + to_resource_binding(var), ";"); + } + } +} + +void CompilerHLSL::emit_push_constant_block(const SPIRVariable &var) +{ + if (root_constants_layout.empty()) + { + emit_buffer_block(var); + } + else + { + for (const auto &layout : root_constants_layout) + { + auto &type = get(var.basetype); + + if (buffer_is_packing_standard(type, BufferPackingHLSLCbufferPackOffset, layout.start, layout.end)) + set_extended_decoration(type.self, SPIRVCrossDecorationExplicitOffset); + else + SPIRV_CROSS_THROW( + "root constant cbuffer cannot be expressed with either HLSL packing layout or packoffset."); + + flattened_structs.insert(var.self); + type.member_name_cache.clear(); + add_resource_name(var.self); + auto &memb = ir.meta[type.self].members; + + statement("cbuffer SPIRV_CROSS_RootConstant_", to_name(var.self), + to_resource_register('b', layout.binding, layout.space)); + begin_scope(); + + + auto constant_index = 0u; + + + + for (auto i = 0u; i < memb.size(); i++) + { + const auto offset = memb[i].offset; + if (layout.start <= offset && offset < layout.end) + { + const auto &member = type.member_types[i]; + + add_member_name(type, constant_index); + 
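// The member is temporarily renamed (and restored just below) so the flattened root-constant field is emitted under a variable-qualified, sanitized alias. +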
auto backup_name = get_member_name(type.self, i); + auto member_name = to_member_name(type, i); + set_member_name(type.self, constant_index, + sanitize_underscores(join(to_name(var.self), "_", member_name))); + emit_struct_member(type, member, i, "", layout.start); + set_member_name(type.self, constant_index, backup_name); + + constant_index++; + } + } + + end_scope_decl(); + } + } +} + +string CompilerHLSL::to_sampler_expression(uint32_t id) +{ + auto expr = join("_", to_expression(id)); + auto index = expr.find_first_of('['); + if (index == string::npos) + { + return expr + "_sampler"; + } + else + { + + return expr.insert(index, "_sampler"); + } +} + +void CompilerHLSL::emit_sampled_image_op(uint32_t result_type, uint32_t result_id, uint32_t image_id, uint32_t samp_id) +{ + if (hlsl_options.shader_model >= 40 && combined_image_samplers.empty()) + { + set(result_id, result_type, image_id, samp_id); + } + else + { + + emit_op(result_type, result_id, to_combined_image_sampler(image_id, samp_id), true, true); + } +} + +string CompilerHLSL::to_func_call_arg(const SPIRFunction::Parameter &arg, uint32_t id) +{ + string arg_str = CompilerGLSL::to_func_call_arg(arg, id); + + if (hlsl_options.shader_model <= 30) + return arg_str; + + + auto &type = expression_type(id); + + + + + if (type.basetype == SPIRType::SampledImage && type.image.dim != DimBuffer) + arg_str += ", " + to_sampler_expression(id); + + return arg_str; +} + +void CompilerHLSL::emit_function_prototype(SPIRFunction &func, const Bitset &return_flags) +{ + if (func.self != ir.default_entry_point) + add_function_overload(func); + + auto &execution = get_entry_point(); + + local_variable_names = resource_names; + + string decl; + + auto &type = get(func.return_type); + if (type.array.empty()) + { + decl += flags_to_qualifiers_glsl(type, return_flags); + decl += type_to_glsl(type); + decl += " "; + } + else + { + + decl = "void "; + } + + if (func.self == ir.default_entry_point) + { + if (execution.model == ExecutionModelVertex) + decl += "vert_main"; + else if (execution.model == ExecutionModelFragment) + decl += "frag_main"; + else if (execution.model == ExecutionModelGLCompute) + decl += "comp_main"; + else + SPIRV_CROSS_THROW("Unsupported execution model."); + processing_entry_point = true; + } + else + decl += to_name(func.self); + + decl += "("; + SmallVector arglist; + + if (!type.array.empty()) + { + + string out_argument; + out_argument += "out "; + out_argument += type_to_glsl(type); + out_argument += " "; + out_argument += "SPIRV_Cross_return_value"; + out_argument += type_to_array_glsl(type); + arglist.push_back(move(out_argument)); + } + + for (auto &arg : func.arguments) + { + + + if (skip_argument(arg.id)) + continue; + + + + + + add_local_variable_name(arg.id); + + arglist.push_back(argument_decl(arg)); + + + auto &arg_type = get(arg.type); + if (hlsl_options.shader_model > 30 && arg_type.basetype == SPIRType::SampledImage && + arg_type.image.dim != DimBuffer) + { + + arglist.push_back(join(image_is_comparison(arg_type, arg.id) ? 
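/* comparison (shadow) images need a SamplerComparisonState companion parameter */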
"SamplerComparisonState " : "SamplerState ", + to_sampler_expression(arg.id), type_to_array_glsl(arg_type))); + } + + + auto *var = maybe_get(arg.id); + if (var) + var->parameter = &arg; + } + + for (auto &arg : func.shadow_arguments) + { + + + + + add_local_variable_name(arg.id); + + arglist.push_back(argument_decl(arg)); + + + auto *var = maybe_get(arg.id); + if (var) + var->parameter = &arg; + } + + decl += merge(arglist); + decl += ")"; + statement(decl); +} + +void CompilerHLSL::emit_hlsl_entry_point() +{ + SmallVector arguments; + + if (require_input) + arguments.push_back("SPIRV_Cross_Input stage_input"); + + + ir.for_each_typed_id([&](uint32_t, SPIRVariable &var) { + auto &type = this->get(var.basetype); + bool block = ir.meta[type.self].decoration.decoration_flags.get(DecorationBlock); + + if (var.storage != StorageClassInput && var.storage != StorageClassOutput) + return; + + if (block && !is_builtin_variable(var) && interface_variable_exists_in_entry_point(var.self)) + { + if (var.storage == StorageClassInput) + { + arguments.push_back(join("in ", variable_decl(type, join("stage_input", to_name(var.self))))); + } + else if (var.storage == StorageClassOutput) + { + arguments.push_back(join("out ", variable_decl(type, join("stage_output", to_name(var.self))))); + } + } + }); + + auto &execution = get_entry_point(); + + switch (execution.model) + { + case ExecutionModelGLCompute: + { + SpecializationConstant wg_x, wg_y, wg_z; + get_work_group_size_specialization_constants(wg_x, wg_y, wg_z); + + uint32_t x = execution.workgroup_size.x; + uint32_t y = execution.workgroup_size.y; + uint32_t z = execution.workgroup_size.z; + + auto x_expr = wg_x.id ? get(wg_x.id).specialization_constant_macro_name : to_string(x); + auto y_expr = wg_y.id ? get(wg_y.id).specialization_constant_macro_name : to_string(y); + auto z_expr = wg_z.id ? get(wg_z.id).specialization_constant_macro_name : to_string(z); + + statement("[numthreads(", x_expr, ", ", y_expr, ", ", z_expr, ")]"); + break; + } + case ExecutionModelFragment: + if (execution.flags.get(ExecutionModeEarlyFragmentTests)) + statement("[earlydepthstencil]"); + break; + default: + break; + } + + statement(require_output ? 
"SPIRV_Cross_Output " : "void ", "main(", merge(arguments), ")"); + begin_scope(); + bool legacy = hlsl_options.shader_model <= 30; + + + active_input_builtins.for_each_bit([&](uint32_t i) { + auto builtin = builtin_to_glsl(static_cast(i), StorageClassInput); + switch (static_cast(i)) + { + case BuiltInFragCoord: + + + + if (legacy) + statement(builtin, " = stage_input.", builtin, " + float4(0.5f, 0.5f, 0.0f, 0.0f);"); + else + statement(builtin, " = stage_input.", builtin, ";"); + break; + + case BuiltInVertexId: + case BuiltInVertexIndex: + case BuiltInInstanceIndex: + + if (hlsl_options.support_nonzero_base_vertex_base_instance) + { + if (static_cast(i) == BuiltInInstanceIndex) + statement(builtin, " = int(stage_input.", builtin, ") + SPIRV_Cross_BaseInstance;"); + else + statement(builtin, " = int(stage_input.", builtin, ") + SPIRV_Cross_BaseVertex;"); + } + else + statement(builtin, " = int(stage_input.", builtin, ");"); + break; + + case BuiltInInstanceId: + + statement(builtin, " = int(stage_input.", builtin, ");"); + break; + + case BuiltInNumWorkgroups: + case BuiltInPointCoord: + case BuiltInSubgroupSize: + case BuiltInSubgroupLocalInvocationId: + break; + + case BuiltInSubgroupEqMask: + + + statement("gl_SubgroupEqMask = 1u << (WaveGetLaneIndex() - uint4(0, 32, 64, 96));"); + statement("if (WaveGetLaneIndex() >= 32) gl_SubgroupEqMask.x = 0;"); + statement("if (WaveGetLaneIndex() >= 64 || WaveGetLaneIndex() < 32) gl_SubgroupEqMask.y = 0;"); + statement("if (WaveGetLaneIndex() >= 96 || WaveGetLaneIndex() < 64) gl_SubgroupEqMask.z = 0;"); + statement("if (WaveGetLaneIndex() < 96) gl_SubgroupEqMask.w = 0;"); + break; + + case BuiltInSubgroupGeMask: + + + statement("gl_SubgroupGeMask = ~((1u << (WaveGetLaneIndex() - uint4(0, 32, 64, 96))) - 1u);"); + statement("if (WaveGetLaneIndex() >= 32) gl_SubgroupGeMask.x = 0u;"); + statement("if (WaveGetLaneIndex() >= 64) gl_SubgroupGeMask.y = 0u;"); + statement("if (WaveGetLaneIndex() >= 96) gl_SubgroupGeMask.z = 0u;"); + statement("if (WaveGetLaneIndex() < 32) gl_SubgroupGeMask.y = ~0u;"); + statement("if (WaveGetLaneIndex() < 64) gl_SubgroupGeMask.z = ~0u;"); + statement("if (WaveGetLaneIndex() < 96) gl_SubgroupGeMask.w = ~0u;"); + break; + + case BuiltInSubgroupGtMask: + + + statement("uint gt_lane_index = WaveGetLaneIndex() + 1;"); + statement("gl_SubgroupGtMask = ~((1u << (gt_lane_index - uint4(0, 32, 64, 96))) - 1u);"); + statement("if (gt_lane_index >= 32) gl_SubgroupGtMask.x = 0u;"); + statement("if (gt_lane_index >= 64) gl_SubgroupGtMask.y = 0u;"); + statement("if (gt_lane_index >= 96) gl_SubgroupGtMask.z = 0u;"); + statement("if (gt_lane_index >= 128) gl_SubgroupGtMask.w = 0u;"); + statement("if (gt_lane_index < 32) gl_SubgroupGtMask.y = ~0u;"); + statement("if (gt_lane_index < 64) gl_SubgroupGtMask.z = ~0u;"); + statement("if (gt_lane_index < 96) gl_SubgroupGtMask.w = ~0u;"); + break; + + case BuiltInSubgroupLeMask: + + + statement("uint le_lane_index = WaveGetLaneIndex() + 1;"); + statement("gl_SubgroupLeMask = (1u << (le_lane_index - uint4(0, 32, 64, 96))) - 1u;"); + statement("if (le_lane_index >= 32) gl_SubgroupLeMask.x = ~0u;"); + statement("if (le_lane_index >= 64) gl_SubgroupLeMask.y = ~0u;"); + statement("if (le_lane_index >= 96) gl_SubgroupLeMask.z = ~0u;"); + statement("if (le_lane_index >= 128) gl_SubgroupLeMask.w = ~0u;"); + statement("if (le_lane_index < 32) gl_SubgroupLeMask.y = 0u;"); + statement("if (le_lane_index < 64) gl_SubgroupLeMask.z = 0u;"); + statement("if (le_lane_index < 96) gl_SubgroupLeMask.w = 0u;"); + 
break; + + case BuiltInSubgroupLtMask: + + + statement("gl_SubgroupLtMask = (1u << (WaveGetLaneIndex() - uint4(0, 32, 64, 96))) - 1u;"); + statement("if (WaveGetLaneIndex() >= 32) gl_SubgroupLtMask.x = ~0u;"); + statement("if (WaveGetLaneIndex() >= 64) gl_SubgroupLtMask.y = ~0u;"); + statement("if (WaveGetLaneIndex() >= 96) gl_SubgroupLtMask.z = ~0u;"); + statement("if (WaveGetLaneIndex() < 32) gl_SubgroupLtMask.y = 0u;"); + statement("if (WaveGetLaneIndex() < 64) gl_SubgroupLtMask.z = 0u;"); + statement("if (WaveGetLaneIndex() < 96) gl_SubgroupLtMask.w = 0u;"); + break; + + case BuiltInClipDistance: + for (uint32_t clip = 0; clip < clip_distance_count; clip++) + statement("gl_ClipDistance[", clip, "] = stage_input.gl_ClipDistance", clip / 4, ".", "xyzw"[clip & 3], + ";"); + break; + + case BuiltInCullDistance: + for (uint32_t cull = 0; cull < cull_distance_count; cull++) + statement("gl_CullDistance[", cull, "] = stage_input.gl_CullDistance", cull / 4, ".", "xyzw"[cull & 3], + ";"); + break; + + default: + statement(builtin, " = stage_input.", builtin, ";"); + break; + } + }); + + + ir.for_each_typed_id([&](uint32_t, SPIRVariable &var) { + auto &type = this->get(var.basetype); + bool block = ir.meta[type.self].decoration.decoration_flags.get(DecorationBlock); + + if (var.storage != StorageClassInput) + return; + + bool need_matrix_unroll = var.storage == StorageClassInput && execution.model == ExecutionModelVertex; + + if (!block && !var.remapped_variable && type.pointer && !is_builtin_variable(var) && + interface_variable_exists_in_entry_point(var.self)) + { + auto name = to_name(var.self); + auto &mtype = this->get(var.basetype); + if (need_matrix_unroll && mtype.columns > 1) + { + + for (uint32_t col = 0; col < mtype.columns; col++) + statement(name, "[", col, "] = stage_input.", name, "_", col, ";"); + } + else + { + statement(name, " = stage_input.", name, ";"); + } + } + + + if (block && !is_builtin_variable(var) && interface_variable_exists_in_entry_point(var.self)) + { + auto name = to_name(var.self); + statement(name, " = stage_input", name, ";"); + } + }); + + + if (execution.model == ExecutionModelVertex) + statement("vert_main();"); + else if (execution.model == ExecutionModelFragment) + statement("frag_main();"); + else if (execution.model == ExecutionModelGLCompute) + statement("comp_main();"); + else + SPIRV_CROSS_THROW("Unsupported shader stage."); + + + ir.for_each_typed_id([&](uint32_t, SPIRVariable &var) { + auto &type = this->get(var.basetype); + bool block = ir.meta[type.self].decoration.decoration_flags.get(DecorationBlock); + + if (var.storage != StorageClassOutput) + return; + + + if (block && !is_builtin_variable(var) && interface_variable_exists_in_entry_point(var.self)) + { + auto name = to_name(var.self); + statement("stage_output", name, " = ", name, ";"); + } + }); + + + if (require_output) + { + statement("SPIRV_Cross_Output stage_output;"); + + + active_output_builtins.for_each_bit([&](uint32_t i) { + + if (i == BuiltInPointSize) + return; + + switch (static_cast(i)) + { + case BuiltInClipDistance: + for (uint32_t clip = 0; clip < clip_distance_count; clip++) + statement("stage_output.gl_ClipDistance", clip / 4, ".", "xyzw"[clip & 3], " = gl_ClipDistance[", + clip, "];"); + break; + + case BuiltInCullDistance: + for (uint32_t cull = 0; cull < cull_distance_count; cull++) + statement("stage_output.gl_CullDistance", cull / 4, ".", "xyzw"[cull & 3], " = gl_CullDistance[", + cull, "];"); + break; + + default: + { + auto builtin_expr = 
builtin_to_glsl(static_cast(i), StorageClassOutput); + statement("stage_output.", builtin_expr, " = ", builtin_expr, ";"); + break; + } + } + }); + + ir.for_each_typed_id([&](uint32_t, SPIRVariable &var) { + auto &type = this->get(var.basetype); + bool block = ir.meta[type.self].decoration.decoration_flags.get(DecorationBlock); + + if (var.storage != StorageClassOutput) + return; + + if (!block && var.storage != StorageClassFunction && !var.remapped_variable && type.pointer && + !is_builtin_variable(var) && interface_variable_exists_in_entry_point(var.self)) + { + auto name = to_name(var.self); + + if (legacy && execution.model == ExecutionModelFragment) + { + string output_filler; + for (uint32_t size = type.vecsize; size < 4; ++size) + output_filler += ", 0.0"; + + statement("stage_output.", name, " = float4(", name, output_filler, ");"); + } + else + { + statement("stage_output.", name, " = ", name, ";"); + } + } + }); + + statement("return stage_output;"); + } + + end_scope(); +} + +void CompilerHLSL::emit_fixup() +{ + if (get_entry_point().model == ExecutionModelVertex) + { + + if (hlsl_options.shader_model <= 30) + { + statement("gl_Position.x = gl_Position.x - gl_HalfPixel.x * " + "gl_Position.w;"); + statement("gl_Position.y = gl_Position.y + gl_HalfPixel.y * " + "gl_Position.w;"); + } + + if (options.vertex.flip_vert_y) + statement("gl_Position.y = -gl_Position.y;"); + if (options.vertex.fixup_clipspace) + statement("gl_Position.z = (gl_Position.z + gl_Position.w) * 0.5;"); + } +} + +void CompilerHLSL::emit_texture_op(const Instruction &i) +{ + auto *ops = stream(i); + auto op = static_cast(i.op); + uint32_t length = i.length; + + SmallVector inherited_expressions; + + uint32_t result_type = ops[0]; + uint32_t id = ops[1]; + VariableID img = ops[2]; + uint32_t coord = ops[3]; + uint32_t dref = 0; + uint32_t comp = 0; + bool gather = false; + bool proj = false; + const uint32_t *opt = nullptr; + auto *combined_image = maybe_get(img); + auto img_expr = to_expression(combined_image ? 
combined_image->image : img); + + inherited_expressions.push_back(coord); + + + if (has_decoration(img, DecorationNonUniformEXT)) + propagate_nonuniform_qualifier(img); + + switch (op) + { + case OpImageSampleDrefImplicitLod: + case OpImageSampleDrefExplicitLod: + dref = ops[4]; + opt = &ops[5]; + length -= 5; + break; + + case OpImageSampleProjDrefImplicitLod: + case OpImageSampleProjDrefExplicitLod: + dref = ops[4]; + proj = true; + opt = &ops[5]; + length -= 5; + break; + + case OpImageDrefGather: + dref = ops[4]; + opt = &ops[5]; + gather = true; + length -= 5; + break; + + case OpImageGather: + comp = ops[4]; + opt = &ops[5]; + gather = true; + length -= 5; + break; + + case OpImageSampleProjImplicitLod: + case OpImageSampleProjExplicitLod: + opt = &ops[4]; + length -= 4; + proj = true; + break; + + case OpImageQueryLod: + opt = &ops[4]; + length -= 4; + break; + + default: + opt = &ops[4]; + length -= 4; + break; + } + + auto &imgtype = expression_type(img); + uint32_t coord_components = 0; + switch (imgtype.image.dim) + { + case spv::Dim1D: + coord_components = 1; + break; + case spv::Dim2D: + coord_components = 2; + break; + case spv::Dim3D: + coord_components = 3; + break; + case spv::DimCube: + coord_components = 3; + break; + case spv::DimBuffer: + coord_components = 1; + break; + default: + coord_components = 2; + break; + } + + if (dref) + inherited_expressions.push_back(dref); + + if (imgtype.image.arrayed) + coord_components++; + + uint32_t bias = 0; + uint32_t lod = 0; + uint32_t grad_x = 0; + uint32_t grad_y = 0; + uint32_t coffset = 0; + uint32_t offset = 0; + uint32_t coffsets = 0; + uint32_t sample = 0; + uint32_t minlod = 0; + uint32_t flags = 0; + + if (length) + { + flags = opt[0]; + opt++; + length--; + } + + auto test = [&](uint32_t &v, uint32_t flag) { + if (length && (flags & flag)) + { + v = *opt++; + inherited_expressions.push_back(v); + length--; + } + }; + + test(bias, ImageOperandsBiasMask); + test(lod, ImageOperandsLodMask); + test(grad_x, ImageOperandsGradMask); + test(grad_y, ImageOperandsGradMask); + test(coffset, ImageOperandsConstOffsetMask); + test(offset, ImageOperandsOffsetMask); + test(coffsets, ImageOperandsConstOffsetsMask); + test(sample, ImageOperandsSampleMask); + test(minlod, ImageOperandsMinLodMask); + + string expr; + string texop; + + if (minlod != 0) + SPIRV_CROSS_THROW("MinLod texture operand not supported in HLSL."); + + if (op == OpImageFetch) + { + if (hlsl_options.shader_model < 40) + { + SPIRV_CROSS_THROW("texelFetch is not supported in HLSL shader model 2/3."); + } + texop += img_expr; + texop += ".Load"; + } + else if (op == OpImageQueryLod) + { + texop += img_expr; + texop += ".CalculateLevelOfDetail"; + } + else + { + auto &imgformat = get(imgtype.image.type); + if (imgformat.basetype != SPIRType::Float) + { + SPIRV_CROSS_THROW("Sampling non-float textures is not supported in HLSL."); + } + + if (hlsl_options.shader_model >= 40) + { + texop += img_expr; + + if (image_is_comparison(imgtype, img)) + { + if (gather) + { + SPIRV_CROSS_THROW("GatherCmp does not exist in HLSL."); + } + else if (lod || grad_x || grad_y) + { + + texop += ".SampleCmpLevelZero"; + } + else + texop += ".SampleCmp"; + } + else if (gather) + { + uint32_t comp_num = get(comp).scalar(); + if (hlsl_options.shader_model >= 50) + { + switch (comp_num) + { + case 0: + texop += ".GatherRed"; + break; + case 1: + texop += ".GatherGreen"; + break; + case 2: + texop += ".GatherBlue"; + break; + case 3: + texop += ".GatherAlpha"; + break; + default: + 
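// Illustrative aside (a sketch, not part of the vendored code): earlier in
// emit_texture_op, the optional image operands are consumed strictly in
// ImageOperands bit order, one payload word per set bit, except Grad, which
// carries two words (ddx, ddy) and is therefore tested twice with the same
// mask. Standalone model of that consumption scheme; the mask values mirror
// SPIR-V's ImageOperands bits:

#include <cstdint>
#include <cstdio>

int main()
{
	enum : uint32_t { BiasMask = 0x1, LodMask = 0x2, GradMask = 0x4, ConstOffsetMask = 0x8 };

	// Word stream as it appears after the coordinate: the mask, then one
	// payload word per set bit, lowest bit first.
	const uint32_t words[] = { GradMask | ConstOffsetMask, 8 /*ddx*/, 9 /*ddy*/, 5 /*offset*/ };
	uint32_t flags = words[0];
	const uint32_t *opt = words + 1;
	uint32_t length = 3;

	auto test = [&](uint32_t &v, uint32_t flag) {
		if (length && (flags & flag))
		{
			v = *opt++;
			length--;
		}
	};

	uint32_t bias = 0, lod = 0, grad_x = 0, grad_y = 0, coffset = 0;
	test(bias, BiasMask);           // bit clear: consumes nothing
	test(lod, LodMask);             // bit clear: consumes nothing
	test(grad_x, GradMask);         // Grad carries two words, so the same
	test(grad_y, GradMask);         // bit is tested twice: takes 8, then 9
	test(coffset, ConstOffsetMask); // takes 5
	printf("grad=(%u, %u) offset=%u bias=%u lod=%u\n", grad_x, grad_y, coffset, bias, lod);
	return 0;
}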
SPIRV_CROSS_THROW("Invalid component."); + } + } + else + { + if (comp_num == 0) + texop += ".Gather"; + else + SPIRV_CROSS_THROW("HLSL shader model 4 can only gather from the red component."); + } + } + else if (bias) + texop += ".SampleBias"; + else if (grad_x || grad_y) + texop += ".SampleGrad"; + else if (lod) + texop += ".SampleLevel"; + else + texop += ".Sample"; + } + else + { + switch (imgtype.image.dim) + { + case Dim1D: + texop += "tex1D"; + break; + case Dim2D: + texop += "tex2D"; + break; + case Dim3D: + texop += "tex3D"; + break; + case DimCube: + texop += "texCUBE"; + break; + case DimRect: + case DimBuffer: + case DimSubpassData: + SPIRV_CROSS_THROW("Buffer texture support is not yet implemented for HLSL"); + default: + SPIRV_CROSS_THROW("Invalid dimension."); + } + + if (gather) + SPIRV_CROSS_THROW("textureGather is not supported in HLSL shader model 2/3."); + if (offset || coffset) + SPIRV_CROSS_THROW("textureOffset is not supported in HLSL shader model 2/3."); + if (proj) + texop += "proj"; + if (grad_x || grad_y) + texop += "grad"; + if (lod) + texop += "lod"; + if (bias) + texop += "bias"; + } + } + + expr += texop; + expr += "("; + if (hlsl_options.shader_model < 40) + { + if (combined_image) + SPIRV_CROSS_THROW("Separate images/samplers are not supported in HLSL shader model 2/3."); + expr += to_expression(img); + } + else if (op != OpImageFetch) + { + string sampler_expr; + if (combined_image) + sampler_expr = to_expression(combined_image->sampler); + else + sampler_expr = to_sampler_expression(img); + expr += sampler_expr; + } + + auto swizzle = [](uint32_t comps, uint32_t in_comps) -> const char * { + if (comps == in_comps) + return ""; + + switch (comps) + { + case 1: + return ".x"; + case 2: + return ".xy"; + case 3: + return ".xyz"; + default: + return ""; + } + }; + + bool forward = should_forward(coord); + + + string coord_expr; + auto &coord_type = expression_type(coord); + if (coord_components != coord_type.vecsize) + coord_expr = to_enclosed_expression(coord) + swizzle(coord_components, expression_type(coord).vecsize); + else + coord_expr = to_expression(coord); + + if (proj && hlsl_options.shader_model >= 40) + coord_expr = coord_expr + " / " + to_extract_component_expression(coord, coord_components); + + if (hlsl_options.shader_model < 40 && lod) + { + string coord_filler; + for (uint32_t size = coord_components; size < 3; ++size) + { + coord_filler += ", 0.0"; + } + coord_expr = "float4(" + coord_expr + coord_filler + ", " + to_expression(lod) + ")"; + } + + if (hlsl_options.shader_model < 40 && bias) + { + string coord_filler; + for (uint32_t size = coord_components; size < 3; ++size) + { + coord_filler += ", 0.0"; + } + coord_expr = "float4(" + coord_expr + coord_filler + ", " + to_expression(bias) + ")"; + } + + if (op == OpImageFetch) + { + if (imgtype.image.dim != DimBuffer && !imgtype.image.ms) + coord_expr = + join("int", coord_components + 1, "(", coord_expr, ", ", lod ? 
to_expression(lod) : string("0"), ")"); + } + else + expr += ", "; + expr += coord_expr; + + if (dref) + { + if (hlsl_options.shader_model < 40) + SPIRV_CROSS_THROW("Legacy HLSL does not support comparison sampling."); + + forward = forward && should_forward(dref); + expr += ", "; + + if (proj) + expr += to_enclosed_expression(dref) + " / " + to_extract_component_expression(coord, coord_components); + else + expr += to_expression(dref); + } + + if (!dref && (grad_x || grad_y)) + { + forward = forward && should_forward(grad_x); + forward = forward && should_forward(grad_y); + expr += ", "; + expr += to_expression(grad_x); + expr += ", "; + expr += to_expression(grad_y); + } + + if (!dref && lod && hlsl_options.shader_model >= 40 && op != OpImageFetch) + { + forward = forward && should_forward(lod); + expr += ", "; + expr += to_expression(lod); + } + + if (!dref && bias && hlsl_options.shader_model >= 40) + { + forward = forward && should_forward(bias); + expr += ", "; + expr += to_expression(bias); + } + + if (coffset) + { + forward = forward && should_forward(coffset); + expr += ", "; + expr += to_expression(coffset); + } + else if (offset) + { + forward = forward && should_forward(offset); + expr += ", "; + expr += to_expression(offset); + } + + if (sample) + { + expr += ", "; + expr += to_expression(sample); + } + + expr += ")"; + + if (op == OpImageQueryLod) + { + + + + + + + statement("float _", id, "_tmp = ", expr, ";"); + statement("float2 _", id, " = _", id, "_tmp.xx;"); + set(id, join("_", id), result_type, true); + } + else + { + emit_op(result_type, id, expr, forward, false); + } + + for (auto &inherit : inherited_expressions) + inherit_expression_dependencies(id, inherit); + + switch (op) + { + case OpImageSampleDrefImplicitLod: + case OpImageSampleImplicitLod: + case OpImageSampleProjImplicitLod: + case OpImageSampleProjDrefImplicitLod: + register_control_dependent_expression(id); + break; + + default: + break; + } +} + +string CompilerHLSL::to_resource_binding(const SPIRVariable &var) +{ + + + + if (!has_decoration(var.self, DecorationBinding)) + return ""; + + const auto &type = get(var.basetype); + char space = '\0'; + + switch (type.basetype) + { + case SPIRType::SampledImage: + space = 't'; + break; + + case SPIRType::Image: + if (type.image.sampled == 2 && type.image.dim != DimSubpassData) + space = 'u'; + else + space = 't'; + break; + + case SPIRType::Sampler: + space = 's'; + break; + + case SPIRType::Struct: + { + auto storage = type.storage; + if (storage == StorageClassUniform) + { + if (has_decoration(type.self, DecorationBufferBlock)) + { + Bitset flags = ir.get_buffer_block_flags(var); + bool is_readonly = flags.get(DecorationNonWritable); + space = is_readonly ? 't' : 'u'; + } + else if (has_decoration(type.self, DecorationBlock)) + space = 'b'; + } + else if (storage == StorageClassPushConstant) + space = 'b'; + else if (storage == StorageClassStorageBuffer) + { + + Bitset flags = ir.get_buffer_block_flags(var); + bool is_readonly = flags.get(DecorationNonWritable); + space = is_readonly ? 
't' : 'u'; + } + + break; + } + default: + break; + } + + if (!space) + return ""; + + return to_resource_register(space, get_decoration(var.self, DecorationBinding), + get_decoration(var.self, DecorationDescriptorSet)); +} + +string CompilerHLSL::to_resource_binding_sampler(const SPIRVariable &var) +{ + + if (!has_decoration(var.self, DecorationBinding)) + return ""; + + return to_resource_register('s', get_decoration(var.self, DecorationBinding), + get_decoration(var.self, DecorationDescriptorSet)); +} + +string CompilerHLSL::to_resource_register(char space, uint32_t binding, uint32_t space_set) +{ + if (hlsl_options.shader_model >= 51) + return join(" : register(", space, binding, ", space", space_set, ")"); + else + return join(" : register(", space, binding, ")"); +} + +void CompilerHLSL::emit_modern_uniform(const SPIRVariable &var) +{ + auto &type = get(var.basetype); + switch (type.basetype) + { + case SPIRType::SampledImage: + case SPIRType::Image: + { + bool is_coherent = false; + if (type.basetype == SPIRType::Image && type.image.sampled == 2) + is_coherent = has_decoration(var.self, DecorationCoherent); + + statement(is_coherent ? "globallycoherent " : "", image_type_hlsl_modern(type, var.self), " ", + to_name(var.self), type_to_array_glsl(type), to_resource_binding(var), ";"); + + if (type.basetype == SPIRType::SampledImage && type.image.dim != DimBuffer) + { + + if (image_is_comparison(type, var.self)) + statement("SamplerComparisonState ", to_sampler_expression(var.self), type_to_array_glsl(type), + to_resource_binding_sampler(var), ";"); + else + statement("SamplerState ", to_sampler_expression(var.self), type_to_array_glsl(type), + to_resource_binding_sampler(var), ";"); + } + break; + } + + case SPIRType::Sampler: + if (comparison_ids.count(var.self)) + statement("SamplerComparisonState ", to_name(var.self), type_to_array_glsl(type), to_resource_binding(var), + ";"); + else + statement("SamplerState ", to_name(var.self), type_to_array_glsl(type), to_resource_binding(var), ";"); + break; + + default: + statement(variable_decl(var), to_resource_binding(var), ";"); + break; + } +} + +void CompilerHLSL::emit_legacy_uniform(const SPIRVariable &var) +{ + auto &type = get(var.basetype); + switch (type.basetype) + { + case SPIRType::Sampler: + case SPIRType::Image: + SPIRV_CROSS_THROW("Separate image and samplers not supported in legacy HLSL."); + + default: + statement(variable_decl(var), ";"); + break; + } +} + +void CompilerHLSL::emit_uniform(const SPIRVariable &var) +{ + add_resource_name(var.self); + if (hlsl_options.shader_model >= 40) + emit_modern_uniform(var); + else + emit_legacy_uniform(var); +} + +string CompilerHLSL::bitcast_glsl_op(const SPIRType &out_type, const SPIRType &in_type) +{ + if (out_type.basetype == SPIRType::UInt && in_type.basetype == SPIRType::Int) + return type_to_glsl(out_type); + else if (out_type.basetype == SPIRType::UInt64 && in_type.basetype == SPIRType::Int64) + return type_to_glsl(out_type); + else if (out_type.basetype == SPIRType::UInt && in_type.basetype == SPIRType::Float) + return "asuint"; + else if (out_type.basetype == SPIRType::Int && in_type.basetype == SPIRType::UInt) + return type_to_glsl(out_type); + else if (out_type.basetype == SPIRType::Int64 && in_type.basetype == SPIRType::UInt64) + return type_to_glsl(out_type); + else if (out_type.basetype == SPIRType::Int && in_type.basetype == SPIRType::Float) + return "asint"; + else if (out_type.basetype == SPIRType::Float && in_type.basetype == SPIRType::UInt) + return "asfloat"; + 
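// Illustrative aside (a sketch, not part of the vendored code): OpBitcast
// must reinterpret bits, not convert values, which is why the table above
// maps float<->int casts to asuint/asint/asfloat while same-width int<->uint
// casts use a plain constructor-style cast. CPU model of the float -> uint
// case that "asuint" covers:

#include <cassert>
#include <cstdint>
#include <cstring>

static uint32_t as_uint(float f)
{
	uint32_t u;
	std::memcpy(&u, &f, sizeof(u)); // bit reinterpretation, like asuint()
	return u;
}

int main()
{
	assert(as_uint(1.0f) == 0x3F800000u);  // bits of 1.0f, not the value 1u
	assert(as_uint(-0.0f) == 0x80000000u); // sign bit only
	assert((uint32_t)1.0f == 1u);          // a value conversion differs
	return 0;
}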
else if (out_type.basetype == SPIRType::Float && in_type.basetype == SPIRType::Int) + return "asfloat"; + else if (out_type.basetype == SPIRType::Int64 && in_type.basetype == SPIRType::Double) + SPIRV_CROSS_THROW("Double to Int64 is not supported in HLSL."); + else if (out_type.basetype == SPIRType::UInt64 && in_type.basetype == SPIRType::Double) + SPIRV_CROSS_THROW("Double to UInt64 is not supported in HLSL."); + else if (out_type.basetype == SPIRType::Double && in_type.basetype == SPIRType::Int64) + return "asdouble"; + else if (out_type.basetype == SPIRType::Double && in_type.basetype == SPIRType::UInt64) + return "asdouble"; + else if (out_type.basetype == SPIRType::Half && in_type.basetype == SPIRType::UInt && in_type.vecsize == 1) + { + if (!requires_explicit_fp16_packing) + { + requires_explicit_fp16_packing = true; + force_recompile(); + } + return "SPIRV_Cross_unpackFloat2x16"; + } + else if (out_type.basetype == SPIRType::UInt && in_type.basetype == SPIRType::Half && in_type.vecsize == 2) + { + if (!requires_explicit_fp16_packing) + { + requires_explicit_fp16_packing = true; + force_recompile(); + } + return "SPIRV_Cross_packFloat2x16"; + } + else + return ""; +} + +void CompilerHLSL::emit_glsl_op(uint32_t result_type, uint32_t id, uint32_t eop, const uint32_t *args, uint32_t count) +{ + auto op = static_cast(eop); + + + uint32_t integer_width = get_integer_width_for_glsl_instruction(op, args, count); + auto int_type = to_signed_basetype(integer_width); + auto uint_type = to_unsigned_basetype(integer_width); + + switch (op) + { + case GLSLstd450InverseSqrt: + emit_unary_func_op(result_type, id, args[0], "rsqrt"); + break; + + case GLSLstd450Fract: + emit_unary_func_op(result_type, id, args[0], "frac"); + break; + + case GLSLstd450RoundEven: + SPIRV_CROSS_THROW("roundEven is not supported on HLSL."); + + case GLSLstd450Acosh: + case GLSLstd450Asinh: + case GLSLstd450Atanh: + SPIRV_CROSS_THROW("Inverse hyperbolics are not supported on HLSL."); + + case GLSLstd450FMix: + case GLSLstd450IMix: + emit_trinary_func_op(result_type, id, args[0], args[1], args[2], "lerp"); + break; + + case GLSLstd450Atan2: + emit_binary_func_op(result_type, id, args[0], args[1], "atan2"); + break; + + case GLSLstd450Fma: + emit_trinary_func_op(result_type, id, args[0], args[1], args[2], "mad"); + break; + + case GLSLstd450InterpolateAtCentroid: + emit_unary_func_op(result_type, id, args[0], "EvaluateAttributeAtCentroid"); + break; + case GLSLstd450InterpolateAtSample: + emit_binary_func_op(result_type, id, args[0], args[1], "EvaluateAttributeAtSample"); + break; + case GLSLstd450InterpolateAtOffset: + emit_binary_func_op(result_type, id, args[0], args[1], "EvaluateAttributeSnapped"); + break; + + case GLSLstd450PackHalf2x16: + if (!requires_fp16_packing) + { + requires_fp16_packing = true; + force_recompile(); + } + emit_unary_func_op(result_type, id, args[0], "SPIRV_Cross_packHalf2x16"); + break; + + case GLSLstd450UnpackHalf2x16: + if (!requires_fp16_packing) + { + requires_fp16_packing = true; + force_recompile(); + } + emit_unary_func_op(result_type, id, args[0], "SPIRV_Cross_unpackHalf2x16"); + break; + + case GLSLstd450PackSnorm4x8: + if (!requires_snorm8_packing) + { + requires_snorm8_packing = true; + force_recompile(); + } + emit_unary_func_op(result_type, id, args[0], "SPIRV_Cross_packSnorm4x8"); + break; + + case GLSLstd450UnpackSnorm4x8: + if (!requires_snorm8_packing) + { + requires_snorm8_packing = true; + force_recompile(); + } + emit_unary_func_op(result_type, id, args[0], 
"SPIRV_Cross_unpackSnorm4x8"); + break; + + case GLSLstd450PackUnorm4x8: + if (!requires_unorm8_packing) + { + requires_unorm8_packing = true; + force_recompile(); + } + emit_unary_func_op(result_type, id, args[0], "SPIRV_Cross_packUnorm4x8"); + break; + + case GLSLstd450UnpackUnorm4x8: + if (!requires_unorm8_packing) + { + requires_unorm8_packing = true; + force_recompile(); + } + emit_unary_func_op(result_type, id, args[0], "SPIRV_Cross_unpackUnorm4x8"); + break; + + case GLSLstd450PackSnorm2x16: + if (!requires_snorm16_packing) + { + requires_snorm16_packing = true; + force_recompile(); + } + emit_unary_func_op(result_type, id, args[0], "SPIRV_Cross_packSnorm2x16"); + break; + + case GLSLstd450UnpackSnorm2x16: + if (!requires_snorm16_packing) + { + requires_snorm16_packing = true; + force_recompile(); + } + emit_unary_func_op(result_type, id, args[0], "SPIRV_Cross_unpackSnorm2x16"); + break; + + case GLSLstd450PackUnorm2x16: + if (!requires_unorm16_packing) + { + requires_unorm16_packing = true; + force_recompile(); + } + emit_unary_func_op(result_type, id, args[0], "SPIRV_Cross_packUnorm2x16"); + break; + + case GLSLstd450UnpackUnorm2x16: + if (!requires_unorm16_packing) + { + requires_unorm16_packing = true; + force_recompile(); + } + emit_unary_func_op(result_type, id, args[0], "SPIRV_Cross_unpackUnorm2x16"); + break; + + case GLSLstd450PackDouble2x32: + case GLSLstd450UnpackDouble2x32: + SPIRV_CROSS_THROW("packDouble2x32/unpackDouble2x32 not supported in HLSL."); + + case GLSLstd450FindILsb: + { + auto basetype = expression_type(args[0]).basetype; + emit_unary_func_op_cast(result_type, id, args[0], "firstbitlow", basetype, basetype); + break; + } + + case GLSLstd450FindSMsb: + emit_unary_func_op_cast(result_type, id, args[0], "firstbithigh", int_type, int_type); + break; + + case GLSLstd450FindUMsb: + emit_unary_func_op_cast(result_type, id, args[0], "firstbithigh", uint_type, uint_type); + break; + + case GLSLstd450MatrixInverse: + { + auto &type = get(result_type); + if (type.vecsize == 2 && type.columns == 2) + { + if (!requires_inverse_2x2) + { + requires_inverse_2x2 = true; + force_recompile(); + } + } + else if (type.vecsize == 3 && type.columns == 3) + { + if (!requires_inverse_3x3) + { + requires_inverse_3x3 = true; + force_recompile(); + } + } + else if (type.vecsize == 4 && type.columns == 4) + { + if (!requires_inverse_4x4) + { + requires_inverse_4x4 = true; + force_recompile(); + } + } + emit_unary_func_op(result_type, id, args[0], "SPIRV_Cross_Inverse"); + break; + } + + case GLSLstd450Normalize: + + if (expression_type(args[0]).vecsize == 1) + { + + emit_unary_func_op(result_type, id, args[0], "sign"); + } + else + CompilerGLSL::emit_glsl_op(result_type, id, eop, args, count); + break; + + case GLSLstd450Reflect: + if (get(result_type).vecsize == 1) + { + if (!requires_scalar_reflect) + { + requires_scalar_reflect = true; + force_recompile(); + } + emit_binary_func_op(result_type, id, args[0], args[1], "SPIRV_Cross_Reflect"); + } + else + CompilerGLSL::emit_glsl_op(result_type, id, eop, args, count); + break; + + case GLSLstd450Refract: + if (get(result_type).vecsize == 1) + { + if (!requires_scalar_refract) + { + requires_scalar_refract = true; + force_recompile(); + } + emit_trinary_func_op(result_type, id, args[0], args[1], args[2], "SPIRV_Cross_Refract"); + } + else + CompilerGLSL::emit_glsl_op(result_type, id, eop, args, count); + break; + + case GLSLstd450FaceForward: + if (get(result_type).vecsize == 1) + { + if (!requires_scalar_faceforward) + { + 
requires_scalar_faceforward = true; + force_recompile(); + } + emit_trinary_func_op(result_type, id, args[0], args[1], args[2], "SPIRV_Cross_FaceForward"); + } + else + CompilerGLSL::emit_glsl_op(result_type, id, eop, args, count); + break; + + default: + CompilerGLSL::emit_glsl_op(result_type, id, eop, args, count); + break; + } +} + +string CompilerHLSL::read_access_chain(const SPIRAccessChain &chain) +{ + auto &type = get(chain.basetype); + + SPIRType target_type; + target_type.basetype = SPIRType::UInt; + target_type.vecsize = type.vecsize; + target_type.columns = type.columns; + + if (type.basetype == SPIRType::Struct) + SPIRV_CROSS_THROW("Reading structs from ByteAddressBuffer not yet supported."); + + if (type.width != 32) + SPIRV_CROSS_THROW("Reading types other than 32-bit from ByteAddressBuffer not yet supported."); + + if (!type.array.empty()) + SPIRV_CROSS_THROW("Reading arrays from ByteAddressBuffer not yet supported."); + + string load_expr; + + + if (type.columns == 1 && !chain.row_major_matrix) + { + const char *load_op = nullptr; + switch (type.vecsize) + { + case 1: + load_op = "Load"; + break; + case 2: + load_op = "Load2"; + break; + case 3: + load_op = "Load3"; + break; + case 4: + load_op = "Load4"; + break; + default: + SPIRV_CROSS_THROW("Unknown vector size."); + } + + load_expr = join(chain.base, ".", load_op, "(", chain.dynamic_index, chain.static_index, ")"); + } + else if (type.columns == 1) + { + + if (type.vecsize > 1) + { + load_expr = type_to_glsl(target_type); + load_expr += "("; + } + + for (uint32_t r = 0; r < type.vecsize; r++) + { + load_expr += + join(chain.base, ".Load(", chain.dynamic_index, chain.static_index + r * chain.matrix_stride, ")"); + if (r + 1 < type.vecsize) + load_expr += ", "; + } + + if (type.vecsize > 1) + load_expr += ")"; + } + else if (!chain.row_major_matrix) + { + + const char *load_op = nullptr; + switch (type.vecsize) + { + case 1: + load_op = "Load"; + break; + case 2: + load_op = "Load2"; + break; + case 3: + load_op = "Load3"; + break; + case 4: + load_op = "Load4"; + break; + default: + SPIRV_CROSS_THROW("Unknown vector size."); + } + + + + load_expr = type_to_glsl(target_type); + load_expr += "("; + for (uint32_t c = 0; c < type.columns; c++) + { + load_expr += join(chain.base, ".", load_op, "(", chain.dynamic_index, + chain.static_index + c * chain.matrix_stride, ")"); + if (c + 1 < type.columns) + load_expr += ", "; + } + load_expr += ")"; + } + else + { + + + + load_expr = type_to_glsl(target_type); + load_expr += "("; + for (uint32_t c = 0; c < type.columns; c++) + { + for (uint32_t r = 0; r < type.vecsize; r++) + { + load_expr += join(chain.base, ".Load(", chain.dynamic_index, + chain.static_index + c * (type.width / 8) + r * chain.matrix_stride, ")"); + + if ((r + 1 < type.vecsize) || (c + 1 < type.columns)) + load_expr += ", "; + } + } + load_expr += ")"; + } + + auto bitcast_op = bitcast_glsl_op(type, target_type); + if (!bitcast_op.empty()) + load_expr = join(bitcast_op, "(", load_expr, ")"); + + return load_expr; +} + +void CompilerHLSL::emit_load(const Instruction &instruction) +{ + auto ops = stream(instruction); + + auto *chain = maybe_get(ops[2]); + if (chain) + { + uint32_t result_type = ops[0]; + uint32_t id = ops[1]; + uint32_t ptr = ops[2]; + + if (has_decoration(ptr, DecorationNonUniformEXT)) + propagate_nonuniform_qualifier(ptr); + + auto load_expr = read_access_chain(*chain); + + bool forward = should_forward(ptr) && forced_temporaries.find(id) == end(forced_temporaries); + + + + + if (!forward) + 
track_expression_read(chain->self); + + + auto &type = get(result_type); + if (type.columns > 1 || !type.array.empty() || type.basetype == SPIRType::Struct) + forward = false; + + auto &e = emit_op(result_type, id, load_expr, forward, true); + e.need_transpose = false; + register_read(id, ptr, forward); + inherit_expression_dependencies(id, ptr); + if (forward) + add_implied_read_expression(e, chain->self); + } + else + CompilerGLSL::emit_instruction(instruction); +} + +void CompilerHLSL::write_access_chain(const SPIRAccessChain &chain, uint32_t value) +{ + auto &type = get(chain.basetype); + + + track_expression_read(chain.self); + + if (has_decoration(chain.self, DecorationNonUniformEXT)) + propagate_nonuniform_qualifier(chain.self); + + SPIRType target_type; + target_type.basetype = SPIRType::UInt; + target_type.vecsize = type.vecsize; + target_type.columns = type.columns; + + if (type.basetype == SPIRType::Struct) + SPIRV_CROSS_THROW("Writing structs to RWByteAddressBuffer not yet supported."); + if (type.width != 32) + SPIRV_CROSS_THROW("Writing types other than 32-bit to RWByteAddressBuffer not yet supported."); + if (!type.array.empty()) + SPIRV_CROSS_THROW("Reading arrays from ByteAddressBuffer not yet supported."); + + if (type.columns == 1 && !chain.row_major_matrix) + { + const char *store_op = nullptr; + switch (type.vecsize) + { + case 1: + store_op = "Store"; + break; + case 2: + store_op = "Store2"; + break; + case 3: + store_op = "Store3"; + break; + case 4: + store_op = "Store4"; + break; + default: + SPIRV_CROSS_THROW("Unknown vector size."); + } + + auto store_expr = to_expression(value); + auto bitcast_op = bitcast_glsl_op(target_type, type); + if (!bitcast_op.empty()) + store_expr = join(bitcast_op, "(", store_expr, ")"); + statement(chain.base, ".", store_op, "(", chain.dynamic_index, chain.static_index, ", ", store_expr, ");"); + } + else if (type.columns == 1) + { + + for (uint32_t r = 0; r < type.vecsize; r++) + { + auto store_expr = to_enclosed_expression(value); + if (type.vecsize > 1) + { + store_expr += "."; + store_expr += index_to_swizzle(r); + } + remove_duplicate_swizzle(store_expr); + + auto bitcast_op = bitcast_glsl_op(target_type, type); + if (!bitcast_op.empty()) + store_expr = join(bitcast_op, "(", store_expr, ")"); + statement(chain.base, ".Store(", chain.dynamic_index, chain.static_index + chain.matrix_stride * r, ", ", + store_expr, ");"); + } + } + else if (!chain.row_major_matrix) + { + const char *store_op = nullptr; + switch (type.vecsize) + { + case 1: + store_op = "Store"; + break; + case 2: + store_op = "Store2"; + break; + case 3: + store_op = "Store3"; + break; + case 4: + store_op = "Store4"; + break; + default: + SPIRV_CROSS_THROW("Unknown vector size."); + } + + for (uint32_t c = 0; c < type.columns; c++) + { + auto store_expr = join(to_enclosed_expression(value), "[", c, "]"); + auto bitcast_op = bitcast_glsl_op(target_type, type); + if (!bitcast_op.empty()) + store_expr = join(bitcast_op, "(", store_expr, ")"); + statement(chain.base, ".", store_op, "(", chain.dynamic_index, chain.static_index + c * chain.matrix_stride, + ", ", store_expr, ");"); + } + } + else + { + for (uint32_t r = 0; r < type.vecsize; r++) + { + for (uint32_t c = 0; c < type.columns; c++) + { + auto store_expr = join(to_enclosed_expression(value), "[", c, "].", index_to_swizzle(r)); + remove_duplicate_swizzle(store_expr); + auto bitcast_op = bitcast_glsl_op(target_type, type); + if (!bitcast_op.empty()) + store_expr = join(bitcast_op, "(", store_expr, ")"); + 
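// Illustrative aside (a sketch, not part of the vendored code): the Store
// below addresses element (column c, row r) of a row-major matrix at
// static_index + c * (type.width / 8) + r * matrix_stride, i.e. rows sit
// matrix_stride bytes apart and the columns of one row are packed one
// component apart. Offset math check for a 32-bit matrix at byte offset 64
// with a 16-byte stride (values invented for the illustration):

#include <cassert>
#include <cstdint>

int main()
{
	const uint32_t static_index = 64; // byte offset of the matrix
	const uint32_t comp_size = 4;     // type.width / 8
	const uint32_t matrix_stride = 16;

	auto offset = [&](uint32_t c, uint32_t r) {
		return static_index + c * comp_size + r * matrix_stride;
	};

	assert(offset(0, 0) == 64); // first element
	assert(offset(1, 0) == 68); // next column, same row: one component over
	assert(offset(0, 1) == 80); // same column, next row: one stride down
	return 0;
}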
statement(chain.base, ".Store(", chain.dynamic_index, + chain.static_index + c * (type.width / 8) + r * chain.matrix_stride, ", ", store_expr, ");"); + } + } + } + + register_write(chain.self); +} + +void CompilerHLSL::emit_store(const Instruction &instruction) +{ + auto ops = stream(instruction); + auto *chain = maybe_get(ops[0]); + if (chain) + write_access_chain(*chain, ops[1]); + else + CompilerGLSL::emit_instruction(instruction); +} + +void CompilerHLSL::emit_access_chain(const Instruction &instruction) +{ + auto ops = stream(instruction); + uint32_t length = instruction.length; + + bool need_byte_access_chain = false; + auto &type = expression_type(ops[2]); + const auto *chain = maybe_get(ops[2]); + + if (chain) + { + + need_byte_access_chain = true; + } + else if (type.storage == StorageClassStorageBuffer || has_decoration(type.self, DecorationBufferBlock)) + { + + + uint32_t chain_arguments = length - 3; + if (chain_arguments > type.array.size()) + need_byte_access_chain = true; + } + + if (need_byte_access_chain) + { + uint32_t to_plain_buffer_length = static_cast(type.array.size()); + auto *backing_variable = maybe_get_backing_variable(ops[2]); + + string base; + if (to_plain_buffer_length != 0) + base = access_chain(ops[2], &ops[3], to_plain_buffer_length, get(ops[0])); + else if (chain) + base = chain->base; + else + base = to_expression(ops[2]); + + + auto *basetype = &get_pointee_type(type); + + + for (uint32_t i = 0; i < to_plain_buffer_length; i++) + { + assert(basetype->parent_type); + basetype = &get(basetype->parent_type); + } + + uint32_t matrix_stride = 0; + bool row_major_matrix = false; + + + if (chain) + { + matrix_stride = chain->matrix_stride; + row_major_matrix = chain->row_major_matrix; + } + + auto offsets = + flattened_access_chain_offset(*basetype, &ops[3 + to_plain_buffer_length], + length - 3 - to_plain_buffer_length, 0, 1, &row_major_matrix, &matrix_stride); + + auto &e = set(ops[1], ops[0], type.storage, base, offsets.first, offsets.second); + e.row_major_matrix = row_major_matrix; + e.matrix_stride = matrix_stride; + e.immutable = should_forward(ops[2]); + e.loaded_from = backing_variable ? backing_variable->self : ID(0); + + if (chain) + { + e.dynamic_index += chain->dynamic_index; + e.static_index += chain->static_index; + } + + for (uint32_t i = 2; i < length; i++) + { + inherit_expression_dependencies(ops[1], ops[i]); + add_implied_read_expression(e, ops[i]); + } + } + else + { + CompilerGLSL::emit_instruction(instruction); + } +} + +void CompilerHLSL::emit_atomic(const uint32_t *ops, uint32_t length, spv::Op op) +{ + const char *atomic_op = nullptr; + + string value_expr; + if (op != OpAtomicIDecrement && op != OpAtomicIIncrement) + value_expr = to_expression(ops[op == OpAtomicCompareExchange ? 
6 : 5]); + + switch (op) + { + case OpAtomicIIncrement: + atomic_op = "InterlockedAdd"; + value_expr = "1"; + break; + + case OpAtomicIDecrement: + atomic_op = "InterlockedAdd"; + value_expr = "-1"; + break; + + case OpAtomicISub: + atomic_op = "InterlockedAdd"; + value_expr = join("-", enclose_expression(value_expr)); + break; + + case OpAtomicSMin: + case OpAtomicUMin: + atomic_op = "InterlockedMin"; + break; + + case OpAtomicSMax: + case OpAtomicUMax: + atomic_op = "InterlockedMax"; + break; + + case OpAtomicAnd: + atomic_op = "InterlockedAnd"; + break; + + case OpAtomicOr: + atomic_op = "InterlockedOr"; + break; + + case OpAtomicXor: + atomic_op = "InterlockedXor"; + break; + + case OpAtomicIAdd: + atomic_op = "InterlockedAdd"; + break; + + case OpAtomicExchange: + atomic_op = "InterlockedExchange"; + break; + + case OpAtomicCompareExchange: + if (length < 8) + SPIRV_CROSS_THROW("Not enough data for opcode."); + atomic_op = "InterlockedCompareExchange"; + value_expr = join(to_expression(ops[7]), ", ", value_expr); + break; + + default: + SPIRV_CROSS_THROW("Unknown atomic opcode."); + } + + uint32_t result_type = ops[0]; + uint32_t id = ops[1]; + forced_temporaries.insert(ops[1]); + + auto &type = get(result_type); + statement(variable_decl(type, to_name(id)), ";"); + + auto &data_type = expression_type(ops[2]); + auto *chain = maybe_get(ops[2]); + SPIRType::BaseType expr_type; + if (data_type.storage == StorageClassImage || !chain) + { + statement(atomic_op, "(", to_expression(ops[2]), ", ", value_expr, ", ", to_name(id), ");"); + expr_type = data_type.basetype; + } + else + { + + expr_type = SPIRType::UInt; + statement(chain->base, ".", atomic_op, "(", chain->dynamic_index, chain->static_index, ", ", value_expr, ", ", + to_name(id), ");"); + } + + auto expr = bitcast_expression(type, expr_type, to_name(id)); + set(id, expr, result_type, true); + flush_all_atomic_capable_variables(); +} + +void CompilerHLSL::emit_subgroup_op(const Instruction &i) +{ + if (hlsl_options.shader_model < 60) + SPIRV_CROSS_THROW("Wave ops requires SM 6.0 or higher."); + + const uint32_t *ops = stream(i); + auto op = static_cast(i.op); + + uint32_t result_type = ops[0]; + uint32_t id = ops[1]; + + auto scope = static_cast(get(ops[2]).scalar()); + if (scope != ScopeSubgroup) + SPIRV_CROSS_THROW("Only subgroup scope is supported."); + + const auto make_inclusive_Sum = [&](const string &expr) -> string { + return join(expr, " + ", to_expression(ops[4])); + }; + + const auto make_inclusive_Product = [&](const string &expr) -> string { + return join(expr, " * ", to_expression(ops[4])); + }; + +#define make_inclusive_BitAnd(expr) "" +#define make_inclusive_BitOr(expr) "" +#define make_inclusive_BitXor(expr) "" +#define make_inclusive_Min(expr) "" +#define make_inclusive_Max(expr) "" + + switch (op) + { + case OpGroupNonUniformElect: + emit_op(result_type, id, "WaveIsFirstLane()", true); + break; + + case OpGroupNonUniformBroadcast: + emit_binary_func_op(result_type, id, ops[3], ops[4], "WaveReadLaneAt"); + break; + + case OpGroupNonUniformBroadcastFirst: + emit_unary_func_op(result_type, id, ops[3], "WaveReadLaneFirst"); + break; + + case OpGroupNonUniformBallot: + emit_unary_func_op(result_type, id, ops[3], "WaveActiveBallot"); + break; + + case OpGroupNonUniformInverseBallot: + SPIRV_CROSS_THROW("Cannot trivially implement InverseBallot in HLSL."); + break; + + case OpGroupNonUniformBallotBitExtract: + SPIRV_CROSS_THROW("Cannot trivially implement BallotBitExtract in HLSL."); + break; + + case 
OpGroupNonUniformBallotFindLSB: + SPIRV_CROSS_THROW("Cannot trivially implement BallotFindLSB in HLSL."); + break; + + case OpGroupNonUniformBallotFindMSB: + SPIRV_CROSS_THROW("Cannot trivially implement BallotFindMSB in HLSL."); + break; + + case OpGroupNonUniformBallotBitCount: + { + auto operation = static_cast(ops[3]); + if (operation == GroupOperationReduce) + { + bool forward = should_forward(ops[4]); + auto left = join("countbits(", to_enclosed_expression(ops[4]), ".x) + countbits(", + to_enclosed_expression(ops[4]), ".y)"); + auto right = join("countbits(", to_enclosed_expression(ops[4]), ".z) + countbits(", + to_enclosed_expression(ops[4]), ".w)"); + emit_op(result_type, id, join(left, " + ", right), forward); + inherit_expression_dependencies(id, ops[4]); + } + else if (operation == GroupOperationInclusiveScan) + SPIRV_CROSS_THROW("Cannot trivially implement BallotBitCount Inclusive Scan in HLSL."); + else if (operation == GroupOperationExclusiveScan) + SPIRV_CROSS_THROW("Cannot trivially implement BallotBitCount Exclusive Scan in HLSL."); + else + SPIRV_CROSS_THROW("Invalid BitCount operation."); + break; + } + + case OpGroupNonUniformShuffle: + SPIRV_CROSS_THROW("Cannot trivially implement Shuffle in HLSL."); + case OpGroupNonUniformShuffleXor: + SPIRV_CROSS_THROW("Cannot trivially implement ShuffleXor in HLSL."); + case OpGroupNonUniformShuffleUp: + SPIRV_CROSS_THROW("Cannot trivially implement ShuffleUp in HLSL."); + case OpGroupNonUniformShuffleDown: + SPIRV_CROSS_THROW("Cannot trivially implement ShuffleDown in HLSL."); + + case OpGroupNonUniformAll: + emit_unary_func_op(result_type, id, ops[3], "WaveActiveAllTrue"); + break; + + case OpGroupNonUniformAny: + emit_unary_func_op(result_type, id, ops[3], "WaveActiveAnyTrue"); + break; + + case OpGroupNonUniformAllEqual: + { + auto &type = get(result_type); + emit_unary_func_op(result_type, id, ops[3], + type.basetype == SPIRType::Boolean ? 
"WaveActiveAllEqualBool" : "WaveActiveAllEqual"); + break; + } + + +#define HLSL_GROUP_OP(op, hlsl_op, supports_scan) \ +case OpGroupNonUniform##op: \ + { \ + auto operation = static_cast(ops[3]); \ + if (operation == GroupOperationReduce) \ + emit_unary_func_op(result_type, id, ops[4], "WaveActive" #hlsl_op); \ + else if (operation == GroupOperationInclusiveScan && supports_scan) \ + { \ + bool forward = should_forward(ops[4]); \ + emit_op(result_type, id, make_inclusive_##hlsl_op (join("WavePrefix" #hlsl_op, "(", to_expression(ops[4]), ")")), forward); \ + inherit_expression_dependencies(id, ops[4]); \ + } \ + else if (operation == GroupOperationExclusiveScan && supports_scan) \ + emit_unary_func_op(result_type, id, ops[4], "WavePrefix" #hlsl_op); \ + else if (operation == GroupOperationClusteredReduce) \ + SPIRV_CROSS_THROW("Cannot trivially implement ClusteredReduce in HLSL."); \ + else \ + SPIRV_CROSS_THROW("Invalid group operation."); \ + break; \ + } + HLSL_GROUP_OP(FAdd, Sum, true) + HLSL_GROUP_OP(FMul, Product, true) + HLSL_GROUP_OP(FMin, Min, false) + HLSL_GROUP_OP(FMax, Max, false) + HLSL_GROUP_OP(IAdd, Sum, true) + HLSL_GROUP_OP(IMul, Product, true) + HLSL_GROUP_OP(SMin, Min, false) + HLSL_GROUP_OP(SMax, Max, false) + HLSL_GROUP_OP(UMin, Min, false) + HLSL_GROUP_OP(UMax, Max, false) + HLSL_GROUP_OP(BitwiseAnd, BitAnd, false) + HLSL_GROUP_OP(BitwiseOr, BitOr, false) + HLSL_GROUP_OP(BitwiseXor, BitXor, false) +#undef HLSL_GROUP_OP + + + case OpGroupNonUniformQuadSwap: + { + uint32_t direction = get(ops[4]).scalar(); + if (direction == 0) + emit_unary_func_op(result_type, id, ops[3], "QuadReadAcrossX"); + else if (direction == 1) + emit_unary_func_op(result_type, id, ops[3], "QuadReadAcrossY"); + else if (direction == 2) + emit_unary_func_op(result_type, id, ops[3], "QuadReadAcrossDiagonal"); + else + SPIRV_CROSS_THROW("Invalid quad swap direction."); + break; + } + + case OpGroupNonUniformQuadBroadcast: + { + emit_binary_func_op(result_type, id, ops[3], ops[4], "QuadReadLaneAt"); + break; + } + + default: + SPIRV_CROSS_THROW("Invalid opcode for subgroup."); + } + + register_control_dependent_expression(id); +} + +void CompilerHLSL::emit_instruction(const Instruction &instruction) +{ + auto ops = stream(instruction); + auto opcode = static_cast(instruction.op); + +#define HLSL_BOP(op) emit_binary_op(ops[0], ops[1], ops[2], ops[3], #op) +#define HLSL_BOP_CAST(op, type) \ + emit_binary_op_cast(ops[0], ops[1], ops[2], ops[3], #op, type, opcode_is_sign_invariant(opcode)) +#define HLSL_UOP(op) emit_unary_op(ops[0], ops[1], ops[2], #op) +#define HLSL_QFOP(op) emit_quaternary_func_op(ops[0], ops[1], ops[2], ops[3], ops[4], ops[5], #op) +#define HLSL_TFOP(op) emit_trinary_func_op(ops[0], ops[1], ops[2], ops[3], ops[4], #op) +#define HLSL_BFOP(op) emit_binary_func_op(ops[0], ops[1], ops[2], ops[3], #op) +#define HLSL_BFOP_CAST(op, type) \ + emit_binary_func_op_cast(ops[0], ops[1], ops[2], ops[3], #op, type, opcode_is_sign_invariant(opcode)) +#define HLSL_BFOP(op) emit_binary_func_op(ops[0], ops[1], ops[2], ops[3], #op) +#define HLSL_UFOP(op) emit_unary_func_op(ops[0], ops[1], ops[2], #op) + + + uint32_t integer_width = get_integer_width_for_instruction(instruction); + auto int_type = to_signed_basetype(integer_width); + auto uint_type = to_unsigned_basetype(integer_width); + + switch (opcode) + { + case OpAccessChain: + case OpInBoundsAccessChain: + { + emit_access_chain(instruction); + break; + } + + case OpStore: + { + emit_store(instruction); + break; + } + + case OpLoad: + { + 
emit_load(instruction); + break; + } + + case OpMatrixTimesVector: + { + + emit_binary_func_op(ops[0], ops[1], ops[3], ops[2], "mul"); + break; + } + + case OpVectorTimesMatrix: + { + + emit_binary_func_op(ops[0], ops[1], ops[3], ops[2], "mul"); + break; + } + + case OpMatrixTimesMatrix: + { + + emit_binary_func_op(ops[0], ops[1], ops[3], ops[2], "mul"); + break; + } + + case OpOuterProduct: + { + uint32_t result_type = ops[0]; + uint32_t id = ops[1]; + uint32_t a = ops[2]; + uint32_t b = ops[3]; + + auto &type = get(result_type); + string expr = type_to_glsl_constructor(type); + expr += "("; + for (uint32_t col = 0; col < type.columns; col++) + { + expr += to_enclosed_expression(a); + expr += " * "; + expr += to_extract_component_expression(b, col); + if (col + 1 < type.columns) + expr += ", "; + } + expr += ")"; + emit_op(result_type, id, expr, should_forward(a) && should_forward(b)); + inherit_expression_dependencies(id, a); + inherit_expression_dependencies(id, b); + break; + } + + case OpFMod: + { + if (!requires_op_fmod) + { + requires_op_fmod = true; + force_recompile(); + } + CompilerGLSL::emit_instruction(instruction); + break; + } + + case OpFRem: + emit_binary_func_op(ops[0], ops[1], ops[2], ops[3], "fmod"); + break; + + case OpImage: + { + uint32_t result_type = ops[0]; + uint32_t id = ops[1]; + auto *combined = maybe_get(ops[2]); + + if (combined) + { + auto &e = emit_op(result_type, id, to_expression(combined->image), true, true); + auto *var = maybe_get_backing_variable(combined->image); + if (var) + e.loaded_from = var->self; + } + else + { + auto &e = emit_op(result_type, id, to_expression(ops[2]), true, true); + auto *var = maybe_get_backing_variable(ops[2]); + if (var) + e.loaded_from = var->self; + } + break; + } + + case OpDPdx: + HLSL_UFOP(ddx); + register_control_dependent_expression(ops[1]); + break; + + case OpDPdy: + HLSL_UFOP(ddy); + register_control_dependent_expression(ops[1]); + break; + + case OpDPdxFine: + HLSL_UFOP(ddx_fine); + register_control_dependent_expression(ops[1]); + break; + + case OpDPdyFine: + HLSL_UFOP(ddy_fine); + register_control_dependent_expression(ops[1]); + break; + + case OpDPdxCoarse: + HLSL_UFOP(ddx_coarse); + register_control_dependent_expression(ops[1]); + break; + + case OpDPdyCoarse: + HLSL_UFOP(ddy_coarse); + register_control_dependent_expression(ops[1]); + break; + + case OpFwidth: + case OpFwidthCoarse: + case OpFwidthFine: + HLSL_UFOP(fwidth); + register_control_dependent_expression(ops[1]); + break; + + case OpLogicalNot: + { + auto result_type = ops[0]; + auto id = ops[1]; + auto &type = get(result_type); + + if (type.vecsize > 1) + emit_unrolled_unary_op(result_type, id, ops[2], "!"); + else + HLSL_UOP(!); + break; + } + + case OpIEqual: + { + auto result_type = ops[0]; + auto id = ops[1]; + + if (expression_type(ops[2]).vecsize > 1) + emit_unrolled_binary_op(result_type, id, ops[2], ops[3], "==", false, SPIRType::Unknown); + else + HLSL_BOP_CAST(==, int_type); + break; + } + + case OpLogicalEqual: + case OpFOrdEqual: + case OpFUnordEqual: + { + + + + + + + auto result_type = ops[0]; + auto id = ops[1]; + + if (expression_type(ops[2]).vecsize > 1) + emit_unrolled_binary_op(result_type, id, ops[2], ops[3], "==", false, SPIRType::Unknown); + else + HLSL_BOP(==); + break; + } + + case OpINotEqual: + { + auto result_type = ops[0]; + auto id = ops[1]; + + if (expression_type(ops[2]).vecsize > 1) + emit_unrolled_binary_op(result_type, id, ops[2], ops[3], "!=", false, SPIRType::Unknown); + else + HLSL_BOP_CAST(!=, int_type); + 
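// Illustrative aside (a sketch, not part of the vendored code): for the
// OpFUnord* cases below, HLSL has no unordered comparison operators, so the
// backend emits the *opposite* ordered comparison and negates it (the "true"
// argument to emit_unrolled_binary_op): FUnordGreaterThan(a, b) becomes
// !(a <= b). Any comparison involving NaN is false, so the negation is true
// exactly when a > b or the operands are unordered. CPU check:

#include <cassert>
#include <cmath>

static bool funord_gt(float a, float b)
{
	return !(a <= b); // what the negated "<=" expansion computes
}

int main()
{
	assert(funord_gt(2.0f, 1.0f));  // ordered and greater
	assert(!funord_gt(1.0f, 2.0f)); // ordered and not greater
	assert(funord_gt(NAN, 1.0f));   // unordered: true
	assert(funord_gt(1.0f, NAN));   // unordered: true
	return 0;
}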
break; + } + + case OpLogicalNotEqual: + case OpFOrdNotEqual: + case OpFUnordNotEqual: + { + + + + + + + + + + + auto result_type = ops[0]; + auto id = ops[1]; + + if (expression_type(ops[2]).vecsize > 1) + emit_unrolled_binary_op(result_type, id, ops[2], ops[3], "!=", false, SPIRType::Unknown); + else + HLSL_BOP(!=); + break; + } + + case OpUGreaterThan: + case OpSGreaterThan: + { + auto result_type = ops[0]; + auto id = ops[1]; + auto type = opcode == OpUGreaterThan ? uint_type : int_type; + + if (expression_type(ops[2]).vecsize > 1) + emit_unrolled_binary_op(result_type, id, ops[2], ops[3], ">", false, type); + else + HLSL_BOP_CAST(>, type); + break; + } + + case OpFOrdGreaterThan: + { + auto result_type = ops[0]; + auto id = ops[1]; + + if (expression_type(ops[2]).vecsize > 1) + emit_unrolled_binary_op(result_type, id, ops[2], ops[3], ">", false, SPIRType::Unknown); + else + HLSL_BOP(>); + break; + } + + case OpFUnordGreaterThan: + { + auto result_type = ops[0]; + auto id = ops[1]; + + if (expression_type(ops[2]).vecsize > 1) + emit_unrolled_binary_op(result_type, id, ops[2], ops[3], "<=", true, SPIRType::Unknown); + else + CompilerGLSL::emit_instruction(instruction); + break; + } + + case OpUGreaterThanEqual: + case OpSGreaterThanEqual: + { + auto result_type = ops[0]; + auto id = ops[1]; + + auto type = opcode == OpUGreaterThanEqual ? uint_type : int_type; + if (expression_type(ops[2]).vecsize > 1) + emit_unrolled_binary_op(result_type, id, ops[2], ops[3], ">=", false, type); + else + HLSL_BOP_CAST(>=, type); + break; + } + + case OpFOrdGreaterThanEqual: + { + auto result_type = ops[0]; + auto id = ops[1]; + + if (expression_type(ops[2]).vecsize > 1) + emit_unrolled_binary_op(result_type, id, ops[2], ops[3], ">=", false, SPIRType::Unknown); + else + HLSL_BOP(>=); + break; + } + + case OpFUnordGreaterThanEqual: + { + auto result_type = ops[0]; + auto id = ops[1]; + + if (expression_type(ops[2]).vecsize > 1) + emit_unrolled_binary_op(result_type, id, ops[2], ops[3], "<", true, SPIRType::Unknown); + else + CompilerGLSL::emit_instruction(instruction); + break; + } + + case OpULessThan: + case OpSLessThan: + { + auto result_type = ops[0]; + auto id = ops[1]; + + auto type = opcode == OpULessThan ? uint_type : int_type; + if (expression_type(ops[2]).vecsize > 1) + emit_unrolled_binary_op(result_type, id, ops[2], ops[3], "<", false, type); + else + HLSL_BOP_CAST(<, type); + break; + } + + case OpFOrdLessThan: + { + auto result_type = ops[0]; + auto id = ops[1]; + + if (expression_type(ops[2]).vecsize > 1) + emit_unrolled_binary_op(result_type, id, ops[2], ops[3], "<", false, SPIRType::Unknown); + else + HLSL_BOP(<); + break; + } + + case OpFUnordLessThan: + { + auto result_type = ops[0]; + auto id = ops[1]; + + if (expression_type(ops[2]).vecsize > 1) + emit_unrolled_binary_op(result_type, id, ops[2], ops[3], ">=", true, SPIRType::Unknown); + else + CompilerGLSL::emit_instruction(instruction); + break; + } + + case OpULessThanEqual: + case OpSLessThanEqual: + { + auto result_type = ops[0]; + auto id = ops[1]; + + auto type = opcode == OpULessThanEqual ? 
uint_type : int_type; + if (expression_type(ops[2]).vecsize > 1) + emit_unrolled_binary_op(result_type, id, ops[2], ops[3], "<=", false, type); + else + HLSL_BOP_CAST(<=, type); + break; + } + + case OpFOrdLessThanEqual: + { + auto result_type = ops[0]; + auto id = ops[1]; + + if (expression_type(ops[2]).vecsize > 1) + emit_unrolled_binary_op(result_type, id, ops[2], ops[3], "<=", false, SPIRType::Unknown); + else + HLSL_BOP(<=); + break; + } + + case OpFUnordLessThanEqual: + { + auto result_type = ops[0]; + auto id = ops[1]; + + if (expression_type(ops[2]).vecsize > 1) + emit_unrolled_binary_op(result_type, id, ops[2], ops[3], ">", true, SPIRType::Unknown); + else + CompilerGLSL::emit_instruction(instruction); + break; + } + + case OpImageQueryLod: + emit_texture_op(instruction); + break; + + case OpImageQuerySizeLod: + { + auto result_type = ops[0]; + auto id = ops[1]; + + require_texture_query_variant(expression_type(ops[2])); + + auto dummy_samples_levels = join(get_fallback_name(id), "_dummy_parameter"); + statement("uint ", dummy_samples_levels, ";"); + + auto expr = join("SPIRV_Cross_textureSize(", to_expression(ops[2]), ", ", + bitcast_expression(SPIRType::UInt, ops[3]), ", ", dummy_samples_levels, ")"); + + auto &restype = get(ops[0]); + expr = bitcast_expression(restype, SPIRType::UInt, expr); + emit_op(result_type, id, expr, true); + break; + } + + case OpImageQuerySize: + { + auto result_type = ops[0]; + auto id = ops[1]; + + require_texture_query_variant(expression_type(ops[2])); + + auto dummy_samples_levels = join(get_fallback_name(id), "_dummy_parameter"); + statement("uint ", dummy_samples_levels, ";"); + + auto expr = join("SPIRV_Cross_textureSize(", to_expression(ops[2]), ", 0u, ", dummy_samples_levels, ")"); + auto &restype = get(ops[0]); + expr = bitcast_expression(restype, SPIRType::UInt, expr); + emit_op(result_type, id, expr, true); + break; + } + + case OpImageQuerySamples: + case OpImageQueryLevels: + { + auto result_type = ops[0]; + auto id = ops[1]; + + require_texture_query_variant(expression_type(ops[2])); + + + + forced_temporaries.insert(id); + auto &type = get(result_type); + statement(variable_decl(type, to_name(id)), ";"); + statement("SPIRV_Cross_textureSize(", to_expression(ops[2]), ", 0u, ", to_name(id), ");"); + + auto &restype = get(ops[0]); + auto expr = bitcast_expression(restype, SPIRType::UInt, to_name(id)); + set(id, expr, result_type, true); + break; + } + + case OpImageRead: + { + uint32_t result_type = ops[0]; + uint32_t id = ops[1]; + auto *var = maybe_get_backing_variable(ops[2]); + auto &type = expression_type(ops[2]); + bool subpass_data = type.image.dim == DimSubpassData; + bool pure = false; + + string imgexpr; + + if (subpass_data) + { + if (hlsl_options.shader_model < 40) + SPIRV_CROSS_THROW("Subpass loads are not supported in HLSL shader model 2/3."); + + + if (type.image.ms) + { + uint32_t operands = ops[4]; + if (operands != ImageOperandsSampleMask || instruction.length != 6) + SPIRV_CROSS_THROW("Multisampled image used in OpImageRead, but unexpected operand mask was used."); + uint32_t sample = ops[5]; + imgexpr = join(to_expression(ops[2]), ".Load(int2(gl_FragCoord.xy), ", to_expression(sample), ")"); + } + else + imgexpr = join(to_expression(ops[2]), ".Load(int3(int2(gl_FragCoord.xy), 0))"); + + pure = true; + } + else + { + imgexpr = join(to_expression(ops[2]), "[", to_expression(ops[3]), "]"); + + + if (var && !subpass_data) + imgexpr = remap_swizzle(get(result_type), + 
image_format_to_components(get(var->basetype).image.format), imgexpr); + } + + if (var && var->forwardable) + { + bool forward = forced_temporaries.find(id) == end(forced_temporaries); + auto &e = emit_op(result_type, id, imgexpr, forward); + + if (!pure) + { + e.loaded_from = var->self; + if (forward) + var->dependees.push_back(id); + } + } + else + emit_op(result_type, id, imgexpr, false); + + inherit_expression_dependencies(id, ops[2]); + if (type.image.ms) + inherit_expression_dependencies(id, ops[5]); + break; + } + + case OpImageWrite: + { + auto *var = maybe_get_backing_variable(ops[0]); + + + + auto value_expr = to_expression(ops[2]); + if (var) + { + auto &type = get(var->basetype); + auto narrowed_type = get(type.image.type); + narrowed_type.vecsize = image_format_to_components(type.image.format); + value_expr = remap_swizzle(narrowed_type, expression_type(ops[2]).vecsize, value_expr); + } + + statement(to_expression(ops[0]), "[", to_expression(ops[1]), "] = ", value_expr, ";"); + if (var && variable_storage_is_aliased(*var)) + flush_all_aliased_variables(); + break; + } + + case OpImageTexelPointer: + { + uint32_t result_type = ops[0]; + uint32_t id = ops[1]; + auto &e = + set(id, join(to_expression(ops[2]), "[", to_expression(ops[3]), "]"), result_type, true); + + + auto *var = maybe_get_backing_variable(ops[2]); + e.loaded_from = var ? var->self : ID(0); + inherit_expression_dependencies(id, ops[3]); + break; + } + + case OpAtomicCompareExchange: + case OpAtomicExchange: + case OpAtomicISub: + case OpAtomicSMin: + case OpAtomicUMin: + case OpAtomicSMax: + case OpAtomicUMax: + case OpAtomicAnd: + case OpAtomicOr: + case OpAtomicXor: + case OpAtomicIAdd: + case OpAtomicIIncrement: + case OpAtomicIDecrement: + { + emit_atomic(ops, instruction.length, opcode); + break; + } + + case OpControlBarrier: + case OpMemoryBarrier: + { + uint32_t memory; + uint32_t semantics; + + if (opcode == OpMemoryBarrier) + { + memory = get(ops[0]).scalar(); + semantics = get(ops[1]).scalar(); + } + else + { + memory = get(ops[1]).scalar(); + semantics = get(ops[2]).scalar(); + } + + if (memory == ScopeSubgroup) + { + + break; + } + + + semantics = mask_relevant_memory_semantics(semantics); + + if (opcode == OpMemoryBarrier) + { + + + const Instruction *next = get_next_instruction_in_block(instruction); + if (next && next->op == OpControlBarrier) + { + auto *next_ops = stream(*next); + uint32_t next_memory = get(next_ops[1]).scalar(); + uint32_t next_semantics = get(next_ops[2]).scalar(); + next_semantics = mask_relevant_memory_semantics(next_semantics); + + + + if (next_semantics == 0) + next_semantics = MemorySemanticsWorkgroupMemoryMask; + + bool memory_scope_covered = false; + if (next_memory == memory) + memory_scope_covered = true; + else if (next_semantics == MemorySemanticsWorkgroupMemoryMask) + { + + + if ((next_memory == ScopeDevice || next_memory == ScopeWorkgroup) && + (memory == ScopeDevice || memory == ScopeWorkgroup)) + { + memory_scope_covered = true; + } + } + else if (memory == ScopeWorkgroup && next_memory == ScopeDevice) + { + + memory_scope_covered = true; + } + + + if (memory_scope_covered && (semantics & next_semantics) == semantics) + break; + } + } + + + + if (semantics || opcode == OpControlBarrier) + { + assert(current_emitting_block); + flush_control_dependent_expressions(current_emitting_block->self); + flush_all_active_variables(); + } + + if (opcode == OpControlBarrier) + { + + if (semantics == MemorySemanticsWorkgroupMemoryMask || semantics == 0) + 
statement("GroupMemoryBarrierWithGroupSync();"); + else if (semantics != 0 && (semantics & MemorySemanticsWorkgroupMemoryMask) == 0) + statement("DeviceMemoryBarrierWithGroupSync();"); + else + statement("AllMemoryBarrierWithGroupSync();"); + } + else + { + if (semantics == MemorySemanticsWorkgroupMemoryMask) + statement("GroupMemoryBarrier();"); + else if (semantics != 0 && (semantics & MemorySemanticsWorkgroupMemoryMask) == 0) + statement("DeviceMemoryBarrier();"); + else + statement("AllMemoryBarrier();"); + } + break; + } + + case OpBitFieldInsert: + { + if (!requires_bitfield_insert) + { + requires_bitfield_insert = true; + force_recompile(); + } + + auto expr = join("SPIRV_Cross_bitfieldInsert(", to_expression(ops[2]), ", ", to_expression(ops[3]), ", ", + to_expression(ops[4]), ", ", to_expression(ops[5]), ")"); + + bool forward = + should_forward(ops[2]) && should_forward(ops[3]) && should_forward(ops[4]) && should_forward(ops[5]); + + auto &restype = get(ops[0]); + expr = bitcast_expression(restype, SPIRType::UInt, expr); + emit_op(ops[0], ops[1], expr, forward); + break; + } + + case OpBitFieldSExtract: + case OpBitFieldUExtract: + { + if (!requires_bitfield_extract) + { + requires_bitfield_extract = true; + force_recompile(); + } + + if (opcode == OpBitFieldSExtract) + HLSL_TFOP(SPIRV_Cross_bitfieldSExtract); + else + HLSL_TFOP(SPIRV_Cross_bitfieldUExtract); + break; + } + + case OpBitCount: + { + auto basetype = expression_type(ops[2]).basetype; + emit_unary_func_op_cast(ops[0], ops[1], ops[2], "countbits", basetype, basetype); + break; + } + + case OpBitReverse: + HLSL_UFOP(reversebits); + break; + + case OpArrayLength: + { + auto *var = maybe_get(ops[2]); + if (!var) + SPIRV_CROSS_THROW("Array length must point directly to an SSBO block."); + + auto &type = get(var->basetype); + if (!has_decoration(type.self, DecorationBlock) && !has_decoration(type.self, DecorationBufferBlock)) + SPIRV_CROSS_THROW("Array length expression must point to a block type."); + + + emit_uninitialized_temporary_expression(ops[0], ops[1]); + statement(to_expression(ops[2]), ".GetDimensions(", to_expression(ops[1]), ");"); + uint32_t offset = type_struct_member_offset(type, ops[3]); + uint32_t stride = type_struct_member_array_stride(type, ops[3]); + statement(to_expression(ops[1]), " = (", to_expression(ops[1]), " - ", offset, ") / ", stride, ";"); + break; + } + + case OpIsHelperInvocationEXT: + SPIRV_CROSS_THROW("helperInvocationEXT() is not supported in HLSL."); + + case OpBeginInvocationInterlockEXT: + case OpEndInvocationInterlockEXT: + if (hlsl_options.shader_model < 51) + SPIRV_CROSS_THROW("Rasterizer order views require Shader Model 5.1."); + break; + + default: + CompilerGLSL::emit_instruction(instruction); + break; + } +} + +void CompilerHLSL::require_texture_query_variant(const SPIRType &type) +{ + uint32_t bit = 0; + switch (type.image.dim) + { + case Dim1D: + bit = type.image.arrayed ? Query1DArray : Query1D; + break; + + case Dim2D: + if (type.image.ms) + bit = type.image.arrayed ? Query2DMSArray : Query2DMS; + else + bit = type.image.arrayed ? Query2DArray : Query2D; + break; + + case Dim3D: + bit = Query3D; + break; + + case DimCube: + bit = type.image.arrayed ? 
QueryCubeArray : QueryCube; + break; + + case DimBuffer: + bit = QueryBuffer; + break; + + default: + SPIRV_CROSS_THROW("Unsupported query type."); + } + + switch (get(type.image.type).basetype) + { + case SPIRType::Float: + bit += QueryTypeFloat; + break; + + case SPIRType::Int: + bit += QueryTypeInt; + break; + + case SPIRType::UInt: + bit += QueryTypeUInt; + break; + + default: + SPIRV_CROSS_THROW("Unsupported query type."); + } + + uint64_t mask = 1ull << bit; + if ((required_textureSizeVariants & mask) == 0) + { + force_recompile(); + required_textureSizeVariants |= mask; + } +} + +void CompilerHLSL::set_root_constant_layouts(std::vector layout) +{ + root_constants_layout = move(layout); +} + +void CompilerHLSL::add_vertex_attribute_remap(const HLSLVertexAttributeRemap &vertex_attributes) +{ + remap_vertex_attributes.push_back(vertex_attributes); +} + +VariableID CompilerHLSL::remap_num_workgroups_builtin() +{ + update_active_builtins(); + + if (!active_input_builtins.get(BuiltInNumWorkgroups)) + return 0; + + + uint32_t offset = ir.increase_bound_by(4); + + uint32_t uint_type_id = offset; + uint32_t block_type_id = offset + 1; + uint32_t block_pointer_type_id = offset + 2; + uint32_t variable_id = offset + 3; + + SPIRType uint_type; + uint_type.basetype = SPIRType::UInt; + uint_type.width = 32; + uint_type.vecsize = 3; + uint_type.columns = 1; + set(uint_type_id, uint_type); + + SPIRType block_type; + block_type.basetype = SPIRType::Struct; + block_type.member_types.push_back(uint_type_id); + set(block_type_id, block_type); + set_decoration(block_type_id, DecorationBlock); + set_member_name(block_type_id, 0, "count"); + set_member_decoration(block_type_id, 0, DecorationOffset, 0); + + SPIRType block_pointer_type = block_type; + block_pointer_type.pointer = true; + block_pointer_type.storage = StorageClassUniform; + block_pointer_type.parent_type = block_type_id; + auto &ptr_type = set(block_pointer_type_id, block_pointer_type); + + + ptr_type.self = block_type_id; + + set(variable_id, block_pointer_type_id, StorageClassUniform); + ir.meta[variable_id].decoration.alias = "SPIRV_Cross_NumWorkgroups"; + + num_workgroups_builtin = variable_id; + return variable_id; +} + +void CompilerHLSL::validate_shader_model() +{ + + + for (auto &cap : ir.declared_capabilities) + { + switch (cap) + { + case CapabilityShaderNonUniformEXT: + case CapabilityRuntimeDescriptorArrayEXT: + if (hlsl_options.shader_model < 51) + SPIRV_CROSS_THROW( + "Shader model 5.1 or higher is required to use bindless resources or NonUniformResourceIndex."); + default: + break; + } + } + + if (ir.addressing_model != AddressingModelLogical) + SPIRV_CROSS_THROW("Only Logical addressing model can be used with HLSL."); +} + +string CompilerHLSL::compile() +{ + + options.es = false; + options.version = 450; + options.vulkan_semantics = true; + backend.float_literal_suffix = true; + backend.double_literal_suffix = false; + backend.long_long_literal_suffix = true; + backend.uint32_t_literal_suffix = true; + backend.int16_t_literal_suffix = ""; + backend.uint16_t_literal_suffix = "u"; + backend.basic_int_type = "int"; + backend.basic_uint_type = "uint"; + backend.demote_literal = "discard"; + backend.boolean_mix_function = ""; + backend.swizzle_is_function = false; + backend.shared_is_implied = true; + backend.unsized_array_supported = true; + backend.explicit_struct_type = false; + backend.use_initializer_list = true; + backend.use_constructor_splatting = false; + backend.can_swizzle_scalar = true; + 
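+ // For orientation: the backend flags above and below adapt the shared GLSL
+ // emitter to HLSL output. A minimal usage sketch of this class, assuming a
+ // valid SPIR-V module; load_spirv() and the shader model value are
+ // illustrative, not taken from this patch:
+ //
+ //   std::vector<uint32_t> words = load_spirv();
+ //   spirv_cross::CompilerHLSL hlsl(std::move(words));
+ //   spirv_cross::CompilerHLSL::Options opts;
+ //   opts.shader_model = 50; // SM 5.0; the default is 30
+ //   hlsl.set_hlsl_options(opts);
+ //   std::string source = hlsl.compile(); // multi-pass, see the loop below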
backend.can_declare_struct_inline = false; + backend.can_declare_arrays_inline = false; + backend.can_return_array = false; + backend.nonuniform_qualifier = "NonUniformResourceIndex"; + backend.support_case_fallthrough = false; + + fixup_type_alias(); + reorder_type_alias(); + build_function_control_flow_graphs_and_analyze(); + validate_shader_model(); + update_active_builtins(); + analyze_image_and_sampler_usage(); + analyze_interlocked_resource_usage(); + + + if (need_subpass_input) + active_input_builtins.set(BuiltInFragCoord); + + uint32_t pass_count = 0; + do + { + if (pass_count >= 3) + SPIRV_CROSS_THROW("Over 3 compilation loops detected. Must be a bug!"); + + reset(); + + + buffer.reset(); + + emit_header(); + emit_resources(); + + emit_function(get(ir.default_entry_point), Bitset()); + emit_hlsl_entry_point(); + + pass_count++; + } while (is_forcing_recompilation()); + + + get_entry_point().name = "main"; + + return buffer.str(); +} + +void CompilerHLSL::emit_block_hints(const SPIRBlock &block) +{ + switch (block.hint) + { + case SPIRBlock::HintFlatten: + statement("[flatten]"); + break; + case SPIRBlock::HintDontFlatten: + statement("[branch]"); + break; + case SPIRBlock::HintUnroll: + statement("[unroll]"); + break; + case SPIRBlock::HintDontUnroll: + statement("[loop]"); + break; + default: + break; + } +} diff --git a/third_party/rust/spirv_cross/src/vendor/SPIRV-Cross/spirv_hlsl.hpp b/third_party/rust/spirv_cross/src/vendor/SPIRV-Cross/spirv_hlsl.hpp new file mode 100644 index 000000000000..c338728189d7 --- /dev/null +++ b/third_party/rust/spirv_cross/src/vendor/SPIRV-Cross/spirv_hlsl.hpp @@ -0,0 +1,233 @@ + + + + + + + + + + + + + + + + +#ifndef SPIRV_HLSL_HPP +#define SPIRV_HLSL_HPP + +#include "spirv_glsl.hpp" +#include + +namespace SPIRV_CROSS_NAMESPACE +{ + +struct HLSLVertexAttributeRemap +{ + uint32_t location; + std::string semantic; +}; + + + + +struct RootConstants +{ + uint32_t start; + uint32_t end; + + uint32_t binding; + uint32_t space; +}; + +class CompilerHLSL : public CompilerGLSL +{ +public: + struct Options + { + uint32_t shader_model = 30; + + + bool point_size_compat = false; + + + bool point_coord_compat = false; + + + + + + bool support_nonzero_base_vertex_base_instance = false; + }; + + explicit CompilerHLSL(std::vector spirv_) + : CompilerGLSL(std::move(spirv_)) + { + } + + CompilerHLSL(const uint32_t *ir_, size_t size) + : CompilerGLSL(ir_, size) + { + } + + explicit CompilerHLSL(const ParsedIR &ir_) + : CompilerGLSL(ir_) + { + } + + explicit CompilerHLSL(ParsedIR &&ir_) + : CompilerGLSL(std::move(ir_)) + { + } + + const Options &get_hlsl_options() const + { + return hlsl_options; + } + + void set_hlsl_options(const Options &opts) + { + hlsl_options = opts; + } + + + + + + void set_root_constant_layouts(std::vector layout); + + + + + + void add_vertex_attribute_remap(const HLSLVertexAttributeRemap &vertex_attributes); + std::string compile() override; + + + + + + + + + + + + VariableID remap_num_workgroups_builtin(); + +private: + std::string type_to_glsl(const SPIRType &type, uint32_t id = 0) override; + std::string image_type_hlsl(const SPIRType &type, uint32_t id); + std::string image_type_hlsl_modern(const SPIRType &type, uint32_t id); + std::string image_type_hlsl_legacy(const SPIRType &type, uint32_t id); + void emit_function_prototype(SPIRFunction &func, const Bitset &return_flags) override; + void emit_hlsl_entry_point(); + void emit_header() override; + void emit_resources(); + void emit_interface_block_globally(const SPIRVariable &type); + 
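+ // Usage notes for the HLSL-specific public entry points declared above, as a
+ // hedged sketch (values illustrative):
+ //
+ //   CompilerHLSL compiler(words, word_count);
+ //   // Map the vertex input at location 0 to the D3D semantic "POSITION".
+ //   compiler.add_vertex_attribute_remap({ 0, "POSITION" });
+ //   // HLSL has no gl_NumWorkGroups; this synthesizes a cbuffer named
+ //   // SPIRV_Cross_NumWorkgroups and returns its variable ID (0 if unused).
+ //   VariableID nwg = compiler.remap_num_workgroups_builtin();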
void emit_interface_block_in_struct(const SPIRVariable &type, std::unordered_set &active_locations); + void emit_builtin_inputs_in_struct(); + void emit_builtin_outputs_in_struct(); + void emit_texture_op(const Instruction &i) override; + void emit_instruction(const Instruction &instruction) override; + void emit_glsl_op(uint32_t result_type, uint32_t result_id, uint32_t op, const uint32_t *args, + uint32_t count) override; + void emit_buffer_block(const SPIRVariable &type) override; + void emit_push_constant_block(const SPIRVariable &var) override; + void emit_uniform(const SPIRVariable &var) override; + void emit_modern_uniform(const SPIRVariable &var); + void emit_legacy_uniform(const SPIRVariable &var); + void emit_specialization_constants_and_structs(); + void emit_composite_constants(); + void emit_fixup() override; + std::string builtin_to_glsl(spv::BuiltIn builtin, spv::StorageClass storage) override; + std::string layout_for_member(const SPIRType &type, uint32_t index) override; + std::string to_interpolation_qualifiers(const Bitset &flags) override; + std::string bitcast_glsl_op(const SPIRType &result_type, const SPIRType &argument_type) override; + std::string to_func_call_arg(const SPIRFunction::Parameter &arg, uint32_t id) override; + std::string to_sampler_expression(uint32_t id); + std::string to_resource_binding(const SPIRVariable &var); + std::string to_resource_binding_sampler(const SPIRVariable &var); + std::string to_resource_register(char space, uint32_t binding, uint32_t set); + void emit_sampled_image_op(uint32_t result_type, uint32_t result_id, uint32_t image_id, uint32_t samp_id) override; + void emit_access_chain(const Instruction &instruction); + void emit_load(const Instruction &instruction); + std::string read_access_chain(const SPIRAccessChain &chain); + void write_access_chain(const SPIRAccessChain &chain, uint32_t value); + void emit_store(const Instruction &instruction); + void emit_atomic(const uint32_t *ops, uint32_t length, spv::Op op); + void emit_subgroup_op(const Instruction &i) override; + void emit_block_hints(const SPIRBlock &block) override; + + void emit_struct_member(const SPIRType &type, uint32_t member_type_id, uint32_t index, const std::string &qualifier, + uint32_t base_offset = 0) override; + + const char *to_storage_qualifiers_glsl(const SPIRVariable &var) override; + void replace_illegal_names() override; + + Options hlsl_options; + + + bool requires_op_fmod = false; + bool requires_fp16_packing = false; + bool requires_explicit_fp16_packing = false; + bool requires_unorm8_packing = false; + bool requires_snorm8_packing = false; + bool requires_unorm16_packing = false; + bool requires_snorm16_packing = false; + bool requires_bitfield_insert = false; + bool requires_bitfield_extract = false; + bool requires_inverse_2x2 = false; + bool requires_inverse_3x3 = false; + bool requires_inverse_4x4 = false; + bool requires_scalar_reflect = false; + bool requires_scalar_refract = false; + bool requires_scalar_faceforward = false; + uint64_t required_textureSizeVariants = 0; + void require_texture_query_variant(const SPIRType &type); + + enum TextureQueryVariantDim + { + Query1D = 0, + Query1DArray, + Query2D, + Query2DArray, + Query3D, + QueryBuffer, + QueryCube, + QueryCubeArray, + Query2DMS, + Query2DMSArray, + QueryDimCount + }; + + enum TextureQueryVariantType + { + QueryTypeFloat = 0, + QueryTypeInt = 16, + QueryTypeUInt = 32, + QueryTypeCount = 3 + }; + + void emit_builtin_variables(); + bool require_output = false; + bool require_input = 
false; + SmallVector<HLSLVertexAttributeRemap> remap_vertex_attributes; + + uint32_t type_to_consumed_locations(const SPIRType &type) const; + + void emit_io_block(const SPIRVariable &var); + std::string to_semantic(uint32_t location, spv::ExecutionModel em, spv::StorageClass sc); + + uint32_t num_workgroups_builtin = 0; + + + + std::vector<RootConstants> root_constants_layout; + + void validate_shader_model(); +}; +} + +#endif diff --git a/third_party/rust/spirv_cross/src/vendor/SPIRV-Cross/spirv_msl.cpp b/third_party/rust/spirv_cross/src/vendor/SPIRV-Cross/spirv_msl.cpp new file mode 100644 index 000000000000..f163b2b63242 --- /dev/null +++ b/third_party/rust/spirv_cross/src/vendor/SPIRV-Cross/spirv_msl.cpp @@ -0,0 +1,12444 @@ + + + + + + + + + + + + + + + + +#include "spirv_msl.hpp" +#include "GLSL.std.450.h" + +#include <algorithm> +#include <assert.h> +#include <numeric> + +using namespace spv; +using namespace SPIRV_CROSS_NAMESPACE; +using namespace std; + +static const uint32_t k_unknown_location = ~0u; +static const uint32_t k_unknown_component = ~0u; +static const char *force_inline = "static inline __attribute__((always_inline))"; + +CompilerMSL::CompilerMSL(std::vector<uint32_t> spirv_) + : CompilerGLSL(move(spirv_)) +{ +} + +CompilerMSL::CompilerMSL(const uint32_t *ir_, size_t word_count) + : CompilerGLSL(ir_, word_count) +{ +} + +CompilerMSL::CompilerMSL(const ParsedIR &ir_) + : CompilerGLSL(ir_) +{ +} + +CompilerMSL::CompilerMSL(ParsedIR &&ir_) + : CompilerGLSL(std::move(ir_)) +{ +} + +void CompilerMSL::add_msl_vertex_attribute(const MSLVertexAttr &va) +{ + vtx_attrs_by_location[va.location] = va; + if (va.builtin != BuiltInMax && !vtx_attrs_by_builtin.count(va.builtin)) + vtx_attrs_by_builtin[va.builtin] = va; +} + +void CompilerMSL::add_msl_resource_binding(const MSLResourceBinding &binding) +{ + StageSetBinding tuple = { binding.stage, binding.desc_set, binding.binding }; + resource_bindings[tuple] = { binding, false }; +} + +void CompilerMSL::add_dynamic_buffer(uint32_t desc_set, uint32_t binding, uint32_t index) +{ + SetBindingPair pair = { desc_set, binding }; + buffers_requiring_dynamic_offset[pair] = { index, 0 }; +} + +void CompilerMSL::add_discrete_descriptor_set(uint32_t desc_set) +{ + if (desc_set < kMaxArgumentBuffers) + argument_buffer_discrete_mask |= 1u << desc_set; +} + +void CompilerMSL::set_argument_buffer_device_address_space(uint32_t desc_set, bool device_storage) +{ + if (desc_set < kMaxArgumentBuffers) + { + if (device_storage) + argument_buffer_device_storage_mask |= 1u << desc_set; + else + argument_buffer_device_storage_mask &= ~(1u << desc_set); + } +} + +bool CompilerMSL::is_msl_vertex_attribute_used(uint32_t location) +{ + return vtx_attrs_in_use.count(location) != 0; +} + +bool CompilerMSL::is_msl_resource_binding_used(ExecutionModel model, uint32_t desc_set, uint32_t binding) +{ + StageSetBinding tuple = { model, desc_set, binding }; + auto itr = resource_bindings.find(tuple); + return itr != end(resource_bindings) && itr->second.second; +} + +uint32_t CompilerMSL::get_automatic_msl_resource_binding(uint32_t id) const +{ + return get_extended_decoration(id, SPIRVCrossDecorationResourceIndexPrimary); +} + +uint32_t CompilerMSL::get_automatic_msl_resource_binding_secondary(uint32_t id) const +{ + return get_extended_decoration(id, SPIRVCrossDecorationResourceIndexSecondary); +} + +uint32_t CompilerMSL::get_automatic_msl_resource_binding_tertiary(uint32_t id) const +{ + return get_extended_decoration(id, SPIRVCrossDecorationResourceIndexTertiary); +} + +uint32_t CompilerMSL::get_automatic_msl_resource_binding_quaternary(uint32_t
id) const +{ + return get_extended_decoration(id, SPIRVCrossDecorationResourceIndexQuaternary); +} + +void CompilerMSL::set_fragment_output_components(uint32_t location, uint32_t components) +{ + fragment_output_components[location] = components; +} + +bool CompilerMSL::builtin_translates_to_nonarray(spv::BuiltIn builtin) const +{ + return (builtin == BuiltInSampleMask); +} + +void CompilerMSL::build_implicit_builtins() +{ + bool need_sample_pos = active_input_builtins.get(BuiltInSamplePosition); + bool need_vertex_params = capture_output_to_buffer && get_execution_model() == ExecutionModelVertex; + bool need_tesc_params = get_execution_model() == ExecutionModelTessellationControl; + bool need_subgroup_mask = + active_input_builtins.get(BuiltInSubgroupEqMask) || active_input_builtins.get(BuiltInSubgroupGeMask) || + active_input_builtins.get(BuiltInSubgroupGtMask) || active_input_builtins.get(BuiltInSubgroupLeMask) || + active_input_builtins.get(BuiltInSubgroupLtMask); + bool need_subgroup_ge_mask = !msl_options.is_ios() && (active_input_builtins.get(BuiltInSubgroupGeMask) || + active_input_builtins.get(BuiltInSubgroupGtMask)); + bool need_multiview = get_execution_model() == ExecutionModelVertex && !msl_options.view_index_from_device_index && + (msl_options.multiview || active_input_builtins.get(BuiltInViewIndex)); + bool need_dispatch_base = + msl_options.dispatch_base && get_execution_model() == ExecutionModelGLCompute && + (active_input_builtins.get(BuiltInWorkgroupId) || active_input_builtins.get(BuiltInGlobalInvocationId)); + if (need_subpass_input || need_sample_pos || need_subgroup_mask || need_vertex_params || need_tesc_params || + need_multiview || need_dispatch_base || needs_subgroup_invocation_id) + { + bool has_frag_coord = false; + bool has_sample_id = false; + bool has_vertex_idx = false; + bool has_base_vertex = false; + bool has_instance_idx = false; + bool has_base_instance = false; + bool has_invocation_id = false; + bool has_primitive_id = false; + bool has_subgroup_invocation_id = false; + bool has_subgroup_size = false; + bool has_view_idx = false; + uint32_t workgroup_id_type = 0; + + ir.for_each_typed_id([&](uint32_t, SPIRVariable &var) { + if (var.storage != StorageClassInput || !ir.meta[var.self].decoration.builtin) + return; + + + BuiltIn builtin = ir.meta[var.self].decoration.builtin_type; + if (need_subpass_input && (!msl_options.is_ios() || !msl_options.ios_use_framebuffer_fetch_subpasses) && + builtin == BuiltInFragCoord) + { + builtin_frag_coord_id = var.self; + has_frag_coord = true; + } + + if (need_sample_pos && builtin == BuiltInSampleId) + { + builtin_sample_id_id = var.self; + has_sample_id = true; + } + + if (need_vertex_params) + { + switch (builtin) + { + case BuiltInVertexIndex: + builtin_vertex_idx_id = var.self; + has_vertex_idx = true; + break; + case BuiltInBaseVertex: + builtin_base_vertex_id = var.self; + has_base_vertex = true; + break; + case BuiltInInstanceIndex: + builtin_instance_idx_id = var.self; + has_instance_idx = true; + break; + case BuiltInBaseInstance: + builtin_base_instance_id = var.self; + has_base_instance = true; + break; + default: + break; + } + } + + if (need_tesc_params) + { + switch (builtin) + { + case BuiltInInvocationId: + builtin_invocation_id_id = var.self; + has_invocation_id = true; + break; + case BuiltInPrimitiveId: + builtin_primitive_id_id = var.self; + has_primitive_id = true; + break; + default: + break; + } + } + + if ((need_subgroup_mask || needs_subgroup_invocation_id) && builtin == 
BuiltInSubgroupLocalInvocationId) + { + builtin_subgroup_invocation_id_id = var.self; + has_subgroup_invocation_id = true; + } + + if (need_subgroup_ge_mask && builtin == BuiltInSubgroupSize) + { + builtin_subgroup_size_id = var.self; + has_subgroup_size = true; + } + + if (need_multiview) + { + switch (builtin) + { + case BuiltInInstanceIndex: + + builtin_instance_idx_id = var.self; + has_instance_idx = true; + break; + case BuiltInViewIndex: + builtin_view_idx_id = var.self; + has_view_idx = true; + break; + default: + break; + } + } + + + + + if (need_dispatch_base && workgroup_id_type == 0 && + (builtin == BuiltInWorkgroupId || builtin == BuiltInGlobalInvocationId)) + workgroup_id_type = var.basetype; + }); + + + if (!has_frag_coord && (!msl_options.is_ios() || !msl_options.ios_use_framebuffer_fetch_subpasses) && + need_subpass_input) + { + uint32_t offset = ir.increase_bound_by(3); + uint32_t type_id = offset; + uint32_t type_ptr_id = offset + 1; + uint32_t var_id = offset + 2; + + + SPIRType vec4_type; + vec4_type.basetype = SPIRType::Float; + vec4_type.width = 32; + vec4_type.vecsize = 4; + set(type_id, vec4_type); + + SPIRType vec4_type_ptr; + vec4_type_ptr = vec4_type; + vec4_type_ptr.pointer = true; + vec4_type_ptr.parent_type = type_id; + vec4_type_ptr.storage = StorageClassInput; + auto &ptr_type = set(type_ptr_id, vec4_type_ptr); + ptr_type.self = type_id; + + set(var_id, type_ptr_id, StorageClassInput); + set_decoration(var_id, DecorationBuiltIn, BuiltInFragCoord); + builtin_frag_coord_id = var_id; + mark_implicit_builtin(StorageClassInput, BuiltInFragCoord, var_id); + } + + if (!has_sample_id && need_sample_pos) + { + uint32_t offset = ir.increase_bound_by(3); + uint32_t type_id = offset; + uint32_t type_ptr_id = offset + 1; + uint32_t var_id = offset + 2; + + + SPIRType uint_type; + uint_type.basetype = SPIRType::UInt; + uint_type.width = 32; + set(type_id, uint_type); + + SPIRType uint_type_ptr; + uint_type_ptr = uint_type; + uint_type_ptr.pointer = true; + uint_type_ptr.parent_type = type_id; + uint_type_ptr.storage = StorageClassInput; + auto &ptr_type = set(type_ptr_id, uint_type_ptr); + ptr_type.self = type_id; + + set(var_id, type_ptr_id, StorageClassInput); + set_decoration(var_id, DecorationBuiltIn, BuiltInSampleId); + builtin_sample_id_id = var_id; + mark_implicit_builtin(StorageClassInput, BuiltInSampleId, var_id); + } + + if ((need_vertex_params && (!has_vertex_idx || !has_base_vertex || !has_instance_idx || !has_base_instance)) || + (need_multiview && (!has_instance_idx || !has_view_idx))) + { + uint32_t offset = ir.increase_bound_by(2); + uint32_t type_id = offset; + uint32_t type_ptr_id = offset + 1; + + SPIRType uint_type; + uint_type.basetype = SPIRType::UInt; + uint_type.width = 32; + set(type_id, uint_type); + + SPIRType uint_type_ptr; + uint_type_ptr = uint_type; + uint_type_ptr.pointer = true; + uint_type_ptr.parent_type = type_id; + uint_type_ptr.storage = StorageClassInput; + auto &ptr_type = set(type_ptr_id, uint_type_ptr); + ptr_type.self = type_id; + + if (need_vertex_params && !has_vertex_idx) + { + uint32_t var_id = ir.increase_bound_by(1); + + + set(var_id, type_ptr_id, StorageClassInput); + set_decoration(var_id, DecorationBuiltIn, BuiltInVertexIndex); + builtin_vertex_idx_id = var_id; + mark_implicit_builtin(StorageClassInput, BuiltInVertexIndex, var_id); + } + + if (need_vertex_params && !has_base_vertex) + { + uint32_t var_id = ir.increase_bound_by(1); + + + set(var_id, type_ptr_id, StorageClassInput); + set_decoration(var_id, 
DecorationBuiltIn, BuiltInBaseVertex); + builtin_base_vertex_id = var_id; + mark_implicit_builtin(StorageClassInput, BuiltInBaseVertex, var_id); + } + + if (!has_instance_idx) + { + uint32_t var_id = ir.increase_bound_by(1); + + + set(var_id, type_ptr_id, StorageClassInput); + set_decoration(var_id, DecorationBuiltIn, BuiltInInstanceIndex); + builtin_instance_idx_id = var_id; + mark_implicit_builtin(StorageClassInput, BuiltInInstanceIndex, var_id); + } + + if (need_vertex_params && !has_base_instance) + { + uint32_t var_id = ir.increase_bound_by(1); + + + set(var_id, type_ptr_id, StorageClassInput); + set_decoration(var_id, DecorationBuiltIn, BuiltInBaseInstance); + builtin_base_instance_id = var_id; + mark_implicit_builtin(StorageClassInput, BuiltInBaseInstance, var_id); + } + + if (need_multiview) + { + + + + + uint32_t type_ptr_out_id = ir.increase_bound_by(2); + SPIRType uint_type_ptr_out; + uint_type_ptr_out = uint_type; + uint_type_ptr_out.pointer = true; + uint_type_ptr_out.parent_type = type_id; + uint_type_ptr_out.storage = StorageClassOutput; + auto &ptr_out_type = set(type_ptr_out_id, uint_type_ptr_out); + ptr_out_type.self = type_id; + uint32_t var_id = type_ptr_out_id + 1; + set(var_id, type_ptr_out_id, StorageClassOutput); + set_decoration(var_id, DecorationBuiltIn, BuiltInLayer); + builtin_layer_id = var_id; + mark_implicit_builtin(StorageClassOutput, BuiltInLayer, var_id); + } + + if (need_multiview && !has_view_idx) + { + uint32_t var_id = ir.increase_bound_by(1); + + + set(var_id, type_ptr_id, StorageClassInput); + set_decoration(var_id, DecorationBuiltIn, BuiltInViewIndex); + builtin_view_idx_id = var_id; + mark_implicit_builtin(StorageClassInput, BuiltInViewIndex, var_id); + } + } + + if (need_tesc_params && (!has_invocation_id || !has_primitive_id)) + { + uint32_t offset = ir.increase_bound_by(2); + uint32_t type_id = offset; + uint32_t type_ptr_id = offset + 1; + + SPIRType uint_type; + uint_type.basetype = SPIRType::UInt; + uint_type.width = 32; + set(type_id, uint_type); + + SPIRType uint_type_ptr; + uint_type_ptr = uint_type; + uint_type_ptr.pointer = true; + uint_type_ptr.parent_type = type_id; + uint_type_ptr.storage = StorageClassInput; + auto &ptr_type = set(type_ptr_id, uint_type_ptr); + ptr_type.self = type_id; + + if (!has_invocation_id) + { + uint32_t var_id = ir.increase_bound_by(1); + + + set(var_id, type_ptr_id, StorageClassInput); + set_decoration(var_id, DecorationBuiltIn, BuiltInInvocationId); + builtin_invocation_id_id = var_id; + mark_implicit_builtin(StorageClassInput, BuiltInInvocationId, var_id); + } + + if (!has_primitive_id) + { + uint32_t var_id = ir.increase_bound_by(1); + + + set(var_id, type_ptr_id, StorageClassInput); + set_decoration(var_id, DecorationBuiltIn, BuiltInPrimitiveId); + builtin_primitive_id_id = var_id; + mark_implicit_builtin(StorageClassInput, BuiltInPrimitiveId, var_id); + } + } + + if (!has_subgroup_invocation_id && (need_subgroup_mask || needs_subgroup_invocation_id)) + { + uint32_t offset = ir.increase_bound_by(3); + uint32_t type_id = offset; + uint32_t type_ptr_id = offset + 1; + uint32_t var_id = offset + 2; + + + SPIRType uint_type; + uint_type.basetype = SPIRType::UInt; + uint_type.width = 32; + set(type_id, uint_type); + + SPIRType uint_type_ptr; + uint_type_ptr = uint_type; + uint_type_ptr.pointer = true; + uint_type_ptr.parent_type = type_id; + uint_type_ptr.storage = StorageClassInput; + auto &ptr_type = set(type_ptr_id, uint_type_ptr); + ptr_type.self = type_id; + + set(var_id, type_ptr_id, StorageClassInput); 
+ set_decoration(var_id, DecorationBuiltIn, BuiltInSubgroupLocalInvocationId); + builtin_subgroup_invocation_id_id = var_id; + mark_implicit_builtin(StorageClassInput, BuiltInSubgroupLocalInvocationId, var_id); + } + + if (!has_subgroup_size && need_subgroup_ge_mask) + { + uint32_t offset = ir.increase_bound_by(3); + uint32_t type_id = offset; + uint32_t type_ptr_id = offset + 1; + uint32_t var_id = offset + 2; + + + SPIRType uint_type; + uint_type.basetype = SPIRType::UInt; + uint_type.width = 32; + set(type_id, uint_type); + + SPIRType uint_type_ptr; + uint_type_ptr = uint_type; + uint_type_ptr.pointer = true; + uint_type_ptr.parent_type = type_id; + uint_type_ptr.storage = StorageClassInput; + auto &ptr_type = set(type_ptr_id, uint_type_ptr); + ptr_type.self = type_id; + + set(var_id, type_ptr_id, StorageClassInput); + set_decoration(var_id, DecorationBuiltIn, BuiltInSubgroupSize); + builtin_subgroup_size_id = var_id; + mark_implicit_builtin(StorageClassInput, BuiltInSubgroupSize, var_id); + } + + if (need_dispatch_base) + { + uint32_t var_id; + if (msl_options.supports_msl_version(1, 2)) + { + + + uint32_t offset = ir.increase_bound_by(1); + var_id = offset; + + set(var_id, workgroup_id_type, StorageClassInput); + set_extended_decoration(var_id, SPIRVCrossDecorationBuiltInDispatchBase); + get_entry_point().interface_variables.push_back(var_id); + } + else + { + + uint32_t offset = ir.increase_bound_by(2); + var_id = offset; + uint32_t type_id = offset + 1; + + SPIRType var_type = get(workgroup_id_type); + var_type.storage = StorageClassUniform; + set(type_id, var_type); + + set(var_id, type_id, StorageClassUniform); + + set_decoration(var_id, DecorationDescriptorSet, ~(5u)); + set_decoration(var_id, DecorationBinding, msl_options.indirect_params_buffer_index); + set_extended_decoration(var_id, SPIRVCrossDecorationResourceIndexPrimary, + msl_options.indirect_params_buffer_index); + } + set_name(var_id, "spvDispatchBase"); + builtin_dispatch_base_id = var_id; + } + } + + if (needs_swizzle_buffer_def) + { + uint32_t var_id = build_constant_uint_array_pointer(); + set_name(var_id, "spvSwizzleConstants"); + + set_decoration(var_id, DecorationDescriptorSet, kSwizzleBufferBinding); + set_decoration(var_id, DecorationBinding, msl_options.swizzle_buffer_index); + set_extended_decoration(var_id, SPIRVCrossDecorationResourceIndexPrimary, msl_options.swizzle_buffer_index); + swizzle_buffer_id = var_id; + } + + if (!buffers_requiring_array_length.empty()) + { + uint32_t var_id = build_constant_uint_array_pointer(); + set_name(var_id, "spvBufferSizeConstants"); + + set_decoration(var_id, DecorationDescriptorSet, kBufferSizeBufferBinding); + set_decoration(var_id, DecorationBinding, msl_options.buffer_size_buffer_index); + set_extended_decoration(var_id, SPIRVCrossDecorationResourceIndexPrimary, msl_options.buffer_size_buffer_index); + buffer_size_buffer_id = var_id; + } + + if (needs_view_mask_buffer()) + { + uint32_t var_id = build_constant_uint_array_pointer(); + set_name(var_id, "spvViewMask"); + + set_decoration(var_id, DecorationDescriptorSet, ~(4u)); + set_decoration(var_id, DecorationBinding, msl_options.view_mask_buffer_index); + set_extended_decoration(var_id, SPIRVCrossDecorationResourceIndexPrimary, msl_options.view_mask_buffer_index); + view_mask_buffer_id = var_id; + } + + if (!buffers_requiring_dynamic_offset.empty()) + { + uint32_t var_id = build_constant_uint_array_pointer(); + set_name(var_id, "spvDynamicOffsets"); + + set_decoration(var_id, DecorationDescriptorSet, ~(5u)); + 
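+ // The inverted literals used as descriptor sets here and above (~(4u), ~(5u),
+ // alongside the kSwizzleBufferBinding/kBufferSizeBufferBinding constants)
+ // appear to serve as out-of-range sentinel set indices for
+ // compiler-synthesized buffers; application descriptor sets are small
+ // non-negative integers, so no collision is possible. For example:
+ //
+ //   static_assert(~(5u) == 0xFFFFFFFAu, "sentinel sets sit at the top of the range");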
set_decoration(var_id, DecorationBinding, msl_options.dynamic_offsets_buffer_index); + set_extended_decoration(var_id, SPIRVCrossDecorationResourceIndexPrimary, + msl_options.dynamic_offsets_buffer_index); + dynamic_offsets_buffer_id = var_id; + } +} + + + + +void CompilerMSL::ensure_builtin(spv::StorageClass storage, spv::BuiltIn builtin) +{ + Bitset *active_builtins = nullptr; + switch (storage) + { + case StorageClassInput: + active_builtins = &active_input_builtins; + break; + + case StorageClassOutput: + active_builtins = &active_output_builtins; + break; + + default: + break; + } + + + + if (active_builtins != nullptr && !active_builtins->get(builtin)) + { + active_builtins->set(builtin); + force_recompile(); + } +} + +void CompilerMSL::mark_implicit_builtin(StorageClass storage, BuiltIn builtin, uint32_t id) +{ + Bitset *active_builtins = nullptr; + switch (storage) + { + case StorageClassInput: + active_builtins = &active_input_builtins; + break; + + case StorageClassOutput: + active_builtins = &active_output_builtins; + break; + + default: + break; + } + + assert(active_builtins != nullptr); + active_builtins->set(builtin); + get_entry_point().interface_variables.push_back(id); +} + +uint32_t CompilerMSL::build_constant_uint_array_pointer() +{ + uint32_t offset = ir.increase_bound_by(4); + uint32_t type_id = offset; + uint32_t type_ptr_id = offset + 1; + uint32_t type_ptr_ptr_id = offset + 2; + uint32_t var_id = offset + 3; + + + SPIRType uint_type; + uint_type.basetype = SPIRType::UInt; + uint_type.width = 32; + set(type_id, uint_type); + + SPIRType uint_type_pointer = uint_type; + uint_type_pointer.pointer = true; + uint_type_pointer.pointer_depth = 1; + uint_type_pointer.parent_type = type_id; + uint_type_pointer.storage = StorageClassUniform; + set(type_ptr_id, uint_type_pointer); + set_decoration(type_ptr_id, DecorationArrayStride, 4); + + SPIRType uint_type_pointer2 = uint_type_pointer; + uint_type_pointer2.pointer_depth++; + uint_type_pointer2.parent_type = type_ptr_id; + set(type_ptr_ptr_id, uint_type_pointer2); + + set(var_id, type_ptr_ptr_id, StorageClassUniformConstant); + return var_id; +} + +static string create_sampler_address(const char *prefix, MSLSamplerAddress addr) +{ + switch (addr) + { + case MSL_SAMPLER_ADDRESS_CLAMP_TO_EDGE: + return join(prefix, "address::clamp_to_edge"); + case MSL_SAMPLER_ADDRESS_CLAMP_TO_ZERO: + return join(prefix, "address::clamp_to_zero"); + case MSL_SAMPLER_ADDRESS_CLAMP_TO_BORDER: + return join(prefix, "address::clamp_to_border"); + case MSL_SAMPLER_ADDRESS_REPEAT: + return join(prefix, "address::repeat"); + case MSL_SAMPLER_ADDRESS_MIRRORED_REPEAT: + return join(prefix, "address::mirrored_repeat"); + default: + SPIRV_CROSS_THROW("Invalid sampler addressing mode."); + } +} + +SPIRType &CompilerMSL::get_stage_in_struct_type() +{ + auto &si_var = get(stage_in_var_id); + return get_variable_data_type(si_var); +} + +SPIRType &CompilerMSL::get_stage_out_struct_type() +{ + auto &so_var = get(stage_out_var_id); + return get_variable_data_type(so_var); +} + +SPIRType &CompilerMSL::get_patch_stage_in_struct_type() +{ + auto &si_var = get(patch_stage_in_var_id); + return get_variable_data_type(si_var); +} + +SPIRType &CompilerMSL::get_patch_stage_out_struct_type() +{ + auto &so_var = get(patch_stage_out_var_id); + return get_variable_data_type(so_var); +} + +std::string CompilerMSL::get_tess_factor_struct_name() +{ + if (get_entry_point().flags.get(ExecutionModeTriangles)) + return "MTLTriangleTessellationFactorsHalf"; + return 
"MTLQuadTessellationFactorsHalf"; +} + +void CompilerMSL::emit_entry_point_declarations() +{ + + + declare_complex_constant_arrays(); + + + for (auto &samp : constexpr_samplers_by_id) + { + auto &var = get(samp.first); + auto &type = get(var.basetype); + if (type.basetype == SPIRType::Sampler) + add_resource_name(samp.first); + + SmallVector args; + auto &s = samp.second; + + if (s.coord != MSL_SAMPLER_COORD_NORMALIZED) + args.push_back("coord::pixel"); + + if (s.min_filter == s.mag_filter) + { + if (s.min_filter != MSL_SAMPLER_FILTER_NEAREST) + args.push_back("filter::linear"); + } + else + { + if (s.min_filter != MSL_SAMPLER_FILTER_NEAREST) + args.push_back("min_filter::linear"); + if (s.mag_filter != MSL_SAMPLER_FILTER_NEAREST) + args.push_back("mag_filter::linear"); + } + + switch (s.mip_filter) + { + case MSL_SAMPLER_MIP_FILTER_NONE: + + break; + case MSL_SAMPLER_MIP_FILTER_NEAREST: + args.push_back("mip_filter::nearest"); + break; + case MSL_SAMPLER_MIP_FILTER_LINEAR: + args.push_back("mip_filter::linear"); + break; + default: + SPIRV_CROSS_THROW("Invalid mip filter."); + } + + if (s.s_address == s.t_address && s.s_address == s.r_address) + { + if (s.s_address != MSL_SAMPLER_ADDRESS_CLAMP_TO_EDGE) + args.push_back(create_sampler_address("", s.s_address)); + } + else + { + if (s.s_address != MSL_SAMPLER_ADDRESS_CLAMP_TO_EDGE) + args.push_back(create_sampler_address("s_", s.s_address)); + if (s.t_address != MSL_SAMPLER_ADDRESS_CLAMP_TO_EDGE) + args.push_back(create_sampler_address("t_", s.t_address)); + if (s.r_address != MSL_SAMPLER_ADDRESS_CLAMP_TO_EDGE) + args.push_back(create_sampler_address("r_", s.r_address)); + } + + if (s.compare_enable) + { + switch (s.compare_func) + { + case MSL_SAMPLER_COMPARE_FUNC_ALWAYS: + args.push_back("compare_func::always"); + break; + case MSL_SAMPLER_COMPARE_FUNC_NEVER: + args.push_back("compare_func::never"); + break; + case MSL_SAMPLER_COMPARE_FUNC_EQUAL: + args.push_back("compare_func::equal"); + break; + case MSL_SAMPLER_COMPARE_FUNC_NOT_EQUAL: + args.push_back("compare_func::not_equal"); + break; + case MSL_SAMPLER_COMPARE_FUNC_LESS: + args.push_back("compare_func::less"); + break; + case MSL_SAMPLER_COMPARE_FUNC_LESS_EQUAL: + args.push_back("compare_func::less_equal"); + break; + case MSL_SAMPLER_COMPARE_FUNC_GREATER: + args.push_back("compare_func::greater"); + break; + case MSL_SAMPLER_COMPARE_FUNC_GREATER_EQUAL: + args.push_back("compare_func::greater_equal"); + break; + default: + SPIRV_CROSS_THROW("Invalid sampler compare function."); + } + } + + if (s.s_address == MSL_SAMPLER_ADDRESS_CLAMP_TO_BORDER || s.t_address == MSL_SAMPLER_ADDRESS_CLAMP_TO_BORDER || + s.r_address == MSL_SAMPLER_ADDRESS_CLAMP_TO_BORDER) + { + switch (s.border_color) + { + case MSL_SAMPLER_BORDER_COLOR_OPAQUE_BLACK: + args.push_back("border_color::opaque_black"); + break; + case MSL_SAMPLER_BORDER_COLOR_OPAQUE_WHITE: + args.push_back("border_color::opaque_white"); + break; + case MSL_SAMPLER_BORDER_COLOR_TRANSPARENT_BLACK: + args.push_back("border_color::transparent_black"); + break; + default: + SPIRV_CROSS_THROW("Invalid sampler border color."); + } + } + + if (s.anisotropy_enable) + args.push_back(join("max_anisotropy(", s.max_anisotropy, ")")); + if (s.lod_clamp_enable) + { + args.push_back(join("lod_clamp(", convert_to_string(s.lod_clamp_min, current_locale_radix_character), ", ", + convert_to_string(s.lod_clamp_max, current_locale_radix_character), ")")); + } + + + + if (args.empty()) + statement("constexpr sampler ", + type.basetype == SPIRType::SampledImage ? 
to_sampler_expression(samp.first) : to_name(samp.first), + ";"); + else + statement("constexpr sampler ", + type.basetype == SPIRType::SampledImage ? to_sampler_expression(samp.first) : to_name(samp.first), + "(", merge(args), ");"); + } + + + for (auto &dynamic_buffer : buffers_requiring_dynamic_offset) + { + if (!dynamic_buffer.second.second) + { + + continue; + } + + const auto &var = get(dynamic_buffer.second.second); + uint32_t var_id = var.self; + const auto &type = get_variable_data_type(var); + string name = to_name(var.self); + uint32_t desc_set = get_decoration(var.self, DecorationDescriptorSet); + uint32_t arg_id = argument_buffer_ids[desc_set]; + uint32_t base_index = dynamic_buffer.second.first; + + if (!type.array.empty()) + { + + + + if (!type.array[type.array.size() - 1]) + SPIRV_CROSS_THROW("Runtime arrays with dynamic offsets are not supported yet."); + else + { + use_builtin_array = true; + statement(get_argument_address_space(var), " ", type_to_glsl(type), "* ", to_restrict(var_id), name, + type_to_array_glsl(type), " ="); + + uint32_t dim = uint32_t(type.array.size()); + uint32_t j = 0; + for (SmallVector indices(type.array.size()); + indices[type.array.size() - 1] < to_array_size_literal(type); j++) + { + while (dim > 0) + { + begin_scope(); + --dim; + } + + string arrays; + for (uint32_t i = uint32_t(type.array.size()); i; --i) + arrays += join("[", indices[i - 1], "]"); + statement("(", get_argument_address_space(var), " ", type_to_glsl(type), "* ", + to_restrict(var_id, false), ")((", get_argument_address_space(var), " char* ", + to_restrict(var_id, false), ")", to_name(arg_id), ".", ensure_valid_name(name, "m"), + arrays, " + ", to_name(dynamic_offsets_buffer_id), "[", base_index + j, "]),"); + + while (++indices[dim] >= to_array_size_literal(type, dim) && dim < type.array.size() - 1) + { + end_scope(","); + indices[dim++] = 0; + } + } + end_scope_decl(); + statement_no_indent(""); + use_builtin_array = false; + } + } + else + { + statement(get_argument_address_space(var), " auto& ", to_restrict(var_id), name, " = *(", + get_argument_address_space(var), " ", type_to_glsl(type), "* ", to_restrict(var_id, false), ")((", + get_argument_address_space(var), " char* ", to_restrict(var_id, false), ")", to_name(arg_id), ".", + ensure_valid_name(name, "m"), " + ", to_name(dynamic_offsets_buffer_id), "[", base_index, "]);"); + } + } + + + for (uint32_t array_id : buffer_arrays) + { + const auto &var = get(array_id); + const auto &type = get_variable_data_type(var); + const auto &buffer_type = get_variable_element_type(var); + string name = to_name(array_id); + statement(get_argument_address_space(var), " ", type_to_glsl(buffer_type), "* ", to_restrict(array_id), name, + "[] ="); + begin_scope(); + for (uint32_t i = 0; i < to_array_size_literal(type); ++i) + statement(name, "_", i, ","); + end_scope_decl(); + statement_no_indent(""); + } + + buffer_arrays.clear(); +} + +string CompilerMSL::compile() +{ + + options.vulkan_semantics = true; + options.es = false; + options.version = 450; + backend.null_pointer_literal = "nullptr"; + backend.float_literal_suffix = false; + backend.uint32_t_literal_suffix = true; + backend.int16_t_literal_suffix = ""; + backend.uint16_t_literal_suffix = ""; + backend.basic_int_type = "int"; + backend.basic_uint_type = "uint"; + backend.basic_int8_type = "char"; + backend.basic_uint8_type = "uchar"; + backend.basic_int16_type = "short"; + backend.basic_uint16_type = "ushort"; + backend.discard_literal = "discard_fragment()"; + 
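+ // As with the HLSL backend, these flags retarget the shared GLSL emitter. A
+ // hedged sketch of typical embedder configuration; set_msl_version() and the
+ // values shown are illustrative of the public API rather than taken from
+ // this patch:
+ //
+ //   spirv_cross::CompilerMSL msl(std::move(words));
+ //   spirv_cross::CompilerMSL::Options mopts;
+ //   mopts.set_msl_version(2, 1);   // target MSL 2.1
+ //   mopts.argument_buffers = true; // requires MSL 2.0+, enforced in compile()
+ //   msl.set_msl_options(mopts);
+ //   std::string metal = msl.compile();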
backend.demote_literal = "unsupported-demote"; + backend.boolean_mix_function = "select"; + backend.swizzle_is_function = false; + backend.shared_is_implied = false; + backend.use_initializer_list = true; + backend.use_typed_initializer_list = true; + backend.native_row_major_matrix = false; + backend.unsized_array_supported = false; + backend.can_declare_arrays_inline = false; + backend.can_return_array = true; + backend.allow_truncated_access_chain = true; + backend.array_is_value_type = true; + backend.comparison_image_samples_scalar = true; + backend.native_pointers = true; + backend.nonuniform_qualifier = ""; + backend.support_small_type_sampling_result = true; + backend.supports_empty_struct = true; + + capture_output_to_buffer = msl_options.capture_output_to_buffer; + is_rasterization_disabled = msl_options.disable_rasterization || capture_output_to_buffer; + + + for (auto &id : next_metal_resource_ids) + id = 0; + + fixup_type_alias(); + replace_illegal_names(); + + build_function_control_flow_graphs_and_analyze(); + update_active_builtins(); + analyze_image_and_sampler_usage(); + analyze_sampled_image_usage(); + analyze_interlocked_resource_usage(); + preprocess_op_codes(); + build_implicit_builtins(); + + fixup_image_load_store_access(); + + set_enabled_interface_variables(get_active_interface_variables()); + if (swizzle_buffer_id) + active_interface_variables.insert(swizzle_buffer_id); + if (buffer_size_buffer_id) + active_interface_variables.insert(buffer_size_buffer_id); + if (view_mask_buffer_id) + active_interface_variables.insert(view_mask_buffer_id); + if (dynamic_offsets_buffer_id) + active_interface_variables.insert(dynamic_offsets_buffer_id); + if (builtin_layer_id) + active_interface_variables.insert(builtin_layer_id); + if (builtin_dispatch_base_id && !msl_options.supports_msl_version(1, 2)) + active_interface_variables.insert(builtin_dispatch_base_id); + + + + qual_pos_var_name = ""; + stage_out_var_id = add_interface_block(StorageClassOutput); + patch_stage_out_var_id = add_interface_block(StorageClassOutput, true); + stage_in_var_id = add_interface_block(StorageClassInput); + if (get_execution_model() == ExecutionModelTessellationEvaluation) + patch_stage_in_var_id = add_interface_block(StorageClassInput, true); + + if (get_execution_model() == ExecutionModelTessellationControl) + stage_out_ptr_var_id = add_interface_block_pointer(stage_out_var_id, StorageClassOutput); + if (is_tessellation_shader()) + stage_in_ptr_var_id = add_interface_block_pointer(stage_in_var_id, StorageClassInput); + + + if (!stage_out_var_id) + is_rasterization_disabled = true; + + + localize_global_variables(); + extract_global_variables_from_functions(); + + + mark_packable_structs(); + reorder_type_alias(); + + + + fix_up_shader_inputs_outputs(); + + + + if (msl_options.argument_buffers) + { + if (!msl_options.supports_msl_version(2, 0)) + SPIRV_CROSS_THROW("Argument buffers can only be used with MSL 2.0 and up."); + analyze_argument_buffers(); + } + + uint32_t pass_count = 0; + do + { + if (pass_count >= 3) + SPIRV_CROSS_THROW("Over 3 compilation loops detected. 
Must be a bug!"); + + reset(); + + + next_metal_resource_index_buffer = 0; + next_metal_resource_index_texture = 0; + next_metal_resource_index_sampler = 0; + for (auto &id : next_metal_resource_ids) + id = 0; + + + buffer.reset(); + + emit_header(); + emit_custom_templates(); + emit_specialization_constants_and_structs(); + emit_resources(); + emit_custom_functions(); + emit_function(get(ir.default_entry_point), Bitset()); + + pass_count++; + } while (is_forcing_recompilation()); + + return buffer.str(); +} + + +void CompilerMSL::preprocess_op_codes() +{ + OpCodePreprocessor preproc(*this); + traverse_all_reachable_opcodes(get(ir.default_entry_point), preproc); + + suppress_missing_prototypes = preproc.suppress_missing_prototypes; + + if (preproc.uses_atomics) + { + add_header_line("#include "); + add_pragma_line("#pragma clang diagnostic ignored \"-Wunused-variable\""); + } + + + if (preproc.uses_resource_write) + is_rasterization_disabled = true; + + + + if (get_execution_model() == ExecutionModelTessellationControl) + { + is_rasterization_disabled = true; + capture_output_to_buffer = true; + } + + if (preproc.needs_subgroup_invocation_id) + needs_subgroup_invocation_id = true; +} + + + +void CompilerMSL::localize_global_variables() +{ + auto &entry_func = get(ir.default_entry_point); + auto iter = global_variables.begin(); + while (iter != global_variables.end()) + { + uint32_t v_id = *iter; + auto &var = get(v_id); + if (var.storage == StorageClassPrivate || var.storage == StorageClassWorkgroup) + { + if (!variable_is_lut(var)) + entry_func.add_local_variable(v_id); + iter = global_variables.erase(iter); + } + else + iter++; + } +} + + + +void CompilerMSL::extract_global_variables_from_functions() +{ + + unordered_set global_var_ids; + ir.for_each_typed_id([&](uint32_t, SPIRVariable &var) { + if (var.storage == StorageClassInput || var.storage == StorageClassOutput || + var.storage == StorageClassUniform || var.storage == StorageClassUniformConstant || + var.storage == StorageClassPushConstant || var.storage == StorageClassStorageBuffer) + { + global_var_ids.insert(var.self); + } + }); + + + auto &entry_func = get(ir.default_entry_point); + for (auto &var : entry_func.local_variables) + if (get(var).storage != StorageClassFunction) + global_var_ids.insert(var); + + std::set added_arg_ids; + unordered_set processed_func_ids; + extract_global_variables_from_function(ir.default_entry_point, added_arg_ids, global_var_ids, processed_func_ids); +} + + + + +void CompilerMSL::extract_global_variables_from_function(uint32_t func_id, std::set &added_arg_ids, + unordered_set &global_var_ids, + unordered_set &processed_func_ids) +{ + + if (processed_func_ids.find(func_id) != processed_func_ids.end()) + { + + added_arg_ids = function_global_vars[func_id]; + return; + } + + processed_func_ids.insert(func_id); + + auto &func = get(func_id); + + + for (auto block : func.blocks) + { + auto &b = get(block); + for (auto &i : b.ops) + { + auto ops = stream(i); + auto op = static_cast(i.op); + + switch (op) + { + case OpLoad: + case OpInBoundsAccessChain: + case OpAccessChain: + case OpPtrAccessChain: + case OpArrayLength: + { + uint32_t base_id = ops[2]; + if (global_var_ids.find(base_id) != global_var_ids.end()) + added_arg_ids.insert(base_id); + + + auto &type = get(ops[0]); + if (type.basetype == SPIRType::Image && type.image.dim == DimSubpassData && + (!msl_options.is_ios() || !msl_options.ios_use_framebuffer_fetch_subpasses)) + { + + assert(builtin_frag_coord_id != 0); + 
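+ // Unless iOS framebuffer fetch is in use, a subpass input is emulated as an
+ // ordinary texture read at the current fragment coordinate, conceptually:
+ //
+ //   // subpassLoad(img)  =>  img.read(uint2(gl_FragCoord.xy))
+ //
+ // so any function touching a DimSubpassData image also depends on
+ // gl_FragCoord, and the implicit builtin synthesized earlier is threaded
+ // through as an extra argument here.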
added_arg_ids.insert(builtin_frag_coord_id); + } + + break; + } + + case OpFunctionCall: + { + + for (uint32_t arg_idx = 3; arg_idx < i.length; arg_idx++) + { + uint32_t arg_id = ops[arg_idx]; + if (global_var_ids.find(arg_id) != global_var_ids.end()) + added_arg_ids.insert(arg_id); + } + + + uint32_t inner_func_id = ops[2]; + std::set inner_func_args; + extract_global_variables_from_function(inner_func_id, inner_func_args, global_var_ids, + processed_func_ids); + added_arg_ids.insert(inner_func_args.begin(), inner_func_args.end()); + break; + } + + case OpStore: + { + uint32_t base_id = ops[0]; + if (global_var_ids.find(base_id) != global_var_ids.end()) + added_arg_ids.insert(base_id); + break; + } + + case OpSelect: + { + uint32_t base_id = ops[3]; + if (global_var_ids.find(base_id) != global_var_ids.end()) + added_arg_ids.insert(base_id); + base_id = ops[4]; + if (global_var_ids.find(base_id) != global_var_ids.end()) + added_arg_ids.insert(base_id); + break; + } + + + case OpImageTexelPointer: + { + + uint32_t base_id = ops[2]; + auto *var = maybe_get_backing_variable(base_id); + if (var && atomic_image_vars.count(var->self)) + { + if (global_var_ids.find(base_id) != global_var_ids.end()) + added_arg_ids.insert(base_id); + } + break; + } + + default: + break; + } + + + + + } + } + + function_global_vars[func_id] = added_arg_ids; + + + if (func_id != ir.default_entry_point) + { + bool added_in = false; + bool added_out = false; + for (uint32_t arg_id : added_arg_ids) + { + auto &var = get(arg_id); + uint32_t type_id = var.basetype; + auto *p_type = &get(type_id); + BuiltIn bi_type = BuiltIn(get_decoration(arg_id, DecorationBuiltIn)); + + if (((is_tessellation_shader() && var.storage == StorageClassInput) || + (get_execution_model() == ExecutionModelTessellationControl && var.storage == StorageClassOutput)) && + !(has_decoration(arg_id, DecorationPatch) || is_patch_block(*p_type)) && + (!is_builtin_variable(var) || bi_type == BuiltInPosition || bi_type == BuiltInPointSize || + bi_type == BuiltInClipDistance || bi_type == BuiltInCullDistance || + p_type->basetype == SPIRType::Struct)) + { + + + + + std::string name; + if (var.storage == StorageClassInput) + { + if (added_in) + continue; + name = input_wg_var_name; + arg_id = stage_in_ptr_var_id; + added_in = true; + } + else if (var.storage == StorageClassOutput) + { + if (added_out) + continue; + name = "gl_out"; + arg_id = stage_out_ptr_var_id; + added_out = true; + } + type_id = get(arg_id).basetype; + uint32_t next_id = ir.increase_bound_by(1); + func.add_parameter(type_id, next_id, true); + set(next_id, type_id, StorageClassFunction, 0, arg_id); + + set_name(next_id, name); + } + else if (is_builtin_variable(var) && p_type->basetype == SPIRType::Struct) + { + + type_id = get_pointee_type_id(type_id); + p_type = &get(type_id); + + uint32_t mbr_idx = 0; + for (auto &mbr_type_id : p_type->member_types) + { + BuiltIn builtin = BuiltInMax; + bool is_builtin = is_member_builtin(*p_type, mbr_idx, &builtin); + if (is_builtin && has_active_builtin(builtin, var.storage)) + { + + uint32_t next_ids = ir.increase_bound_by(2); + uint32_t ptr_type_id = next_ids + 0; + uint32_t var_id = next_ids + 1; + + + + auto &ptr = set(ptr_type_id, get(mbr_type_id)); + ptr.self = mbr_type_id; + ptr.storage = var.storage; + ptr.pointer = true; + ptr.parent_type = mbr_type_id; + + func.add_parameter(mbr_type_id, var_id, true); + set(var_id, ptr_type_id, StorageClassFunction); + ir.meta[var_id].decoration = ir.meta[type_id].members[mbr_idx]; + } + mbr_idx++; + } + 
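+ // This branch flattens a builtin I/O block into one pointer parameter per
+ // active member, since MSL has no gl_PerVertex-style blocks. Conceptually
+ // (signature illustrative):
+ //
+ //   // SPIR-V: out gl_PerVertex { vec4 gl_Position; float gl_PointSize; };
+ //   // callee becomes:
+ //   //   void fn(..., thread float4 &gl_Position, thread float &gl_PointSize);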
} + else + { + uint32_t next_id = ir.increase_bound_by(1); + func.add_parameter(type_id, next_id, true); + set(next_id, type_id, StorageClassFunction, 0, arg_id); + + + set_name(arg_id, ensure_valid_name(to_name(arg_id), "v")); + ir.meta[next_id] = ir.meta[arg_id]; + } + } + } +} + + + +void CompilerMSL::mark_packable_structs() +{ + ir.for_each_typed_id([&](uint32_t, SPIRVariable &var) { + if (var.storage != StorageClassFunction && !is_hidden_variable(var)) + { + auto &type = this->get(var.basetype); + if (type.pointer && + (type.storage == StorageClassUniform || type.storage == StorageClassUniformConstant || + type.storage == StorageClassPushConstant || type.storage == StorageClassStorageBuffer) && + (has_decoration(type.self, DecorationBlock) || has_decoration(type.self, DecorationBufferBlock))) + mark_as_packable(type); + } + }); +} + + + +void CompilerMSL::mark_as_packable(SPIRType &type) +{ + + if (type.parent_type) + { + mark_as_packable(get(type.parent_type)); + return; + } + + if (type.basetype == SPIRType::Struct) + { + set_extended_decoration(type.self, SPIRVCrossDecorationBufferBlockRepacked); + + + uint32_t mbr_cnt = uint32_t(type.member_types.size()); + for (uint32_t mbr_idx = 0; mbr_idx < mbr_cnt; mbr_idx++) + { + uint32_t mbr_type_id = type.member_types[mbr_idx]; + auto &mbr_type = get(mbr_type_id); + mark_as_packable(mbr_type); + if (mbr_type.type_alias) + { + auto &mbr_type_alias = get(mbr_type.type_alias); + mark_as_packable(mbr_type_alias); + } + } + } +} + + +void CompilerMSL::mark_location_as_used_by_shader(uint32_t location, StorageClass storage) +{ + if ((get_execution_model() == ExecutionModelVertex || is_tessellation_shader()) && (storage == StorageClassInput)) + vtx_attrs_in_use.insert(location); +} + +uint32_t CompilerMSL::get_target_components_for_fragment_location(uint32_t location) const +{ + auto itr = fragment_output_components.find(location); + if (itr == end(fragment_output_components)) + return 4; + else + return itr->second; +} + +uint32_t CompilerMSL::build_extended_vector_type(uint32_t type_id, uint32_t components) +{ + uint32_t new_type_id = ir.increase_bound_by(1); + auto &type = set(new_type_id, get(type_id)); + type.vecsize = components; + type.self = new_type_id; + type.parent_type = type_id; + type.pointer = false; + + return new_type_id; +} + +void CompilerMSL::add_plain_variable_to_interface_block(StorageClass storage, const string &ib_var_ref, + SPIRType &ib_type, SPIRVariable &var, bool strip_array) +{ + bool is_builtin = is_builtin_variable(var); + BuiltIn builtin = BuiltIn(get_decoration(var.self, DecorationBuiltIn)); + bool is_flat = has_decoration(var.self, DecorationFlat); + bool is_noperspective = has_decoration(var.self, DecorationNoPerspective); + bool is_centroid = has_decoration(var.self, DecorationCentroid); + bool is_sample = has_decoration(var.self, DecorationSample); + + + uint32_t ib_mbr_idx = uint32_t(ib_type.member_types.size()); + uint32_t type_id = ensure_correct_builtin_type(var.basetype, builtin); + var.basetype = type_id; + + type_id = get_pointee_type_id(var.basetype); + if (strip_array && is_array(get(type_id))) + type_id = get(type_id).parent_type; + auto &type = get(type_id); + uint32_t target_components = 0; + uint32_t type_components = type.vecsize; + bool padded_output = false; + + + if (get_decoration_bitset(var.self).get(DecorationLocation) && msl_options.pad_fragment_output_components && + get_entry_point().model == ExecutionModelFragment && storage == StorageClassOutput) + { + uint32_t locn = 
get_decoration(var.self, DecorationLocation); + target_components = get_target_components_for_fragment_location(locn); + if (type_components < target_components) + { + + type_id = build_extended_vector_type(type_id, target_components); + padded_output = true; + } + } + + ib_type.member_types.push_back(type_id); + + + string mbr_name = ensure_valid_name(to_expression(var.self), "m"); + set_member_name(ib_type.self, ib_mbr_idx, mbr_name); + + + string qual_var_name = ib_var_ref + "." + mbr_name; + auto &entry_func = get(ir.default_entry_point); + + if (padded_output) + { + entry_func.add_local_variable(var.self); + vars_needing_early_declaration.push_back(var.self); + + entry_func.fixup_hooks_out.push_back([=, &var]() { + SPIRType &padded_type = this->get(type_id); + statement(qual_var_name, " = ", remap_swizzle(padded_type, type_components, to_name(var.self)), ";"); + }); + } + else if (!strip_array) + ir.meta[var.self].decoration.qualified_alias = qual_var_name; + + if (var.storage == StorageClassOutput && var.initializer != ID(0)) + { + entry_func.fixup_hooks_in.push_back( + [=, &var]() { statement(qual_var_name, " = ", to_expression(var.initializer), ";"); }); + } + + + if (get_decoration_bitset(var.self).get(DecorationLocation)) + { + uint32_t locn = get_decoration(var.self, DecorationLocation); + if (storage == StorageClassInput && (get_execution_model() == ExecutionModelVertex || is_tessellation_shader())) + { + type_id = ensure_correct_attribute_type(var.basetype, locn); + var.basetype = type_id; + type_id = get_pointee_type_id(type_id); + if (strip_array && is_array(get(type_id))) + type_id = get(type_id).parent_type; + ib_type.member_types[ib_mbr_idx] = type_id; + } + set_member_decoration(ib_type.self, ib_mbr_idx, DecorationLocation, locn); + mark_location_as_used_by_shader(locn, storage); + } + else if (is_builtin && is_tessellation_shader() && vtx_attrs_by_builtin.count(builtin)) + { + uint32_t locn = vtx_attrs_by_builtin[builtin].location; + set_member_decoration(ib_type.self, ib_mbr_idx, DecorationLocation, locn); + mark_location_as_used_by_shader(locn, storage); + } + + if (get_decoration_bitset(var.self).get(DecorationComponent)) + { + uint32_t comp = get_decoration(var.self, DecorationComponent); + set_member_decoration(ib_type.self, ib_mbr_idx, DecorationComponent, comp); + } + + if (get_decoration_bitset(var.self).get(DecorationIndex)) + { + uint32_t index = get_decoration(var.self, DecorationIndex); + set_member_decoration(ib_type.self, ib_mbr_idx, DecorationIndex, index); + } + + + if (is_builtin) + { + set_member_decoration(ib_type.self, ib_mbr_idx, DecorationBuiltIn, builtin); + if (builtin == BuiltInPosition && storage == StorageClassOutput) + qual_pos_var_name = qual_var_name; + } + + + if (is_flat) + set_member_decoration(ib_type.self, ib_mbr_idx, DecorationFlat); + if (is_noperspective) + set_member_decoration(ib_type.self, ib_mbr_idx, DecorationNoPerspective); + if (is_centroid) + set_member_decoration(ib_type.self, ib_mbr_idx, DecorationCentroid); + if (is_sample) + set_member_decoration(ib_type.self, ib_mbr_idx, DecorationSample); + + set_extended_member_decoration(ib_type.self, ib_mbr_idx, SPIRVCrossDecorationInterfaceOrigID, var.self); +} + +void CompilerMSL::add_composite_variable_to_interface_block(StorageClass storage, const string &ib_var_ref, + SPIRType &ib_type, SPIRVariable &var, bool strip_array) +{ + auto &entry_func = get(ir.default_entry_point); + auto &var_type = strip_array ? 
get_variable_element_type(var) : get_variable_data_type(var); + uint32_t elem_cnt = 0; + + if (is_matrix(var_type)) + { + if (is_array(var_type)) + SPIRV_CROSS_THROW("MSL cannot emit arrays-of-matrices in input and output variables."); + + elem_cnt = var_type.columns; + } + else if (is_array(var_type)) + { + if (var_type.array.size() != 1) + SPIRV_CROSS_THROW("MSL cannot emit arrays-of-arrays in input and output variables."); + + elem_cnt = to_array_size_literal(var_type); + } + + bool is_builtin = is_builtin_variable(var); + BuiltIn builtin = BuiltIn(get_decoration(var.self, DecorationBuiltIn)); + bool is_flat = has_decoration(var.self, DecorationFlat); + bool is_noperspective = has_decoration(var.self, DecorationNoPerspective); + bool is_centroid = has_decoration(var.self, DecorationCentroid); + bool is_sample = has_decoration(var.self, DecorationSample); + + auto *usable_type = &var_type; + if (usable_type->pointer) + usable_type = &get(usable_type->parent_type); + while (is_array(*usable_type) || is_matrix(*usable_type)) + usable_type = &get(usable_type->parent_type); + + + if (is_builtin) + set_name(var.self, builtin_to_glsl(builtin, StorageClassFunction)); + + + if (!strip_array) + { + entry_func.add_local_variable(var.self); + + vars_needing_early_declaration.push_back(var.self); + } + + for (uint32_t i = 0; i < elem_cnt; i++) + { + + uint32_t ib_mbr_idx = uint32_t(ib_type.member_types.size()); + + uint32_t target_components = 0; + bool padded_output = false; + uint32_t type_id = usable_type->self; + + + if (get_decoration_bitset(var.self).get(DecorationLocation) && msl_options.pad_fragment_output_components && + get_entry_point().model == ExecutionModelFragment && storage == StorageClassOutput) + { + uint32_t locn = get_decoration(var.self, DecorationLocation) + i; + target_components = get_target_components_for_fragment_location(locn); + if (usable_type->vecsize < target_components) + { + + type_id = build_extended_vector_type(usable_type->self, target_components); + padded_output = true; + } + } + + ib_type.member_types.push_back(get_pointee_type_id(type_id)); + + + string mbr_name = ensure_valid_name(join(to_expression(var.self), "_", i), "m"); + set_member_name(ib_type.self, ib_mbr_idx, mbr_name); + + + if (get_decoration_bitset(var.self).get(DecorationLocation)) + { + uint32_t locn = get_decoration(var.self, DecorationLocation) + i; + if (storage == StorageClassInput && + (get_execution_model() == ExecutionModelVertex || is_tessellation_shader())) + { + var.basetype = ensure_correct_attribute_type(var.basetype, locn); + uint32_t mbr_type_id = ensure_correct_attribute_type(usable_type->self, locn); + ib_type.member_types[ib_mbr_idx] = mbr_type_id; + } + set_member_decoration(ib_type.self, ib_mbr_idx, DecorationLocation, locn); + mark_location_as_used_by_shader(locn, storage); + } + else if (is_builtin && is_tessellation_shader() && vtx_attrs_by_builtin.count(builtin)) + { + uint32_t locn = vtx_attrs_by_builtin[builtin].location + i; + set_member_decoration(ib_type.self, ib_mbr_idx, DecorationLocation, locn); + mark_location_as_used_by_shader(locn, storage); + } + + if (get_decoration_bitset(var.self).get(DecorationIndex)) + { + uint32_t index = get_decoration(var.self, DecorationIndex); + set_member_decoration(ib_type.self, ib_mbr_idx, DecorationIndex, index); + } + + + if (is_flat) + set_member_decoration(ib_type.self, ib_mbr_idx, DecorationFlat); + if (is_noperspective) + set_member_decoration(ib_type.self, ib_mbr_idx, DecorationNoPerspective); + if (is_centroid) + 
set_member_decoration(ib_type.self, ib_mbr_idx, DecorationCentroid);
+		if (is_sample)
+			set_member_decoration(ib_type.self, ib_mbr_idx, DecorationSample);
+
+		set_extended_member_decoration(ib_type.self, ib_mbr_idx, SPIRVCrossDecorationInterfaceOrigID, var.self);
+
+		// Copy each element between the interface block and the local variable.
+		if (!strip_array)
+		{
+			switch (storage)
+			{
+			case StorageClassInput:
+				entry_func.fixup_hooks_in.push_back(
+				    [=, &var]() { statement(to_name(var.self), "[", i, "] = ", ib_var_ref, ".", mbr_name, ";"); });
+				break;
+
+			case StorageClassOutput:
+				entry_func.fixup_hooks_out.push_back([=, &var]() {
+					if (padded_output)
+					{
+						auto &padded_type = this->get<SPIRType>(type_id);
+						statement(
+						    ib_var_ref, ".", mbr_name, " = ",
+						    remap_swizzle(padded_type, usable_type->vecsize, join(to_name(var.self), "[", i, "]")),
+						    ";");
+					}
+					else
+						statement(ib_var_ref, ".", mbr_name, " = ", to_name(var.self), "[", i, "];");
+				});
+				break;
+
+			default:
+				break;
+			}
+		}
+	}
+}
+
+uint32_t CompilerMSL::get_accumulated_member_location(const SPIRVariable &var, uint32_t mbr_idx, bool strip_array)
+{
+	auto &type = strip_array ? get_variable_element_type(var) : get_variable_data_type(var);
+	uint32_t location = get_decoration(var.self, DecorationLocation);
+
+	for (uint32_t i = 0; i < mbr_idx; i++)
+	{
+		auto &mbr_type = get<SPIRType>(type.member_types[i]);
+
+		// A member may carry its own location decoration; restart counting from there.
+		if (has_member_decoration(type.self, i, DecorationLocation))
+			location = get_member_decoration(type.self, i, DecorationLocation);
+
+		uint32_t location_count = 1;
+
+		if (mbr_type.columns > 1)
+			location_count = mbr_type.columns;
+
+		if (!mbr_type.array.empty())
+			for (uint32_t j = 0; j < uint32_t(mbr_type.array.size()); j++)
+				location_count *= to_array_size_literal(mbr_type, j);
+
+		location += location_count;
+	}
+
+	return location;
+}
+
+void CompilerMSL::add_composite_member_variable_to_interface_block(StorageClass storage, const string &ib_var_ref,
+                                                                   SPIRType &ib_type, SPIRVariable &var,
+                                                                   uint32_t mbr_idx, bool strip_array)
+{
+	auto &entry_func = get<SPIRFunction>(ir.default_entry_point);
+	auto &var_type = strip_array ?
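+	// Location accumulation rule used by get_accumulated_member_location() above: every
+	// earlier member advances the running location by columns * (product of its array
+	// dimensions). For a hypothetical "struct { float4x4 a; float2 b[3]; float c; }"
+	// placed at location 2, the members land at a = 2, b = 6 (2 + 4), c = 9 (6 + 3).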
get_variable_element_type(var) : get_variable_data_type(var); + + BuiltIn builtin; + bool is_builtin = is_member_builtin(var_type, mbr_idx, &builtin); + bool is_flat = + has_member_decoration(var_type.self, mbr_idx, DecorationFlat) || has_decoration(var.self, DecorationFlat); + bool is_noperspective = has_member_decoration(var_type.self, mbr_idx, DecorationNoPerspective) || + has_decoration(var.self, DecorationNoPerspective); + bool is_centroid = has_member_decoration(var_type.self, mbr_idx, DecorationCentroid) || + has_decoration(var.self, DecorationCentroid); + bool is_sample = + has_member_decoration(var_type.self, mbr_idx, DecorationSample) || has_decoration(var.self, DecorationSample); + + uint32_t mbr_type_id = var_type.member_types[mbr_idx]; + auto &mbr_type = get(mbr_type_id); + uint32_t elem_cnt = 0; + + if (is_matrix(mbr_type)) + { + if (is_array(mbr_type)) + SPIRV_CROSS_THROW("MSL cannot emit arrays-of-matrices in input and output variables."); + + elem_cnt = mbr_type.columns; + } + else if (is_array(mbr_type)) + { + if (mbr_type.array.size() != 1) + SPIRV_CROSS_THROW("MSL cannot emit arrays-of-arrays in input and output variables."); + + elem_cnt = to_array_size_literal(mbr_type); + } + + auto *usable_type = &mbr_type; + if (usable_type->pointer) + usable_type = &get(usable_type->parent_type); + while (is_array(*usable_type) || is_matrix(*usable_type)) + usable_type = &get(usable_type->parent_type); + + for (uint32_t i = 0; i < elem_cnt; i++) + { + + uint32_t ib_mbr_idx = uint32_t(ib_type.member_types.size()); + ib_type.member_types.push_back(usable_type->self); + + + string mbr_name = ensure_valid_name(join(to_qualified_member_name(var_type, mbr_idx), "_", i), "m"); + set_member_name(ib_type.self, ib_mbr_idx, mbr_name); + + if (has_member_decoration(var_type.self, mbr_idx, DecorationLocation)) + { + uint32_t locn = get_member_decoration(var_type.self, mbr_idx, DecorationLocation) + i; + set_member_decoration(ib_type.self, ib_mbr_idx, DecorationLocation, locn); + mark_location_as_used_by_shader(locn, storage); + } + else if (has_decoration(var.self, DecorationLocation)) + { + uint32_t locn = get_accumulated_member_location(var, mbr_idx, strip_array) + i; + set_member_decoration(ib_type.self, ib_mbr_idx, DecorationLocation, locn); + mark_location_as_used_by_shader(locn, storage); + } + else if (is_builtin && is_tessellation_shader() && vtx_attrs_by_builtin.count(builtin)) + { + uint32_t locn = vtx_attrs_by_builtin[builtin].location + i; + set_member_decoration(ib_type.self, ib_mbr_idx, DecorationLocation, locn); + mark_location_as_used_by_shader(locn, storage); + } + + if (has_member_decoration(var_type.self, mbr_idx, DecorationComponent)) + SPIRV_CROSS_THROW("DecorationComponent on matrices and arrays make little sense."); + + + if (is_flat) + set_member_decoration(ib_type.self, ib_mbr_idx, DecorationFlat); + if (is_noperspective) + set_member_decoration(ib_type.self, ib_mbr_idx, DecorationNoPerspective); + if (is_centroid) + set_member_decoration(ib_type.self, ib_mbr_idx, DecorationCentroid); + if (is_sample) + set_member_decoration(ib_type.self, ib_mbr_idx, DecorationSample); + + set_extended_member_decoration(ib_type.self, ib_mbr_idx, SPIRVCrossDecorationInterfaceOrigID, var.self); + set_extended_member_decoration(ib_type.self, ib_mbr_idx, SPIRVCrossDecorationInterfaceMemberIndex, mbr_idx); + + + if (!strip_array) + { + switch (storage) + { + case StorageClassInput: + entry_func.fixup_hooks_in.push_back([=, &var, &var_type]() { + statement(to_name(var.self), ".", 
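+	// Struct members that are themselves matrices or arrays are likewise split per
+	// element; the fixup hooks here copy each piece between the interface block and the
+	// struct. Hedged sketch for a hypothetical input member "float4 weights[2]" of a
+	// variable named "blk":
+	/*
+	blk.weights[0] = in.blk_weights_0;   // StorageClassInput hook, i = 0
+	blk.weights[1] = in.blk_weights_1;   // StorageClassInput hook, i = 1
+	*/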
to_member_name(var_type, mbr_idx), "[", i, "] = ", ib_var_ref, + ".", mbr_name, ";"); + }); + break; + + case StorageClassOutput: + entry_func.fixup_hooks_out.push_back([=, &var, &var_type]() { + statement(ib_var_ref, ".", mbr_name, " = ", to_name(var.self), ".", + to_member_name(var_type, mbr_idx), "[", i, "];"); + }); + break; + + default: + break; + } + } + } +} + +void CompilerMSL::add_plain_member_variable_to_interface_block(StorageClass storage, const string &ib_var_ref, + SPIRType &ib_type, SPIRVariable &var, uint32_t mbr_idx, + bool strip_array) +{ + auto &var_type = strip_array ? get_variable_element_type(var) : get_variable_data_type(var); + auto &entry_func = get(ir.default_entry_point); + + BuiltIn builtin = BuiltInMax; + bool is_builtin = is_member_builtin(var_type, mbr_idx, &builtin); + bool is_flat = + has_member_decoration(var_type.self, mbr_idx, DecorationFlat) || has_decoration(var.self, DecorationFlat); + bool is_noperspective = has_member_decoration(var_type.self, mbr_idx, DecorationNoPerspective) || + has_decoration(var.self, DecorationNoPerspective); + bool is_centroid = has_member_decoration(var_type.self, mbr_idx, DecorationCentroid) || + has_decoration(var.self, DecorationCentroid); + bool is_sample = + has_member_decoration(var_type.self, mbr_idx, DecorationSample) || has_decoration(var.self, DecorationSample); + + + uint32_t mbr_type_id = var_type.member_types[mbr_idx]; + uint32_t ib_mbr_idx = uint32_t(ib_type.member_types.size()); + mbr_type_id = ensure_correct_builtin_type(mbr_type_id, builtin); + var_type.member_types[mbr_idx] = mbr_type_id; + ib_type.member_types.push_back(mbr_type_id); + + + string mbr_name = ensure_valid_name(to_qualified_member_name(var_type, mbr_idx), "m"); + set_member_name(ib_type.self, ib_mbr_idx, mbr_name); + + + string qual_var_name = ib_var_ref + "." 
+ mbr_name; + + if (is_builtin && !strip_array) + { + + + set_member_qualified_name(var_type.self, mbr_idx, qual_var_name); + } + else if (!strip_array) + { + + switch (storage) + { + case StorageClassInput: + entry_func.fixup_hooks_in.push_back([=, &var, &var_type]() { + statement(to_name(var.self), ".", to_member_name(var_type, mbr_idx), " = ", qual_var_name, ";"); + }); + break; + + case StorageClassOutput: + entry_func.fixup_hooks_out.push_back([=, &var, &var_type]() { + statement(qual_var_name, " = ", to_name(var.self), ".", to_member_name(var_type, mbr_idx), ";"); + }); + break; + + default: + break; + } + } + + + if (has_member_decoration(var_type.self, mbr_idx, DecorationLocation)) + { + uint32_t locn = get_member_decoration(var_type.self, mbr_idx, DecorationLocation); + if (storage == StorageClassInput && (get_execution_model() == ExecutionModelVertex || is_tessellation_shader())) + { + mbr_type_id = ensure_correct_attribute_type(mbr_type_id, locn); + var_type.member_types[mbr_idx] = mbr_type_id; + ib_type.member_types[ib_mbr_idx] = mbr_type_id; + } + set_member_decoration(ib_type.self, ib_mbr_idx, DecorationLocation, locn); + mark_location_as_used_by_shader(locn, storage); + } + else if (has_decoration(var.self, DecorationLocation)) + { + + + uint32_t locn = get_accumulated_member_location(var, mbr_idx, strip_array); + if (storage == StorageClassInput && (get_execution_model() == ExecutionModelVertex || is_tessellation_shader())) + { + mbr_type_id = ensure_correct_attribute_type(mbr_type_id, locn); + var_type.member_types[mbr_idx] = mbr_type_id; + ib_type.member_types[ib_mbr_idx] = mbr_type_id; + } + set_member_decoration(ib_type.self, ib_mbr_idx, DecorationLocation, locn); + mark_location_as_used_by_shader(locn, storage); + } + else if (is_builtin && is_tessellation_shader() && vtx_attrs_by_builtin.count(builtin)) + { + uint32_t locn = 0; + auto builtin_itr = vtx_attrs_by_builtin.find(builtin); + if (builtin_itr != end(vtx_attrs_by_builtin)) + locn = builtin_itr->second.location; + set_member_decoration(ib_type.self, ib_mbr_idx, DecorationLocation, locn); + mark_location_as_used_by_shader(locn, storage); + } + + + if (has_member_decoration(var_type.self, mbr_idx, DecorationComponent)) + { + uint32_t comp = get_member_decoration(var_type.self, mbr_idx, DecorationComponent); + set_member_decoration(ib_type.self, ib_mbr_idx, DecorationComponent, comp); + } + + + if (is_builtin) + { + set_member_decoration(ib_type.self, ib_mbr_idx, DecorationBuiltIn, builtin); + if (builtin == BuiltInPosition && storage == StorageClassOutput) + qual_pos_var_name = qual_var_name; + } + + + if (is_flat) + set_member_decoration(ib_type.self, ib_mbr_idx, DecorationFlat); + if (is_noperspective) + set_member_decoration(ib_type.self, ib_mbr_idx, DecorationNoPerspective); + if (is_centroid) + set_member_decoration(ib_type.self, ib_mbr_idx, DecorationCentroid); + if (is_sample) + set_member_decoration(ib_type.self, ib_mbr_idx, DecorationSample); + + set_extended_member_decoration(ib_type.self, ib_mbr_idx, SPIRVCrossDecorationInterfaceOrigID, var.self); + set_extended_member_decoration(ib_type.self, ib_mbr_idx, SPIRVCrossDecorationInterfaceMemberIndex, mbr_idx); +} + + + + + + +void CompilerMSL::add_tess_level_input_to_interface_block(const std::string &ib_var_ref, SPIRType &ib_type, + SPIRVariable &var) +{ + auto &entry_func = get(ir.default_entry_point); + auto &var_type = get_variable_element_type(var); + + BuiltIn builtin = BuiltIn(get_decoration(var.self, DecorationBuiltIn)); + + + set_name(var.self, 
builtin_to_glsl(builtin, StorageClassFunction)); + + if (get_entry_point().flags.get(ExecutionModeTriangles)) + { + + + + entry_func.add_local_variable(var.self); + vars_needing_early_declaration.push_back(var.self); + + string mbr_name = "gl_TessLevel"; + + + if (!added_builtin_tess_level) + { + + uint32_t ib_mbr_idx = uint32_t(ib_type.member_types.size()); + + uint32_t type_id = build_extended_vector_type(var_type.self, 4); + + ib_type.member_types.push_back(type_id); + + + set_member_name(ib_type.self, ib_mbr_idx, mbr_name); + + + if (get_decoration_bitset(var.self).get(DecorationLocation)) + { + uint32_t locn = get_decoration(var.self, DecorationLocation); + set_member_decoration(ib_type.self, ib_mbr_idx, DecorationLocation, locn); + mark_location_as_used_by_shader(locn, StorageClassInput); + } + else if (vtx_attrs_by_builtin.count(builtin)) + { + uint32_t locn = vtx_attrs_by_builtin[builtin].location; + set_member_decoration(ib_type.self, ib_mbr_idx, DecorationLocation, locn); + mark_location_as_used_by_shader(locn, StorageClassInput); + } + + added_builtin_tess_level = true; + } + + switch (builtin) + { + case BuiltInTessLevelOuter: + entry_func.fixup_hooks_in.push_back([=, &var]() { + statement(to_name(var.self), "[0] = ", ib_var_ref, ".", mbr_name, ".x;"); + statement(to_name(var.self), "[1] = ", ib_var_ref, ".", mbr_name, ".y;"); + statement(to_name(var.self), "[2] = ", ib_var_ref, ".", mbr_name, ".z;"); + }); + break; + + case BuiltInTessLevelInner: + entry_func.fixup_hooks_in.push_back( + [=, &var]() { statement(to_name(var.self), "[0] = ", ib_var_ref, ".", mbr_name, ".w;"); }); + break; + + default: + assert(false); + break; + } + } + else + { + + uint32_t ib_mbr_idx = uint32_t(ib_type.member_types.size()); + + uint32_t type_id = build_extended_vector_type(var_type.self, builtin == BuiltInTessLevelOuter ? 4 : 2); + + uint32_t ptr_type_id = ir.increase_bound_by(1); + auto &new_var_type = set(ptr_type_id, get(type_id)); + new_var_type.pointer = true; + new_var_type.storage = StorageClassInput; + new_var_type.parent_type = type_id; + var.basetype = ptr_type_id; + + ib_type.member_types.push_back(type_id); + + + string mbr_name = to_expression(var.self); + set_member_name(ib_type.self, ib_mbr_idx, mbr_name); + + + + string qual_var_name = ib_var_ref + "." + mbr_name; + ir.meta[var.self].decoration.qualified_alias = qual_var_name; + + if (get_decoration_bitset(var.self).get(DecorationLocation)) + { + uint32_t locn = get_decoration(var.self, DecorationLocation); + set_member_decoration(ib_type.self, ib_mbr_idx, DecorationLocation, locn); + mark_location_as_used_by_shader(locn, StorageClassInput); + } + else if (vtx_attrs_by_builtin.count(builtin)) + { + uint32_t locn = vtx_attrs_by_builtin[builtin].location; + set_member_decoration(ib_type.self, ib_mbr_idx, DecorationLocation, locn); + mark_location_as_used_by_shader(locn, StorageClassInput); + } + } +} + +void CompilerMSL::add_variable_to_interface_block(StorageClass storage, const string &ib_var_ref, SPIRType &ib_type, + SPIRVariable &var, bool strip_array) +{ + auto &entry_func = get(ir.default_entry_point); + + + + auto &var_type = strip_array ? 
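+	// With ExecutionModeTriangles both tessellation-factor builtins fit in the single
+	// 4-component "gl_TessLevel" member created above: the three outer levels map to
+	// .xyz and the one inner level to .w. Sketch of the emitted fixup code, assuming
+	// the default patch input block name:
+	/*
+	gl_TessLevelOuter[0] = patchIn.gl_TessLevel.x;
+	gl_TessLevelOuter[1] = patchIn.gl_TessLevel.y;
+	gl_TessLevelOuter[2] = patchIn.gl_TessLevel.z;
+	gl_TessLevelInner[0] = patchIn.gl_TessLevel.w;
+	*/
+	// Quad domains instead keep two separate members (a 4-vector and a 2-vector) that
+	// are read directly through their qualified alias.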
get_variable_element_type(var) : get_variable_data_type(var); + bool is_builtin = is_builtin_variable(var); + auto builtin = BuiltIn(get_decoration(var.self, DecorationBuiltIn)); + + if (var_type.basetype == SPIRType::Struct) + { + if (!is_builtin_type(var_type) && (!capture_output_to_buffer || storage == StorageClassInput) && !strip_array) + { + + + + + + + entry_func.add_local_variable(var.self); + vars_needing_early_declaration.push_back(var.self); + } + + if (capture_output_to_buffer && storage != StorageClassInput && !has_decoration(var_type.self, DecorationBlock)) + { + + + + + + add_plain_variable_to_interface_block(storage, ib_var_ref, ib_type, var, strip_array); + } + else + { + + for (uint32_t mbr_idx = 0; mbr_idx < uint32_t(var_type.member_types.size()); mbr_idx++) + { + builtin = BuiltInMax; + is_builtin = is_member_builtin(var_type, mbr_idx, &builtin); + auto &mbr_type = get(var_type.member_types[mbr_idx]); + + if (!is_builtin || has_active_builtin(builtin, storage)) + { + if ((!is_builtin || + (storage == StorageClassInput && get_execution_model() != ExecutionModelFragment)) && + (storage == StorageClassInput || storage == StorageClassOutput) && + (is_matrix(mbr_type) || is_array(mbr_type))) + { + add_composite_member_variable_to_interface_block(storage, ib_var_ref, ib_type, var, mbr_idx, + strip_array); + } + else + { + add_plain_member_variable_to_interface_block(storage, ib_var_ref, ib_type, var, mbr_idx, + strip_array); + } + } + } + } + } + else if (get_execution_model() == ExecutionModelTessellationEvaluation && storage == StorageClassInput && + !strip_array && is_builtin && (builtin == BuiltInTessLevelOuter || builtin == BuiltInTessLevelInner)) + { + add_tess_level_input_to_interface_block(ib_var_ref, ib_type, var); + } + else if (var_type.basetype == SPIRType::Boolean || var_type.basetype == SPIRType::Char || + type_is_integral(var_type) || type_is_floating_point(var_type) || var_type.basetype == SPIRType::Boolean) + { + if (!is_builtin || has_active_builtin(builtin, storage)) + { + + if ((!is_builtin || (storage == StorageClassInput && get_execution_model() != ExecutionModelFragment)) && + (storage == StorageClassInput || (storage == StorageClassOutput && !capture_output_to_buffer)) && + (is_matrix(var_type) || is_array(var_type))) + { + add_composite_variable_to_interface_block(storage, ib_var_ref, ib_type, var, strip_array); + } + else + { + add_plain_variable_to_interface_block(storage, ib_var_ref, ib_type, var, strip_array); + } + } + } +} + + + +void CompilerMSL::fix_up_interface_member_indices(StorageClass storage, uint32_t ib_type_id) +{ + + + + if (get_execution_model() != ExecutionModelTessellationControl && + !(get_execution_model() == ExecutionModelTessellationEvaluation && storage == StorageClassInput)) + return; + + auto mbr_cnt = uint32_t(ir.meta[ib_type_id].members.size()); + for (uint32_t i = 0; i < mbr_cnt; i++) + { + uint32_t var_id = get_extended_member_decoration(ib_type_id, i, SPIRVCrossDecorationInterfaceOrigID); + if (!var_id) + continue; + auto &var = get(var_id); + + auto &type = get_variable_element_type(var); + if (storage == StorageClassInput && type.basetype == SPIRType::Struct) + { + uint32_t mbr_idx = get_extended_member_decoration(ib_type_id, i, SPIRVCrossDecorationInterfaceMemberIndex); + + + + if (!has_extended_member_decoration(var_id, mbr_idx, SPIRVCrossDecorationInterfaceMemberIndex)) + set_extended_member_decoration(var_id, mbr_idx, SPIRVCrossDecorationInterfaceMemberIndex, i); + } + else + { + + + if 
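+	// Summary of the dispatch in add_variable_to_interface_block() above:
+	//   - struct I/O           -> per-member handling (composite members split per
+	//                             element, plain members forwarded as-is);
+	//   - TessLevelInner/Outer -> add_tess_level_input_to_interface_block();
+	//   - matrices/arrays      -> add_composite_variable_to_interface_block();
+	//   - scalars/vectors      -> add_plain_variable_to_interface_block().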
(!has_extended_decoration(var_id, SPIRVCrossDecorationInterfaceMemberIndex)) + set_extended_decoration(var_id, SPIRVCrossDecorationInterfaceMemberIndex, i); + } + } +} + + + +uint32_t CompilerMSL::add_interface_block(StorageClass storage, bool patch) +{ + + SmallVector vars; + bool incl_builtins = storage == StorageClassOutput || is_tessellation_shader(); + bool has_seen_barycentric = false; + + ir.for_each_typed_id([&](uint32_t var_id, SPIRVariable &var) { + if (var.storage != storage) + return; + + auto &type = this->get(var.basetype); + + bool is_builtin = is_builtin_variable(var); + auto bi_type = BuiltIn(get_decoration(var_id, DecorationBuiltIn)); + + + bool is_interface_block_builtin = + (bi_type == BuiltInPosition || bi_type == BuiltInPointSize || bi_type == BuiltInClipDistance || + bi_type == BuiltInCullDistance || bi_type == BuiltInLayer || bi_type == BuiltInViewportIndex || + bi_type == BuiltInBaryCoordNV || bi_type == BuiltInBaryCoordNoPerspNV || bi_type == BuiltInFragDepth || + bi_type == BuiltInFragStencilRefEXT || bi_type == BuiltInSampleMask) || + (get_execution_model() == ExecutionModelTessellationEvaluation && + (bi_type == BuiltInTessLevelOuter || bi_type == BuiltInTessLevelInner)); + + bool is_active = interface_variable_exists_in_entry_point(var.self); + if (is_builtin && is_active) + { + + is_active = has_active_builtin(bi_type, storage); + } + + bool filter_patch_decoration = (has_decoration(var_id, DecorationPatch) || is_patch_block(type)) == patch; + + bool hidden = is_hidden_variable(var, incl_builtins); + + if (is_active && (bi_type == BuiltInBaryCoordNV || bi_type == BuiltInBaryCoordNoPerspNV)) + { + if (has_seen_barycentric) + SPIRV_CROSS_THROW("Cannot declare both BaryCoordNV and BaryCoordNoPerspNV in same shader in MSL."); + has_seen_barycentric = true; + hidden = false; + } + + if (is_active && !hidden && type.pointer && filter_patch_decoration && + (!is_builtin || is_interface_block_builtin)) + { + vars.push_back(&var); + } + }); + + + + + if (vars.empty() && !(storage == StorageClassInput && patch && stage_in_var_id)) + return 0; + + + + + uint32_t next_id = ir.increase_bound_by(3); + uint32_t ib_type_id = next_id++; + auto &ib_type = set(ib_type_id); + ib_type.basetype = SPIRType::Struct; + ib_type.storage = storage; + set_decoration(ib_type_id, DecorationBlock); + + uint32_t ib_var_id = next_id++; + auto &var = set(ib_var_id, ib_type_id, storage, 0); + var.initializer = next_id++; + + string ib_var_ref; + auto &entry_func = get(ir.default_entry_point); + switch (storage) + { + case StorageClassInput: + ib_var_ref = patch ? patch_stage_in_var_name : stage_in_var_name; + if (get_execution_model() == ExecutionModelTessellationControl) + { + + + entry_func.fixup_hooks_in.push_back([=]() { + + statement("if (", to_expression(builtin_invocation_id_id), " < ", "spvIndirectParams[0])"); + statement(" ", input_wg_var_name, "[", to_expression(builtin_invocation_id_id), "] = ", ib_var_ref, + ";"); + statement("threadgroup_barrier(mem_flags::mem_threadgroup);"); + statement("if (", to_expression(builtin_invocation_id_id), " >= ", get_entry_point().output_vertices, + ")"); + statement(" return;"); + }); + } + break; + + case StorageClassOutput: + { + ib_var_ref = patch ? patch_stage_out_var_name : stage_out_var_name; + + + + + + bool ep_should_return_output = !get_is_rasterization_disabled(); + uint32_t rtn_id = ep_should_return_output ? 
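+	// For tessellation control inputs the stage-in data is staged through threadgroup
+	// memory so every invocation can read every control point. Sketch of the MSL the
+	// hook above emits, assuming default names (spvIndirectParams[0] = patch size):
+	/*
+	if (gl_InvocationID < spvIndirectParams[0])
+	    gl_in[gl_InvocationID] = in;
+	threadgroup_barrier(mem_flags::mem_threadgroup);
+	if (gl_InvocationID >= 3)   // output_vertices for this hypothetical patch
+	    return;
+	*/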
ib_var_id : 0; + if (!capture_output_to_buffer) + { + entry_func.add_local_variable(ib_var_id); + for (auto &blk_id : entry_func.blocks) + { + auto &blk = get(blk_id); + if (blk.terminator == SPIRBlock::Return) + blk.return_value = rtn_id; + } + vars_needing_early_declaration.push_back(ib_var_id); + } + else + { + switch (get_execution_model()) + { + case ExecutionModelVertex: + case ExecutionModelTessellationEvaluation: + + + + + entry_func.fixup_hooks_in.push_back([=]() { + if (stage_out_var_id) + { + + + + if (msl_options.enable_base_index_zero) + { + statement("device ", to_name(ir.default_entry_point), "_", ib_var_ref, "& ", ib_var_ref, + " = ", output_buffer_var_name, "[", to_expression(builtin_instance_idx_id), + " * spvIndirectParams[0] + ", to_expression(builtin_vertex_idx_id), "];"); + } + else + { + statement("device ", to_name(ir.default_entry_point), "_", ib_var_ref, "& ", ib_var_ref, + " = ", output_buffer_var_name, "[(", to_expression(builtin_instance_idx_id), + " - ", to_expression(builtin_base_instance_id), ") * spvIndirectParams[0] + ", + to_expression(builtin_vertex_idx_id), " - ", + to_expression(builtin_base_vertex_id), "];"); + } + } + }); + break; + case ExecutionModelTessellationControl: + if (patch) + entry_func.fixup_hooks_in.push_back([=]() { + statement("device ", to_name(ir.default_entry_point), "_", ib_var_ref, "& ", ib_var_ref, " = ", + patch_output_buffer_var_name, "[", to_expression(builtin_primitive_id_id), "];"); + }); + else + entry_func.fixup_hooks_in.push_back([=]() { + statement("device ", to_name(ir.default_entry_point), "_", ib_var_ref, "* gl_out = &", + output_buffer_var_name, "[", to_expression(builtin_primitive_id_id), " * ", + get_entry_point().output_vertices, "];"); + }); + break; + default: + break; + } + } + break; + } + + default: + break; + } + + set_name(ib_type_id, to_name(ir.default_entry_point) + "_" + ib_var_ref); + set_name(ib_var_id, ib_var_ref); + + for (auto *p_var : vars) + { + bool strip_array = + (get_execution_model() == ExecutionModelTessellationControl || + (get_execution_model() == ExecutionModelTessellationEvaluation && storage == StorageClassInput)) && + !patch; + add_variable_to_interface_block(storage, ib_var_ref, ib_type, *p_var, strip_array); + } + + + MemberSorter member_sorter(ib_type, ir.meta[ib_type_id], MemberSorter::Location); + member_sorter.sort(); + + + + if (!patch) + fix_up_interface_member_indices(storage, ib_type_id); + + + if (get_execution_model() == ExecutionModelTessellationEvaluation && storage == StorageClassInput && patch && + stage_in_var_id) + { + uint32_t pcp_type_id = ir.increase_bound_by(1); + auto &pcp_type = set(pcp_type_id, ib_type); + pcp_type.basetype = SPIRType::ControlPointArray; + pcp_type.parent_type = pcp_type.type_alias = get_stage_in_struct_type().self; + pcp_type.storage = storage; + ir.meta[pcp_type_id] = ir.meta[ib_type.self]; + uint32_t mbr_idx = uint32_t(ib_type.member_types.size()); + ib_type.member_types.push_back(pcp_type_id); + set_member_name(ib_type.self, mbr_idx, "gl_in"); + } + + return ib_var_id; +} + +uint32_t CompilerMSL::add_interface_block_pointer(uint32_t ib_var_id, StorageClass storage) +{ + if (!ib_var_id) + return 0; + + uint32_t ib_ptr_var_id; + uint32_t next_id = ir.increase_bound_by(3); + auto &ib_type = expression_type(ib_var_id); + if (get_execution_model() == ExecutionModelTessellationControl) + { + + + uint32_t ib_ptr_type_id = next_id++; + auto &ib_ptr_type = set(ib_ptr_type_id, ib_type); + ib_ptr_type.parent_type = ib_ptr_type.type_alias = 
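+	// When vertex output is captured to a buffer, "out" becomes a reference into a
+	// device buffer indexed per vertex. Sketch of the binding emitted above, assuming
+	// enable_base_index_zero and hypothetical default buffer names:
+	/*
+	device main0_out& out =
+	    spvOut[gl_InstanceIndex * spvIndirectParams[0] + gl_VertexIndex];
+	*/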
ib_type.self; + ib_ptr_type.pointer = true; + ib_ptr_type.storage = storage == StorageClassInput ? StorageClassWorkgroup : StorageClassStorageBuffer; + ir.meta[ib_ptr_type_id] = ir.meta[ib_type.self]; + + + uint32_t ib_ptr_ptr_type_id = next_id++; + auto &ib_ptr_ptr_type = set(ib_ptr_ptr_type_id, ib_ptr_type); + ib_ptr_ptr_type.parent_type = ib_ptr_type_id; + ib_ptr_ptr_type.type_alias = ib_type.self; + ib_ptr_ptr_type.storage = StorageClassFunction; + ir.meta[ib_ptr_ptr_type_id] = ir.meta[ib_type.self]; + + ib_ptr_var_id = next_id; + set(ib_ptr_var_id, ib_ptr_ptr_type_id, StorageClassFunction, 0); + set_name(ib_ptr_var_id, storage == StorageClassInput ? input_wg_var_name : "gl_out"); + } + else + { + + + + + + + uint32_t pcp_type_id = next_id++; + auto &pcp_type = set(pcp_type_id, ib_type); + pcp_type.basetype = SPIRType::ControlPointArray; + pcp_type.parent_type = pcp_type.type_alias = ib_type.self; + pcp_type.storage = storage; + ir.meta[pcp_type_id] = ir.meta[ib_type.self]; + + ib_ptr_var_id = next_id; + set(ib_ptr_var_id, pcp_type_id, storage, 0); + set_name(ib_ptr_var_id, "gl_in"); + ir.meta[ib_ptr_var_id].decoration.qualified_alias = join(patch_stage_in_var_name, ".gl_in"); + } + return ib_ptr_var_id; +} + + + + +uint32_t CompilerMSL::ensure_correct_builtin_type(uint32_t type_id, BuiltIn builtin) +{ + auto &type = get(type_id); + + if ((builtin == BuiltInSampleMask && is_array(type)) || + ((builtin == BuiltInLayer || builtin == BuiltInViewportIndex || builtin == BuiltInFragStencilRefEXT) && + type.basetype != SPIRType::UInt)) + { + uint32_t next_id = ir.increase_bound_by(type.pointer ? 2 : 1); + uint32_t base_type_id = next_id++; + auto &base_type = set(base_type_id); + base_type.basetype = SPIRType::UInt; + base_type.width = 32; + + if (!type.pointer) + return base_type_id; + + uint32_t ptr_type_id = next_id++; + auto &ptr_type = set(ptr_type_id); + ptr_type = base_type; + ptr_type.pointer = true; + ptr_type.storage = type.storage; + ptr_type.parent_type = base_type_id; + return ptr_type_id; + } + + return type_id; +} + + + + +uint32_t CompilerMSL::ensure_correct_attribute_type(uint32_t type_id, uint32_t location) +{ + auto &type = get(type_id); + + auto p_va = vtx_attrs_by_location.find(location); + if (p_va == end(vtx_attrs_by_location)) + return type_id; + + switch (p_va->second.format) + { + case MSL_VERTEX_FORMAT_UINT8: + { + switch (type.basetype) + { + case SPIRType::UByte: + case SPIRType::UShort: + case SPIRType::UInt: + return type_id; + case SPIRType::Short: + case SPIRType::Int: + break; + default: + SPIRV_CROSS_THROW("Vertex attribute type mismatch between host and shader"); + } + uint32_t next_id = ir.increase_bound_by(type.pointer ? 2 : 1); + uint32_t base_type_id = next_id++; + auto &base_type = set(base_type_id); + base_type = type; + base_type.basetype = type.basetype == SPIRType::Short ? SPIRType::UShort : SPIRType::UInt; + base_type.pointer = false; + + if (!type.pointer) + return base_type_id; + + uint32_t ptr_type_id = next_id++; + auto &ptr_type = set(ptr_type_id); + ptr_type = base_type; + ptr_type.pointer = true; + ptr_type.storage = type.storage; + ptr_type.parent_type = base_type_id; + return ptr_type_id; + } + + case MSL_VERTEX_FORMAT_UINT16: + { + switch (type.basetype) + { + case SPIRType::UShort: + case SPIRType::UInt: + return type_id; + case SPIRType::Int: + break; + default: + SPIRV_CROSS_THROW("Vertex attribute type mismatch between host and shader"); + } + uint32_t next_id = ir.increase_bound_by(type.pointer ? 
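+	// ensure_correct_builtin_type() above forces builtins that MSL declares as uint
+	// (Layer, ViewportIndex, FragStencilRefEXT) and scalar SampleMask to that shape;
+	// ensure_correct_attribute_type() does the same for vertex attributes whose host
+	// format is unsigned. E.g. a hypothetical "int idx [[attribute(0)]]" fed by a
+	// MSL_VERTEX_FORMAT_UINT8 buffer is rewritten as "uint idx [[attribute(0)]]".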
2 : 1); + uint32_t base_type_id = next_id++; + auto &base_type = set(base_type_id); + base_type = type; + base_type.basetype = SPIRType::UInt; + base_type.pointer = false; + + if (!type.pointer) + return base_type_id; + + uint32_t ptr_type_id = next_id++; + auto &ptr_type = set(ptr_type_id); + ptr_type = base_type; + ptr_type.pointer = true; + ptr_type.storage = type.storage; + ptr_type.parent_type = base_type_id; + return ptr_type_id; + } + + default: + case MSL_VERTEX_FORMAT_OTHER: + break; + } + + return type_id; +} + +void CompilerMSL::mark_struct_members_packed(const SPIRType &type) +{ + set_extended_decoration(type.self, SPIRVCrossDecorationPhysicalTypePacked); + + + + uint32_t mbr_cnt = uint32_t(type.member_types.size()); + for (uint32_t i = 0; i < mbr_cnt; i++) + { + auto &mbr_type = get(type.member_types[i]); + if (mbr_type.basetype == SPIRType::Struct) + { + + auto *struct_type = &mbr_type; + while (!struct_type->array.empty()) + struct_type = &get(struct_type->parent_type); + mark_struct_members_packed(*struct_type); + } + else if (!is_scalar(mbr_type)) + set_extended_member_decoration(type.self, i, SPIRVCrossDecorationPhysicalTypePacked); + } +} + +void CompilerMSL::mark_scalar_layout_structs(const SPIRType &type) +{ + uint32_t mbr_cnt = uint32_t(type.member_types.size()); + for (uint32_t i = 0; i < mbr_cnt; i++) + { + auto &mbr_type = get(type.member_types[i]); + if (mbr_type.basetype == SPIRType::Struct) + { + auto *struct_type = &mbr_type; + while (!struct_type->array.empty()) + struct_type = &get(struct_type->parent_type); + + if (has_extended_decoration(struct_type->self, SPIRVCrossDecorationPhysicalTypePacked)) + continue; + + uint32_t msl_alignment = get_declared_struct_member_alignment_msl(type, i); + uint32_t msl_size = get_declared_struct_member_size_msl(type, i); + uint32_t spirv_offset = type_struct_member_offset(type, i); + uint32_t spirv_offset_next; + if (i + 1 < mbr_cnt) + spirv_offset_next = type_struct_member_offset(type, i + 1); + else + spirv_offset_next = spirv_offset + msl_size; + + + + bool struct_is_misaligned = (spirv_offset % msl_alignment) != 0; + bool struct_is_too_large = spirv_offset + msl_size > spirv_offset_next; + uint32_t array_stride = 0; + bool struct_needs_explicit_padding = false; + + + if (!mbr_type.array.empty()) + { + array_stride = type_struct_member_array_stride(type, i); + uint32_t dimensions = uint32_t(mbr_type.array.size() - 1); + for (uint32_t dim = 0; dim < dimensions; dim++) + { + uint32_t array_size = to_array_size_literal(mbr_type, dim); + array_stride /= max(array_size, 1u); + } + + + struct_needs_explicit_padding = true; + + + if (get_declared_struct_size_msl(*struct_type) > array_stride) + struct_is_too_large = true; + } + + if (struct_is_misaligned || struct_is_too_large) + mark_struct_members_packed(*struct_type); + mark_scalar_layout_structs(*struct_type); + + if (struct_needs_explicit_padding) + { + msl_size = get_declared_struct_size_msl(*struct_type, true, true); + if (array_stride < msl_size) + { + SPIRV_CROSS_THROW("Cannot express an array stride smaller than size of struct type."); + } + else + { + if (has_extended_decoration(struct_type->self, SPIRVCrossDecorationPaddingTarget)) + { + if (array_stride != + get_extended_decoration(struct_type->self, SPIRVCrossDecorationPaddingTarget)) + SPIRV_CROSS_THROW( + "A struct is used with different array strides. 
Cannot express this in MSL."); + } + else + set_extended_decoration(struct_type->self, SPIRVCrossDecorationPaddingTarget, array_stride); + } + } + } + } +} + + + + + +void CompilerMSL::align_struct(SPIRType &ib_type, unordered_set &aligned_structs) +{ + + ID &ib_type_id = ib_type.self; + if (aligned_structs.count(ib_type_id)) + return; + aligned_structs.insert(ib_type_id); + + + + MemberSorter member_sorter(ib_type, ir.meta[ib_type_id], MemberSorter::Offset); + member_sorter.sort(); + + auto mbr_cnt = uint32_t(ib_type.member_types.size()); + + for (uint32_t mbr_idx = 0; mbr_idx < mbr_cnt; mbr_idx++) + { + + auto &mbr_type = get(ib_type.member_types[mbr_idx]); + if (mbr_type.basetype == SPIRType::Struct) + align_struct(mbr_type, aligned_structs); + } + + + + + + uint32_t msl_offset = 0; + for (uint32_t mbr_idx = 0; mbr_idx < mbr_cnt; mbr_idx++) + { + + + ensure_member_packing_rules_msl(ib_type, mbr_idx); + + + + uint32_t msl_align_mask = get_declared_struct_member_alignment_msl(ib_type, mbr_idx) - 1; + uint32_t aligned_msl_offset = (msl_offset + msl_align_mask) & ~msl_align_mask; + + + uint32_t spirv_mbr_offset = get_member_decoration(ib_type_id, mbr_idx, DecorationOffset); + if (spirv_mbr_offset > aligned_msl_offset) + { + + + + uint32_t padding_bytes = spirv_mbr_offset - aligned_msl_offset; + set_extended_member_decoration(ib_type_id, mbr_idx, SPIRVCrossDecorationPaddingTarget, padding_bytes); + + + msl_offset += padding_bytes; + aligned_msl_offset = (msl_offset + msl_align_mask) & ~msl_align_mask; + } + else if (spirv_mbr_offset < aligned_msl_offset) + { + + + SPIRV_CROSS_THROW("Cannot represent buffer block correctly in MSL."); + } + + assert(aligned_msl_offset == spirv_mbr_offset); + + + + if (mbr_idx + 1 < mbr_cnt) + msl_offset = aligned_msl_offset + get_declared_struct_member_size_msl(ib_type, mbr_idx); + } +} + +bool CompilerMSL::validate_member_packing_rules_msl(const SPIRType &type, uint32_t index) const +{ + auto &mbr_type = get(type.member_types[index]); + uint32_t spirv_offset = get_member_decoration(type.self, index, DecorationOffset); + + if (index + 1 < type.member_types.size()) + { + + + + uint32_t spirv_offset_next = get_member_decoration(type.self, index + 1, DecorationOffset); + assert(spirv_offset_next >= spirv_offset); + uint32_t maximum_size = spirv_offset_next - spirv_offset; + uint32_t msl_mbr_size = get_declared_struct_member_size_msl(type, index); + if (msl_mbr_size > maximum_size) + return false; + } + + if (!mbr_type.array.empty()) + { + + uint32_t spirv_array_stride = type_struct_member_array_stride(type, index); + uint32_t msl_array_stride = get_declared_struct_member_array_stride_msl(type, index); + if (spirv_array_stride != msl_array_stride) + return false; + } + + if (is_matrix(mbr_type)) + { + + uint32_t spirv_matrix_stride = type_struct_member_matrix_stride(type, index); + uint32_t msl_matrix_stride = get_declared_struct_member_matrix_stride_msl(type, index); + if (spirv_matrix_stride != msl_matrix_stride) + return false; + } + + + uint32_t msl_alignment = get_declared_struct_member_alignment_msl(type, index); + if ((spirv_offset % msl_alignment) != 0) + return false; + + + return true; +} + + + + +void CompilerMSL::ensure_member_packing_rules_msl(SPIRType &ib_type, uint32_t index) +{ + if (validate_member_packing_rules_msl(ib_type, index)) + return; + + + + + + auto &mbr_type = get(ib_type.member_types[index]); + if (mbr_type.basetype == SPIRType::Struct) + SPIRV_CROSS_THROW("Cannot perform any repacking for structs when it is used as a member of another 
struct."); + + + set_extended_member_decoration(ib_type.self, index, SPIRVCrossDecorationPhysicalTypePacked); + + + if (validate_member_packing_rules_msl(ib_type, index)) + return; + + + + + + + + if (!mbr_type.array.empty() && !is_matrix(mbr_type)) + { + uint32_t array_stride = type_struct_member_array_stride(ib_type, index); + + + uint32_t dimensions = uint32_t(mbr_type.array.size() - 1); + for (uint32_t dim = 0; dim < dimensions; dim++) + array_stride /= max(to_array_size_literal(mbr_type, dim), 1u); + + uint32_t elems_per_stride = array_stride / (mbr_type.width / 8); + + if (elems_per_stride == 3) + SPIRV_CROSS_THROW("Cannot use ArrayStride of 3 elements in remapping scenarios."); + else if (elems_per_stride > 4) + SPIRV_CROSS_THROW("Cannot represent vectors with more than 4 elements in MSL."); + + auto physical_type = mbr_type; + physical_type.vecsize = elems_per_stride; + physical_type.parent_type = 0; + uint32_t type_id = ir.increase_bound_by(1); + set(type_id, physical_type); + set_extended_member_decoration(ib_type.self, index, SPIRVCrossDecorationPhysicalTypeID, type_id); + set_decoration(type_id, DecorationArrayStride, array_stride); + + + if (has_extended_decoration(ib_type.self, SPIRVCrossDecorationPhysicalTypePacked)) + SPIRV_CROSS_THROW("Unable to remove packed decoration as entire struct must be fully packed. Do not mix " + "scalar and std140 layout rules."); + else + unset_extended_member_decoration(ib_type.self, index, SPIRVCrossDecorationPhysicalTypePacked); + } + else if (is_matrix(mbr_type)) + { + + uint32_t matrix_stride = type_struct_member_matrix_stride(ib_type, index); + + uint32_t elems_per_stride = matrix_stride / (mbr_type.width / 8); + + if (elems_per_stride == 3) + SPIRV_CROSS_THROW("Cannot use ArrayStride of 3 elements in remapping scenarios."); + else if (elems_per_stride > 4) + SPIRV_CROSS_THROW("Cannot represent vectors with more than 4 elements in MSL."); + + bool row_major = has_member_decoration(ib_type.self, index, DecorationRowMajor); + + auto physical_type = mbr_type; + physical_type.parent_type = 0; + if (row_major) + physical_type.columns = elems_per_stride; + else + physical_type.vecsize = elems_per_stride; + uint32_t type_id = ir.increase_bound_by(1); + set(type_id, physical_type); + set_extended_member_decoration(ib_type.self, index, SPIRVCrossDecorationPhysicalTypeID, type_id); + + + if (has_extended_decoration(ib_type.self, SPIRVCrossDecorationPhysicalTypePacked)) + SPIRV_CROSS_THROW("Unable to remove packed decoration as entire struct must be fully packed. 
Do not mix " + "scalar and std140 layout rules."); + else + unset_extended_member_decoration(ib_type.self, index, SPIRVCrossDecorationPhysicalTypePacked); + } + + + if (!validate_member_packing_rules_msl(ib_type, index)) + SPIRV_CROSS_THROW("Found a buffer packing case which we cannot represent in MSL."); +} + +void CompilerMSL::emit_store_statement(uint32_t lhs_expression, uint32_t rhs_expression) +{ + auto &type = expression_type(rhs_expression); + + bool lhs_remapped_type = has_extended_decoration(lhs_expression, SPIRVCrossDecorationPhysicalTypeID); + bool lhs_packed_type = has_extended_decoration(lhs_expression, SPIRVCrossDecorationPhysicalTypePacked); + auto *lhs_e = maybe_get(lhs_expression); + auto *rhs_e = maybe_get(rhs_expression); + + bool transpose = lhs_e && lhs_e->need_transpose; + + + if (!lhs_remapped_type && !lhs_packed_type) + { + + + + if (is_matrix(type) && lhs_e && lhs_e->need_transpose) + { + if (!rhs_e) + SPIRV_CROSS_THROW("Need to transpose right-side expression of a store to row-major matrix, but it is " + "not a SPIRExpression."); + lhs_e->need_transpose = false; + + if (rhs_e && rhs_e->need_transpose) + { + + + rhs_e->need_transpose = false; + statement(to_expression(lhs_expression), " = ", to_unpacked_row_major_matrix_expression(rhs_expression), + ";"); + rhs_e->need_transpose = true; + } + else + statement(to_expression(lhs_expression), " = transpose(", to_unpacked_expression(rhs_expression), ");"); + + lhs_e->need_transpose = true; + register_write(lhs_expression); + } + else if (lhs_e && lhs_e->need_transpose) + { + lhs_e->need_transpose = false; + + + for (uint32_t c = 0; c < type.vecsize; c++) + { + auto lhs_expr = to_dereferenced_expression(lhs_expression); + auto column_index = lhs_expr.find_last_of('['); + if (column_index != string::npos) + { + statement(lhs_expr.insert(column_index, join('[', c, ']')), " = ", + to_extract_component_expression(rhs_expression, c), ";"); + } + } + lhs_e->need_transpose = true; + register_write(lhs_expression); + } + else + CompilerGLSL::emit_store_statement(lhs_expression, rhs_expression); + } + else if (!lhs_remapped_type && !is_matrix(type) && !transpose) + { + + + CompilerGLSL::emit_store_statement(lhs_expression, rhs_expression); + } + else + { + + + + TypeID physical_type_id = lhs_remapped_type ? 
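+	// Stores through a row-major (need_transpose) view flip the value on the way in.
+	// Sketch of the two shapes emitted above for a hypothetical float2x2 "m":
+	/*
+	m = transpose(v);   // whole-matrix store
+	m[0][1] = v.x;      // per-component scatter when storing a single column
+	m[1][1] = v.y;
+	*/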
+ ID(get_extended_decoration(lhs_expression, SPIRVCrossDecorationPhysicalTypeID)) : + type.self; + + auto &physical_type = get(physical_type_id); + + static const char *swizzle_lut[] = { + ".x", + ".xy", + ".xyz", + "", + }; + + if (is_matrix(type)) + { + + + + + + + + bool rhs_transpose = rhs_e && rhs_e->need_transpose; + + + if (rhs_transpose) + rhs_e->need_transpose = false; + + if (transpose) + { + + lhs_e->need_transpose = false; + + const char *store_swiz = ""; + if (physical_type.columns != type.columns) + store_swiz = swizzle_lut[type.columns - 1]; + + if (rhs_transpose) + { + + for (uint32_t i = 0; i < type.vecsize; i++) + { + statement(to_enclosed_expression(lhs_expression), "[", i, "]", store_swiz, " = ", + to_unpacked_row_major_matrix_expression(rhs_expression), "[", i, "];"); + } + } + else + { + auto vector_type = expression_type(rhs_expression); + vector_type.vecsize = vector_type.columns; + vector_type.columns = 1; + + + + for (uint32_t i = 0; i < type.vecsize; i++) + { + string rhs_row = type_to_glsl_constructor(vector_type) + "("; + for (uint32_t j = 0; j < vector_type.vecsize; j++) + { + rhs_row += join(to_enclosed_unpacked_expression(rhs_expression), "[", j, "][", i, "]"); + if (j + 1 < vector_type.vecsize) + rhs_row += ", "; + } + rhs_row += ")"; + + statement(to_enclosed_expression(lhs_expression), "[", i, "]", store_swiz, " = ", rhs_row, ";"); + } + } + + + lhs_e->need_transpose = true; + } + else + { + const char *store_swiz = ""; + if (physical_type.vecsize != type.vecsize) + store_swiz = swizzle_lut[type.vecsize - 1]; + + if (rhs_transpose) + { + auto vector_type = expression_type(rhs_expression); + vector_type.columns = 1; + + + + for (uint32_t i = 0; i < type.columns; i++) + { + string rhs_row = type_to_glsl_constructor(vector_type) + "("; + for (uint32_t j = 0; j < vector_type.vecsize; j++) + { + + auto unpacked_expr = to_unpacked_row_major_matrix_expression(rhs_expression); + rhs_row += join(unpacked_expr, "[", j, "][", i, "]"); + if (j + 1 < vector_type.vecsize) + rhs_row += ", "; + } + rhs_row += ")"; + + statement(to_enclosed_expression(lhs_expression), "[", i, "]", store_swiz, " = ", rhs_row, ";"); + } + } + else + { + + for (uint32_t i = 0; i < type.columns; i++) + { + statement(to_enclosed_expression(lhs_expression), "[", i, "]", store_swiz, " = ", + to_enclosed_unpacked_expression(rhs_expression), "[", i, "];"); + } + } + } + + + if (rhs_transpose) + rhs_e->need_transpose = true; + } + else if (transpose) + { + lhs_e->need_transpose = false; + + + for (uint32_t c = 0; c < type.vecsize; c++) + { + auto lhs_expr = to_enclosed_expression(lhs_expression); + auto column_index = lhs_expr.find_last_of('['); + if (column_index != string::npos) + { + statement(lhs_expr.insert(column_index, join('[', c, ']')), " = ", + to_extract_component_expression(rhs_expression, c), ";"); + } + } + + lhs_e->need_transpose = true; + } + else if ((is_matrix(physical_type) || is_array(physical_type)) && physical_type.vecsize > type.vecsize) + { + assert(type.vecsize >= 1 && type.vecsize <= 3); + + + + + + assert(!lhs_packed_type); + + string lhs = to_dereferenced_expression(lhs_expression); + string rhs = to_pointer_expression(rhs_expression); + + + + lhs = enclose_expression(lhs) + swizzle_lut[type.vecsize - 1]; + if (!optimize_read_modify_write(expression_type(rhs_expression), lhs, rhs)) + statement(lhs, " = ", rhs, ";"); + } + else if (!is_matrix(type)) + { + string lhs = to_dereferenced_expression(lhs_expression); + string rhs = to_pointer_expression(rhs_expression); + if 
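+	// Padded physical vectors narrow the store with a swizzle instead: a logical
+	// float3 backed by a physical float4 (e.g. std140 array padding) is written as
+	/*
+	buf.v.xyz = value;   // swizzle_lut[type.vecsize - 1] == ".xyz"
+	*/
+	// while loads go through unpack_expression_type() below, which appends the same
+	// swizzle or reconstructs packed matrices column by column.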
(!optimize_read_modify_write(expression_type(rhs_expression), lhs, rhs))
+			statement(lhs, " = ", rhs, ";");
+		}
+
+		register_write(lhs_expression);
+	}
+}
+
+static bool expression_ends_with(const string &expr_str, const std::string &ending)
+{
+	if (expr_str.length() >= ending.length())
+		return (expr_str.compare(expr_str.length() - ending.length(), ending.length(), ending) == 0);
+	else
+		return false;
+}
+
+// Converts the format of the current expression from its packed or physically
+// remapped storage type back to the logical type, by wrapping the expression in a
+// constructor or swizzle of the appropriate shape.
+string CompilerMSL::unpack_expression_type(string expr_str, const SPIRType &type, uint32_t physical_type_id,
+                                           bool packed, bool row_major)
+{
+	// If the expression is neither physically remapped nor packed, there is nothing to do.
+	if (physical_type_id == 0 && !packed)
+		return expr_str;
+
+	const SPIRType *physical_type = nullptr;
+	if (physical_type_id)
+		physical_type = &get<SPIRType>(physical_type_id);
+
+	static const char *swizzle_lut[] = {
+		".x",
+		".xy",
+		".xyz",
+	};
+
+	// A padded physical vector (e.g. float3 stored as float4) unpacks with a swizzle.
+	if (physical_type && is_vector(*physical_type) && is_array(*physical_type) &&
+	    physical_type->vecsize > type.vecsize && !expression_ends_with(expr_str, swizzle_lut[type.vecsize - 1]))
+	{
+		assert(type.vecsize >= 1 && type.vecsize <= 3);
+		return enclose_expression(expr_str) + swizzle_lut[type.vecsize - 1];
+	}
+	else if (is_matrix(type))
+	{
+		// Packed matrices are stored as arrays of packed vectors, so the logical
+		// matrix is reconstructed column by column.
+		if (!physical_type)
+			physical_type = &type;
+
+		uint32_t vecsize = type.vecsize;
+		uint32_t columns = type.columns;
+		if (row_major)
+			swap(vecsize, columns);
+
+		uint32_t physical_vecsize = row_major ? physical_type->columns : physical_type->vecsize;
+
+		const char *base_type = type.width == 16 ? "half" : "float";
+		string unpack_expr = join(base_type, columns, "x", vecsize, "(");
+
+		const char *load_swiz = "";
+
+		if (physical_vecsize != vecsize)
+			load_swiz = swizzle_lut[vecsize - 1];
+
+		for (uint32_t i = 0; i < columns; i++)
+		{
+			if (i > 0)
+				unpack_expr += ", ";
+
+			if (packed)
+				unpack_expr += join(base_type, physical_vecsize, "(", expr_str, "[", i, "]", ")", load_swiz);
+			else
+				unpack_expr += join(expr_str, "[", i, "]", load_swiz);
+		}
+
+		unpack_expr += ")";
+		return unpack_expr;
+	}
+	else
+	{
+		// Everything else unpacks through a constructor of the logical type.
+		use_builtin_array = true;
+		string unpack_expr = join(type_to_glsl(type), "(", expr_str, ")");
+		use_builtin_array = false;
+		return unpack_expr;
+	}
+}
+
+// Emits the file header info
+void CompilerMSL::emit_header()
+{
+	// This line can be overridden during compilation, so it is a flag and not a pragma line.
+	if (suppress_missing_prototypes)
+		statement("#pragma clang diagnostic ignored \"-Wmissing-prototypes\"");
+
+	// Disable warning about missing braces for the spvUnsafeArray<T> template.
+	if (spv_function_implementations.count(SPVFuncImplUnsafeArray) != 0)
+		statement("#pragma clang diagnostic ignored \"-Wmissing-braces\"");
+
+	for (auto &pragma : pragma_lines)
+		statement(pragma);
+
+	if (!pragma_lines.empty() || suppress_missing_prototypes)
+		statement("");
+
+	statement("#include <metal_stdlib>");
+	statement("#include <simd/simd.h>");
+
+	for (auto &header : header_lines)
+		statement(header);
+
+	statement("");
+	statement("using namespace metal;");
+	statement("");
+
+	for (auto &td : typedef_lines)
+		statement(td);
+
+	if (!typedef_lines.empty())
+		statement("");
+}
+
+void CompilerMSL::add_pragma_line(const string &line)
+{
+	auto rslt = pragma_lines.insert(line);
+	if (rslt.second)
+		force_recompile();
+}
+
+void CompilerMSL::add_typedef_line(const string &line)
+{
+	auto rslt = typedef_lines.insert(line);
+	if (rslt.second)
+		force_recompile();
+}
+
+// Emits any needed custom template structs.
+void CompilerMSL::emit_custom_templates()
+{
+	for (const auto &spv_func : spv_function_implementations)
+	{
+		switch (spv_func)
+		{
+		case SPVFuncImplUnsafeArray:
+			statement("template<typename T, size_t Num>");
+			statement("struct spvUnsafeArray");
+			begin_scope();
+			statement("T elements[Num ?
Num : 1];"); + statement(""); + statement("thread T& operator [] (size_t pos) thread"); + begin_scope(); + statement("return elements[pos];"); + end_scope(); + statement("constexpr const thread T& operator [] (size_t pos) const thread"); + begin_scope(); + statement("return elements[pos];"); + end_scope(); + statement(""); + statement("device T& operator [] (size_t pos) device"); + begin_scope(); + statement("return elements[pos];"); + end_scope(); + statement("constexpr const device T& operator [] (size_t pos) const device"); + begin_scope(); + statement("return elements[pos];"); + end_scope(); + statement(""); + statement("constexpr const constant T& operator [] (size_t pos) const constant"); + begin_scope(); + statement("return elements[pos];"); + end_scope(); + statement(""); + statement("threadgroup T& operator [] (size_t pos) threadgroup"); + begin_scope(); + statement("return elements[pos];"); + end_scope(); + statement("constexpr const threadgroup T& operator [] (size_t pos) const threadgroup"); + begin_scope(); + statement("return elements[pos];"); + end_scope(); + end_scope_decl(); + statement(""); + break; + + default: + break; + } + } +} + + + + +void CompilerMSL::emit_custom_functions() +{ + for (uint32_t i = SPVFuncImplArrayCopyMultidimMax; i >= 2; i--) + if (spv_function_implementations.count(static_cast(SPVFuncImplArrayCopyMultidimBase + i))) + spv_function_implementations.insert(static_cast(SPVFuncImplArrayCopyMultidimBase + i - 1)); + + if (spv_function_implementations.count(SPVFuncImplDynamicImageSampler)) + { + + if (!msl_options.supports_msl_version(2)) + SPIRV_CROSS_THROW( + "spvDynamicImageSampler requires default-constructible texture objects, which require MSL 2.0."); + spv_function_implementations.insert(SPVFuncImplForwardArgs); + spv_function_implementations.insert(SPVFuncImplTextureSwizzle); + if (msl_options.swizzle_texture_samples) + spv_function_implementations.insert(SPVFuncImplGatherSwizzle); + for (uint32_t i = SPVFuncImplChromaReconstructNearest2Plane; + i <= SPVFuncImplChromaReconstructLinear420XMidpointYMidpoint3Plane; i++) + spv_function_implementations.insert(static_cast(i)); + spv_function_implementations.insert(SPVFuncImplExpandITUFullRange); + spv_function_implementations.insert(SPVFuncImplExpandITUNarrowRange); + spv_function_implementations.insert(SPVFuncImplConvertYCbCrBT709); + spv_function_implementations.insert(SPVFuncImplConvertYCbCrBT601); + spv_function_implementations.insert(SPVFuncImplConvertYCbCrBT2020); + } + + for (uint32_t i = SPVFuncImplChromaReconstructNearest2Plane; + i <= SPVFuncImplChromaReconstructLinear420XMidpointYMidpoint3Plane; i++) + if (spv_function_implementations.count(static_cast(i))) + spv_function_implementations.insert(SPVFuncImplForwardArgs); + + if (spv_function_implementations.count(SPVFuncImplTextureSwizzle) || + spv_function_implementations.count(SPVFuncImplGatherSwizzle) || + spv_function_implementations.count(SPVFuncImplGatherCompareSwizzle)) + { + spv_function_implementations.insert(SPVFuncImplForwardArgs); + spv_function_implementations.insert(SPVFuncImplGetSwizzle); + } + + for (const auto &spv_func : spv_function_implementations) + { + switch (spv_func) + { + case SPVFuncImplMod: + statement("// Implementation of the GLSL mod() function, which is slightly different than Metal fmod()"); + statement("template"); + statement("inline Tx mod(Tx x, Ty y)"); + begin_scope(); + statement("return x - y * floor(x / y);"); + end_scope(); + statement(""); + break; + + case SPVFuncImplRadians: + statement("// 
Implementation of the GLSL radians() function"); + statement("template"); + statement("inline T radians(T d)"); + begin_scope(); + statement("return d * T(0.01745329251);"); + end_scope(); + statement(""); + break; + + case SPVFuncImplDegrees: + statement("// Implementation of the GLSL degrees() function"); + statement("template"); + statement("inline T degrees(T r)"); + begin_scope(); + statement("return r * T(57.2957795131);"); + end_scope(); + statement(""); + break; + + case SPVFuncImplFindILsb: + statement("// Implementation of the GLSL findLSB() function"); + statement("template"); + statement("inline T spvFindLSB(T x)"); + begin_scope(); + statement("return select(ctz(x), T(-1), x == T(0));"); + end_scope(); + statement(""); + break; + + case SPVFuncImplFindUMsb: + statement("// Implementation of the unsigned GLSL findMSB() function"); + statement("template"); + statement("inline T spvFindUMSB(T x)"); + begin_scope(); + statement("return select(clz(T(0)) - (clz(x) + T(1)), T(-1), x == T(0));"); + end_scope(); + statement(""); + break; + + case SPVFuncImplFindSMsb: + statement("// Implementation of the signed GLSL findMSB() function"); + statement("template"); + statement("inline T spvFindSMSB(T x)"); + begin_scope(); + statement("T v = select(x, T(-1) - x, x < T(0));"); + statement("return select(clz(T(0)) - (clz(v) + T(1)), T(-1), v == T(0));"); + end_scope(); + statement(""); + break; + + case SPVFuncImplSSign: + statement("// Implementation of the GLSL sign() function for integer types"); + statement("template::value>::type>"); + statement("inline T sign(T x)"); + begin_scope(); + statement("return select(select(select(x, T(0), x == T(0)), T(1), x > T(0)), T(-1), x < T(0));"); + end_scope(); + statement(""); + break; + + case SPVFuncImplArrayCopy: + case SPVFuncImplArrayOfArrayCopy2Dim: + case SPVFuncImplArrayOfArrayCopy3Dim: + case SPVFuncImplArrayOfArrayCopy4Dim: + case SPVFuncImplArrayOfArrayCopy5Dim: + case SPVFuncImplArrayOfArrayCopy6Dim: + { + + static const char *function_name_tags[] = { + "FromConstantToStack", "FromConstantToThreadGroup", "FromStackToStack", + "FromStackToThreadGroup", "FromThreadGroupToStack", "FromThreadGroupToThreadGroup", + }; + + static const char *src_address_space[] = { + "constant", "constant", "thread const", "thread const", "threadgroup const", "threadgroup const", + }; + + static const char *dst_address_space[] = { + "thread", "threadgroup", "thread", "threadgroup", "thread", "threadgroup", + }; + + for (uint32_t variant = 0; variant < 6; variant++) + { + uint32_t dimensions = spv_func - SPVFuncImplArrayCopyMultidimBase; + string tmp = "template 0) + { + string tex_width_str = convert_to_string(msl_options.texel_buffer_texture_width); + statement("// Returns 2D texture coords corresponding to 1D texel buffer coords"); + statement(force_inline); + statement("uint2 spvTexelBufferCoord(uint tc)"); + begin_scope(); + statement(join("return uint2(tc % ", tex_width_str, ", tc / ", tex_width_str, ");")); + end_scope(); + statement(""); + } + else + { + statement("// Returns 2D texture coords corresponding to 1D texel buffer coords"); + statement( + "#define spvTexelBufferCoord(tc, tex) uint2((tc) % (tex).get_width(), (tc) / (tex).get_width())"); + statement(""); + } + break; + } + + + case SPVFuncImplImage2DAtomicCoords: + { + statement("// Returns buffer coords corresponding to 2D texture coords for emulating 2D texture atomics"); + statement("#define spvImage2DAtomicCoord(tc, tex) (((tex).get_width() * (tc).x) + (tc).y)"); + statement(""); + break; 
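+	// spvTexelBufferCoord() flattens a 1D texel-buffer index onto the 2D texture that
+	// backs it: (tc % width, tc / width). A hypothetical 64-texel-wide example:
+	/*
+	uint tc = 200;
+	uint2 coord = uint2(tc % 64u, tc / 64u);   // == (8, 3)
+	*/
+	// spvImage2DAtomicCoord() linearizes the other way round for 2D-texture atomics:
+	// index = width * x + y.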
+ } + + + case SPVFuncImplFAdd: + statement("template"); + statement("T spvFAdd(T l, T r)"); + begin_scope(); + statement("return fma(T(1), l, r);"); + end_scope(); + statement(""); + break; + + + case SPVFuncImplFMul: + statement("template"); + statement("T spvFMul(T l, T r)"); + begin_scope(); + statement("return fma(l, r, T(0));"); + end_scope(); + statement(""); + + statement("template"); + statement("vec spvFMulVectorMatrix(vec v, matrix m)"); + begin_scope(); + statement("vec res = vec(0);"); + statement("for (uint i = Rows; i > 0; --i)"); + begin_scope(); + statement("vec tmp(0);"); + statement("for (uint j = 0; j < Cols; ++j)"); + begin_scope(); + statement("tmp[j] = m[j][i - 1];"); + end_scope(); + statement("res = fma(tmp, vec(v[i - 1]), res);"); + end_scope(); + statement("return res;"); + end_scope(); + statement(""); + + statement("template"); + statement("vec spvFMulMatrixVector(matrix m, vec v)"); + begin_scope(); + statement("vec res = vec(0);"); + statement("for (uint i = Cols; i > 0; --i)"); + begin_scope(); + statement("res = fma(m[i - 1], vec(v[i - 1]), res);"); + end_scope(); + statement("return res;"); + end_scope(); + statement(""); + + statement("template"); + statement( + "matrix spvFMulMatrixMatrix(matrix l, matrix r)"); + begin_scope(); + statement("matrix res;"); + statement("for (uint i = 0; i < RCols; i++)"); + begin_scope(); + statement("vec tmp(0);"); + statement("for (uint j = 0; j < LCols; j++)"); + begin_scope(); + statement("tmp = fma(vec(r[i][j]), l[j], tmp);"); + end_scope(); + statement("res[i] = tmp;"); + end_scope(); + statement("return res;"); + end_scope(); + statement(""); + break; + + + case SPVFuncImplCubemapTo2DArrayFace: + statement(force_inline); + statement("float3 spvCubemapTo2DArrayFace(float3 P)"); + begin_scope(); + statement("float3 Coords = abs(P.xyz);"); + statement("float CubeFace = 0;"); + statement("float ProjectionAxis = 0;"); + statement("float u = 0;"); + statement("float v = 0;"); + statement("if (Coords.x >= Coords.y && Coords.x >= Coords.z)"); + begin_scope(); + statement("CubeFace = P.x >= 0 ? 0 : 1;"); + statement("ProjectionAxis = Coords.x;"); + statement("u = P.x >= 0 ? -P.z : P.z;"); + statement("v = -P.y;"); + end_scope(); + statement("else if (Coords.y >= Coords.x && Coords.y >= Coords.z)"); + begin_scope(); + statement("CubeFace = P.y >= 0 ? 2 : 3;"); + statement("ProjectionAxis = Coords.y;"); + statement("u = P.x;"); + statement("v = P.y >= 0 ? P.z : -P.z;"); + end_scope(); + statement("else"); + begin_scope(); + statement("CubeFace = P.z >= 0 ? 4 : 5;"); + statement("ProjectionAxis = Coords.z;"); + statement("u = P.z >= 0 ? 
P.x : -P.x;"); + statement("v = -P.y;"); + end_scope(); + statement("u = 0.5 * (u/ProjectionAxis + 1);"); + statement("v = 0.5 * (v/ProjectionAxis + 1);"); + statement("return float3(u, v, CubeFace);"); + end_scope(); + statement(""); + break; + + case SPVFuncImplInverse4x4: + statement("// Returns the determinant of a 2x2 matrix."); + statement(force_inline); + statement("float spvDet2x2(float a1, float a2, float b1, float b2)"); + begin_scope(); + statement("return a1 * b2 - b1 * a2;"); + end_scope(); + statement(""); + + statement("// Returns the determinant of a 3x3 matrix."); + statement(force_inline); + statement("float spvDet3x3(float a1, float a2, float a3, float b1, float b2, float b3, float c1, " + "float c2, float c3)"); + begin_scope(); + statement("return a1 * spvDet2x2(b2, b3, c2, c3) - b1 * spvDet2x2(a2, a3, c2, c3) + c1 * spvDet2x2(a2, a3, " + "b2, b3);"); + end_scope(); + statement(""); + statement("// Returns the inverse of a matrix, by using the algorithm of calculating the classical"); + statement("// adjoint and dividing by the determinant. The contents of the matrix are changed."); + statement(force_inline); + statement("float4x4 spvInverse4x4(float4x4 m)"); + begin_scope(); + statement("float4x4 adj; // The adjoint matrix (inverse after dividing by determinant)"); + statement_no_indent(""); + statement("// Create the transpose of the cofactors, as the classical adjoint of the matrix."); + statement("adj[0][0] = spvDet3x3(m[1][1], m[1][2], m[1][3], m[2][1], m[2][2], m[2][3], m[3][1], m[3][2], " + "m[3][3]);"); + statement("adj[0][1] = -spvDet3x3(m[0][1], m[0][2], m[0][3], m[2][1], m[2][2], m[2][3], m[3][1], m[3][2], " + "m[3][3]);"); + statement("adj[0][2] = spvDet3x3(m[0][1], m[0][2], m[0][3], m[1][1], m[1][2], m[1][3], m[3][1], m[3][2], " + "m[3][3]);"); + statement("adj[0][3] = -spvDet3x3(m[0][1], m[0][2], m[0][3], m[1][1], m[1][2], m[1][3], m[2][1], m[2][2], " + "m[2][3]);"); + statement_no_indent(""); + statement("adj[1][0] = -spvDet3x3(m[1][0], m[1][2], m[1][3], m[2][0], m[2][2], m[2][3], m[3][0], m[3][2], " + "m[3][3]);"); + statement("adj[1][1] = spvDet3x3(m[0][0], m[0][2], m[0][3], m[2][0], m[2][2], m[2][3], m[3][0], m[3][2], " + "m[3][3]);"); + statement("adj[1][2] = -spvDet3x3(m[0][0], m[0][2], m[0][3], m[1][0], m[1][2], m[1][3], m[3][0], m[3][2], " + "m[3][3]);"); + statement("adj[1][3] = spvDet3x3(m[0][0], m[0][2], m[0][3], m[1][0], m[1][2], m[1][3], m[2][0], m[2][2], " + "m[2][3]);"); + statement_no_indent(""); + statement("adj[2][0] = spvDet3x3(m[1][0], m[1][1], m[1][3], m[2][0], m[2][1], m[2][3], m[3][0], m[3][1], " + "m[3][3]);"); + statement("adj[2][1] = -spvDet3x3(m[0][0], m[0][1], m[0][3], m[2][0], m[2][1], m[2][3], m[3][0], m[3][1], " + "m[3][3]);"); + statement("adj[2][2] = spvDet3x3(m[0][0], m[0][1], m[0][3], m[1][0], m[1][1], m[1][3], m[3][0], m[3][1], " + "m[3][3]);"); + statement("adj[2][3] = -spvDet3x3(m[0][0], m[0][1], m[0][3], m[1][0], m[1][1], m[1][3], m[2][0], m[2][1], " + "m[2][3]);"); + statement_no_indent(""); + statement("adj[3][0] = -spvDet3x3(m[1][0], m[1][1], m[1][2], m[2][0], m[2][1], m[2][2], m[3][0], m[3][1], " + "m[3][2]);"); + statement("adj[3][1] = spvDet3x3(m[0][0], m[0][1], m[0][2], m[2][0], m[2][1], m[2][2], m[3][0], m[3][1], " + "m[3][2]);"); + statement("adj[3][2] = -spvDet3x3(m[0][0], m[0][1], m[0][2], m[1][0], m[1][1], m[1][2], m[3][0], m[3][1], " + "m[3][2]);"); + statement("adj[3][3] = spvDet3x3(m[0][0], m[0][1], m[0][2], m[1][0], m[1][1], m[1][2], m[2][0], m[2][1], " + "m[2][2]);"); + statement_no_indent(""); + 
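+	// The inverse is assembled as adj(m) * (1 / det(m)). Because "adj" already holds
+	// the transposed cofactors, expanding the determinant along the first row can
+	// reuse them directly:
+	//   det = adj[0][0]*m[0][0] + adj[0][1]*m[1][0] + adj[0][2]*m[2][0] + adj[0][3]*m[3][0],
+	// which is exactly the statement emitted below.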
statement("// Calculate the determinant as a combination of the cofactors of the first row."); + statement("float det = (adj[0][0] * m[0][0]) + (adj[0][1] * m[1][0]) + (adj[0][2] * m[2][0]) + (adj[0][3] " + "* m[3][0]);"); + statement_no_indent(""); + statement("// Divide the classical adjoint matrix by the determinant."); + statement("// If determinant is zero, matrix is not invertable, so leave it unchanged."); + statement("return (det != 0.0f) ? (adj * (1.0f / det)) : m;"); + end_scope(); + statement(""); + break; + + case SPVFuncImplInverse3x3: + if (spv_function_implementations.count(SPVFuncImplInverse4x4) == 0) + { + statement("// Returns the determinant of a 2x2 matrix."); + statement(force_inline); + statement("float spvDet2x2(float a1, float a2, float b1, float b2)"); + begin_scope(); + statement("return a1 * b2 - b1 * a2;"); + end_scope(); + statement(""); + } + + statement("// Returns the inverse of a matrix, by using the algorithm of calculating the classical"); + statement("// adjoint and dividing by the determinant. The contents of the matrix are changed."); + statement(force_inline); + statement("float3x3 spvInverse3x3(float3x3 m)"); + begin_scope(); + statement("float3x3 adj; // The adjoint matrix (inverse after dividing by determinant)"); + statement_no_indent(""); + statement("// Create the transpose of the cofactors, as the classical adjoint of the matrix."); + statement("adj[0][0] = spvDet2x2(m[1][1], m[1][2], m[2][1], m[2][2]);"); + statement("adj[0][1] = -spvDet2x2(m[0][1], m[0][2], m[2][1], m[2][2]);"); + statement("adj[0][2] = spvDet2x2(m[0][1], m[0][2], m[1][1], m[1][2]);"); + statement_no_indent(""); + statement("adj[1][0] = -spvDet2x2(m[1][0], m[1][2], m[2][0], m[2][2]);"); + statement("adj[1][1] = spvDet2x2(m[0][0], m[0][2], m[2][0], m[2][2]);"); + statement("adj[1][2] = -spvDet2x2(m[0][0], m[0][2], m[1][0], m[1][2]);"); + statement_no_indent(""); + statement("adj[2][0] = spvDet2x2(m[1][0], m[1][1], m[2][0], m[2][1]);"); + statement("adj[2][1] = -spvDet2x2(m[0][0], m[0][1], m[2][0], m[2][1]);"); + statement("adj[2][2] = spvDet2x2(m[0][0], m[0][1], m[1][0], m[1][1]);"); + statement_no_indent(""); + statement("// Calculate the determinant as a combination of the cofactors of the first row."); + statement("float det = (adj[0][0] * m[0][0]) + (adj[0][1] * m[1][0]) + (adj[0][2] * m[2][0]);"); + statement_no_indent(""); + statement("// Divide the classical adjoint matrix by the determinant."); + statement("// If determinant is zero, matrix is not invertable, so leave it unchanged."); + statement("return (det != 0.0f) ? (adj * (1.0f / det)) : m;"); + end_scope(); + statement(""); + break; + + case SPVFuncImplInverse2x2: + statement("// Returns the inverse of a matrix, by using the algorithm of calculating the classical"); + statement("// adjoint and dividing by the determinant. 
+
+	case SPVFuncImplInverse2x2:
+		statement("// Returns the inverse of a matrix, by using the algorithm of calculating the classical");
+		statement("// adjoint and dividing by the determinant. The contents of the matrix are changed.");
+		statement(force_inline);
+		statement("float2x2 spvInverse2x2(float2x2 m)");
+		begin_scope();
+		statement("float2x2 adj; // The adjoint matrix (inverse after dividing by determinant)");
+		statement_no_indent("");
+		statement("// Create the transpose of the cofactors, as the classical adjoint of the matrix.");
+		statement("adj[0][0] = m[1][1];");
+		statement("adj[0][1] = -m[0][1];");
+		statement_no_indent("");
+		statement("adj[1][0] = -m[1][0];");
+		statement("adj[1][1] = m[0][0];");
+		statement_no_indent("");
+		statement("// Calculate the determinant as a combination of the cofactors of the first row.");
+		statement("float det = (adj[0][0] * m[0][0]) + (adj[0][1] * m[1][0]);");
+		statement_no_indent("");
+		statement("// Divide the classical adjoint matrix by the determinant.");
+		statement("// If determinant is zero, matrix is not invertible, so leave it unchanged.");
+		statement("return (det != 0.0f) ? (adj * (1.0f / det)) : m;");
+		end_scope();
+		statement("");
+		break;
+
+	case SPVFuncImplForwardArgs:
+		statement("template<typename T> struct spvRemoveReference { typedef T type; };");
+		statement("template<typename T> struct spvRemoveReference<thread T&> { typedef T type; };");
+		statement("template<typename T> struct spvRemoveReference<thread T&&> { typedef T type; };");
+		statement("template<typename T> inline constexpr thread T&& spvForward(thread typename "
+		          "spvRemoveReference<T>::type& x)");
+		begin_scope();
+		statement("return static_cast<thread T&&>(x);");
+		end_scope();
+		statement("template<typename T> inline constexpr thread T&& spvForward(thread typename "
+		          "spvRemoveReference<T>::type&& x)");
+		begin_scope();
+		statement("return static_cast<thread T&&>(x);");
+		end_scope();
+		statement("");
+		break;
+
+	case SPVFuncImplGetSwizzle:
+		statement("enum class spvSwizzle : uint");
+		begin_scope();
+		statement("none = 0,");
+		statement("zero,");
+		statement("one,");
+		statement("red,");
+		statement("green,");
+		statement("blue,");
+		statement("alpha");
+		end_scope_decl();
+		statement("");
+		statement("template<typename T>");
+		statement("inline T spvGetSwizzle(vec<T, 4> x, T c, spvSwizzle s)");
+		begin_scope();
+		statement("switch (s)");
+		begin_scope();
+		statement("case spvSwizzle::none:");
+		statement("    return c;");
+		statement("case spvSwizzle::zero:");
+		statement("    return 0;");
+		statement("case spvSwizzle::one:");
+		statement("    return 1;");
+		statement("case spvSwizzle::red:");
+		statement("    return x.r;");
+		statement("case spvSwizzle::green:");
+		statement("    return x.g;");
+		statement("case spvSwizzle::blue:");
+		statement("    return x.b;");
+		statement("case spvSwizzle::alpha:");
+		statement("    return x.a;");
+		end_scope();
+		end_scope();
+		statement("");
+		break;
+
+	case SPVFuncImplTextureSwizzle:
+		statement("// Wrapper function that swizzles texture samples and fetches.");
+		statement("template<typename T>");
+		statement("inline vec<T, 4> spvTextureSwizzle(vec<T, 4> x, uint s)");
+		begin_scope();
+		statement("if (!s)");
+		statement("    return x;");
+		statement("return vec<T, 4>(spvGetSwizzle(x, x.r, spvSwizzle((s >> 0) & 0xFF)), "
+		          "spvGetSwizzle(x, x.g, spvSwizzle((s >> 8) & 0xFF)), spvGetSwizzle(x, x.b, spvSwizzle((s >> 16) "
+		          "& 0xFF)), "
+		          "spvGetSwizzle(x, x.a, spvSwizzle((s >> 24) & 0xFF)));");
+		end_scope();
+		statement("");
+		statement("template<typename T>");
+		statement("inline T spvTextureSwizzle(T x, uint s)");
+		begin_scope();
+		statement("return spvTextureSwizzle(vec<T, 4>(x, 0, 0, 1), s).x;");
+		end_scope();
+		statement("");
+		break;
Ts>"); + statement("inline vec spvGatherSwizzle(const thread Tex& t, sampler s, " + "uint sw, component c, Ts... params) METAL_CONST_ARG(c)"); + begin_scope(); + statement("if (sw)"); + begin_scope(); + statement("switch (spvSwizzle((sw >> (uint(c) * 8)) & 0xFF))"); + begin_scope(); + statement("case spvSwizzle::none:"); + statement(" break;"); + statement("case spvSwizzle::zero:"); + statement(" return vec(0, 0, 0, 0);"); + statement("case spvSwizzle::one:"); + statement(" return vec(1, 1, 1, 1);"); + statement("case spvSwizzle::red:"); + statement(" return t.gather(s, spvForward(params)..., component::x);"); + statement("case spvSwizzle::green:"); + statement(" return t.gather(s, spvForward(params)..., component::y);"); + statement("case spvSwizzle::blue:"); + statement(" return t.gather(s, spvForward(params)..., component::z);"); + statement("case spvSwizzle::alpha:"); + statement(" return t.gather(s, spvForward(params)..., component::w);"); + end_scope(); + end_scope(); + + + statement("switch (c)"); + begin_scope(); + statement("case component::x:"); + statement(" return t.gather(s, spvForward(params)..., component::x);"); + statement("case component::y:"); + statement(" return t.gather(s, spvForward(params)..., component::y);"); + statement("case component::z:"); + statement(" return t.gather(s, spvForward(params)..., component::z);"); + statement("case component::w:"); + statement(" return t.gather(s, spvForward(params)..., component::w);"); + end_scope(); + end_scope(); + statement(""); + break; + + case SPVFuncImplGatherCompareSwizzle: + statement("// Wrapper function that swizzles depth texture gathers."); + statement("template class Tex, " + "typename... Ts>"); + statement("inline vec spvGatherCompareSwizzle(const thread Tex& t, sampler " + "s, uint sw, Ts... params) "); + begin_scope(); + statement("if (sw)"); + begin_scope(); + statement("switch (spvSwizzle(sw & 0xFF))"); + begin_scope(); + statement("case spvSwizzle::none:"); + statement("case spvSwizzle::red:"); + statement(" break;"); + statement("case spvSwizzle::zero:"); + statement("case spvSwizzle::green:"); + statement("case spvSwizzle::blue:"); + statement("case spvSwizzle::alpha:"); + statement(" return vec(0, 0, 0, 0);"); + statement("case spvSwizzle::one:"); + statement(" return vec(1, 1, 1, 1);"); + end_scope(); + end_scope(); + statement("return t.gather_compare(s, spvForward(params)...);"); + end_scope(); + statement(""); + break; + + case SPVFuncImplSubgroupBallot: + statement("inline uint4 spvSubgroupBallot(bool value)"); + begin_scope(); + statement("simd_vote vote = simd_ballot(value);"); + statement("// simd_ballot() returns a 64-bit integer-like object, but"); + statement("// SPIR-V callers expect a uint4. 
We must convert."); + statement("// FIXME: This won't include higher bits if Apple ever supports"); + statement("// 128 lanes in an SIMD-group."); + statement("return uint4((uint)((simd_vote::vote_t)vote & 0xFFFFFFFF), (uint)(((simd_vote::vote_t)vote >> " + "32) & 0xFFFFFFFF), 0, 0);"); + end_scope(); + statement(""); + break; + + case SPVFuncImplSubgroupBallotBitExtract: + statement("inline bool spvSubgroupBallotBitExtract(uint4 ballot, uint bit)"); + begin_scope(); + statement("return !!extract_bits(ballot[bit / 32], bit % 32, 1);"); + end_scope(); + statement(""); + break; + + case SPVFuncImplSubgroupBallotFindLSB: + statement("inline uint spvSubgroupBallotFindLSB(uint4 ballot)"); + begin_scope(); + statement("return select(ctz(ballot.x), select(32 + ctz(ballot.y), select(64 + ctz(ballot.z), select(96 + " + "ctz(ballot.w), uint(-1), ballot.w == 0), ballot.z == 0), ballot.y == 0), ballot.x == 0);"); + end_scope(); + statement(""); + break; + + case SPVFuncImplSubgroupBallotFindMSB: + statement("inline uint spvSubgroupBallotFindMSB(uint4 ballot)"); + begin_scope(); + statement("return select(128 - (clz(ballot.w) + 1), select(96 - (clz(ballot.z) + 1), select(64 - " + "(clz(ballot.y) + 1), select(32 - (clz(ballot.x) + 1), uint(-1), ballot.x == 0), ballot.y == 0), " + "ballot.z == 0), ballot.w == 0);"); + end_scope(); + statement(""); + break; + + case SPVFuncImplSubgroupBallotBitCount: + statement("inline uint spvSubgroupBallotBitCount(uint4 ballot)"); + begin_scope(); + statement("return popcount(ballot.x) + popcount(ballot.y) + popcount(ballot.z) + popcount(ballot.w);"); + end_scope(); + statement(""); + statement("inline uint spvSubgroupBallotInclusiveBitCount(uint4 ballot, uint gl_SubgroupInvocationID)"); + begin_scope(); + statement("uint4 mask = uint4(extract_bits(0xFFFFFFFF, 0, min(gl_SubgroupInvocationID + 1, 32u)), " + "extract_bits(0xFFFFFFFF, 0, (uint)max((int)gl_SubgroupInvocationID + 1 - 32, 0)), " + "uint2(0));"); + statement("return spvSubgroupBallotBitCount(ballot & mask);"); + end_scope(); + statement(""); + statement("inline uint spvSubgroupBallotExclusiveBitCount(uint4 ballot, uint gl_SubgroupInvocationID)"); + begin_scope(); + statement("uint4 mask = uint4(extract_bits(0xFFFFFFFF, 0, min(gl_SubgroupInvocationID, 32u)), " + "extract_bits(0xFFFFFFFF, 0, (uint)max((int)gl_SubgroupInvocationID - 32, 0)), uint2(0));"); + statement("return spvSubgroupBallotBitCount(ballot & mask);"); + end_scope(); + statement(""); + break; + + case SPVFuncImplSubgroupAllEqual: + + + + + statement("template"); + statement("inline bool spvSubgroupAllEqual(T value)"); + begin_scope(); + statement("return simd_all(value == simd_broadcast_first(value));"); + end_scope(); + statement(""); + statement("template<>"); + statement("inline bool spvSubgroupAllEqual(bool value)"); + begin_scope(); + statement("return simd_all(value) || !simd_any(value);"); + end_scope(); + statement(""); + break; + + case SPVFuncImplReflectScalar: + + statement("template"); + statement("inline T spvReflect(T i, T n)"); + begin_scope(); + statement("return i - T(2) * i * n * n;"); + end_scope(); + statement(""); + break; + + case SPVFuncImplRefractScalar: + + statement("template"); + statement("inline T spvRefract(T i, T n, T eta)"); + begin_scope(); + statement("T NoI = n * i;"); + statement("T NoI2 = NoI * NoI;"); + statement("T k = T(1) - eta * eta * (T(1) - NoI2);"); + statement("if (k < T(0))"); + begin_scope(); + statement("return T(0);"); + end_scope(); + statement("else"); + begin_scope(); + statement("return eta * 
i - (eta * NoI + sqrt(k)) * n;"); + end_scope(); + end_scope(); + statement(""); + break; + + case SPVFuncImplFaceForwardScalar: + + statement("template"); + statement("inline T spvFaceForward(T n, T i, T nref)"); + begin_scope(); + statement("return i * nref < T(0) ? n : -n;"); + end_scope(); + statement(""); + break; + + case SPVFuncImplChromaReconstructNearest2Plane: + statement("template"); + statement("inline vec spvChromaReconstructNearest(texture2d plane0, texture2d plane1, sampler " + "samp, float2 coord, LodOptions... options)"); + begin_scope(); + statement("vec ycbcr = vec(0, 0, 0, 1);"); + statement("ycbcr.g = plane0.sample(samp, coord, spvForward(options)...).r;"); + statement("ycbcr.br = plane1.sample(samp, coord, spvForward(options)...).rg;"); + statement("return ycbcr;"); + end_scope(); + statement(""); + break; + + case SPVFuncImplChromaReconstructNearest3Plane: + statement("template"); + statement("inline vec spvChromaReconstructNearest(texture2d plane0, texture2d plane1, " + "texture2d plane2, sampler samp, float2 coord, LodOptions... options)"); + begin_scope(); + statement("vec ycbcr = vec(0, 0, 0, 1);"); + statement("ycbcr.g = plane0.sample(samp, coord, spvForward(options)...).r;"); + statement("ycbcr.b = plane1.sample(samp, coord, spvForward(options)...).r;"); + statement("ycbcr.r = plane2.sample(samp, coord, spvForward(options)...).r;"); + statement("return ycbcr;"); + end_scope(); + statement(""); + break; + + case SPVFuncImplChromaReconstructLinear422CositedEven2Plane: + statement("template"); + statement("inline vec spvChromaReconstructLinear422CositedEven(texture2d plane0, texture2d " + "plane1, sampler samp, float2 coord, LodOptions... options)"); + begin_scope(); + statement("vec ycbcr = vec(0, 0, 0, 1);"); + statement("ycbcr.g = plane0.sample(samp, coord, spvForward(options)...).r;"); + statement("if (fract(coord.x * plane1.get_width()) != 0.0)"); + begin_scope(); + statement("ycbcr.br = vec(mix(plane1.sample(samp, coord, spvForward(options)...), " + "plane1.sample(samp, coord, spvForward(options)..., int2(1, 0)), 0.5).rg);"); + end_scope(); + statement("else"); + begin_scope(); + statement("ycbcr.br = plane1.sample(samp, coord, spvForward(options)...).rg;"); + end_scope(); + statement("return ycbcr;"); + end_scope(); + statement(""); + break; + + case SPVFuncImplChromaReconstructLinear422CositedEven3Plane: + statement("template"); + statement("inline vec spvChromaReconstructLinear422CositedEven(texture2d plane0, texture2d " + "plane1, texture2d plane2, sampler samp, float2 coord, LodOptions... 
options)"); + begin_scope(); + statement("vec ycbcr = vec(0, 0, 0, 1);"); + statement("ycbcr.g = plane0.sample(samp, coord, spvForward(options)...).r;"); + statement("if (fract(coord.x * plane1.get_width()) != 0.0)"); + begin_scope(); + statement("ycbcr.b = T(mix(plane1.sample(samp, coord, spvForward(options)...), " + "plane1.sample(samp, coord, spvForward(options)..., int2(1, 0)), 0.5).r);"); + statement("ycbcr.r = T(mix(plane2.sample(samp, coord, spvForward(options)...), " + "plane2.sample(samp, coord, spvForward(options)..., int2(1, 0)), 0.5).r);"); + end_scope(); + statement("else"); + begin_scope(); + statement("ycbcr.b = plane1.sample(samp, coord, spvForward(options)...).r;"); + statement("ycbcr.r = plane2.sample(samp, coord, spvForward(options)...).r;"); + end_scope(); + statement("return ycbcr;"); + end_scope(); + statement(""); + break; + + case SPVFuncImplChromaReconstructLinear422Midpoint2Plane: + statement("template"); + statement("inline vec spvChromaReconstructLinear422Midpoint(texture2d plane0, texture2d " + "plane1, sampler samp, float2 coord, LodOptions... options)"); + begin_scope(); + statement("vec ycbcr = vec(0, 0, 0, 1);"); + statement("ycbcr.g = plane0.sample(samp, coord, spvForward(options)...).r;"); + statement("int2 offs = int2(fract(coord.x * plane1.get_width()) != 0.0 ? 1 : -1, 0);"); + statement("ycbcr.br = vec(mix(plane1.sample(samp, coord, spvForward(options)...), " + "plane1.sample(samp, coord, spvForward(options)..., offs), 0.25).rg);"); + statement("return ycbcr;"); + end_scope(); + statement(""); + break; + + case SPVFuncImplChromaReconstructLinear422Midpoint3Plane: + statement("template"); + statement("inline vec spvChromaReconstructLinear422Midpoint(texture2d plane0, texture2d " + "plane1, texture2d plane2, sampler samp, float2 coord, LodOptions... options)"); + begin_scope(); + statement("vec ycbcr = vec(0, 0, 0, 1);"); + statement("ycbcr.g = plane0.sample(samp, coord, spvForward(options)...).r;"); + statement("int2 offs = int2(fract(coord.x * plane1.get_width()) != 0.0 ? 1 : -1, 0);"); + statement("ycbcr.b = T(mix(plane1.sample(samp, coord, spvForward(options)...), " + "plane1.sample(samp, coord, spvForward(options)..., offs), 0.25).r);"); + statement("ycbcr.r = T(mix(plane2.sample(samp, coord, spvForward(options)...), " + "plane2.sample(samp, coord, spvForward(options)..., offs), 0.25).r);"); + statement("return ycbcr;"); + end_scope(); + statement(""); + break; + + case SPVFuncImplChromaReconstructLinear420XCositedEvenYCositedEven2Plane: + statement("template"); + statement("inline vec spvChromaReconstructLinear420XCositedEvenYCositedEven(texture2d plane0, " + "texture2d plane1, sampler samp, float2 coord, LodOptions... 
options)"); + begin_scope(); + statement("vec ycbcr = vec(0, 0, 0, 1);"); + statement("ycbcr.g = plane0.sample(samp, coord, spvForward(options)...).r;"); + statement("float2 ab = fract(round(coord * float2(plane0.get_width(), plane0.get_height())) * 0.5);"); + statement("ycbcr.br = vec(mix(mix(plane1.sample(samp, coord, spvForward(options)...), " + "plane1.sample(samp, coord, spvForward(options)..., int2(1, 0)), ab.x), " + "mix(plane1.sample(samp, coord, spvForward(options)..., int2(0, 1)), " + "plane1.sample(samp, coord, spvForward(options)..., int2(1, 1)), ab.x), ab.y).rg);"); + statement("return ycbcr;"); + end_scope(); + statement(""); + break; + + case SPVFuncImplChromaReconstructLinear420XCositedEvenYCositedEven3Plane: + statement("template"); + statement("inline vec spvChromaReconstructLinear420XCositedEvenYCositedEven(texture2d plane0, " + "texture2d plane1, texture2d plane2, sampler samp, float2 coord, LodOptions... options)"); + begin_scope(); + statement("vec ycbcr = vec(0, 0, 0, 1);"); + statement("ycbcr.g = plane0.sample(samp, coord, spvForward(options)...).r;"); + statement("float2 ab = fract(round(coord * float2(plane0.get_width(), plane0.get_height())) * 0.5);"); + statement("ycbcr.b = T(mix(mix(plane1.sample(samp, coord, spvForward(options)...), " + "plane1.sample(samp, coord, spvForward(options)..., int2(1, 0)), ab.x), " + "mix(plane1.sample(samp, coord, spvForward(options)..., int2(0, 1)), " + "plane1.sample(samp, coord, spvForward(options)..., int2(1, 1)), ab.x), ab.y).r);"); + statement("ycbcr.r = T(mix(mix(plane2.sample(samp, coord, spvForward(options)...), " + "plane2.sample(samp, coord, spvForward(options)..., int2(1, 0)), ab.x), " + "mix(plane2.sample(samp, coord, spvForward(options)..., int2(0, 1)), " + "plane2.sample(samp, coord, spvForward(options)..., int2(1, 1)), ab.x), ab.y).r);"); + statement("return ycbcr;"); + end_scope(); + statement(""); + break; + + case SPVFuncImplChromaReconstructLinear420XMidpointYCositedEven2Plane: + statement("template"); + statement("inline vec spvChromaReconstructLinear420XMidpointYCositedEven(texture2d plane0, " + "texture2d plane1, sampler samp, float2 coord, LodOptions... options)"); + begin_scope(); + statement("vec ycbcr = vec(0, 0, 0, 1);"); + statement("ycbcr.g = plane0.sample(samp, coord, spvForward(options)...).r;"); + statement("float2 ab = fract((round(coord * float2(plane0.get_width(), plane0.get_height())) - float2(0.5, " + "0)) * 0.5);"); + statement("ycbcr.br = vec(mix(mix(plane1.sample(samp, coord, spvForward(options)...), " + "plane1.sample(samp, coord, spvForward(options)..., int2(1, 0)), ab.x), " + "mix(plane1.sample(samp, coord, spvForward(options)..., int2(0, 1)), " + "plane1.sample(samp, coord, spvForward(options)..., int2(1, 1)), ab.x), ab.y).rg);"); + statement("return ycbcr;"); + end_scope(); + statement(""); + break; + + case SPVFuncImplChromaReconstructLinear420XMidpointYCositedEven3Plane: + statement("template"); + statement("inline vec spvChromaReconstructLinear420XMidpointYCositedEven(texture2d plane0, " + "texture2d plane1, texture2d plane2, sampler samp, float2 coord, LodOptions... 
options)"); + begin_scope(); + statement("vec ycbcr = vec(0, 0, 0, 1);"); + statement("ycbcr.g = plane0.sample(samp, coord, spvForward(options)...).r;"); + statement("float2 ab = fract((round(coord * float2(plane0.get_width(), plane0.get_height())) - float2(0.5, " + "0)) * 0.5);"); + statement("ycbcr.b = T(mix(mix(plane1.sample(samp, coord, spvForward(options)...), " + "plane1.sample(samp, coord, spvForward(options)..., int2(1, 0)), ab.x), " + "mix(plane1.sample(samp, coord, spvForward(options)..., int2(0, 1)), " + "plane1.sample(samp, coord, spvForward(options)..., int2(1, 1)), ab.x), ab.y).r);"); + statement("ycbcr.r = T(mix(mix(plane2.sample(samp, coord, spvForward(options)...), " + "plane2.sample(samp, coord, spvForward(options)..., int2(1, 0)), ab.x), " + "mix(plane2.sample(samp, coord, spvForward(options)..., int2(0, 1)), " + "plane2.sample(samp, coord, spvForward(options)..., int2(1, 1)), ab.x), ab.y).r);"); + statement("return ycbcr;"); + end_scope(); + statement(""); + break; + + case SPVFuncImplChromaReconstructLinear420XCositedEvenYMidpoint2Plane: + statement("template"); + statement("inline vec spvChromaReconstructLinear420XCositedEvenYMidpoint(texture2d plane0, " + "texture2d plane1, sampler samp, float2 coord, LodOptions... options)"); + begin_scope(); + statement("vec ycbcr = vec(0, 0, 0, 1);"); + statement("ycbcr.g = plane0.sample(samp, coord, spvForward(options)...).r;"); + statement("float2 ab = fract((round(coord * float2(plane0.get_width(), plane0.get_height())) - float2(0, " + "0.5)) * 0.5);"); + statement("ycbcr.br = vec(mix(mix(plane1.sample(samp, coord, spvForward(options)...), " + "plane1.sample(samp, coord, spvForward(options)..., int2(1, 0)), ab.x), " + "mix(plane1.sample(samp, coord, spvForward(options)..., int2(0, 1)), " + "plane1.sample(samp, coord, spvForward(options)..., int2(1, 1)), ab.x), ab.y).rg);"); + statement("return ycbcr;"); + end_scope(); + statement(""); + break; + + case SPVFuncImplChromaReconstructLinear420XCositedEvenYMidpoint3Plane: + statement("template"); + statement("inline vec spvChromaReconstructLinear420XCositedEvenYMidpoint(texture2d plane0, " + "texture2d plane1, texture2d plane2, sampler samp, float2 coord, LodOptions... options)"); + begin_scope(); + statement("vec ycbcr = vec(0, 0, 0, 1);"); + statement("ycbcr.g = plane0.sample(samp, coord, spvForward(options)...).r;"); + statement("float2 ab = fract((round(coord * float2(plane0.get_width(), plane0.get_height())) - float2(0, " + "0.5)) * 0.5);"); + statement("ycbcr.b = T(mix(mix(plane1.sample(samp, coord, spvForward(options)...), " + "plane1.sample(samp, coord, spvForward(options)..., int2(1, 0)), ab.x), " + "mix(plane1.sample(samp, coord, spvForward(options)..., int2(0, 1)), " + "plane1.sample(samp, coord, spvForward(options)..., int2(1, 1)), ab.x), ab.y).r);"); + statement("ycbcr.r = T(mix(mix(plane2.sample(samp, coord, spvForward(options)...), " + "plane2.sample(samp, coord, spvForward(options)..., int2(1, 0)), ab.x), " + "mix(plane2.sample(samp, coord, spvForward(options)..., int2(0, 1)), " + "plane2.sample(samp, coord, spvForward(options)..., int2(1, 1)), ab.x), ab.y).r);"); + statement("return ycbcr;"); + end_scope(); + statement(""); + break; + + case SPVFuncImplChromaReconstructLinear420XMidpointYMidpoint2Plane: + statement("template"); + statement("inline vec spvChromaReconstructLinear420XMidpointYMidpoint(texture2d plane0, " + "texture2d plane1, sampler samp, float2 coord, LodOptions... 
options)"); + begin_scope(); + statement("vec ycbcr = vec(0, 0, 0, 1);"); + statement("ycbcr.g = plane0.sample(samp, coord, spvForward(options)...).r;"); + statement("float2 ab = fract((round(coord * float2(plane0.get_width(), plane0.get_height())) - float2(0.5, " + "0.5)) * 0.5);"); + statement("ycbcr.br = vec(mix(mix(plane1.sample(samp, coord, spvForward(options)...), " + "plane1.sample(samp, coord, spvForward(options)..., int2(1, 0)), ab.x), " + "mix(plane1.sample(samp, coord, spvForward(options)..., int2(0, 1)), " + "plane1.sample(samp, coord, spvForward(options)..., int2(1, 1)), ab.x), ab.y).rg);"); + statement("return ycbcr;"); + end_scope(); + statement(""); + break; + + case SPVFuncImplChromaReconstructLinear420XMidpointYMidpoint3Plane: + statement("template"); + statement("inline vec spvChromaReconstructLinear420XMidpointYMidpoint(texture2d plane0, " + "texture2d plane1, texture2d plane2, sampler samp, float2 coord, LodOptions... options)"); + begin_scope(); + statement("vec ycbcr = vec(0, 0, 0, 1);"); + statement("ycbcr.g = plane0.sample(samp, coord, spvForward(options)...).r;"); + statement("float2 ab = fract((round(coord * float2(plane0.get_width(), plane0.get_height())) - float2(0.5, " + "0.5)) * 0.5);"); + statement("ycbcr.b = T(mix(mix(plane1.sample(samp, coord, spvForward(options)...), " + "plane1.sample(samp, coord, spvForward(options)..., int2(1, 0)), ab.x), " + "mix(plane1.sample(samp, coord, spvForward(options)..., int2(0, 1)), " + "plane1.sample(samp, coord, spvForward(options)..., int2(1, 1)), ab.x), ab.y).r);"); + statement("ycbcr.r = T(mix(mix(plane2.sample(samp, coord, spvForward(options)...), " + "plane2.sample(samp, coord, spvForward(options)..., int2(1, 0)), ab.x), " + "mix(plane2.sample(samp, coord, spvForward(options)..., int2(0, 1)), " + "plane2.sample(samp, coord, spvForward(options)..., int2(1, 1)), ab.x), ab.y).r);"); + statement("return ycbcr;"); + end_scope(); + statement(""); + break; + + case SPVFuncImplExpandITUFullRange: + statement("template"); + statement("inline vec spvExpandITUFullRange(vec ycbcr, int n)"); + begin_scope(); + statement("ycbcr.br -= exp2(T(n-1))/(exp2(T(n))-1);"); + statement("return ycbcr;"); + end_scope(); + statement(""); + break; + + case SPVFuncImplExpandITUNarrowRange: + statement("template"); + statement("inline vec spvExpandITUNarrowRange(vec ycbcr, int n)"); + begin_scope(); + statement("ycbcr.g = (ycbcr.g * (exp2(T(n)) - 1) - ldexp(T(16), n - 8))/ldexp(T(219), n - 8);"); + statement("ycbcr.br = (ycbcr.br * (exp2(T(n)) - 1) - ldexp(T(128), n - 8))/ldexp(T(224), n - 8);"); + statement("return ycbcr;"); + end_scope(); + statement(""); + break; + + case SPVFuncImplConvertYCbCrBT709: + statement("// cf. Khronos Data Format Specification, section 15.1.1"); + statement("constant float3x3 spvBT709Factors = {{1, 1, 1}, {0, -0.13397432/0.7152, 1.8556}, {1.5748, " + "-0.33480248/0.7152, 0}};"); + statement(""); + statement("template"); + statement("inline vec spvConvertYCbCrBT709(vec ycbcr)"); + begin_scope(); + statement("vec rgba;"); + statement("rgba.rgb = vec(spvBT709Factors * ycbcr.gbr);"); + statement("rgba.a = ycbcr.a;"); + statement("return rgba;"); + end_scope(); + statement(""); + break; + + case SPVFuncImplConvertYCbCrBT601: + statement("// cf. 
+
+	case SPVFuncImplConvertYCbCrBT601:
+		statement("// cf. Khronos Data Format Specification, section 15.1.2");
+		statement("constant float3x3 spvBT601Factors = {{1, 1, 1}, {0, -0.202008/0.587, 1.772}, {1.402, "
+		          "-0.419198/0.587, 0}};");
+		statement("");
+		statement("template<typename T>");
+		statement("inline vec<T, 4> spvConvertYCbCrBT601(vec<T, 4> ycbcr)");
+		begin_scope();
+		statement("vec<T, 4> rgba;");
+		statement("rgba.rgb = vec<T, 3>(spvBT601Factors * ycbcr.gbr);");
+		statement("rgba.a = ycbcr.a;");
+		statement("return rgba;");
+		end_scope();
+		statement("");
+		break;
+
+	case SPVFuncImplConvertYCbCrBT2020:
+		statement("// cf. Khronos Data Format Specification, section 15.1.3");
+		statement("constant float3x3 spvBT2020Factors = {{1, 1, 1}, {0, -0.11156702/0.6780, 1.8814}, {1.4746, "
+		          "-0.38737742/0.6780, 0}};");
+		statement("");
+		statement("template<typename T>");
+		statement("inline vec<T, 4> spvConvertYCbCrBT2020(vec<T, 4> ycbcr)");
+		begin_scope();
+		statement("vec<T, 4> rgba;");
+		statement("rgba.rgb = vec<T, 3>(spvBT2020Factors * ycbcr.gbr);");
+		statement("rgba.a = ycbcr.a;");
+		statement("return rgba;");
+		end_scope();
+		statement("");
+		break;
+
+	case SPVFuncImplDynamicImageSampler:
+		statement("enum class spvFormatResolution");
+		begin_scope();
+		statement("_444 = 0,");
+		statement("_422,");
+		statement("_420");
+		end_scope_decl();
+		statement("");
+		statement("enum class spvChromaFilter");
+		begin_scope();
+		statement("nearest = 0,");
+		statement("linear");
+		end_scope_decl();
+		statement("");
+		statement("enum class spvXChromaLocation");
+		begin_scope();
+		statement("cosited_even = 0,");
+		statement("midpoint");
+		end_scope_decl();
+		statement("");
+		statement("enum class spvYChromaLocation");
+		begin_scope();
+		statement("cosited_even = 0,");
+		statement("midpoint");
+		end_scope_decl();
+		statement("");
+		statement("enum class spvYCbCrModelConversion");
+		begin_scope();
+		statement("rgb_identity = 0,");
+		statement("ycbcr_identity,");
+		statement("ycbcr_bt_709,");
+		statement("ycbcr_bt_601,");
+		statement("ycbcr_bt_2020");
+		end_scope_decl();
+		statement("");
+		statement("enum class spvYCbCrRange");
+		begin_scope();
+		statement("itu_full = 0,");
+		statement("itu_narrow");
+		end_scope_decl();
+		statement("");
t) thread : val(build(t...)) {}"); + statement("constexpr spvYCbCrSampler(const thread spvYCbCrSampler& s) thread = default;"); + statement(""); + statement("spvFormatResolution get_resolution() const thread"); + begin_scope(); + statement("return spvFormatResolution((val & resolution_mask) >> resolution_base);"); + end_scope(); + statement("spvChromaFilter get_chroma_filter() const thread"); + begin_scope(); + statement("return spvChromaFilter((val & chroma_filter_mask) >> chroma_filter_base);"); + end_scope(); + statement("spvXChromaLocation get_x_chroma_offset() const thread"); + begin_scope(); + statement("return spvXChromaLocation((val & x_chroma_off_mask) >> x_chroma_off_base);"); + end_scope(); + statement("spvYChromaLocation get_y_chroma_offset() const thread"); + begin_scope(); + statement("return spvYChromaLocation((val & y_chroma_off_mask) >> y_chroma_off_base);"); + end_scope(); + statement("spvYCbCrModelConversion get_ycbcr_model() const thread"); + begin_scope(); + statement("return spvYCbCrModelConversion((val & ycbcr_model_mask) >> ycbcr_model_base);"); + end_scope(); + statement("spvYCbCrRange get_ycbcr_range() const thread"); + begin_scope(); + statement("return spvYCbCrRange((val & ycbcr_range_mask) >> ycbcr_range_base);"); + end_scope(); + statement("int get_bpc() const thread { return (val & bpc_mask) >> bpc_base; }"); + statement(""); + statement("private:"); + statement("ushort val;"); + statement(""); + statement("constexpr static constant ushort resolution_bits = 2;"); + statement("constexpr static constant ushort chroma_filter_bits = 2;"); + statement("constexpr static constant ushort x_chroma_off_bit = 1;"); + statement("constexpr static constant ushort y_chroma_off_bit = 1;"); + statement("constexpr static constant ushort ycbcr_model_bits = 3;"); + statement("constexpr static constant ushort ycbcr_range_bit = 1;"); + statement("constexpr static constant ushort bpc_bits = 6;"); + statement(""); + statement("constexpr static constant ushort resolution_base = 0;"); + statement("constexpr static constant ushort chroma_filter_base = 2;"); + statement("constexpr static constant ushort x_chroma_off_base = 4;"); + statement("constexpr static constant ushort y_chroma_off_base = 5;"); + statement("constexpr static constant ushort ycbcr_model_base = 6;"); + statement("constexpr static constant ushort ycbcr_range_base = 9;"); + statement("constexpr static constant ushort bpc_base = 10;"); + statement(""); + statement( + "constexpr static constant ushort resolution_mask = ((1 << resolution_bits) - 1) << resolution_base;"); + statement("constexpr static constant ushort chroma_filter_mask = ((1 << chroma_filter_bits) - 1) << " + "chroma_filter_base;"); + statement("constexpr static constant ushort x_chroma_off_mask = ((1 << x_chroma_off_bit) - 1) << " + "x_chroma_off_base;"); + statement("constexpr static constant ushort y_chroma_off_mask = ((1 << y_chroma_off_bit) - 1) << " + "y_chroma_off_base;"); + statement("constexpr static constant ushort ycbcr_model_mask = ((1 << ycbcr_model_bits) - 1) << " + "ycbcr_model_base;"); + statement("constexpr static constant ushort ycbcr_range_mask = ((1 << ycbcr_range_bit) - 1) << " + "ycbcr_range_base;"); + statement("constexpr static constant ushort bpc_mask = ((1 << bpc_bits) - 1) << bpc_base;"); + statement(""); + statement("static constexpr ushort build()"); + begin_scope(); + statement("return 0;"); + end_scope(); + statement(""); + statement("template"); + statement("static constexpr ushort build(spvFormatResolution res, Ts... 
t)"); + begin_scope(); + statement("return (ushort(res) << resolution_base) | (build(t...) & ~resolution_mask);"); + end_scope(); + statement(""); + statement("template"); + statement("static constexpr ushort build(spvChromaFilter filt, Ts... t)"); + begin_scope(); + statement("return (ushort(filt) << chroma_filter_base) | (build(t...) & ~chroma_filter_mask);"); + end_scope(); + statement(""); + statement("template"); + statement("static constexpr ushort build(spvXChromaLocation loc, Ts... t)"); + begin_scope(); + statement("return (ushort(loc) << x_chroma_off_base) | (build(t...) & ~x_chroma_off_mask);"); + end_scope(); + statement(""); + statement("template"); + statement("static constexpr ushort build(spvYChromaLocation loc, Ts... t)"); + begin_scope(); + statement("return (ushort(loc) << y_chroma_off_base) | (build(t...) & ~y_chroma_off_mask);"); + end_scope(); + statement(""); + statement("template"); + statement("static constexpr ushort build(spvYCbCrModelConversion model, Ts... t)"); + begin_scope(); + statement("return (ushort(model) << ycbcr_model_base) | (build(t...) & ~ycbcr_model_mask);"); + end_scope(); + statement(""); + statement("template"); + statement("static constexpr ushort build(spvYCbCrRange range, Ts... t)"); + begin_scope(); + statement("return (ushort(range) << ycbcr_range_base) | (build(t...) & ~ycbcr_range_mask);"); + end_scope(); + statement(""); + statement("template"); + statement("static constexpr ushort build(spvComponentBits bpc, Ts... t)"); + begin_scope(); + statement("return (ushort(bpc.value) << bpc_base) | (build(t...) & ~bpc_mask);"); + end_scope(); + end_scope_decl(); + statement(""); + statement("// A class which can hold up to three textures and a sampler, including"); + statement("// Y'CbCr conversion info, used to pass combined image-samplers"); + statement("// dynamically to functions."); + statement("template"); + statement("struct spvDynamicImageSampler"); + begin_scope(); + statement("texture2d plane0;"); + statement("texture2d plane1;"); + statement("texture2d plane2;"); + statement("sampler samp;"); + statement("spvYCbCrSampler ycbcr_samp;"); + statement("uint swizzle = 0;"); + statement(""); + if (msl_options.swizzle_texture_samples) + { + statement("constexpr spvDynamicImageSampler(texture2d tex, sampler samp, uint sw) thread :"); + statement(" plane0(tex), samp(samp), swizzle(sw) {}"); + } + else + { + statement("constexpr spvDynamicImageSampler(texture2d tex, sampler samp) thread :"); + statement(" plane0(tex), samp(samp) {}"); + } + statement("constexpr spvDynamicImageSampler(texture2d tex, sampler samp, spvYCbCrSampler ycbcr_samp, " + "uint sw) thread :"); + statement(" plane0(tex), samp(samp), ycbcr_samp(ycbcr_samp), swizzle(sw) {}"); + statement("constexpr spvDynamicImageSampler(texture2d plane0, texture2d plane1,"); + statement(" sampler samp, spvYCbCrSampler ycbcr_samp, uint sw) thread :"); + statement(" plane0(plane0), plane1(plane1), samp(samp), ycbcr_samp(ycbcr_samp), swizzle(sw) {}"); + statement( + "constexpr spvDynamicImageSampler(texture2d plane0, texture2d plane1, texture2d plane2,"); + statement(" sampler samp, spvYCbCrSampler ycbcr_samp, uint sw) thread :"); + statement(" plane0(plane0), plane1(plane1), plane2(plane2), samp(samp), ycbcr_samp(ycbcr_samp), " + "swizzle(sw) {}"); + statement(""); + + statement("template"); + statement("vec do_sample(float2 coord, LodOptions... 
options) const thread"); + begin_scope(); + statement("if (!is_null_texture(plane1))"); + begin_scope(); + statement("if (ycbcr_samp.get_resolution() == spvFormatResolution::_444 ||"); + statement(" ycbcr_samp.get_chroma_filter() == spvChromaFilter::nearest)"); + begin_scope(); + statement("if (!is_null_texture(plane2))"); + statement(" return spvChromaReconstructNearest(plane0, plane1, plane2, samp, coord,"); + statement(" spvForward(options)...);"); + statement( + "return spvChromaReconstructNearest(plane0, plane1, samp, coord, spvForward(options)...);"); + end_scope(); + statement("switch (ycbcr_samp.get_resolution())"); + begin_scope(); + statement("case spvFormatResolution::_444: break;"); + statement("case spvFormatResolution::_422:"); + begin_scope(); + statement("switch (ycbcr_samp.get_x_chroma_offset())"); + begin_scope(); + statement("case spvXChromaLocation::cosited_even:"); + statement(" if (!is_null_texture(plane2))"); + statement(" return spvChromaReconstructLinear422CositedEven("); + statement(" plane0, plane1, plane2, samp,"); + statement(" coord, spvForward(options)...);"); + statement(" return spvChromaReconstructLinear422CositedEven("); + statement(" plane0, plane1, samp, coord,"); + statement(" spvForward(options)...);"); + statement("case spvXChromaLocation::midpoint:"); + statement(" if (!is_null_texture(plane2))"); + statement(" return spvChromaReconstructLinear422Midpoint("); + statement(" plane0, plane1, plane2, samp,"); + statement(" coord, spvForward(options)...);"); + statement(" return spvChromaReconstructLinear422Midpoint("); + statement(" plane0, plane1, samp, coord,"); + statement(" spvForward(options)...);"); + end_scope(); + end_scope(); + statement("case spvFormatResolution::_420:"); + begin_scope(); + statement("switch (ycbcr_samp.get_x_chroma_offset())"); + begin_scope(); + statement("case spvXChromaLocation::cosited_even:"); + begin_scope(); + statement("switch (ycbcr_samp.get_y_chroma_offset())"); + begin_scope(); + statement("case spvYChromaLocation::cosited_even:"); + statement(" if (!is_null_texture(plane2))"); + statement(" return spvChromaReconstructLinear420XCositedEvenYCositedEven("); + statement(" plane0, plane1, plane2, samp,"); + statement(" coord, spvForward(options)...);"); + statement(" return spvChromaReconstructLinear420XCositedEvenYCositedEven("); + statement(" plane0, plane1, samp, coord,"); + statement(" spvForward(options)...);"); + statement("case spvYChromaLocation::midpoint:"); + statement(" if (!is_null_texture(plane2))"); + statement(" return spvChromaReconstructLinear420XCositedEvenYMidpoint("); + statement(" plane0, plane1, plane2, samp,"); + statement(" coord, spvForward(options)...);"); + statement(" return spvChromaReconstructLinear420XCositedEvenYMidpoint("); + statement(" plane0, plane1, samp, coord,"); + statement(" spvForward(options)...);"); + end_scope(); + end_scope(); + statement("case spvXChromaLocation::midpoint:"); + begin_scope(); + statement("switch (ycbcr_samp.get_y_chroma_offset())"); + begin_scope(); + statement("case spvYChromaLocation::cosited_even:"); + statement(" if (!is_null_texture(plane2))"); + statement(" return spvChromaReconstructLinear420XMidpointYCositedEven("); + statement(" plane0, plane1, plane2, samp,"); + statement(" coord, spvForward(options)...);"); + statement(" return spvChromaReconstructLinear420XMidpointYCositedEven("); + statement(" plane0, plane1, samp, coord,"); + statement(" spvForward(options)...);"); + statement("case spvYChromaLocation::midpoint:"); + statement(" if 
(!is_null_texture(plane2))"); + statement(" return spvChromaReconstructLinear420XMidpointYMidpoint("); + statement(" plane0, plane1, plane2, samp,"); + statement(" coord, spvForward(options)...);"); + statement(" return spvChromaReconstructLinear420XMidpointYMidpoint("); + statement(" plane0, plane1, samp, coord,"); + statement(" spvForward(options)...);"); + end_scope(); + end_scope(); + end_scope(); + end_scope(); + end_scope(); + end_scope(); + statement("return plane0.sample(samp, coord, spvForward(options)...);"); + end_scope(); + statement("template "); + statement("vec sample(float2 coord, LodOptions... options) const thread"); + begin_scope(); + statement( + "vec s = spvTextureSwizzle(do_sample(coord, spvForward(options)...), swizzle);"); + statement("if (ycbcr_samp.get_ycbcr_model() == spvYCbCrModelConversion::rgb_identity)"); + statement(" return s;"); + statement(""); + statement("switch (ycbcr_samp.get_ycbcr_range())"); + begin_scope(); + statement("case spvYCbCrRange::itu_full:"); + statement(" s = spvExpandITUFullRange(s, ycbcr_samp.get_bpc());"); + statement(" break;"); + statement("case spvYCbCrRange::itu_narrow:"); + statement(" s = spvExpandITUNarrowRange(s, ycbcr_samp.get_bpc());"); + statement(" break;"); + end_scope(); + statement(""); + statement("switch (ycbcr_samp.get_ycbcr_model())"); + begin_scope(); + statement("case spvYCbCrModelConversion::rgb_identity:"); + statement("case spvYCbCrModelConversion::ycbcr_identity:"); + statement(" return s;"); + statement("case spvYCbCrModelConversion::ycbcr_bt_709:"); + statement(" return spvConvertYCbCrBT709(s);"); + statement("case spvYCbCrModelConversion::ycbcr_bt_601:"); + statement(" return spvConvertYCbCrBT601(s);"); + statement("case spvYCbCrModelConversion::ycbcr_bt_2020:"); + statement(" return spvConvertYCbCrBT2020(s);"); + end_scope(); + end_scope(); + statement(""); + + statement("vec sample(float2 coord, int2 offset) const thread"); + begin_scope(); + if (msl_options.swizzle_texture_samples) + statement("return spvTextureSwizzle(plane0.sample(samp, coord, offset), swizzle);"); + else + statement("return plane0.sample(samp, coord, offset);"); + end_scope(); + statement("template"); + statement("vec sample(float2 coord, lod_options options, int2 offset) const thread"); + begin_scope(); + if (msl_options.swizzle_texture_samples) + statement("return spvTextureSwizzle(plane0.sample(samp, coord, options, offset), swizzle);"); + else + statement("return plane0.sample(samp, coord, options, offset);"); + end_scope(); + statement("#if __HAVE_MIN_LOD_CLAMP__"); + statement("vec sample(float2 coord, bias b, min_lod_clamp min_lod, int2 offset) const thread"); + begin_scope(); + statement("return plane0.sample(samp, coord, b, min_lod, offset);"); + end_scope(); + statement( + "vec sample(float2 coord, gradient2d grad, min_lod_clamp min_lod, int2 offset) const thread"); + begin_scope(); + statement("return plane0.sample(samp, coord, grad, min_lod, offset);"); + end_scope(); + statement("#endif"); + statement(""); + + statement("vec read(uint2 coord, uint lod = 0) const thread"); + begin_scope(); + statement("return plane0.read(coord, lod);"); + end_scope(); + statement(""); + statement("vec gather(float2 coord, int2 offset = int2(0), component c = component::x) const thread"); + begin_scope(); + if (msl_options.swizzle_texture_samples) + statement("return spvGatherSwizzle(plane0, samp, swizzle, c, coord, offset);"); + else + statement("return plane0.gather(samp, coord, offset, c);"); + end_scope(); + end_scope_decl(); + 
statement(""); + + default: + break; + } + } +} + + + +void CompilerMSL::declare_undefined_values() +{ + bool emitted = false; + ir.for_each_typed_id([&](uint32_t, SPIRUndef &undef) { + auto &type = this->get(undef.basetype); + statement("constant ", variable_decl(type, to_name(undef.self), undef.self), " = {};"); + emitted = true; + }); + + if (emitted) + statement(""); +} + +void CompilerMSL::declare_constant_arrays() +{ + bool fully_inlined = ir.ids_for_type[TypeFunction].size() == 1; + + + + bool emitted = false; + + ir.for_each_typed_id([&](uint32_t, SPIRConstant &c) { + if (c.specialization) + return; + + auto &type = this->get(c.constant_type); + + + + + if (!type.array.empty() && (!fully_inlined || is_scalar(type) || is_vector(type))) + { + auto name = to_name(c.self); + statement("constant ", variable_decl(type, name), " = ", constant_expression(c), ";"); + emitted = true; + } + }); + + if (emitted) + statement(""); +} + + +void CompilerMSL::declare_complex_constant_arrays() +{ + + + bool fully_inlined = ir.ids_for_type[TypeFunction].size() == 1; + if (!fully_inlined) + return; + + + + bool emitted = false; + + ir.for_each_typed_id([&](uint32_t, SPIRConstant &c) { + if (c.specialization) + return; + + auto &type = this->get(c.constant_type); + if (!type.array.empty() && !(is_scalar(type) || is_vector(type))) + { + auto name = to_name(c.self); + statement("", variable_decl(type, name), " = ", constant_expression(c), ";"); + emitted = true; + } + }); + + if (emitted) + statement(""); +} + +void CompilerMSL::emit_resources() +{ + declare_constant_arrays(); + declare_undefined_values(); + + + emit_interface_block(stage_out_var_id); + emit_interface_block(patch_stage_out_var_id); + emit_interface_block(stage_in_var_id); + emit_interface_block(patch_stage_in_var_id); +} + + +void CompilerMSL::emit_specialization_constants_and_structs() +{ + SpecializationConstant wg_x, wg_y, wg_z; + ID workgroup_size_id = get_work_group_size_specialization_constants(wg_x, wg_y, wg_z); + bool emitted = false; + + unordered_set declared_structs; + unordered_set aligned_structs; + + + + + + + ir.for_each_typed_id([&](uint32_t type_id, const SPIRType &type) { + if (type.basetype == SPIRType::Struct && + has_extended_decoration(type_id, SPIRVCrossDecorationBufferBlockRepacked)) + mark_scalar_layout_structs(type); + }); + + + + + auto loop_lock = ir.create_loop_soft_lock(); + + for (auto &id_ : ir.ids_for_constant_or_type) + { + auto &id = ir.ids[id_]; + + if (id.get_type() == TypeConstant) + { + auto &c = id.get(); + + if (c.self == workgroup_size_id) + { + + + + statement("constant uint3 ", builtin_to_glsl(BuiltInWorkgroupSize, StorageClassWorkgroup), + " [[maybe_unused]] = ", constant_expression(get(workgroup_size_id)), ";"); + emitted = true; + } + else if (c.specialization) + { + auto &type = get(c.constant_type); + string sc_type_name = type_to_glsl(type); + string sc_name = to_name(c.self); + string sc_tmp_name = sc_name + "_tmp"; + + + + + + + if (msl_options.supports_msl_version(1, 2) && has_decoration(c.self, DecorationSpecId) && + !c.is_used_as_array_length) + { + uint32_t constant_id = get_decoration(c.self, DecorationSpecId); + + statement("constant ", sc_type_name, " ", sc_tmp_name, " [[function_constant(", constant_id, + ")]];"); + statement("constant ", sc_type_name, " ", sc_name, " = is_function_constant_defined(", sc_tmp_name, + ") ? 
", sc_tmp_name, " : ", constant_expression(c), ";"); + } + else if (has_decoration(c.self, DecorationSpecId)) + { + + c.specialization_constant_macro_name = + constant_value_macro_name(get_decoration(c.self, DecorationSpecId)); + + statement("#ifndef ", c.specialization_constant_macro_name); + statement("#define ", c.specialization_constant_macro_name, " ", constant_expression(c)); + statement("#endif"); + statement("constant ", sc_type_name, " ", sc_name, " = ", c.specialization_constant_macro_name, + ";"); + } + else + { + + statement("constant ", sc_type_name, " ", sc_name, " = ", constant_expression(c), ";"); + } + emitted = true; + } + } + else if (id.get_type() == TypeConstantOp) + { + auto &c = id.get(); + auto &type = get(c.basetype); + auto name = to_name(c.self); + statement("constant ", variable_decl(type, name), " = ", constant_op_expression(c), ";"); + emitted = true; + } + else if (id.get_type() == TypeType) + { + + + auto &type = id.get(); + TypeID type_id = type.self; + + bool is_struct = (type.basetype == SPIRType::Struct) && type.array.empty(); + bool is_block = + has_decoration(type.self, DecorationBlock) || has_decoration(type.self, DecorationBufferBlock); + + bool is_builtin_block = is_block && is_builtin_type(type); + bool is_declarable_struct = is_struct && !is_builtin_block; + + + if (stage_out_var_id && get_stage_out_struct_type().self == type_id) + is_declarable_struct = false; + if (patch_stage_out_var_id && get_patch_stage_out_struct_type().self == type_id) + is_declarable_struct = false; + if (stage_in_var_id && get_stage_in_struct_type().self == type_id) + is_declarable_struct = false; + if (patch_stage_in_var_id && get_patch_stage_in_struct_type().self == type_id) + is_declarable_struct = false; + + + if (is_declarable_struct && declared_structs.count(type_id) == 0) + { + if (emitted) + statement(""); + emitted = false; + + declared_structs.insert(type_id); + + if (has_extended_decoration(type_id, SPIRVCrossDecorationBufferBlockRepacked)) + align_struct(type, aligned_structs); + + + emit_struct(get(type_id)); + } + } + } + + if (emitted) + statement(""); +} + +void CompilerMSL::emit_binary_unord_op(uint32_t result_type, uint32_t result_id, uint32_t op0, uint32_t op1, + const char *op) +{ + bool forward = should_forward(op0) && should_forward(op1); + emit_op(result_type, result_id, + join("(isunordered(", to_enclosed_unpacked_expression(op0), ", ", to_enclosed_unpacked_expression(op1), + ") || ", to_enclosed_unpacked_expression(op0), " ", op, " ", to_enclosed_unpacked_expression(op1), + ")"), + forward); + + inherit_expression_dependencies(result_id, op0); + inherit_expression_dependencies(result_id, op1); +} + +bool CompilerMSL::emit_tessellation_io_load(uint32_t result_type_id, uint32_t id, uint32_t ptr) +{ + auto &ptr_type = expression_type(ptr); + auto &result_type = get(result_type_id); + if (ptr_type.storage != StorageClassInput && ptr_type.storage != StorageClassOutput) + return false; + if (ptr_type.storage == StorageClassOutput && get_execution_model() == ExecutionModelTessellationEvaluation) + return false; + + bool flat_data_type = is_matrix(result_type) || is_array(result_type) || result_type.basetype == SPIRType::Struct; + if (!flat_data_type) + return false; + + if (has_decoration(ptr, DecorationPatch)) + return false; + + + + + string expr; + + uint32_t interface_index = get_extended_decoration(ptr, SPIRVCrossDecorationInterfaceMemberIndex); + auto *var = maybe_get_backing_variable(ptr); + bool ptr_is_io_variable = ir.ids[ptr].get_type() == 
+
+bool CompilerMSL::emit_tessellation_io_load(uint32_t result_type_id, uint32_t id, uint32_t ptr)
+{
+	auto &ptr_type = expression_type(ptr);
+	auto &result_type = get<SPIRType>(result_type_id);
+	if (ptr_type.storage != StorageClassInput && ptr_type.storage != StorageClassOutput)
+		return false;
+	if (ptr_type.storage == StorageClassOutput && get_execution_model() == ExecutionModelTessellationEvaluation)
+		return false;
+
+	bool flat_data_type = is_matrix(result_type) || is_array(result_type) || result_type.basetype == SPIRType::Struct;
+	if (!flat_data_type)
+		return false;
+
+	if (has_decoration(ptr, DecorationPatch))
+		return false;
+
+
+
+
+	string expr;
+
+	uint32_t interface_index = get_extended_decoration(ptr, SPIRVCrossDecorationInterfaceMemberIndex);
+	auto *var = maybe_get_backing_variable(ptr);
+	bool ptr_is_io_variable = ir.ids[ptr].get_type() == TypeVariable;
+
+	const auto &iface_type = expression_type(stage_in_ptr_var_id);
+
+	if (result_type.array.size() > 2)
+	{
+		SPIRV_CROSS_THROW("Cannot load tessellation IO variables with more than 2 dimensions.");
+	}
+	else if (result_type.array.size() == 2)
+	{
+		if (!ptr_is_io_variable)
+			SPIRV_CROSS_THROW("An array-of-array must be loaded directly from an IO variable.");
+		if (interface_index == uint32_t(-1))
+			SPIRV_CROSS_THROW("Interface index is unknown. Cannot continue.");
+		if (result_type.basetype == SPIRType::Struct || is_matrix(result_type))
+			SPIRV_CROSS_THROW("Cannot load array-of-array of composite type in tessellation IO.");
+
+		expr += type_to_glsl(result_type) + "({ ";
+		uint32_t num_control_points = to_array_size_literal(result_type, 1);
+		uint32_t base_interface_index = interface_index;
+
+		auto &sub_type = get<SPIRType>(result_type.parent_type);
+
+		for (uint32_t i = 0; i < num_control_points; i++)
+		{
+			expr += type_to_glsl(sub_type) + "({ ";
+			interface_index = base_interface_index;
+			uint32_t array_size = to_array_size_literal(result_type, 0);
+			for (uint32_t j = 0; j < array_size; j++, interface_index++)
+			{
+				const uint32_t indices[2] = { i, interface_index };
+
+				AccessChainMeta meta;
+				expr += access_chain_internal(stage_in_ptr_var_id, indices, 2,
+				                              ACCESS_CHAIN_INDEX_IS_LITERAL_BIT | ACCESS_CHAIN_PTR_CHAIN_BIT, &meta);
+
+				if (j + 1 < array_size)
+					expr += ", ";
+			}
+			expr += " })";
+			if (i + 1 < num_control_points)
+				expr += ", ";
+		}
+		expr += " })";
+	}
+	else if (result_type.basetype == SPIRType::Struct)
+	{
+		bool is_array_of_struct = is_array(result_type);
+		if (is_array_of_struct && !ptr_is_io_variable)
+			SPIRV_CROSS_THROW("Loading an array of structs must come directly from an IO variable.");
+
+		uint32_t num_control_points = 1;
+		if (is_array_of_struct)
+		{
+			num_control_points = to_array_size_literal(result_type, 0);
+			expr += type_to_glsl(result_type) + "({ ";
+		}
+
+		auto &struct_type = is_array_of_struct ? get<SPIRType>(result_type.parent_type) : result_type;
+		assert(struct_type.array.empty());
+
+		for (uint32_t i = 0; i < num_control_points; i++)
+		{
+			expr += type_to_glsl(struct_type) + "{ ";
+			for (uint32_t j = 0; j < uint32_t(struct_type.member_types.size()); j++)
+			{
+
+				if (var)
+				{
+					interface_index =
+					    get_extended_member_decoration(var->self, j, SPIRVCrossDecorationInterfaceMemberIndex);
+				}
+
+				if (interface_index == uint32_t(-1))
+					SPIRV_CROSS_THROW("Interface index is unknown. Cannot continue.");
+
+				const auto &mbr_type = get<SPIRType>(struct_type.member_types[j]);
+				if (is_matrix(mbr_type))
+				{
+					expr += type_to_glsl(mbr_type) + "(";
+					for (uint32_t k = 0; k < mbr_type.columns; k++, interface_index++)
+					{
+						if (is_array_of_struct)
+						{
+							const uint32_t indices[2] = { i, interface_index };
+							AccessChainMeta meta;
+							expr += access_chain_internal(
+							    stage_in_ptr_var_id, indices, 2,
+							    ACCESS_CHAIN_INDEX_IS_LITERAL_BIT | ACCESS_CHAIN_PTR_CHAIN_BIT, &meta);
+						}
+						else
+							expr += to_expression(ptr) + "." + to_member_name(iface_type, interface_index);
+
+						if (k + 1 < mbr_type.columns)
+							expr += ", ";
+					}
+					expr += ")";
+				}
+				else if (is_array(mbr_type))
+				{
+					expr += type_to_glsl(mbr_type) + "({ ";
+					uint32_t array_size = to_array_size_literal(mbr_type, 0);
+					for (uint32_t k = 0; k < array_size; k++, interface_index++)
+					{
+						if (is_array_of_struct)
+						{
+							const uint32_t indices[2] = { i, interface_index };
+							AccessChainMeta meta;
+							expr += access_chain_internal(
+							    stage_in_ptr_var_id, indices, 2,
+							    ACCESS_CHAIN_INDEX_IS_LITERAL_BIT | ACCESS_CHAIN_PTR_CHAIN_BIT, &meta);
+						}
+						else
+							expr += to_expression(ptr) + "." + to_member_name(iface_type, interface_index);
+
+						if (k + 1 < array_size)
+							expr += ", ";
+					}
+					expr += " })";
+				}
+				else
+				{
+					if (is_array_of_struct)
+					{
+						const uint32_t indices[2] = { i, interface_index };
+						AccessChainMeta meta;
+						expr += access_chain_internal(stage_in_ptr_var_id, indices, 2,
+						                              ACCESS_CHAIN_INDEX_IS_LITERAL_BIT | ACCESS_CHAIN_PTR_CHAIN_BIT,
+						                              &meta);
+					}
+					else
+						expr += to_expression(ptr) + "." + to_member_name(iface_type, interface_index);
+				}
+
+				if (j + 1 < struct_type.member_types.size())
+					expr += ", ";
+			}
+			expr += " }";
+			if (i + 1 < num_control_points)
+				expr += ", ";
+		}
+		if (is_array_of_struct)
+			expr += " })";
+	}
+	else if (is_matrix(result_type))
+	{
+		bool is_array_of_matrix = is_array(result_type);
+		if (is_array_of_matrix && !ptr_is_io_variable)
+			SPIRV_CROSS_THROW("Loading an array of matrices must come directly from an IO variable.");
+		if (interface_index == uint32_t(-1))
+			SPIRV_CROSS_THROW("Interface index is unknown. Cannot continue.");
+
+		if (is_array_of_matrix)
+		{
+
+			uint32_t base_interface_index = interface_index;
+			uint32_t num_control_points = to_array_size_literal(result_type, 0);
+			expr += type_to_glsl(result_type) + "({ ";
+
+			auto &matrix_type = get_variable_element_type(get<SPIRVariable>(ptr));
+
+			for (uint32_t i = 0; i < num_control_points; i++)
+			{
+				interface_index = base_interface_index;
+				expr += type_to_glsl(matrix_type) + "(";
+				for (uint32_t j = 0; j < result_type.columns; j++, interface_index++)
+				{
+					const uint32_t indices[2] = { i, interface_index };
+
+					AccessChainMeta meta;
+					expr +=
+					    access_chain_internal(stage_in_ptr_var_id, indices, 2,
+					                          ACCESS_CHAIN_INDEX_IS_LITERAL_BIT | ACCESS_CHAIN_PTR_CHAIN_BIT, &meta);
+					if (j + 1 < result_type.columns)
+						expr += ", ";
+				}
+				expr += ")";
+				if (i + 1 < num_control_points)
+					expr += ", ";
+			}
+
+			expr += " })";
+		}
+		else
+		{
+			expr += type_to_glsl(result_type) + "(";
+			for (uint32_t i = 0; i < result_type.columns; i++, interface_index++)
+			{
+				expr += to_expression(ptr) + "." + to_member_name(iface_type, interface_index);
+				if (i + 1 < result_type.columns)
+					expr += ", ";
+			}
+			expr += ")";
+		}
+	}
+	else if (ptr_is_io_variable)
+	{
+		assert(is_array(result_type));
+		assert(result_type.array.size() == 1);
+		if (interface_index == uint32_t(-1))
+			SPIRV_CROSS_THROW("Interface index is unknown. Cannot continue.");
Cannot continue."); + + + + expr += type_to_glsl(result_type) + "({ "; + uint32_t num_control_points = to_array_size_literal(result_type, 0); + + for (uint32_t i = 0; i < num_control_points; i++) + { + const uint32_t indices[2] = { i, interface_index }; + + AccessChainMeta meta; + expr += access_chain_internal(stage_in_ptr_var_id, indices, 2, + ACCESS_CHAIN_INDEX_IS_LITERAL_BIT | ACCESS_CHAIN_PTR_CHAIN_BIT, &meta); + + if (i + 1 < num_control_points) + expr += ", "; + } + expr += " })"; + } + else + { + + assert(is_array(result_type)); + assert(result_type.array.size() == 1); + if (interface_index == uint32_t(-1)) + SPIRV_CROSS_THROW("Interface index is unknown. Cannot continue."); + + expr += type_to_glsl(result_type) + "({ "; + uint32_t array_size = to_array_size_literal(result_type, 0); + for (uint32_t i = 0; i < array_size; i++, interface_index++) + { + expr += to_expression(ptr) + "." + to_member_name(iface_type, interface_index); + if (i + 1 < array_size) + expr += ", "; + } + expr += " })"; + } + + emit_op(result_type_id, id, expr, false); + register_read(id, ptr, false); + return true; +} + +bool CompilerMSL::emit_tessellation_access_chain(const uint32_t *ops, uint32_t length) +{ + + + + + + auto *var = maybe_get_backing_variable(ops[2]); + bool patch = false; + bool flat_data = false; + bool ptr_is_chain = false; + + if (var) + { + patch = has_decoration(ops[2], DecorationPatch) || is_patch_block(get_variable_data_type(*var)); + + + flat_data = var->storage == StorageClassInput || + (var->storage == StorageClassOutput && get_execution_model() == ExecutionModelTessellationControl); + + + + + ptr_is_chain = var->self != ID(ops[2]); + } + + BuiltIn bi_type = BuiltIn(get_decoration(ops[2], DecorationBuiltIn)); + if (var && flat_data && !patch && + (!is_builtin_variable(*var) || bi_type == BuiltInPosition || bi_type == BuiltInPointSize || + bi_type == BuiltInClipDistance || bi_type == BuiltInCullDistance || + get_variable_data_type(*var).basetype == SPIRType::Struct)) + { + AccessChainMeta meta; + SmallVector indices; + uint32_t next_id = ir.increase_bound_by(2); + + indices.reserve(length - 3 + 1); + uint32_t type_id = next_id++; + SPIRType new_uint_type; + new_uint_type.basetype = SPIRType::UInt; + new_uint_type.width = 32; + set(type_id, new_uint_type); + + uint32_t first_non_array_index = ptr_is_chain ? 3 : 4; + VariableID stage_var_id = var->storage == StorageClassInput ? stage_in_ptr_var_id : stage_out_ptr_var_id; + VariableID ptr = ptr_is_chain ? 
VariableID(ops[2]) : stage_var_id; + if (!ptr_is_chain) + { + + indices.push_back(ops[3]); + } + + auto &result_ptr_type = get(ops[0]); + + uint32_t const_mbr_id = next_id++; + uint32_t index = get_extended_decoration(var->self, SPIRVCrossDecorationInterfaceMemberIndex); + if (var->storage == StorageClassInput || has_decoration(get_variable_element_type(*var).self, DecorationBlock)) + { + uint32_t i = first_non_array_index; + auto *type = &get_variable_element_type(*var); + if (index == uint32_t(-1) && length >= (first_non_array_index + 1)) + { + + + index = get_extended_member_decoration(var->self, get_constant(ops[first_non_array_index]).scalar(), + SPIRVCrossDecorationInterfaceMemberIndex); + assert(index != uint32_t(-1)); + i++; + type = &get(type->member_types[get_constant(ops[first_non_array_index]).scalar()]); + } + + + + + for (; i < length; ++i) + { + if (!is_array(*type) && !is_matrix(*type) && type->basetype != SPIRType::Struct) + break; + + auto *c = maybe_get(ops[i]); + if (!c || c->specialization) + SPIRV_CROSS_THROW("Trying to dynamically index into an array interface variable in tessellation. " + "This is currently unsupported."); + + + + + index += c->scalar(); + + if (type->parent_type) + type = &get(type->parent_type); + else if (type->basetype == SPIRType::Struct) + type = &get(type->member_types[c->scalar()]); + } + + if (is_matrix(result_ptr_type) || is_array(result_ptr_type) || result_ptr_type.basetype == SPIRType::Struct) + { + + + set_extended_decoration(ops[1], SPIRVCrossDecorationInterfaceMemberIndex, index); + } + else + { + + set(const_mbr_id, type_id, index, false); + indices.push_back(const_mbr_id); + + + if (i < length) + indices.insert(indices.end(), ops + i, ops + length); + } + } + else + { + assert(index != uint32_t(-1)); + set(const_mbr_id, type_id, index, false); + indices.push_back(const_mbr_id); + + indices.insert(indices.end(), ops + 4, ops + length); + } + + + + string e; + + if (!ptr_is_chain) + { + + e = access_chain(ptr, indices.data(), uint32_t(indices.size()), result_ptr_type, &meta, true); + } + else + { + + + + + + + + + + + + auto *ptr_expr = maybe_get(ptr); + if (ptr_expr && ptr_expr->implied_read_expressions.size() == 2) + { + e = join(to_expression(ptr), + access_chain_internal(stage_var_id, indices.data(), uint32_t(indices.size()), + ACCESS_CHAIN_CHAIN_ONLY_BIT, &meta)); + } + else + { + e = access_chain_internal(ptr, indices.data(), uint32_t(indices.size()), 0, &meta); + } + } + + auto &expr = set(ops[1], move(e), ops[0], should_forward(ops[2])); + expr.loaded_from = var->self; + expr.need_transpose = meta.need_transpose; + expr.access_chain = true; + + + if (meta.storage_is_packed) + set_extended_decoration(ops[1], SPIRVCrossDecorationPhysicalTypePacked); + if (meta.storage_physical_type != 0) + set_extended_decoration(ops[1], SPIRVCrossDecorationPhysicalTypeID, meta.storage_physical_type); + if (meta.storage_is_invariant) + set_decoration(ops[1], DecorationInvariant); + + + + + forwarded_temporaries.insert(ops[1]); + + suppressed_usage_tracking.insert(ops[1]); + + for (uint32_t i = 2; i < length; i++) + { + inherit_expression_dependencies(ops[1], ops[i]); + add_implied_read_expression(expr, ops[i]); + } + + + + if (expr.expression_dependencies.empty()) + forwarded_temporaries.erase(ops[1]); + + return true; + } + + + + + + + + auto *m = ir.find_meta(var ? 
var->self : ID(0));
+	if (get_execution_model() == ExecutionModelTessellationControl && var && m &&
+	    m->decoration.builtin_type == BuiltInTessLevelInner && get_entry_point().flags.get(ExecutionModeTriangles))
+	{
+		auto *c = maybe_get<SPIRConstant>(ops[3]);
+		if (c && c->scalar() == 1)
+			return false;
+		auto &dest_var = set<SPIRVariable>(ops[1], *var);
+		dest_var.basetype = ops[0];
+		ir.meta[ops[1]] = ir.meta[ops[2]];
+		inherit_expression_dependencies(ops[1], ops[2]);
+		return true;
+	}
+
+	return false;
+}
+
+bool CompilerMSL::is_out_of_bounds_tessellation_level(uint32_t id_lhs)
+{
+	if (!get_entry_point().flags.get(ExecutionModeTriangles))
+		return false;
+
+	const auto *e = maybe_get<SPIRExpression>(id_lhs);
+	if (!e || !e->access_chain)
+		return false;
+	BuiltIn builtin = BuiltIn(get_decoration(e->loaded_from, DecorationBuiltIn));
+	if (builtin != BuiltInTessLevelInner && builtin != BuiltInTessLevelOuter)
+		return false;
+	auto *c = maybe_get<SPIRConstant>(e->implied_read_expressions[1]);
+	if (!c)
+		return false;
+	return (builtin == BuiltInTessLevelInner && c->scalar() == 1) ||
+	       (builtin == BuiltInTessLevelOuter && c->scalar() == 3);
+}
+
+void CompilerMSL::emit_instruction(const Instruction &instruction)
+{
+#define MSL_BOP(op) emit_binary_op(ops[0], ops[1], ops[2], ops[3], #op)
+#define MSL_BOP_CAST(op, type) \
+	emit_binary_op_cast(ops[0], ops[1], ops[2], ops[3], #op, type, opcode_is_sign_invariant(opcode))
+#define MSL_UOP(op) emit_unary_op(ops[0], ops[1], ops[2], #op)
+#define MSL_QFOP(op) emit_quaternary_func_op(ops[0], ops[1], ops[2], ops[3], ops[4], ops[5], #op)
+#define MSL_TFOP(op) emit_trinary_func_op(ops[0], ops[1], ops[2], ops[3], ops[4], #op)
+#define MSL_BFOP(op) emit_binary_func_op(ops[0], ops[1], ops[2], ops[3], #op)
+#define MSL_BFOP_CAST(op, type) \
+	emit_binary_func_op_cast(ops[0], ops[1], ops[2], ops[3], #op, type, opcode_is_sign_invariant(opcode))
+#define MSL_UFOP(op) emit_unary_func_op(ops[0], ops[1], ops[2], #op)
+#define MSL_UNORD_BOP(op) emit_binary_unord_op(ops[0], ops[1], ops[2], ops[3], #op)
+
+	auto ops = stream(instruction);
+	auto opcode = static_cast<Op>(instruction.op);
+
+	uint32_t integer_width = get_integer_width_for_instruction(instruction);
+	auto int_type = to_signed_basetype(integer_width);
+	auto uint_type = to_unsigned_basetype(integer_width);
+
+	switch (opcode)
+	{
+	case OpLoad:
+	{
+		uint32_t id = ops[1];
+		uint32_t ptr = ops[2];
+		if (is_tessellation_shader())
+		{
+			if (!emit_tessellation_io_load(ops[0], id, ptr))
+				CompilerGLSL::emit_instruction(instruction);
+		}
+		else
+		{
+			if (BuiltIn(get_decoration(ptr, DecorationBuiltIn)) == BuiltInSampleMask)
+				set_decoration(id, DecorationBuiltIn, BuiltInSampleMask);
+			CompilerGLSL::emit_instruction(instruction);
+		}
+		break;
+	}
+
+	case OpIEqual:
+		MSL_BOP_CAST(==, int_type);
+		break;
+
+	case OpLogicalEqual:
+	case OpFOrdEqual:
+		MSL_BOP(==);
+		break;
+
+	case OpINotEqual:
+		MSL_BOP_CAST(!=, int_type);
+		break;
+
+	case OpLogicalNotEqual:
+	case OpFOrdNotEqual:
+		MSL_BOP(!=);
+		break;
+
+	case OpUGreaterThan:
+		MSL_BOP_CAST(>, uint_type);
+		break;
+
+	case OpSGreaterThan:
+		MSL_BOP_CAST(>, int_type);
+		break;
+
+	case OpFOrdGreaterThan:
+		MSL_BOP(>);
+		break;
+
+	case OpUGreaterThanEqual:
+		MSL_BOP_CAST(>=, uint_type);
+		break;
+
+	case OpSGreaterThanEqual:
+		MSL_BOP_CAST(>=, int_type);
+		break;
+
+	case OpFOrdGreaterThanEqual:
+		MSL_BOP(>=);
+		break;
+
+	case OpULessThan:
+		MSL_BOP_CAST(<, uint_type);
+		break;
+
+	case OpSLessThan:
+		MSL_BOP_CAST(<, int_type);
+		break;
+
+	case OpFOrdLessThan:
+		MSL_BOP(<);
+		break;
+
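// [editor's note] A minimal standalone sketch (not SPIRV-Cross code) of what
// the MSL_BOP_CAST pattern above produces. For sign-invariant opcodes such as
// OpSLessThan / OpULessThan, both operands are first cast to a signed or
// unsigned type of the instruction's bit width, so a signed compare of
// uint-typed SSA values comes out along the lines of "int(a) < int(b)" in the
// generated MSL. Type names here are illustrative; the real code derives them
// from SPIRType via type_to_glsl().
//
//     static std::string emit_cmp_cast(const std::string &a, const std::string &b,
//                                      const char *op, bool is_signed, unsigned width)
//     {
//         std::string t = is_signed ? "int" : "uint";
//         if (width == 64)
//             t = is_signed ? "long" : "ulong";  // 64-bit variants
//         return t + "(" + a + ") " + op + " " + t + "(" + b + ")";
//     }
//
//     // emit_cmp_cast("a", "b", "<", true, 32) yields "int(a) < int(b)"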
case OpULessThanEqual: + MSL_BOP_CAST(<=, uint_type); + break; + + case OpSLessThanEqual: + MSL_BOP_CAST(<=, int_type); + break; + + case OpFOrdLessThanEqual: + MSL_BOP(<=); + break; + + case OpFUnordEqual: + MSL_UNORD_BOP(==); + break; + + case OpFUnordNotEqual: + MSL_UNORD_BOP(!=); + break; + + case OpFUnordGreaterThan: + MSL_UNORD_BOP(>); + break; + + case OpFUnordGreaterThanEqual: + MSL_UNORD_BOP(>=); + break; + + case OpFUnordLessThan: + MSL_UNORD_BOP(<); + break; + + case OpFUnordLessThanEqual: + MSL_UNORD_BOP(<=); + break; + + + case OpDPdx: + case OpDPdxFine: + case OpDPdxCoarse: + MSL_UFOP(dfdx); + register_control_dependent_expression(ops[1]); + break; + + case OpDPdy: + case OpDPdyFine: + case OpDPdyCoarse: + MSL_UFOP(dfdy); + register_control_dependent_expression(ops[1]); + break; + + case OpFwidth: + case OpFwidthCoarse: + case OpFwidthFine: + MSL_UFOP(fwidth); + register_control_dependent_expression(ops[1]); + break; + + + case OpBitFieldInsert: + { + emit_bitfield_insert_op(ops[0], ops[1], ops[2], ops[3], ops[4], ops[5], "insert_bits", SPIRType::UInt); + break; + } + + case OpBitFieldSExtract: + { + emit_trinary_func_op_bitextract(ops[0], ops[1], ops[2], ops[3], ops[4], "extract_bits", int_type, int_type, + SPIRType::UInt, SPIRType::UInt); + break; + } + + case OpBitFieldUExtract: + { + emit_trinary_func_op_bitextract(ops[0], ops[1], ops[2], ops[3], ops[4], "extract_bits", uint_type, uint_type, + SPIRType::UInt, SPIRType::UInt); + break; + } + + case OpBitReverse: + + MSL_UFOP(reverse_bits); + break; + + case OpBitCount: + { + auto basetype = expression_type(ops[2]).basetype; + emit_unary_func_op_cast(ops[0], ops[1], ops[2], "popcount", basetype, basetype); + break; + } + + case OpFRem: + MSL_BFOP(fmod); + break; + + case OpFMul: + if (msl_options.invariant_float_math) + MSL_BFOP(spvFMul); + else + MSL_BOP(*); + break; + + case OpFAdd: + if (msl_options.invariant_float_math) + MSL_BFOP(spvFAdd); + else + MSL_BOP(+); + break; + + + case OpAtomicExchange: + { + uint32_t result_type = ops[0]; + uint32_t id = ops[1]; + uint32_t ptr = ops[2]; + uint32_t mem_sem = ops[4]; + uint32_t val = ops[5]; + emit_atomic_func_op(result_type, id, "atomic_exchange_explicit", mem_sem, mem_sem, false, ptr, val); + break; + } + + case OpAtomicCompareExchange: + { + uint32_t result_type = ops[0]; + uint32_t id = ops[1]; + uint32_t ptr = ops[2]; + uint32_t mem_sem_pass = ops[4]; + uint32_t mem_sem_fail = ops[5]; + uint32_t val = ops[6]; + uint32_t comp = ops[7]; + emit_atomic_func_op(result_type, id, "atomic_compare_exchange_weak_explicit", mem_sem_pass, mem_sem_fail, true, + ptr, comp, true, false, val); + break; + } + + case OpAtomicCompareExchangeWeak: + SPIRV_CROSS_THROW("OpAtomicCompareExchangeWeak is only supported in kernel profile."); + + case OpAtomicLoad: + { + uint32_t result_type = ops[0]; + uint32_t id = ops[1]; + uint32_t ptr = ops[2]; + uint32_t mem_sem = ops[4]; + emit_atomic_func_op(result_type, id, "atomic_load_explicit", mem_sem, mem_sem, false, ptr, 0); + break; + } + + case OpAtomicStore: + { + uint32_t result_type = expression_type(ops[0]).self; + uint32_t id = ops[0]; + uint32_t ptr = ops[0]; + uint32_t mem_sem = ops[2]; + uint32_t val = ops[3]; + emit_atomic_func_op(result_type, id, "atomic_store_explicit", mem_sem, mem_sem, false, ptr, val); + break; + } + +#define MSL_AFMO_IMPL(op, valsrc, valconst) \ + do \ + { \ + uint32_t result_type = ops[0]; \ + uint32_t id = ops[1]; \ + uint32_t ptr = ops[2]; \ + uint32_t mem_sem = ops[4]; \ + uint32_t val = valsrc; \ + 
emit_atomic_func_op(result_type, id, "atomic_fetch_" #op "_explicit", mem_sem, mem_sem, false, ptr, val, \ + false, valconst); \ + } while (false) + +#define MSL_AFMO(op) MSL_AFMO_IMPL(op, ops[5], false) +#define MSL_AFMIO(op) MSL_AFMO_IMPL(op, 1, true) + + case OpAtomicIIncrement: + MSL_AFMIO(add); + break; + + case OpAtomicIDecrement: + MSL_AFMIO(sub); + break; + + case OpAtomicIAdd: + MSL_AFMO(add); + break; + + case OpAtomicISub: + MSL_AFMO(sub); + break; + + case OpAtomicSMin: + case OpAtomicUMin: + MSL_AFMO(min); + break; + + case OpAtomicSMax: + case OpAtomicUMax: + MSL_AFMO(max); + break; + + case OpAtomicAnd: + MSL_AFMO(and); + break; + + case OpAtomicOr: + MSL_AFMO(or); + break; + + case OpAtomicXor: + MSL_AFMO(xor); + break; + + + + + case OpImageRead: + { + + uint32_t img_id = ops[2]; + auto &type = expression_type(img_id); + if (type.image.dim != DimSubpassData) + { + auto *p_var = maybe_get_backing_variable(img_id); + if (p_var && has_decoration(p_var->self, DecorationNonReadable)) + { + unset_decoration(p_var->self, DecorationNonReadable); + force_recompile(); + } + } + + emit_texture_op(instruction); + break; + } + + + case OpImageTexelPointer: + { + + auto *var = maybe_get_backing_variable(ops[2]); + if (var && atomic_image_vars.count(var->self)) + { + uint32_t result_type = ops[0]; + uint32_t id = ops[1]; + + std::string coord = to_expression(ops[3]); + auto &type = expression_type(ops[2]); + if (type.image.dim == Dim2D) + { + coord = join("spvImage2DAtomicCoord(", coord, ", ", to_expression(ops[2]), ")"); + } + + auto &e = set(id, join(to_expression(ops[2]), "_atomic[", coord, "]"), result_type, true); + e.loaded_from = var ? var->self : ID(0); + inherit_expression_dependencies(id, ops[3]); + } + else + { + uint32_t result_type = ops[0]; + uint32_t id = ops[1]; + auto &e = + set(id, join(to_expression(ops[2]), ", ", to_expression(ops[3])), result_type, true); + + + e.loaded_from = var ? 
var->self : ID(0); + inherit_expression_dependencies(id, ops[3]); + } + break; + } + + case OpImageWrite: + { + uint32_t img_id = ops[0]; + uint32_t coord_id = ops[1]; + uint32_t texel_id = ops[2]; + const uint32_t *opt = &ops[3]; + uint32_t length = instruction.length - 3; + + + auto &type = expression_type(img_id); + auto &img_type = get(type.self); + + + + auto *p_var = maybe_get_backing_variable(img_id); + if (p_var && has_decoration(p_var->self, DecorationNonWritable)) + { + unset_decoration(p_var->self, DecorationNonWritable); + force_recompile(); + } + + bool forward = false; + uint32_t bias = 0; + uint32_t lod = 0; + uint32_t flags = 0; + + if (length) + { + flags = *opt++; + length--; + } + + auto test = [&](uint32_t &v, uint32_t flag) { + if (length && (flags & flag)) + { + v = *opt++; + length--; + } + }; + + test(bias, ImageOperandsBiasMask); + test(lod, ImageOperandsLodMask); + + auto &texel_type = expression_type(texel_id); + auto store_type = texel_type; + store_type.vecsize = 4; + + statement(join(to_expression(img_id), ".write(", + remap_swizzle(store_type, texel_type.vecsize, to_expression(texel_id)), ", ", + to_function_args(img_id, img_type, true, false, false, coord_id, 0, 0, 0, 0, lod, 0, 0, 0, 0, 0, + 0, &forward), + ");")); + + if (p_var && variable_storage_is_aliased(*p_var)) + flush_all_aliased_variables(); + + break; + } + + case OpImageQuerySize: + case OpImageQuerySizeLod: + { + uint32_t rslt_type_id = ops[0]; + auto &rslt_type = get(rslt_type_id); + + uint32_t id = ops[1]; + + uint32_t img_id = ops[2]; + string img_exp = to_expression(img_id); + auto &img_type = expression_type(img_id); + Dim img_dim = img_type.image.dim; + bool img_is_array = img_type.image.arrayed; + + if (img_type.basetype != SPIRType::Image) + SPIRV_CROSS_THROW("Invalid type for OpImageQuerySize."); + + string lod; + if (opcode == OpImageQuerySizeLod) + { + + string decl_lod = to_expression(ops[3]); + if (decl_lod != "0") + lod = decl_lod; + } + + string expr = type_to_glsl(rslt_type) + "("; + expr += img_exp + ".get_width(" + lod + ")"; + + if (img_dim == Dim2D || img_dim == DimCube || img_dim == Dim3D) + expr += ", " + img_exp + ".get_height(" + lod + ")"; + + if (img_dim == Dim3D) + expr += ", " + img_exp + ".get_depth(" + lod + ")"; + + if (img_is_array) + { + expr += ", " + img_exp + ".get_array_size()"; + if (img_dim == DimCube && msl_options.emulate_cube_array) + expr += " / 6"; + } + + expr += ")"; + + emit_op(rslt_type_id, id, expr, should_forward(img_id)); + + break; + } + + case OpImageQueryLod: + { + if (!msl_options.supports_msl_version(2, 2)) + SPIRV_CROSS_THROW("ImageQueryLod is only supported on MSL 2.2 and up."); + uint32_t result_type = ops[0]; + uint32_t id = ops[1]; + uint32_t image_id = ops[2]; + uint32_t coord_id = ops[3]; + emit_uninitialized_temporary_expression(result_type, id); + + auto sampler_expr = to_sampler_expression(image_id); + auto *combined = maybe_get(image_id); + auto image_expr = combined ? 
to_expression(combined->image) : to_expression(image_id); + + + + + + statement(to_expression(id), ".x = ", image_expr, ".calculate_clamped_lod(", sampler_expr, ", ", + to_expression(coord_id), ");"); + statement(to_expression(id), ".y = ", image_expr, ".calculate_unclamped_lod(", sampler_expr, ", ", + to_expression(coord_id), ");"); + register_control_dependent_expression(id); + break; + } + +#define MSL_ImgQry(qrytype) \ + do \ + { \ + uint32_t rslt_type_id = ops[0]; \ + auto &rslt_type = get(rslt_type_id); \ + uint32_t id = ops[1]; \ + uint32_t img_id = ops[2]; \ + string img_exp = to_expression(img_id); \ + string expr = type_to_glsl(rslt_type) + "(" + img_exp + ".get_num_" #qrytype "())"; \ + emit_op(rslt_type_id, id, expr, should_forward(img_id)); \ + } while (false) + + case OpImageQueryLevels: + MSL_ImgQry(mip_levels); + break; + + case OpImageQuerySamples: + MSL_ImgQry(samples); + break; + + case OpImage: + { + uint32_t result_type = ops[0]; + uint32_t id = ops[1]; + auto *combined = maybe_get(ops[2]); + + if (combined) + { + auto &e = emit_op(result_type, id, to_expression(combined->image), true, true); + auto *var = maybe_get_backing_variable(combined->image); + if (var) + e.loaded_from = var->self; + } + else + { + auto *var = maybe_get_backing_variable(ops[2]); + SPIRExpression *e; + if (var && has_extended_decoration(var->self, SPIRVCrossDecorationDynamicImageSampler)) + e = &emit_op(result_type, id, join(to_expression(ops[2]), ".plane0"), true, true); + else + e = &emit_op(result_type, id, to_expression(ops[2]), true, true); + if (var) + e->loaded_from = var->self; + } + break; + } + + + case OpQuantizeToF16: + { + uint32_t result_type = ops[0]; + uint32_t id = ops[1]; + uint32_t arg = ops[2]; + + string exp; + auto &type = get(result_type); + + switch (type.vecsize) + { + case 1: + exp = join("float(half(", to_expression(arg), "))"); + break; + case 2: + exp = join("float2(half2(", to_expression(arg), "))"); + break; + case 3: + exp = join("float3(half3(", to_expression(arg), "))"); + break; + case 4: + exp = join("float4(half4(", to_expression(arg), "))"); + break; + default: + SPIRV_CROSS_THROW("Illegal argument to OpQuantizeToF16."); + } + + emit_op(result_type, id, exp, should_forward(arg)); + break; + } + + case OpInBoundsAccessChain: + case OpAccessChain: + case OpPtrAccessChain: + if (is_tessellation_shader()) + { + if (!emit_tessellation_access_chain(ops, instruction.length)) + CompilerGLSL::emit_instruction(instruction); + } + else + CompilerGLSL::emit_instruction(instruction); + break; + + case OpStore: + if (is_out_of_bounds_tessellation_level(ops[0])) + break; + + if (maybe_emit_array_assignment(ops[0], ops[1])) + break; + + CompilerGLSL::emit_instruction(instruction); + break; + + + case OpMemoryBarrier: + emit_barrier(0, ops[0], ops[1]); + break; + + case OpControlBarrier: + + + + if (previous_instruction_opcode != OpMemoryBarrier) + emit_barrier(ops[0], ops[1], ops[2]); + break; + + case OpOuterProduct: + { + uint32_t result_type = ops[0]; + uint32_t id = ops[1]; + uint32_t a = ops[2]; + uint32_t b = ops[3]; + + auto &type = get(result_type); + string expr = type_to_glsl_constructor(type); + expr += "("; + for (uint32_t col = 0; col < type.columns; col++) + { + expr += to_enclosed_expression(a); + expr += " * "; + expr += to_extract_component_expression(b, col); + if (col + 1 < type.columns) + expr += ", "; + } + expr += ")"; + emit_op(result_type, id, expr, should_forward(a) && should_forward(b)); + inherit_expression_dependencies(id, a); + 
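// [editor's note] This OpOuterProduct case builds the result matrix one
// column at a time: column `col` is the left vector scaled by component
// `col` of the right vector, matching the GLSL outerProduct() definition.
// For a float3 `a` and a float2 `b`, the emitted MSL looks roughly like:
//
//     float2x3 r = float2x3(a * b.x, a * b.y);  // 2 columns, 3 rows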
inherit_expression_dependencies(id, b); + break; + } + + case OpVectorTimesMatrix: + case OpMatrixTimesVector: + { + if (!msl_options.invariant_float_math) + { + CompilerGLSL::emit_instruction(instruction); + break; + } + + + auto *e = maybe_get(ops[opcode == OpMatrixTimesVector ? 2 : 3]); + if (e && e->need_transpose) + { + e->need_transpose = false; + string expr; + + if (opcode == OpMatrixTimesVector) + { + expr = join("spvFMulVectorMatrix(", to_enclosed_unpacked_expression(ops[3]), ", ", + to_unpacked_row_major_matrix_expression(ops[2]), ")"); + } + else + { + expr = join("spvFMulMatrixVector(", to_unpacked_row_major_matrix_expression(ops[3]), ", ", + to_enclosed_unpacked_expression(ops[2]), ")"); + } + + bool forward = should_forward(ops[2]) && should_forward(ops[3]); + emit_op(ops[0], ops[1], expr, forward); + e->need_transpose = true; + inherit_expression_dependencies(ops[1], ops[2]); + inherit_expression_dependencies(ops[1], ops[3]); + } + else + { + if (opcode == OpMatrixTimesVector) + MSL_BFOP(spvFMulMatrixVector); + else + MSL_BFOP(spvFMulVectorMatrix); + } + break; + } + + case OpMatrixTimesMatrix: + { + if (!msl_options.invariant_float_math) + { + CompilerGLSL::emit_instruction(instruction); + break; + } + + auto *a = maybe_get(ops[2]); + auto *b = maybe_get(ops[3]); + + + + if (a && b && a->need_transpose && b->need_transpose) + { + a->need_transpose = false; + b->need_transpose = false; + + auto expr = + join("spvFMulMatrixMatrix(", enclose_expression(to_unpacked_row_major_matrix_expression(ops[3])), ", ", + enclose_expression(to_unpacked_row_major_matrix_expression(ops[2])), ")"); + + bool forward = should_forward(ops[2]) && should_forward(ops[3]); + auto &e = emit_op(ops[0], ops[1], expr, forward); + e.need_transpose = true; + a->need_transpose = true; + b->need_transpose = true; + inherit_expression_dependencies(ops[1], ops[2]); + inherit_expression_dependencies(ops[1], ops[3]); + } + else + MSL_BFOP(spvFMulMatrixMatrix); + + break; + } + + case OpIAddCarry: + case OpISubBorrow: + { + uint32_t result_type = ops[0]; + uint32_t result_id = ops[1]; + uint32_t op0 = ops[2]; + uint32_t op1 = ops[3]; + auto &type = get(result_type); + emit_uninitialized_temporary_expression(result_type, result_id); + + auto &res_type = get(type.member_types[1]); + if (opcode == OpIAddCarry) + { + statement(to_expression(result_id), ".", to_member_name(type, 0), " = ", to_enclosed_expression(op0), " + ", + to_enclosed_expression(op1), ";"); + statement(to_expression(result_id), ".", to_member_name(type, 1), " = select(", type_to_glsl(res_type), + "(1), ", type_to_glsl(res_type), "(0), ", to_expression(result_id), ".", to_member_name(type, 0), + " >= max(", to_expression(op0), ", ", to_expression(op1), "));"); + } + else + { + statement(to_expression(result_id), ".", to_member_name(type, 0), " = ", to_enclosed_expression(op0), " - ", + to_enclosed_expression(op1), ";"); + statement(to_expression(result_id), ".", to_member_name(type, 1), " = select(", type_to_glsl(res_type), + "(1), ", type_to_glsl(res_type), "(0), ", to_enclosed_expression(op0), + " >= ", to_enclosed_expression(op1), ");"); + } + break; + } + + case OpUMulExtended: + case OpSMulExtended: + { + uint32_t result_type = ops[0]; + uint32_t result_id = ops[1]; + uint32_t op0 = ops[2]; + uint32_t op1 = ops[3]; + auto &type = get(result_type); + emit_uninitialized_temporary_expression(result_type, result_id); + + statement(to_expression(result_id), ".", to_member_name(type, 0), " = ", to_enclosed_expression(op0), " * ", + 
to_enclosed_expression(op1), ";"); + statement(to_expression(result_id), ".", to_member_name(type, 1), " = mulhi(", to_expression(op0), ", ", + to_expression(op1), ");"); + break; + } + + case OpArrayLength: + { + auto &type = expression_type(ops[2]); + uint32_t offset = type_struct_member_offset(type, ops[3]); + uint32_t stride = type_struct_member_array_stride(type, ops[3]); + + auto expr = join("(", to_buffer_size_expression(ops[2]), " - ", offset, ") / ", stride); + emit_op(ops[0], ops[1], expr, true); + break; + } + + + case OpUCountLeadingZerosINTEL: + MSL_UFOP(clz); + break; + + case OpUCountTrailingZerosINTEL: + MSL_UFOP(ctz); + break; + + case OpAbsISubINTEL: + case OpAbsUSubINTEL: + MSL_BFOP(absdiff); + break; + + case OpIAddSatINTEL: + case OpUAddSatINTEL: + MSL_BFOP(addsat); + break; + + case OpIAverageINTEL: + case OpUAverageINTEL: + MSL_BFOP(hadd); + break; + + case OpIAverageRoundedINTEL: + case OpUAverageRoundedINTEL: + MSL_BFOP(rhadd); + break; + + case OpISubSatINTEL: + case OpUSubSatINTEL: + MSL_BFOP(subsat); + break; + + case OpIMul32x16INTEL: + { + uint32_t result_type = ops[0]; + uint32_t id = ops[1]; + uint32_t a = ops[2], b = ops[3]; + bool forward = should_forward(a) && should_forward(b); + emit_op(result_type, id, join("int(short(", to_expression(a), ")) * int(short(", to_expression(b), "))"), + forward); + inherit_expression_dependencies(id, a); + inherit_expression_dependencies(id, b); + break; + } + + case OpUMul32x16INTEL: + { + uint32_t result_type = ops[0]; + uint32_t id = ops[1]; + uint32_t a = ops[2], b = ops[3]; + bool forward = should_forward(a) && should_forward(b); + emit_op(result_type, id, join("uint(ushort(", to_expression(a), ")) * uint(ushort(", to_expression(b), "))"), + forward); + inherit_expression_dependencies(id, a); + inherit_expression_dependencies(id, b); + break; + } + + case OpIsHelperInvocationEXT: + if (msl_options.is_ios()) + SPIRV_CROSS_THROW("simd_is_helper_thread() is only supported on macOS."); + else if (msl_options.is_macos() && !msl_options.supports_msl_version(2, 1)) + SPIRV_CROSS_THROW("simd_is_helper_thread() requires version 2.1 on macOS."); + emit_op(ops[0], ops[1], "simd_is_helper_thread()", false); + break; + + case OpBeginInvocationInterlockEXT: + case OpEndInvocationInterlockEXT: + if (!msl_options.supports_msl_version(2, 0)) + SPIRV_CROSS_THROW("Raster order groups require MSL 2.0."); + break; + + default: + CompilerGLSL::emit_instruction(instruction); + break; + } + + previous_instruction_opcode = opcode; +} + +void CompilerMSL::emit_texture_op(const Instruction &i) +{ + if (msl_options.is_ios() && msl_options.ios_use_framebuffer_fetch_subpasses) + { + auto *ops = stream(i); + + uint32_t result_type_id = ops[0]; + uint32_t id = ops[1]; + uint32_t img = ops[2]; + + auto &type = expression_type(img); + auto &imgtype = get(type.self); + + + if (imgtype.image.dim == DimSubpassData) + { + + + string expr = to_expression(img); + emit_op(result_type_id, id, expr, true); + return; + } + } + + + CompilerGLSL::emit_texture_op(i); +} + +void CompilerMSL::emit_barrier(uint32_t id_exe_scope, uint32_t id_mem_scope, uint32_t id_mem_sem) +{ + if (get_execution_model() != ExecutionModelGLCompute && get_execution_model() != ExecutionModelTessellationControl) + return; + + uint32_t exe_scope = id_exe_scope ? get(id_exe_scope).scalar() : uint32_t(ScopeInvocation); + uint32_t mem_scope = id_mem_scope ? 
get(id_mem_scope).scalar() : uint32_t(ScopeInvocation); + + exe_scope = min(exe_scope, mem_scope); + + string bar_stmt; + if ((msl_options.is_ios() && msl_options.supports_msl_version(1, 2)) || msl_options.supports_msl_version(2)) + bar_stmt = exe_scope < ScopeSubgroup ? "threadgroup_barrier" : "simdgroup_barrier"; + else + bar_stmt = "threadgroup_barrier"; + bar_stmt += "("; + + uint32_t mem_sem = id_mem_sem ? get(id_mem_sem).scalar() : uint32_t(MemorySemanticsMaskNone); + + + if (msl_options.supports_msl_version(1, 2)) + { + string mem_flags = ""; + + + if (get_execution_model() == ExecutionModelTessellationControl || + (mem_sem & (MemorySemanticsUniformMemoryMask | MemorySemanticsCrossWorkgroupMemoryMask))) + mem_flags += "mem_flags::mem_device"; + + + if (get_execution_model() == ExecutionModelTessellationControl || + (mem_sem & (MemorySemanticsSubgroupMemoryMask | MemorySemanticsWorkgroupMemoryMask | + MemorySemanticsAtomicCounterMemoryMask))) + { + if (!mem_flags.empty()) + mem_flags += " | "; + mem_flags += "mem_flags::mem_threadgroup"; + } + if (mem_sem & MemorySemanticsImageMemoryMask) + { + if (!mem_flags.empty()) + mem_flags += " | "; + mem_flags += "mem_flags::mem_texture"; + } + + if (mem_flags.empty()) + mem_flags = "mem_flags::mem_none"; + + bar_stmt += mem_flags; + } + else + { + if ((mem_sem & (MemorySemanticsUniformMemoryMask | MemorySemanticsCrossWorkgroupMemoryMask)) && + (mem_sem & (MemorySemanticsSubgroupMemoryMask | MemorySemanticsWorkgroupMemoryMask | + MemorySemanticsAtomicCounterMemoryMask))) + bar_stmt += "mem_flags::mem_device_and_threadgroup"; + else if (mem_sem & (MemorySemanticsUniformMemoryMask | MemorySemanticsCrossWorkgroupMemoryMask)) + bar_stmt += "mem_flags::mem_device"; + else if (mem_sem & (MemorySemanticsSubgroupMemoryMask | MemorySemanticsWorkgroupMemoryMask | + MemorySemanticsAtomicCounterMemoryMask)) + bar_stmt += "mem_flags::mem_threadgroup"; + else if (mem_sem & MemorySemanticsImageMemoryMask) + bar_stmt += "mem_flags::mem_texture"; + else + bar_stmt += "mem_flags::mem_none"; + } + + if (msl_options.is_ios() && (msl_options.supports_msl_version(2) && !msl_options.supports_msl_version(2, 1))) + { + bar_stmt += ", "; + + switch (mem_scope) + { + case ScopeCrossDevice: + case ScopeDevice: + bar_stmt += "memory_scope_device"; + break; + + case ScopeSubgroup: + case ScopeInvocation: + bar_stmt += "memory_scope_simdgroup"; + break; + + case ScopeWorkgroup: + default: + bar_stmt += "memory_scope_threadgroup"; + break; + } + } + + bar_stmt += ");"; + + statement(bar_stmt); + + assert(current_emitting_block); + flush_control_dependent_expressions(current_emitting_block->self); + flush_all_active_variables(); +} + +void CompilerMSL::emit_array_copy(const string &lhs, uint32_t rhs_id, StorageClass lhs_storage, + StorageClass rhs_storage) +{ + + + bool lhs_thread = (lhs_storage == StorageClassOutput || lhs_storage == StorageClassFunction || + lhs_storage == StorageClassGeneric || lhs_storage == StorageClassPrivate); + bool rhs_thread = (rhs_storage == StorageClassInput || rhs_storage == StorageClassFunction || + rhs_storage == StorageClassGeneric || rhs_storage == StorageClassPrivate); + + + + if (lhs_thread && rhs_thread && !use_builtin_array) + { + statement(lhs, " = ", to_expression(rhs_id), ";"); + } + else + { + + auto &type = expression_type(rhs_id); + auto *var = maybe_get_backing_variable(rhs_id); + + + + bool is_constant = false; + if (ir.ids[rhs_id].get_type() == TypeConstant) + { + is_constant = true; + } + else if (var && var->remapped_variable 
&& var->statically_assigned && + ir.ids[var->static_expression].get_type() == TypeConstant) + { + is_constant = true; + } + + + + + + if (type.array.size() > 1) + { + if (type.array.size() > SPVFuncImplArrayCopyMultidimMax) + SPIRV_CROSS_THROW("Cannot support this many dimensions for arrays of arrays."); + auto func = static_cast(SPVFuncImplArrayCopyMultidimBase + type.array.size()); + add_spv_func_and_recompile(func); + } + else + add_spv_func_and_recompile(SPVFuncImplArrayCopy); + + const char *tag = nullptr; + if (lhs_thread && is_constant) + tag = "FromConstantToStack"; + else if (lhs_storage == StorageClassWorkgroup && is_constant) + tag = "FromConstantToThreadGroup"; + else if (lhs_thread && rhs_thread) + tag = "FromStackToStack"; + else if (lhs_storage == StorageClassWorkgroup && rhs_thread) + tag = "FromStackToThreadGroup"; + else if (lhs_thread && rhs_storage == StorageClassWorkgroup) + tag = "FromThreadGroupToStack"; + else if (lhs_storage == StorageClassWorkgroup && rhs_storage == StorageClassWorkgroup) + tag = "FromThreadGroupToThreadGroup"; + else + SPIRV_CROSS_THROW("Unknown storage class used for copying arrays."); + + + if (lhs_thread) + statement("spvArrayCopy", tag, type.array.size(), "(", lhs, ".elements, ", to_expression(rhs_id), ");"); + else if (rhs_thread) + statement("spvArrayCopy", tag, type.array.size(), "(", lhs, ", ", to_expression(rhs_id), ".elements);"); + else + statement("spvArrayCopy", tag, type.array.size(), "(", lhs, ", ", to_expression(rhs_id), ");"); + } +} + + + + + +bool CompilerMSL::maybe_emit_array_assignment(uint32_t id_lhs, uint32_t id_rhs) +{ + + auto &type = expression_type(id_rhs); + if (type.array.size() == 0) + return false; + + auto *var = maybe_get(id_lhs); + + + if (var && var->remapped_variable && var->statically_assigned) + return true; + + if (ir.ids[id_rhs].get_type() == TypeConstant && var && var->deferred_declaration) + { + + + + + + statement(to_expression(id_lhs), " = ", constant_expression(get(id_rhs)), ";"); + return true; + } + + + auto *p_v_lhs = maybe_get_backing_variable(id_lhs); + if (p_v_lhs) + flush_variable_declaration(p_v_lhs->self); + + emit_array_copy(to_expression(id_lhs), id_rhs, get_backing_variable_storage(id_lhs), + get_backing_variable_storage(id_rhs)); + register_write(id_lhs); + + return true; +} + + +void CompilerMSL::emit_atomic_func_op(uint32_t result_type, uint32_t result_id, const char *op, uint32_t mem_order_1, + uint32_t mem_order_2, bool has_mem_order_2, uint32_t obj, uint32_t op1, + bool op1_is_pointer, bool op1_is_literal, uint32_t op2) +{ + string exp = string(op) + "("; + + auto &type = get_pointee_type(expression_type(obj)); + exp += "("; + auto *var = maybe_get_backing_variable(obj); + if (!var) + SPIRV_CROSS_THROW("No backing variable for atomic operation."); + + + const auto &res_type = get(var->basetype); + if (res_type.storage == StorageClassUniformConstant && res_type.basetype == SPIRType::Image) + { + exp += "device"; + } + else + { + exp += get_argument_address_space(*var); + } + + exp += " atomic_"; + exp += type_to_glsl(type); + exp += "*)"; + + exp += "&"; + exp += to_enclosed_expression(obj); + + bool is_atomic_compare_exchange_strong = op1_is_pointer && op1; + + if (is_atomic_compare_exchange_strong) + { + assert(strcmp(op, "atomic_compare_exchange_weak_explicit") == 0); + assert(op2); + assert(has_mem_order_2); + exp += ", &"; + exp += to_name(result_id); + exp += ", "; + exp += to_expression(op2); + exp += ", "; + exp += get_memory_order(mem_order_1); + exp += ", "; + exp += 
get_memory_order(mem_order_2); + exp += ")"; + + + + + + + + emit_uninitialized_temporary_expression(result_type, result_id); + statement("do"); + begin_scope(); + statement(to_name(result_id), " = ", to_expression(op1), ";"); + end_scope_decl(join("while (!", exp, " && ", to_name(result_id), " == ", to_enclosed_expression(op1), ")")); + } + else + { + assert(strcmp(op, "atomic_compare_exchange_weak_explicit") != 0); + if (op1) + { + if (op1_is_literal) + exp += join(", ", op1); + else + exp += ", " + to_expression(op1); + } + if (op2) + exp += ", " + to_expression(op2); + + exp += string(", ") + get_memory_order(mem_order_1); + if (has_mem_order_2) + exp += string(", ") + get_memory_order(mem_order_2); + + exp += ")"; + emit_op(result_type, result_id, exp, false); + } + + flush_all_atomic_capable_variables(); +} + + +const char *CompilerMSL::get_memory_order(uint32_t) +{ + return "memory_order_relaxed"; +} + + +void CompilerMSL::emit_glsl_op(uint32_t result_type, uint32_t id, uint32_t eop, const uint32_t *args, uint32_t count) +{ + auto op = static_cast(eop); + + + uint32_t integer_width = get_integer_width_for_glsl_instruction(op, args, count); + auto int_type = to_signed_basetype(integer_width); + auto uint_type = to_unsigned_basetype(integer_width); + + switch (op) + { + case GLSLstd450Atan2: + emit_binary_func_op(result_type, id, args[0], args[1], "atan2"); + break; + case GLSLstd450InverseSqrt: + emit_unary_func_op(result_type, id, args[0], "rsqrt"); + break; + case GLSLstd450RoundEven: + emit_unary_func_op(result_type, id, args[0], "rint"); + break; + + case GLSLstd450FindILsb: + { + + auto basetype = expression_type(args[0]).basetype; + emit_unary_func_op_cast(result_type, id, args[0], "spvFindLSB", basetype, basetype); + break; + } + + case GLSLstd450FindSMsb: + emit_unary_func_op_cast(result_type, id, args[0], "spvFindSMSB", int_type, int_type); + break; + + case GLSLstd450FindUMsb: + emit_unary_func_op_cast(result_type, id, args[0], "spvFindUMSB", uint_type, uint_type); + break; + + case GLSLstd450PackSnorm4x8: + emit_unary_func_op(result_type, id, args[0], "pack_float_to_snorm4x8"); + break; + case GLSLstd450PackUnorm4x8: + emit_unary_func_op(result_type, id, args[0], "pack_float_to_unorm4x8"); + break; + case GLSLstd450PackSnorm2x16: + emit_unary_func_op(result_type, id, args[0], "pack_float_to_snorm2x16"); + break; + case GLSLstd450PackUnorm2x16: + emit_unary_func_op(result_type, id, args[0], "pack_float_to_unorm2x16"); + break; + + case GLSLstd450PackHalf2x16: + { + auto expr = join("as_type(half2(", to_expression(args[0]), "))"); + emit_op(result_type, id, expr, should_forward(args[0])); + inherit_expression_dependencies(id, args[0]); + break; + } + + case GLSLstd450UnpackSnorm4x8: + emit_unary_func_op(result_type, id, args[0], "unpack_snorm4x8_to_float"); + break; + case GLSLstd450UnpackUnorm4x8: + emit_unary_func_op(result_type, id, args[0], "unpack_unorm4x8_to_float"); + break; + case GLSLstd450UnpackSnorm2x16: + emit_unary_func_op(result_type, id, args[0], "unpack_snorm2x16_to_float"); + break; + case GLSLstd450UnpackUnorm2x16: + emit_unary_func_op(result_type, id, args[0], "unpack_unorm2x16_to_float"); + break; + + case GLSLstd450UnpackHalf2x16: + { + auto expr = join("float2(as_type(", to_expression(args[0]), "))"); + emit_op(result_type, id, expr, should_forward(args[0])); + inherit_expression_dependencies(id, args[0]); + break; + } + + case GLSLstd450PackDouble2x32: + emit_unary_func_op(result_type, id, args[0], "unsupported_GLSLstd450PackDouble2x32"); + break; + 
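// [editor's note] A small standalone C++ analogue (illustrative, not part of
// this patch) of the bit reinterpretation the PackHalf2x16 / UnpackHalf2x16
// cases above rely on: MSL's as_type<T>(x) is a pure bit cast, so
// "as_type<uint>(half2(v))" packs two 16-bit halves into one 32-bit word with
// no value conversion. The same round trip in portable C++ is a memcpy:
//
//     #include <cstdint>
//     #include <cstring>
//
//     static uint32_t pack2x16(uint16_t lo, uint16_t hi)
//     {
//         const uint16_t halves[2] = { lo, hi };  // e.g. two IEEE-754 halves
//         uint32_t packed;
//         std::memcpy(&packed, halves, sizeof packed);  // bit cast only
//         return packed;
//     }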
case GLSLstd450UnpackDouble2x32: + emit_unary_func_op(result_type, id, args[0], "unsupported_GLSLstd450UnpackDouble2x32"); + break; + + case GLSLstd450MatrixInverse: + { + auto &mat_type = get(result_type); + switch (mat_type.columns) + { + case 2: + emit_unary_func_op(result_type, id, args[0], "spvInverse2x2"); + break; + case 3: + emit_unary_func_op(result_type, id, args[0], "spvInverse3x3"); + break; + case 4: + emit_unary_func_op(result_type, id, args[0], "spvInverse4x4"); + break; + default: + break; + } + break; + } + + case GLSLstd450FMin: + + + + if (get(result_type).basetype != SPIRType::Float) + emit_binary_func_op(result_type, id, args[0], args[1], "min"); + else + emit_binary_func_op(result_type, id, args[0], args[1], "fast::min"); + break; + + case GLSLstd450FMax: + if (get(result_type).basetype != SPIRType::Float) + emit_binary_func_op(result_type, id, args[0], args[1], "max"); + else + emit_binary_func_op(result_type, id, args[0], args[1], "fast::max"); + break; + + case GLSLstd450FClamp: + + if (get(result_type).basetype != SPIRType::Float) + emit_trinary_func_op(result_type, id, args[0], args[1], args[2], "clamp"); + else + emit_trinary_func_op(result_type, id, args[0], args[1], args[2], "fast::clamp"); + break; + + case GLSLstd450NMin: + if (get(result_type).basetype != SPIRType::Float) + emit_binary_func_op(result_type, id, args[0], args[1], "min"); + else + emit_binary_func_op(result_type, id, args[0], args[1], "precise::min"); + break; + + case GLSLstd450NMax: + if (get(result_type).basetype != SPIRType::Float) + emit_binary_func_op(result_type, id, args[0], args[1], "max"); + else + emit_binary_func_op(result_type, id, args[0], args[1], "precise::max"); + break; + + case GLSLstd450NClamp: + + if (get(result_type).basetype != SPIRType::Float) + emit_trinary_func_op(result_type, id, args[0], args[1], args[2], "clamp"); + else + emit_trinary_func_op(result_type, id, args[0], args[1], args[2], "precise::clamp"); + break; + + + + + + + case GLSLstd450Distance: + + if (expression_type(args[0]).vecsize == 1) + { + + emit_op(result_type, id, + join("abs(", to_unpacked_expression(args[0]), " - ", to_unpacked_expression(args[1]), ")"), + should_forward(args[0]) && should_forward(args[1])); + inherit_expression_dependencies(id, args[0]); + inherit_expression_dependencies(id, args[1]); + } + else + CompilerGLSL::emit_glsl_op(result_type, id, eop, args, count); + break; + + case GLSLstd450Length: + + if (expression_type(args[0]).vecsize == 1) + { + + emit_unary_func_op(result_type, id, args[0], "abs"); + } + else + CompilerGLSL::emit_glsl_op(result_type, id, eop, args, count); + break; + + case GLSLstd450Normalize: + + if (expression_type(args[0]).vecsize == 1) + { + + emit_unary_func_op(result_type, id, args[0], "sign"); + } + else + CompilerGLSL::emit_glsl_op(result_type, id, eop, args, count); + break; + + case GLSLstd450Reflect: + if (get(result_type).vecsize == 1) + emit_binary_func_op(result_type, id, args[0], args[1], "spvReflect"); + else + CompilerGLSL::emit_glsl_op(result_type, id, eop, args, count); + break; + + case GLSLstd450Refract: + if (get(result_type).vecsize == 1) + emit_trinary_func_op(result_type, id, args[0], args[1], args[2], "spvRefract"); + else + CompilerGLSL::emit_glsl_op(result_type, id, eop, args, count); + break; + + case GLSLstd450FaceForward: + if (get(result_type).vecsize == 1) + emit_trinary_func_op(result_type, id, args[0], args[1], args[2], "spvFaceForward"); + else + CompilerGLSL::emit_glsl_op(result_type, id, eop, args, count); + break; + + 
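// [editor's note] The scalar special cases above (Distance -> abs, Length ->
// abs, Normalize -> sign, spvReflect / spvRefract / spvFaceForward) exist
// because Metal only defines these builtins for vector arguments. One
// plausible shape for the scalar reflect helper, reduced from the GLSL
// definition reflect(I, N) = I - 2 * dot(N, I) * N (the actual emitted helper
// lives in SPIRV-Cross's support-function table, not in this hunk):
//
//     template <typename T>
//     inline T spvReflectScalar(T i, T n)
//     {
//         return i - T(2) * n * i * n;  // dot(n, i) == n * i for scalars
//     }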
case GLSLstd450Modf: + case GLSLstd450Frexp: + { + + auto *ptr = maybe_get(args[1]); + if (ptr && ptr->access_chain && is_scalar(expression_type(args[1]))) + { + register_call_out_argument(args[1]); + forced_temporaries.insert(id); + + + + uint32_t &tmp_id = extra_sub_expressions[id]; + if (!tmp_id) + tmp_id = ir.increase_bound_by(1); + + uint32_t tmp_type_id = get_pointee_type_id(ptr->expression_type); + emit_uninitialized_temporary_expression(tmp_type_id, tmp_id); + emit_binary_func_op(result_type, id, args[0], tmp_id, eop == GLSLstd450Modf ? "modf" : "frexp"); + statement(to_expression(args[1]), " = ", to_expression(tmp_id), ";"); + } + else + CompilerGLSL::emit_glsl_op(result_type, id, eop, args, count); + break; + } + + default: + CompilerGLSL::emit_glsl_op(result_type, id, eop, args, count); + break; + } +} + +void CompilerMSL::emit_spv_amd_shader_trinary_minmax_op(uint32_t result_type, uint32_t id, uint32_t eop, + const uint32_t *args, uint32_t count) +{ + enum AMDShaderTrinaryMinMax + { + FMin3AMD = 1, + UMin3AMD = 2, + SMin3AMD = 3, + FMax3AMD = 4, + UMax3AMD = 5, + SMax3AMD = 6, + FMid3AMD = 7, + UMid3AMD = 8, + SMid3AMD = 9 + }; + + if (!msl_options.supports_msl_version(2, 1)) + SPIRV_CROSS_THROW("Trinary min/max functions require MSL 2.1."); + + auto op = static_cast(eop); + + switch (op) + { + case FMid3AMD: + case UMid3AMD: + case SMid3AMD: + emit_trinary_func_op(result_type, id, args[0], args[1], args[2], "median3"); + break; + default: + CompilerGLSL::emit_spv_amd_shader_trinary_minmax_op(result_type, id, eop, args, count); + break; + } +} + + +void CompilerMSL::emit_interface_block(uint32_t ib_var_id) +{ + if (ib_var_id) + { + auto &ib_var = get(ib_var_id); + auto &ib_type = get_variable_data_type(ib_var); + assert(ib_type.basetype == SPIRType::Struct && !ib_type.member_types.empty()); + emit_struct(ib_type); + } +} + + + +void CompilerMSL::emit_function_prototype(SPIRFunction &func, const Bitset &) +{ + if (func.self != ir.default_entry_point) + add_function_overload(func); + + local_variable_names = resource_names; + string decl; + + processing_entry_point = func.self == ir.default_entry_point; + + + if (!processing_entry_point) + statement(force_inline); + + auto &type = get(func.return_type); + + decl += func_type_decl(type); + decl += " "; + decl += to_name(func.self); + decl += "("; + + if (processing_entry_point) + { + if (msl_options.argument_buffers) + decl += entry_point_args_argument_buffer(!func.arguments.empty()); + else + decl += entry_point_args_classic(!func.arguments.empty()); + + + + + + for (auto var_id : vars_needing_early_declaration) + { + auto &ed_var = get(var_id); + ID &initializer = ed_var.initializer; + if (!initializer) + initializer = ir.increase_bound_by(1); + + + if (ir.ids[initializer].get_type() == TypeNone || ir.ids[initializer].get_type() == TypeExpression) + set(ed_var.initializer, "{}", ed_var.basetype, true); + } + } + + for (auto &arg : func.arguments) + { + uint32_t name_id = arg.id; + + auto *var = maybe_get(arg.id); + if (var) + { + + + if (arg.alias_global_variable && var->basevariable) + name_id = var->basevariable; + + var->parameter = &arg; + } + + add_local_variable_name(name_id); + + decl += argument_decl(arg); + + bool is_dynamic_img_sampler = has_extended_decoration(arg.id, SPIRVCrossDecorationDynamicImageSampler); + + auto &arg_type = get(arg.type); + if (arg_type.basetype == SPIRType::SampledImage && !is_dynamic_img_sampler) + { + + uint32_t planes = 1; + if (auto *constexpr_sampler = find_constexpr_sampler(name_id)) + if 
(constexpr_sampler->ycbcr_conversion_enable) + planes = constexpr_sampler->planes; + for (uint32_t i = 1; i < planes; i++) + decl += join(", ", argument_decl(arg), plane_name_suffix, i); + + + if (arg_type.image.dim != DimBuffer) + decl += join(", thread const ", sampler_type(arg_type), " ", to_sampler_expression(arg.id)); + } + + + if (msl_options.swizzle_texture_samples && has_sampled_images && is_sampled_image_type(arg_type) && + !is_dynamic_img_sampler) + { + bool arg_is_array = !arg_type.array.empty(); + decl += join(", constant uint", arg_is_array ? "* " : "& ", to_swizzle_expression(arg.id)); + } + + if (buffers_requiring_array_length.count(name_id)) + { + bool arg_is_array = !arg_type.array.empty(); + decl += join(", constant uint", arg_is_array ? "* " : "& ", to_buffer_size_expression(name_id)); + } + + if (&arg != &func.arguments.back()) + decl += ", "; + } + + decl += ")"; + statement(decl); +} + +static bool needs_chroma_reconstruction(const MSLConstexprSampler *constexpr_sampler) +{ + + + return constexpr_sampler && constexpr_sampler->ycbcr_conversion_enable && constexpr_sampler->planes > 1; +} + + +string CompilerMSL::to_function_name(VariableID img, const SPIRType &imgtype, bool is_fetch, bool is_gather, bool, bool, + bool, bool, bool has_dref, uint32_t, uint32_t) +{ + const MSLConstexprSampler *constexpr_sampler = nullptr; + bool is_dynamic_img_sampler = false; + if (auto *var = maybe_get_backing_variable(img)) + { + constexpr_sampler = find_constexpr_sampler(var->basevariable ? var->basevariable : VariableID(var->self)); + is_dynamic_img_sampler = has_extended_decoration(var->self, SPIRVCrossDecorationDynamicImageSampler); + } + + + + if (msl_options.swizzle_texture_samples && is_gather && !is_dynamic_img_sampler && + (!constexpr_sampler || !constexpr_sampler->ycbcr_conversion_enable)) + { + add_spv_func_and_recompile(imgtype.image.depth ? SPVFuncImplGatherCompareSwizzle : SPVFuncImplGatherSwizzle); + return imgtype.image.depth ? 
"spvGatherCompareSwizzle" : "spvGatherSwizzle"; + } + + auto *combined = maybe_get(img); + + + string fname; + if (needs_chroma_reconstruction(constexpr_sampler) && !is_dynamic_img_sampler) + { + if (constexpr_sampler->planes != 2 && constexpr_sampler->planes != 3) + SPIRV_CROSS_THROW("Unhandled number of color image planes!"); + + if (constexpr_sampler->resolution == MSL_FORMAT_RESOLUTION_444 || + constexpr_sampler->chroma_filter == MSL_SAMPLER_FILTER_NEAREST) + { + if (constexpr_sampler->planes == 2) + add_spv_func_and_recompile(SPVFuncImplChromaReconstructNearest2Plane); + else + add_spv_func_and_recompile(SPVFuncImplChromaReconstructNearest3Plane); + fname = "spvChromaReconstructNearest"; + } + else + { + fname = "spvChromaReconstructLinear"; + switch (constexpr_sampler->resolution) + { + case MSL_FORMAT_RESOLUTION_444: + assert(false); + break; + case MSL_FORMAT_RESOLUTION_422: + switch (constexpr_sampler->x_chroma_offset) + { + case MSL_CHROMA_LOCATION_COSITED_EVEN: + if (constexpr_sampler->planes == 2) + add_spv_func_and_recompile(SPVFuncImplChromaReconstructLinear422CositedEven2Plane); + else + add_spv_func_and_recompile(SPVFuncImplChromaReconstructLinear422CositedEven3Plane); + fname += "422CositedEven"; + break; + case MSL_CHROMA_LOCATION_MIDPOINT: + if (constexpr_sampler->planes == 2) + add_spv_func_and_recompile(SPVFuncImplChromaReconstructLinear422Midpoint2Plane); + else + add_spv_func_and_recompile(SPVFuncImplChromaReconstructLinear422Midpoint3Plane); + fname += "422Midpoint"; + break; + default: + SPIRV_CROSS_THROW("Invalid chroma location."); + } + break; + case MSL_FORMAT_RESOLUTION_420: + fname += "420"; + switch (constexpr_sampler->x_chroma_offset) + { + case MSL_CHROMA_LOCATION_COSITED_EVEN: + switch (constexpr_sampler->y_chroma_offset) + { + case MSL_CHROMA_LOCATION_COSITED_EVEN: + if (constexpr_sampler->planes == 2) + add_spv_func_and_recompile( + SPVFuncImplChromaReconstructLinear420XCositedEvenYCositedEven2Plane); + else + add_spv_func_and_recompile( + SPVFuncImplChromaReconstructLinear420XCositedEvenYCositedEven3Plane); + fname += "XCositedEvenYCositedEven"; + break; + case MSL_CHROMA_LOCATION_MIDPOINT: + if (constexpr_sampler->planes == 2) + add_spv_func_and_recompile( + SPVFuncImplChromaReconstructLinear420XCositedEvenYMidpoint2Plane); + else + add_spv_func_and_recompile( + SPVFuncImplChromaReconstructLinear420XCositedEvenYMidpoint3Plane); + fname += "XCositedEvenYMidpoint"; + break; + default: + SPIRV_CROSS_THROW("Invalid Y chroma location."); + } + break; + case MSL_CHROMA_LOCATION_MIDPOINT: + switch (constexpr_sampler->y_chroma_offset) + { + case MSL_CHROMA_LOCATION_COSITED_EVEN: + if (constexpr_sampler->planes == 2) + add_spv_func_and_recompile( + SPVFuncImplChromaReconstructLinear420XMidpointYCositedEven2Plane); + else + add_spv_func_and_recompile( + SPVFuncImplChromaReconstructLinear420XMidpointYCositedEven3Plane); + fname += "XMidpointYCositedEven"; + break; + case MSL_CHROMA_LOCATION_MIDPOINT: + if (constexpr_sampler->planes == 2) + add_spv_func_and_recompile(SPVFuncImplChromaReconstructLinear420XMidpointYMidpoint2Plane); + else + add_spv_func_and_recompile(SPVFuncImplChromaReconstructLinear420XMidpointYMidpoint3Plane); + fname += "XMidpointYMidpoint"; + break; + default: + SPIRV_CROSS_THROW("Invalid Y chroma location."); + } + break; + default: + SPIRV_CROSS_THROW("Invalid X chroma location."); + } + break; + default: + SPIRV_CROSS_THROW("Invalid format resolution."); + } + } + } + else + { + fname = to_expression(combined ? 
combined->image : img) + "."; + + + if (is_fetch) + fname += "read"; + else if (is_gather) + fname += "gather"; + else + fname += "sample"; + + if (has_dref) + fname += "_compare"; + } + + return fname; +} + +string CompilerMSL::convert_to_f32(const string &expr, uint32_t components) +{ + SPIRType t; + t.basetype = SPIRType::Float; + t.vecsize = components; + t.columns = 1; + return join(type_to_glsl_constructor(t), "(", expr, ")"); +} + +static inline bool sampling_type_needs_f32_conversion(const SPIRType &type) +{ + + return type.basetype == SPIRType::Half || type.basetype == SPIRType::Double; +} + + +string CompilerMSL::to_function_args(VariableID img, const SPIRType &imgtype, bool is_fetch, bool is_gather, + bool is_proj, uint32_t coord, uint32_t, uint32_t dref, uint32_t grad_x, + uint32_t grad_y, uint32_t lod, uint32_t coffset, uint32_t offset, uint32_t bias, + uint32_t comp, uint32_t sample, uint32_t minlod, bool *p_forward) +{ + const MSLConstexprSampler *constexpr_sampler = nullptr; + bool is_dynamic_img_sampler = false; + if (auto *var = maybe_get_backing_variable(img)) + { + constexpr_sampler = find_constexpr_sampler(var->basevariable ? var->basevariable : VariableID(var->self)); + is_dynamic_img_sampler = has_extended_decoration(var->self, SPIRVCrossDecorationDynamicImageSampler); + } + + string farg_str; + bool forward = true; + + if (!is_dynamic_img_sampler) + { + + if (needs_chroma_reconstruction(constexpr_sampler)) + { + + farg_str += to_expression(img); + for (uint32_t i = 1; i < constexpr_sampler->planes; i++) + farg_str += join(", ", to_expression(img), plane_name_suffix, i); + } + else if ((!constexpr_sampler || !constexpr_sampler->ycbcr_conversion_enable) && + msl_options.swizzle_texture_samples && is_gather) + { + auto *combined = maybe_get(img); + farg_str += to_expression(combined ? 
combined->image : img); + } + + + if (!is_fetch) + { + if (!farg_str.empty()) + farg_str += ", "; + farg_str += to_sampler_expression(img); + } + + if ((!constexpr_sampler || !constexpr_sampler->ycbcr_conversion_enable) && + msl_options.swizzle_texture_samples && is_gather) + { + + farg_str += ", " + to_swizzle_expression(img); + used_swizzle_buffer = true; + } + + + + if (comp && msl_options.swizzle_texture_samples) + { + forward = should_forward(comp); + farg_str += ", " + to_component_argument(comp); + } + } + + + forward = forward && should_forward(coord); + auto coord_expr = to_enclosed_expression(coord); + auto &coord_type = expression_type(coord); + bool coord_is_fp = type_is_floating_point(coord_type); + bool is_cube_fetch = false; + + string tex_coords = coord_expr; + uint32_t alt_coord_component = 0; + + switch (imgtype.image.dim) + { + + case Dim1D: + if (coord_type.vecsize > 1) + tex_coords = enclose_expression(tex_coords) + ".x"; + + if (is_fetch) + tex_coords = "uint(" + round_fp_tex_coords(tex_coords, coord_is_fp) + ")"; + else if (sampling_type_needs_f32_conversion(coord_type)) + tex_coords = convert_to_f32(tex_coords, 1); + + if (msl_options.texture_1D_as_2D) + { + if (is_fetch) + tex_coords = "uint2(" + tex_coords + ", 0)"; + else + tex_coords = "float2(" + tex_coords + ", 0.5)"; + } + + alt_coord_component = 1; + break; + + case DimBuffer: + if (coord_type.vecsize > 1) + tex_coords = enclose_expression(tex_coords) + ".x"; + + if (msl_options.texture_buffer_native) + { + tex_coords = "uint(" + round_fp_tex_coords(tex_coords, coord_is_fp) + ")"; + } + else + { + + + if (is_fetch) + { + if (msl_options.texel_buffer_texture_width > 0) + { + tex_coords = "spvTexelBufferCoord(" + round_fp_tex_coords(tex_coords, coord_is_fp) + ")"; + } + else + { + tex_coords = "spvTexelBufferCoord(" + round_fp_tex_coords(tex_coords, coord_is_fp) + ", " + + to_expression(img) + ")"; + } + } + } + + alt_coord_component = 1; + break; + + case DimSubpassData: + + + if (imgtype.image.ms) + tex_coords = "uint2(gl_FragCoord.xy)"; + else + tex_coords = join("uint2(gl_FragCoord.xy), 0"); + break; + + case Dim2D: + if (coord_type.vecsize > 2) + tex_coords = enclose_expression(tex_coords) + ".xy"; + + if (is_fetch) + tex_coords = "uint2(" + round_fp_tex_coords(tex_coords, coord_is_fp) + ")"; + else if (sampling_type_needs_f32_conversion(coord_type)) + tex_coords = convert_to_f32(tex_coords, 2); + + alt_coord_component = 2; + break; + + case Dim3D: + if (coord_type.vecsize > 3) + tex_coords = enclose_expression(tex_coords) + ".xyz"; + + if (is_fetch) + tex_coords = "uint3(" + round_fp_tex_coords(tex_coords, coord_is_fp) + ")"; + else if (sampling_type_needs_f32_conversion(coord_type)) + tex_coords = convert_to_f32(tex_coords, 3); + + alt_coord_component = 3; + break; + + case DimCube: + if (is_fetch) + { + is_cube_fetch = true; + tex_coords += ".xy"; + tex_coords = "uint2(" + round_fp_tex_coords(tex_coords, coord_is_fp) + ")"; + } + else + { + if (coord_type.vecsize > 3) + tex_coords = enclose_expression(tex_coords) + ".xyz"; + } + + if (sampling_type_needs_f32_conversion(coord_type)) + tex_coords = convert_to_f32(tex_coords, 3); + + alt_coord_component = 3; + break; + + default: + break; + } + + if (is_fetch && offset) + { + + forward = forward && should_forward(offset); + auto &type = expression_type(offset); + if (type.basetype != SPIRType::UInt) + tex_coords += " + " + bitcast_expression(SPIRType::UInt, offset); + else + tex_coords += " + " + to_enclosed_expression(offset); + } + else if (is_fetch && 
coffset) + { + + forward = forward && should_forward(coffset); + auto &type = expression_type(coffset); + if (type.basetype != SPIRType::UInt) + tex_coords += " + " + bitcast_expression(SPIRType::UInt, coffset); + else + tex_coords += " + " + to_enclosed_expression(coffset); + } + + + if (is_proj) + { + if (sampling_type_needs_f32_conversion(coord_type)) + tex_coords += " / " + convert_to_f32(to_extract_component_expression(coord, alt_coord_component), 1); + else + tex_coords += " / " + to_extract_component_expression(coord, alt_coord_component); + } + + if (!farg_str.empty()) + farg_str += ", "; + + if (imgtype.image.dim == DimCube && imgtype.image.arrayed && msl_options.emulate_cube_array) + { + farg_str += "spvCubemapTo2DArrayFace(" + tex_coords + ").xy"; + + if (is_cube_fetch) + farg_str += ", uint(" + to_extract_component_expression(coord, 2) + ")"; + else + farg_str += ", uint(spvCubemapTo2DArrayFace(" + tex_coords + ").z) + (uint(" + + round_fp_tex_coords(to_extract_component_expression(coord, alt_coord_component), coord_is_fp) + + ") * 6u)"; + + add_spv_func_and_recompile(SPVFuncImplCubemapTo2DArrayFace); + } + else + { + farg_str += tex_coords; + + + if (is_cube_fetch) + { + + if (imgtype.image.arrayed) + farg_str += ", uint(" + to_extract_component_expression(coord, 2) + ") % 6u"; + else + farg_str += + ", uint(" + round_fp_tex_coords(to_extract_component_expression(coord, 2), coord_is_fp) + ")"; + } + + + if (imgtype.image.arrayed) + { + + if (imgtype.image.dim == DimCube && is_fetch) + farg_str += ", uint(" + to_extract_component_expression(coord, 2) + ") / 6u"; + else + farg_str += + ", uint(" + + round_fp_tex_coords(to_extract_component_expression(coord, alt_coord_component), coord_is_fp) + ")"; + } + } + + + if (dref) + { + forward = forward && should_forward(dref); + farg_str += ", "; + + auto &dref_type = expression_type(dref); + + string dref_expr; + if (is_proj) + dref_expr = + join(to_enclosed_expression(dref), " / ", to_extract_component_expression(coord, alt_coord_component)); + else + dref_expr = to_expression(dref); + + if (sampling_type_needs_f32_conversion(dref_type)) + dref_expr = convert_to_f32(dref_expr, 1); + + farg_str += dref_expr; + + if (msl_options.is_macos() && (grad_x || grad_y)) + { + + + + + bool constant_zero_x = !grad_x || expression_is_constant_null(grad_x); + bool constant_zero_y = !grad_y || expression_is_constant_null(grad_y); + if (constant_zero_x && constant_zero_y) + { + lod = 0; + grad_x = 0; + grad_y = 0; + farg_str += ", level(0)"; + } + else + { + SPIRV_CROSS_THROW("Using non-constant 0.0 gradient() qualifier for sample_compare. This is not " + "supported in MSL macOS."); + } + } + + if (msl_options.is_macos() && bias) + { + + + if (expression_is_constant_null(bias)) + { + bias = 0; + } + else + { + SPIRV_CROSS_THROW( + "Using non-constant 0.0 bias() qualifier for sample_compare. 
This is not supported in MSL macOS."); + } + } + } + + + + if (bias && (imgtype.image.dim != Dim1D || msl_options.texture_1D_as_2D)) + { + forward = forward && should_forward(bias); + farg_str += ", bias(" + to_expression(bias) + ")"; + } + + + if (lod && (imgtype.image.dim != Dim1D || msl_options.texture_1D_as_2D)) + { + forward = forward && should_forward(lod); + if (is_fetch) + { + farg_str += ", " + to_expression(lod); + } + else + { + farg_str += ", level(" + to_expression(lod) + ")"; + } + } + else if (is_fetch && !lod && (imgtype.image.dim != Dim1D || msl_options.texture_1D_as_2D) && + imgtype.image.dim != DimBuffer && !imgtype.image.ms && imgtype.image.sampled != 2) + { + + + farg_str += ", 0"; + } + + + if ((grad_x || grad_y) && (imgtype.image.dim != Dim1D || msl_options.texture_1D_as_2D)) + { + forward = forward && should_forward(grad_x); + forward = forward && should_forward(grad_y); + string grad_opt; + switch (imgtype.image.dim) + { + case Dim2D: + grad_opt = "2d"; + break; + case Dim3D: + grad_opt = "3d"; + break; + case DimCube: + if (imgtype.image.arrayed && msl_options.emulate_cube_array) + grad_opt = "2d"; + else + grad_opt = "cube"; + break; + default: + grad_opt = "unsupported_gradient_dimension"; + break; + } + farg_str += ", gradient" + grad_opt + "(" + to_expression(grad_x) + ", " + to_expression(grad_y) + ")"; + } + + if (minlod) + { + if (msl_options.is_macos()) + { + if (!msl_options.supports_msl_version(2, 2)) + SPIRV_CROSS_THROW("min_lod_clamp() is only supported in MSL 2.2+ and up on macOS."); + } + else if (msl_options.is_ios()) + SPIRV_CROSS_THROW("min_lod_clamp() is not supported on iOS."); + + forward = forward && should_forward(minlod); + farg_str += ", min_lod_clamp(" + to_expression(minlod) + ")"; + } + + + string offset_expr; + if (coffset && !is_fetch) + { + forward = forward && should_forward(coffset); + offset_expr = to_expression(coffset); + } + else if (offset && !is_fetch) + { + forward = forward && should_forward(offset); + offset_expr = to_expression(offset); + } + + if (!offset_expr.empty()) + { + switch (imgtype.image.dim) + { + case Dim2D: + if (coord_type.vecsize > 2) + offset_expr = enclose_expression(offset_expr) + ".xy"; + + farg_str += ", " + offset_expr; + break; + + case Dim3D: + if (coord_type.vecsize > 3) + offset_expr = enclose_expression(offset_expr) + ".xyz"; + + farg_str += ", " + offset_expr; + break; + + default: + break; + } + } + + if (comp) + { + + if (imgtype.image.dim == Dim2D && offset_expr.empty()) + farg_str += ", int2(0)"; + + if (!msl_options.swizzle_texture_samples || is_dynamic_img_sampler) + { + forward = forward && should_forward(comp); + farg_str += ", " + to_component_argument(comp); + } + } + + if (sample) + { + forward = forward && should_forward(sample); + farg_str += ", "; + farg_str += to_expression(sample); + } + + *p_forward = forward; + + return farg_str; +} + + +string CompilerMSL::round_fp_tex_coords(string tex_coords, bool coord_is_fp) +{ + return coord_is_fp ? 
("round(" + tex_coords + ")") : tex_coords; +} + + + +string CompilerMSL::to_component_argument(uint32_t id) +{ + if (ir.ids[id].get_type() != TypeConstant) + { + SPIRV_CROSS_THROW("ID " + to_string(id) + " is not an OpConstant."); + return "component::x"; + } + + uint32_t component_index = get(id).scalar(); + switch (component_index) + { + case 0: + return "component::x"; + case 1: + return "component::y"; + case 2: + return "component::z"; + case 3: + return "component::w"; + + default: + SPIRV_CROSS_THROW("The value (" + to_string(component_index) + ") of OpConstant ID " + to_string(id) + + " is not a valid Component index, which must be one of 0, 1, 2, or 3."); + return "component::x"; + } +} + + +void CompilerMSL::emit_sampled_image_op(uint32_t result_type, uint32_t result_id, uint32_t image_id, uint32_t samp_id) +{ + set(result_id, result_type, image_id, samp_id); +} + +string CompilerMSL::to_texture_op(const Instruction &i, bool *forward, SmallVector &inherited_expressions) +{ + auto *ops = stream(i); + uint32_t result_type_id = ops[0]; + uint32_t img = ops[2]; + auto &result_type = get(result_type_id); + auto op = static_cast(i.op); + bool is_gather = (op == OpImageGather || op == OpImageDrefGather); + + + auto &type = expression_type(img); + auto &imgtype = get(type.self); + + const MSLConstexprSampler *constexpr_sampler = nullptr; + bool is_dynamic_img_sampler = false; + if (auto *var = maybe_get_backing_variable(img)) + { + constexpr_sampler = find_constexpr_sampler(var->basevariable ? var->basevariable : VariableID(var->self)); + is_dynamic_img_sampler = has_extended_decoration(var->self, SPIRVCrossDecorationDynamicImageSampler); + } + + string expr; + if (constexpr_sampler && constexpr_sampler->ycbcr_conversion_enable && !is_dynamic_img_sampler) + { + + + switch (constexpr_sampler->ycbcr_model) + { + case MSL_SAMPLER_YCBCR_MODEL_CONVERSION_YCBCR_BT_709: + add_spv_func_and_recompile(SPVFuncImplConvertYCbCrBT709); + expr += "spvConvertYCbCrBT709("; + break; + case MSL_SAMPLER_YCBCR_MODEL_CONVERSION_YCBCR_BT_601: + add_spv_func_and_recompile(SPVFuncImplConvertYCbCrBT601); + expr += "spvConvertYCbCrBT601("; + break; + case MSL_SAMPLER_YCBCR_MODEL_CONVERSION_YCBCR_BT_2020: + add_spv_func_and_recompile(SPVFuncImplConvertYCbCrBT2020); + expr += "spvConvertYCbCrBT2020("; + break; + default: + SPIRV_CROSS_THROW("Invalid Y'CbCr model conversion."); + } + + if (constexpr_sampler->ycbcr_model != MSL_SAMPLER_YCBCR_MODEL_CONVERSION_RGB_IDENTITY) + { + switch (constexpr_sampler->ycbcr_range) + { + case MSL_SAMPLER_YCBCR_RANGE_ITU_FULL: + add_spv_func_and_recompile(SPVFuncImplExpandITUFullRange); + expr += "spvExpandITUFullRange("; + break; + case MSL_SAMPLER_YCBCR_RANGE_ITU_NARROW: + add_spv_func_and_recompile(SPVFuncImplExpandITUNarrowRange); + expr += "spvExpandITUNarrowRange("; + break; + default: + SPIRV_CROSS_THROW("Invalid Y'CbCr range."); + } + } + } + else if (msl_options.swizzle_texture_samples && !is_gather && is_sampled_image_type(imgtype) && + !is_dynamic_img_sampler) + { + add_spv_func_and_recompile(SPVFuncImplTextureSwizzle); + expr += "spvTextureSwizzle("; + } + + string inner_expr = CompilerGLSL::to_texture_op(i, forward, inherited_expressions); + + if (constexpr_sampler && constexpr_sampler->ycbcr_conversion_enable && !is_dynamic_img_sampler) + { + if (!constexpr_sampler->swizzle_is_identity()) + { + static const char swizzle_names[] = "rgba"; + if (!constexpr_sampler->swizzle_has_one_or_zero()) + { + + expr += inner_expr + "."; + for (uint32_t c = 0; c < 4; c++) + { + 
switch (constexpr_sampler->swizzle[c]) + { + case MSL_COMPONENT_SWIZZLE_IDENTITY: + expr += swizzle_names[c]; + break; + case MSL_COMPONENT_SWIZZLE_R: + case MSL_COMPONENT_SWIZZLE_G: + case MSL_COMPONENT_SWIZZLE_B: + case MSL_COMPONENT_SWIZZLE_A: + expr += swizzle_names[constexpr_sampler->swizzle[c] - MSL_COMPONENT_SWIZZLE_R]; + break; + default: + SPIRV_CROSS_THROW("Invalid component swizzle."); + } + } + } + else + { + + uint32_t temp_id = ir.increase_bound_by(1); + emit_op(result_type_id, temp_id, inner_expr, false); + for (auto &inherit : inherited_expressions) + inherit_expression_dependencies(temp_id, inherit); + inherited_expressions.clear(); + inherited_expressions.push_back(temp_id); + + switch (op) + { + case OpImageSampleDrefImplicitLod: + case OpImageSampleImplicitLod: + case OpImageSampleProjImplicitLod: + case OpImageSampleProjDrefImplicitLod: + register_control_dependent_expression(temp_id); + break; + + default: + break; + } + expr += type_to_glsl(result_type) + "("; + for (uint32_t c = 0; c < 4; c++) + { + switch (constexpr_sampler->swizzle[c]) + { + case MSL_COMPONENT_SWIZZLE_IDENTITY: + expr += to_expression(temp_id) + "." + swizzle_names[c]; + break; + case MSL_COMPONENT_SWIZZLE_ZERO: + expr += "0"; + break; + case MSL_COMPONENT_SWIZZLE_ONE: + expr += "1"; + break; + case MSL_COMPONENT_SWIZZLE_R: + case MSL_COMPONENT_SWIZZLE_G: + case MSL_COMPONENT_SWIZZLE_B: + case MSL_COMPONENT_SWIZZLE_A: + expr += to_expression(temp_id) + "." + + swizzle_names[constexpr_sampler->swizzle[c] - MSL_COMPONENT_SWIZZLE_R]; + break; + default: + SPIRV_CROSS_THROW("Invalid component swizzle."); + } + if (c < 3) + expr += ", "; + } + expr += ")"; + } + } + else + expr += inner_expr; + if (constexpr_sampler->ycbcr_model != MSL_SAMPLER_YCBCR_MODEL_CONVERSION_RGB_IDENTITY) + { + expr += join(", ", constexpr_sampler->bpc, ")"); + if (constexpr_sampler->ycbcr_model != MSL_SAMPLER_YCBCR_MODEL_CONVERSION_YCBCR_IDENTITY) + expr += ")"; + } + } + else + { + expr += inner_expr; + if (msl_options.swizzle_texture_samples && !is_gather && is_sampled_image_type(imgtype) && + !is_dynamic_img_sampler) + { + + expr += ", " + to_swizzle_expression(img) + ")"; + used_swizzle_buffer = true; + } + } + + return expr; +} + +static string create_swizzle(MSLComponentSwizzle swizzle) +{ + switch (swizzle) + { + case MSL_COMPONENT_SWIZZLE_IDENTITY: + return "spvSwizzle::none"; + case MSL_COMPONENT_SWIZZLE_ZERO: + return "spvSwizzle::zero"; + case MSL_COMPONENT_SWIZZLE_ONE: + return "spvSwizzle::one"; + case MSL_COMPONENT_SWIZZLE_R: + return "spvSwizzle::red"; + case MSL_COMPONENT_SWIZZLE_G: + return "spvSwizzle::green"; + case MSL_COMPONENT_SWIZZLE_B: + return "spvSwizzle::blue"; + case MSL_COMPONENT_SWIZZLE_A: + return "spvSwizzle::alpha"; + default: + SPIRV_CROSS_THROW("Invalid component swizzle."); + return ""; + } +} + + + +string CompilerMSL::to_func_call_arg(const SPIRFunction::Parameter &arg, uint32_t id) +{ + string arg_str; + + auto &type = expression_type(id); + bool is_dynamic_img_sampler = has_extended_decoration(arg.id, SPIRVCrossDecorationDynamicImageSampler); + + bool arg_is_dynamic_img_sampler = has_extended_decoration(id, SPIRVCrossDecorationDynamicImageSampler); + if (is_dynamic_img_sampler && !arg_is_dynamic_img_sampler) + arg_str = join("spvDynamicImageSampler<", type_to_glsl(get(type.image.type)), ">("); + + arg_str += CompilerGLSL::to_func_call_arg(arg, id); + + + uint32_t var_id = 0; + auto *var = maybe_get(id); + if (var) + var_id = var->basevariable; + + if (!arg_is_dynamic_img_sampler) + { + 
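The component-wise rebuild above is easiest to see on a concrete case. A minimal sketch (not part of this patch) of the MSL it would emit for a constexpr sampler whose swizzle is (R, ZERO, ONE, A); the entry-point, texture, and sampler names here are illustrative, not taken from the change:

    struct main0_in { float2 uv [[user(locn0)]]; };

    fragment float4 main0(main0_in in [[stage_in]],
                          texture2d<float> tex [[texture(0)]],
                          sampler texSmplr [[sampler(0)]])
    {
        // The sample is hoisted into a temporary (the temp_id emitted above) so
        // each output component can be re-assembled, injecting 0 and 1 lanes.
        float4 _42 = tex.sample(texSmplr, in.uv);
        return float4(_42.r, 0.0, 1.0, _42.a);
    }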
auto *constexpr_sampler = find_constexpr_sampler(var_id ? var_id : id); + if (type.basetype == SPIRType::SampledImage) + { + + uint32_t planes = 1; + if (constexpr_sampler && constexpr_sampler->ycbcr_conversion_enable) + { + planes = constexpr_sampler->planes; + + + + + + if (!arg.alias_global_variable) + add_spv_func_and_recompile(SPVFuncImplDynamicImageSampler); + } + for (uint32_t i = 1; i < planes; i++) + arg_str += join(", ", CompilerGLSL::to_func_call_arg(arg, id), plane_name_suffix, i); + + if (type.image.dim != DimBuffer) + arg_str += ", " + to_sampler_expression(var_id ? var_id : id); + + + if (is_dynamic_img_sampler && constexpr_sampler && constexpr_sampler->ycbcr_conversion_enable) + { + SmallVector samp_args; + + switch (constexpr_sampler->resolution) + { + case MSL_FORMAT_RESOLUTION_444: + + break; + case MSL_FORMAT_RESOLUTION_422: + samp_args.push_back("spvFormatResolution::_422"); + break; + case MSL_FORMAT_RESOLUTION_420: + samp_args.push_back("spvFormatResolution::_420"); + break; + default: + SPIRV_CROSS_THROW("Invalid format resolution."); + } + + if (constexpr_sampler->chroma_filter != MSL_SAMPLER_FILTER_NEAREST) + samp_args.push_back("spvChromaFilter::linear"); + + if (constexpr_sampler->x_chroma_offset != MSL_CHROMA_LOCATION_COSITED_EVEN) + samp_args.push_back("spvXChromaLocation::midpoint"); + if (constexpr_sampler->y_chroma_offset != MSL_CHROMA_LOCATION_COSITED_EVEN) + samp_args.push_back("spvYChromaLocation::midpoint"); + switch (constexpr_sampler->ycbcr_model) + { + case MSL_SAMPLER_YCBCR_MODEL_CONVERSION_RGB_IDENTITY: + + break; + case MSL_SAMPLER_YCBCR_MODEL_CONVERSION_YCBCR_IDENTITY: + samp_args.push_back("spvYCbCrModelConversion::ycbcr_identity"); + break; + case MSL_SAMPLER_YCBCR_MODEL_CONVERSION_YCBCR_BT_709: + samp_args.push_back("spvYCbCrModelConversion::ycbcr_bt_709"); + break; + case MSL_SAMPLER_YCBCR_MODEL_CONVERSION_YCBCR_BT_601: + samp_args.push_back("spvYCbCrModelConversion::ycbcr_bt_601"); + break; + case MSL_SAMPLER_YCBCR_MODEL_CONVERSION_YCBCR_BT_2020: + samp_args.push_back("spvYCbCrModelConversion::ycbcr_bt_2020"); + break; + default: + SPIRV_CROSS_THROW("Invalid Y'CbCr model conversion."); + } + if (constexpr_sampler->ycbcr_range != MSL_SAMPLER_YCBCR_RANGE_ITU_FULL) + samp_args.push_back("spvYCbCrRange::itu_narrow"); + samp_args.push_back(join("spvComponentBits(", constexpr_sampler->bpc, ")")); + arg_str += join(", spvYCbCrSampler(", merge(samp_args), ")"); + } + } + + if (is_dynamic_img_sampler && constexpr_sampler && constexpr_sampler->ycbcr_conversion_enable) + arg_str += join(", (uint(", create_swizzle(constexpr_sampler->swizzle[3]), ") << 24) | (uint(", + create_swizzle(constexpr_sampler->swizzle[2]), ") << 16) | (uint(", + create_swizzle(constexpr_sampler->swizzle[1]), ") << 8) | uint(", + create_swizzle(constexpr_sampler->swizzle[0]), ")"); + else if (msl_options.swizzle_texture_samples && has_sampled_images && is_sampled_image_type(type)) + arg_str += ", " + to_swizzle_expression(var_id ? var_id : id); + + if (buffers_requiring_array_length.count(var_id)) + arg_str += ", " + to_buffer_size_expression(var_id ? var_id : id); + + if (is_dynamic_img_sampler) + arg_str += ")"; + } + + + auto *backing_var = maybe_get_backing_variable(var_id); + if (backing_var && atomic_image_vars.count(backing_var->self)) + { + arg_str += ", " + to_expression(var_id) + "_atomic"; + } + + return arg_str; +} + + + + +string CompilerMSL::to_sampler_expression(uint32_t id) +{ + auto *combined = maybe_get(id); + auto expr = to_expression(combined ? 
combined->image : VariableID(id)); + auto index = expr.find_first_of('['); + + uint32_t samp_id = 0; + if (combined) + samp_id = combined->sampler; + + if (index == string::npos) + return samp_id ? to_expression(samp_id) : expr + sampler_name_suffix; + else + { + auto image_expr = expr.substr(0, index); + auto array_expr = expr.substr(index); + return samp_id ? to_expression(samp_id) : (image_expr + sampler_name_suffix + array_expr); + } +} + +string CompilerMSL::to_swizzle_expression(uint32_t id) +{ + auto *combined = maybe_get(id); + + auto expr = to_expression(combined ? combined->image : VariableID(id)); + auto index = expr.find_first_of('['); + + + for (auto &c : expr) + if (c == '.') + c = '_'; + + if (index == string::npos) + return expr + swizzle_name_suffix; + else + { + auto image_expr = expr.substr(0, index); + auto array_expr = expr.substr(index); + return image_expr + swizzle_name_suffix + array_expr; + } +} + +string CompilerMSL::to_buffer_size_expression(uint32_t id) +{ + auto expr = to_expression(id); + auto index = expr.find_first_of('['); + + + + + if (expr.size() >= 3 && expr[0] == '(' && expr[1] == '*') + expr = address_of_expression(expr); + + + for (auto &c : expr) + if (c == '.') + c = '_'; + + if (index == string::npos) + return expr + buffer_size_name_suffix; + else + { + auto buffer_expr = expr.substr(0, index); + auto array_expr = expr.substr(index); + return buffer_expr + buffer_size_name_suffix + array_expr; + } +} + + +bool CompilerMSL::is_patch_block(const SPIRType &type) +{ + if (!has_decoration(type.self, DecorationBlock)) + return false; + + for (uint32_t i = 0; i < type.member_types.size(); i++) + { + if (!has_member_decoration(type.self, i, DecorationPatch)) + return false; + } + + return true; +} + + +bool CompilerMSL::is_non_native_row_major_matrix(uint32_t id) +{ + auto *e = maybe_get(id); + if (e) + return e->need_transpose; + else + return has_decoration(id, DecorationRowMajor); +} + + +bool CompilerMSL::member_is_non_native_row_major_matrix(const SPIRType &type, uint32_t index) +{ + return has_member_decoration(type.self, index, DecorationRowMajor); +} + +string CompilerMSL::convert_row_major_matrix(string exp_str, const SPIRType &exp_type, uint32_t physical_type_id, + bool is_packed) +{ + if (!is_matrix(exp_type)) + { + return CompilerGLSL::convert_row_major_matrix(move(exp_str), exp_type, physical_type_id, is_packed); + } + else + { + strip_enclosed_expression(exp_str); + if (physical_type_id != 0 || is_packed) + exp_str = unpack_expression_type(exp_str, exp_type, physical_type_id, is_packed, true); + return join("transpose(", exp_str, ")"); + } +} + + +void CompilerMSL::emit_fixup() +{ + if ((get_execution_model() == ExecutionModelVertex || + get_execution_model() == ExecutionModelTessellationEvaluation) && + stage_out_var_id && !qual_pos_var_name.empty() && !capture_output_to_buffer) + { + if (options.vertex.fixup_clipspace) + statement(qual_pos_var_name, ".z = (", qual_pos_var_name, ".z + ", qual_pos_var_name, + ".w) * 0.5; // Adjust clip-space for Metal"); + + if (options.vertex.flip_vert_y) + statement(qual_pos_var_name, ".y = -(", qual_pos_var_name, ".y);", " // Invert Y-axis for Metal"); + } +} + + +string CompilerMSL::to_struct_member(const SPIRType &type, uint32_t member_type_id, uint32_t index, + const string &qualifier) +{ + if (member_is_remapped_physical_type(type, index)) + member_type_id = get_extended_member_decoration(type.self, index, SPIRVCrossDecorationPhysicalTypeID); + auto &physical_type = get(member_type_id); + + + string 
pack_pfx; + + + uint32_t orig_id = 0; + if (has_extended_member_decoration(type.self, index, SPIRVCrossDecorationInterfaceOrigID)) + orig_id = get_extended_member_decoration(type.self, index, SPIRVCrossDecorationInterfaceOrigID); + + bool row_major = false; + if (is_matrix(physical_type)) + row_major = has_member_decoration(type.self, index, DecorationRowMajor); + + SPIRType row_major_physical_type; + const SPIRType *declared_type = &physical_type; + + + + + + + + if (has_member_decoration(type.self, index, DecorationOffset)) + use_builtin_array = true; + else if (has_extended_member_decoration(type.self, index, SPIRVCrossDecorationResourceIndexPrimary)) + use_builtin_array = true; + + if (member_is_packed_physical_type(type, index)) + { + + if (physical_type.basetype == SPIRType::Struct) + { + SPIRV_CROSS_THROW("Cannot emit a packed struct currently."); + } + else if (is_matrix(physical_type)) + { + uint32_t rows = physical_type.vecsize; + uint32_t cols = physical_type.columns; + pack_pfx = "packed_"; + if (row_major) + { + + rows = physical_type.columns; + cols = physical_type.vecsize; + pack_pfx = "packed_rm_"; + } + string base_type = physical_type.width == 16 ? "half" : "float"; + string td_line = "typedef "; + td_line += "packed_" + base_type + to_string(rows); + td_line += " " + pack_pfx; + + td_line += base_type + to_string(physical_type.columns) + "x" + to_string(physical_type.vecsize); + td_line += "[" + to_string(cols) + "]"; + td_line += ";"; + add_typedef_line(td_line); + } + else + pack_pfx = "packed_"; + } + else if (row_major) + { + + row_major_physical_type = physical_type; + swap(row_major_physical_type.vecsize, row_major_physical_type.columns); + declared_type = &row_major_physical_type; + } + + + if (msl_options.is_ios() && physical_type.basetype == SPIRType::Image && physical_type.image.sampled == 2) + { + if (!has_decoration(orig_id, DecorationNonWritable)) + SPIRV_CROSS_THROW("Writable images are not allowed in argument buffers on iOS."); + } + + + string array_type; + if (physical_type.basetype != SPIRType::Image && physical_type.basetype != SPIRType::Sampler && + physical_type.basetype != SPIRType::SampledImage) + { + BuiltIn builtin = BuiltInMax; + if (is_member_builtin(type, index, &builtin)) + use_builtin_array = true; + array_type = type_to_array_glsl(physical_type); + } + + auto result = join(pack_pfx, type_to_glsl(*declared_type, orig_id), " ", qualifier, to_member_name(type, index), + member_attribute_qualifier(type, index), array_type, ";"); + + use_builtin_array = false; + return result; +} + + +void CompilerMSL::emit_struct_member(const SPIRType &type, uint32_t member_type_id, uint32_t index, + const string &qualifier, uint32_t) +{ + + if (has_extended_member_decoration(type.self, index, SPIRVCrossDecorationPaddingTarget)) + { + uint32_t pad_len = get_extended_member_decoration(type.self, index, SPIRVCrossDecorationPaddingTarget); + statement("char _m", index, "_pad", "[", pad_len, "];"); + } + + + builtin_declaration = true; + statement(to_struct_member(type, member_type_id, index, qualifier)); + builtin_declaration = false; +} + +void CompilerMSL::emit_struct_padding_target(const SPIRType &type) +{ + uint32_t struct_size = get_declared_struct_size_msl(type, true, true); + uint32_t target_size = get_extended_decoration(type.self, SPIRVCrossDecorationPaddingTarget); + if (target_size < struct_size) + SPIRV_CROSS_THROW("Cannot pad with negative bytes."); + else if (target_size > struct_size) + statement("char _m0_final_padding[", target_size - struct_size, 
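For reference, the typedef line assembled above flattens a packed matrix member into an array of packed vectors. A sketch for a hypothetical float2x3 member (columns = 2, vecsize = 3), following the naming scheme in the code:

    typedef packed_float3 packed_float2x3[2];     // column-major: two packed 3-vectors
    typedef packed_float2 packed_rm_float2x3[3];  // row-major variant: rows/columns swapped, "packed_rm_" prefix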
"];"); +} + + +string CompilerMSL::member_attribute_qualifier(const SPIRType &type, uint32_t index) +{ + auto &execution = get_entry_point(); + + uint32_t mbr_type_id = type.member_types[index]; + auto &mbr_type = get(mbr_type_id); + + BuiltIn builtin = BuiltInMax; + bool is_builtin = is_member_builtin(type, index, &builtin); + + if (has_extended_member_decoration(type.self, index, SPIRVCrossDecorationResourceIndexPrimary)) + { + string quals = join( + " [[id(", get_extended_member_decoration(type.self, index, SPIRVCrossDecorationResourceIndexPrimary), ")"); + if (interlocked_resources.count( + get_extended_member_decoration(type.self, index, SPIRVCrossDecorationInterfaceOrigID))) + quals += ", raster_order_group(0)"; + quals += "]]"; + return quals; + } + + + if (execution.model == ExecutionModelVertex && type.storage == StorageClassInput) + { + if (is_builtin) + { + switch (builtin) + { + case BuiltInVertexId: + case BuiltInVertexIndex: + case BuiltInBaseVertex: + case BuiltInInstanceId: + case BuiltInInstanceIndex: + case BuiltInBaseInstance: + return string(" [[") + builtin_qualifier(builtin) + "]]"; + + case BuiltInDrawIndex: + SPIRV_CROSS_THROW("DrawIndex is not supported in MSL."); + + default: + return ""; + } + } + uint32_t locn = get_ordered_member_location(type.self, index); + if (locn != k_unknown_location) + return string(" [[attribute(") + convert_to_string(locn) + ")]]"; + } + + + if ((execution.model == ExecutionModelVertex || execution.model == ExecutionModelTessellationEvaluation) && + type.storage == StorageClassOutput) + { + if (is_builtin) + { + switch (builtin) + { + case BuiltInPointSize: + + + + + return msl_options.enable_point_size_builtin ? (string(" [[") + builtin_qualifier(builtin) + "]]") : ""; + + case BuiltInViewportIndex: + if (!msl_options.supports_msl_version(2, 0)) + SPIRV_CROSS_THROW("ViewportIndex requires Metal 2.0."); + + case BuiltInPosition: + case BuiltInLayer: + case BuiltInClipDistance: + return string(" [[") + builtin_qualifier(builtin) + "]]" + (mbr_type.array.empty() ? "" : " "); + + default: + return ""; + } + } + uint32_t comp; + uint32_t locn = get_ordered_member_location(type.self, index, &comp); + if (locn != k_unknown_location) + { + if (comp != k_unknown_component) + return string(" [[user(locn") + convert_to_string(locn) + "_" + convert_to_string(comp) + ")]]"; + else + return string(" [[user(locn") + convert_to_string(locn) + ")]]"; + } + } + + + if (execution.model == ExecutionModelTessellationControl && type.storage == StorageClassInput) + { + if (is_builtin) + { + switch (builtin) + { + case BuiltInInvocationId: + case BuiltInPrimitiveId: + case BuiltInSubgroupLocalInvocationId: + case BuiltInSubgroupSize: + return string(" [[") + builtin_qualifier(builtin) + "]]" + (mbr_type.array.empty() ? 
"" : " "); + case BuiltInPatchVertices: + return ""; + + default: + break; + } + } + uint32_t locn = get_ordered_member_location(type.self, index); + if (locn != k_unknown_location) + return string(" [[attribute(") + convert_to_string(locn) + ")]]"; + } + + + if (execution.model == ExecutionModelTessellationControl && type.storage == StorageClassOutput) + { + + + return ""; + } + + + if (execution.model == ExecutionModelTessellationEvaluation && type.storage == StorageClassInput) + { + if (is_builtin) + { + switch (builtin) + { + case BuiltInPrimitiveId: + case BuiltInTessCoord: + return string(" [[") + builtin_qualifier(builtin) + "]]"; + case BuiltInPatchVertices: + return ""; + + default: + break; + } + } + + if (get_type(type.member_types[index]).basetype == SPIRType::ControlPointArray) + return ""; + uint32_t locn = get_ordered_member_location(type.self, index); + if (locn != k_unknown_location) + return string(" [[attribute(") + convert_to_string(locn) + ")]]"; + } + + + + + if (execution.model == ExecutionModelFragment && type.storage == StorageClassInput) + { + string quals; + if (is_builtin) + { + switch (builtin) + { + case BuiltInViewIndex: + if (!msl_options.multiview) + break; + + case BuiltInFrontFacing: + case BuiltInPointCoord: + case BuiltInFragCoord: + case BuiltInSampleId: + case BuiltInSampleMask: + case BuiltInLayer: + case BuiltInBaryCoordNV: + case BuiltInBaryCoordNoPerspNV: + quals = builtin_qualifier(builtin); + break; + + default: + break; + } + } + else + { + uint32_t comp; + uint32_t locn = get_ordered_member_location(type.self, index, &comp); + if (locn != k_unknown_location) + { + if (comp != k_unknown_component) + quals = string("user(locn") + convert_to_string(locn) + "_" + convert_to_string(comp) + ")"; + else + quals = string("user(locn") + convert_to_string(locn) + ")"; + } + } + + if (builtin == BuiltInBaryCoordNV || builtin == BuiltInBaryCoordNoPerspNV) + { + if (has_member_decoration(type.self, index, DecorationFlat) || + has_member_decoration(type.self, index, DecorationCentroid) || + has_member_decoration(type.self, index, DecorationSample) || + has_member_decoration(type.self, index, DecorationNoPerspective)) + { + + SPIRV_CROSS_THROW( + "Flat, Centroid, Sample, NoPerspective decorations are not supported for BaryCoord inputs."); + } + } + + + + + if (!type_is_integral(mbr_type) && (!is_builtin || builtin != BuiltInFragCoord)) + { + if (has_member_decoration(type.self, index, DecorationFlat)) + { + if (!quals.empty()) + quals += ", "; + quals += "flat"; + } + else if (has_member_decoration(type.self, index, DecorationCentroid)) + { + if (!quals.empty()) + quals += ", "; + if (has_member_decoration(type.self, index, DecorationNoPerspective)) + quals += "centroid_no_perspective"; + else + quals += "centroid_perspective"; + } + else if (has_member_decoration(type.self, index, DecorationSample)) + { + if (!quals.empty()) + quals += ", "; + if (has_member_decoration(type.self, index, DecorationNoPerspective)) + quals += "sample_no_perspective"; + else + quals += "sample_perspective"; + } + else if (has_member_decoration(type.self, index, DecorationNoPerspective)) + { + if (!quals.empty()) + quals += ", "; + quals += "center_no_perspective"; + } + } + + if (!quals.empty()) + return " [[" + quals + "]]"; + } + + + if (execution.model == ExecutionModelFragment && type.storage == StorageClassOutput) + { + if (is_builtin) + { + switch (builtin) + { + case BuiltInFragStencilRefEXT: + if (!msl_options.supports_msl_version(2, 1)) + SPIRV_CROSS_THROW("Stencil 
export only supported in MSL 2.1 and up."); + return string(" [[") + builtin_qualifier(builtin) + "]]"; + + case BuiltInSampleMask: + case BuiltInFragDepth: + return string(" [[") + builtin_qualifier(builtin) + "]]"; + + default: + return ""; + } + } + uint32_t locn = get_ordered_member_location(type.self, index); + if (locn != k_unknown_location && has_member_decoration(type.self, index, DecorationIndex)) + return join(" [[color(", locn, "), index(", get_member_decoration(type.self, index, DecorationIndex), + ")]]"); + else if (locn != k_unknown_location) + return join(" [[color(", locn, ")]]"); + else if (has_member_decoration(type.self, index, DecorationIndex)) + return join(" [[index(", get_member_decoration(type.self, index, DecorationIndex), ")]]"); + else + return ""; + } + + + if (execution.model == ExecutionModelGLCompute && type.storage == StorageClassInput) + { + if (is_builtin) + { + switch (builtin) + { + case BuiltInGlobalInvocationId: + case BuiltInWorkgroupId: + case BuiltInNumWorkgroups: + case BuiltInLocalInvocationId: + case BuiltInLocalInvocationIndex: + case BuiltInNumSubgroups: + case BuiltInSubgroupId: + case BuiltInSubgroupLocalInvocationId: + case BuiltInSubgroupSize: + return string(" [[") + builtin_qualifier(builtin) + "]]"; + + default: + return ""; + } + } + } + + return ""; +} + + + + + +uint32_t CompilerMSL::get_ordered_member_location(uint32_t type_id, uint32_t index, uint32_t *comp) +{ + auto &m = ir.meta[type_id]; + if (index < m.members.size()) + { + auto &dec = m.members[index]; + if (comp) + { + if (dec.decoration_flags.get(DecorationComponent)) + *comp = dec.component; + else + *comp = k_unknown_component; + } + if (dec.decoration_flags.get(DecorationLocation)) + return dec.location; + } + + return index; +} + + + +string CompilerMSL::func_type_decl(SPIRType &type) +{ + + string return_type = type_to_glsl(type) + type_to_array_glsl(type); + if (!processing_entry_point) + return return_type; + + + bool ep_should_return_output = !get_is_rasterization_disabled(); + if (stage_out_var_id && ep_should_return_output) + return_type = type_to_glsl(get_stage_out_struct_type()) + type_to_array_glsl(type); + + + string entry_type; + auto &execution = get_entry_point(); + switch (execution.model) + { + case ExecutionModelVertex: + entry_type = "vertex"; + break; + case ExecutionModelTessellationEvaluation: + if (!msl_options.supports_msl_version(1, 2)) + SPIRV_CROSS_THROW("Tessellation requires Metal 1.2."); + if (execution.flags.get(ExecutionModeIsolines)) + SPIRV_CROSS_THROW("Metal does not support isoline tessellation."); + if (msl_options.is_ios()) + entry_type = + join("[[ patch(", execution.flags.get(ExecutionModeTriangles) ? "triangle" : "quad", ") ]] vertex"); + else + entry_type = join("[[ patch(", execution.flags.get(ExecutionModeTriangles) ? "triangle" : "quad", ", ", + execution.output_vertices, ") ]] vertex"); + break; + case ExecutionModelFragment: + entry_type = execution.flags.get(ExecutionModeEarlyFragmentTests) || + execution.flags.get(ExecutionModePostDepthCoverage) ? 
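The entry_type strings that func_type_decl assembles come out roughly as follows (illustrative combinations, not an exhaustive list):

    vertex                                // ExecutionModelVertex
    [[ patch(quad, 4) ]] vertex           // tessellation evaluation on macOS, output_vertices = 4
    [[ patch(triangle) ]] vertex          // tessellation evaluation on iOS (no vertex count)
    [[ early_fragment_tests ]] fragment   // fragment with early tests or post-depth coverage
    kernel                                // compute, kernel, and tessellation control models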
+ "[[ early_fragment_tests ]] fragment" : + "fragment"; + break; + case ExecutionModelTessellationControl: + if (!msl_options.supports_msl_version(1, 2)) + SPIRV_CROSS_THROW("Tessellation requires Metal 1.2."); + if (execution.flags.get(ExecutionModeIsolines)) + SPIRV_CROSS_THROW("Metal does not support isoline tessellation."); + + case ExecutionModelGLCompute: + case ExecutionModelKernel: + entry_type = "kernel"; + break; + default: + entry_type = "unknown"; + break; + } + + return entry_type + " " + return_type; +} + + +string CompilerMSL::get_argument_address_space(const SPIRVariable &argument) +{ + const auto &type = get(argument.basetype); + return get_type_address_space(type, argument.self, true); +} + +string CompilerMSL::get_type_address_space(const SPIRType &type, uint32_t id, bool argument) +{ + + Bitset flags; + auto *var = maybe_get(id); + if (var && type.basetype == SPIRType::Struct && + (has_decoration(type.self, DecorationBlock) || has_decoration(type.self, DecorationBufferBlock))) + flags = get_buffer_block_flags(id); + else + flags = get_decoration_bitset(id); + + const char *addr_space = nullptr; + switch (type.storage) + { + case StorageClassWorkgroup: + addr_space = "threadgroup"; + break; + + case StorageClassStorageBuffer: + { + + + bool readonly = false; + if (!var || has_decoration(type.self, DecorationBlock)) + readonly = flags.get(DecorationNonWritable); + + addr_space = readonly ? "const device" : "device"; + break; + } + + case StorageClassUniform: + case StorageClassUniformConstant: + case StorageClassPushConstant: + if (type.basetype == SPIRType::Struct) + { + bool ssbo = has_decoration(type.self, DecorationBufferBlock); + if (ssbo) + addr_space = flags.get(DecorationNonWritable) ? "const device" : "device"; + else + addr_space = "constant"; + } + else if (!argument) + addr_space = "constant"; + break; + + case StorageClassFunction: + case StorageClassGeneric: + break; + + case StorageClassInput: + if (get_execution_model() == ExecutionModelTessellationControl && var && + var->basevariable == stage_in_ptr_var_id) + addr_space = "threadgroup"; + break; + + case StorageClassOutput: + if (capture_output_to_buffer) + addr_space = "device"; + break; + + default: + break; + } + + if (!addr_space) + + addr_space = type.pointer || (argument && type.basetype == SPIRType::ControlPointArray) ? "thread" : ""; + + return join(flags.get(DecorationVolatile) || flags.get(DecorationCoherent) ? "volatile " : "", addr_space); +} + +const char *CompilerMSL::to_restrict(uint32_t id, bool space) +{ + + Bitset flags; + if (ir.ids[id].get_type() == TypeVariable) + { + uint32_t type_id = expression_type_id(id); + auto &type = expression_type(id); + if (type.basetype == SPIRType::Struct && + (has_decoration(type_id, DecorationBlock) || has_decoration(type_id, DecorationBufferBlock))) + flags = get_buffer_block_flags(id); + else + flags = get_decoration_bitset(id); + } + else + flags = get_decoration_bitset(id); + + return flags.get(DecorationRestrict) ? (space ? 
"restrict " : "restrict") : ""; +} + +string CompilerMSL::entry_point_arg_stage_in() +{ + string decl; + + + uint32_t stage_in_id; + if (get_execution_model() == ExecutionModelTessellationEvaluation) + stage_in_id = patch_stage_in_var_id; + else + stage_in_id = stage_in_var_id; + + if (stage_in_id) + { + auto &var = get(stage_in_id); + auto &type = get_variable_data_type(var); + + add_resource_name(var.self); + decl = join(type_to_glsl(type), " ", to_name(var.self), " [[stage_in]]"); + } + + return decl; +} + +void CompilerMSL::entry_point_args_builtin(string &ep_args) +{ + + SmallVector, 8> active_builtins; + ir.for_each_typed_id([&](uint32_t var_id, SPIRVariable &var) { + auto bi_type = BuiltIn(get_decoration(var_id, DecorationBuiltIn)); + + + + if (var.storage == StorageClassInput && is_builtin_variable(var) && + get_variable_data_type(var).basetype != SPIRType::Struct && + get_variable_data_type(var).basetype != SPIRType::ControlPointArray) + { + + + if (!active_input_builtins.get(bi_type) || !interface_variable_exists_in_entry_point(var_id)) + return; + + + active_builtins.push_back(make_pair(&var, bi_type)); + + + + if (bi_type != BuiltInSamplePosition && bi_type != BuiltInHelperInvocation && + bi_type != BuiltInPatchVertices && bi_type != BuiltInTessLevelInner && + bi_type != BuiltInTessLevelOuter && bi_type != BuiltInPosition && bi_type != BuiltInPointSize && + bi_type != BuiltInClipDistance && bi_type != BuiltInCullDistance && bi_type != BuiltInSubgroupEqMask && + bi_type != BuiltInBaryCoordNV && bi_type != BuiltInBaryCoordNoPerspNV && + bi_type != BuiltInSubgroupGeMask && bi_type != BuiltInSubgroupGtMask && + bi_type != BuiltInSubgroupLeMask && bi_type != BuiltInSubgroupLtMask && bi_type != BuiltInDeviceIndex && + ((get_execution_model() == ExecutionModelFragment && msl_options.multiview) || + bi_type != BuiltInViewIndex) && + (get_execution_model() == ExecutionModelGLCompute || + (get_execution_model() == ExecutionModelFragment && msl_options.supports_msl_version(2, 2)) || + (bi_type != BuiltInSubgroupLocalInvocationId && bi_type != BuiltInSubgroupSize))) + { + if (!ep_args.empty()) + ep_args += ", "; + + + builtin_declaration = true; + ep_args += builtin_type_decl(bi_type, var_id) + " " + to_expression(var_id); + ep_args += " [[" + builtin_qualifier(bi_type); + if (bi_type == BuiltInSampleMask && get_entry_point().flags.get(ExecutionModePostDepthCoverage)) + { + if (!msl_options.supports_msl_version(2)) + SPIRV_CROSS_THROW("Post-depth coverage requires Metal 2.0."); + if (!msl_options.is_ios()) + SPIRV_CROSS_THROW("Post-depth coverage is only supported on iOS."); + ep_args += ", post_depth_coverage"; + } + ep_args += "]]"; + builtin_declaration = false; + } + } + + if (var.storage == StorageClassInput && + has_extended_decoration(var_id, SPIRVCrossDecorationBuiltInDispatchBase)) + { + + + + assert(msl_options.supports_msl_version(1, 2)); + if (!ep_args.empty()) + ep_args += ", "; + + ep_args += type_to_glsl(get_variable_data_type(var)) + " " + to_expression(var_id) + " [[grid_origin]]"; + } + }); + + + + + for (auto &var : active_builtins) + var.first->basetype = ensure_correct_builtin_type(var.first->basetype, var.second); + + + if (needs_base_vertex_arg == TriState::Yes) + ep_args += built_in_func_arg(BuiltInBaseVertex, !ep_args.empty()); + + if (needs_base_instance_arg == TriState::Yes) + ep_args += built_in_func_arg(BuiltInBaseInstance, !ep_args.empty()); + + if (capture_output_to_buffer) + { + + + if (stage_out_var_id) + { + if (!ep_args.empty()) + ep_args += ", "; + 
ep_args += join("device ", type_to_glsl(get_stage_out_struct_type()), "* ", output_buffer_var_name, + " [[buffer(", msl_options.shader_output_buffer_index, ")]]"); + } + + if (get_execution_model() == ExecutionModelTessellationControl) + { + if (!ep_args.empty()) + ep_args += ", "; + ep_args += + join("constant uint* spvIndirectParams [[buffer(", msl_options.indirect_params_buffer_index, ")]]"); + } + else if (stage_out_var_id) + { + if (!ep_args.empty()) + ep_args += ", "; + ep_args += + join("device uint* spvIndirectParams [[buffer(", msl_options.indirect_params_buffer_index, ")]]"); + } + + + + + + if (get_execution_model() == ExecutionModelTessellationControl) + { + if (patch_stage_out_var_id) + { + if (!ep_args.empty()) + ep_args += ", "; + ep_args += + join("device ", type_to_glsl(get_patch_stage_out_struct_type()), "* ", patch_output_buffer_var_name, + " [[buffer(", convert_to_string(msl_options.shader_patch_output_buffer_index), ")]]"); + } + if (!ep_args.empty()) + ep_args += ", "; + ep_args += join("device ", get_tess_factor_struct_name(), "* ", tess_factor_buffer_var_name, " [[buffer(", + convert_to_string(msl_options.shader_tess_factor_buffer_index), ")]]"); + if (stage_in_var_id) + { + if (!ep_args.empty()) + ep_args += ", "; + ep_args += join("threadgroup ", type_to_glsl(get_stage_in_struct_type()), "* ", input_wg_var_name, + " [[threadgroup(", convert_to_string(msl_options.shader_input_wg_index), ")]]"); + } + } + } +} + +string CompilerMSL::entry_point_args_argument_buffer(bool append_comma) +{ + string ep_args = entry_point_arg_stage_in(); + Bitset claimed_bindings; + + for (uint32_t i = 0; i < kMaxArgumentBuffers; i++) + { + uint32_t id = argument_buffer_ids[i]; + if (id == 0) + continue; + + add_resource_name(id); + auto &var = get(id); + auto &type = get_variable_data_type(var); + + if (!ep_args.empty()) + ep_args += ", "; + + + uint32_t buffer_binding; + auto itr = resource_bindings.find({ get_entry_point().model, i, kArgumentBufferBinding }); + if (itr != end(resource_bindings)) + { + buffer_binding = itr->second.first.msl_buffer; + itr->second.second = true; + } + else + { + + + if (claimed_bindings.get(i)) + buffer_binding = next_metal_resource_index_buffer; + else + buffer_binding = i; + } + + claimed_bindings.set(buffer_binding); + + ep_args += get_argument_address_space(var) + " " + type_to_glsl(type) + "& " + to_restrict(id) + to_name(id); + ep_args += " [[buffer(" + convert_to_string(buffer_binding) + ")]]"; + + next_metal_resource_index_buffer = max(next_metal_resource_index_buffer, buffer_binding + 1); + } + + entry_point_args_discrete_descriptors(ep_args); + entry_point_args_builtin(ep_args); + + if (!ep_args.empty() && append_comma) + ep_args += ", "; + + return ep_args; +} + +const MSLConstexprSampler *CompilerMSL::find_constexpr_sampler(uint32_t id) const +{ + + { + auto itr = constexpr_samplers_by_id.find(id); + if (itr != end(constexpr_samplers_by_id)) + return &itr->second; + } + + + { + uint32_t desc_set = get_decoration(id, DecorationDescriptorSet); + uint32_t binding = get_decoration(id, DecorationBinding); + + auto itr = constexpr_samplers_by_binding.find({ desc_set, binding }); + if (itr != end(constexpr_samplers_by_binding)) + return &itr->second; + } + + return nullptr; +} + +void CompilerMSL::entry_point_args_discrete_descriptors(string &ep_args) +{ + + + + struct Resource + { + SPIRVariable *var; + string name; + SPIRType::BaseType basetype; + uint32_t index; + uint32_t plane; + uint32_t secondary_index; + }; + + SmallVector resources; + + 
ir.for_each_typed_id([&](uint32_t var_id, SPIRVariable &var) { + if ((var.storage == StorageClassUniform || var.storage == StorageClassUniformConstant || + var.storage == StorageClassPushConstant || var.storage == StorageClassStorageBuffer) && + !is_hidden_variable(var)) + { + auto &type = get_variable_data_type(var); + + + + + + if (!(msl_options.is_ios() && type.basetype == SPIRType::Image && type.image.sampled == 2) && + var.storage != StorageClassPushConstant) + { + uint32_t desc_set = get_decoration(var_id, DecorationDescriptorSet); + if (descriptor_set_is_argument_buffer(desc_set)) + return; + } + + const MSLConstexprSampler *constexpr_sampler = nullptr; + if (type.basetype == SPIRType::SampledImage || type.basetype == SPIRType::Sampler) + { + constexpr_sampler = find_constexpr_sampler(var_id); + if (constexpr_sampler) + { + + constexpr_samplers_by_id[var_id] = *constexpr_sampler; + } + } + + + uint32_t secondary_index = 0; + if (atomic_image_vars.count(var.self)) + { + secondary_index = get_metal_resource_index(var, SPIRType::AtomicCounter, 0); + } + + if (type.basetype == SPIRType::SampledImage) + { + add_resource_name(var_id); + + uint32_t plane_count = 1; + if (constexpr_sampler && constexpr_sampler->ycbcr_conversion_enable) + plane_count = constexpr_sampler->planes; + + for (uint32_t i = 0; i < plane_count; i++) + resources.push_back({ &var, to_name(var_id), SPIRType::Image, + get_metal_resource_index(var, SPIRType::Image, i), i, secondary_index }); + + if (type.image.dim != DimBuffer && !constexpr_sampler) + { + resources.push_back({ &var, to_sampler_expression(var_id), SPIRType::Sampler, + get_metal_resource_index(var, SPIRType::Sampler), 0, 0 }); + } + } + else if (!constexpr_sampler) + { + + add_resource_name(var_id); + resources.push_back({ &var, to_name(var_id), type.basetype, + get_metal_resource_index(var, type.basetype), 0, secondary_index }); + } + } + }); + + sort(resources.begin(), resources.end(), [](const Resource &lhs, const Resource &rhs) { + return tie(lhs.basetype, lhs.index) < tie(rhs.basetype, rhs.index); + }); + + for (auto &r : resources) + { + auto &var = *r.var; + auto &type = get_variable_data_type(var); + + uint32_t var_id = var.self; + + switch (r.basetype) + { + case SPIRType::Struct: + { + auto &m = ir.meta[type.self]; + if (m.members.size() == 0) + break; + if (!type.array.empty()) + { + if (type.array.size() > 1) + SPIRV_CROSS_THROW("Arrays of arrays of buffers are not supported."); + + + + + uint32_t array_size = to_array_size_literal(type); + + if (array_size == 0) + SPIRV_CROSS_THROW("Unsized arrays of buffers are not supported in MSL."); + + + use_builtin_array = true; + buffer_arrays.push_back(var_id); + for (uint32_t i = 0; i < array_size; ++i) + { + if (!ep_args.empty()) + ep_args += ", "; + ep_args += get_argument_address_space(var) + " " + type_to_glsl(type) + "* " + to_restrict(var_id) + + r.name + "_" + convert_to_string(i); + ep_args += " [[buffer(" + convert_to_string(r.index + i) + ")"; + if (interlocked_resources.count(var_id)) + ep_args += ", raster_order_group(0)"; + ep_args += "]]"; + } + use_builtin_array = false; + } + else + { + if (!ep_args.empty()) + ep_args += ", "; + ep_args += + get_argument_address_space(var) + " " + type_to_glsl(type) + "& " + to_restrict(var_id) + r.name; + ep_args += " [[buffer(" + convert_to_string(r.index) + ")"; + if (interlocked_resources.count(var_id)) + ep_args += ", raster_order_group(0)"; + ep_args += "]]"; + } + break; + } + case SPIRType::Sampler: + if (!ep_args.empty()) + ep_args += ", "; + 
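One consequence of the array handling above: an array of buffers cannot be passed as a single MSL argument, so it is unrolled into consecutive [[buffer(...)]] slots, one per element. For a hypothetical `device Foo buf[3]` assigned resource index 2, the emitted argument list would read roughly:

    device Foo* buf_0 [[buffer(2)]],
    device Foo* buf_1 [[buffer(3)]],
    device Foo* buf_2 [[buffer(4)]]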
ep_args += sampler_type(type) + " " + r.name; + ep_args += " [[sampler(" + convert_to_string(r.index) + ")]]"; + break; + case SPIRType::Image: + { + if (!ep_args.empty()) + ep_args += ", "; + + + const auto &basetype = get(var.basetype); + if (basetype.image.dim != DimSubpassData || !msl_options.is_ios() || + !msl_options.ios_use_framebuffer_fetch_subpasses) + { + ep_args += image_type_glsl(type, var_id) + " " + r.name; + if (r.plane > 0) + ep_args += join(plane_name_suffix, r.plane); + ep_args += " [[texture(" + convert_to_string(r.index) + ")"; + if (interlocked_resources.count(var_id)) + ep_args += ", raster_order_group(0)"; + ep_args += "]]"; + } + else + { + ep_args += image_type_glsl(type, var_id) + "4 " + r.name; + ep_args += " [[color(" + convert_to_string(r.index) + ")]]"; + } + + + if (atomic_image_vars.count(var.self)) + { + ep_args += ", device atomic_" + type_to_glsl(get(basetype.image.type), 0); + ep_args += "* " + r.name + "_atomic"; + ep_args += " [[buffer(" + convert_to_string(r.secondary_index) + ")]]"; + } + break; + } + default: + if (!ep_args.empty()) + ep_args += ", "; + if (!type.pointer) + ep_args += get_type_address_space(get(var.basetype), var_id) + " " + + type_to_glsl(type, var_id) + "& " + r.name; + else + ep_args += type_to_glsl(type, var_id) + " " + r.name; + ep_args += " [[buffer(" + convert_to_string(r.index) + ")"; + if (interlocked_resources.count(var_id)) + ep_args += ", raster_order_group(0)"; + ep_args += "]]"; + break; + } + } +} + + + +string CompilerMSL::entry_point_args_classic(bool append_comma) +{ + string ep_args = entry_point_arg_stage_in(); + entry_point_args_discrete_descriptors(ep_args); + entry_point_args_builtin(ep_args); + + if (!ep_args.empty() && append_comma) + ep_args += ", "; + + return ep_args; +} + +void CompilerMSL::fix_up_shader_inputs_outputs() +{ + + ir.for_each_typed_id([&](uint32_t, SPIRVariable &var) { + auto &type = get_variable_data_type(var); + uint32_t var_id = var.self; + bool ssbo = has_decoration(type.self, DecorationBufferBlock); + + if (var.storage == StorageClassUniformConstant && !is_hidden_variable(var)) + { + if (msl_options.swizzle_texture_samples && has_sampled_images && is_sampled_image_type(type)) + { + auto &entry_func = this->get(ir.default_entry_point); + entry_func.fixup_hooks_in.push_back([this, &type, &var, var_id]() { + bool is_array_type = !type.array.empty(); + + uint32_t desc_set = get_decoration(var_id, DecorationDescriptorSet); + if (descriptor_set_is_argument_buffer(desc_set)) + { + statement("constant uint", is_array_type ? "* " : "& ", to_swizzle_expression(var_id), + is_array_type ? " = &" : " = ", to_name(argument_buffer_ids[desc_set]), + ".spvSwizzleConstants", "[", + convert_to_string(get_metal_resource_index(var, SPIRType::Image)), "];"); + } + else + { + + statement("constant uint", is_array_type ? "* " : "& ", to_swizzle_expression(var_id), + is_array_type ? 
" = &" : " = ", to_name(swizzle_buffer_id), "[", + convert_to_string(get_metal_resource_index(var, SPIRType::Image)), "];"); + } + }); + } + } + else if ((var.storage == StorageClassStorageBuffer || (var.storage == StorageClassUniform && ssbo)) && + !is_hidden_variable(var)) + { + if (buffers_requiring_array_length.count(var.self)) + { + auto &entry_func = this->get(ir.default_entry_point); + entry_func.fixup_hooks_in.push_back([this, &type, &var, var_id]() { + bool is_array_type = !type.array.empty(); + + uint32_t desc_set = get_decoration(var_id, DecorationDescriptorSet); + if (descriptor_set_is_argument_buffer(desc_set)) + { + statement("constant uint", is_array_type ? "* " : "& ", to_buffer_size_expression(var_id), + is_array_type ? " = &" : " = ", to_name(argument_buffer_ids[desc_set]), + ".spvBufferSizeConstants", "[", + convert_to_string(get_metal_resource_index(var, SPIRType::Image)), "];"); + } + else + { + + statement("constant uint", is_array_type ? "* " : "& ", to_buffer_size_expression(var_id), + is_array_type ? " = &" : " = ", to_name(buffer_size_buffer_id), "[", + convert_to_string(get_metal_resource_index(var, type.basetype)), "];"); + } + }); + } + } + }); + + + ir.for_each_typed_id([&](uint32_t, SPIRVariable &var) { + uint32_t var_id = var.self; + BuiltIn bi_type = ir.meta[var_id].decoration.builtin_type; + + if (var.storage == StorageClassInput && is_builtin_variable(var)) + { + auto &entry_func = this->get(ir.default_entry_point); + switch (bi_type) + { + case BuiltInSamplePosition: + entry_func.fixup_hooks_in.push_back([=]() { + statement(builtin_type_decl(bi_type), " ", to_expression(var_id), " = get_sample_position(", + to_expression(builtin_sample_id_id), ");"); + }); + break; + case BuiltInHelperInvocation: + if (msl_options.is_ios()) + SPIRV_CROSS_THROW("simd_is_helper_thread() is only supported on macOS."); + else if (msl_options.is_macos() && !msl_options.supports_msl_version(2, 1)) + SPIRV_CROSS_THROW("simd_is_helper_thread() requires version 2.1 on macOS."); + + entry_func.fixup_hooks_in.push_back([=]() { + statement(builtin_type_decl(bi_type), " ", to_expression(var_id), " = simd_is_helper_thread();"); + }); + break; + case BuiltInPatchVertices: + if (get_execution_model() == ExecutionModelTessellationEvaluation) + entry_func.fixup_hooks_in.push_back([=]() { + statement(builtin_type_decl(bi_type), " ", to_expression(var_id), " = ", + to_expression(patch_stage_in_var_id), ".gl_in.size();"); + }); + else + entry_func.fixup_hooks_in.push_back([=]() { + statement(builtin_type_decl(bi_type), " ", to_expression(var_id), " = spvIndirectParams[0];"); + }); + break; + case BuiltInTessCoord: + + + if (msl_options.tess_domain_origin_lower_left && !get_entry_point().flags.get(ExecutionModeTriangles)) + { + string tc = to_expression(var_id); + entry_func.fixup_hooks_in.push_back([=]() { statement(tc, ".y = 1.0 - ", tc, ".y;"); }); + } + break; + case BuiltInSubgroupLocalInvocationId: + + if (get_execution_model() == ExecutionModelGLCompute) + break; + + + if (get_execution_model() == ExecutionModelFragment && msl_options.supports_msl_version(2, 2)) + break; + + if (msl_options.is_ios()) + SPIRV_CROSS_THROW( + "SubgroupLocalInvocationId cannot be used outside of compute shaders before MSL 2.2 on iOS."); + + if (!msl_options.supports_msl_version(2, 1)) + SPIRV_CROSS_THROW( + "SubgroupLocalInvocationId cannot be used outside of compute shaders before MSL 2.1."); + + + + + + entry_func.fixup_hooks_in.push_back([=]() { + statement(builtin_type_decl(bi_type), " ", 
to_expression(var_id), + " = simd_prefix_exclusive_sum(1);"); + }); + break; + case BuiltInSubgroupSize: + + if (get_execution_model() == ExecutionModelGLCompute) + break; + + + if (get_execution_model() == ExecutionModelFragment && msl_options.supports_msl_version(2, 2)) + break; + + if (msl_options.is_ios()) + SPIRV_CROSS_THROW("SubgroupSize cannot be used outside of compute shaders on iOS."); + + if (!msl_options.supports_msl_version(2, 1)) + SPIRV_CROSS_THROW("SubgroupSize cannot be used outside of compute shaders before Metal 2.1."); + + entry_func.fixup_hooks_in.push_back( + [=]() { statement(builtin_type_decl(bi_type), " ", to_expression(var_id), " = simd_sum(1);"); }); + break; + case BuiltInSubgroupEqMask: + if (msl_options.is_ios()) + SPIRV_CROSS_THROW("Subgroup ballot functionality is unavailable on iOS."); + if (!msl_options.supports_msl_version(2, 1)) + SPIRV_CROSS_THROW("Subgroup ballot functionality requires Metal 2.1."); + entry_func.fixup_hooks_in.push_back([=]() { + statement(builtin_type_decl(bi_type), " ", to_expression(var_id), " = ", + to_expression(builtin_subgroup_invocation_id_id), " > 32 ? uint4(0, (1 << (", + to_expression(builtin_subgroup_invocation_id_id), " - 32)), uint2(0)) : uint4(1 << ", + to_expression(builtin_subgroup_invocation_id_id), ", uint3(0));"); + }); + break; + case BuiltInSubgroupGeMask: + if (msl_options.is_ios()) + SPIRV_CROSS_THROW("Subgroup ballot functionality is unavailable on iOS."); + if (!msl_options.supports_msl_version(2, 1)) + SPIRV_CROSS_THROW("Subgroup ballot functionality requires Metal 2.1."); + entry_func.fixup_hooks_in.push_back([=]() { + + + + + + + + + + + + + + + statement(builtin_type_decl(bi_type), " ", to_expression(var_id), + " = uint4(extract_bits(0xFFFFFFFF, min(", + to_expression(builtin_subgroup_invocation_id_id), ", 32u), (uint)max(min((int)", + to_expression(builtin_subgroup_size_id), ", 32) - (int)", + to_expression(builtin_subgroup_invocation_id_id), + ", 0)), extract_bits(0xFFFFFFFF, (uint)max((int)", + to_expression(builtin_subgroup_invocation_id_id), " - 32, 0), (uint)max((int)", + to_expression(builtin_subgroup_size_id), " - (int)max(", + to_expression(builtin_subgroup_invocation_id_id), ", 32u), 0)), uint2(0));"); + }); + break; + case BuiltInSubgroupGtMask: + if (msl_options.is_ios()) + SPIRV_CROSS_THROW("Subgroup ballot functionality is unavailable on iOS."); + if (!msl_options.supports_msl_version(2, 1)) + SPIRV_CROSS_THROW("Subgroup ballot functionality requires Metal 2.1."); + entry_func.fixup_hooks_in.push_back([=]() { + + + statement(builtin_type_decl(bi_type), " ", to_expression(var_id), + " = uint4(extract_bits(0xFFFFFFFF, min(", + to_expression(builtin_subgroup_invocation_id_id), " + 1, 32u), (uint)max(min((int)", + to_expression(builtin_subgroup_size_id), ", 32) - (int)", + to_expression(builtin_subgroup_invocation_id_id), + " - 1, 0)), extract_bits(0xFFFFFFFF, (uint)max((int)", + to_expression(builtin_subgroup_invocation_id_id), " + 1 - 32, 0), (uint)max((int)", + to_expression(builtin_subgroup_size_id), " - (int)max(", + to_expression(builtin_subgroup_invocation_id_id), " + 1, 32u), 0)), uint2(0));"); + }); + break; + case BuiltInSubgroupLeMask: + if (msl_options.is_ios()) + SPIRV_CROSS_THROW("Subgroup ballot functionality is unavailable on iOS."); + if (!msl_options.supports_msl_version(2, 1)) + SPIRV_CROSS_THROW("Subgroup ballot functionality requires Metal 2.1."); + entry_func.fixup_hooks_in.push_back([=]() { + statement(builtin_type_decl(bi_type), " ", to_expression(var_id), + " = 
uint4(extract_bits(0xFFFFFFFF, 0, min(", + to_expression(builtin_subgroup_invocation_id_id), + " + 1, 32u)), extract_bits(0xFFFFFFFF, 0, (uint)max((int)", + to_expression(builtin_subgroup_invocation_id_id), " + 1 - 32, 0)), uint2(0));"); + }); + break; + case BuiltInSubgroupLtMask: + if (msl_options.is_ios()) + SPIRV_CROSS_THROW("Subgroup ballot functionality is unavailable on iOS."); + if (!msl_options.supports_msl_version(2, 1)) + SPIRV_CROSS_THROW("Subgroup ballot functionality requires Metal 2.1."); + entry_func.fixup_hooks_in.push_back([=]() { + statement(builtin_type_decl(bi_type), " ", to_expression(var_id), + " = uint4(extract_bits(0xFFFFFFFF, 0, min(", + to_expression(builtin_subgroup_invocation_id_id), + ", 32u)), extract_bits(0xFFFFFFFF, 0, (uint)max((int)", + to_expression(builtin_subgroup_invocation_id_id), " - 32, 0)), uint2(0));"); + }); + break; + case BuiltInViewIndex: + if (!msl_options.multiview) + { + + + entry_func.fixup_hooks_in.push_back([=]() { + statement("const ", builtin_type_decl(bi_type), " ", to_expression(var_id), " = 0;"); + }); + } + else if (msl_options.view_index_from_device_index) + { + + entry_func.fixup_hooks_in.push_back([=]() { + statement("const ", builtin_type_decl(bi_type), " ", to_expression(var_id), " = ", + msl_options.device_index, ";"); + }); + + + + } + else if (get_execution_model() == ExecutionModelFragment) + { + + + entry_func.fixup_hooks_in.push_back([=]() { + statement(to_expression(var_id), " += ", to_expression(view_mask_buffer_id), "[0];"); + }); + } + else if (get_execution_model() == ExecutionModelVertex) + { + + + entry_func.fixup_hooks_in.push_back([=]() { + statement(builtin_type_decl(bi_type), " ", to_expression(var_id), " = ", + to_expression(view_mask_buffer_id), "[0] + ", to_expression(builtin_instance_idx_id), + " % ", to_expression(view_mask_buffer_id), "[1];"); + statement(to_expression(builtin_instance_idx_id), " /= ", to_expression(view_mask_buffer_id), + "[1];"); + }); + + + + + entry_func.fixup_hooks_out.push_back([=]() { + statement(to_expression(builtin_layer_id), " = ", to_expression(var_id), " - ", + to_expression(view_mask_buffer_id), "[0];"); + }); + } + break; + case BuiltInDeviceIndex: + + + + entry_func.fixup_hooks_in.push_back([=]() { + statement("const ", builtin_type_decl(bi_type), " ", to_expression(var_id), " = ", + msl_options.device_index, ";"); + }); + break; + case BuiltInWorkgroupId: + if (!msl_options.dispatch_base || !active_input_builtins.get(BuiltInWorkgroupId)) + break; + + + + + entry_func.fixup_hooks_in.push_back([=]() { + statement(to_expression(var_id), " += ", to_dereferenced_expression(builtin_dispatch_base_id), ";"); + }); + break; + case BuiltInGlobalInvocationId: + if (!msl_options.dispatch_base || !active_input_builtins.get(BuiltInGlobalInvocationId)) + break; + + + + entry_func.fixup_hooks_in.push_back([=]() { + auto &execution = this->get_entry_point(); + uint32_t workgroup_size_id = execution.workgroup_size.constant; + if (workgroup_size_id) + statement(to_expression(var_id), " += ", to_dereferenced_expression(builtin_dispatch_base_id), + " * ", to_expression(workgroup_size_id), ";"); + else + statement(to_expression(var_id), " += ", to_dereferenced_expression(builtin_dispatch_base_id), + " * uint3(", execution.workgroup_size.x, ", ", execution.workgroup_size.y, ", ", + execution.workgroup_size.z, ");"); + }); + break; + default: + break; + } + } + }); +} + + +uint32_t CompilerMSL::get_metal_resource_index(SPIRVariable &var, SPIRType::BaseType basetype, uint32_t plane) +{ + auto 
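The multiview fixup hooks registered above amount to a small prolog/epilog pair in the generated vertex function. A sketch, assuming the usual spvViewMask naming, where spvViewMask[0] is the first view index and spvViewMask[1] the view count:

    // Prolog: derive the view from the instance index, then rebase the instance.
    uint gl_ViewIndex = spvViewMask[0] + gl_InstanceIndex % spvViewMask[1];
    gl_InstanceIndex /= spvViewMask[1];
    // ...
    // Epilog: route the view to a render-target array layer.
    out.gl_Layer = gl_ViewIndex - spvViewMask[0];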
&execution = get_entry_point(); + auto &var_dec = ir.meta[var.self].decoration; + auto &var_type = get(var.basetype); + uint32_t var_desc_set = (var.storage == StorageClassPushConstant) ? kPushConstDescSet : var_dec.set; + uint32_t var_binding = (var.storage == StorageClassPushConstant) ? kPushConstBinding : var_dec.binding; + + + auto itr = resource_bindings.find({ execution.model, var_desc_set, var_binding }); + + auto resource_decoration = var_type.basetype == SPIRType::SampledImage && basetype == SPIRType::Sampler ? + SPIRVCrossDecorationResourceIndexSecondary : + SPIRVCrossDecorationResourceIndexPrimary; + if (plane == 1) + resource_decoration = SPIRVCrossDecorationResourceIndexTertiary; + if (plane == 2) + resource_decoration = SPIRVCrossDecorationResourceIndexQuaternary; + + if (itr != end(resource_bindings)) + { + auto &remap = itr->second; + remap.second = true; + switch (basetype) + { + case SPIRType::Image: + set_extended_decoration(var.self, resource_decoration, remap.first.msl_texture + plane); + return remap.first.msl_texture + plane; + case SPIRType::Sampler: + set_extended_decoration(var.self, resource_decoration, remap.first.msl_sampler); + return remap.first.msl_sampler; + default: + set_extended_decoration(var.self, resource_decoration, remap.first.msl_buffer); + return remap.first.msl_buffer; + } + } + + + if (has_extended_decoration(var.self, resource_decoration)) + return get_extended_decoration(var.self, resource_decoration); + + + if (msl_options.enable_decoration_binding) + { + + if (has_decoration(var.self, DecorationBinding)) + { + var_binding = get_decoration(var.self, DecorationBinding); + + if (var_binding < 0x80000000u) + return var_binding; + } + } + + + + + uint32_t binding_stride = 1; + auto &type = get(var.basetype); + for (uint32_t i = 0; i < uint32_t(type.array.size()); i++) + binding_stride *= to_array_size_literal(type, i); + + assert(binding_stride != 0); + + + uint32_t resource_index; + + bool allocate_argument_buffer_ids = false; + uint32_t desc_set = 0; + + if (var.storage != StorageClassPushConstant) + { + desc_set = get_decoration(var.self, DecorationDescriptorSet); + allocate_argument_buffer_ids = descriptor_set_is_argument_buffer(desc_set); + } + + if (allocate_argument_buffer_ids) + { + + resource_index = next_metal_resource_ids[desc_set]; + next_metal_resource_ids[desc_set] += binding_stride; + } + else + { + + switch (basetype) + { + case SPIRType::Image: + resource_index = next_metal_resource_index_texture; + next_metal_resource_index_texture += binding_stride; + break; + case SPIRType::Sampler: + resource_index = next_metal_resource_index_sampler; + next_metal_resource_index_sampler += binding_stride; + break; + default: + resource_index = next_metal_resource_index_buffer; + next_metal_resource_index_buffer += binding_stride; + break; + } + } + + set_extended_decoration(var.self, resource_decoration, resource_index); + return resource_index; +} + +string CompilerMSL::argument_decl(const SPIRFunction::Parameter &arg) +{ + auto &var = get(arg.id); + auto &type = get_variable_data_type(var); + auto &var_type = get(arg.type); + StorageClass storage = var_type.storage; + bool is_pointer = var_type.pointer; + + + + uint32_t name_id = var.self; + if (arg.alias_global_variable && var.basevariable) + name_id = var.basevariable; + + bool constref = !arg.alias_global_variable && is_pointer && arg.write_count == 0; + + bool type_is_image = type.basetype == SPIRType::Image || type.basetype == SPIRType::SampledImage || + type.basetype == 
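get_metal_resource_index() above first consults resource_bindings, the table seeded through SPIRV-Cross's public remapping API; only unmapped resources fall through to automatic slot allocation. A sketch of how an embedder might pre-seed that table (slot numbers are illustrative):

#include "spirv_msl.hpp"

// Map SPIR-V (set = 0, binding = 1) in the fragment stage onto explicit
// Metal slots; the lookup above then takes the remap path and marks the
// entry as used instead of allocating the next free index.
static void seed_binding(spirv_cross::CompilerMSL &msl)
{
    spirv_cross::MSLResourceBinding binding;
    binding.stage = spv::ExecutionModelFragment;
    binding.desc_set = 0;
    binding.binding = 1;
    binding.msl_buffer = 0;  // used if the resource resolves to a buffer
    binding.msl_texture = 2; // used if it resolves to a texture
    binding.msl_sampler = 2; // used if it resolves to a sampler
    msl.add_msl_resource_binding(binding);
}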
SPIRType::Sampler; + + + if (!type.array.empty() && type_is_image) + constref = true; + + string decl; + if (constref) + decl += "const "; + + + + + + + bool is_dynamic_img_sampler = !arg.alias_global_variable && type.basetype == SPIRType::SampledImage && + type.image.dim == Dim2D && type_is_floating_point(get(type.image.type)) && + spv_function_implementations.count(SPVFuncImplDynamicImageSampler); + + + string address_space = get_argument_address_space(var); + bool builtin = is_builtin_variable(var); + use_builtin_array = builtin; + if (address_space == "threadgroup") + use_builtin_array = true; + + if (var.basevariable && (var.basevariable == stage_in_ptr_var_id || var.basevariable == stage_out_ptr_var_id)) + decl += type_to_glsl(type, arg.id); + else if (builtin) + decl += builtin_type_decl(static_cast(get_decoration(arg.id, DecorationBuiltIn)), arg.id); + else if ((storage == StorageClassUniform || storage == StorageClassStorageBuffer) && is_array(type)) + { + use_builtin_array = true; + decl += join(type_to_glsl(type, arg.id), "*"); + } + else if (is_dynamic_img_sampler) + { + decl += join("spvDynamicImageSampler<", type_to_glsl(get(type.image.type)), ">"); + + set_extended_decoration(arg.id, SPIRVCrossDecorationDynamicImageSampler); + } + else + decl += type_to_glsl(type, arg.id); + + bool opaque_handle = storage == StorageClassUniformConstant; + + if (!builtin && !opaque_handle && !is_pointer && + (storage == StorageClassFunction || storage == StorageClassGeneric)) + { + + if (!address_space.empty()) + decl = join(address_space, " ", decl); + decl += " "; + decl += to_expression(name_id); + } + else if (is_array(type) && !type_is_image) + { + + if (!address_space.empty()) + decl = join(address_space, " ", decl); + + if (msl_options.argument_buffers) + { + uint32_t desc_set = get_decoration(name_id, DecorationDescriptorSet); + if ((storage == StorageClassUniform || storage == StorageClassStorageBuffer) && + descriptor_set_is_argument_buffer(desc_set)) + { + + + + + + + + + + + + + if (argument_buffer_device_storage_mask & (1u << desc_set)) + decl += " const device"; + else + decl += " constant"; + } + } + + decl += " (&"; + const char *restrict_kw = to_restrict(name_id); + if (*restrict_kw) + { + decl += " "; + decl += restrict_kw; + } + decl += to_expression(name_id); + decl += ")"; + decl += type_to_array_glsl(type); + } + else if (!opaque_handle) + { + + + if (!address_space.empty()) + { + if (decl.back() == '*') + decl += join(" ", address_space, " "); + else + decl = join(address_space, " ", decl); + } + decl += "&"; + decl += " "; + decl += to_restrict(name_id); + decl += to_expression(name_id); + } + else + { + if (!address_space.empty()) + decl = join(address_space, " ", decl); + decl += " "; + decl += to_expression(name_id); + } + + + auto *backing_var = maybe_get_backing_variable(name_id); + if (backing_var && atomic_image_vars.count(backing_var->self)) + { + decl += ", device atomic_" + type_to_glsl(get(var_type.image.type), 0); + decl += "* " + to_expression(name_id) + "_atomic"; + } + + use_builtin_array = false; + + return decl; +} + + + +string CompilerMSL::to_name(uint32_t id, bool allow_alias) const +{ + if (current_function && (current_function->self == ir.default_entry_point)) + { + auto *m = ir.find_meta(id); + if (m && !m->decoration.qualified_alias.empty()) + return m->decoration.qualified_alias; + } + return Compiler::to_name(id, allow_alias); +} + + +string CompilerMSL::to_qualified_member_name(const SPIRType &type, uint32_t index) +{ + + BuiltIn builtin = 
BuiltInMax; + if (is_member_builtin(type, index, &builtin)) + return builtin_to_glsl(builtin, type.storage); + + + string mbr_name = to_member_name(type, index); + size_t startPos = mbr_name.find_first_not_of("_"); + mbr_name = (startPos != string::npos) ? mbr_name.substr(startPos) : ""; + return join(to_name(type.self), "_", mbr_name); +} + + + +string CompilerMSL::ensure_valid_name(string name, string pfx) +{ + return (name.size() >= 2 && name[0] == '_' && isdigit(name[1])) ? (pfx + name) : name; +} + + +void CompilerMSL::replace_illegal_names() +{ + + + static const unordered_set keywords = { + "kernel", + "vertex", + "fragment", + "compute", + "bias", + "assert", + "VARIABLE_TRACEPOINT", + "STATIC_DATA_TRACEPOINT", + "STATIC_DATA_TRACEPOINT_V", + "METAL_ALIGN", + "METAL_ASM", + "METAL_CONST", + "METAL_DEPRECATED", + "METAL_ENABLE_IF", + "METAL_FUNC", + "METAL_INTERNAL", + "METAL_NON_NULL_RETURN", + "METAL_NORETURN", + "METAL_NOTHROW", + "METAL_PURE", + "METAL_UNAVAILABLE", + "METAL_IMPLICIT", + "METAL_EXPLICIT", + "METAL_CONST_ARG", + "METAL_ARG_UNIFORM", + "METAL_ZERO_ARG", + "METAL_VALID_LOD_ARG", + "METAL_VALID_LEVEL_ARG", + "METAL_VALID_STORE_ORDER", + "METAL_VALID_LOAD_ORDER", + "METAL_VALID_COMPARE_EXCHANGE_FAILURE_ORDER", + "METAL_COMPATIBLE_COMPARE_EXCHANGE_ORDERS", + "METAL_VALID_RENDER_TARGET", + "is_function_constant_defined", + "CHAR_BIT", + "SCHAR_MAX", + "SCHAR_MIN", + "UCHAR_MAX", + "CHAR_MAX", + "CHAR_MIN", + "USHRT_MAX", + "SHRT_MAX", + "SHRT_MIN", + "UINT_MAX", + "INT_MAX", + "INT_MIN", + "FLT_DIG", + "FLT_MANT_DIG", + "FLT_MAX_10_EXP", + "FLT_MAX_EXP", + "FLT_MIN_10_EXP", + "FLT_MIN_EXP", + "FLT_RADIX", + "FLT_MAX", + "FLT_MIN", + "FLT_EPSILON", + "FP_ILOGB0", + "FP_ILOGBNAN", + "MAXFLOAT", + "HUGE_VALF", + "INFINITY", + "NAN", + "M_E_F", + "M_LOG2E_F", + "M_LOG10E_F", + "M_LN2_F", + "M_LN10_F", + "M_PI_F", + "M_PI_2_F", + "M_PI_4_F", + "M_1_PI_F", + "M_2_PI_F", + "M_2_SQRTPI_F", + "M_SQRT2_F", + "M_SQRT1_2_F", + "HALF_DIG", + "HALF_MANT_DIG", + "HALF_MAX_10_EXP", + "HALF_MAX_EXP", + "HALF_MIN_10_EXP", + "HALF_MIN_EXP", + "HALF_RADIX", + "HALF_MAX", + "HALF_MIN", + "HALF_EPSILON", + "MAXHALF", + "HUGE_VALH", + "M_E_H", + "M_LOG2E_H", + "M_LOG10E_H", + "M_LN2_H", + "M_LN10_H", + "M_PI_H", + "M_PI_2_H", + "M_PI_4_H", + "M_1_PI_H", + "M_2_PI_H", + "M_2_SQRTPI_H", + "M_SQRT2_H", + "M_SQRT1_2_H", + "DBL_DIG", + "DBL_MANT_DIG", + "DBL_MAX_10_EXP", + "DBL_MAX_EXP", + "DBL_MIN_10_EXP", + "DBL_MIN_EXP", + "DBL_RADIX", + "DBL_MAX", + "DBL_MIN", + "DBL_EPSILON", + "HUGE_VAL", + "M_E", + "M_LOG2E", + "M_LOG10E", + "M_LN2", + "M_LN10", + "M_PI", + "M_PI_2", + "M_PI_4", + "M_1_PI", + "M_2_PI", + "M_2_SQRTPI", + "M_SQRT2", + "M_SQRT1_2", + "quad_broadcast", + }; + + static const unordered_set illegal_func_names = { + "main", + "saturate", + "assert", + "VARIABLE_TRACEPOINT", + "STATIC_DATA_TRACEPOINT", + "STATIC_DATA_TRACEPOINT_V", + "METAL_ALIGN", + "METAL_ASM", + "METAL_CONST", + "METAL_DEPRECATED", + "METAL_ENABLE_IF", + "METAL_FUNC", + "METAL_INTERNAL", + "METAL_NON_NULL_RETURN", + "METAL_NORETURN", + "METAL_NOTHROW", + "METAL_PURE", + "METAL_UNAVAILABLE", + "METAL_IMPLICIT", + "METAL_EXPLICIT", + "METAL_CONST_ARG", + "METAL_ARG_UNIFORM", + "METAL_ZERO_ARG", + "METAL_VALID_LOD_ARG", + "METAL_VALID_LEVEL_ARG", + "METAL_VALID_STORE_ORDER", + "METAL_VALID_LOAD_ORDER", + "METAL_VALID_COMPARE_EXCHANGE_FAILURE_ORDER", + "METAL_COMPATIBLE_COMPARE_EXCHANGE_ORDERS", + "METAL_VALID_RENDER_TARGET", + "is_function_constant_defined", + "CHAR_BIT", + "SCHAR_MAX", + "SCHAR_MIN", + 
"UCHAR_MAX", + "CHAR_MAX", + "CHAR_MIN", + "USHRT_MAX", + "SHRT_MAX", + "SHRT_MIN", + "UINT_MAX", + "INT_MAX", + "INT_MIN", + "FLT_DIG", + "FLT_MANT_DIG", + "FLT_MAX_10_EXP", + "FLT_MAX_EXP", + "FLT_MIN_10_EXP", + "FLT_MIN_EXP", + "FLT_RADIX", + "FLT_MAX", + "FLT_MIN", + "FLT_EPSILON", + "FP_ILOGB0", + "FP_ILOGBNAN", + "MAXFLOAT", + "HUGE_VALF", + "INFINITY", + "NAN", + "M_E_F", + "M_LOG2E_F", + "M_LOG10E_F", + "M_LN2_F", + "M_LN10_F", + "M_PI_F", + "M_PI_2_F", + "M_PI_4_F", + "M_1_PI_F", + "M_2_PI_F", + "M_2_SQRTPI_F", + "M_SQRT2_F", + "M_SQRT1_2_F", + "HALF_DIG", + "HALF_MANT_DIG", + "HALF_MAX_10_EXP", + "HALF_MAX_EXP", + "HALF_MIN_10_EXP", + "HALF_MIN_EXP", + "HALF_RADIX", + "HALF_MAX", + "HALF_MIN", + "HALF_EPSILON", + "MAXHALF", + "HUGE_VALH", + "M_E_H", + "M_LOG2E_H", + "M_LOG10E_H", + "M_LN2_H", + "M_LN10_H", + "M_PI_H", + "M_PI_2_H", + "M_PI_4_H", + "M_1_PI_H", + "M_2_PI_H", + "M_2_SQRTPI_H", + "M_SQRT2_H", + "M_SQRT1_2_H", + "DBL_DIG", + "DBL_MANT_DIG", + "DBL_MAX_10_EXP", + "DBL_MAX_EXP", + "DBL_MIN_10_EXP", + "DBL_MIN_EXP", + "DBL_RADIX", + "DBL_MAX", + "DBL_MIN", + "DBL_EPSILON", + "HUGE_VAL", + "M_E", + "M_LOG2E", + "M_LOG10E", + "M_LN2", + "M_LN10", + "M_PI", + "M_PI_2", + "M_PI_4", + "M_1_PI", + "M_2_PI", + "M_2_SQRTPI", + "M_SQRT2", + "M_SQRT1_2", + }; + + ir.for_each_typed_id([&](uint32_t self, SPIRVariable &) { + auto &dec = ir.meta[self].decoration; + if (keywords.find(dec.alias) != end(keywords)) + dec.alias += "0"; + }); + + ir.for_each_typed_id([&](uint32_t self, SPIRFunction &) { + auto &dec = ir.meta[self].decoration; + if (illegal_func_names.find(dec.alias) != end(illegal_func_names)) + dec.alias += "0"; + }); + + ir.for_each_typed_id([&](uint32_t self, SPIRType &) { + for (auto &mbr_dec : ir.meta[self].members) + if (keywords.find(mbr_dec.alias) != end(keywords)) + mbr_dec.alias += "0"; + }); + + for (auto &entry : ir.entry_points) + { + + string &ep_name = entry.second.name; + if (illegal_func_names.find(ep_name) != end(illegal_func_names)) + ep_name += "0"; + + + ir.meta[entry.first].decoration.alias = ep_name; + } + + CompilerGLSL::replace_illegal_names(); +} + +string CompilerMSL::to_member_reference(uint32_t base, const SPIRType &type, uint32_t index, bool ptr_chain) +{ + auto *var = maybe_get(base); + + + + bool declared_as_pointer = false; + + if (var) + { + bool is_buffer_variable = var->storage == StorageClassUniform || var->storage == StorageClassStorageBuffer; + declared_as_pointer = is_buffer_variable && is_array(get(var->basetype)); + } + + if (declared_as_pointer || (!ptr_chain && should_dereference(base))) + return join("->", to_member_name(type, index)); + else + return join(".", to_member_name(type, index)); +} + +string CompilerMSL::to_qualifiers_glsl(uint32_t id) +{ + string quals; + + auto &type = expression_type(id); + if (type.storage == StorageClassWorkgroup) + quals += "threadgroup "; + + return quals; +} + + + + +string CompilerMSL::type_to_glsl(const SPIRType &type, uint32_t id) +{ + string type_name; + + + if (type.pointer) + { + const char *restrict_kw; + type_name = join(get_type_address_space(type, id), " ", type_to_glsl(get(type.parent_type), id)); + + switch (type.basetype) + { + case SPIRType::Image: + case SPIRType::SampledImage: + case SPIRType::Sampler: + + break; + default: + + type_name += "*"; + restrict_kw = to_restrict(id); + if (*restrict_kw) + { + type_name += " "; + type_name += restrict_kw; + } + break; + } + return type_name; + } + + switch (type.basetype) + { + case SPIRType::Struct: + + + type_name = 
to_name(type.self); + break; + + case SPIRType::Image: + case SPIRType::SampledImage: + return image_type_glsl(type, id); + + case SPIRType::Sampler: + return sampler_type(type); + + case SPIRType::Void: + return "void"; + + case SPIRType::AtomicCounter: + return "atomic_uint"; + + case SPIRType::ControlPointArray: + return join("patch_control_point<", type_to_glsl(get(type.parent_type), id), ">"); + + + case SPIRType::Boolean: + type_name = "bool"; + break; + case SPIRType::Char: + case SPIRType::SByte: + type_name = "char"; + break; + case SPIRType::UByte: + type_name = "uchar"; + break; + case SPIRType::Short: + type_name = "short"; + break; + case SPIRType::UShort: + type_name = "ushort"; + break; + case SPIRType::Int: + type_name = "int"; + break; + case SPIRType::UInt: + type_name = "uint"; + break; + case SPIRType::Int64: + if (!msl_options.supports_msl_version(2, 2)) + SPIRV_CROSS_THROW("64-bit integers are only supported in MSL 2.2 and above."); + type_name = "long"; + break; + case SPIRType::UInt64: + if (!msl_options.supports_msl_version(2, 2)) + SPIRV_CROSS_THROW("64-bit integers are only supported in MSL 2.2 and above."); + type_name = "ulong"; + break; + case SPIRType::Half: + type_name = "half"; + break; + case SPIRType::Float: + type_name = "float"; + break; + case SPIRType::Double: + type_name = "double"; + break; + + default: + return "unknown_type"; + } + + + if (type.columns > 1) + type_name += to_string(type.columns) + "x"; + + + if (type.vecsize > 1) + type_name += to_string(type.vecsize); + + if (type.array.empty() || use_builtin_array) + { + return type_name; + } + else + { + + add_spv_func_and_recompile(SPVFuncImplUnsafeArray); + string res; + string sizes; + + for (uint32_t i = 0; i < uint32_t(type.array.size()); i++) + { + res += "spvUnsafeArray<"; + sizes += ", "; + sizes += to_array_size(type, i); + sizes += ">"; + } + + res += type_name + sizes; + return res; + } +} + +string CompilerMSL::type_to_array_glsl(const SPIRType &type) +{ + + switch (type.basetype) + { + case SPIRType::AtomicCounter: + case SPIRType::ControlPointArray: + { + return CompilerGLSL::type_to_array_glsl(type); + } + default: + { + if (use_builtin_array) + return CompilerGLSL::type_to_array_glsl(type); + else + return ""; + } + } +} + + +std::string CompilerMSL::variable_decl(const SPIRVariable &variable) +{ + if (variable.storage == StorageClassWorkgroup) + { + use_builtin_array = true; + } + std::string expr = CompilerGLSL::variable_decl(variable); + if (variable.storage == StorageClassWorkgroup) + { + use_builtin_array = false; + } + return expr; +} + + +std::string CompilerMSL::variable_decl(const SPIRType &type, const std::string &name, uint32_t id) +{ + return CompilerGLSL::variable_decl(type, name, id); +} + +std::string CompilerMSL::sampler_type(const SPIRType &type) +{ + if (!type.array.empty()) + { + if (!msl_options.supports_msl_version(2)) + SPIRV_CROSS_THROW("MSL 2.0 or greater is required for arrays of samplers."); + + if (type.array.size() > 1) + SPIRV_CROSS_THROW("Arrays of arrays of samplers are not supported in MSL."); + + + uint32_t array_size = to_array_size_literal(type); + if (array_size == 0) + SPIRV_CROSS_THROW("Unsized array of samplers is not supported in MSL."); + + auto &parent = get(get_pointee_type(type).parent_type); + return join("array<", sampler_type(parent), ", ", array_size, ">"); + } + else + return "sampler"; +} + + +string CompilerMSL::image_type_glsl(const SPIRType &type, uint32_t id) +{ + auto *var = maybe_get(id); + if (var && var->basevariable) + { 
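For declared arrays, type_to_glsl() above wraps the element type in one spvUnsafeArray<T, N> per dimension (the helper itself is emitted into the shader via SPVFuncImplUnsafeArray). The point of the wrapper is value semantics: plain C arrays in MSL cannot be freely copied or returned. A minimal sketch, assuming the emitted helper looks roughly like this:

#include <cstddef>

// Fixed-size array with value semantics; "Num ? Num : 1" keeps zero-sized
// declarations legal. The real emitted helper also provides iterators.
template <typename T, size_t Num>
struct unsafe_array_sketch
{
    T elements[Num ? Num : 1];

    T &operator[](size_t pos) { return elements[pos]; }
    const T &operator[](size_t pos) const { return elements[pos]; }
};

// Dimensions nest, e.g.
// unsafe_array_sketch<unsafe_array_sketch<float, N_inner>, N_outer>.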
+ + + id = var->basevariable; + } + + if (!type.array.empty()) + { + uint32_t major = 2, minor = 0; + if (msl_options.is_ios()) + { + major = 1; + minor = 2; + } + if (!msl_options.supports_msl_version(major, minor)) + { + if (msl_options.is_ios()) + SPIRV_CROSS_THROW("MSL 1.2 or greater is required for arrays of textures."); + else + SPIRV_CROSS_THROW("MSL 2.0 or greater is required for arrays of textures."); + } + + if (type.array.size() > 1) + SPIRV_CROSS_THROW("Arrays of arrays of textures are not supported in MSL."); + + + uint32_t array_size = to_array_size_literal(type); + if (array_size == 0) + SPIRV_CROSS_THROW("Unsized array of images is not supported in MSL."); + + auto &parent = get(get_pointee_type(type).parent_type); + return join("array<", image_type_glsl(parent, id), ", ", array_size, ">"); + } + + string img_type_name; + + + auto &img_type = get(type.self).image; + if (image_is_comparison(type, id)) + { + switch (img_type.dim) + { + case Dim1D: + case Dim2D: + if (img_type.dim == Dim1D && !msl_options.texture_1D_as_2D) + { + + img_type_name += "depth1d_unsupported_by_metal"; + break; + } + + if (img_type.ms && img_type.arrayed) + { + if (!msl_options.supports_msl_version(2, 1)) + SPIRV_CROSS_THROW("Multisampled array textures are supported from 2.1."); + img_type_name += "depth2d_ms_array"; + } + else if (img_type.ms) + img_type_name += "depth2d_ms"; + else if (img_type.arrayed) + img_type_name += "depth2d_array"; + else + img_type_name += "depth2d"; + break; + case Dim3D: + img_type_name += "depth3d_unsupported_by_metal"; + break; + case DimCube: + if (!msl_options.emulate_cube_array) + img_type_name += (img_type.arrayed ? "depthcube_array" : "depthcube"); + else + img_type_name += (img_type.arrayed ? "depth2d_array" : "depthcube"); + break; + default: + img_type_name += "unknown_depth_texture_type"; + break; + } + } + else + { + switch (img_type.dim) + { + case DimBuffer: + if (img_type.ms || img_type.arrayed) + SPIRV_CROSS_THROW("Cannot use texel buffers with multisampling or array layers."); + + if (msl_options.texture_buffer_native) + { + if (!msl_options.supports_msl_version(2, 1)) + SPIRV_CROSS_THROW("Native texture_buffer type is only supported in MSL 2.1."); + img_type_name = "texture_buffer"; + } + else + img_type_name += "texture2d"; + break; + case Dim1D: + case Dim2D: + case DimSubpassData: + if (img_type.dim == Dim1D && !msl_options.texture_1D_as_2D) + { + + img_type_name += (img_type.arrayed ? "texture1d_array" : "texture1d"); + break; + } + + + if (img_type.dim == DimSubpassData && msl_options.is_ios() && + msl_options.ios_use_framebuffer_fetch_subpasses) + { + return type_to_glsl(get(img_type.type)); + } + if (img_type.ms && img_type.arrayed) + { + if (!msl_options.supports_msl_version(2, 1)) + SPIRV_CROSS_THROW("Multisampled array textures are supported from 2.1."); + img_type_name += "texture2d_ms_array"; + } + else if (img_type.ms) + img_type_name += "texture2d_ms"; + else if (img_type.arrayed) + img_type_name += "texture2d_array"; + else + img_type_name += "texture2d"; + break; + case Dim3D: + img_type_name += "texture3d"; + break; + case DimCube: + if (!msl_options.emulate_cube_array) + img_type_name += (img_type.arrayed ? "texturecube_array" : "texturecube"); + else + img_type_name += (img_type.arrayed ? 
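The depth-texture half of image_type_glsl() builds MSL type names from a base name plus "_ms" / "_array" suffixes, with the multisampled-array combination gated on MSL 2.1. The 2D naming scheme in isolation (sketch only):

#include <string>

// E.g. depth2d_ms_array for a multisampled array, depth2d for the plain case.
static std::string depth2d_type_name(bool multisampled, bool arrayed)
{
    std::string name = "depth2d";
    if (multisampled)
        name += "_ms";
    if (arrayed)
        name += "_array";
    return name;
}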
"texture2d_array" : "texturecube"); + break; + default: + img_type_name += "unknown_texture_type"; + break; + } + } + + + img_type_name += "<"; + img_type_name += type_to_glsl(get(img_type.type)); + + + + + if (type.basetype == SPIRType::Image && type.image.sampled == 2 && type.image.dim != DimSubpassData) + { + switch (img_type.access) + { + case AccessQualifierReadOnly: + img_type_name += ", access::read"; + break; + + case AccessQualifierWriteOnly: + img_type_name += ", access::write"; + break; + + case AccessQualifierReadWrite: + img_type_name += ", access::read_write"; + break; + + default: + { + auto *p_var = maybe_get_backing_variable(id); + if (p_var && p_var->basevariable) + p_var = maybe_get(p_var->basevariable); + if (p_var && !has_decoration(p_var->self, DecorationNonWritable)) + { + img_type_name += ", access::"; + + if (!has_decoration(p_var->self, DecorationNonReadable)) + img_type_name += "read_"; + + img_type_name += "write"; + } + break; + } + } + } + + img_type_name += ">"; + + return img_type_name; +} + +void CompilerMSL::emit_subgroup_op(const Instruction &i) +{ + const uint32_t *ops = stream(i); + auto op = static_cast(i.op); + + + + + + if (!msl_options.supports_msl_version(2)) + SPIRV_CROSS_THROW("Subgroups are only supported in Metal 2.0 and up."); + + if (msl_options.is_ios()) + { + switch (op) + { + default: + SPIRV_CROSS_THROW("iOS only supports quad-group operations."); + case OpGroupNonUniformBroadcast: + case OpGroupNonUniformShuffle: + case OpGroupNonUniformShuffleXor: + case OpGroupNonUniformShuffleUp: + case OpGroupNonUniformShuffleDown: + case OpGroupNonUniformQuadSwap: + case OpGroupNonUniformQuadBroadcast: + break; + } + } + + if (msl_options.is_macos() && !msl_options.supports_msl_version(2, 1)) + { + switch (op) + { + default: + SPIRV_CROSS_THROW("Subgroup ops beyond broadcast and shuffle on macOS require Metal 2.0 and up."); + case OpGroupNonUniformBroadcast: + case OpGroupNonUniformShuffle: + case OpGroupNonUniformShuffleXor: + case OpGroupNonUniformShuffleUp: + case OpGroupNonUniformShuffleDown: + break; + } + } + + uint32_t result_type = ops[0]; + uint32_t id = ops[1]; + + auto scope = static_cast(get(ops[2]).scalar()); + if (scope != ScopeSubgroup) + SPIRV_CROSS_THROW("Only subgroup scope is supported."); + + switch (op) + { + case OpGroupNonUniformElect: + emit_op(result_type, id, "simd_is_first()", true); + break; + + case OpGroupNonUniformBroadcast: + emit_binary_func_op(result_type, id, ops[3], ops[4], + msl_options.is_ios() ? 
"quad_broadcast" : "simd_broadcast"); + break; + + case OpGroupNonUniformBroadcastFirst: + emit_unary_func_op(result_type, id, ops[3], "simd_broadcast_first"); + break; + + case OpGroupNonUniformBallot: + emit_unary_func_op(result_type, id, ops[3], "spvSubgroupBallot"); + break; + + case OpGroupNonUniformInverseBallot: + emit_binary_func_op(result_type, id, ops[3], builtin_subgroup_invocation_id_id, "spvSubgroupBallotBitExtract"); + break; + + case OpGroupNonUniformBallotBitExtract: + emit_binary_func_op(result_type, id, ops[3], ops[4], "spvSubgroupBallotBitExtract"); + break; + + case OpGroupNonUniformBallotFindLSB: + emit_unary_func_op(result_type, id, ops[3], "spvSubgroupBallotFindLSB"); + break; + + case OpGroupNonUniformBallotFindMSB: + emit_unary_func_op(result_type, id, ops[3], "spvSubgroupBallotFindMSB"); + break; + + case OpGroupNonUniformBallotBitCount: + { + auto operation = static_cast(ops[3]); + if (operation == GroupOperationReduce) + emit_unary_func_op(result_type, id, ops[4], "spvSubgroupBallotBitCount"); + else if (operation == GroupOperationInclusiveScan) + emit_binary_func_op(result_type, id, ops[4], builtin_subgroup_invocation_id_id, + "spvSubgroupBallotInclusiveBitCount"); + else if (operation == GroupOperationExclusiveScan) + emit_binary_func_op(result_type, id, ops[4], builtin_subgroup_invocation_id_id, + "spvSubgroupBallotExclusiveBitCount"); + else + SPIRV_CROSS_THROW("Invalid BitCount operation."); + break; + } + + case OpGroupNonUniformShuffle: + emit_binary_func_op(result_type, id, ops[3], ops[4], msl_options.is_ios() ? "quad_shuffle" : "simd_shuffle"); + break; + + case OpGroupNonUniformShuffleXor: + emit_binary_func_op(result_type, id, ops[3], ops[4], + msl_options.is_ios() ? "quad_shuffle_xor" : "simd_shuffle_xor"); + break; + + case OpGroupNonUniformShuffleUp: + emit_binary_func_op(result_type, id, ops[3], ops[4], + msl_options.is_ios() ? "quad_shuffle_up" : "simd_shuffle_up"); + break; + + case OpGroupNonUniformShuffleDown: + emit_binary_func_op(result_type, id, ops[3], ops[4], + msl_options.is_ios() ? "quad_shuffle_down" : "simd_shuffle_down"); + break; + + case OpGroupNonUniformAll: + emit_unary_func_op(result_type, id, ops[3], "simd_all"); + break; + + case OpGroupNonUniformAny: + emit_unary_func_op(result_type, id, ops[3], "simd_any"); + break; + + case OpGroupNonUniformAllEqual: + emit_unary_func_op(result_type, id, ops[3], "spvSubgroupAllEqual"); + break; + + +#define MSL_GROUP_OP(op, msl_op) \ +case OpGroupNonUniform##op: \ + { \ + auto operation = static_cast(ops[3]); \ + if (operation == GroupOperationReduce) \ + emit_unary_func_op(result_type, id, ops[4], "simd_" #msl_op); \ + else if (operation == GroupOperationInclusiveScan) \ + emit_unary_func_op(result_type, id, ops[4], "simd_prefix_inclusive_" #msl_op); \ + else if (operation == GroupOperationExclusiveScan) \ + emit_unary_func_op(result_type, id, ops[4], "simd_prefix_exclusive_" #msl_op); \ + else if (operation == GroupOperationClusteredReduce) \ + { \ + /* Only cluster sizes of 4 are supported. 
*/ \ + uint32_t cluster_size = get(ops[5]).scalar(); \ + if (cluster_size != 4) \ + SPIRV_CROSS_THROW("Metal only supports quad ClusteredReduce."); \ + emit_unary_func_op(result_type, id, ops[4], "quad_" #msl_op); \ + } \ + else \ + SPIRV_CROSS_THROW("Invalid group operation."); \ + break; \ + } + MSL_GROUP_OP(FAdd, sum) + MSL_GROUP_OP(FMul, product) + MSL_GROUP_OP(IAdd, sum) + MSL_GROUP_OP(IMul, product) +#undef MSL_GROUP_OP + +#define MSL_GROUP_OP(op, msl_op) \ +case OpGroupNonUniform##op: \ + { \ + auto operation = static_cast(ops[3]); \ + if (operation == GroupOperationReduce) \ + emit_unary_func_op(result_type, id, ops[4], "simd_" #msl_op); \ + else if (operation == GroupOperationInclusiveScan) \ + SPIRV_CROSS_THROW("Metal doesn't support InclusiveScan for OpGroupNonUniform" #op "."); \ + else if (operation == GroupOperationExclusiveScan) \ + SPIRV_CROSS_THROW("Metal doesn't support ExclusiveScan for OpGroupNonUniform" #op "."); \ + else if (operation == GroupOperationClusteredReduce) \ + { \ + /* Only cluster sizes of 4 are supported. */ \ + uint32_t cluster_size = get(ops[5]).scalar(); \ + if (cluster_size != 4) \ + SPIRV_CROSS_THROW("Metal only supports quad ClusteredReduce."); \ + emit_unary_func_op(result_type, id, ops[4], "quad_" #msl_op); \ + } \ + else \ + SPIRV_CROSS_THROW("Invalid group operation."); \ + break; \ + } + MSL_GROUP_OP(FMin, min) + MSL_GROUP_OP(FMax, max) + MSL_GROUP_OP(SMin, min) + MSL_GROUP_OP(SMax, max) + MSL_GROUP_OP(UMin, min) + MSL_GROUP_OP(UMax, max) + MSL_GROUP_OP(BitwiseAnd, and) + MSL_GROUP_OP(BitwiseOr, or) + MSL_GROUP_OP(BitwiseXor, xor) + MSL_GROUP_OP(LogicalAnd, and) + MSL_GROUP_OP(LogicalOr, or) + MSL_GROUP_OP(LogicalXor, xor) + + + case OpGroupNonUniformQuadSwap: + { + + + + + + + + + + + uint32_t mask = get(ops[4]).scalar() + 1; + uint32_t mask_id = ir.increase_bound_by(1); + set(mask_id, expression_type_id(ops[4]), mask, false); + emit_binary_func_op(result_type, id, ops[3], mask_id, "quad_shuffle_xor"); + break; + } + + case OpGroupNonUniformQuadBroadcast: + emit_binary_func_op(result_type, id, ops[3], ops[4], "quad_broadcast"); + break; + + default: + SPIRV_CROSS_THROW("Invalid opcode for subgroup."); + } + + register_control_dependent_expression(id); +} + +string CompilerMSL::bitcast_glsl_op(const SPIRType &out_type, const SPIRType &in_type) +{ + if (out_type.basetype == in_type.basetype) + return ""; + + assert(out_type.basetype != SPIRType::Boolean); + assert(in_type.basetype != SPIRType::Boolean); + + bool integral_cast = type_is_integral(out_type) && type_is_integral(in_type); + bool same_size_cast = out_type.width == in_type.width; + + if (integral_cast && same_size_cast) + { + + return type_to_glsl(out_type); + } + else + { + + return "as_type<" + type_to_glsl(out_type) + ">"; + } +} + + + +string CompilerMSL::builtin_to_glsl(BuiltIn builtin, StorageClass storage) +{ + switch (builtin) + { + + + + case BuiltInVertexId: + ensure_builtin(StorageClassInput, BuiltInVertexId); + if (msl_options.enable_base_index_zero && msl_options.supports_msl_version(1, 1) && + (msl_options.ios_support_base_vertex_instance || msl_options.is_macos())) + { + if (builtin_declaration) + { + if (needs_base_vertex_arg != TriState::No) + needs_base_vertex_arg = TriState::Yes; + return "gl_VertexID"; + } + else + { + ensure_builtin(StorageClassInput, BuiltInBaseVertex); + return "(gl_VertexID - gl_BaseVertex)"; + } + } + else + { + return "gl_VertexID"; + } + case BuiltInInstanceId: + ensure_builtin(StorageClassInput, BuiltInInstanceId); + if 
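The OpGroupNonUniformQuadSwap lowering above relies on quad lanes being laid out so that XOR-ing the lane index selects the swap partner: SPIR-V direction 0/1/2 (horizontal/vertical/diagonal) becomes a quad_shuffle_xor mask of direction + 1. A sketch of the lane arithmetic:

#include <cstdint>

// Partner lane for a quad swap: direction 0 pairs 0<->1 and 2<->3,
// direction 1 pairs 0<->2 and 1<->3, direction 2 pairs 0<->3 and 1<->2.
static uint32_t quad_swap_partner(uint32_t lane_in_quad, uint32_t direction)
{
    return lane_in_quad ^ (direction + 1u);
}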
(msl_options.enable_base_index_zero && msl_options.supports_msl_version(1, 1) && + (msl_options.ios_support_base_vertex_instance || msl_options.is_macos())) + { + if (builtin_declaration) + { + if (needs_base_instance_arg != TriState::No) + needs_base_instance_arg = TriState::Yes; + return "gl_InstanceID"; + } + else + { + ensure_builtin(StorageClassInput, BuiltInBaseInstance); + return "(gl_InstanceID - gl_BaseInstance)"; + } + } + else + { + return "gl_InstanceID"; + } + case BuiltInVertexIndex: + ensure_builtin(StorageClassInput, BuiltInVertexIndex); + if (msl_options.enable_base_index_zero && msl_options.supports_msl_version(1, 1) && + (msl_options.ios_support_base_vertex_instance || msl_options.is_macos())) + { + if (builtin_declaration) + { + if (needs_base_vertex_arg != TriState::No) + needs_base_vertex_arg = TriState::Yes; + return "gl_VertexIndex"; + } + else + { + ensure_builtin(StorageClassInput, BuiltInBaseVertex); + return "(gl_VertexIndex - gl_BaseVertex)"; + } + } + else + { + return "gl_VertexIndex"; + } + case BuiltInInstanceIndex: + ensure_builtin(StorageClassInput, BuiltInInstanceIndex); + if (msl_options.enable_base_index_zero && msl_options.supports_msl_version(1, 1) && + (msl_options.ios_support_base_vertex_instance || msl_options.is_macos())) + { + if (builtin_declaration) + { + if (needs_base_instance_arg != TriState::No) + needs_base_instance_arg = TriState::Yes; + return "gl_InstanceIndex"; + } + else + { + ensure_builtin(StorageClassInput, BuiltInBaseInstance); + return "(gl_InstanceIndex - gl_BaseInstance)"; + } + } + else + { + return "gl_InstanceIndex"; + } + case BuiltInBaseVertex: + if (msl_options.supports_msl_version(1, 1) && + (msl_options.ios_support_base_vertex_instance || msl_options.is_macos())) + { + needs_base_vertex_arg = TriState::No; + return "gl_BaseVertex"; + } + else + { + SPIRV_CROSS_THROW("BaseVertex requires Metal 1.1 and Mac or Apple A9+ hardware."); + } + case BuiltInBaseInstance: + if (msl_options.supports_msl_version(1, 1) && + (msl_options.ios_support_base_vertex_instance || msl_options.is_macos())) + { + needs_base_instance_arg = TriState::No; + return "gl_BaseInstance"; + } + else + { + SPIRV_CROSS_THROW("BaseInstance requires Metal 1.1 and Mac or Apple A9+ hardware."); + } + case BuiltInDrawIndex: + SPIRV_CROSS_THROW("DrawIndex is not supported in MSL."); + + + + + case BuiltInViewportIndex: + if (!msl_options.supports_msl_version(2, 0)) + SPIRV_CROSS_THROW("ViewportIndex requires Metal 2.0."); + + case BuiltInPosition: + case BuiltInPointSize: + case BuiltInClipDistance: + case BuiltInCullDistance: + case BuiltInLayer: + case BuiltInFragDepth: + case BuiltInFragStencilRefEXT: + case BuiltInSampleMask: + if (get_execution_model() == ExecutionModelTessellationControl) + break; + if (storage != StorageClassInput && current_function && (current_function->self == ir.default_entry_point)) + return stage_out_var_name + "." + CompilerGLSL::builtin_to_glsl(builtin, storage); + + break; + + case BuiltInBaryCoordNV: + case BuiltInBaryCoordNoPerspNV: + if (storage == StorageClassInput && current_function && (current_function->self == ir.default_entry_point)) + return stage_in_var_name + "." 
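With enable_base_index_zero, the cases above emit (gl_VertexIndex - gl_BaseVertex) and (gl_InstanceIndex - gl_BaseInstance) at use sites, since Metal's vertex_id and instance_id include the draw call's base offsets. The rewrite amounts to:

#include <cstdint>

// Metal hands the shader an index that already includes the base vertex or
// base instance of the draw; subtracting the base yields the zero-based
// index the option promises.
static uint32_t zero_based_index(uint32_t metal_id, uint32_t metal_base)
{
    return metal_id - metal_base;
}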
+ CompilerGLSL::builtin_to_glsl(builtin, storage); + break; + + case BuiltInTessLevelOuter: + if (get_execution_model() == ExecutionModelTessellationEvaluation) + { + if (storage != StorageClassOutput && !get_entry_point().flags.get(ExecutionModeTriangles) && + current_function && (current_function->self == ir.default_entry_point)) + return join(patch_stage_in_var_name, ".", CompilerGLSL::builtin_to_glsl(builtin, storage)); + else + break; + } + if (storage != StorageClassInput && current_function && (current_function->self == ir.default_entry_point)) + return join(tess_factor_buffer_var_name, "[", to_expression(builtin_primitive_id_id), + "].edgeTessellationFactor"); + break; + + case BuiltInTessLevelInner: + if (get_execution_model() == ExecutionModelTessellationEvaluation) + { + if (storage != StorageClassOutput && !get_entry_point().flags.get(ExecutionModeTriangles) && + current_function && (current_function->self == ir.default_entry_point)) + return join(patch_stage_in_var_name, ".", CompilerGLSL::builtin_to_glsl(builtin, storage)); + else + break; + } + if (storage != StorageClassInput && current_function && (current_function->self == ir.default_entry_point)) + return join(tess_factor_buffer_var_name, "[", to_expression(builtin_primitive_id_id), + "].insideTessellationFactor"); + break; + + default: + break; + } + + return CompilerGLSL::builtin_to_glsl(builtin, storage); +} + + +string CompilerMSL::builtin_qualifier(BuiltIn builtin) +{ + auto &execution = get_entry_point(); + + switch (builtin) + { + + case BuiltInVertexId: + return "vertex_id"; + case BuiltInVertexIndex: + return "vertex_id"; + case BuiltInBaseVertex: + return "base_vertex"; + case BuiltInInstanceId: + return "instance_id"; + case BuiltInInstanceIndex: + return "instance_id"; + case BuiltInBaseInstance: + return "base_instance"; + case BuiltInDrawIndex: + SPIRV_CROSS_THROW("DrawIndex is not supported in MSL."); + + + case BuiltInClipDistance: + return "clip_distance"; + case BuiltInPointSize: + return "point_size"; + case BuiltInPosition: + if (position_invariant) + { + if (!msl_options.supports_msl_version(2, 1)) + SPIRV_CROSS_THROW("Invariant position is only supported on MSL 2.1 and up."); + return "position, invariant"; + } + else + return "position"; + case BuiltInLayer: + return "render_target_array_index"; + case BuiltInViewportIndex: + if (!msl_options.supports_msl_version(2, 0)) + SPIRV_CROSS_THROW("ViewportIndex requires Metal 2.0."); + return "viewport_array_index"; + + + case BuiltInInvocationId: + return "thread_index_in_threadgroup"; + case BuiltInPatchVertices: + + SPIRV_CROSS_THROW("PatchVertices is derived from the auxiliary buffer in MSL."); + case BuiltInPrimitiveId: + switch (execution.model) + { + case ExecutionModelTessellationControl: + return "threadgroup_position_in_grid"; + case ExecutionModelTessellationEvaluation: + return "patch_id"; + case ExecutionModelFragment: + if (msl_options.is_ios()) + SPIRV_CROSS_THROW("PrimitiveId is not supported in fragment on iOS."); + else if (msl_options.is_macos() && !msl_options.supports_msl_version(2, 2)) + SPIRV_CROSS_THROW("PrimitiveId on macOS requires MSL 2.2."); + return "primitive_id"; + default: + SPIRV_CROSS_THROW("PrimitiveId is not supported in this execution model."); + } + + + case BuiltInTessLevelOuter: + case BuiltInTessLevelInner: + + SPIRV_CROSS_THROW("Tessellation levels are handled specially in MSL."); + + + case BuiltInTessCoord: + return "position_in_patch"; + + + case BuiltInFrontFacing: + return "front_facing"; + case 
BuiltInPointCoord: + return "point_coord"; + case BuiltInFragCoord: + return "position"; + case BuiltInSampleId: + return "sample_id"; + case BuiltInSampleMask: + return "sample_mask"; + case BuiltInSamplePosition: + + SPIRV_CROSS_THROW("Sample position is retrieved by a function in MSL."); + case BuiltInViewIndex: + if (execution.model != ExecutionModelFragment) + SPIRV_CROSS_THROW("ViewIndex is handled specially outside fragment shaders."); + + + return "render_target_array_index"; + + + case BuiltInFragDepth: + if (execution.flags.get(ExecutionModeDepthGreater)) + return "depth(greater)"; + else if (execution.flags.get(ExecutionModeDepthLess)) + return "depth(less)"; + else + return "depth(any)"; + + case BuiltInFragStencilRefEXT: + return "stencil"; + + + case BuiltInGlobalInvocationId: + return "thread_position_in_grid"; + + case BuiltInWorkgroupId: + return "threadgroup_position_in_grid"; + + case BuiltInNumWorkgroups: + return "threadgroups_per_grid"; + + case BuiltInLocalInvocationId: + return "thread_position_in_threadgroup"; + + case BuiltInLocalInvocationIndex: + return "thread_index_in_threadgroup"; + + case BuiltInSubgroupSize: + if (execution.model == ExecutionModelFragment) + { + if (!msl_options.supports_msl_version(2, 2)) + SPIRV_CROSS_THROW("threads_per_simdgroup requires Metal 2.2 in fragment shaders."); + return "threads_per_simdgroup"; + } + else + { + + + return "thread_execution_width"; + } + + case BuiltInNumSubgroups: + if (!msl_options.supports_msl_version(2)) + SPIRV_CROSS_THROW("Subgroup builtins require Metal 2.0."); + return msl_options.is_ios() ? "quadgroups_per_threadgroup" : "simdgroups_per_threadgroup"; + + case BuiltInSubgroupId: + if (!msl_options.supports_msl_version(2)) + SPIRV_CROSS_THROW("Subgroup builtins require Metal 2.0."); + return msl_options.is_ios() ? "quadgroup_index_in_threadgroup" : "simdgroup_index_in_threadgroup"; + + case BuiltInSubgroupLocalInvocationId: + if (execution.model == ExecutionModelFragment) + { + if (!msl_options.supports_msl_version(2, 2)) + SPIRV_CROSS_THROW("thread_index_in_simdgroup requires Metal 2.2 in fragment shaders."); + return "thread_index_in_simdgroup"; + } + else + { + if (!msl_options.supports_msl_version(2)) + SPIRV_CROSS_THROW("Subgroup builtins require Metal 2.0."); + return msl_options.is_ios() ? 
"thread_index_in_quadgroup" : "thread_index_in_simdgroup"; + } + + case BuiltInSubgroupEqMask: + case BuiltInSubgroupGeMask: + case BuiltInSubgroupGtMask: + case BuiltInSubgroupLeMask: + case BuiltInSubgroupLtMask: + + SPIRV_CROSS_THROW("Subgroup ballot masks are handled specially in MSL."); + + case BuiltInBaryCoordNV: + + if (msl_options.is_ios()) + SPIRV_CROSS_THROW("Barycentrics not supported on iOS."); + else if (!msl_options.supports_msl_version(2, 2)) + SPIRV_CROSS_THROW("Barycentrics are only supported in MSL 2.2 and above on macOS."); + return "barycentric_coord, center_perspective"; + + case BuiltInBaryCoordNoPerspNV: + + if (msl_options.is_ios()) + SPIRV_CROSS_THROW("Barycentrics not supported on iOS."); + else if (!msl_options.supports_msl_version(2, 2)) + SPIRV_CROSS_THROW("Barycentrics are only supported in MSL 2.2 and above on macOS."); + return "barycentric_coord, center_no_perspective"; + + default: + return "unsupported-built-in"; + } +} + + +string CompilerMSL::builtin_type_decl(BuiltIn builtin, uint32_t id) +{ + const SPIREntryPoint &execution = get_entry_point(); + switch (builtin) + { + + case BuiltInVertexId: + return "uint"; + case BuiltInVertexIndex: + return "uint"; + case BuiltInBaseVertex: + return "uint"; + case BuiltInInstanceId: + return "uint"; + case BuiltInInstanceIndex: + return "uint"; + case BuiltInBaseInstance: + return "uint"; + case BuiltInDrawIndex: + SPIRV_CROSS_THROW("DrawIndex is not supported in MSL."); + + + case BuiltInClipDistance: + return "float"; + case BuiltInPointSize: + return "float"; + case BuiltInPosition: + return "float4"; + case BuiltInLayer: + return "uint"; + case BuiltInViewportIndex: + if (!msl_options.supports_msl_version(2, 0)) + SPIRV_CROSS_THROW("ViewportIndex requires Metal 2.0."); + return "uint"; + + + case BuiltInInvocationId: + return "uint"; + case BuiltInPatchVertices: + return "uint"; + case BuiltInPrimitiveId: + return "uint"; + + + case BuiltInTessLevelInner: + if (execution.model == ExecutionModelTessellationEvaluation) + return !execution.flags.get(ExecutionModeTriangles) ? "float2" : "float"; + return "half"; + case BuiltInTessLevelOuter: + if (execution.model == ExecutionModelTessellationEvaluation) + return !execution.flags.get(ExecutionModeTriangles) ? "float4" : "float"; + return "half"; + + + case BuiltInTessCoord: + return execution.flags.get(ExecutionModeTriangles) ? 
"float3" : "float2"; + + + case BuiltInFrontFacing: + return "bool"; + case BuiltInPointCoord: + return "float2"; + case BuiltInFragCoord: + return "float4"; + case BuiltInSampleId: + return "uint"; + case BuiltInSampleMask: + return "uint"; + case BuiltInSamplePosition: + return "float2"; + case BuiltInViewIndex: + return "uint"; + + case BuiltInHelperInvocation: + return "bool"; + + case BuiltInBaryCoordNV: + case BuiltInBaryCoordNoPerspNV: + + return type_to_glsl(get_variable_data_type(get(id))); + + + case BuiltInFragDepth: + return "float"; + + case BuiltInFragStencilRefEXT: + return "uint"; + + + case BuiltInGlobalInvocationId: + case BuiltInLocalInvocationId: + case BuiltInNumWorkgroups: + case BuiltInWorkgroupId: + return "uint3"; + case BuiltInLocalInvocationIndex: + case BuiltInNumSubgroups: + case BuiltInSubgroupId: + case BuiltInSubgroupSize: + case BuiltInSubgroupLocalInvocationId: + return "uint"; + case BuiltInSubgroupEqMask: + case BuiltInSubgroupGeMask: + case BuiltInSubgroupGtMask: + case BuiltInSubgroupLeMask: + case BuiltInSubgroupLtMask: + return "uint4"; + + case BuiltInDeviceIndex: + return "int"; + + default: + return "unsupported-built-in-type"; + } +} + + +string CompilerMSL::built_in_func_arg(BuiltIn builtin, bool prefix_comma) +{ + string bi_arg; + if (prefix_comma) + bi_arg += ", "; + + + builtin_declaration = true; + bi_arg += builtin_type_decl(builtin); + bi_arg += " " + builtin_to_glsl(builtin, StorageClassInput); + bi_arg += " [[" + builtin_qualifier(builtin) + "]]"; + builtin_declaration = false; + + return bi_arg; +} + +const SPIRType &CompilerMSL::get_physical_member_type(const SPIRType &type, uint32_t index) const +{ + if (member_is_remapped_physical_type(type, index)) + return get(get_extended_member_decoration(type.self, index, SPIRVCrossDecorationPhysicalTypeID)); + else + return get(type.member_types[index]); +} + +uint32_t CompilerMSL::get_declared_type_array_stride_msl(const SPIRType &type, bool is_packed, bool row_major) const +{ + + + + + + + + auto basic_type = type; + basic_type.array.clear(); + basic_type.array_size_literal.clear(); + uint32_t value_size = get_declared_type_size_msl(basic_type, is_packed, row_major); + + uint32_t dimensions = uint32_t(type.array.size()); + assert(dimensions > 0); + dimensions--; + + + for (uint32_t dim = 0; dim < dimensions; dim++) + { + uint32_t array_size = to_array_size_literal(type, dim); + value_size *= max(array_size, 1u); + } + + return value_size; +} + +uint32_t CompilerMSL::get_declared_struct_member_array_stride_msl(const SPIRType &type, uint32_t index) const +{ + return get_declared_type_array_stride_msl(get_physical_member_type(type, index), + member_is_packed_physical_type(type, index), + has_member_decoration(type.self, index, DecorationRowMajor)); +} + +uint32_t CompilerMSL::get_declared_type_matrix_stride_msl(const SPIRType &type, bool packed, bool row_major) const +{ + + + if (packed) + return (type.width / 8) * (row_major ? 
type.columns : type.vecsize); + else + return get_declared_type_alignment_msl(type, false, row_major); +} + +uint32_t CompilerMSL::get_declared_struct_member_matrix_stride_msl(const SPIRType &type, uint32_t index) const +{ + return get_declared_type_matrix_stride_msl(get_physical_member_type(type, index), + member_is_packed_physical_type(type, index), + has_member_decoration(type.self, index, DecorationRowMajor)); +} + +uint32_t CompilerMSL::get_declared_struct_size_msl(const SPIRType &struct_type, bool ignore_alignment, + bool ignore_padding) const +{ + + if (!ignore_padding && has_extended_decoration(struct_type.self, SPIRVCrossDecorationPaddingTarget)) + return get_extended_decoration(struct_type.self, SPIRVCrossDecorationPaddingTarget); + + if (struct_type.member_types.empty()) + return 0; + + uint32_t mbr_cnt = uint32_t(struct_type.member_types.size()); + + + uint32_t alignment = 1; + + if (!ignore_alignment) + { + for (uint32_t i = 0; i < mbr_cnt; i++) + { + uint32_t mbr_alignment = get_declared_struct_member_alignment_msl(struct_type, i); + alignment = max(alignment, mbr_alignment); + } + } + + + + uint32_t spirv_offset = type_struct_member_offset(struct_type, mbr_cnt - 1); + uint32_t msl_size = spirv_offset + get_declared_struct_member_size_msl(struct_type, mbr_cnt - 1); + msl_size = (msl_size + alignment - 1) & ~(alignment - 1); + return msl_size; +} + + +uint32_t CompilerMSL::get_declared_type_size_msl(const SPIRType &type, bool is_packed, bool row_major) const +{ + switch (type.basetype) + { + case SPIRType::Unknown: + case SPIRType::Void: + case SPIRType::AtomicCounter: + case SPIRType::Image: + case SPIRType::SampledImage: + case SPIRType::Sampler: + SPIRV_CROSS_THROW("Querying size of opaque object."); + + default: + { + if (!type.array.empty()) + { + uint32_t array_size = to_array_size_literal(type); + return get_declared_type_array_stride_msl(type, is_packed, row_major) * max(array_size, 1u); + } + + if (type.basetype == SPIRType::Struct) + return get_declared_struct_size_msl(type); + + if (is_packed) + { + return type.vecsize * type.columns * (type.width / 8); + } + else + { + + uint32_t vecsize = type.vecsize; + uint32_t columns = type.columns; + + if (row_major) + swap(vecsize, columns); + + if (vecsize == 3) + vecsize = 4; + + return vecsize * columns * (type.width / 8); + } + } + } +} + +uint32_t CompilerMSL::get_declared_struct_member_size_msl(const SPIRType &type, uint32_t index) const +{ + return get_declared_type_size_msl(get_physical_member_type(type, index), + member_is_packed_physical_type(type, index), + has_member_decoration(type.self, index, DecorationRowMajor)); +} + + +uint32_t CompilerMSL::get_declared_type_alignment_msl(const SPIRType &type, bool is_packed, bool row_major) const +{ + switch (type.basetype) + { + case SPIRType::Unknown: + case SPIRType::Void: + case SPIRType::AtomicCounter: + case SPIRType::Image: + case SPIRType::SampledImage: + case SPIRType::Sampler: + SPIRV_CROSS_THROW("Querying alignment of opaque object."); + + case SPIRType::Int64: + SPIRV_CROSS_THROW("long types are not supported in buffers in MSL."); + case SPIRType::UInt64: + SPIRV_CROSS_THROW("ulong types are not supported in buffers in MSL."); + case SPIRType::Double: + SPIRV_CROSS_THROW("double types are not supported in buffers in MSL."); + + case SPIRType::Struct: + { + + uint32_t alignment = 1; + for (uint32_t i = 0; i < type.member_types.size(); i++) + alignment = max(alignment, uint32_t(get_declared_struct_member_alignment_msl(type, i))); + return alignment; + } + + 
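Worked through the non-struct rules above: a 3-component vector is padded to 4 components unless packed, so a float3 occupies 16 bytes with 16-byte alignment, while a packed float3 is 12 bytes with 4-byte alignment. A sketch of just the vector case:

#include <cstdint>

// Mirrors the computation above: packed vectors use scalar alignment and
// exact size; regular vectors round 3 components up to 4.
static uint32_t msl_vector_size(uint32_t width_bits, uint32_t vecsize, bool packed)
{
    uint32_t padded = (!packed && vecsize == 3) ? 4 : vecsize;
    return (width_bits / 8) * padded;
}

static uint32_t msl_vector_alignment(uint32_t width_bits, uint32_t vecsize, bool packed)
{
    return packed ? width_bits / 8 : (width_bits / 8) * (vecsize == 3 ? 4 : vecsize);
}

// msl_vector_size(32, 3, false) == 16; msl_vector_size(32, 3, true) == 12.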
default: + { + + + + if (is_packed) + { + + return type.width / 8; + } + else + { + + uint32_t vecsize = row_major ? type.columns : type.vecsize; + return (type.width / 8) * (vecsize == 3 ? 4 : vecsize); + } + } + } +} + +uint32_t CompilerMSL::get_declared_struct_member_alignment_msl(const SPIRType &type, uint32_t index) const +{ + return get_declared_type_alignment_msl(get_physical_member_type(type, index), + member_is_packed_physical_type(type, index), + has_member_decoration(type.self, index, DecorationRowMajor)); +} + +bool CompilerMSL::skip_argument(uint32_t) const +{ + return false; +} + +void CompilerMSL::analyze_sampled_image_usage() +{ + if (msl_options.swizzle_texture_samples) + { + SampledImageScanner scanner(*this); + traverse_all_reachable_opcodes(get(ir.default_entry_point), scanner); + } +} + +bool CompilerMSL::SampledImageScanner::handle(spv::Op opcode, const uint32_t *args, uint32_t length) +{ + switch (opcode) + { + case OpLoad: + case OpImage: + case OpSampledImage: + { + if (length < 3) + return false; + + uint32_t result_type = args[0]; + auto &type = compiler.get(result_type); + if ((type.basetype != SPIRType::Image && type.basetype != SPIRType::SampledImage) || type.image.sampled != 1) + return true; + + uint32_t id = args[1]; + compiler.set(id, "", result_type, true); + break; + } + case OpImageSampleExplicitLod: + case OpImageSampleProjExplicitLod: + case OpImageSampleDrefExplicitLod: + case OpImageSampleProjDrefExplicitLod: + case OpImageSampleImplicitLod: + case OpImageSampleProjImplicitLod: + case OpImageSampleDrefImplicitLod: + case OpImageSampleProjDrefImplicitLod: + case OpImageFetch: + case OpImageGather: + case OpImageDrefGather: + compiler.has_sampled_images = + compiler.has_sampled_images || compiler.is_sampled_image_type(compiler.expression_type(args[2])); + compiler.needs_swizzle_buffer_def = compiler.needs_swizzle_buffer_def || compiler.has_sampled_images; + break; + default: + break; + } + return true; +} + + +void CompilerMSL::add_spv_func_and_recompile(SPVFuncImpl spv_func) +{ + if (spv_function_implementations.count(spv_func) == 0) + { + spv_function_implementations.insert(spv_func); + suppress_missing_prototypes = true; + force_recompile(); + } +} + +bool CompilerMSL::OpCodePreprocessor::handle(Op opcode, const uint32_t *args, uint32_t length) +{ + + + + + + + SPVFuncImpl spv_func = get_spv_func_impl(opcode, args); + if (spv_func != SPVFuncImplNone) + { + compiler.spv_function_implementations.insert(spv_func); + suppress_missing_prototypes = true; + } + + switch (opcode) + { + + case OpFunctionCall: + suppress_missing_prototypes = true; + break; + + + case OpImageTexelPointer: + { + auto *var = compiler.maybe_get_backing_variable(args[2]); + image_pointers[args[1]] = var ? 
var->self : ID(0); + break; + } + + case OpImageWrite: + uses_resource_write = true; + break; + + case OpStore: + check_resource_write(args[0]); + break; + + + case OpAtomicExchange: + case OpAtomicCompareExchange: + case OpAtomicCompareExchangeWeak: + case OpAtomicIIncrement: + case OpAtomicIDecrement: + case OpAtomicIAdd: + case OpAtomicISub: + case OpAtomicSMin: + case OpAtomicUMin: + case OpAtomicSMax: + case OpAtomicUMax: + case OpAtomicAnd: + case OpAtomicOr: + case OpAtomicXor: + { + uses_atomics = true; + auto it = image_pointers.find(args[2]); + if (it != image_pointers.end()) + { + compiler.atomic_image_vars.insert(it->second); + } + check_resource_write(args[2]); + break; + } + + case OpAtomicStore: + { + uses_atomics = true; + auto it = image_pointers.find(args[0]); + if (it != image_pointers.end()) + { + compiler.atomic_image_vars.insert(it->second); + } + check_resource_write(args[0]); + break; + } + + case OpAtomicLoad: + { + uses_atomics = true; + auto it = image_pointers.find(args[2]); + if (it != image_pointers.end()) + { + compiler.atomic_image_vars.insert(it->second); + } + break; + } + + case OpGroupNonUniformInverseBallot: + needs_subgroup_invocation_id = true; + break; + + case OpGroupNonUniformBallotBitCount: + if (args[3] != GroupOperationReduce) + needs_subgroup_invocation_id = true; + break; + + case OpArrayLength: + { + auto *var = compiler.maybe_get_backing_variable(args[2]); + if (var) + compiler.buffers_requiring_array_length.insert(var->self); + break; + } + + case OpInBoundsAccessChain: + case OpAccessChain: + case OpPtrAccessChain: + { + + uint32_t result_type = args[0]; + uint32_t id = args[1]; + uint32_t ptr = args[2]; + + compiler.set(id, "", result_type, true); + compiler.register_read(id, ptr, true); + compiler.ir.ids[id].set_allow_type_rewrite(); + break; + } + + default: + break; + } + + + uint32_t result_type, result_id; + if (compiler.instruction_to_result_type(result_type, result_id, opcode, args, length)) + result_types[result_id] = result_type; + + return true; +} + + +void CompilerMSL::OpCodePreprocessor::check_resource_write(uint32_t var_id) +{ + auto *p_var = compiler.maybe_get_backing_variable(var_id); + StorageClass sc = p_var ? p_var->storage : StorageClassMax; + if (sc == StorageClassUniform || sc == StorageClassStorageBuffer) + uses_resource_write = true; +} + + +CompilerMSL::SPVFuncImpl CompilerMSL::OpCodePreprocessor::get_spv_func_impl(Op opcode, const uint32_t *args) +{ + switch (opcode) + { + case OpFMod: + return SPVFuncImplMod; + + case OpFAdd: + if (compiler.msl_options.invariant_float_math) + { + return SPVFuncImplFAdd; + } + break; + + case OpFMul: + case OpOuterProduct: + case OpMatrixTimesVector: + case OpVectorTimesMatrix: + case OpMatrixTimesMatrix: + if (compiler.msl_options.invariant_float_math) + { + return SPVFuncImplFMul; + } + break; + + case OpTypeArray: + { + + return SPVFuncImplUnsafeArray; + } + + + case OpAtomicExchange: + case OpAtomicCompareExchange: + case OpAtomicCompareExchangeWeak: + case OpAtomicIIncrement: + case OpAtomicIDecrement: + case OpAtomicIAdd: + case OpAtomicISub: + case OpAtomicSMin: + case OpAtomicUMin: + case OpAtomicSMax: + case OpAtomicUMax: + case OpAtomicAnd: + case OpAtomicOr: + case OpAtomicXor: + case OpAtomicLoad: + case OpAtomicStore: + { + auto it = image_pointers.find(args[opcode == OpAtomicStore ? 
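The preprocessor cases above thread atomic usage back to image variables: OpImageTexelPointer records which variable backs each texel pointer, and any atomic touching that pointer adds the variable to atomic_image_vars, which argument_decl() later uses to append a separate "device atomic_*" buffer alias. The bookkeeping reduces to:

#include <cstdint>
#include <unordered_map>
#include <unordered_set>

// Sketch of the two-step tracking: texel-pointer results map back to their
// image variable, and atomics mark that variable for the extra alias.
struct AtomicImageTracker
{
    std::unordered_map<uint32_t, uint32_t> image_pointers; // result id -> var id
    std::unordered_set<uint32_t> atomic_image_vars;

    void on_texel_pointer(uint32_t result_id, uint32_t var_id)
    {
        image_pointers[result_id] = var_id;
    }

    void on_atomic(uint32_t pointer_id)
    {
        auto it = image_pointers.find(pointer_id);
        if (it != image_pointers.end())
            atomic_image_vars.insert(it->second);
    }
};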
0 : 2]); + if (it != image_pointers.end()) + { + uint32_t tid = compiler.get(it->second).basetype; + if (tid && compiler.get(tid).image.dim == Dim2D) + return SPVFuncImplImage2DAtomicCoords; + } + break; + } + + case OpImageFetch: + case OpImageRead: + case OpImageWrite: + { + + uint32_t tid = result_types[args[opcode == OpImageWrite ? 0 : 2]]; + if (tid && compiler.get(tid).image.dim == DimBuffer && !compiler.msl_options.texture_buffer_native) + return SPVFuncImplTexelBufferCoords; + break; + } + + case OpExtInst: + { + uint32_t extension_set = args[2]; + if (compiler.get(extension_set).ext == SPIRExtension::GLSL) + { + auto op_450 = static_cast(args[3]); + switch (op_450) + { + case GLSLstd450Radians: + return SPVFuncImplRadians; + case GLSLstd450Degrees: + return SPVFuncImplDegrees; + case GLSLstd450FindILsb: + return SPVFuncImplFindILsb; + case GLSLstd450FindSMsb: + return SPVFuncImplFindSMsb; + case GLSLstd450FindUMsb: + return SPVFuncImplFindUMsb; + case GLSLstd450SSign: + return SPVFuncImplSSign; + case GLSLstd450Reflect: + { + auto &type = compiler.get(args[0]); + if (type.vecsize == 1) + return SPVFuncImplReflectScalar; + break; + } + case GLSLstd450Refract: + { + auto &type = compiler.get(args[0]); + if (type.vecsize == 1) + return SPVFuncImplRefractScalar; + break; + } + case GLSLstd450FaceForward: + { + auto &type = compiler.get(args[0]); + if (type.vecsize == 1) + return SPVFuncImplFaceForwardScalar; + break; + } + case GLSLstd450MatrixInverse: + { + auto &mat_type = compiler.get(args[0]); + switch (mat_type.columns) + { + case 2: + return SPVFuncImplInverse2x2; + case 3: + return SPVFuncImplInverse3x3; + case 4: + return SPVFuncImplInverse4x4; + default: + break; + } + break; + } + default: + break; + } + } + break; + } + + case OpGroupNonUniformBallot: + return SPVFuncImplSubgroupBallot; + + case OpGroupNonUniformInverseBallot: + case OpGroupNonUniformBallotBitExtract: + return SPVFuncImplSubgroupBallotBitExtract; + + case OpGroupNonUniformBallotFindLSB: + return SPVFuncImplSubgroupBallotFindLSB; + + case OpGroupNonUniformBallotFindMSB: + return SPVFuncImplSubgroupBallotFindMSB; + + case OpGroupNonUniformBallotBitCount: + return SPVFuncImplSubgroupBallotBitCount; + + case OpGroupNonUniformAllEqual: + return SPVFuncImplSubgroupAllEqual; + + default: + break; + } + return SPVFuncImplNone; +} + + + +void CompilerMSL::MemberSorter::sort() +{ + + + size_t mbr_cnt = type.member_types.size(); + SmallVector mbr_idxs(mbr_cnt); + iota(mbr_idxs.begin(), mbr_idxs.end(), 0); + std::sort(mbr_idxs.begin(), mbr_idxs.end(), *this); + + + + + auto mbr_types_cpy = type.member_types; + auto mbr_meta_cpy = meta.members; + for (uint32_t mbr_idx = 0; mbr_idx < mbr_cnt; mbr_idx++) + { + type.member_types[mbr_idx] = mbr_types_cpy[mbr_idxs[mbr_idx]]; + meta.members[mbr_idx] = mbr_meta_cpy[mbr_idxs[mbr_idx]]; + } +} + + +bool CompilerMSL::MemberSorter::operator()(uint32_t mbr_idx1, uint32_t mbr_idx2) +{ + auto &mbr_meta1 = meta.members[mbr_idx1]; + auto &mbr_meta2 = meta.members[mbr_idx2]; + if (mbr_meta1.builtin != mbr_meta2.builtin) + return mbr_meta2.builtin; + else + switch (sort_aspect) + { + case Location: + return mbr_meta1.location < mbr_meta2.location; + case LocationReverse: + return mbr_meta1.location > mbr_meta2.location; + case Offset: + return mbr_meta1.offset < mbr_meta2.offset; + case OffsetThenLocationReverse: + return (mbr_meta1.offset < mbr_meta2.offset) || + ((mbr_meta1.offset == mbr_meta2.offset) && (mbr_meta1.location > mbr_meta2.location)); + case Alphabetical: + return 
+        default:
+            return false;
+        }
+}
+
+CompilerMSL::MemberSorter::MemberSorter(SPIRType &t, Meta &m, SortAspect sa)
+    : type(t)
+    , meta(m)
+    , sort_aspect(sa)
+{
+    // Ensure enough meta info is available.
+    meta.members.resize(max(type.member_types.size(), meta.members.size()));
+}
+
+void CompilerMSL::remap_constexpr_sampler(VariableID id, const MSLConstexprSampler &sampler)
+{
+    auto &type = get<SPIRType>(get<SPIRVariable>(id).basetype);
+    if (type.basetype != SPIRType::SampledImage && type.basetype != SPIRType::Sampler)
+        SPIRV_CROSS_THROW("Can only remap SampledImage and Sampler type.");
+    if (!type.array.empty())
+        SPIRV_CROSS_THROW("Can not remap array of samplers.");
+    constexpr_samplers_by_id[id] = sampler;
+}
+
+void CompilerMSL::remap_constexpr_sampler_by_binding(uint32_t desc_set, uint32_t binding,
+                                                     const MSLConstexprSampler &sampler)
+{
+    constexpr_samplers_by_binding[{ desc_set, binding }] = sampler;
+}
+
+void CompilerMSL::bitcast_from_builtin_load(uint32_t source_id, std::string &expr, const SPIRType &expr_type)
+{
+    auto *var = maybe_get_backing_variable(source_id);
+    if (var)
+        source_id = var->self;
+
+    // Only interested in standalone builtin variables.
+    if (!has_decoration(source_id, DecorationBuiltIn))
+        return;
+
+    auto builtin = static_cast<BuiltIn>(get_decoration(source_id, DecorationBuiltIn));
+    auto expected_type = expr_type.basetype;
+    switch (builtin)
+    {
+    case BuiltInGlobalInvocationId:
+    case BuiltInLocalInvocationId:
+    case BuiltInWorkgroupId:
+    case BuiltInLocalInvocationIndex:
+    case BuiltInWorkgroupSize:
+    case BuiltInNumWorkgroups:
+    case BuiltInLayer:
+    case BuiltInViewportIndex:
+    case BuiltInFragStencilRefEXT:
+    case BuiltInPrimitiveId:
+    case BuiltInSubgroupSize:
+    case BuiltInSubgroupLocalInvocationId:
+    case BuiltInViewIndex:
+    case BuiltInVertexIndex:
+    case BuiltInInstanceIndex:
+    case BuiltInBaseInstance:
+    case BuiltInBaseVertex:
+        expected_type = SPIRType::UInt;
+        break;
+
+    case BuiltInTessLevelInner:
+    case BuiltInTessLevelOuter:
+        if (get_execution_model() == ExecutionModelTessellationControl)
+            expected_type = SPIRType::Half;
+        break;
+
+    default:
+        break;
+    }
+
+    if (expected_type != expr_type.basetype)
+        expr = bitcast_expression(expr_type, expected_type, expr);
+
+    if (builtin == BuiltInTessCoord && get_entry_point().flags.get(ExecutionModeQuads) && expr_type.vecsize == 3)
+    {
+        // In SPIR-V, TessCoord is always a vec3, but with quad tessellation the
+        // underlying Metal builtin is a float2, so pad the expression back out.
+        expr = join("float3(", expr, ", 0)");
+    }
+}
+
+void CompilerMSL::bitcast_to_builtin_store(uint32_t target_id, std::string &expr, const SPIRType &expr_type)
+{
+    auto *var = maybe_get_backing_variable(target_id);
+    if (var)
+        target_id = var->self;
+
+    // Only interested in standalone builtin variables.
+    if (!has_decoration(target_id, DecorationBuiltIn))
+        return;
+
+    auto builtin = static_cast<BuiltIn>(get_decoration(target_id, DecorationBuiltIn));
+    auto expected_type = expr_type.basetype;
+    switch (builtin)
+    {
+    case BuiltInLayer:
+    case BuiltInViewportIndex:
+    case BuiltInFragStencilRefEXT:
+    case BuiltInPrimitiveId:
+    case BuiltInViewIndex:
+        expected_type = SPIRType::UInt;
+        break;
+
+    case BuiltInTessLevelInner:
+    case BuiltInTessLevelOuter:
+        expected_type = SPIRType::Half;
+        break;
+
+    default:
+        break;
+    }
+
+    if (expected_type != expr_type.basetype)
+    {
+        if (expected_type == SPIRType::Half && expr_type.basetype == SPIRType::Float)
+        {
+            // Half and float have different widths, so this cannot be a straight bitcast.
+            expr = join("half(", expr, ")");
+        }
+        else
+        {
+            auto type = expr_type;
+            type.basetype = expected_type;
+            expr = bitcast_expression(type, expr_type.basetype, expr);
+        }
+    }
+}
+
+std::string CompilerMSL::to_initializer_expression(const SPIRVariable &var)
+{
+    // Arrays and structs must be initialized with a fully spelled-out constant
+    // expression in MSL, so force constant_expression() for those cases.
+    auto &type = get<SPIRType>(var.basetype);
+    if (ir.ids[var.initializer].get_type() == TypeConstant &&
+        (!type.array.empty() || type.basetype == SPIRType::Struct))
+        return constant_expression(get<SPIRConstant>(var.initializer));
+    else
+        return CompilerGLSL::to_initializer_expression(var);
+}
+
+bool CompilerMSL::descriptor_set_is_argument_buffer(uint32_t desc_set) const
+{
+    if (!msl_options.argument_buffers)
+        return false;
+    if (desc_set >= kMaxArgumentBuffers)
+        return false;
+
+    return (argument_buffer_discrete_mask & (1u << desc_set)) == 0;
+}
+
+// Gather all used resources and sort them into argument buffers. Each argument
+// buffer corresponds to one SPIR-V descriptor set, and is bound to a fixed
+// [[buffer(N)]] slot so the runtime can address it by descriptor set index.
+void CompilerMSL::analyze_argument_buffers()
+{
+    for (auto &id : argument_buffer_ids)
+        id = 0;
+
+    // Output resources, sorted by resource index and type.
+    struct Resource
+    {
+        SPIRVariable *var;
+        string name;
+        SPIRType::BaseType basetype;
+        uint32_t index;
+        uint32_t plane;
+    };
+    SmallVector<Resource> resources_in_set[kMaxArgumentBuffers];
+
+    bool set_needs_swizzle_buffer[kMaxArgumentBuffers] = {};
+    bool set_needs_buffer_sizes[kMaxArgumentBuffers] = {};
+    bool needs_buffer_sizes = false;
+
+    ir.for_each_typed_id<SPIRVariable>([&](uint32_t self, SPIRVariable &var) {
+        if ((var.storage == StorageClassUniform || var.storage == StorageClassUniformConstant ||
+             var.storage == StorageClassStorageBuffer) &&
+            !is_hidden_variable(var))
+        {
+            uint32_t desc_set = get_decoration(self, DecorationDescriptorSet);
+            // Skip descriptor sets which are not emitted as argument buffers.
+            if (!descriptor_set_is_argument_buffer(desc_set))
+                return;
+
+            uint32_t var_id = var.self;
+            auto &type = get_variable_data_type(var);
+
+            if (desc_set >= kMaxArgumentBuffers)
+                SPIRV_CROSS_THROW("Descriptor set index is out of range.");
+
+            const MSLConstexprSampler *constexpr_sampler = nullptr;
+            if (type.basetype == SPIRType::SampledImage || type.basetype == SPIRType::Sampler)
+            {
+                constexpr_sampler = find_constexpr_sampler(var_id);
+                if (constexpr_sampler)
+                {
+                    // Remember this ID as a constexpr sampler in case it came in by set/binding.
+                    constexpr_samplers_by_id[var_id] = *constexpr_sampler;
+                }
+            }
+
+            if (type.basetype == SPIRType::SampledImage)
+            {
+                add_resource_name(var_id);
+
+                uint32_t plane_count = 1;
+                if (constexpr_sampler && constexpr_sampler->ycbcr_conversion_enable)
+                    plane_count = constexpr_sampler->planes;
+
+                for (uint32_t i = 0; i < plane_count; i++)
+                {
+                    uint32_t image_resource_index = get_metal_resource_index(var, SPIRType::Image, i);
+                    resources_in_set[desc_set].push_back(
+                        { &var, to_name(var_id), SPIRType::Image, image_resource_index, i });
+                }
+
+                if (type.image.dim != DimBuffer && !constexpr_sampler)
+                {
+                    uint32_t sampler_resource_index = get_metal_resource_index(var, SPIRType::Sampler);
+                    resources_in_set[desc_set].push_back(
+                        { &var, to_sampler_expression(var_id), SPIRType::Sampler, sampler_resource_index, 0 });
+                }
+            }
+            else if (!constexpr_sampler)
+            {
+                // Writable (storage) images are not supported inside iOS argument buffers, so skip them there.
+                if (!msl_options.is_ios() || type.basetype != SPIRType::Image || type.image.sampled != 2)
+                {
+                    add_resource_name(var_id);
+                    resources_in_set[desc_set].push_back(
+                        { &var, to_name(var_id), type.basetype, get_metal_resource_index(var, type.basetype), 0 });
+                }
+            }
+
+            // Check if this descriptor set needs a swizzle buffer or buffer-size buffer.
+            if (needs_swizzle_buffer_def && is_sampled_image_type(type))
+                set_needs_swizzle_buffer[desc_set] = true;
+            else if (buffers_requiring_array_length.count(var_id) != 0)
+            {
+                set_needs_buffer_sizes[desc_set] = true;
+                needs_buffer_sizes = true;
+            }
+        }
+    });
+
+    if (needs_swizzle_buffer_def || needs_buffer_sizes)
+    {
+        uint32_t uint_ptr_type_id = 0;
+
+        // We might have to add a swizzle buffer or buffer-size buffer resource to the set.
+        for (uint32_t desc_set = 0; desc_set < kMaxArgumentBuffers; desc_set++)
+        {
+            if (!set_needs_swizzle_buffer[desc_set] && !set_needs_buffer_sizes[desc_set])
+                continue;
+
+            if (uint_ptr_type_id == 0)
+            {
+                uint32_t offset = ir.increase_bound_by(2);
+                uint32_t type_id = offset;
+                uint_ptr_type_id = offset + 1;
+
+                SPIRType uint_type;
+                uint_type.basetype = SPIRType::UInt;
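+                // The two IDs reserved by increase_bound_by(2) above become this fresh
+                // `uint` type and the `uint *` pointer type built below; the input module
+                // never declared them, so the ID bound has to be raised by hand before
+                // set<SPIRType>() can register the synthesized types.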
+                uint_type.width = 32;
+                set<SPIRType>(type_id, uint_type);
+
+                SPIRType uint_type_pointer = uint_type;
+                uint_type_pointer.pointer = true;
+                uint_type_pointer.pointer_depth = 1;
+                uint_type_pointer.parent_type = type_id;
+                uint_type_pointer.storage = StorageClassUniform;
+                set<SPIRType>(uint_ptr_type_id, uint_type_pointer);
+                set_decoration(uint_ptr_type_id, DecorationArrayStride, 4);
+            }
+
+            if (set_needs_swizzle_buffer[desc_set])
+            {
+                uint32_t var_id = ir.increase_bound_by(1);
+                auto &var = set<SPIRVariable>(var_id, uint_ptr_type_id, StorageClassUniformConstant);
+                set_name(var_id, "spvSwizzleConstants");
+                set_decoration(var_id, DecorationDescriptorSet, desc_set);
+                set_decoration(var_id, DecorationBinding, kSwizzleBufferBinding);
+                resources_in_set[desc_set].push_back(
+                    { &var, to_name(var_id), SPIRType::UInt, get_metal_resource_index(var, SPIRType::UInt), 0 });
+            }
+
+            if (set_needs_buffer_sizes[desc_set])
+            {
+                uint32_t var_id = ir.increase_bound_by(1);
+                auto &var = set<SPIRVariable>(var_id, uint_ptr_type_id, StorageClassUniformConstant);
+                set_name(var_id, "spvBufferSizeConstants");
+                set_decoration(var_id, DecorationDescriptorSet, desc_set);
+                set_decoration(var_id, DecorationBinding, kBufferSizeBufferBinding);
+                resources_in_set[desc_set].push_back(
+                    { &var, to_name(var_id), SPIRType::UInt, get_metal_resource_index(var, SPIRType::UInt), 0 });
+            }
+        }
+    }
+
+    for (uint32_t desc_set = 0; desc_set < kMaxArgumentBuffers; desc_set++)
+    {
+        auto &resources = resources_in_set[desc_set];
+        if (resources.empty())
+            continue;
+
+        assert(descriptor_set_is_argument_buffer(desc_set));
+
+        uint32_t next_id = ir.increase_bound_by(3);
+        uint32_t type_id = next_id + 1;
+        uint32_t ptr_type_id = next_id + 2;
+        argument_buffer_ids[desc_set] = next_id;
+
+        auto &buffer_type = set<SPIRType>(type_id);
+
+        buffer_type.basetype = SPIRType::Struct;
+
+        if ((argument_buffer_device_storage_mask & (1u << desc_set)) != 0)
+        {
+            buffer_type.storage = StorageClassStorageBuffer;
+            // Make sure the argument buffer gets marked as const device.
+            set_decoration(next_id, DecorationNonWritable);
+            // The type must be a Block for this to take effect.
+            set_decoration(type_id, DecorationBlock);
+        }
+        else
+            buffer_type.storage = StorageClassUniform;
+
+        set_name(type_id, join("spvDescriptorSetBuffer", desc_set));
+
+        auto &ptr_type = set<SPIRType>(ptr_type_id);
+        ptr_type = buffer_type;
+        ptr_type.pointer = true;
+        ptr_type.pointer_depth = 1;
+        ptr_type.parent_type = type_id;
+
+        uint32_t buffer_variable_id = next_id;
+        set<SPIRVariable>(buffer_variable_id, ptr_type_id, StorageClassUniform);
+        set_name(buffer_variable_id, join("spvDescriptorSet", desc_set));
+
+        // Members must be emitted in resource-index order.
+        sort(begin(resources), end(resources), [&](const Resource &lhs, const Resource &rhs) -> bool {
+            return tie(lhs.index, lhs.basetype) < tie(rhs.index, rhs.basetype);
+        });
+
+        uint32_t member_index = 0;
+        for (auto &resource : resources)
+        {
+            auto &var = *resource.var;
+            auto &type = get_variable_data_type(var);
+            string mbr_name = ensure_valid_name(resource.name, "m");
+            if (resource.plane > 0)
+                mbr_name += join(plane_name_suffix, resource.plane);
+            set_member_name(buffer_type.self, member_index, mbr_name);
+
+            if (resource.basetype == SPIRType::Sampler && type.basetype != SPIRType::Sampler)
+            {
+                // A sampler of a combined image sampler; a sampler type must be synthesized here.
+                bool type_is_array = !type.array.empty();
+                uint32_t sampler_type_id = ir.increase_bound_by(type_is_array ? 2 : 1);
+                auto &new_sampler_type = set<SPIRType>(sampler_type_id);
+                new_sampler_type.basetype = SPIRType::Sampler;
+                new_sampler_type.storage = StorageClassUniformConstant;
+
+                if (type_is_array)
+                {
+                    uint32_t sampler_type_array_id = sampler_type_id + 1;
+                    auto &sampler_type_array = set<SPIRType>(sampler_type_array_id);
+                    sampler_type_array = new_sampler_type;
+                    sampler_type_array.array = type.array;
+                    sampler_type_array.array_size_literal = type.array_size_literal;
+                    sampler_type_array.parent_type = sampler_type_id;
+                    buffer_type.member_types.push_back(sampler_type_array_id);
+                }
+                else
+                    buffer_type.member_types.push_back(sampler_type_id);
+            }
+            else
+            {
+                uint32_t binding = get_decoration(var.self, DecorationBinding);
+                SetBindingPair pair = { desc_set, binding };
+
+                if (resource.basetype == SPIRType::Image || resource.basetype == SPIRType::Sampler ||
+                    resource.basetype == SPIRType::SampledImage)
+                {
+                    // Drop pointer information when emitting the resource into the struct.
+                    buffer_type.member_types.push_back(get_variable_data_type_id(var));
+                    if (resource.plane == 0)
+                        set_qualified_name(var.self, join(to_name(buffer_variable_id), ".", mbr_name));
+                }
+                else if (buffers_requiring_dynamic_offset.count(pair))
+                {
+                    // The qualified name is set later, via a variable holding the corrected buffer address.
+                    buffer_type.member_types.push_back(var.basetype);
+                    buffers_requiring_dynamic_offset[pair].second = var.self;
+                }
+                else
+                {
+                    // Buffers are declared as pointers, so dereference automatically where needed.
+                    buffer_type.member_types.push_back(var.basetype);
+                    if (type.array.empty())
+                        set_qualified_name(var.self, join("(*", to_name(buffer_variable_id), ".", mbr_name, ")"));
+                    else
+                        set_qualified_name(var.self, join(to_name(buffer_variable_id), ".", mbr_name));
+                }
+            }
+
+            set_extended_member_decoration(buffer_type.self, member_index, SPIRVCrossDecorationResourceIndexPrimary,
+                                           resource.index);
+            set_extended_member_decoration(buffer_type.self, member_index, SPIRVCrossDecorationInterfaceOrigID,
+                                           var.self);
+            member_index++;
+        }
+    }
+}
+
+bool CompilerMSL::SetBindingPair::operator==(const SetBindingPair &other) const
+{
+    return desc_set == other.desc_set && binding == other.binding;
+}
+
+bool CompilerMSL::SetBindingPair::operator<(const SetBindingPair &other) const
+{
+    return desc_set < other.desc_set || (desc_set == other.desc_set && binding < other.binding);
+}
+
+bool CompilerMSL::StageSetBinding::operator==(const StageSetBinding &other) const
+{
+    return model == other.model && desc_set == other.desc_set && binding == other.binding;
+}
+
+size_t CompilerMSL::InternalHasher::operator()(const SetBindingPair &value) const
+{
+    // Quality of the hash doesn't really matter here.
+    auto hash_set = std::hash<uint32_t>()(value.desc_set);
+    auto hash_binding = std::hash<uint32_t>()(value.binding);
+    return (hash_set * 0x10001b31) ^ hash_binding;
+}
+
+size_t CompilerMSL::InternalHasher::operator()(const StageSetBinding &value) const
+{
+    // Quality of the hash doesn't really matter here.
+    auto hash_model = std::hash<uint32_t>()(value.model);
+    auto hash_set = std::hash<uint32_t>()(value.desc_set);
+    auto tmp_hash = (hash_model * 0x10001b31) ^ hash_set;
+    return (tmp_hash * 0x10001b31) ^ value.binding;
+}
diff --git a/third_party/rust/spirv_cross/src/vendor/SPIRV-Cross/spirv_msl.hpp b/third_party/rust/spirv_cross/src/vendor/SPIRV-Cross/spirv_msl.hpp
new file mode 100644
index 000000000000..63fce6195511
--- /dev/null
+++ b/third_party/rust/spirv_cross/src/vendor/SPIRV-Cross/spirv_msl.hpp
@@ -0,0 +1,934 @@
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+#ifndef SPIRV_CROSS_MSL_HPP
+#define SPIRV_CROSS_MSL_HPP
+
+#include "spirv_glsl.hpp"
+#include <map>
+#include <set>
+#include <stddef.h>
+#include <unordered_map>
+#include <unordered_set>
+
+namespace SPIRV_CROSS_NAMESPACE
+{
+
+// Format of a vertex attribute; lets the compiler know when an input
+// is an 8- or 16-bit unsigned integer.
+enum MSLVertexFormat
+{
+	MSL_VERTEX_FORMAT_OTHER = 0,
+	MSL_VERTEX_FORMAT_UINT8 = 1,
+	MSL_VERTEX_FORMAT_UINT16 = 2,
+	MSL_VERTEX_FORMAT_INT_MAX = 0x7fffffff
+};
+
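+// A minimal sketch of how a client might drive this compiler (illustrative
+// only; `load_spirv_words` is a hypothetical loader, and the option values
+// below are arbitrary examples, not defaults mandated by this header):
+//
+//     std::vector<uint32_t> words = load_spirv_words("shader.spv");
+//     CompilerMSL msl(std::move(words));
+//
+//     CompilerMSL::Options opts;
+//     opts.platform = CompilerMSL::Options::macOS;
+//     opts.set_msl_version(2, 1);
+//     msl.set_msl_options(opts);
+//
+//     MSLVertexAttr attr;          // remap the vertex input at location 0
+//     attr.location = 0;
+//     attr.msl_buffer = 0;
+//     msl.add_msl_vertex_attribute(attr);
+//
+//     std::string msl_source = msl.compile();
+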
+
+// Defines the MSL characteristics of a vertex attribute at a particular location.
+// After compilation, it is possible to query whether or not a location was used.
+struct MSLVertexAttr
+{
+	uint32_t location = 0;
+	uint32_t msl_buffer = 0;
+	uint32_t msl_offset = 0;
+	uint32_t msl_stride = 0;
+	bool per_instance = false;
+	MSLVertexFormat format = MSL_VERTEX_FORMAT_OTHER;
+	spv::BuiltIn builtin = spv::BuiltInMax;
+};
+
+// Maps a SPIR-V resource (stage, descriptor set, binding) to the MSL buffer,
+// texture and sampler indices it should occupy in the generated shader.
+struct MSLResourceBinding
+{
+	spv::ExecutionModel stage = spv::ExecutionModelMax;
+	uint32_t desc_set = 0;
+	uint32_t binding = 0;
+	uint32_t msl_buffer = 0;
+	uint32_t msl_texture = 0;
+	uint32_t msl_sampler = 0;
+};
+
+enum MSLSamplerCoord
+{
+	MSL_SAMPLER_COORD_NORMALIZED = 0,
+	MSL_SAMPLER_COORD_PIXEL = 1,
+	MSL_SAMPLER_INT_MAX = 0x7fffffff
+};
+
+enum MSLSamplerFilter
+{
+	MSL_SAMPLER_FILTER_NEAREST = 0,
+	MSL_SAMPLER_FILTER_LINEAR = 1,
+	MSL_SAMPLER_FILTER_INT_MAX = 0x7fffffff
+};
+
+enum MSLSamplerMipFilter
+{
+	MSL_SAMPLER_MIP_FILTER_NONE = 0,
+	MSL_SAMPLER_MIP_FILTER_NEAREST = 1,
+	MSL_SAMPLER_MIP_FILTER_LINEAR = 2,
+	MSL_SAMPLER_MIP_FILTER_INT_MAX = 0x7fffffff
+};
+
+enum MSLSamplerAddress
+{
+	MSL_SAMPLER_ADDRESS_CLAMP_TO_ZERO = 0,
+	MSL_SAMPLER_ADDRESS_CLAMP_TO_EDGE = 1,
+	MSL_SAMPLER_ADDRESS_CLAMP_TO_BORDER = 2,
+	MSL_SAMPLER_ADDRESS_REPEAT = 3,
+	MSL_SAMPLER_ADDRESS_MIRRORED_REPEAT = 4,
+	MSL_SAMPLER_ADDRESS_INT_MAX = 0x7fffffff
+};
+
+enum MSLSamplerCompareFunc
+{
+	MSL_SAMPLER_COMPARE_FUNC_NEVER = 0,
+	MSL_SAMPLER_COMPARE_FUNC_LESS = 1,
+	MSL_SAMPLER_COMPARE_FUNC_LESS_EQUAL = 2,
+	MSL_SAMPLER_COMPARE_FUNC_GREATER = 3,
+	MSL_SAMPLER_COMPARE_FUNC_GREATER_EQUAL = 4,
+	MSL_SAMPLER_COMPARE_FUNC_EQUAL = 5,
+	MSL_SAMPLER_COMPARE_FUNC_NOT_EQUAL = 6,
+	MSL_SAMPLER_COMPARE_FUNC_ALWAYS = 7,
+	MSL_SAMPLER_COMPARE_FUNC_INT_MAX = 0x7fffffff
+};
+
+enum MSLSamplerBorderColor
+{
+	MSL_SAMPLER_BORDER_COLOR_TRANSPARENT_BLACK = 0,
+	MSL_SAMPLER_BORDER_COLOR_OPAQUE_BLACK = 1,
+	MSL_SAMPLER_BORDER_COLOR_OPAQUE_WHITE = 2,
+	MSL_SAMPLER_BORDER_COLOR_INT_MAX = 0x7fffffff
+};
+
+enum MSLFormatResolution
+{
+	MSL_FORMAT_RESOLUTION_444 = 0,
+	MSL_FORMAT_RESOLUTION_422,
+	MSL_FORMAT_RESOLUTION_420,
+	MSL_FORMAT_RESOLUTION_INT_MAX = 0x7fffffff
+};
+
+enum MSLChromaLocation
+{
+	MSL_CHROMA_LOCATION_COSITED_EVEN = 0,
+	MSL_CHROMA_LOCATION_MIDPOINT,
+	MSL_CHROMA_LOCATION_INT_MAX = 0x7fffffff
+};
+
+enum MSLComponentSwizzle
+{
+	MSL_COMPONENT_SWIZZLE_IDENTITY = 0,
+	MSL_COMPONENT_SWIZZLE_ZERO,
+	MSL_COMPONENT_SWIZZLE_ONE,
+	MSL_COMPONENT_SWIZZLE_R,
+	MSL_COMPONENT_SWIZZLE_G,
+	MSL_COMPONENT_SWIZZLE_B,
+	MSL_COMPONENT_SWIZZLE_A,
+	MSL_COMPONENT_SWIZZLE_INT_MAX = 0x7fffffff
+};
+
+enum MSLSamplerYCbCrModelConversion
+{
+	MSL_SAMPLER_YCBCR_MODEL_CONVERSION_RGB_IDENTITY = 0,
+	MSL_SAMPLER_YCBCR_MODEL_CONVERSION_YCBCR_IDENTITY,
+	MSL_SAMPLER_YCBCR_MODEL_CONVERSION_YCBCR_BT_709,
+	MSL_SAMPLER_YCBCR_MODEL_CONVERSION_YCBCR_BT_601,
+	MSL_SAMPLER_YCBCR_MODEL_CONVERSION_YCBCR_BT_2020,
+	MSL_SAMPLER_YCBCR_MODEL_CONVERSION_INT_MAX = 0x7fffffff
+};
+
+enum MSLSamplerYCbCrRange
+{
+	MSL_SAMPLER_YCBCR_RANGE_ITU_FULL = 0,
+	MSL_SAMPLER_YCBCR_RANGE_ITU_NARROW,
+	MSL_SAMPLER_YCBCR_RANGE_INT_MAX = 0x7fffffff
+};
+
+struct MSLConstexprSampler
+{
+	MSLSamplerCoord coord = MSL_SAMPLER_COORD_NORMALIZED;
+	MSLSamplerFilter min_filter = MSL_SAMPLER_FILTER_NEAREST;
+	MSLSamplerFilter mag_filter = MSL_SAMPLER_FILTER_NEAREST;
+	MSLSamplerMipFilter mip_filter = MSL_SAMPLER_MIP_FILTER_NONE;
+	MSLSamplerAddress s_address = MSL_SAMPLER_ADDRESS_CLAMP_TO_EDGE;
+	MSLSamplerAddress t_address = MSL_SAMPLER_ADDRESS_CLAMP_TO_EDGE;
+	MSLSamplerAddress r_address = MSL_SAMPLER_ADDRESS_CLAMP_TO_EDGE;
+	MSLSamplerCompareFunc compare_func = MSL_SAMPLER_COMPARE_FUNC_NEVER;
+	MSLSamplerBorderColor border_color = MSL_SAMPLER_BORDER_COLOR_TRANSPARENT_BLACK;
+	float lod_clamp_min = 0.0f;
+	float lod_clamp_max = 1000.0f;
+	int max_anisotropy = 1;
+
+	// Sampler Y'CbCr conversion parameters.
+	uint32_t planes = 0;
+	MSLFormatResolution resolution = MSL_FORMAT_RESOLUTION_444;
+	MSLSamplerFilter chroma_filter = MSL_SAMPLER_FILTER_NEAREST;
+	MSLChromaLocation x_chroma_offset = MSL_CHROMA_LOCATION_COSITED_EVEN;
+	MSLChromaLocation y_chroma_offset = MSL_CHROMA_LOCATION_COSITED_EVEN;
+	MSLComponentSwizzle swizzle[4];
+	MSLSamplerYCbCrModelConversion ycbcr_model = MSL_SAMPLER_YCBCR_MODEL_CONVERSION_RGB_IDENTITY;
+	MSLSamplerYCbCrRange ycbcr_range = MSL_SAMPLER_YCBCR_RANGE_ITU_FULL;
+	uint32_t bpc = 8;
+
+	bool compare_enable = false;
+	bool lod_clamp_enable = false;
+	bool anisotropy_enable = false;
+	bool ycbcr_conversion_enable = false;
+
+	MSLConstexprSampler()
+	{
+		for (uint32_t i = 0; i < 4; i++)
+			swizzle[i] = MSL_COMPONENT_SWIZZLE_IDENTITY;
+	}
+	bool swizzle_is_identity() const
+	{
+		return (swizzle[0] == MSL_COMPONENT_SWIZZLE_IDENTITY && swizzle[1] == MSL_COMPONENT_SWIZZLE_IDENTITY &&
+		        swizzle[2] == MSL_COMPONENT_SWIZZLE_IDENTITY && swizzle[3] == MSL_COMPONENT_SWIZZLE_IDENTITY);
+	}
+	bool swizzle_has_one_or_zero() const
+	{
+		return (swizzle[0] == MSL_COMPONENT_SWIZZLE_ZERO || swizzle[0] == MSL_COMPONENT_SWIZZLE_ONE ||
+		        swizzle[1] == MSL_COMPONENT_SWIZZLE_ZERO || swizzle[1] == MSL_COMPONENT_SWIZZLE_ONE ||
+		        swizzle[2] == MSL_COMPONENT_SWIZZLE_ZERO || swizzle[2] == MSL_COMPONENT_SWIZZLE_ONE ||
+		        swizzle[3] == MSL_COMPONENT_SWIZZLE_ZERO || swizzle[3] == MSL_COMPONENT_SWIZZLE_ONE);
+	}
+};
+
+// Special desc_set value in an MSLResourceBinding used to indicate the push constant block.
+static const uint32_t kPushConstDescSet = ~(0u);
+
+// Special binding value in an MSLResourceBinding used to indicate the push constant block.
+static const uint32_t kPushConstBinding = 0;
+
+// Special binding value used to indicate the swizzle buffer.
+static const uint32_t kSwizzleBufferBinding = ~(1u);
+
+// Special binding value used to indicate the buffer-size buffer backing OpArrayLength.
+static const uint32_t kBufferSizeBufferBinding = ~(2u);
+
+// Special binding value used to indicate the argument buffer itself.
+static const uint32_t kArgumentBufferBinding = ~(3u);
+
+static const uint32_t kMaxArgumentBuffers = 8;
+
+// Decompiles SPIR-V to Metal Shading Language.
+class CompilerMSL : public CompilerGLSL
+{
+public:
+	// Options for compiling to Metal Shading Language.
+	struct Options
+	{
+		typedef enum
+		{
+			iOS = 0,
+			macOS = 1
+		} Platform;
+
+		Platform platform = macOS;
+		uint32_t msl_version = make_msl_version(1, 2);
+		uint32_t texel_buffer_texture_width = 4096;
+		uint32_t swizzle_buffer_index = 30;
+		uint32_t indirect_params_buffer_index = 29;
+		uint32_t shader_output_buffer_index = 28;
+		uint32_t shader_patch_output_buffer_index = 27;
+		uint32_t shader_tess_factor_buffer_index = 26;
+		uint32_t buffer_size_buffer_index = 25;
+		uint32_t view_mask_buffer_index = 24;
+		uint32_t dynamic_offsets_buffer_index = 23;
+		uint32_t shader_input_wg_index = 0;
+		uint32_t device_index = 0;
+		bool enable_point_size_builtin = true;
+		bool disable_rasterization = false;
+		bool capture_output_to_buffer = false;
+		bool swizzle_texture_samples = false;
+		bool tess_domain_origin_lower_left = false;
+		bool multiview = false;
+		bool view_index_from_device_index = false;
+		bool dispatch_base = false;
+		bool texture_1D_as_2D = false;
+
+		// Emit descriptor sets as Metal indirect argument buffers (requires MSL 2.0).
+		bool argument_buffers = false;
+
+		// Ensure vertex and instance indices start at zero.
+		bool enable_base_index_zero = false;
+
+		// Pad fragment outputs out to the component count of the render-pass attachment.
+		bool pad_fragment_output_components = false;
+
+		// Whether the iOS target supports the [[base_vertex]]/[[base_instance]] attributes.
+		bool ios_support_base_vertex_instance = false;
+
+		// Use framebuffer fetch for subpass inputs on iOS.
+		bool ios_use_framebuffer_fetch_subpasses = false;
+
+		// Emit invariant float math helpers (see SPVFuncImplFAdd/FMul).
+		bool invariant_float_math = false;
+
+		// Emulate texturecube_array with texture2d_array where the former is unavailable.
+		bool emulate_cube_array = false;
+
+		// Use the Binding decoration directly for resource indices.
+		bool enable_decoration_binding = false;
+
+		// Use native texel buffer support (requires MSL 2.1).
+		bool texture_buffer_native = false;
+
+		bool is_ios()
+		{
+			return platform == iOS;
+		}
+
+		bool is_macos()
+		{
+			return platform == macOS;
+		}
+
+		void set_msl_version(uint32_t major, uint32_t minor = 0, 
uint32_t patch = 0) + { + msl_version = make_msl_version(major, minor, patch); + } + + bool supports_msl_version(uint32_t major, uint32_t minor = 0, uint32_t patch = 0) const + { + return msl_version >= make_msl_version(major, minor, patch); + } + + static uint32_t make_msl_version(uint32_t major, uint32_t minor = 0, uint32_t patch = 0) + { + return (major * 10000) + (minor * 100) + patch; + } + }; + + const Options &get_msl_options() const + { + return msl_options; + } + + void set_msl_options(const Options &opts) + { + msl_options = opts; + } + + + + bool get_is_rasterization_disabled() const + { + return is_rasterization_disabled && (get_entry_point().model == spv::ExecutionModelVertex || + get_entry_point().model == spv::ExecutionModelTessellationControl || + get_entry_point().model == spv::ExecutionModelTessellationEvaluation); + } + + + + bool needs_swizzle_buffer() const + { + return used_swizzle_buffer; + } + + + + bool needs_buffer_size_buffer() const + { + return !buffers_requiring_array_length.empty(); + } + + + + bool needs_view_mask_buffer() const + { + return msl_options.multiview && !msl_options.view_index_from_device_index; + } + + + + bool needs_dispatch_base_buffer() const + { + return msl_options.dispatch_base && !msl_options.supports_msl_version(1, 2); + } + + + + bool needs_output_buffer() const + { + return capture_output_to_buffer && stage_out_var_id != ID(0); + } + + + + bool needs_patch_output_buffer() const + { + return capture_output_to_buffer && patch_stage_out_var_id != ID(0); + } + + + + bool needs_input_threadgroup_mem() const + { + return capture_output_to_buffer && stage_in_var_id != ID(0); + } + + explicit CompilerMSL(std::vector spirv); + CompilerMSL(const uint32_t *ir, size_t word_count); + explicit CompilerMSL(const ParsedIR &ir); + explicit CompilerMSL(ParsedIR &&ir); + + + + + + void add_msl_vertex_attribute(const MSLVertexAttr &attr); + + + + + + + void add_msl_resource_binding(const MSLResourceBinding &resource); + + + + + + + + void add_dynamic_buffer(uint32_t desc_set, uint32_t binding, uint32_t index); + + + + void add_discrete_descriptor_set(uint32_t desc_set); + + + + void set_argument_buffer_device_address_space(uint32_t desc_set, bool device_storage); + + + bool is_msl_vertex_attribute_used(uint32_t location); + + + + + + bool is_msl_resource_binding_used(spv::ExecutionModel model, uint32_t set, uint32_t binding); + + + + + + + + + uint32_t get_automatic_msl_resource_binding(uint32_t id) const; + + + + uint32_t get_automatic_msl_resource_binding_secondary(uint32_t id) const; + + + + uint32_t get_automatic_msl_resource_binding_tertiary(uint32_t id) const; + + + + uint32_t get_automatic_msl_resource_binding_quaternary(uint32_t id) const; + + + std::string compile() override; + + + + + + + + + void remap_constexpr_sampler(VariableID id, const MSLConstexprSampler &sampler); + + + + void remap_constexpr_sampler_by_binding(uint32_t desc_set, uint32_t binding, const MSLConstexprSampler &sampler); + + + + void set_fragment_output_components(uint32_t location, uint32_t components); + +protected: + + + enum SPVFuncImpl + { + SPVFuncImplNone, + SPVFuncImplMod, + SPVFuncImplRadians, + SPVFuncImplDegrees, + SPVFuncImplFindILsb, + SPVFuncImplFindSMsb, + SPVFuncImplFindUMsb, + SPVFuncImplSSign, + SPVFuncImplArrayCopyMultidimBase, + + + SPVFuncImplArrayCopy = SPVFuncImplArrayCopyMultidimBase + 1, + SPVFuncImplArrayOfArrayCopy2Dim = SPVFuncImplArrayCopyMultidimBase + 2, + SPVFuncImplArrayOfArrayCopy3Dim = SPVFuncImplArrayCopyMultidimBase + 3, + 
SPVFuncImplArrayOfArrayCopy4Dim = SPVFuncImplArrayCopyMultidimBase + 4, + SPVFuncImplArrayOfArrayCopy5Dim = SPVFuncImplArrayCopyMultidimBase + 5, + SPVFuncImplArrayOfArrayCopy6Dim = SPVFuncImplArrayCopyMultidimBase + 6, + SPVFuncImplTexelBufferCoords, + SPVFuncImplImage2DAtomicCoords, + SPVFuncImplFMul, + SPVFuncImplFAdd, + SPVFuncImplCubemapTo2DArrayFace, + SPVFuncImplUnsafeArray, + SPVFuncImplInverse4x4, + SPVFuncImplInverse3x3, + SPVFuncImplInverse2x2, + + + SPVFuncImplForwardArgs, + + SPVFuncImplGetSwizzle, + SPVFuncImplTextureSwizzle, + SPVFuncImplGatherSwizzle, + SPVFuncImplGatherCompareSwizzle, + SPVFuncImplSubgroupBallot, + SPVFuncImplSubgroupBallotBitExtract, + SPVFuncImplSubgroupBallotFindLSB, + SPVFuncImplSubgroupBallotFindMSB, + SPVFuncImplSubgroupBallotBitCount, + SPVFuncImplSubgroupAllEqual, + SPVFuncImplReflectScalar, + SPVFuncImplRefractScalar, + SPVFuncImplFaceForwardScalar, + SPVFuncImplChromaReconstructNearest2Plane, + SPVFuncImplChromaReconstructNearest3Plane, + SPVFuncImplChromaReconstructLinear422CositedEven2Plane, + SPVFuncImplChromaReconstructLinear422CositedEven3Plane, + SPVFuncImplChromaReconstructLinear422Midpoint2Plane, + SPVFuncImplChromaReconstructLinear422Midpoint3Plane, + SPVFuncImplChromaReconstructLinear420XCositedEvenYCositedEven2Plane, + SPVFuncImplChromaReconstructLinear420XCositedEvenYCositedEven3Plane, + SPVFuncImplChromaReconstructLinear420XMidpointYCositedEven2Plane, + SPVFuncImplChromaReconstructLinear420XMidpointYCositedEven3Plane, + SPVFuncImplChromaReconstructLinear420XCositedEvenYMidpoint2Plane, + SPVFuncImplChromaReconstructLinear420XCositedEvenYMidpoint3Plane, + SPVFuncImplChromaReconstructLinear420XMidpointYMidpoint2Plane, + SPVFuncImplChromaReconstructLinear420XMidpointYMidpoint3Plane, + SPVFuncImplExpandITUFullRange, + SPVFuncImplExpandITUNarrowRange, + SPVFuncImplConvertYCbCrBT709, + SPVFuncImplConvertYCbCrBT601, + SPVFuncImplConvertYCbCrBT2020, + SPVFuncImplDynamicImageSampler, + + SPVFuncImplArrayCopyMultidimMax = 6 + }; + + + + void emit_texture_op(const Instruction &i) override; + void emit_binary_unord_op(uint32_t result_type, uint32_t result_id, uint32_t op0, uint32_t op1, const char *op); + void emit_instruction(const Instruction &instr) override; + void emit_glsl_op(uint32_t result_type, uint32_t result_id, uint32_t op, const uint32_t *args, + uint32_t count) override; + void emit_spv_amd_shader_trinary_minmax_op(uint32_t result_type, uint32_t result_id, uint32_t op, + const uint32_t *args, uint32_t count) override; + void emit_header() override; + void emit_function_prototype(SPIRFunction &func, const Bitset &return_flags) override; + void emit_sampled_image_op(uint32_t result_type, uint32_t result_id, uint32_t image_id, uint32_t samp_id) override; + void emit_subgroup_op(const Instruction &i) override; + std::string to_texture_op(const Instruction &i, bool *forward, + SmallVector &inherited_expressions) override; + void emit_fixup() override; + std::string to_struct_member(const SPIRType &type, uint32_t member_type_id, uint32_t index, + const std::string &qualifier = ""); + void emit_struct_member(const SPIRType &type, uint32_t member_type_id, uint32_t index, + const std::string &qualifier = "", uint32_t base_offset = 0) override; + void emit_struct_padding_target(const SPIRType &type) override; + std::string type_to_glsl(const SPIRType &type, uint32_t id = 0) override; + + + std::string type_to_array_glsl(const SPIRType &type) override; + + + std::string variable_decl(const SPIRVariable &variable) override; + + + std::string 
variable_decl(const SPIRType &type, const std::string &name, uint32_t id = 0) override; + + std::string image_type_glsl(const SPIRType &type, uint32_t id = 0) override; + std::string sampler_type(const SPIRType &type); + std::string builtin_to_glsl(spv::BuiltIn builtin, spv::StorageClass storage) override; + std::string to_func_call_arg(const SPIRFunction::Parameter &arg, uint32_t id) override; + std::string to_name(uint32_t id, bool allow_alias = true) const override; + std::string to_function_name(VariableID img, const SPIRType &imgtype, bool is_fetch, bool is_gather, bool is_proj, + bool has_array_offsets, bool has_offset, bool has_grad, bool has_dref, uint32_t lod, + uint32_t minlod) override; + std::string to_function_args(VariableID img, const SPIRType &imgtype, bool is_fetch, bool is_gather, bool is_proj, + uint32_t coord, uint32_t coord_components, uint32_t dref, uint32_t grad_x, + uint32_t grad_y, uint32_t lod, uint32_t coffset, uint32_t offset, uint32_t bias, + uint32_t comp, uint32_t sample, uint32_t minlod, bool *p_forward) override; + std::string to_initializer_expression(const SPIRVariable &var) override; + + std::string unpack_expression_type(std::string expr_str, const SPIRType &type, uint32_t physical_type_id, + bool is_packed, bool row_major) override; + + + bool builtin_translates_to_nonarray(spv::BuiltIn builtin) const override; + + std::string bitcast_glsl_op(const SPIRType &result_type, const SPIRType &argument_type) override; + bool skip_argument(uint32_t id) const override; + std::string to_member_reference(uint32_t base, const SPIRType &type, uint32_t index, bool ptr_chain) override; + std::string to_qualifiers_glsl(uint32_t id) override; + void replace_illegal_names() override; + void declare_undefined_values() override; + void declare_constant_arrays(); + + + void declare_complex_constant_arrays(); + + bool is_patch_block(const SPIRType &type); + bool is_non_native_row_major_matrix(uint32_t id) override; + bool member_is_non_native_row_major_matrix(const SPIRType &type, uint32_t index) override; + std::string convert_row_major_matrix(std::string exp_str, const SPIRType &exp_type, uint32_t physical_type_id, + bool is_packed) override; + + void preprocess_op_codes(); + void localize_global_variables(); + void extract_global_variables_from_functions(); + void mark_packable_structs(); + void mark_as_packable(SPIRType &type); + + std::unordered_map> function_global_vars; + void extract_global_variables_from_function(uint32_t func_id, std::set &added_arg_ids, + std::unordered_set &global_var_ids, + std::unordered_set &processed_func_ids); + uint32_t add_interface_block(spv::StorageClass storage, bool patch = false); + uint32_t add_interface_block_pointer(uint32_t ib_var_id, spv::StorageClass storage); + + void add_variable_to_interface_block(spv::StorageClass storage, const std::string &ib_var_ref, SPIRType &ib_type, + SPIRVariable &var, bool strip_array); + void add_composite_variable_to_interface_block(spv::StorageClass storage, const std::string &ib_var_ref, + SPIRType &ib_type, SPIRVariable &var, bool strip_array); + void add_plain_variable_to_interface_block(spv::StorageClass storage, const std::string &ib_var_ref, + SPIRType &ib_type, SPIRVariable &var, bool strip_array); + void add_plain_member_variable_to_interface_block(spv::StorageClass storage, const std::string &ib_var_ref, + SPIRType &ib_type, SPIRVariable &var, uint32_t index, + bool strip_array); + void add_composite_member_variable_to_interface_block(spv::StorageClass storage, const std::string 
&ib_var_ref, + SPIRType &ib_type, SPIRVariable &var, uint32_t index, + bool strip_array); + uint32_t get_accumulated_member_location(const SPIRVariable &var, uint32_t mbr_idx, bool strip_array); + void add_tess_level_input_to_interface_block(const std::string &ib_var_ref, SPIRType &ib_type, SPIRVariable &var); + + void fix_up_interface_member_indices(spv::StorageClass storage, uint32_t ib_type_id); + + void mark_location_as_used_by_shader(uint32_t location, spv::StorageClass storage); + uint32_t ensure_correct_builtin_type(uint32_t type_id, spv::BuiltIn builtin); + uint32_t ensure_correct_attribute_type(uint32_t type_id, uint32_t location); + + void emit_custom_templates(); + void emit_custom_functions(); + void emit_resources(); + void emit_specialization_constants_and_structs(); + void emit_interface_block(uint32_t ib_var_id); + bool maybe_emit_array_assignment(uint32_t id_lhs, uint32_t id_rhs); + + void fix_up_shader_inputs_outputs(); + + std::string func_type_decl(SPIRType &type); + std::string entry_point_args_classic(bool append_comma); + std::string entry_point_args_argument_buffer(bool append_comma); + std::string entry_point_arg_stage_in(); + void entry_point_args_builtin(std::string &args); + void entry_point_args_discrete_descriptors(std::string &args); + std::string to_qualified_member_name(const SPIRType &type, uint32_t index); + std::string ensure_valid_name(std::string name, std::string pfx); + std::string to_sampler_expression(uint32_t id); + std::string to_swizzle_expression(uint32_t id); + std::string to_buffer_size_expression(uint32_t id); + std::string builtin_qualifier(spv::BuiltIn builtin); + std::string builtin_type_decl(spv::BuiltIn builtin, uint32_t id = 0); + std::string built_in_func_arg(spv::BuiltIn builtin, bool prefix_comma); + std::string member_attribute_qualifier(const SPIRType &type, uint32_t index); + std::string argument_decl(const SPIRFunction::Parameter &arg); + std::string round_fp_tex_coords(std::string tex_coords, bool coord_is_fp); + uint32_t get_metal_resource_index(SPIRVariable &var, SPIRType::BaseType basetype, uint32_t plane = 0); + uint32_t get_ordered_member_location(uint32_t type_id, uint32_t index, uint32_t *comp = nullptr); + + + + + uint32_t get_declared_type_size_msl(const SPIRType &type, bool packed, bool row_major) const; + uint32_t get_declared_type_array_stride_msl(const SPIRType &type, bool packed, bool row_major) const; + uint32_t get_declared_type_matrix_stride_msl(const SPIRType &type, bool packed, bool row_major) const; + uint32_t get_declared_type_alignment_msl(const SPIRType &type, bool packed, bool row_major) const; + + uint32_t get_declared_struct_member_size_msl(const SPIRType &struct_type, uint32_t index) const; + uint32_t get_declared_struct_member_array_stride_msl(const SPIRType &struct_type, uint32_t index) const; + uint32_t get_declared_struct_member_matrix_stride_msl(const SPIRType &struct_type, uint32_t index) const; + uint32_t get_declared_struct_member_alignment_msl(const SPIRType &struct_type, uint32_t index) const; + + const SPIRType &get_physical_member_type(const SPIRType &struct_type, uint32_t index) const; + + uint32_t get_declared_struct_size_msl(const SPIRType &struct_type, bool ignore_alignment = false, + bool ignore_padding = false) const; + + std::string to_component_argument(uint32_t id); + void align_struct(SPIRType &ib_type, std::unordered_set &aligned_structs); + void mark_scalar_layout_structs(const SPIRType &ib_type); + void mark_struct_members_packed(const SPIRType &type); + void 
ensure_member_packing_rules_msl(SPIRType &ib_type, uint32_t index); + bool validate_member_packing_rules_msl(const SPIRType &type, uint32_t index) const; + std::string get_argument_address_space(const SPIRVariable &argument); + std::string get_type_address_space(const SPIRType &type, uint32_t id, bool argument = false); + const char *to_restrict(uint32_t id, bool space = true); + SPIRType &get_stage_in_struct_type(); + SPIRType &get_stage_out_struct_type(); + SPIRType &get_patch_stage_in_struct_type(); + SPIRType &get_patch_stage_out_struct_type(); + std::string get_tess_factor_struct_name(); + void emit_atomic_func_op(uint32_t result_type, uint32_t result_id, const char *op, uint32_t mem_order_1, + uint32_t mem_order_2, bool has_mem_order_2, uint32_t op0, uint32_t op1 = 0, + bool op1_is_pointer = false, bool op1_is_literal = false, uint32_t op2 = 0); + const char *get_memory_order(uint32_t spv_mem_sem); + void add_pragma_line(const std::string &line); + void add_typedef_line(const std::string &line); + void emit_barrier(uint32_t id_exe_scope, uint32_t id_mem_scope, uint32_t id_mem_sem); + void emit_array_copy(const std::string &lhs, uint32_t rhs_id, spv::StorageClass lhs_storage, + spv::StorageClass rhs_storage) override; + void build_implicit_builtins(); + uint32_t build_constant_uint_array_pointer(); + void emit_entry_point_declarations() override; + uint32_t builtin_frag_coord_id = 0; + uint32_t builtin_sample_id_id = 0; + uint32_t builtin_vertex_idx_id = 0; + uint32_t builtin_base_vertex_id = 0; + uint32_t builtin_instance_idx_id = 0; + uint32_t builtin_base_instance_id = 0; + uint32_t builtin_view_idx_id = 0; + uint32_t builtin_layer_id = 0; + uint32_t builtin_invocation_id_id = 0; + uint32_t builtin_primitive_id_id = 0; + uint32_t builtin_subgroup_invocation_id_id = 0; + uint32_t builtin_subgroup_size_id = 0; + uint32_t builtin_dispatch_base_id = 0; + uint32_t swizzle_buffer_id = 0; + uint32_t buffer_size_buffer_id = 0; + uint32_t view_mask_buffer_id = 0; + uint32_t dynamic_offsets_buffer_id = 0; + + void bitcast_to_builtin_store(uint32_t target_id, std::string &expr, const SPIRType &expr_type) override; + void bitcast_from_builtin_load(uint32_t source_id, std::string &expr, const SPIRType &expr_type) override; + void emit_store_statement(uint32_t lhs_expression, uint32_t rhs_expression) override; + + void analyze_sampled_image_usage(); + + bool emit_tessellation_access_chain(const uint32_t *ops, uint32_t length); + bool emit_tessellation_io_load(uint32_t result_type, uint32_t id, uint32_t ptr); + bool is_out_of_bounds_tessellation_level(uint32_t id_lhs); + + void ensure_builtin(spv::StorageClass storage, spv::BuiltIn builtin); + + void mark_implicit_builtin(spv::StorageClass storage, spv::BuiltIn builtin, uint32_t id); + + std::string convert_to_f32(const std::string &expr, uint32_t components); + + Options msl_options; + std::set spv_function_implementations; + std::unordered_map vtx_attrs_by_location; + std::unordered_map vtx_attrs_by_builtin; + std::unordered_set vtx_attrs_in_use; + std::unordered_map fragment_output_components; + std::set pragma_lines; + std::set typedef_lines; + SmallVector vars_needing_early_declaration; + + struct SetBindingPair + { + uint32_t desc_set; + uint32_t binding; + bool operator==(const SetBindingPair &other) const; + bool operator<(const SetBindingPair &other) const; + }; + + struct StageSetBinding + { + spv::ExecutionModel model; + uint32_t desc_set; + uint32_t binding; + bool operator==(const StageSetBinding &other) const; + }; + + struct 
InternalHasher + { + size_t operator()(const SetBindingPair &value) const; + size_t operator()(const StageSetBinding &value) const; + }; + + std::unordered_map, InternalHasher> resource_bindings; + + uint32_t next_metal_resource_index_buffer = 0; + uint32_t next_metal_resource_index_texture = 0; + uint32_t next_metal_resource_index_sampler = 0; + + uint32_t next_metal_resource_ids[kMaxArgumentBuffers]; + + VariableID stage_in_var_id = 0; + VariableID stage_out_var_id = 0; + VariableID patch_stage_in_var_id = 0; + VariableID patch_stage_out_var_id = 0; + VariableID stage_in_ptr_var_id = 0; + VariableID stage_out_ptr_var_id = 0; + + + enum class TriState + { + Neutral, + No, + Yes + }; + TriState needs_base_vertex_arg = TriState::Neutral; + TriState needs_base_instance_arg = TriState::Neutral; + + bool has_sampled_images = false; + bool builtin_declaration = false; + bool use_builtin_array = false; + bool is_rasterization_disabled = false; + bool capture_output_to_buffer = false; + bool needs_swizzle_buffer_def = false; + bool used_swizzle_buffer = false; + bool added_builtin_tess_level = false; + bool needs_subgroup_invocation_id = false; + std::string qual_pos_var_name; + std::string stage_in_var_name = "in"; + std::string stage_out_var_name = "out"; + std::string patch_stage_in_var_name = "patchIn"; + std::string patch_stage_out_var_name = "patchOut"; + std::string sampler_name_suffix = "Smplr"; + std::string swizzle_name_suffix = "Swzl"; + std::string buffer_size_name_suffix = "BufferSize"; + std::string plane_name_suffix = "Plane"; + std::string input_wg_var_name = "gl_in"; + std::string output_buffer_var_name = "spvOut"; + std::string patch_output_buffer_var_name = "spvPatchOut"; + std::string tess_factor_buffer_var_name = "spvTessLevel"; + spv::Op previous_instruction_opcode = spv::OpNop; + + + std::map constexpr_samplers_by_id; + std::unordered_map constexpr_samplers_by_binding; + const MSLConstexprSampler *find_constexpr_sampler(uint32_t id) const; + + std::unordered_set buffers_requiring_array_length; + SmallVector buffer_arrays; + std::unordered_set atomic_image_vars; + + + std::map> buffers_requiring_dynamic_offset; + + uint32_t argument_buffer_ids[kMaxArgumentBuffers]; + uint32_t argument_buffer_discrete_mask = 0; + uint32_t argument_buffer_device_storage_mask = 0; + + void analyze_argument_buffers(); + bool descriptor_set_is_argument_buffer(uint32_t desc_set) const; + + uint32_t get_target_components_for_fragment_location(uint32_t location) const; + uint32_t build_extended_vector_type(uint32_t type_id, uint32_t components); + + bool suppress_missing_prototypes = false; + + void add_spv_func_and_recompile(SPVFuncImpl spv_func); + + + struct OpCodePreprocessor : OpcodeHandler + { + OpCodePreprocessor(CompilerMSL &compiler_) + : compiler(compiler_) + { + } + + bool handle(spv::Op opcode, const uint32_t *args, uint32_t length) override; + CompilerMSL::SPVFuncImpl get_spv_func_impl(spv::Op opcode, const uint32_t *args); + void check_resource_write(uint32_t var_id); + + CompilerMSL &compiler; + std::unordered_map result_types; + std::unordered_map image_pointers; + bool suppress_missing_prototypes = false; + bool uses_atomics = false; + bool uses_resource_write = false; + bool needs_subgroup_invocation_id = false; + }; + + + struct SampledImageScanner : OpcodeHandler + { + SampledImageScanner(CompilerMSL &compiler_) + : compiler(compiler_) + { + } + + bool handle(spv::Op opcode, const uint32_t *args, uint32_t) override; + + CompilerMSL &compiler; + }; + + + + + struct MemberSorter + 
{ + enum SortAspect + { + Location, + LocationReverse, + Offset, + OffsetThenLocationReverse, + Alphabetical + }; + + void sort(); + bool operator()(uint32_t mbr_idx1, uint32_t mbr_idx2); + MemberSorter(SPIRType &t, Meta &m, SortAspect sa); + + SPIRType &type; + Meta &meta; + SortAspect sort_aspect; + }; +}; +} + +#endif diff --git a/third_party/rust/spirv_cross/src/vendor/SPIRV-Cross/spirv_parser.cpp b/third_party/rust/spirv_cross/src/vendor/SPIRV-Cross/spirv_parser.cpp new file mode 100644 index 000000000000..997575f75379 --- /dev/null +++ b/third_party/rust/spirv_cross/src/vendor/SPIRV-Cross/spirv_parser.cpp @@ -0,0 +1,1185 @@ + + + + + + + + + + + + + + + + +#include "spirv_parser.hpp" +#include + +using namespace std; +using namespace spv; + +namespace SPIRV_CROSS_NAMESPACE +{ +Parser::Parser(vector spirv) +{ + ir.spirv = move(spirv); +} + +Parser::Parser(const uint32_t *spirv_data, size_t word_count) +{ + ir.spirv = vector(spirv_data, spirv_data + word_count); +} + +static bool decoration_is_string(Decoration decoration) +{ + switch (decoration) + { + case DecorationHlslSemanticGOOGLE: + return true; + + default: + return false; + } +} + +static inline uint32_t swap_endian(uint32_t v) +{ + return ((v >> 24) & 0x000000ffu) | ((v >> 8) & 0x0000ff00u) | ((v << 8) & 0x00ff0000u) | ((v << 24) & 0xff000000u); +} + +static bool is_valid_spirv_version(uint32_t version) +{ + switch (version) + { + + case 99: + case 0x10000: + case 0x10100: + case 0x10200: + case 0x10300: + case 0x10400: + case 0x10500: + return true; + + default: + return false; + } +} + +void Parser::parse() +{ + auto &spirv = ir.spirv; + + auto len = spirv.size(); + if (len < 5) + SPIRV_CROSS_THROW("SPIRV file too small."); + + auto s = spirv.data(); + + + if (s[0] == swap_endian(MagicNumber)) + transform(begin(spirv), end(spirv), begin(spirv), [](uint32_t c) { return swap_endian(c); }); + + if (s[0] != MagicNumber || !is_valid_spirv_version(s[1])) + SPIRV_CROSS_THROW("Invalid SPIRV format."); + + uint32_t bound = s[3]; + ir.set_id_bounds(bound); + + uint32_t offset = 5; + + SmallVector instructions; + while (offset < len) + { + Instruction instr = {}; + instr.op = spirv[offset] & 0xffff; + instr.count = (spirv[offset] >> 16) & 0xffff; + + if (instr.count == 0) + SPIRV_CROSS_THROW("SPIR-V instructions cannot consume 0 words. 
Invalid SPIR-V file."); + + instr.offset = offset + 1; + instr.length = instr.count - 1; + + offset += instr.count; + + if (offset > spirv.size()) + SPIRV_CROSS_THROW("SPIR-V instruction goes out of bounds."); + + instructions.push_back(instr); + } + + for (auto &i : instructions) + parse(i); + + if (current_function) + SPIRV_CROSS_THROW("Function was not terminated."); + if (current_block) + SPIRV_CROSS_THROW("Block was not terminated."); +} + +const uint32_t *Parser::stream(const Instruction &instr) const +{ + + + + if (!instr.length) + return nullptr; + + if (instr.offset + instr.length > ir.spirv.size()) + SPIRV_CROSS_THROW("Compiler::stream() out of range."); + return &ir.spirv[instr.offset]; +} + +static string extract_string(const vector &spirv, uint32_t offset) +{ + string ret; + for (uint32_t i = offset; i < spirv.size(); i++) + { + uint32_t w = spirv[i]; + + for (uint32_t j = 0; j < 4; j++, w >>= 8) + { + char c = w & 0xff; + if (c == '\0') + return ret; + ret += c; + } + } + + SPIRV_CROSS_THROW("String was not terminated before EOF"); +} + +void Parser::parse(const Instruction &instruction) +{ + auto *ops = stream(instruction); + auto op = static_cast(instruction.op); + uint32_t length = instruction.length; + + switch (op) + { + case OpSourceContinued: + case OpSourceExtension: + case OpNop: + case OpModuleProcessed: + break; + + case OpString: + { + set(ops[0], extract_string(ir.spirv, instruction.offset + 1)); + break; + } + + case OpMemoryModel: + ir.addressing_model = static_cast(ops[0]); + ir.memory_model = static_cast(ops[1]); + break; + + case OpSource: + { + auto lang = static_cast(ops[0]); + switch (lang) + { + case SourceLanguageESSL: + ir.source.es = true; + ir.source.version = ops[1]; + ir.source.known = true; + ir.source.hlsl = false; + break; + + case SourceLanguageGLSL: + ir.source.es = false; + ir.source.version = ops[1]; + ir.source.known = true; + ir.source.hlsl = false; + break; + + case SourceLanguageHLSL: + + ir.source.es = false; + ir.source.version = 450; + ir.source.known = true; + ir.source.hlsl = true; + break; + + default: + ir.source.known = false; + break; + } + break; + } + + case OpUndef: + { + uint32_t result_type = ops[0]; + uint32_t id = ops[1]; + set(id, result_type); + if (current_block) + current_block->ops.push_back(instruction); + break; + } + + case OpCapability: + { + uint32_t cap = ops[0]; + if (cap == CapabilityKernel) + SPIRV_CROSS_THROW("Kernel capability not supported."); + + ir.declared_capabilities.push_back(static_cast(ops[0])); + break; + } + + case OpExtension: + { + auto ext = extract_string(ir.spirv, instruction.offset); + ir.declared_extensions.push_back(move(ext)); + break; + } + + case OpExtInstImport: + { + uint32_t id = ops[0]; + auto ext = extract_string(ir.spirv, instruction.offset + 1); + if (ext == "GLSL.std.450") + set(id, SPIRExtension::GLSL); + else if (ext == "DebugInfo") + set(id, SPIRExtension::SPV_debug_info); + else if (ext == "SPV_AMD_shader_ballot") + set(id, SPIRExtension::SPV_AMD_shader_ballot); + else if (ext == "SPV_AMD_shader_explicit_vertex_parameter") + set(id, SPIRExtension::SPV_AMD_shader_explicit_vertex_parameter); + else if (ext == "SPV_AMD_shader_trinary_minmax") + set(id, SPIRExtension::SPV_AMD_shader_trinary_minmax); + else if (ext == "SPV_AMD_gcn_shader") + set(id, SPIRExtension::SPV_AMD_gcn_shader); + else + set(id, SPIRExtension::Unsupported); + + + + break; + } + + case OpExtInst: + { + + if (current_block) + current_block->ops.push_back(instruction); + break; + } + + case OpEntryPoint: + { 
+ auto itr = + ir.entry_points.insert(make_pair(ops[1], SPIREntryPoint(ops[1], static_cast(ops[0]), + extract_string(ir.spirv, instruction.offset + 2)))); + auto &e = itr.first->second; + + + uint32_t strlen_words = uint32_t((e.name.size() + 1 + 3) >> 2); + + for (uint32_t i = strlen_words + 2; i < instruction.length; i++) + e.interface_variables.push_back(ops[i]); + + + ir.set_name(ops[1], e.name); + + + if (!ir.default_entry_point) + ir.default_entry_point = ops[1]; + break; + } + + case OpExecutionMode: + { + auto &execution = ir.entry_points[ops[0]]; + auto mode = static_cast(ops[1]); + execution.flags.set(mode); + + switch (mode) + { + case ExecutionModeInvocations: + execution.invocations = ops[2]; + break; + + case ExecutionModeLocalSize: + execution.workgroup_size.x = ops[2]; + execution.workgroup_size.y = ops[3]; + execution.workgroup_size.z = ops[4]; + break; + + case ExecutionModeOutputVertices: + execution.output_vertices = ops[2]; + break; + + default: + break; + } + break; + } + + case OpName: + { + uint32_t id = ops[0]; + ir.set_name(id, extract_string(ir.spirv, instruction.offset + 1)); + break; + } + + case OpMemberName: + { + uint32_t id = ops[0]; + uint32_t member = ops[1]; + ir.set_member_name(id, member, extract_string(ir.spirv, instruction.offset + 2)); + break; + } + + case OpDecorationGroup: + { + + + break; + } + + case OpGroupDecorate: + { + uint32_t group_id = ops[0]; + auto &decorations = ir.meta[group_id].decoration; + auto &flags = decorations.decoration_flags; + + + + for (uint32_t i = 1; i < length; i++) + { + uint32_t target = ops[i]; + flags.for_each_bit([&](uint32_t bit) { + auto decoration = static_cast(bit); + + if (decoration_is_string(decoration)) + { + ir.set_decoration_string(target, decoration, ir.get_decoration_string(group_id, decoration)); + } + else + { + ir.meta[target].decoration_word_offset[decoration] = + ir.meta[group_id].decoration_word_offset[decoration]; + ir.set_decoration(target, decoration, ir.get_decoration(group_id, decoration)); + } + }); + } + break; + } + + case OpGroupMemberDecorate: + { + uint32_t group_id = ops[0]; + auto &flags = ir.meta[group_id].decoration.decoration_flags; + + + + for (uint32_t i = 1; i + 1 < length; i += 2) + { + uint32_t target = ops[i + 0]; + uint32_t index = ops[i + 1]; + flags.for_each_bit([&](uint32_t bit) { + auto decoration = static_cast(bit); + + if (decoration_is_string(decoration)) + ir.set_member_decoration_string(target, index, decoration, + ir.get_decoration_string(group_id, decoration)); + else + ir.set_member_decoration(target, index, decoration, ir.get_decoration(group_id, decoration)); + }); + } + break; + } + + case OpDecorate: + case OpDecorateId: + { + + + uint32_t id = ops[0]; + + auto decoration = static_cast(ops[1]); + if (length >= 3) + { + ir.meta[id].decoration_word_offset[decoration] = uint32_t(&ops[2] - ir.spirv.data()); + ir.set_decoration(id, decoration, ops[2]); + } + else + ir.set_decoration(id, decoration); + + break; + } + + case OpDecorateStringGOOGLE: + { + uint32_t id = ops[0]; + auto decoration = static_cast(ops[1]); + ir.set_decoration_string(id, decoration, extract_string(ir.spirv, instruction.offset + 2)); + break; + } + + case OpMemberDecorate: + { + uint32_t id = ops[0]; + uint32_t member = ops[1]; + auto decoration = static_cast(ops[2]); + if (length >= 4) + ir.set_member_decoration(id, member, decoration, ops[3]); + else + ir.set_member_decoration(id, member, decoration); + break; + } + + case OpMemberDecorateStringGOOGLE: + { + uint32_t id = ops[0]; + 
uint32_t member = ops[1]; + auto decoration = static_cast(ops[2]); + ir.set_member_decoration_string(id, member, decoration, extract_string(ir.spirv, instruction.offset + 3)); + break; + } + + + case OpTypeVoid: + { + uint32_t id = ops[0]; + auto &type = set(id); + type.basetype = SPIRType::Void; + break; + } + + case OpTypeBool: + { + uint32_t id = ops[0]; + auto &type = set(id); + type.basetype = SPIRType::Boolean; + type.width = 1; + break; + } + + case OpTypeFloat: + { + uint32_t id = ops[0]; + uint32_t width = ops[1]; + auto &type = set(id); + if (width == 64) + type.basetype = SPIRType::Double; + else if (width == 32) + type.basetype = SPIRType::Float; + else if (width == 16) + type.basetype = SPIRType::Half; + else + SPIRV_CROSS_THROW("Unrecognized bit-width of floating point type."); + type.width = width; + break; + } + + case OpTypeInt: + { + uint32_t id = ops[0]; + uint32_t width = ops[1]; + bool signedness = ops[2] != 0; + auto &type = set(id); + type.basetype = signedness ? to_signed_basetype(width) : to_unsigned_basetype(width); + type.width = width; + break; + } + + + + + case OpTypeVector: + { + uint32_t id = ops[0]; + uint32_t vecsize = ops[2]; + + auto &base = get(ops[1]); + auto &vecbase = set(id); + + vecbase = base; + vecbase.vecsize = vecsize; + vecbase.self = id; + vecbase.parent_type = ops[1]; + break; + } + + case OpTypeMatrix: + { + uint32_t id = ops[0]; + uint32_t colcount = ops[2]; + + auto &base = get(ops[1]); + auto &matrixbase = set(id); + + matrixbase = base; + matrixbase.columns = colcount; + matrixbase.self = id; + matrixbase.parent_type = ops[1]; + break; + } + + case OpTypeArray: + { + uint32_t id = ops[0]; + auto &arraybase = set(id); + + uint32_t tid = ops[1]; + auto &base = get(tid); + + arraybase = base; + arraybase.parent_type = tid; + + uint32_t cid = ops[2]; + ir.mark_used_as_array_length(cid); + auto *c = maybe_get(cid); + bool literal = c && !c->specialization; + + arraybase.array_size_literal.push_back(literal); + arraybase.array.push_back(literal ? c->scalar() : cid); + + break; + } + + case OpTypeRuntimeArray: + { + uint32_t id = ops[0]; + + auto &base = get(ops[1]); + auto &arraybase = set(id); + + arraybase = base; + arraybase.array.push_back(0); + arraybase.array_size_literal.push_back(true); + arraybase.parent_type = ops[1]; + + break; + } + + case OpTypeImage: + { + uint32_t id = ops[0]; + auto &type = set(id); + type.basetype = SPIRType::Image; + type.image.type = ops[1]; + type.image.dim = static_cast(ops[2]); + type.image.depth = ops[3] == 1; + type.image.arrayed = ops[4] != 0; + type.image.ms = ops[5] != 0; + type.image.sampled = ops[6]; + type.image.format = static_cast(ops[7]); + type.image.access = (length >= 9) ? 
static_cast(ops[8]) : AccessQualifierMax; + break; + } + + case OpTypeSampledImage: + { + uint32_t id = ops[0]; + uint32_t imagetype = ops[1]; + auto &type = set(id); + type = get(imagetype); + type.basetype = SPIRType::SampledImage; + type.self = id; + break; + } + + case OpTypeSampler: + { + uint32_t id = ops[0]; + auto &type = set(id); + type.basetype = SPIRType::Sampler; + break; + } + + case OpTypePointer: + { + uint32_t id = ops[0]; + + auto &base = get(ops[2]); + auto &ptrbase = set(id); + + ptrbase = base; + ptrbase.pointer = true; + ptrbase.pointer_depth++; + ptrbase.storage = static_cast(ops[1]); + + if (ptrbase.storage == StorageClassAtomicCounter) + ptrbase.basetype = SPIRType::AtomicCounter; + + ptrbase.parent_type = ops[2]; + + + break; + } + + case OpTypeForwardPointer: + { + uint32_t id = ops[0]; + auto &ptrbase = set(id); + ptrbase.pointer = true; + ptrbase.pointer_depth++; + ptrbase.storage = static_cast(ops[1]); + + if (ptrbase.storage == StorageClassAtomicCounter) + ptrbase.basetype = SPIRType::AtomicCounter; + + break; + } + + case OpTypeStruct: + { + uint32_t id = ops[0]; + auto &type = set(id); + type.basetype = SPIRType::Struct; + for (uint32_t i = 1; i < length; i++) + type.member_types.push_back(ops[i]); + + + + + + + + + + + + + bool consider_aliasing = !ir.get_name(type.self).empty(); + if (consider_aliasing) + { + for (auto &other : global_struct_cache) + { + if (ir.get_name(type.self) == ir.get_name(other) && + types_are_logically_equivalent(type, get(other))) + { + type.type_alias = other; + break; + } + } + + if (type.type_alias == TypeID(0)) + global_struct_cache.push_back(id); + } + break; + } + + case OpTypeFunction: + { + uint32_t id = ops[0]; + uint32_t ret = ops[1]; + + auto &func = set(id, ret); + for (uint32_t i = 2; i < length; i++) + func.parameter_types.push_back(ops[i]); + break; + } + + case OpTypeAccelerationStructureNV: + { + uint32_t id = ops[0]; + auto &type = set(id); + type.basetype = SPIRType::AccelerationStructureNV; + break; + } + + + + case OpVariable: + { + uint32_t type = ops[0]; + uint32_t id = ops[1]; + auto storage = static_cast(ops[2]); + uint32_t initializer = length == 4 ? 
ops[3] : 0; + + if (storage == StorageClassFunction) + { + if (!current_function) + SPIRV_CROSS_THROW("No function currently in scope"); + current_function->add_local_variable(id); + } + + set(id, type, storage, initializer); + + + auto &ttype = get(type); + if (ttype.basetype == SPIRType::BaseType::Image) + { + ir.set_decoration(id, DecorationNonWritable); + ir.set_decoration(id, DecorationNonReadable); + } + + break; + } + + + + + + + case OpPhi: + { + if (!current_function) + SPIRV_CROSS_THROW("No function currently in scope"); + if (!current_block) + SPIRV_CROSS_THROW("No block currently in scope"); + + uint32_t result_type = ops[0]; + uint32_t id = ops[1]; + + + auto &var = set(id, result_type, spv::StorageClassFunction); + var.phi_variable = true; + + current_function->add_local_variable(id); + + for (uint32_t i = 2; i + 2 <= length; i += 2) + current_block->phi_variables.push_back({ ops[i], ops[i + 1], id }); + break; + } + + + case OpSpecConstant: + case OpConstant: + { + uint32_t id = ops[1]; + auto &type = get(ops[0]); + + if (type.width > 32) + set(id, ops[0], ops[2] | (uint64_t(ops[3]) << 32), op == OpSpecConstant); + else + set(id, ops[0], ops[2], op == OpSpecConstant); + break; + } + + case OpSpecConstantFalse: + case OpConstantFalse: + { + uint32_t id = ops[1]; + set(id, ops[0], uint32_t(0), op == OpSpecConstantFalse); + break; + } + + case OpSpecConstantTrue: + case OpConstantTrue: + { + uint32_t id = ops[1]; + set(id, ops[0], uint32_t(1), op == OpSpecConstantTrue); + break; + } + + case OpConstantNull: + { + uint32_t id = ops[1]; + uint32_t type = ops[0]; + make_constant_null(id, type); + break; + } + + case OpSpecConstantComposite: + case OpConstantComposite: + { + uint32_t id = ops[1]; + uint32_t type = ops[0]; + + auto &ctype = get(type); + + + + + if (ctype.basetype == SPIRType::Struct || !ctype.array.empty()) + { + set(id, type, ops + 2, length - 2, op == OpSpecConstantComposite); + } + else + { + uint32_t elements = length - 2; + if (elements > 4) + SPIRV_CROSS_THROW("OpConstantComposite only supports 1, 2, 3 and 4 elements."); + + SPIRConstant remapped_constant_ops[4]; + const SPIRConstant *c[4]; + for (uint32_t i = 0; i < elements; i++) + { + + + + auto *constant_op = maybe_get(ops[2 + i]); + auto *undef_op = maybe_get(ops[2 + i]); + if (constant_op) + { + if (op == OpConstantComposite) + SPIRV_CROSS_THROW("Specialization constant operation used in OpConstantComposite."); + + remapped_constant_ops[i].make_null(get(constant_op->basetype)); + remapped_constant_ops[i].self = constant_op->self; + remapped_constant_ops[i].constant_type = constant_op->basetype; + remapped_constant_ops[i].specialization = true; + c[i] = &remapped_constant_ops[i]; + } + else if (undef_op) + { + + remapped_constant_ops[i].make_null(get(undef_op->basetype)); + remapped_constant_ops[i].constant_type = undef_op->basetype; + c[i] = &remapped_constant_ops[i]; + } + else + c[i] = &get(ops[2 + i]); + } + set(id, type, c, elements, op == OpSpecConstantComposite); + } + break; + } + + + case OpFunction: + { + uint32_t res = ops[0]; + uint32_t id = ops[1]; + + uint32_t type = ops[3]; + + if (current_function) + SPIRV_CROSS_THROW("Must end a function before starting a new one!"); + + current_function = &set(id, res, type); + break; + } + + case OpFunctionParameter: + { + uint32_t type = ops[0]; + uint32_t id = ops[1]; + + if (!current_function) + SPIRV_CROSS_THROW("Must be in a function!"); + + current_function->add_parameter(type, id); + set(id, type, StorageClassFunction); + break; + } + + case 
OpFunctionEnd: + { + if (current_block) + { + + SPIRV_CROSS_THROW( + "Cannot end a function before ending the current block.\n" + "Likely cause: If this SPIR-V was created from glslang HLSL, make sure the entry point is valid."); + } + current_function = nullptr; + break; + } + + + case OpLabel: + { + + if (!current_function) + SPIRV_CROSS_THROW("Blocks cannot exist outside functions!"); + + uint32_t id = ops[0]; + + current_function->blocks.push_back(id); + if (!current_function->entry_block) + current_function->entry_block = id; + + if (current_block) + SPIRV_CROSS_THROW("Cannot start a block before ending the current block."); + + current_block = &set(id); + break; + } + + + case OpBranch: + { + if (!current_block) + SPIRV_CROSS_THROW("Trying to end a non-existing block."); + + uint32_t target = ops[0]; + current_block->terminator = SPIRBlock::Direct; + current_block->next_block = target; + current_block = nullptr; + break; + } + + case OpBranchConditional: + { + if (!current_block) + SPIRV_CROSS_THROW("Trying to end a non-existing block."); + + current_block->condition = ops[0]; + current_block->true_block = ops[1]; + current_block->false_block = ops[2]; + + current_block->terminator = SPIRBlock::Select; + current_block = nullptr; + break; + } + + case OpSwitch: + { + if (!current_block) + SPIRV_CROSS_THROW("Trying to end a non-existing block."); + + current_block->terminator = SPIRBlock::MultiSelect; + + current_block->condition = ops[0]; + current_block->default_block = ops[1]; + + for (uint32_t i = 2; i + 2 <= length; i += 2) + current_block->cases.push_back({ ops[i], ops[i + 1] }); + + + ir.block_meta[current_block->next_block] |= ParsedIR::BLOCK_META_MULTISELECT_MERGE_BIT; + + current_block = nullptr; + break; + } + + case OpKill: + { + if (!current_block) + SPIRV_CROSS_THROW("Trying to end a non-existing block."); + current_block->terminator = SPIRBlock::Kill; + current_block = nullptr; + break; + } + + case OpReturn: + { + if (!current_block) + SPIRV_CROSS_THROW("Trying to end a non-existing block."); + current_block->terminator = SPIRBlock::Return; + current_block = nullptr; + break; + } + + case OpReturnValue: + { + if (!current_block) + SPIRV_CROSS_THROW("Trying to end a non-existing block."); + current_block->terminator = SPIRBlock::Return; + current_block->return_value = ops[0]; + current_block = nullptr; + break; + } + + case OpUnreachable: + { + if (!current_block) + SPIRV_CROSS_THROW("Trying to end a non-existing block."); + current_block->terminator = SPIRBlock::Unreachable; + current_block = nullptr; + break; + } + + case OpSelectionMerge: + { + if (!current_block) + SPIRV_CROSS_THROW("Trying to modify a non-existing block."); + + current_block->next_block = ops[0]; + current_block->merge = SPIRBlock::MergeSelection; + ir.block_meta[current_block->next_block] |= ParsedIR::BLOCK_META_SELECTION_MERGE_BIT; + + if (length >= 2) + { + if (ops[1] & SelectionControlFlattenMask) + current_block->hint = SPIRBlock::HintFlatten; + else if (ops[1] & SelectionControlDontFlattenMask) + current_block->hint = SPIRBlock::HintDontFlatten; + } + break; + } + + case OpLoopMerge: + { + if (!current_block) + SPIRV_CROSS_THROW("Trying to modify a non-existing block."); + + current_block->merge_block = ops[0]; + current_block->continue_block = ops[1]; + current_block->merge = SPIRBlock::MergeLoop; + + ir.block_meta[current_block->self] |= ParsedIR::BLOCK_META_LOOP_HEADER_BIT; + ir.block_meta[current_block->merge_block] |= ParsedIR::BLOCK_META_LOOP_MERGE_BIT; + + 
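// Reduced sketch of the invariant the block cases above maintain: at most one
// block is open at a time. OpLabel opens it, and every terminator (OpBranch,
// OpSwitch, OpReturn, OpKill, OpUnreachable, ...) seals it by clearing
// current_block, which is why each case begins by checking that pointer.
// The types here are hypothetical stand-ins:
#include <stdexcept>

struct Block
{
    int terminator = 0;
};

struct BlockTracker
{
    Block *current = nullptr;
    Block storage;

    void on_label()
    {
        if (current)
            throw std::runtime_error("Cannot start a block before ending the current block.");
        current = &storage;
    }

    void on_terminator(int kind)
    {
        if (!current)
            throw std::runtime_error("Trying to end a non-existing block.");
        current->terminator = kind;
        current = nullptr; // sealed; only a new OpLabel may open another block
    }
};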
ir.continue_block_to_loop_header[current_block->continue_block] = BlockID(current_block->self); + + + + + if (current_block->continue_block != BlockID(current_block->self)) + ir.block_meta[current_block->continue_block] |= ParsedIR::BLOCK_META_CONTINUE_BIT; + + if (length >= 3) + { + if (ops[2] & LoopControlUnrollMask) + current_block->hint = SPIRBlock::HintUnroll; + else if (ops[2] & LoopControlDontUnrollMask) + current_block->hint = SPIRBlock::HintDontUnroll; + } + break; + } + + case OpSpecConstantOp: + { + if (length < 3) + SPIRV_CROSS_THROW("OpSpecConstantOp not enough arguments."); + + uint32_t result_type = ops[0]; + uint32_t id = ops[1]; + auto spec_op = static_cast(ops[2]); + + set(id, result_type, spec_op, ops + 3, length - 3); + break; + } + + case OpLine: + { + + + + if (current_block) + current_block->ops.push_back(instruction); + + + + + if (current_function) + { + + if (current_function->entry_line.file_id == 0) + { + current_function->entry_line.file_id = ops[0]; + current_function->entry_line.line_literal = ops[1]; + } + } + break; + } + + case OpNoLine: + { + + if (current_block) + current_block->ops.push_back(instruction); + break; + } + + + default: + { + if (!current_block) + SPIRV_CROSS_THROW("Currently no block to insert opcode."); + + current_block->ops.push_back(instruction); + break; + } + } +} + +bool Parser::types_are_logically_equivalent(const SPIRType &a, const SPIRType &b) const +{ + if (a.basetype != b.basetype) + return false; + if (a.width != b.width) + return false; + if (a.vecsize != b.vecsize) + return false; + if (a.columns != b.columns) + return false; + if (a.array.size() != b.array.size()) + return false; + + size_t array_count = a.array.size(); + if (array_count && memcmp(a.array.data(), b.array.data(), array_count * sizeof(uint32_t)) != 0) + return false; + + if (a.basetype == SPIRType::Image || a.basetype == SPIRType::SampledImage) + { + if (memcmp(&a.image, &b.image, sizeof(SPIRType::Image)) != 0) + return false; + } + + if (a.member_types.size() != b.member_types.size()) + return false; + + size_t member_types = a.member_types.size(); + for (size_t i = 0; i < member_types; i++) + { + if (!types_are_logically_equivalent(get(a.member_types[i]), get(b.member_types[i]))) + return false; + } + + return true; +} + +bool Parser::variable_storage_is_aliased(const SPIRVariable &v) const +{ + auto &type = get(v.basetype); + + auto *type_meta = ir.find_meta(type.self); + + bool ssbo = v.storage == StorageClassStorageBuffer || + (type_meta && type_meta->decoration.decoration_flags.get(DecorationBufferBlock)); + bool image = type.basetype == SPIRType::Image; + bool counter = type.basetype == SPIRType::AtomicCounter; + + bool is_restrict; + if (ssbo) + is_restrict = ir.get_buffer_block_flags(v).get(DecorationRestrict); + else + is_restrict = ir.has_decoration(v.self, DecorationRestrict); + + return !is_restrict && (ssbo || image || counter); +} + +void Parser::make_constant_null(uint32_t id, uint32_t type) +{ + auto &constant_type = get(type); + + if (constant_type.pointer) + { + auto &constant = set(id, type); + constant.make_null(constant_type); + } + else if (!constant_type.array.empty()) + { + assert(constant_type.parent_type); + uint32_t parent_id = ir.increase_bound_by(1); + make_constant_null(parent_id, constant_type.parent_type); + + if (!constant_type.array_size_literal.back()) + SPIRV_CROSS_THROW("Array size of OpConstantNull must be a literal."); + + SmallVector elements(constant_type.array.back()); + for (uint32_t i = 0; i < 
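// Sketch of the struct-aliasing test driven by types_are_logically_equivalent
// above: two independently declared struct types alias when their (non-empty)
// names match and their member trees are structurally identical. Toy type
// record with hypothetical names:
#include <cstddef>
#include <cstdint>
#include <string>
#include <vector>

struct ToyType
{
    std::string name;
    uint32_t width = 0;
    std::vector<ToyType> members;
};

static bool logically_equivalent(const ToyType &a, const ToyType &b)
{
    if (a.width != b.width || a.members.size() != b.members.size())
        return false;
    for (size_t i = 0; i < a.members.size(); i++)
        if (!logically_equivalent(a.members[i], b.members[i]))
            return false;
    return true; // names below the top level are deliberately ignored
}

static bool should_alias(const ToyType &a, const ToyType &b)
{
    return !a.name.empty() && a.name == b.name && logically_equivalent(a, b);
}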
constant_type.array.back(); i++) + elements[i] = parent_id; + set(id, type, elements.data(), uint32_t(elements.size()), false); + } + else if (!constant_type.member_types.empty()) + { + uint32_t member_ids = ir.increase_bound_by(uint32_t(constant_type.member_types.size())); + SmallVector elements(constant_type.member_types.size()); + for (uint32_t i = 0; i < constant_type.member_types.size(); i++) + { + make_constant_null(member_ids + i, constant_type.member_types[i]); + elements[i] = member_ids + i; + } + set(id, type, elements.data(), uint32_t(elements.size()), false); + } + else + { + auto &constant = set(id, type); + constant.make_null(constant_type); + } +} + +} diff --git a/third_party/rust/spirv_cross/src/vendor/SPIRV-Cross/spirv_parser.hpp b/third_party/rust/spirv_cross/src/vendor/SPIRV-Cross/spirv_parser.hpp new file mode 100644 index 000000000000..b586bbb3f6b9 --- /dev/null +++ b/third_party/rust/spirv_cross/src/vendor/SPIRV-Cross/spirv_parser.hpp @@ -0,0 +1,94 @@ + + + + + + + + + + + + + + + + +#ifndef SPIRV_CROSS_PARSER_HPP +#define SPIRV_CROSS_PARSER_HPP + +#include "spirv_cross_parsed_ir.hpp" +#include + +namespace SPIRV_CROSS_NAMESPACE +{ +class Parser +{ +public: + Parser(const uint32_t *spirv_data, size_t word_count); + Parser(std::vector spirv); + + void parse(); + + ParsedIR &get_parsed_ir() + { + return ir; + } + +private: + ParsedIR ir; + SPIRFunction *current_function = nullptr; + SPIRBlock *current_block = nullptr; + + void parse(const Instruction &instr); + const uint32_t *stream(const Instruction &instr) const; + + template + T &set(uint32_t id, P &&... args) + { + ir.add_typed_id(static_cast(T::type), id); + auto &var = variant_set(ir.ids[id], std::forward
(args)...); + var.self = id; + return var; + } + + template + T &get(uint32_t id) + { + return variant_get(ir.ids[id]); + } + + template + T *maybe_get(uint32_t id) + { + if (ir.ids[id].get_type() == static_cast(T::type)) + return &get(id); + else + return nullptr; + } + + template + const T &get(uint32_t id) const + { + return variant_get(ir.ids[id]); + } + + template + const T *maybe_get(uint32_t id) const + { + if (ir.ids[id].get_type() == T::type) + return &get(id); + else + return nullptr; + } + + + SmallVector global_struct_cache; + + bool types_are_logically_equivalent(const SPIRType &a, const SPIRType &b) const; + bool variable_storage_is_aliased(const SPIRVariable &v) const; + void make_constant_null(uint32_t id, uint32_t type); +}; +} + +#endif diff --git a/third_party/rust/spirv_cross/src/vendor/SPIRV-Cross/spirv_reflect.cpp b/third_party/rust/spirv_cross/src/vendor/SPIRV-Cross/spirv_reflect.cpp new file mode 100644 index 000000000000..aa42ef5c35a7 --- /dev/null +++ b/third_party/rust/spirv_cross/src/vendor/SPIRV-Cross/spirv_reflect.cpp @@ -0,0 +1,635 @@ + + + + + + + + + + + + + + + + +#include "spirv_reflect.hpp" +#include "spirv_glsl.hpp" +#include + +using namespace spv; +using namespace SPIRV_CROSS_NAMESPACE; +using namespace std; + +namespace simple_json +{ +enum class Type +{ + Object, + Array, +}; + +using State = std::pair; +using Stack = std::stack; + +class Stream +{ + Stack stack; + StringStream<> buffer; + uint32_t indent{ 0 }; + char current_locale_radix_character = '.'; + +public: + void set_current_locale_radix_character(char c) + { + current_locale_radix_character = c; + } + + void begin_json_object(); + void end_json_object(); + void emit_json_key(const std::string &key); + void emit_json_key_value(const std::string &key, const std::string &value); + void emit_json_key_value(const std::string &key, bool value); + void emit_json_key_value(const std::string &key, uint32_t value); + void emit_json_key_value(const std::string &key, int32_t value); + void emit_json_key_value(const std::string &key, float value); + void emit_json_key_object(const std::string &key); + void emit_json_key_array(const std::string &key); + + void begin_json_array(); + void end_json_array(); + void emit_json_array_value(const std::string &value); + void emit_json_array_value(uint32_t value); + void emit_json_array_value(bool value); + + std::string str() const + { + return buffer.str(); + } + +private: + inline void statement_indent() + { + for (uint32_t i = 0; i < indent; i++) + buffer << " "; + } + + template + inline void statement_inner(T &&t) + { + buffer << std::forward(t); + } + + template + inline void statement_inner(T &&t, Ts &&... ts) + { + buffer << std::forward(t); + statement_inner(std::forward(ts)...); + } + + template + inline void statement(Ts &&... ts) + { + statement_indent(); + statement_inner(std::forward(ts)...); + buffer << '\n'; + } + + template + void statement_no_return(Ts &&... 
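// The set/get/maybe_get templates above layer a typed facade over one flat id
// table: set() retags the slot with T's kind, get() assumes the tag is
// already right, and maybe_get() checks it first so a lookup of the wrong
// kind yields null instead of misreading the slot. A reduced model using
// std::variant, with hypothetical node kinds:
#include <cstdint>
#include <utility>
#include <variant>
#include <vector>

struct TypeNode { uint32_t width; };
struct ConstantNode { uint64_t value; };
using Slot = std::variant<std::monostate, TypeNode, ConstantNode>;

template <typename T, typename... P>
T &set_node(std::vector<Slot> &ids, uint32_t id, P &&... args)
{
    ids[id] = T{ std::forward<P>(args)... }; // overwrite the slot and its tag
    return std::get<T>(ids[id]);
}

template <typename T>
T *maybe_get_node(std::vector<Slot> &ids, uint32_t id)
{
    return std::get_if<T>(&ids[id]); // null when the slot holds another kind
}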
ts) + { + statement_indent(); + statement_inner(std::forward(ts)...); + } +}; +} + +using namespace simple_json; + + + +void Stream::begin_json_array() +{ + if (!stack.empty() && stack.top().second) + { + statement_inner(",\n"); + } + statement("["); + ++indent; + stack.emplace(Type::Array, false); +} + +void Stream::end_json_array() +{ + if (stack.empty() || stack.top().first != Type::Array) + SPIRV_CROSS_THROW("Invalid JSON state"); + if (stack.top().second) + { + statement_inner("\n"); + } + --indent; + statement_no_return("]"); + stack.pop(); + if (!stack.empty()) + { + stack.top().second = true; + } +} + +void Stream::emit_json_array_value(const std::string &value) +{ + if (stack.empty() || stack.top().first != Type::Array) + SPIRV_CROSS_THROW("Invalid JSON state"); + + if (stack.top().second) + statement_inner(",\n"); + + statement_no_return("\"", value, "\""); + stack.top().second = true; +} + +void Stream::emit_json_array_value(uint32_t value) +{ + if (stack.empty() || stack.top().first != Type::Array) + SPIRV_CROSS_THROW("Invalid JSON state"); + if (stack.top().second) + statement_inner(",\n"); + statement_no_return(std::to_string(value)); + stack.top().second = true; +} + +void Stream::emit_json_array_value(bool value) +{ + if (stack.empty() || stack.top().first != Type::Array) + SPIRV_CROSS_THROW("Invalid JSON state"); + if (stack.top().second) + statement_inner(",\n"); + statement_no_return(value ? "true" : "false"); + stack.top().second = true; +} + +void Stream::begin_json_object() +{ + if (!stack.empty() && stack.top().second) + { + statement_inner(",\n"); + } + statement("{"); + ++indent; + stack.emplace(Type::Object, false); +} + +void Stream::end_json_object() +{ + if (stack.empty() || stack.top().first != Type::Object) + SPIRV_CROSS_THROW("Invalid JSON state"); + if (stack.top().second) + { + statement_inner("\n"); + } + --indent; + statement_no_return("}"); + stack.pop(); + if (!stack.empty()) + { + stack.top().second = true; + } +} + +void Stream::emit_json_key(const std::string &key) +{ + if (stack.empty() || stack.top().first != Type::Object) + SPIRV_CROSS_THROW("Invalid JSON state"); + + if (stack.top().second) + statement_inner(",\n"); + statement_no_return("\"", key, "\" : "); + stack.top().second = true; +} + +void Stream::emit_json_key_value(const std::string &key, const std::string &value) +{ + emit_json_key(key); + statement_inner("\"", value, "\""); +} + +void Stream::emit_json_key_value(const std::string &key, uint32_t value) +{ + emit_json_key(key); + statement_inner(value); +} + +void Stream::emit_json_key_value(const std::string &key, int32_t value) +{ + emit_json_key(key); + statement_inner(value); +} + +void Stream::emit_json_key_value(const std::string &key, float value) +{ + emit_json_key(key); + statement_inner(convert_to_string(value, current_locale_radix_character)); +} + +void Stream::emit_json_key_value(const std::string &key, bool value) +{ + emit_json_key(key); + statement_inner(value ? 
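// The JSON Stream above never emits a trailing comma because each open scope
// carries one bool: false until the scope's first element, true afterwards,
// with every later element prefixed by ",\n". The same bookkeeping reduced to
// its essentials:
#include <stack>
#include <string>

class CommaTracker
{
    std::stack<bool> scopes; // true once the open scope holds an element
public:
    void open() { scopes.push(false); }

    std::string separator()
    {
        if (scopes.empty())
            return "";
        if (!scopes.top())
        {
            scopes.top() = true; // first element: no comma yet
            return "";
        }
        return ",\n";
    }

    void close()
    {
        scopes.pop();
        if (!scopes.empty())
            scopes.top() = true; // the parent scope just gained a child
    }
};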
"true" : "false"); +} + +void Stream::emit_json_key_object(const std::string &key) +{ + emit_json_key(key); + statement_inner("{\n"); + ++indent; + stack.emplace(Type::Object, false); +} + +void Stream::emit_json_key_array(const std::string &key) +{ + emit_json_key(key); + statement_inner("[\n"); + ++indent; + stack.emplace(Type::Array, false); +} + +void CompilerReflection::set_format(const std::string &format) +{ + if (format != "json") + { + SPIRV_CROSS_THROW("Unsupported format"); + } +} + +string CompilerReflection::compile() +{ + json_stream = std::make_shared(); + json_stream->set_current_locale_radix_character(current_locale_radix_character); + json_stream->begin_json_object(); + fixup_type_alias(); + reorder_type_alias(); + emit_entry_points(); + emit_types(); + emit_resources(); + emit_specialization_constants(); + json_stream->end_json_object(); + return json_stream->str(); +} + +void CompilerReflection::emit_types() +{ + bool emitted_open_tag = false; + + ir.for_each_typed_id([&](uint32_t, SPIRType &type) { + if (type.basetype == SPIRType::Struct && !type.pointer && type.array.empty()) + emit_type(type, emitted_open_tag); + }); + + if (emitted_open_tag) + { + json_stream->end_json_object(); + } +} + +void CompilerReflection::emit_type(const SPIRType &type, bool &emitted_open_tag) +{ + auto name = type_to_glsl(type); + + if (type.type_alias != TypeID(0)) + return; + + if (!emitted_open_tag) + { + json_stream->emit_json_key_object("types"); + emitted_open_tag = true; + } + json_stream->emit_json_key_object("_" + std::to_string(type.self)); + json_stream->emit_json_key_value("name", name); + json_stream->emit_json_key_array("members"); + + + + + + + + + + + + auto size = type.member_types.size(); + for (uint32_t i = 0; i < size; ++i) + { + emit_type_member(type, i); + } + json_stream->end_json_array(); + json_stream->end_json_object(); +} + +void CompilerReflection::emit_type_member(const SPIRType &type, uint32_t index) +{ + auto &membertype = get(type.member_types[index]); + json_stream->begin_json_object(); + auto name = to_member_name(type, index); + + + json_stream->emit_json_key_value("name", name); + if (membertype.basetype == SPIRType::Struct) + { + json_stream->emit_json_key_value("type", "_" + std::to_string(membertype.self)); + } + else + { + json_stream->emit_json_key_value("type", type_to_glsl(membertype)); + } + emit_type_member_qualifiers(type, index); + json_stream->end_json_object(); +} + +void CompilerReflection::emit_type_array(const SPIRType &type) +{ + if (!type.array.empty()) + { + json_stream->emit_json_key_array("array"); + + + + for (const auto &value : type.array) + json_stream->emit_json_array_value(value); + json_stream->end_json_array(); + } +} + +void CompilerReflection::emit_type_member_qualifiers(const SPIRType &type, uint32_t index) +{ + auto flags = combined_decoration_for_member(type, index); + if (flags.get(DecorationRowMajor)) + json_stream->emit_json_key_value("row_major", true); + + auto &membertype = get(type.member_types[index]); + emit_type_array(membertype); + auto &memb = ir.meta[type.self].members; + if (index < memb.size()) + { + auto &dec = memb[index]; + if (dec.decoration_flags.get(DecorationLocation)) + json_stream->emit_json_key_value("location", dec.location); + if (dec.decoration_flags.get(DecorationOffset)) + json_stream->emit_json_key_value("offset", dec.offset); + } +} + +string CompilerReflection::execution_model_to_str(spv::ExecutionModel model) +{ + switch (model) + { + case ExecutionModelVertex: + return "vert"; + case 
ExecutionModelTessellationControl: + return "tesc"; + case ExecutionModelTessellationEvaluation: + return "tese"; + case ExecutionModelGeometry: + return "geom"; + case ExecutionModelFragment: + return "frag"; + case ExecutionModelGLCompute: + return "comp"; + case ExecutionModelRayGenerationNV: + return "rgen"; + case ExecutionModelIntersectionNV: + return "rint"; + case ExecutionModelAnyHitNV: + return "rahit"; + case ExecutionModelClosestHitNV: + return "rchit"; + case ExecutionModelMissNV: + return "rmiss"; + case ExecutionModelCallableNV: + return "rcall"; + default: + return "???"; + } +} + + +void CompilerReflection::emit_entry_points() +{ + auto entries = get_entry_points_and_stages(); + if (!entries.empty()) + { + + sort(begin(entries), end(entries), [](const EntryPoint &a, const EntryPoint &b) -> bool { + if (a.execution_model < b.execution_model) + return true; + else if (a.execution_model > b.execution_model) + return false; + else + return a.name < b.name; + }); + + json_stream->emit_json_key_array("entryPoints"); + for (auto &e : entries) + { + json_stream->begin_json_object(); + json_stream->emit_json_key_value("name", e.name); + json_stream->emit_json_key_value("mode", execution_model_to_str(e.execution_model)); + if (e.execution_model == ExecutionModelGLCompute) + { + const auto &spv_entry = get_entry_point(e.name, e.execution_model); + + SpecializationConstant spec_x, spec_y, spec_z; + get_work_group_size_specialization_constants(spec_x, spec_y, spec_z); + + json_stream->emit_json_key_array("workgroup_size"); + json_stream->emit_json_array_value(spec_x.id != ID(0) ? spec_x.constant_id : + spv_entry.workgroup_size.x); + json_stream->emit_json_array_value(spec_y.id != ID(0) ? spec_y.constant_id : + spv_entry.workgroup_size.y); + json_stream->emit_json_array_value(spec_z.id != ID(0) ? 
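// Roughly the JSON shape the emit_types()/emit_type() pair above produces,
// assuming a std140 UBO like the one in the Rust tests further below
// (uniform_buffer_object, id 22; the offsets are illustrative). Struct types
// are keyed "_<id>" so struct-typed members can refer back to their
// definition:
//
//   "types" : {
//     "_22" : {
//       "name" : "uniform_buffer_object",
//       "members" : [
//         { "name" : "u_model_view_projection", "type" : "mat4", "offset" : 0 },
//         { "name" : "u_scale", "type" : "float", "offset" : 64 }
//       ]
//     }
//   }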
spec_z.constant_id : + spv_entry.workgroup_size.z); + json_stream->end_json_array(); + + json_stream->emit_json_key_array("workgroup_size_is_spec_constant_id"); + json_stream->emit_json_array_value(spec_x.id != ID(0)); + json_stream->emit_json_array_value(spec_y.id != ID(0)); + json_stream->emit_json_array_value(spec_z.id != ID(0)); + json_stream->end_json_array(); + } + json_stream->end_json_object(); + } + json_stream->end_json_array(); + } +} + +void CompilerReflection::emit_resources() +{ + auto res = get_shader_resources(); + emit_resources("subpass_inputs", res.subpass_inputs); + emit_resources("inputs", res.stage_inputs); + emit_resources("outputs", res.stage_outputs); + emit_resources("textures", res.sampled_images); + emit_resources("separate_images", res.separate_images); + emit_resources("separate_samplers", res.separate_samplers); + emit_resources("images", res.storage_images); + emit_resources("ssbos", res.storage_buffers); + emit_resources("ubos", res.uniform_buffers); + emit_resources("push_constants", res.push_constant_buffers); + emit_resources("counters", res.atomic_counters); + emit_resources("acceleration_structures", res.acceleration_structures); +} + +void CompilerReflection::emit_resources(const char *tag, const SmallVector &resources) +{ + if (resources.empty()) + { + return; + } + + json_stream->emit_json_key_array(tag); + for (auto &res : resources) + { + auto &type = get_type(res.type_id); + auto typeflags = ir.meta[type.self].decoration.decoration_flags; + auto &mask = get_decoration_bitset(res.id); + + + + + bool is_push_constant = get_storage_class(res.id) == StorageClassPushConstant; + bool is_block = get_decoration_bitset(type.self).get(DecorationBlock) || + get_decoration_bitset(type.self).get(DecorationBufferBlock); + + ID fallback_id = !is_push_constant && is_block ? ID(res.base_type_id) : ID(res.id); + + json_stream->begin_json_object(); + + if (type.basetype == SPIRType::Struct) + { + json_stream->emit_json_key_value("type", "_" + std::to_string(res.base_type_id)); + } + else + { + json_stream->emit_json_key_value("type", type_to_glsl(type)); + } + + json_stream->emit_json_key_value("name", !res.name.empty() ? 
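// Consumer-side sketch (hypothetical names) for the two parallel arrays
// emitted above: per axis, "workgroup_size" holds either a literal size or a
// specialization-constant id, and "workgroup_size_is_spec_constant_id" says
// which interpretation applies.
#include <cstdint>

struct WorkgroupAxis
{
    uint32_t value;  // literal size, or a constant_id when is_spec_id is set
    bool is_spec_id;
};

static uint32_t resolve_axis(const WorkgroupAxis &axis, uint32_t (*lookup_spec_constant)(uint32_t))
{
    return axis.is_spec_id ? lookup_spec_constant(axis.value) : axis.value;
}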
res.name : get_fallback_name(fallback_id)); + { + bool ssbo_block = type.storage == StorageClassStorageBuffer || + (type.storage == StorageClassUniform && typeflags.get(DecorationBufferBlock)); + if (ssbo_block) + { + auto buffer_flags = get_buffer_block_flags(res.id); + if (buffer_flags.get(DecorationNonReadable)) + json_stream->emit_json_key_value("writeonly", true); + if (buffer_flags.get(DecorationNonWritable)) + json_stream->emit_json_key_value("readonly", true); + if (buffer_flags.get(DecorationRestrict)) + json_stream->emit_json_key_value("restrict", true); + if (buffer_flags.get(DecorationCoherent)) + json_stream->emit_json_key_value("coherent", true); + } + } + + emit_type_array(type); + + { + bool is_sized_block = is_block && (get_storage_class(res.id) == StorageClassUniform || + get_storage_class(res.id) == StorageClassUniformConstant || + get_storage_class(res.id) == StorageClassStorageBuffer); + if (is_sized_block) + { + uint32_t block_size = uint32_t(get_declared_struct_size(get_type(res.base_type_id))); + json_stream->emit_json_key_value("block_size", block_size); + } + } + + if (type.storage == StorageClassPushConstant) + json_stream->emit_json_key_value("push_constant", true); + if (mask.get(DecorationLocation)) + json_stream->emit_json_key_value("location", get_decoration(res.id, DecorationLocation)); + if (mask.get(DecorationRowMajor)) + json_stream->emit_json_key_value("row_major", true); + if (mask.get(DecorationColMajor)) + json_stream->emit_json_key_value("column_major", true); + if (mask.get(DecorationIndex)) + json_stream->emit_json_key_value("index", get_decoration(res.id, DecorationIndex)); + if (type.storage != StorageClassPushConstant && mask.get(DecorationDescriptorSet)) + json_stream->emit_json_key_value("set", get_decoration(res.id, DecorationDescriptorSet)); + if (mask.get(DecorationBinding)) + json_stream->emit_json_key_value("binding", get_decoration(res.id, DecorationBinding)); + if (mask.get(DecorationInputAttachmentIndex)) + json_stream->emit_json_key_value("input_attachment_index", + get_decoration(res.id, DecorationInputAttachmentIndex)); + if (mask.get(DecorationOffset)) + json_stream->emit_json_key_value("offset", get_decoration(res.id, DecorationOffset)); + + + + if (type.basetype == SPIRType::Image && type.image.sampled == 2) + { + const char *fmt = format_to_glsl(type.image.format); + if (fmt != nullptr) + json_stream->emit_json_key_value("format", std::string(fmt)); + } + json_stream->end_json_object(); + } + json_stream->end_json_array(); +} + +void CompilerReflection::emit_specialization_constants() +{ + auto specialization_constants = get_specialization_constants(); + if (specialization_constants.empty()) + return; + + json_stream->emit_json_key_array("specialization_constants"); + for (const auto spec_const : specialization_constants) + { + auto &c = get(spec_const.id); + auto type = get(c.constant_type); + json_stream->begin_json_object(); + json_stream->emit_json_key_value("id", spec_const.constant_id); + json_stream->emit_json_key_value("type", type_to_glsl(type)); + switch (type.basetype) + { + case SPIRType::UInt: + json_stream->emit_json_key_value("default_value", c.scalar()); + break; + + case SPIRType::Int: + json_stream->emit_json_key_value("default_value", c.scalar_i32()); + break; + + case SPIRType::Float: + json_stream->emit_json_key_value("default_value", c.scalar_f32()); + break; + + case SPIRType::Boolean: + json_stream->emit_json_key_value("default_value", c.scalar() != 0); + break; + + default: + break; + } + 
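// Note the deliberate inversion above: SPIR-V decorates what a buffer may
// *not* do (DecorationNonReadable, DecorationNonWritable), while the report
// states what it *is*, so NonReadable surfaces as "writeonly" : true and
// NonWritable as "readonly" : true.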
json_stream->end_json_object(); + } + json_stream->end_json_array(); +} + +string CompilerReflection::to_member_name(const SPIRType &type, uint32_t index) const +{ + auto *type_meta = ir.find_meta(type.self); + + if (type_meta) + { + auto &memb = type_meta->members; + if (index < memb.size() && !memb[index].alias.empty()) + return memb[index].alias; + else + return join("_m", index); + } + else + return join("_m", index); +} diff --git a/third_party/rust/spirv_cross/src/vendor/SPIRV-Cross/spirv_reflect.hpp b/third_party/rust/spirv_cross/src/vendor/SPIRV-Cross/spirv_reflect.hpp new file mode 100644 index 000000000000..dbd9e39e6fd9 --- /dev/null +++ b/third_party/rust/spirv_cross/src/vendor/SPIRV-Cross/spirv_reflect.hpp @@ -0,0 +1,83 @@ + + + + + + + + + + + + + + + + +#ifndef SPIRV_CROSS_REFLECT_HPP +#define SPIRV_CROSS_REFLECT_HPP + +#include "spirv_glsl.hpp" +#include + +namespace simple_json +{ +class Stream; +} + +namespace SPIRV_CROSS_NAMESPACE +{ +class CompilerReflection : public CompilerGLSL +{ + using Parent = CompilerGLSL; + +public: + explicit CompilerReflection(std::vector spirv_) + : Parent(std::move(spirv_)) + { + options.vulkan_semantics = true; + } + + CompilerReflection(const uint32_t *ir_, size_t word_count) + : Parent(ir_, word_count) + { + options.vulkan_semantics = true; + } + + explicit CompilerReflection(const ParsedIR &ir_) + : CompilerGLSL(ir_) + { + options.vulkan_semantics = true; + } + + explicit CompilerReflection(ParsedIR &&ir_) + : CompilerGLSL(std::move(ir_)) + { + options.vulkan_semantics = true; + } + + void set_format(const std::string &format); + std::string compile() override; + +private: + static std::string execution_model_to_str(spv::ExecutionModel model); + + void emit_entry_points(); + void emit_types(); + void emit_resources(); + void emit_specialization_constants(); + + void emit_type(const SPIRType &type, bool &emitted_open_tag); + void emit_type_member(const SPIRType &type, uint32_t index); + void emit_type_member_qualifiers(const SPIRType &type, uint32_t index); + void emit_type_array(const SPIRType &type); + void emit_resources(const char *tag, const SmallVector &resources); + + std::string to_member_name(const SPIRType &type, uint32_t index) const; + + std::shared_ptr json_stream; +}; + +} + +#endif diff --git a/third_party/rust/spirv_cross/src/wrapper.cpp b/third_party/rust/spirv_cross/src/wrapper.cpp new file mode 100644 index 000000000000..242b01c82b84 --- /dev/null +++ b/third_party/rust/spirv_cross/src/wrapper.cpp @@ -0,0 +1,505 @@ +#pragma GCC diagnostic ignored "-Wunknown-pragmas" +#pragma warning(disable : 4996 4101) + +#include "vendor/SPIRV-Cross/spirv_cross_util.hpp" +#include "vendor/SPIRV-Cross/spirv_hlsl.hpp" +#include "vendor/SPIRV-Cross/spirv_msl.hpp" +#include "vendor/SPIRV-Cross/spirv_glsl.hpp" +#include "wrapper.hpp" + +static const char *latest_exception_message; + +#ifdef SPIRV_CROSS_WRAPPER_NO_EXCEPTIONS +#define INTERNAL_RESULT(block_to_attempt) \ + do \ + { \ + { \ + block_to_attempt \ + } \ + return ScInternalResult::Success; \ + } while (0); +#else +#define INTERNAL_RESULT(block_to_attempt) \ + do \ + { \ + try \ + { \ + { \ + block_to_attempt \ + } \ + return ScInternalResult::Success; \ + } \ + catch (const spirv_cross::CompilerError &ex) \ + { \ + latest_exception_message = strdup(ex.what()); \ + return ScInternalResult::CompilationError; \ + } \ + catch (const std::exception &ex) \ + { \ + return ScInternalResult::Unhandled; \ + } \ + catch (...) 
\ + { \ + return ScInternalResult::Unhandled; \ + } \ + return ScInternalResult::Unhandled; \ + } while (0); +#endif + +extern "C" +{ + ScInternalResult sc_internal_get_latest_exception_message(const char **message) + { + INTERNAL_RESULT(*message = latest_exception_message;) + } + +#ifdef SPIRV_CROSS_WRAPPER_HLSL + ScInternalResult sc_internal_compiler_hlsl_new(ScInternalCompilerHlsl **compiler, const uint32_t *ir, const size_t size) + { + INTERNAL_RESULT(*compiler = new spirv_cross::CompilerHLSL(ir, size);) + } + + ScInternalResult sc_internal_compiler_hlsl_set_options(const ScInternalCompilerHlsl *compiler, const ScHlslCompilerOptions *options) + { + INTERNAL_RESULT( + do { + auto compiler_glsl = (spirv_cross::CompilerGLSL *)compiler; + auto glsl_options = compiler_glsl->get_common_options(); + glsl_options.vertex.fixup_clipspace = options->vertex_transform_clip_space; + glsl_options.vertex.flip_vert_y = options->vertex_invert_y; + compiler_glsl->set_common_options(glsl_options); + + auto compiler_hlsl = (spirv_cross::CompilerHLSL *)compiler; + auto hlsl_options = compiler_hlsl->get_hlsl_options(); + hlsl_options.shader_model = options->shader_model; + hlsl_options.point_size_compat = options->point_size_compat; + hlsl_options.point_coord_compat = options->point_coord_compat; + + compiler_hlsl->set_hlsl_options(hlsl_options); + } while (0);) + } + + ScInternalResult sc_internal_compiler_hlsl_set_root_constant_layout(const ScInternalCompilerHlsl *compiler, const ScHlslRootConstant *constants, size_t count) + { + INTERNAL_RESULT( + do { + std::vector root_constants; + for (size_t i = 0; i < count; i++) + { + root_constants.push_back( + spirv_cross::RootConstants{ + constants[i].start, + constants[i].end, + constants[i].binding, + constants[i].space}); + } + + auto compiler_hlsl = (spirv_cross::CompilerHLSL *)compiler; + compiler_hlsl->set_root_constant_layouts(root_constants); + } while (0);) + } +#endif + +#ifdef SPIRV_CROSS_WRAPPER_MSL + ScInternalResult sc_internal_compiler_msl_new(ScInternalCompilerMsl **compiler, const uint32_t *ir, const size_t size) + { + INTERNAL_RESULT(*compiler = new spirv_cross::CompilerMSL(ir, size);) + } + + ScInternalResult sc_internal_compiler_msl_compile(const ScInternalCompilerBase *compiler, const char **shader, + const spirv_cross::MSLVertexAttr *p_vat_overrides, const size_t vat_override_count, + const spirv_cross::MSLResourceBinding *p_res_overrides, const size_t res_override_count, + const MslConstSamplerMapping *p_const_samplers, const size_t const_sampler_count) + { + INTERNAL_RESULT( + do { + auto compiler_msl = ((spirv_cross::CompilerMSL *)compiler); + + for (size_t i = 0; i < vat_override_count; i++) + { + compiler_msl->add_msl_vertex_attribute(p_vat_overrides[i]); + } + + for (size_t i = 0; i < res_override_count; i++) + { + compiler_msl->add_msl_resource_binding(p_res_overrides[i]); + } + + for (size_t i = 0; i < const_sampler_count; i++) + { + const auto& mapping = p_const_samplers[i]; + compiler_msl->remap_constexpr_sampler_by_binding(mapping.desc_set, mapping.binding, mapping.sampler); + } + + *shader = strdup(compiler_msl->compile().c_str()); + } while (0);) + } + + ScInternalResult sc_internal_compiler_msl_set_options(const ScInternalCompilerMsl *compiler, const ScMslCompilerOptions *options) + { + INTERNAL_RESULT( + do { + auto compiler_msl = (spirv_cross::CompilerMSL *)compiler; + + auto glsl_options = compiler_msl->get_common_options(); + glsl_options.vertex.fixup_clipspace = options->vertex_transform_clip_space; + 
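// Minimal sketch of the INTERNAL_RESULT pattern used throughout this file,
// with a hypothetical do_work() standing in for a real compiler call: no C++
// exception may unwind across the C ABI into Rust, so each entry point folds
// failures into the ScInternalResult enum (declared in wrapper.hpp), and any
// string handed out is strdup'ed (POSIX) so the Rust side can free it
// independently of C++ temporaries.
#include <cstring>
#include <string>

extern "C" ScInternalResult wrapped_get_string(const char **out)
{
    try
    {
        std::string result = "do_work()";  // stand-in for the real call
        *out = strdup(result.c_str());     // caller releases via sc_internal_free_pointer
        return ScInternalResult::Success;
    }
    catch (...)
    {
        return ScInternalResult::Unhandled; // swallow everything at the boundary
    }
}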
glsl_options.vertex.flip_vert_y = options->vertex_invert_y; + compiler_msl->set_common_options(glsl_options); + + auto msl_options = compiler_msl->get_msl_options(); + msl_options.platform = static_cast(options->platform); + msl_options.msl_version = options->version; + msl_options.swizzle_buffer_index = options->swizzle_buffer_index; + msl_options.indirect_params_buffer_index = options->indirect_params_buffer_index; + msl_options.shader_output_buffer_index = options->shader_output_buffer_index; + msl_options.shader_patch_output_buffer_index = options->shader_patch_output_buffer_index; + msl_options.shader_tess_factor_buffer_index = options->shader_tess_factor_buffer_index; + msl_options.buffer_size_buffer_index = options->buffer_size_buffer_index; + msl_options.enable_point_size_builtin = options->enable_point_size_builtin; + msl_options.disable_rasterization = options->disable_rasterization; + msl_options.capture_output_to_buffer = options->capture_output_to_buffer; + msl_options.swizzle_texture_samples = options->swizzle_texture_samples; + msl_options.tess_domain_origin_lower_left = options->tess_domain_origin_lower_left; + msl_options.argument_buffers = options->argument_buffers; + msl_options.pad_fragment_output_components = options->pad_fragment_output_components; + compiler_msl->set_msl_options(msl_options); + } while (0);) + } + + ScInternalResult sc_internal_compiler_msl_get_is_rasterization_disabled(const ScInternalCompilerMsl *compiler, bool *is_rasterization_disabled) + { + INTERNAL_RESULT(*is_rasterization_disabled = ((spirv_cross::CompilerMSL *)compiler)->get_is_rasterization_disabled();) + } +#endif + +#ifdef SPIRV_CROSS_WRAPPER_GLSL + ScInternalResult sc_internal_compiler_glsl_new(ScInternalCompilerGlsl **compiler, const uint32_t *ir, const size_t size) + { + INTERNAL_RESULT(*compiler = new spirv_cross::CompilerGLSL(ir, size);) + } + + ScInternalResult sc_internal_compiler_glsl_set_options(const ScInternalCompilerGlsl *compiler, const ScGlslCompilerOptions *options) + { + INTERNAL_RESULT( + do { + auto compiler_glsl = (spirv_cross::CompilerGLSL *)compiler; + auto glsl_options = compiler_glsl->get_common_options(); + glsl_options.version = options->version; + glsl_options.es = options->es; + glsl_options.vertex.fixup_clipspace = options->vertex_transform_clip_space; + glsl_options.vertex.flip_vert_y = options->vertex_invert_y; + compiler_glsl->set_common_options(glsl_options); + } while (0);) + } + + ScInternalResult sc_internal_compiler_glsl_build_combined_image_samplers(const ScInternalCompilerBase *compiler) + { + INTERNAL_RESULT( + do { + ((spirv_cross::CompilerGLSL *)compiler)->build_combined_image_samplers(); + } while (0);) + } + + ScInternalResult sc_internal_compiler_glsl_get_combined_image_samplers(const ScInternalCompilerBase *compiler, const ScCombinedImageSampler **samplers, size_t *size) + { + INTERNAL_RESULT( + do { + const spirv_cross::SmallVector& ret = ((spirv_cross::CompilerGLSL *)compiler)->get_combined_image_samplers(); + *samplers = (const ScCombinedImageSampler *)ret.data(); + *size = ret.size(); + } while (0);) + } +#endif + + ScInternalResult sc_internal_compiler_get_decoration(const ScInternalCompilerBase *compiler, uint32_t *result, const uint32_t id, const spv::Decoration decoration) + { + INTERNAL_RESULT(*result = ((spirv_cross::Compiler *)compiler)->get_decoration(id, decoration);) + } + + ScInternalResult sc_internal_compiler_unset_decoration(const ScInternalCompilerBase *compiler, const uint32_t id, const spv::Decoration decoration) + { + 
INTERNAL_RESULT(((spirv_cross::Compiler *)compiler)->unset_decoration(id, decoration);) + } + + ScInternalResult sc_internal_compiler_set_decoration(const ScInternalCompilerBase *compiler, const uint32_t id, const spv::Decoration decoration, const uint32_t argument) + { + INTERNAL_RESULT(((spirv_cross::Compiler *)compiler)->set_decoration(id, decoration, argument);) + } + + ScInternalResult sc_internal_compiler_get_name(const ScInternalCompilerBase *compiler, const uint32_t id, const char **name) + { + INTERNAL_RESULT( + do { + auto const _name = ((spirv_cross::Compiler *)compiler)->get_name(id); + *name = strdup(_name.c_str()); + } while (0);) + } + + ScInternalResult sc_internal_compiler_set_name(const ScInternalCompilerBase *compiler, const uint32_t id, const char *name) + { + INTERNAL_RESULT(((spirv_cross::Compiler *)compiler)->set_name(id, std::string(name));) + } + + ScInternalResult sc_internal_compiler_get_entry_points(const ScInternalCompilerBase *compiler, ScEntryPoint **entry_points, size_t *size) + { + INTERNAL_RESULT( + do { + auto const &comp = *((spirv_cross::Compiler *)compiler); + auto const &sc_entry_point_names_and_stages = comp.get_entry_points_and_stages(); + auto const sc_size = sc_entry_point_names_and_stages.size(); + auto const &sc_entry_points = std::make_unique(sc_size); + for (uint32_t i = 0; i < sc_size; i++) + { + auto const &sc_entry_point = sc_entry_point_names_and_stages[i]; + sc_entry_points[i] = comp.get_entry_point(sc_entry_point.name, sc_entry_point.execution_model); + } + + *entry_points = (ScEntryPoint *)malloc(sc_size * sizeof(ScEntryPoint)); + *size = sc_size; + for (uint32_t i = 0; i < sc_size; i++) + { + auto const &sc_entry_point = sc_entry_points[i]; + entry_points[i]->name = strdup(sc_entry_point.name.c_str()); + entry_points[i]->execution_model = sc_entry_point.model; + entry_points[i]->work_group_size_x = sc_entry_point.workgroup_size.x; + entry_points[i]->work_group_size_y = sc_entry_point.workgroup_size.y; + entry_points[i]->work_group_size_z = sc_entry_point.workgroup_size.z; + } + } while (0);) + } + + ScInternalResult sc_internal_compiler_get_active_buffer_ranges(const ScInternalCompilerBase *compiler, uint32_t id, ScBufferRange **active_buffer_ranges, size_t *size) + { + INTERNAL_RESULT( + do { + auto const &comp = *((spirv_cross::Compiler *)compiler); + auto const &sc_active_buffer_ranges = comp.get_active_buffer_ranges(id); + auto const sc_size = sc_active_buffer_ranges.size(); + + *active_buffer_ranges = (ScBufferRange *)malloc(sc_size * sizeof(ScBufferRange)); + *size = sc_size; + for (uint32_t i = 0; i < sc_size; i++) + { + auto const &sc_active_buffer_range = sc_active_buffer_ranges[i]; + active_buffer_ranges[i]->index = sc_active_buffer_range.index; + active_buffer_ranges[i]->offset = sc_active_buffer_range.offset; + active_buffer_ranges[i]->range = sc_active_buffer_range.range; + } + } while (0);) + } + + ScInternalResult sc_internal_compiler_get_cleansed_entry_point_name(const ScInternalCompilerBase *compiler, const char *original_entry_point_name, const spv::ExecutionModel execution_model, const char **compiled_entry_point_name) + { + INTERNAL_RESULT( + do { + *compiled_entry_point_name = strdup( + (*((spirv_cross::Compiler *)compiler)) + .get_cleansed_entry_point_name(std::string(original_entry_point_name), execution_model) + .c_str()); + } while (0);) + } + + void fill_resource_array(ScResourceArray *resources, const spirv_cross::SmallVector &sc_resources) + { + auto const sc_size = sc_resources.size(); + + if (sc_size == 0) 
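// As transcribed above, the copy loops in sc_internal_compiler_get_entry_points
// and sc_internal_compiler_get_active_buffer_ranges index the out parameter
// itself (entry_points[i]->name, active_buffer_ranges[i]->index); that aliases
// the malloc'ed array only for i == 0 and walks past the caller's single
// pointer slot otherwise. A corrected shape for the same marshalling, with a
// reduced stand-in struct:
#include <cstddef>
#include <cstdlib>
#include <cstring>

struct NamedItem
{
    char *name;
};

static void copy_names(NamedItem **out, size_t *size, const char *const *names, size_t n)
{
    NamedItem *items = (NamedItem *)malloc(n * sizeof(NamedItem));
    for (size_t i = 0; i < n; i++)
        items[i].name = strdup(names[i]); // index the allocation, not the out pointer
    *out = items;
    *size = n;
}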
+ { + resources->num = 0; + resources->data = 0x0; + return; + } + + resources->num = sc_size; + resources->data = (ScResource *)malloc(sc_size * sizeof(ScResource)); + for (uint32_t i = 0; i < sc_size; i++) + { + auto const &resource = sc_resources[i]; + resources->data[i].id = resource.id; + resources->data[i].type_id = resource.type_id; + resources->data[i].base_type_id = resource.base_type_id; + resources->data[i].name = strdup(resource.name.c_str()); + } + } + + ScInternalResult sc_internal_compiler_get_shader_resources(const ScInternalCompilerBase *compiler, ScShaderResources *shader_resources) + { + INTERNAL_RESULT( + do { + auto const sc_resources = ((const spirv_cross::Compiler *)compiler)->get_shader_resources(); + + fill_resource_array(&shader_resources->uniform_buffers, sc_resources.uniform_buffers); + fill_resource_array(&shader_resources->storage_buffers, sc_resources.storage_buffers); + fill_resource_array(&shader_resources->stage_inputs, sc_resources.stage_inputs); + fill_resource_array(&shader_resources->stage_outputs, sc_resources.stage_outputs); + fill_resource_array(&shader_resources->subpass_inputs, sc_resources.subpass_inputs); + fill_resource_array(&shader_resources->storage_images, sc_resources.storage_images); + fill_resource_array(&shader_resources->sampled_images, sc_resources.sampled_images); + fill_resource_array(&shader_resources->atomic_counters, sc_resources.atomic_counters); + fill_resource_array(&shader_resources->push_constant_buffers, sc_resources.push_constant_buffers); + fill_resource_array(&shader_resources->separate_images, sc_resources.separate_images); + fill_resource_array(&shader_resources->separate_samplers, sc_resources.separate_samplers); + } while (0);) + } + + ScInternalResult sc_internal_compiler_get_specialization_constants(const ScInternalCompilerBase *compiler, ScSpecializationConstant **constants, size_t *size) + { + INTERNAL_RESULT( + do { + auto const sc_constants = ((const spirv_cross::Compiler *)compiler)->get_specialization_constants(); + auto const sc_size = sc_constants.size(); + + auto p_constants = (ScSpecializationConstant *)malloc(sc_size * sizeof(ScSpecializationConstant)); + *constants = p_constants; + *size = sc_size; + for (uint32_t i = 0; i < sc_size; i++) + { + auto const &sc_constant = sc_constants[i]; + p_constants[i].id = sc_constant.id; + p_constants[i].constant_id = sc_constant.constant_id; + } + } while (0);) + } + + ScInternalResult sc_internal_compiler_set_scalar_constant(const ScInternalCompilerBase *compiler, const uint32_t id, const uint32_t constant_high_bits, const uint32_t constant_low_bits) + { + INTERNAL_RESULT( + do { + auto &sc_constant = ((spirv_cross::Compiler *)compiler)->get_constant(id); + sc_constant.m.c[0].r[0].u64 = (((uint64_t)constant_high_bits) << 32) | constant_low_bits; + } while (0);) + } + + ScInternalResult sc_internal_compiler_get_type(const ScInternalCompilerBase *compiler, const uint32_t id, const ScType **spirv_type) + { + INTERNAL_RESULT( + do { + auto const &type = ((spirv_cross::Compiler *)compiler)->get_type(id); + auto const member_types_size = type.member_types.size(); + auto const array_size = type.array.size(); + + auto ty = (ScType *)malloc(sizeof(ScType)); + ty->type = type.basetype; + ty->member_types_size = member_types_size; + ty->array_size = array_size; + + if (member_types_size > 0) + { + auto const &member_types = (uint32_t *)malloc(member_types_size * sizeof(uint32_t)); + + for (size_t i = 0; i < member_types_size; i++) + { + member_types[i] = type.member_types[i]; 
+ } + + ty->member_types = member_types; + } + + if (array_size > 0) + { + auto const &array = (uint32_t *)malloc(array_size * sizeof(uint32_t)); + + for (size_t i = 0; i < array_size; i++) + { + array[i] = type.array[i]; + } + + ty->array = array; + } + + *spirv_type = ty; + } while (0);) + } + + ScInternalResult sc_internal_compiler_get_member_name(const ScInternalCompilerBase *compiler, const uint32_t id, const uint32_t index, const char **name) + { + INTERNAL_RESULT( + do { + auto const member_name = ((spirv_cross::Compiler *)compiler)->get_member_name(id, index); + *name = strdup(member_name.c_str()); + } while (0);) + } + + ScInternalResult sc_internal_compiler_get_member_decoration(const ScInternalCompilerBase *compiler, const uint32_t id, const uint32_t index, const spv::Decoration decoration, uint32_t *result) + { + INTERNAL_RESULT(*result = ((spirv_cross::Compiler *)compiler)->get_member_decoration(id, index, decoration);) + } + + ScInternalResult sc_internal_compiler_set_member_decoration(const ScInternalCompilerBase *compiler, const uint32_t id, const uint32_t index, const spv::Decoration decoration, const uint32_t argument) + { + INTERNAL_RESULT(((spirv_cross::Compiler *)compiler)->set_member_decoration(id, index, decoration, argument);) + } + + ScInternalResult sc_internal_compiler_get_declared_struct_size(const ScInternalCompilerBase *compiler, const uint32_t id, uint32_t *result) + { + INTERNAL_RESULT(do { + auto const &comp = ((spirv_cross::Compiler *)compiler); + *result = comp->get_declared_struct_size(comp->get_type(id)); + } while (0);) + } + + ScInternalResult sc_internal_compiler_get_declared_struct_member_size(const ScInternalCompilerBase *compiler, const uint32_t id, const uint32_t index, uint32_t *result) + { + INTERNAL_RESULT(do { + auto const &comp = ((spirv_cross::Compiler *)compiler); + *result = comp->get_declared_struct_member_size(comp->get_type(id), index); + } while (0);) + } + + ScInternalResult sc_internal_compiler_rename_interface_variable(const ScInternalCompilerBase *compiler, const ScResource *resources, const size_t resources_size, uint32_t location, const char *name) + { + INTERNAL_RESULT(do { + spirv_cross::SmallVector sc_resources; + for (size_t i = 0; i < resources_size; i++) + { + auto const &resource = resources[i]; + spirv_cross::Resource sc_resource; + std::string sc_name(resource.name); + sc_resource.id = resource.id; + sc_resource.type_id = resource.type_id; + sc_resource.base_type_id = resource.base_type_id; + sc_resource.name = sc_name; + sc_resources.push_back(sc_resource); + } + + auto &comp = *(spirv_cross::Compiler *)compiler; + std::string new_name(name); + spirv_cross_util::rename_interface_variable(comp, sc_resources, location, new_name); + } while (0);) + } + + ScInternalResult sc_internal_compiler_get_work_group_size_specialization_constants(const ScInternalCompilerBase *compiler, ScSpecializationConstant **constants) + { + INTERNAL_RESULT(do { + spirv_cross::SpecializationConstant wg_x; + spirv_cross::SpecializationConstant wg_y; + spirv_cross::SpecializationConstant wg_z; + ((const spirv_cross::Compiler *)compiler)->get_work_group_size_specialization_constants(wg_x, wg_y, wg_z); + + auto p_constants = (ScSpecializationConstant *)malloc(3 * sizeof(ScSpecializationConstant)); + p_constants[0].id = wg_x.id; + p_constants[0].constant_id = wg_x.constant_id; + p_constants[1].id = wg_y.id; + p_constants[1].constant_id = wg_y.constant_id; + p_constants[2].id = wg_z.id; + p_constants[2].constant_id = wg_z.constant_id; + + *constants = 
p_constants; + } while (0);) + } + + ScInternalResult sc_internal_compiler_compile(const ScInternalCompilerBase *compiler, const char **shader) + { + INTERNAL_RESULT(*shader = strdup(((spirv_cross::Compiler *)compiler)->compile().c_str());) + } + + ScInternalResult sc_internal_compiler_delete(ScInternalCompilerBase *compiler) + { + INTERNAL_RESULT(delete (spirv_cross::Compiler *)compiler;) + } + + ScInternalResult sc_internal_free_pointer(void *pointer) + { + INTERNAL_RESULT(free(pointer);) + } +} diff --git a/third_party/rust/spirv_cross/src/wrapper.hpp b/third_party/rust/spirv_cross/src/wrapper.hpp new file mode 100644 index 000000000000..4d49da204bd8 --- /dev/null +++ b/third_party/rust/spirv_cross/src/wrapper.hpp @@ -0,0 +1,189 @@ +#include "vendor/SPIRV-Cross/spirv.hpp" +#include "vendor/SPIRV-Cross/spirv_cross_util.hpp" +#include "vendor/SPIRV-Cross/spirv_hlsl.hpp" +#include "vendor/SPIRV-Cross/spirv_msl.hpp" +#include "vendor/SPIRV-Cross/spirv_glsl.hpp" + +typedef void ScInternalCompilerBase; +typedef void ScInternalCompilerHlsl; +typedef void ScInternalCompilerMsl; +typedef void ScInternalCompilerGlsl; + +extern "C" +{ + enum ScInternalResult + { + Success, + Unhandled, + CompilationError, + }; + + typedef struct ScEntryPoint + { + char *name; + spv::ExecutionModel execution_model; + uint32_t work_group_size_x; + uint32_t work_group_size_y; + uint32_t work_group_size_z; + } ScEntryPoint; + + typedef struct ScBufferRange + { + unsigned index; + size_t offset; + size_t range; + } ScBufferRange; + + typedef struct ScCombinedImageSampler + { + uint32_t combined_id; + uint32_t image_id; + uint32_t sampler_id; + } ScCombinedImageSampler; + + typedef struct ScHlslRootConstant + { + uint32_t start; + uint32_t end; + uint32_t binding; + uint32_t space; + } ScHlslRootConstant; + + typedef struct ScHlslCompilerOptions + { + int32_t shader_model; + bool point_size_compat; + bool point_coord_compat; + bool vertex_transform_clip_space; + bool vertex_invert_y; + } ScHlslCompilerOptions; + + typedef struct ScMslCompilerOptions + { + bool vertex_transform_clip_space; + bool vertex_invert_y; + uint8_t platform; + uint32_t version; + bool enable_point_size_builtin; + bool disable_rasterization; + uint32_t swizzle_buffer_index; + uint32_t indirect_params_buffer_index; + uint32_t shader_output_buffer_index; + uint32_t shader_patch_output_buffer_index; + uint32_t shader_tess_factor_buffer_index; + uint32_t buffer_size_buffer_index; + bool capture_output_to_buffer; + bool swizzle_texture_samples; + bool tess_domain_origin_lower_left; + bool argument_buffers; + bool pad_fragment_output_components; + } ScMslCompilerOptions; + + typedef struct ScGlslCompilerOptions + { + bool vertex_transform_clip_space; + bool vertex_invert_y; + uint32_t version; + bool es; + } ScGlslCompilerOptions; + + typedef struct ScResource + { + uint32_t id; + uint32_t type_id; + uint32_t base_type_id; + char *name; + } ScResource; + + typedef struct ScResourceArray + { + ScResource *data; + size_t num; + } ScResourceArray; + + typedef struct ScShaderResources + { + ScResourceArray uniform_buffers; + ScResourceArray storage_buffers; + ScResourceArray stage_inputs; + ScResourceArray stage_outputs; + ScResourceArray subpass_inputs; + ScResourceArray storage_images; + ScResourceArray sampled_images; + ScResourceArray atomic_counters; + ScResourceArray push_constant_buffers; + ScResourceArray separate_images; + ScResourceArray separate_samplers; + } ScShaderResources; + + typedef struct ScSpecializationConstant + { + uint32_t id; + 
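// Hypothetical call sequence from the consuming side for the GLSL path,
// assuming SPIRV_CROSS_WRAPPER_GLSL and the declarations in this header:
// every string or array the wrapper returns came from malloc/strdup inside
// the library, so each is released through sc_internal_free_pointer, and the
// compiler object through sc_internal_compiler_delete.
#include <cstddef>
#include <cstdint>

int compile_glsl_module(const uint32_t *words, size_t word_count)
{
    ScInternalCompilerGlsl *compiler = nullptr;
    if (sc_internal_compiler_glsl_new(&compiler, words, word_count) != Success)
        return -1;

    ScGlslCompilerOptions options = { false, false, 310u, true }; // field order per the struct in this header
    sc_internal_compiler_glsl_set_options(compiler, &options);

    const char *shader = nullptr;
    if (sc_internal_compiler_compile(compiler, &shader) == Success)
    {
        // use `shader`, then return it to the library's allocator
        sc_internal_free_pointer((void *)shader);
    }
    sc_internal_compiler_delete(compiler);
    return 0;
}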
uint32_t constant_id; + } ScSpecializationConstant; + + typedef struct ScType + { + spirv_cross::SPIRType::BaseType type; + uint32_t *member_types; + size_t member_types_size; + uint32_t *array; + size_t array_size; + } ScType; + + ScInternalResult sc_internal_get_latest_exception_message(const char **message); + +#ifdef SPIRV_CROSS_WRAPPER_HLSL + ScInternalResult sc_internal_compiler_hlsl_new(ScInternalCompilerHlsl **compiler, const uint32_t *ir, const size_t size); + ScInternalResult sc_internal_compiler_hlsl_set_options(const ScInternalCompilerHlsl *compiler, const ScHlslCompilerOptions *options); + ScInternalResult sc_internal_compiler_hlsl_set_root_constant_layout(const ScInternalCompilerHlsl *compiler, const ScHlslRootConstant *constants, size_t count); +#endif + +#ifdef SPIRV_CROSS_WRAPPER_MSL + typedef struct MslConstSamplerMapping { + uint32_t desc_set; + uint32_t binding; + spirv_cross::MSLConstexprSampler sampler; + } MslConstSamplerMapping; + + ScInternalResult sc_internal_compiler_msl_new(ScInternalCompilerMsl **compiler, const uint32_t *ir, const size_t size); + ScInternalResult sc_internal_compiler_msl_set_options(const ScInternalCompilerMsl *compiler, const ScMslCompilerOptions *options); + ScInternalResult sc_internal_compiler_msl_get_is_rasterization_disabled(const ScInternalCompilerMsl *compiler, bool *is_rasterization_disabled); + ScInternalResult sc_internal_compiler_msl_compile(const ScInternalCompilerBase *compiler, const char **shader, + const spirv_cross::MSLVertexAttr *p_vat_overrides, const size_t vat_override_count, + const spirv_cross::MSLResourceBinding *p_res_overrides, const size_t res_override_count, + const MslConstSamplerMapping *p_const_samplers, const size_t const_sampler_count); +#endif + +#ifdef SPIRV_CROSS_WRAPPER_GLSL + ScInternalResult sc_internal_compiler_glsl_new(ScInternalCompilerGlsl **compiler, const uint32_t *ir, const size_t size); + ScInternalResult sc_internal_compiler_glsl_set_options(const ScInternalCompilerGlsl *compiler, const ScGlslCompilerOptions *options); + ScInternalResult sc_internal_compiler_glsl_build_combined_image_samplers(const ScInternalCompilerBase *compiler); + ScInternalResult sc_internal_compiler_glsl_get_combined_image_samplers(const ScInternalCompilerBase *compiler, const ScCombinedImageSampler **samplers, size_t *size); +#endif + + ScInternalResult sc_internal_compiler_get_decoration(const ScInternalCompilerBase *compiler, uint32_t *result, const uint32_t id, const spv::Decoration decoration); + ScInternalResult sc_internal_compiler_set_decoration(const ScInternalCompilerBase *compiler, const uint32_t id, const spv::Decoration decoration, const uint32_t argument); + ScInternalResult sc_internal_compiler_unset_decoration(const ScInternalCompilerBase *compiler, const uint32_t id, const spv::Decoration decoration); + ScInternalResult sc_internal_compiler_get_name(const ScInternalCompilerBase *compiler, const uint32_t id, const char **name); + ScInternalResult sc_internal_compiler_set_name(const ScInternalCompilerBase *compiler, const uint32_t id, const char *name); + ScInternalResult sc_internal_compiler_get_entry_points(const ScInternalCompilerBase *compiler, ScEntryPoint **entry_points, size_t *size); + ScInternalResult sc_internal_compiler_get_active_buffer_ranges(const ScInternalCompilerBase *compiler, uint32_t id, ScBufferRange **active_buffer_ranges, size_t *size); + ScInternalResult sc_internal_compiler_get_cleansed_entry_point_name(const ScInternalCompilerBase *compiler, const char *original_entry_point_name, 
const spv::ExecutionModel execution_model, const char **compiled_entry_point_name); + ScInternalResult sc_internal_compiler_get_shader_resources(const ScInternalCompilerBase *compiler, ScShaderResources *shader_resources); + ScInternalResult sc_internal_compiler_get_specialization_constants(const ScInternalCompilerBase *compiler, ScSpecializationConstant **constants, size_t *size); + + ScInternalResult sc_internal_compiler_set_scalar_constant(const ScInternalCompilerBase *compiler, const uint32_t id, const uint32_t constant_high_bits, const uint32_t constant_low_bits); + ScInternalResult sc_internal_compiler_get_type(const ScInternalCompilerBase *compiler, const uint32_t id, const ScType **spirv_type); + ScInternalResult sc_internal_compiler_get_member_name(const ScInternalCompilerBase *compiler, const uint32_t id, const uint32_t index, const char **name); + ScInternalResult sc_internal_compiler_get_member_decoration(const ScInternalCompilerBase *compiler, const uint32_t id, const uint32_t index, const spv::Decoration decoration, uint32_t *result); + ScInternalResult sc_internal_compiler_set_member_decoration(const ScInternalCompilerBase *compiler, const uint32_t id, const uint32_t index, const spv::Decoration decoration, const uint32_t argument); + ScInternalResult sc_internal_compiler_get_declared_struct_size(const ScInternalCompilerBase *compiler, const uint32_t id, uint32_t *result); + ScInternalResult sc_internal_compiler_get_declared_struct_member_size(const ScInternalCompilerBase *compiler, const uint32_t id, const uint32_t index, uint32_t *result); + ScInternalResult sc_internal_compiler_rename_interface_variable(const ScInternalCompilerBase *compiler, const ScResource *resources, const size_t resources_size, uint32_t location, const char *name); + ScInternalResult sc_internal_compiler_get_work_group_size_specialization_constants(const ScInternalCompilerBase *compiler, ScSpecializationConstant **constants); + ScInternalResult sc_internal_compiler_compile(const ScInternalCompilerBase *compiler, const char **shader); + ScInternalResult sc_internal_compiler_delete(ScInternalCompilerBase *compiler); + + ScInternalResult sc_internal_free_pointer(void *pointer); +} diff --git a/third_party/rust/spirv_cross/tests/common/mod.rs b/third_party/rust/spirv_cross/tests/common/mod.rs new file mode 100644 index 000000000000..f5df87f9734c --- /dev/null +++ b/third_party/rust/spirv_cross/tests/common/mod.rs @@ -0,0 +1,9 @@ +#[allow(clippy::cast_ptr_alignment)] +pub fn words_from_bytes(buf: &[u8]) -> &[u32] { + unsafe { + std::slice::from_raw_parts( + buf.as_ptr() as *const u32, + buf.len() / std::mem::size_of::(), + ) + } +} diff --git a/third_party/rust/spirv_cross/tests/glsl_tests.rs b/third_party/rust/spirv_cross/tests/glsl_tests.rs new file mode 100644 index 000000000000..e62e21c4291d --- /dev/null +++ b/third_party/rust/spirv_cross/tests/glsl_tests.rs @@ -0,0 +1,216 @@ +use spirv_cross::{glsl, spirv}; + +mod common; +use crate::common::words_from_bytes; + +#[test] +fn glsl_compiler_options_has_default() { + let compiler_options = glsl::CompilerOptions::default(); + assert_eq!(compiler_options.vertex.invert_y, false); + assert_eq!(compiler_options.vertex.transform_clip_space, false); +} + +#[test] +fn ast_compiles_to_glsl() { + let mut ast = spirv::Ast::::parse(&spirv::Module::from_words(words_from_bytes( + include_bytes!("shaders/simple.vert.spv"), + ))) + .unwrap(); + ast.set_compiler_options(&glsl::CompilerOptions { + version: glsl::Version::V4_60, + vertex: 
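// The Rust test helper words_from_bytes above reinterprets the .spv byte
// buffer as u32 words in place, which presumes 4-byte alignment and host
// endianness (hence the clippy allow). A C++ sketch of the alignment-proof
// alternative, copying into a word vector the vendored Parser could consume:
#include <cstddef>
#include <cstdint>
#include <cstring>
#include <vector>

static std::vector<uint32_t> words_from_bytes(const uint8_t *bytes, size_t len)
{
    std::vector<uint32_t> words(len / sizeof(uint32_t));
    std::memcpy(words.data(), bytes, words.size() * sizeof(uint32_t)); // safe for any source alignment
    return words;
}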
+#[allow(clippy::cast_ptr_alignment)] +pub fn words_from_bytes(buf: &[u8]) -> &[u32] { + unsafe { + std::slice::from_raw_parts( + buf.as_ptr() as *const u32, + buf.len() / std::mem::size_of::<u32>(), + ) + } +} diff --git a/third_party/rust/spirv_cross/tests/glsl_tests.rs b/third_party/rust/spirv_cross/tests/glsl_tests.rs new file mode 100644 index 000000000000..e62e21c4291d --- /dev/null +++ b/third_party/rust/spirv_cross/tests/glsl_tests.rs @@ -0,0 +1,216 @@ +use spirv_cross::{glsl, spirv}; + +mod common; +use crate::common::words_from_bytes; + +#[test] +fn glsl_compiler_options_has_default() { + let compiler_options = glsl::CompilerOptions::default(); + assert_eq!(compiler_options.vertex.invert_y, false); + assert_eq!(compiler_options.vertex.transform_clip_space, false); +} + +#[test] +fn ast_compiles_to_glsl() { + let mut ast = spirv::Ast::<glsl::Target>::parse(&spirv::Module::from_words(words_from_bytes( + include_bytes!("shaders/simple.vert.spv"), + ))) + .unwrap(); + ast.set_compiler_options(&glsl::CompilerOptions { + version: glsl::Version::V4_60, + vertex: glsl::CompilerVertexOptions::default(), + }) + .unwrap(); + + assert_eq!( + ast.compile().unwrap(), + "\ +#version 460 + +layout(std140) uniform uniform_buffer_object +{ + mat4 u_model_view_projection; + float u_scale; +} _22; + +layout(location = 0) out vec3 v_normal; +layout(location = 1) in vec3 a_normal; +layout(location = 0) in vec4 a_position; + +void main() +{ + v_normal = a_normal; + gl_Position = (_22.u_model_view_projection * a_position) * _22.u_scale; +} + +" + ); +} + +#[test] +fn ast_compiles_all_versions_to_glsl() { + use spirv_cross::glsl::Version::*; + + let module = + spirv::Module::from_words(words_from_bytes(include_bytes!("shaders/simple.vert.spv"))); + let mut ast = spirv::Ast::<glsl::Target>::parse(&module).unwrap(); + + let versions = [ + V1_10, V1_20, V1_30, V1_40, V1_50, V3_30, V4_00, V4_10, V4_20, V4_30, V4_40, V4_50, V4_60, + V1_00Es, V3_00Es, + ]; + for &version in versions.iter() { + if ast + .set_compiler_options(&glsl::CompilerOptions { + version, + vertex: glsl::CompilerVertexOptions::default(), + }) + .is_err() + { + panic!("Did not compile"); + } + } +} + +#[test] +fn ast_renames_interface_variables() { + let vert = + spirv::Module::from_words(words_from_bytes(include_bytes!("shaders/struct.vert.spv"))); + let mut vert_ast = spirv::Ast::<glsl::Target>::parse(&vert).unwrap(); + vert_ast + .set_compiler_options(&glsl::CompilerOptions { + version: glsl::Version::V1_00Es, + vertex: glsl::CompilerVertexOptions::default(), + }) + .unwrap(); + let vert_stage_outputs = vert_ast.get_shader_resources().unwrap().stage_outputs; + vert_ast + .rename_interface_variable(&vert_stage_outputs, 0, "renamed") + .unwrap(); + + let vert_output = vert_ast.compile().unwrap(); + + let frag = + spirv::Module::from_words(words_from_bytes(include_bytes!("shaders/struct.frag.spv"))); + let mut frag_ast = spirv::Ast::<glsl::Target>::parse(&frag).unwrap(); + frag_ast + .set_compiler_options(&glsl::CompilerOptions { + version: glsl::Version::V1_00Es, + vertex: glsl::CompilerVertexOptions::default(), + }) + .unwrap(); + let frag_stage_inputs = frag_ast.get_shader_resources().unwrap().stage_inputs; + frag_ast + .rename_interface_variable(&frag_stage_inputs, 0, "renamed") + .unwrap(); + let frag_output = frag_ast.compile().unwrap(); + + assert_eq!( + vert_output, + "\ +#version 100 + +struct SPIRV_Cross_Interface_Location0 +{ + vec4 InterfaceMember0; + vec4 InterfaceMember1; + vec4 InterfaceMember2; + vec4 InterfaceMember3; +}; + +varying vec4 renamed_InterfaceMember0; +varying vec4 renamed_InterfaceMember1; +varying vec4 renamed_InterfaceMember2; +varying vec4 renamed_InterfaceMember3; +attribute vec4 a; +attribute vec4 b; +attribute vec4 c; +attribute vec4 d; + +void main() +{ + { + SPIRV_Cross_Interface_Location0 renamed = SPIRV_Cross_Interface_Location0(a, b, c, d); + renamed_InterfaceMember0 = renamed.InterfaceMember0; + renamed_InterfaceMember1 = renamed.InterfaceMember1; + renamed_InterfaceMember2 = renamed.InterfaceMember2; + renamed_InterfaceMember3 = renamed.InterfaceMember3; + } +} + +" + ); + + assert_eq!( + frag_output, + "\ +#version 100 +precision mediump float; +precision highp int; + +struct SPIRV_Cross_Interface_Location0 +{ + vec4 InterfaceMember0; + vec4 InterfaceMember1; + vec4 InterfaceMember2; + vec4 InterfaceMember3; +}; + +varying vec4 renamed_InterfaceMember0; +varying vec4 renamed_InterfaceMember1; +varying vec4 renamed_InterfaceMember2; +varying vec4 renamed_InterfaceMember3; + +void main() +{ + gl_FragData[0] = vec4(renamed_InterfaceMember0.x, renamed_InterfaceMember1.y, renamed_InterfaceMember2.z,
renamed_InterfaceMember3.w); +} + +" + ); +} + +#[test] +fn ast_can_rename_combined_image_samplers() { + let mut ast = spirv::Ast::<glsl::Target>::parse(&spirv::Module::from_words(words_from_bytes( + include_bytes!("shaders/sampler.frag.spv"), + ))) + .unwrap(); + ast.set_compiler_options(&glsl::CompilerOptions { + version: glsl::Version::V4_10, + vertex: glsl::CompilerVertexOptions::default(), + }) + .unwrap(); + for cis in ast.get_combined_image_samplers().unwrap() { + let new_name = "combined_sampler".to_string() + + "_" + + &cis.sampler_id.to_string() + + "_" + + &cis.image_id.to_string() + + "_" + + &cis.combined_id.to_string(); + ast.set_name(cis.combined_id, &new_name).unwrap(); + assert_eq!(new_name, ast.get_name(cis.combined_id).unwrap()); + } + + assert_eq!( + ast.compile().unwrap(), + "\ +#version 410 +#ifdef GL_ARB_shading_language_420pack +#extension GL_ARB_shading_language_420pack : require +#endif + +uniform sampler2D combined_sampler_16_12_26; + +layout(location = 0) out vec4 target0; +layout(location = 0) in vec2 v_uv; + +void main() +{ + target0 = texture(combined_sampler_16_12_26, v_uv); +} + +" + ); +} diff --git a/third_party/rust/spirv_cross/tests/hlsl_tests.rs b/third_party/rust/spirv_cross/tests/hlsl_tests.rs new file mode 100644 index 000000000000..fc9a3c73f44c --- /dev/null +++ b/third_party/rust/spirv_cross/tests/hlsl_tests.rs @@ -0,0 +1,106 @@ +use spirv_cross::{hlsl, spirv}; + +mod common; +use crate::common::words_from_bytes; + +#[test] +fn hlsl_compiler_options_has_default() { + let compiler_options = hlsl::CompilerOptions::default(); + assert_eq!(compiler_options.shader_model, hlsl::ShaderModel::V3_0); + assert_eq!(compiler_options.point_size_compat, false); + assert_eq!(compiler_options.point_coord_compat, false); + assert_eq!(compiler_options.vertex.invert_y, false); + assert_eq!(compiler_options.vertex.transform_clip_space, false); +} + +#[test] +fn ast_compiles_to_hlsl() { + let module = + spirv::Module::from_words(words_from_bytes(include_bytes!("shaders/simple.vert.spv"))); + let mut ast = spirv::Ast::<hlsl::Target>::parse(&module).unwrap(); + ast.set_compiler_options(&hlsl::CompilerOptions { + shader_model: hlsl::ShaderModel::V6_0, + point_size_compat: false, + point_coord_compat: false, + vertex: hlsl::CompilerVertexOptions::default(), + }) + .unwrap(); + + assert_eq!( + ast.compile().unwrap(), + "\ +cbuffer uniform_buffer_object +{ + row_major float4x4 _22_u_model_view_projection : packoffset(c0); + float _22_u_scale : packoffset(c4); +}; + + +static float4 gl_Position; +static float3 v_normal; +static float3 a_normal; +static float4 a_position; + +struct SPIRV_Cross_Input +{ + float4 a_position : TEXCOORD0; + float3 a_normal : TEXCOORD1; +}; + +struct SPIRV_Cross_Output +{ + float3 v_normal : TEXCOORD0; + float4 gl_Position : SV_Position; +}; + +void vert_main() +{ + v_normal = a_normal; + gl_Position = mul(a_position, _22_u_model_view_projection) * _22_u_scale; +} + +SPIRV_Cross_Output main(SPIRV_Cross_Input stage_input) +{ + a_normal = stage_input.a_normal; + a_position = stage_input.a_position; + vert_main(); + SPIRV_Cross_Output stage_output; + stage_output.gl_Position = gl_Position; + stage_output.v_normal = v_normal; + return stage_output; +} +" + ); +} + +#[test] +fn ast_compiles_all_shader_models_to_hlsl() { + let module = + spirv::Module::from_words(words_from_bytes(include_bytes!("shaders/simple.vert.spv"))); + let mut ast = spirv::Ast::<hlsl::Target>::parse(&module).unwrap(); + + let shader_models = [ + hlsl::ShaderModel::V3_0, + hlsl::ShaderModel::V4_0, + 
hlsl::ShaderModel::V4_0L9_0, + hlsl::ShaderModel::V4_0L9_1, + hlsl::ShaderModel::V4_0L9_3, + hlsl::ShaderModel::V4_1, + hlsl::ShaderModel::V5_0, + hlsl::ShaderModel::V5_1, + hlsl::ShaderModel::V6_0, + ]; + for &shader_model in shader_models.iter() { + if ast + .set_compiler_options(&hlsl::CompilerOptions { + shader_model, + point_size_compat: false, + point_coord_compat: false, + vertex: hlsl::CompilerVertexOptions::default(), + }) + .is_err() + { + panic!("Did not compile"); + } + } +} diff --git a/third_party/rust/spirv_cross/tests/msl_tests.rs b/third_party/rust/spirv_cross/tests/msl_tests.rs new file mode 100644 index 000000000000..56649e2ade6a --- /dev/null +++ b/third_party/rust/spirv_cross/tests/msl_tests.rs @@ -0,0 +1,309 @@ +use spirv_cross::{msl, spirv}; + +use std::collections::BTreeMap; + +mod common; +use crate::common::words_from_bytes; + +#[test] +fn msl_compiler_options_has_default() { + let compiler_options = msl::CompilerOptions::default(); + assert_eq!(compiler_options.vertex.invert_y, false); + assert_eq!(compiler_options.vertex.transform_clip_space, false); + assert!(compiler_options.resource_binding_overrides.is_empty()); + assert!(compiler_options.vertex_attribute_overrides.is_empty()); +} + +#[test] +fn is_rasterization_enabled() { + let modules = [ + ( + true, + spirv::Module::from_words(words_from_bytes(include_bytes!("shaders/simple.vert.spv"))), + ), + ( + false, + spirv::Module::from_words(words_from_bytes(include_bytes!( + "shaders/rasterize_disabled.vert.spv" + ))), + ), + ]; + for (expected, module) in &modules { + let mut ast = spirv::Ast::<msl::Target>::parse(&module).unwrap(); + ast.compile().unwrap(); + assert_eq!(*expected, ast.is_rasterization_enabled().unwrap()); + } +} + +#[test] +fn ast_compiles_to_msl() { + let module = + spirv::Module::from_words(words_from_bytes(include_bytes!("shaders/simple.vert.spv"))); + let mut ast = spirv::Ast::<msl::Target>::parse(&module).unwrap(); + + let mut compiler_options = msl::CompilerOptions::default(); + + compiler_options.resource_binding_overrides.insert( + msl::ResourceBindingLocation { + stage: spirv::ExecutionModel::Vertex, + desc_set: 0, + binding: 0, + }, + msl::ResourceBinding { + buffer_id: 5, + texture_id: 6, + sampler_id: 7, + }, + ); + + ast.set_compiler_options(&compiler_options).unwrap(); + assert_eq!( + ast.compile().unwrap(), + "\ +#include <metal_stdlib> +#include <simd/simd.h> + +using namespace metal; + +struct uniform_buffer_object +{ + float4x4 u_model_view_projection; + float u_scale; +}; + +struct main0_out +{ + float3 v_normal [[user(locn0)]]; + float4 gl_Position [[position]]; +}; + +struct main0_in +{ + float4 a_position [[attribute(0)]]; + float3 a_normal [[attribute(1)]]; +}; + +vertex main0_out main0(main0_in in [[stage_in]], constant uniform_buffer_object& _22 [[buffer(5)]]) +{ + main0_out out = {}; + out.v_normal = in.a_normal; + out.gl_Position = (_22.u_model_view_projection * in.a_position) * _22.u_scale; + return out; +} + +" + ); + assert_eq!( + ast.get_cleansed_entry_point_name("main", spirv::ExecutionModel::Vertex) + .unwrap(), + "main0" + ); +}
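+ +// Editor's note (descriptive comment, not part of the vendored crate): the override in the test above maps (set 0, binding 0) of the vertex stage to Metal buffer slot 5, which is why the generated entry point binds the uniform buffer as [[buffer(5)]]; the next test uses no override and gets the default [[buffer(0)]].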
+ +#[test] +fn captures_output_to_buffer() { + let module = + spirv::Module::from_words(words_from_bytes(include_bytes!("shaders/simple.vert.spv"))); + let mut ast = spirv::Ast::<msl::Target>::parse(&module).unwrap(); + let compiler_options = msl::CompilerOptions { + capture_output_to_buffer: true, + output_buffer_index: 456, + ..Default::default() + }; + ast.set_compiler_options(&compiler_options).unwrap(); + assert_eq!( + ast.compile().unwrap(), + "\ +#include <metal_stdlib> +#include <simd/simd.h> + +using namespace metal; + +struct uniform_buffer_object +{ + float4x4 u_model_view_projection; + float u_scale; +}; + +struct main0_out +{ + float3 v_normal [[user(locn0)]]; + float4 gl_Position [[position]]; +}; + +struct main0_in +{ + float4 a_position [[attribute(0)]]; + float3 a_normal [[attribute(1)]]; +}; + +vertex void main0(main0_in in [[stage_in]], constant uniform_buffer_object& _22 [[buffer(0)]], uint gl_VertexIndex [[vertex_id]], uint gl_BaseVertex [[base_vertex]], uint gl_InstanceIndex [[instance_id]], uint gl_BaseInstance [[base_instance]], device main0_out* spvOut [[buffer(456)]], device uint* spvIndirectParams [[buffer(29)]]) +{ + device main0_out& out = spvOut[(gl_InstanceIndex - gl_BaseInstance) * spvIndirectParams[0] + gl_VertexIndex - gl_BaseVertex]; + out.v_normal = in.a_normal; + out.gl_Position = (_22.u_model_view_projection * in.a_position) * _22.u_scale; +} + +" + ); +} + +#[test] +fn swizzles_texture_samples() { + let module = + spirv::Module::from_words(words_from_bytes(include_bytes!("shaders/sampler.frag.spv"))); + let mut ast = spirv::Ast::<msl::Target>::parse(&module).unwrap(); + let compiler_options = msl::CompilerOptions { + swizzle_texture_samples: true, + swizzle_buffer_index: 123, + ..Default::default() + }; + ast.set_compiler_options(&compiler_options).unwrap(); + assert_eq!( + ast.compile().unwrap(), + "\ +#pragma clang diagnostic ignored \"-Wmissing-prototypes\" + +#include <metal_stdlib> +#include <simd/simd.h> + +using namespace metal; + +struct main0_out +{ + float4 target0 [[color(0)]]; +}; + +struct main0_in +{ + float2 v_uv [[user(locn0)]]; +}; + +template<typename T> struct spvRemoveReference { typedef T type; }; +template<typename T> struct spvRemoveReference<thread T&> { typedef T type; }; +template<typename T> struct spvRemoveReference<thread T&&> { typedef T type; }; +template<typename T> inline constexpr thread T&& spvForward(thread typename spvRemoveReference<T>::type& x) +{ + return static_cast<thread T&&>(x); +} +template<typename T> inline constexpr thread T&& spvForward(thread typename spvRemoveReference<T>::type&& x) +{ + return static_cast<thread T&&>(x); +} + +enum class spvSwizzle : uint +{ + none = 0, + zero, + one, + red, + green, + blue, + alpha +}; + +template<typename T> +inline T spvGetSwizzle(vec<T, 4> x, T c, spvSwizzle s) +{ + switch (s) + { + case spvSwizzle::none: + return c; + case spvSwizzle::zero: + return 0; + case spvSwizzle::one: + return 1; + case spvSwizzle::red: + return x.r; + case spvSwizzle::green: + return x.g; + case spvSwizzle::blue: + return x.b; + case spvSwizzle::alpha: + return x.a; + } +} + +// Wrapper function that swizzles texture samples and fetches.
+template<typename T> +inline vec<T, 4> spvTextureSwizzle(vec<T, 4> x, uint s) +{ + if (!s) + return x; + return vec<T, 4>(spvGetSwizzle(x, x.r, spvSwizzle((s >> 0) & 0xFF)), spvGetSwizzle(x, x.g, spvSwizzle((s >> 8) & 0xFF)), spvGetSwizzle(x, x.b, spvSwizzle((s >> 16) & 0xFF)), spvGetSwizzle(x, x.a, spvSwizzle((s >> 24) & 0xFF))); +} + +template<typename T> +inline T spvTextureSwizzle(T x, uint s) +{ + return spvTextureSwizzle(vec<T, 4>(x, 0, 0, 1), s).x; +} + +fragment main0_out main0(main0_in in [[stage_in]], constant uint* spvSwizzleConstants [[buffer(123)]], texture2d<float> u_texture [[texture(0)]], sampler u_sampler [[sampler(0)]]) +{ + main0_out out = {}; + constant uint& u_textureSwzl = spvSwizzleConstants[0]; + out.target0 = spvTextureSwizzle(u_texture.sample(u_sampler, in.v_uv), u_textureSwzl); + return out; +} + +" + ); +} + +#[test] +fn sets_argument_buffer_index() { + let module = + spirv::Module::from_words(words_from_bytes(include_bytes!("shaders/sampler.frag.spv"))); + let mut ast = spirv::Ast::<msl::Target>::parse(&module).unwrap(); + let mut resource_binding_overrides = BTreeMap::new(); + resource_binding_overrides.insert(spirv_cross::msl::ResourceBindingLocation { + stage: spirv::ExecutionModel::Fragment, + desc_set: 0, + binding: msl::ARGUMENT_BUFFER_BINDING, + }, spirv_cross::msl::ResourceBinding { + buffer_id: 2, + texture_id: 0, + sampler_id: 0, + }); + let compiler_options = msl::CompilerOptions { + resource_binding_overrides, + version: spirv_cross::msl::Version::V2_0, + enable_argument_buffers: true, + ..Default::default() + }; + ast.set_compiler_options(&compiler_options).unwrap(); + assert_eq!( + ast.compile().unwrap(), + "\ +#include <metal_stdlib> +#include <simd/simd.h> + +using namespace metal; + +struct spvDescriptorSetBuffer0 +{ + texture2d<float> u_texture [[id(0)]]; + sampler u_sampler [[id(1)]]; +}; + +struct main0_out +{ + float4 target0 [[color(0)]]; +}; + +struct main0_in +{ + float2 v_uv [[user(locn0)]]; +}; + +fragment main0_out main0(main0_in in [[stage_in]], constant spvDescriptorSetBuffer0& spvDescriptorSet0 [[buffer(2)]]) +{ + main0_out out = {}; + out.target0 = spvDescriptorSet0.u_texture.sample(spvDescriptorSet0.u_sampler, in.v_uv); + return out; +} + +", + ); +} diff --git a/third_party/rust/spirv_cross/tests/shaders/array.vert b/third_party/rust/spirv_cross/tests/shaders/array.vert new file mode 100644 index 000000000000..cd359020af24 --- /dev/null +++ b/third_party/rust/spirv_cross/tests/shaders/array.vert @@ -0,0 +1,20 @@ +#version 310 es + +layout(std140) uniform uniform_buffer_object +{ + mat4 u_model_view_projection; + float u_scale; + vec3 u_bias[3]; +}; + +layout(location = 0) in vec4 a_position; +layout(location = 1) in vec3 a_normal; +layout(location = 0) out vec3 v_normal; +layout(location = 1) out vec3 v_bias; + +void main() +{ + v_normal = a_normal; + v_bias = u_bias[gl_VertexIndex]; + gl_Position = u_model_view_projection * a_position * u_scale; +} diff --git a/third_party/rust/spirv_cross/tests/shaders/array.vert.spv b/third_party/rust/spirv_cross/tests/shaders/array.vert.spv new file mode 100644 index 0000000000000000000000000000000000000000..7445d1b9ef55f22891712463175bfb906eea7aa7 GIT binary patch literal 1508 zcmY+CS#Q%o6osd8orbNYlrBICP6K5r1RkJ5LLmB-BK09srM`}w#z97jT{#OCKLUw= z%L@|c8+&M&=w#-cyUo3xpw?b7X2}GmVcwW>t(%$@V{Vwb+q);HC!NW3(0TjLxva{Y zh0mJ2Sj)mVPBYHe1k_u?Js}eIg+M=I4)qr*VlgI^LfqVDc`=ShI!Xy$p7=j)+2x(v zei~2kX_yW9&a!l&IifJ&eP<(y(n^A(maz-=zO@_ z%udeZQSwi#InCD!%qEGnLV3{J*O5I-ifMBBB^xA{rQR&{UQ(1M{OkImvlP?ICuvoC z#p%>`dYVm7(;o?bq2?8F&|((MFt!&y)O0mLX6Ez4nLC_(aK6DJzaw53ZVSh<``Lt!
zmvhk3wE5FIR$cF-hf>8|%?rQJ3x3QCttWJKe`3+N3$xo3Sa9aYeP0OF@qLsp>Pn** zw6dqb`T3m8#K&!@5_-$Mw6gA&&h#4z+p0+qa9(gE-l)##A1cmFz5Tk*)b)0_zl-;0 z#`u$eRXh24+dKMS(~bt;)IW2N1lB>t*L5a-SDf!dEwpAedvcHy8+X2|oj#c@cYrfn zG-_;Vj|A>b4fwWzo%kK?*wBfG^DWS@KhTaZ_h^bUU*8w@N8;QUop^X8p!s?&omu$r zX(u0jz@KPe639tBe819s{PT+A&-);r8NfT*sYU#uW`vFTve17^;XQwpN)I;$zS$>P z`L=Lu^nYKV_akMe?_GgeGB0dC$3va5k%R9IKT;KXYYW7*1pMjqv49Teo=*jO{G}N9 tGXWoT^3dafKpuK0j(%SX=s&BtS2`1mP8_p%EkpwKxgQ+AKbrD;;V=5YVb=fv literal 0 HcmV?d00001 diff --git a/third_party/rust/spirv_cross/tests/shaders/rasterize_disabled.vert b/third_party/rust/spirv_cross/tests/shaders/rasterize_disabled.vert new file mode 100644 index 000000000000..959183d2cb06 --- /dev/null +++ b/third_party/rust/spirv_cross/tests/shaders/rasterize_disabled.vert @@ -0,0 +1,22 @@ +#version 450 + +layout(location = 0) in vec4 input_buffer; + +layout(binding = 0, std140) uniform iu +{ + uvec4 m[2]; +} input_uniform; + +layout(binding = 1, std430) writeonly buffer ob +{ + uvec4 m[2]; +} output_buffer; + +void main() +{ + gl_Position = input_buffer; + for (int i = 0; i < 2; i++) + { + output_buffer.m[i] = input_uniform.m[i]; + } +} diff --git a/third_party/rust/spirv_cross/tests/shaders/rasterize_disabled.vert.spv b/third_party/rust/spirv_cross/tests/shaders/rasterize_disabled.vert.spv new file mode 100644 index 0000000000000000000000000000000000000000..869778bfffd8a5ad768e370efa46fd66bff21c70 GIT binary patch literal 1552 zcmY+ETTc^V5QaZ!3n&UACl6Q_P*Kr(HYP?BqDi^n!i2l_%I5yTeq)e*YCL=X$cVX6 zQ53%UKsr=~Pma>A`bBsFuJAL7{6Uc#5#up6YR}^P+&O&T>VIhUv(^_(&=ObI*Bhi+ z+UtT&7xB(h;AuBINWZp>i#~hdd)rA*-=%}>sQa-s@;R}-ywe%^YRu=XlJhuo+C9y) z!{fZ&ZuLvN5>~{Em`d0}_KxjYEp%f}3g1+?r7zF^D`1a1;Avjws7o>D9(vg`@22fu zUwn^gyp0rP#L9OmZyz!DHqIR*rY`2qn|RcXYl66E9hS5=kyG#`cyDa4tA$+~?dk54 z_0;WQzI$6kiuMM&vubZ6&+6BTdjO5L5}^^SJ14Vz^nB0 zi_~`aWq!{f6Ij|A+UL-nZw>oA(s;2tnzxEI^vMZv7m)VU|7vLa{fOCT4w=B#wU72Z zESc|p^;z3|ZQnvn|02>_`n8vbx^-f`8|d!5#BblweG{ZRx^LWX1Kl|H+e9X?Se*`A z$P%lDo9M=gIg_^U8FRPM6FBDjP9;_ichQX#GgsTU5_A6hBmHdt$6yoi3%+sVoLO@3 zUm!K4wcOpmJ8$tCTX`AwUb{qjt0ma`d*e4$&^AWhT=C!3Tt!-2`IB0H*VkZ4|8=B( zrM8))(U* tqWc!cS<}75jB{>xu(vYy@*lLlzcIIaJb}f$d%TO-_+8xLFR$_v{s*>^V;2Ab literal 0 HcmV?d00001 diff --git a/third_party/rust/spirv_cross/tests/shaders/sampler.frag b/third_party/rust/spirv_cross/tests/shaders/sampler.frag new file mode 100644 index 000000000000..ce499c4126c2 --- /dev/null +++ b/third_party/rust/spirv_cross/tests/shaders/sampler.frag @@ -0,0 +1,12 @@ +#version 450 +#extension GL_ARB_separate_shader_objects : enable + +layout(location = 0) in vec2 v_uv; +layout(location = 0) out vec4 target0; + +layout(set = 0, binding = 0) uniform texture2D u_texture; +layout(set = 0, binding = 1) uniform sampler u_sampler; + +void main() { + target0 = texture(sampler2D(u_texture, u_sampler), v_uv); +} diff --git a/third_party/rust/spirv_cross/tests/shaders/sampler.frag.spv b/third_party/rust/spirv_cross/tests/shaders/sampler.frag.spv new file mode 100644 index 0000000000000000000000000000000000000000..8eab7309b45811017f7ef9b785b9b526fe2fea64 GIT binary patch literal 720 zcmYk3OHTqp5QJNnm!dpGK{PR-cfvu9iP1zaTsUc>$IU7e9}y+H3-R~)tGt+4-vDtZ z>FueSn(nUIAa`6cTeFgttz(PTu$&mPiWPkxj_yYNBpvrpP6sHe7I-0=nr`NmiAnVO z!NIzsrKn2>uLjx$s%rMFOL-}VqwxCnCQRHkilfwp$xAeLarpW8=AP2TtRPjkCYZWu z6hFIkp!0%l3eUpSeWkP5X%AQTToO&D?=JRS-p?`*XLCUtF6$fuw7}Fk^5`G;GMJj# zb1>)l=RRIn^_p^t&z^T|e&5o*D6Xq$OV4|N@z>S2tjvqTUo0_R;W~1?p^Tn>*|oyd zs4NfAQUi=H*1o2wDx<4;hJU!wf$_sSP{(a$YB1-HGJWyG-CbpR!qWqccepdXJvuzG znEgjgLHSZFHMbPZ1g3vOLGOF1=-X7Ffk#81wt^lH((pce3Uu&S>cTt0{c4JH#UDKg BEaw0K 
literal 0 HcmV?d00001 diff --git a/third_party/rust/spirv_cross/tests/shaders/simple.vert b/third_party/rust/spirv_cross/tests/shaders/simple.vert new file mode 100644 index 000000000000..6b1ff7acb83e --- /dev/null +++ b/third_party/rust/spirv_cross/tests/shaders/simple.vert @@ -0,0 +1,17 @@ +#version 310 es + +layout(std140) uniform uniform_buffer_object +{ + mat4 u_model_view_projection; + float u_scale; +}; + +layout(location = 0) in vec4 a_position; +layout(location = 1) in vec3 a_normal; +layout(location = 0) out vec3 v_normal; + +void main() +{ + v_normal = a_normal; + gl_Position = u_model_view_projection * a_position * u_scale; +} diff --git a/third_party/rust/spirv_cross/tests/shaders/simple.vert.spv b/third_party/rust/spirv_cross/tests/shaders/simple.vert.spv new file mode 100644 index 0000000000000000000000000000000000000000..a936f70c06b924aecefc0a7794ba548fe879957f GIT binary patch literal 1172 zcmY+COHbQC6oscr?C>a19tGM0HA%ZdLM#APs;FQU7Az_ubvrUJL9P-TISxhqh)Vsp z-5~XRV~?O?UC%vd&fJ&XZuPsyywWpcKAUX`<8&lfOyuvU|8cSM8go0c z-(C5`m6dxy8|#5*aaK&SDl2laUC)!(^I2Y9W{(bCqsgRx)L9&I?h>BJ1%bA$lhn}m%S?sBz3OHYsfXVlk0Tke2#U0tL}$6r1mb9X9k)A zeeX0HFf#)a=Ufp&U+}EJaSq2mP%r3OvU9}g^wN{D!1NPpUzG~ouI}(>;IGSzwI{L->B$A=JQ1F0eE6>oW**^uPCoU7b8zUH zcniYB<6qj*n;8U-_)EgXf+s$hZw~iXcv*WQBgbBYSLCxcg;%u`FT`gSaCJ`|#i;v) zCq9@N(L4D!8lOAh_u9!tO!Q!8_%eU^_UJ$Aj94LGUp}>S|5)FUGiqbO{}sbGKM~8! zUdx!*H=Qz9@R=0UwJD=k`bAHT?_}^`a%{<{?~m?)w`FMHi9>yfj5yTIJ!;*RC9>bm ay`FsT!gG&4KFRnNERPqzSP|UO=$1&_+Ao&Zn{w{Kg;fj$v;n^KCW? z)0WgwqBi$)QUx5Ti|d*fRgqn;hitK&Inh((^8VIW%$1txv_GEh58&LV$ox+cT%f;W z#3cS1Phk;lcV$&?%9^Kg686k@&=A`EggM^8?-7}VI!4aTzrDE|S~1r-|I5eR1IIf* HA=c;xzZVYf literal 0 HcmV?d00001 diff --git a/third_party/rust/spirv_cross/tests/shaders/struct.frag b/third_party/rust/spirv_cross/tests/shaders/struct.frag new file mode 100644 index 000000000000..26f55beb5a47 --- /dev/null +++ b/third_party/rust/spirv_cross/tests/shaders/struct.frag @@ -0,0 +1,18 @@ +#version 310 es +precision mediump float; + +struct W +{ + vec4 e; + vec4 f; + vec4 g; + vec4 h; +}; + +layout(location = 0) in W w; + +layout(location = 0) out vec4 color; + +void main() { + color = vec4(w.e.x, w.f.y, w.g.z, w.h.w); +} diff --git a/third_party/rust/spirv_cross/tests/shaders/struct.frag.spv b/third_party/rust/spirv_cross/tests/shaders/struct.frag.spv new file mode 100644 index 0000000000000000000000000000000000000000..ba31e4178b2b93f53e027255d505da0cca1a6e5d GIT binary patch literal 952 zcmYk4%S!`648~*IZL8HjtZjYPDISI5L5m;?p4LMTf(Osy8`Odd{_9=@zu)czCXh|O zG8=CjJDVaoQ^fDCX2q`y?)5p zvUFA26_2*9T{(Q@QR2=dojQ&f~v;NAL9O eJoZoSTAN`vK}s*w;STZ_nuJ{j>7QQnRQeCpj2~72 literal 0 HcmV?d00001 diff --git a/third_party/rust/spirv_cross/tests/shaders/struct.vert b/third_party/rust/spirv_cross/tests/shaders/struct.vert new file mode 100644 index 000000000000..dbbaa485dc44 --- /dev/null +++ b/third_party/rust/spirv_cross/tests/shaders/struct.vert @@ -0,0 +1,21 @@ +#version 310 es + +layout(location = 0) in vec4 a; +layout(location = 1) in vec4 b; +layout(location = 2) in vec4 c; +layout(location = 3) in vec4 d; + +struct V +{ + vec4 a; + vec4 b; + vec4 c; + vec4 d; +}; + +layout(location = 0) out V v; + +void main() +{ + v = V(a, b, c, d); +} diff --git a/third_party/rust/spirv_cross/tests/shaders/struct.vert.spv b/third_party/rust/spirv_cross/tests/shaders/struct.vert.spv new file mode 100644 index 0000000000000000000000000000000000000000..a275c8e052c8c3043f9538b86f3e1ff99d278ec3 GIT binary patch literal 672 
zcmYk2zfS@|5QWD(?f_AK2xvzz_QXPniP4sZ!oM9o4wIhzAj807BZ;hgWiReY$?CZ_bX7Ta4Tih(K?da3(vS$#-P2S@Fw%xxAWKM`a +F(l52lF$AXzbaaM|NT{Sdc-qaO{K%`+7byn`;y^;{58Wm+7W7b{?{SbhdT25QNvtc +&YSDc*)?Z7J0o0T=X0!vSU=X)5$i{+mRLVySouqk*!-NVW}g!m^z--)_C01~-d!$_ +Ielf?HLTwquwLb?_t^F1oOQ9@OV5=9VUbt&e?1Ifs?$GwI7_^62Geh7e4?1 literal 0 HcmV?d00001 diff --git a/third_party/rust/spirv_cross/tests/shaders/workgroup.comp b/third_party/rust/spirv_cross/tests/shaders/workgroup.comp new file mode 100644 index 000000000000..bedd81b6df2b --- /dev/null +++ b/third_party/rust/spirv_cross/tests/shaders/workgroup.comp @@ -0,0 +1,9 @@ +#version 450 + +layout(local_size_x_id = 5, local_size_y_id = 10, local_size_z_id = 15) in; + +void main() +{ + // See https://github.com/KhronosGroup/glslang/issues/557 + gl_WorkGroupSize; +} diff --git a/third_party/rust/spirv_cross/tests/shaders/workgroup.comp.spv b/third_party/rust/spirv_cross/tests/shaders/workgroup.comp.spv new file mode 100644 index 0000000000000000000000000000000000000000..dccb7fb8b16aa0adb47daa09bacbd7ab1b1e8501 GIT binary patch literal 360 zcmYk1y$ZrW5QNt|6QdDh5D`1YK8Sz#25Hjy1cHr)Hu{7ahr$5#lycC#^@S1bt zFEtV={%2}2J*3s*2hRc@1l|w42s~E>b73N9yqC)0b=;@&NL3dCXA^kLi9F^+9&={W flZ^+STixJf;=vr+gSH`$eK=^}>afR08y$r&eNGNi literal 0 HcmV?d00001 diff --git a/third_party/rust/spirv_cross/tests/spirv_tests.rs b/third_party/rust/spirv_cross/tests/spirv_tests.rs new file mode 100644 index 000000000000..2f0c0e7ffd8f --- /dev/null +++ b/third_party/rust/spirv_cross/tests/spirv_tests.rs @@ -0,0 +1,267 @@ +use spirv_cross::{hlsl as lang, spirv}; + +mod common; +use crate::common::words_from_bytes; + +#[test] +fn ast_gets_entry_points() { + let module = + spirv::Module::from_words(words_from_bytes(include_bytes!("shaders/simple.vert.spv"))); + let entry_points = spirv::Ast::<lang::Target>::parse(&module) + .unwrap() + .get_entry_points() + .unwrap(); + + assert_eq!(entry_points.len(), 1); + assert_eq!(entry_points[0].name, "main"); +} + +#[test] +fn ast_gets_shader_resources() { + let module = + spirv::Module::from_words(words_from_bytes(include_bytes!("shaders/simple.vert.spv"))); + let shader_resources = spirv::Ast::<lang::Target>::parse(&module) + .unwrap() + .get_shader_resources() + .unwrap(); + + let spirv::ShaderResources { + uniform_buffers, + stage_inputs, + stage_outputs, + ..
+ } = shader_resources; + + assert_eq!(uniform_buffers.len(), 1); + assert_eq!(uniform_buffers[0].name, "uniform_buffer_object"); + assert_eq!(shader_resources.storage_buffers.len(), 0); + assert_eq!(stage_inputs.len(), 2); + assert!(stage_inputs + .iter() + .any(|stage_input| stage_input.name == "a_normal")); + assert!(stage_inputs + .iter() + .any(|stage_input| stage_input.name == "a_position")); + assert_eq!(stage_outputs.len(), 1); + assert!(stage_outputs + .iter() + .any(|stage_output| stage_output.name == "v_normal")); + assert_eq!(shader_resources.subpass_inputs.len(), 0); + assert_eq!(shader_resources.storage_images.len(), 0); + assert_eq!(shader_resources.sampled_images.len(), 0); + assert_eq!(shader_resources.atomic_counters.len(), 0); + assert_eq!(shader_resources.push_constant_buffers.len(), 0); + assert_eq!(shader_resources.separate_images.len(), 0); + assert_eq!(shader_resources.separate_samplers.len(), 0); +} + +#[test] +fn ast_gets_decoration() { + let module = + spirv::Module::from_words(words_from_bytes(include_bytes!("shaders/simple.vert.spv"))); + let ast = spirv::Ast::<lang::Target>::parse(&module).unwrap(); + + let stage_inputs = ast.get_shader_resources().unwrap().stage_inputs; + let decoration = ast + .get_decoration(stage_inputs[0].id, spirv::Decoration::DescriptorSet) + .unwrap(); + assert_eq!(decoration, 0); +} + +#[test] +fn ast_sets_decoration() { + let module = + spirv::Module::from_words(words_from_bytes(include_bytes!("shaders/simple.vert.spv"))); + let mut ast = spirv::Ast::<lang::Target>::parse(&module).unwrap(); + + let stage_inputs = ast.get_shader_resources().unwrap().stage_inputs; + let updated_value = 3; + ast.set_decoration( + stage_inputs[0].id, + spirv::Decoration::DescriptorSet, + updated_value, + ) + .unwrap(); + assert_eq!( + ast.get_decoration(stage_inputs[0].id, spirv::Decoration::DescriptorSet) + .unwrap(), + updated_value + ); +} + +#[test] +fn ast_gets_type_member_types_and_array() { + let module = + spirv::Module::from_words(words_from_bytes(include_bytes!("shaders/simple.vert.spv"))); + let ast = spirv::Ast::<lang::Target>::parse(&module).unwrap(); + + let uniform_buffers = ast.get_shader_resources().unwrap().uniform_buffers; + + let is_struct = match ast.get_type(uniform_buffers[0].base_type_id).unwrap() { + spirv::Type::Struct { + member_types, + array, + } => { + assert_eq!(member_types.len(), 2); + assert_eq!(array.len(), 0); + true + } + _ => false, + }; + + assert!(is_struct); +}
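+ +// Editor's note (descriptive comment, not part of the vendored crate): `get_type` surfaces the ScType data exposed by the C wrapper earlier in this patch (base type, member type ids, array dimensions) as the `spirv::Type` enum that the match arms above and below pick apart.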
+ +#[test] +fn ast_gets_array_dimensions() { + let module = + spirv::Module::from_words(words_from_bytes(include_bytes!("shaders/array.vert.spv"))); + let ast = spirv::Ast::<lang::Target>::parse(&module).unwrap(); + + let uniform_buffers = ast.get_shader_resources().unwrap().uniform_buffers; + + let is_struct = match ast.get_type(uniform_buffers[0].base_type_id).unwrap() { + spirv::Type::Struct { member_types, .. } => { + assert_eq!(member_types.len(), 3); + let is_float = match ast.get_type(member_types[2]).unwrap() { + spirv::Type::Float { array } => { + assert_eq!(array.len(), 1); + assert_eq!(array[0], 3); + true + } + _ => false, + }; + assert!(is_float); + true + } + _ => false, + }; + + assert!(is_struct); +} + +#[test] +fn ast_gets_declared_struct_size_and_struct_member_size() { + let module = + spirv::Module::from_words(words_from_bytes(include_bytes!("shaders/simple.vert.spv"))); + let ast = spirv::Ast::<lang::Target>::parse(&module).unwrap(); + let uniform_buffers = ast.get_shader_resources().unwrap().uniform_buffers; + let mat4_size = 4 * 16; + let float_size = 4; + assert_eq!( + ast.get_declared_struct_size(uniform_buffers[0].base_type_id) + .unwrap(), + mat4_size + float_size + ); + assert_eq!( + ast.get_declared_struct_member_size(uniform_buffers[0].base_type_id, 0) + .unwrap(), + mat4_size + ); + assert_eq!( + ast.get_declared_struct_member_size(uniform_buffers[0].base_type_id, 1) + .unwrap(), + float_size + ); +} + +#[test] +fn ast_gets_member_name() { + let module = + spirv::Module::from_words(words_from_bytes(include_bytes!("shaders/simple.vert.spv"))); + let ast = spirv::Ast::<lang::Target>::parse(&module).unwrap(); + + let uniform_buffers = ast.get_shader_resources().unwrap().uniform_buffers; + + assert_eq!( + ast.get_member_name(uniform_buffers[0].base_type_id, 0) + .unwrap(), + "u_model_view_projection" + ); +} + +#[test] +fn ast_gets_member_decoration() { + let module = + spirv::Module::from_words(words_from_bytes(include_bytes!("shaders/simple.vert.spv"))); + let ast = spirv::Ast::<lang::Target>::parse(&module).unwrap(); + + let uniform_buffers = ast.get_shader_resources().unwrap().uniform_buffers; + + assert_eq!( + ast.get_member_decoration( + uniform_buffers[0].base_type_id, + 1, + spirv::Decoration::Offset + ) + .unwrap(), + 64 + ); +} + +#[test] +fn ast_sets_member_decoration() { + let module = + spirv::Module::from_words(words_from_bytes(include_bytes!("shaders/simple.vert.spv"))); + let mut ast = spirv::Ast::<lang::Target>::parse(&module).unwrap(); + + let uniform_buffers = ast.get_shader_resources().unwrap().uniform_buffers; + + let new_offset = 128; + + ast.set_member_decoration( + uniform_buffers[0].base_type_id, + 1, + spirv::Decoration::Offset, + new_offset, + ) + .unwrap(); + + assert_eq!( + ast.get_member_decoration( + uniform_buffers[0].base_type_id, + 1, + spirv::Decoration::Offset + ) + .unwrap(), + new_offset + ); +} + +#[test] +fn as_gets_specialization_constants() { + let comp = spirv::Module::from_words(words_from_bytes(include_bytes!( + "shaders/specialization.comp.spv" + ))); + let comp_ast = spirv::Ast::<lang::Target>::parse(&comp).unwrap(); + let specialization_constants = comp_ast.get_specialization_constants().unwrap(); + assert_eq!(specialization_constants[0].constant_id, 10); +} + +#[test] +fn as_gets_work_group_size_specialization_constants() { + let comp = spirv::Module::from_words(words_from_bytes(include_bytes!( + "shaders/workgroup.comp.spv" + ))); + let comp_ast = spirv::Ast::<lang::Target>::parse(&comp).unwrap(); + let work_group_size = comp_ast + .get_work_group_size_specialization_constants() + .unwrap(); + assert_eq!( + work_group_size, + spirv::WorkGroupSizeSpecializationConstants { + x: spirv::SpecializationConstant { + id: 7, + constant_id: 5, + }, + y: spirv::SpecializationConstant { + id: 8, + constant_id: 10, + }, + z: spirv::SpecializationConstant { + id: 9, + constant_id: 15, + }, + } + ); +} diff --git a/third_party/rust/storage-map/.cargo-checksum.json b/third_party/rust/storage-map/.cargo-checksum.json new
file mode 100644 index 000000000000..b5ea79580697 --- /dev/null +++ b/third_party/rust/storage-map/.cargo-checksum.json @@ -0,0 +1 @@ +{"files":{"Cargo.toml":"e20b19550244b4b01defb2ba97d19106658ebd265a7f6c1ce52011cef188021b","LICENSE":"c71d239df91726fc519c6eb72d318ec65820627232b2f796219e87dcf35d0ab4","README.md":"3c9074177c6dfe47137f5709f6d7590d2c10c07df505bb8b6a0e61d0cb6f12f6","src/lib.rs":"31740e3630007eed81077f2c4fe8aa73ac30047626635b07597c6321833044fd","tests/ten.rs":"7e511caa7b76f5be9631273328d97073437b868e9b3b1a065908d51a09abdb96"},"package":"fd0a4829a5c591dc24a944a736d6b1e4053e51339a79fd5d4702c4c999a9c45e"} \ No newline at end of file diff --git a/third_party/rust/storage-map/Cargo.toml b/third_party/rust/storage-map/Cargo.toml new file mode 100644 index 000000000000..d5ce1b937599 --- /dev/null +++ b/third_party/rust/storage-map/Cargo.toml @@ -0,0 +1,27 @@ +# THIS FILE IS AUTOMATICALLY GENERATED BY CARGO +# +# When uploading crates to the registry Cargo will automatically +# "normalize" Cargo.toml files for maximal compatibility +# with all versions of Cargo and also rewrite `path` dependencies +# to registry (e.g., crates.io) dependencies +# +# If you believe there's an error in this file please file an +# issue against the rust-lang/cargo repository. If you're +# editing this file be aware that the upstream Cargo.toml +# will likely look very different (and much more reasonable) + +[package] +edition = "2018" +name = "storage-map" +version = "0.2.0" +authors = ["Dzmitry Malyshau "] +description = "Concurrent append-only map storage" +homepage = "https://github.com/kvark/storage-map" +documentation = "https://docs.rs/crate/storage-map" +keywords = ["hashmap", "map", "concurrent", "lock"] +license = "Apache-2.0" +repository = "https://github.com/kvark/storage-map" +[dependencies.lock_api] +version = "0.3.1" +[dev-dependencies.parking_lot] +version = "0.9.0" diff --git a/third_party/rust/storage-map/LICENSE b/third_party/rust/storage-map/LICENSE new file mode 100644 index 000000000000..261eeb9e9f8b --- /dev/null +++ b/third_party/rust/storage-map/LICENSE @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. 
+ + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
diff --git a/third_party/rust/storage-map/README.md b/third_party/rust/storage-map/README.md new file mode 100644 index 000000000000..3b25e801882b --- /dev/null +++ b/third_party/rust/storage-map/README.md @@ -0,0 +1,2 @@ +# storage-map +Concurrent append-only storage map in Rust diff --git a/third_party/rust/storage-map/src/lib.rs b/third_party/rust/storage-map/src/lib.rs new file mode 100644 index 000000000000..917d4b07e57e --- /dev/null +++ b/third_party/rust/storage-map/src/lib.rs @@ -0,0 +1,172 @@ +use lock_api::RawRwLock; +use std::{ + cell::UnsafeCell, + collections::hash_map::HashMap, + fmt, + hash, + ops, +}; + +/// Concurrent append-only map storage: a map guarded by a raw reader-writer lock. +pub struct StorageMap<L, M> { + lock: L, + map: UnsafeCell<M>, +} + +unsafe impl<L: Send, M: Send> Send for StorageMap<L, M> {} +unsafe impl<L: Sync, M: Send + Sync> Sync for StorageMap<L, M> {} + +impl<L: RawRwLock, M: Default> Default for StorageMap<L, M> { + fn default() -> Self { + StorageMap { + lock: L::INIT, + map: UnsafeCell::new(M::default()), + } + } +} + +impl<L, M> fmt::Debug for StorageMap<L, M> { + fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { + self.map.get().fmt(formatter) + } +} + +/// Guard for a single stored value; releases the shared or exclusive lock on drop. +pub struct StorageMapGuard<'a, L: 'a + RawRwLock, V: 'a> { + lock: &'a L, + value: &'a V, + exclusive: bool, +} + +impl<'a, L: RawRwLock, V> ops::Deref for StorageMapGuard<'a, L, V> { + type Target = V; + fn deref(&self) -> &V { + self.value + } +} + +impl<'a, L: RawRwLock, V> Drop for StorageMapGuard<'a, L, V> { + fn drop(&mut self) { + if self.exclusive { + self.lock.unlock_exclusive(); + } else { + self.lock.unlock_shared(); + } + } +} + +/// Guard providing mutable access to the whole underlying map. +pub struct WholeMapWriteGuard<'a, L: 'a + RawRwLock, M: 'a> { + lock: &'a L, + map: &'a mut M, +} + +impl<'a, L: RawRwLock, M> ops::Deref for WholeMapWriteGuard<'a, L, M> { + type Target = M; + fn deref(&self) -> &M { + self.map + } +} + +impl<'a, L: RawRwLock, M> ops::DerefMut for WholeMapWriteGuard<'a, L, M> { + fn deref_mut(&mut self) -> &mut M { + self.map + } +} + +impl<'a, L: RawRwLock, V> Drop for WholeMapWriteGuard<'a, L, V> { + fn drop(&mut self) { + self.lock.unlock_exclusive(); + } +} + +/// Outcome of `prepare_maybe`. +pub enum PrepareResult { + /// The key was already present. + AlreadyExists, + /// The creation closure declined to produce a value. + UnableToCreate, + /// A new value was created and inserted. + Created, +} + +impl<L, K, V, S> StorageMap<L, HashMap<K, V, S>> +where + L: RawRwLock, + K: Clone + Eq + hash::Hash, + S: hash::BuildHasher, +{ + /// Create a new storage map with the given hasher. + pub fn with_hasher(hash_builder: S) -> Self { + StorageMap { + lock: L::INIT, + map: UnsafeCell::new(HashMap::with_hasher(hash_builder)), + } + } + + /// Get a value associated with the key, creating and inserting it with + /// `create_fn` if it is not already present. + pub fn get_or_create_with<'a, F: FnOnce() -> V>( + &'a self, key: &K, create_fn: F + ) -> StorageMapGuard<'a, L, V> { + self.lock.lock_shared(); + + let map = unsafe { &*self.map.get() }; + if let Some(value) = map.get(key) { + return StorageMapGuard { + lock: &self.lock, + value, + exclusive: false, + }; + } + self.lock.unlock_shared(); + + let value = create_fn(); + self.lock.lock_exclusive(); + let map = unsafe { &mut *self.map.get() }; + StorageMapGuard { + lock: &self.lock, + value: &*map.entry(key.clone()).or_insert(value), + exclusive: true, + } + } + + /// Make sure the given key is in the map, where `create_fn` may decline + /// to produce a value by returning `None`. + pub fn prepare_maybe<F: FnOnce() -> Option<V>>( + &self, key: &K, create_fn: F + ) -> PrepareResult { + self.lock.lock_shared(); + + let map = unsafe { &*self.map.get() }; + let has = map.contains_key(key); + self.lock.unlock_shared(); + if has { + return PrepareResult::AlreadyExists; + } + + let value = match create_fn() { + Some(value) => value, + None => return PrepareResult::UnableToCreate, + }; + + self.lock.lock_exclusive(); + let map = unsafe { &mut *self.map.get() }; + map.insert(key.clone(), value); + self.lock.unlock_exclusive(); + PrepareResult::Created + }
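+ + // Editor's note (descriptive comment, not part of the vendored crate): both methods above + // use the same double-checked pattern: probe under a shared lock, release it, run + // `create_fn` with no lock held, then re-lock exclusively to insert. Two racing threads may + // both run `create_fn` for one key; the map stays consistent because insertion happens + // under the exclusive lock.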
+ + pub fn whole_write(&self) -> WholeMapWriteGuard<L, HashMap<K, V, S>> { + self.lock.lock_exclusive(); + WholeMapWriteGuard { + lock: &self.lock, + map: unsafe { &mut *self.map.get() }, + } + } +} diff --git a/third_party/rust/storage-map/tests/ten.rs b/third_party/rust/storage-map/tests/ten.rs new file mode 100644 index 000000000000..d8e1abf72da7 --- /dev/null +++ b/third_party/rust/storage-map/tests/ten.rs @@ -0,0 +1,42 @@ +extern crate lock_api; +extern crate parking_lot; +extern crate storage_map; + +use std::collections::hash_map::{HashMap, RandomState}; +use std::sync::Arc; +use std::{time, thread}; + +#[test] +fn fill_ten() { + type Map = HashMap<u8, String>; + let s = RandomState::new(); + let map = storage_map::StorageMap::<parking_lot::RawRwLock, Map>::with_hasher(s); + let arc = Arc::new(map); + let join_handles = (0 .. 10000usize) + .map(|i| { + let arc = Arc::clone(&arc); + let key = (i % 10) as u8; + thread::spawn(move || { + arc.get_or_create_with(&key, || { + thread::sleep(time::Duration::new(0, 100)); + format!("{}", i) + }); + }) + }) + .collect::<Vec<_>>(); + for handle in join_handles { + let _ = handle.join(); + } +} + +#[test] +fn whole_write() { + type Map = HashMap<u8, String>; + let map = storage_map::StorageMap::<parking_lot::RawRwLock, Map>::with_hasher(RandomState::new()); + map.get_or_create_with(&3, || "three".to_owned()); + map.get_or_create_with(&5, || "five".to_owned()); + let mut guard = map.whole_write(); + for (_key, _value) in guard.drain() { + + } +} diff --git a/third_party/rust/uluru/.cargo-checksum.json b/third_party/rust/uluru/.cargo-checksum.json index 69281ec58c22..7e6f35f85c6a 100644 --- a/third_party/rust/uluru/.cargo-checksum.json +++ b/third_party/rust/uluru/.cargo-checksum.json @@ -1 +1 @@ -{"files":{"Cargo.toml":"7040d9d576e67a8c86293a132992390b64adc5380917abce864dd9d8f3df7b06","LICENSE":"3db78572e8657cca9e9446ce56a057b8a981eb41af318c49a5fe08e7a10fa52a","README.md":"e918fd4e18659a1d4438ca8f198f979cfdd7a9d27dc8e16f65bcf198c2f3d1cd","lib.rs":"4393ac1bd1d1d41bdcf21901d2aee31187232f95af273dead63913a2fc195b51","tests.rs":"056ea5bd7bd7793107c057cb1600ba831400c29be8938df41a3f9ac8d92fccfd"},"package":"d2606e9192f308ddc4f0b3c5d1bf3400e28a70fff956e9d9f46d23b094746d9f"} \ No newline at end of file +{"files":{"Cargo.toml":"c17f68b45f66a90fe49f9798875d31b7dbca4cabeead79e2ed6d963cbdb76fe3","LICENSE":"3db78572e8657cca9e9446ce56a057b8a981eb41af318c49a5fe08e7a10fa52a","README.md":"e918fd4e18659a1d4438ca8f198f979cfdd7a9d27dc8e16f65bcf198c2f3d1cd","lib.rs":"4393ac1bd1d1d41bdcf21901d2aee31187232f95af273dead63913a2fc195b51","tests.rs":"056ea5bd7bd7793107c057cb1600ba831400c29be8938df41a3f9ac8d92fccfd"},"package":"6d7b39d0c32eba57d52d334e4bdd150df6e755264eefaa1ae2e7cd125f35e1ca"} \ No newline at end of file diff --git a/third_party/rust/uluru/Cargo.toml b/third_party/rust/uluru/Cargo.toml index 7915c6171574..8353ff4ffe4b 100644 --- a/third_party/rust/uluru/Cargo.toml +++ b/third_party/rust/uluru/Cargo.toml @@ -3,7 +3,7 @@ # When uploading crates to the registry Cargo will automatically # "normalize" Cargo.toml files for maximal compatibility # with all versions of Cargo and also rewrite `path` dependencies -# to registry (e.g. crates.io) dependencies +# to registry (e.g., crates.io) dependencies # # If you believe there's an error in this file please file an # issue against the rust-lang/cargo repository.
If you're @@ -12,7 +12,7 @@ [package] name = "uluru" -version = "0.3.0" +version = "0.4.0" authors = ["The Servo Project Developers"] description = "A simple, fast, LRU cache implementation" readme = "README.md" @@ -25,5 +25,5 @@ repository = "https://github.com/servo/uluru" name = "uluru" path = "lib.rs" [dependencies.arrayvec] -version = "0.4.6" +version = "0.5" default-features = false diff --git a/third_party/rust/wio/.cargo-checksum.json b/third_party/rust/wio/.cargo-checksum.json new file mode 100644 index 000000000000..3005879109a5 --- /dev/null +++ b/third_party/rust/wio/.cargo-checksum.json @@ -0,0 +1 @@ +{"files":{"Cargo.toml":"89d6e29e2d13fcf58de301fed58841998c9dd18dd2397f2404ba77ba3dd66730","LICENSE-APACHE":"b40930bbcf80744c86c46a12bc9da056641d722716c378f5659b9e555ef833e1","LICENSE-MIT":"5b19674a1db628a475850a131956ed49521b744e3dda8f5a94141f9aba681219","README.md":"d08ab02a40b9934719cc5f9083aae6b012b2535827048225484adca26776babd","src/apc.rs":"8baef3e1fde88c79c22fda4acc0c56183388a188251fd08b38d6e70eb6a4874f","src/com.rs":"419c855c84dbc93e0ffe9668ad1eece78a73504689c7986a0b27c57256f59659","src/console.rs":"d02d4c83083487159e9626159a27b2b99620d60c85fd01421af9a4d5b441cc16","src/error.rs":"0869b4b1bc731b2d66fc36c33c205d9c888ffc6a057dff61847185357c290f49","src/handle.rs":"322b7881466505c240552171d1b8db0c70fc85e05baf921129bc7ab0a4e54f90","src/lib.rs":"34be2562e1e006a2ada84fabeaf8bcfc3abf91b9f516faa291d58470bb638443","src/perf.rs":"0a50ec0b26bd9d3ec257d1b5fe02787337bc2053bb1e189864937a37ba5dea3d","src/pipe.rs":"671753d23dfbcd17ffcd1c1570beca3720502e9059ef5fde55d5f8e6413da1b3","src/sleep.rs":"63acd7e403b13f21f1a5add47ab79df61deab43b42db24c3570e2867bba454b7","src/thread.rs":"4ab1da513bfb0dc099ddaf7ce6622d46423007a7ec7a902064bf24b1805b63ae","src/ums.rs":"024f1513de1f72d6f8a796567f08f1a3ac728ab1a83ae94a9cd851f5d9d7e9b2","src/wide.rs":"443ef562870d316e1fd967ae9752c98cc9b7483abb0ad79b295628cc9440c3ad"},"package":"5d129932f4644ac2396cb456385cbf9e63b5b30c6e8dc4820bdca4eb082037a5"} \ No newline at end of file diff --git a/third_party/rust/wio/Cargo.toml b/third_party/rust/wio/Cargo.toml new file mode 100644 index 000000000000..2e1c29ab2aea --- /dev/null +++ b/third_party/rust/wio/Cargo.toml @@ -0,0 +1,28 @@ +# THIS FILE IS AUTOMATICALLY GENERATED BY CARGO +# +# When uploading crates to the registry Cargo will automatically +# "normalize" Cargo.toml files for maximal compatibility +# with all versions of Cargo and also rewrite `path` dependencies +# to registry (e.g., crates.io) dependencies +# +# If you believe there's an error in this file please file an +# issue against the rust-lang/cargo repository. 
If you're +# editing this file be aware that the upstream Cargo.toml +# will likely look very different (and much more reasonable) + +[package] +name = "wio" +version = "0.2.2" +authors = ["Peter Atashian "] +include = ["/src/**/*", "/Cargo.toml", "/LICENSE-MIT", "/LICENSE-APACHE", "/build.rs", "/README.md"] +description = "Windows IO wrapper" +readme = "README.md" +keywords = ["windows", "ffi", "win32", "com"] +categories = ["api-bindings", "os::windows-apis"] +license = "MIT/Apache-2.0" +repository = "https://github.com/retep998/wio-rs" +[dependencies.winapi] +version = "0.3" +features = ["consoleapi", "errhandlingapi", "fileapi", "handleapi", "minwindef", "processthreadsapi", "std", "unknwnbase", "wincon", "winnt"] +[dev-dependencies.rand] +version = "0.4" diff --git a/third_party/rust/wio/LICENSE-APACHE b/third_party/rust/wio/LICENSE-APACHE new file mode 100644 index 000000000000..8dada3edaf50 --- /dev/null +++ b/third_party/rust/wio/LICENSE-APACHE @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. 
For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. 
The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. 
+ + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "{}" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright {yyyy} {name of copyright owner} + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/third_party/rust/wio/LICENSE-MIT b/third_party/rust/wio/LICENSE-MIT new file mode 100644 index 000000000000..796e929aa550 --- /dev/null +++ b/third_party/rust/wio/LICENSE-MIT @@ -0,0 +1,19 @@ +Copyright (c) 2015 The winapi-rs Developers + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. diff --git a/third_party/rust/wio/README.md b/third_party/rust/wio/README.md new file mode 100644 index 000000000000..3a7a0a9dadb9 --- /dev/null +++ b/third_party/rust/wio/README.md @@ -0,0 +1,4 @@ +# wio-rs # + +A middle-level wrapper around various things in Windows API. +Designed to be a very thin layer around Windows API to provide a safe Rusty API but without hiding any functionality. 
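Since the vendored crate ships no usage documentation beyond this README, here is a minimal, editor-added sketch (not vendored code) of the helpers defined in the files that follow: the `ToWide`/`FromWide` string traits from `src/wide.rs`, plus, in comments, the `ComPtr` COM smart pointer from `src/com.rs`. It assumes a Windows target and wio as vendored here, whose `lib.rs` exports only the `com`, `error`, and `wide` modules; `raw_iunknown` stands in for some `*mut IUnknown` you already own.

    // Sketch only: round-trip a string through the NUL-terminated UTF-16
    // form that Win32 "W" entry points expect.
    extern crate wio;

    use std::ffi::OsString;
    use wio::wide::{FromWide, ToWide};

    fn main() {
        let wide: Vec<u16> = "hello".to_wide_null(); // UTF-16 code units + trailing NUL
        assert_eq!(wide.last(), Some(&0));
        let back = OsString::from_wide_null(&wide); // stops at the first NUL
        assert_eq!(back, OsString::from("hello"));

        // ComPtr wraps an owned COM reference: from_raw takes ownership,
        // Clone calls AddRef, Drop calls Release, cast() is QueryInterface.
        // It needs a live COM object, so it is only sketched in comments:
        //     let unk = unsafe { wio::com::ComPtr::from_raw(raw_iunknown) };
        //     let copy = unk.clone(); // AddRef
        //     drop(copy);             // Release
    }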
diff --git a/third_party/rust/wio/src/apc.rs b/third_party/rust/wio/src/apc.rs
new file mode 100644
index 000000000000..30b5b9ebc473
--- /dev/null
+++ b/third_party/rust/wio/src/apc.rs
@@ -0,0 +1,39 @@
+
+
+
+
+
+use {Result, k32, last_error, w};
+use std::os::windows::io::{AsRawHandle};
+use thread::{Thread};
+
+pub fn queue<T>(func: T, thread: &Thread) -> Result<()> where T: FnOnce() + 'static {
+    unsafe extern "system" fn helper<T: FnOnce()>(thing: w::ULONG_PTR) {
+        let func = Box::from_raw(thing as *mut T);
+        func()
+    }
+    let thing = Box::into_raw(Box::new(func)) as w::ULONG_PTR;
+    match unsafe { k32::QueueUserAPC(Some(helper::<T>), thread.as_raw_handle(), thing) } {
+        0 => {
+
+            unsafe { Box::from_raw(thing as *mut T) };
+            last_error()
+        },
+        _ => Ok(()),
+    }
+}
+pub fn queue_current<T>(func: T) -> Result<()> where T: FnOnce() + 'static {
+    unsafe extern "system" fn helper<T: FnOnce()>(thing: w::ULONG_PTR) {
+        let func = Box::from_raw(thing as *mut T);
+        func()
+    }
+    let thing = Box::into_raw(Box::new(func)) as w::ULONG_PTR;
+    match unsafe { k32::QueueUserAPC(Some(helper::<T>), k32::GetCurrentThread(), thing) } {
+        0 => {
+
+            unsafe { Box::from_raw(thing as *mut T) };
+            last_error()
+        },
+        _ => Ok(()),
+    }
+}
diff --git a/third_party/rust/wio/src/com.rs b/third_party/rust/wio/src/com.rs
new file mode 100644
index 000000000000..af403a9ba342
--- /dev/null
+++ b/third_party/rust/wio/src/com.rs
@@ -0,0 +1,79 @@
+
+
+
+
+
+use std::fmt::{Debug, Error as FmtError, Formatter};
+use std::mem::forget;
+use std::ops::Deref;
+use std::ptr::{NonNull, null_mut};
+use winapi::Interface;
+use winapi::um::unknwnbase::IUnknown;
+
+
+#[repr(transparent)]
+pub struct ComPtr<T>(NonNull<T>);
+impl<T> ComPtr<T> {
+
+
+
+    pub unsafe fn from_raw(ptr: *mut T) -> ComPtr<T> where T: Interface {
+        ComPtr(NonNull::new(ptr).expect("ptr should not be null"))
+    }
+
+    pub fn up<U>(self) -> ComPtr<U> where T: Deref<Target = U>, U: Interface {
+        unsafe { ComPtr::from_raw(self.into_raw() as *mut U) }
+    }
+
+
+    pub fn into_raw(self) -> *mut T {
+        let p = self.0.as_ptr();
+        forget(self);
+        p
+    }
+
+    fn as_unknown(&self) -> &IUnknown {
+        unsafe { &*(self.as_raw() as *mut IUnknown) }
+    }
+
+    pub fn cast<U>(&self) -> Result<ComPtr<U>, i32> where U: Interface {
+        let mut obj = null_mut();
+        let err = unsafe { self.as_unknown().QueryInterface(&U::uuidof(), &mut obj) };
+        if err < 0 { return Err(err); }
+        Ok(unsafe { ComPtr::from_raw(obj as *mut U) })
+    }
+
+
+    pub fn as_raw(&self) -> *mut T {
+        self.0.as_ptr()
+    }
+}
+impl<T> Deref for ComPtr<T> {
+    type Target = T;
+    fn deref(&self) -> &T {
+        unsafe { &*self.as_raw() }
+    }
+}
+impl<T> Clone for ComPtr<T> where T: Interface {
+    fn clone(&self) -> Self {
+        unsafe {
+            self.as_unknown().AddRef();
+            ComPtr::from_raw(self.as_raw())
+        }
+    }
+}
+impl<T> Debug for ComPtr<T> {
+    fn fmt(&self, f: &mut Formatter) -> Result<(), FmtError> {
+        write!(f, "{:?}", self.0)
+    }
+}
+impl<T> Drop for ComPtr<T> {
+    fn drop(&mut self) {
+        unsafe { self.as_unknown().Release(); }
+    }
+}
+impl<T> PartialEq<ComPtr<T>> for ComPtr<T> where T: Interface {
+    fn eq(&self, other: &ComPtr<T>) -> bool {
+        self.0 == other.0
+    }
+}
diff --git a/third_party/rust/wio/src/console.rs b/third_party/rust/wio/src/console.rs
new file mode 100644
index 000000000000..872951555a17
--- /dev/null
+++ b/third_party/rust/wio/src/console.rs
@@ -0,0 +1,270 @@
+
+
+
+
+
+use error::{Error, Result};
+use handle::Handle;
+use std::{
+    mem::{size_of_val, zeroed},
+    os::windows::io::FromRawHandle,
+    ptr::{null, null_mut},
+};
+use wide::ToWide;
+use winapi::{
+    um::{
+        consoleapi::{AllocConsole, GetConsoleCP, GetConsoleOutputCP, GetNumberOfConsoleInputEvents,
ReadConsoleInputW}, + fileapi::{CreateFileW, OPEN_EXISTING}, + handleapi::INVALID_HANDLE_VALUE, + wincon::{AttachConsole, CHAR_INFO, CONSOLE_FONT_INFOEX, CONSOLE_SCREEN_BUFFER_INFO, CONSOLE_SCREEN_BUFFER_INFOEX, CONSOLE_TEXTMODE_BUFFER, COORD, CreateConsoleScreenBuffer, FlushConsoleInputBuffer, FOCUS_EVENT, FreeConsole, GetConsoleScreenBufferInfo, GetConsoleScreenBufferInfoEx, GetCurrentConsoleFont, INPUT_RECORD, KEY_EVENT, MENU_EVENT, MOUSE_EVENT, SetConsoleActiveScreenBuffer, SetConsoleCP, SetConsoleOutputCP, SetConsoleScreenBufferInfoEx, SMALL_RECT, WINDOW_BUFFER_SIZE_EVENT, WriteConsoleOutputW}, + winnt::{FILE_SHARE_READ, FILE_SHARE_WRITE, GENERIC_READ, GENERIC_WRITE, HANDLE}, + }, + shared::minwindef::{DWORD, FALSE}, +}; + +pub struct ScreenBuffer(Handle); +impl ScreenBuffer { + pub fn new() -> Result { + let handle = unsafe { CreateConsoleScreenBuffer( + GENERIC_READ | GENERIC_WRITE, FILE_SHARE_READ | FILE_SHARE_WRITE, + null(), CONSOLE_TEXTMODE_BUFFER, null_mut(), + )}; + if handle == INVALID_HANDLE_VALUE { return Error::last() } + unsafe { Ok(ScreenBuffer(Handle::new(handle))) } + } + + pub fn from_conout() -> Result { + let handle = unsafe { CreateFileW( + "CONOUT$".to_wide_null().as_ptr(), GENERIC_READ | GENERIC_WRITE, + FILE_SHARE_READ, null_mut(), OPEN_EXISTING, + 0, null_mut(), + )}; + if handle == INVALID_HANDLE_VALUE { return Error::last() } + unsafe { Ok(ScreenBuffer(Handle::new(handle))) } + } + pub fn set_active(&self) -> Result<()> { + let res = unsafe { SetConsoleActiveScreenBuffer(*self.0) }; + if res == 0 { return Error::last() } + Ok(()) + } + pub fn info(&self) -> Result { + let mut info = ScreenBufferInfo(unsafe { zeroed() }); + let res = unsafe { GetConsoleScreenBufferInfo(*self.0, &mut info.0) }; + if res == 0 { return Error::last() } + Ok(info) + } + pub fn info_ex(&self) -> Result { + let mut info: CONSOLE_SCREEN_BUFFER_INFOEX = unsafe { zeroed() }; + info.cbSize = size_of_val(&info) as u32; + let res = unsafe { GetConsoleScreenBufferInfoEx(*self.0, &mut info) }; + if res == 0 { return Error::last() } + + info.srWindow.Right += 1; + info.srWindow.Bottom += 1; + Ok(ScreenBufferInfoEx(info)) + } + pub fn set_info_ex(&self, mut info: ScreenBufferInfoEx) -> Result<()> { + let res = unsafe { SetConsoleScreenBufferInfoEx(*self.0, &mut info.0) }; + if res == 0 { return Error::last() } + Ok(()) + } + + + + + + + + + + pub fn write_output(&self, buf: &[CharInfo], size: (i16, i16), pos: (i16, i16)) -> Result<()> { + assert!(buf.len() == (size.0 as usize) * (size.1 as usize)); + let mut rect = SMALL_RECT { + Left: pos.0, + Top: pos.1, + Right: pos.0 + size.0, + Bottom: pos.1 + size.1, + }; + let size = COORD { X: size.0, Y: size.1 }; + let pos = COORD { X: 0, Y: 0 }; + let res = unsafe { WriteConsoleOutputW( + *self.0, buf.as_ptr() as *const CHAR_INFO, size, pos, &mut rect + )}; + if res == 0 { return Error::last() } + Ok(()) + } + pub fn font_size(&self) -> Result<(i16, i16)> { + unsafe { + let mut font = zeroed(); + let res = GetCurrentConsoleFont(*self.0, FALSE, &mut font); + if res == 0 { return Error::last() } + Ok((font.dwFontSize.X, font.dwFontSize.Y)) + } + } +} +impl FromRawHandle for ScreenBuffer { + unsafe fn from_raw_handle(handle: HANDLE) -> ScreenBuffer { + ScreenBuffer(Handle::new(handle)) + } +} +pub struct InputBuffer(Handle); +impl InputBuffer { + + pub fn from_conin() -> Result { + let handle = unsafe { CreateFileW( + "CONIN$".to_wide_null().as_ptr(), GENERIC_READ | GENERIC_WRITE, + FILE_SHARE_READ | FILE_SHARE_WRITE, null_mut(), OPEN_EXISTING, + 0, 
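+            // dwFlagsAndAttributes = 0; the trailing null_mut() is hTemplateFile (no template)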
null_mut(), + )}; + if handle == INVALID_HANDLE_VALUE { Error::last() } + else { unsafe { Ok(InputBuffer::from_raw_handle(handle)) } } + } + + pub fn available_input(&self) -> Result { + let mut num = 0; + let res = unsafe { GetNumberOfConsoleInputEvents(*self.0, &mut num) }; + if res == 0 { return Error::last() } + Ok(num) + } + + pub fn read_input(&self) -> Result> { + let mut buf: [INPUT_RECORD; 0x1000] = unsafe { zeroed() }; + let mut size = 0; + let res = unsafe { ReadConsoleInputW( + *self.0, buf.as_mut_ptr(), buf.len() as DWORD, &mut size, + )}; + if res == 0 { return Error::last() } + Ok(buf[..(size as usize)].iter().map(|input| { + unsafe { match input.EventType { + KEY_EVENT => { + let e = input.Event.KeyEvent(); + Input::Key { + key_down: e.bKeyDown != 0, + repeat_count: e.wRepeatCount, + key_code: e.wVirtualKeyCode, + scan_code: e.wVirtualScanCode, + wide_char: *e.uChar.UnicodeChar(), + control_key_state: e.dwControlKeyState, + } + }, + MOUSE_EVENT => { + let e = input.Event.MouseEvent(); + Input::Mouse { + position: (e.dwMousePosition.X, e.dwMousePosition.Y), + button_state: e.dwButtonState, + control_key_state: e.dwControlKeyState, + event_flags: e.dwEventFlags, + } + }, + WINDOW_BUFFER_SIZE_EVENT => { + let s = input.Event.WindowBufferSizeEvent().dwSize; + Input::WindowBufferSize(s.X, s.Y) + }, + MENU_EVENT => Input::Menu(input.Event.MenuEvent().dwCommandId), + FOCUS_EVENT => Input::Focus(input.Event.FocusEvent().bSetFocus != 0), + e => unreachable!("invalid event type: {}", e), + } } + }).collect()) + } + + pub fn flush_input(&self) -> Result<()> { + let res = unsafe { FlushConsoleInputBuffer(*self.0) }; + if res == 0 { return Error::last() } + Ok(()) + } +} +impl FromRawHandle for InputBuffer { + unsafe fn from_raw_handle(handle: HANDLE) -> InputBuffer { + InputBuffer(Handle::from_raw_handle(handle)) + } +} +#[repr(transparent)] #[derive(Copy, Clone)] +pub struct ScreenBufferInfo(CONSOLE_SCREEN_BUFFER_INFO); +impl ScreenBufferInfo { + pub fn size(&self) -> (i16, i16) { + (self.0.dwSize.X, self.0.dwSize.Y) + } +} +#[repr(transparent)] #[derive(Copy, Clone)] +pub struct ScreenBufferInfoEx(CONSOLE_SCREEN_BUFFER_INFOEX); +impl ScreenBufferInfoEx { + pub fn raw_mut(&mut self) -> &mut CONSOLE_SCREEN_BUFFER_INFOEX { + &mut self.0 + } +} +#[repr(transparent)] #[derive(Copy, Clone)] +pub struct FontInfoEx(CONSOLE_FONT_INFOEX); +#[derive(Copy, Clone)] +pub enum Input { + Key { + key_down: bool, + repeat_count: u16, + key_code: u16, + scan_code: u16, + wide_char: u16, + control_key_state: u32, + }, + Mouse { + position: (i16, i16), + button_state: u32, + control_key_state: u32, + event_flags: u32, + }, + WindowBufferSize(i16, i16), + Menu(u32), + Focus(bool), +} +#[repr(transparent)] #[derive(Copy, Clone)] +pub struct CharInfo(CHAR_INFO); +impl CharInfo { + pub fn new(ch: u16, attr: u16) -> CharInfo { + let mut ci: CHAR_INFO = unsafe { zeroed() }; + unsafe { *ci.Char.UnicodeChar_mut() = ch }; + ci.Attributes = attr; + CharInfo(ci) + } + pub fn character(&self) -> u16 { unsafe { *self.0.Char.UnicodeChar() } } + pub fn attributes(&self) -> u16 { self.0.Attributes } +} + +pub fn alloc() -> Result<()> { + match unsafe { AllocConsole() } { + 0 => Error::last(), + _ => Ok(()), + } +} + +pub fn free() -> Result<()> { + match unsafe { FreeConsole() } { + 0 => Error::last(), + _ => Ok(()), + } +} + + +pub fn attach(processid: Option) -> Result<()> { + match unsafe { AttachConsole(processid.unwrap_or(-1i32 as u32)) } { + 0 => Error::last(), + _ => Ok(()), + } +} + +pub fn input_code_page() -> 
u32 { + unsafe { GetConsoleCP() } +} + +pub fn output_code_page() -> u32 { + unsafe { GetConsoleOutputCP() } +} + +pub fn set_input_code_page(code: u32) -> Result<()> { + let res = unsafe { SetConsoleCP(code) }; + if res == 0 { return Error::last() } + Ok(()) +} + +pub fn set_output_code_page(code: u32) -> Result<()> { + let res = unsafe { SetConsoleOutputCP(code) }; + if res == 0 { return Error::last() } + Ok(()) +} diff --git a/third_party/rust/wio/src/error.rs b/third_party/rust/wio/src/error.rs new file mode 100644 index 000000000000..cd8e2d5d2689 --- /dev/null +++ b/third_party/rust/wio/src/error.rs @@ -0,0 +1,18 @@ + + + + + +use std::result; +use winapi::shared::minwindef::DWORD; +use winapi::um::errhandlingapi::GetLastError; +#[derive(Clone, Copy, Debug)] +pub struct Error(DWORD); +impl Error { + pub fn code(&self) -> u32 { self.0 } + pub fn last() -> Result { + Err(Error(unsafe { GetLastError() })) + } +} + +pub type Result = result::Result; diff --git a/third_party/rust/wio/src/handle.rs b/third_party/rust/wio/src/handle.rs new file mode 100644 index 000000000000..ac0cf560f849 --- /dev/null +++ b/third_party/rust/wio/src/handle.rs @@ -0,0 +1,71 @@ + + + + + +use error::{Error, Result}; +use std::{ + ops::Deref, + os::windows::io::{AsRawHandle, FromRawHandle, IntoRawHandle}, + ptr::null_mut, +}; +use winapi::{ + um::{ + handleapi::{CloseHandle, DuplicateHandle}, + processthreadsapi::GetCurrentProcess, + winnt::{DUPLICATE_SAME_ACCESS, HANDLE}, + }, + shared::minwindef::FALSE, +}; + +pub struct Handle(HANDLE); +impl Handle { + + pub unsafe fn new(handle: HANDLE) -> Handle { + Handle(handle) + } + pub fn close(self) -> Result<()> { + match unsafe { CloseHandle(self.into_raw_handle()) } { + 0 => Error::last(), + _ => Ok(()), + } + } + + pub unsafe fn duplicate_from(handle: HANDLE) -> Result { + let mut new_handle = null_mut(); + let res = DuplicateHandle( + GetCurrentProcess(), handle, GetCurrentProcess(), + &mut new_handle, 0, FALSE, DUPLICATE_SAME_ACCESS, + ); + match res { + 0 => Error::last(), + _ => Ok(Handle(new_handle)), + } + } +} +impl AsRawHandle for Handle { + fn as_raw_handle(&self) -> HANDLE { + self.0 + } +} +impl Deref for Handle { + type Target = HANDLE; + fn deref(&self) -> &HANDLE { &self.0 } +} +impl Drop for Handle { + fn drop(&mut self) { + let ret = unsafe { CloseHandle(self.0) }; + let err: Result<()> = Error::last(); + assert!(ret != 0, "{:?}", err); + } +} +impl FromRawHandle for Handle { + unsafe fn from_raw_handle(handle: HANDLE) -> Handle { + Handle(handle) + } +} +impl IntoRawHandle for Handle { + fn into_raw_handle(self) -> HANDLE { + self.0 + } +} diff --git a/third_party/rust/wio/src/lib.rs b/third_party/rust/wio/src/lib.rs new file mode 100644 index 000000000000..0d498d017ea7 --- /dev/null +++ b/third_party/rust/wio/src/lib.rs @@ -0,0 +1,20 @@ + + + + + +#![cfg(windows)] +extern crate winapi; + + +pub mod com; + +pub mod error; + + + + + +pub mod wide; + +pub use error::{Error, Result}; diff --git a/third_party/rust/wio/src/perf.rs b/third_party/rust/wio/src/perf.rs new file mode 100644 index 000000000000..1392cac37369 --- /dev/null +++ b/third_party/rust/wio/src/perf.rs @@ -0,0 +1,17 @@ + + + + + +use {k32}; + +pub fn frequency() -> i64 { + let mut freq = 0; + unsafe { k32::QueryPerformanceFrequency(&mut freq) }; + freq +} +pub fn counter() -> i64 { + let mut count = 0; + unsafe { k32::QueryPerformanceCounter(&mut count) }; + count +} diff --git a/third_party/rust/wio/src/pipe.rs b/third_party/rust/wio/src/pipe.rs new file mode 100644 index 
000000000000..bae7dd13b76d --- /dev/null +++ b/third_party/rust/wio/src/pipe.rs @@ -0,0 +1,16 @@ + + + + + +use handle::{Handle}; + +pub struct NamedPipe(Handle); +impl NamedPipe { + +} +pub enum Access { + Inbound, + Outbound, + Duplex, +} diff --git a/third_party/rust/wio/src/sleep.rs b/third_party/rust/wio/src/sleep.rs new file mode 100644 index 000000000000..b38d81621b72 --- /dev/null +++ b/third_party/rust/wio/src/sleep.rs @@ -0,0 +1,23 @@ + + + + + +use {k32, w}; + +pub fn sleep(ms: u32) { + unsafe { k32::Sleep(ms) } +} +#[derive(Debug, Eq, PartialEq)] +pub enum WakeReason { + TimedOut, + CallbacksFired, +} +pub fn sleep_alertable(ms: u32) -> WakeReason { + let ret = unsafe { k32::SleepEx(ms, w::TRUE) }; + match ret { + 0 => WakeReason::TimedOut, + w::WAIT_IO_COMPLETION => WakeReason::CallbacksFired, + _ => unreachable!("SleepEx returned weird value of {:?}", ret), + } +} diff --git a/third_party/rust/wio/src/thread.rs b/third_party/rust/wio/src/thread.rs new file mode 100644 index 000000000000..198bcf700a66 --- /dev/null +++ b/third_party/rust/wio/src/thread.rs @@ -0,0 +1,51 @@ + + + + + +use {Result, k32, last_error, w}; +use handle::{Handle}; +use std::os::windows::io::{AsRawHandle, FromRawHandle, IntoRawHandle}; +use std::thread::{JoinHandle}; + +pub struct Thread(Handle); +impl Thread { + pub fn current() -> Result { + unsafe { Handle::duplicate_from(k32::GetCurrentThread()).map(Thread) } + } + + pub fn set_affinity_mask(&self, mask: usize) -> Result { + let res = unsafe { + k32::SetThreadAffinityMask(*self.0, mask as w::ULONG_PTR) + }; + match res { + 0 => last_error(), + prev => Ok(prev as usize), + } + } +} +impl From> for Thread { + fn from(o: JoinHandle) -> Thread { + unsafe { Thread::from_raw_handle(o.into_raw_handle()) } + } +} +impl<'a, T> From<&'a JoinHandle> for Thread { + fn from(o: &'a JoinHandle) -> Thread { + unsafe { Thread::from_raw_handle(o.as_raw_handle()) } + } +} +impl AsRawHandle for Thread { + fn as_raw_handle(&self) -> w::HANDLE { + self.0.as_raw_handle() + } +} +impl IntoRawHandle for Thread { + fn into_raw_handle(self) -> w::HANDLE { + self.0.into_raw_handle() + } +} +impl FromRawHandle for Thread { + unsafe fn from_raw_handle(handle: w::HANDLE) -> Thread { + Thread(Handle::from_raw_handle(handle)) + } +} diff --git a/third_party/rust/wio/src/ums.rs b/third_party/rust/wio/src/ums.rs new file mode 100644 index 000000000000..2974673e9564 --- /dev/null +++ b/third_party/rust/wio/src/ums.rs @@ -0,0 +1,3 @@ + + +use {k32, w}; \ No newline at end of file diff --git a/third_party/rust/wio/src/wide.rs b/third_party/rust/wio/src/wide.rs new file mode 100644 index 000000000000..8e8e6885bb74 --- /dev/null +++ b/third_party/rust/wio/src/wide.rs @@ -0,0 +1,59 @@ + + + + + +use std::ffi::{OsStr, OsString}; +use std::os::windows::ffi::{OsStrExt, OsStringExt}; +use std::path::PathBuf; +use std::slice::from_raw_parts; + +pub trait ToWide { + fn to_wide(&self) -> Vec; + fn to_wide_null(&self) -> Vec; +} +impl ToWide for T where T: AsRef { + #[inline] + fn to_wide(&self) -> Vec { + self.as_ref().encode_wide().collect() + } + #[inline] + fn to_wide_null(&self) -> Vec { + self.as_ref().encode_wide().chain(Some(0)).collect() + } +} +pub trait FromWide where Self: Sized { + fn from_wide(wide: &[u16]) -> Self; + #[inline] + fn from_wide_null(wide: &[u16]) -> Self { + let len = wide.iter().take_while(|&&c| c != 0).count(); + Self::from_wide(&wide[..len]) + } + #[inline] + unsafe fn from_wide_ptr(wide: *const u16, len: usize) -> Self { + assert!(!wide.is_null()); + 
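+        // SAFETY: the caller guarantees that `wide` points to `len` valid u16 values.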
Self::from_wide(from_raw_parts(wide, len)) + } + #[inline] + unsafe fn from_wide_ptr_null(wide: *const u16) -> Self { + assert!(!wide.is_null()); + for i in 0.. { + if *wide.offset(i) == 0 { + return Self::from_wide_ptr(wide, i as usize) + } + } + unreachable!() + } +} +impl FromWide for OsString { + #[inline] + fn from_wide(wide: &[u16]) -> OsString { + OsStringExt::from_wide(wide) + } +} +impl FromWide for PathBuf { + #[inline] + fn from_wide(wide: &[u16]) -> PathBuf { + ::from_wide(wide).into() + } +} diff --git a/third_party/rust/x11/.cargo-checksum.json b/third_party/rust/x11/.cargo-checksum.json new file mode 100644 index 000000000000..7a01f1595dff --- /dev/null +++ b/third_party/rust/x11/.cargo-checksum.json @@ -0,0 +1 @@ +{"files":{"Cargo.toml":"496e0f8ca4e0704fad12dfe9dcc0a63d4f5bf1e05024fc5c8ff2663a5773bacb","build.rs":"eb64e6325338b0e9e46cc9bdad96939ce19fb33b7a269e3a3c92b1ec0930c615","examples/hello-world.rs":"0fc60afb6ba41930e0e74fc31b835faa771b06eb70314cdd9a07e05204b1b389","examples/input.rs":"18a254cbd92871c1c27edf420ff29eb02e0dd6eb744ecbb8dbebcd590780b96a","examples/xrecord.rs":"d2458011f4ee170db613d5effa483f51ac56bceab7f25c44eee4cb22c1863207","src/dpms.rs":"77cb9237a0262350bf6553e40d6cc9e97a599bca089a457ef87c57c9b9cf6194","src/glx.rs":"b559c1e9663069f417bf70f1f86eb3e1103739c7041ee6a9103759030fac1126","src/internal.rs":"9e1f269e36e6a92325be4f5e67185d60c12e4a74b98c454759c0161325d0d1e4","src/keysym.rs":"4d65901072224729fe18fb5c815242e51e75a34c1dfc88e5ac1cea19e8a423a8","src/lib.rs":"49ad75828478d09a2f0aceb7effe61fecea9785285cd17d8622b924e88740c5a","src/link.rs":"5a6f63372091daf218d27d55e8ab5775ccf591e50c532c4c442acdf2b64136a9","src/xcursor.rs":"d4d87186c78c42bbff7318fb530851bdb61e38b431233644a3b2295bb0081841","src/xf86vmode.rs":"fa118cf4d8ed1aae60a6e97016a0f2c50b62175628700a1fb96a3d2ea0712305","src/xfixes.rs":"cc2a1681415c3d2d32331dd610567376b6eaa5f42d58b8940a1ff1d765d9cc85","src/xft.rs":"0951bc2f362c4c9722554b89c87ab67e7d60698734820fd88207f874fa84dee9","src/xinerama.rs":"c4bd4f21f044d695b2fc052a20d554aac1f2227459e38da9b66125c60b6d33b7","src/xinput.rs":"3876865629612690d0ab3b551a7e6303eaf9818ff0a56c0d803281c16c7c475a","src/xinput2.rs":"b5a0eba5b1ae5f89534b3fad30fd13f45b02b4ef6b492734fbf4b0c66a7391e1","src/xlib.rs":"65221fbbf70bf25d828692bacf288ab4ebdc073ba09917a3790b5bc0c2bbb70b","src/xlib_xcb.rs":"71ee6274e261bb8f0b82ea260afffa9273531f0f87df9541152412b88aff468f","src/xmd.rs":"149c818c19e90a14c8f60016cd18a04d0de4fd702fc5df89e5283f6ee1ce4852","src/xmu.rs":"262df634c584dac47d0d898dd75b6b2de7c4256b9b494cf89123d8279dad3020","src/xrandr.rs":"c137fadcd035142536567a4ab2591331f040a8c77306746a70ffc5df2bdf6979","src/xrecord.rs":"82e4ad59079ce6c6ad0097ef4b44935d2448b363784abe008919f89cb02ae8c4","src/xrender.rs":"d6642eb83709bc8ff913dd382677e94d5153618f005fead645962947b8ff50b4","src/xss.rs":"8322f210627a59d825a11256ff4daff42c53c6e8ba626f55b3859bf938e8a7b2","src/xt.rs":"fa2324391a91387f44eeee6742c50676329f08d941d002ff70d4eb99f36af7bc","src/xtest.rs":"dcd0eb130ffb3cf96165d1699d6b625649c28ed819036a85b5f175c2a3479918"},"package":"39697e3123f715483d311b5826e254b6f3cfebdd83cf7ef3358f579c3d68e235"} \ No newline at end of file diff --git a/third_party/rust/x11/Cargo.toml b/third_party/rust/x11/Cargo.toml new file mode 100644 index 000000000000..4e8caf0e0fad --- /dev/null +++ b/third_party/rust/x11/Cargo.toml @@ -0,0 +1,45 @@ +# THIS FILE IS AUTOMATICALLY GENERATED BY CARGO +# +# When uploading crates to the registry Cargo will automatically +# "normalize" Cargo.toml files for maximal compatibility 
+# with all versions of Cargo and also rewrite `path` dependencies +# to registry (e.g. crates.io) dependencies +# +# If you believe there's an error in this file please file an +# issue against the rust-lang/cargo repository. If you're +# editing this file be aware that the upstream Cargo.toml +# will likely look very different (and much more reasonable) + +[package] +name = "x11" +version = "2.18.1" +authors = ["daggerbot ", "Erle Pereira "] +build = "build.rs" +description = "X11 library bindings for Rust" +documentation = "https://docs.rs/x11" +license = "CC0-1.0" +repository = "https://github.com/erlepereira/x11-rs.git" +[dependencies.libc] +version = "0.2" +[build-dependencies.pkg-config] +version = "0.3.8" + +[features] +dox = [] +dpms = [] +glx = [] +xcursor = [] +xf86vmode = [] +xft = [] +xinerama = [] +xinput = [] +xlib = [] +xlib_xcb = [] +xmu = [] +xrandr = [] +xrecord = ["xtst"] +xrender = [] +xss = [] +xt = [] +xtest = ["xtst"] +xtst = [] diff --git a/third_party/rust/x11/build.rs b/third_party/rust/x11/build.rs new file mode 100644 index 000000000000..a4a13e9e6e5a --- /dev/null +++ b/third_party/rust/x11/build.rs @@ -0,0 +1,38 @@ + + + + +extern crate pkg_config; + +use std::env; + +fn main () { + if cfg!(feature = "dox") { return; } + + let deps = [ + ("gl", "1", "glx"), + ("x11", "1.4.99.1", "xlib"), + ("x11-xcb", "1.6", "xlib_xcb"), + ("xcursor", "1.1", "xcursor"), + ("xext", "1.3", "dpms"), + ("xft", "2.1", "xft"), + ("xi", "1.7", "xinput"), + ("xinerama", "1.1", "xinerama"), + ("xmu", "1.1", "xmu"), + ("xrandr", "1.5", "xrandr"), + ("xrender", "0.9.6", "xrender"), + ("xscrnsaver", "1.2", "xss"), + ("xt", "1.1", "xt"), + ("xtst", "1.2", "xtst"), + ("xxf86vm", "1.1", "xf86vmode"), + ]; + + for &(dep, version, feature) in deps.iter() { + let var = format!( + "CARGO_FEATURE_{}", + feature.to_uppercase().replace('-', "_") + ); + if env::var_os(var).is_none() { continue; } + pkg_config::Config::new().atleast_version(version).probe(dep).unwrap(); + } +} diff --git a/third_party/rust/x11/examples/hello-world.rs b/third_party/rust/x11/examples/hello-world.rs new file mode 100644 index 000000000000..db4fff9533de --- /dev/null +++ b/third_party/rust/x11/examples/hello-world.rs @@ -0,0 +1,89 @@ + + + + +#![cfg_attr(not(feature = "xlib"), allow(dead_code))] +#![cfg_attr(not(feature = "xlib"), allow(unused_imports))] + +extern crate x11; + +use std::ffi::CString; +use std::mem; +use std::os::raw::*; +use std::ptr; + +use x11::xlib; + +#[cfg(not(feature = "xlib"))] +fn main () { + panic!("this example requires `--features xlib`"); +} + +#[cfg(feature = "xlib")] +fn main () { + unsafe { + + let display = xlib::XOpenDisplay(ptr::null()); + + if display.is_null() { + panic!("XOpenDisplay failed"); + } + + + let screen = xlib::XDefaultScreen(display); + let root = xlib::XRootWindow(display, screen); + + let mut attributes: xlib::XSetWindowAttributes = mem::uninitialized(); + attributes.background_pixel = xlib::XWhitePixel(display, screen); + + let window = xlib::XCreateWindow(display, root, + 0, 0, 400, 300, + 0, 0, + xlib::InputOutput as c_uint, ptr::null_mut(), + xlib::CWBackPixel, &mut attributes); + + + let title_str = CString::new("hello-world").unwrap(); + xlib::XStoreName(display, window, title_str.as_ptr() as *mut c_char); + + + let wm_protocols_str = CString::new("WM_PROTOCOLS").unwrap(); + let wm_delete_window_str = CString::new("WM_DELETE_WINDOW").unwrap(); + + let wm_protocols = xlib::XInternAtom(display, wm_protocols_str.as_ptr(), xlib::False); + let wm_delete_window = 
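+    // Interned WM_DELETE_WINDOW atom; the event loop below compares ClientMessage data against it.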
xlib::XInternAtom(display, wm_delete_window_str.as_ptr(), xlib::False); + + let mut protocols = [wm_delete_window]; + + xlib::XSetWMProtocols(display, window, protocols.as_mut_ptr(), protocols.len() as c_int); + + + xlib::XMapWindow(display, window); + + + let mut event: xlib::XEvent = mem::uninitialized(); + + loop { + xlib::XNextEvent(display, &mut event); + + match event.get_type() { + xlib::ClientMessage => { + let xclient = xlib::XClientMessageEvent::from(event); + + if xclient.message_type == wm_protocols && xclient.format == 32 { + let protocol = xclient.data.get_long(0) as xlib::Atom; + + if protocol == wm_delete_window { + break; + } + } + }, + + _ => () + } + } + + + xlib::XCloseDisplay(display); + } +} diff --git a/third_party/rust/x11/examples/input.rs b/third_party/rust/x11/examples/input.rs new file mode 100644 index 000000000000..16fcd4a1a106 --- /dev/null +++ b/third_party/rust/x11/examples/input.rs @@ -0,0 +1,384 @@ + + + + + + + + + +#![cfg_attr(not(feature = "xlib"), allow(dead_code))] +#![cfg_attr(not(feature = "xlib"), allow(unused_imports))] + +extern crate x11; +extern crate libc; + +use std::ffi::CString; +use std::ptr::{ + null, + null_mut, +}; +use std::mem::{transmute, zeroed}; +use std::os::raw::*; +use std::slice::{from_raw_parts}; +use x11::{xlib, xinput2}; + + + +pub struct DemoWindow { + pub display: *mut xlib::Display, + pub window: xlib::Window, + + wm_protocols: xlib::Atom, + wm_delete_window: xlib::Atom +} + +impl DemoWindow { + + pub fn new(title: &str, width: u32, height: u32) -> DemoWindow { + unsafe { + + let display = xlib::XOpenDisplay(null()); + if display == null_mut() { + panic!("can't open display"); + } + + + let wm_delete_window_str = CString::new("WM_DELETE_WINDOW").unwrap(); + let wm_protocols_str = CString::new("WM_PROTOCOLS").unwrap(); + + let wm_delete_window = xlib::XInternAtom(display, wm_delete_window_str.as_ptr(), xlib::False); + let wm_protocols = xlib::XInternAtom(display, wm_protocols_str.as_ptr(), xlib::False); + + if wm_delete_window == 0 || wm_protocols == 0 { + panic!("can't load atoms"); + } + + + let screen_num = xlib::XDefaultScreen(display); + let root = xlib::XRootWindow(display, screen_num); + let white_pixel = xlib::XWhitePixel(display, screen_num); + + let mut attributes: xlib::XSetWindowAttributes = zeroed(); + attributes.background_pixel = white_pixel; + + let window = xlib::XCreateWindow(display, root, 0, 0, width as c_uint, height as c_uint, 0, 0, + xlib::InputOutput as c_uint, null_mut(), + xlib::CWBackPixel, &mut attributes); + + let title_str = CString::new(title).unwrap(); + xlib::XStoreName(display, window, title_str.as_ptr() as *mut _); + + + let mut protocols = [wm_delete_window]; + + if xlib::XSetWMProtocols(display, window, &mut protocols[0] as *mut xlib::Atom, 1) == xlib::False { + panic!("can't set WM protocols"); + } + + DemoWindow{ + display: display, + window: window, + wm_protocols: wm_protocols, + wm_delete_window: wm_delete_window + } + } + } + + + pub fn show(&mut self) { + unsafe { + xlib::XMapWindow(self.display, self.window); + } + } + + + + pub fn run_event_loop(&mut self, mut event_handler: EventHandler) + where EventHandler: FnMut(&xlib::XEvent) { + let mut event: xlib::XEvent = unsafe{zeroed()}; + loop { + unsafe{xlib::XNextEvent(self.display, &mut event)}; + match event.get_type() { + xlib::ClientMessage => { + let xclient: xlib::XClientMessageEvent = From::from(event); + + + if xclient.message_type == self.wm_protocols && xclient.format == 32 { + let protocol = xclient.data.get_long(0) as 
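+                        // data.l[0] of a WM_PROTOCOLS ClientMessage names the protocol being invoked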
xlib::Atom; + + + if protocol == self.wm_delete_window { + break; + } + } + }, + _ => event_handler(&event) + } + } + } +} + +impl Drop for DemoWindow { + + fn drop(&mut self) { + unsafe { + xlib::XDestroyWindow(self.display, self.window); + xlib::XCloseDisplay(self.display); + } + } +} + +const TITLE: &'static str = "XInput Demo"; +const DEFAULT_WIDTH: c_uint = 640; +const DEFAULT_HEIGHT: c_uint = 480; + +#[derive(Debug)] +enum AxisType { + HorizontalScroll, + VerticalScroll, + Other +} + +#[derive(Debug)] +struct Axis { + id: i32, + device_id: i32, + axis_number: i32, + axis_type: AxisType +} + +#[derive(Debug)] +struct AxisValue { + device_id: i32, + axis_number: i32, + value: f64 +} + +struct InputState { + cursor_pos: (f64, f64), + axis_values: Vec +} + +fn read_input_axis_info(display: *mut xlib::Display) -> Vec { + let mut axis_list = Vec::new(); + let mut device_count = 0; + + + + let devices = unsafe{xinput2::XIQueryDevice(display, xinput2::XIAllMasterDevices, &mut device_count)}; + for i in 0..device_count { + let device = unsafe { *(devices.offset(i as isize)) }; + for k in 0..device.num_classes { + let class = unsafe { *(device.classes.offset(k as isize)) }; + match unsafe { (*class)._type } { + xinput2::XIScrollClass => { + let scroll_class: &xinput2::XIScrollClassInfo = unsafe{transmute(class)}; + axis_list.push(Axis{ + id: scroll_class.sourceid, + device_id: device.deviceid, + axis_number: scroll_class.number, + axis_type: match scroll_class.scroll_type { + xinput2::XIScrollTypeHorizontal => AxisType::HorizontalScroll, + xinput2::XIScrollTypeVertical => AxisType::VerticalScroll, + _ => { unreachable!() } + } + }) + }, + xinput2::XIValuatorClass => { + let valuator_class: &xinput2::XIValuatorClassInfo = unsafe{transmute(class)}; + axis_list.push(Axis{ + id: valuator_class.sourceid, + device_id: device.deviceid, + axis_number: valuator_class.number, + axis_type: AxisType::Other + }) + }, + _ => { } + } + } + } + + axis_list.sort_by(|a, b| { + if a.device_id != b.device_id { + a.device_id.cmp(&b.device_id) + } else if a.id != b.id { + a.id.cmp(&b.id) + } else { + a.axis_number.cmp(&b.axis_number) + } + }); + axis_list +} + + + + +fn calc_scroll_deltas(event: &xinput2::XIDeviceEvent, + axis_id: i32, + axis_value: f64, + axis_list: &[Axis], + prev_axis_values: &mut Vec) -> (f64, f64) { + let prev_value_pos = prev_axis_values.iter().position(|prev_axis| { + prev_axis.device_id == event.sourceid && + prev_axis.axis_number == axis_id + }); + let delta = match prev_value_pos { + Some(idx) => axis_value - prev_axis_values[idx].value, + None => 0.0 + }; + + let new_axis_value = AxisValue{ + device_id: event.sourceid, + axis_number: axis_id, + value: axis_value + }; + + match prev_value_pos { + Some(idx) => prev_axis_values[idx] = new_axis_value, + None => prev_axis_values.push(new_axis_value) + } + + let mut scroll_delta = (0.0, 0.0); + + for axis in axis_list.iter() { + if axis.id == event.sourceid && + axis.axis_number == axis_id { + match axis.axis_type { + AxisType::HorizontalScroll => scroll_delta.0 = delta, + AxisType::VerticalScroll => scroll_delta.1 = delta, + _ => {} + } + } + } + + scroll_delta +} + +#[cfg(not(all(feature = "xlib", feature = "xinput")))] +fn main () { + panic!("this example requires `--features 'xlib xinput'`"); +} + +#[cfg(all(feature = "xlib", feature = "xinput"))] +fn main () { + let mut demo_window = DemoWindow::new(TITLE, DEFAULT_WIDTH, DEFAULT_HEIGHT); + + + let mut opcode: c_int = 0; + let mut event: c_int = 0; + let mut error: c_int = 0; + let 
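+    // Extension name understood by XQueryExtension.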
xinput_str = CString::new("XInputExtension").unwrap(); + let xinput_available = unsafe { + xlib::XQueryExtension(demo_window.display, xinput_str.as_ptr(), &mut opcode, &mut event, &mut error) + }; + if xinput_available == xlib::False { + panic!("XInput not available") + } + + let mut xinput_major_ver = xinput2::XI_2_Major; + let mut xinput_minor_ver = xinput2::XI_2_Minor; + if unsafe{xinput2::XIQueryVersion(demo_window.display, + &mut xinput_major_ver, &mut xinput_minor_ver)} != xlib::Success as c_int { + panic!("XInput2 not available"); + } + println!("XI version available {}.{}", xinput_major_ver, xinput_minor_ver); + + + let mut mask: [c_uchar; 1] = [0]; + let mut input_event_mask = xinput2::XIEventMask { + deviceid: xinput2::XIAllMasterDevices, + mask_len: mask.len() as i32, + mask: mask.as_mut_ptr() + }; + let events = &[ + xinput2::XI_ButtonPress, + xinput2::XI_ButtonRelease, + xinput2::XI_KeyPress, + xinput2::XI_KeyRelease, + xinput2::XI_Motion + ]; + for &event in events { + xinput2::XISetMask(&mut mask, event); + } + + match unsafe{xinput2::XISelectEvents(demo_window.display, + demo_window.window, &mut input_event_mask, 1)} { + status if status as u8 == xlib::Success => (), + err => panic!("Failed to select events {:?}", err) + } + + + demo_window.show(); + + + let display = demo_window.display; + let axis_list = read_input_axis_info(display); + + let mut prev_state = InputState{ + cursor_pos: (0.0, 0.0), + axis_values: Vec::new() + }; + + demo_window.run_event_loop(|event| { + match event.get_type() { + xlib::GenericEvent => { + let mut cookie: xlib::XGenericEventCookie = From::from(*event); + if unsafe{xlib::XGetEventData(display, &mut cookie)} != xlib::True { + println!("Failed to retrieve event data"); + return; + } + match cookie.evtype { + xinput2::XI_KeyPress | xinput2::XI_KeyRelease => { + let event_data: &xinput2::XIDeviceEvent = unsafe{transmute(cookie.data)}; + if cookie.evtype == xinput2::XI_KeyPress { + if event_data.flags & xinput2::XIKeyRepeat == 0 { + println!("Key {} pressed", event_data.detail); + } + } else { + println!("Key {} released", event_data.detail); + } + }, + xinput2::XI_ButtonPress | xinput2::XI_ButtonRelease => { + let event_data: &xinput2::XIDeviceEvent = unsafe{transmute(cookie.data)}; + if cookie.evtype == xinput2::XI_ButtonPress { + println!("Button {} pressed", event_data.detail); + } else { + println!("Button {} released", event_data.detail); + } + }, + xinput2::XI_Motion => { + let event_data: &xinput2::XIDeviceEvent = unsafe{transmute(cookie.data)}; + let axis_state = event_data.valuators; + let mask = unsafe{ from_raw_parts(axis_state.mask, axis_state.mask_len as usize) }; + let mut axis_count = 0; + + let mut scroll_delta = (0.0, 0.0); + for axis_id in 0..axis_state.mask_len { + if xinput2::XIMaskIsSet(&mask, axis_id) { + let axis_value = unsafe{*axis_state.values.offset(axis_count)}; + let delta = calc_scroll_deltas(event_data, axis_id, axis_value, &axis_list, &mut prev_state.axis_values); + scroll_delta.0 += delta.0; + scroll_delta.1 += delta.1; + axis_count += 1; + } + } + + if scroll_delta.0.abs() > 0.0 || scroll_delta.1.abs() > 0.0 { + println!("Mouse wheel/trackpad scrolled by ({}, {})", scroll_delta.0, scroll_delta.1); + } + + let new_cursor_pos = (event_data.event_x, event_data.event_y); + if new_cursor_pos != prev_state.cursor_pos { + println!("Mouse moved to ({}, {})", new_cursor_pos.0, new_cursor_pos.1); + prev_state.cursor_pos = new_cursor_pos; + } + }, + _ => () + } + unsafe{xlib::XFreeEventData(display, &mut cookie)}; + }, + _ 
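+            // every non-Generic (core protocol) event is ignored by this demo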
=> () + } + }); +} diff --git a/third_party/rust/x11/examples/xrecord.rs b/third_party/rust/x11/examples/xrecord.rs new file mode 100644 index 000000000000..353d73e9824c --- /dev/null +++ b/third_party/rust/x11/examples/xrecord.rs @@ -0,0 +1,127 @@ + + +#![cfg_attr(not(feature = "xlib"), allow(dead_code))] +#![cfg_attr(not(feature = "xlib"), allow(unused_imports))] + +extern crate libc; +extern crate x11; + +use std::ffi::CString; +use std::ptr::{ + null, + null_mut, +}; + +use std::os::raw::{ + c_int +}; +use x11::xlib; +use x11::xrecord; + +static mut EVENT_COUNT:u32 = 0; + +#[cfg(not(all(feature = "xlib", feature = "xrecord")))] +fn main () { + panic!("this example requires `--features 'xlib xrecord'`"); +} + +#[cfg(all(feature = "xlib", feature = "xrecord"))] +fn main () { + unsafe { + + let dpy_control = xlib::XOpenDisplay(null()); + let dpy_data = xlib::XOpenDisplay(null()); + if dpy_control == null_mut() || dpy_data == null_mut() { + panic!("can't open display"); + } + + xlib::XSynchronize(dpy_control, 1); + + let extension_name = CString::new("RECORD").unwrap(); + + let extension = xlib::XInitExtension( + dpy_control, + extension_name.as_ptr()); + if extension.is_null() { + panic!("Error init X Record Extension"); + } + + + let mut version_major: c_int = 0; + let mut version_minor: c_int = 0; + xrecord::XRecordQueryVersion( + dpy_control, + &mut version_major, + &mut version_minor + ); + println!( + "RECORD extension version {}.{}", + version_major, + version_minor + ); + + + let mut record_range: xrecord::XRecordRange = *xrecord::XRecordAllocRange(); + record_range.device_events.first = xlib::KeyPress as u8; + record_range.device_events.last = xlib::MotionNotify as u8; + + + let context = xrecord::XRecordCreateContext( + dpy_control, + 0, + &mut xrecord::XRecordAllClients, + 1, + std::mem::transmute(&mut &mut record_range), + 1 + ); + + if context == 0 { + panic!("Fail create Record context\n"); + } + + + let result = xrecord::XRecordEnableContext( + dpy_data, + context, + Some(record_callback), + &mut 0 + ); + if result == 0 { + panic!("Cound not enable the Record context!\n"); + } + } +} + +unsafe extern "C" fn record_callback(_:*mut i8, raw_data: *mut xrecord::XRecordInterceptData) { + EVENT_COUNT += 1; + let data = &*raw_data; + + + if data.category != xrecord::XRecordFromServer { + return; + } + + + let xdatum = &*(data.data as *mut XRecordDatum); + + let event_type = match xdatum.xtype as i32 { + xlib::KeyPress => "KeyPress", + xlib::KeyRelease => "KeyRelease", + xlib::ButtonPress => "ButtonPress", + xlib::ButtonRelease => "ButtonRelease", + xlib::MotionNotify => "MotionNotify", + _ => "Other" + }; + + println!("Event recieve\t{:?}\tevent.", event_type); + + xrecord::XRecordFreeData(raw_data); +} + +#[repr(C)] +struct XRecordDatum { + xtype: u8, + code: u8, + unknown1: u8, + unknown2: u8 +} diff --git a/third_party/rust/x11/src/dpms.rs b/third_party/rust/x11/src/dpms.rs new file mode 100644 index 000000000000..108a226f30a4 --- /dev/null +++ b/third_party/rust/x11/src/dpms.rs @@ -0,0 +1,44 @@ + + + + +use std::os::raw::{ c_int }; + +use xlib::{ Display, Status, Bool }; +use xmd::{ CARD16, BOOL }; + + + + + + + +x11_link! 
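+// Generated loader: opens libXext at runtime and resolves the 9 DPMS entry points below.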
{ Xext, xext, ["libXext.so.6", "libXext.so"], 9, + pub fn DPMSQueryExtension (_1: *mut Display, _2: *mut c_int, _3: *mut c_int) -> Bool, + pub fn DPMSGetVersion (_1: *mut Display, _2: *mut c_int, _3: *mut c_int) -> Status, + pub fn DPMSCapable (_1: *mut Display) -> Bool, + pub fn DPMSSetTimeouts (_1: *mut Display, _2: CARD16, _3: CARD16, _4: CARD16) -> Status, + pub fn DPMSGetTimeouts (_1: *mut Display, _2: *mut CARD16, _3: *mut CARD16, _4: *mut CARD16) -> Bool, + pub fn DPMSEnable (_1: *mut Display) -> Status, + pub fn DPMSDisable (_1: *mut Display) -> Status, + pub fn DPMSForceLevel (_1: *mut Display, _2: CARD16) -> Status, + pub fn DPMSInfo (_1: *mut Display, _2: *mut CARD16, _3: *mut BOOL) -> Status, +variadic: +globals: +} + + + + + + + +pub const DPMSMajorVersion: c_int = 1; +pub const DPMSMinorVersion: c_int = 1; + +pub const DPMSExtensionName: &'static str = "DPMS"; + +pub const DPMSModeOn: CARD16 = 0; +pub const DPMSModeStandby: CARD16 = 1; +pub const DPMSModeSuspend: CARD16 = 2; +pub const DPMSModeOff: CARD16 = 3; diff --git a/third_party/rust/x11/src/glx.rs b/third_party/rust/x11/src/glx.rs new file mode 100644 index 000000000000..982ee2be849d --- /dev/null +++ b/third_party/rust/x11/src/glx.rs @@ -0,0 +1,249 @@ + + + + +use std::os::raw::{ + c_char, + c_int, + c_uchar, + c_uint, + c_ulong, +}; + +use ::xlib::{ + Display, + XID, + XVisualInfo, +}; + + + + + + + +x11_link! { Glx, gl, ["libGL.so.1", "libGL.so"], 40, + pub fn glXChooseFBConfig (_4: *mut Display, _3: c_int, _2: *const c_int, _1: *mut c_int) -> *mut GLXFBConfig, + pub fn glXChooseVisual (_3: *mut Display, _2: c_int, _1: *mut c_int) -> *mut XVisualInfo, + pub fn glXCopyContext (_4: *mut Display, _3: GLXContext, _2: GLXContext, _1: c_ulong) -> (), + pub fn glXCreateContext (_4: *mut Display, _3: *mut XVisualInfo, _2: GLXContext, _1: c_int) -> GLXContext, + pub fn glXCreateGLXPixmap (_3: *mut Display, _2: *mut XVisualInfo, _1: c_ulong) -> c_ulong, + pub fn glXCreateNewContext (_5: *mut Display, _4: GLXFBConfig, _3: c_int, _2: GLXContext, _1: c_int) -> GLXContext, + pub fn glXCreatePbuffer (_3: *mut Display, _2: GLXFBConfig, _1: *const c_int) -> c_ulong, + pub fn glXCreatePixmap (_4: *mut Display, _3: GLXFBConfig, _2: c_ulong, _1: *const c_int) -> c_ulong, + pub fn glXCreateWindow (_4: *mut Display, _3: GLXFBConfig, _2: c_ulong, _1: *const c_int) -> c_ulong, + pub fn glXDestroyContext (_2: *mut Display, _1: GLXContext) -> (), + pub fn glXDestroyGLXPixmap (_2: *mut Display, _1: c_ulong) -> (), + pub fn glXDestroyPbuffer (_2: *mut Display, _1: c_ulong) -> (), + pub fn glXDestroyPixmap (_2: *mut Display, _1: c_ulong) -> (), + pub fn glXDestroyWindow (_2: *mut Display, _1: c_ulong) -> (), + pub fn glXGetClientString (_2: *mut Display, _1: c_int) -> *const c_char, + pub fn glXGetConfig (_4: *mut Display, _3: *mut XVisualInfo, _2: c_int, _1: *mut c_int) -> c_int, + pub fn glXGetCurrentContext () -> GLXContext, + pub fn glXGetCurrentDisplay () -> *mut Display, + pub fn glXGetCurrentDrawable () -> c_ulong, + pub fn glXGetCurrentReadDrawable () -> c_ulong, + pub fn glXGetFBConfigAttrib (_4: *mut Display, _3: GLXFBConfig, _2: c_int, _1: *mut c_int) -> c_int, + pub fn glXGetFBConfigs (_3: *mut Display, _2: c_int, _1: *mut c_int) -> *mut GLXFBConfig, + pub fn glXGetProcAddress (_1: *const c_uchar) -> Option, + pub fn glXGetProcAddressARB (_1: *const c_uchar) -> Option, + pub fn glXGetSelectedEvent (_3: *mut Display, _2: c_ulong, _1: *mut c_ulong) -> (), + pub fn glXGetVisualFromFBConfig (_2: *mut Display, _1: GLXFBConfig) -> *mut 
XVisualInfo, + pub fn glXIsDirect (_2: *mut Display, _1: GLXContext) -> c_int, + pub fn glXMakeContextCurrent (_4: *mut Display, _3: c_ulong, _2: c_ulong, _1: GLXContext) -> c_int, + pub fn glXMakeCurrent (_3: *mut Display, _2: c_ulong, _1: GLXContext) -> c_int, + pub fn glXQueryContext (_4: *mut Display, _3: GLXContext, _2: c_int, _1: *mut c_int) -> c_int, + pub fn glXQueryDrawable (_4: *mut Display, _3: c_ulong, _2: c_int, _1: *mut c_uint) -> (), + pub fn glXQueryExtension (_3: *mut Display, _2: *mut c_int, _1: *mut c_int) -> c_int, + pub fn glXQueryExtensionsString (_2: *mut Display, _1: c_int) -> *const c_char, + pub fn glXQueryServerString (_3: *mut Display, _2: c_int, _1: c_int) -> *const c_char, + pub fn glXQueryVersion (_3: *mut Display, _2: *mut c_int, _1: *mut c_int) -> c_int, + pub fn glXSelectEvent (_3: *mut Display, _2: c_ulong, _1: c_ulong) -> (), + pub fn glXSwapBuffers (_2: *mut Display, _1: c_ulong) -> (), + pub fn glXUseXFont (_4: c_ulong, _3: c_int, _2: c_int, _1: c_int) -> (), + pub fn glXWaitGL () -> (), + pub fn glXWaitX () -> (), +variadic: +globals: +} + + + + + + + + +#[repr(C)] pub struct __GLXcontextRec; +#[repr(C)] pub struct __GLXFBConfigRec; + + +pub type GLXContext = *mut __GLXcontextRec; +pub type GLXContextID = XID; +pub type GLXDrawable = XID; +pub type GLXFBConfig = *mut __GLXFBConfigRec; +pub type GLXFBConfigID = XID; +pub type GLXPbuffer = XID; +pub type GLXPixmap = XID; +pub type GLXWindow = XID; + + + + + + + + +pub const GLX_SLOW_CONFIG: c_int = 0x8001; +pub const GLX_NON_CONFORMANT_CONFIG: c_int = 0x800d; + + +pub const GLX_WINDOW_BIT: c_int = 0x0001; +pub const GLX_PIXMAP_BIT: c_int = 0x0002; +pub const GLX_PBUFFER_BIT: c_int = 0x0004; + + +pub const GLX_USE_GL: c_int = 0x0001; +pub const GLX_BUFFER_SIZE: c_int = 0x0002; +pub const GLX_LEVEL: c_int = 0x0003; +pub const GLX_RGBA: c_int = 0x0004; +pub const GLX_DOUBLEBUFFER: c_int = 0x0005; +pub const GLX_STEREO: c_int = 0x0006; +pub const GLX_AUX_BUFFERS: c_int = 0x0007; +pub const GLX_RED_SIZE: c_int = 0x0008; +pub const GLX_GREEN_SIZE: c_int = 0x0009; +pub const GLX_BLUE_SIZE: c_int = 0x000a; +pub const GLX_ALPHA_SIZE: c_int = 0x000b; +pub const GLX_DEPTH_SIZE: c_int = 0x000c; +pub const GLX_STENCIL_SIZE: c_int = 0x000d; +pub const GLX_ACCUM_RED_SIZE: c_int = 0x000e; +pub const GLX_ACCUM_GREEN_SIZE: c_int = 0x000f; +pub const GLX_ACCUM_BLUE_SIZE: c_int = 0x0010; +pub const GLX_ACCUM_ALPHA_SIZE: c_int = 0x0011; +pub const GLX_CONFIG_CAVEAT: c_int = 0x0020; +pub const GLX_X_VISUAL_TYPE: c_int = 0x0022; +pub const GLX_TRANSPARENT_TYPE: c_int = 0x0023; +pub const GLX_TRANSPARENT_INDEX_VALUE: c_int = 0x0024; +pub const GLX_TRANSPARENT_RED_VALUE: c_int = 0x0025; +pub const GLX_TRANSPARENT_GREEN_VALUE: c_int = 0x0026; +pub const GLX_TRANSPARENT_BLUE_VALUE: c_int = 0x0027; +pub const GLX_TRANSPARENT_ALPHA_VALUE: c_int = 0x0028; +pub const GLX_VISUAL_ID: c_int = 0x800B; +pub const GLX_SCREEN: c_int = 0x800C; +pub const GLX_DRAWABLE_TYPE: c_int = 0x8010; +pub const GLX_RENDER_TYPE: c_int = 0x8011; +pub const GLX_X_RENDERABLE: c_int = 0x8012; +pub const GLX_FBCONFIG_ID: c_int = 0x8013; +pub const GLX_MAX_PBUFFER_WIDTH: c_int = 0x8016; +pub const GLX_MAX_PBUFFER_HEIGHT: c_int = 0x8017; +pub const GLX_MAX_PBUFFER_PIXELS: c_int = 0x8018; +pub const GLX_SAMPLE_BUFFERS: c_int = 0x1_86a0; +pub const GLX_SAMPLES: c_int = 0x1_86a1; + + +pub const GLX_DONT_CARE: c_int = -1; +pub const GLX_NONE: c_int = 0x8000; + + +pub const GLX_RGBA_BIT: c_int = 0x0001; +pub const GLX_COLOR_INDEX_BIT: c_int = 0x0002; + + +pub const 
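+// Values for the GLX_TRANSPARENT_TYPE attribute: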
GLX_TRANSPARENT_RGB: c_int = 0x8008; +pub const GLX_TRANSPARENT_INDEX: c_int = 0x8009; + + +pub const GLX_TRUE_COLOR: c_int = 0x8002; +pub const GLX_DIRECT_COLOR: c_int = 0x8003; +pub const GLX_PSEUDO_COLOR: c_int = 0x8004; +pub const GLX_STATIC_COLOR: c_int = 0x8005; +pub const GLX_GRAY_SCALE: c_int = 0x8006; +pub const GLX_STATIC_GRAY: c_int = 0x8007; + + +pub const GLX_BAD_SCREEN: c_int = 1; +pub const GLX_BAD_ATTRIBUTE: c_int = 2; +pub const GLX_NO_EXTENSION: c_int = 3; +pub const GLX_BAD_VISUAL: c_int = 4; +pub const GLX_BAD_CONTEXT: c_int = 5; +pub const GLX_BAD_VALUE: c_int = 6; +pub const GLX_BAD_ENUM: c_int = 7; + + +pub const GLX_VENDOR: c_int = 1; +pub const GLX_VERSION: c_int = 2; +pub const GLX_EXTENSIONS: c_int = 3; + + +pub const GLX_FRONT_LEFT_BUFFER_BIT: c_uint = 0x0001; +pub const GLX_FRONT_RIGHT_BUFFER_BIT: c_uint = 0x0002; +pub const GLX_BACK_LEFT_BUFFER_BIT: c_uint = 0x0004; +pub const GLX_BACK_RIGHT_BUFFER_BIT: c_uint = 0x0008; +pub const GLX_AUX_BUFFERS_BIT: c_uint = 0x0010; +pub const GLX_DEPTH_BUFFER_BIT: c_uint = 0x0020; +pub const GLX_STENCIL_BUFFER_BIT: c_uint = 0x0040; +pub const GLX_ACCUM_BUFFER_BIT: c_uint = 0080; + + +pub const GLX_RGBA_TYPE: c_int = 0x8014; +pub const GLX_COLOR_INDEX_TYPE: c_int = 0x8015; + + +pub const GLX_PRESERVED_CONTENTS: c_int = 0x801B; +pub const GLX_LARGEST_PBUFFER: c_int = 0x801C; +pub const GLX_WIDTH: c_int = 0x801D; +pub const GLX_HEIGHT: c_int = 0x801E; +pub const GLX_PBUFFER_HEIGHT: c_int = 0x8040; +pub const GLX_PBUFFER_WIDTH: c_int = 0x8041; + + +pub const GLX_EVENT_MASK: c_int = 0x801F; + + +pub const GLX_PBUFFER_CLOBBER_MASK: c_ulong = 0x0800_0000; + + +pub const GLX_DAMAGED: c_int = 0x8020; +pub const GLX_SAVED: c_int = 0x8021; + + +pub const GLX_WINDOW: c_int = 0x8022; +pub const GLX_PBUFFER: c_int = 0x8023; + + + + + + + +pub mod arb { + use std::os::raw::c_int; + + + pub const GLX_CONTEXT_MAJOR_VERSION_ARB: c_int = 0x2091; + pub const GLX_CONTEXT_MINOR_VERSION_ARB: c_int = 0x2092; + pub const GLX_CONTEXT_FLAGS_ARB: c_int = 0x2094; + pub const GLX_CONTEXT_PROFILE_MASK_ARB: c_int = 0x9126; + + + pub const GLX_CONTEXT_DEBUG_BIT_ARB: c_int = 0x0001; + pub const GLX_CONTEXT_FORWARD_COMPATIBLE_BIT_ARB: c_int = 0x0002; + + + pub const GLX_CONTEXT_CORE_PROFILE_BIT_ARB: c_int = 0x0001; + pub const GLX_CONTEXT_COMPATIBILITY_PROFILE_BIT_ARB: c_int = 0x0002; +} + + + + + + + +pub mod ext { + use std::os::raw::c_int; + + + pub const GLX_SWAP_INTERVAL_EXT: c_int = 0x20f1; + pub const GLX_MAX_SWAP_INTERVAL_EXT: c_int = 0x20f2; +} diff --git a/third_party/rust/x11/src/internal.rs b/third_party/rust/x11/src/internal.rs new file mode 100644 index 000000000000..ff3cb96c367d --- /dev/null +++ b/third_party/rust/x11/src/internal.rs @@ -0,0 +1,41 @@ + + + + +use std::cmp::min; +use std::mem::{ + size_of, + zeroed, +}; + + + + + + + +pub unsafe fn mem_eq (a: &T, b: &T) -> bool { + let a_addr = a as *const T as usize; + let b_addr = b as *const T as usize; + + for i in 0..size_of::() { + if *((a_addr + i) as *const u8) != *((b_addr + i) as *const u8) { + return false; + } + } + + return true; +} + +pub unsafe fn transmute_union (input: &I) -> O + where I : Sized, O : Sized +{ + let mut output: O = zeroed(); + let copy_len = min(size_of::(), size_of::()); + + for i in 0..copy_len { + *((&mut output as *mut O as usize + i) as *mut u8) = *((input as *const I as usize + i) as *const u8); + } + + return output; +} diff --git a/third_party/rust/x11/src/keysym.rs b/third_party/rust/x11/src/keysym.rs new file mode 100644 index 
000000000000..5ef545afa4ac --- /dev/null +++ b/third_party/rust/x11/src/keysym.rs @@ -0,0 +1,1332 @@ + + + + +use std::os::raw::c_uint; + +pub const XK_BackSpace: c_uint = 0xFF08; +pub const XK_Tab: c_uint = 0xFF09; +pub const XK_Linefeed: c_uint = 0xFF0A; +pub const XK_Clear: c_uint = 0xFF0B; +pub const XK_Return: c_uint = 0xFF0D; +pub const XK_Pause: c_uint = 0xFF13; +pub const XK_Scroll_Lock: c_uint = 0xFF14; +pub const XK_Sys_Req: c_uint = 0xFF15; +pub const XK_Escape: c_uint = 0xFF1B; +pub const XK_Delete: c_uint = 0xFFFF; +pub const XK_Multi_key: c_uint = 0xFF20; +pub const XK_Kanji: c_uint = 0xFF21; +pub const XK_Muhenkan: c_uint = 0xFF22; +pub const XK_Henkan_Mode: c_uint = 0xFF23; +pub const XK_Henkan: c_uint = 0xFF23; +pub const XK_Romaji: c_uint = 0xFF24; +pub const XK_Hiragana: c_uint = 0xFF25; +pub const XK_Katakana: c_uint = 0xFF26; +pub const XK_Hiragana_Katakana: c_uint = 0xFF27; +pub const XK_Zenkaku: c_uint = 0xFF28; +pub const XK_Hankaku: c_uint = 0xFF29; +pub const XK_Zenkaku_Hankaku: c_uint = 0xFF2A; +pub const XK_Touroku: c_uint = 0xFF2B; +pub const XK_Massyo: c_uint = 0xFF2C; +pub const XK_Kana_Lock: c_uint = 0xFF2D; +pub const XK_Kana_Shift: c_uint = 0xFF2E; +pub const XK_Eisu_Shift: c_uint = 0xFF2F; +pub const XK_Eisu_toggle: c_uint = 0xFF30; +pub const XK_Home: c_uint = 0xFF50; +pub const XK_Left: c_uint = 0xFF51; +pub const XK_Up: c_uint = 0xFF52; +pub const XK_Right: c_uint = 0xFF53; +pub const XK_Down: c_uint = 0xFF54; +pub const XK_Prior: c_uint = 0xFF55; +pub const XK_Page_Up: c_uint = 0xFF55; +pub const XK_Next: c_uint = 0xFF56; +pub const XK_Page_Down: c_uint = 0xFF56; +pub const XK_End: c_uint = 0xFF57; +pub const XK_Begin: c_uint = 0xFF58; +pub const XK_Win_L: c_uint = 0xFF5B; +pub const XK_Win_R: c_uint = 0xFF5C; +pub const XK_App: c_uint = 0xFF5D; +pub const XK_Select: c_uint = 0xFF60; +pub const XK_Print: c_uint = 0xFF61; +pub const XK_Execute: c_uint = 0xFF62; +pub const XK_Insert: c_uint = 0xFF63; +pub const XK_Undo: c_uint = 0xFF65; +pub const XK_Redo: c_uint = 0xFF66; +pub const XK_Menu: c_uint = 0xFF67; +pub const XK_Find: c_uint = 0xFF68; +pub const XK_Cancel: c_uint = 0xFF69; +pub const XK_Help: c_uint = 0xFF6A; +pub const XK_Break: c_uint = 0xFF6B; +pub const XK_Mode_switch: c_uint = 0xFF7E; +pub const XK_script_switch: c_uint = 0xFF7E; +pub const XK_Num_Lock: c_uint = 0xFF7F; +pub const XK_KP_Space: c_uint = 0xFF80; +pub const XK_KP_Tab: c_uint = 0xFF89; +pub const XK_KP_Enter: c_uint = 0xFF8D; +pub const XK_KP_F1: c_uint = 0xFF91; +pub const XK_KP_F2: c_uint = 0xFF92; +pub const XK_KP_F3: c_uint = 0xFF93; +pub const XK_KP_F4: c_uint = 0xFF94; +pub const XK_KP_Home: c_uint = 0xFF95; +pub const XK_KP_Left: c_uint = 0xFF96; +pub const XK_KP_Up: c_uint = 0xFF97; +pub const XK_KP_Right: c_uint = 0xFF98; +pub const XK_KP_Down: c_uint = 0xFF99; +pub const XK_KP_Prior: c_uint = 0xFF9A; +pub const XK_KP_Page_Up: c_uint = 0xFF9A; +pub const XK_KP_Next: c_uint = 0xFF9B; +pub const XK_KP_Page_Down: c_uint = 0xFF9B; +pub const XK_KP_End: c_uint = 0xFF9C; +pub const XK_KP_Begin: c_uint = 0xFF9D; +pub const XK_KP_Insert: c_uint = 0xFF9E; +pub const XK_KP_Delete: c_uint = 0xFF9F; +pub const XK_KP_Equal: c_uint = 0xFFBD; +pub const XK_KP_Multiply: c_uint = 0xFFAA; +pub const XK_KP_Add: c_uint = 0xFFAB; +pub const XK_KP_Separator: c_uint = 0xFFAC; +pub const XK_KP_Subtract: c_uint = 0xFFAD; +pub const XK_KP_Decimal: c_uint = 0xFFAE; +pub const XK_KP_Divide: c_uint = 0xFFAF; +pub const XK_KP_0: c_uint = 0xFFB0; +pub const XK_KP_1: c_uint = 0xFFB1; +pub const 
XK_KP_2: c_uint = 0xFFB2; +pub const XK_KP_3: c_uint = 0xFFB3; +pub const XK_KP_4: c_uint = 0xFFB4; +pub const XK_KP_5: c_uint = 0xFFB5; +pub const XK_KP_6: c_uint = 0xFFB6; +pub const XK_KP_7: c_uint = 0xFFB7; +pub const XK_KP_8: c_uint = 0xFFB8; +pub const XK_KP_9: c_uint = 0xFFB9; +pub const XK_F1: c_uint = 0xFFBE; +pub const XK_F2: c_uint = 0xFFBF; +pub const XK_F3: c_uint = 0xFFC0; +pub const XK_F4: c_uint = 0xFFC1; +pub const XK_F5: c_uint = 0xFFC2; +pub const XK_F6: c_uint = 0xFFC3; +pub const XK_F7: c_uint = 0xFFC4; +pub const XK_F8: c_uint = 0xFFC5; +pub const XK_F9: c_uint = 0xFFC6; +pub const XK_F10: c_uint = 0xFFC7; +pub const XK_F11: c_uint = 0xFFC8; +pub const XK_L1: c_uint = 0xFFC8; +pub const XK_F12: c_uint = 0xFFC9; +pub const XK_L2: c_uint = 0xFFC9; +pub const XK_F13: c_uint = 0xFFCA; +pub const XK_L3: c_uint = 0xFFCA; +pub const XK_F14: c_uint = 0xFFCB; +pub const XK_L4: c_uint = 0xFFCB; +pub const XK_F15: c_uint = 0xFFCC; +pub const XK_L5: c_uint = 0xFFCC; +pub const XK_F16: c_uint = 0xFFCD; +pub const XK_L6: c_uint = 0xFFCD; +pub const XK_F17: c_uint = 0xFFCE; +pub const XK_L7: c_uint = 0xFFCE; +pub const XK_F18: c_uint = 0xFFCF; +pub const XK_L8: c_uint = 0xFFCF; +pub const XK_F19: c_uint = 0xFFD0; +pub const XK_L9: c_uint = 0xFFD0; +pub const XK_F20: c_uint = 0xFFD1; +pub const XK_L10: c_uint = 0xFFD1; +pub const XK_F21: c_uint = 0xFFD2; +pub const XK_R1: c_uint = 0xFFD2; +pub const XK_F22: c_uint = 0xFFD3; +pub const XK_R2: c_uint = 0xFFD3; +pub const XK_F23: c_uint = 0xFFD4; +pub const XK_R3: c_uint = 0xFFD4; +pub const XK_F24: c_uint = 0xFFD5; +pub const XK_R4: c_uint = 0xFFD5; +pub const XK_F25: c_uint = 0xFFD6; +pub const XK_R5: c_uint = 0xFFD6; +pub const XK_F26: c_uint = 0xFFD7; +pub const XK_R6: c_uint = 0xFFD7; +pub const XK_F27: c_uint = 0xFFD8; +pub const XK_R7: c_uint = 0xFFD8; +pub const XK_F28: c_uint = 0xFFD9; +pub const XK_R8: c_uint = 0xFFD9; +pub const XK_F29: c_uint = 0xFFDA; +pub const XK_R9: c_uint = 0xFFDA; +pub const XK_F30: c_uint = 0xFFDB; +pub const XK_R10: c_uint = 0xFFDB; +pub const XK_F31: c_uint = 0xFFDC; +pub const XK_R11: c_uint = 0xFFDC; +pub const XK_F32: c_uint = 0xFFDD; +pub const XK_R12: c_uint = 0xFFDD; +pub const XK_F33: c_uint = 0xFFDE; +pub const XK_R13: c_uint = 0xFFDE; +pub const XK_F34: c_uint = 0xFFDF; +pub const XK_R14: c_uint = 0xFFDF; +pub const XK_F35: c_uint = 0xFFE0; +pub const XK_R15: c_uint = 0xFFE0; +pub const XK_Shift_L: c_uint = 0xFFE1; +pub const XK_Shift_R: c_uint = 0xFFE2; +pub const XK_Control_L: c_uint = 0xFFE3; +pub const XK_Control_R: c_uint = 0xFFE4; +pub const XK_Caps_Lock: c_uint = 0xFFE5; +pub const XK_Shift_Lock: c_uint = 0xFFE6; +pub const XK_Meta_L: c_uint = 0xFFE7; +pub const XK_Meta_R: c_uint = 0xFFE8; +pub const XK_Alt_L: c_uint = 0xFFE9; +pub const XK_Alt_R: c_uint = 0xFFEA; +pub const XK_Super_L: c_uint = 0xFFEB; +pub const XK_Super_R: c_uint = 0xFFEC; +pub const XK_Hyper_L: c_uint = 0xFFED; +pub const XK_Hyper_R: c_uint = 0xFFEE; +pub const XK_space: c_uint = 0x020; +pub const XK_exclam: c_uint = 0x021; +pub const XK_quotedbl: c_uint = 0x022; +pub const XK_numbersign: c_uint = 0x023; +pub const XK_dollar: c_uint = 0x024; +pub const XK_percent: c_uint = 0x025; +pub const XK_ampersand: c_uint = 0x026; +pub const XK_apostrophe: c_uint = 0x027; +pub const XK_quoteright: c_uint = 0x027; +pub const XK_parenleft: c_uint = 0x028; +pub const XK_parenright: c_uint = 0x029; +pub const XK_asterisk: c_uint = 0x02a; +pub const XK_plus: c_uint = 0x02b; +pub const XK_comma: c_uint = 0x02c; +pub const 
XK_minus: c_uint = 0x02d; +pub const XK_period: c_uint = 0x02e; +pub const XK_slash: c_uint = 0x02f; +pub const XK_0: c_uint = 0x030; +pub const XK_1: c_uint = 0x031; +pub const XK_2: c_uint = 0x032; +pub const XK_3: c_uint = 0x033; +pub const XK_4: c_uint = 0x034; +pub const XK_5: c_uint = 0x035; +pub const XK_6: c_uint = 0x036; +pub const XK_7: c_uint = 0x037; +pub const XK_8: c_uint = 0x038; +pub const XK_9: c_uint = 0x039; +pub const XK_colon: c_uint = 0x03a; +pub const XK_semicolon: c_uint = 0x03b; +pub const XK_less: c_uint = 0x03c; +pub const XK_equal: c_uint = 0x03d; +pub const XK_greater: c_uint = 0x03e; +pub const XK_question: c_uint = 0x03f; +pub const XK_at: c_uint = 0x040; +pub const XK_A: c_uint = 0x041; +pub const XK_B: c_uint = 0x042; +pub const XK_C: c_uint = 0x043; +pub const XK_D: c_uint = 0x044; +pub const XK_E: c_uint = 0x045; +pub const XK_F: c_uint = 0x046; +pub const XK_G: c_uint = 0x047; +pub const XK_H: c_uint = 0x048; +pub const XK_I: c_uint = 0x049; +pub const XK_J: c_uint = 0x04a; +pub const XK_K: c_uint = 0x04b; +pub const XK_L: c_uint = 0x04c; +pub const XK_M: c_uint = 0x04d; +pub const XK_N: c_uint = 0x04e; +pub const XK_O: c_uint = 0x04f; +pub const XK_P: c_uint = 0x050; +pub const XK_Q: c_uint = 0x051; +pub const XK_R: c_uint = 0x052; +pub const XK_S: c_uint = 0x053; +pub const XK_T: c_uint = 0x054; +pub const XK_U: c_uint = 0x055; +pub const XK_V: c_uint = 0x056; +pub const XK_W: c_uint = 0x057; +pub const XK_X: c_uint = 0x058; +pub const XK_Y: c_uint = 0x059; +pub const XK_Z: c_uint = 0x05a; +pub const XK_bracketleft: c_uint = 0x05b; +pub const XK_backslash: c_uint = 0x05c; +pub const XK_bracketright: c_uint = 0x05d; +pub const XK_asciicircum: c_uint = 0x05e; +pub const XK_underscore: c_uint = 0x05f; +pub const XK_grave: c_uint = 0x060; +pub const XK_quoteleft: c_uint = 0x060; +pub const XK_a: c_uint = 0x061; +pub const XK_b: c_uint = 0x062; +pub const XK_c: c_uint = 0x063; +pub const XK_d: c_uint = 0x064; +pub const XK_e: c_uint = 0x065; +pub const XK_f: c_uint = 0x066; +pub const XK_g: c_uint = 0x067; +pub const XK_h: c_uint = 0x068; +pub const XK_i: c_uint = 0x069; +pub const XK_j: c_uint = 0x06a; +pub const XK_k: c_uint = 0x06b; +pub const XK_l: c_uint = 0x06c; +pub const XK_m: c_uint = 0x06d; +pub const XK_n: c_uint = 0x06e; +pub const XK_o: c_uint = 0x06f; +pub const XK_p: c_uint = 0x070; +pub const XK_q: c_uint = 0x071; +pub const XK_r: c_uint = 0x072; +pub const XK_s: c_uint = 0x073; +pub const XK_t: c_uint = 0x074; +pub const XK_u: c_uint = 0x075; +pub const XK_v: c_uint = 0x076; +pub const XK_w: c_uint = 0x077; +pub const XK_x: c_uint = 0x078; +pub const XK_y: c_uint = 0x079; +pub const XK_z: c_uint = 0x07a; +pub const XK_braceleft: c_uint = 0x07b; +pub const XK_bar: c_uint = 0x07c; +pub const XK_braceright: c_uint = 0x07d; +pub const XK_asciitilde: c_uint = 0x07e; +pub const XK_nobreakspace: c_uint = 0x0a0; +pub const XK_exclamdown: c_uint = 0x0a1; +pub const XK_cent: c_uint = 0x0a2; +pub const XK_sterling: c_uint = 0x0a3; +pub const XK_currency: c_uint = 0x0a4; +pub const XK_yen: c_uint = 0x0a5; +pub const XK_brokenbar: c_uint = 0x0a6; +pub const XK_section: c_uint = 0x0a7; +pub const XK_diaeresis: c_uint = 0x0a8; +pub const XK_copyright: c_uint = 0x0a9; +pub const XK_ordfeminine: c_uint = 0x0aa; +pub const XK_guillemotleft: c_uint = 0x0ab; +pub const XK_notsign: c_uint = 0x0ac; +pub const XK_hyphen: c_uint = 0x0ad; +pub const XK_registered: c_uint = 0x0ae; +pub const XK_macron: c_uint = 0x0af; +pub const XK_degree: c_uint = 0x0b0; +pub const 
XK_plusminus: c_uint = 0x0b1; +pub const XK_twosuperior: c_uint = 0x0b2; +pub const XK_threesuperior: c_uint = 0x0b3; +pub const XK_acute: c_uint = 0x0b4; +pub const XK_mu: c_uint = 0x0b5; +pub const XK_paragraph: c_uint = 0x0b6; +pub const XK_periodcentered: c_uint = 0x0b7; +pub const XK_cedilla: c_uint = 0x0b8; +pub const XK_onesuperior: c_uint = 0x0b9; +pub const XK_masculine: c_uint = 0x0ba; +pub const XK_guillemotright: c_uint = 0x0bb; +pub const XK_onequarter: c_uint = 0x0bc; +pub const XK_onehalf: c_uint = 0x0bd; +pub const XK_threequarters: c_uint = 0x0be; +pub const XK_questiondown: c_uint = 0x0bf; +pub const XK_Agrave: c_uint = 0x0c0; +pub const XK_Aacute: c_uint = 0x0c1; +pub const XK_Acircumflex: c_uint = 0x0c2; +pub const XK_Atilde: c_uint = 0x0c3; +pub const XK_Adiaeresis: c_uint = 0x0c4; +pub const XK_Aring: c_uint = 0x0c5; +pub const XK_AE: c_uint = 0x0c6; +pub const XK_Ccedilla: c_uint = 0x0c7; +pub const XK_Egrave: c_uint = 0x0c8; +pub const XK_Eacute: c_uint = 0x0c9; +pub const XK_Ecircumflex: c_uint = 0x0ca; +pub const XK_Ediaeresis: c_uint = 0x0cb; +pub const XK_Igrave: c_uint = 0x0cc; +pub const XK_Iacute: c_uint = 0x0cd; +pub const XK_Icircumflex: c_uint = 0x0ce; +pub const XK_Idiaeresis: c_uint = 0x0cf; +pub const XK_ETH: c_uint = 0x0d0; +pub const XK_Eth: c_uint = 0x0d0; +pub const XK_Ntilde: c_uint = 0x0d1; +pub const XK_Ograve: c_uint = 0x0d2; +pub const XK_Oacute: c_uint = 0x0d3; +pub const XK_Ocircumflex: c_uint = 0x0d4; +pub const XK_Otilde: c_uint = 0x0d5; +pub const XK_Odiaeresis: c_uint = 0x0d6; +pub const XK_multiply: c_uint = 0x0d7; +pub const XK_Ooblique: c_uint = 0x0d8; +pub const XK_Ugrave: c_uint = 0x0d9; +pub const XK_Uacute: c_uint = 0x0da; +pub const XK_Ucircumflex: c_uint = 0x0db; +pub const XK_Udiaeresis: c_uint = 0x0dc; +pub const XK_Yacute: c_uint = 0x0dd; +pub const XK_THORN: c_uint = 0x0de; +pub const XK_Thorn: c_uint = 0x0de; +pub const XK_ssharp: c_uint = 0x0df; +pub const XK_agrave: c_uint = 0x0e0; +pub const XK_aacute: c_uint = 0x0e1; +pub const XK_acircumflex: c_uint = 0x0e2; +pub const XK_atilde: c_uint = 0x0e3; +pub const XK_adiaeresis: c_uint = 0x0e4; +pub const XK_aring: c_uint = 0x0e5; +pub const XK_ae: c_uint = 0x0e6; +pub const XK_ccedilla: c_uint = 0x0e7; +pub const XK_egrave: c_uint = 0x0e8; +pub const XK_eacute: c_uint = 0x0e9; +pub const XK_ecircumflex: c_uint = 0x0ea; +pub const XK_ediaeresis: c_uint = 0x0eb; +pub const XK_igrave: c_uint = 0x0ec; +pub const XK_iacute: c_uint = 0x0ed; +pub const XK_icircumflex: c_uint = 0x0ee; +pub const XK_idiaeresis: c_uint = 0x0ef; +pub const XK_eth: c_uint = 0x0f0; +pub const XK_ntilde: c_uint = 0x0f1; +pub const XK_ograve: c_uint = 0x0f2; +pub const XK_oacute: c_uint = 0x0f3; +pub const XK_ocircumflex: c_uint = 0x0f4; +pub const XK_otilde: c_uint = 0x0f5; +pub const XK_odiaeresis: c_uint = 0x0f6; +pub const XK_division: c_uint = 0x0f7; +pub const XK_oslash: c_uint = 0x0f8; +pub const XK_ugrave: c_uint = 0x0f9; +pub const XK_uacute: c_uint = 0x0fa; +pub const XK_ucircumflex: c_uint = 0x0fb; +pub const XK_udiaeresis: c_uint = 0x0fc; +pub const XK_yacute: c_uint = 0x0fd; +pub const XK_thorn: c_uint = 0x0fe; +pub const XK_ydiaeresis: c_uint = 0x0ff; +pub const XK_Aogonek: c_uint = 0x1a1; +pub const XK_breve: c_uint = 0x1a2; +pub const XK_Lstroke: c_uint = 0x1a3; +pub const XK_Lcaron: c_uint = 0x1a5; +pub const XK_Sacute: c_uint = 0x1a6; +pub const XK_Scaron: c_uint = 0x1a9; +pub const XK_Scedilla: c_uint = 0x1aa; +pub const XK_Tcaron: c_uint = 0x1ab; +pub const XK_Zacute: c_uint = 0x1ac; +pub 
const XK_Zcaron: c_uint = 0x1ae; +pub const XK_Zabovedot: c_uint = 0x1af; +pub const XK_aogonek: c_uint = 0x1b1; +pub const XK_ogonek: c_uint = 0x1b2; +pub const XK_lstroke: c_uint = 0x1b3; +pub const XK_lcaron: c_uint = 0x1b5; +pub const XK_sacute: c_uint = 0x1b6; +pub const XK_caron: c_uint = 0x1b7; +pub const XK_scaron: c_uint = 0x1b9; +pub const XK_scedilla: c_uint = 0x1ba; +pub const XK_tcaron: c_uint = 0x1bb; +pub const XK_zacute: c_uint = 0x1bc; +pub const XK_doubleacute: c_uint = 0x1bd; +pub const XK_zcaron: c_uint = 0x1be; +pub const XK_zabovedot: c_uint = 0x1bf; +pub const XK_Racute: c_uint = 0x1c0; +pub const XK_Abreve: c_uint = 0x1c3; +pub const XK_Lacute: c_uint = 0x1c5; +pub const XK_Cacute: c_uint = 0x1c6; +pub const XK_Ccaron: c_uint = 0x1c8; +pub const XK_Eogonek: c_uint = 0x1ca; +pub const XK_Ecaron: c_uint = 0x1cc; +pub const XK_Dcaron: c_uint = 0x1cf; +pub const XK_Dstroke: c_uint = 0x1d0; +pub const XK_Nacute: c_uint = 0x1d1; +pub const XK_Ncaron: c_uint = 0x1d2; +pub const XK_Odoubleacute: c_uint = 0x1d5; +pub const XK_Rcaron: c_uint = 0x1d8; +pub const XK_Uring: c_uint = 0x1d9; +pub const XK_Udoubleacute: c_uint = 0x1db; +pub const XK_Tcedilla: c_uint = 0x1de; +pub const XK_racute: c_uint = 0x1e0; +pub const XK_abreve: c_uint = 0x1e3; +pub const XK_lacute: c_uint = 0x1e5; +pub const XK_cacute: c_uint = 0x1e6; +pub const XK_ccaron: c_uint = 0x1e8; +pub const XK_eogonek: c_uint = 0x1ea; +pub const XK_ecaron: c_uint = 0x1ec; +pub const XK_dcaron: c_uint = 0x1ef; +pub const XK_dstroke: c_uint = 0x1f0; +pub const XK_nacute: c_uint = 0x1f1; +pub const XK_ncaron: c_uint = 0x1f2; +pub const XK_odoubleacute: c_uint = 0x1f5; +pub const XK_udoubleacute: c_uint = 0x1fb; +pub const XK_rcaron: c_uint = 0x1f8; +pub const XK_uring: c_uint = 0x1f9; +pub const XK_tcedilla: c_uint = 0x1fe; +pub const XK_abovedot: c_uint = 0x1ff; +pub const XK_Hstroke: c_uint = 0x2a1; +pub const XK_Hcircumflex: c_uint = 0x2a6; +pub const XK_Iabovedot: c_uint = 0x2a9; +pub const XK_Gbreve: c_uint = 0x2ab; +pub const XK_Jcircumflex: c_uint = 0x2ac; +pub const XK_hstroke: c_uint = 0x2b1; +pub const XK_hcircumflex: c_uint = 0x2b6; +pub const XK_idotless: c_uint = 0x2b9; +pub const XK_gbreve: c_uint = 0x2bb; +pub const XK_jcircumflex: c_uint = 0x2bc; +pub const XK_Cabovedot: c_uint = 0x2c5; +pub const XK_Ccircumflex: c_uint = 0x2c6; +pub const XK_Gabovedot: c_uint = 0x2d5; +pub const XK_Gcircumflex: c_uint = 0x2d8; +pub const XK_Ubreve: c_uint = 0x2dd; +pub const XK_Scircumflex: c_uint = 0x2de; +pub const XK_cabovedot: c_uint = 0x2e5; +pub const XK_ccircumflex: c_uint = 0x2e6; +pub const XK_gabovedot: c_uint = 0x2f5; +pub const XK_gcircumflex: c_uint = 0x2f8; +pub const XK_ubreve: c_uint = 0x2fd; +pub const XK_scircumflex: c_uint = 0x2fe; +pub const XK_kra: c_uint = 0x3a2; +pub const XK_kappa: c_uint = 0x3a2; +pub const XK_Rcedilla: c_uint = 0x3a3; +pub const XK_Itilde: c_uint = 0x3a5; +pub const XK_Lcedilla: c_uint = 0x3a6; +pub const XK_Emacron: c_uint = 0x3aa; +pub const XK_Gcedilla: c_uint = 0x3ab; +pub const XK_Tslash: c_uint = 0x3ac; +pub const XK_rcedilla: c_uint = 0x3b3; +pub const XK_itilde: c_uint = 0x3b5; +pub const XK_lcedilla: c_uint = 0x3b6; +pub const XK_emacron: c_uint = 0x3ba; +pub const XK_gcedilla: c_uint = 0x3bb; +pub const XK_tslash: c_uint = 0x3bc; +pub const XK_ENG: c_uint = 0x3bd; +pub const XK_eng: c_uint = 0x3bf; +pub const XK_Amacron: c_uint = 0x3c0; +pub const XK_Iogonek: c_uint = 0x3c7; +pub const XK_Eabovedot: c_uint = 0x3cc; +pub const XK_Imacron: c_uint = 0x3cf; +pub const 
XK_Ncedilla: c_uint = 0x3d1; +pub const XK_Omacron: c_uint = 0x3d2; +pub const XK_Kcedilla: c_uint = 0x3d3; +pub const XK_Uogonek: c_uint = 0x3d9; +pub const XK_Utilde: c_uint = 0x3dd; +pub const XK_Umacron: c_uint = 0x3de; +pub const XK_amacron: c_uint = 0x3e0; +pub const XK_iogonek: c_uint = 0x3e7; +pub const XK_eabovedot: c_uint = 0x3ec; +pub const XK_imacron: c_uint = 0x3ef; +pub const XK_ncedilla: c_uint = 0x3f1; +pub const XK_omacron: c_uint = 0x3f2; +pub const XK_kcedilla: c_uint = 0x3f3; +pub const XK_uogonek: c_uint = 0x3f9; +pub const XK_utilde: c_uint = 0x3fd; +pub const XK_umacron: c_uint = 0x3fe; +pub const XK_overline: c_uint = 0x47e; +pub const XK_kana_fullstop: c_uint = 0x4a1; +pub const XK_kana_openingbracket: c_uint = 0x4a2; +pub const XK_kana_closingbracket: c_uint = 0x4a3; +pub const XK_kana_comma: c_uint = 0x4a4; +pub const XK_kana_conjunctive: c_uint = 0x4a5; +pub const XK_kana_middledot: c_uint = 0x4a5; +pub const XK_kana_WO: c_uint = 0x4a6; +pub const XK_kana_a: c_uint = 0x4a7; +pub const XK_kana_i: c_uint = 0x4a8; +pub const XK_kana_u: c_uint = 0x4a9; +pub const XK_kana_e: c_uint = 0x4aa; +pub const XK_kana_o: c_uint = 0x4ab; +pub const XK_kana_ya: c_uint = 0x4ac; +pub const XK_kana_yu: c_uint = 0x4ad; +pub const XK_kana_yo: c_uint = 0x4ae; +pub const XK_kana_tsu: c_uint = 0x4af; +pub const XK_kana_tu: c_uint = 0x4af; +pub const XK_prolongedsound: c_uint = 0x4b0; +pub const XK_kana_A: c_uint = 0x4b1; +pub const XK_kana_I: c_uint = 0x4b2; +pub const XK_kana_U: c_uint = 0x4b3; +pub const XK_kana_E: c_uint = 0x4b4; +pub const XK_kana_O: c_uint = 0x4b5; +pub const XK_kana_KA: c_uint = 0x4b6; +pub const XK_kana_KI: c_uint = 0x4b7; +pub const XK_kana_KU: c_uint = 0x4b8; +pub const XK_kana_KE: c_uint = 0x4b9; +pub const XK_kana_KO: c_uint = 0x4ba; +pub const XK_kana_SA: c_uint = 0x4bb; +pub const XK_kana_SHI: c_uint = 0x4bc; +pub const XK_kana_SU: c_uint = 0x4bd; +pub const XK_kana_SE: c_uint = 0x4be; +pub const XK_kana_SO: c_uint = 0x4bf; +pub const XK_kana_TA: c_uint = 0x4c0; +pub const XK_kana_CHI: c_uint = 0x4c1; +pub const XK_kana_TI: c_uint = 0x4c1; +pub const XK_kana_TSU: c_uint = 0x4c2; +pub const XK_kana_TU: c_uint = 0x4c2; +pub const XK_kana_TE: c_uint = 0x4c3; +pub const XK_kana_TO: c_uint = 0x4c4; +pub const XK_kana_NA: c_uint = 0x4c5; +pub const XK_kana_NI: c_uint = 0x4c6; +pub const XK_kana_NU: c_uint = 0x4c7; +pub const XK_kana_NE: c_uint = 0x4c8; +pub const XK_kana_NO: c_uint = 0x4c9; +pub const XK_kana_HA: c_uint = 0x4ca; +pub const XK_kana_HI: c_uint = 0x4cb; +pub const XK_kana_FU: c_uint = 0x4cc; +pub const XK_kana_HU: c_uint = 0x4cc; +pub const XK_kana_HE: c_uint = 0x4cd; +pub const XK_kana_HO: c_uint = 0x4ce; +pub const XK_kana_MA: c_uint = 0x4cf; +pub const XK_kana_MI: c_uint = 0x4d0; +pub const XK_kana_MU: c_uint = 0x4d1; +pub const XK_kana_ME: c_uint = 0x4d2; +pub const XK_kana_MO: c_uint = 0x4d3; +pub const XK_kana_YA: c_uint = 0x4d4; +pub const XK_kana_YU: c_uint = 0x4d5; +pub const XK_kana_YO: c_uint = 0x4d6; +pub const XK_kana_RA: c_uint = 0x4d7; +pub const XK_kana_RI: c_uint = 0x4d8; +pub const XK_kana_RU: c_uint = 0x4d9; +pub const XK_kana_RE: c_uint = 0x4da; +pub const XK_kana_RO: c_uint = 0x4db; +pub const XK_kana_WA: c_uint = 0x4dc; +pub const XK_kana_N: c_uint = 0x4dd; +pub const XK_voicedsound: c_uint = 0x4de; +pub const XK_semivoicedsound: c_uint = 0x4df; +pub const XK_kana_switch: c_uint = 0xFF7E; +pub const XK_Arabic_comma: c_uint = 0x5ac; +pub const XK_Arabic_semicolon: c_uint = 0x5bb; +pub const XK_Arabic_question_mark: c_uint = 
0x5bf; +pub const XK_Arabic_hamza: c_uint = 0x5c1; +pub const XK_Arabic_maddaonalef: c_uint = 0x5c2; +pub const XK_Arabic_hamzaonalef: c_uint = 0x5c3; +pub const XK_Arabic_hamzaonwaw: c_uint = 0x5c4; +pub const XK_Arabic_hamzaunderalef: c_uint = 0x5c5; +pub const XK_Arabic_hamzaonyeh: c_uint = 0x5c6; +pub const XK_Arabic_alef: c_uint = 0x5c7; +pub const XK_Arabic_beh: c_uint = 0x5c8; +pub const XK_Arabic_tehmarbuta: c_uint = 0x5c9; +pub const XK_Arabic_teh: c_uint = 0x5ca; +pub const XK_Arabic_theh: c_uint = 0x5cb; +pub const XK_Arabic_jeem: c_uint = 0x5cc; +pub const XK_Arabic_hah: c_uint = 0x5cd; +pub const XK_Arabic_khah: c_uint = 0x5ce; +pub const XK_Arabic_dal: c_uint = 0x5cf; +pub const XK_Arabic_thal: c_uint = 0x5d0; +pub const XK_Arabic_ra: c_uint = 0x5d1; +pub const XK_Arabic_zain: c_uint = 0x5d2; +pub const XK_Arabic_seen: c_uint = 0x5d3; +pub const XK_Arabic_sheen: c_uint = 0x5d4; +pub const XK_Arabic_sad: c_uint = 0x5d5; +pub const XK_Arabic_dad: c_uint = 0x5d6; +pub const XK_Arabic_tah: c_uint = 0x5d7; +pub const XK_Arabic_zah: c_uint = 0x5d8; +pub const XK_Arabic_ain: c_uint = 0x5d9; +pub const XK_Arabic_ghain: c_uint = 0x5da; +pub const XK_Arabic_tatweel: c_uint = 0x5e0; +pub const XK_Arabic_feh: c_uint = 0x5e1; +pub const XK_Arabic_qaf: c_uint = 0x5e2; +pub const XK_Arabic_kaf: c_uint = 0x5e3; +pub const XK_Arabic_lam: c_uint = 0x5e4; +pub const XK_Arabic_meem: c_uint = 0x5e5; +pub const XK_Arabic_noon: c_uint = 0x5e6; +pub const XK_Arabic_ha: c_uint = 0x5e7; +pub const XK_Arabic_heh: c_uint = 0x5e7; +pub const XK_Arabic_waw: c_uint = 0x5e8; +pub const XK_Arabic_alefmaksura: c_uint = 0x5e9; +pub const XK_Arabic_yeh: c_uint = 0x5ea; +pub const XK_Arabic_fathatan: c_uint = 0x5eb; +pub const XK_Arabic_dammatan: c_uint = 0x5ec; +pub const XK_Arabic_kasratan: c_uint = 0x5ed; +pub const XK_Arabic_fatha: c_uint = 0x5ee; +pub const XK_Arabic_damma: c_uint = 0x5ef; +pub const XK_Arabic_kasra: c_uint = 0x5f0; +pub const XK_Arabic_shadda: c_uint = 0x5f1; +pub const XK_Arabic_sukun: c_uint = 0x5f2; +pub const XK_Arabic_switch: c_uint = 0xFF7E; +pub const XK_Serbian_dje: c_uint = 0x6a1; +pub const XK_Macedonia_gje: c_uint = 0x6a2; +pub const XK_Cyrillic_io: c_uint = 0x6a3; +pub const XK_Ukrainian_ie: c_uint = 0x6a4; +pub const XK_Ukranian_je: c_uint = 0x6a4; +pub const XK_Macedonia_dse: c_uint = 0x6a5; +pub const XK_Ukrainian_i: c_uint = 0x6a6; +pub const XK_Ukranian_i: c_uint = 0x6a6; +pub const XK_Ukrainian_yi: c_uint = 0x6a7; +pub const XK_Ukranian_yi: c_uint = 0x6a7; +pub const XK_Cyrillic_je: c_uint = 0x6a8; +pub const XK_Serbian_je: c_uint = 0x6a8; +pub const XK_Cyrillic_lje: c_uint = 0x6a9; +pub const XK_Serbian_lje: c_uint = 0x6a9; +pub const XK_Cyrillic_nje: c_uint = 0x6aa; +pub const XK_Serbian_nje: c_uint = 0x6aa; +pub const XK_Serbian_tshe: c_uint = 0x6ab; +pub const XK_Macedonia_kje: c_uint = 0x6ac; +pub const XK_Byelorussian_shortu: c_uint = 0x6ae; +pub const XK_Cyrillic_dzhe: c_uint = 0x6af; +pub const XK_Serbian_dze: c_uint = 0x6af; +pub const XK_numerosign: c_uint = 0x6b0; +pub const XK_Serbian_DJE: c_uint = 0x6b1; +pub const XK_Macedonia_GJE: c_uint = 0x6b2; +pub const XK_Cyrillic_IO: c_uint = 0x6b3; +pub const XK_Ukrainian_IE: c_uint = 0x6b4; +pub const XK_Ukranian_JE: c_uint = 0x6b4; +pub const XK_Macedonia_DSE: c_uint = 0x6b5; +pub const XK_Ukrainian_I: c_uint = 0x6b6; +pub const XK_Ukranian_I: c_uint = 0x6b6; +pub const XK_Ukrainian_YI: c_uint = 0x6b7; +pub const XK_Ukranian_YI: c_uint = 0x6b7; +pub const XK_Cyrillic_JE: c_uint = 0x6b8; +pub const XK_Serbian_JE: 
c_uint = 0x6b8; +pub const XK_Cyrillic_LJE: c_uint = 0x6b9; +pub const XK_Serbian_LJE: c_uint = 0x6b9; +pub const XK_Cyrillic_NJE: c_uint = 0x6ba; +pub const XK_Serbian_NJE: c_uint = 0x6ba; +pub const XK_Serbian_TSHE: c_uint = 0x6bb; +pub const XK_Macedonia_KJE: c_uint = 0x6bc; +pub const XK_Byelorussian_SHORTU: c_uint = 0x6be; +pub const XK_Cyrillic_DZHE: c_uint = 0x6bf; +pub const XK_Serbian_DZE: c_uint = 0x6bf; +pub const XK_Cyrillic_yu: c_uint = 0x6c0; +pub const XK_Cyrillic_a: c_uint = 0x6c1; +pub const XK_Cyrillic_be: c_uint = 0x6c2; +pub const XK_Cyrillic_tse: c_uint = 0x6c3; +pub const XK_Cyrillic_de: c_uint = 0x6c4; +pub const XK_Cyrillic_ie: c_uint = 0x6c5; +pub const XK_Cyrillic_ef: c_uint = 0x6c6; +pub const XK_Cyrillic_ghe: c_uint = 0x6c7; +pub const XK_Cyrillic_ha: c_uint = 0x6c8; +pub const XK_Cyrillic_i: c_uint = 0x6c9; +pub const XK_Cyrillic_shorti: c_uint = 0x6ca; +pub const XK_Cyrillic_ka: c_uint = 0x6cb; +pub const XK_Cyrillic_el: c_uint = 0x6cc; +pub const XK_Cyrillic_em: c_uint = 0x6cd; +pub const XK_Cyrillic_en: c_uint = 0x6ce; +pub const XK_Cyrillic_o: c_uint = 0x6cf; +pub const XK_Cyrillic_pe: c_uint = 0x6d0; +pub const XK_Cyrillic_ya: c_uint = 0x6d1; +pub const XK_Cyrillic_er: c_uint = 0x6d2; +pub const XK_Cyrillic_es: c_uint = 0x6d3; +pub const XK_Cyrillic_te: c_uint = 0x6d4; +pub const XK_Cyrillic_u: c_uint = 0x6d5; +pub const XK_Cyrillic_zhe: c_uint = 0x6d6; +pub const XK_Cyrillic_ve: c_uint = 0x6d7; +pub const XK_Cyrillic_softsign: c_uint = 0x6d8; +pub const XK_Cyrillic_yeru: c_uint = 0x6d9; +pub const XK_Cyrillic_ze: c_uint = 0x6da; +pub const XK_Cyrillic_sha: c_uint = 0x6db; +pub const XK_Cyrillic_e: c_uint = 0x6dc; +pub const XK_Cyrillic_shcha: c_uint = 0x6dd; +pub const XK_Cyrillic_che: c_uint = 0x6de; +pub const XK_Cyrillic_hardsign: c_uint = 0x6df; +pub const XK_Cyrillic_YU: c_uint = 0x6e0; +pub const XK_Cyrillic_A: c_uint = 0x6e1; +pub const XK_Cyrillic_BE: c_uint = 0x6e2; +pub const XK_Cyrillic_TSE: c_uint = 0x6e3; +pub const XK_Cyrillic_DE: c_uint = 0x6e4; +pub const XK_Cyrillic_IE: c_uint = 0x6e5; +pub const XK_Cyrillic_EF: c_uint = 0x6e6; +pub const XK_Cyrillic_GHE: c_uint = 0x6e7; +pub const XK_Cyrillic_HA: c_uint = 0x6e8; +pub const XK_Cyrillic_I: c_uint = 0x6e9; +pub const XK_Cyrillic_SHORTI: c_uint = 0x6ea; +pub const XK_Cyrillic_KA: c_uint = 0x6eb; +pub const XK_Cyrillic_EL: c_uint = 0x6ec; +pub const XK_Cyrillic_EM: c_uint = 0x6ed; +pub const XK_Cyrillic_EN: c_uint = 0x6ee; +pub const XK_Cyrillic_O: c_uint = 0x6ef; +pub const XK_Cyrillic_PE: c_uint = 0x6f0; +pub const XK_Cyrillic_YA: c_uint = 0x6f1; +pub const XK_Cyrillic_ER: c_uint = 0x6f2; +pub const XK_Cyrillic_ES: c_uint = 0x6f3; +pub const XK_Cyrillic_TE: c_uint = 0x6f4; +pub const XK_Cyrillic_U: c_uint = 0x6f5; +pub const XK_Cyrillic_ZHE: c_uint = 0x6f6; +pub const XK_Cyrillic_VE: c_uint = 0x6f7; +pub const XK_Cyrillic_SOFTSIGN: c_uint = 0x6f8; +pub const XK_Cyrillic_YERU: c_uint = 0x6f9; +pub const XK_Cyrillic_ZE: c_uint = 0x6fa; +pub const XK_Cyrillic_SHA: c_uint = 0x6fb; +pub const XK_Cyrillic_E: c_uint = 0x6fc; +pub const XK_Cyrillic_SHCHA: c_uint = 0x6fd; +pub const XK_Cyrillic_CHE: c_uint = 0x6fe; +pub const XK_Cyrillic_HARDSIGN: c_uint = 0x6ff; +pub const XK_Greek_ALPHAaccent: c_uint = 0x7a1; +pub const XK_Greek_EPSILONaccent: c_uint = 0x7a2; +pub const XK_Greek_ETAaccent: c_uint = 0x7a3; +pub const XK_Greek_IOTAaccent: c_uint = 0x7a4; +pub const XK_Greek_IOTAdiaeresis: c_uint = 0x7a5; +pub const XK_Greek_OMICRONaccent: c_uint = 0x7a7; +pub const XK_Greek_UPSILONaccent: c_uint = 
0x7a8; +pub const XK_Greek_UPSILONdieresis: c_uint = 0x7a9; +pub const XK_Greek_OMEGAaccent: c_uint = 0x7ab; +pub const XK_Greek_accentdieresis: c_uint = 0x7ae; +pub const XK_Greek_horizbar: c_uint = 0x7af; +pub const XK_Greek_alphaaccent: c_uint = 0x7b1; +pub const XK_Greek_epsilonaccent: c_uint = 0x7b2; +pub const XK_Greek_etaaccent: c_uint = 0x7b3; +pub const XK_Greek_iotaaccent: c_uint = 0x7b4; +pub const XK_Greek_iotadieresis: c_uint = 0x7b5; +pub const XK_Greek_iotaaccentdieresis: c_uint = 0x7b6; +pub const XK_Greek_omicronaccent: c_uint = 0x7b7; +pub const XK_Greek_upsilonaccent: c_uint = 0x7b8; +pub const XK_Greek_upsilondieresis: c_uint = 0x7b9; +pub const XK_Greek_upsilonaccentdieresis: c_uint = 0x7ba; +pub const XK_Greek_omegaaccent: c_uint = 0x7bb; +pub const XK_Greek_ALPHA: c_uint = 0x7c1; +pub const XK_Greek_BETA: c_uint = 0x7c2; +pub const XK_Greek_GAMMA: c_uint = 0x7c3; +pub const XK_Greek_DELTA: c_uint = 0x7c4; +pub const XK_Greek_EPSILON: c_uint = 0x7c5; +pub const XK_Greek_ZETA: c_uint = 0x7c6; +pub const XK_Greek_ETA: c_uint = 0x7c7; +pub const XK_Greek_THETA: c_uint = 0x7c8; +pub const XK_Greek_IOTA: c_uint = 0x7c9; +pub const XK_Greek_KAPPA: c_uint = 0x7ca; +pub const XK_Greek_LAMDA: c_uint = 0x7cb; +pub const XK_Greek_LAMBDA: c_uint = 0x7cb; +pub const XK_Greek_MU: c_uint = 0x7cc; +pub const XK_Greek_NU: c_uint = 0x7cd; +pub const XK_Greek_XI: c_uint = 0x7ce; +pub const XK_Greek_OMICRON: c_uint = 0x7cf; +pub const XK_Greek_PI: c_uint = 0x7d0; +pub const XK_Greek_RHO: c_uint = 0x7d1; +pub const XK_Greek_SIGMA: c_uint = 0x7d2; +pub const XK_Greek_TAU: c_uint = 0x7d4; +pub const XK_Greek_UPSILON: c_uint = 0x7d5; +pub const XK_Greek_PHI: c_uint = 0x7d6; +pub const XK_Greek_CHI: c_uint = 0x7d7; +pub const XK_Greek_PSI: c_uint = 0x7d8; +pub const XK_Greek_OMEGA: c_uint = 0x7d9; +pub const XK_Greek_alpha: c_uint = 0x7e1; +pub const XK_Greek_beta: c_uint = 0x7e2; +pub const XK_Greek_gamma: c_uint = 0x7e3; +pub const XK_Greek_delta: c_uint = 0x7e4; +pub const XK_Greek_epsilon: c_uint = 0x7e5; +pub const XK_Greek_zeta: c_uint = 0x7e6; +pub const XK_Greek_eta: c_uint = 0x7e7; +pub const XK_Greek_theta: c_uint = 0x7e8; +pub const XK_Greek_iota: c_uint = 0x7e9; +pub const XK_Greek_kappa: c_uint = 0x7ea; +pub const XK_Greek_lamda: c_uint = 0x7eb; +pub const XK_Greek_lambda: c_uint = 0x7eb; +pub const XK_Greek_mu: c_uint = 0x7ec; +pub const XK_Greek_nu: c_uint = 0x7ed; +pub const XK_Greek_xi: c_uint = 0x7ee; +pub const XK_Greek_omicron: c_uint = 0x7ef; +pub const XK_Greek_pi: c_uint = 0x7f0; +pub const XK_Greek_rho: c_uint = 0x7f1; +pub const XK_Greek_sigma: c_uint = 0x7f2; +pub const XK_Greek_finalsmallsigma: c_uint = 0x7f3; +pub const XK_Greek_tau: c_uint = 0x7f4; +pub const XK_Greek_upsilon: c_uint = 0x7f5; +pub const XK_Greek_phi: c_uint = 0x7f6; +pub const XK_Greek_chi: c_uint = 0x7f7; +pub const XK_Greek_psi: c_uint = 0x7f8; +pub const XK_Greek_omega: c_uint = 0x7f9; +pub const XK_Greek_switch: c_uint = 0xFF7E; +pub const XK_leftradical: c_uint = 0x8a1; +pub const XK_topleftradical: c_uint = 0x8a2; +pub const XK_horizconnector: c_uint = 0x8a3; +pub const XK_topintegral: c_uint = 0x8a4; +pub const XK_botintegral: c_uint = 0x8a5; +pub const XK_vertconnector: c_uint = 0x8a6; +pub const XK_topleftsqbracket: c_uint = 0x8a7; +pub const XK_botleftsqbracket: c_uint = 0x8a8; +pub const XK_toprightsqbracket: c_uint = 0x8a9; +pub const XK_botrightsqbracket: c_uint = 0x8aa; +pub const XK_topleftparens: c_uint = 0x8ab; +pub const XK_botleftparens: c_uint = 0x8ac; +pub const 
XK_toprightparens: c_uint = 0x8ad; +pub const XK_botrightparens: c_uint = 0x8ae; +pub const XK_leftmiddlecurlybrace: c_uint = 0x8af; +pub const XK_rightmiddlecurlybrace: c_uint = 0x8b0; +pub const XK_topleftsummation: c_uint = 0x8b1; +pub const XK_botleftsummation: c_uint = 0x8b2; +pub const XK_topvertsummationconnector: c_uint = 0x8b3; +pub const XK_botvertsummationconnector: c_uint = 0x8b4; +pub const XK_toprightsummation: c_uint = 0x8b5; +pub const XK_botrightsummation: c_uint = 0x8b6; +pub const XK_rightmiddlesummation: c_uint = 0x8b7; +pub const XK_lessthanequal: c_uint = 0x8bc; +pub const XK_notequal: c_uint = 0x8bd; +pub const XK_greaterthanequal: c_uint = 0x8be; +pub const XK_integral: c_uint = 0x8bf; +pub const XK_therefore: c_uint = 0x8c0; +pub const XK_variation: c_uint = 0x8c1; +pub const XK_infinity: c_uint = 0x8c2; +pub const XK_nabla: c_uint = 0x8c5; +pub const XK_approximate: c_uint = 0x8c8; +pub const XK_similarequal: c_uint = 0x8c9; +pub const XK_ifonlyif: c_uint = 0x8cd; +pub const XK_implies: c_uint = 0x8ce; +pub const XK_identical: c_uint = 0x8cf; +pub const XK_radical: c_uint = 0x8d6; +pub const XK_includedin: c_uint = 0x8da; +pub const XK_includes: c_uint = 0x8db; +pub const XK_intersection: c_uint = 0x8dc; +pub const XK_union: c_uint = 0x8dd; +pub const XK_logicaland: c_uint = 0x8de; +pub const XK_logicalor: c_uint = 0x8df; +pub const XK_partialderivative: c_uint = 0x8ef; +pub const XK_function: c_uint = 0x8f6; +pub const XK_leftarrow: c_uint = 0x8fb; +pub const XK_uparrow: c_uint = 0x8fc; +pub const XK_rightarrow: c_uint = 0x8fd; +pub const XK_downarrow: c_uint = 0x8fe; +pub const XK_blank: c_uint = 0x9df; +pub const XK_soliddiamond: c_uint = 0x9e0; +pub const XK_checkerboard: c_uint = 0x9e1; +pub const XK_ht: c_uint = 0x9e2; +pub const XK_ff: c_uint = 0x9e3; +pub const XK_cr: c_uint = 0x9e4; +pub const XK_lf: c_uint = 0x9e5; +pub const XK_nl: c_uint = 0x9e8; +pub const XK_vt: c_uint = 0x9e9; +pub const XK_lowrightcorner: c_uint = 0x9ea; +pub const XK_uprightcorner: c_uint = 0x9eb; +pub const XK_upleftcorner: c_uint = 0x9ec; +pub const XK_lowleftcorner: c_uint = 0x9ed; +pub const XK_crossinglines: c_uint = 0x9ee; +pub const XK_horizlinescan1: c_uint = 0x9ef; +pub const XK_horizlinescan3: c_uint = 0x9f0; +pub const XK_horizlinescan5: c_uint = 0x9f1; +pub const XK_horizlinescan7: c_uint = 0x9f2; +pub const XK_horizlinescan9: c_uint = 0x9f3; +pub const XK_leftt: c_uint = 0x9f4; +pub const XK_rightt: c_uint = 0x9f5; +pub const XK_bott: c_uint = 0x9f6; +pub const XK_topt: c_uint = 0x9f7; +pub const XK_vertbar: c_uint = 0x9f8; +pub const XK_emspace: c_uint = 0xaa1; +pub const XK_enspace: c_uint = 0xaa2; +pub const XK_em3space: c_uint = 0xaa3; +pub const XK_em4space: c_uint = 0xaa4; +pub const XK_digitspace: c_uint = 0xaa5; +pub const XK_punctspace: c_uint = 0xaa6; +pub const XK_thinspace: c_uint = 0xaa7; +pub const XK_hairspace: c_uint = 0xaa8; +pub const XK_emdash: c_uint = 0xaa9; +pub const XK_endash: c_uint = 0xaaa; +pub const XK_signifblank: c_uint = 0xaac; +pub const XK_ellipsis: c_uint = 0xaae; +pub const XK_doubbaselinedot: c_uint = 0xaaf; +pub const XK_onethird: c_uint = 0xab0; +pub const XK_twothirds: c_uint = 0xab1; +pub const XK_onefifth: c_uint = 0xab2; +pub const XK_twofifths: c_uint = 0xab3; +pub const XK_threefifths: c_uint = 0xab4; +pub const XK_fourfifths: c_uint = 0xab5; +pub const XK_onesixth: c_uint = 0xab6; +pub const XK_fivesixths: c_uint = 0xab7; +pub const XK_careof: c_uint = 0xab8; +pub const XK_figdash: c_uint = 0xabb; +pub const 
XK_leftanglebracket: c_uint = 0xabc; +pub const XK_decimalpoint: c_uint = 0xabd; +pub const XK_rightanglebracket: c_uint = 0xabe; +pub const XK_marker: c_uint = 0xabf; +pub const XK_oneeighth: c_uint = 0xac3; +pub const XK_threeeighths: c_uint = 0xac4; +pub const XK_fiveeighths: c_uint = 0xac5; +pub const XK_seveneighths: c_uint = 0xac6; +pub const XK_trademark: c_uint = 0xac9; +pub const XK_signaturemark: c_uint = 0xaca; +pub const XK_trademarkincircle: c_uint = 0xacb; +pub const XK_leftopentriangle: c_uint = 0xacc; +pub const XK_rightopentriangle: c_uint = 0xacd; +pub const XK_emopencircle: c_uint = 0xace; +pub const XK_emopenrectangle: c_uint = 0xacf; +pub const XK_leftsinglequotemark: c_uint = 0xad0; +pub const XK_rightsinglequotemark: c_uint = 0xad1; +pub const XK_leftdoublequotemark: c_uint = 0xad2; +pub const XK_rightdoublequotemark: c_uint = 0xad3; +pub const XK_prescription: c_uint = 0xad4; +pub const XK_minutes: c_uint = 0xad6; +pub const XK_seconds: c_uint = 0xad7; +pub const XK_latincross: c_uint = 0xad9; +pub const XK_hexagram: c_uint = 0xada; +pub const XK_filledrectbullet: c_uint = 0xadb; +pub const XK_filledlefttribullet: c_uint = 0xadc; +pub const XK_filledrighttribullet: c_uint = 0xadd; +pub const XK_emfilledcircle: c_uint = 0xade; +pub const XK_emfilledrect: c_uint = 0xadf; +pub const XK_enopencircbullet: c_uint = 0xae0; +pub const XK_enopensquarebullet: c_uint = 0xae1; +pub const XK_openrectbullet: c_uint = 0xae2; +pub const XK_opentribulletup: c_uint = 0xae3; +pub const XK_opentribulletdown: c_uint = 0xae4; +pub const XK_openstar: c_uint = 0xae5; +pub const XK_enfilledcircbullet: c_uint = 0xae6; +pub const XK_enfilledsqbullet: c_uint = 0xae7; +pub const XK_filledtribulletup: c_uint = 0xae8; +pub const XK_filledtribulletdown: c_uint = 0xae9; +pub const XK_leftpointer: c_uint = 0xaea; +pub const XK_rightpointer: c_uint = 0xaeb; +pub const XK_club: c_uint = 0xaec; +pub const XK_diamond: c_uint = 0xaed; +pub const XK_heart: c_uint = 0xaee; +pub const XK_maltesecross: c_uint = 0xaf0; +pub const XK_dagger: c_uint = 0xaf1; +pub const XK_doubledagger: c_uint = 0xaf2; +pub const XK_checkmark: c_uint = 0xaf3; +pub const XK_ballotcross: c_uint = 0xaf4; +pub const XK_musicalsharp: c_uint = 0xaf5; +pub const XK_musicalflat: c_uint = 0xaf6; +pub const XK_malesymbol: c_uint = 0xaf7; +pub const XK_femalesymbol: c_uint = 0xaf8; +pub const XK_telephone: c_uint = 0xaf9; +pub const XK_telephonerecorder: c_uint = 0xafa; +pub const XK_phonographcopyright: c_uint = 0xafb; +pub const XK_caret: c_uint = 0xafc; +pub const XK_singlelowquotemark: c_uint = 0xafd; +pub const XK_doublelowquotemark: c_uint = 0xafe; +pub const XK_cursor: c_uint = 0xaff; +pub const XK_leftcaret: c_uint = 0xba3; +pub const XK_rightcaret: c_uint = 0xba6; +pub const XK_downcaret: c_uint = 0xba8; +pub const XK_upcaret: c_uint = 0xba9; +pub const XK_overbar: c_uint = 0xbc0; +pub const XK_downtack: c_uint = 0xbc2; +pub const XK_upshoe: c_uint = 0xbc3; +pub const XK_downstile: c_uint = 0xbc4; +pub const XK_underbar: c_uint = 0xbc6; +pub const XK_jot: c_uint = 0xbca; +pub const XK_quad: c_uint = 0xbcc; +pub const XK_uptack: c_uint = 0xbce; +pub const XK_circle: c_uint = 0xbcf; +pub const XK_upstile: c_uint = 0xbd3; +pub const XK_downshoe: c_uint = 0xbd6; +pub const XK_rightshoe: c_uint = 0xbd8; +pub const XK_leftshoe: c_uint = 0xbda; +pub const XK_lefttack: c_uint = 0xbdc; +pub const XK_righttack: c_uint = 0xbfc; +pub const XK_hebrew_doublelowline: c_uint = 0xcdf; +pub const XK_hebrew_aleph: c_uint = 0xce0; +pub const 
XK_hebrew_bet: c_uint = 0xce1; +pub const XK_hebrew_beth: c_uint = 0xce1; +pub const XK_hebrew_gimel: c_uint = 0xce2; +pub const XK_hebrew_gimmel: c_uint = 0xce2; +pub const XK_hebrew_dalet: c_uint = 0xce3; +pub const XK_hebrew_daleth: c_uint = 0xce3; +pub const XK_hebrew_he: c_uint = 0xce4; +pub const XK_hebrew_waw: c_uint = 0xce5; +pub const XK_hebrew_zain: c_uint = 0xce6; +pub const XK_hebrew_zayin: c_uint = 0xce6; +pub const XK_hebrew_chet: c_uint = 0xce7; +pub const XK_hebrew_het: c_uint = 0xce7; +pub const XK_hebrew_tet: c_uint = 0xce8; +pub const XK_hebrew_teth: c_uint = 0xce8; +pub const XK_hebrew_yod: c_uint = 0xce9; +pub const XK_hebrew_finalkaph: c_uint = 0xcea; +pub const XK_hebrew_kaph: c_uint = 0xceb; +pub const XK_hebrew_lamed: c_uint = 0xcec; +pub const XK_hebrew_finalmem: c_uint = 0xced; +pub const XK_hebrew_mem: c_uint = 0xcee; +pub const XK_hebrew_finalnun: c_uint = 0xcef; +pub const XK_hebrew_nun: c_uint = 0xcf0; +pub const XK_hebrew_samech: c_uint = 0xcf1; +pub const XK_hebrew_samekh: c_uint = 0xcf1; +pub const XK_hebrew_ayin: c_uint = 0xcf2; +pub const XK_hebrew_finalpe: c_uint = 0xcf3; +pub const XK_hebrew_pe: c_uint = 0xcf4; +pub const XK_hebrew_finalzade: c_uint = 0xcf5; +pub const XK_hebrew_finalzadi: c_uint = 0xcf5; +pub const XK_hebrew_zade: c_uint = 0xcf6; +pub const XK_hebrew_zadi: c_uint = 0xcf6; +pub const XK_hebrew_qoph: c_uint = 0xcf7; +pub const XK_hebrew_kuf: c_uint = 0xcf7; +pub const XK_hebrew_resh: c_uint = 0xcf8; +pub const XK_hebrew_shin: c_uint = 0xcf9; +pub const XK_hebrew_taw: c_uint = 0xcfa; +pub const XK_hebrew_taf: c_uint = 0xcfa; +pub const XK_Hebrew_switch: c_uint = 0xFF7E; + +pub const XF86XK_ModeLock: c_uint = 0x1008FF01; +pub const XF86XK_MonBrightnessUp: c_uint = 0x1008FF02; +pub const XF86XK_MonBrightnessDown: c_uint = 0x1008FF03; +pub const XF86XK_KbdLightOnOff: c_uint = 0x1008FF04; +pub const XF86XK_KbdBrightnessUp: c_uint = 0x1008FF05; +pub const XF86XK_KbdBrightnessDown: c_uint = 0x1008FF06; +pub const XF86XK_Standby: c_uint = 0x1008FF10; +pub const XF86XK_AudioLowerVolume: c_uint = 0x1008FF11; +pub const XF86XK_AudioMute: c_uint = 0x1008FF12; +pub const XF86XK_AudioRaiseVolume: c_uint = 0x1008FF13; +pub const XF86XK_AudioPlay: c_uint = 0x1008FF14; +pub const XF86XK_AudioStop: c_uint = 0x1008FF15; +pub const XF86XK_AudioPrev: c_uint = 0x1008FF16; +pub const XF86XK_AudioNext: c_uint = 0x1008FF17; +pub const XF86XK_HomePage: c_uint = 0x1008FF18; +pub const XF86XK_Mail: c_uint = 0x1008FF19; +pub const XF86XK_Start: c_uint = 0x1008FF1A; +pub const XF86XK_Search: c_uint = 0x1008FF1B; +pub const XF86XK_AudioRecord: c_uint = 0x1008FF1C; +pub const XF86XK_Calculator: c_uint = 0x1008FF1D; +pub const XF86XK_Memo: c_uint = 0x1008FF1E; +pub const XF86XK_ToDoList: c_uint = 0x1008FF1F; +pub const XF86XK_Calendar: c_uint = 0x1008FF20; +pub const XF86XK_PowerDown: c_uint = 0x1008FF21; +pub const XF86XK_ContrastAdjust: c_uint = 0x1008FF22; +pub const XF86XK_RockerUp: c_uint = 0x1008FF23; +pub const XF86XK_RockerDown: c_uint = 0x1008FF24; +pub const XF86XK_RockerEnter: c_uint = 0x1008FF25; +pub const XF86XK_Back: c_uint = 0x1008FF26; +pub const XF86XK_Forward: c_uint = 0x1008FF27; +pub const XF86XK_Stop: c_uint = 0x1008FF28; +pub const XF86XK_Refresh: c_uint = 0x1008FF29; +pub const XF86XK_PowerOff: c_uint = 0x1008FF2A; +pub const XF86XK_WakeUp: c_uint = 0x1008FF2B; +pub const XF86XK_Eject: c_uint = 0x1008FF2C; +pub const XF86XK_ScreenSaver: c_uint = 0x1008FF2D; +pub const XF86XK_WWW: c_uint = 0x1008FF2E; +pub const XF86XK_Sleep: c_uint = 0x1008FF2F; 
+pub const XF86XK_Favorites: c_uint = 0x1008FF30; +pub const XF86XK_AudioPause: c_uint = 0x1008FF31; +pub const XF86XK_AudioMedia: c_uint = 0x1008FF32; +pub const XF86XK_MyComputer: c_uint = 0x1008FF33; +pub const XF86XK_VendorHome: c_uint = 0x1008FF34; +pub const XF86XK_LightBulb: c_uint = 0x1008FF35; +pub const XF86XK_Shop: c_uint = 0x1008FF36; +pub const XF86XK_History: c_uint = 0x1008FF37; +pub const XF86XK_OpenURL: c_uint = 0x1008FF38; +pub const XF86XK_AddFavorite: c_uint = 0x1008FF39; +pub const XF86XK_HotLinks: c_uint = 0x1008FF3A; +pub const XF86XK_BrightnessAdjust: c_uint = 0x1008FF3B; +pub const XF86XK_Finance: c_uint = 0x1008FF3C; +pub const XF86XK_Community: c_uint = 0x1008FF3D; +pub const XF86XK_AudioRewind: c_uint = 0x1008FF3E; +pub const XF86XK_BackForward: c_uint = 0x1008FF3F; +pub const XF86XK_Launch0: c_uint = 0x1008FF40; +pub const XF86XK_Launch1: c_uint = 0x1008FF41; +pub const XF86XK_Launch2: c_uint = 0x1008FF42; +pub const XF86XK_Launch3: c_uint = 0x1008FF43; +pub const XF86XK_Launch4: c_uint = 0x1008FF44; +pub const XF86XK_Launch5: c_uint = 0x1008FF45; +pub const XF86XK_Launch6: c_uint = 0x1008FF46; +pub const XF86XK_Launch7: c_uint = 0x1008FF47; +pub const XF86XK_Launch8: c_uint = 0x1008FF48; +pub const XF86XK_Launch9: c_uint = 0x1008FF49; +pub const XF86XK_LaunchA: c_uint = 0x1008FF4A; +pub const XF86XK_LaunchB: c_uint = 0x1008FF4B; +pub const XF86XK_LaunchC: c_uint = 0x1008FF4C; +pub const XF86XK_LaunchD: c_uint = 0x1008FF4D; +pub const XF86XK_LaunchE: c_uint = 0x1008FF4E; +pub const XF86XK_LaunchF: c_uint = 0x1008FF4F; +pub const XF86XK_ApplicationLeft: c_uint = 0x1008FF50; +pub const XF86XK_ApplicationRight: c_uint = 0x1008FF51; +pub const XF86XK_Book: c_uint = 0x1008FF52; +pub const XF86XK_CD: c_uint = 0x1008FF53; +pub const XF86XK_Calculater: c_uint = 0x1008FF54; +pub const XF86XK_Clear: c_uint = 0x1008FF55; +pub const XF86XK_Close: c_uint = 0x1008FF56; +pub const XF86XK_Copy: c_uint = 0x1008FF57; +pub const XF86XK_Cut: c_uint = 0x1008FF58; +pub const XF86XK_Display: c_uint = 0x1008FF59; +pub const XF86XK_DOS: c_uint = 0x1008FF5A; +pub const XF86XK_Documents: c_uint = 0x1008FF5B; +pub const XF86XK_Excel: c_uint = 0x1008FF5C; +pub const XF86XK_Explorer: c_uint = 0x1008FF5D; +pub const XF86XK_Game: c_uint = 0x1008FF5E; +pub const XF86XK_Go: c_uint = 0x1008FF5F; +pub const XF86XK_iTouch: c_uint = 0x1008FF60; +pub const XF86XK_LogOff: c_uint = 0x1008FF61; +pub const XF86XK_Market: c_uint = 0x1008FF62; +pub const XF86XK_Meeting: c_uint = 0x1008FF63; +pub const XF86XK_MenuKB: c_uint = 0x1008FF65; +pub const XF86XK_MenuPB: c_uint = 0x1008FF66; +pub const XF86XK_MySites: c_uint = 0x1008FF67; +pub const XF86XK_New: c_uint = 0x1008FF68; +pub const XF86XK_News: c_uint = 0x1008FF69; +pub const XF86XK_OfficeHome: c_uint = 0x1008FF6A; +pub const XF86XK_Open: c_uint = 0x1008FF6B; +pub const XF86XK_Option: c_uint = 0x1008FF6C; +pub const XF86XK_Paste: c_uint = 0x1008FF6D; +pub const XF86XK_Phone: c_uint = 0x1008FF6E; +pub const XF86XK_Q: c_uint = 0x1008FF70; +pub const XF86XK_Reply: c_uint = 0x1008FF72; +pub const XF86XK_Reload: c_uint = 0x1008FF73; +pub const XF86XK_RotateWindows: c_uint = 0x1008FF74; +pub const XF86XK_RotationPB: c_uint = 0x1008FF75; +pub const XF86XK_RotationKB: c_uint = 0x1008FF76; +pub const XF86XK_Save: c_uint = 0x1008FF77; +pub const XF86XK_ScrollUp: c_uint = 0x1008FF78; +pub const XF86XK_ScrollDown: c_uint = 0x1008FF79; +pub const XF86XK_ScrollClick: c_uint = 0x1008FF7A; +pub const XF86XK_Send: c_uint = 0x1008FF7B; +pub const XF86XK_Spell: c_uint = 
0x1008FF7C; +pub const XF86XK_SplitScreen: c_uint = 0x1008FF7D; +pub const XF86XK_Support: c_uint = 0x1008FF7E; +pub const XF86XK_TaskPane: c_uint = 0x1008FF7F; +pub const XF86XK_Terminal: c_uint = 0x1008FF80; +pub const XF86XK_Tools: c_uint = 0x1008FF81; +pub const XF86XK_Travel: c_uint = 0x1008FF82; +pub const XF86XK_UserPB: c_uint = 0x1008FF84; +pub const XF86XK_User1KB: c_uint = 0x1008FF85; +pub const XF86XK_User2KB: c_uint = 0x1008FF86; +pub const XF86XK_Video: c_uint = 0x1008FF87; +pub const XF86XK_WheelButton: c_uint = 0x1008FF88; +pub const XF86XK_Word: c_uint = 0x1008FF89; +pub const XF86XK_Xfer: c_uint = 0x1008FF8A; +pub const XF86XK_ZoomIn: c_uint = 0x1008FF8B; +pub const XF86XK_ZoomOut: c_uint = 0x1008FF8C; +pub const XF86XK_Away: c_uint = 0x1008FF8D; +pub const XF86XK_Messenger: c_uint = 0x1008FF8E; +pub const XF86XK_WebCam: c_uint = 0x1008FF8F; +pub const XF86XK_MailForward: c_uint = 0x1008FF90; +pub const XF86XK_Pictures: c_uint = 0x1008FF91; +pub const XF86XK_Music: c_uint = 0x1008FF92; +pub const XF86XK_Battery: c_uint = 0x1008FF93; +pub const XF86XK_Bluetooth: c_uint = 0x1008FF94; +pub const XF86XK_WLAN: c_uint = 0x1008FF95; +pub const XF86XK_UWB: c_uint = 0x1008FF96; +pub const XF86XK_AudioForward: c_uint = 0x1008FF97; +pub const XF86XK_AudioRepeat: c_uint = 0x1008FF98; +pub const XF86XK_AudioRandomPlay: c_uint = 0x1008FF99; +pub const XF86XK_Subtitle: c_uint = 0x1008FF9A; +pub const XF86XK_AudioCycleTrack: c_uint = 0x1008FF9B; +pub const XF86XK_CycleAngle: c_uint = 0x1008FF9C; +pub const XF86XK_FrameBack: c_uint = 0x1008FF9D; +pub const XF86XK_FrameForward: c_uint = 0x1008FF9E; +pub const XF86XK_Time: c_uint = 0x1008FF9F; +pub const XF86XK_Select: c_uint = 0x1008FFA0; +pub const XF86XK_View: c_uint = 0x1008FFA1; +pub const XF86XK_TopMenu: c_uint = 0x1008FFA2; +pub const XF86XK_Red: c_uint = 0x1008FFA3; +pub const XF86XK_Green: c_uint = 0x1008FFA4; +pub const XF86XK_Yellow: c_uint = 0x1008FFA5; +pub const XF86XK_Blue: c_uint = 0x1008FFA6; +pub const XF86XK_Suspend: c_uint = 0x1008FFA7; +pub const XF86XK_Hibernate: c_uint = 0x1008FFA8; +pub const XF86XK_TouchpadToggle: c_uint = 0x1008FFA9; +pub const XF86XK_TouchpadOn: c_uint = 0x1008FFB0; +pub const XF86XK_TouchpadOff: c_uint = 0x1008FFB1; +pub const XF86XK_AudioMicMute: c_uint = 0x1008FFB2; +pub const XF86XK_Switch_VT_1: c_uint = 0x1008FE01; +pub const XF86XK_Switch_VT_2: c_uint = 0x1008FE02; +pub const XF86XK_Switch_VT_3: c_uint = 0x1008FE03; +pub const XF86XK_Switch_VT_4: c_uint = 0x1008FE04; +pub const XF86XK_Switch_VT_5: c_uint = 0x1008FE05; +pub const XF86XK_Switch_VT_6: c_uint = 0x1008FE06; +pub const XF86XK_Switch_VT_7: c_uint = 0x1008FE07; +pub const XF86XK_Switch_VT_8: c_uint = 0x1008FE08; +pub const XF86XK_Switch_VT_9: c_uint = 0x1008FE09; +pub const XF86XK_Switch_VT_10: c_uint = 0x1008FE0A; +pub const XF86XK_Switch_VT_11: c_uint = 0x1008FE0B; +pub const XF86XK_Switch_VT_12: c_uint = 0x1008FE0C; +pub const XF86XK_Ungrab: c_uint = 0x1008FE20; +pub const XF86XK_ClearGrab: c_uint = 0x1008FE21; +pub const XF86XK_Next_VMode: c_uint = 0x1008FE22; +pub const XF86XK_Prev_VMode: c_uint = 0x1008FE23; +pub const XF86XK_LogWindowTree: c_uint = 0x1008FE24; +pub const XF86XK_LogGrabInfo: c_uint = 0x1008FE25; + +pub const XK_ISO_Lock: c_uint = 0xfe01; +pub const XK_ISO_Level2_Latch: c_uint = 0xfe02; +pub const XK_ISO_Level3_Shift: c_uint = 0xfe03; +pub const XK_ISO_Level3_Latch: c_uint = 0xfe04; +pub const XK_ISO_Level3_Lock: c_uint = 0xfe05; +pub const XK_ISO_Level5_Shift: c_uint = 0xfe11; +pub const XK_ISO_Level5_Latch: 
c_uint = 0xfe12; +pub const XK_ISO_Level5_Lock: c_uint = 0xfe13; +pub const XK_ISO_Group_Shift: c_uint = 0xff7e; +pub const XK_ISO_Group_Latch: c_uint = 0xfe06; +pub const XK_ISO_Group_Lock: c_uint = 0xfe07; +pub const XK_ISO_Next_Group: c_uint = 0xfe08; +pub const XK_ISO_Next_Group_Lock: c_uint = 0xfe09; +pub const XK_ISO_Prev_Group: c_uint = 0xfe0a; +pub const XK_ISO_Prev_Group_Lock: c_uint = 0xfe0b; +pub const XK_ISO_First_Group: c_uint = 0xfe0c; +pub const XK_ISO_First_Group_Lock: c_uint = 0xfe0d; +pub const XK_ISO_Last_Group: c_uint = 0xfe0e; +pub const XK_ISO_Last_Group_Lock: c_uint = 0xfe0f; + +pub const XK_ISO_Left_Tab: c_uint = 0xfe20; +pub const XK_ISO_Move_Line_Up: c_uint = 0xfe21; +pub const XK_ISO_Move_Line_Down: c_uint = 0xfe22; +pub const XK_ISO_Partial_Line_Up: c_uint = 0xfe23; +pub const XK_ISO_Partial_Line_Down: c_uint = 0xfe24; +pub const XK_ISO_Partial_Space_Left: c_uint = 0xfe25; +pub const XK_ISO_Partial_Space_Right: c_uint = 0xfe26; +pub const XK_ISO_Set_Margin_Left: c_uint = 0xfe27; +pub const XK_ISO_Set_Margin_Right: c_uint = 0xfe28; +pub const XK_ISO_Release_Margin_Left: c_uint = 0xfe29; +pub const XK_ISO_Release_Margin_Right: c_uint = 0xfe2a; +pub const XK_ISO_Release_Both_Margins: c_uint = 0xfe2b; +pub const XK_ISO_Fast_Cursor_Left: c_uint = 0xfe2c; +pub const XK_ISO_Fast_Cursor_Right: c_uint = 0xfe2d; +pub const XK_ISO_Fast_Cursor_Up: c_uint = 0xfe2e; +pub const XK_ISO_Fast_Cursor_Down: c_uint = 0xfe2f; +pub const XK_ISO_Continuous_Underline: c_uint = 0xfe30; +pub const XK_ISO_Discontinuous_Underline: c_uint = 0xfe31; +pub const XK_ISO_Emphasize: c_uint = 0xfe32; +pub const XK_ISO_Center_Object: c_uint = 0xfe33; +pub const XK_ISO_Enter: c_uint = 0xfe34; + +pub const XK_dead_grave: c_uint = 0xfe50; +pub const XK_dead_acute: c_uint = 0xfe51; +pub const XK_dead_circumflex: c_uint = 0xfe52; +pub const XK_dead_tilde: c_uint = 0xfe53; +pub const XK_dead_perispomeni: c_uint = 0xfe53; +pub const XK_dead_macron: c_uint = 0xfe54; +pub const XK_dead_breve: c_uint = 0xfe55; +pub const XK_dead_abovedot: c_uint = 0xfe56; +pub const XK_dead_diaeresis: c_uint = 0xfe57; +pub const XK_dead_abovering: c_uint = 0xfe58; +pub const XK_dead_doubleacute: c_uint = 0xfe59; +pub const XK_dead_caron: c_uint = 0xfe5a; +pub const XK_dead_cedilla: c_uint = 0xfe5b; +pub const XK_dead_ogonek: c_uint = 0xfe5c; +pub const XK_dead_iota: c_uint = 0xfe5d; +pub const XK_dead_voiced_sound: c_uint = 0xfe5e; +pub const XK_dead_semivoiced_sound: c_uint = 0xfe5f; +pub const XK_dead_belowdot: c_uint = 0xfe60; +pub const XK_dead_hook: c_uint = 0xfe61; +pub const XK_dead_horn: c_uint = 0xfe62; +pub const XK_dead_stroke: c_uint = 0xfe63; +pub const XK_dead_abovecomma: c_uint = 0xfe64; +pub const XK_dead_psili: c_uint = 0xfe64; +pub const XK_dead_abovereversedcomma: c_uint = 0xfe65; +pub const XK_dead_dasia: c_uint = 0xfe65; +pub const XK_dead_doublegrave: c_uint = 0xfe66; +pub const XK_dead_belowring: c_uint = 0xfe67; +pub const XK_dead_belowmacron: c_uint = 0xfe68; +pub const XK_dead_belowcircumflex: c_uint = 0xfe69; +pub const XK_dead_belowtilde: c_uint = 0xfe6a; +pub const XK_dead_belowbreve: c_uint = 0xfe6b; +pub const XK_dead_belowdiaeresis: c_uint = 0xfe6c; +pub const XK_dead_invertedbreve: c_uint = 0xfe6d; +pub const XK_dead_belowcomma: c_uint = 0xfe6e; +pub const XK_dead_currency: c_uint = 0xfe6f; + +pub const XK_dead_lowline: c_uint = 0xfe90; +pub const XK_dead_aboveverticalline: c_uint = 0xfe91; +pub const XK_dead_belowverticalline: c_uint = 0xfe92; +pub const XK_dead_longsolidusoverlay: c_uint = 
0xfe93; + +pub const XK_dead_a: c_uint = 0xfe80; +pub const XK_dead_A: c_uint = 0xfe81; +pub const XK_dead_e: c_uint = 0xfe82; +pub const XK_dead_E: c_uint = 0xfe83; +pub const XK_dead_i: c_uint = 0xfe84; +pub const XK_dead_I: c_uint = 0xfe85; +pub const XK_dead_o: c_uint = 0xfe86; +pub const XK_dead_O: c_uint = 0xfe87; +pub const XK_dead_u: c_uint = 0xfe88; +pub const XK_dead_U: c_uint = 0xfe89; +pub const XK_dead_small_schwa: c_uint = 0xfe8a; +pub const XK_dead_capital_schwa: c_uint = 0xfe8b; + +pub const XK_dead_greek: c_uint = 0xfe8c; + +pub const XK_First_Virtual_Screen: c_uint = 0xfed0; +pub const XK_Prev_Virtual_Screen: c_uint = 0xfed1; +pub const XK_Next_Virtual_Screen: c_uint = 0xfed2; +pub const XK_Last_Virtual_Screen: c_uint = 0xfed4; +pub const XK_Terminate_Server: c_uint = 0xfed5; + +pub const XK_AccessX_Enable: c_uint = 0xfe70; +pub const XK_AccessX_Feedback_Enable: c_uint = 0xfe71; +pub const XK_RepeatKeys_Enable: c_uint = 0xfe72; +pub const XK_SlowKeys_Enable: c_uint = 0xfe73; +pub const XK_BounceKeys_Enable: c_uint = 0xfe74; +pub const XK_StickyKeys_Enable: c_uint = 0xfe75; +pub const XK_MouseKeys_Enable: c_uint = 0xfe76; +pub const XK_MouseKeys_Accel_Enable: c_uint = 0xfe77; +pub const XK_Overlay1_Enable: c_uint = 0xfe78; +pub const XK_Overlay2_Enable: c_uint = 0xfe79; +pub const XK_AudibleBell_Enable: c_uint = 0xfe7a; + +pub const XK_Pointer_Left: c_uint = 0xfee0; +pub const XK_Pointer_Right: c_uint = 0xfee1; +pub const XK_Pointer_Up: c_uint = 0xfee2; +pub const XK_Pointer_Down: c_uint = 0xfee3; +pub const XK_Pointer_UpLeft: c_uint = 0xfee4; +pub const XK_Pointer_UpRight: c_uint = 0xfee5; +pub const XK_Pointer_DownLeft: c_uint = 0xfee6; +pub const XK_Pointer_DownRight: c_uint = 0xfee7; +pub const XK_Pointer_Button_Dflt: c_uint = 0xfee8; +pub const XK_Pointer_Button1: c_uint = 0xfee9; +pub const XK_Pointer_Button2: c_uint = 0xfeea; +pub const XK_Pointer_Button3: c_uint = 0xfeeb; +pub const XK_Pointer_Button4: c_uint = 0xfeec; +pub const XK_Pointer_Button5: c_uint = 0xfeed; +pub const XK_Pointer_DblClick_Dflt: c_uint = 0xfeee; +pub const XK_Pointer_DblClick1: c_uint = 0xfeef; +pub const XK_Pointer_DblClick2: c_uint = 0xfef0; +pub const XK_Pointer_DblClick3: c_uint = 0xfef1; +pub const XK_Pointer_DblClick4: c_uint = 0xfef2; +pub const XK_Pointer_DblClick5: c_uint = 0xfef3; +pub const XK_Pointer_Drag_Dflt: c_uint = 0xfef4; +pub const XK_Pointer_Drag1: c_uint = 0xfef5; +pub const XK_Pointer_Drag2: c_uint = 0xfef6; +pub const XK_Pointer_Drag3: c_uint = 0xfef7; +pub const XK_Pointer_Drag4: c_uint = 0xfef8; +pub const XK_Pointer_Drag5: c_uint = 0xfefd; + +pub const XK_Pointer_EnableKeys: c_uint = 0xfef9; +pub const XK_Pointer_Accelerate: c_uint = 0xfefa; +pub const XK_Pointer_DfltBtnNext: c_uint = 0xfefb; +pub const XK_Pointer_DfltBtnPrev: c_uint = 0xfefc; + +pub const XK_ch: c_uint = 0xfea0; +pub const XK_Ch: c_uint = 0xfea1; +pub const XK_CH: c_uint = 0xfea2; +pub const XK_c_h: c_uint = 0xfea3; +pub const XK_C_h: c_uint = 0xfea4; +pub const XK_C_H: c_uint = 0xfea5; diff --git a/third_party/rust/x11/src/lib.rs b/third_party/rust/x11/src/lib.rs new file mode 100644 index 000000000000..46960f41c090 --- /dev/null +++ b/third_party/rust/x11/src/lib.rs @@ -0,0 +1,37 @@ + + + + +#![allow(non_camel_case_types)] +#![allow(non_snake_case)] +#![allow(non_upper_case_globals)] +#![allow(improper_ctypes)] + +extern crate libc; + +#[macro_use] +mod link; +mod internal; + +#[macro_use] +pub mod xlib; + +pub mod dpms; +pub mod glx; +pub mod keysym; +pub mod xcursor; +pub mod xf86vmode; +pub 
mod xfixes; +pub mod xft; +pub mod xinerama; +pub mod xinput; +pub mod xinput2; +pub mod xmd; +pub mod xmu; +pub mod xrandr; +pub mod xrecord; +pub mod xrender; +pub mod xss; +pub mod xt; +pub mod xtest; +pub mod xlib_xcb; diff --git a/third_party/rust/x11/src/link.rs b/third_party/rust/x11/src/link.rs new file mode 100644 index 000000000000..f554563486e0 --- /dev/null +++ b/third_party/rust/x11/src/link.rs @@ -0,0 +1,22 @@ + + + + +macro_rules! x11_link { + { $struct_name:ident, $pkg_name:expr, [$($lib_name:expr),*], $nsyms:expr, + $(pub fn $fn_name:ident ($($param_name:ident : $param_type:ty),*) -> $ret_type:ty,)* + variadic: + $(pub fn $vfn_name:ident ($($vparam_name: ident : $vparam_type:ty),+) -> $vret_type:ty,)* + globals: + $(pub static $var_name:ident : $var_type:ty,)* + } => { + extern "C" { + $(pub fn $fn_name ($($param_name : $param_type),*) -> $ret_type;)* + $(pub fn $vfn_name ($($vparam_name : $vparam_type),+, ...) -> $vret_type;)* + } + + extern { + $(pub static $var_name : $var_type;)* + } + } +} diff --git a/third_party/rust/x11/src/xcursor.rs b/third_party/rust/x11/src/xcursor.rs new file mode 100644 index 000000000000..ea14792acce6 --- /dev/null +++ b/third_party/rust/x11/src/xcursor.rs @@ -0,0 +1,211 @@ + + + + +use std::os::raw::{ + c_char, + c_int, + c_long, + c_uchar, + c_uint, + c_ulong, + c_void, +}; +use libc::FILE; + +use ::xlib::{ + Cursor, + Display, + XColor, + XImage, +}; + + + + + + + +x11_link! { Xcursor, xcursor, ["libXcursor.so.1", "libXcursor.so"], 59, + pub fn XcursorAnimateCreate (_1: *mut XcursorCursors) -> *mut XcursorAnimate, + pub fn XcursorAnimateDestroy (_1: *mut XcursorAnimate) -> (), + pub fn XcursorAnimateNext (_1: *mut XcursorAnimate) -> c_ulong, + pub fn XcursorCommentCreate (_2: c_uint, _1: c_int) -> *mut XcursorComment, + pub fn XcursorCommentDestroy (_1: *mut XcursorComment) -> (), + pub fn XcursorCommentsCreate (_1: c_int) -> *mut XcursorComments, + pub fn XcursorCommentsDestroy (_1: *mut XcursorComments) -> (), + pub fn XcursorCursorsCreate (_2: *mut Display, _1: c_int) -> *mut XcursorCursors, + pub fn XcursorCursorsDestroy (_1: *mut XcursorCursors) -> (), + pub fn XcursorFileLoad (_3: *mut FILE, _2: *mut *mut XcursorComments, _1: *mut *mut XcursorImages) -> c_int, + pub fn XcursorFileLoadAllImages (_1: *mut FILE) -> *mut XcursorImages, + pub fn XcursorFileLoadImage (_2: *mut FILE, _1: c_int) -> *mut XcursorImage, + pub fn XcursorFileLoadImages (_2: *mut FILE, _1: c_int) -> *mut XcursorImages, + pub fn XcursorFilenameLoad (_3: *const c_char, _2: *mut *mut XcursorComments, _1: *mut *mut XcursorImages) -> c_int, + pub fn XcursorFilenameLoadAllImages (_1: *const c_char) -> *mut XcursorImages, + pub fn XcursorFilenameLoadCursor (_2: *mut Display, _1: *const c_char) -> c_ulong, + pub fn XcursorFilenameLoadCursors (_2: *mut Display, _1: *const c_char) -> *mut XcursorCursors, + pub fn XcursorFilenameLoadImage (_2: *const c_char, _1: c_int) -> *mut XcursorImage, + pub fn XcursorFilenameLoadImages (_2: *const c_char, _1: c_int) -> *mut XcursorImages, + pub fn XcursorFilenameSave (_3: *const c_char, _2: *const XcursorComments, _1: *const XcursorImages) -> c_int, + pub fn XcursorFilenameSaveImages (_2: *const c_char, _1: *const XcursorImages) -> c_int, + pub fn XcursorFileSave (_3: *mut FILE, _2: *const XcursorComments, _1: *const XcursorImages) -> c_int, + pub fn XcursorFileSaveImages (_2: *mut FILE, _1: *const XcursorImages) -> c_int, + pub fn XcursorGetDefaultSize (_1: *mut Display) -> c_int, + pub fn XcursorGetTheme (_1: *mut Display) -> *mut 
c_char, + pub fn XcursorGetThemeCore (_1: *mut Display) -> c_int, + pub fn XcursorImageCreate (_2: c_int, _1: c_int) -> *mut XcursorImage, + pub fn XcursorImageDestroy (_1: *mut XcursorImage) -> (), + pub fn XcursorImageHash (_2: *mut XImage, _1: *mut c_uchar) -> (), + pub fn XcursorImageLoadCursor (_2: *mut Display, _1: *const XcursorImage) -> c_ulong, + pub fn XcursorImagesCreate (_1: c_int) -> *mut XcursorImages, + pub fn XcursorImagesDestroy (_1: *mut XcursorImages) -> (), + pub fn XcursorImagesLoadCursor (_2: *mut Display, _1: *const XcursorImages) -> c_ulong, + pub fn XcursorImagesLoadCursors (_2: *mut Display, _1: *const XcursorImages) -> *mut XcursorCursors, + pub fn XcursorImagesSetName (_2: *mut XcursorImages, _1: *const c_char) -> (), + pub fn XcursorLibraryLoadCursor (_2: *mut Display, _1: *const c_char) -> c_ulong, + pub fn XcursorLibraryLoadCursors (_2: *mut Display, _1: *const c_char) -> *mut XcursorCursors, + pub fn XcursorLibraryLoadImage (_3: *const c_char, _2: *const c_char, _1: c_int) -> *mut XcursorImage, + pub fn XcursorLibraryLoadImages (_3: *const c_char, _2: *const c_char, _1: c_int) -> *mut XcursorImages, + pub fn XcursorLibraryPath () -> *const c_char, + pub fn XcursorLibraryShape (_1: *const c_char) -> c_int, + pub fn XcursorNoticeCreateBitmap (_4: *mut Display, _3: c_ulong, _2: c_uint, _1: c_uint) -> (), + pub fn XcursorNoticePutBitmap (_3: *mut Display, _2: c_ulong, _1: *mut XImage) -> (), + pub fn XcursorSetDefaultSize (_2: *mut Display, _1: c_int) -> c_int, + pub fn XcursorSetTheme (_2: *mut Display, _1: *const c_char) -> c_int, + pub fn XcursorSetThemeCore (_2: *mut Display, _1: c_int) -> c_int, + pub fn XcursorShapeLoadCursor (_2: *mut Display, _1: c_uint) -> c_ulong, + pub fn XcursorShapeLoadCursors (_2: *mut Display, _1: c_uint) -> *mut XcursorCursors, + pub fn XcursorShapeLoadImage (_3: c_uint, _2: *const c_char, _1: c_int) -> *mut XcursorImage, + pub fn XcursorShapeLoadImages (_3: c_uint, _2: *const c_char, _1: c_int) -> *mut XcursorImages, + pub fn XcursorSupportsAnim (_1: *mut Display) -> c_int, + pub fn XcursorSupportsARGB (_1: *mut Display) -> c_int, + pub fn XcursorTryShapeBitmapCursor (_7: *mut Display, _6: c_ulong, _5: c_ulong, _4: *mut XColor, _3: *mut XColor, _2: c_uint, _1: c_uint) -> c_ulong, + pub fn XcursorTryShapeCursor (_7: *mut Display, _6: c_ulong, _5: c_ulong, _4: c_uint, _3: c_uint, _2: *const XColor, _1: *const XColor) -> c_ulong, + pub fn XcursorXcFileLoad (_3: *mut XcursorFile, _2: *mut *mut XcursorComments, _1: *mut *mut XcursorImages) -> c_int, + pub fn XcursorXcFileLoadAllImages (_1: *mut XcursorFile) -> *mut XcursorImages, + pub fn XcursorXcFileLoadImage (_2: *mut XcursorFile, _1: c_int) -> *mut XcursorImage, + pub fn XcursorXcFileLoadImages (_2: *mut XcursorFile, _1: c_int) -> *mut XcursorImages, + pub fn XcursorXcFileSave (_3: *mut XcursorFile, _2: *const XcursorComments, _1: *const XcursorImages) -> c_int, +variadic: +globals: +} + + + + + + + +pub type XcursorBool = c_int; +pub type XcursorDim = XcursorUInt; +pub type XcursorPixel = XcursorUInt; +pub type XcursorUInt = c_uint; + +#[derive(Debug, Clone, Copy)] +#[repr(C)] +pub struct _XcursorAnimate { + pub cursors: *mut XcursorCursors, + pub sequence: c_int, +} +pub type XcursorAnimate = _XcursorAnimate; + +#[derive(Debug, Clone, Copy)] +#[repr(C)] +pub struct _XcursorChunkHeader { + pub header: XcursorUInt, + pub type_: XcursorUInt, + pub subtype: XcursorUInt, + pub version: XcursorUInt, +} +pub type XcursorChunkHeader = _XcursorChunkHeader; + +#[derive(Debug, Clone, 
Copy)] +#[repr(C)] +pub struct _XcursorComment { + pub version: XcursorUInt, + pub comment_type: XcursorUInt, + pub comment: *mut c_char, +} +pub type XcursorComment = _XcursorComment; + +#[derive(Debug, Clone, Copy)] +#[repr(C)] +pub struct _XcursorComments { + pub ncomment: c_int, + pub comments: *mut *mut XcursorComment, +} +pub type XcursorComments = _XcursorComments; + +#[derive(Debug, Clone, Copy)] +#[repr(C)] +pub struct _XcursorCursors { + pub dpy: *mut Display, + pub ref_: c_int, + pub ncursor: c_int, + pub cursors: *mut Cursor, +} +pub type XcursorCursors = _XcursorCursors; + +#[derive(Debug, Copy)] +#[repr(C)] +pub struct _XcursorFile { + pub closure: *mut c_void, + pub read: Option<unsafe extern "C" fn (*mut XcursorFile, *mut c_uchar, c_int) -> c_int>, + pub write: Option<unsafe extern "C" fn (*mut XcursorFile, *mut c_uchar, c_int) -> c_int>, + pub seek: Option<unsafe extern "C" fn (*mut XcursorFile, c_long, c_int) -> c_int>, +} +pub type XcursorFile = _XcursorFile; + +impl Clone for _XcursorFile { + fn clone (&self) -> _XcursorFile { + _XcursorFile { + closure: self.closure, + read: self.read, + write: self.write, + seek: self.seek, + } + } +} + +#[derive(Debug, Clone, Copy)] +#[repr(C)] +pub struct _XcursorFileHeader { + pub magic: XcursorUInt, + pub header: XcursorUInt, + pub version: XcursorUInt, + pub ntoc: XcursorUInt, + pub tocs: *mut XcursorFileToc, +} +pub type XcursorFileHeader = _XcursorFileHeader; + +#[derive(Debug, Clone, Copy)] +#[repr(C)] +pub struct _XcursorFileToc { + pub type_: XcursorUInt, + pub subtype: XcursorUInt, + pub position: XcursorUInt, +} +pub type XcursorFileToc = _XcursorFileToc; + +#[derive(Debug, Clone, Copy)] +#[repr(C)] +pub struct _XcursorImage { + pub version: XcursorUInt, + pub size: XcursorDim, + pub width: XcursorDim, + pub height: XcursorDim, + pub xhot: XcursorDim, + pub yhot: XcursorDim, + pub delay: XcursorUInt, + pub pixels: *mut XcursorPixel, +} +pub type XcursorImage = _XcursorImage; + +#[derive(Debug, Clone, Copy)] +#[repr(C)] +pub struct _XcursorImages { + pub nimage: c_int, + pub images: *mut *mut XcursorImage, + pub name: *mut c_char, +} +pub type XcursorImages = _XcursorImages; diff --git a/third_party/rust/x11/src/xf86vmode.rs b/third_party/rust/x11/src/xf86vmode.rs new file mode 100644 index 000000000000..f4ba1f1ea753 --- /dev/null +++ b/third_party/rust/x11/src/xf86vmode.rs @@ -0,0 +1,146 @@ + + + + +use std::os::raw::{ + c_char, + c_float, + c_int, + c_uchar, + c_uint, + c_ulong, + c_ushort, +}; + +use ::xlib::{ + Bool, + Display, + Time, + Window, + XEvent, +}; + + + + + + + +x11_link!
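+// Usage sketch (illustrative, not part of the vendored file): the entries in
+// the x11_link! table below expand to plain `extern "C"` declarations, so the
+// bindings are called directly. For example, reading the current gamma for
+// screen 0, assuming `dpy` is an open `*mut Display`:
+//   let mut gamma = XF86VidModeGamma { red: 0.0, green: 0.0, blue: 0.0 };
+//   let ok = unsafe { XF86VidModeGetGamma(dpy, 0, &mut gamma) } != 0;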
{ Xf86vmode, xxf86vm, ["libXxf86vm.so.1", "libXxf86vm.so"], 22, + pub fn XF86VidModeAddModeLine (_4: *mut Display, _3: c_int, _2: *mut XF86VidModeModeInfo, _1: *mut XF86VidModeModeInfo) -> c_int, + pub fn XF86VidModeDeleteModeLine (_3: *mut Display, _2: c_int, _1: *mut XF86VidModeModeInfo) -> c_int, + pub fn XF86VidModeGetAllModeLines (_4: *mut Display, _3: c_int, _2: *mut c_int, _1: *mut *mut *mut XF86VidModeModeInfo) -> c_int, + pub fn XF86VidModeGetDotClocks (_6: *mut Display, _5: c_int, _4: *mut c_int, _3: *mut c_int, _2: *mut c_int, _1: *mut *mut c_int) -> c_int, + pub fn XF86VidModeGetGamma (_3: *mut Display, _2: c_int, _1: *mut XF86VidModeGamma) -> c_int, + pub fn XF86VidModeGetGammaRamp (_6: *mut Display, _5: c_int, _4: c_int, _3: *mut c_ushort, _2: *mut c_ushort, _1: *mut c_ushort) -> c_int, + pub fn XF86VidModeGetGammaRampSize (_3: *mut Display, _2: c_int, _1: *mut c_int) -> c_int, + pub fn XF86VidModeGetModeLine (_4: *mut Display, _3: c_int, _2: *mut c_int, _1: *mut XF86VidModeModeLine) -> c_int, + pub fn XF86VidModeGetMonitor (_3: *mut Display, _2: c_int, _1: *mut XF86VidModeMonitor) -> c_int, + pub fn XF86VidModeGetPermissions (_3: *mut Display, _2: c_int, _1: *mut c_int) -> c_int, + pub fn XF86VidModeGetViewPort (_4: *mut Display, _3: c_int, _2: *mut c_int, _1: *mut c_int) -> c_int, + pub fn XF86VidModeLockModeSwitch (_3: *mut Display, _2: c_int, _1: c_int) -> c_int, + pub fn XF86VidModeModModeLine (_3: *mut Display, _2: c_int, _1: *mut XF86VidModeModeLine) -> c_int, + pub fn XF86VidModeQueryExtension (_3: *mut Display, _2: *mut c_int, _1: *mut c_int) -> c_int, + pub fn XF86VidModeQueryVersion (_3: *mut Display, _2: *mut c_int, _1: *mut c_int) -> c_int, + pub fn XF86VidModeSetClientVersion (_1: *mut Display) -> c_int, + pub fn XF86VidModeSetGamma (_3: *mut Display, _2: c_int, _1: *mut XF86VidModeGamma) -> c_int, + pub fn XF86VidModeSetGammaRamp (_6: *mut Display, _5: c_int, _4: c_int, _3: *mut c_ushort, _2: *mut c_ushort, _1: *mut c_ushort) -> c_int, + pub fn XF86VidModeSetViewPort (_4: *mut Display, _3: c_int, _2: c_int, _1: c_int) -> c_int, + pub fn XF86VidModeSwitchMode (_3: *mut Display, _2: c_int, _1: c_int) -> c_int, + pub fn XF86VidModeSwitchToMode (_3: *mut Display, _2: c_int, _1: *mut XF86VidModeModeInfo) -> c_int, + pub fn XF86VidModeValidateModeLine (_3: *mut Display, _2: c_int, _1: *mut XF86VidModeModeInfo) -> c_int, +variadic: +globals: +} + + + + + + + +#[derive(Debug, Clone, Copy)] +#[repr(C)] +pub struct XF86VidModeGamma { + pub red: c_float, + pub green: c_float, + pub blue: c_float, +} + +#[derive(Debug, Clone, Copy, PartialEq)] +#[repr(C)] +pub struct XF86VidModeModeInfo { + pub dotclock: c_uint, + pub hdisplay: c_ushort, + pub hsyncstart: c_ushort, + pub hsyncend: c_ushort, + pub htotal: c_ushort, + pub hskew: c_ushort, + pub vdisplay: c_ushort, + pub vsyncstart: c_ushort, + pub vsyncend: c_ushort, + pub vtotal: c_ushort, + pub flags: c_uint, + pub privsize: c_int, + pub private: *mut i32, +} + +#[derive(Debug, Clone, Copy)] +#[repr(C)] +pub struct XF86VidModeModeLine { + pub hdisplay: c_ushort, + pub hsyncstart: c_ushort, + pub hsyncend: c_ushort, + pub htotal: c_ushort, + pub hskew: c_ushort, + pub vdisplay: c_ushort, + pub vsyncstart: c_ushort, + pub vsyncend: c_ushort, + pub vtotal: c_ushort, + pub flags: c_uint, + pub privsize: c_int, + pub private: *mut i32, +} + +#[derive(Debug, Clone, Copy)] +#[repr(C)] +pub struct XF86VidModeMonitor { + pub vendor: *mut c_char, + pub model: *mut c_char, + pub EMPTY: c_float, + pub nhsync: c_uchar, + pub hsync: 
*mut XF86VidModeSyncRange, + pub nvsync: c_uchar, + pub vsync: *mut XF86VidModeSyncRange, +} + +#[derive(Debug, Clone, Copy)] +#[repr(C)] +pub struct XF86VidModeSyncRange { + pub hi: c_float, + pub lo: c_float, +} + + + + + + + +#[derive(Debug, Clone, Copy)] +#[repr(C)] +pub struct XF86VidModeNotifyEvent { + pub type_: c_int, + pub serial: c_ulong, + pub send_event: Bool, + pub display: *mut Display, + pub root: Window, + pub state: c_int, + pub kind: c_int, + pub forced: bool, + pub time: Time, +} + +event_conversions_and_tests! { + xf86vm_notify: XF86VidModeNotifyEvent, +} diff --git a/third_party/rust/x11/src/xfixes.rs b/third_party/rust/x11/src/xfixes.rs new file mode 100644 index 000000000000..96e78f275c2d --- /dev/null +++ b/third_party/rust/x11/src/xfixes.rs @@ -0,0 +1,13 @@ + + + + +use ::xlib::XID; + + + + + + + +pub type PointerBarrier = XID; diff --git a/third_party/rust/x11/src/xft.rs b/third_party/rust/x11/src/xft.rs new file mode 100644 index 000000000000..cea33a890506 --- /dev/null +++ b/third_party/rust/x11/src/xft.rs @@ -0,0 +1,219 @@ + + + + +use std::os::raw::*; + +use xlib::{Display, Region, Visual, XRectangle}; +use xrender::{XGlyphInfo, XRenderColor}; + + + + + + + + +pub enum FT_FaceRec {} +pub type FT_UInt = c_uint; + + +pub type FcChar32 = c_uint; +pub enum FcCharSet {} +pub enum FcPattern {} + +#[repr(C)] +pub enum FcEndian { Big, Little } + +#[repr(C)] +pub enum FcResult { Match, NoMatch, TypeMismatch, NoId, OutOfMemory } + + + + + + + +x11_link! { Xft, xft, ["libXft.so.2", "libXft.so"], 77, + pub fn XftCharExists (_2: *mut Display, _1: *mut XftFont, _0: c_uint) -> c_int, + pub fn XftCharFontSpecRender (_7: *mut Display, _6: c_int, _5: c_ulong, _4: c_ulong, _3: c_int, _2: c_int, _1: *const XftCharFontSpec, _0: c_int) -> (), + pub fn XftCharIndex (_2: *mut Display, _1: *mut XftFont, _0: c_uint) -> c_uint, + pub fn XftCharSpecRender (_8: *mut Display, _7: c_int, _6: c_ulong, _5: *mut XftFont, _4: c_ulong, _3: c_int, _2: c_int, _1: *const XftCharSpec, _0: c_int) -> (), + pub fn XftColorAllocName (_4: *mut Display, _3: *const Visual, _2: c_ulong, _1: *const c_char, _0: *mut XftColor) -> c_int, + pub fn XftColorAllocValue (_4: *mut Display, _3: *mut Visual, _2: c_ulong, _1: *const XRenderColor, _0: *mut XftColor) -> c_int, + pub fn XftColorFree (_3: *mut Display, _2: *mut Visual, _1: c_ulong, _0: *mut XftColor) -> (), + pub fn XftDefaultHasRender (_0: *mut Display) -> c_int, + pub fn XftDefaultSet (_1: *mut Display, _0: *mut FcPattern) -> c_int, + pub fn XftDefaultSubstitute (_2: *mut Display, _1: c_int, _0: *mut FcPattern) -> (), + pub fn XftDrawChange (_1: *mut XftDraw, _0: c_ulong) -> (), + pub fn XftDrawCharFontSpec (_3: *mut XftDraw, _2: *const XftColor, _1: *const XftCharFontSpec, _0: c_int) -> (), + pub fn XftDrawCharSpec (_4: *mut XftDraw, _3: *const XftColor, _2: *mut XftFont, _1: *const XftCharSpec, _0: c_int) -> (), + pub fn XftDrawColormap (_0: *mut XftDraw) -> c_ulong, + pub fn XftDrawCreate (_3: *mut Display, _2: c_ulong, _1: *mut Visual, _0: c_ulong) -> *mut XftDraw, + pub fn XftDrawCreateAlpha (_2: *mut Display, _1: c_ulong, _0: c_int) -> *mut XftDraw, + pub fn XftDrawCreateBitmap (_1: *mut Display, _0: c_ulong) -> *mut XftDraw, + pub fn XftDrawDestroy (_0: *mut XftDraw) -> (), + pub fn XftDrawDisplay (_0: *mut XftDraw) -> *mut Display, + pub fn XftDrawDrawable (_0: *mut XftDraw) -> c_ulong, + pub fn XftDrawGlyphFontSpec (_3: *mut XftDraw, _2: *const XftColor, _1: *const XftGlyphFontSpec, _0: c_int) -> (), + pub fn XftDrawGlyphs (_6: *mut XftDraw, _5: 
*const XftColor, _4: *mut XftFont, _3: c_int, _2: c_int, _1: *const c_uint, _0: c_int) -> (), + pub fn XftDrawGlyphSpec (_4: *mut XftDraw, _3: *const XftColor, _2: *mut XftFont, _1: *const XftGlyphSpec, _0: c_int) -> (), + pub fn XftDrawPicture (_0: *mut XftDraw) -> c_ulong, + pub fn XftDrawRect (_5: *mut XftDraw, _4: *const XftColor, _3: c_int, _2: c_int, _1: c_uint, _0: c_uint) -> (), + pub fn XftDrawSetClip (_1: *mut XftDraw, _0: Region) -> c_int, + pub fn XftDrawSetClipRectangles (_4: *mut XftDraw, _3: c_int, _2: c_int, _1: *const XRectangle, _0: c_int) -> c_int, + pub fn XftDrawSetSubwindowMode (_1: *mut XftDraw, _0: c_int) -> (), + pub fn XftDrawSrcPicture (_1: *mut XftDraw, _0: *const XftColor) -> c_ulong, + pub fn XftDrawString16 (_6: *mut XftDraw, _5: *const XftColor, _4: *mut XftFont, _3: c_int, _2: c_int, _1: *const c_ushort, _0: c_int) -> (), + pub fn XftDrawString32 (_6: *mut XftDraw, _5: *const XftColor, _4: *mut XftFont, _3: c_int, _2: c_int, _1: *const c_uint, _0: c_int) -> (), + pub fn XftDrawString8 (_6: *mut XftDraw, _5: *const XftColor, _4: *mut XftFont, _3: c_int, _2: c_int, _1: *const c_uchar, _0: c_int) -> (), + pub fn XftDrawStringUtf16 (_7: *mut XftDraw, _6: *const XftColor, _5: *mut XftFont, _4: c_int, _3: c_int, _2: *const c_uchar, _1: FcEndian, _0: c_int) -> (), + pub fn XftDrawStringUtf8 (_6: *mut XftDraw, _5: *const XftColor, _4: *mut XftFont, _3: c_int, _2: c_int, _1: *const c_uchar, _0: c_int) -> (), + pub fn XftDrawVisual (_0: *mut XftDraw) -> *mut Visual, + pub fn XftFontCheckGlyph (_5: *mut Display, _4: *mut XftFont, _3: c_int, _2: c_uint, _1: *mut c_uint, _0: *mut c_int) -> c_int, + pub fn XftFontClose (_1: *mut Display, _0: *mut XftFont) -> (), + pub fn XftFontCopy (_1: *mut Display, _0: *mut XftFont) -> *mut XftFont, + pub fn XftFontInfoCreate (_1: *mut Display, _0: *const FcPattern) -> *mut XftFontInfo, + pub fn XftFontInfoDestroy (_1: *mut Display, _0: *mut XftFontInfo) -> (), + pub fn XftFontInfoEqual (_1: *const XftFontInfo, _0: *const XftFontInfo) -> c_int, + pub fn XftFontInfoHash (_0: *const XftFontInfo) -> c_uint, + pub fn XftFontLoadGlyphs (_4: *mut Display, _3: *mut XftFont, _2: c_int, _1: *const c_uint, _0: c_int) -> (), + pub fn XftFontMatch (_3: *mut Display, _2: c_int, _1: *const FcPattern, _0: *mut FcResult) -> *mut FcPattern, + pub fn XftFontOpenInfo (_2: *mut Display, _1: *mut FcPattern, _0: *mut XftFontInfo) -> *mut XftFont, + pub fn XftFontOpenName (_2: *mut Display, _1: c_int, _0: *const c_char) -> *mut XftFont, + pub fn XftFontOpenPattern (_1: *mut Display, _0: *mut FcPattern) -> *mut XftFont, + pub fn XftFontOpenXlfd (_2: *mut Display, _1: c_int, _0: *const c_char) -> *mut XftFont, + pub fn XftFontUnloadGlyphs (_3: *mut Display, _2: *mut XftFont, _1: *const c_uint, _0: c_int) -> (), + pub fn XftGetVersion () -> c_int, + pub fn XftGlyphExtents (_4: *mut Display, _3: *mut XftFont, _2: *const c_uint, _1: c_int, _0: *mut XGlyphInfo) -> (), + pub fn XftGlyphFontSpecRender (_7: *mut Display, _6: c_int, _5: c_ulong, _4: c_ulong, _3: c_int, _2: c_int, _1: *const XftGlyphFontSpec, _0: c_int) -> (), + pub fn XftGlyphRender (_10: *mut Display, _9: c_int, _8: c_ulong, _7: *mut XftFont, _6: c_ulong, _5: c_int, _4: c_int, _3: c_int, _2: c_int, _1: *const c_uint, _0: c_int) -> (), + pub fn XftGlyphSpecRender (_8: *mut Display, _7: c_int, _6: c_ulong, _5: *mut XftFont, _4: c_ulong, _3: c_int, _2: c_int, _1: *const XftGlyphSpec, _0: c_int) -> (), + pub fn XftInit (_0: *const c_char) -> c_int, + pub fn XftInitFtLibrary () -> c_int, + pub fn 
XftLockFace (_0: *mut XftFont) -> *mut FT_FaceRec, + pub fn XftNameParse (_0: *const c_char) -> *mut FcPattern, + pub fn XftNameUnparse (_2: *mut FcPattern, _1: *mut c_char, _0: c_int) -> c_int, + pub fn XftTextExtents16 (_4: *mut Display, _3: *mut XftFont, _2: *const c_ushort, _1: c_int, _0: *mut XGlyphInfo) -> (), + pub fn XftTextExtents32 (_4: *mut Display, _3: *mut XftFont, _2: *const c_uint, _1: c_int, _0: *mut XGlyphInfo) -> (), + pub fn XftTextExtents8 (_4: *mut Display, _3: *mut XftFont, _2: *const c_uchar, _1: c_int, _0: *mut XGlyphInfo) -> (), + pub fn XftTextExtentsUtf16 (_5: *mut Display, _4: *mut XftFont, _3: *const c_uchar, _2: FcEndian, _1: c_int, _0: *mut XGlyphInfo) -> (), + pub fn XftTextExtentsUtf8 (_4: *mut Display, _3: *mut XftFont, _2: *const c_uchar, _1: c_int, _0: *mut XGlyphInfo) -> (), + pub fn XftTextRender16 (_10: *mut Display, _9: c_int, _8: c_ulong, _7: *mut XftFont, _6: c_ulong, _5: c_int, _4: c_int, _3: c_int, _2: c_int, _1: *const c_ushort, _0: c_int) -> (), + pub fn XftTextRender16BE (_10: *mut Display, _9: c_int, _8: c_ulong, _7: *mut XftFont, _6: c_ulong, _5: c_int, _4: c_int, _3: c_int, _2: c_int, _1: *const c_uchar, _0: c_int) -> (), + pub fn XftTextRender16LE (_10: *mut Display, _9: c_int, _8: c_ulong, _7: *mut XftFont, _6: c_ulong, _5: c_int, _4: c_int, _3: c_int, _2: c_int, _1: *const c_uchar, _0: c_int) -> (), + pub fn XftTextRender32 (_10: *mut Display, _9: c_int, _8: c_ulong, _7: *mut XftFont, _6: c_ulong, _5: c_int, _4: c_int, _3: c_int, _2: c_int, _1: *const c_uint, _0: c_int) -> (), + pub fn XftTextRender32BE (_10: *mut Display, _9: c_int, _8: c_ulong, _7: *mut XftFont, _6: c_ulong, _5: c_int, _4: c_int, _3: c_int, _2: c_int, _1: *const c_uchar, _0: c_int) -> (), + pub fn XftTextRender32LE (_10: *mut Display, _9: c_int, _8: c_ulong, _7: *mut XftFont, _6: c_ulong, _5: c_int, _4: c_int, _3: c_int, _2: c_int, _1: *const c_uchar, _0: c_int) -> (), + pub fn XftTextRender8 (_10: *mut Display, _9: c_int, _8: c_ulong, _7: *mut XftFont, _6: c_ulong, _5: c_int, _4: c_int, _3: c_int, _2: c_int, _1: *const c_uchar, _0: c_int) -> (), + pub fn XftTextRenderUtf16 (_11: *mut Display, _10: c_int, _9: c_ulong, _8: *mut XftFont, _7: c_ulong, _6: c_int, _5: c_int, _4: c_int, _3: c_int, _2: *const c_uchar, _1: FcEndian, _0: c_int) -> (), + pub fn XftTextRenderUtf8 (_10: *mut Display, _9: c_int, _8: c_ulong, _7: *mut XftFont, _6: c_ulong, _5: c_int, _4: c_int, _3: c_int, _2: c_int, _1: *const c_uchar, _0: c_int) -> (), + pub fn XftUnlockFace (_0: *mut XftFont) -> (), + pub fn XftXlfdParse (_2: *const c_char, _1: c_int, _0: c_int) -> *mut FcPattern, +variadic: + pub fn XftFontOpen (_1: *mut Display, _0: c_int) -> *mut XftFont, + pub fn XftListFonts (_1: *mut Display, _0: c_int) -> *mut XftFontSet, +globals: +} + + + + + + + +pub enum XftFontInfo {} + +#[derive(Debug, Clone, Copy, PartialEq)] +#[repr(C)] +pub struct XftFont { + pub ascent: c_int, + pub descent: c_int, + pub height: c_int, + pub max_advance_width: c_int, + pub charset: *mut FcCharSet, + pub pattern: *mut FcPattern, +} + +pub enum XftDraw {} + +#[derive(Debug, Clone, Copy, PartialEq)] +#[repr(C)] +pub struct XftColor { + pub pixel: c_ulong, + pub color: XRenderColor, +} + +#[derive(Debug, Clone, Copy, PartialEq)] +#[repr(C)] +pub struct XftCharSpec { + pub ucs4: FcChar32, + pub x: c_short, + pub y: c_short, +} + +#[derive(Debug, Clone, Copy, PartialEq)] +#[repr(C)] +pub struct XftCharFontSpec { + pub font: *mut XftFont, + pub ucs4: FcChar32, + pub x: c_short, + pub y: c_short, +} + +#[derive(Debug, 
Clone, Copy, PartialEq)] +#[repr(C)] +pub struct XftFontSet { + pub nfont: c_int, + pub sfont: c_int, + pub fonts: *mut *mut XftPattern, +} + +#[derive(Debug, Clone, Copy, PartialEq)] +#[repr(C)] +pub struct XftGlyphSpec { + pub glyph: FT_UInt, + pub x: c_short, + pub y: c_short, +} + +#[derive(Debug, Clone, Copy, PartialEq)] +#[repr(C)] +pub struct XftGlyphFontSpec { + pub font: *mut XftFont, + pub glyph: FT_UInt, + pub x: c_short, + pub y: c_short, +} + +pub enum XftPattern {} + + + + + + + + +pub const XFT_FAMILY: &'static str = "family"; +pub const XFT_STYLE: &'static str = "style"; +pub const XFT_SLANT: &'static str = "slant"; +pub const XFT_WEIGHT: &'static str = "weight"; +pub const XFT_SIZE: &'static str = "size"; +pub const XFT_PIXEL_SIZE: &'static str = "pixelsize"; +pub const XFT_SPACING: &'static str = "spacing"; +pub const XFT_FOUNDRY: &'static str = "foundry"; +pub const XFT_ANTIALIAS: &'static str = "antialias"; + + +pub const XFT_SLANT_ROMAN: c_int = 0; +pub const XFT_SLANT_ITALIC: c_int = 100; +pub const XFT_SLANT_OBLIQUE: c_int = 110; + + +pub const XftTypeVoid: c_int = 0; +pub const XftTypeInteger: c_int = 1; +pub const XftTypeDouble: c_int = 2; +pub const XftTypeString: c_int = 3; +pub const XftTypeBool: c_int = 4; +pub const XftTypeMatrix: c_int = 5; diff --git a/third_party/rust/x11/src/xinerama.rs b/third_party/rust/x11/src/xinerama.rs new file mode 100644 index 000000000000..3646fd3e10fb --- /dev/null +++ b/third_party/rust/x11/src/xinerama.rs @@ -0,0 +1,66 @@ + + + + +use std::os::raw::{ + c_int, + c_short, +}; + +use ::xlib::{ + Bool, + Display, + Drawable, + Status, + Window, + XID, +}; + + + + + + + +x11_link! { Xlib, xinerama, ["libXinerama.so.1", "libXinerama.so"], 10, + pub fn XineramaIsActive (dpy: *mut Display) -> Bool, + pub fn XineramaQueryExtension (dpy: *mut Display, event_base: *mut c_int, error_base: *mut c_int) -> Bool, + pub fn XineramaQueryScreens (dpy: *mut Display, number: *mut c_int) -> *mut XineramaScreenInfo, + pub fn XineramaQueryVersion (dpy: *mut Display, major_versionp: *mut c_int, minor_versionp: *mut c_int) -> Status, + pub fn XPanoramiXAllocInfo () -> *mut XPanoramiXInfo, + pub fn XPanoramiXGetScreenCount (dpy: *mut Display, drawable: Drawable, panoramiX_info: *mut XPanoramiXInfo) -> Status, + pub fn XPanoramiXGetScreenSize (dpy: *mut Display, drawable: Drawable, screen_num: c_int, panoramiX_info: *mut XPanoramiXInfo) -> Status, + pub fn XPanoramiXGetState (dpy: *mut Display, drawable: Drawable, panoramiX_info: *mut XPanoramiXInfo) -> Status, + pub fn XPanoramiXQueryExtension (dpy: *mut Display, event_base_return: *mut c_int, error_base_return: *mut c_int) -> Bool, + pub fn XPanoramiXQueryVersion (dpy: *mut Display, major_version_return: *mut c_int, minor_version_return: *mut c_int) -> Status, +variadic: +globals: +} + + + + + + + +#[derive(Debug, Clone, Copy, PartialEq)] +#[repr(C)] +pub struct XineramaScreenInfo { + pub screen_number: c_int, + pub x_org: c_short, + pub y_org: c_short, + pub width: c_short, + pub height: c_short, +} + +#[derive(Debug, Clone, Copy, PartialEq)] +#[repr(C)] +pub struct XPanoramiXInfo { + pub window: Window, + pub screen: c_int, + pub State: c_int, + pub width: c_int, + pub height: c_int, + pub ScreenCount: c_int, + pub eventMask: XID, +} diff --git a/third_party/rust/x11/src/xinput.rs b/third_party/rust/x11/src/xinput.rs new file mode 100644 index 000000000000..93fc33522c86 --- /dev/null +++ b/third_party/rust/x11/src/xinput.rs @@ -0,0 +1,165 @@ + + + + +use std::os::raw::{ + c_char, + c_int, + c_long, + 
c_short, + c_uchar, + c_uint, + c_ulong, +}; + +use ::xlib::{ + Atom, + Display, + Time, + XEvent, + XID, + XModifierKeymap, +}; + + + + + +x11_link! { XInput, xi, ["libXi.so.6", "libXi.so"], 44, + pub fn XAllowDeviceEvents (_4: *mut Display, _3: *mut XDevice, _2: c_int, _1: c_ulong) -> c_int, + pub fn XChangeDeviceControl (_4: *mut Display, _3: *mut XDevice, _2: c_int, _1: *mut XDeviceControl) -> c_int, + pub fn XChangeDeviceDontPropagateList (_5: *mut Display, _4: c_ulong, _3: c_int, _2: *mut c_ulong, _1: c_int) -> c_int, + pub fn XChangeDeviceKeyMapping (_6: *mut Display, _5: *mut XDevice, _4: c_int, _3: c_int, _2: *mut c_ulong, _1: c_int) -> c_int, + pub fn XChangeDeviceProperty (_8: *mut Display, _7: *mut XDevice, _6: c_ulong, _5: c_ulong, _4: c_int, _3: c_int, _2: *const c_uchar, _1: c_int) -> (), + pub fn XChangeFeedbackControl (_4: *mut Display, _3: *mut XDevice, _2: c_ulong, _1: *mut XFeedbackControl) -> c_int, + pub fn XChangeKeyboardDevice (_2: *mut Display, _1: *mut XDevice) -> c_int, + pub fn XChangePointerDevice (_4: *mut Display, _3: *mut XDevice, _2: c_int, _1: c_int) -> c_int, + pub fn XCloseDevice (_2: *mut Display, _1: *mut XDevice) -> c_int, + pub fn XDeleteDeviceProperty (_3: *mut Display, _2: *mut XDevice, _1: c_ulong) -> (), + pub fn XDeviceBell (_5: *mut Display, _4: *mut XDevice, _3: c_ulong, _2: c_ulong, _1: c_int) -> c_int, + pub fn XFreeDeviceControl (_1: *mut XDeviceControl) -> (), + pub fn XFreeDeviceList (_1: *mut XDeviceInfo) -> (), + pub fn XFreeDeviceMotionEvents (_1: *mut XDeviceTimeCoord) -> (), + pub fn XFreeDeviceState (_1: *mut XDeviceState) -> (), + pub fn XFreeFeedbackList (_1: *mut XFeedbackState) -> (), + pub fn XGetDeviceButtonMapping (_4: *mut Display, _3: *mut XDevice, _2: *mut c_uchar, _1: c_uint) -> c_int, + pub fn XGetDeviceControl (_3: *mut Display, _2: *mut XDevice, _1: c_int) -> *mut XDeviceControl, + pub fn XGetDeviceDontPropagateList (_3: *mut Display, _2: c_ulong, _1: *mut c_int) -> *mut c_ulong, + pub fn XGetDeviceFocus (_5: *mut Display, _4: *mut XDevice, _3: *mut c_ulong, _2: *mut c_int, _1: *mut c_ulong) -> c_int, + pub fn XGetDeviceKeyMapping (_5: *mut Display, _4: *mut XDevice, _3: c_uchar, _2: c_int, _1: *mut c_int) -> *mut c_ulong, + pub fn XGetDeviceModifierMapping (_2: *mut Display, _1: *mut XDevice) -> *mut XModifierKeymap, + pub fn XGetDeviceMotionEvents (_7: *mut Display, _6: *mut XDevice, _5: c_ulong, _4: c_ulong, _3: *mut c_int, _2: *mut c_int, _1: *mut c_int) -> *mut XDeviceTimeCoord, + pub fn XGetDeviceProperty (_12: *mut Display, _11: *mut XDevice, _10: c_ulong, _9: c_long, _8: c_long, _7: c_int, _6: c_ulong, _5: *mut c_ulong, _4: *mut c_int, _3: *mut c_ulong, _2: *mut c_ulong, _1: *mut *mut c_uchar) -> c_int, + pub fn XGetExtensionVersion (_2: *mut Display, _1: *const c_char) -> *mut XExtensionVersion, + pub fn XGetFeedbackControl (_3: *mut Display, _2: *mut XDevice, _1: *mut c_int) -> *mut XFeedbackState, + pub fn XGetSelectedExtensionEvents (_6: *mut Display, _5: c_ulong, _4: *mut c_int, _3: *mut *mut c_ulong, _2: *mut c_int, _1: *mut *mut c_ulong) -> c_int, + pub fn XGrabDevice (_9: *mut Display, _8: *mut XDevice, _7: c_ulong, _6: c_int, _5: c_int, _4: *mut c_ulong, _3: c_int, _2: c_int, _1: c_ulong) -> c_int, + pub fn XGrabDeviceButton (_11: *mut Display, _10: *mut XDevice, _9: c_uint, _8: c_uint, _7: *mut XDevice, _6: c_ulong, _5: c_int, _4: c_uint, _3: *mut c_ulong, _2: c_int, _1: c_int) -> c_int, + pub fn XGrabDeviceKey (_11: *mut Display, _10: *mut XDevice, _9: c_uint, _8: c_uint, _7: *mut XDevice, _6: 
c_ulong, _5: c_int, _4: c_uint, _3: *mut c_ulong, _2: c_int, _1: c_int) -> c_int, + + pub fn XListDeviceProperties (_3: *mut Display, _2: *mut XDevice, _1: *mut c_int) -> *mut c_ulong, + pub fn XListInputDevices (_2: *mut Display, _1: *mut c_int) -> *mut XDeviceInfo, + pub fn XOpenDevice (_2: *mut Display, _1: c_ulong) -> *mut XDevice, + pub fn XQueryDeviceState (_2: *mut Display, _1: *mut XDevice) -> *mut XDeviceState, + pub fn XSelectExtensionEvent (_4: *mut Display, _3: c_ulong, _2: *mut c_ulong, _1: c_int) -> c_int, + pub fn XSendExtensionEvent (_7: *mut Display, _6: *mut XDevice, _5: c_ulong, _4: c_int, _3: c_int, _2: *mut c_ulong, _1: *mut XEvent) -> c_int, + pub fn XSetDeviceButtonMapping (_4: *mut Display, _3: *mut XDevice, _2: *mut c_uchar, _1: c_int) -> c_int, + pub fn XSetDeviceFocus (_5: *mut Display, _4: *mut XDevice, _3: c_ulong, _2: c_int, _1: c_ulong) -> c_int, + pub fn XSetDeviceMode (_3: *mut Display, _2: *mut XDevice, _1: c_int) -> c_int, + pub fn XSetDeviceModifierMapping (_3: *mut Display, _2: *mut XDevice, _1: *mut XModifierKeymap) -> c_int, + pub fn XSetDeviceValuators (_5: *mut Display, _4: *mut XDevice, _3: *mut c_int, _2: c_int, _1: c_int) -> c_int, + pub fn XUngrabDevice (_3: *mut Display, _2: *mut XDevice, _1: c_ulong) -> c_int, + pub fn XUngrabDeviceButton (_6: *mut Display, _5: *mut XDevice, _4: c_uint, _3: c_uint, _2: *mut XDevice, _1: c_ulong) -> c_int, + pub fn XUngrabDeviceKey (_6: *mut Display, _5: *mut XDevice, _4: c_uint, _3: c_uint, _2: *mut XDevice, _1: c_ulong) -> c_int, +variadic: +globals: +} + + + + + + +pub enum _XAnyClassinfo {} + +pub type XAnyClassPtr = *mut _XAnyClassinfo; + +#[derive(Debug, Clone, Copy, PartialEq)] +#[repr(C)] +pub struct XDevice { + pub device_id: XID, + pub num_classes: c_int, + pub classes: *mut XInputClassInfo, +} + +#[derive(Debug, Clone, Copy, PartialEq)] +#[repr(C)] +pub struct XDeviceControl { + pub control: XID, + pub length: c_int, +} + +#[derive(Debug, Clone, Copy, PartialEq)] +#[repr(C)] +pub struct XDeviceInfo { + pub id: XID, + pub type_: Atom, + pub name: *mut c_char, + pub num_classes: c_int, + pub use_: c_int, + pub inputclassinfo: XAnyClassPtr, +} + +#[derive(Debug, Clone, Copy, PartialEq)] +#[repr(C)] +pub struct XDeviceState { + pub device_id: XID, + pub num_classes: c_int, + pub data: *mut XInputClass, +} + +#[derive(Debug, Clone, Copy, PartialEq)] +#[repr(C)] +pub struct XDeviceTimeCoord { + pub time: Time, + pub data: *mut c_int, +} + +#[derive(Debug, Clone, Copy, PartialEq)] +#[repr(C)] +pub struct XExtensionVersion { + pub present: c_int, + pub major_version: c_short, + pub minor_version: c_short, +} + +#[derive(Debug, Clone, Copy, PartialEq)] +#[repr(C)] +pub struct XFeedbackControl { + pub class: XID, + pub length: c_int, + pub id: XID, +} + +#[derive(Debug, Clone, Copy, PartialEq)] +#[repr(C)] +pub struct XFeedbackState { + pub class: XID, + pub length: c_int, + pub id: XID, +} + +#[derive(Debug, Clone, Copy, PartialEq)] +#[repr(C)] +pub struct XInputClass { + pub class: c_uchar, + pub length: c_uchar, +} + +#[derive(Debug, Clone, Copy, PartialEq)] +#[repr(C)] +pub struct XInputClassInfo { + pub input_class: c_uchar, + pub event_type_base: c_uchar, +} + diff --git a/third_party/rust/x11/src/xinput2.rs b/third_party/rust/x11/src/xinput2.rs new file mode 100644 index 000000000000..48a6e35b814c --- /dev/null +++ b/third_party/rust/x11/src/xinput2.rs @@ -0,0 +1,758 @@ +use xfixes::PointerBarrier; +use xlib::{Atom, Display, Time, Window}; +use std::os::raw::{c_int, c_uint, c_long, c_double, c_ulong, 
c_uchar}; + + + + +fn mask_byte(mask_flag: i32) -> usize { + (mask_flag >> 3) as usize +} + +pub fn XISetMask(mask: &mut [::std::os::raw::c_uchar], event: i32) { + mask[mask_byte(event)] |= 1 << (event & 7); +} + +pub fn XIClearMask(mask: &mut [::std::os::raw::c_uchar], event: i32) { + mask[mask_byte(event)] &= !(1 << (event & 7)); +} + +pub fn XIMaskIsSet(mask: &[::std::os::raw::c_uchar], event: i32) -> bool { + (mask[mask_byte(event)] & (1 << (event & 7))) != 0 +} + + + + +x11_link! { XInput2, xi, ["libXi.so.6", "libXi.so"], 34, + pub fn XIAllowEvents (_4: *mut Display, _3: c_int, _2: c_int, _1: c_ulong) -> c_int, + pub fn XIAllowTouchEvents (_5: *mut Display, _4: c_int, _3: c_uint, _2: c_ulong, _1: c_int) -> c_int, + pub fn XIBarrierReleasePointer (_4: *mut Display, _3: c_int, _2: c_ulong, _1: c_uint) -> (), + pub fn XIBarrierReleasePointers (_3: *mut Display, _2: *mut XIBarrierReleasePointerInfo, _1: c_int) -> (), + pub fn XIChangeHierarchy (_3: *mut Display, _2: *mut XIAnyHierarchyChangeInfo, _1: c_int) -> c_int, + pub fn XIChangeProperty (_8: *mut Display, _7: c_int, _6: c_ulong, _5: c_ulong, _4: c_int, _3: c_int, _2: *mut c_uchar, _1: c_int) -> (), + pub fn XIDefineCursor (_4: *mut Display, _3: c_int, _2: c_ulong, _1: c_ulong) -> c_int, + pub fn XIDeleteProperty (_3: *mut Display, _2: c_int, _1: c_ulong) -> (), + pub fn XIFreeDeviceInfo (_1: *mut XIDeviceInfo) -> (), + pub fn XIGetClientPointer (_3: *mut Display, _2: c_ulong, _1: *mut c_int) -> c_int, + pub fn XIGetFocus (_3: *mut Display, _2: c_int, _1: *mut c_ulong) -> c_int, + pub fn XIGetProperty (_12: *mut Display, _11: c_int, _10: c_ulong, _9: c_long, _8: c_long, _7: c_int, _6: c_ulong, _5: *mut c_ulong, _4: *mut c_int, _3: *mut c_ulong, _2: *mut c_ulong, _1: *mut *mut c_uchar) -> c_int, + pub fn XIGetSelectedEvents (_3: *mut Display, _2: c_ulong, _1: *mut c_int) -> *mut XIEventMask, + pub fn XIGrabButton (_11: *mut Display, _10: c_int, _9: c_int, _8: c_ulong, _7: c_ulong, _6: c_int, _5: c_int, _4: c_int, _3: *mut XIEventMask, _2: c_int, _1: *mut XIGrabModifiers) -> c_int, + pub fn XIGrabDevice (_9: *mut Display, _8: c_int, _7: c_ulong, _6: c_ulong, _5: c_ulong, _4: c_int, _3: c_int, _2: c_int, _1: *mut XIEventMask) -> c_int, + pub fn XIGrabEnter (_10: *mut Display, _9: c_int, _8: c_ulong, _7: c_ulong, _6: c_int, _5: c_int, _4: c_int, _3: *mut XIEventMask, _2: c_int, _1: *mut XIGrabModifiers) -> c_int, + pub fn XIGrabFocusIn (_9: *mut Display, _8: c_int, _7: c_ulong, _6: c_int, _5: c_int, _4: c_int, _3: *mut XIEventMask, _2: c_int, _1: *mut XIGrabModifiers) -> c_int, + pub fn XIGrabKeycode (_10: *mut Display, _9: c_int, _8: c_int, _7: c_ulong, _6: c_int, _5: c_int, _4: c_int, _3: *mut XIEventMask, _2: c_int, _1: *mut XIGrabModifiers) -> c_int, + pub fn XIGrabTouchBegin (_7: *mut Display, _6: c_int, _5: c_ulong, _4: c_int, _3: *mut XIEventMask, _2: c_int, _1: *mut XIGrabModifiers) -> c_int, + pub fn XIListProperties (_3: *mut Display, _2: c_int, _1: *mut c_int) -> *mut c_ulong, + pub fn XIQueryDevice (_3: *mut Display, _2: c_int, _1: *mut c_int) -> *mut XIDeviceInfo, + pub fn XIQueryPointer (_12: *mut Display, _11: c_int, _10: c_ulong, _9: *mut c_ulong, _8: *mut c_ulong, _7: *mut c_double, _6: *mut c_double, _5: *mut c_double, _4: *mut c_double, _3: *mut XIButtonState, _2: *mut XIModifierState, _1: *mut XIModifierState) -> c_int, + pub fn XIQueryVersion (_3: *mut Display, _2: *mut c_int, _1: *mut c_int) -> c_int, + pub fn XISelectEvents (_4: *mut Display, _3: c_ulong, _2: *mut XIEventMask, _1: c_int) -> c_int, + pub fn 
XISetClientPointer (_3: *mut Display, _2: c_ulong, _1: c_int) -> c_int, + pub fn XISetFocus (_4: *mut Display, _3: c_int, _2: c_ulong, _1: c_ulong) -> c_int, + pub fn XIUndefineCursor (_3: *mut Display, _2: c_int, _1: c_ulong) -> c_int, + pub fn XIUngrabButton (_6: *mut Display, _5: c_int, _4: c_int, _3: c_ulong, _2: c_int, _1: *mut XIGrabModifiers) -> c_int, + pub fn XIUngrabDevice (_3: *mut Display, _2: c_int, _1: c_ulong) -> c_int, + pub fn XIUngrabEnter (_5: *mut Display, _4: c_int, _3: c_ulong, _2: c_int, _1: *mut XIGrabModifiers) -> c_int, + pub fn XIUngrabFocusIn (_5: *mut Display, _4: c_int, _3: c_ulong, _2: c_int, _1: *mut XIGrabModifiers) -> c_int, + pub fn XIUngrabKeycode (_6: *mut Display, _5: c_int, _4: c_int, _3: c_ulong, _2: c_int, _1: *mut XIGrabModifiers) -> c_int, + pub fn XIUngrabTouchBegin (_5: *mut Display, _4: c_int, _3: c_ulong, _2: c_int, _1: *mut XIGrabModifiers) -> c_int, + pub fn XIWarpPointer (_10: *mut Display, _9: c_int, _8: c_ulong, _7: c_ulong, _6: c_double, _5: c_double, _4: c_uint, _3: c_uint, _2: c_double, _1: c_double) -> c_int, +variadic: +globals: +} + + + + + + +pub const XInput_2_0: i32 = 7; +pub const XI_2_Major: i32 = 2; +pub const XI_2_Minor: i32 = 3; +pub const XIPropertyDeleted: i32 = 0; +pub const XIPropertyCreated: i32 = 1; +pub const XIPropertyModified: i32 = 2; +pub const XIPropModeReplace: i32 = 0; +pub const XIPropModePrepend: i32 = 1; +pub const XIPropModeAppend: i32 = 2; +pub const XINotifyNormal: i32 = 0; +pub const XINotifyGrab: i32 = 1; +pub const XINotifyUngrab: i32 = 2; +pub const XINotifyWhileGrabbed: i32 = 3; +pub const XINotifyPassiveGrab: i32 = 4; +pub const XINotifyPassiveUngrab: i32 = 5; +pub const XINotifyAncestor: i32 = 0; +pub const XINotifyVirtual: i32 = 1; +pub const XINotifyInferior: i32 = 2; +pub const XINotifyNonlinear: i32 = 3; +pub const XINotifyNonlinearVirtual: i32 = 4; +pub const XINotifyPointer: i32 = 5; +pub const XINotifyPointerRoot: i32 = 6; +pub const XINotifyDetailNone: i32 = 7; +pub const XIGrabModeSync: i32 = 0; +pub const XIGrabModeAsync: i32 = 1; +pub const XIGrabModeTouch: i32 = 2; +pub const XIGrabSuccess: i32 = 0; +pub const XIAlreadyGrabbed: i32 = 1; +pub const XIGrabInvalidTime: i32 = 2; +pub const XIGrabNotViewable: i32 = 3; +pub const XIGrabFrozen: i32 = 4; +pub const XIGrabtypeButton: i32 = 0; +pub const XIGrabtypeKeycode: i32 = 1; +pub const XIGrabtypeEnter: i32 = 2; +pub const XIGrabtypeFocusIn: i32 = 3; +pub const XIGrabtypeTouchBegin: i32 = 4; +pub const XIAnyButton: i32 = 0; +pub const XIAnyKeycode: i32 = 0; +pub const XIAsyncDevice: i32 = 0; +pub const XISyncDevice: i32 = 1; +pub const XIReplayDevice: i32 = 2; +pub const XIAsyncPairedDevice: i32 = 3; +pub const XIAsyncPair: i32 = 4; +pub const XISyncPair: i32 = 5; +pub const XIAcceptTouch: i32 = 6; +pub const XIRejectTouch: i32 = 7; +pub const XISlaveSwitch: i32 = 1; +pub const XIDeviceChange: i32 = 2; +pub const XIMasterAdded: i32 = (1 << 0); +pub const XIMasterRemoved: i32 = (1 << 1); +pub const XISlaveAdded: i32 = (1 << 2); +pub const XISlaveRemoved: i32 = (1 << 3); +pub const XISlaveAttached: i32 = (1 << 4); +pub const XISlaveDetached: i32 = (1 << 5); +pub const XIDeviceEnabled: i32 = (1 << 6); +pub const XIDeviceDisabled: i32 = (1 << 7); +pub const XIAddMaster: i32 = 1; +pub const XIRemoveMaster: i32 = 2; +pub const XIAttachSlave: i32 = 3; +pub const XIDetachSlave: i32 = 4; +pub const XIAttachToMaster: i32 = 1; +pub const XIFloating: i32 = 2; +pub const XIModeRelative: i32 = 0; +pub const XIModeAbsolute: i32 = 1; +pub const 
XIMasterPointer: i32 = 1; +pub const XIMasterKeyboard: i32 = 2; +pub const XISlavePointer: i32 = 3; +pub const XISlaveKeyboard: i32 = 4; +pub const XIFloatingSlave: i32 = 5; +pub const XIKeyClass: i32 = 0; +pub const XIButtonClass: i32 = 1; +pub const XIValuatorClass: i32 = 2; +pub const XIScrollClass: i32 = 3; +pub const XITouchClass: i32 = 8; +pub const XIScrollTypeVertical: i32 = 1; +pub const XIScrollTypeHorizontal: i32 = 2; +pub const XIScrollFlagNoEmulation: i32 = (1 << 0); +pub const XIScrollFlagPreferred: i32 = (1 << 1); +pub const XIKeyRepeat: i32 = (1 << 16); +pub const XIPointerEmulated: i32 = (1 << 16); +pub const XITouchPendingEnd: i32 = (1 << 16); +pub const XITouchEmulatingPointer: i32 = (1 << 17); +pub const XIBarrierPointerReleased: i32 = (1 << 0); +pub const XIBarrierDeviceIsGrabbed: i32 = (1 << 1); +pub const XIDirectTouch: i32 = 1; +pub const XIDependentTouch: i32 = 2; +pub const XIAllDevices: i32 = 0; +pub const XIAllMasterDevices: i32 = 1; +pub const XI_DeviceChanged: i32 = 1; +pub const XI_KeyPress: i32 = 2; +pub const XI_KeyRelease: i32 = 3; +pub const XI_ButtonPress: i32 = 4; +pub const XI_ButtonRelease: i32 = 5; +pub const XI_Motion: i32 = 6; +pub const XI_Enter: i32 = 7; +pub const XI_Leave: i32 = 8; +pub const XI_FocusIn: i32 = 9; +pub const XI_FocusOut: i32 = 10; +pub const XI_HierarchyChanged: i32 = 11; +pub const XI_PropertyEvent: i32 = 12; +pub const XI_RawKeyPress: i32 = 13; +pub const XI_RawKeyRelease: i32 = 14; +pub const XI_RawButtonPress: i32 = 15; +pub const XI_RawButtonRelease: i32 = 16; +pub const XI_RawMotion: i32 = 17; +pub const XI_TouchBegin: i32 = 18 ; +pub const XI_TouchUpdate: i32 = 19; +pub const XI_TouchEnd: i32 = 20; +pub const XI_TouchOwnership: i32 = 21; +pub const XI_RawTouchBegin: i32 = 22; +pub const XI_RawTouchUpdate: i32 = 23; +pub const XI_RawTouchEnd: i32 = 24; +pub const XI_BarrierHit: i32 = 25 ; +pub const XI_BarrierLeave: i32 = 26; +pub const XI_LASTEVENT: i32 = XI_BarrierLeave; +pub const XI_DeviceChangedMask: i32 = (1 << XI_DeviceChanged); +pub const XI_KeyPressMask: i32 = (1 << XI_KeyPress); +pub const XI_KeyReleaseMask: i32 = (1 << XI_KeyRelease); +pub const XI_ButtonPressMask: i32 = (1 << XI_ButtonPress); +pub const XI_ButtonReleaseMask: i32 = (1 << XI_ButtonRelease); +pub const XI_MotionMask: i32 = (1 << XI_Motion); +pub const XI_EnterMask: i32 = (1 << XI_Enter); +pub const XI_LeaveMask: i32 = (1 << XI_Leave); +pub const XI_FocusInMask: i32 = (1 << XI_FocusIn); +pub const XI_FocusOutMask: i32 = (1 << XI_FocusOut); +pub const XI_HierarchyChangedMask: i32 = (1 << XI_HierarchyChanged); +pub const XI_PropertyEventMask: i32 = (1 << XI_PropertyEvent); +pub const XI_RawKeyPressMask: i32 = (1 << XI_RawKeyPress); +pub const XI_RawKeyReleaseMask: i32 = (1 << XI_RawKeyRelease); +pub const XI_RawButtonPressMask: i32 = (1 << XI_RawButtonPress); +pub const XI_RawButtonReleaseMask: i32 = (1 << XI_RawButtonRelease); +pub const XI_RawMotionMask: i32 = (1 << XI_RawMotion); +pub const XI_TouchBeginMask: i32 = (1 << XI_TouchBegin); +pub const XI_TouchEndMask: i32 = (1 << XI_TouchEnd); +pub const XI_TouchOwnershipChangedMask: i32 = (1 << XI_TouchOwnership); +pub const XI_TouchUpdateMask: i32 = (1 << XI_TouchUpdate); +pub const XI_RawTouchBeginMask: i32 = (1 << XI_RawTouchBegin); +pub const XI_RawTouchEndMask: i32 = (1 << XI_RawTouchEnd); +pub const XI_RawTouchUpdateMask: i32 = (1 << XI_RawTouchUpdate); +pub const XI_BarrierHitMask: i32 = (1 << XI_BarrierHit); +pub const XI_BarrierLeaveMask: i32 = (1 << XI_BarrierLeave); + + + + + + +#[repr(C)] 
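+// Usage sketch (illustrative, not part of the vendored file): the XISetMask
+// helper and the XI_* constants above combine with XIEventMask (defined
+// below) to select XInput2 events, e.g. raw motion on all master devices,
+// assuming `dpy: *mut Display` and `root: c_ulong` from an open connection:
+//   let mut bits = [0 as ::std::os::raw::c_uchar; (XI_LASTEVENT as usize >> 3) + 1];
+//   XISetMask(&mut bits, XI_RawMotion);
+//   let mut em = XIEventMask { deviceid: XIAllMasterDevices, mask_len: bits.len() as ::std::os::raw::c_int, mask: bits.as_mut_ptr() };
+//   unsafe { XISelectEvents(dpy, root, &mut em, 1) };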
+#[derive(Debug, Copy)] +pub struct XIAddMasterInfo { + pub _type: ::std::os::raw::c_int, + pub name: *mut ::std::os::raw::c_char, + pub send_core: ::std::os::raw::c_int, + pub enable: ::std::os::raw::c_int, +} +impl ::std::clone::Clone for XIAddMasterInfo { + fn clone(&self) -> Self { *self } +} +impl ::std::default::Default for XIAddMasterInfo { + fn default() -> Self { unsafe { ::std::mem::zeroed() } } +} + +#[repr(C)] +#[derive(Debug, Copy)] +pub struct XIRemoveMasterInfo { + pub _type: ::std::os::raw::c_int, + pub deviceid: ::std::os::raw::c_int, + pub return_mode: ::std::os::raw::c_int, + pub return_pointer: ::std::os::raw::c_int, + pub return_keyboard: ::std::os::raw::c_int, +} +impl ::std::clone::Clone for XIRemoveMasterInfo { + fn clone(&self) -> Self { *self } +} +impl ::std::default::Default for XIRemoveMasterInfo { + fn default() -> Self { unsafe { ::std::mem::zeroed() } } +} + +#[repr(C)] +#[derive(Debug, Copy)] +pub struct XIAttachSlaveInfo { + pub _type: ::std::os::raw::c_int, + pub deviceid: ::std::os::raw::c_int, + pub new_master: ::std::os::raw::c_int, +} +impl ::std::clone::Clone for XIAttachSlaveInfo { + fn clone(&self) -> Self { *self } +} +impl ::std::default::Default for XIAttachSlaveInfo { + fn default() -> Self { unsafe { ::std::mem::zeroed() } } +} + +#[repr(C)] +#[derive(Debug, Copy)] +pub struct XIDetachSlaveInfo { + pub _type: ::std::os::raw::c_int, + pub deviceid: ::std::os::raw::c_int, +} +impl ::std::clone::Clone for XIDetachSlaveInfo { + fn clone(&self) -> Self { *self } +} +impl ::std::default::Default for XIDetachSlaveInfo { + fn default() -> Self { unsafe { ::std::mem::zeroed() } } +} + +#[repr(C)] +#[derive(Debug, Copy)] +pub struct XIAnyHierarchyChangeInfo { + pub _bindgen_data_: [u64; 3usize], +} +impl XIAnyHierarchyChangeInfo { + pub unsafe fn _type(&mut self) -> *mut ::std::os::raw::c_int { + let raw: *mut u8 = ::std::mem::transmute(&self._bindgen_data_); + ::std::mem::transmute(raw.offset(0)) + } + pub unsafe fn add(&mut self) -> *mut XIAddMasterInfo { + let raw: *mut u8 = ::std::mem::transmute(&self._bindgen_data_); + ::std::mem::transmute(raw.offset(0)) + } + pub unsafe fn remove(&mut self) -> *mut XIRemoveMasterInfo { + let raw: *mut u8 = ::std::mem::transmute(&self._bindgen_data_); + ::std::mem::transmute(raw.offset(0)) + } + pub unsafe fn attach(&mut self) -> *mut XIAttachSlaveInfo { + let raw: *mut u8 = ::std::mem::transmute(&self._bindgen_data_); + ::std::mem::transmute(raw.offset(0)) + } + pub unsafe fn detach(&mut self) -> *mut XIDetachSlaveInfo { + let raw: *mut u8 = ::std::mem::transmute(&self._bindgen_data_); + ::std::mem::transmute(raw.offset(0)) + } +} +impl ::std::clone::Clone for XIAnyHierarchyChangeInfo { + fn clone(&self) -> Self { *self } +} +impl ::std::default::Default for XIAnyHierarchyChangeInfo { + fn default() -> Self { unsafe { ::std::mem::zeroed() } } +} + +#[repr(C)] +#[derive(Debug, Copy)] +pub struct XIModifierState { + pub base: ::std::os::raw::c_int, + pub latched: ::std::os::raw::c_int, + pub locked: ::std::os::raw::c_int, + pub effective: ::std::os::raw::c_int, +} +impl ::std::clone::Clone for XIModifierState { + fn clone(&self) -> Self { *self } +} +impl ::std::default::Default for XIModifierState { + fn default() -> Self { unsafe { ::std::mem::zeroed() } } +} + +pub type XIGroupState = XIModifierState; + +#[repr(C)] +#[derive(Debug, Copy)] +pub struct XIButtonState { + pub mask_len: ::std::os::raw::c_int, + pub mask: *mut ::std::os::raw::c_uchar, +} +impl ::std::clone::Clone for XIButtonState { + fn clone(&self) 
-> Self { *self } +} +impl ::std::default::Default for XIButtonState { + fn default() -> Self { unsafe { ::std::mem::zeroed() } } +} + +#[repr(C)] +#[derive(Debug, Copy)] +pub struct XIValuatorState { + pub mask_len: ::std::os::raw::c_int, + pub mask: *mut ::std::os::raw::c_uchar, + pub values: *mut ::std::os::raw::c_double, +} +impl ::std::clone::Clone for XIValuatorState { + fn clone(&self) -> Self { *self } +} +impl ::std::default::Default for XIValuatorState { + fn default() -> Self { unsafe { ::std::mem::zeroed() } } +} + +#[repr(C)] +#[derive(Debug, Copy)] +pub struct XIEventMask { + pub deviceid: ::std::os::raw::c_int, + pub mask_len: ::std::os::raw::c_int, + pub mask: *mut ::std::os::raw::c_uchar, +} +impl ::std::clone::Clone for XIEventMask { + fn clone(&self) -> Self { *self } +} +impl ::std::default::Default for XIEventMask { + fn default() -> Self { unsafe { ::std::mem::zeroed() } } +} + +#[repr(C)] +#[derive(Debug, Copy)] +pub struct XIAnyClassInfo { + pub _type: ::std::os::raw::c_int, + pub sourceid: ::std::os::raw::c_int, +} +impl ::std::clone::Clone for XIAnyClassInfo { + fn clone(&self) -> Self { *self } +} +impl ::std::default::Default for XIAnyClassInfo { + fn default() -> Self { unsafe { ::std::mem::zeroed() } } +} + +#[repr(C)] +#[derive(Debug, Copy)] +pub struct XIButtonClassInfo { + pub _type: ::std::os::raw::c_int, + pub sourceid: ::std::os::raw::c_int, + pub num_buttons: ::std::os::raw::c_int, + pub labels: *mut Atom, + pub state: XIButtonState, +} +impl ::std::clone::Clone for XIButtonClassInfo { + fn clone(&self) -> Self { *self } +} +impl ::std::default::Default for XIButtonClassInfo { + fn default() -> Self { unsafe { ::std::mem::zeroed() } } +} + +#[repr(C)] +#[derive(Debug, Copy)] +pub struct XIKeyClassInfo { + pub _type: ::std::os::raw::c_int, + pub sourceid: ::std::os::raw::c_int, + pub num_keycodes: ::std::os::raw::c_int, + pub keycodes: *mut ::std::os::raw::c_int, +} +impl ::std::clone::Clone for XIKeyClassInfo { + fn clone(&self) -> Self { *self } +} +impl ::std::default::Default for XIKeyClassInfo { + fn default() -> Self { unsafe { ::std::mem::zeroed() } } +} + +#[repr(C)] +#[derive(Debug, Copy)] +pub struct XIValuatorClassInfo { + pub _type: ::std::os::raw::c_int, + pub sourceid: ::std::os::raw::c_int, + pub number: ::std::os::raw::c_int, + pub label: Atom, + pub min: ::std::os::raw::c_double, + pub max: ::std::os::raw::c_double, + pub value: ::std::os::raw::c_double, + pub resolution: ::std::os::raw::c_int, + pub mode: ::std::os::raw::c_int, +} +impl ::std::clone::Clone for XIValuatorClassInfo { + fn clone(&self) -> Self { *self } +} +impl ::std::default::Default for XIValuatorClassInfo { + fn default() -> Self { unsafe { ::std::mem::zeroed() } } +} + +#[repr(C)] +#[derive(Debug, Copy)] +pub struct XIScrollClassInfo { + pub _type: ::std::os::raw::c_int, + pub sourceid: ::std::os::raw::c_int, + pub number: ::std::os::raw::c_int, + pub scroll_type: ::std::os::raw::c_int, + pub increment: ::std::os::raw::c_double, + pub flags: ::std::os::raw::c_int, +} +impl ::std::clone::Clone for XIScrollClassInfo { + fn clone(&self) -> Self { *self } +} +impl ::std::default::Default for XIScrollClassInfo { + fn default() -> Self { unsafe { ::std::mem::zeroed() } } +} + +#[repr(C)] +#[derive(Debug, Copy)] +pub struct XITouchClassInfo { + pub _type: ::std::os::raw::c_int, + pub sourceid: ::std::os::raw::c_int, + pub mode: ::std::os::raw::c_int, + pub num_touches: ::std::os::raw::c_int, +} +impl ::std::clone::Clone for XITouchClassInfo { + fn clone(&self) -> Self { 
*self } +} +impl ::std::default::Default for XITouchClassInfo { + fn default() -> Self { unsafe { ::std::mem::zeroed() } } +} + +#[repr(C)] +#[derive(Debug, Copy)] +pub struct XIDeviceInfo { + pub deviceid: ::std::os::raw::c_int, + pub name: *mut ::std::os::raw::c_char, + pub _use: ::std::os::raw::c_int, + pub attachment: ::std::os::raw::c_int, + pub enabled: ::std::os::raw::c_int, + pub num_classes: ::std::os::raw::c_int, + pub classes: *mut *mut XIAnyClassInfo, +} +impl ::std::clone::Clone for XIDeviceInfo { + fn clone(&self) -> Self { *self } +} +impl ::std::default::Default for XIDeviceInfo { + fn default() -> Self { unsafe { ::std::mem::zeroed() } } +} + +#[repr(C)] +#[derive(Debug, Copy)] +pub struct XIGrabModifiers { + pub modifiers: ::std::os::raw::c_int, + pub status: ::std::os::raw::c_int, +} +impl ::std::clone::Clone for XIGrabModifiers { + fn clone(&self) -> Self { *self } +} +impl ::std::default::Default for XIGrabModifiers { + fn default() -> Self { unsafe { ::std::mem::zeroed() } } +} + +pub type BarrierEventID = ::std::os::raw::c_uint; + +#[repr(C)] +#[derive(Debug, Copy)] +pub struct XIBarrierReleasePointerInfo { + pub deviceid: ::std::os::raw::c_int, + pub barrier: PointerBarrier, + pub eventid: BarrierEventID, +} +impl ::std::clone::Clone for XIBarrierReleasePointerInfo { + fn clone(&self) -> Self { *self } +} +impl ::std::default::Default for XIBarrierReleasePointerInfo { + fn default() -> Self { unsafe { ::std::mem::zeroed() } } +} + +#[repr(C)] +#[derive(Debug, Copy)] +pub struct XIEvent { + pub _type: ::std::os::raw::c_int, + pub serial: ::std::os::raw::c_ulong, + pub send_event: ::std::os::raw::c_int, + pub display: *mut Display, + pub extension: ::std::os::raw::c_int, + pub evtype: ::std::os::raw::c_int, + pub time: Time, +} +impl ::std::clone::Clone for XIEvent { + fn clone(&self) -> Self { *self } +} +impl ::std::default::Default for XIEvent { + fn default() -> Self { unsafe { ::std::mem::zeroed() } } +} + +#[repr(C)] +#[derive(Debug, Copy)] +pub struct XIHierarchyInfo { + pub deviceid: ::std::os::raw::c_int, + pub attachment: ::std::os::raw::c_int, + pub _use: ::std::os::raw::c_int, + pub enabled: ::std::os::raw::c_int, + pub flags: ::std::os::raw::c_int, +} +impl ::std::clone::Clone for XIHierarchyInfo { + fn clone(&self) -> Self { *self } +} +impl ::std::default::Default for XIHierarchyInfo { + fn default() -> Self { unsafe { ::std::mem::zeroed() } } +} + +#[repr(C)] +#[derive(Debug, Copy)] +pub struct XIHierarchyEvent { + pub _type: ::std::os::raw::c_int, + pub serial: ::std::os::raw::c_ulong, + pub send_event: ::std::os::raw::c_int, + pub display: *mut Display, + pub extension: ::std::os::raw::c_int, + pub evtype: ::std::os::raw::c_int, + pub time: Time, + pub flags: ::std::os::raw::c_int, + pub num_info: ::std::os::raw::c_int, + pub info: *mut XIHierarchyInfo, +} +impl ::std::clone::Clone for XIHierarchyEvent { + fn clone(&self) -> Self { *self } +} +impl ::std::default::Default for XIHierarchyEvent { + fn default() -> Self { unsafe { ::std::mem::zeroed() } } +} + +#[repr(C)] +#[derive(Debug, Copy)] +pub struct XIDeviceChangedEvent { + pub _type: ::std::os::raw::c_int, + pub serial: ::std::os::raw::c_ulong, + pub send_event: ::std::os::raw::c_int, + pub display: *mut Display, + pub extension: ::std::os::raw::c_int, + pub evtype: ::std::os::raw::c_int, + pub time: Time, + pub deviceid: ::std::os::raw::c_int, + pub sourceid: ::std::os::raw::c_int, + pub reason: ::std::os::raw::c_int, + pub num_classes: ::std::os::raw::c_int, + pub classes: *mut *mut 
XIAnyClassInfo, +} +impl ::std::clone::Clone for XIDeviceChangedEvent { + fn clone(&self) -> Self { *self } +} +impl ::std::default::Default for XIDeviceChangedEvent { + fn default() -> Self { unsafe { ::std::mem::zeroed() } } +} + +#[repr(C)] +#[derive(Debug, Copy)] +pub struct XIDeviceEvent { + pub _type: ::std::os::raw::c_int, + pub serial: ::std::os::raw::c_ulong, + pub send_event: ::std::os::raw::c_int, + pub display: *mut Display, + pub extension: ::std::os::raw::c_int, + pub evtype: ::std::os::raw::c_int, + pub time: Time, + pub deviceid: ::std::os::raw::c_int, + pub sourceid: ::std::os::raw::c_int, + pub detail: ::std::os::raw::c_int, + pub root: Window, + pub event: Window, + pub child: Window, + pub root_x: ::std::os::raw::c_double, + pub root_y: ::std::os::raw::c_double, + pub event_x: ::std::os::raw::c_double, + pub event_y: ::std::os::raw::c_double, + pub flags: ::std::os::raw::c_int, + pub buttons: XIButtonState, + pub valuators: XIValuatorState, + pub mods: XIModifierState, + pub group: XIGroupState, +} +impl ::std::clone::Clone for XIDeviceEvent { + fn clone(&self) -> Self { *self } +} +impl ::std::default::Default for XIDeviceEvent { + fn default() -> Self { unsafe { ::std::mem::zeroed() } } +} + +#[repr(C)] +#[derive(Debug, Copy)] +pub struct XIRawEvent { + pub _type: ::std::os::raw::c_int, + pub serial: ::std::os::raw::c_ulong, + pub send_event: ::std::os::raw::c_int, + pub display: *mut Display, + pub extension: ::std::os::raw::c_int, + pub evtype: ::std::os::raw::c_int, + pub time: Time, + pub deviceid: ::std::os::raw::c_int, + pub sourceid: ::std::os::raw::c_int, + pub detail: ::std::os::raw::c_int, + pub flags: ::std::os::raw::c_int, + pub valuators: XIValuatorState, + pub raw_values: *mut ::std::os::raw::c_double, +} +impl ::std::clone::Clone for XIRawEvent { + fn clone(&self) -> Self { *self } +} +impl ::std::default::Default for XIRawEvent { + fn default() -> Self { unsafe { ::std::mem::zeroed() } } +} + +#[repr(C)] +#[derive(Debug, Copy)] +pub struct XIEnterEvent { + pub _type: ::std::os::raw::c_int, + pub serial: ::std::os::raw::c_ulong, + pub send_event: ::std::os::raw::c_int, + pub display: *mut Display, + pub extension: ::std::os::raw::c_int, + pub evtype: ::std::os::raw::c_int, + pub time: Time, + pub deviceid: ::std::os::raw::c_int, + pub sourceid: ::std::os::raw::c_int, + pub detail: ::std::os::raw::c_int, + pub root: Window, + pub event: Window, + pub child: Window, + pub root_x: ::std::os::raw::c_double, + pub root_y: ::std::os::raw::c_double, + pub event_x: ::std::os::raw::c_double, + pub event_y: ::std::os::raw::c_double, + pub mode: ::std::os::raw::c_int, + pub focus: ::std::os::raw::c_int, + pub same_screen: ::std::os::raw::c_int, + pub buttons: XIButtonState, + pub mods: XIModifierState, + pub group: XIGroupState, +} +impl ::std::clone::Clone for XIEnterEvent { + fn clone(&self) -> Self { *self } +} +impl ::std::default::Default for XIEnterEvent { + fn default() -> Self { unsafe { ::std::mem::zeroed() } } +} + +pub type XILeaveEvent = XIEnterEvent; +pub type XIFocusInEvent = XIEnterEvent; +pub type XIFocusOutEvent = XIEnterEvent; + +#[repr(C)] +#[derive(Debug, Copy)] +pub struct XIPropertyEvent { + pub _type: ::std::os::raw::c_int, + pub serial: ::std::os::raw::c_ulong, + pub send_event: ::std::os::raw::c_int, + pub display: *mut Display, + pub extension: ::std::os::raw::c_int, + pub evtype: ::std::os::raw::c_int, + pub time: Time, + pub deviceid: ::std::os::raw::c_int, + pub property: Atom, + pub what: ::std::os::raw::c_int, +} +impl 
::std::clone::Clone for XIPropertyEvent { + fn clone(&self) -> Self { *self } +} +impl ::std::default::Default for XIPropertyEvent { + fn default() -> Self { unsafe { ::std::mem::zeroed() } } +} + +#[repr(C)] +#[derive(Debug, Copy)] +pub struct XITouchOwnershipEvent { + pub _type: ::std::os::raw::c_int, + pub serial: ::std::os::raw::c_ulong, + pub send_event: ::std::os::raw::c_int, + pub display: *mut Display, + pub extension: ::std::os::raw::c_int, + pub evtype: ::std::os::raw::c_int, + pub time: Time, + pub deviceid: ::std::os::raw::c_int, + pub sourceid: ::std::os::raw::c_int, + pub touchid: ::std::os::raw::c_uint, + pub root: Window, + pub event: Window, + pub child: Window, + pub flags: ::std::os::raw::c_int, +} +impl ::std::clone::Clone for XITouchOwnershipEvent { + fn clone(&self) -> Self { *self } +} +impl ::std::default::Default for XITouchOwnershipEvent { + fn default() -> Self { unsafe { ::std::mem::zeroed() } } +} + +#[repr(C)] +#[derive(Debug, Copy)] +pub struct XIBarrierEvent { + pub _type: ::std::os::raw::c_int, + pub serial: ::std::os::raw::c_ulong, + pub send_event: ::std::os::raw::c_int, + pub display: *mut Display, + pub extension: ::std::os::raw::c_int, + pub evtype: ::std::os::raw::c_int, + pub time: Time, + pub deviceid: ::std::os::raw::c_int, + pub sourceid: ::std::os::raw::c_int, + pub event: Window, + pub root: Window, + pub root_x: ::std::os::raw::c_double, + pub root_y: ::std::os::raw::c_double, + pub dx: ::std::os::raw::c_double, + pub dy: ::std::os::raw::c_double, + pub dtime: ::std::os::raw::c_int, + pub flags: ::std::os::raw::c_int, + pub barrier: PointerBarrier, + pub eventid: BarrierEventID, +} +impl ::std::clone::Clone for XIBarrierEvent { + fn clone(&self) -> Self { *self } +} +impl ::std::default::Default for XIBarrierEvent { + fn default() -> Self { unsafe { ::std::mem::zeroed() } } +} diff --git a/third_party/rust/x11/src/xlib.rs b/third_party/rust/x11/src/xlib.rs new file mode 100644 index 000000000000..318dea68d339 --- /dev/null +++ b/third_party/rust/x11/src/xlib.rs @@ -0,0 +1,3404 @@ + + + + +use std::slice; +use std::os::raw::{ + c_char, + c_double, + c_int, + c_long, + c_short, + c_schar, + c_uchar, + c_uint, + c_ulong, + c_ushort, + c_void, +}; +use std::fmt; + +use libc::wchar_t; + +use ::internal::{ + mem_eq, + transmute_union, +}; +use xf86vmode; +use xrandr; +use xss; + + + +pub mod xkb {} + + + + + + + +x11_link! 
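+// Note (illustrative, not part of the vendored file): as in the other
+// modules, the x11_link! invocation below expands each entry into a plain
+// `extern "C"` declaration against libX11, so calls go straight through,
+// e.g., assuming an open `dpy: *mut Display`:
+//   unsafe { XBell(dpy, 0) };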
+x11_link! { Xlib, x11, ["libX11.so.6", "libX11.so"], 767,
+    pub fn XActivateScreenSaver (_1: *mut Display) -> c_int,
+    pub fn XAddConnectionWatch (_3: *mut Display, _2: Option<unsafe extern "C" fn (*mut Display, *mut c_char, c_int, c_int, *mut *mut c_char)>, _1: *mut c_char) -> c_int,
+    pub fn XAddExtension (_1: *mut Display) -> *mut XExtCodes,
+    pub fn XAddHost (_2: *mut Display, _1: *mut XHostAddress) -> c_int,
+    pub fn XAddHosts (_3: *mut Display, _2: *mut XHostAddress, _1: c_int) -> c_int,
+    pub fn XAddPixel (_2: *mut XImage, _1: c_long) -> c_int,
+    pub fn XAddToExtensionList (_2: *mut *mut XExtData, _1: *mut XExtData) -> c_int,
+    pub fn XAddToSaveSet (_2: *mut Display, _1: c_ulong) -> c_int,
+    pub fn XAllocClassHint () -> *mut XClassHint,
+    pub fn XAllocColor (_3: *mut Display, _2: c_ulong, _1: *mut XColor) -> c_int,
+    pub fn XAllocColorCells (_7: *mut Display, _6: c_ulong, _5: c_int, _4: *mut c_ulong, _3: c_uint, _2: *mut c_ulong, _1: c_uint) -> c_int,
+    pub fn XAllocColorPlanes (_11: *mut Display, _10: c_ulong, _9: c_int, _8: *mut c_ulong, _7: c_int, _6: c_int, _5: c_int, _4: c_int, _3: *mut c_ulong, _2: *mut c_ulong, _1: *mut c_ulong) -> c_int,
+    pub fn XAllocIconSize () -> *mut XIconSize,
+    pub fn XAllocNamedColor (_5: *mut Display, _4: c_ulong, _3: *const c_char, _2: *mut XColor, _1: *mut XColor) -> c_int,
+    pub fn XAllocSizeHints () -> *mut XSizeHints,
+    pub fn XAllocStandardColormap () -> *mut XStandardColormap,
+    pub fn XAllocWMHints () -> *mut XWMHints,
+    pub fn XAllowEvents (_3: *mut Display, _2: c_int, _1: c_ulong) -> c_int,
+    pub fn XAllPlanes () -> c_ulong,
+    pub fn XAutoRepeatOff (_1: *mut Display) -> c_int,
+    pub fn XAutoRepeatOn (_1: *mut Display) -> c_int,
+    pub fn XBaseFontNameListOfFontSet (_1: XFontSet) -> *mut c_char,
+    pub fn XBell (_2: *mut Display, _1: c_int) -> c_int,
+    pub fn XBitmapBitOrder (_1: *mut Display) -> c_int,
+    pub fn XBitmapPad (_1: *mut Display) -> c_int,
+    pub fn XBitmapUnit (_1: *mut Display) -> c_int,
+    pub fn XBlackPixel (_2: *mut Display, _1: c_int) -> c_ulong,
+    pub fn XBlackPixelOfScreen (_1: *mut Screen) -> c_ulong,
+    pub fn XCellsOfScreen (_1: *mut Screen) -> c_int,
+    pub fn XChangeActivePointerGrab (_4: *mut Display, _3: c_uint, _2: c_ulong, _1: c_ulong) -> c_int,
+    pub fn XChangeGC (_4: *mut Display, _3: GC, _2: c_ulong, _1: *mut XGCValues) -> c_int,
+    pub fn XChangeKeyboardControl (_3: *mut Display, _2: c_ulong, _1: *mut XKeyboardControl) -> c_int,
+    pub fn XChangeKeyboardMapping (_5: *mut Display, _4: c_int, _3: c_int, _2: *mut c_ulong, _1: c_int) -> c_int,
+    pub fn XChangePointerControl (_6: *mut Display, _5: c_int, _4: c_int, _3: c_int, _2: c_int, _1: c_int) -> c_int,
+    pub fn XChangeProperty (_8: *mut Display, _7: c_ulong, _6: c_ulong, _5: c_ulong, _4: c_int, _3: c_int, _2: *const c_uchar, _1: c_int) -> c_int,
+    pub fn XChangeSaveSet (_3: *mut Display, _2: c_ulong, _1: c_int) -> c_int,
+    pub fn XChangeWindowAttributes (_4: *mut Display, _3: c_ulong, _2: c_ulong, _1: *mut XSetWindowAttributes) -> c_int,
+    pub fn XCheckIfEvent (_4: *mut Display, _3: *mut XEvent, _2: Option<unsafe extern "C" fn (*mut Display, *mut XEvent, *mut c_char) -> c_int>, _1: *mut c_char) -> c_int,
+    pub fn XCheckMaskEvent (_3: *mut Display, _2: c_long, _1: *mut XEvent) -> c_int,
+    pub fn XCheckTypedEvent (_3: *mut Display, _2: c_int, _1: *mut XEvent) -> c_int,
+    pub fn XCheckTypedWindowEvent (_4: *mut Display, _3: c_ulong, _2: c_int, _1: *mut XEvent) -> c_int,
+    pub fn XCheckWindowEvent (_4: *mut Display, _3: c_ulong, _2: c_long, _1: *mut XEvent) -> c_int,
+    pub fn XCirculateSubwindows (_3: *mut Display, _2: c_ulong, _1: c_int) -> c_int,
+    pub fn XCirculateSubwindowsDown (_2: *mut Display, _1: c_ulong) -> c_int,
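+    // [Editorial note, not part of the vendored source] Parameter names in these
+    // declarations are positional placeholders (`_3`, `_2`, `_1`, counting down);
+    // only the types and their order, which follow the C prototypes, carry
+    // meaning. A hypothetical sketch of XAllocNamedColor, assuming `xlib` and
+    // `dpy` as in the note above:
+    //
+    //     use std::ffi::CString;
+    //     unsafe {
+    //         let cmap = (xlib.XDefaultColormap)(dpy, (xlib.XDefaultScreen)(dpy));
+    //         let name = CString::new("red").unwrap();
+    //         let mut screen_def: XColor = std::mem::zeroed();
+    //         let mut exact_def: XColor = std::mem::zeroed();
+    //         if (xlib.XAllocNamedColor)(dpy, cmap, name.as_ptr(),
+    //                                    &mut screen_def, &mut exact_def) != 0 {
+    //             // screen_def.pixel can now be used with XSetForeground etc.
+    //         }
+    //     }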
+ pub fn XCirculateSubwindowsUp (_2: *mut Display, _1: c_ulong) -> c_int, + pub fn XClearArea (_7: *mut Display, _6: c_ulong, _5: c_int, _4: c_int, _3: c_uint, _2: c_uint, _1: c_int) -> c_int, + pub fn XClearWindow (_2: *mut Display, _1: c_ulong) -> c_int, + pub fn XClipBox (_2: Region, _1: *mut XRectangle) -> c_int, + pub fn XCloseDisplay (_1: *mut Display) -> c_int, + pub fn XCloseIM (_1: XIM) -> c_int, + pub fn XCloseOM (_1: XOM) -> c_int, + pub fn XcmsAddColorSpace (_1: *mut XcmsColorSpace) -> c_int, + pub fn XcmsAddFunctionSet (_1: *mut XcmsFunctionSet) -> c_int, + pub fn XcmsAllocColor (_4: *mut Display, _3: c_ulong, _2: *mut XcmsColor, _1: c_ulong) -> c_int, + pub fn XcmsAllocNamedColor (_6: *mut Display, _5: c_ulong, _4: *const c_char, _3: *mut XcmsColor, _2: *mut XcmsColor, _1: c_ulong) -> c_int, + pub fn XcmsCCCOfColormap (_2: *mut Display, _1: c_ulong) -> XcmsCCC, + pub fn XcmsCIELabClipab (_5: XcmsCCC, _4: *mut XcmsColor, _3: c_uint, _2: c_uint, _1: *mut c_int) -> c_int, + pub fn XcmsCIELabClipL (_5: XcmsCCC, _4: *mut XcmsColor, _3: c_uint, _2: c_uint, _1: *mut c_int) -> c_int, + pub fn XcmsCIELabClipLab (_5: XcmsCCC, _4: *mut XcmsColor, _3: c_uint, _2: c_uint, _1: *mut c_int) -> c_int, + pub fn XcmsCIELabQueryMaxC (_4: XcmsCCC, _3: c_double, _2: c_double, _1: *mut XcmsColor) -> c_int, + pub fn XcmsCIELabQueryMaxL (_4: XcmsCCC, _3: c_double, _2: c_double, _1: *mut XcmsColor) -> c_int, + pub fn XcmsCIELabQueryMaxLC (_3: XcmsCCC, _2: c_double, _1: *mut XcmsColor) -> c_int, + pub fn XcmsCIELabQueryMinL (_4: XcmsCCC, _3: c_double, _2: c_double, _1: *mut XcmsColor) -> c_int, + pub fn XcmsCIELabToCIEXYZ (_4: XcmsCCC, _3: *mut XcmsColor, _2: *mut XcmsColor, _1: c_uint) -> c_int, + pub fn XcmsCIELabWhiteShiftColors (_7: XcmsCCC, _6: *mut XcmsColor, _5: *mut XcmsColor, _4: c_ulong, _3: *mut XcmsColor, _2: c_uint, _1: *mut c_int) -> c_int, + pub fn XcmsCIELuvClipL (_5: XcmsCCC, _4: *mut XcmsColor, _3: c_uint, _2: c_uint, _1: *mut c_int) -> c_int, + pub fn XcmsCIELuvClipLuv (_5: XcmsCCC, _4: *mut XcmsColor, _3: c_uint, _2: c_uint, _1: *mut c_int) -> c_int, + pub fn XcmsCIELuvClipuv (_5: XcmsCCC, _4: *mut XcmsColor, _3: c_uint, _2: c_uint, _1: *mut c_int) -> c_int, + pub fn XcmsCIELuvQueryMaxC (_4: XcmsCCC, _3: c_double, _2: c_double, _1: *mut XcmsColor) -> c_int, + pub fn XcmsCIELuvQueryMaxL (_4: XcmsCCC, _3: c_double, _2: c_double, _1: *mut XcmsColor) -> c_int, + pub fn XcmsCIELuvQueryMaxLC (_3: XcmsCCC, _2: c_double, _1: *mut XcmsColor) -> c_int, + pub fn XcmsCIELuvQueryMinL (_4: XcmsCCC, _3: c_double, _2: c_double, _1: *mut XcmsColor) -> c_int, + pub fn XcmsCIELuvToCIEuvY (_4: XcmsCCC, _3: *mut XcmsColor, _2: *mut XcmsColor, _1: c_uint) -> c_int, + pub fn XcmsCIELuvWhiteShiftColors (_7: XcmsCCC, _6: *mut XcmsColor, _5: *mut XcmsColor, _4: c_ulong, _3: *mut XcmsColor, _2: c_uint, _1: *mut c_int) -> c_int, + pub fn XcmsCIEuvYToCIELuv (_4: XcmsCCC, _3: *mut XcmsColor, _2: *mut XcmsColor, _1: c_uint) -> c_int, + pub fn XcmsCIEuvYToCIEXYZ (_4: XcmsCCC, _3: *mut XcmsColor, _2: *mut XcmsColor, _1: c_uint) -> c_int, + pub fn XcmsCIEuvYToTekHVC (_4: XcmsCCC, _3: *mut XcmsColor, _2: *mut XcmsColor, _1: c_uint) -> c_int, + pub fn XcmsCIExyYToCIEXYZ (_4: XcmsCCC, _3: *mut XcmsColor, _2: *mut XcmsColor, _1: c_uint) -> c_int, + pub fn XcmsCIEXYZToCIELab (_4: XcmsCCC, _3: *mut XcmsColor, _2: *mut XcmsColor, _1: c_uint) -> c_int, + pub fn XcmsCIEXYZToCIEuvY (_4: XcmsCCC, _3: *mut XcmsColor, _2: *mut XcmsColor, _1: c_uint) -> c_int, + pub fn XcmsCIEXYZToCIExyY (_4: XcmsCCC, _3: *mut XcmsColor, _2: *mut 
XcmsColor, _1: c_uint) -> c_int, + pub fn XcmsCIEXYZToRGBi (_4: XcmsCCC, _3: *mut XcmsColor, _2: c_uint, _1: *mut c_int) -> c_int, + pub fn XcmsClientWhitePointOfCCC (_1: XcmsCCC) -> *mut XcmsColor, + pub fn XcmsConvertColors (_5: XcmsCCC, _4: *mut XcmsColor, _3: c_uint, _2: c_ulong, _1: *mut c_int) -> c_int, + pub fn XcmsCreateCCC (_8: *mut Display, _7: c_int, _6: *mut Visual, _5: *mut XcmsColor, _4: Option c_int>, _3: *mut c_char, _2: Option c_int>, _1: *mut c_char) -> XcmsCCC, + pub fn XcmsDefaultCCC (_2: *mut Display, _1: c_int) -> XcmsCCC, + pub fn XcmsDisplayOfCCC (_1: XcmsCCC) -> *mut Display, + pub fn XcmsFormatOfPrefix (_1: *mut c_char) -> c_ulong, + pub fn XcmsFreeCCC (_1: XcmsCCC) -> (), + pub fn XcmsLookupColor (_6: *mut Display, _5: c_ulong, _4: *const c_char, _3: *mut XcmsColor, _2: *mut XcmsColor, _1: c_ulong) -> c_int, + pub fn XcmsPrefixOfFormat (_1: c_ulong) -> *mut c_char, + pub fn XcmsQueryBlack (_3: XcmsCCC, _2: c_ulong, _1: *mut XcmsColor) -> c_int, + pub fn XcmsQueryBlue (_3: XcmsCCC, _2: c_ulong, _1: *mut XcmsColor) -> c_int, + pub fn XcmsQueryColor (_4: *mut Display, _3: c_ulong, _2: *mut XcmsColor, _1: c_ulong) -> c_int, + pub fn XcmsQueryColors (_5: *mut Display, _4: c_ulong, _3: *mut XcmsColor, _2: c_uint, _1: c_ulong) -> c_int, + pub fn XcmsQueryGreen (_3: XcmsCCC, _2: c_ulong, _1: *mut XcmsColor) -> c_int, + pub fn XcmsQueryRed (_3: XcmsCCC, _2: c_ulong, _1: *mut XcmsColor) -> c_int, + pub fn XcmsQueryWhite (_3: XcmsCCC, _2: c_ulong, _1: *mut XcmsColor) -> c_int, + pub fn XcmsRGBiToCIEXYZ (_4: XcmsCCC, _3: *mut XcmsColor, _2: c_uint, _1: *mut c_int) -> c_int, + pub fn XcmsRGBiToRGB (_4: XcmsCCC, _3: *mut XcmsColor, _2: c_uint, _1: *mut c_int) -> c_int, + pub fn XcmsRGBToRGBi (_4: XcmsCCC, _3: *mut XcmsColor, _2: c_uint, _1: *mut c_int) -> c_int, + pub fn XcmsScreenNumberOfCCC (_1: XcmsCCC) -> c_int, + pub fn XcmsScreenWhitePointOfCCC (_1: XcmsCCC) -> *mut XcmsColor, + pub fn XcmsSetCCCOfColormap (_3: *mut Display, _2: c_ulong, _1: XcmsCCC) -> XcmsCCC, + pub fn XcmsSetCompressionProc (_3: XcmsCCC, _2: Option c_int>, _1: *mut c_char) -> Option c_int>, + pub fn XcmsSetWhiteAdjustProc (_3: XcmsCCC, _2: Option c_int>, _1: *mut c_char) -> Option c_int>, + pub fn XcmsSetWhitePoint (_2: XcmsCCC, _1: *mut XcmsColor) -> c_int, + pub fn XcmsStoreColor (_3: *mut Display, _2: c_ulong, _1: *mut XcmsColor) -> c_int, + pub fn XcmsStoreColors (_5: *mut Display, _4: c_ulong, _3: *mut XcmsColor, _2: c_uint, _1: *mut c_int) -> c_int, + pub fn XcmsTekHVCClipC (_5: XcmsCCC, _4: *mut XcmsColor, _3: c_uint, _2: c_uint, _1: *mut c_int) -> c_int, + pub fn XcmsTekHVCClipV (_5: XcmsCCC, _4: *mut XcmsColor, _3: c_uint, _2: c_uint, _1: *mut c_int) -> c_int, + pub fn XcmsTekHVCClipVC (_5: XcmsCCC, _4: *mut XcmsColor, _3: c_uint, _2: c_uint, _1: *mut c_int) -> c_int, + pub fn XcmsTekHVCQueryMaxC (_4: XcmsCCC, _3: c_double, _2: c_double, _1: *mut XcmsColor) -> c_int, + pub fn XcmsTekHVCQueryMaxV (_4: XcmsCCC, _3: c_double, _2: c_double, _1: *mut XcmsColor) -> c_int, + pub fn XcmsTekHVCQueryMaxVC (_3: XcmsCCC, _2: c_double, _1: *mut XcmsColor) -> c_int, + pub fn XcmsTekHVCQueryMaxVSamples (_4: XcmsCCC, _3: c_double, _2: *mut XcmsColor, _1: c_uint) -> c_int, + pub fn XcmsTekHVCQueryMinV (_4: XcmsCCC, _3: c_double, _2: c_double, _1: *mut XcmsColor) -> c_int, + pub fn XcmsTekHVCToCIEuvY (_4: XcmsCCC, _3: *mut XcmsColor, _2: *mut XcmsColor, _1: c_uint) -> c_int, + pub fn XcmsTekHVCWhiteShiftColors (_7: XcmsCCC, _6: *mut XcmsColor, _5: *mut XcmsColor, _4: c_ulong, _3: *mut XcmsColor, _2: c_uint, 
_1: *mut c_int) -> c_int, + pub fn XcmsVisualOfCCC (_1: XcmsCCC) -> *mut Visual, + pub fn XConfigureWindow (_4: *mut Display, _3: c_ulong, _2: c_uint, _1: *mut XWindowChanges) -> c_int, + pub fn XConnectionNumber (_1: *mut Display) -> c_int, + pub fn XContextDependentDrawing (_1: XFontSet) -> c_int, + pub fn XContextualDrawing (_1: XFontSet) -> c_int, + pub fn XConvertCase (_3: c_ulong, _2: *mut c_ulong, _1: *mut c_ulong) -> (), + pub fn XConvertSelection (_6: *mut Display, _5: c_ulong, _4: c_ulong, _3: c_ulong, _2: c_ulong, _1: c_ulong) -> c_int, + pub fn XCopyArea (_10: *mut Display, _9: c_ulong, _8: c_ulong, _7: GC, _6: c_int, _5: c_int, _4: c_uint, _3: c_uint, _2: c_int, _1: c_int) -> c_int, + pub fn XCopyColormapAndFree (_2: *mut Display, _1: c_ulong) -> c_ulong, + pub fn XCopyGC (_4: *mut Display, _3: GC, _2: c_ulong, _1: GC) -> c_int, + pub fn XCopyPlane (_11: *mut Display, _10: c_ulong, _9: c_ulong, _8: GC, _7: c_int, _6: c_int, _5: c_uint, _4: c_uint, _3: c_int, _2: c_int, _1: c_ulong) -> c_int, + pub fn XCreateBitmapFromData (_5: *mut Display, _4: c_ulong, _3: *const c_char, _2: c_uint, _1: c_uint) -> c_ulong, + pub fn XCreateColormap (_4: *mut Display, _3: c_ulong, _2: *mut Visual, _1: c_int) -> c_ulong, + pub fn XCreateFontCursor (_2: *mut Display, _1: c_uint) -> c_ulong, + pub fn XCreateFontSet (_5: *mut Display, _4: *const c_char, _3: *mut *mut *mut c_char, _2: *mut c_int, _1: *mut *mut c_char) -> XFontSet, + pub fn XCreateGC (_4: *mut Display, _3: c_ulong, _2: c_ulong, _1: *mut XGCValues) -> GC, + pub fn XCreateGlyphCursor (_7: *mut Display, _6: c_ulong, _5: c_ulong, _4: c_uint, _3: c_uint, _2: *const XColor, _1: *const XColor) -> c_ulong, + pub fn XCreateImage (_10: *mut Display, _9: *mut Visual, _8: c_uint, _7: c_int, _6: c_int, _5: *mut c_char, _4: c_uint, _3: c_uint, _2: c_int, _1: c_int) -> *mut XImage, + pub fn XCreatePixmap (_5: *mut Display, _4: c_ulong, _3: c_uint, _2: c_uint, _1: c_uint) -> c_ulong, + pub fn XCreatePixmapCursor (_7: *mut Display, _6: c_ulong, _5: c_ulong, _4: *mut XColor, _3: *mut XColor, _2: c_uint, _1: c_uint) -> c_ulong, + pub fn XCreatePixmapFromBitmapData (_8: *mut Display, _7: c_ulong, _6: *mut c_char, _5: c_uint, _4: c_uint, _3: c_ulong, _2: c_ulong, _1: c_uint) -> c_ulong, + pub fn XCreateRegion () -> Region, + pub fn XCreateSimpleWindow (_9: *mut Display, _8: c_ulong, _7: c_int, _6: c_int, _5: c_uint, _4: c_uint, _3: c_uint, _2: c_ulong, _1: c_ulong) -> c_ulong, + pub fn XCreateWindow (_12: *mut Display, _11: c_ulong, _10: c_int, _9: c_int, _8: c_uint, _7: c_uint, _6: c_uint, _5: c_int, _4: c_uint, _3: *mut Visual, _2: c_ulong, _1: *mut XSetWindowAttributes) -> c_ulong, + pub fn XDefaultColormap (_2: *mut Display, _1: c_int) -> c_ulong, + pub fn XDefaultColormapOfScreen (_1: *mut Screen) -> c_ulong, + pub fn XDefaultDepth (_2: *mut Display, _1: c_int) -> c_int, + pub fn XDefaultDepthOfScreen (_1: *mut Screen) -> c_int, + pub fn XDefaultGC (_2: *mut Display, _1: c_int) -> GC, + pub fn XDefaultGCOfScreen (_1: *mut Screen) -> GC, + pub fn XDefaultRootWindow (_1: *mut Display) -> c_ulong, + pub fn XDefaultScreen (_1: *mut Display) -> c_int, + pub fn XDefaultScreenOfDisplay (_1: *mut Display) -> *mut Screen, + pub fn XDefaultString () -> *const c_char, + pub fn XDefaultVisual (_2: *mut Display, _1: c_int) -> *mut Visual, + pub fn XDefaultVisualOfScreen (_1: *mut Screen) -> *mut Visual, + pub fn XDefineCursor (_3: *mut Display, _2: c_ulong, _1: c_ulong) -> c_int, + pub fn XDeleteContext (_3: *mut Display, _2: c_ulong, _1: c_int) -> c_int, + pub 
fn XDeleteModifiermapEntry (_3: *mut XModifierKeymap, _2: c_uchar, _1: c_int) -> *mut XModifierKeymap, + pub fn XDeleteProperty (_3: *mut Display, _2: c_ulong, _1: c_ulong) -> c_int, + pub fn XDestroyIC (_1: XIC) -> (), + pub fn XDestroyImage (_1: *mut XImage) -> c_int, + pub fn XDestroyOC (_1: XFontSet) -> (), + pub fn XDestroyRegion (_1: Region) -> c_int, + pub fn XDestroySubwindows (_2: *mut Display, _1: c_ulong) -> c_int, + pub fn XDestroyWindow (_2: *mut Display, _1: c_ulong) -> c_int, + pub fn XDirectionalDependentDrawing (_1: XFontSet) -> c_int, + pub fn XDisableAccessControl (_1: *mut Display) -> c_int, + pub fn XDisplayCells (_2: *mut Display, _1: c_int) -> c_int, + pub fn XDisplayHeight (_2: *mut Display, _1: c_int) -> c_int, + pub fn XDisplayHeightMM (_2: *mut Display, _1: c_int) -> c_int, + pub fn XDisplayKeycodes (_3: *mut Display, _2: *mut c_int, _1: *mut c_int) -> c_int, + pub fn XDisplayMotionBufferSize (_1: *mut Display) -> c_ulong, + pub fn XDisplayName (_1: *const c_char) -> *mut c_char, + pub fn XDisplayOfIM (_1: XIM) -> *mut Display, + pub fn XDisplayOfOM (_1: XOM) -> *mut Display, + pub fn XDisplayOfScreen (_1: *mut Screen) -> *mut Display, + pub fn XDisplayPlanes (_2: *mut Display, _1: c_int) -> c_int, + pub fn XDisplayString (_1: *mut Display) -> *mut c_char, + pub fn XDisplayWidth (_2: *mut Display, _1: c_int) -> c_int, + pub fn XDisplayWidthMM (_2: *mut Display, _1: c_int) -> c_int, + pub fn XDoesBackingStore (_1: *mut Screen) -> c_int, + pub fn XDoesSaveUnders (_1: *mut Screen) -> c_int, + pub fn XDrawArc (_9: *mut Display, _8: c_ulong, _7: GC, _6: c_int, _5: c_int, _4: c_uint, _3: c_uint, _2: c_int, _1: c_int) -> c_int, + pub fn XDrawArcs (_5: *mut Display, _4: c_ulong, _3: GC, _2: *mut XArc, _1: c_int) -> c_int, + pub fn XDrawImageString (_7: *mut Display, _6: c_ulong, _5: GC, _4: c_int, _3: c_int, _2: *const c_char, _1: c_int) -> c_int, + pub fn XDrawImageString16 (_7: *mut Display, _6: c_ulong, _5: GC, _4: c_int, _3: c_int, _2: *const XChar2b, _1: c_int) -> c_int, + pub fn XDrawLine (_7: *mut Display, _6: c_ulong, _5: GC, _4: c_int, _3: c_int, _2: c_int, _1: c_int) -> c_int, + pub fn XDrawLines (_6: *mut Display, _5: c_ulong, _4: GC, _3: *mut XPoint, _2: c_int, _1: c_int) -> c_int, + pub fn XDrawPoint (_5: *mut Display, _4: c_ulong, _3: GC, _2: c_int, _1: c_int) -> c_int, + pub fn XDrawPoints (_6: *mut Display, _5: c_ulong, _4: GC, _3: *mut XPoint, _2: c_int, _1: c_int) -> c_int, + pub fn XDrawRectangle (_7: *mut Display, _6: c_ulong, _5: GC, _4: c_int, _3: c_int, _2: c_uint, _1: c_uint) -> c_int, + pub fn XDrawRectangles (_5: *mut Display, _4: c_ulong, _3: GC, _2: *mut XRectangle, _1: c_int) -> c_int, + pub fn XDrawSegments (_5: *mut Display, _4: c_ulong, _3: GC, _2: *mut XSegment, _1: c_int) -> c_int, + pub fn XDrawString (_7: *mut Display, _6: c_ulong, _5: GC, _4: c_int, _3: c_int, _2: *const c_char, _1: c_int) -> c_int, + pub fn XDrawString16 (_7: *mut Display, _6: c_ulong, _5: GC, _4: c_int, _3: c_int, _2: *const XChar2b, _1: c_int) -> c_int, + pub fn XDrawText (_7: *mut Display, _6: c_ulong, _5: GC, _4: c_int, _3: c_int, _2: *mut XTextItem, _1: c_int) -> c_int, + pub fn XDrawText16 (_7: *mut Display, _6: c_ulong, _5: GC, _4: c_int, _3: c_int, _2: *mut XTextItem16, _1: c_int) -> c_int, + pub fn XEHeadOfExtensionList (_1: XEDataObject) -> *mut *mut XExtData, + pub fn XEmptyRegion (_1: Region) -> c_int, + pub fn XEnableAccessControl (_1: *mut Display) -> c_int, + pub fn XEqualRegion (_2: Region, _1: Region) -> c_int, + pub fn XESetBeforeFlush (_3: *mut 
Display, _2: c_int, _1: Option) -> Option, + pub fn XESetCloseDisplay (_3: *mut Display, _2: c_int, _1: Option c_int>) -> Option c_int>, + pub fn XESetCopyEventCookie (_3: *mut Display, _2: c_int, _1: Option c_int>) -> Option c_int>, + pub fn XESetCopyGC (_3: *mut Display, _2: c_int, _1: Option c_int>) -> Option c_int>, + pub fn XESetCreateFont (_3: *mut Display, _2: c_int, _1: Option c_int>) -> Option c_int>, + pub fn XESetCreateGC (_3: *mut Display, _2: c_int, _1: Option c_int>) -> Option c_int>, + pub fn XESetError (_3: *mut Display, _2: c_int, _1: Option c_int>) -> Option c_int>, + pub fn XESetErrorString (_3: *mut Display, _2: c_int, _1: Option *mut c_char>) -> Option *mut c_char>, + pub fn XESetEventToWire (_3: *mut Display, _2: c_int, _1: Option c_int>) -> Option c_int>, + pub fn XESetFlushGC (_3: *mut Display, _2: c_int, _1: Option c_int>) -> Option c_int>, + pub fn XESetFreeFont (_3: *mut Display, _2: c_int, _1: Option c_int>) -> Option c_int>, + pub fn XESetFreeGC (_3: *mut Display, _2: c_int, _1: Option c_int>) -> Option c_int>, + pub fn XESetPrintErrorValues (_3: *mut Display, _2: c_int, _1: Option) -> Option, + pub fn XESetWireToError (_3: *mut Display, _2: c_int, _1: Option c_int>) -> Option c_int>, + pub fn XESetWireToEvent (_3: *mut Display, _2: c_int, _1: Option c_int>) -> Option c_int>, + pub fn XESetWireToEventCookie (_3: *mut Display, _2: c_int, _1: Option c_int>) -> Option c_int>, + pub fn XEventMaskOfScreen (_1: *mut Screen) -> c_long, + pub fn XEventsQueued (_2: *mut Display, _1: c_int) -> c_int, + pub fn XExtendedMaxRequestSize (_1: *mut Display) -> c_long, + pub fn XExtentsOfFontSet (_1: XFontSet) -> *mut XFontSetExtents, + pub fn XFetchBuffer (_3: *mut Display, _2: *mut c_int, _1: c_int) -> *mut c_char, + pub fn XFetchBytes (_2: *mut Display, _1: *mut c_int) -> *mut c_char, + pub fn XFetchName (_3: *mut Display, _2: c_ulong, _1: *mut *mut c_char) -> c_int, + pub fn XFillArc (_9: *mut Display, _8: c_ulong, _7: GC, _6: c_int, _5: c_int, _4: c_uint, _3: c_uint, _2: c_int, _1: c_int) -> c_int, + pub fn XFillArcs (_5: *mut Display, _4: c_ulong, _3: GC, _2: *mut XArc, _1: c_int) -> c_int, + pub fn XFillPolygon (_7: *mut Display, _6: c_ulong, _5: GC, _4: *mut XPoint, _3: c_int, _2: c_int, _1: c_int) -> c_int, + pub fn XFillRectangle (_7: *mut Display, _6: c_ulong, _5: GC, _4: c_int, _3: c_int, _2: c_uint, _1: c_uint) -> c_int, + pub fn XFillRectangles (_5: *mut Display, _4: c_ulong, _3: GC, _2: *mut XRectangle, _1: c_int) -> c_int, + pub fn XFilterEvent (_2: *mut XEvent, _1: c_ulong) -> c_int, + pub fn XFindContext (_4: *mut Display, _3: c_ulong, _2: c_int, _1: *mut *mut c_char) -> c_int, + pub fn XFindOnExtensionList (_2: *mut *mut XExtData, _1: c_int) -> *mut XExtData, + pub fn XFlush (_1: *mut Display) -> c_int, + pub fn XFlushGC (_2: *mut Display, _1: GC) -> (), + pub fn XFontsOfFontSet (_3: XFontSet, _2: *mut *mut *mut XFontStruct, _1: *mut *mut *mut c_char) -> c_int, + pub fn XForceScreenSaver (_2: *mut Display, _1: c_int) -> c_int, + pub fn XFree (_1: *mut c_void) -> c_int, + pub fn XFreeColormap (_2: *mut Display, _1: c_ulong) -> c_int, + pub fn XFreeColors (_5: *mut Display, _4: c_ulong, _3: *mut c_ulong, _2: c_int, _1: c_ulong) -> c_int, + pub fn XFreeCursor (_2: *mut Display, _1: c_ulong) -> c_int, + pub fn XFreeEventData (_2: *mut Display, _1: *mut XGenericEventCookie) -> (), + pub fn XFreeExtensionList (_1: *mut *mut c_char) -> c_int, + pub fn XFreeFont (_2: *mut Display, _1: *mut XFontStruct) -> c_int, + pub fn XFreeFontInfo (_3: *mut *mut c_char, _2: *mut 
XFontStruct, _1: c_int) -> c_int, + pub fn XFreeFontNames (_1: *mut *mut c_char) -> c_int, + pub fn XFreeFontPath (_1: *mut *mut c_char) -> c_int, + pub fn XFreeFontSet (_2: *mut Display, _1: XFontSet) -> (), + pub fn XFreeGC (_2: *mut Display, _1: GC) -> c_int, + pub fn XFreeModifiermap (_1: *mut XModifierKeymap) -> c_int, + pub fn XFreePixmap (_2: *mut Display, _1: c_ulong) -> c_int, + pub fn XFreeStringList (_1: *mut *mut c_char) -> (), + pub fn XGContextFromGC (_1: GC) -> c_ulong, + pub fn XGeometry (_13: *mut Display, _12: c_int, _11: *const c_char, _10: *const c_char, _9: c_uint, _8: c_uint, _7: c_uint, _6: c_int, _5: c_int, _4: *mut c_int, _3: *mut c_int, _2: *mut c_int, _1: *mut c_int) -> c_int, + pub fn XGetAtomName (_2: *mut Display, _1: c_ulong) -> *mut c_char, + pub fn XGetAtomNames (_4: *mut Display, _3: *mut c_ulong, _2: c_int, _1: *mut *mut c_char) -> c_int, + pub fn XGetClassHint (_3: *mut Display, _2: c_ulong, _1: *mut XClassHint) -> c_int, + pub fn XGetCommand (_4: *mut Display, _3: c_ulong, _2: *mut *mut *mut c_char, _1: *mut c_int) -> c_int, + pub fn XGetDefault (_3: *mut Display, _2: *const c_char, _1: *const c_char) -> *mut c_char, + pub fn XGetErrorDatabaseText (_6: *mut Display, _5: *const c_char, _4: *const c_char, _3: *const c_char, _2: *mut c_char, _1: c_int) -> c_int, + pub fn XGetErrorText (_4: *mut Display, _3: c_int, _2: *mut c_char, _1: c_int) -> c_int, + pub fn XGetEventData (_2: *mut Display, _1: *mut XGenericEventCookie) -> c_int, + pub fn XGetFontPath (_2: *mut Display, _1: *mut c_int) -> *mut *mut c_char, + pub fn XGetFontProperty (_3: *mut XFontStruct, _2: c_ulong, _1: *mut c_ulong) -> c_int, + pub fn XGetGCValues (_4: *mut Display, _3: GC, _2: c_ulong, _1: *mut XGCValues) -> c_int, + pub fn XGetGeometry (_9: *mut Display, _8: c_ulong, _7: *mut c_ulong, _6: *mut c_int, _5: *mut c_int, _4: *mut c_uint, _3: *mut c_uint, _2: *mut c_uint, _1: *mut c_uint) -> c_int, + pub fn XGetIconName (_3: *mut Display, _2: c_ulong, _1: *mut *mut c_char) -> c_int, + pub fn XGetIconSizes (_4: *mut Display, _3: c_ulong, _2: *mut *mut XIconSize, _1: *mut c_int) -> c_int, + pub fn XGetImage (_8: *mut Display, _7: c_ulong, _6: c_int, _5: c_int, _4: c_uint, _3: c_uint, _2: c_ulong, _1: c_int) -> *mut XImage, + pub fn XGetInputFocus (_3: *mut Display, _2: *mut c_ulong, _1: *mut c_int) -> c_int, + pub fn XGetKeyboardControl (_2: *mut Display, _1: *mut XKeyboardState) -> c_int, + pub fn XGetKeyboardMapping (_4: *mut Display, _3: c_uchar, _2: c_int, _1: *mut c_int) -> *mut c_ulong, + pub fn XGetModifierMapping (_1: *mut Display) -> *mut XModifierKeymap, + pub fn XGetMotionEvents (_5: *mut Display, _4: c_ulong, _3: c_ulong, _2: c_ulong, _1: *mut c_int) -> *mut XTimeCoord, + pub fn XGetNormalHints (_3: *mut Display, _2: c_ulong, _1: *mut XSizeHints) -> c_int, + pub fn XGetPixel (_3: *mut XImage, _2: c_int, _1: c_int) -> c_ulong, + pub fn XGetPointerControl (_4: *mut Display, _3: *mut c_int, _2: *mut c_int, _1: *mut c_int) -> c_int, + pub fn XGetPointerMapping (_3: *mut Display, _2: *mut c_uchar, _1: c_int) -> c_int, + pub fn XGetRGBColormaps (_5: *mut Display, _4: c_ulong, _3: *mut *mut XStandardColormap, _2: *mut c_int, _1: c_ulong) -> c_int, + pub fn XGetScreenSaver (_5: *mut Display, _4: *mut c_int, _3: *mut c_int, _2: *mut c_int, _1: *mut c_int) -> c_int, + pub fn XGetSelectionOwner (_2: *mut Display, _1: c_ulong) -> c_ulong, + pub fn XGetSizeHints (_4: *mut Display, _3: c_ulong, _2: *mut XSizeHints, _1: c_ulong) -> c_int, + pub fn XGetStandardColormap (_4: *mut Display, _3: 
c_ulong, _2: *mut XStandardColormap, _1: c_ulong) -> c_int, + pub fn XGetSubImage (_11: *mut Display, _10: c_ulong, _9: c_int, _8: c_int, _7: c_uint, _6: c_uint, _5: c_ulong, _4: c_int, _3: *mut XImage, _2: c_int, _1: c_int) -> *mut XImage, + pub fn XGetTextProperty (_4: *mut Display, _3: c_ulong, _2: *mut XTextProperty, _1: c_ulong) -> c_int, + pub fn XGetTransientForHint (_3: *mut Display, _2: c_ulong, _1: *mut c_ulong) -> c_int, + pub fn XGetVisualInfo (_4: *mut Display, _3: c_long, _2: *mut XVisualInfo, _1: *mut c_int) -> *mut XVisualInfo, + pub fn XGetWindowAttributes (_3: *mut Display, _2: c_ulong, _1: *mut XWindowAttributes) -> c_int, + pub fn XGetWindowProperty (_12: *mut Display, _11: c_ulong, _10: c_ulong, _9: c_long, _8: c_long, _7: c_int, _6: c_ulong, _5: *mut c_ulong, _4: *mut c_int, _3: *mut c_ulong, _2: *mut c_ulong, _1: *mut *mut c_uchar) -> c_int, + pub fn XGetWMClientMachine (_3: *mut Display, _2: c_ulong, _1: *mut XTextProperty) -> c_int, + pub fn XGetWMColormapWindows (_4: *mut Display, _3: c_ulong, _2: *mut *mut c_ulong, _1: *mut c_int) -> c_int, + pub fn XGetWMHints (_2: *mut Display, _1: c_ulong) -> *mut XWMHints, + pub fn XGetWMIconName (_3: *mut Display, _2: c_ulong, _1: *mut XTextProperty) -> c_int, + pub fn XGetWMName (_3: *mut Display, _2: c_ulong, _1: *mut XTextProperty) -> c_int, + pub fn XGetWMNormalHints (_4: *mut Display, _3: c_ulong, _2: *mut XSizeHints, _1: *mut c_long) -> c_int, + pub fn XGetWMProtocols (_4: *mut Display, _3: c_ulong, _2: *mut *mut c_ulong, _1: *mut c_int) -> c_int, + pub fn XGetWMSizeHints (_5: *mut Display, _4: c_ulong, _3: *mut XSizeHints, _2: *mut c_long, _1: c_ulong) -> c_int, + pub fn XGetZoomHints (_3: *mut Display, _2: c_ulong, _1: *mut XSizeHints) -> c_int, + pub fn XGrabButton (_10: *mut Display, _9: c_uint, _8: c_uint, _7: c_ulong, _6: c_int, _5: c_uint, _4: c_int, _3: c_int, _2: c_ulong, _1: c_ulong) -> c_int, + pub fn XGrabKey (_7: *mut Display, _6: c_int, _5: c_uint, _4: c_ulong, _3: c_int, _2: c_int, _1: c_int) -> c_int, + pub fn XGrabKeyboard (_6: *mut Display, _5: c_ulong, _4: c_int, _3: c_int, _2: c_int, _1: c_ulong) -> c_int, + pub fn XGrabPointer (_9: *mut Display, _8: c_ulong, _7: c_int, _6: c_uint, _5: c_int, _4: c_int, _3: c_ulong, _2: c_ulong, _1: c_ulong) -> c_int, + pub fn XGrabServer (_1: *mut Display) -> c_int, + pub fn XHeightMMOfScreen (_1: *mut Screen) -> c_int, + pub fn XHeightOfScreen (_1: *mut Screen) -> c_int, + pub fn XIconifyWindow (_3: *mut Display, _2: c_ulong, _1: c_int) -> c_int, + pub fn XIfEvent (_4: *mut Display, _3: *mut XEvent, _2: Option c_int>, _1: *mut c_char) -> c_int, + pub fn XImageByteOrder (_1: *mut Display) -> c_int, + pub fn XIMOfIC (_1: XIC) -> XIM, + pub fn XInitExtension (_2: *mut Display, _1: *const c_char) -> *mut XExtCodes, + pub fn XInitImage (_1: *mut XImage) -> c_int, + pub fn XInitThreads () -> c_int, + pub fn XInsertModifiermapEntry (_3: *mut XModifierKeymap, _2: c_uchar, _1: c_int) -> *mut XModifierKeymap, + pub fn XInstallColormap (_2: *mut Display, _1: c_ulong) -> c_int, + pub fn XInternalConnectionNumbers (_3: *mut Display, _2: *mut *mut c_int, _1: *mut c_int) -> c_int, + pub fn XInternAtom (_3: *mut Display, _2: *const c_char, _1: c_int) -> c_ulong, + pub fn XInternAtoms (_5: *mut Display, _4: *mut *mut c_char, _3: c_int, _2: c_int, _1: *mut c_ulong) -> c_int, + pub fn XIntersectRegion (_3: Region, _2: Region, _1: Region) -> c_int, + pub fn XkbAddDeviceLedInfo (_3: XkbDeviceInfoPtr, _2: c_uint, _1: c_uint) -> XkbDeviceLedInfoPtr, + pub fn XkbAddGeomColor (_3: 
XkbGeometryPtr, _2: *mut c_char, _1: c_uint) -> XkbColorPtr, + pub fn XkbAddGeomDoodad (_3: XkbGeometryPtr, _2: XkbSectionPtr, _1: c_ulong) -> XkbDoodadPtr, + pub fn XkbAddGeomKey (_1: XkbRowPtr) -> XkbKeyPtr, + pub fn XkbAddGeomKeyAlias (_3: XkbGeometryPtr, _2: *mut c_char, _1: *mut c_char) -> XkbKeyAliasPtr, + pub fn XkbAddGeomOutline (_2: XkbShapePtr, _1: c_int) -> XkbOutlinePtr, + pub fn XkbAddGeomOverlay (_3: XkbSectionPtr, _2: c_ulong, _1: c_int) -> XkbOverlayPtr, + pub fn XkbAddGeomOverlayKey (_4: XkbOverlayPtr, _3: XkbOverlayRowPtr, _2: *mut c_char, _1: *mut c_char) -> XkbOverlayKeyPtr, + pub fn XkbAddGeomOverlayRow (_3: XkbOverlayPtr, _2: c_int, _1: c_int) -> XkbOverlayRowPtr, + pub fn XkbAddGeomProperty (_3: XkbGeometryPtr, _2: *mut c_char, _1: *mut c_char) -> XkbPropertyPtr, + pub fn XkbAddGeomRow (_2: XkbSectionPtr, _1: c_int) -> XkbRowPtr, + pub fn XkbAddGeomSection (_5: XkbGeometryPtr, _4: c_ulong, _3: c_int, _2: c_int, _1: c_int) -> XkbSectionPtr, + pub fn XkbAddGeomShape (_3: XkbGeometryPtr, _2: c_ulong, _1: c_int) -> XkbShapePtr, + pub fn XkbAddKeyType (_5: XkbDescPtr, _4: c_ulong, _3: c_int, _2: c_int, _1: c_int) -> XkbKeyTypePtr, + pub fn XkbAllocClientMap (_3: XkbDescPtr, _2: c_uint, _1: c_uint) -> c_int, + pub fn XkbAllocCompatMap (_3: XkbDescPtr, _2: c_uint, _1: c_uint) -> c_int, + pub fn XkbAllocControls (_2: XkbDescPtr, _1: c_uint) -> c_int, + pub fn XkbAllocDeviceInfo (_3: c_uint, _2: c_uint, _1: c_uint) -> XkbDeviceInfoPtr, + pub fn XkbAllocGeomColors (_2: XkbGeometryPtr, _1: c_int) -> c_int, + pub fn XkbAllocGeomDoodads (_2: XkbGeometryPtr, _1: c_int) -> c_int, + pub fn XkbAllocGeometry (_2: XkbDescPtr, _1: XkbGeometrySizesPtr) -> c_int, + pub fn XkbAllocGeomKeyAliases (_2: XkbGeometryPtr, _1: c_int) -> c_int, + pub fn XkbAllocGeomKeys (_2: XkbRowPtr, _1: c_int) -> c_int, + pub fn XkbAllocGeomOutlines (_2: XkbShapePtr, _1: c_int) -> c_int, + pub fn XkbAllocGeomOverlayKeys (_2: XkbOverlayRowPtr, _1: c_int) -> c_int, + pub fn XkbAllocGeomOverlayRows (_2: XkbOverlayPtr, _1: c_int) -> c_int, + pub fn XkbAllocGeomOverlays (_2: XkbSectionPtr, _1: c_int) -> c_int, + pub fn XkbAllocGeomPoints (_2: XkbOutlinePtr, _1: c_int) -> c_int, + pub fn XkbAllocGeomProps (_2: XkbGeometryPtr, _1: c_int) -> c_int, + pub fn XkbAllocGeomRows (_2: XkbSectionPtr, _1: c_int) -> c_int, + pub fn XkbAllocGeomSectionDoodads (_2: XkbSectionPtr, _1: c_int) -> c_int, + pub fn XkbAllocGeomSections (_2: XkbGeometryPtr, _1: c_int) -> c_int, + pub fn XkbAllocGeomShapes (_2: XkbGeometryPtr, _1: c_int) -> c_int, + pub fn XkbAllocIndicatorMaps (_1: XkbDescPtr) -> c_int, + pub fn XkbAllocKeyboard () -> XkbDescPtr, + pub fn XkbAllocNames (_4: XkbDescPtr, _3: c_uint, _2: c_int, _1: c_int) -> c_int, + pub fn XkbAllocServerMap (_3: XkbDescPtr, _2: c_uint, _1: c_uint) -> c_int, + pub fn XkbApplyCompatMapToKey (_3: XkbDescPtr, _2: c_uchar, _1: XkbChangesPtr) -> c_int, + pub fn XkbApplyVirtualModChanges (_3: XkbDescPtr, _2: c_uint, _1: XkbChangesPtr) -> c_int, + pub fn XkbBell (_4: *mut Display, _3: c_ulong, _2: c_int, _1: c_ulong) -> c_int, + pub fn XkbBellEvent (_4: *mut Display, _3: c_ulong, _2: c_int, _1: c_ulong) -> c_int, + pub fn XkbChangeDeviceInfo (_3: *mut Display, _2: XkbDeviceInfoPtr, _1: XkbDeviceChangesPtr) -> c_int, + pub fn XkbChangeEnabledControls (_4: *mut Display, _3: c_uint, _2: c_uint, _1: c_uint) -> c_int, + pub fn XkbChangeKeycodeRange (_4: XkbDescPtr, _3: c_int, _2: c_int, _1: XkbChangesPtr) -> c_int, + pub fn XkbChangeMap (_3: *mut Display, _2: XkbDescPtr, _1: XkbMapChangesPtr) -> 
c_int, + pub fn XkbChangeNames (_3: *mut Display, _2: XkbDescPtr, _1: XkbNameChangesPtr) -> c_int, + pub fn XkbChangeTypesOfKey (_6: XkbDescPtr, _5: c_int, _4: c_int, _3: c_uint, _2: *mut c_int, _1: XkbMapChangesPtr) -> c_int, + pub fn XkbComputeEffectiveMap (_3: XkbDescPtr, _2: XkbKeyTypePtr, _1: *mut c_uchar) -> c_int, + pub fn XkbComputeRowBounds (_3: XkbGeometryPtr, _2: XkbSectionPtr, _1: XkbRowPtr) -> c_int, + pub fn XkbComputeSectionBounds (_2: XkbGeometryPtr, _1: XkbSectionPtr) -> c_int, + pub fn XkbComputeShapeBounds (_1: XkbShapePtr) -> c_int, + pub fn XkbComputeShapeTop (_2: XkbShapePtr, _1: XkbBoundsPtr) -> c_int, + pub fn XkbCopyKeyType (_2: XkbKeyTypePtr, _1: XkbKeyTypePtr) -> c_int, + pub fn XkbCopyKeyTypes (_3: XkbKeyTypePtr, _2: XkbKeyTypePtr, _1: c_int) -> c_int, + pub fn XkbDeviceBell (_7: *mut Display, _6: c_ulong, _5: c_int, _4: c_int, _3: c_int, _2: c_int, _1: c_ulong) -> c_int, + pub fn XkbDeviceBellEvent (_7: *mut Display, _6: c_ulong, _5: c_int, _4: c_int, _3: c_int, _2: c_int, _1: c_ulong) -> c_int, + pub fn XkbFindOverlayForKey (_3: XkbGeometryPtr, _2: XkbSectionPtr, _1: *mut c_char) -> *mut c_char, + pub fn XkbForceBell (_2: *mut Display, _1: c_int) -> c_int, + pub fn XkbForceDeviceBell (_5: *mut Display, _4: c_int, _3: c_int, _2: c_int, _1: c_int) -> c_int, + pub fn XkbFreeClientMap (_3: XkbDescPtr, _2: c_uint, _1: c_int) -> (), + pub fn XkbFreeCompatMap (_3: XkbDescPtr, _2: c_uint, _1: c_int) -> (), + pub fn XkbFreeComponentList (_1: XkbComponentListPtr) -> (), + pub fn XkbFreeControls (_3: XkbDescPtr, _2: c_uint, _1: c_int) -> (), + pub fn XkbFreeDeviceInfo (_3: XkbDeviceInfoPtr, _2: c_uint, _1: c_int) -> (), + pub fn XkbFreeGeomColors (_4: XkbGeometryPtr, _3: c_int, _2: c_int, _1: c_int) -> (), + pub fn XkbFreeGeomDoodads (_3: XkbDoodadPtr, _2: c_int, _1: c_int) -> (), + pub fn XkbFreeGeometry (_3: XkbGeometryPtr, _2: c_uint, _1: c_int) -> (), + pub fn XkbFreeGeomKeyAliases (_4: XkbGeometryPtr, _3: c_int, _2: c_int, _1: c_int) -> (), + pub fn XkbFreeGeomKeys (_4: XkbRowPtr, _3: c_int, _2: c_int, _1: c_int) -> (), + pub fn XkbFreeGeomOutlines (_4: XkbShapePtr, _3: c_int, _2: c_int, _1: c_int) -> (), + pub fn XkbFreeGeomOverlayKeys (_4: XkbOverlayRowPtr, _3: c_int, _2: c_int, _1: c_int) -> (), + pub fn XkbFreeGeomOverlayRows (_4: XkbOverlayPtr, _3: c_int, _2: c_int, _1: c_int) -> (), + pub fn XkbFreeGeomOverlays (_4: XkbSectionPtr, _3: c_int, _2: c_int, _1: c_int) -> (), + pub fn XkbFreeGeomPoints (_4: XkbOutlinePtr, _3: c_int, _2: c_int, _1: c_int) -> (), + pub fn XkbFreeGeomProperties (_4: XkbGeometryPtr, _3: c_int, _2: c_int, _1: c_int) -> (), + pub fn XkbFreeGeomRows (_4: XkbSectionPtr, _3: c_int, _2: c_int, _1: c_int) -> (), + pub fn XkbFreeGeomSections (_4: XkbGeometryPtr, _3: c_int, _2: c_int, _1: c_int) -> (), + pub fn XkbFreeGeomShapes (_4: XkbGeometryPtr, _3: c_int, _2: c_int, _1: c_int) -> (), + pub fn XkbFreeIndicatorMaps (_1: XkbDescPtr) -> (), + pub fn XkbFreeKeyboard (_3: XkbDescPtr, _2: c_uint, _1: c_int) -> (), + pub fn XkbFreeNames (_3: XkbDescPtr, _2: c_uint, _1: c_int) -> (), + pub fn XkbFreeServerMap (_3: XkbDescPtr, _2: c_uint, _1: c_int) -> (), + pub fn XkbGetAutoRepeatRate (_4: *mut Display, _3: c_uint, _2: *mut c_uint, _1: *mut c_uint) -> c_int, + pub fn XkbGetAutoResetControls (_3: *mut Display, _2: *mut c_uint, _1: *mut c_uint) -> c_int, + pub fn XkbGetCompatMap (_3: *mut Display, _2: c_uint, _1: XkbDescPtr) -> c_int, + pub fn XkbGetControls (_3: *mut Display, _2: c_ulong, _1: XkbDescPtr) -> c_int, + pub fn XkbGetDetectableAutoRepeat 
(_2: *mut Display, _1: *mut c_int) -> c_int, + pub fn XkbGetDeviceButtonActions (_5: *mut Display, _4: XkbDeviceInfoPtr, _3: c_int, _2: c_uint, _1: c_uint) -> c_int, + pub fn XkbGetDeviceInfo (_5: *mut Display, _4: c_uint, _3: c_uint, _2: c_uint, _1: c_uint) -> XkbDeviceInfoPtr, + pub fn XkbGetDeviceInfoChanges (_3: *mut Display, _2: XkbDeviceInfoPtr, _1: XkbDeviceChangesPtr) -> c_int, + pub fn XkbGetDeviceLedInfo (_5: *mut Display, _4: XkbDeviceInfoPtr, _3: c_uint, _2: c_uint, _1: c_uint) -> c_int, + pub fn XkbGetGeometry (_2: *mut Display, _1: XkbDescPtr) -> c_int, + pub fn XkbGetIndicatorMap (_3: *mut Display, _2: c_ulong, _1: XkbDescPtr) -> c_int, + pub fn XkbGetIndicatorState (_3: *mut Display, _2: c_uint, _1: *mut c_uint) -> c_int, + pub fn XkbGetKeyActions (_4: *mut Display, _3: c_uint, _2: c_uint, _1: XkbDescPtr) -> c_int, + pub fn XkbGetKeyBehaviors (_4: *mut Display, _3: c_uint, _2: c_uint, _1: XkbDescPtr) -> c_int, + pub fn XkbGetKeyboard (_3: *mut Display, _2: c_uint, _1: c_uint) -> XkbDescPtr, + pub fn XkbGetKeyboardByName (_6: *mut Display, _5: c_uint, _4: XkbComponentNamesPtr, _3: c_uint, _2: c_uint, _1: c_int) -> XkbDescPtr, + pub fn XkbGetKeyExplicitComponents (_4: *mut Display, _3: c_uint, _2: c_uint, _1: XkbDescPtr) -> c_int, + pub fn XkbGetKeyModifierMap (_4: *mut Display, _3: c_uint, _2: c_uint, _1: XkbDescPtr) -> c_int, + pub fn XkbGetKeySyms (_4: *mut Display, _3: c_uint, _2: c_uint, _1: XkbDescPtr) -> c_int, + pub fn XkbGetKeyTypes (_4: *mut Display, _3: c_uint, _2: c_uint, _1: XkbDescPtr) -> c_int, + pub fn XkbGetKeyVirtualModMap (_4: *mut Display, _3: c_uint, _2: c_uint, _1: XkbDescPtr) -> c_int, + pub fn XkbGetMap (_3: *mut Display, _2: c_uint, _1: c_uint) -> XkbDescPtr, + pub fn XkbGetMapChanges (_3: *mut Display, _2: XkbDescPtr, _1: XkbMapChangesPtr) -> c_int, + pub fn XkbGetNamedDeviceIndicator (_9: *mut Display, _8: c_uint, _7: c_uint, _6: c_uint, _5: c_ulong, _4: *mut c_int, _3: *mut c_int, _2: XkbIndicatorMapPtr, _1: *mut c_int) -> c_int, + pub fn XkbGetNamedGeometry (_3: *mut Display, _2: XkbDescPtr, _1: c_ulong) -> c_int, + pub fn XkbGetNamedIndicator (_6: *mut Display, _5: c_ulong, _4: *mut c_int, _3: *mut c_int, _2: XkbIndicatorMapPtr, _1: *mut c_int) -> c_int, + pub fn XkbGetNames (_3: *mut Display, _2: c_uint, _1: XkbDescPtr) -> c_int, + pub fn XkbGetPerClientControls (_2: *mut Display, _1: *mut c_uint) -> c_int, + pub fn XkbGetState (_3: *mut Display, _2: c_uint, _1: XkbStatePtr) -> c_int, + pub fn XkbGetUpdatedMap (_3: *mut Display, _2: c_uint, _1: XkbDescPtr) -> c_int, + pub fn XkbGetVirtualMods (_3: *mut Display, _2: c_uint, _1: XkbDescPtr) -> c_int, + pub fn XkbGetXlibControls (_1: *mut Display) -> c_uint, + pub fn XkbIgnoreExtension (_1: c_int) -> c_int, + pub fn XkbInitCanonicalKeyTypes (_3: XkbDescPtr, _2: c_uint, _1: c_int) -> c_int, + pub fn XkbKeycodeToKeysym (_4: *mut Display, _3: c_uchar, _2: c_int, _1: c_int) -> c_ulong, + pub fn XkbKeysymToModifiers (_2: *mut Display, _1: c_ulong) -> c_uint, + pub fn XkbKeyTypesForCoreSymbols (_6: XkbDescPtr, _5: c_int, _4: *mut c_ulong, _3: c_uint, _2: *mut c_int, _1: *mut c_ulong) -> c_int, + pub fn XkbLatchGroup (_3: *mut Display, _2: c_uint, _1: c_uint) -> c_int, + pub fn XkbLatchModifiers (_4: *mut Display, _3: c_uint, _2: c_uint, _1: c_uint) -> c_int, + pub fn XkbLibraryVersion (_2: *mut c_int, _1: *mut c_int) -> c_int, + pub fn XkbListComponents (_4: *mut Display, _3: c_uint, _2: XkbComponentNamesPtr, _1: *mut c_int) -> XkbComponentListPtr, + pub fn XkbLockGroup (_3: *mut Display, _2: c_uint, _1: 
c_uint) -> c_int, + pub fn XkbLockModifiers (_4: *mut Display, _3: c_uint, _2: c_uint, _1: c_uint) -> c_int, + pub fn XkbLookupKeyBinding (_6: *mut Display, _5: c_ulong, _4: c_uint, _3: *mut c_char, _2: c_int, _1: *mut c_int) -> c_int, + pub fn XkbLookupKeySym (_5: *mut Display, _4: c_uchar, _3: c_uint, _2: *mut c_uint, _1: *mut c_ulong) -> c_int, + pub fn XkbNoteControlsChanges (_3: XkbControlsChangesPtr, _2: *mut XkbControlsNotifyEvent, _1: c_uint) -> (), + pub fn XkbNoteDeviceChanges (_3: XkbDeviceChangesPtr, _2: *mut XkbExtensionDeviceNotifyEvent, _1: c_uint) -> (), + pub fn XkbNoteMapChanges (_3: XkbMapChangesPtr, _2: *mut XkbMapNotifyEvent, _1: c_uint) -> (), + pub fn XkbNoteNameChanges (_3: XkbNameChangesPtr, _2: *mut XkbNamesNotifyEvent, _1: c_uint) -> (), + pub fn XkbOpenDisplay (_6: *mut c_char, _5: *mut c_int, _4: *mut c_int, _3: *mut c_int, _2: *mut c_int, _1: *mut c_int) -> *mut Display, + pub fn XkbQueryExtension (_6: *mut Display, _5: *mut c_int, _4: *mut c_int, _3: *mut c_int, _2: *mut c_int, _1: *mut c_int) -> c_int, + pub fn XkbRefreshKeyboardMapping (_1: *mut XkbMapNotifyEvent) -> c_int, + pub fn XkbResizeDeviceButtonActions (_2: XkbDeviceInfoPtr, _1: c_uint) -> c_int, + pub fn XkbResizeKeyActions (_3: XkbDescPtr, _2: c_int, _1: c_int) -> *mut XkbAction, + pub fn XkbResizeKeySyms (_3: XkbDescPtr, _2: c_int, _1: c_int) -> *mut c_ulong, + pub fn XkbResizeKeyType (_5: XkbDescPtr, _4: c_int, _3: c_int, _2: c_int, _1: c_int) -> c_int, + pub fn XkbSelectEventDetails (_5: *mut Display, _4: c_uint, _3: c_uint, _2: c_ulong, _1: c_ulong) -> c_int, + pub fn XkbSelectEvents (_4: *mut Display, _3: c_uint, _2: c_ulong, _1: c_ulong) -> c_int, + pub fn XkbSetAtomFuncs (_2: Option c_ulong>, _1: Option *mut c_char>) -> (), + pub fn XkbSetAutoRepeatRate (_4: *mut Display, _3: c_uint, _2: c_uint, _1: c_uint) -> c_int, + pub fn XkbSetAutoResetControls (_4: *mut Display, _3: c_uint, _2: *mut c_uint, _1: *mut c_uint) -> c_int, + pub fn XkbSetCompatMap (_4: *mut Display, _3: c_uint, _2: XkbDescPtr, _1: c_int) -> c_int, + pub fn XkbSetControls (_3: *mut Display, _2: c_ulong, _1: XkbDescPtr) -> c_int, + pub fn XkbSetDebuggingFlags (_8: *mut Display, _7: c_uint, _6: c_uint, _5: *mut c_char, _4: c_uint, _3: c_uint, _2: *mut c_uint, _1: *mut c_uint) -> c_int, + pub fn XkbSetDetectableAutoRepeat (_3: *mut Display, _2: c_int, _1: *mut c_int) -> c_int, + pub fn XkbSetDeviceButtonActions (_4: *mut Display, _3: XkbDeviceInfoPtr, _2: c_uint, _1: c_uint) -> c_int, + pub fn XkbSetDeviceInfo (_3: *mut Display, _2: c_uint, _1: XkbDeviceInfoPtr) -> c_int, + pub fn XkbSetDeviceLedInfo (_5: *mut Display, _4: XkbDeviceInfoPtr, _3: c_uint, _2: c_uint, _1: c_uint) -> c_int, + pub fn XkbSetGeometry (_3: *mut Display, _2: c_uint, _1: XkbGeometryPtr) -> c_int, + pub fn XkbSetIgnoreLockMods (_6: *mut Display, _5: c_uint, _4: c_uint, _3: c_uint, _2: c_uint, _1: c_uint) -> c_int, + pub fn XkbSetIndicatorMap (_3: *mut Display, _2: c_ulong, _1: XkbDescPtr) -> c_int, + pub fn XkbSetMap (_3: *mut Display, _2: c_uint, _1: XkbDescPtr) -> c_int, + pub fn XkbSetNamedDeviceIndicator (_9: *mut Display, _8: c_uint, _7: c_uint, _6: c_uint, _5: c_ulong, _4: c_int, _3: c_int, _2: c_int, _1: XkbIndicatorMapPtr) -> c_int, + pub fn XkbSetNamedIndicator (_6: *mut Display, _5: c_ulong, _4: c_int, _3: c_int, _2: c_int, _1: XkbIndicatorMapPtr) -> c_int, + pub fn XkbSetNames (_5: *mut Display, _4: c_uint, _3: c_uint, _2: c_uint, _1: XkbDescPtr) -> c_int, + pub fn XkbSetPerClientControls (_3: *mut Display, _2: c_uint, _1: *mut c_uint) -> 
c_int, + pub fn XkbSetServerInternalMods (_6: *mut Display, _5: c_uint, _4: c_uint, _3: c_uint, _2: c_uint, _1: c_uint) -> c_int, + pub fn XkbSetXlibControls (_3: *mut Display, _2: c_uint, _1: c_uint) -> c_uint, + pub fn XkbToControl (_1: c_char) -> c_char, + pub fn XkbTranslateKeyCode (_5: XkbDescPtr, _4: c_uchar, _3: c_uint, _2: *mut c_uint, _1: *mut c_ulong) -> c_int, + pub fn XkbTranslateKeySym (_6: *mut Display, _5: *mut c_ulong, _4: c_uint, _3: *mut c_char, _2: c_int, _1: *mut c_int) -> c_int, + pub fn XkbUpdateActionVirtualMods (_3: XkbDescPtr, _2: *mut XkbAction, _1: c_uint) -> c_int, + pub fn XkbUpdateKeyTypeVirtualMods (_4: XkbDescPtr, _3: XkbKeyTypePtr, _2: c_uint, _1: XkbChangesPtr) -> (), + pub fn XkbUpdateMapFromCore (_6: XkbDescPtr, _5: c_uchar, _4: c_int, _3: c_int, _2: *mut c_ulong, _1: XkbChangesPtr) -> c_int, + pub fn XkbUseExtension (_3: *mut Display, _2: *mut c_int, _1: *mut c_int) -> c_int, + pub fn XkbVirtualModsToReal (_3: XkbDescPtr, _2: c_uint, _1: *mut c_uint) -> c_int, + pub fn XkbXlibControlsImplemented () -> c_uint, + pub fn XKeycodeToKeysym (_3: *mut Display, _2: c_uchar, _1: c_int) -> c_ulong, + pub fn XKeysymToKeycode (_2: *mut Display, _1: c_ulong) -> c_uchar, + pub fn XKeysymToString (_1: c_ulong) -> *mut c_char, + pub fn XKillClient (_2: *mut Display, _1: c_ulong) -> c_int, + pub fn XLastKnownRequestProcessed (_1: *mut Display) -> c_ulong, + pub fn XListDepths (_3: *mut Display, _2: c_int, _1: *mut c_int) -> *mut c_int, + pub fn XListExtensions (_2: *mut Display, _1: *mut c_int) -> *mut *mut c_char, + pub fn XListFonts (_4: *mut Display, _3: *const c_char, _2: c_int, _1: *mut c_int) -> *mut *mut c_char, + pub fn XListFontsWithInfo (_5: *mut Display, _4: *const c_char, _3: c_int, _2: *mut c_int, _1: *mut *mut XFontStruct) -> *mut *mut c_char, + pub fn XListHosts (_3: *mut Display, _2: *mut c_int, _1: *mut c_int) -> *mut XHostAddress, + pub fn XListInstalledColormaps (_3: *mut Display, _2: c_ulong, _1: *mut c_int) -> *mut c_ulong, + pub fn XListPixmapFormats (_2: *mut Display, _1: *mut c_int) -> *mut XPixmapFormatValues, + pub fn XListProperties (_3: *mut Display, _2: c_ulong, _1: *mut c_int) -> *mut c_ulong, + pub fn XLoadFont (_2: *mut Display, _1: *const c_char) -> c_ulong, + pub fn XLoadQueryFont (_2: *mut Display, _1: *const c_char) -> *mut XFontStruct, + pub fn XLocaleOfFontSet (_1: XFontSet) -> *mut c_char, + pub fn XLocaleOfIM (_1: XIM) -> *mut c_char, + pub fn XLocaleOfOM (_1: XOM) -> *mut c_char, + pub fn XLockDisplay (_1: *mut Display) -> (), + pub fn XLookupColor (_5: *mut Display, _4: c_ulong, _3: *const c_char, _2: *mut XColor, _1: *mut XColor) -> c_int, + pub fn XLookupKeysym (_2: *mut XKeyEvent, _1: c_int) -> c_ulong, + pub fn XLookupString (_5: *mut XKeyEvent, _4: *mut c_char, _3: c_int, _2: *mut c_ulong, _1: *mut XComposeStatus) -> c_int, + pub fn XLowerWindow (_2: *mut Display, _1: c_ulong) -> c_int, + pub fn XMapRaised (_2: *mut Display, _1: c_ulong) -> c_int, + pub fn XMapSubwindows (_2: *mut Display, _1: c_ulong) -> c_int, + pub fn XMapWindow (_2: *mut Display, _1: c_ulong) -> c_int, + pub fn XMaskEvent (_3: *mut Display, _2: c_long, _1: *mut XEvent) -> c_int, + pub fn XMatchVisualInfo (_5: *mut Display, _4: c_int, _3: c_int, _2: c_int, _1: *mut XVisualInfo) -> c_int, + pub fn XMaxCmapsOfScreen (_1: *mut Screen) -> c_int, + pub fn XMaxRequestSize (_1: *mut Display) -> c_long, + pub fn XmbDrawImageString (_8: *mut Display, _7: c_ulong, _6: XFontSet, _5: GC, _4: c_int, _3: c_int, _2: *const c_char, _1: c_int) -> (), + pub fn 
XmbDrawString (_8: *mut Display, _7: c_ulong, _6: XFontSet, _5: GC, _4: c_int, _3: c_int, _2: *const c_char, _1: c_int) -> (), + pub fn XmbDrawText (_7: *mut Display, _6: c_ulong, _5: GC, _4: c_int, _3: c_int, _2: *mut XmbTextItem, _1: c_int) -> (), + pub fn XmbLookupString (_6: XIC, _5: *mut XKeyEvent, _4: *mut c_char, _3: c_int, _2: *mut c_ulong, _1: *mut c_int) -> c_int, + pub fn XmbResetIC (_1: XIC) -> *mut c_char, + pub fn XmbSetWMProperties (_9: *mut Display, _8: c_ulong, _7: *const c_char, _6: *const c_char, _5: *mut *mut c_char, _4: c_int, _3: *mut XSizeHints, _2: *mut XWMHints, _1: *mut XClassHint) -> (), + pub fn XmbTextEscapement (_3: XFontSet, _2: *const c_char, _1: c_int) -> c_int, + pub fn XmbTextExtents (_5: XFontSet, _4: *const c_char, _3: c_int, _2: *mut XRectangle, _1: *mut XRectangle) -> c_int, + pub fn XmbTextListToTextProperty (_5: *mut Display, _4: *mut *mut c_char, _3: c_int, _2: XICCEncodingStyle, _1: *mut XTextProperty) -> c_int, + pub fn XmbTextPerCharExtents (_9: XFontSet, _8: *const c_char, _7: c_int, _6: *mut XRectangle, _5: *mut XRectangle, _4: c_int, _3: *mut c_int, _2: *mut XRectangle, _1: *mut XRectangle) -> c_int, + pub fn XmbTextPropertyToTextList (_4: *mut Display, _3: *const XTextProperty, _2: *mut *mut *mut c_char, _1: *mut c_int) -> c_int, + pub fn XMinCmapsOfScreen (_1: *mut Screen) -> c_int, + pub fn XMoveResizeWindow (_6: *mut Display, _5: c_ulong, _4: c_int, _3: c_int, _2: c_uint, _1: c_uint) -> c_int, + pub fn XMoveWindow (_4: *mut Display, _3: c_ulong, _2: c_int, _1: c_int) -> c_int, + pub fn XNewModifiermap (_1: c_int) -> *mut XModifierKeymap, + pub fn XNextEvent (_2: *mut Display, _1: *mut XEvent) -> c_int, + pub fn XNextRequest (_1: *mut Display) -> c_ulong, + pub fn XNoOp (_1: *mut Display) -> c_int, + pub fn XOffsetRegion (_3: Region, _2: c_int, _1: c_int) -> c_int, + pub fn XOMOfOC (_1: XFontSet) -> XOM, + pub fn XOpenDisplay (_1: *const c_char) -> *mut Display, + pub fn XOpenIM (_4: *mut Display, _3: XrmDatabase, _2: *mut c_char, _1: *mut c_char) -> XIM, + pub fn XOpenOM (_4: *mut Display, _3: XrmDatabase, _2: *const c_char, _1: *const c_char) -> XOM, + pub fn XParseColor (_4: *mut Display, _3: c_ulong, _2: *const c_char, _1: *mut XColor) -> c_int, + pub fn XParseGeometry (_5: *const c_char, _4: *mut c_int, _3: *mut c_int, _2: *mut c_uint, _1: *mut c_uint) -> c_int, + pub fn XPeekEvent (_2: *mut Display, _1: *mut XEvent) -> c_int, + pub fn XPeekIfEvent (_4: *mut Display, _3: *mut XEvent, _2: Option c_int>, _1: *mut c_char) -> c_int, + pub fn XPending (_1: *mut Display) -> c_int, + pub fn Xpermalloc (_1: c_uint) -> *mut c_char, + pub fn XPlanesOfScreen (_1: *mut Screen) -> c_int, + pub fn XPointInRegion (_3: Region, _2: c_int, _1: c_int) -> c_int, + pub fn XPolygonRegion (_3: *mut XPoint, _2: c_int, _1: c_int) -> Region, + pub fn XProcessInternalConnection (_2: *mut Display, _1: c_int) -> (), + pub fn XProtocolRevision (_1: *mut Display) -> c_int, + pub fn XProtocolVersion (_1: *mut Display) -> c_int, + pub fn XPutBackEvent (_2: *mut Display, _1: *mut XEvent) -> c_int, + pub fn XPutImage (_10: *mut Display, _9: c_ulong, _8: GC, _7: *mut XImage, _6: c_int, _5: c_int, _4: c_int, _3: c_int, _2: c_uint, _1: c_uint) -> c_int, + pub fn XPutPixel (_4: *mut XImage, _3: c_int, _2: c_int, _1: c_ulong) -> c_int, + pub fn XQLength (_1: *mut Display) -> c_int, + pub fn XQueryBestCursor (_6: *mut Display, _5: c_ulong, _4: c_uint, _3: c_uint, _2: *mut c_uint, _1: *mut c_uint) -> c_int, + pub fn XQueryBestSize (_7: *mut Display, _6: c_int, _5: c_ulong, 
_4: c_uint, _3: c_uint, _2: *mut c_uint, _1: *mut c_uint) -> c_int, + pub fn XQueryBestStipple (_6: *mut Display, _5: c_ulong, _4: c_uint, _3: c_uint, _2: *mut c_uint, _1: *mut c_uint) -> c_int, + pub fn XQueryBestTile (_6: *mut Display, _5: c_ulong, _4: c_uint, _3: c_uint, _2: *mut c_uint, _1: *mut c_uint) -> c_int, + pub fn XQueryColor (_3: *mut Display, _2: c_ulong, _1: *mut XColor) -> c_int, + pub fn XQueryColors (_4: *mut Display, _3: c_ulong, _2: *mut XColor, _1: c_int) -> c_int, + pub fn XQueryExtension (_5: *mut Display, _4: *const c_char, _3: *mut c_int, _2: *mut c_int, _1: *mut c_int) -> c_int, + pub fn XQueryFont (_2: *mut Display, _1: c_ulong) -> *mut XFontStruct, + pub fn XQueryKeymap (_2: *mut Display, _1: *mut c_char) -> c_int, + pub fn XQueryPointer (_9: *mut Display, _8: c_ulong, _7: *mut c_ulong, _6: *mut c_ulong, _5: *mut c_int, _4: *mut c_int, _3: *mut c_int, _2: *mut c_int, _1: *mut c_uint) -> c_int, + pub fn XQueryTextExtents (_8: *mut Display, _7: c_ulong, _6: *const c_char, _5: c_int, _4: *mut c_int, _3: *mut c_int, _2: *mut c_int, _1: *mut XCharStruct) -> c_int, + pub fn XQueryTextExtents16 (_8: *mut Display, _7: c_ulong, _6: *const XChar2b, _5: c_int, _4: *mut c_int, _3: *mut c_int, _2: *mut c_int, _1: *mut XCharStruct) -> c_int, + pub fn XQueryTree (_6: *mut Display, _5: c_ulong, _4: *mut c_ulong, _3: *mut c_ulong, _2: *mut *mut c_ulong, _1: *mut c_uint) -> c_int, + pub fn XRaiseWindow (_2: *mut Display, _1: c_ulong) -> c_int, + pub fn XReadBitmapFile (_8: *mut Display, _7: c_ulong, _6: *const c_char, _5: *mut c_uint, _4: *mut c_uint, _3: *mut c_ulong, _2: *mut c_int, _1: *mut c_int) -> c_int, + pub fn XReadBitmapFileData (_6: *const c_char, _5: *mut c_uint, _4: *mut c_uint, _3: *mut *mut c_uchar, _2: *mut c_int, _1: *mut c_int) -> c_int, + pub fn XRebindKeysym (_6: *mut Display, _5: c_ulong, _4: *mut c_ulong, _3: c_int, _2: *const c_uchar, _1: c_int) -> c_int, + pub fn XRecolorCursor (_4: *mut Display, _3: c_ulong, _2: *mut XColor, _1: *mut XColor) -> c_int, + pub fn XReconfigureWMWindow (_5: *mut Display, _4: c_ulong, _3: c_int, _2: c_uint, _1: *mut XWindowChanges) -> c_int, + pub fn XRectInRegion (_5: Region, _4: c_int, _3: c_int, _2: c_uint, _1: c_uint) -> c_int, + pub fn XRefreshKeyboardMapping (_1: *mut XMappingEvent) -> c_int, + pub fn XRegisterIMInstantiateCallback (_6: *mut Display, _5: XrmDatabase, _4: *mut c_char, _3: *mut c_char, _2: Option, _1: *mut c_char) -> c_int, + pub fn XRemoveConnectionWatch (_3: *mut Display, _2: Option, _1: *mut c_char) -> (), + pub fn XRemoveFromSaveSet (_2: *mut Display, _1: c_ulong) -> c_int, + pub fn XRemoveHost (_2: *mut Display, _1: *mut XHostAddress) -> c_int, + pub fn XRemoveHosts (_3: *mut Display, _2: *mut XHostAddress, _1: c_int) -> c_int, + pub fn XReparentWindow (_5: *mut Display, _4: c_ulong, _3: c_ulong, _2: c_int, _1: c_int) -> c_int, + pub fn XResetScreenSaver (_1: *mut Display) -> c_int, + pub fn XResizeWindow (_4: *mut Display, _3: c_ulong, _2: c_uint, _1: c_uint) -> c_int, + pub fn XResourceManagerString (_1: *mut Display) -> *mut c_char, + pub fn XRestackWindows (_3: *mut Display, _2: *mut c_ulong, _1: c_int) -> c_int, + pub fn XrmCombineDatabase (_3: XrmDatabase, _2: *mut XrmDatabase, _1: c_int) -> (), + pub fn XrmCombineFileDatabase (_3: *const c_char, _2: *mut XrmDatabase, _1: c_int) -> c_int, + pub fn XrmDestroyDatabase (_1: XrmDatabase) -> (), + pub fn XrmEnumerateDatabase (_6: XrmDatabase, _5: *mut c_int, _4: *mut c_int, _3: c_int, _2: Option c_int>, _1: *mut c_char) -> c_int, + pub fn 
XrmGetDatabase (_1: *mut Display) -> XrmDatabase, + pub fn XrmGetFileDatabase (_1: *const c_char) -> XrmDatabase, + pub fn XrmGetResource (_5: XrmDatabase, _4: *const c_char, _3: *const c_char, _2: *mut *mut c_char, _1: *mut XrmValue) -> c_int, + pub fn XrmGetStringDatabase (_1: *const c_char) -> XrmDatabase, + pub fn XrmInitialize () -> (), + pub fn XrmLocaleOfDatabase (_1: XrmDatabase) -> *const c_char, + pub fn XrmMergeDatabases (_2: XrmDatabase, _1: *mut XrmDatabase) -> (), + pub fn XrmParseCommand (_6: *mut XrmDatabase, _5: XrmOptionDescList, _4: c_int, _3: *const c_char, _2: *mut c_int, _1: *mut *mut c_char) -> (), + pub fn XrmPermStringToQuark (_1: *const c_char) -> c_int, + pub fn XrmPutFileDatabase (_2: XrmDatabase, _1: *const c_char) -> (), + pub fn XrmPutLineResource (_2: *mut XrmDatabase, _1: *const c_char) -> (), + pub fn XrmPutResource (_4: *mut XrmDatabase, _3: *const c_char, _2: *const c_char, _1: *mut XrmValue) -> (), + pub fn XrmPutStringResource (_3: *mut XrmDatabase, _2: *const c_char, _1: *const c_char) -> (), + pub fn XrmQGetResource (_5: XrmDatabase, _4: *mut c_int, _3: *mut c_int, _2: *mut c_int, _1: *mut XrmValue) -> c_int, + pub fn XrmQGetSearchList (_5: XrmDatabase, _4: *mut c_int, _3: *mut c_int, _2: *mut *mut XrmDatabase, _1: c_int) -> c_int, + pub fn XrmQGetSearchResource (_5: *mut *mut XrmDatabase, _4: c_int, _3: c_int, _2: *mut c_int, _1: *mut XrmValue) -> c_int, + pub fn XrmQPutResource (_5: *mut XrmDatabase, _4: *mut XrmBinding, _3: *mut c_int, _2: c_int, _1: *mut XrmValue) -> (), + pub fn XrmQPutStringResource (_4: *mut XrmDatabase, _3: *mut XrmBinding, _2: *mut c_int, _1: *const c_char) -> (), + pub fn XrmQuarkToString (_1: c_int) -> *mut c_char, + pub fn XrmSetDatabase (_2: *mut Display, _1: XrmDatabase) -> (), + pub fn XrmStringToBindingQuarkList (_3: *const c_char, _2: *mut XrmBinding, _1: *mut c_int) -> (), + pub fn XrmStringToQuark (_1: *const c_char) -> c_int, + pub fn XrmStringToQuarkList (_2: *const c_char, _1: *mut c_int) -> (), + pub fn XrmUniqueQuark () -> c_int, + pub fn XRootWindow (_2: *mut Display, _1: c_int) -> c_ulong, + pub fn XRootWindowOfScreen (_1: *mut Screen) -> c_ulong, + pub fn XRotateBuffers (_2: *mut Display, _1: c_int) -> c_int, + pub fn XRotateWindowProperties (_5: *mut Display, _4: c_ulong, _3: *mut c_ulong, _2: c_int, _1: c_int) -> c_int, + pub fn XSaveContext (_4: *mut Display, _3: c_ulong, _2: c_int, _1: *const c_char) -> c_int, + pub fn XScreenCount (_1: *mut Display) -> c_int, + pub fn XScreenNumberOfScreen (_1: *mut Screen) -> c_int, + pub fn XScreenOfDisplay (_2: *mut Display, _1: c_int) -> *mut Screen, + pub fn XScreenResourceString (_1: *mut Screen) -> *mut c_char, + pub fn XSelectInput (_3: *mut Display, _2: c_ulong, _1: c_long) -> c_int, + pub fn XSendEvent (_5: *mut Display, _4: c_ulong, _3: c_int, _2: c_long, _1: *mut XEvent) -> c_int, + pub fn XServerVendor (_1: *mut Display) -> *mut c_char, + pub fn XSetAccessControl (_2: *mut Display, _1: c_int) -> c_int, + pub fn XSetAfterFunction (_2: *mut Display, _1: Option c_int>) -> Option c_int>, + pub fn XSetArcMode (_3: *mut Display, _2: GC, _1: c_int) -> c_int, + pub fn XSetAuthorization (_4: *mut c_char, _3: c_int, _2: *mut c_char, _1: c_int) -> (), + pub fn XSetBackground (_3: *mut Display, _2: GC, _1: c_ulong) -> c_int, + pub fn XSetClassHint (_3: *mut Display, _2: c_ulong, _1: *mut XClassHint) -> c_int, + pub fn XSetClipMask (_3: *mut Display, _2: GC, _1: c_ulong) -> c_int, + pub fn XSetClipOrigin (_4: *mut Display, _3: GC, _2: c_int, _1: c_int) -> c_int, + pub 
fn XSetClipRectangles (_7: *mut Display, _6: GC, _5: c_int, _4: c_int, _3: *mut XRectangle, _2: c_int, _1: c_int) -> c_int, + pub fn XSetCloseDownMode (_2: *mut Display, _1: c_int) -> c_int, + pub fn XSetCommand (_4: *mut Display, _3: c_ulong, _2: *mut *mut c_char, _1: c_int) -> c_int, + pub fn XSetDashes (_5: *mut Display, _4: GC, _3: c_int, _2: *const c_char, _1: c_int) -> c_int, + pub fn XSetErrorHandler (_1: Option c_int>) -> Option c_int>, + pub fn XSetFillRule (_3: *mut Display, _2: GC, _1: c_int) -> c_int, + pub fn XSetFillStyle (_3: *mut Display, _2: GC, _1: c_int) -> c_int, + pub fn XSetFont (_3: *mut Display, _2: GC, _1: c_ulong) -> c_int, + pub fn XSetFontPath (_3: *mut Display, _2: *mut *mut c_char, _1: c_int) -> c_int, + pub fn XSetForeground (_3: *mut Display, _2: GC, _1: c_ulong) -> c_int, + pub fn XSetFunction (_3: *mut Display, _2: GC, _1: c_int) -> c_int, + pub fn XSetGraphicsExposures (_3: *mut Display, _2: GC, _1: c_int) -> c_int, + pub fn XSetICFocus (_1: XIC) -> (), + pub fn XSetIconName (_3: *mut Display, _2: c_ulong, _1: *const c_char) -> c_int, + pub fn XSetIconSizes (_4: *mut Display, _3: c_ulong, _2: *mut XIconSize, _1: c_int) -> c_int, + pub fn XSetInputFocus (_4: *mut Display, _3: c_ulong, _2: c_int, _1: c_ulong) -> c_int, + pub fn XSetIOErrorHandler (_1: Option c_int>) -> Option c_int>, + pub fn XSetLineAttributes (_6: *mut Display, _5: GC, _4: c_uint, _3: c_int, _2: c_int, _1: c_int) -> c_int, + pub fn XSetLocaleModifiers (_1: *const c_char) -> *mut c_char, + pub fn XSetModifierMapping (_2: *mut Display, _1: *mut XModifierKeymap) -> c_int, + pub fn XSetNormalHints (_3: *mut Display, _2: c_ulong, _1: *mut XSizeHints) -> c_int, + pub fn XSetPlaneMask (_3: *mut Display, _2: GC, _1: c_ulong) -> c_int, + pub fn XSetPointerMapping (_3: *mut Display, _2: *const c_uchar, _1: c_int) -> c_int, + pub fn XSetRegion (_3: *mut Display, _2: GC, _1: Region) -> c_int, + pub fn XSetRGBColormaps (_5: *mut Display, _4: c_ulong, _3: *mut XStandardColormap, _2: c_int, _1: c_ulong) -> (), + pub fn XSetScreenSaver (_5: *mut Display, _4: c_int, _3: c_int, _2: c_int, _1: c_int) -> c_int, + pub fn XSetSelectionOwner (_4: *mut Display, _3: c_ulong, _2: c_ulong, _1: c_ulong) -> c_int, + pub fn XSetSizeHints (_4: *mut Display, _3: c_ulong, _2: *mut XSizeHints, _1: c_ulong) -> c_int, + pub fn XSetStandardColormap (_4: *mut Display, _3: c_ulong, _2: *mut XStandardColormap, _1: c_ulong) -> (), + pub fn XSetStandardProperties (_8: *mut Display, _7: c_ulong, _6: *const c_char, _5: *const c_char, _4: c_ulong, _3: *mut *mut c_char, _2: c_int, _1: *mut XSizeHints) -> c_int, + pub fn XSetState (_6: *mut Display, _5: GC, _4: c_ulong, _3: c_ulong, _2: c_int, _1: c_ulong) -> c_int, + pub fn XSetStipple (_3: *mut Display, _2: GC, _1: c_ulong) -> c_int, + pub fn XSetSubwindowMode (_3: *mut Display, _2: GC, _1: c_int) -> c_int, + pub fn XSetTextProperty (_4: *mut Display, _3: c_ulong, _2: *mut XTextProperty, _1: c_ulong) -> (), + pub fn XSetTile (_3: *mut Display, _2: GC, _1: c_ulong) -> c_int, + pub fn XSetTransientForHint (_3: *mut Display, _2: c_ulong, _1: c_ulong) -> c_int, + pub fn XSetTSOrigin (_4: *mut Display, _3: GC, _2: c_int, _1: c_int) -> c_int, + pub fn XSetWindowBackground (_3: *mut Display, _2: c_ulong, _1: c_ulong) -> c_int, + pub fn XSetWindowBackgroundPixmap (_3: *mut Display, _2: c_ulong, _1: c_ulong) -> c_int, + pub fn XSetWindowBorder (_3: *mut Display, _2: c_ulong, _1: c_ulong) -> c_int, + pub fn XSetWindowBorderPixmap (_3: *mut Display, _2: c_ulong, _1: c_ulong) -> c_int, + pub fn 
+ pub fn XSetWindowBorderWidth (_3: *mut Display, _2: c_ulong, _1: c_uint) -> c_int,
+ pub fn XSetWindowColormap (_3: *mut Display, _2: c_ulong, _1: c_ulong) -> c_int,
+ pub fn XSetWMClientMachine (_3: *mut Display, _2: c_ulong, _1: *mut XTextProperty) -> (),
+ pub fn XSetWMColormapWindows (_4: *mut Display, _3: c_ulong, _2: *mut c_ulong, _1: c_int) -> c_int,
+ pub fn XSetWMHints (_3: *mut Display, _2: c_ulong, _1: *mut XWMHints) -> c_int,
+ pub fn XSetWMIconName (_3: *mut Display, _2: c_ulong, _1: *mut XTextProperty) -> (),
+ pub fn XSetWMName (_3: *mut Display, _2: c_ulong, _1: *mut XTextProperty) -> (),
+ pub fn XSetWMNormalHints (_3: *mut Display, _2: c_ulong, _1: *mut XSizeHints) -> (),
+ pub fn XSetWMProperties (_9: *mut Display, _8: c_ulong, _7: *mut XTextProperty, _6: *mut XTextProperty, _5: *mut *mut c_char, _4: c_int, _3: *mut XSizeHints, _2: *mut XWMHints, _1: *mut XClassHint) -> (),
+ pub fn XSetWMProtocols (_4: *mut Display, _3: c_ulong, _2: *mut c_ulong, _1: c_int) -> c_int,
+ pub fn XSetWMSizeHints (_4: *mut Display, _3: c_ulong, _2: *mut XSizeHints, _1: c_ulong) -> (),
+ pub fn XSetZoomHints (_3: *mut Display, _2: c_ulong, _1: *mut XSizeHints) -> c_int,
+ pub fn XShrinkRegion (_3: Region, _2: c_int, _1: c_int) -> c_int,
+ pub fn XStoreBuffer (_4: *mut Display, _3: *const c_char, _2: c_int, _1: c_int) -> c_int,
+ pub fn XStoreBytes (_3: *mut Display, _2: *const c_char, _1: c_int) -> c_int,
+ pub fn XStoreColor (_3: *mut Display, _2: c_ulong, _1: *mut XColor) -> c_int,
+ pub fn XStoreColors (_4: *mut Display, _3: c_ulong, _2: *mut XColor, _1: c_int) -> c_int,
+ pub fn XStoreName (_3: *mut Display, _2: c_ulong, _1: *const c_char) -> c_int,
+ pub fn XStoreNamedColor (_5: *mut Display, _4: c_ulong, _3: *const c_char, _2: c_ulong, _1: c_int) -> c_int,
+ pub fn XStringListToTextProperty (_3: *mut *mut c_char, _2: c_int, _1: *mut XTextProperty) -> c_int,
+ pub fn XStringToKeysym (_1: *const c_char) -> c_ulong,
+ pub fn XSubImage (_5: *mut XImage, _4: c_int, _3: c_int, _2: c_uint, _1: c_uint) -> *mut XImage,
+ pub fn XSubtractRegion (_3: Region, _2: Region, _1: Region) -> c_int,
+ pub fn XSupportsLocale () -> c_int,
+ pub fn XSync (_2: *mut Display, _1: c_int) -> c_int,
+ pub fn XSynchronize (_2: *mut Display, _1: c_int) -> Option<unsafe extern "C" fn (*mut Display) -> c_int>,
+ pub fn XTextExtents (_7: *mut XFontStruct, _6: *const c_char, _5: c_int, _4: *mut c_int, _3: *mut c_int, _2: *mut c_int, _1: *mut XCharStruct) -> c_int,
+ pub fn XTextExtents16 (_7: *mut XFontStruct, _6: *const XChar2b, _5: c_int, _4: *mut c_int, _3: *mut c_int, _2: *mut c_int, _1: *mut XCharStruct) -> c_int,
+ pub fn XTextPropertyToStringList (_3: *mut XTextProperty, _2: *mut *mut *mut c_char, _1: *mut c_int) -> c_int,
+ pub fn XTextWidth (_3: *mut XFontStruct, _2: *const c_char, _1: c_int) -> c_int,
+ pub fn XTextWidth16 (_3: *mut XFontStruct, _2: *const XChar2b, _1: c_int) -> c_int,
+ pub fn XTranslateCoordinates (_8: *mut Display, _7: c_ulong, _6: c_ulong, _5: c_int, _4: c_int, _3: *mut c_int, _2: *mut c_int, _1: *mut c_ulong) -> c_int,
+ pub fn XUndefineCursor (_2: *mut Display, _1: c_ulong) -> c_int,
+ pub fn XUngrabButton (_4: *mut Display, _3: c_uint, _2: c_uint, _1: c_ulong) -> c_int,
+ pub fn XUngrabKey (_4: *mut Display, _3: c_int, _2: c_uint, _1: c_ulong) -> c_int,
+ pub fn XUngrabKeyboard (_2: *mut Display, _1: c_ulong) -> c_int,
+ pub fn XUngrabPointer (_2: *mut Display, _1: c_ulong) -> c_int,
+ pub fn XUngrabServer (_1: *mut Display) -> c_int,
+ pub fn XUninstallColormap (_2: *mut Display, _1: c_ulong) -> c_int,
+ pub fn XUnionRectWithRegion (_3: *mut XRectangle, _2: Region, _1: Region) -> c_int,
+ pub fn XUnionRegion (_3: Region, _2: Region, _1: Region) -> c_int,
+ pub fn XUnloadFont (_2: *mut Display, _1: c_ulong) -> c_int,
+ pub fn XUnlockDisplay (_1: *mut Display) -> (),
+ pub fn XUnmapSubwindows (_2: *mut Display, _1: c_ulong) -> c_int,
+ pub fn XUnmapWindow (_2: *mut Display, _1: c_ulong) -> c_int,
+ pub fn XUnregisterIMInstantiateCallback (_6: *mut Display, _5: XrmDatabase, _4: *mut c_char, _3: *mut c_char, _2: Option<unsafe extern "C" fn (*mut Display, XPointer, XPointer)>, _1: *mut c_char) -> c_int,
+ pub fn XUnsetICFocus (_1: XIC) -> (),
+ pub fn Xutf8DrawImageString (_8: *mut Display, _7: c_ulong, _6: XFontSet, _5: GC, _4: c_int, _3: c_int, _2: *const c_char, _1: c_int) -> (),
+ pub fn Xutf8DrawString (_8: *mut Display, _7: c_ulong, _6: XFontSet, _5: GC, _4: c_int, _3: c_int, _2: *const c_char, _1: c_int) -> (),
+ pub fn Xutf8DrawText (_7: *mut Display, _6: c_ulong, _5: GC, _4: c_int, _3: c_int, _2: *mut XmbTextItem, _1: c_int) -> (),
+ pub fn Xutf8LookupString (_6: XIC, _5: *mut XKeyEvent, _4: *mut c_char, _3: c_int, _2: *mut c_ulong, _1: *mut c_int) -> c_int,
+ pub fn Xutf8ResetIC (_1: XIC) -> *mut c_char,
+ pub fn Xutf8SetWMProperties (_9: *mut Display, _8: c_ulong, _7: *const c_char, _6: *const c_char, _5: *mut *mut c_char, _4: c_int, _3: *mut XSizeHints, _2: *mut XWMHints, _1: *mut XClassHint) -> (),
+ pub fn Xutf8TextEscapement (_3: XFontSet, _2: *const c_char, _1: c_int) -> c_int,
+ pub fn Xutf8TextExtents (_5: XFontSet, _4: *const c_char, _3: c_int, _2: *mut XRectangle, _1: *mut XRectangle) -> c_int,
+ pub fn Xutf8TextListToTextProperty (_5: *mut Display, _4: *mut *mut c_char, _3: c_int, _2: XICCEncodingStyle, _1: *mut XTextProperty) -> c_int,
+ pub fn Xutf8TextPerCharExtents (_9: XFontSet, _8: *const c_char, _7: c_int, _6: *mut XRectangle, _5: *mut XRectangle, _4: c_int, _3: *mut c_int, _2: *mut XRectangle, _1: *mut XRectangle) -> c_int,
+ pub fn Xutf8TextPropertyToTextList (_4: *mut Display, _3: *const XTextProperty, _2: *mut *mut *mut c_char, _1: *mut c_int) -> c_int,
+ pub fn XVendorRelease (_1: *mut Display) -> c_int,
+ pub fn XVisualIDFromVisual (_1: *mut Visual) -> c_ulong,
+ pub fn XWarpPointer (_9: *mut Display, _8: c_ulong, _7: c_ulong, _6: c_int, _5: c_int, _4: c_uint, _3: c_uint, _2: c_int, _1: c_int) -> c_int,
+ pub fn XwcDrawImageString (_8: *mut Display, _7: c_ulong, _6: XFontSet, _5: GC, _4: c_int, _3: c_int, _2: *const wchar_t, _1: c_int) -> (),
+ pub fn XwcDrawString (_8: *mut Display, _7: c_ulong, _6: XFontSet, _5: GC, _4: c_int, _3: c_int, _2: *const wchar_t, _1: c_int) -> (),
+ pub fn XwcDrawText (_7: *mut Display, _6: c_ulong, _5: GC, _4: c_int, _3: c_int, _2: *mut XwcTextItem, _1: c_int) -> (),
+ pub fn XwcFreeStringList (_1: *mut *mut wchar_t) -> (),
+ pub fn XwcLookupString (_6: XIC, _5: *mut XKeyEvent, _4: *mut wchar_t, _3: c_int, _2: *mut c_ulong, _1: *mut c_int) -> c_int,
+ pub fn XwcResetIC (_1: XIC) -> *mut wchar_t,
+ pub fn XwcTextEscapement (_3: XFontSet, _2: *const wchar_t, _1: c_int) -> c_int,
+ pub fn XwcTextExtents (_5: XFontSet, _4: *const wchar_t, _3: c_int, _2: *mut XRectangle, _1: *mut XRectangle) -> c_int,
+ pub fn XwcTextListToTextProperty (_5: *mut Display, _4: *mut *mut wchar_t, _3: c_int, _2: XICCEncodingStyle, _1: *mut XTextProperty) -> c_int,
+ pub fn XwcTextPerCharExtents (_9: XFontSet, _8: *const wchar_t, _7: c_int, _6: *mut XRectangle, _5: *mut XRectangle, _4: c_int, _3: *mut c_int, _2: *mut XRectangle, _1: *mut XRectangle) -> c_int,
+ pub fn XwcTextPropertyToTextList (_4: *mut Display, _3:
*const XTextProperty, _2: *mut *mut *mut wchar_t, _1: *mut c_int) -> c_int, + pub fn XWhitePixel (_2: *mut Display, _1: c_int) -> c_ulong, + pub fn XWhitePixelOfScreen (_1: *mut Screen) -> c_ulong, + pub fn XWidthMMOfScreen (_1: *mut Screen) -> c_int, + pub fn XWidthOfScreen (_1: *mut Screen) -> c_int, + pub fn XWindowEvent (_4: *mut Display, _3: c_ulong, _2: c_long, _1: *mut XEvent) -> c_int, + pub fn XWithdrawWindow (_3: *mut Display, _2: c_ulong, _1: c_int) -> c_int, + pub fn XWMGeometry (_11: *mut Display, _10: c_int, _9: *const c_char, _8: *const c_char, _7: c_uint, _6: *mut XSizeHints, _5: *mut c_int, _4: *mut c_int, _3: *mut c_int, _2: *mut c_int, _1: *mut c_int) -> c_int, + pub fn XWriteBitmapFile (_7: *mut Display, _6: *const c_char, _5: c_ulong, _4: c_uint, _3: c_uint, _2: c_int, _1: c_int) -> c_int, + pub fn XXorRegion (_3: Region, _2: Region, _1: Region) -> c_int, +variadic: + pub fn XCreateIC (_1: XIM) -> XIC, + pub fn XCreateOC (_1: XOM) -> XFontSet, + pub fn XGetICValues (_1: XIC) -> *mut c_char, + pub fn XGetIMValues (_1: XIM) -> *mut c_char, + pub fn XGetOCValues (_1: XFontSet) -> *mut c_char, + pub fn XGetOMValues (_1: XOM) -> *mut c_char, + pub fn XSetICValues (_1: XIC) -> *mut c_char, + pub fn XSetIMValues (_1: XIM) -> *mut c_char, + pub fn XSetOCValues (_1: XFontSet) -> *mut c_char, + pub fn XSetOMValues (_1: XOM) -> *mut c_char, + pub fn XVaCreateNestedList (_1: c_int) -> *mut c_void, +globals: +} + + + + + + + + +pub type Atom = XID; +pub type Bool = c_int; +pub type Colormap = XID; +pub type Cursor = XID; +pub type Drawable = XID; +pub type Font = XID; +pub type GContext = XID; +pub type KeyCode = c_uchar; +pub type KeySym = XID; +pub type Mask = c_ulong; +pub type Pixmap = XID; +pub type Status = Bool; +pub type Time = c_ulong; +pub type VisualID = XID; +pub type Window = XID; +pub type XID = c_ulong; +pub type XPointer = *mut c_char; + + +pub enum _XDisplay {} +pub enum xError {} +pub enum xEvent {} +pub enum _XGC {} +pub enum _XIC {} +pub enum _XIM {} +pub enum _XRegion {} +pub enum _XOC {} +pub enum _XOM {} +pub enum _XrmHashBucketRec {} + + +#[repr(C)] pub struct _XcmsCCC; +#[repr(C)] pub struct XcmsColor; +#[repr(C)] pub struct _XcmsColorSpace; +#[repr(C)] pub struct _XcmsFunctionSet; +#[repr(C)] pub struct _XkbAction; +#[repr(C)] pub struct _XkbBounds; +#[repr(C)] pub struct _XkbChanges; +#[repr(C)] pub struct _XkbClientMapRec; +#[repr(C)] pub struct _XkbColor; +#[repr(C)] pub struct _XkbComponentList; +#[repr(C)] pub struct _XkbComponentNames; +#[repr(C)] pub struct _XkbControls; +#[repr(C)] pub struct _XkbControlsChanges; +#[repr(C)] pub struct _XkbControlsNotify; +#[repr(C)] pub struct _XkbDeviceChanges; +#[repr(C)] pub struct _XkbDeviceInfo; +#[repr(C)] pub struct _XkbDeviceLedInfo; +#[repr(C)] pub struct _XkbDoodad; +#[repr(C)] pub struct _XkbExtensionDeviceNotify; +#[repr(C)] pub struct _XkbGeometry; +#[repr(C)] pub struct _XkbGeometrySizes; +#[repr(C)] pub struct _XkbIndicatorMapRec; +#[repr(C)] pub struct _XkbKey; +#[repr(C)] pub struct _XkbKeyType; +#[repr(C)] pub struct _XkbMapChanges; +#[repr(C)] pub struct _XkbMods; +#[repr(C)] pub struct _XkbNameChanges; +#[repr(C)] pub struct _XkbNamesNotify; +#[repr(C)] pub struct _XkbOutline; +#[repr(C)] pub struct _XkbOverlay; +#[repr(C)] pub struct _XkbOverlayKey; +#[repr(C)] pub struct _XkbOverlayRow; +#[repr(C)] pub struct _XkbProperty; +#[repr(C)] pub struct _XkbRow; +#[repr(C)] pub struct _XkbSection; +#[repr(C)] pub struct _XkbServerMapRec; +#[repr(C)] pub struct _XkbShape; +#[repr(C)] pub struct 
_XkbStateRec;
+#[repr(C)] pub struct _XkbSymInterpretRec;
+
+
+pub type XEDataObject = *mut c_void;
+
+
+pub type Display = _XDisplay;
+pub type GC = *mut _XGC;
+pub type Region = *mut _XRegion;
+pub type XcmsCCC = *mut _XcmsCCC;
+pub type XcmsColorSpace = _XcmsColorSpace;
+pub type XcmsFunctionSet = _XcmsFunctionSet;
+pub type XContext = c_int;
+pub type XFontSet = *mut _XOC;
+pub type XIC = *mut _XIC;
+pub type XIM = *mut _XIM;
+pub type XkbAction = _XkbAction;
+pub type XkbBoundsPtr = *mut _XkbBounds;
+pub type XkbChangesPtr = *mut _XkbChanges;
+pub type XkbClientMapPtr = *mut _XkbClientMapRec;
+pub type XkbColorPtr = *mut _XkbColor;
+pub type XkbCompatMapPtr = *mut _XkbCompatMapRec;
+pub type XkbComponentListPtr = *mut _XkbComponentList;
+pub type XkbComponentNamesPtr = *mut _XkbComponentNames;
+pub type XkbControlsChangesPtr = *mut _XkbControlsChanges;
+pub type XkbControlsNotifyEvent = _XkbControlsNotify;
+pub type XkbControlsPtr = *mut _XkbControls;
+pub type XkbDescPtr = *mut _XkbDesc;
+pub type XkbDeviceChangesPtr = *mut _XkbDeviceChanges;
+pub type XkbDeviceInfoPtr = *mut _XkbDeviceInfo;
+pub type XkbDeviceLedInfoPtr = *mut _XkbDeviceLedInfo;
+pub type XkbDoodadPtr = *mut _XkbDoodad;
+pub type XkbExtensionDeviceNotifyEvent = _XkbExtensionDeviceNotify;
+pub type XkbGeometryPtr = *mut _XkbGeometry;
+pub type XkbGeometrySizesPtr = *mut _XkbGeometrySizes;
+pub type XkbIndicatorMapPtr = *mut _XkbIndicatorMapRec;
+pub type XkbIndicatorMapRec = _XkbIndicatorMapRec;
+pub type XkbIndicatorPtr = *mut _XkbIndicatorRec;
+pub type XkbKeyTypePtr = *mut _XkbKeyType;
+pub type XkbMapChangesPtr = *mut _XkbMapChanges;
+pub type XkbMapNotifyEvent = _XkbMapNotifyEvent;
+pub type XkbModsPtr = *mut _XkbMods;
+pub type XkbModsRec = _XkbMods;
+pub type XkbNameChangesPtr = *mut _XkbNameChanges;
+pub type XkbNamesNotifyEvent = _XkbNamesNotify;
+pub type XkbNamesPtr = *mut _XkbNamesRec;
+pub type XkbKeyAliasPtr = *mut _XkbKeyAliasRec;
+pub type XkbKeyNamePtr = *mut _XkbKeyNameRec;
+pub type XkbKeyPtr = *mut _XkbKey;
+pub type XkbOutlinePtr = *mut _XkbOutline;
+pub type XkbOverlayKeyPtr = *mut _XkbOverlayKey;
+pub type XkbOverlayPtr = *mut _XkbOverlay;
+pub type XkbOverlayRowPtr = *mut _XkbOverlayRow;
+pub type XkbPropertyPtr = *mut _XkbProperty;
+pub type XkbRowPtr = *mut _XkbRow;
+pub type XkbSectionPtr = *mut _XkbSection;
+pub type XkbServerMapPtr = *mut _XkbServerMapRec;
+pub type XkbShapePtr = *mut _XkbShape;
+pub type XkbStatePtr = *mut _XkbStateRec;
+pub type XkbSymInterpretPtr = *mut _XkbSymInterpretRec;
+pub type XOM = *mut _XOM;
+pub type XrmDatabase = *mut _XrmHashBucketRec;
+pub type XrmOptionDescList = *mut XrmOptionDescRec;
+
+
+pub type XConnectionWatchProc = Option<unsafe extern "C" fn (*mut Display, XPointer, c_int, Bool, *mut XPointer)>;
+pub type XIMProc = Option<unsafe extern "C" fn (XIM, XPointer, XPointer)>;
+pub type XICProc = Option<unsafe extern "C" fn (XIC, XPointer, XPointer) -> Bool>;
+
+
+pub type XICCEncodingStyle = c_int;
+pub type XOrientation = c_int;
+pub type XrmBinding = c_int;
+pub type XrmOptionKind = c_int;
+
+#[allow(dead_code)]
+#[cfg(test)]
+#[repr(C)]
+enum TestEnum {
+ Variant1,
+ Variant2,
+}
+
+#[test]
+fn enum_size_test () {
+ assert!(::std::mem::size_of::<TestEnum>() == ::std::mem::size_of::<c_int>());
+}
+
+
+
+
+
+
+
+#[derive(Clone, Copy)]
+#[repr(C)]
+pub union XEvent {
+ pub type_: c_int,
+ pub any: XAnyEvent,
+ pub button: XButtonEvent,
+ pub circulate: XCirculateEvent,
+ pub circulate_request: XCirculateRequestEvent,
+ pub client_message: XClientMessageEvent,
+ pub colormap: XColormapEvent,
+ pub configure: XConfigureEvent,
+ pub configure_request: XConfigureRequestEvent,
+ pub create_window: XCreateWindowEvent,
+ pub crossing:
XCrossingEvent, + pub destroy_window: XDestroyWindowEvent, + pub error: XErrorEvent, + pub expose: XExposeEvent, + pub focus_change: XFocusChangeEvent, + pub generic_event_cookie: XGenericEventCookie, + pub graphics_expose: XGraphicsExposeEvent, + pub gravity: XGravityEvent, + pub key: XKeyEvent, + pub keymap: XKeymapEvent, + pub map: XMapEvent, + pub mapping: XMappingEvent, + pub map_request: XMapRequestEvent, + pub motion: XMotionEvent, + pub no_expose: XNoExposeEvent, + pub property: XPropertyEvent, + pub reparent: XReparentEvent, + pub resize_request: XResizeRequestEvent, + pub selection_clear: XSelectionClearEvent, + pub selection: XSelectionEvent, + pub selection_request: XSelectionRequestEvent, + pub unmap: XUnmapEvent, + pub visibility: XVisibilityEvent, + pub pad: [c_long; 24], + + pub xf86vm_notify: xf86vmode::XF86VidModeNotifyEvent, + + pub xrr_screen_change_notify: xrandr::XRRScreenChangeNotifyEvent, + pub xrr_notify: xrandr::XRRNotifyEvent, + pub xrr_output_change_notify: xrandr::XRROutputChangeNotifyEvent, + pub xrr_crtc_change_notify: xrandr::XRRCrtcChangeNotifyEvent, + pub xrr_output_property_notify: xrandr::XRROutputPropertyNotifyEvent, + pub xrr_provider_change_notify: xrandr::XRRProviderChangeNotifyEvent, + pub xrr_provider_property_notify: xrandr::XRRProviderPropertyNotifyEvent, + pub xrr_resource_change_notify: xrandr::XRRResourceChangeNotifyEvent, + + pub xss_notify: xss::XScreenSaverNotifyEvent, +} + +impl XEvent { + pub fn get_type (&self) -> c_int { + unsafe { + self.type_ + } + } +} + +impl fmt::Debug for XEvent { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + let mut d = f.debug_struct("XEvent"); + unsafe { + match self.type_ { + KeyPress => d.field("key", &self.key), + KeyRelease => d.field("key", &self.key), + ButtonPress => d.field("button", &self.button), + ButtonRelease => d.field("button", &self.button), + MotionNotify => d.field("motion", &self.motion), + EnterNotify => d.field("crossing", &self.crossing), + LeaveNotify => d.field("crossing", &self.crossing), + FocusIn => d.field("focus_change", &self.focus_change), + FocusOut => d.field("focus_change", &self.focus_change), + KeymapNotify => d.field("keymap", &self.keymap), + Expose => d.field("expose", &self.expose), + GraphicsExpose => d.field("graphics_expose", &self.graphics_expose), + NoExpose => d.field("no_expose", &self.no_expose), + VisibilityNotify => d.field("visibility", &self.visibility), + CreateNotify => d.field("create_window", &self.create_window), + DestroyNotify => d.field("destroy_window", &self.destroy_window), + UnmapNotify => d.field("unmap", &self.unmap), + MapNotify => d.field("map", &self.map), + MapRequest => d.field("map_request", &self.map_request), + ReparentNotify => d.field("reparent", &self.reparent), + ConfigureNotify => d.field("configure", &self.configure), + ConfigureRequest => d.field("configure_request", &self.configure_request), + GravityNotify => d.field("gravity", &self.gravity), + ResizeRequest => d.field("resize_request", &self.resize_request), + CirculateNotify => d.field("circulate", &self.circulate), + CirculateRequest => d.field("circulate_request", &self.circulate_request), + PropertyNotify => d.field("property", &self.property), + SelectionClear => d.field("selection_clear", &self.selection_clear), + SelectionRequest => d.field("selection_request", &self.selection_request), + SelectionNotify => d.field("selection", &self.selection), + ColormapNotify => d.field("colormap", &self.colormap), + ClientMessage => d.field("client_message", 
&self.client_message),
+ MappingNotify => d.field("mapping", &self.mapping),
+ GenericEvent => d.field("generic_event_cookie", &self.generic_event_cookie),
+ _ => d.field("any", &self.any),
+ }
+ }.finish()
+ }
+}
+
+macro_rules! event_conversions_and_tests {
+ { $($field:ident: $ty:ty,)* } => {
+ #[test]
+ fn xevent_size_test () {
+ use std::mem::size_of;
+ let xevent_size = size_of::<XEvent>();
+ $(assert!(xevent_size >= size_of::<$ty>());)*
+ }
+
+ $(
+ impl AsMut<$ty> for XEvent {
+ fn as_mut (&mut self) -> &mut $ty {
+ unsafe { &mut self.$field }
+ }
+ }
+
+ impl AsRef<$ty> for XEvent {
+ fn as_ref (&self) -> &$ty {
+ unsafe { &self.$field }
+ }
+ }
+
+ impl From<$ty> for XEvent {
+ fn from (other: $ty) -> XEvent {
+ XEvent{ $field: other }
+ }
+ }
+
+ impl<'a> From<&'a $ty> for XEvent {
+ fn from (other: &'a $ty) -> XEvent {
+ XEvent{ $field: other.clone() }
+ }
+ }
+
+ impl From<XEvent> for $ty {
+ fn from (xevent: XEvent) -> $ty {
+ unsafe { xevent.$field }
+ }
+ }
+
+ impl<'a> From<&'a XEvent> for $ty {
+ fn from (xevent: &'a XEvent) -> $ty {
+ unsafe { xevent.$field }
+ }
+ }
+ )*
+ };
+}
+
+event_conversions_and_tests! {
+ any: XAnyEvent,
+ button: XButtonEvent,
+ circulate: XCirculateEvent,
+ circulate_request: XCirculateRequestEvent,
+ client_message: XClientMessageEvent,
+ colormap: XColormapEvent,
+ configure: XConfigureEvent,
+ configure_request: XConfigureRequestEvent,
+ create_window: XCreateWindowEvent,
+ crossing: XCrossingEvent,
+ destroy_window: XDestroyWindowEvent,
+ error: XErrorEvent,
+ expose: XExposeEvent,
+ focus_change: XFocusChangeEvent,
+ generic_event_cookie: XGenericEventCookie,
+ graphics_expose: XGraphicsExposeEvent,
+ gravity: XGravityEvent,
+ key: XKeyEvent,
+ keymap: XKeymapEvent,
+ map: XMapEvent,
+ mapping: XMappingEvent,
+ map_request: XMapRequestEvent,
+ motion: XMotionEvent,
+ no_expose: XNoExposeEvent,
+ property: XPropertyEvent,
+ reparent: XReparentEvent,
+ resize_request: XResizeRequestEvent,
+ selection_clear: XSelectionClearEvent,
+ selection: XSelectionEvent,
+ selection_request: XSelectionRequestEvent,
+ unmap: XUnmapEvent,
+ visibility: XVisibilityEvent,
+}
+
+#[derive(Debug, Clone, Copy, PartialEq)]
+#[repr(C)]
+pub struct XAnyEvent {
+ pub type_: c_int,
+ pub serial: c_ulong,
+ pub send_event: Bool,
+ pub display: *mut Display,
+ pub window: Window,
+}
+
+#[derive(Debug, Clone, Copy, PartialEq)]
+#[repr(C)]
+pub struct XButtonEvent {
+ pub type_: c_int,
+ pub serial: c_ulong,
+ pub send_event: Bool,
+ pub display: *mut Display,
+ pub window: Window,
+ pub root: Window,
+ pub subwindow: Window,
+ pub time: Time,
+ pub x: c_int,
+ pub y: c_int,
+ pub x_root: c_int,
+ pub y_root: c_int,
+ pub state: c_uint,
+ pub button: c_uint,
+ pub same_screen: Bool,
+}
+pub type XButtonPressedEvent = XButtonEvent;
+pub type XButtonReleasedEvent = XButtonEvent;
+
+#[derive(Debug, Clone, Copy, PartialEq)]
+#[repr(C)]
+pub struct XCirculateEvent {
+ pub type_: c_int,
+ pub serial: c_ulong,
+ pub send_event: Bool,
+ pub display: *mut Display,
+ pub event: Window,
+ pub window: Window,
+ pub place: c_int,
+}
+
+#[derive(Debug, Clone, Copy, PartialEq)]
+#[repr(C)]
+pub struct XCirculateRequestEvent {
+ pub type_: c_int,
+ pub serial: c_ulong,
+ pub send_event: Bool,
+ pub display: *mut Display,
+ pub parent: Window,
+ pub window: Window,
+ pub place: c_int,
+}
+
+#[derive(Debug, Clone, Copy, PartialEq)]
+#[repr(C)]
+pub struct XClientMessageEvent {
+ pub type_: c_int,
+ pub serial: c_ulong,
+ pub send_event: Bool,
+ pub display: *mut Display,
+ pub window: Window,
+ pub
message_type: Atom, + pub format: c_int, + pub data: ClientMessageData, +} + +#[derive(Debug, Clone, Copy, PartialEq)] +#[repr(C)] +pub struct XColormapEvent { + pub type_: c_int, + pub serial: c_ulong, + pub send_event: Bool, + pub display: *mut Display, + pub window: Window, + pub colormap: Colormap, + pub new: Bool, + pub state: c_int, +} + +#[derive(Debug, Clone, Copy, PartialEq)] +#[repr(C)] +pub struct XConfigureEvent { + pub type_: c_int, + pub serial: c_ulong, + pub send_event: Bool, + pub display: *mut Display, + pub event: Window, + pub window: Window, + pub x: c_int, + pub y: c_int, + pub width: c_int, + pub height: c_int, + pub border_width: c_int, + pub above: Window, + pub override_redirect: Bool, +} + +#[derive(Debug, Clone, Copy, PartialEq)] +#[repr(C)] +pub struct XConfigureRequestEvent { + pub type_: c_int, + pub serial: c_ulong, + pub send_event: Bool, + pub display: *mut Display, + pub parent: Window, + pub window: Window, + pub x: c_int, + pub y: c_int, + pub width: c_int, + pub height: c_int, + pub border_width: c_int, + pub above: Window, + pub detail: c_int, + pub value_mask: c_ulong, +} + +#[derive(Debug, Clone, Copy, PartialEq)] +#[repr(C)] +pub struct XCreateWindowEvent { + pub type_: c_int, + pub serial: c_ulong, + pub send_event: Bool, + pub display: *mut Display, + pub parent: Window, + pub window: Window, + pub x: c_int, + pub y: c_int, + pub width: c_int, + pub height: c_int, + pub border_width: c_int, + pub override_redirect: Bool, +} + +#[derive(Debug, Clone, Copy, PartialEq)] +#[repr(C)] +pub struct XCrossingEvent { + pub type_: c_int, + pub serial: c_ulong, + pub send_event: Bool, + pub display: *mut Display, + pub window: Window, + pub root: Window, + pub subwindow: Window, + pub time: Time, + pub x: c_int, + pub y: c_int, + pub x_root: c_int, + pub y_root: c_int, + pub mode: c_int, + pub detail: c_int, + pub same_screen: Bool, + pub focus: Bool, + pub state: c_uint, +} +pub type XEnterWindowEvent = XCrossingEvent; +pub type XLeaveWindowEvent = XCrossingEvent; + +#[derive(Debug, Clone, Copy, PartialEq)] +#[repr(C)] +pub struct XDestroyWindowEvent { + pub type_: c_int, + pub serial: c_ulong, + pub send_event: Bool, + pub display: *mut Display, + pub event: Window, + pub window: Window, +} + +#[derive(Debug, Clone, Copy, PartialEq)] +#[repr(C)] +pub struct XErrorEvent { + pub type_: c_int, + pub display: *mut Display, + pub resourceid: XID, + pub serial: c_ulong, + pub error_code: c_uchar, + pub request_code: c_uchar, + pub minor_code: c_uchar, +} + +#[derive(Debug, Clone, Copy, PartialEq)] +#[repr(C)] +pub struct XExposeEvent { + pub type_: c_int, + pub serial: c_ulong, + pub send_event: Bool, + pub display: *mut Display, + pub window: Window, + pub x: c_int, + pub y: c_int, + pub width: c_int, + pub height: c_int, + pub count: c_int, +} + +#[derive(Debug, Clone, Copy, PartialEq)] +#[repr(C)] +pub struct XFocusChangeEvent { + pub type_: c_int, + pub serial: c_ulong, + pub send_event: Bool, + pub display: *mut Display, + pub window: Window, + pub mode: c_int, + pub detail: c_int, +} +pub type XFocusInEvent = XFocusChangeEvent; +pub type XFocusOutEvent = XFocusChangeEvent; + +#[derive(Debug, Clone, Copy, PartialEq)] +#[repr(C)] +pub struct XGraphicsExposeEvent { + pub type_: c_int, + pub serial: c_ulong, + pub send_event: Bool, + pub display: *mut Display, + pub drawable: Drawable, + pub x: c_int, + pub y: c_int, + pub width: c_int, + pub height: c_int, + pub count: c_int, + pub major_code: c_int, + pub minor_code: c_int, +} + +#[derive(Debug, Clone, Copy, 
PartialEq)] +#[repr(C)] +pub struct XGravityEvent { + pub type_: c_int, + pub serial: c_ulong, + pub send_event: Bool, + pub display: *mut Display, + pub event: Window, + pub window: Window, + pub x: c_int, + pub y: c_int, +} + +#[derive(Debug, Clone, Copy, PartialEq)] +#[repr(C)] +pub struct XKeyEvent { + pub type_: c_int, + pub serial: c_ulong, + pub send_event: Bool, + pub display: *mut Display, + pub window: Window, + pub root: Window, + pub subwindow: Window, + pub time: Time, + pub x: c_int, + pub y: c_int, + pub x_root: c_int, + pub y_root: c_int, + pub state: c_uint, + pub keycode: c_uint, + pub same_screen: Bool, +} +pub type XKeyPressedEvent = XKeyEvent; +pub type XKeyReleasedEvent = XKeyEvent; + +#[derive(Debug, Clone, Copy, PartialEq)] +#[repr(C)] +pub struct XKeymapEvent { + pub type_: c_int, + pub serial: c_ulong, + pub send_event: Bool, + pub display: *mut Display, + pub window: Window, + pub key_vector: [c_char; 32], +} + +#[derive(Debug, Clone, Copy, PartialEq)] +#[repr(C)] +pub struct XMapEvent { + pub type_: c_int, + pub serial: c_ulong, + pub send_event: Bool, + pub display: *mut Display, + pub event: Window, + pub window: Window, + pub override_redirect: Bool, +} + +#[derive(Debug, Clone, Copy, PartialEq)] +#[repr(C)] +pub struct XMappingEvent { + pub type_: c_int, + pub serial: c_ulong, + pub send_event: Bool, + pub display: *mut Display, + pub event: Window, + pub request: c_int, + pub first_keycode: c_int, + pub count: c_int, +} + +#[derive(Debug, Clone, Copy, PartialEq)] +#[repr(C)] +pub struct XMapRequestEvent { + pub type_: c_int, + pub serial: c_ulong, + pub send_event: Bool, + pub display: *mut Display, + pub parent: Window, + pub window: Window, +} + +#[derive(Debug, Clone, Copy, PartialEq)] +#[repr(C)] +pub struct XMotionEvent { + pub type_: c_int, + pub serial: c_ulong, + pub send_event: Bool, + pub display: *mut Display, + pub window: Window, + pub root: Window, + pub subwindow: Window, + pub time: Time, + pub x: c_int, + pub y: c_int, + pub x_root: c_int, + pub y_root: c_int, + pub state: c_uint, + pub is_hint: c_char, + pub same_screen: Bool, +} +pub type XPointerMovedEvent = XMotionEvent; + +#[derive(Debug, Clone, Copy, PartialEq)] +#[repr(C)] +pub struct XNoExposeEvent { + pub type_: c_int, + pub serial: c_ulong, + pub send_event: Bool, + pub display: *mut Display, + pub drawable: Drawable, + pub major_code: c_int, + pub minor_code: c_int, +} + +#[derive(Debug, Clone, Copy, PartialEq)] +#[repr(C)] +pub struct XPropertyEvent { + pub type_: c_int, + pub serial: c_ulong, + pub send_event: Bool, + pub display: *mut Display, + pub window: Window, + pub atom: Atom, + pub time: Time, + pub state: c_int, +} + +#[derive(Debug, Clone, Copy, PartialEq)] +#[repr(C)] +pub struct XReparentEvent { + pub type_: c_int, + pub serial: c_ulong, + pub send_event: Bool, + pub display: *mut Display, + pub event: Window, + pub window: Window, + pub parent: Window, + pub x: c_int, + pub y: c_int, + pub override_redirect: Bool, +} + +#[derive(Debug, Clone, Copy, PartialEq)] +#[repr(C)] +pub struct XResizeRequestEvent { + pub type_: c_int, + pub serial: c_ulong, + pub send_event: Bool, + pub display: *mut Display, + pub window: Window, + pub width: c_int, + pub height: c_int, +} + +#[derive(Debug, Clone, Copy, PartialEq)] +#[repr(C)] +pub struct XSelectionClearEvent { + pub type_: c_int, + pub serial: c_ulong, + pub send_event: Bool, + pub display: *mut Display, + pub window: Window, + pub selection: Atom, + pub time: Time, +} + +#[derive(Debug, Clone, Copy, PartialEq)] 
+#[repr(C)] +pub struct XSelectionEvent { + pub type_: c_int, + pub serial: c_ulong, + pub send_event: Bool, + pub display: *mut Display, + pub requestor: Window, + pub selection: Atom, + pub target: Atom, + pub property: Atom, + pub time: Time, +} + +#[derive(Debug, Clone, Copy, PartialEq)] +#[repr(C)] +pub struct XSelectionRequestEvent { + pub type_: c_int, + pub serial: c_ulong, + pub send_event: Bool, + pub display: *mut Display, + pub owner: Window, + pub requestor: Window, + pub selection: Atom, + pub target: Atom, + pub property: Atom, + pub time: Time, +} + +#[derive(Debug, Clone, Copy, PartialEq)] +#[repr(C)] +pub struct XUnmapEvent { + pub type_: c_int, + pub serial: c_ulong, + pub send_event: Bool, + pub display: *mut Display, + pub event: Window, + pub window: Window, + pub from_configure: Bool, +} + +#[derive(Debug, Clone, Copy, PartialEq)] +#[repr(C)] +pub struct XVisibilityEvent { + pub type_: c_int, + pub serial: c_ulong, + pub send_event: Bool, + pub display: *mut Display, + pub window: Window, + pub state: c_int, +} + + + + + + + +#[repr(C)] +pub struct _XkbCompatMapRec { + pub sym_interpret: XkbSymInterpretPtr, + pub groups: [XkbModsRec; XkbNumKbdGroups], + pub num_si: c_ushort, + pub size_si: c_ushort, +} + +#[derive(Debug, Clone, Copy, PartialEq)] +#[repr(C)] +pub struct _XkbDesc { + pub dpy: *mut Display, + pub flags: c_ushort, + pub device_spec: c_ushort, + pub min_key_code: KeyCode, + pub max_key_code: KeyCode, + pub ctrls: XkbControlsPtr, + pub server: XkbServerMapPtr, + pub map: XkbClientMapPtr, + pub indicators: XkbIndicatorPtr, + pub names: XkbNamesPtr, + pub compat: XkbCompatMapPtr, + pub geom: XkbGeometryPtr, +} + +#[repr(C)] +pub struct _XkbIndicatorRec { + pub phys_indicators: c_ulong, + pub maps: [XkbIndicatorMapRec; XkbNumIndicators], +} + +#[derive(Debug, Clone, Copy, PartialEq)] +#[repr(C)] +pub struct _XkbKeyAliasRec { + pub real: [c_char; XkbKeyNameLength], + pub alias: [c_char; XkbKeyNameLength], +} + +#[derive(Debug, Clone, Copy, PartialEq)] +#[repr(C)] +pub struct _XkbKeyNameRec { + pub name: [c_char; XkbKeyNameLength], +} + +#[derive(Debug, Clone, Copy, PartialEq)] +#[repr(C)] +pub struct _XkbNamesRec { + pub keycodes: Atom, + pub geometry: Atom, + pub symbols: Atom, + pub types: Atom, + pub compat: Atom, + pub vmods: [Atom; XkbNumVirtualMods], + pub indicators: [Atom; XkbNumIndicators], + pub groups: [Atom; XkbNumKbdGroups], + pub keys: XkbKeyNamePtr, + pub key_aliases: XkbKeyAliasPtr, + pub radio_groups: *mut Atom, + pub phys_symbols: Atom, + pub num_keys: c_uchar, + pub num_key_aliases: c_uchar, + pub num_rg: c_ushort, +} + + + + + + + +#[derive(Debug, Clone, Copy, PartialEq)] +#[repr(C)] +pub struct XkbAnyEvent { + pub type_: c_int, + pub serial: c_ulong, + pub send_event: Bool, + pub display: *mut Display, + pub time: Time, + pub xkb_type: c_int, + pub device: c_uint, +} + +#[derive(Debug, Clone, Copy, PartialEq)] +#[repr(C)] +pub struct XkbNewKeyboardNotifyEvent { + pub type_: c_int, + pub serial: c_ulong, + pub send_event: Bool, + pub display: *mut Display, + pub time: Time, + pub xkb_type: c_int, + pub device: c_int, + pub old_device: c_int, + pub min_key_code: c_int, + pub max_key_code: c_int, + pub old_min_key_code: c_int, + pub old_max_key_code: c_int, + pub changed: c_uint, + pub req_major: c_char, + pub req_minor: c_char, +} + +#[derive(Debug, Clone, Copy, PartialEq)] +#[repr(C)] +pub struct _XkbMapNotifyEvent { + pub type_: c_int, + pub serial: c_ulong, + pub send_event: Bool, + pub display: *mut Display, + pub time: Time, + pub 
xkb_type: c_int, + pub device: c_int, + pub changed: c_uint, + pub flags: c_uint, + pub first_type: c_int, + pub num_types: c_int, + pub min_key_code: KeyCode, + pub max_key_code: KeyCode, + pub first_key_sym: KeyCode, + pub first_key_act: KeyCode, + pub first_key_bahavior: KeyCode, + pub first_key_explicit: KeyCode, + pub first_modmap_key: KeyCode, + pub first_vmodmap_key: KeyCode, + pub num_key_syms: c_int, + pub num_key_acts: c_int, + pub num_key_behaviors: c_int, + pub num_key_explicit: c_int, + pub num_modmap_keys: c_int, + pub num_vmodmap_keys: c_int, + pub vmods: c_uint, +} + +#[derive(Debug, Clone, Copy, PartialEq)] +#[repr(C)] +pub struct XkbStateNotifyEvent { + pub type_: c_int, + pub serial: c_ulong, + pub send_event: Bool, + pub display: *mut Display, + pub time: Time, + pub xkb_type: c_int, + pub device: c_int, + pub changed: c_uint, + pub group: c_int, + pub base_group: c_int, + pub latched_group: c_int, + pub locked_group: c_int, + pub mods: c_uint, + pub base_mods: c_uint, + pub latched_mods: c_uint, + pub locked_mods: c_uint, + pub compat_state: c_int, + pub grab_mods: c_uchar, + pub compat_grab_mods: c_uchar, + pub lookup_mods: c_uchar, + pub compat_lookup_mods: c_uchar, + pub ptr_buttons: c_int, + pub keycode: KeyCode, + pub event_type: c_char, + pub req_major: c_char, + pub req_minor: c_char, +} + +#[derive(Debug, Clone, Copy, PartialEq)] +#[repr(C)] +pub struct _XkbControlsNotifyEvent { + pub type_: c_int, + pub serial: c_ulong, + pub send_event: Bool, + pub display: *mut Display, + pub time: Time, + pub xkb_type: c_int, + pub device: c_int, + pub changed_ctrls: c_uint, + pub enabled_ctrls: c_uint, + pub enabled_ctrl_changes: c_uint, + pub num_groups: c_int, + pub keycode: KeyCode, + pub event_type: c_char, + pub req_major: c_char, + pub req_minor: c_char, +} + +#[derive(Debug, Clone, Copy, PartialEq)] +#[repr(C)] +pub struct XkbIndicatorNotifyEvent { + pub type_: c_int, + pub serial: c_ulong, + pub send_event: Bool, + pub display: *mut Display, + pub time: Time, + pub xkb_type: c_int, + pub device: c_int, + pub changed: c_uint, + pub state: c_uint, +} + +#[derive(Debug, Clone, Copy, PartialEq)] +#[repr(C)] +pub struct _XkbNamesNotifyEvent { + pub type_: c_int, + pub serial: c_ulong, + pub send_event: Bool, + pub display: *mut Display, + pub time: Time, + pub xkb_type: c_int, + pub device: c_int, + pub changed: c_uint, + pub first_type: c_int, + pub num_types: c_int, + pub first_lvl: c_int, + pub num_lvls: c_int, + pub num_aliases: c_int, + pub num_radio_groups: c_int, + pub changed_vmods: c_uint, + pub changed_groups: c_uint, + pub changed_indicators: c_uint, + pub first_key: c_int, + pub num_keys: c_int, +} + +#[derive(Debug, Clone, Copy, PartialEq)] +#[repr(C)] +pub struct XkbCompatMapNotifyEvent { + pub type_: c_int, + pub serial: c_ulong, + pub send_event: Bool, + pub display: *mut Display, + pub time: Time, + pub xkb_type: c_int, + pub device: c_int, + pub changed_groups: c_uint, + pub first_si: c_int, + pub num_si: c_int, + pub num_total_si: c_int, +} + +#[derive(Debug, Clone, Copy, PartialEq)] +#[repr(C)] +pub struct XkbBellNotifyEvent { + pub type_: c_int, + pub serial: c_ulong, + pub send_event: Bool, + pub display: *mut Display, + pub time: Time, + pub xkb_type: c_int, + pub device: c_int, + pub percent: c_int, + pub pitch: c_int, + pub duration: c_int, + pub bell_class: c_int, + pub bell_id: c_int, + pub name: Atom, + pub window: Window, + pub event_only: Bool, +} + +#[derive(Debug, Clone, Copy, PartialEq)] +#[repr(C)] +pub struct XkbActionMessageEvent { + 
pub type_: c_int,
+ pub serial: c_ulong,
+ pub send_event: Bool,
+ pub display: *mut Display,
+ pub time: Time,
+ pub xkb_type: c_int,
+ pub device: c_int,
+ pub keycode: KeyCode,
+ pub press: Bool,
+ pub key_event_follows: Bool,
+ pub group: c_int,
+ pub mods: c_uint,
+ pub message: [c_char; XkbActionMessageLength + 1],
+}
+
+#[derive(Debug, Clone, Copy, PartialEq)]
+#[repr(C)]
+pub struct XkbAccessXNotifyEvent {
+ pub type_: c_int,
+ pub serial: c_ulong,
+ pub send_event: Bool,
+ pub display: *mut Display,
+ pub time: Time,
+ pub xkb_type: c_int,
+ pub device: c_int,
+ pub detail: c_int,
+ pub keycode: c_int,
+ pub sk_delay: c_int,
+ pub debounce_delay: c_int,
+}
+
+#[derive(Debug, Clone, Copy, PartialEq)]
+#[repr(C)]
+pub struct _XkbExtensionDeviceNotifyEvent {
+ pub type_: c_int,
+ pub serial: c_ulong,
+ pub send_event: Bool,
+ pub display: *mut Display,
+ pub time: Time,
+ pub xkb_type: c_int,
+ pub device: c_int,
+ pub reason: c_uint,
+ pub supported: c_uint,
+ pub unsupported: c_uint,
+ pub first_btn: c_int,
+ pub num_btns: c_int,
+ pub leds_defined: c_uint,
+ pub led_state: c_uint,
+ pub led_class: c_int,
+ pub led_id: c_int,
+}
+
+#[derive(Debug, Clone, Copy, PartialEq)]
+#[repr(C)]
+pub struct XkbEvent {
+ _pad: [c_long; 24],
+}
+
+#[cfg(test)]
+macro_rules! test_xkb_event_size {
+ { $($ty:ty,)* } => { $(
+ assert!(::std::mem::size_of::<XkbEvent>() >= ::std::mem::size_of::<$ty>());
+ )* };
+}
+
+#[test]
+fn xkb_event_size_test () {
+ test_xkb_event_size! {
+ XkbAnyEvent,
+ XkbNewKeyboardNotifyEvent,
+ XkbMapNotifyEvent,
+ XkbStateNotifyEvent,
+ XkbControlsNotifyEvent,
+ XkbIndicatorNotifyEvent,
+ XkbNamesNotifyEvent,
+ XkbCompatMapNotifyEvent,
+ XkbBellNotifyEvent,
+ XkbActionMessageEvent,
+ XkbAccessXNotifyEvent,
+ XkbExtensionDeviceNotifyEvent,
+ }
+}
+
+pub enum XkbKbdDpyStateRec {}
+pub type XkbKbdDpyStatePtr = *mut XkbKbdDpyStateRec;
+
+
+
+
+
+
+
+#[derive(Debug, Clone, Copy, PartialEq)]
+#[repr(C)]
+pub struct Depth {
+ pub depth: c_int,
+ pub nvisuals: c_int,
+ pub visuals: *mut Visual,
+}
+
+#[derive(Debug, Clone, Copy, PartialEq)]
+#[repr(C)]
+pub struct Screen {
+ pub ext_data: *mut XExtData,
+ pub display: *mut Display,
+ pub root: Window,
+ pub width: c_int,
+ pub height: c_int,
+ pub mwidth: c_int,
+ pub mheight: c_int,
+ pub ndepths: c_int,
+ pub depths: *mut Depth,
+ pub root_depth: c_int,
+ pub root_visual: *mut Visual,
+ pub default_gc: GC,
+ pub cmap: Colormap,
+ pub white_pixel: c_ulong,
+ pub black_pixel: c_ulong,
+ pub max_maps: c_int,
+ pub min_maps: c_int,
+ pub backing_store: c_int,
+ pub save_unders: Bool,
+ pub root_input_mask: c_long,
+}
+
+#[derive(Debug, Clone, Copy, PartialEq)]
+#[repr(C)]
+pub struct ScreenFormat {
+ pub ext_data: *mut XExtData,
+ pub depth: c_int,
+ pub bits_per_pixel: c_int,
+ pub scanline_pad: c_int,
+}
+
+#[derive(Debug, Clone, Copy, PartialEq)]
+#[repr(C)]
+pub struct Visual {
+ pub ext_data: *mut XExtData,
+ pub visualid: VisualID,
+ pub class: c_int,
+ pub red_mask: c_ulong,
+ pub green_mask: c_ulong,
+ pub blue_mask: c_ulong,
+ pub bits_per_rgb: c_int,
+ pub map_entries: c_int,
+}
+
+#[derive(Debug, Clone, Copy, PartialEq)]
+#[repr(C)]
+pub struct XArc {
+ pub x: c_short,
+ pub y: c_short,
+ pub width: c_ushort,
+ pub height: c_ushort,
+ pub angle1: c_short,
+ pub angle2: c_short,
+}
+
+#[derive(Debug, Clone, Copy, PartialEq)]
+#[repr(C)]
+pub struct XChar2b {
+ pub byte1: c_uchar,
+ pub byte2: c_uchar,
+}
+
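As a reading aid for this vendored table, here is a minimal usage sketch of the two-byte character type just defined together with the XTextWidth16 entry declared earlier. It is illustrative only: the loader struct `Xlib` is assumed to be what the surrounding function-table macro generates, and `font` is assumed to be a valid pointer obtained elsewhere (e.g. from XLoadQueryFont); neither name is guaranteed by this patch.

    use std::os::raw::c_int;

    // Sketch only: measure a two-glyph string in pixels. XChar2b holds the
    // high and low bytes of a 16-bit character code.
    unsafe fn pixel_width(xlib: &Xlib, font: *mut XFontStruct) -> c_int {
        let text = [
            XChar2b { byte1: 0x04, byte2: 0x10 }, // U+0410
            XChar2b { byte1: 0x04, byte2: 0x11 }, // U+0411
        ];
        (xlib.XTextWidth16)(font, text.as_ptr(), text.len() as c_int)
    }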
+#[derive(Debug, Clone, Copy, PartialEq)]
+#[repr(C)]
+pub struct XCharStruct {
+ pub lbearing: c_short,
+ pub rbearing: c_short,
+ pub width: c_short,
+ pub ascent: c_short,
+ pub descent: c_short,
+ pub attributes: c_ushort,
+}
+
+#[derive(Debug, Clone, Copy, PartialEq)]
+#[repr(C)]
+pub struct XClassHint {
+ pub res_name: *mut c_char,
+ pub res_class: *mut c_char,
+}
+
+#[derive(Debug, Clone, Copy, PartialEq)]
+#[repr(C)]
+pub struct XColor {
+ pub pixel: c_ulong,
+ pub red: c_ushort,
+ pub green: c_ushort,
+ pub blue: c_ushort,
+ pub flags: c_char,
+ pub pad: c_char,
+}
+
+#[derive(Debug, Clone, Copy, PartialEq)]
+#[repr(C)]
+pub struct XComposeStatus {
+ pub compose_ptr: XPointer,
+ pub chars_matched: c_int,
+}
+
+#[derive(Debug, Clone, Copy, PartialEq)]
+#[repr(C)]
+pub struct XExtCodes {
+ pub extension: c_int,
+ pub major_opcode: c_int,
+ pub first_event: c_int,
+ pub first_error: c_int,
+}
+
+#[repr(C)]
+pub struct XExtData {
+ pub number: c_int,
+ pub next: *mut XExtData,
+ pub free_private: Option<unsafe extern "C" fn (*mut XExtData) -> c_int>,
+ pub private_data: XPointer,
+}
+
+#[derive(Debug, Clone, Copy, PartialEq)]
+#[repr(C)]
+pub struct XFontProp {
+ pub name: Atom,
+ pub card32: c_ulong,
+}
+
+#[derive(Debug, Clone, Copy, PartialEq)]
+#[repr(C)]
+pub struct XFontSetExtents {
+ pub max_ink_extent: XRectangle,
+ pub max_logical_extent: XRectangle,
+}
+
+#[derive(Debug, Clone, Copy, PartialEq)]
+#[repr(C)]
+pub struct XFontStruct {
+ pub ext_data: *mut XExtData,
+ pub fid: Font,
+ pub direction: c_uint,
+ pub min_char_or_byte2: c_uint,
+ pub max_char_or_byte2: c_uint,
+ pub min_byte1: c_uint,
+ pub max_byte1: c_uint,
+ pub all_chars_exist: Bool,
+ pub default_char: c_uint,
+ pub n_properties: c_int,
+ pub properties: *mut XFontProp,
+ pub min_bounds: XCharStruct,
+ pub max_bounds: XCharStruct,
+ pub per_char: *mut XCharStruct,
+ pub ascent: c_int,
+ pub descent: c_int,
+}
+
+#[derive(Debug, Clone, Copy, PartialEq)]
+#[repr(C)]
+pub struct XGCValues {
+ pub function: c_int,
+ pub plane_mask: c_ulong,
+ pub foreground: c_ulong,
+ pub background: c_ulong,
+ pub line_width: c_int,
+ pub line_style: c_int,
+ pub cap_style: c_int,
+ pub join_style: c_int,
+ pub fill_style: c_int,
+ pub fill_rule: c_int,
+ pub arc_mode: c_int,
+ pub tile: Pixmap,
+ pub stipple: Pixmap,
+ pub ts_x_origin: c_int,
+ pub ts_y_origin: c_int,
+ pub font: Font,
+ pub subwindow_mode: c_int,
+ pub graphics_exposures: Bool,
+ pub clip_x_origin: c_int,
+ pub clip_y_origin: c_int,
+ pub clip_mask: Pixmap,
+ pub dash_offset: c_int,
+ pub dashes: c_char,
+}
+
+#[derive(Debug, Clone, Copy, PartialEq)]
+#[repr(C)]
+pub struct XGenericEventCookie {
+ pub type_: c_int,
+ pub serial: c_ulong,
+ pub send_event: Bool,
+ pub display: *mut Display,
+ pub extension: c_int,
+ pub evtype: c_int,
+ pub cookie: c_uint,
+ pub data: *mut c_void,
+}
+
+#[derive(Debug, Clone, Copy, PartialEq)]
+#[repr(C)]
+pub struct XHostAddress {
+ pub family: c_int,
+ pub length: c_int,
+ pub address: *mut c_char,
+}
+
+#[derive(Debug, Clone, Copy, PartialEq)]
+#[repr(C)]
+pub struct XIconSize {
+ pub min_width: c_int,
+ pub min_height: c_int,
+ pub max_width: c_int,
+ pub max_height: c_int,
+ pub width_inc: c_int,
+ pub height_inc: c_int,
+}
+
+#[derive(Debug, Clone, Copy, PartialEq)]
+#[repr(C)]
+pub struct XImage {
+ pub width: c_int,
+ pub height: c_int,
+ pub xoffset: c_int,
+ pub format: c_int,
+ pub data: *mut c_char,
+ pub byte_order: c_int,
+ pub bitmap_unit: c_int,
+ pub bitmap_bit_order: c_int,
+ pub bitmap_pad: c_int,
+ pub depth: c_int,
+ pub bytes_per_line: c_int,
+ pub bits_per_pixel: c_int,
+ pub red_mask: c_ulong,
+ pub green_mask: c_ulong,
+ pub blue_mask: c_ulong,
+ pub obdata:
XPointer, + pub funcs: ImageFns, +} + +#[derive(Debug, Clone, Copy, PartialEq)] +#[repr(C)] +pub struct XKeyboardControl { + pub key_click_percent: c_int, + pub bell_percent: c_int, + pub bell_pitch: c_int, + pub bell_duration: c_int, + pub led: c_int, + pub led_mode: c_int, + pub key: c_int, + pub auto_repeat_mode: c_int, +} + +#[derive(Debug, Clone, Copy, PartialEq)] +#[repr(C)] +pub struct XKeyboardState { + pub key_click_percent: c_int, + pub bell_percent: c_int, + pub bell_pitch: c_uint, + pub bell_duration: c_uint, + pub led_mask: c_ulong, + pub global_auto_repeat: c_int, + pub auto_repeats: [c_char; 32], +} + +#[derive(Debug, Clone, Copy, PartialEq)] +#[repr(C)] +pub struct XmbTextItem { + pub chars: *mut c_char, + pub nchars: c_int, + pub delta: c_int, + pub font_set: XFontSet, +} + +#[derive(Debug, Clone, Copy, PartialEq)] +#[repr(C)] +pub struct XModifierKeymap { + pub max_keypermod: c_int, + pub modifiermap: *mut KeyCode, +} + +#[derive(Debug, Clone, Copy, PartialEq)] +#[repr(C)] +pub struct XOMCharSetList { + pub charset_count: c_int, + pub charset_list: *mut *mut c_char, +} + +#[derive(Debug, Clone, Copy, PartialEq)] +#[repr(C)] +pub struct XPixmapFormatValues { + pub depth: c_int, + pub bits_per_pixel: c_int, + pub scanline_pad: c_int, +} + +#[derive(Debug, Clone, Copy, PartialEq)] +#[repr(C)] +pub struct XPoint { + pub x: c_short, + pub y: c_short, +} + +#[derive(Debug, Clone, Copy, PartialEq)] +#[repr(C)] +pub struct XRectangle { + pub x: c_short, + pub y: c_short, + pub width: c_ushort, + pub height: c_ushort, +} + +#[derive(Debug, Clone, Copy, PartialEq)] +#[repr(C)] +pub struct XrmOptionDescRec { + pub option: *mut c_char, + pub specifier: *mut c_char, + pub argKind: XrmOptionKind, + pub value: XPointer, +} + +#[derive(Debug, Clone, Copy, PartialEq)] +#[repr(C)] +pub struct XrmValue { + pub size: c_uint, + pub addr: XPointer, +} + +#[derive(Debug, Clone, Copy, PartialEq)] +#[repr(C)] +pub struct XSegment { + pub x1: c_short, + pub y1: c_short, + pub x2: c_short, + pub y2: c_short, +} + +#[derive(Debug, Clone, Copy, PartialEq)] +#[repr(C)] +pub struct XSetWindowAttributes { + pub background_pixmap: Pixmap, + pub background_pixel: c_ulong, + pub border_pixmap: Pixmap, + pub border_pixel: c_ulong, + pub bit_gravity: c_int, + pub win_gravity: c_int, + pub backing_store: c_int, + pub backing_planes: c_ulong, + pub backing_pixel: c_ulong, + pub save_under: Bool, + pub event_mask: c_long, + pub do_not_propagate_mask: c_long, + pub override_redirect: Bool, + pub colormap: Colormap, + pub cursor: Cursor, +} + +#[derive(Debug, Clone, Copy, PartialEq)] +#[repr(C)] +pub struct XSizeHints { + pub flags: c_long, + pub x: c_int, + pub y: c_int, + pub width: c_int, + pub height: c_int, + pub min_width: c_int, + pub min_height: c_int, + pub max_width: c_int, + pub max_height: c_int, + pub width_inc: c_int, + pub height_inc: c_int, + pub min_aspect: AspectRatio, + pub max_aspect: AspectRatio, + pub base_width: c_int, + pub base_height: c_int, + pub win_gravity: c_int, +} + +#[derive(Debug, Clone, Copy, PartialEq)] +#[repr(C)] +pub struct XStandardColormap { + pub colormap: Colormap, + pub red_max: c_ulong, + pub red_mult: c_ulong, + pub green_max: c_ulong, + pub green_mult: c_ulong, + pub blue_max: c_ulong, + pub blue_mult: c_ulong, + pub base_pixel: c_ulong, + pub visualid: VisualID, + pub killid: XID, +} + +#[derive(Debug, Clone, Copy, PartialEq)] +#[repr(C)] +pub struct XTextItem { + pub chars: *mut c_char, + pub nchars: c_int, + pub delta: c_int, + pub font: Font, +} + 
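A brief sketch of how the XSizeHints struct above travels through XSetWMNormalHints, which is declared earlier in this same table. Illustrative only: `xlib`, `dpy` and `win` are hypothetical stand-ins for a loaded function table, an open Display and an existing Window, and PMinSize/PMaxSize are the hint-flag constants defined further down in this file.

    // Sketch only: pin a window to a fixed 640x480 size via WM normal hints.
    unsafe fn fix_size(xlib: &Xlib, dpy: *mut Display, win: Window) {
        let mut hints: XSizeHints = std::mem::zeroed(); // all-POD struct, zeroing is fine
        hints.flags = PMinSize | PMaxSize;
        hints.min_width = 640;
        hints.min_height = 480;
        hints.max_width = 640;
        hints.max_height = 480;
        (xlib.XSetWMNormalHints)(dpy, win, &mut hints);
    }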
+#[derive(Debug, Clone, Copy, PartialEq)] +#[repr(C)] +pub struct XTextItem16 { + pub chars: *mut XChar2b, + pub nchars: c_int, + pub delta: c_int, + pub font: Font, +} + +#[derive(Debug, Clone, Copy, PartialEq)] +#[repr(C)] +pub struct XTextProperty { + pub value: *mut c_uchar, + pub encoding: Atom, + pub format: c_int, + pub nitems: c_ulong, +} + +#[derive(Debug, Clone, Copy, PartialEq)] +#[repr(C)] +pub struct XTimeCoord { + pub time: Time, + pub x: c_short, + pub y: c_short, +} + +#[derive(Debug, Clone, Copy, PartialEq)] +#[repr(C)] +pub struct XVisualInfo { + pub visual: *mut Visual, + pub visualid: VisualID, + pub screen: c_int, + pub depth: c_int, + pub class: c_int, + pub red_mask: c_ulong, + pub green_mask: c_ulong, + pub blue_mask: c_ulong, + pub colormap_size: c_int, + pub bits_per_rgb: c_int, +} + +#[derive(Debug, Clone, Copy, PartialEq)] +#[repr(C)] +pub struct XwcTextItem { + pub chars: *mut wchar_t, + pub nchars: c_int, + pub delta: c_int, + pub font_set: XFontSet, +} + +#[derive(Debug, Clone, Copy, PartialEq)] +#[repr(C)] +pub struct XWindowAttributes { + pub x: c_int, + pub y: c_int, + pub width: c_int, + pub height: c_int, + pub border_width: c_int, + pub depth: c_int, + pub visual: *mut Visual, + pub root: Window, + pub class: c_int, + pub bit_gravity: c_int, + pub win_gravity: c_int, + pub backing_store: c_int, + pub backing_planes: c_ulong, + pub backing_pixel: c_ulong, + pub save_under: Bool, + pub colormap: Colormap, + pub map_installed: Bool, + pub map_state: c_int, + pub all_event_masks: c_long, + pub your_event_mask: c_long, + pub do_not_propagate_mask: c_long, + pub override_redirect: Bool, + pub screen: *mut Screen, +} + +#[derive(Debug, Clone, Copy, PartialEq)] +#[repr(C)] +pub struct XWindowChanges { + pub x: c_int, + pub y: c_int, + pub width: c_int, + pub height: c_int, + pub border_width: c_int, + pub sibling: Window, + pub stack_mode: c_int, +} + +#[derive(Debug, Clone, Copy, PartialEq)] +#[repr(C)] +pub struct XWMHints { + pub flags: c_long, + pub input: Bool, + pub initial_state: c_int, + pub icon_pixmap: Pixmap, + pub icon_window: Window, + pub icon_x: c_int, + pub icon_y: c_int, + pub icon_mask: Pixmap, + pub window_group: XID, +} + +#[repr(C)] +pub struct XIMCallback { + pub client_data: XPointer, + pub callback: XIMProc, +} + +#[repr(C)] +pub struct XICCallback { + pub client_data: XPointer, + pub callback: XICProc, +} + + + + + + +#[derive(Debug, Clone, Copy, PartialEq)] +#[repr(C)] +pub struct AspectRatio { + pub x: c_int, + pub y: c_int, +} + +#[derive(Debug, Clone, Copy, Default, PartialEq)] +#[repr(C)] +pub struct ClientMessageData { + longs: [c_long; 5], +} + +impl ClientMessageData { + pub fn as_bytes (&self) -> &[c_char] { + self.as_ref() + } + + pub fn as_bytes_mut (&mut self) -> &mut [c_char] { + self.as_mut() + } + + pub fn as_longs (&self) -> &[c_long] { + self.as_ref() + } + + pub fn as_longs_mut (&mut self) -> &mut [c_long] { + self.as_mut() + } + + pub fn as_shorts (&self) -> &[c_short] { + self.as_ref() + } + + pub fn as_shorts_mut (&mut self) -> &mut [c_short] { + self.as_mut() + } + + pub fn get_byte (&self, index: usize) -> c_char { + self.as_bytes()[index] + } + + pub fn get_long (&self, index: usize) -> c_long { + self.longs[index] + } + + pub fn get_short (&self, index: usize) -> c_short { + self.as_shorts()[index] + } + + pub fn new() -> ClientMessageData { + ClientMessageData { longs: [0; 5] } + } + + pub fn set_byte (&mut self, index: usize, value: c_char) { + self.as_bytes_mut()[index] = value; + } + + pub fn set_long (&mut 
self, index: usize, value: c_long) {
+ self.longs[index] = value;
+ }
+
+ pub fn set_short (&mut self, index: usize, value: c_short) {
+ self.as_shorts_mut()[index] = value;
+ }
+}
+
+macro_rules! client_message_data_conversions {
+ { $($ty:ty[$n:expr],)* } => {
+ $(
+ impl AsMut<[$ty]> for ClientMessageData {
+ fn as_mut (&mut self) -> &mut [$ty] {
+ unsafe { slice::from_raw_parts_mut(self.longs.as_mut_ptr() as *mut $ty, $n) }
+ }
+ }
+
+ impl AsRef<[$ty]> for ClientMessageData {
+ fn as_ref (&self) -> &[$ty] {
+ unsafe { slice::from_raw_parts(self.longs.as_ptr() as *mut $ty, $n) }
+ }
+ }
+
+ impl From<[$ty; $n]> for ClientMessageData {
+ fn from (array: [$ty; $n]) -> ClientMessageData {
+ unsafe { transmute_union(&array) }
+ }
+ }
+ )*
+ };
+}
+
+client_message_data_conversions! {
+ c_schar[20],
+ c_uchar[20],
+ c_short[10],
+ c_ushort[10],
+ c_long[5],
+ c_ulong[5],
+}
+
+#[test]
+fn client_message_size_test () {
+ assert!(::std::mem::size_of::<ClientMessageData>() >= ::std::mem::size_of::<[c_char; 20]>());
+ assert!(::std::mem::size_of::<ClientMessageData>() >= ::std::mem::size_of::<[c_short; 10]>());
+}
+
+#[derive(Debug, Copy)]
+#[repr(C)]
+pub struct ImageFns {
+ pub create_image: Option<unsafe extern "C" fn (*mut Display, *mut Visual, c_uint, c_int, c_int, *mut c_char, c_uint, c_uint, c_int, c_int) -> *mut XImage>,
+ pub destroy_image: Option<unsafe extern "C" fn (*mut XImage) -> c_int>,
+ pub get_pixel: Option<unsafe extern "C" fn (*mut XImage, c_int, c_int) -> c_ulong>,
+ pub put_pixel: Option<unsafe extern "C" fn (*mut XImage, c_int, c_int, c_ulong) -> c_int>,
+ pub sub_image: Option<unsafe extern "C" fn (*mut XImage, c_int, c_int, c_uint, c_uint) -> *mut XImage>,
+ pub add_pixel: Option<unsafe extern "C" fn (*mut XImage, c_long) -> c_int>,
+}
+
+impl Clone for ImageFns {
+ fn clone (&self) -> ImageFns {
+ *self
+ }
+}
+
+impl PartialEq for ImageFns {
+ fn eq (&self, rhs: &ImageFns) -> bool {
+ unsafe { mem_eq(self, rhs) }
+ }
+}
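To make the ClientMessageData accessors concrete, a sketch that assembles a client-message payload and wraps it in the XEvent union through the From impls generated earlier in this file. Illustrative only: the `win`, `kind` and `value` arguments are hypothetical inputs, and ClientMessage and True are the constants defined just below.

    use std::os::raw::c_long;
    use std::ptr;

    // Sketch only: a 32-bit-format client message carrying a single long.
    fn make_client_message(win: Window, kind: Atom, value: c_long) -> XEvent {
        let mut data = ClientMessageData::new();
        data.set_long(0, value);
        XEvent::from(XClientMessageEvent {
            type_: ClientMessage,
            serial: 0,
            send_event: True,
            display: ptr::null_mut(),
            window: win,
            message_type: kind,
            format: 32, // receivers read the payload through as_longs()
            data,
        })
    }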
+
+
+
+
+
+
+pub const AllocNone: c_int = 0;
+pub const AllocAll: c_int = 1;
+
+
+pub const XkbKeyNameLength: usize = 4;
+pub const XkbNumIndicators: usize = 32;
+pub const XkbNumKbdGroups: usize = 4;
+pub const XkbNumVirtualMods: usize = 16;
+
+
+pub const XA_PRIMARY: Atom = 1;
+pub const XA_SECONDARY: Atom = 2;
+pub const XA_ARC: Atom = 3;
+pub const XA_ATOM: Atom = 4;
+pub const XA_BITMAP: Atom = 5;
+pub const XA_CARDINAL: Atom = 6;
+pub const XA_COLORMAP: Atom = 7;
+pub const XA_CURSOR: Atom = 8;
+pub const XA_CUT_BUFFER0: Atom = 9;
+pub const XA_CUT_BUFFER1: Atom = 10;
+pub const XA_CUT_BUFFER2: Atom = 11;
+pub const XA_CUT_BUFFER3: Atom = 12;
+pub const XA_CUT_BUFFER4: Atom = 13;
+pub const XA_CUT_BUFFER5: Atom = 14;
+pub const XA_CUT_BUFFER6: Atom = 15;
+pub const XA_CUT_BUFFER7: Atom = 16;
+pub const XA_DRAWABLE: Atom = 17;
+pub const XA_FONT: Atom = 18;
+pub const XA_INTEGER: Atom = 19;
+pub const XA_PIXMAP: Atom = 20;
+pub const XA_POINT: Atom = 21;
+pub const XA_RECTANGLE: Atom = 22;
+pub const XA_RESOURCE_MANAGER: Atom = 23;
+pub const XA_RGB_COLOR_MAP: Atom = 24;
+pub const XA_RGB_BEST_MAP: Atom = 25;
+pub const XA_RGB_BLUE_MAP: Atom = 26;
+pub const XA_RGB_DEFAULT_MAP: Atom = 27;
+pub const XA_RGB_GRAY_MAP: Atom = 28;
+pub const XA_RGB_GREEN_MAP: Atom = 29;
+pub const XA_RGB_RED_MAP: Atom = 30;
+pub const XA_STRING: Atom = 31;
+pub const XA_VISUALID: Atom = 32;
+pub const XA_WINDOW: Atom = 33;
+pub const XA_WM_COMMAND: Atom = 34;
+pub const XA_WM_HINTS: Atom = 35;
+pub const XA_WM_CLIENT_MACHINE: Atom = 36;
+pub const XA_WM_ICON_NAME: Atom = 37;
+pub const XA_WM_ICON_SIZE: Atom = 38;
+pub const XA_WM_NAME: Atom = 39;
+pub const XA_WM_NORMAL_HINTS: Atom = 40;
+pub const XA_WM_SIZE_HINTS: Atom = 41;
+pub const XA_WM_ZOOM_HINTS: Atom = 42;
+pub const XA_MIN_SPACE: Atom = 43;
+pub const XA_NORM_SPACE: Atom = 44;
+pub const XA_MAX_SPACE: Atom = 45;
+pub const XA_END_SPACE: Atom = 46;
+pub const XA_SUPERSCRIPT_X: Atom = 47;
+pub const XA_SUPERSCRIPT_Y: Atom = 48;
+pub const XA_SUBSCRIPT_X: Atom = 49;
+pub const XA_SUBSCRIPT_Y: Atom = 50;
+pub const XA_UNDERLINE_POSITION: Atom = 51;
+pub const XA_UNDERLINE_THICKNESS: Atom = 52;
+pub const XA_STRIKEOUT_ASCENT: Atom = 53;
+pub const XA_STRIKEOUT_DESCENT: Atom = 54;
+pub const XA_ITALIC_ANGLE: Atom = 55;
+pub const XA_X_HEIGHT: Atom = 56;
+pub const XA_QUAD_WIDTH: Atom = 57;
+pub const XA_WEIGHT: Atom = 58;
+pub const XA_POINT_SIZE: Atom = 59;
+pub const XA_RESOLUTION: Atom = 60;
+pub const XA_COPYRIGHT: Atom = 61;
+pub const XA_NOTICE: Atom = 62;
+pub const XA_FONT_NAME: Atom = 63;
+pub const XA_FAMILY_NAME: Atom = 64;
+pub const XA_FULL_NAME: Atom = 65;
+pub const XA_CAP_HEIGHT: Atom = 66;
+pub const XA_WM_CLASS: Atom = 67;
+pub const XA_WM_TRANSIENT_FOR: Atom = 68;
+
+
+pub const False: Bool = 0;
+pub const True: Bool = 1;
+
+
+pub const Unsorted: c_int = 0;
+pub const YSorted: c_int = 1;
+pub const YXSorted: c_int = 2;
+pub const YXBanded: c_int = 3;
+
+
+pub const DoRed: c_char = 1;
+pub const DoGreen: c_char = 2;
+pub const DoBlue: c_char = 4;
+
+
+pub const Success: c_uchar = 0;
+pub const BadRequest: c_uchar = 1;
+pub const BadValue: c_uchar = 2;
+pub const BadWindow: c_uchar = 3;
+pub const BadPixmap: c_uchar = 4;
+pub const BadAtom: c_uchar = 5;
+pub const BadCursor: c_uchar = 6;
+pub const BadFont: c_uchar = 7;
+pub const BadMatch: c_uchar = 8;
+pub const BadDrawable: c_uchar = 9;
+pub const BadAccess: c_uchar = 10;
+pub const BadAlloc: c_uchar = 11;
+pub const BadColor: c_uchar = 12;
+pub const BadGC: c_uchar = 13;
+pub const BadIDChoice: c_uchar = 14;
+pub const BadName: c_uchar = 15;
+pub const BadLength: c_uchar = 16;
+pub const BadImplementation: c_uchar = 17;
+pub const FirstExtensionError: c_uchar = 128;
+pub const LastExtensionError: c_uchar = 255;
+
+
+pub const KeyPress: c_int = 2;
+pub const KeyRelease: c_int = 3;
+pub const ButtonPress: c_int = 4;
+pub const ButtonRelease: c_int = 5;
+pub const MotionNotify: c_int = 6;
+pub const EnterNotify: c_int = 7;
+pub const LeaveNotify: c_int = 8;
+pub const FocusIn: c_int = 9;
+pub const FocusOut: c_int = 10;
+pub const KeymapNotify: c_int = 11;
+pub const Expose: c_int = 12;
+pub const GraphicsExpose: c_int = 13;
+pub const NoExpose: c_int = 14;
+pub const VisibilityNotify: c_int = 15;
+pub const CreateNotify: c_int = 16;
+pub const DestroyNotify: c_int = 17;
+pub const UnmapNotify: c_int = 18;
+pub const MapNotify: c_int = 19;
+pub const MapRequest: c_int = 20;
+pub const ReparentNotify: c_int = 21;
+pub const ConfigureNotify: c_int = 22;
+pub const ConfigureRequest: c_int = 23;
+pub const GravityNotify: c_int = 24;
+pub const ResizeRequest: c_int = 25;
+pub const CirculateNotify: c_int = 26;
+pub const CirculateRequest: c_int = 27;
+pub const PropertyNotify: c_int = 28;
+pub const SelectionClear: c_int = 29;
+pub const SelectionRequest: c_int = 30;
+pub const SelectionNotify: c_int = 31;
+pub const ColormapNotify: c_int = 32;
+pub const ClientMessage: c_int = 33;
+pub const MappingNotify: c_int = 34;
+pub const GenericEvent: c_int = 35;
+pub const LASTEvent: c_int = 36;
+
+
+pub const NoEventMask: c_long = 0;
+pub const KeyPressMask: c_long = 0x0000_0001;
+pub const KeyReleaseMask: c_long = 0x0000_0002;
+pub const ButtonPressMask: c_long = 0x0000_0004;
+pub const ButtonReleaseMask: c_long = 0x0000_0008;
+pub const EnterWindowMask: c_long = 0x0000_0010;
+pub const LeaveWindowMask: c_long = 0x0000_0020;
+pub const PointerMotionMask: c_long = 0x0000_0040;
+pub const PointerMotionHintMask: c_long = 0x0000_0080;
+pub const
Button1MotionMask: c_long = 0x0000_0100; +pub const Button2MotionMask: c_long = 0x0000_0200; +pub const Button3MotionMask: c_long = 0x0000_0400; +pub const Button4MotionMask: c_long = 0x0000_0800; +pub const Button5MotionMask: c_long = 0x0000_1000; +pub const ButtonMotionMask: c_long = 0x0000_2000; +pub const KeymapStateMask: c_long = 0x0000_4000; +pub const ExposureMask: c_long = 0x0000_8000; +pub const VisibilityChangeMask: c_long = 0x0001_0000; +pub const StructureNotifyMask: c_long = 0x0002_0000; +pub const ResizeRedirectMask: c_long = 0x0004_0000; +pub const SubstructureNotifyMask: c_long = 0x0008_0000; +pub const SubstructureRedirectMask: c_long = 0x0010_0000; +pub const FocusChangeMask: c_long = 0x0020_0000; +pub const PropertyChangeMask: c_long = 0x0040_0000; +pub const ColormapChangeMask: c_long = 0x0080_0000; +pub const OwnerGrabButtonMask: c_long = 0x0100_0000; + + +pub const PropModeReplace: c_int = 0; +pub const PropModePrepend: c_int = 1; +pub const PropModeAppend: c_int = 2; + + +pub const ShiftMapIndex: c_int = 0; +pub const LockMapIndex: c_int = 1; +pub const ControlMapIndex: c_int = 2; +pub const Mod1MapIndex: c_int = 3; +pub const Mod2MapIndex: c_int = 4; +pub const Mod3MapIndex: c_int = 5; +pub const Mod4MapIndex: c_int = 6; +pub const Mod5MapIndex: c_int = 7; + + +pub const Button1Mask: c_uint = (1<<8); +pub const Button2Mask: c_uint = (1<<9); +pub const Button3Mask: c_uint = (1<<10); +pub const Button4Mask: c_uint = (1<<11); +pub const Button5Mask: c_uint = (1<<12); +pub const AnyModifier: c_uint = (1<<15); + + +pub const NotifyNormal: c_int = 0; +pub const NotifyGrab: c_int = 1; +pub const NotifyUngrab: c_int = 2; +pub const NotifyWhileGrabbed: c_int = 3; + +pub const NotifyHint: c_int = 1; + + +pub const NotifyAncestor: c_int = 0; +pub const NotifyVirtual: c_int = 1; +pub const NotifyInferior: c_int = 2; +pub const NotifyNonlinear: c_int = 3; +pub const NotifyNonlinearVirtual: c_int = 4; +pub const NotifyPointer: c_int = 5; +pub const NotifyPointerRoot: c_int = 6; +pub const NotifyDetailNone: c_int = 7; + + +pub const VisibilityUnobscured: c_int = 0; +pub const VisibilityPartiallyObscured: c_int = 1; +pub const VisibilityFullyObscured: c_int = 2; + + +pub const PlaceOnTop: c_int = 0; +pub const PlaceOnBottom: c_int = 1; + + +pub const FamilyInternet: c_int = 0; +pub const FamilyDECnet: c_int = 1; +pub const FamilyChaos: c_int = 2; +pub const FamilyInternet6: c_int = 6; + + +pub const FamilyServerInterpreted: c_int = 5; + + +pub const PropertyNewValue: c_int = 0; +pub const PropertyDelete: c_int = 1; + + +pub const ColormapUninstalled: c_int = 0; +pub const ColormapInstalled: c_int = 1; + + +pub const GrabModeSync: c_int = 0; +pub const GrabModeAsync: c_int = 1; + + +pub const GrabSuccess: c_int = 0; +pub const AlreadyGrabbed: c_int = 1; +pub const GrabInvalidTime: c_int = 2; +pub const GrabNotViewable: c_int = 3; +pub const GrabFrozen: c_int = 4; + + +pub const AsyncPointer: c_int = 0; +pub const SyncPointer: c_int = 1; +pub const ReplayPointer: c_int = 2; +pub const AsyncKeyboard: c_int = 3; +pub const SyncKeyboard: c_int = 4; +pub const ReplayKeyboard: c_int = 5; +pub const AsyncBoth: c_int = 6; +pub const SyncBoth: c_int = 7; + + +pub const RevertToNone: c_int = 0; +pub const RevertToPointerRoot: c_int = 1; +pub const RevertToParent: c_int = 2; + + + +pub const CWX: c_ushort = (1<<0); +pub const CWY: c_ushort = (1<<1); +pub const CWWidth: c_ushort = (1<<2); +pub const CWHeight: c_ushort = (1<<3); +pub const CWBorderWidth: c_ushort = (1<<4); +pub const CWSibling: 
c_ushort = (1<<5); +pub const CWStackMode: c_ushort = (1<<6); + + +pub const ForgetGravity: c_int = 0; +pub const UnmapGravity: c_int = 0; +pub const NorthWestGravity: c_int = 1; +pub const NorthGravity: c_int = 2; +pub const NorthEastGravity: c_int = 3; +pub const WestGravity: c_int = 4; +pub const CenterGravity: c_int = 5; +pub const EastGravity: c_int = 6; +pub const SouthWestGravity: c_int = 7; +pub const SouthGravity: c_int = 8; +pub const SouthEastGravity: c_int = 9; +pub const StaticGravity: c_int = 10; + + +pub const XYBitmap: c_int = 0; +pub const XYPixmap: c_int = 1; +pub const ZPixmap: c_int = 2; + + +pub const NotUseful: c_int = 0; +pub const WhenMapped: c_int = 1; +pub const Always: c_int = 2; + + +pub const IsUnmapped: c_int = 0; +pub const IsUnviewable: c_int = 1; +pub const IsViewable: c_int = 2; + + +pub const ShiftMask: c_uint = 0x01; +pub const LockMask: c_uint = 0x02; +pub const ControlMask: c_uint = 0x04; +pub const Mod1Mask: c_uint = 0x08; +pub const Mod2Mask: c_uint = 0x10; +pub const Mod3Mask: c_uint = 0x20; +pub const Mod4Mask: c_uint = 0x40; +pub const Mod5Mask: c_uint = 0x80; + + +pub const Button1: c_uint = 1; +pub const Button2: c_uint = 2; +pub const Button3: c_uint = 3; +pub const Button4: c_uint = 4; +pub const Button5: c_uint = 5; + + +pub const USPosition: c_long = 0x0001; +pub const USSize: c_long = 0x0002; +pub const PPosition: c_long = 0x0004; +pub const PSize: c_long = 0x0008; +pub const PMinSize: c_long = 0x0010; +pub const PMaxSize: c_long = 0x0020; +pub const PResizeInc: c_long = 0x0040; +pub const PAspect: c_long = 0x0080; +pub const PBaseSize: c_long = 0x0100; +pub const PWinGravity: c_long = 0x0200; +pub const PAllHints: c_long = PPosition | PSize | PMinSize | PMaxSize | PResizeInc | PAspect; + + +pub const SetModeInsert: c_int = 0; +pub const SetModeDelete: c_int = 1; + + +pub const DestroyAll: c_int = 0; +pub const RetainPermanent: c_int = 1; +pub const RetainTemporary: c_int = 2; + + +pub const Above: c_int = 0; +pub const Below: c_int = 1; +pub const TopIf: c_int = 2; +pub const BottomIf: c_int = 3; +pub const Opposite: c_int = 4; + + +pub const RaiseLowest: c_int = 0; +pub const LowerHighest: c_int = 1; + + +pub const GXclear: c_int = 0x0; +pub const GXand: c_int = 0x1; +pub const GXandReverse: c_int = 0x2; +pub const GXcopy: c_int = 0x3; +pub const GXandInverted: c_int = 0x4; +pub const GXnoop: c_int = 0x5; +pub const GXxor: c_int = 0x6; +pub const GXor: c_int = 0x7; +pub const GXnor: c_int = 0x8; +pub const GXequiv: c_int = 0x9; +pub const GXinvert: c_int = 0xa; +pub const GXorReverse: c_int = 0xb; +pub const GXcopyInverted: c_int = 0xc; +pub const GXorInverted: c_int = 0xd; +pub const GXnand: c_int = 0xe; +pub const GXset: c_int = 0xf; + + +pub const LineSolid: c_int = 0; +pub const LineOnOffDash: c_int = 1; +pub const LineDoubleDash: c_int = 2; + + +pub const CapNotLast: c_int = 0; +pub const CapButt: c_int = 1; +pub const CapRound: c_int = 2; +pub const CapProjecting: c_int = 3; + + +pub const JoinMiter: c_int = 0; +pub const JoinRound: c_int = 1; +pub const JoinBevel: c_int = 2; + + +pub const FillSolid: c_int = 0; +pub const FillTiled: c_int = 1; +pub const FillStippled: c_int = 2; +pub const FillOpaqueStippled: c_int = 3; + + +pub const EvenOddRule: c_int = 0; +pub const WindingRule: c_int = 1; + + +pub const ClipByChildren: c_int = 0; +pub const IncludeInferiors: c_int = 1; + + +pub const CoordModeOrigin: c_int = 0; +pub const CoordModePrevious: c_int = 1; + + +pub const Complex: c_int = 0; +pub const Nonconvex: c_int = 1; +pub 
const Convex: c_int = 2; + + +pub const ArcChord: c_int = 0; +pub const ArcPieSlice: c_int = 1; + + +pub const GCFunction: c_uint = (1<<0); +pub const GCPlaneMask: c_uint = (1<<1); +pub const GCForeground: c_uint = (1<<2); +pub const GCBackground: c_uint = (1<<3); +pub const GCLineWidth: c_uint = (1<<4); +pub const GCLineStyle: c_uint = (1<<5); +pub const GCCapStyle: c_uint = (1<<6); +pub const GCJoinStyle: c_uint = (1<<7); +pub const GCFillStyle: c_uint = (1<<8); +pub const GCFillRule: c_uint = (1<<9); +pub const GCTile: c_uint = (1<<10); +pub const GCStipple: c_uint = (1<<11); +pub const GCTileStipXOrigin: c_uint = (1<<12); +pub const GCTileStipYOrigin: c_uint = (1<<13); +pub const GCFont : c_uint = (1<<14); +pub const GCSubwindowMode: c_uint = (1<<15); +pub const GCGraphicsExposures: c_uint = (1<<16); +pub const GCClipXOrigin: c_uint = (1<<17); +pub const GCClipYOrigin: c_uint = (1<<18); +pub const GCClipMask: c_uint = (1<<19); +pub const GCDashOffset: c_uint = (1<<20); +pub const GCDashList: c_uint = (1<<21); +pub const GCArcMode: c_uint = (1<<22); + +pub const GCLastBit: c_uint = 22; + + +pub const FontLeftToRight: c_int = 0; +pub const FontRightToLeft: c_int = 1; + +pub const FontChange: c_uchar = 255; + + +pub const CursorShape: c_int = 0; +pub const TileShape: c_int = 1; +pub const StippleShape: c_int = 2; + + +pub const AutoRepeatModeOff: c_int = 0; +pub const AutoRepeatModeOn: c_int = 1; +pub const AutoRepeatModeDefault: c_int = 2; + +pub const LedModeOff: c_int = 0; +pub const LedModeOn: c_int = 1; + + +pub const KBKeyClickPercent: c_ulong = (1<<0); +pub const KBBellPercent: c_ulong = (1<<1); +pub const KBBellPitch: c_ulong = (1<<2); +pub const KBBellDuration: c_ulong = (1<<3); +pub const KBLed: c_ulong = (1<<4); +pub const KBLedMode: c_ulong = (1<<5); +pub const KBKey: c_ulong = (1<<6); +pub const KBAutoRepeatMode: c_ulong = (1<<7); + +pub const MappingSuccess: c_uchar = 0; +pub const MappingBusy: c_uchar = 1; +pub const MappingFailed: c_uchar = 2; + +pub const MappingModifier: c_int = 0; +pub const MappingKeyboard: c_int = 1; +pub const MappingPointer: c_int = 2; + + +pub const DontPreferBlanking: c_int = 0; +pub const PreferBlanking: c_int = 1; +pub const DefaultBlanking: c_int = 2; + +pub const DisableScreenSaver: c_int = 0; +pub const DisableScreenInterval: c_int = 0; + +pub const DontAllowExposures: c_int = 0; +pub const AllowExposures: c_int = 1; +pub const DefaultExposures: c_int = 2; + +pub const ScreenSaverReset: c_int = 0; +pub const ScreenSaverActive: c_int = 1; + + +pub const HostInsert: c_uchar = 0; +pub const HostDelete: c_uchar = 1; + +pub const EnableAccess: c_int = 1; +pub const DisableAccess: c_int = 0; + + +pub const StaticGray: c_int = 0; +pub const GrayScale: c_int = 1; +pub const StaticColor: c_int = 2; +pub const PseudoColor: c_int = 3; +pub const TrueColor: c_int = 4; +pub const DirectColor: c_int = 5; + + +pub const VisualNoMask: c_long = 0x0000; +pub const VisualIDMask: c_long = 0x0001; +pub const VisualScreenMask: c_long = 0x0002; +pub const VisualDepthMask: c_long = 0x0004; +pub const VisualClassMask: c_long = 0x0008; +pub const VisualRedMaskMask: c_long = 0x0010; +pub const VisualGreenMaskMask: c_long = 0x0020; +pub const VisualBlueMaskMask: c_long = 0x0040; +pub const VisualColormapSizeMask: c_long = 0x0080; +pub const VisualBitsPerRGBMask: c_long = 0x0100; +pub const VisualAllMask: c_long = 0x01ff; + + +pub const CWBackPixmap: c_ulong = 0x0001; +pub const CWBackPixel: c_ulong = 0x0002; +pub const CWBorderPixmap: c_ulong = 0x0004; +pub const 
CWBorderPixel: c_ulong = 0x0008; +pub const CWBitGravity: c_ulong = 0x0010; +pub const CWWinGravity: c_ulong = 0x0020; +pub const CWBackingStore: c_ulong = 0x0040; +pub const CWBackingPlanes: c_ulong = 0x0080; +pub const CWBackingPixel: c_ulong = 0x0100; +pub const CWOverrideRedirect: c_ulong = 0x0200; +pub const CWSaveUnder: c_ulong = 0x0400; +pub const CWEventMask: c_ulong = 0x0800; +pub const CWDontPropagate: c_ulong = 0x1000; +pub const CWColormap: c_ulong = 0x2000; +pub const CWCursor: c_ulong = 0x4000; + + +pub const InputOutput: c_int = 1; +pub const InputOnly: c_int = 2; + + +pub const XIMPreeditArea: c_int = 0x0001; +pub const XIMPreeditCallbacks: c_int = 0x0002; +pub const XIMPreeditPosition: c_int = 0x0004; +pub const XIMPreeditNothing: c_int = 0x0008; +pub const XIMPreeditNone: c_int = 0x0010; +pub const XIMStatusArea: c_int = 0x0100; +pub const XIMStatusCallbacks: c_int = 0x0200; +pub const XIMStatusNothing: c_int = 0x0400; +pub const XIMStatusNone: c_int = 0x0800; + + +pub const LSBFirst: c_int = 0; +pub const MSBFirst: c_int = 1; + + + +pub const ParentRelative: c_int = 1; +pub const CopyFromParent: c_int = 0; +pub const PointerWindow: c_int = 0; +pub const InputFocus: c_int = 1; +pub const PointerRoot: c_int = 1; +pub const AnyPropertyType: c_int = 0; +pub const AnyKey: c_int = 0; +pub const AnyButton: c_int = 0; +pub const AllTemporary: c_int = 0; +pub const CurrentTime: Time = 0; +pub const NoSymbol: c_int = 0; + + +pub const X_PROTOCOL: c_int = 11; +pub const X_PROTOCOL_REVISION: c_int = 0; + +pub const XNVaNestedList: &'static str = "XNVaNestedList"; +pub const XNQueryInputStyle: &'static str = "queryInputStyle"; +pub const XNClientWindow: &'static str = "clientWindow"; +pub const XNInputStyle: &'static str = "inputStyle"; +pub const XNFocusWindow: &'static str = "focusWindow"; +pub const XNResourceName: &'static str = "resourceName"; +pub const XNResourceClass: &'static str = "resourceClass"; +pub const XNGeometryCallback: &'static str = "geometryCallback"; +pub const XNDestroyCallback: &'static str = "destroyCallback"; +pub const XNFilterEvents: &'static str = "filterEvents"; +pub const XNPreeditStartCallback: &'static str = "preeditStartCallback"; +pub const XNPreeditDoneCallback: &'static str = "preeditDoneCallback"; +pub const XNPreeditDrawCallback: &'static str = "preeditDrawCallback"; +pub const XNPreeditCaretCallback: &'static str = "preeditCaretCallback"; +pub const XNPreeditStateNotifyCallback: &'static str = "preeditStateNotifyCallback"; +pub const XNPreeditAttributes: &'static str = "preeditAttributes"; +pub const XNStatusStartCallback: &'static str = "statusStartCallback"; +pub const XNStatusDoneCallback: &'static str = "statusDoneCallback"; +pub const XNStatusDrawCallback: &'static str = "statusDrawCallback"; +pub const XNStatusAttributes: &'static str = "statusAttributes"; +pub const XNArea: &'static str = "area"; +pub const XNAreaNeeded: &'static str = "areaNeeded"; +pub const XNSpotLocation: &'static str = "spotLocation"; +pub const XNColormap: &'static str = "colorMap"; +pub const XNStdColormap: &'static str = "stdColorMap"; +pub const XNForeground: &'static str = "foreground"; +pub const XNBackground: &'static str = "background"; +pub const XNBackgroundPixmap: &'static str = "backgroundPixmap"; +pub const XNFontSet: &'static str = "fontSet"; +pub const XNLineSpace: &'static str = "lineSpace"; +pub const XNCursor: &'static str = "cursor"; + +pub const XNVaNestedList_0: &'static [u8] = b"XNVaNestedList\0"; +pub const XNQueryInputStyle_0: &'static [u8] = 
b"queryInputStyle\0"; +pub const XNClientWindow_0: &'static [u8] = b"clientWindow\0"; +pub const XNInputStyle_0: &'static [u8] = b"inputStyle\0"; +pub const XNFocusWindow_0: &'static [u8] = b"focusWindow\0"; +pub const XNResourceName_0: &'static [u8] = b"resourceName\0"; +pub const XNResourceClass_0: &'static [u8] = b"resourceClass\0"; +pub const XNGeometryCallback_0: &'static [u8] = b"geometryCallback\0"; +pub const XNDestroyCallback_0: &'static [u8] = b"destroyCallback\0"; +pub const XNFilterEvents_0: &'static [u8] = b"filterEvents\0"; +pub const XNPreeditStartCallback_0: &'static [u8] = b"preeditStartCallback\0"; +pub const XNPreeditDoneCallback_0: &'static [u8] = b"preeditDoneCallback\0"; +pub const XNPreeditDrawCallback_0: &'static [u8] = b"preeditDrawCallback\0"; +pub const XNPreeditCaretCallback_0: &'static [u8] = b"preeditCaretCallback\0"; +pub const XNPreeditStateNotifyCallback_0: &'static [u8] = b"preeditStateNotifyCallback\0"; +pub const XNPreeditAttributes_0: &'static [u8] = b"preeditAttributes\0"; +pub const XNStatusStartCallback_0: &'static [u8] = b"statusStartCallback\0"; +pub const XNStatusDoneCallback_0: &'static [u8] = b"statusDoneCallback\0"; +pub const XNStatusDrawCallback_0: &'static [u8] = b"statusDrawCallback\0"; +pub const XNStatusAttributes_0: &'static [u8] = b"statusAttributes\0"; +pub const XNArea_0: &'static [u8] = b"area\0"; +pub const XNAreaNeeded_0: &'static [u8] = b"areaNeeded\0"; +pub const XNSpotLocation_0: &'static [u8] = b"spotLocation\0"; +pub const XNColormap_0: &'static [u8] = b"colorMap\0"; +pub const XNStdColormap_0: &'static [u8] = b"stdColorMap\0"; +pub const XNForeground_0: &'static [u8] = b"foreground\0"; +pub const XNBackground_0: &'static [u8] = b"background\0"; +pub const XNBackgroundPixmap_0: &'static [u8] = b"backgroundPixmap\0"; +pub const XNFontSet_0: &'static [u8] = b"fontSet\0"; +pub const XNLineSpace_0: &'static [u8] = b"lineSpace\0"; +pub const XNCursor_0: &'static [u8] = b"cursor\0"; + +pub const XNQueryIMValuesList: &'static str = "queryIMValuesList"; +pub const XNQueryICValuesList: &'static str = "queryICValuesList"; +pub const XNVisiblePosition: &'static str = "visiblePosition"; +pub const XNR6PreeditCallback: &'static str = "r6PreeditCallback"; +pub const XNStringConversionCallback: &'static str = "stringConversionCallback"; +pub const XNStringConversion: &'static str = "stringConversion"; +pub const XNResetState: &'static str = "resetState"; +pub const XNHotKey: &'static str = "hotKey"; +pub const XNHotKeyState: &'static str = "hotKeyState"; +pub const XNPreeditState: &'static str = "preeditState"; +pub const XNSeparatorofNestedList: &'static str = "separatorofNestedList"; + +pub const XNQueryIMValuesList_0: &'static [u8] = b"queryIMValuesList\0"; +pub const XNQueryICValuesList_0: &'static [u8] = b"queryICValuesList\0"; +pub const XNVisiblePosition_0: &'static [u8] = b"visiblePosition\0"; +pub const XNR6PreeditCallback_0: &'static [u8] = b"r6PreeditCallback\0"; +pub const XNStringConversionCallback_0: &'static [u8] = b"stringConversionCallback\0"; +pub const XNStringConversion_0: &'static [u8] = b"stringConversion\0"; +pub const XNResetState_0: &'static [u8] = b"resetState\0"; +pub const XNHotKey_0: &'static [u8] = b"hotKey\0"; +pub const XNHotKeyState_0: &'static [u8] = b"hotKeyState\0"; +pub const XNPreeditState_0: &'static [u8] = b"preeditState\0"; +pub const XNSeparatorofNestedList_0: &'static [u8] = b"separatorofNestedList\0"; + +pub const XBufferOverflow: i32 = -1; +pub const XLookupNone: i32 = 1; +pub const XLookupChars: 
i32 = 2; +pub const XLookupKeySym: i32 = 3; +pub const XLookupBoth: i32 = 4; + + +pub const XkbActionMessageLength: usize = 6; + +pub const XkbOD_Success: c_int = 0; +pub const XkbOD_BadLibraryVersion: c_int = 1; +pub const XkbOD_ConnectionRefused: c_int = 2; +pub const XkbOD_NonXkbServer: c_int = 3; +pub const XkbOD_BadServerVersion: c_int = 4; + +pub const XkbLC_ForceLatinLookup: c_uint = 1 << 0; +pub const XkbLC_ConsumeLookupMods: c_uint = 1 << 1; +pub const XkbLC_AlwaysConsumeShiftAndLock: c_uint = 1 << 2; +pub const XkbLC_IgnoreNewKeyboards: c_uint = 1 << 3; +pub const XkbLC_ControlFallback: c_uint = 1 << 4; +pub const XkbLC_ConsumeKeysOnComposeFail: c_uint = 1 << 29; +pub const XkbLC_ComposeLED: c_uint = 1 << 30; +pub const XkbLC_BeepOnComposeFail: c_uint = 1 << 31; + +pub const XkbLC_AllComposeControls: c_uint = 0xc000_0000; +pub const XkbLC_AllControls: c_uint = 0xc000_001f; + +pub const XkbNewKeyboardNotify: c_int = 0; +pub const XkbMapNotify: c_int = 1; +pub const XkbStateNotify: c_int = 2; +pub const XkbControlsNotify: c_int = 3; +pub const XkbIndicatorStateNotify: c_int = 4; +pub const XkbIndicatorMapNotify: c_int = 5; +pub const XkbNamesNotify: c_int = 6; +pub const XkbCompatMapNotify: c_int = 7; +pub const XkbBellNotify: c_int = 8; +pub const XkbActionMessage: c_int = 9; +pub const XkbAccessXNotify: c_int = 10; +pub const XkbExtensionDeviceNotify: c_int = 11; + +pub const XkbNewKeyboardNotifyMask: c_ulong = 1 << 0; +pub const XkbMapNotifyMask: c_ulong = 1 << 1; +pub const XkbStateNotifyMask: c_ulong = 1 << 2; +pub const XkbControlsNotifyMask: c_ulong = 1 << 3; +pub const XkbIndicatorStateNotifyMask: c_ulong = 1 << 4; +pub const XkbIndicatorMapNotifyMask: c_ulong = 1 << 5; +pub const XkbNamesNotifyMask: c_ulong = 1 << 6; +pub const XkbCompatMapNotifyMask: c_ulong = 1 << 7; +pub const XkbBellNotifyMask: c_ulong = 1 << 8; +pub const XkbActionMessageMask: c_ulong = 1 << 9; +pub const XkbAccessXNotifyMask: c_ulong = 1 << 10; +pub const XkbExtensionDeviceNotifyMask: c_ulong = 1 << 11; +pub const XkbAllEventsMask: c_ulong = 0xfff; + + +pub const NoValue: c_int = 0x0000; +pub const XValue: c_int = 0x0001; +pub const YValue: c_int = 0x0002; +pub const WidthValue: c_int = 0x0004; +pub const HeightValue: c_int = 0x0008; +pub const AllValues: c_int = 0x000f; +pub const XNegative: c_int = 0x0010; +pub const YNegative: c_int = 0x0020; + + +pub const InputHint: c_long = 1 << 0; +pub const StateHint: c_long = 1 << 1; +pub const IconPixmapHint: c_long = 1 << 2; +pub const IconWindowHint: c_long = 1 << 3; +pub const IconPositionHint: c_long = 1 << 4; +pub const IconMaskHint: c_long = 1 << 5; +pub const WindowGroupHint: c_long = 1 << 6; +pub const AllHints: c_long = InputHint | StateHint | IconPixmapHint | IconWindowHint | IconPositionHint | IconMaskHint | WindowGroupHint; +pub const XUrgencyHint: c_long = 1 << 8; + + +pub const XStringStyle: c_int = 0; +pub const XCompoundTextStyle: c_int = 1; +pub const XTextStyle: c_int = 2; +pub const XStdICCTextStyle: c_int = 3; +pub const XUTF8StringStyle: c_int = 4; + + + + + + + +#[cfg(feature = "xlib")] +#[inline] +pub unsafe fn XUniqueContext () -> XContext { XrmUniqueQuark() } diff --git a/third_party/rust/x11/src/xlib_xcb.rs b/third_party/rust/x11/src/xlib_xcb.rs new file mode 100644 index 000000000000..c1a814588c2c --- /dev/null +++ b/third_party/rust/x11/src/xlib_xcb.rs @@ -0,0 +1,10 @@ +use std::os::raw::c_void; +use ::xlib::Display; + +x11_link! 
{ Xlib_xcb, xlib_xcb, ["libX11-xcb.so.1", "libX11-xcb.so"], 1, + pub fn XGetXCBConnection(_1: *mut Display) -> *mut xcb_connection_t, + variadic: + globals: +} + +pub type xcb_connection_t = c_void; diff --git a/third_party/rust/x11/src/xmd.rs b/third_party/rust/x11/src/xmd.rs new file mode 100644 index 000000000000..12ec995df286 --- /dev/null +++ b/third_party/rust/x11/src/xmd.rs @@ -0,0 +1,12 @@ +pub type INT8 = i8; +pub type INT16 = i16; +pub type INT32 = i32; +pub type INT64 = i64; + +pub type CARD8 = u8; +pub type CARD16 = u16; +pub type CARD32 = u32; +pub type CARD64 = u64; + +pub type BYTE = CARD8; +pub type BOOL = CARD8; diff --git a/third_party/rust/x11/src/xmu.rs b/third_party/rust/x11/src/xmu.rs new file mode 100644 index 000000000000..05d87360a6ce --- /dev/null +++ b/third_party/rust/x11/src/xmu.rs @@ -0,0 +1,199 @@ + + + + +use std::os::raw::{ + c_char, + c_int, + c_uchar, + c_uint, + c_ulong, + c_void, +}; +use libc::FILE; + +use ::xlib::{ + Display, + GC, + Screen, + XColor, + XComposeStatus, + XErrorEvent, + XEvent, + XKeyEvent, + XrmValue, + XSizeHints, + XStandardColormap, + XVisualInfo, +}; +use ::xt::{ + Widget, + XtAppContext, +}; + + + + + + + +x11_link! { Xmu, xmu, ["libXmu.so.6", "libXmu.so"], 132, + pub fn XmuAddCloseDisplayHook (_3: *mut Display, _2: Option c_int>, _1: *mut c_char) -> *mut c_char, + pub fn XmuAddInitializer (_2: Option, _1: *mut c_char) -> (), + pub fn XmuAllStandardColormaps (_1: *mut Display) -> c_int, + pub fn XmuAppendSegment (_2: *mut XmuSegment, _1: *mut XmuSegment) -> c_int, + pub fn XmuAreaAnd (_2: *mut XmuArea, _1: *mut XmuArea) -> *mut XmuArea, + pub fn XmuAreaCopy (_2: *mut XmuArea, _1: *mut XmuArea) -> *mut XmuArea, + pub fn XmuAreaDup (_1: *mut XmuArea) -> *mut XmuArea, + pub fn XmuAreaNot (_5: *mut XmuArea, _4: c_int, _3: c_int, _2: c_int, _1: c_int) -> *mut XmuArea, + pub fn XmuAreaOrXor (_3: *mut XmuArea, _2: *mut XmuArea, _1: c_int) -> *mut XmuArea, + pub fn XmuCallInitializers (_1: XtAppContext) -> (), + pub fn XmuClientWindow (_2: *mut Display, _1: c_ulong) -> c_ulong, + pub fn XmuCompareISOLatin1 (_2: *const c_char, _1: *const c_char) -> c_int, + pub fn XmuConvertStandardSelection (_8: Widget, _7: c_ulong, _6: *mut c_ulong, _5: *mut c_ulong, _4: *mut c_ulong, _3: *mut *mut c_char, _2: *mut c_ulong, _1: *mut c_int) -> c_char, + pub fn XmuCopyISOLatin1Lowered (_2: *mut c_char, _1: *const c_char) -> (), + pub fn XmuCopyISOLatin1Uppered (_2: *mut c_char, _1: *const c_char) -> (), + pub fn XmuCreateColormap (_2: *mut Display, _1: *mut XStandardColormap) -> c_int, + pub fn XmuCreatePixmapFromBitmap (_8: *mut Display, _7: c_ulong, _6: c_ulong, _5: c_uint, _4: c_uint, _3: c_uint, _2: c_ulong, _1: c_ulong) -> c_ulong, + pub fn XmuCreateStippledPixmap (_4: *mut Screen, _3: c_ulong, _2: c_ulong, _1: c_uint) -> c_ulong, + pub fn XmuCursorNameToIndex (_1: *const c_char) -> c_int, + pub fn XmuCvtBackingStoreToString (_6: *mut Display, _5: *mut XrmValue, _4: *mut c_uint, _3: *mut XrmValue, _2: *mut XrmValue, _1: *mut *mut c_void) -> c_char, + pub fn XmuCvtFunctionToCallback (_4: *mut XrmValue, _3: *mut c_uint, _2: *mut XrmValue, _1: *mut XrmValue) -> (), + pub fn XmuCvtGravityToString (_6: *mut Display, _5: *mut XrmValue, _4: *mut c_uint, _3: *mut XrmValue, _2: *mut XrmValue, _1: *mut *mut c_void) -> c_char, + pub fn XmuCvtJustifyToString (_6: *mut Display, _5: *mut XrmValue, _4: *mut c_uint, _3: *mut XrmValue, _2: *mut XrmValue, _1: *mut *mut c_void) -> c_char, + pub fn XmuCvtLongToString (_6: *mut Display, _5: *mut XrmValue, _4: *mut 
c_uint, _3: *mut XrmValue, _2: *mut XrmValue, _1: *mut *mut c_void) -> c_char, + pub fn XmuCvtOrientationToString (_6: *mut Display, _5: *mut XrmValue, _4: *mut c_uint, _3: *mut XrmValue, _2: *mut XrmValue, _1: *mut *mut c_void) -> c_char, + pub fn XmuCvtShapeStyleToString (_6: *mut Display, _5: *mut XrmValue, _4: *mut c_uint, _3: *mut XrmValue, _2: *mut XrmValue, _1: *mut *mut c_void) -> c_char, + pub fn XmuCvtStringToBackingStore (_4: *mut XrmValue, _3: *mut c_uint, _2: *mut XrmValue, _1: *mut XrmValue) -> (), + pub fn XmuCvtStringToBitmap (_4: *mut XrmValue, _3: *mut c_uint, _2: *mut XrmValue, _1: *mut XrmValue) -> (), + pub fn XmuCvtStringToColorCursor (_6: *mut Display, _5: *mut XrmValue, _4: *mut c_uint, _3: *mut XrmValue, _2: *mut XrmValue, _1: *mut *mut c_void) -> c_char, + pub fn XmuCvtStringToCursor (_4: *mut XrmValue, _3: *mut c_uint, _2: *mut XrmValue, _1: *mut XrmValue) -> (), + pub fn XmuCvtStringToGravity (_4: *mut XrmValue, _3: *mut c_uint, _2: *mut XrmValue, _1: *mut XrmValue) -> (), + pub fn XmuCvtStringToJustify (_4: *mut XrmValue, _3: *mut c_uint, _2: *mut XrmValue, _1: *mut XrmValue) -> (), + pub fn XmuCvtStringToLong (_4: *mut XrmValue, _3: *mut c_uint, _2: *mut XrmValue, _1: *mut XrmValue) -> (), + pub fn XmuCvtStringToOrientation (_4: *mut XrmValue, _3: *mut c_uint, _2: *mut XrmValue, _1: *mut XrmValue) -> (), + pub fn XmuCvtStringToShapeStyle (_6: *mut Display, _5: *mut XrmValue, _4: *mut c_uint, _3: *mut XrmValue, _2: *mut XrmValue, _1: *mut *mut c_void) -> c_char, + pub fn XmuCvtStringToWidget (_4: *mut XrmValue, _3: *mut c_uint, _2: *mut XrmValue, _1: *mut XrmValue) -> (), + pub fn XmuCvtWidgetToString (_6: *mut Display, _5: *mut XrmValue, _4: *mut c_uint, _3: *mut XrmValue, _2: *mut XrmValue, _1: *mut *mut c_void) -> c_char, + pub fn XmuDeleteStandardColormap (_3: *mut Display, _2: c_int, _1: c_ulong) -> (), + pub fn XmuDestroyScanlineList (_1: *mut XmuScanline) -> (), + pub fn XmuDestroySegmentList (_1: *mut XmuSegment) -> (), + pub fn XmuDistinguishableColors (_2: *mut XColor, _1: c_int) -> c_int, + pub fn XmuDistinguishablePixels (_4: *mut Display, _3: c_ulong, _2: *mut c_ulong, _1: c_int) -> c_int, + pub fn XmuDQAddDisplay (_3: *mut XmuDisplayQueue, _2: *mut Display, _1: *mut c_char) -> *mut XmuDisplayQueueEntry, + pub fn XmuDQCreate (_3: Option c_int>, _2: Option c_int>, _1: *mut c_char) -> *mut XmuDisplayQueue, + pub fn XmuDQDestroy (_2: *mut XmuDisplayQueue, _1: c_int) -> c_int, + pub fn XmuDQLookupDisplay (_2: *mut XmuDisplayQueue, _1: *mut Display) -> *mut XmuDisplayQueueEntry, + pub fn XmuDQRemoveDisplay (_2: *mut XmuDisplayQueue, _1: *mut Display) -> c_int, + pub fn XmuDrawLogo (_8: *mut Display, _7: c_ulong, _6: GC, _5: GC, _4: c_int, _3: c_int, _2: c_uint, _1: c_uint) -> (), + pub fn XmuDrawRoundedRectangle (_9: *mut Display, _8: c_ulong, _7: GC, _6: c_int, _5: c_int, _4: c_int, _3: c_int, _2: c_int, _1: c_int) -> (), + pub fn XmuFillRoundedRectangle (_9: *mut Display, _8: c_ulong, _7: GC, _6: c_int, _5: c_int, _4: c_int, _3: c_int, _2: c_int, _1: c_int) -> (), + pub fn XmuGetAtomName (_2: *mut Display, _1: c_ulong) -> *mut c_char, + pub fn XmuGetColormapAllocation (_5: *mut XVisualInfo, _4: c_ulong, _3: *mut c_ulong, _2: *mut c_ulong, _1: *mut c_ulong) -> c_int, + pub fn XmuGetHostname (_2: *mut c_char, _1: c_int) -> c_int, + pub fn XmuInternAtom (_2: *mut Display, _1: AtomPtr) -> c_ulong, + pub fn XmuInternStrings (_4: *mut Display, _3: *mut *mut c_char, _2: c_uint, _1: *mut c_ulong) -> (), + pub fn XmuLocateBitmapFile (_8: *mut Screen, _7: 
*const c_char, _6: *mut c_char, _5: c_int, _4: *mut c_int, _3: *mut c_int, _2: *mut c_int, _1: *mut c_int) -> c_ulong, + pub fn XmuLocatePixmapFile (_11: *mut Screen, _10: *const c_char, _9: c_ulong, _8: c_ulong, _7: c_uint, _6: *mut c_char, _5: c_int, _4: *mut c_int, _3: *mut c_int, _2: *mut c_int, _1: *mut c_int) -> c_ulong, + pub fn XmuLookupAPL (_5: *mut XKeyEvent, _4: *mut c_uchar, _3: c_int, _2: *mut c_ulong, _1: *mut XComposeStatus) -> c_int, + pub fn XmuLookupArabic (_5: *mut XKeyEvent, _4: *mut c_uchar, _3: c_int, _2: *mut c_ulong, _1: *mut XComposeStatus) -> c_int, + pub fn XmuLookupCloseDisplayHook (_4: *mut Display, _3: *mut c_char, _2: Option c_int>, _1: *mut c_char) -> c_int, + pub fn XmuLookupCyrillic (_5: *mut XKeyEvent, _4: *mut c_uchar, _3: c_int, _2: *mut c_ulong, _1: *mut XComposeStatus) -> c_int, + pub fn XmuLookupGreek (_5: *mut XKeyEvent, _4: *mut c_uchar, _3: c_int, _2: *mut c_ulong, _1: *mut XComposeStatus) -> c_int, + pub fn XmuLookupHebrew (_5: *mut XKeyEvent, _4: *mut c_uchar, _3: c_int, _2: *mut c_ulong, _1: *mut XComposeStatus) -> c_int, + pub fn XmuLookupJISX0201 (_5: *mut XKeyEvent, _4: *mut c_uchar, _3: c_int, _2: *mut c_ulong, _1: *mut XComposeStatus) -> c_int, + pub fn XmuLookupKana (_5: *mut XKeyEvent, _4: *mut c_uchar, _3: c_int, _2: *mut c_ulong, _1: *mut XComposeStatus) -> c_int, + pub fn XmuLookupLatin1 (_5: *mut XKeyEvent, _4: *mut c_uchar, _3: c_int, _2: *mut c_ulong, _1: *mut XComposeStatus) -> c_int, + pub fn XmuLookupLatin2 (_5: *mut XKeyEvent, _4: *mut c_uchar, _3: c_int, _2: *mut c_ulong, _1: *mut XComposeStatus) -> c_int, + pub fn XmuLookupLatin3 (_5: *mut XKeyEvent, _4: *mut c_uchar, _3: c_int, _2: *mut c_ulong, _1: *mut XComposeStatus) -> c_int, + pub fn XmuLookupLatin4 (_5: *mut XKeyEvent, _4: *mut c_uchar, _3: c_int, _2: *mut c_ulong, _1: *mut XComposeStatus) -> c_int, + pub fn XmuLookupStandardColormap (_7: *mut Display, _6: c_int, _5: c_ulong, _4: c_uint, _3: c_ulong, _2: c_int, _1: c_int) -> c_int, + pub fn XmuLookupString (_6: *mut XKeyEvent, _5: *mut c_uchar, _4: c_int, _3: *mut c_ulong, _2: *mut XComposeStatus, _1: c_ulong) -> c_int, + pub fn XmuMakeAtom (_1: *const c_char) -> AtomPtr, + pub fn XmuNameOfAtom (_1: AtomPtr) -> *mut c_char, + pub fn XmuNCopyISOLatin1Lowered (_3: *mut c_char, _2: *const c_char, _1: c_int) -> (), + pub fn XmuNCopyISOLatin1Uppered (_3: *mut c_char, _2: *const c_char, _1: c_int) -> (), + pub fn XmuNewArea (_4: c_int, _3: c_int, _2: c_int, _1: c_int) -> *mut XmuArea, + pub fn XmuNewCvtStringToWidget (_6: *mut Display, _5: *mut XrmValue, _4: *mut c_uint, _3: *mut XrmValue, _2: *mut XrmValue, _1: *mut *mut c_void) -> c_char, + pub fn XmuNewScanline (_3: c_int, _2: c_int, _1: c_int) -> *mut XmuScanline, + pub fn XmuNewSegment (_2: c_int, _1: c_int) -> *mut XmuSegment, + pub fn XmuOptimizeArea (_1: *mut XmuArea) -> *mut XmuArea, + pub fn XmuOptimizeScanline (_1: *mut XmuScanline) -> *mut XmuScanline, + pub fn XmuPrintDefaultErrorMessage (_3: *mut Display, _2: *mut XErrorEvent, _1: *mut FILE) -> c_int, + pub fn XmuReadBitmapData (_6: *mut FILE, _5: *mut c_uint, _4: *mut c_uint, _3: *mut *mut c_uchar, _2: *mut c_int, _1: *mut c_int) -> c_int, + pub fn XmuReadBitmapDataFromFile (_6: *const c_char, _5: *mut c_uint, _4: *mut c_uint, _3: *mut *mut c_uchar, _2: *mut c_int, _1: *mut c_int) -> c_int, + pub fn XmuRegisterExternalAgent (_4: Widget, _3: *mut c_void, _2: *mut XEvent, _1: *mut c_char) -> (), + pub fn XmuReleaseStippledPixmap (_2: *mut Screen, _1: c_ulong) -> (), + pub fn XmuRemoveCloseDisplayHook (_4: *mut 
Display, _3: *mut c_char, _2: Option c_int>, _1: *mut c_char) -> c_int, + pub fn XmuReshapeWidget (_4: Widget, _3: c_int, _2: c_int, _1: c_int) -> c_char, + pub fn XmuScanlineAnd (_2: *mut XmuScanline, _1: *mut XmuScanline) -> *mut XmuScanline, + pub fn XmuScanlineAndSegment (_2: *mut XmuScanline, _1: *mut XmuSegment) -> *mut XmuScanline, + pub fn XmuScanlineCopy (_2: *mut XmuScanline, _1: *mut XmuScanline) -> *mut XmuScanline, + pub fn XmuScanlineEqu (_2: *mut XmuScanline, _1: *mut XmuScanline) -> c_int, + pub fn XmuScanlineNot (_3: *mut XmuScanline, _2: c_int, _1: c_int) -> *mut XmuScanline, + pub fn XmuScanlineOr (_2: *mut XmuScanline, _1: *mut XmuScanline) -> *mut XmuScanline, + pub fn XmuScanlineOrSegment (_2: *mut XmuScanline, _1: *mut XmuSegment) -> *mut XmuScanline, + pub fn XmuScanlineXor (_2: *mut XmuScanline, _1: *mut XmuScanline) -> *mut XmuScanline, + pub fn XmuScanlineXorSegment (_2: *mut XmuScanline, _1: *mut XmuSegment) -> *mut XmuScanline, + pub fn XmuScreenOfWindow (_2: *mut Display, _1: c_ulong) -> *mut Screen, + pub fn XmuSimpleErrorHandler (_2: *mut Display, _1: *mut XErrorEvent) -> c_int, + pub fn XmuStandardColormap (_9: *mut Display, _8: c_int, _7: c_ulong, _6: c_uint, _5: c_ulong, _4: c_ulong, _3: c_ulong, _2: c_ulong, _1: c_ulong) -> *mut XStandardColormap, + pub fn XmuUpdateMapHints (_3: *mut Display, _2: c_ulong, _1: *mut XSizeHints) -> c_int, + pub fn XmuValidArea (_1: *mut XmuArea) -> c_int, + pub fn XmuValidScanline (_1: *mut XmuScanline) -> c_int, + pub fn XmuVisualStandardColormaps (_6: *mut Display, _5: c_int, _4: c_ulong, _3: c_uint, _2: c_int, _1: c_int) -> c_int, + pub fn XmuWnCountOwnedResources (_3: *mut XmuWidgetNode, _2: *mut XmuWidgetNode, _1: c_int) -> c_int, + pub fn XmuWnFetchResources (_3: *mut XmuWidgetNode, _2: Widget, _1: *mut XmuWidgetNode) -> (), + pub fn XmuWnInitializeNodes (_2: *mut XmuWidgetNode, _1: c_int) -> (), + pub fn XmuWnNameToNode (_3: *mut XmuWidgetNode, _2: c_int, _1: *const c_char) -> *mut XmuWidgetNode, +variadic: + pub fn XmuSnprintf (_3: *mut c_char, _2: c_int, _1: *const c_char) -> c_int, +globals: + pub static _XA_ATOM_PAIR: AtomPtr, + pub static _XA_CHARACTER_POSITION: AtomPtr, + pub static _XA_CLASS: AtomPtr, + pub static _XA_CLIENT_WINDOW: AtomPtr, + pub static _XA_CLIPBOARD: AtomPtr, + pub static _XA_COMPOUND_TEXT: AtomPtr, + pub static _XA_DECNET_ADDRESS: AtomPtr, + pub static _XA_DELETE: AtomPtr, + pub static _XA_FILENAME: AtomPtr, + pub static _XA_HOSTNAME: AtomPtr, + pub static _XA_IP_ADDRESS: AtomPtr, + pub static _XA_LENGTH: AtomPtr, + pub static _XA_LIST_LENGTH: AtomPtr, + pub static _XA_NAME: AtomPtr, + pub static _XA_NET_ADDRESS: AtomPtr, + pub static _XA_NULL: AtomPtr, + pub static _XA_OWNER_OS: AtomPtr, + pub static _XA_SPAN: AtomPtr, + pub static _XA_TARGETS: AtomPtr, + pub static _XA_TEXT: AtomPtr, + pub static _XA_TIMESTAMP: AtomPtr, + pub static _XA_USER: AtomPtr, + pub static _XA_UTF8_STRING: AtomPtr, +} + + + + + + + + +#[repr(C)] pub struct _AtomRec; +#[repr(C)] pub struct _XmuArea; +#[repr(C)] pub struct _XmuDisplayQueue; +#[repr(C)] pub struct _XmuDisplayQueueEntry; +#[repr(C)] pub struct _XmuScanline; +#[repr(C)] pub struct _XmuSegment; +#[repr(C)] pub struct _XmuWidgetNode; + + +pub type AtomPtr = *mut _AtomRec; +pub type XmuArea = _XmuArea; +pub type XmuDisplayQueue = _XmuDisplayQueue; +pub type XmuDisplayQueueEntry = _XmuDisplayQueueEntry; +pub type XmuScanline = _XmuScanline; +pub type XmuSegment = _XmuSegment; +pub type XmuWidgetNode = _XmuWidgetNode; diff --git 
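[Editor's aside] The Xmu table above is a dynamically loaded set of function pointers. A minimal usage sketch, assuming the x11_link!-generated struct exposes an x11-dl-style open() constructor and callable fields (the module path x11::xmu, the buffer size, and the helper name are illustrative, not part of this patch):

use std::os::raw::c_int;

// Hypothetical helper: fetch the host name through libXmu.
// `Xmu::open()` is assumed to dlopen libXmu.so at runtime, as in x11-dl.
fn hostname_via_xmu() -> Option<String> {
    let xmu = x11::xmu::Xmu::open().ok()?;
    let mut buf = vec![0u8; 256];
    // XmuGetHostname writes into a caller-supplied buffer and returns the
    // number of bytes written (0 on failure), per the signature above.
    let len = unsafe { (xmu.XmuGetHostname)(buf.as_mut_ptr() as *mut _, buf.len() as c_int) };
    if len <= 0 {
        return None;
    }
    buf.truncate(len as usize);
    String::from_utf8(buf).ok()
}
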
a/third_party/rust/x11/src/xrandr.rs b/third_party/rust/x11/src/xrandr.rs new file mode 100644 index 000000000000..7d2f105c46ca --- /dev/null +++ b/third_party/rust/x11/src/xrandr.rs @@ -0,0 +1,558 @@ + + + + +use std::os::raw::{ c_char, c_int, c_long, c_short, c_uchar, c_uint, c_ulong, c_ushort }; + +use xlib::{ Atom, Bool, Display, Drawable, Status, Time, XEvent, XID, Window }; +use xrender::{ XFixed, XTransform }; + + + + + + + +x11_link! { Xrandr, xrandr, ["libXrandr.so.2", "libXrandr.so"], 70, + pub fn XRRAddOutputMode (dpy: *mut Display, output: RROutput, mode: RRMode) -> (), + pub fn XRRAllocGamma (size: c_int) -> *mut XRRCrtcGamma, + pub fn XRRAllocModeInfo (name: *const c_char, nameLength: c_int) -> *mut XRRModeInfo, + pub fn XRRAllocateMonitor (dpy: *mut Display, noutput: c_int) -> *mut XRRMonitorInfo, + pub fn XRRChangeOutputProperty (dpy: *mut Display, output: RROutput, property: Atom, type_: Atom, format: c_int, mode: c_int, data: *const c_uchar, nelements: c_int) -> (), + pub fn XRRChangeProviderProperty (dpy: *mut Display, provider: RRProvider, property: Atom, type_: Atom, format: c_int, mode: c_int, data: *const c_uchar, nelements: c_int) -> (), + pub fn XRRConfigCurrentConfiguration (config: *mut XRRScreenConfiguration, rotation: *mut Rotation) -> SizeID, + pub fn XRRConfigCurrentRate (config: *mut XRRScreenConfiguration) -> c_short, + pub fn XRRConfigRates (config: *mut XRRScreenConfiguration, sizeID: c_int, nrates: *mut c_int) -> *mut c_short, + pub fn XRRConfigRotations (config: *mut XRRScreenConfiguration, current_rotation: *mut Rotation) -> Rotation, + pub fn XRRConfigSizes (config: *mut XRRScreenConfiguration, nsizes: *mut c_int) -> *mut XRRScreenSize, + pub fn XRRConfigTimes (config: *mut XRRScreenConfiguration, config_timestamp: *mut Time) -> Time, + pub fn XRRConfigureOutputProperty (dpy: *mut Display, output: RROutput, property: Atom, pending: Bool, range: Bool, num_values: c_int, values: *mut c_long) -> (), + pub fn XRRConfigureProviderProperty (dpy: *mut Display, provider: RRProvider, property: Atom, pending: Bool, range: Bool, num_values: c_int, values: *mut c_long) -> (), + pub fn XRRCreateMode (dpy: *mut Display, window: Window, modeInfo: *mut XRRModeInfo) -> RRMode, + pub fn XRRDeleteMonitor (dpy: *mut Display, window: Window, name: Atom) -> (), + pub fn XRRDeleteOutputMode (dpy: *mut Display, output: RROutput, mode: RRMode) -> (), + pub fn XRRDeleteOutputProperty (dpy: *mut Display, output: RROutput, property: Atom) -> (), + pub fn XRRDeleteProviderProperty (dpy: *mut Display, provider: RRProvider, property: Atom) -> (), + pub fn XRRDestroyMode (dpy: *mut Display, mode: RRMode) -> (), + pub fn XRRFreeCrtcInfo (crtcInfo: *mut XRRCrtcInfo) -> (), + pub fn XRRFreeGamma (gamma: *mut XRRCrtcGamma) -> (), + pub fn XRRFreeModeInfo (modeInfo: *mut XRRModeInfo) -> (), + pub fn XRRFreeMonitors (monitors: *mut XRRMonitorInfo) -> (), + pub fn XRRFreeOutputInfo (outputInfo: *mut XRROutputInfo) -> (), + pub fn XRRFreePanning (panning: *mut XRRPanning) -> (), + pub fn XRRFreeProviderInfo (provider: *mut XRRProviderInfo) -> (), + pub fn XRRFreeProviderResources (resources: *mut XRRProviderResources) -> (), + pub fn XRRFreeScreenConfigInfo (config: *mut XRRScreenConfiguration) -> (), + pub fn XRRFreeScreenResources (resources: *mut XRRScreenResources) -> (), + pub fn XRRGetCrtcGamma (dpy: *mut Display, crtc: RRCrtc) -> *mut XRRCrtcGamma, + pub fn XRRGetCrtcGammaSize (dpy: *mut Display, crtc: RRCrtc) -> c_int, + pub fn XRRGetCrtcInfo (dpy: *mut Display, resources: *mut 
XRRScreenResources, crtc: RRCrtc) -> *mut XRRCrtcInfo, + pub fn XRRGetCrtcTransform (dpy: *mut Display, crtc: RRCrtc, attributes: *mut *mut XRRCrtcTransformAttributes) -> Status, + pub fn XRRGetMonitors (dpy: *mut Display, window: Window, get_active: Bool, nmonitors: *mut c_int) -> *mut XRRMonitorInfo, + pub fn XRRGetOutputInfo (dpy: *mut Display, resources: *mut XRRScreenResources, output: RROutput) -> *mut XRROutputInfo, + pub fn XRRGetOutputPrimary (dpy: *mut Display, window: Window) -> RROutput, + pub fn XRRGetOutputProperty (dpy: *mut Display, output: RROutput, property: Atom, offset: c_long, length: c_long, _delete: Bool, pending: Bool, req_type: Atom, actual_type: *mut Atom, actual_format: *mut c_int, nitems: *mut c_ulong, bytes_after: *mut c_ulong, prop: *mut *mut c_uchar) -> c_int, + pub fn XRRGetPanning (dpy: *mut Display, resources: *mut XRRScreenResources, crtc: RRCrtc) -> *mut XRRPanning, + pub fn XRRGetProviderInfo (dpy: *mut Display, resources: *mut XRRScreenResources, provider: RRProvider) -> *mut XRRProviderInfo, + pub fn XRRGetProviderProperty (dpy: *mut Display, provider: RRProvider, property: Atom, offset: c_long, length: c_long, _delete: Bool, pending: Bool, req_type: Atom, actual_type: *mut Atom, actual_format: *mut c_int, nitems: *mut c_ulong, bytes_after: *mut c_ulong, prop: *mut *mut c_uchar) -> c_int, + pub fn XRRGetProviderResources (dpy: *mut Display, window: Window) -> *mut XRRProviderResources, + pub fn XRRGetScreenInfo (dpy: *mut Display, window: Window) -> *mut XRRScreenConfiguration, + pub fn XRRGetScreenResources (dpy: *mut Display, window: Window) -> *mut XRRScreenResources, + pub fn XRRGetScreenResourcesCurrent (dpy: *mut Display, window: Window) -> *mut XRRScreenResources, + pub fn XRRGetScreenSizeRange (dpy: *mut Display, window: Window, minWidth: *mut c_int, minHeight: *mut c_int, maxWidth: *mut c_int, maxHeight: *mut c_int) -> Status, + pub fn XRRListOutputProperties (dpy: *mut Display, output: RROutput, nprop: *mut c_int) -> *mut Atom, + pub fn XRRListProviderProperties (dpy: *mut Display, provider: RRProvider, nprop: *mut c_int) -> *mut Atom, + pub fn XRRQueryExtension (dpy: *mut Display, event_base_return: *mut c_int, error_base_return: *mut c_int) -> Bool, + pub fn XRRQueryOutputProperty (dpy: *mut Display, output: RROutput, property: Atom) -> *mut XRRPropertyInfo, + pub fn XRRQueryProviderProperty (dpy: *mut Display, provider: RRProvider, property: Atom) -> *mut XRRPropertyInfo, + pub fn XRRQueryVersion (dpy: *mut Display, major_version_return: *mut c_int, minor_version_return: *mut c_int) -> Status, + pub fn XRRRates (dpy: *mut Display, screen: c_int, sizeID: c_int, nrates: *mut c_int) -> *mut c_short, + pub fn XRRRootToScreen (dpy: *mut Display, root: Window) -> c_int, + pub fn XRRRotations (dpy: *mut Display, screen: c_int, current_rotation: *mut Rotation) -> Rotation, + pub fn XRRSelectInput (dpy: *mut Display, window: Window, mask: c_int) -> (), + pub fn XRRSetCrtcConfig (dpy: *mut Display, resources: *mut XRRScreenResources, crtc: RRCrtc, timestamp: Time, x: c_int, y: c_int, mode: RRMode, rotation: Rotation, outputs: *mut RROutput, noutputs: c_int) -> Status, + pub fn XRRSetCrtcGamma (dpy: *mut Display, crtc: RRCrtc, gamma: *mut XRRCrtcGamma) -> (), + pub fn XRRSetCrtcTransform (dpy: *mut Display, crtc: RRCrtc, transform: *mut XTransform, filter: *const c_char, params: *mut XFixed, nparams: c_int) -> (), + pub fn XRRSetMonitor (dpy: *mut Display, window: Window, monitor: *mut XRRMonitorInfo) -> (), + pub fn XRRSetOutputPrimary (dpy: *mut 
Display, window: Window, output: RROutput) -> (), + pub fn XRRSetPanning (dpy: *mut Display, resources: *mut XRRScreenResources, crtc: RRCrtc, panning: *mut XRRPanning) -> Status, + pub fn XRRSetProviderOffloadSink (dpy: *mut Display, provider: XID, sink_provider: XID) -> c_int, + pub fn XRRSetProviderOutputSource (dpy: *mut Display, provider: XID, source_provider: XID) -> c_int, + pub fn XRRSetScreenConfig (dpy: *mut Display, config: *mut XRRScreenConfiguration, draw: Drawable, size_index: c_int, rotation: Rotation, timestamp: Time) -> Status, + pub fn XRRSetScreenConfigAndRate (dpy: *mut Display, config: *mut XRRScreenConfiguration, draw: Drawable, size_index: c_int, rotation: Rotation, rate: c_short, timestamp: Time) -> Status, + pub fn XRRSetScreenSize (dpy: *mut Display, window: Window, width: c_int, height: c_int, mmWidth: c_int, mmHeight: c_int) -> (), + pub fn XRRSizes (dpy: *mut Display, screen: c_int, nsizes: *mut c_int) -> *mut XRRScreenSize, + pub fn XRRTimes (dpy: *mut Display, screen: c_int, config_timestamp: *mut Time) -> Time, + pub fn XRRUpdateConfiguration (event: *mut XEvent) -> c_int, +variadic: +globals: +} + + + + + + + +pub type Connection = c_ushort; +pub type Rotation = c_ushort; +pub type SizeID = c_ushort; +pub type SubpixelOrder = c_ushort; + +pub type RROutput = XID; +pub type RRCrtc = XID; +pub type RRMode = XID; +pub type RRProvider = XID; + +#[derive(Debug, Clone, Copy, PartialEq)] +#[repr(C)] +pub struct XRRScreenSize { + pub width: c_int, + pub height: c_int, + pub mwidth: c_int, + pub mheight: c_int, +} + +#[repr(C)] pub struct XRRScreenConfiguration; + +pub type XRRModeFlags = c_ulong; + +#[derive(Debug, Clone, Copy, PartialEq)] +#[repr(C)] +pub struct XRRModeInfo { + pub id: RRMode, + pub width: c_uint, + pub height: c_uint, + pub dotClock: c_ulong, + pub hSyncStart: c_uint, + pub hSyncEnd: c_uint, + pub hTotal: c_uint, + pub hSkew: c_uint, + pub vSyncStart: c_uint, + pub vSyncEnd: c_uint, + pub vTotal: c_uint, + pub name: *mut c_char, + pub nameLength: c_uint, + pub modeFlags: XRRModeFlags, +} + +#[derive(Debug, Clone, Copy, PartialEq)] +#[repr(C)] +pub struct XRRScreenResources { + pub timestamp: Time, + pub configTimestamp: Time, + pub ncrtc: c_int, + pub crtcs: *mut RRCrtc, + pub noutput: c_int, + pub outputs: *mut RROutput, + pub nmode: c_int, + pub modes: *mut XRRModeInfo, +} + +#[derive(Debug, Clone, Copy, PartialEq)] +#[repr(C)] +pub struct XRROutputInfo { + pub timestamp: Time, + pub crtc: RRCrtc, + pub name: *mut c_char, + pub nameLen: c_int, + pub mm_width: c_ulong, + pub mm_height: c_ulong, + pub connection: Connection, + pub subpixel_order: SubpixelOrder, + pub ncrtc: c_int, + pub crtcs: *mut RRCrtc, + pub nclone: c_int, + pub clones: *mut RROutput, + pub nmode: c_int, + pub npreferred: c_int, + pub modes: *mut RRMode, +} + +#[derive(Debug, Clone, Copy, PartialEq)] +#[repr(C)] +pub struct XRRPropertyInfo { + pub pending: Bool, + pub range: Bool, + pub immutable: Bool, + pub num_values: c_int, + pub values: *mut c_long, +} + +#[derive(Debug, Clone, Copy, PartialEq)] +#[repr(C)] +pub struct XRRCrtcInfo { + pub timestamp: Time, + pub x: c_int, + pub y: c_int, + pub width: c_uint, + pub height: c_uint, + pub mode: RRMode, + pub rotation: Rotation, + pub noutput: c_int, + pub outputs: *mut RROutput, + pub rotations: Rotation, + pub npossible: c_int, + pub possible: *mut RROutput, +} + +#[derive(Debug, Clone, Copy, PartialEq)] +#[repr(C)] +pub struct XRRCrtcGamma { + pub size: c_int, + pub red: *mut c_ushort, + pub green: *mut c_ushort, + pub 
blue: *mut c_ushort, +} + +#[derive(Debug, Clone, Copy, PartialEq)] +#[repr(C)] +pub struct XRRCrtcTransformAttributes { + pub pendingTransform: XTransform, + pub pendingFilter: *mut c_char, + pub pendingNparams: c_int, + pub pendingParams: *mut XFixed, + pub currentTransform: XTransform, + pub currentFilter: *mut c_char, + pub currentNparams: c_int, + pub currentParams: *mut XFixed, +} + +#[derive(Debug, Clone, Copy, PartialEq)] +#[repr(C)] +pub struct XRRPanning { + pub timestamp: Time, + pub left: c_uint, + pub top: c_uint, + pub width: c_uint, + pub height: c_uint, + pub track_left: c_uint, + pub track_top: c_uint, + pub track_width: c_uint, + pub track_height: c_uint, + pub border_left: c_int, + pub border_top: c_int, + pub border_right: c_int, + pub border_bottom: c_int, +} + +#[derive(Debug, Clone, Copy, PartialEq)] +#[repr(C)] +pub struct XRRProviderResources { + pub timestamp: Time, + pub nproviders: c_int, + pub providers: *mut RRProvider, +} + +#[derive(Debug, Clone, Copy, PartialEq)] +#[repr(C)] +pub struct XRRProviderInfo { + pub capabilities: c_uint, + pub ncrtcs: c_int, + pub crtcs: *mut RRCrtc, + pub noutputs: c_int, + pub outputs: *mut RROutput, + pub name: *mut c_char, + pub nassociatedproviders: c_int, + pub associated_providers: *mut RRProvider, + pub associated_capability: *mut c_uint, + pub nameLen: c_int, +} + +#[derive(Debug, Clone, Copy, PartialEq)] +#[repr(C)] +pub struct XRRMonitorInfo { + pub name: Atom, + pub primary: Bool, + pub automatic: Bool, + pub noutput: c_int, + pub x: c_int, + pub y: c_int, + pub width: c_int, + pub height: c_int, + pub mwidth: c_int, + pub mheight: c_int, + pub outputs: *mut RROutput, +} + + + + + + + +#[derive(Debug, Clone, Copy, PartialEq)] +#[repr(C)] +pub struct XRRScreenChangeNotifyEvent { + pub type_: c_int, + pub serial: c_ulong, + pub send_event: Bool, + pub display: *mut Display, + pub window: Window, + pub root: Window, + pub timestamp: Time, + pub config_timestamp: Time, + pub size_index: SizeID, + pub subpixel_order: SubpixelOrder, + pub rotation: Rotation, + pub width: c_int, + pub height: c_int, + pub mwidth: c_int, + pub mheight: c_int, +} + +#[derive(Debug, Clone, Copy, PartialEq)] +#[repr(C)] +pub struct XRRNotifyEvent { + pub type_: c_int, + pub serial: c_ulong, + pub send_event: Bool, + pub display: *mut Display, + pub window: Window, + pub subtype: c_int, +} + +#[derive(Debug, Clone, Copy, PartialEq)] +#[repr(C)] +pub struct XRROutputChangeNotifyEvent { + pub type_: c_int, + pub serial: c_ulong, + pub send_event: Bool, + pub display: *mut Display, + pub window: Window, + pub subtype: c_int, + pub output: RROutput, + pub crtc: RRCrtc, + pub mode: RRMode, + pub rotation: Rotation, + pub connection: Connection, + pub subpixel_order: SubpixelOrder, +} + +#[derive(Debug, Clone, Copy, PartialEq)] +#[repr(C)] +pub struct XRRCrtcChangeNotifyEvent { + pub type_: c_int, + pub serial: c_ulong, + pub send_event: Bool, + pub display: *mut Display, + pub window: Window, + pub subtype: c_int, + pub crtc: RRCrtc, + pub mode: RRMode, + pub rotation: Rotation, + pub x: c_int, + pub y: c_int, + pub width: c_uint, + pub height: c_uint, +} + +#[derive(Debug, Clone, Copy, PartialEq)] +#[repr(C)] +pub struct XRROutputPropertyNotifyEvent { + pub type_: c_int, + pub serial: c_ulong, + pub send_event: Bool, + pub display: *mut Display, + pub window: Window, + pub subtype: c_int, + pub output: RROutput, + pub property: Atom, + pub timestamp: Time, + pub state: c_int, +} + +#[derive(Debug, Clone, Copy, PartialEq)] +#[repr(C)] +pub struct 
XRRProviderChangeNotifyEvent { + pub type_: c_int, + pub serial: c_ulong, + pub send_event: Bool, + pub display: *mut Display, + pub window: Window, + pub subtype: c_int, + pub provider: RRProvider, + pub timestamp: Time, + pub current_role: c_uint, +} + +#[derive(Debug, Clone, Copy, PartialEq)] +#[repr(C)] +pub struct XRRProviderPropertyNotifyEvent { + pub type_: c_int, + pub serial: c_ulong, + pub send_event: Bool, + pub display: *mut Display, + pub window: Window, + pub subtype: c_int, + pub provider: RRProvider, + pub property: Atom, + pub timestamp: Time, + pub state: c_int, +} + +#[derive(Debug, Clone, Copy, PartialEq)] +#[repr(C)] +pub struct XRRResourceChangeNotifyEvent { + pub type_: c_int, + pub serial: c_ulong, + pub send_event: Bool, + pub display: *mut Display, + pub window: Window, + pub subtype: c_int, + pub timestamp: Time, +} + +event_conversions_and_tests! { + xrr_screen_change_notify: XRRScreenChangeNotifyEvent, + xrr_notify: XRRNotifyEvent, + xrr_output_change_notify: XRROutputChangeNotifyEvent, + xrr_crtc_change_notify: XRRCrtcChangeNotifyEvent, + xrr_output_property_notify: XRROutputPropertyNotifyEvent, + xrr_provider_change_notify: XRRProviderChangeNotifyEvent, + xrr_provider_property_notify: XRRProviderPropertyNotifyEvent, + xrr_resource_change_notify: XRRResourceChangeNotifyEvent, +} + + + + + + + +pub const RANDR_NAME: &'static str = "RANDR"; +pub const RANDR_MAJOR: c_int = 1; +pub const RANDR_MINOR: c_int = 5; + +pub const RRNumberErrors: c_int = 4; +pub const RRNumberEvents: c_int = 2; +pub const RRNumberRequests: c_int = 45; + +pub const X_RRQueryVersion: c_int = 0; +pub const X_RROldGetScreenInfo: c_int = 1; +pub const X_RRSetScreenConfig: c_int = 2; +pub const X_RROldScreenChangeSelectInput: c_int = 3; +pub const X_RRSelectInput: c_int = 4; +pub const X_RRGetScreenInfo: c_int = 5; + +pub const X_RRGetScreenSizeRange: c_int = 6; +pub const X_RRSetScreenSize: c_int = 7; +pub const X_RRGetScreenResources: c_int = 8; +pub const X_RRGetOutputInfo: c_int = 9; +pub const X_RRListOutputProperties: c_int = 10; +pub const X_RRQueryOutputProperty: c_int = 11; +pub const X_RRConfigureOutputProperty: c_int = 12; +pub const X_RRChangeOutputProperty: c_int = 13; +pub const X_RRDeleteOutputProperty: c_int = 14; +pub const X_RRGetOutputProperty: c_int = 15; +pub const X_RRCreateMode: c_int = 16; +pub const X_RRDestroyMode: c_int = 17; +pub const X_RRAddOutputMode: c_int = 18; +pub const X_RRDeleteOutputMode: c_int = 19; +pub const X_RRGetCrtcInfo: c_int = 20; +pub const X_RRSetCrtcConfig: c_int = 21; +pub const X_RRGetCrtcGammaSize: c_int = 22; +pub const X_RRGetCrtcGamma: c_int = 23; +pub const X_RRSetCrtcGamma: c_int = 24; + +pub const X_RRGetScreenResourcesCurrent: c_int = 25; +pub const X_RRSetCrtcTransform: c_int = 26; +pub const X_RRGetCrtcTransform: c_int = 27; +pub const X_RRGetPanning: c_int = 28; +pub const X_RRSetPanning: c_int = 29; +pub const X_RRSetOutputPrimary: c_int = 30; +pub const X_RRGetOutputPrimary: c_int = 31; + +pub const X_RRGetProviders: c_int = 32; +pub const X_RRGetProviderInfo: c_int = 33; +pub const X_RRSetProviderOffloadSink: c_int = 34; +pub const X_RRSetProviderOutputSource: c_int = 35; +pub const X_RRListProviderProperties: c_int = 36; +pub const X_RRQueryProviderProperty: c_int = 37; +pub const X_RRConfigureProviderProperty: c_int = 38; +pub const X_RRChangeProviderProperty: c_int = 39; +pub const X_RRDeleteProviderProperty: c_int = 40; +pub const X_RRGetProviderProperty: c_int = 41; + +pub const X_RRGetMonitors: c_int = 42; +pub const 
X_RRSetMonitor: c_int = 43; +pub const X_RRDeleteMonitor: c_int = 44; + +pub const RRTransformUnit: c_int = 1 << 0; +pub const RRTransformScaleUp: c_int = 1 << 1; +pub const RRTransformScaleDown: c_int = 1 << 2; +pub const RRTransformProjective: c_int = 1 << 3; + +pub const RRScreenChangeNotifyMask: c_int = 1 << 0; +pub const RRCrtcChangeNotifyMask: c_int = 1 << 1; +pub const RROutputChangeNotifyMask: c_int = 1 << 2; +pub const RROutputPropertyNotifyMask: c_int = 1 << 3; +pub const RRProviderChangeNotifyMask: c_int = 1 << 4; +pub const RRProviderPropertyNotifyMask: c_int = 1 << 5; +pub const RRResourceChangeNotifyMask: c_int = 1 << 6; + +pub const RRScreenChangeNotify: c_int = 0; +pub const RRNotify: c_int = 1; +pub const RRNotify_CrtcChange: c_int = 0; +pub const RRNotify_OutputChange: c_int = 1; +pub const RRNotify_OutputProperty: c_int = 2; +pub const RRNotify_ProviderChange: c_int = 3; +pub const RRNotify_ProviderProperty: c_int = 4; +pub const RRNotify_ResourceChange: c_int = 5; + +pub const RR_Rotate_0: c_int = 1; +pub const RR_Rotate_90: c_int = 2; +pub const RR_Rotate_180: c_int = 4; +pub const RR_Rotate_270: c_int = 8; + +pub const RR_Reflect_X: c_int = 16; +pub const RR_Reflect_Y: c_int = 32; + +pub const RRSetConfigSuccess: c_int = 0; +pub const RRSetConfigInvalidConfigTime: c_int = 1; +pub const RRSetConfigInvalidTime: c_int = 2; +pub const RRSetConfigFailed: c_int = 3; + +pub const RR_HSyncPositive: c_int = 0x00000001; +pub const RR_HSyncNegative: c_int = 0x00000002; +pub const RR_VSyncPositive: c_int = 0x00000004; +pub const RR_VSyncNegative: c_int = 0x00000008; +pub const RR_Interlace: c_int = 0x00000010; +pub const RR_DoubleScan: c_int = 0x00000020; +pub const RR_CSync: c_int = 0x00000040; +pub const RR_CSyncPositive: c_int = 0x00000080; +pub const RR_CSyncNegative: c_int = 0x00000100; +pub const RR_HSkewPresent: c_int = 0x00000200; +pub const RR_BCast: c_int = 0x00000400; +pub const RR_PixelMultiplex: c_int = 0x00000800; +pub const RR_DoubleClock: c_int = 0x00001000; +pub const RR_ClockDivideBy2: c_int = 0x00002000; + +pub const RR_Connected: c_int = 0; +pub const RR_Disconnected: c_int = 1; +pub const RR_UnknownConnection: c_int = 2; + +pub const BadRROutput: c_int = 0; +pub const BadRRCrtc: c_int = 1; +pub const BadRRMode: c_int = 2; +pub const BadRRProvider: c_int = 3; + +pub const RR_PROPERTY_BACKLIGHT: &'static str = "Backlight"; +pub const RR_PROPERTY_RANDR_EDID: &'static str = "EDID"; +pub const RR_PROPERTY_SIGNAL_FORMAT: &'static str = "SignalFormat"; +pub const RR_PROPERTY_SIGNAL_PROPERTIES: &'static str = "SignalProperties"; +pub const RR_PROPERTY_CONNECTOR_TYPE: &'static str = "ConnectorType"; +pub const RR_PROPERTY_CONNECTOR_NUMBER: &'static str = "ConnectorNumber"; +pub const RR_PROPERTY_COMPATIBILITY_LIST: &'static str = "CompatibilityList"; +pub const RR_PROPERTY_CLONE_LIST: &'static str = "CloneList"; +pub const RR_PROPERTY_BORDER: &'static str = "Border"; +pub const RR_PROPERTY_BORDER_DIMENSIONS: &'static str = "BorderDimensions"; +pub const RR_PROPERTY_GUID: &'static str = "GUID"; +pub const RR_PROPERTY_RANDR_TILE: &'static str = "TILE"; + +pub const RR_Capability_None: c_int = 0; +pub const RR_Capability_SourceOutput: c_int = 1; +pub const RR_Capability_SinkOutput: c_int = 2; +pub const RR_Capability_SourceOffload: c_int = 4; +pub const RR_Capability_SinkOffload: c_int = 8; + diff --git a/third_party/rust/x11/src/xrecord.rs b/third_party/rust/x11/src/xrecord.rs new file mode 100644 index 000000000000..c3c4c4bcaf4d --- /dev/null +++ 
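[Editor's aside] Like the other modules, Xrandr is a loader struct whose fields are called through an instance. A rough sketch of enumerating the server's mode list follows, again assuming x11-dl-style open() loaders for both Xlib and Xrandr; the unwraps and the function name are illustrative, not production error handling:

use std::ptr;

// Hypothetical sketch: print every XRRModeInfo the server reports.
unsafe fn list_randr_modes() {
    let xlib = x11::xlib::Xlib::open().unwrap();      // assumed loader API
    let xrandr = x11::xrandr::Xrandr::open().unwrap();
    let dpy = (xlib.XOpenDisplay)(ptr::null());
    assert!(!dpy.is_null(), "cannot open display");
    let root = (xlib.XDefaultRootWindow)(dpy);
    // XRRGetScreenResources round-trips to the server; the returned struct
    // owns the crtcs/outputs/modes arrays until freed below.
    let res = (xrandr.XRRGetScreenResources)(dpy, root);
    for i in 0..(*res).nmode as isize {
        let mode = *(*res).modes.offset(i);
        println!("mode {}: {}x{}", mode.id, mode.width, mode.height);
    }
    (xrandr.XRRFreeScreenResources)(res);
    (xlib.XCloseDisplay)(dpy);
}
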
b/third_party/rust/x11/src/xrecord.rs @@ -0,0 +1,137 @@ + + + + +use std::os::raw::{ + c_char, + c_int, + c_uchar, + c_ulong, + c_ushort, +}; + +use ::xlib::{ + Bool, + Display, + Time, + XID, +}; + + + + + + + +x11_link! { Xf86vmode, xtst, ["libXtst.so.6", "libXtst.so"], 14, + pub fn XRecordAllocRange () -> *mut XRecordRange, + pub fn XRecordCreateContext (_6: *mut Display, _5: c_int, _4: *mut c_ulong, _3: c_int, _2: *mut *mut XRecordRange, _1: c_int) -> c_ulong, + pub fn XRecordDisableContext (_2: *mut Display, _1: c_ulong) -> c_int, + pub fn XRecordEnableContext (_4: *mut Display, _3: c_ulong, _2: Option, _1: *mut c_char) -> c_int, + pub fn XRecordEnableContextAsync (_4: *mut Display, _3: c_ulong, _2: Option, _1: *mut c_char) -> c_int, + pub fn XRecordFreeContext (_2: *mut Display, _1: c_ulong) -> c_int, + pub fn XRecordFreeData (_1: *mut XRecordInterceptData) -> (), + pub fn XRecordFreeState (_1: *mut XRecordState) -> (), + pub fn XRecordGetContext (_3: *mut Display, _2: c_ulong, _1: *mut *mut XRecordState) -> c_int, + pub fn XRecordIdBaseMask (_1: *mut Display) -> c_ulong, + pub fn XRecordProcessReplies (_1: *mut Display) -> (), + pub fn XRecordQueryVersion (_3: *mut Display, _2: *mut c_int, _1: *mut c_int) -> c_int, + pub fn XRecordRegisterClients (_7: *mut Display, _6: c_ulong, _5: c_int, _4: *mut c_ulong, _3: c_int, _2: *mut *mut XRecordRange, _1: c_int) -> c_int, + pub fn XRecordUnregisterClients (_4: *mut Display, _3: c_ulong, _2: *mut c_ulong, _1: c_int) -> c_int, +variadic: +globals: +} + + + + + + + +pub const XRecordFromServerTime: c_int = 0x01; +pub const XRecordFromClientTime: c_int = 0x02; +pub const XRecordFromClientSequence: c_int = 0x04; + +pub const XRecordCurrentClients: c_ulong = 1; +pub const XRecordFutureClients: c_ulong = 2; +pub const XRecordAllClients: c_ulong = 3; + +pub const XRecordFromServer: c_int = 0; +pub const XRecordFromClient: c_int = 1; +pub const XRecordClientStarted: c_int = 2; +pub const XRecordClientDied: c_int = 3; +pub const XRecordStartOfData: c_int = 4; +pub const XRecordEndOfData: c_int = 5; + + + + + + + +pub type XRecordClientSpec = c_ulong; +pub type XRecordContext = c_ulong; + +#[derive(Debug, Clone, Copy, PartialEq)] +#[repr(C)] +pub struct XRecordClientInfo { + pub client: XRecordClientSpec, + pub nranges: c_ulong, + pub ranges: *mut *mut XRecordRange, +} + +#[derive(Debug, Clone, Copy, PartialEq)] +#[repr(C)] +pub struct XRecordExtRange { + pub ext_major: XRecordRange8, + pub ext_minor: XRecordRange16, +} + +#[derive(Debug, Clone, Copy, PartialEq)] +#[repr(C)] +pub struct XRecordInterceptData { + pub id_base: XID, + pub server_time: Time, + pub client_seq: c_ulong, + pub category: c_int, + pub client_swapped: Bool, + pub data: *mut c_uchar, + pub data_len: c_ulong, +} + +#[derive(Debug, Clone, Copy, PartialEq)] +#[repr(C)] +pub struct XRecordRange { + pub core_requests: XRecordRange8, + pub core_replies: XRecordRange8, + pub ext_requests: XRecordExtRange, + pub ext_replies: XRecordExtRange, + pub delivered_events: XRecordRange8, + pub device_events: XRecordRange8, + pub errors: XRecordRange8, + pub client_started: Bool, + pub client_died: Bool, +} + +#[derive(Debug, Clone, Copy, PartialEq)] +#[repr(C)] +pub struct XRecordRange8 { + pub first: c_uchar, + pub last: c_uchar, +} + +#[derive(Debug, Clone, Copy, PartialEq)] +#[repr(C)] +pub struct XRecordRange16 { + pub first: c_ushort, + pub last: c_ushort, +} + +#[derive(Debug, Clone, Copy, PartialEq)] +#[repr(C)] +pub struct XRecordState { + pub enabled: Bool, + pub datum_flags: c_int, + 
pub nclients: c_ulong, + pub client_info: *mut *mut XRecordClientInfo, +} diff --git a/third_party/rust/x11/src/xrender.rs b/third_party/rust/x11/src/xrender.rs new file mode 100644 index 000000000000..2f4cd8dd158d --- /dev/null +++ b/third_party/rust/x11/src/xrender.rs @@ -0,0 +1,463 @@ + + + + +use std::os::raw::{ + c_char, + c_double, + c_int, + c_short, + c_uint, + c_ulong, + c_ushort, +}; + +use ::xlib::{ + Atom, + Bool, + Colormap, + Cursor, + Display, + Pixmap, + Region, + Visual, + XID, + XRectangle, +}; + + + + + + + +x11_link! { Xrender, xrender, ["libXrender.so.1", "libXrender.so"], 44, + pub fn XRenderAddGlyphs (_7: *mut Display, _6: c_ulong, _5: *const c_ulong, _4: *const XGlyphInfo, _3: c_int, _2: *const c_char, _1: c_int) -> (), + pub fn XRenderAddTraps (_6: *mut Display, _5: c_ulong, _4: c_int, _3: c_int, _2: *const XTrap, _1: c_int) -> (), + pub fn XRenderChangePicture (_4: *mut Display, _3: c_ulong, _2: c_ulong, _1: *const XRenderPictureAttributes) -> (), + pub fn XRenderComposite (_13: *mut Display, _12: c_int, _11: c_ulong, _10: c_ulong, _9: c_ulong, _8: c_int, _7: c_int, _6: c_int, _5: c_int, _4: c_int, _3: c_int, _2: c_uint, _1: c_uint) -> (), + pub fn XRenderCompositeDoublePoly (_12: *mut Display, _11: c_int, _10: c_ulong, _9: c_ulong, _8: *const XRenderPictFormat, _7: c_int, _6: c_int, _5: c_int, _4: c_int, _3: *const XPointDouble, _2: c_int, _1: c_int) -> (), + pub fn XRenderCompositeString16 (_12: *mut Display, _11: c_int, _10: c_ulong, _9: c_ulong, _8: *const XRenderPictFormat, _7: c_ulong, _6: c_int, _5: c_int, _4: c_int, _3: c_int, _2: *const c_ushort, _1: c_int) -> (), + pub fn XRenderCompositeString32 (_12: *mut Display, _11: c_int, _10: c_ulong, _9: c_ulong, _8: *const XRenderPictFormat, _7: c_ulong, _6: c_int, _5: c_int, _4: c_int, _3: c_int, _2: *const c_uint, _1: c_int) -> (), + pub fn XRenderCompositeString8 (_12: *mut Display, _11: c_int, _10: c_ulong, _9: c_ulong, _8: *const XRenderPictFormat, _7: c_ulong, _6: c_int, _5: c_int, _4: c_int, _3: c_int, _2: *const c_char, _1: c_int) -> (), + pub fn XRenderCompositeText16 (_11: *mut Display, _10: c_int, _9: c_ulong, _8: c_ulong, _7: *const XRenderPictFormat, _6: c_int, _5: c_int, _4: c_int, _3: c_int, _2: *const XGlyphElt16, _1: c_int) -> (), + pub fn XRenderCompositeText32 (_11: *mut Display, _10: c_int, _9: c_ulong, _8: c_ulong, _7: *const XRenderPictFormat, _6: c_int, _5: c_int, _4: c_int, _3: c_int, _2: *const XGlyphElt32, _1: c_int) -> (), + pub fn XRenderCompositeText8 (_11: *mut Display, _10: c_int, _9: c_ulong, _8: c_ulong, _7: *const XRenderPictFormat, _6: c_int, _5: c_int, _4: c_int, _3: c_int, _2: *const XGlyphElt8, _1: c_int) -> (), + pub fn XRenderCompositeTrapezoids (_9: *mut Display, _8: c_int, _7: c_ulong, _6: c_ulong, _5: *const XRenderPictFormat, _4: c_int, _3: c_int, _2: *const XTrapezoid, _1: c_int) -> (), + pub fn XRenderCompositeTriangles (_9: *mut Display, _8: c_int, _7: c_ulong, _6: c_ulong, _5: *const XRenderPictFormat, _4: c_int, _3: c_int, _2: *const XTriangle, _1: c_int) -> (), + pub fn XRenderCompositeTriFan (_9: *mut Display, _8: c_int, _7: c_ulong, _6: c_ulong, _5: *const XRenderPictFormat, _4: c_int, _3: c_int, _2: *const XPointFixed, _1: c_int) -> (), + pub fn XRenderCompositeTriStrip (_9: *mut Display, _8: c_int, _7: c_ulong, _6: c_ulong, _5: *const XRenderPictFormat, _4: c_int, _3: c_int, _2: *const XPointFixed, _1: c_int) -> (), + pub fn XRenderCreateAnimCursor (_3: *mut Display, _2: c_int, _1: *mut XAnimCursor) -> c_ulong, + pub fn XRenderCreateConicalGradient (_5: *mut 
Display, _4: *const XConicalGradient, _3: *const c_int, _2: *const XRenderColor, _1: c_int) -> c_ulong, + pub fn XRenderCreateCursor (_4: *mut Display, _3: c_ulong, _2: c_uint, _1: c_uint) -> c_ulong, + pub fn XRenderCreateGlyphSet (_2: *mut Display, _1: *const XRenderPictFormat) -> c_ulong, + pub fn XRenderCreateLinearGradient (_5: *mut Display, _4: *const XLinearGradient, _3: *const c_int, _2: *const XRenderColor, _1: c_int) -> c_ulong, + pub fn XRenderCreatePicture (_5: *mut Display, _4: c_ulong, _3: *const XRenderPictFormat, _2: c_ulong, _1: *const XRenderPictureAttributes) -> c_ulong, + pub fn XRenderCreateRadialGradient (_5: *mut Display, _4: *const XRadialGradient, _3: *const c_int, _2: *const XRenderColor, _1: c_int) -> c_ulong, + pub fn XRenderCreateSolidFill (_2: *mut Display, _1: *const XRenderColor) -> c_ulong, + pub fn XRenderFillRectangle (_8: *mut Display, _7: c_int, _6: c_ulong, _5: *const XRenderColor, _4: c_int, _3: c_int, _2: c_uint, _1: c_uint) -> (), + pub fn XRenderFillRectangles (_6: *mut Display, _5: c_int, _4: c_ulong, _3: *const XRenderColor, _2: *const XRectangle, _1: c_int) -> (), + pub fn XRenderFindFormat (_4: *mut Display, _3: c_ulong, _2: *const XRenderPictFormat, _1: c_int) -> *mut XRenderPictFormat, + pub fn XRenderFindStandardFormat (_2: *mut Display, _1: c_int) -> *mut XRenderPictFormat, + pub fn XRenderFindVisualFormat (_2: *mut Display, _1: *const Visual) -> *mut XRenderPictFormat, + pub fn XRenderFreeGlyphs (_4: *mut Display, _3: c_ulong, _2: *const c_ulong, _1: c_int) -> (), + pub fn XRenderFreeGlyphSet (_2: *mut Display, _1: c_ulong) -> (), + pub fn XRenderFreePicture (_2: *mut Display, _1: c_ulong) -> (), + pub fn XRenderParseColor (_3: *mut Display, _2: *mut c_char, _1: *mut XRenderColor) -> c_int, + pub fn XRenderQueryExtension (_3: *mut Display, _2: *mut c_int, _1: *mut c_int) -> c_int, + pub fn XRenderQueryFilters (_2: *mut Display, _1: c_ulong) -> *mut XFilters, + pub fn XRenderQueryFormats (_1: *mut Display) -> c_int, + pub fn XRenderQueryPictIndexValues (_3: *mut Display, _2: *const XRenderPictFormat, _1: *mut c_int) -> *mut XIndexValue, + pub fn XRenderQuerySubpixelOrder (_2: *mut Display, _1: c_int) -> c_int, + pub fn XRenderQueryVersion (_3: *mut Display, _2: *mut c_int, _1: *mut c_int) -> c_int, + pub fn XRenderReferenceGlyphSet (_2: *mut Display, _1: c_ulong) -> c_ulong, + pub fn XRenderSetPictureClipRectangles (_6: *mut Display, _5: c_ulong, _4: c_int, _3: c_int, _2: *const XRectangle, _1: c_int) -> (), + pub fn XRenderSetPictureClipRegion (_3: *mut Display, _2: c_ulong, _1: Region) -> (), + pub fn XRenderSetPictureFilter (_5: *mut Display, _4: c_ulong, _3: *const c_char, _2: *mut c_int, _1: c_int) -> (), + pub fn XRenderSetPictureTransform (_3: *mut Display, _2: c_ulong, _1: *mut XTransform) -> (), + pub fn XRenderSetSubpixelOrder (_3: *mut Display, _2: c_int, _1: c_int) -> c_int, +variadic: +globals: +} + + + + + + + +pub type Glyph = XID; +pub type GlyphSet = XID; +pub type PictFormat = XID; +pub type Picture = XID; +pub type XDouble = c_double; +pub type XFixed = c_int; + +#[derive(Debug, Clone, Copy, PartialEq)] +#[repr(C)] +pub struct _XAnimCursor { + pub cursor: Cursor, + pub delay: c_ulong, +} +pub type XAnimCursor = _XAnimCursor; + +#[derive(Debug, Clone, Copy, PartialEq)] +#[repr(C)] +pub struct _XCircle { + pub x: XFixed, + pub y: XFixed, + pub radius: XFixed, +} +pub type XCircle = _XCircle; + +#[derive(Debug, Clone, Copy, PartialEq)] +#[repr(C)] +pub struct _XConicalGradient { + pub center: XPointFixed, + pub angle: 
XFixed, +} +pub type XConicalGradient = _XConicalGradient; + +#[derive(Debug, Clone, Copy, PartialEq)] +#[repr(C)] +pub struct _XFilters { + pub nfilter: c_int, + pub filter: *mut *mut c_char, + pub nalias: c_int, + pub alias: *mut c_short, +} +pub type XFilters = _XFilters; + +#[derive(Debug, Clone, Copy, PartialEq)] +#[repr(C)] +pub struct _XGlyphElt8 { + pub glyphset: GlyphSet, + pub chars: *mut c_char, + pub nchars: c_int, + pub xOff: c_int, + pub yOff: c_int, +} +pub type XGlyphElt8 = _XGlyphElt8; + +#[derive(Debug, Clone, Copy, PartialEq)] +#[repr(C)] +pub struct _XGlyphElt16 { + pub glyphset: GlyphSet, + pub chars: *mut c_ushort, + pub nchars: c_int, + pub xOff: c_int, + pub yOff: c_int, +} +pub type XGlyphElt16 = _XGlyphElt16; + +#[derive(Debug, Clone, Copy, PartialEq)] +#[repr(C)] +pub struct _XGlyphElt32 { + pub glyphset: GlyphSet, + pub chars: *mut c_uint, + pub nchars: c_int, + pub xOff: c_int, + pub yOff: c_int, +} +pub type XGlyphElt32 = _XGlyphElt32; + +#[derive(Debug, Clone, Copy, PartialEq)] +#[repr(C)] +pub struct _XGlyphInfo { + pub width: c_ushort, + pub height: c_ushort, + pub x: c_short, + pub y: c_short, + pub xOff: c_short, + pub yOff: c_short, +} +pub type XGlyphInfo = _XGlyphInfo; + +#[derive(Debug, Clone, Copy, PartialEq)] +#[repr(C)] +pub struct _XIndexValue { + pub pixel: c_ulong, + pub red: c_ushort, + pub green: c_ushort, + pub blue: c_ushort, + pub alpha: c_ushort, +} +pub type XIndexValue = _XIndexValue; + +#[derive(Debug, Clone, Copy, PartialEq)] +#[repr(C)] +pub struct _XLinearGradient { + pub p1: XPointFixed, + pub p2: XPointFixed, +} +pub type XLinearGradient = _XLinearGradient; + +#[derive(Debug, Clone, Copy, PartialEq)] +#[repr(C)] +pub struct _XLineFixed { + pub p1: XPointFixed, + pub p2: XPointFixed, +} +pub type XLineFixed = _XLineFixed; + +#[derive(Debug, Clone, Copy, PartialEq)] +#[repr(C)] +pub struct _XPointDouble { + pub x: XDouble, + pub y: XDouble, +} +pub type XPointDouble = _XPointDouble; + +#[derive(Debug, Clone, Copy, PartialEq)] +#[repr(C)] +pub struct _XPointFixed { + pub x: XFixed, + pub y: XFixed, +} +pub type XPointFixed = _XPointFixed; + +#[derive(Debug, Clone, Copy, PartialEq)] +#[repr(C)] +pub struct _XRadialGradient { + pub inner: XCircle, + pub outer: XCircle, +} +pub type XRadialGradient = _XRadialGradient; + +#[derive(Debug, Clone, Copy, PartialEq)] +#[repr(C)] +pub struct XRenderColor { + pub red: c_ushort, + pub green: c_ushort, + pub blue: c_ushort, + pub alpha: c_ushort, +} + +#[derive(Debug, Clone, Copy, PartialEq)] +#[repr(C)] +pub struct XRenderDirectFormat { + pub red: c_short, + pub redMask: c_short, + pub green: c_short, + pub greenMask: c_short, + pub blue: c_short, + pub blueMask: c_short, + pub alpha: c_short, + pub alphaMask: c_short, +} + +#[derive(Debug, Clone, Copy, PartialEq)] +#[repr(C)] +pub struct XRenderPictFormat { + pub id: PictFormat, + pub type_: c_int, + pub depth: c_int, + pub direct: XRenderDirectFormat, + pub colormap: Colormap, +} + +#[derive(Debug, Clone, Copy, PartialEq)] +#[repr(C)] +pub struct _XRenderPictureAttributes { + pub repeat: c_int, + pub alpha_map: Picture, + pub alpha_x_origin: c_int, + pub alpha_y_origin: c_int, + pub clip_x_origin: c_int, + pub clip_y_origin: c_int, + pub clip_mask: Pixmap, + pub graphics_exposures: Bool, + pub subwindow_mode: c_int, + pub poly_edge: c_int, + pub poly_mode: c_int, + pub dither: Atom, + pub component_alpha: Bool, +} +pub type XRenderPictureAttributes = _XRenderPictureAttributes; + +#[derive(Debug, Clone, Copy, PartialEq)] +#[repr(C)] +pub struct 
_XSpanFix { + pub left: XFixed, + pub right: XFixed, + pub y: XFixed, +} +pub type XSpanFix = _XSpanFix; + +#[derive(Debug, Clone, Copy, PartialEq)] +#[repr(C)] +pub struct _XTrap { + pub top: XSpanFix, + pub bottom: XSpanFix, +} +pub type XTrap = _XTrap; + +#[derive(Debug, Clone, Copy, PartialEq)] +#[repr(C)] +pub struct _XTrapezoid { + pub top: XFixed, + pub bottom: XFixed, + pub left: XLineFixed, + pub right: XLineFixed, +} +pub type XTrapezoid = _XTrapezoid; + +#[derive(Debug, Clone, Copy, PartialEq)] +#[repr(C)] +pub struct _XTriangle { + pub p1: XPointFixed, + pub p2: XPointFixed, + pub p3: XPointFixed, +} +pub type XTriangle = _XTriangle; + +#[derive(Debug, Clone, Copy, PartialEq)] +#[repr(C)] +pub struct _XTransform { + pub matrix: [[XFixed; 3]; 3], +} +pub type XTransform = _XTransform; + + + + + + + + +pub const PictFormatID: c_ulong = 1 << 0; +pub const PictFormatType: c_ulong = 1 << 1; +pub const PictFormatDepth: c_ulong = 1 << 2; +pub const PictFormatRed: c_ulong = 1 << 3; +pub const PictFormatRedMask: c_ulong = 1 << 4; +pub const PictFormatGreen: c_ulong = 1 << 5; +pub const PictFormatGreenMask: c_ulong = 1 << 6; +pub const PictFormatBlue: c_ulong = 1 << 7; +pub const PictFormatBlueMask: c_ulong = 1 << 8; +pub const PictFormatAlpha: c_ulong = 1 << 9; +pub const PictFormatAlphaMask: c_ulong = 1 << 10; +pub const PictFormatColormap: c_ulong = 1 << 11; + + +pub const BadPictFormat: c_int = 0; +pub const BadPicture: c_int = 1; +pub const BadPictOp: c_int = 2; +pub const BadGlyphSet: c_int = 3; +pub const BadGlyph: c_int = 4; +pub const RenderNumberErrors: c_int = BadGlyph + 1; + + +pub const PictTypeIndexed: c_int = 0; +pub const PictTypeDirect: c_int = 1; + + +pub const PictOpMinimum: c_int = 0; +pub const PictOpClear: c_int = 0; +pub const PictOpSrc: c_int = 1; +pub const PictOpDst: c_int = 2; +pub const PictOpOver: c_int = 3; +pub const PictOpOverReverse: c_int = 4; +pub const PictOpIn: c_int = 5; +pub const PictOpInReverse: c_int = 6; +pub const PictOpOut: c_int = 7; +pub const PictOpOutReverse: c_int = 8; +pub const PictOpAtop: c_int = 9; +pub const PictOpAtopReverse: c_int = 10; +pub const PictOpXor: c_int = 11; +pub const PictOpAdd: c_int = 12; +pub const PictOpSaturate: c_int = 13; +pub const PictOpMaximum: c_int = 13; + +pub const PictOpDisjointMinimum: c_int = 0x10; +pub const PictOpDisjointClear: c_int = 0x10; +pub const PictOpDisjointSrc: c_int = 0x11; +pub const PictOpDisjointDst: c_int = 0x12; +pub const PictOpDisjointOver: c_int = 0x13; +pub const PictOpDisjointOverReverse: c_int = 0x14; +pub const PictOpDisjointIn: c_int = 0x15; +pub const PictOpDisjointInReverse: c_int = 0x16; +pub const PictOpDisjointOut: c_int = 0x17; +pub const PictOpDisjointOutReverse: c_int = 0x18; +pub const PictOpDisjointAtop: c_int = 0x19; +pub const PictOpDisjointAtopReverse: c_int = 0x1a; +pub const PictOpDisjointXor: c_int = 0x1b; +pub const PictOpDisjointMaximum: c_int = 0x1b; + +pub const PictOpConjointMinimum: c_int = 0x20; +pub const PictOpConjointClear: c_int = 0x20; +pub const PictOpConjointSrc: c_int = 0x21; +pub const PictOpConjointDst: c_int = 0x22; +pub const PictOpConjointOver: c_int = 0x23; +pub const PictOpConjointOverReverse: c_int = 0x24; +pub const PictOpConjointIn: c_int = 0x25; +pub const PictOpConjointInReverse: c_int = 0x26; +pub const PictOpConjointOut: c_int = 0x27; +pub const PictOpConjointOutReverse: c_int = 0x28; +pub const PictOpConjointAtop: c_int = 0x29; +pub const PictOpConjointAtopReverse: c_int = 0x2a; +pub const PictOpConjointXor: c_int = 0x2b; +pub 
const PictOpConjointMaximum: c_int = 0x2b; + +pub const PictOpBlendMinimum: c_int = 0x30; +pub const PictOpMultiply: c_int = 0x30; +pub const PictOpScreen: c_int = 0x31; +pub const PictOpOverlay: c_int = 0x32; +pub const PictOpDarken: c_int = 0x33; +pub const PictOpLighten: c_int = 0x34; +pub const PictOpColorDodge: c_int = 0x35; +pub const PictOpColorBurn: c_int = 0x36; +pub const PictOpHardLight: c_int = 0x37; +pub const PictOpSoftLight: c_int = 0x38; +pub const PictOpDifference: c_int = 0x39; +pub const PictOpExclusion: c_int = 0x3a; +pub const PictOpHSLHue: c_int = 0x3b; +pub const PictOpHSLSaturation: c_int = 0x3c; +pub const PictOpHSLColor: c_int = 0x3d; +pub const PictOpHSLLuminosity: c_int = 0x3e; +pub const PictOpBlendMaximum: c_int = 0x3e; + + +pub const PolyEdgeSharp: c_int = 0; +pub const PolyEdgeSmooth: c_int = 1; + + +pub const PolyModePrecise: c_int = 0; +pub const PolyModeImprecise: c_int = 1; + + +pub const CPRepeat: c_int = 1 << 0; +pub const CPAlphaMap: c_int = 1 << 1; +pub const CPAlphaXOrigin: c_int = 1 << 2; +pub const CPAlphaYOrigin: c_int = 1 << 3; +pub const CPClipXOrigin: c_int = 1 << 4; +pub const CPClipYOrigin: c_int = 1 << 5; +pub const CPClipMask: c_int = 1 << 6; +pub const CPGraphicsExposure: c_int = 1 << 7; +pub const CPSubwindowMode: c_int = 1 << 8; +pub const CPPolyEdge: c_int = 1 << 9; +pub const CPPolyMode: c_int = 1 << 10; +pub const CPDither: c_int = 1 << 11; +pub const CPComponentAlpha: c_int = 1 << 12; +pub const CPLastBit: c_int = 12; + + +pub const FilterNearest: &'static str = "nearest"; +pub const FilterBilinear: &'static str = "bilinear"; +pub const FilterConvolution: &'static str = "convolution"; +pub const FilterFast: &'static str = "fast"; +pub const FilterGood: &'static str = "good"; +pub const FilterBest: &'static str = "best"; + + +pub const SubPixelUnknown: c_int = 0; +pub const SubPixelHorizontalRGB: c_int = 1; +pub const SubPixelHorizontalBGR: c_int = 2; +pub const SubPixelVerticalRGB: c_int = 3; +pub const SubPixelVerticalBGR: c_int = 4; +pub const SubPixelNone: c_int = 5; + + +pub const RepeatNone: c_int = 0; +pub const RepeatNormal: c_int = 1; +pub const RepeatPad: c_int = 2; +pub const RepeatReflect: c_int = 3; diff --git a/third_party/rust/x11/src/xss.rs b/third_party/rust/x11/src/xss.rs new file mode 100644 index 000000000000..899019469bf0 --- /dev/null +++ b/third_party/rust/x11/src/xss.rs @@ -0,0 +1,98 @@ + + + + +use std::os::raw::{ c_int, c_uint, c_ulong }; + +use xlib::{ Atom, Bool, Display, Drawable, Status, Time, Visual, XEvent, XID, XSetWindowAttributes, Window }; + + + + + + + +x11_link! 
{ Xss, xscrnsaver, ["libXss.so.2", "libXss.so"], 11, + pub fn XScreenSaverQueryExtension (_1: *mut Display, _2: *mut c_int, _3: *mut c_int) -> Bool, + pub fn XScreenSaverQueryVersion (_1: *mut Display, _2: *mut c_int, _3: *mut c_int) -> Status, + pub fn XScreenSaverAllocInfo () -> *mut XScreenSaverInfo, + pub fn XScreenSaverQueryInfo (_1: *mut Display, _2: Drawable, _3: *mut XScreenSaverInfo) -> Status, + pub fn XScreenSaverSelectInput (_1: *mut Display, _2: Drawable, _3: c_ulong) -> (), + pub fn XScreenSaverSetAttributes (_1: *mut Display, _2: Drawable, _3: c_int, _4: c_int, _5: c_uint, _6: c_uint, _7: c_uint, _8: c_int, _9: c_uint, _10: *mut Visual, _11: c_ulong, _12: *mut XSetWindowAttributes) -> (), + pub fn XScreenSaverUnsetAttributes (_1: *mut Display, _2: Drawable) -> (), + pub fn XScreenSaverRegister (_1: *mut Display, _2: c_int, _3: XID, _4: Atom) -> Status, + pub fn XScreenSaverUnregister (_1: *mut Display, _2: c_int) -> Status, + pub fn XScreenSaverGetRegistered (_1: *mut Display, _2: c_int, _3: *mut XID, _4: *mut Atom) -> Status, + pub fn XScreenSaverSuspend (_1: *mut Display, _2: Bool) -> (), +variadic: +globals: +} + + + + + + + +#[derive(Debug, Clone, Copy, PartialEq)] +#[repr(C)] +pub struct XScreenSaverInfo { + pub window: Window, + pub state: c_int, + pub kind: c_int, + pub til_or_since: c_ulong, + pub idle: c_ulong, + pub eventMask: c_ulong, +} + + + + + + + +#[derive(Debug, Clone, Copy, PartialEq)] +#[repr(C)] +pub struct XScreenSaverNotifyEvent { + pub type_: c_int, + pub serial: c_ulong, + pub send_event: Bool, + pub display: *mut Display, + pub window: Window, + pub root: Window, + pub state: c_int, + pub kind: c_int, + pub forced: Bool, + pub time: Time, +} + +event_conversions_and_tests! { + xss_notify: XScreenSaverNotifyEvent, +} + + + + + + + +pub const ScreenSaverName: &'static str = "MIT-SCREEN-SAVER"; +pub const ScreenSaverPropertyName: &'static str = "_MIT_SCREEN_SAVER_ID"; + +pub const ScreenSaverNotifyMask: c_ulong = 0x00000001; +pub const ScreenSaverCycleMask: c_ulong = 0x00000002; + +pub const ScreenSaverMajorVersion: c_int = 1; +pub const ScreenSaverMinorVersion: c_int = 1; + +pub const ScreenSaverOff: c_int = 0; +pub const ScreenSaverOn: c_int = 1; +pub const ScreenSaverCycle: c_int = 2; +pub const ScreenSaverDisabled: c_int = 3; + +pub const ScreenSaverBlanked: c_int = 0; +pub const ScreenSaverInternal: c_int = 1; +pub const ScreenSaverExternal: c_int = 2; + +pub const ScreenSaverNotify: c_int = 0; +pub const ScreenSaverNumberEvents: c_int = 1; diff --git a/third_party/rust/x11/src/xt.rs b/third_party/rust/x11/src/xt.rs new file mode 100644 index 000000000000..be5d9078a1e9 --- /dev/null +++ b/third_party/rust/x11/src/xt.rs @@ -0,0 +1,398 @@ + + + + +use std::os::raw::{ + c_char, + c_int, + c_long, + c_short, + c_uchar, + c_uint, + c_ulong, + c_ushort, + c_void, +}; + +use ::xlib::{ + Display, + GC, + Region, + Screen, + Visual, + XEvent, + XGCValues, + _XrmHashBucketRec, + XrmOptionDescList, + XrmValue, + XSelectionRequestEvent, + XSetWindowAttributes, +}; + + + + + + + +x11_link! 
{ Xt, xt, ["libXt.so.6", "libXt.so"], 300, + pub fn XtAddActions (_2: *mut XtActionsRec, _1: c_uint) -> (), + pub fn XtAddCallback (_4: Widget, _3: *const c_char, _2: Option, _1: *mut c_void) -> (), + pub fn XtAddCallbacks (_3: Widget, _2: *const c_char, _1: XtCallbackList) -> (), + pub fn XtAddConverter (_5: *const c_char, _4: *const c_char, _3: Option, _2: XtConvertArgList, _1: c_uint) -> (), + pub fn XtAddEventHandler (_5: Widget, _4: c_ulong, _3: c_char, _2: Option, _1: *mut c_void) -> (), + pub fn XtAddExposureToRegion (_2: *mut XEvent, _1: Region) -> (), + pub fn XtAddGrab (_3: Widget, _2: c_char, _1: c_char) -> (), + pub fn XtAddInput (_4: c_int, _3: *mut c_void, _2: Option, _1: *mut c_void) -> c_ulong, + pub fn XtAddRawEventHandler (_5: Widget, _4: c_ulong, _3: c_char, _2: Option, _1: *mut c_void) -> (), + pub fn XtAddSignal (_2: Option, _1: *mut c_void) -> c_ulong, + pub fn XtAddTimeOut (_3: c_ulong, _2: Option, _1: *mut c_void) -> c_ulong, + pub fn XtAddWorkProc (_2: Option c_char>, _1: *mut c_void) -> c_ulong, + pub fn XtAllocateGC (_6: Widget, _5: c_uint, _4: c_ulong, _3: *mut XGCValues, _2: c_ulong, _1: c_ulong) -> GC, + pub fn XtAppAddActionHook (_3: XtAppContext, _2: Option, _1: *mut c_void) -> *mut c_void, + pub fn XtAppAddActions (_3: XtAppContext, _2: *mut XtActionsRec, _1: c_uint) -> (), + pub fn XtAppAddBlockHook (_3: XtAppContext, _2: Option, _1: *mut c_void) -> c_ulong, + pub fn XtAppAddConverter (_6: XtAppContext, _5: *const c_char, _4: *const c_char, _3: Option, _2: XtConvertArgList, _1: c_uint) -> (), + pub fn XtAppAddInput (_5: XtAppContext, _4: c_int, _3: *mut c_void, _2: Option, _1: *mut c_void) -> c_ulong, + pub fn XtAppAddSignal (_3: XtAppContext, _2: Option, _1: *mut c_void) -> c_ulong, + pub fn XtAppAddTimeOut (_4: XtAppContext, _3: c_ulong, _2: Option, _1: *mut c_void) -> c_ulong, + pub fn XtAppAddWorkProc (_3: XtAppContext, _2: Option c_char>, _1: *mut c_void) -> c_ulong, + pub fn XtAppCreateShell (_6: *const c_char, _5: *const c_char, _4: WidgetClass, _3: *mut Display, _2: *mut Arg, _1: c_uint) -> Widget, + pub fn XtAppError (_2: XtAppContext, _1: *const c_char) -> (), + pub fn XtAppErrorMsg (_7: XtAppContext, _6: *const c_char, _5: *const c_char, _4: *const c_char, _3: *const c_char, _2: *mut *mut c_char, _1: *mut c_uint) -> (), + pub fn XtAppGetErrorDatabase (_1: XtAppContext) -> *mut *mut _XrmHashBucketRec, + pub fn XtAppGetErrorDatabaseText (_8: XtAppContext, _7: *const c_char, _6: *const c_char, _5: *const c_char, _4: *const c_char, _3: *mut c_char, _2: c_int, _1: *mut _XrmHashBucketRec) -> (), + pub fn XtAppGetExitFlag (_1: XtAppContext) -> c_char, + pub fn XtAppGetSelectionTimeout (_1: XtAppContext) -> c_ulong, + pub fn XtAppInitialize (_9: *mut XtAppContext, _8: *const c_char, _7: XrmOptionDescList, _6: c_uint, _5: *mut c_int, _4: *mut *mut c_char, _3: *mut *mut c_char, _2: *mut Arg, _1: c_uint) -> Widget, + pub fn XtAppLock (_1: XtAppContext) -> (), + pub fn XtAppMainLoop (_1: XtAppContext) -> (), + pub fn XtAppNextEvent (_2: XtAppContext, _1: *mut XEvent) -> (), + pub fn XtAppPeekEvent (_2: XtAppContext, _1: *mut XEvent) -> c_char, + pub fn XtAppPending (_1: XtAppContext) -> c_ulong, + pub fn XtAppProcessEvent (_2: XtAppContext, _1: c_ulong) -> (), + pub fn XtAppReleaseCacheRefs (_2: XtAppContext, _1: *mut *mut c_void) -> (), + pub fn XtAppSetErrorHandler (_2: XtAppContext, _1: Option) -> Option, + pub fn XtAppSetErrorMsgHandler (_2: XtAppContext, _1: Option) -> Option, + pub fn XtAppSetExitFlag (_1: XtAppContext) -> (), + pub fn 
XtAppSetFallbackResources (_2: XtAppContext, _1: *mut *mut c_char) -> (), + pub fn XtAppSetSelectionTimeout (_2: XtAppContext, _1: c_ulong) -> (), + pub fn XtAppSetTypeConverter (_8: XtAppContext, _7: *const c_char, _6: *const c_char, _5: Option c_char>, _4: XtConvertArgList, _3: c_uint, _2: c_int, _1: Option) -> (), + pub fn XtAppSetWarningHandler (_2: XtAppContext, _1: Option) -> Option, + pub fn XtAppSetWarningMsgHandler (_2: XtAppContext, _1: Option) -> Option, + pub fn XtAppUnlock (_1: XtAppContext) -> (), + pub fn XtAppWarning (_2: XtAppContext, _1: *const c_char) -> (), + pub fn XtAppWarningMsg (_7: XtAppContext, _6: *const c_char, _5: *const c_char, _4: *const c_char, _3: *const c_char, _2: *mut *mut c_char, _1: *mut c_uint) -> (), + pub fn XtAugmentTranslations (_2: Widget, _1: *mut _TranslationData) -> (), + pub fn XtBuildEventMask (_1: Widget) -> c_ulong, + pub fn XtCallAcceptFocus (_2: Widget, _1: *mut c_ulong) -> c_char, + pub fn XtCallActionProc (_5: Widget, _4: *const c_char, _3: *mut XEvent, _2: *mut *mut c_char, _1: c_uint) -> (), + pub fn XtCallbackExclusive (_3: Widget, _2: *mut c_void, _1: *mut c_void) -> (), + pub fn XtCallbackNone (_3: Widget, _2: *mut c_void, _1: *mut c_void) -> (), + pub fn XtCallbackNonexclusive (_3: Widget, _2: *mut c_void, _1: *mut c_void) -> (), + pub fn XtCallbackPopdown (_3: Widget, _2: *mut c_void, _1: *mut c_void) -> (), + pub fn XtCallbackReleaseCacheRef (_3: Widget, _2: *mut c_void, _1: *mut c_void) -> (), + pub fn XtCallbackReleaseCacheRefList (_3: Widget, _2: *mut c_void, _1: *mut c_void) -> (), + pub fn XtCallCallbackList (_3: Widget, _2: XtCallbackList, _1: *mut c_void) -> (), + pub fn XtCallCallbacks (_3: Widget, _2: *const c_char, _1: *mut c_void) -> (), + pub fn XtCallConverter (_7: *mut Display, _6: Option c_char>, _5: *mut XrmValue, _4: c_uint, _3: *mut XrmValue, _2: *mut XrmValue, _1: *mut *mut c_void) -> c_char, + pub fn XtCalloc (_2: c_uint, _1: c_uint) -> *mut c_char, + pub fn XtCancelSelectionRequest (_2: Widget, _1: c_ulong) -> (), + pub fn XtChangeManagedSet (_6: *mut Widget, _5: c_uint, _4: Option, _3: *mut c_void, _2: *mut Widget, _1: c_uint) -> (), + pub fn XtClass (_1: Widget) -> WidgetClass, + pub fn XtCloseDisplay (_1: *mut Display) -> (), + pub fn XtConfigureWidget (_6: Widget, _5: c_short, _4: c_short, _3: c_ushort, _2: c_ushort, _1: c_ushort) -> (), + pub fn XtConvert (_5: Widget, _4: *const c_char, _3: *mut XrmValue, _2: *const c_char, _1: *mut XrmValue) -> (), + pub fn XtConvertAndStore (_5: Widget, _4: *const c_char, _3: *mut XrmValue, _2: *const c_char, _1: *mut XrmValue) -> c_char, + pub fn XtConvertCase (_4: *mut Display, _3: c_ulong, _2: *mut c_ulong, _1: *mut c_ulong) -> (), + pub fn XtCreateApplicationContext () -> XtAppContext, + pub fn XtCreateApplicationShell (_4: *const c_char, _3: WidgetClass, _2: *mut Arg, _1: c_uint) -> Widget, + pub fn XtCreateManagedWidget (_5: *const c_char, _4: WidgetClass, _3: Widget, _2: *mut Arg, _1: c_uint) -> Widget, + pub fn XtCreatePopupShell (_5: *const c_char, _4: WidgetClass, _3: Widget, _2: *mut Arg, _1: c_uint) -> Widget, + pub fn XtCreateSelectionRequest (_2: Widget, _1: c_ulong) -> (), + pub fn XtCreateWidget (_5: *const c_char, _4: WidgetClass, _3: Widget, _2: *mut Arg, _1: c_uint) -> Widget, + pub fn XtCreateWindow (_5: Widget, _4: c_uint, _3: *mut Visual, _2: c_ulong, _1: *mut XSetWindowAttributes) -> (), + pub fn XtCvtColorToPixel (_6: *mut Display, _5: *mut XrmValue, _4: *mut c_uint, _3: *mut XrmValue, _2: *mut XrmValue, _1: *mut *mut c_void) -> c_char, + pub 
fn XtCvtIntToBool (_6: *mut Display, _5: *mut XrmValue, _4: *mut c_uint, _3: *mut XrmValue, _2: *mut XrmValue, _1: *mut *mut c_void) -> c_char, + pub fn XtCvtIntToBoolean (_6: *mut Display, _5: *mut XrmValue, _4: *mut c_uint, _3: *mut XrmValue, _2: *mut XrmValue, _1: *mut *mut c_void) -> c_char, + pub fn XtCvtIntToColor (_6: *mut Display, _5: *mut XrmValue, _4: *mut c_uint, _3: *mut XrmValue, _2: *mut XrmValue, _1: *mut *mut c_void) -> c_char, + pub fn XtCvtIntToFloat (_6: *mut Display, _5: *mut XrmValue, _4: *mut c_uint, _3: *mut XrmValue, _2: *mut XrmValue, _1: *mut *mut c_void) -> c_char, + pub fn XtCvtIntToFont (_6: *mut Display, _5: *mut XrmValue, _4: *mut c_uint, _3: *mut XrmValue, _2: *mut XrmValue, _1: *mut *mut c_void) -> c_char, + pub fn XtCvtIntToPixel (_6: *mut Display, _5: *mut XrmValue, _4: *mut c_uint, _3: *mut XrmValue, _2: *mut XrmValue, _1: *mut *mut c_void) -> c_char, + pub fn XtCvtIntToPixmap (_6: *mut Display, _5: *mut XrmValue, _4: *mut c_uint, _3: *mut XrmValue, _2: *mut XrmValue, _1: *mut *mut c_void) -> c_char, + pub fn XtCvtIntToShort (_6: *mut Display, _5: *mut XrmValue, _4: *mut c_uint, _3: *mut XrmValue, _2: *mut XrmValue, _1: *mut *mut c_void) -> c_char, + pub fn XtCvtIntToUnsignedChar (_6: *mut Display, _5: *mut XrmValue, _4: *mut c_uint, _3: *mut XrmValue, _2: *mut XrmValue, _1: *mut *mut c_void) -> c_char, + pub fn XtCvtStringToAcceleratorTable (_6: *mut Display, _5: *mut XrmValue, _4: *mut c_uint, _3: *mut XrmValue, _2: *mut XrmValue, _1: *mut *mut c_void) -> c_char, + pub fn XtCvtStringToAtom (_6: *mut Display, _5: *mut XrmValue, _4: *mut c_uint, _3: *mut XrmValue, _2: *mut XrmValue, _1: *mut *mut c_void) -> c_char, + pub fn XtCvtStringToBool (_6: *mut Display, _5: *mut XrmValue, _4: *mut c_uint, _3: *mut XrmValue, _2: *mut XrmValue, _1: *mut *mut c_void) -> c_char, + pub fn XtCvtStringToBoolean (_6: *mut Display, _5: *mut XrmValue, _4: *mut c_uint, _3: *mut XrmValue, _2: *mut XrmValue, _1: *mut *mut c_void) -> c_char, + pub fn XtCvtStringToCommandArgArray (_6: *mut Display, _5: *mut XrmValue, _4: *mut c_uint, _3: *mut XrmValue, _2: *mut XrmValue, _1: *mut *mut c_void) -> c_char, + pub fn XtCvtStringToCursor (_6: *mut Display, _5: *mut XrmValue, _4: *mut c_uint, _3: *mut XrmValue, _2: *mut XrmValue, _1: *mut *mut c_void) -> c_char, + pub fn XtCvtStringToDimension (_6: *mut Display, _5: *mut XrmValue, _4: *mut c_uint, _3: *mut XrmValue, _2: *mut XrmValue, _1: *mut *mut c_void) -> c_char, + pub fn XtCvtStringToDirectoryString (_6: *mut Display, _5: *mut XrmValue, _4: *mut c_uint, _3: *mut XrmValue, _2: *mut XrmValue, _1: *mut *mut c_void) -> c_char, + pub fn XtCvtStringToDisplay (_6: *mut Display, _5: *mut XrmValue, _4: *mut c_uint, _3: *mut XrmValue, _2: *mut XrmValue, _1: *mut *mut c_void) -> c_char, + pub fn XtCvtStringToFile (_6: *mut Display, _5: *mut XrmValue, _4: *mut c_uint, _3: *mut XrmValue, _2: *mut XrmValue, _1: *mut *mut c_void) -> c_char, + pub fn XtCvtStringToFloat (_6: *mut Display, _5: *mut XrmValue, _4: *mut c_uint, _3: *mut XrmValue, _2: *mut XrmValue, _1: *mut *mut c_void) -> c_char, + pub fn XtCvtStringToFont (_6: *mut Display, _5: *mut XrmValue, _4: *mut c_uint, _3: *mut XrmValue, _2: *mut XrmValue, _1: *mut *mut c_void) -> c_char, + pub fn XtCvtStringToFontSet (_6: *mut Display, _5: *mut XrmValue, _4: *mut c_uint, _3: *mut XrmValue, _2: *mut XrmValue, _1: *mut *mut c_void) -> c_char, + pub fn XtCvtStringToFontStruct (_6: *mut Display, _5: *mut XrmValue, _4: *mut c_uint, _3: *mut XrmValue, _2: *mut XrmValue, _1: *mut *mut c_void) -> 
c_char, + pub fn XtCvtStringToGravity (_6: *mut Display, _5: *mut XrmValue, _4: *mut c_uint, _3: *mut XrmValue, _2: *mut XrmValue, _1: *mut *mut c_void) -> c_char, + pub fn XtCvtStringToInitialState (_6: *mut Display, _5: *mut XrmValue, _4: *mut c_uint, _3: *mut XrmValue, _2: *mut XrmValue, _1: *mut *mut c_void) -> c_char, + pub fn XtCvtStringToInt (_6: *mut Display, _5: *mut XrmValue, _4: *mut c_uint, _3: *mut XrmValue, _2: *mut XrmValue, _1: *mut *mut c_void) -> c_char, + pub fn XtCvtStringToPixel (_6: *mut Display, _5: *mut XrmValue, _4: *mut c_uint, _3: *mut XrmValue, _2: *mut XrmValue, _1: *mut *mut c_void) -> c_char, + pub fn XtCvtStringToRestartStyle (_6: *mut Display, _5: *mut XrmValue, _4: *mut c_uint, _3: *mut XrmValue, _2: *mut XrmValue, _1: *mut *mut c_void) -> c_char, + pub fn XtCvtStringToShort (_6: *mut Display, _5: *mut XrmValue, _4: *mut c_uint, _3: *mut XrmValue, _2: *mut XrmValue, _1: *mut *mut c_void) -> c_char, + pub fn XtCvtStringToTranslationTable (_6: *mut Display, _5: *mut XrmValue, _4: *mut c_uint, _3: *mut XrmValue, _2: *mut XrmValue, _1: *mut *mut c_void) -> c_char, + pub fn XtCvtStringToUnsignedChar (_6: *mut Display, _5: *mut XrmValue, _4: *mut c_uint, _3: *mut XrmValue, _2: *mut XrmValue, _1: *mut *mut c_void) -> c_char, + pub fn XtCvtStringToVisual (_6: *mut Display, _5: *mut XrmValue, _4: *mut c_uint, _3: *mut XrmValue, _2: *mut XrmValue, _1: *mut *mut c_void) -> c_char, + pub fn XtDatabase (_1: *mut Display) -> *mut _XrmHashBucketRec, + pub fn XtDestroyApplicationContext (_1: XtAppContext) -> (), + pub fn XtDestroyGC (_1: GC) -> (), + pub fn XtDestroyWidget (_1: Widget) -> (), + pub fn XtDirectConvert (_5: Option, _4: *mut XrmValue, _3: c_uint, _2: *mut XrmValue, _1: *mut XrmValue) -> (), + pub fn XtDisownSelection (_3: Widget, _2: c_ulong, _1: c_ulong) -> (), + pub fn XtDispatchEvent (_1: *mut XEvent) -> c_char, + pub fn XtDispatchEventToWidget (_2: Widget, _1: *mut XEvent) -> c_char, + pub fn XtDisplay (_1: Widget) -> *mut Display, + pub fn XtDisplayInitialize (_8: XtAppContext, _7: *mut Display, _6: *const c_char, _5: *const c_char, _4: XrmOptionDescList, _3: c_uint, _2: *mut c_int, _1: *mut *mut c_char) -> (), + pub fn XtDisplayOfObject (_1: Widget) -> *mut Display, + pub fn XtDisplayStringConversionWarning (_3: *mut Display, _2: *const c_char, _1: *const c_char) -> (), + pub fn XtDisplayToApplicationContext (_1: *mut Display) -> XtAppContext, + pub fn XtError (_1: *const c_char) -> (), + pub fn XtErrorMsg (_6: *const c_char, _5: *const c_char, _4: *const c_char, _3: *const c_char, _2: *mut *mut c_char, _1: *mut c_uint) -> (), + pub fn XtFindFile (_4: *const c_char, _3: Substitution, _2: c_uint, _1: Option c_char>) -> *mut c_char, + pub fn XtFree (_1: *mut c_char) -> (), + pub fn XtGetActionKeysym (_2: *mut XEvent, _1: *mut c_uint) -> c_ulong, + pub fn XtGetActionList (_3: WidgetClass, _2: *mut *mut XtActionsRec, _1: *mut c_uint) -> (), + pub fn XtGetApplicationNameAndClass (_3: *mut Display, _2: *mut *mut c_char, _1: *mut *mut c_char) -> (), + pub fn XtGetApplicationResources (_6: Widget, _5: *mut c_void, _4: *mut XtResource, _3: c_uint, _2: *mut Arg, _1: c_uint) -> (), + pub fn XtGetClassExtension (_5: WidgetClass, _4: c_uint, _3: c_int, _2: c_long, _1: c_uint) -> *mut c_void, + pub fn XtGetConstraintResourceList (_3: WidgetClass, _2: *mut *mut XtResource, _1: *mut c_uint) -> (), + pub fn XtGetDisplays (_3: XtAppContext, _2: *mut *mut *mut Display, _1: *mut c_uint) -> (), + pub fn XtGetErrorDatabase () -> *mut *mut _XrmHashBucketRec, + pub fn 
XtGetErrorDatabaseText (_6: *const c_char, _5: *const c_char, _4: *const c_char, _3: *const c_char, _2: *mut c_char, _1: c_int) -> (), + pub fn XtGetGC (_3: Widget, _2: c_ulong, _1: *mut XGCValues) -> GC, + pub fn XtGetKeyboardFocusWidget (_1: Widget) -> Widget, + pub fn XtGetKeysymTable (_3: *mut Display, _2: *mut c_uchar, _1: *mut c_int) -> *mut c_ulong, + pub fn XtGetMultiClickTime (_1: *mut Display) -> c_int, + pub fn XtGetResourceList (_3: WidgetClass, _2: *mut *mut XtResource, _1: *mut c_uint) -> (), + pub fn XtGetSelectionParameters (_7: Widget, _6: c_ulong, _5: *mut c_void, _4: *mut c_ulong, _3: *mut *mut c_void, _2: *mut c_ulong, _1: *mut c_int) -> (), + pub fn XtGetSelectionRequest (_3: Widget, _2: c_ulong, _1: *mut c_void) -> *mut XSelectionRequestEvent, + pub fn XtGetSelectionTimeout () -> c_ulong, + pub fn XtGetSelectionValue (_6: Widget, _5: c_ulong, _4: c_ulong, _3: Option, _2: *mut c_void, _1: c_ulong) -> (), + pub fn XtGetSelectionValueIncremental (_6: Widget, _5: c_ulong, _4: c_ulong, _3: Option, _2: *mut c_void, _1: c_ulong) -> (), + pub fn XtGetSelectionValues (_7: Widget, _6: c_ulong, _5: *mut c_ulong, _4: c_int, _3: Option, _2: *mut *mut c_void, _1: c_ulong) -> (), + pub fn XtGetSelectionValuesIncremental (_7: Widget, _6: c_ulong, _5: *mut c_ulong, _4: c_int, _3: Option, _2: *mut *mut c_void, _1: c_ulong) -> (), + pub fn XtGetSubresources (_8: Widget, _7: *mut c_void, _6: *const c_char, _5: *const c_char, _4: *mut XtResource, _3: c_uint, _2: *mut Arg, _1: c_uint) -> (), + pub fn XtGetSubvalues (_5: *mut c_void, _4: *mut XtResource, _3: c_uint, _2: *mut Arg, _1: c_uint) -> (), + pub fn XtGetValues (_3: Widget, _2: *mut Arg, _1: c_uint) -> (), + pub fn XtGrabButton (_9: Widget, _8: c_int, _7: c_uint, _6: c_char, _5: c_uint, _4: c_int, _3: c_int, _2: c_ulong, _1: c_ulong) -> (), + pub fn XtGrabKey (_6: Widget, _5: c_uchar, _4: c_uint, _3: c_char, _2: c_int, _1: c_int) -> (), + pub fn XtGrabKeyboard (_5: Widget, _4: c_char, _3: c_int, _2: c_int, _1: c_ulong) -> c_int, + pub fn XtGrabPointer (_8: Widget, _7: c_char, _6: c_uint, _5: c_int, _4: c_int, _3: c_ulong, _2: c_ulong, _1: c_ulong) -> c_int, + pub fn XtHasCallbacks (_2: Widget, _1: *const c_char) -> XtCallbackStatus, + pub fn XtHooksOfDisplay (_1: *mut Display) -> Widget, + pub fn XtInitialize (_6: *const c_char, _5: *const c_char, _4: XrmOptionDescList, _3: c_uint, _2: *mut c_int, _1: *mut *mut c_char) -> Widget, + pub fn XtInitializeWidgetClass (_1: WidgetClass) -> (), + pub fn XtInsertEventHandler (_6: Widget, _5: c_ulong, _4: c_char, _3: Option, _2: *mut c_void, _1: XtListPosition) -> (), + pub fn XtInsertEventTypeHandler (_6: Widget, _5: c_int, _4: *mut c_void, _3: Option, _2: *mut c_void, _1: XtListPosition) -> (), + pub fn XtInsertRawEventHandler (_6: Widget, _5: c_ulong, _4: c_char, _3: Option, _2: *mut c_void, _1: XtListPosition) -> (), + pub fn XtInstallAccelerators (_2: Widget, _1: Widget) -> (), + pub fn XtInstallAllAccelerators (_2: Widget, _1: Widget) -> (), + pub fn XtIsApplicationShell (_1: Widget) -> c_char, + pub fn XtIsComposite (_1: Widget) -> c_char, + pub fn XtIsConstraint (_1: Widget) -> c_char, + pub fn XtIsManaged (_1: Widget) -> c_char, + pub fn XtIsObject (_1: Widget) -> c_char, + pub fn XtIsOverrideShell (_1: Widget) -> c_char, + pub fn XtIsRealized (_1: Widget) -> c_char, + pub fn XtIsRectObj (_1: Widget) -> c_char, + pub fn XtIsSensitive (_1: Widget) -> c_char, + pub fn XtIsSessionShell (_1: Widget) -> c_char, + pub fn XtIsShell (_1: Widget) -> c_char, + pub fn XtIsSubclass (_2: Widget, 
_1: WidgetClass) -> c_char, + pub fn XtIsTopLevelShell (_1: Widget) -> c_char, + pub fn XtIsTransientShell (_1: Widget) -> c_char, + pub fn XtIsVendorShell (_1: Widget) -> c_char, + pub fn XtIsWidget (_1: Widget) -> c_char, + pub fn XtIsWMShell (_1: Widget) -> c_char, + pub fn XtKeysymToKeycodeList (_4: *mut Display, _3: c_ulong, _2: *mut *mut c_uchar, _1: *mut c_uint) -> (), + pub fn XtLastEventProcessed (_1: *mut Display) -> *mut XEvent, + pub fn XtLastTimestampProcessed (_1: *mut Display) -> c_ulong, + pub fn XtMainLoop () -> (), + pub fn XtMakeGeometryRequest (_3: Widget, _2: *mut XtWidgetGeometry, _1: *mut XtWidgetGeometry) -> XtGeometryResult, + pub fn XtMakeResizeRequest (_5: Widget, _4: c_ushort, _3: c_ushort, _2: *mut c_ushort, _1: *mut c_ushort) -> XtGeometryResult, + pub fn XtMalloc (_1: c_uint) -> *mut c_char, + pub fn XtManageChild (_1: Widget) -> (), + pub fn XtManageChildren (_2: *mut Widget, _1: c_uint) -> (), + pub fn XtMapWidget (_1: Widget) -> (), + pub fn XtMenuPopupAction (_4: Widget, _3: *mut XEvent, _2: *mut *mut c_char, _1: *mut c_uint) -> (), + pub fn XtMergeArgLists (_4: *mut Arg, _3: c_uint, _2: *mut Arg, _1: c_uint) -> *mut Arg, + pub fn XtMoveWidget (_3: Widget, _2: c_short, _1: c_short) -> (), + pub fn XtName (_1: Widget) -> *mut c_char, + pub fn XtNameToWidget (_2: Widget, _1: *const c_char) -> Widget, + pub fn XtNewString (_1: *mut c_char) -> *mut c_char, + pub fn XtNextEvent (_1: *mut XEvent) -> (), + pub fn XtNoticeSignal (_1: c_ulong) -> (), + pub fn XtOpenApplication (_10: *mut XtAppContext, _9: *const c_char, _8: XrmOptionDescList, _7: c_uint, _6: *mut c_int, _5: *mut *mut c_char, _4: *mut *mut c_char, _3: WidgetClass, _2: *mut Arg, _1: c_uint) -> Widget, + pub fn XtOpenDisplay (_8: XtAppContext, _7: *const c_char, _6: *const c_char, _5: *const c_char, _4: XrmOptionDescList, _3: c_uint, _2: *mut c_int, _1: *mut *mut c_char) -> *mut Display, + pub fn XtOverrideTranslations (_2: Widget, _1: *mut _TranslationData) -> (), + pub fn XtOwnSelection (_6: Widget, _5: c_ulong, _4: c_ulong, _3: Option c_char>, _2: Option, _1: Option) -> c_char, + pub fn XtOwnSelectionIncremental (_8: Widget, _7: c_ulong, _6: c_ulong, _5: Option c_char>, _4: Option, _3: Option, _2: Option, _1: *mut c_void) -> c_char, + pub fn XtParent (_1: Widget) -> Widget, + pub fn XtParseAcceleratorTable (_1: *const c_char) -> *mut _TranslationData, + pub fn XtParseTranslationTable (_1: *const c_char) -> *mut _TranslationData, + pub fn XtPeekEvent (_1: *mut XEvent) -> c_char, + pub fn XtPending () -> c_char, + pub fn XtPopdown (_1: Widget) -> (), + pub fn XtPopup (_2: Widget, _1: XtGrabKind) -> (), + pub fn XtPopupSpringLoaded (_1: Widget) -> (), + pub fn XtProcessEvent (_1: c_ulong) -> (), + pub fn XtProcessLock () -> (), + pub fn XtProcessUnlock () -> (), + pub fn XtQueryGeometry (_3: Widget, _2: *mut XtWidgetGeometry, _1: *mut XtWidgetGeometry) -> XtGeometryResult, + pub fn XtRealizeWidget (_1: Widget) -> (), + pub fn XtRealloc (_2: *mut c_char, _1: c_uint) -> *mut c_char, + pub fn XtRegisterCaseConverter (_4: *mut Display, _3: Option, _2: c_ulong, _1: c_ulong) -> (), + pub fn XtRegisterDrawable (_3: *mut Display, _2: c_ulong, _1: Widget) -> (), + pub fn XtRegisterExtensionSelector (_5: *mut Display, _4: c_int, _3: c_int, _2: Option, _1: *mut c_void) -> (), + pub fn XtRegisterGrabAction (_5: Option, _4: c_char, _3: c_uint, _2: c_int, _1: c_int) -> (), + pub fn XtReleaseGC (_2: Widget, _1: GC) -> (), + pub fn XtReleasePropertyAtom (_2: Widget, _1: c_ulong) -> (), + pub fn XtRemoveActionHook 
(_1: *mut c_void) -> (), + pub fn XtRemoveAllCallbacks (_2: Widget, _1: *const c_char) -> (), + pub fn XtRemoveBlockHook (_1: c_ulong) -> (), + pub fn XtRemoveCallback (_4: Widget, _3: *const c_char, _2: Option, _1: *mut c_void) -> (), + pub fn XtRemoveCallbacks (_3: Widget, _2: *const c_char, _1: XtCallbackList) -> (), + pub fn XtRemoveEventHandler (_5: Widget, _4: c_ulong, _3: c_char, _2: Option, _1: *mut c_void) -> (), + pub fn XtRemoveEventTypeHandler (_5: Widget, _4: c_int, _3: *mut c_void, _2: Option, _1: *mut c_void) -> (), + pub fn XtRemoveGrab (_1: Widget) -> (), + pub fn XtRemoveInput (_1: c_ulong) -> (), + pub fn XtRemoveRawEventHandler (_5: Widget, _4: c_ulong, _3: c_char, _2: Option, _1: *mut c_void) -> (), + pub fn XtRemoveSignal (_1: c_ulong) -> (), + pub fn XtRemoveTimeOut (_1: c_ulong) -> (), + pub fn XtRemoveWorkProc (_1: c_ulong) -> (), + pub fn XtReservePropertyAtom (_1: Widget) -> c_ulong, + pub fn XtResizeWidget (_4: Widget, _3: c_ushort, _2: c_ushort, _1: c_ushort) -> (), + pub fn XtResizeWindow (_1: Widget) -> (), + pub fn XtResolvePathname (_8: *mut Display, _7: *const c_char, _6: *const c_char, _5: *const c_char, _4: *const c_char, _3: Substitution, _2: c_uint, _1: Option c_char>) -> *mut c_char, + pub fn XtScreen (_1: Widget) -> *mut Screen, + pub fn XtScreenDatabase (_1: *mut Screen) -> *mut _XrmHashBucketRec, + pub fn XtScreenOfObject (_1: Widget) -> *mut Screen, + pub fn XtSendSelectionRequest (_3: Widget, _2: c_ulong, _1: c_ulong) -> (), + pub fn XtSessionGetToken (_1: Widget) -> XtCheckpointToken, + pub fn XtSessionReturnToken (_1: XtCheckpointToken) -> (), + pub fn XtSetErrorHandler (_1: Option) -> (), + pub fn XtSetErrorMsgHandler (_1: Option) -> (), + pub fn XtSetEventDispatcher (_3: *mut Display, _2: c_int, _1: Option c_char>) -> Option c_char>, + pub fn XtSetKeyboardFocus (_2: Widget, _1: Widget) -> (), + pub fn XtSetKeyTranslator (_2: *mut Display, _1: Option) -> (), + pub fn XtSetLanguageProc (_3: XtAppContext, _2: Option *mut c_char>, _1: *mut c_void) -> Option *mut c_char>, + pub fn XtSetMappedWhenManaged (_2: Widget, _1: c_char) -> (), + pub fn XtSetMultiClickTime (_2: *mut Display, _1: c_int) -> (), + pub fn XtSetSelectionParameters (_6: Widget, _5: c_ulong, _4: c_ulong, _3: *mut c_void, _2: c_ulong, _1: c_int) -> (), + pub fn XtSetSelectionTimeout (_1: c_ulong) -> (), + pub fn XtSetSensitive (_2: Widget, _1: c_char) -> (), + pub fn XtSetSubvalues (_5: *mut c_void, _4: *mut XtResource, _3: c_uint, _2: *mut Arg, _1: c_uint) -> (), + pub fn XtSetTypeConverter (_7: *const c_char, _6: *const c_char, _5: Option c_char>, _4: XtConvertArgList, _3: c_uint, _2: c_int, _1: Option) -> (), + pub fn XtSetValues (_3: Widget, _2: *mut Arg, _1: c_uint) -> (), + pub fn XtSetWarningHandler (_1: Option) -> (), + pub fn XtSetWarningMsgHandler (_1: Option) -> (), + pub fn XtSetWMColormapWindows (_3: Widget, _2: *mut Widget, _1: c_uint) -> (), + pub fn XtStringConversionWarning (_2: *const c_char, _1: *const c_char) -> (), + pub fn XtSuperclass (_1: Widget) -> WidgetClass, + pub fn XtToolkitInitialize () -> (), + pub fn XtToolkitThreadInitialize () -> c_char, + pub fn XtTranslateCoords (_5: Widget, _4: c_short, _3: c_short, _2: *mut c_short, _1: *mut c_short) -> (), + pub fn XtTranslateKey (_5: *mut Display, _4: c_uchar, _3: c_uint, _2: *mut c_uint, _1: *mut c_ulong) -> (), + pub fn XtTranslateKeycode (_5: *mut Display, _4: c_uchar, _3: c_uint, _2: *mut c_uint, _1: *mut c_ulong) -> (), + pub fn XtUngrabButton (_3: Widget, _2: c_uint, _1: c_uint) -> (), + pub fn 
XtUngrabKey (_3: Widget, _2: c_uchar, _1: c_uint) -> (), + pub fn XtUngrabKeyboard (_2: Widget, _1: c_ulong) -> (), + pub fn XtUngrabPointer (_2: Widget, _1: c_ulong) -> (), + pub fn XtUninstallTranslations (_1: Widget) -> (), + pub fn XtUnmanageChild (_1: Widget) -> (), + pub fn XtUnmanageChildren (_2: *mut Widget, _1: c_uint) -> (), + pub fn XtUnmapWidget (_1: Widget) -> (), + pub fn XtUnrealizeWidget (_1: Widget) -> (), + pub fn XtUnregisterDrawable (_2: *mut Display, _1: c_ulong) -> (), + pub fn XtWarning (_1: *const c_char) -> (), + pub fn XtWarningMsg (_6: *const c_char, _5: *const c_char, _4: *const c_char, _3: *const c_char, _2: *mut *mut c_char, _1: *mut c_uint) -> (), + pub fn XtWidgetToApplicationContext (_1: Widget) -> XtAppContext, + pub fn XtWindow (_1: Widget) -> c_ulong, + pub fn XtWindowOfObject (_1: Widget) -> c_ulong, + pub fn XtWindowToWidget (_2: *mut Display, _1: c_ulong) -> Widget, +variadic: + pub fn XtAsprintf (_2: *mut *mut c_char, _1: *const c_char) -> c_uint, + pub fn XtVaAppCreateShell (_4: *const c_char, _3: *const c_char, _2: WidgetClass, _1: *mut Display) -> Widget, + pub fn XtVaAppInitialize (_7: *mut XtAppContext, _6: *const c_char, _5: XrmOptionDescList, _4: c_uint, _3: *mut c_int, _2: *mut *mut c_char, _1: *mut *mut c_char) -> Widget, + pub fn XtVaCreateArgsList (_1: *mut c_void) -> *mut c_void, + pub fn XtVaCreateManagedWidget (_3: *const c_char, _2: WidgetClass, _1: Widget) -> Widget, + pub fn XtVaCreatePopupShell (_3: *const c_char, _2: WidgetClass, _1: Widget) -> Widget, + pub fn XtVaCreateWidget (_3: *const c_char, _2: WidgetClass, _1: Widget) -> Widget, + pub fn XtVaGetApplicationResources (_4: Widget, _3: *mut c_void, _2: *mut XtResource, _1: c_uint) -> (), + pub fn XtVaGetSubresources (_6: Widget, _5: *mut c_void, _4: *const c_char, _3: *const c_char, _2: *mut XtResource, _1: c_uint) -> (), + pub fn XtVaGetSubvalues (_3: *mut c_void, _2: *mut XtResource, _1: c_uint) -> (), + pub fn XtVaGetValues (_1: Widget) -> (), + pub fn XtVaOpenApplication (_8: *mut XtAppContext, _7: *const c_char, _6: XrmOptionDescList, _5: c_uint, _4: *mut c_int, _3: *mut *mut c_char, _2: *mut *mut c_char, _1: WidgetClass) -> Widget, + pub fn XtVaSetSubvalues (_3: *mut c_void, _2: *mut XtResource, _1: c_uint) -> (), + pub fn XtVaSetValues (_1: Widget) -> (), +globals: +} + + + + + + + + +#[repr(C)] pub struct Arg; +#[repr(C)] pub struct SubstitutionRec; +#[repr(C)] pub struct _TranslationData; +#[repr(C)] pub struct _WidgetClassRec; +#[repr(C)] pub struct _WidgetRec; +#[repr(C)] pub struct _XtActionsRec; +#[repr(C)] pub struct _XtAppStruct; +#[repr(C)] pub struct _XtCallbackRec; +#[repr(C)] pub struct _XtCheckpointTokenRec; +#[repr(C)] pub struct XtConvertArgRec; +#[repr(C)] pub struct _XtResource; +#[repr(C)] pub struct XtWidgetGeometry; + + +pub type XtCallbackStatus = c_int; +pub type XtGeometryResult = c_int; +pub type XtGrabKind = c_int; +pub type XtListPosition = c_int; + +#[allow(dead_code)] +#[cfg(test)] +#[repr(C)] +enum TestEnum { + Variant1, + Variant2, +} + +#[test] +fn enum_size_test () { + assert!(::std::mem::size_of::() == ::std::mem::size_of::()); +} + + +pub type ArgList = *mut Arg; +pub type Substitution = *mut SubstitutionRec; +pub type Widget = *mut _WidgetRec; +pub type WidgetClass = *mut _WidgetClassRec; +pub type XtAccelerators = *mut _TranslationData; +pub type XtActionList = *mut _XtActionsRec; +pub type XtActionsRec = _XtActionsRec; +pub type XtAppContext = *mut _XtAppStruct; +pub type XtCallbackList = *mut _XtCallbackRec; +pub type XtCallbackRec = 
_XtCallbackRec; +pub type XtCheckpointToken = *mut _XtCheckpointTokenRec; +pub type XtCheckpointTokenRec = _XtCheckpointTokenRec; +pub type XtConvertArgList = *mut XtConvertArgRec; +pub type XtResource = _XtResource; +pub type XtResourceList = *mut _XtResource; +pub type XtTranslations = *mut _TranslationData; diff --git a/third_party/rust/x11/src/xtest.rs b/third_party/rust/x11/src/xtest.rs new file mode 100644 index 000000000000..dc8f91f8be11 --- /dev/null +++ b/third_party/rust/x11/src/xtest.rs @@ -0,0 +1,42 @@ + + + + +use std::os::raw::{ + c_int, + c_uint, + c_ulong, +}; + +use ::xinput::XDevice; +use ::xlib::{ + Display, + GC, + Visual, +}; + + + + + + + +x11_link! { Xf86vmode, xtst, ["libXtst.so.6", "libXtst.so"], 15, + pub fn XTestCompareCurrentCursorWithWindow (_2: *mut Display, _1: c_ulong) -> c_int, + pub fn XTestCompareCursorWithWindow (_3: *mut Display, _2: c_ulong, _1: c_ulong) -> c_int, + pub fn XTestDiscard (_1: *mut Display) -> c_int, + pub fn XTestFakeButtonEvent (_4: *mut Display, _3: c_uint, _2: c_int, _1: c_ulong) -> c_int, + pub fn XTestFakeDeviceButtonEvent (_7: *mut Display, _6: *mut XDevice, _5: c_uint, _4: c_int, _3: *mut c_int, _2: c_int, _1: c_ulong) -> c_int, + pub fn XTestFakeDeviceKeyEvent (_7: *mut Display, _6: *mut XDevice, _5: c_uint, _4: c_int, _3: *mut c_int, _2: c_int, _1: c_ulong) -> c_int, + pub fn XTestFakeDeviceMotionEvent (_7: *mut Display, _6: *mut XDevice, _5: c_int, _4: c_int, _3: *mut c_int, _2: c_int, _1: c_ulong) -> c_int, + pub fn XTestFakeKeyEvent (_4: *mut Display, _3: c_uint, _2: c_int, _1: c_ulong) -> c_int, + pub fn XTestFakeMotionEvent (_5: *mut Display, _4: c_int, _3: c_int, _2: c_int, _1: c_ulong) -> c_int, + pub fn XTestFakeProximityEvent (_6: *mut Display, _5: *mut XDevice, _4: c_int, _3: *mut c_int, _2: c_int, _1: c_ulong) -> c_int, + pub fn XTestFakeRelativeMotionEvent (_4: *mut Display, _3: c_int, _2: c_int, _1: c_ulong) -> c_int, + pub fn XTestGrabControl (_2: *mut Display, _1: c_int) -> c_int, + pub fn XTestQueryExtension (_5: *mut Display, _4: *mut c_int, _3: *mut c_int, _2: *mut c_int, _1: *mut c_int) -> c_int, + pub fn XTestSetGContextOfGC (_2: GC, _1: c_ulong) -> (), + pub fn XTestSetVisualIDOfVisual (_2: *mut Visual, _1: c_ulong) -> (), +variadic: +globals: +} diff --git a/toolkit/library/gtest/rust/Cargo.toml b/toolkit/library/gtest/rust/Cargo.toml index a0184e1dc243..62d67acece2c 100644 --- a/toolkit/library/gtest/rust/Cargo.toml +++ b/toolkit/library/gtest/rust/Cargo.toml @@ -28,6 +28,7 @@ new_cert_storage = ["gkrust-shared/new_cert_storage"] fuzzing_interfaces = ["gkrust-shared/fuzzing_interfaces", "gecko-fuzz-targets"] webrtc = ["gkrust-shared/webrtc"] wasm_library_sandboxing = ["gkrust-shared/wasm_library_sandboxing"] +webgpu = ["gkrust-shared/webgpu"] [dependencies] bench-collections-gtest = { path = "../../../../xpcom/rust/gtest/bench-collections" } diff --git a/toolkit/library/moz.build b/toolkit/library/moz.build index 5055697e0792..9d31e34b2dcd 100644 --- a/toolkit/library/moz.build +++ b/toolkit/library/moz.build @@ -300,6 +300,13 @@ if CONFIG['OS_ARCH'] == 'FreeBSD': 'util', ] +if CONFIG['OS_ARCH'] == 'Darwin': + OS_LIBS += [ + # Link to Metal as required by the Metal gfx-hal backend + '-framework Metal', + ] + + if CONFIG['OS_ARCH'] == 'WINNT': OS_LIBS += [ 'shell32', @@ -327,6 +334,10 @@ if CONFIG['OS_ARCH'] == 'WINNT': 'sapi', 'dxguid', 'dhcpcsvc', + # gfx-rs supports D3D11 and D3D12, but we are not linking to them implicitly + #"d3d11", # should be explicitly linked by gfx-backend-d3d11 + #'d3d12', # 
should be explicitly linked by d3d12-rs + 'd3dcompiler', ] if CONFIG['CC_TYPE'] == 'clang-cl': @@ -339,6 +350,9 @@ if CONFIG['OS_ARCH'] == 'WINNT': 'oleacc', ] + # Prevent winapi-rs from statically linking + LIBRARY_DEFINES['WINAPI_NO_BUNDLED_LIBRARIES'] = True + if CONFIG['MOZ_WIDGET_TOOLKIT'] == 'windows': OS_LIBS += [ 'usp10', diff --git a/toolkit/library/rust/Cargo.toml b/toolkit/library/rust/Cargo.toml index 21d29c3663c3..786645106085 100644 --- a/toolkit/library/rust/Cargo.toml +++ b/toolkit/library/rust/Cargo.toml @@ -29,6 +29,7 @@ new_cert_storage = ["gkrust-shared/new_cert_storage"] fuzzing_interfaces = ["gkrust-shared/fuzzing_interfaces"] webrtc = ["gkrust-shared/webrtc"] wasm_library_sandboxing = ["gkrust-shared/wasm_library_sandboxing"] +webgpu = ["gkrust-shared/webgpu"] [dependencies] gkrust-shared = { path = "shared" } diff --git a/toolkit/library/rust/gkrust-features.mozbuild b/toolkit/library/rust/gkrust-features.mozbuild index 8c6c48e499d5..a08bb8a094b9 100644 --- a/toolkit/library/rust/gkrust-features.mozbuild +++ b/toolkit/library/rust/gkrust-features.mozbuild @@ -12,7 +12,7 @@ if CONFIG['MOZ_DEBUG']: 'gecko_refcount_logging', ] -gkrust_features += ['quantum_render'] +gkrust_features += ['quantum_render', 'webgpu'] if CONFIG['MOZ_WEBRENDER_DEBUGGER']: gkrust_features += ['webrender_debugger'] diff --git a/toolkit/library/rust/shared/Cargo.toml b/toolkit/library/rust/shared/Cargo.toml index 709edc20a373..e300040f0845 100644 --- a/toolkit/library/rust/shared/Cargo.toml +++ b/toolkit/library/rust/shared/Cargo.toml @@ -34,7 +34,7 @@ log = {version = "0.4", features = ["release_max_level_info"]} env_logger = {version = "0.6", default-features = false} # disable `regex` to reduce code size cose-c = { version = "0.1.5" } jsrust_shared = { path = "../../../../js/src/rust/shared" } -arrayvec = "0.4" +arrayvec = "0.5" cert_storage = { path = "../../../../security/manager/ssl/cert_storage", optional = true } bitsdownload = { path = "../../../components/bitsdownload", optional = true } storage = { path = "../../../../storage/rust" } @@ -45,6 +45,7 @@ mdns_service = { path="../../../../media/mtransport/mdns_service", optional = tr neqo_glue = { path = "../../../../netwerk/socket/neqo_glue" } rlbox_lucet_sandbox = { version = "0.1.0", optional = true } mapped_hyph = { git = "https://github.com/jfkthame/mapped_hyph.git", tag = "v0.3.0" } +wgpu-remote = { path = "../../../../dom/webgpu/wgpu-remote", optional = true } [build-dependencies] rustc_version = "0.2" @@ -73,6 +74,7 @@ new_cert_storage = ["cert_storage"] fuzzing_interfaces = [] webrtc = ["mdns_service"] wasm_library_sandboxing = ["rlbox_lucet_sandbox"] +webgpu = ["wgpu-remote"] [lib] path = "lib.rs" diff --git a/toolkit/library/rust/shared/lib.rs b/toolkit/library/rust/shared/lib.rs index ece53a712bda..5885e8a9334c 100644 --- a/toolkit/library/rust/shared/lib.rs +++ b/toolkit/library/rust/shared/lib.rs @@ -54,6 +54,8 @@ extern crate audio_thread_priority; #[cfg(feature = "webrtc")] extern crate mdns_service; extern crate neqo_glue; +#[cfg(feature = "webgpu")] +extern crate wgpu_remote; #[cfg(feature = "wasm_library_sandboxing")] extern crate rlbox_lucet_sandbox; @@ -197,11 +199,11 @@ fn str_truncate_valid(s: &str, mut mid: usize) -> &str { #[derive(Debug, PartialEq)] -struct ArrayCString> { +struct ArrayCString> { inner: ArrayString, } -impl, A: Array> From for ArrayCString { +impl, A: Copy + Array> From for ArrayCString { @@ -211,7 +213,7 @@ impl, A: Array> From for ArrayCString { fn from(s: S) -> Self { let s = s.as_ref(); - 
let len = cmp::min(s.len(), A::capacity() - 1); + let len = cmp::min(s.len(), A::CAPACITY - 1); let mut result = Self { inner: ArrayString::from(str_truncate_valid(s, len)).unwrap(), }; @@ -220,7 +222,7 @@ impl, A: Array> From for ArrayCString { } } -impl> Deref for ArrayCString { +impl> Deref for ArrayCString { type Target = str; fn deref(&self) -> &str { diff --git a/tools/vcs/mach_commands.py b/tools/vcs/mach_commands.py index df6b41752955..79336be56ff9 100644 --- a/tools/vcs/mach_commands.py +++ b/tools/vcs/mach_commands.py @@ -32,6 +32,12 @@ 'bugzilla_product': 'Core', 'bugzilla_component': 'Graphics: WebRender', }, + 'webgpu': { + 'github': 'gfx-rs/wgpu', + 'path': 'dom/webgpu', + 'bugzilla_product': 'Core', + 'bugzilla_component': 'Graphics: WebGPU', + }, 'debugger': { 'github': 'firefox-devtools/debugger', 'path': 'devtools/client/debugger',
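For context on the lib.rs hunk above: it tracks the `arrayvec` bump from 0.4 to 0.5 in shared/Cargo.toml, where the `Array::capacity()` method became an associated `CAPACITY` constant and `ArrayString` gained a `Copy` bound on its backing array. A minimal sketch of the 0.5 API as used by `ArrayCString` (the helper name here is illustrative, not from the tree):

```rust
use arrayvec::{Array, ArrayString};

// Build a NUL-terminated string in a fixed-size stack buffer, in the
// style of ArrayCString: arrayvec 0.5 exposes capacity as the
// associated constant A::CAPACITY and requires Copy backing arrays.
fn to_c_buffer<A: Copy + Array<Item = u8>>(s: &str) -> ArrayString<A> {
    // Reserve one byte for the trailing NUL, as the hunk above does.
    let len = s.len().min(A::CAPACITY - 1);
    // NB: a real caller must truncate on a char boundary first;
    // lib.rs uses str_truncate_valid for that. ASCII is fine here.
    let mut buf = ArrayString::from(&s[..len]).unwrap();
    buf.push('\0');
    buf
}

fn main() {
    let c: ArrayString<[u8; 16]> = to_c_buffer("hello");
    assert!(c.ends_with('\0'));
}
```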

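The `x11_link!` invocations earlier in this patch (Xrender, Xss, Xt, Xtst) declare lazily loaded bindings rather than link-time dependencies: the macro expands to a struct of function pointers resolved from the shared object at runtime, so Firefox builds without a hard dependency on these X libraries. A minimal sketch of that pattern, using the `libloading` crate purely for illustration (the vendored x11 crate rolls its own dlopen wrapper):

```rust
use std::os::raw::{c_int, c_void};

use libloading::{Library, Symbol};

fn main() -> Result<(), Box<dyn std::error::Error>> {
    unsafe {
        // Try each candidate SO name in order, like the macro's
        // fallback list ["libXrender.so.1", "libXrender.so"].
        let lib = Library::new("libXrender.so.1")
            .or_else(|_| Library::new("libXrender.so"))?;
        // Resolve one of the listed `pub fn` symbols at runtime; a real
        // caller would then pass a `*mut Display` from XOpenDisplay.
        let _query: Symbol<
            unsafe extern "C" fn(*mut c_void, *mut c_int, *mut c_int) -> c_int,
        > = lib.get(b"XRenderQueryVersion\0")?;
        println!("libXrender loaded; XRenderQueryVersion resolved");
    }
    Ok(())
}
```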
( + &self, + _path: P, + ) -> Result + where + P: AsRef, + { + unimplemented!() + } + + pub fn create_shader_library_from_source( + &self, + source: S, + version: LanguageVersion, + rasterization_enabled: bool, + ) -> Result + where + S: AsRef, + { + let options = metal::CompileOptions::new(); + let msl_version = match version { + LanguageVersion { major: 1, minor: 0 } => MTLLanguageVersion::V1_0, + LanguageVersion { major: 1, minor: 1 } => MTLLanguageVersion::V1_1, + LanguageVersion { major: 1, minor: 2 } => MTLLanguageVersion::V1_2, + LanguageVersion { major: 2, minor: 0 } => MTLLanguageVersion::V2_0, + LanguageVersion { major: 2, minor: 1 } => MTLLanguageVersion::V2_1, + _ => { + return Err(ShaderError::CompilationFailed( + "shader model not supported".into(), + )); + } + }; + if msl_version > self.shared.private_caps.msl_version { + return Err(ShaderError::CompilationFailed( + "shader model too high".into(), + )); + } + options.set_language_version(msl_version); + + self.shared + .device + .lock() + .new_library_with_source(source.as_ref(), &options) + .map(|library| { + n::ShaderModule::Compiled(n::ModuleInfo { + library, + entry_point_map: n::EntryPointMap::default(), + rasterization_enabled, + }) + }) + .map_err(|e| ShaderError::CompilationFailed(e.into())) + } + + fn compile_shader_library( + device: &Mutex, + raw_data: &[u32], + compiler_options: &msl::CompilerOptions, + msl_version: MTLLanguageVersion, + specialization: &pso::Specialization, + ) -> Result { + let module = spirv::Module::from_words(raw_data); + + + let mut ast = spirv::Ast::::parse(&module).map_err(|err| { + ShaderError::CompilationFailed(match err { + SpirvErrorCode::CompilationError(msg) => msg, + SpirvErrorCode::Unhandled => "Unexpected parse error".into(), + }) + })?; + + spirv_cross_specialize_ast(&mut ast, specialization)?; + + ast.set_compiler_options(compiler_options).map_err(gen_unexpected_error)?; + + let entry_points = ast.get_entry_points().map_err(|err| { + ShaderError::CompilationFailed(match err { + SpirvErrorCode::CompilationError(msg) => msg, + SpirvErrorCode::Unhandled => "Unexpected entry point error".into(), + }) + })?; + + let shader_code = ast.compile().map_err(|err| { + ShaderError::CompilationFailed(match err { + SpirvErrorCode::CompilationError(msg) => msg, + SpirvErrorCode::Unhandled => "Unknown compile error".into(), + }) + })?; + + let mut entry_point_map = n::EntryPointMap::default(); + for entry_point in entry_points { + info!("Entry point {:?}", entry_point); + let cleansed = ast + .get_cleansed_entry_point_name(&entry_point.name, entry_point.execution_model) + .map_err(|err| { + ShaderError::CompilationFailed(match err { + SpirvErrorCode::CompilationError(msg) => msg, + SpirvErrorCode::Unhandled => "Unknown compile error".into(), + }) + })?; + entry_point_map.insert( + entry_point.name, + spirv::EntryPoint { + name: cleansed, + ..entry_point + }, + ); + } + + let rasterization_enabled = ast + .is_rasterization_enabled() + .map_err(|_| ShaderError::CompilationFailed("Unknown compile error".into()))?; + + + debug!("SPIRV-Cross generated shader:\n{}", shader_code); + + let options = metal::CompileOptions::new(); + options.set_language_version(msl_version); + + let library = device + .lock() + .new_library_with_source(shader_code.as_ref(), &options) + .map_err(|err| ShaderError::CompilationFailed(err.into()))?; + + Ok(n::ModuleInfo { + library, + entry_point_map, + rasterization_enabled, + }) + } + + fn load_shader( + &self, + ep: &pso::EntryPoint, + layout: &n::PipelineLayout, + 
primitive_class: MTLPrimitiveTopologyClass, + pipeline_cache: Option<&n::PipelineCache>, + ) -> Result<(metal::Library, metal::Function, metal::MTLSize, bool), pso::CreationError> { + let device = &self.shared.device; + let msl_version = self.shared.private_caps.msl_version; + let module_map; + let (info_owned, info_guard); + + let info = match *ep.module { + n::ShaderModule::Compiled(ref info) => info, + n::ShaderModule::Raw(ref data) => { + let compiler_options = match primitive_class { + MTLPrimitiveTopologyClass::Point => &layout.shader_compiler_options_point, + _ => &layout.shader_compiler_options, + }; + match pipeline_cache { + Some(cache) => { + module_map = cache + .modules + .get_or_create_with(compiler_options, || FastStorageMap::default()); + info_guard = module_map.get_or_create_with(data, || { + Self::compile_shader_library( + device, + data, + compiler_options, + msl_version, + &ep.specialization, + ) + .unwrap() + }); + &*info_guard + } + None => { + info_owned = Self::compile_shader_library( + device, + data, + compiler_options, + msl_version, + &ep.specialization, + ) + .map_err(|e| { + error!("Error compiling the shader {:?}", e); + pso::CreationError::Other + })?; + &info_owned + } + } + } + }; + + let lib = info.library.clone(); + let (name, wg_size) = match info.entry_point_map.get(ep.entry) { + Some(p) => ( + p.name.as_str(), + metal::MTLSize { + width: p.work_group_size.x as _, + height: p.work_group_size.y as _, + depth: p.work_group_size.z as _, + }, + ), + + None => ( + ep.entry, + metal::MTLSize { + width: 0, + height: 0, + depth: 0, + }, + ), + }; + let mtl_function = get_final_function( + &lib, + name, + &ep.specialization, + self.shared.private_caps.function_specialization, + ) + .map_err(|e| { + error!("Invalid shader entry point '{}': {:?}", name, e); + pso::CreationError::Other + })?; + + Ok((lib, mtl_function, wg_size, info.rasterization_enabled)) + } + + fn make_sampler_descriptor( + &self, + info: &image::SamplerDesc, + ) -> Option { + let caps = &self.shared.private_caps; + let descriptor = metal::SamplerDescriptor::new(); + + descriptor.set_normalized_coordinates(info.normalized); + + descriptor.set_min_filter(conv::map_filter(info.min_filter)); + descriptor.set_mag_filter(conv::map_filter(info.mag_filter)); + descriptor.set_mip_filter(match info.mip_filter { + + + image::Filter::Nearest if info.lod_range.end.0 < 0.5 => { + MTLSamplerMipFilter::NotMipmapped + } + image::Filter::Nearest => MTLSamplerMipFilter::Nearest, + image::Filter::Linear => MTLSamplerMipFilter::Linear, + }); + + if let image::Anisotropic::On(aniso) = info.anisotropic { + descriptor.set_max_anisotropy(aniso as _); + } + + let (s, t, r) = info.wrap_mode; + descriptor.set_address_mode_s(conv::map_wrap_mode(s)); + descriptor.set_address_mode_t(conv::map_wrap_mode(t)); + descriptor.set_address_mode_r(conv::map_wrap_mode(r)); + + let lod_bias = info.lod_bias.0; + if lod_bias != 0.0 { + if self.features.contains(hal::Features::SAMPLER_MIP_LOD_BIAS) { + unsafe { + descriptor.set_lod_bias(lod_bias); + } + } else { + error!("Lod bias {:?} is not supported", info.lod_bias); + } + } + descriptor.set_lod_min_clamp(info.lod_range.start.0); + descriptor.set_lod_max_clamp(info.lod_range.end.0); + + + if (caps.os_is_mac && caps.has_version_at_least(10, 13)) + || (!caps.os_is_mac && caps.has_version_at_least(9, 0)) + { + descriptor.set_lod_average(true); + } + + if let Some(fun) = info.comparison { + if !caps.mutable_comparison_samplers { + return None; + } + 
descriptor.set_compare_function(conv::map_compare_function(fun)); + } + if [r, s, t].iter().any(|&am| am == image::WrapMode::Border) { + descriptor.set_border_color(match info.border.0 { + 0x0000_0000 => MTLSamplerBorderColor::TransparentBlack, + 0x0000_00FF => MTLSamplerBorderColor::OpaqueBlack, + 0xFFFF_FFFF => MTLSamplerBorderColor::OpaqueWhite, + other => { + error!("Border color 0x{:X} is not supported", other); + MTLSamplerBorderColor::TransparentBlack + } + }); + } + + if caps.argument_buffers { + descriptor.set_support_argument_buffers(true); + } + + Some(descriptor) + } + + fn make_sampler_data(info: &image::SamplerDesc) -> msl::SamplerData { + fn map_address(wrap: image::WrapMode) -> msl::SamplerAddress { + match wrap { + image::WrapMode::Tile => msl::SamplerAddress::Repeat, + image::WrapMode::Mirror => msl::SamplerAddress::MirroredRepeat, + image::WrapMode::Clamp => msl::SamplerAddress::ClampToEdge, + image::WrapMode::Border => msl::SamplerAddress::ClampToBorder, + } + } + + let lods = info.lod_range.start.0 .. info.lod_range.end.0; + msl::SamplerData { + coord: if info.normalized { + msl::SamplerCoord::Normalized + } else { + msl::SamplerCoord::Pixel + }, + min_filter: match info.min_filter { + image::Filter::Nearest => msl::SamplerFilter::Nearest, + image::Filter::Linear => msl::SamplerFilter::Linear, + }, + mag_filter: match info.mag_filter { + image::Filter::Nearest => msl::SamplerFilter::Nearest, + image::Filter::Linear => msl::SamplerFilter::Linear, + }, + mip_filter: match info.min_filter { + image::Filter::Nearest if info.lod_range.end.0 < 0.5 => { + msl::SamplerMipFilter::None + } + image::Filter::Nearest => msl::SamplerMipFilter::Nearest, + image::Filter::Linear => msl::SamplerMipFilter::Linear, + }, + s_address: map_address(info.wrap_mode.0), + t_address: map_address(info.wrap_mode.1), + r_address: map_address(info.wrap_mode.2), + compare_func: match info.comparison { + Some(func) => unsafe { mem::transmute(conv::map_compare_function(func) as u32) }, + None => msl::SamplerCompareFunc::Always, + }, + border_color: match info.border.0 { + 0x0000_0000 => msl::SamplerBorderColor::TransparentBlack, + 0x0000_00FF => msl::SamplerBorderColor::OpaqueBlack, + 0xFFFF_FFFF => msl::SamplerBorderColor::OpaqueWhite, + other => { + error!("Border color 0x{:X} is not supported", other); + msl::SamplerBorderColor::TransparentBlack + } + }, + lod_clamp_min: lods.start.into(), + lod_clamp_max: lods.end.into(), + max_anisotropy: match info.anisotropic { + image::Anisotropic::On(aniso) => aniso as i32, + image::Anisotropic::Off => 0, + }, + } + } +} + +impl hal::device::Device for Device { + unsafe fn create_command_pool( + &self, + _family: QueueFamilyId, + _flags: CommandPoolCreateFlags, + ) -> Result { + Ok(command::CommandPool::new( + &self.shared, + self.online_recording.clone(), + )) + } + + unsafe fn destroy_command_pool(&self, mut pool: command::CommandPool) { + use hal::pool::CommandPool as _; + pool.reset(false); + } + + unsafe fn create_render_pass<'a, IA, IS, ID>( + &self, + attachments: IA, + subpasses: IS, + _dependencies: ID, + ) -> Result + where + IA: IntoIterator, + IA::Item: Borrow, + IS: IntoIterator, + IS::Item: Borrow>, + ID: IntoIterator, + ID::Item: Borrow, + { + let attachments: Vec = attachments + .into_iter() + .map(|at| at.borrow().clone()) + .collect(); + + let mut subpasses: Vec = subpasses + .into_iter() + .map(|sp| { + let sub = sp.borrow(); + let mut colors: ArrayVec<[_; MAX_COLOR_ATTACHMENTS]> = sub + .colors + .iter() + .map(|&(id, _)| (id, 
n::SubpassOps::empty(), None))
+                    .collect();
+                for (color, &(resolve_id, _)) in colors.iter_mut().zip(sub.resolves.iter()) {
+                    if resolve_id != pass::ATTACHMENT_UNUSED {
+                        color.2 = Some(resolve_id);
+                    }
+                }
+
+                n::Subpass {
+                    colors,
+                    depth_stencil: sub
+                        .depth_stencil
+                        .map(|&(id, _)| (id, n::SubpassOps::empty())),
+                    inputs: sub.inputs.iter().map(|&(id, _)| id).collect(),
+                    target_formats: n::SubpassFormats {
+                        colors: sub
+                            .colors
+                            .iter()
+                            .map(|&(id, _)| {
+                                let format =
+                                    attachments[id].format.expect("No color format provided");
+                                let mtl_format = self
+                                    .shared
+                                    .private_caps
+                                    .map_format(format)
+                                    .expect("Unable to map color format!");
+                                (mtl_format, Channel::from(format.base_format().1))
+                            })
+                            .collect(),
+                        depth_stencil: sub.depth_stencil.map(|&(id, _)| {
+                            self.shared
+                                .private_caps
+                                .map_format(
+                                    attachments[id]
+                                        .format
+                                        .expect("No depth-stencil format provided"),
+                                )
+                                .expect("Unable to map depth-stencil format!")
+                        }),
+                    },
+                }
+            })
+            .collect();
+
+        // Mark the first subpass that uses each attachment with LOAD.
+        let mut use_mask = 0u64;
+        for sub in subpasses.iter_mut() {
+            for &mut (id, ref mut ops, _) in sub.colors.iter_mut() {
+                if use_mask & 1 << id == 0 {
+                    *ops |= n::SubpassOps::LOAD;
+                    use_mask ^= 1 << id;
+                }
+            }
+            if let Some((id, ref mut ops)) = sub.depth_stencil {
+                if use_mask & 1 << id == 0 {
+                    *ops |= n::SubpassOps::LOAD;
+                    use_mask ^= 1 << id;
+                }
+            }
+        }
+
+        // Walk the subpasses backwards to mark the last use of each attachment with STORE.
+        for sub in subpasses.iter_mut().rev() {
+            for &mut (id, ref mut ops, _) in sub.colors.iter_mut() {
+                if use_mask & 1 << id != 0 {
+                    *ops |= n::SubpassOps::STORE;
+                    use_mask ^= 1 << id;
+                }
+            }
+            if let Some((id, ref mut ops)) = sub.depth_stencil {
+                if use_mask & 1 << id != 0 {
+                    *ops |= n::SubpassOps::STORE;
+                    use_mask ^= 1 << id;
+                }
+            }
+        }
+
+        Ok(n::RenderPass {
+            attachments,
+            subpasses,
+            name: String::new(),
+        })
+    }
+
+    unsafe fn create_pipeline_layout<IS, IR>(
+        &self,
+        set_layouts: IS,
+        push_constant_ranges: IR,
+    ) -> Result<n::PipelineLayout, OutOfMemory>
+    where
+        IS: IntoIterator,
+        IS::Item: Borrow<n::DescriptorSetLayout>,
+        IR: IntoIterator,
+        IR::Item: Borrow<(pso::ShaderStageFlags, Range<u32>)>,
+    {
+        let mut stage_infos = [
+            (
+                pso::ShaderStageFlags::VERTEX,
+                spirv::ExecutionModel::Vertex,
+                n::ResourceData::<ResourceIndex>::new(),
+            ),
+            (
+                pso::ShaderStageFlags::FRAGMENT,
+                spirv::ExecutionModel::Fragment,
+                n::ResourceData::<ResourceIndex>::new(),
+            ),
+            (
+                pso::ShaderStageFlags::COMPUTE,
+                spirv::ExecutionModel::GlCompute,
+                n::ResourceData::<ResourceIndex>::new(),
+            ),
+        ];
+        let mut res_overrides = BTreeMap::new();
+        let mut const_samplers = BTreeMap::new();
+        let mut infos = Vec::new();
+
+        // Compute the per-stage push constant limits, in dwords.
+        let mut pc_buffers = [None; 3];
+        let mut pc_limits = [0u32; 3];
+        for pcr in push_constant_ranges {
+            let (flags, range) = pcr.borrow();
+            for (limit, &(stage_bit, _, _)) in pc_limits.iter_mut().zip(&stage_infos) {
+                if flags.contains(stage_bit) {
+                    debug_assert_eq!(range.end % 4, 0);
+                    *limit = (range.end / 4).max(*limit);
+                }
+            }
+        }
+
+        const LIMIT_MASK: u32 = 3;
+        // Round the limits up to a multiple of 4 dwords, so that whole
+        // vec4-sized granules can be bound as the push constant buffer.
+        for limit in &mut pc_limits {
+            if *limit > LIMIT_MASK {
+                *limit = (*limit + LIMIT_MASK) & !LIMIT_MASK;
+            }
+        }
+
+        for ((limit, ref mut buffer_index), &mut (_, stage, ref mut counters)) in pc_limits
+            .iter()
+            .zip(pc_buffers.iter_mut())
+            .zip(stage_infos.iter_mut())
+        {
+            if *limit != 0 {
+                let index = counters.buffers;
+                **buffer_index = Some(index);
+                counters.buffers += 1;
+
+                res_overrides.insert(
+                    msl::ResourceBindingLocation {
+                        stage,
+                        desc_set: PUSH_CONSTANTS_DESC_SET,
+                        binding: PUSH_CONSTANTS_DESC_BINDING,
+                    },
+                    msl::ResourceBinding {
+                        buffer_id: index as _,
+                        texture_id: !0,
+                        sampler_id: !0,
+                    },
+                );
+            }
+        }
+
+        for (set_index,
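+        // Editorial sketch (not part of the original patch): the rounding above is
+        // plain bit math. E.g. a 20-byte push-constant range gives limit = 5 dwords,
+        // and (5 + 3) & !3 = 8, i.e. two full vec4 granules:
+        //     assert_eq!((5u32 + 3) & !3, 8);
+        //     assert_eq!((8u32 + 3) & !3, 8); // already-aligned values are unchanged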
set_layout) in set_layouts.into_iter().enumerate() { + + let mut dynamic_buffers = Vec::new(); + let offsets = n::MultiStageResourceCounters { + vs: stage_infos[0].2.clone(), + ps: stage_infos[1].2.clone(), + cs: stage_infos[2].2.clone(), + }; + match *set_layout.borrow() { + n::DescriptorSetLayout::Emulated(ref desc_layouts, ref samplers) => { + for &(binding, ref data) in samplers { + + const_samplers.insert( + msl::SamplerLocation { + desc_set: set_index as u32, + binding, + }, + data.clone(), + ); + } + for layout in desc_layouts.iter() { + if layout + .content + .contains(n::DescriptorContent::DYNAMIC_BUFFER) + { + dynamic_buffers.alloc().init(n::MultiStageData { + vs: if layout.stages.contains(pso::ShaderStageFlags::VERTEX) { + stage_infos[0].2.buffers + } else { + !0 + }, + ps: if layout.stages.contains(pso::ShaderStageFlags::FRAGMENT) { + stage_infos[1].2.buffers + } else { + !0 + }, + cs: if layout.stages.contains(pso::ShaderStageFlags::COMPUTE) { + stage_infos[2].2.buffers + } else { + !0 + }, + }); + } + for &mut (stage_bit, stage, ref mut counters) in stage_infos.iter_mut() { + if !layout.stages.contains(stage_bit) { + continue; + } + let res = msl::ResourceBinding { + buffer_id: if layout.content.contains(n::DescriptorContent::BUFFER) + { + counters.buffers += 1; + (counters.buffers - 1) as _ + } else { + !0 + }, + texture_id: if layout + .content + .contains(n::DescriptorContent::TEXTURE) + { + counters.textures += 1; + (counters.textures - 1) as _ + } else { + !0 + }, + sampler_id: if layout + .content + .contains(n::DescriptorContent::SAMPLER) + { + counters.samplers += 1; + (counters.samplers - 1) as _ + } else { + !0 + }, + }; + if layout.array_index == 0 { + let location = msl::ResourceBindingLocation { + stage, + desc_set: set_index as _, + binding: layout.binding, + }; + res_overrides.insert(location, res); + } + } + } + } + n::DescriptorSetLayout::ArgumentBuffer { + ref bindings, + stage_flags, + .. 
+                } => {
+                    for &mut (stage_bit, stage, ref mut counters) in stage_infos.iter_mut() {
+                        let has_stage = stage_flags.contains(stage_bit);
+                        res_overrides.insert(
+                            msl::ResourceBindingLocation {
+                                stage,
+                                desc_set: set_index as _,
+                                binding: msl::ARGUMENT_BUFFER_BINDING,
+                            },
+                            msl::ResourceBinding {
+                                buffer_id: if has_stage { counters.buffers } else { !0 },
+                                texture_id: !0,
+                                sampler_id: !0,
+                            },
+                        );
+                        if has_stage {
+                            res_overrides.extend(bindings.iter().map(|(&binding, arg)| {
+                                let key = msl::ResourceBindingLocation {
+                                    stage,
+                                    desc_set: set_index as _,
+                                    binding,
+                                };
+                                (key, arg.res.clone())
+                            }));
+                            counters.buffers += 1;
+                        }
+                    }
+                }
+            }
+
+            infos.alloc().init(n::DescriptorSetInfo {
+                offsets,
+                dynamic_buffers,
+            });
+        }
+
+        // Make sure the final counters fit within the per-stage device limits.
+        for &(_, _, ref counters) in stage_infos.iter() {
+            assert!(counters.buffers <= self.shared.private_caps.max_buffers_per_stage);
+            assert!(counters.textures <= self.shared.private_caps.max_textures_per_stage);
+            assert!(counters.samplers <= self.shared.private_caps.max_samplers_per_stage);
+        }
+
+        let mut shader_compiler_options = msl::CompilerOptions::default();
+        shader_compiler_options.version = match self.shared.private_caps.msl_version {
+            MTLLanguageVersion::V1_0 => msl::Version::V1_0,
+            MTLLanguageVersion::V1_1 => msl::Version::V1_1,
+            MTLLanguageVersion::V1_2 => msl::Version::V1_2,
+            MTLLanguageVersion::V2_0 => msl::Version::V2_0,
+            MTLLanguageVersion::V2_1 => msl::Version::V2_1,
+            MTLLanguageVersion::V2_2 => msl::Version::V2_2,
+        };
+        shader_compiler_options.enable_point_size_builtin = false;
+        shader_compiler_options.vertex.invert_y = true;
+        shader_compiler_options.resource_binding_overrides = res_overrides;
+        shader_compiler_options.const_samplers = const_samplers;
+        shader_compiler_options.enable_argument_buffers = self.shared.private_caps.argument_buffers;
+        let mut shader_compiler_options_point = shader_compiler_options.clone();
+        shader_compiler_options_point.enable_point_size_builtin = true;
+
+        Ok(n::PipelineLayout {
+            shader_compiler_options,
+            shader_compiler_options_point,
+            infos,
+            total: n::MultiStageResourceCounters {
+                vs: stage_infos[0].2.clone(),
+                ps: stage_infos[1].2.clone(),
+                cs: stage_infos[2].2.clone(),
+            },
+            push_constants: n::MultiStageData {
+                vs: pc_buffers[0].map(|buffer_index| n::PushConstantInfo {
+                    count: pc_limits[0],
+                    buffer_index,
+                }),
+                ps: pc_buffers[1].map(|buffer_index| n::PushConstantInfo {
+                    count: pc_limits[1],
+                    buffer_index,
+                }),
+                cs: pc_buffers[2].map(|buffer_index| n::PushConstantInfo {
+                    count: pc_limits[2],
+                    buffer_index,
+                }),
+            },
+            total_push_constants: pc_limits[0].max(pc_limits[1]).max(pc_limits[2]),
+        })
+    }
+
+    unsafe fn create_pipeline_cache(
+        &self,
+        _data: Option<&[u8]>,
+    ) -> Result<n::PipelineCache, OutOfMemory> {
+        Ok(n::PipelineCache {
+            modules: FastStorageMap::default(),
+        })
+    }
+
+    unsafe fn get_pipeline_cache_data(
+        &self,
+        _cache: &n::PipelineCache,
+    ) -> Result<Vec<u8>, OutOfMemory> {
+        // Pipeline caches are in-memory only for now; nothing to serialize.
+        Ok(Vec::new())
+    }
+
+    unsafe fn destroy_pipeline_cache(&self, _cache: n::PipelineCache) {
+        // Dropping the cache frees the compiled modules.
+    }
+
+    unsafe fn merge_pipeline_caches<I>(
+        &self,
+        target: &n::PipelineCache,
+        sources: I,
+    ) -> Result<(), OutOfMemory>
+    where
+        I: IntoIterator,
+        I::Item: Borrow<n::PipelineCache>,
+    {
+        let mut dst = target.modules.whole_write();
+        for source in sources {
+            let src = source.borrow().modules.whole_write();
+            for (key, value) in src.iter() {
+                let storage = match dst.entry(key.clone()) {
+                    Entry::Vacant(e) => e.insert(FastStorageMap::default()),
+                    Entry::Occupied(e) => e.into_mut(),
+                };
+                let mut dst_module = storage.whole_write();
+                let
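+                // Editorial sketch (not part of the original patch): the cache is a
+                // two-level map, conceptually
+                //     FastStorageMap<msl::CompilerOptions, FastStorageMap<Vec<u32>, n::ModuleInfo>>
+                // so merging walks options -> (SPIR-V words -> compiled module) and
+                // only inserts pairs the target has not compiled yet.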
src_module = value.whole_write(); + for (key_module, value_module) in src_module.iter() { + match dst_module.entry(key_module.clone()) { + Entry::Vacant(em) => { + em.insert(value_module.clone()); + } + Entry::Occupied(em) => { + assert_eq!(em.get().library.as_ptr(), value_module.library.as_ptr()); + assert_eq!(em.get().entry_point_map, value_module.entry_point_map); + } + } + } + } + } + + Ok(()) + } + + unsafe fn create_graphics_pipeline<'a>( + &self, + pipeline_desc: &pso::GraphicsPipelineDesc<'a, Backend>, + cache: Option<&n::PipelineCache>, + ) -> Result { + debug!("create_graphics_pipeline {:#?}", pipeline_desc); + let pipeline = metal::RenderPipelineDescriptor::new(); + let pipeline_layout = &pipeline_desc.layout; + let (rp_attachments, subpass) = { + let pass::Subpass { main_pass, index } = pipeline_desc.subpass; + (&main_pass.attachments, &main_pass.subpasses[index]) + }; + + let (primitive_class, primitive_type) = match pipeline_desc.input_assembler.primitive { + pso::Primitive::PointList => { + (MTLPrimitiveTopologyClass::Point, MTLPrimitiveType::Point) + } + pso::Primitive::LineList => (MTLPrimitiveTopologyClass::Line, MTLPrimitiveType::Line), + pso::Primitive::LineStrip => { + (MTLPrimitiveTopologyClass::Line, MTLPrimitiveType::LineStrip) + } + pso::Primitive::TriangleList => ( + MTLPrimitiveTopologyClass::Triangle, + MTLPrimitiveType::Triangle, + ), + pso::Primitive::TriangleStrip => ( + MTLPrimitiveTopologyClass::Triangle, + MTLPrimitiveType::TriangleStrip, + ), + pso::Primitive::PatchList(_) => ( + MTLPrimitiveTopologyClass::Unspecified, + MTLPrimitiveType::Point, + ), + }; + if self.shared.private_caps.layered_rendering { + pipeline.set_input_primitive_topology(primitive_class); + } + + + let (vs_lib, vs_function, _, enable_rasterization) = self.load_shader( + &pipeline_desc.shaders.vertex, + pipeline_layout, + primitive_class, + cache, + )?; + pipeline.set_vertex_function(Some(&vs_function)); + + + let fs_function; + let fs_lib = match pipeline_desc.shaders.fragment { + Some(ref ep) => { + let (lib, fun, _, _) = + self.load_shader(ep, pipeline_layout, primitive_class, cache)?; + fs_function = fun; + pipeline.set_fragment_function(Some(&fs_function)); + Some(lib) + } + None => { + + + if subpass.colors.is_empty() && subpass.depth_stencil.is_none() { + pipeline.set_depth_attachment_pixel_format(metal::MTLPixelFormat::Depth32Float); + } + None + } + }; + + + if pipeline_desc.shaders.hull.is_some() { + return Err(pso::CreationError::Shader(ShaderError::UnsupportedStage( + pso::Stage::Hull, + ))); + } + if pipeline_desc.shaders.domain.is_some() { + return Err(pso::CreationError::Shader(ShaderError::UnsupportedStage( + pso::Stage::Domain, + ))); + } + if pipeline_desc.shaders.geometry.is_some() { + return Err(pso::CreationError::Shader(ShaderError::UnsupportedStage( + pso::Stage::Geometry, + ))); + } + + pipeline.set_rasterization_enabled(enable_rasterization); + + + let blend_targets = pipeline_desc + .blender + .targets + .iter() + .chain(iter::repeat(&pso::ColorBlendDesc::EMPTY)); + for (i, (&(mtl_format, _), color_desc)) in subpass + .target_formats + .colors + .iter() + .zip(blend_targets) + .enumerate() + { + let desc = pipeline + .color_attachments() + .object_at(i) + .expect("too many color attachments"); + + desc.set_pixel_format(mtl_format); + desc.set_write_mask(conv::map_write_mask(color_desc.mask)); + + if let Some(ref blend) = color_desc.blend { + desc.set_blending_enabled(true); + let (color_op, color_src, color_dst) = conv::map_blend_op(blend.color); + let 
(alpha_op, alpha_src, alpha_dst) = conv::map_blend_op(blend.alpha); + + desc.set_rgb_blend_operation(color_op); + desc.set_source_rgb_blend_factor(color_src); + desc.set_destination_rgb_blend_factor(color_dst); + + desc.set_alpha_blend_operation(alpha_op); + desc.set_source_alpha_blend_factor(alpha_src); + desc.set_destination_alpha_blend_factor(alpha_dst); + } + } + if let Some(mtl_format) = subpass.target_formats.depth_stencil { + let orig_format = rp_attachments[subpass.depth_stencil.unwrap().0] + .format + .unwrap(); + if orig_format.is_depth() { + pipeline.set_depth_attachment_pixel_format(mtl_format); + } + if orig_format.is_stencil() { + pipeline.set_stencil_attachment_pixel_format(mtl_format); + } + } + + + let vertex_descriptor = metal::VertexDescriptor::new(); + let mut vertex_buffers: n::VertexBufferVec = Vec::new(); + trace!("Vertex attribute remapping started"); + + for &pso::AttributeDesc { + location, + binding, + element, + } in &pipeline_desc.attributes + { + let original = pipeline_desc + .vertex_buffers + .iter() + .find(|vb| vb.binding == binding) + .expect("no associated vertex buffer found"); + + let elem_size = element.format.surface_desc().bits as pso::ElemOffset / 8; + let (cut_offset, base_offset) = + if original.stride == 0 || element.offset + elem_size <= original.stride { + (element.offset, 0) + } else { + let remainder = element.offset % original.stride; + if remainder + elem_size <= original.stride { + (remainder, element.offset - remainder) + } else { + (0, element.offset) + } + }; + let relative_index = vertex_buffers + .iter() + .position(|(ref vb, offset)| vb.binding == binding && base_offset == *offset) + .unwrap_or_else(|| { + vertex_buffers.alloc().init((original.clone(), base_offset)); + vertex_buffers.len() - 1 + }); + let mtl_buffer_index = self.shared.private_caps.max_buffers_per_stage + - 1 + - (relative_index as ResourceIndex); + if mtl_buffer_index < pipeline_layout.total.vs.buffers { + error!("Attribute offset {} exceeds the stride {}, and there is no room for replacement.", + element.offset, original.stride); + return Err(pso::CreationError::Other); + } + trace!("\tAttribute[{}] is mapped to vertex buffer[{}] with binding {} and offsets {} + {}", + location, binding, mtl_buffer_index, base_offset, cut_offset); + + let mtl_attribute_desc = vertex_descriptor + .attributes() + .object_at(location as usize) + .expect("too many vertex attributes"); + let mtl_vertex_format = + conv::map_vertex_format(element.format).expect("unsupported vertex format"); + mtl_attribute_desc.set_format(mtl_vertex_format); + mtl_attribute_desc.set_buffer_index(mtl_buffer_index as _); + mtl_attribute_desc.set_offset(cut_offset as _); + } + + for (i, (vb, _)) in vertex_buffers.iter().enumerate() { + let mtl_buffer_desc = vertex_descriptor + .layouts() + .object_at(self.shared.private_caps.max_buffers_per_stage as usize - 1 - i) + .expect("too many vertex descriptor layouts"); + if vb.stride % STRIDE_GRANULARITY != 0 { + error!( + "Stride ({}) must be a multiple of {}", + vb.stride, STRIDE_GRANULARITY + ); + return Err(pso::CreationError::Other); + } + if vb.stride != 0 { + mtl_buffer_desc.set_stride(vb.stride as u64); + match vb.rate { + VertexInputRate::Vertex => { + mtl_buffer_desc.set_step_function(MTLVertexStepFunction::PerVertex); + } + VertexInputRate::Instance(divisor) => { + mtl_buffer_desc.set_step_function(MTLVertexStepFunction::PerInstance); + mtl_buffer_desc.set_step_rate(divisor as u64); + } + } + } else { + mtl_buffer_desc.set_stride(256); + 
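+                // Editorial sketch (not part of the original patch): the remapping above
+                // splits an attribute offset that exceeds its buffer stride. E.g. with
+                // stride = 16 and offset = 36 (elem_size <= 12):
+                //     remainder = 36 % 16 = 4  ->  cut_offset = 4, base_offset = 32,
+                // i.e. the buffer is re-bound 32 bytes in and the attribute reads at +4,
+                // which keeps every attribute offset within Metal's stride limit.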
mtl_buffer_desc.set_step_function(MTLVertexStepFunction::PerInstance); + mtl_buffer_desc.set_step_rate(!0); + } + } + if !vertex_buffers.is_empty() { + pipeline.set_vertex_descriptor(Some(&vertex_descriptor)); + } + + let rasterizer_state = Some(n::RasterizerState { + front_winding: conv::map_winding(pipeline_desc.rasterizer.front_face), + fill_mode: conv::map_polygon_mode(pipeline_desc.rasterizer.polygon_mode), + cull_mode: match conv::map_cull_face(pipeline_desc.rasterizer.cull_face) { + Some(mode) => mode, + None => { + + + error!("Culling both sides is not yet supported"); + + metal::MTLCullMode::None + } + }, + depth_clip: if self.shared.private_caps.depth_clip_mode { + Some(if pipeline_desc.rasterizer.depth_clamping { + metal::MTLDepthClipMode::Clamp + } else { + metal::MTLDepthClipMode::Clip + }) + } else { + None + }, + }); + let depth_bias = pipeline_desc + .rasterizer + .depth_bias + .unwrap_or(pso::State::Static(pso::DepthBias::default())); + + + let device = self.shared.device.lock(); + self.shared + .service_pipes + .depth_stencil_states + .prepare(&pipeline_desc.depth_stencil, &*device); + + if let Some(multisampling) = &pipeline_desc.multisampling { + pipeline.set_sample_count(multisampling.rasterization_samples as u64); + pipeline.set_alpha_to_coverage_enabled(multisampling.alpha_coverage); + pipeline.set_alpha_to_one_enabled(multisampling.alpha_to_one); + + + } + + device + .new_render_pipeline_state(&pipeline) + .map(|raw| n::GraphicsPipeline { + vs_lib, + fs_lib, + raw, + primitive_type, + vs_pc_info: pipeline_desc.layout.push_constants.vs, + ps_pc_info: pipeline_desc.layout.push_constants.ps, + rasterizer_state, + depth_bias, + depth_stencil_desc: pipeline_desc.depth_stencil.clone(), + baked_states: pipeline_desc.baked_states.clone(), + vertex_buffers, + attachment_formats: subpass.target_formats.clone(), + }) + .map_err(|err| { + error!("PSO creation failed: {}", err); + pso::CreationError::Other + }) + } + + unsafe fn create_compute_pipeline<'a>( + &self, + pipeline_desc: &pso::ComputePipelineDesc<'a, Backend>, + cache: Option<&n::PipelineCache>, + ) -> Result { + debug!("create_compute_pipeline {:?}", pipeline_desc); + let pipeline = metal::ComputePipelineDescriptor::new(); + + let (cs_lib, cs_function, work_group_size, _) = self.load_shader( + &pipeline_desc.shader, + &pipeline_desc.layout, + MTLPrimitiveTopologyClass::Unspecified, + cache, + )?; + pipeline.set_compute_function(Some(&cs_function)); + + self.shared + .device + .lock() + .new_compute_pipeline_state(&pipeline) + .map(|raw| n::ComputePipeline { + cs_lib, + raw, + work_group_size, + pc_info: pipeline_desc.layout.push_constants.cs, + }) + .map_err(|err| { + error!("PSO creation failed: {}", err); + pso::CreationError::Other + }) + } + + unsafe fn create_framebuffer( + &self, + _render_pass: &n::RenderPass, + attachments: I, + extent: image::Extent, + ) -> Result + where + I: IntoIterator, + I::Item: Borrow, + { + Ok(n::Framebuffer { + extent, + attachments: attachments + .into_iter() + .map(|at| at.borrow().texture.clone()) + .collect(), + }) + } + + unsafe fn create_shader_module( + &self, + raw_data: &[u32], + ) -> Result { + + let depends_on_pipeline_layout = true; + + + + + Ok(if depends_on_pipeline_layout { + n::ShaderModule::Raw(raw_data.to_vec()) + } else { + let mut options = msl::CompilerOptions::default(); + options.enable_point_size_builtin = false; + options.vertex.invert_y = true; + let info = Self::compile_shader_library( + &self.shared.device, + raw_data, + &options, + 
self.shared.private_caps.msl_version,
+                &pso::Specialization::default(),
+            )?;
+            n::ShaderModule::Compiled(info)
+        })
+    }
+
+    unsafe fn create_sampler(
+        &self,
+        info: &image::SamplerDesc,
+    ) -> Result<n::Sampler, AllocationError> {
+        Ok(n::Sampler {
+            raw: match self.make_sampler_descriptor(&info) {
+                Some(ref descriptor) => Some(self.shared.device.lock().new_sampler(descriptor)),
+                None => None,
+            },
+            data: Self::make_sampler_data(&info),
+        })
+    }
+
+    unsafe fn destroy_sampler(&self, _sampler: n::Sampler) {}
+
+    unsafe fn map_memory<R: RangeArg<u64>>(
+        &self,
+        memory: &n::Memory,
+        generic_range: R,
+    ) -> Result<*mut u8, MapError> {
+        let range = memory.resolve(&generic_range);
+        debug!("map_memory of size {} at {:?}", memory.size, range);
+
+        let base_ptr = match memory.heap {
+            n::MemoryHeap::Public(_, ref cpu_buffer) => cpu_buffer.contents() as *mut u8,
+            n::MemoryHeap::Native(_) | n::MemoryHeap::Private => panic!("Unable to map memory!"),
+        };
+        Ok(base_ptr.offset(range.start as _))
+    }
+
+    unsafe fn unmap_memory(&self, memory: &n::Memory) {
+        debug!("unmap_memory of size {}", memory.size);
+    }
+
+    unsafe fn flush_mapped_memory_ranges<'a, I, R>(&self, iter: I) -> Result<(), OutOfMemory>
+    where
+        I: IntoIterator,
+        I::Item: Borrow<(&'a n::Memory, R)>,
+        R: RangeArg<u64>,
+    {
+        debug!("flush_mapped_memory_ranges");
+        for item in iter {
+            let (memory, ref generic_range) = *item.borrow();
+            let range = memory.resolve(generic_range);
+            debug!("\trange {:?}", range);
+
+            match memory.heap {
+                n::MemoryHeap::Native(_) => unimplemented!(),
+                n::MemoryHeap::Public(mt, ref cpu_buffer)
+                    if 1 << mt.0 != MemoryTypes::SHARED.bits() as usize =>
+                {
+                    cpu_buffer.did_modify_range(NSRange {
+                        location: range.start as _,
+                        length: (range.end - range.start) as _,
+                    });
+                }
+                n::MemoryHeap::Public(..) => continue,
+                n::MemoryHeap::Private => panic!("Can't map private memory!"),
+            };
+        }
+
+        Ok(())
+    }
+
+    unsafe fn invalidate_mapped_memory_ranges<'a, I, R>(&self, iter: I) -> Result<(), OutOfMemory>
+    where
+        I: IntoIterator,
+        I::Item: Borrow<(&'a n::Memory, R)>,
+        R: RangeArg<u64>,
+    {
+        let mut num_syncs = 0;
+        debug!("invalidate_mapped_memory_ranges");
+
+        // Copy-back from GPU-managed memory requires a blit pass that
+        // synchronizes each managed buffer before the CPU reads it.
+        let cmd_queue = self.shared.queue.lock();
+        let cmd_buffer = cmd_queue.spawn_temp();
+        autoreleasepool(|| {
+            let encoder = cmd_buffer.new_blit_command_encoder();
+
+            for item in iter {
+                let (memory, ref generic_range) = *item.borrow();
+                let range = memory.resolve(generic_range);
+                debug!("\trange {:?}", range);
+
+                match memory.heap {
+                    n::MemoryHeap::Native(_) => unimplemented!(),
+                    n::MemoryHeap::Public(mt, ref cpu_buffer)
+                        if 1 << mt.0 != MemoryTypes::SHARED.bits() as usize =>
+                    {
+                        num_syncs += 1;
+                        encoder.synchronize_resource(cpu_buffer);
+                    }
+                    n::MemoryHeap::Public(..)
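+                    // Editorial note (not part of the original patch): shared (not
+                    // managed) buffers are CPU/GPU coherent, hence the `continue`
+                    // below; only managed storage needs the explicit round-trip:
+                    //     buf.did_modify_range(NSRange { location: 0, length: len }); // CPU -> GPU
+                    //     blit.synchronize_resource(&buf);                            // GPU -> CPU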
=> continue, + n::MemoryHeap::Private => panic!("Can't map private memory!"), + }; + } + encoder.end_encoding(); + }); + + if num_syncs != 0 { + debug!("\twaiting..."); + cmd_buffer.set_label("invalidate_mapped_memory_ranges"); + cmd_buffer.commit(); + cmd_buffer.wait_until_completed(); + } + + Ok(()) + } + + fn create_semaphore(&self) -> Result { + Ok(n::Semaphore { + + + system: if self.shared.private_caps.exposed_queues > 1 { + Some(n::SystemSemaphore::new()) + } else { + None + }, + image_ready: Arc::new(Mutex::new(None)), + }) + } + + unsafe fn create_descriptor_pool( + &self, + max_sets: usize, + descriptor_ranges: I, + _flags: pso::DescriptorPoolCreateFlags, + ) -> Result + where + I: IntoIterator, + I::Item: Borrow, + { + if self.shared.private_caps.argument_buffers { + let mut arguments = n::ArgumentArray::default(); + for desc_range in descriptor_ranges { + let dr = desc_range.borrow(); + let content = n::DescriptorContent::from(dr.ty); + let usage = n::ArgumentArray::describe_usage(dr.ty); + if content.contains(n::DescriptorContent::BUFFER) { + arguments.push(metal::MTLDataType::Pointer, dr.count, usage); + } + if content.contains(n::DescriptorContent::TEXTURE) { + arguments.push(metal::MTLDataType::Texture, dr.count, usage); + } + if content.contains(n::DescriptorContent::SAMPLER) { + arguments.push(metal::MTLDataType::Sampler, dr.count, usage); + } + } + + let device = self.shared.device.lock(); + let (array_ref, total_resources) = arguments.build(); + let encoder = device.new_argument_encoder(array_ref); + + let alignment = self.shared.private_caps.buffer_alignment; + let total_size = encoder.encoded_length() + (max_sets as u64) * alignment; + let raw = device.new_buffer(total_size, MTLResourceOptions::empty()); + + Ok(n::DescriptorPool::new_argument( + raw, + total_size, + alignment, + total_resources, + )) + } else { + let mut counters = n::ResourceData::::new(); + for desc_range in descriptor_ranges { + let dr = desc_range.borrow(); + counters.add_many( + n::DescriptorContent::from(dr.ty), + dr.count as pso::DescriptorBinding, + ); + } + Ok(n::DescriptorPool::new_emulated(counters)) + } + } + + unsafe fn create_descriptor_set_layout( + &self, + binding_iter: I, + immutable_samplers: J, + ) -> Result + where + I: IntoIterator, + I::Item: Borrow, + J: IntoIterator, + J::Item: Borrow, + { + if self.shared.private_caps.argument_buffers { + let mut stage_flags = pso::ShaderStageFlags::empty(); + let mut arguments = n::ArgumentArray::default(); + let mut bindings = FastHashMap::default(); + for desc in binding_iter { + let desc = desc.borrow(); + + + match desc.ty { + pso::DescriptorType::UniformBufferDynamic + | pso::DescriptorType::StorageBufferDynamic => { + + error!("Dynamic offsets are not yet supported in argument buffers!"); + } + pso::DescriptorType::StorageImage | pso::DescriptorType::StorageTexelBuffer => { + + error!("Storage images are not yet supported in argument buffers!"); + } + _ => {} + } + + stage_flags |= desc.stage_flags; + let content = n::DescriptorContent::from(desc.ty); + let usage = n::ArgumentArray::describe_usage(desc.ty); + let res = msl::ResourceBinding { + buffer_id: if content.contains(n::DescriptorContent::BUFFER) { + arguments.push(metal::MTLDataType::Pointer, desc.count, usage) as u32 + } else { + !0 + }, + texture_id: if content.contains(n::DescriptorContent::TEXTURE) { + arguments.push(metal::MTLDataType::Texture, desc.count, usage) as u32 + } else { + !0 + }, + sampler_id: if content.contains(n::DescriptorContent::SAMPLER) { + 
arguments.push(metal::MTLDataType::Sampler, desc.count, usage) as u32 + } else { + !0 + }, + }; + let res_offset = res.buffer_id.min(res.texture_id).min(res.sampler_id); + bindings.insert( + desc.binding, + n::ArgumentLayout { + res, + res_offset, + count: desc.count, + usage, + content, + }, + ); + } + + let (array_ref, arg_total) = arguments.build(); + let encoder = self.shared.device.lock().new_argument_encoder(array_ref); + + Ok(n::DescriptorSetLayout::ArgumentBuffer { + encoder, + stage_flags, + bindings: Arc::new(bindings), + total: arg_total as n::PoolResourceIndex, + }) + } else { + struct TempSampler { + data: msl::SamplerData, + binding: pso::DescriptorBinding, + array_index: pso::DescriptorArrayIndex, + }; + let mut immutable_sampler_iter = immutable_samplers.into_iter(); + let mut tmp_samplers = Vec::new(); + let mut desc_layouts = Vec::new(); + + for set_layout_binding in binding_iter { + let slb = set_layout_binding.borrow(); + let mut content = n::DescriptorContent::from(slb.ty); + + if slb.immutable_samplers { + tmp_samplers.extend( + immutable_sampler_iter + .by_ref() + .take(slb.count) + .enumerate() + .map(|(array_index, sm)| TempSampler { + data: sm.borrow().data.clone(), + binding: slb.binding, + array_index, + }), + ); + content |= n::DescriptorContent::IMMUTABLE_SAMPLER; + } + + desc_layouts.extend((0 .. slb.count).map(|array_index| n::DescriptorLayout { + content, + stages: slb.stage_flags, + binding: slb.binding, + array_index, + })); + } + + desc_layouts.sort_by_key(|dl| (dl.binding, dl.array_index)); + tmp_samplers.sort_by_key(|ts| (ts.binding, ts.array_index)); + + + desc_layouts.dedup_by(|a, b| { + if (a.binding, a.array_index) == (b.binding, b.array_index) { + debug_assert!(!b.stages.intersects(a.stages)); + debug_assert_eq!(a.content, b.content); + b.stages |= a.stages; + true + } else { + false + } + }); + + Ok(n::DescriptorSetLayout::Emulated( + Arc::new(desc_layouts), + tmp_samplers + .into_iter() + .map(|ts| (ts.binding, ts.data)) + .collect(), + )) + } + } + + unsafe fn write_descriptor_sets<'a, I, J>(&self, write_iter: I) + where + I: IntoIterator>, + J: IntoIterator, + J::Item: Borrow>, + { + debug!("write_descriptor_sets"); + for write in write_iter { + match *write.set { + n::DescriptorSet::Emulated { + ref pool, + ref layouts, + ref resources, + } => { + let mut counters = resources.map(|r| r.start); + let mut start = None; + for (i, layout) in layouts.iter().enumerate() { + if layout.binding == write.binding + && layout.array_index == write.array_offset + { + start = Some(i); + break; + } + counters.add(layout.content); + } + let mut data = pool.write(); + + for (layout, descriptor) in + layouts[start.unwrap() ..].iter().zip(write.descriptors) + { + trace!("\t{:?}", layout); + match *descriptor.borrow() { + pso::Descriptor::Sampler(sam) => { + debug_assert!(!layout + .content + .contains(n::DescriptorContent::IMMUTABLE_SAMPLER)); + data.samplers[counters.samplers as usize] = + Some(AsNative::from(sam.raw.as_ref().unwrap().as_ref())); + } + pso::Descriptor::Image(view, il) => { + data.textures[counters.textures as usize] = + Some((AsNative::from(view.texture.as_ref()), il)); + } + pso::Descriptor::CombinedImageSampler(view, il, sam) => { + if !layout + .content + .contains(n::DescriptorContent::IMMUTABLE_SAMPLER) + { + data.samplers[counters.samplers as usize] = + Some(AsNative::from(sam.raw.as_ref().unwrap().as_ref())); + } + data.textures[counters.textures as usize] = + Some((AsNative::from(view.texture.as_ref()), il)); + } + 
pso::Descriptor::UniformTexelBuffer(view) + | pso::Descriptor::StorageTexelBuffer(view) => { + data.textures[counters.textures as usize] = Some(( + AsNative::from(view.raw.as_ref()), + image::Layout::General, + )); + } + pso::Descriptor::Buffer(buf, ref desc_range) => { + let (raw, range) = buf.as_bound(); + if let Some(end) = desc_range.end { + debug_assert!(range.start + end <= range.end); + } + let start = range.start + desc_range.start.unwrap_or(0); + let pair = (AsNative::from(raw), start); + data.buffers[counters.buffers as usize] = Some(pair); + } + } + counters.add(layout.content); + } + } + n::DescriptorSet::ArgumentBuffer { + ref raw, + raw_offset, + ref pool, + ref range, + ref encoder, + ref bindings, + .. + } => { + debug_assert!(self.shared.private_caps.argument_buffers); + + encoder.set_argument_buffer(raw, raw_offset); + let mut arg_index = { + let binding = &bindings[&write.binding]; + debug_assert!((write.array_offset as usize) < binding.count); + (binding.res_offset as NSUInteger) + (write.array_offset as NSUInteger) + }; + + for (data, descriptor) in pool.write().resources + [range.start as usize + arg_index as usize .. range.end as usize] + .iter_mut() + .zip(write.descriptors) + { + match *descriptor.borrow() { + pso::Descriptor::Sampler(sampler) => { + debug_assert!(!bindings[&write.binding] + .content + .contains(n::DescriptorContent::IMMUTABLE_SAMPLER)); + encoder.set_sampler_state(sampler.raw.as_ref().unwrap(), arg_index); + arg_index += 1; + } + pso::Descriptor::Image(image, _layout) => { + let tex_ref = image.texture.as_ref(); + encoder.set_texture(tex_ref, arg_index); + data.ptr = (&**tex_ref).as_ptr(); + arg_index += 1; + } + pso::Descriptor::CombinedImageSampler(image, _il, sampler) => { + let binding = &bindings[&write.binding]; + if !binding + .content + .contains(n::DescriptorContent::IMMUTABLE_SAMPLER) + { + + + + assert!( + arg_index + < (binding.res_offset as NSUInteger) + + (binding.count as NSUInteger) + ); + encoder.set_sampler_state( + sampler.raw.as_ref().unwrap(), + arg_index + binding.count as NSUInteger, + ); + } + let tex_ref = image.texture.as_ref(); + encoder.set_texture(tex_ref, arg_index); + data.ptr = (&**tex_ref).as_ptr(); + } + pso::Descriptor::UniformTexelBuffer(view) + | pso::Descriptor::StorageTexelBuffer(view) => { + encoder.set_texture(&view.raw, arg_index); + data.ptr = (&**view.raw).as_ptr(); + arg_index += 1; + } + pso::Descriptor::Buffer(buffer, ref desc_range) => { + let (buf_raw, buf_range) = buffer.as_bound(); + encoder.set_buffer( + buf_raw, + buf_range.start + desc_range.start.unwrap_or(0), + arg_index, + ); + data.ptr = (&**buf_raw).as_ptr(); + arg_index += 1; + } + } + } + } + } + } + } + + unsafe fn copy_descriptor_sets<'a, I>(&self, copies: I) + where + I: IntoIterator, + I::Item: Borrow>, + { + for _copy in copies { + unimplemented!() + } + } + + unsafe fn destroy_descriptor_pool(&self, _pool: n::DescriptorPool) {} + + unsafe fn destroy_descriptor_set_layout(&self, _layout: n::DescriptorSetLayout) {} + + unsafe fn destroy_pipeline_layout(&self, _pipeline_layout: n::PipelineLayout) {} + + unsafe fn destroy_shader_module(&self, _module: n::ShaderModule) {} + + unsafe fn destroy_render_pass(&self, _pass: n::RenderPass) {} + + unsafe fn destroy_graphics_pipeline(&self, _pipeline: n::GraphicsPipeline) {} + + unsafe fn destroy_compute_pipeline(&self, _pipeline: n::ComputePipeline) {} + + unsafe fn destroy_framebuffer(&self, _buffer: n::Framebuffer) {} + + unsafe fn destroy_semaphore(&self, _semaphore: n::Semaphore) {} + + 
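+    // Editorial sketch (not part of the original patch): `MemoryTypes::describe`,
+    // used by `allocate_memory` below, decodes a memory-type index into a Metal
+    // (storage mode, CPU cache mode) pair, roughly:
+    //     fn describe(index: usize) -> (MTLStorageMode, MTLCPUCacheMode) {
+    //         match index {
+    //             0 => (MTLStorageMode::Private, MTLCPUCacheMode::DefaultCache),
+    //             1 => (MTLStorageMode::Shared,  MTLCPUCacheMode::DefaultCache),
+    //             _ => (MTLStorageMode::Managed, MTLCPUCacheMode::DefaultCache),
+    //         }
+    //     }
+    // The exact index layout here is an assumption; the real mapping lives in
+    // this backend's `MemoryTypes::describe`.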
unsafe fn allocate_memory( + &self, + memory_type: hal::MemoryTypeId, + size: u64, + ) -> Result { + let (storage, cache) = MemoryTypes::describe(memory_type.0); + let device = self.shared.device.lock(); + debug!("allocate_memory type {:?} of size {}", memory_type, size); + + + + let heap = if self.shared.private_caps.resource_heaps + && storage != MTLStorageMode::Shared + && false + { + let descriptor = metal::HeapDescriptor::new(); + descriptor.set_storage_mode(storage); + descriptor.set_cpu_cache_mode(cache); + descriptor.set_size(size); + let heap_raw = device.new_heap(&descriptor); + n::MemoryHeap::Native(heap_raw) + } else if storage == MTLStorageMode::Private { + n::MemoryHeap::Private + } else { + let options = conv::resource_options_from_storage_and_cache(storage, cache); + let cpu_buffer = device.new_buffer(size, options); + debug!("\tbacked by cpu buffer {:?}", cpu_buffer.as_ptr()); + n::MemoryHeap::Public(memory_type, cpu_buffer) + }; + + Ok(n::Memory::new(heap, size)) + } + + unsafe fn free_memory(&self, memory: n::Memory) { + debug!("free_memory of size {}", memory.size); + if let n::MemoryHeap::Public(_, ref cpu_buffer) = memory.heap { + debug!("\tbacked by cpu buffer {:?}", cpu_buffer.as_ptr()); + } + } + + unsafe fn create_buffer( + &self, + size: u64, + usage: buffer::Usage, + ) -> Result { + debug!("create_buffer of size {} and usage {:?}", size, usage); + Ok(n::Buffer::Unbound { usage, size, name: String::new() }) + } + + unsafe fn get_buffer_requirements(&self, buffer: &n::Buffer) -> memory::Requirements { + let (size, usage) = match *buffer { + n::Buffer::Unbound { size, usage, .. } => (size, usage), + n::Buffer::Bound { .. } => panic!("Unexpected Buffer::Bound"), + }; + let mut max_size = size; + let mut max_alignment = self.shared.private_caps.buffer_alignment; + + if self.shared.private_caps.resource_heaps { + + + for (i, _mt) in self.memory_types.iter().enumerate() { + let (storage, cache) = MemoryTypes::describe(i); + let options = conv::resource_options_from_storage_and_cache(storage, cache); + let requirements = self + .shared + .device + .lock() + .heap_buffer_size_and_align(size, options); + max_size = cmp::max(max_size, requirements.size); + max_alignment = cmp::max(max_alignment, requirements.align); + } + } + + + + const SIZE_MASK: u64 = 0xFF; + let supports_texel_view = + usage.intersects(buffer::Usage::UNIFORM_TEXEL | buffer::Usage::STORAGE_TEXEL); + + memory::Requirements { + size: (max_size + SIZE_MASK) & !SIZE_MASK, + alignment: max_alignment, + type_mask: if !supports_texel_view || self.shared.private_caps.shared_textures { + MemoryTypes::all().bits() + } else { + (MemoryTypes::all() ^ MemoryTypes::SHARED).bits() + }, + } + } + + unsafe fn bind_buffer_memory( + &self, + memory: &n::Memory, + offset: u64, + buffer: &mut n::Buffer, + ) -> Result<(), BindError> { + let (size, name) = match buffer { + n::Buffer::Unbound { size, name, .. } => (*size, name), + n::Buffer::Bound { .. } => panic!("Unexpected Buffer::Bound"), + }; + debug!("bind_buffer_memory of size {} at offset {}", size, offset); + *buffer = match memory.heap { + n::MemoryHeap::Native(ref heap) => { + let options = conv::resource_options_from_storage_and_cache( + heap.storage_mode(), + heap.cpu_cache_mode(), + ); + let raw = heap.new_buffer(size, options).unwrap_or_else(|| { + + self.shared.device.lock().new_buffer(size, options) + }); + raw.set_label(name); + n::Buffer::Bound { + raw, + options, + range: 0 .. 
size,
+                }
+            }
+            n::MemoryHeap::Public(mt, ref cpu_buffer) => {
+                debug!(
+                    "\tmapped to public heap with address {:?}",
+                    cpu_buffer.as_ptr()
+                );
+                let (storage, cache) = MemoryTypes::describe(mt.0);
+                let options = conv::resource_options_from_storage_and_cache(storage, cache);
+                if offset == 0x0 && size == cpu_buffer.length() {
+                    cpu_buffer.set_label(name);
+                } else {
+                    cpu_buffer.add_debug_marker(name, NSRange { location: offset, length: size });
+                }
+                n::Buffer::Bound {
+                    raw: cpu_buffer.clone(),
+                    options,
+                    range: offset .. offset + size,
+                }
+            }
+            n::MemoryHeap::Private => {
+                // Private memory has no backing heap object,
+                // so each buffer gets its own private MTLBuffer.
+                let options = MTLResourceOptions::StorageModePrivate
+                    | MTLResourceOptions::CPUCacheModeDefaultCache;
+                let raw = self.shared.device.lock().new_buffer(size, options);
+                raw.set_label(name);
+                n::Buffer::Bound {
+                    raw,
+                    options,
+                    range: 0 .. size,
+                }
+            }
+        };
+
+        Ok(())
+    }
+
+    unsafe fn destroy_buffer(&self, buffer: n::Buffer) {
+        if let n::Buffer::Bound { raw, range, .. } = buffer {
+            debug!(
+                "destroy_buffer {:?} occupying memory {:?}",
+                raw.as_ptr(),
+                range
+            );
+        }
+    }
+
+    unsafe fn create_buffer_view<R: RangeArg<u64>>(
+        &self,
+        buffer: &n::Buffer,
+        format_maybe: Option<format::Format>,
+        range: R,
+    ) -> Result<n::BufferView, buffer::ViewCreationError> {
+        let (raw, base_range, options) = match *buffer {
+            n::Buffer::Bound {
+                ref raw,
+                ref range,
+                options,
+            } => (raw, range, options),
+            n::Buffer::Unbound { .. } => panic!("Unexpected Buffer::Unbound"),
+        };
+        let start = base_range.start + *range.start().unwrap_or(&0);
+        let end_rough = match range.end() {
+            Some(end) => base_range.start + end,
+            None => base_range.end,
+        };
+        let format = match format_maybe {
+            Some(fmt) => fmt,
+            None => {
+                return Err(buffer::ViewCreationError::UnsupportedFormat {
+                    format: format_maybe,
+                });
+            }
+        };
+        let format_desc = format.surface_desc();
+        if format_desc.aspects != format::Aspects::COLOR || format_desc.is_compressed() {
+            // Only uncompressed color formats can be viewed as texels.
+            return Err(buffer::ViewCreationError::UnsupportedFormat {
+                format: format_maybe,
+            });
+        }
+
+        // Build a 2D texture over the buffer contents with rows of maximum width,
+        // since Metal has no direct analog of Vulkan's texel buffer views.
+        let texel_count = (end_rough - start) * 8 / format_desc.bits as u64;
+        let col_count = cmp::min(texel_count, self.shared.private_caps.max_texture_size);
+        let row_count = (texel_count + self.shared.private_caps.max_texture_size - 1)
+            / self.shared.private_caps.max_texture_size;
+        let mtl_format = self.shared.private_caps.map_format(format).ok_or(
+            buffer::ViewCreationError::UnsupportedFormat {
+                format: format_maybe,
+            },
+        )?;
+
+        let descriptor = metal::TextureDescriptor::new();
+        descriptor.set_texture_type(MTLTextureType::D2);
+        descriptor.set_width(col_count);
+        descriptor.set_height(row_count);
+        descriptor.set_mipmap_level_count(1);
+        descriptor.set_pixel_format(mtl_format);
+        descriptor.set_resource_options(options);
+        descriptor.set_storage_mode(raw.storage_mode());
+        descriptor.set_usage(metal::MTLTextureUsage::ShaderRead);
+
+        let align_mask = self.shared.private_caps.buffer_alignment - 1;
+        let stride = (col_count * (format_desc.bits as u64 / 8) + align_mask) & !align_mask;
+
+        Ok(n::BufferView {
+            raw: raw.new_texture_from_contents(&descriptor, start, stride),
+        })
+    }
+
+    unsafe fn destroy_buffer_view(&self, _view: n::BufferView) {
+        // Nothing explicit to do; dropping releases the texture.
+    }
+
+    unsafe fn create_image(
+        &self,
+        kind: image::Kind,
+        mip_levels: image::Level,
+        format: format::Format,
+        tiling: image::Tiling,
+        usage: image::Usage,
+        view_caps: image::ViewCapabilities,
+    ) -> Result<n::Image, image::CreationError> {
+        debug!(
+            "create_image {:?} with {} mips of {:?} {:?} and usage {:?}",
+            kind, mip_levels, format, tiling, usage
+        );
+
+        let is_cube = view_caps.contains(image::ViewCapabilities::KIND_CUBE);
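+        // Editorial sketch (not part of the original patch): the texel packing in
+        // `create_buffer_view` above, with assumed numbers: a 64 KiB view of an
+        // R32Sfloat buffer (bits = 32) holds 16384 texels; with
+        // max_texture_size = 16384 that gives
+        //     col_count = 16384, row_count = (16384 + 16383) / 16384 = 1,
+        // i.e. a single 16384x1 2D texture laid over the buffer contents.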
+ let mtl_format = self + .shared + .private_caps + .map_format(format) + .ok_or_else(|| image::CreationError::Format(format))?; + + let descriptor = metal::TextureDescriptor::new(); + + let (mtl_type, num_layers) = match kind { + image::Kind::D1(_, 1) => { + assert!(!is_cube); + (MTLTextureType::D1, None) + } + image::Kind::D1(_, layers) => { + assert!(!is_cube); + (MTLTextureType::D1Array, Some(layers)) + } + image::Kind::D2(_, _, layers, 1) => { + if is_cube && layers > 6 { + assert_eq!(layers % 6, 0); + (MTLTextureType::CubeArray, Some(layers / 6)) + } else if is_cube { + assert_eq!(layers, 6); + (MTLTextureType::Cube, None) + } else if layers > 1 { + (MTLTextureType::D2Array, Some(layers)) + } else { + (MTLTextureType::D2, None) + } + } + image::Kind::D2(_, _, 1, samples) if !is_cube => { + descriptor.set_sample_count(samples as u64); + (MTLTextureType::D2Multisample, None) + } + image::Kind::D2(..) => { + error!( + "Multi-sampled array textures or cubes are not supported: {:?}", + kind + ); + return Err(image::CreationError::Kind); + } + image::Kind::D3(..) => { + assert!(!is_cube && !view_caps.contains(image::ViewCapabilities::KIND_2D_ARRAY)); + (MTLTextureType::D3, None) + } + }; + + descriptor.set_texture_type(mtl_type); + if let Some(count) = num_layers { + descriptor.set_array_length(count as u64); + } + let extent = kind.extent(); + descriptor.set_width(extent.width as u64); + descriptor.set_height(extent.height as u64); + descriptor.set_depth(extent.depth as u64); + descriptor.set_mipmap_level_count(mip_levels as u64); + descriptor.set_pixel_format(mtl_format); + descriptor.set_usage(conv::map_texture_usage(usage, tiling)); + + let base = format.base_format(); + let format_desc = base.0.desc(); + let mip_sizes = (0 .. mip_levels) + .map(|level| { + let pitches = n::Image::pitches_impl(extent.at_level(level), format_desc); + num_layers.unwrap_or(1) as buffer::Offset * pitches[3] + }) + .collect(); + + let host_usage = image::Usage::TRANSFER_SRC | image::Usage::TRANSFER_DST; + let host_visible = mtl_type == MTLTextureType::D2 + && mip_levels == 1 + && num_layers.is_none() + && format_desc.aspects.contains(format::Aspects::COLOR) + && tiling == image::Tiling::Linear + && host_usage.contains(usage); + + Ok(n::Image { + like: n::ImageLike::Unbound { + descriptor, + mip_sizes, + host_visible, + name: String::new(), + }, + kind, + format_desc, + shader_channel: base.1.into(), + mtl_format, + mtl_type, + }) + } + + unsafe fn get_image_requirements(&self, image: &n::Image) -> memory::Requirements { + let (descriptor, mip_sizes, host_visible) = match image.like { + n::ImageLike::Unbound { + ref descriptor, + ref mip_sizes, + host_visible, + .. + } => (descriptor, mip_sizes, host_visible), + n::ImageLike::Texture(..) | n::ImageLike::Buffer(..) 
=> { + panic!("Expected Image::Unbound") + } + }; + + if self.shared.private_caps.resource_heaps { + + + let mut max_size = 0; + let mut max_alignment = 0; + let types = if host_visible { + MemoryTypes::all() + } else { + MemoryTypes::PRIVATE + }; + for (i, _) in self.memory_types.iter().enumerate() { + if !types.contains(MemoryTypes::from_bits(1 << i).unwrap()) { + continue; + } + let (storage, cache_mode) = MemoryTypes::describe(i); + descriptor.set_storage_mode(storage); + descriptor.set_cpu_cache_mode(cache_mode); + + let requirements = self + .shared + .device + .lock() + .heap_texture_size_and_align(descriptor); + max_size = cmp::max(max_size, requirements.size); + max_alignment = cmp::max(max_alignment, requirements.align); + } + memory::Requirements { + size: max_size, + alignment: max_alignment, + type_mask: types.bits(), + } + } else if host_visible { + assert_eq!(mip_sizes.len(), 1); + let mask = self.shared.private_caps.buffer_alignment - 1; + memory::Requirements { + size: (mip_sizes[0] + mask) & !mask, + alignment: self.shared.private_caps.buffer_alignment, + type_mask: MemoryTypes::all().bits(), + } + } else { + memory::Requirements { + size: mip_sizes.iter().sum(), + alignment: 4, + type_mask: MemoryTypes::PRIVATE.bits(), + } + } + } + + unsafe fn get_image_subresource_footprint( + &self, + image: &n::Image, + sub: image::Subresource, + ) -> image::SubresourceFootprint { + let num_layers = image.kind.num_layers() as buffer::Offset; + let level_offset = (0 .. sub.level).fold(0, |offset, level| { + let pitches = image.pitches(level); + offset + num_layers * pitches[3] + }); + let pitches = image.pitches(sub.level); + let layer_offset = level_offset + sub.layer as buffer::Offset * pitches[3]; + image::SubresourceFootprint { + slice: layer_offset .. layer_offset + pitches[3], + row_pitch: pitches[1] as _, + depth_pitch: pitches[2] as _, + array_pitch: pitches[3] as _, + } + } + + unsafe fn bind_image_memory( + &self, + memory: &n::Memory, + offset: u64, + image: &mut n::Image, + ) -> Result<(), BindError> { + let like = { + let (descriptor, mip_sizes, name) = match image.like { + n::ImageLike::Unbound { + ref descriptor, + ref mip_sizes, + ref name, + .. + } => (descriptor, mip_sizes, name), + n::ImageLike::Texture(..) | n::ImageLike::Buffer(..) => { + panic!("Expected Image::Unbound") + } + }; + + match memory.heap { + n::MemoryHeap::Native(ref heap) => { + let resource_options = conv::resource_options_from_storage_and_cache( + heap.storage_mode(), + heap.cpu_cache_mode(), + ); + descriptor.set_resource_options(resource_options); + n::ImageLike::Texture(heap.new_texture(descriptor).unwrap_or_else(|| { + + let texture = self.shared.device.lock().new_texture(&descriptor); + texture.set_label(name); + texture + })) + } + n::MemoryHeap::Public(_memory_type, ref cpu_buffer) => { + assert_eq!(mip_sizes.len(), 1); + if offset == 0x0 && cpu_buffer.length() == mip_sizes[0] { + cpu_buffer.set_label(name); + } else { + cpu_buffer.add_debug_marker(name, NSRange { location: offset, length: mip_sizes[0] }); + } + n::ImageLike::Buffer(n::Buffer::Bound { + raw: cpu_buffer.clone(), + range: offset .. 
offset + mip_sizes[0] as u64, + options: MTLResourceOptions::StorageModeShared, + }) + } + n::MemoryHeap::Private => { + descriptor.set_storage_mode(MTLStorageMode::Private); + let texture = self.shared.device.lock().new_texture(descriptor); + texture.set_label(name); + n::ImageLike::Texture(texture) + } + } + }; + + Ok(image.like = like) + } + + unsafe fn destroy_image(&self, _image: n::Image) { + + } + + unsafe fn create_image_view( + &self, + image: &n::Image, + kind: image::ViewKind, + format: format::Format, + swizzle: format::Swizzle, + range: image::SubresourceRange, + ) -> Result { + let mtl_format = match self + .shared + .private_caps + .map_format_with_swizzle(format, swizzle) + { + Some(f) => f, + None => { + error!("failed to swizzle format {:?} with {:?}", format, swizzle); + return Err(image::ViewError::BadFormat(format)); + } + }; + let raw = image.like.as_texture(); + let full_range = image::SubresourceRange { + aspects: image.format_desc.aspects, + levels: 0 .. raw.mipmap_level_count() as image::Level, + layers: 0 .. image.kind.num_layers(), + }; + let mtl_type = if image.mtl_type == MTLTextureType::D2Multisample { + if kind != image::ViewKind::D2 { + error!("Requested {:?} for MSAA texture", kind); + } + image.mtl_type + } else { + conv::map_texture_type(kind) + }; + + let texture = if mtl_format == image.mtl_format + && mtl_type == image.mtl_type + && swizzle == format::Swizzle::NO + && range == full_range + { + + + raw.to_owned() + } else { + raw.new_texture_view_from_slice( + mtl_format, + mtl_type, + NSRange { + location: range.levels.start as _, + length: (range.levels.end - range.levels.start) as _, + }, + NSRange { + location: range.layers.start as _, + length: (range.layers.end - range.layers.start) as _, + }, + ) + }; + + Ok(n::ImageView { + texture, + mtl_format, + }) + } + + unsafe fn destroy_image_view(&self, _view: n::ImageView) {} + + fn create_fence(&self, signaled: bool) -> Result { + let cell = RefCell::new(n::FenceInner::Idle { signaled }); + debug!( + "Creating fence ptr {:?} with signal={}", + cell.as_ptr(), + signaled + ); + Ok(n::Fence(cell)) + } + + unsafe fn reset_fence(&self, fence: &n::Fence) -> Result<(), OutOfMemory> { + debug!("Resetting fence ptr {:?}", fence.0.as_ptr()); + fence.0.replace(n::FenceInner::Idle { signaled: false }); + Ok(()) + } + + unsafe fn wait_for_fence( + &self, + fence: &n::Fence, + timeout_ns: u64, + ) -> Result { + unsafe fn to_ns(duration: time::Duration) -> u64 { + duration.as_secs() * 1_000_000_000 + duration.subsec_nanos() as u64 + } + + debug!("wait_for_fence {:?} for {} ms", fence, timeout_ns); + match *fence.0.borrow() { + n::FenceInner::Idle { signaled } => { + if !signaled { + warn!( + "Fence ptr {:?} is not pending, waiting not possible", + fence.0.as_ptr() + ); + } + Ok(signaled) + } + n::FenceInner::PendingSubmission(ref cmd_buf) => { + if timeout_ns == !0 { + cmd_buf.wait_until_completed(); + return Ok(true); + } + let start = time::Instant::now(); + loop { + if let metal::MTLCommandBufferStatus::Completed = cmd_buf.status() { + return Ok(true); + } + if to_ns(start.elapsed()) >= timeout_ns { + return Ok(false); + } + thread::sleep(time::Duration::from_millis(1)); + self.shared.queue_blocker.lock().triage(); + } + } + n::FenceInner::AcquireFrame { + ref swapchain_image, + iteration, + } => { + if swapchain_image.iteration() > iteration { + Ok(true) + } else if timeout_ns == 0 { + Ok(false) + } else { + swapchain_image.wait_until_ready(); + Ok(true) + } + } + } + } + + unsafe fn get_fence_status(&self, 
fence: &n::Fence) -> Result<bool, DeviceLost> {
+        Ok(match *fence.0.borrow() {
+            n::FenceInner::Idle { signaled } => signaled,
+            n::FenceInner::PendingSubmission(ref cmd_buf) => match cmd_buf.status() {
+                metal::MTLCommandBufferStatus::Completed => true,
+                _ => false,
+            },
+            n::FenceInner::AcquireFrame {
+                ref swapchain_image,
+                iteration,
+            } => swapchain_image.iteration() > iteration,
+        })
+    }
+
+    unsafe fn destroy_fence(&self, _fence: n::Fence) {
+        // Dropping the fence is enough.
+    }
+
+    fn create_event(&self) -> Result<n::Event, OutOfMemory> {
+        Ok(n::Event(Arc::new(AtomicBool::new(false))))
+    }
+
+    unsafe fn get_event_status(&self, event: &n::Event) -> Result<bool, OomOrDeviceLost> {
+        Ok(event.0.load(Ordering::Acquire))
+    }
+
+    unsafe fn set_event(&self, event: &n::Event) -> Result<(), OutOfMemory> {
+        event.0.store(true, Ordering::Release);
+        self.shared.queue_blocker.lock().triage();
+        Ok(())
+    }
+
+    unsafe fn reset_event(&self, event: &n::Event) -> Result<(), OutOfMemory> {
+        Ok(event.0.store(false, Ordering::Release))
+    }
+
+    unsafe fn destroy_event(&self, _event: n::Event) {
+        // Dropping the event is enough.
+    }
+
+    unsafe fn create_query_pool(
+        &self,
+        ty: query::Type,
+        count: query::Id,
+    ) -> Result<n::QueryPool, query::CreationError> {
+        match ty {
+            query::Type::Occlusion => {
+                let range = self
+                    .shared
+                    .visibility
+                    .allocator
+                    .lock()
+                    .allocate_range(count)
+                    .map_err(|_| {
+                        error!("Not enough space to allocate an occlusion query pool");
+                        OutOfMemory::Host
+                    })?;
+                Ok(n::QueryPool::Occlusion(range))
+            }
+            _ => {
+                error!("Only occlusion queries are currently supported");
+                Err(query::CreationError::Unsupported(ty))
+            }
+        }
+    }
+
+    unsafe fn destroy_query_pool(&self, pool: n::QueryPool) {
+        match pool {
+            n::QueryPool::Occlusion(range) => {
+                self.shared.visibility.allocator.lock().free_range(range);
+            }
+        }
+    }
+
+    unsafe fn get_query_pool_results(
+        &self,
+        pool: &n::QueryPool,
+        queries: Range<query::Id>,
+        data: &mut [u8],
+        stride: buffer::Offset,
+        flags: query::ResultFlags,
+    ) -> Result<bool, OomOrDeviceLost> {
+        let is_ready = match *pool {
+            n::QueryPool::Occlusion(ref pool_range) => {
+                let visibility = &self.shared.visibility;
+                let is_ready = if flags.contains(query::ResultFlags::WAIT) {
+                    let mut guard = visibility.allocator.lock();
+                    while !visibility.are_available(pool_range.start, &queries) {
+                        visibility.condvar.wait(&mut guard);
+                    }
+                    true
+                } else {
+                    visibility.are_available(pool_range.start, &queries)
+                };
+
+                let size_data = mem::size_of::<u64>() as buffer::Offset;
+                if stride == size_data
+                    && flags.contains(query::ResultFlags::BITS_64)
+                    && !flags.contains(query::ResultFlags::WITH_AVAILABILITY)
+                {
+                    // Fast path: the caller's layout matches ours exactly,
+                    // so the results can be copied as one contiguous block.
+                    ptr::copy_nonoverlapping(
+                        (visibility.buffer.contents() as *const u8).offset(
+                            (pool_range.start + queries.start) as isize * size_data as isize,
+                        ),
+                        data.as_mut_ptr(),
+                        stride as usize * (queries.end - queries.start) as usize,
+                    );
+                } else {
+                    // Slow path: convert each result to the requested layout.
+                    for i in 0 ..
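+                    // Editorial sketch (not part of the original patch): the per-query
+                    // conversion below writes at data[i * stride]. With BITS_64 |
+                    // WITH_AVAILABILITY and stride = 16, query i is laid out as
+                    //     [ value: u64 ][ availability: u64 ]
+                    // while 32-bit results truncate `value` with `as u32`.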
queries.end - queries.start { + let absolute_index = (pool_range.start + queries.start + i) as isize; + let value = + *(visibility.buffer.contents() as *const u64).offset(absolute_index); + let base = (visibility.buffer.contents() as *const u8) + .offset(visibility.availability_offset as isize); + let availability = *(base as *const u32).offset(absolute_index); + let data_ptr = data[i as usize * stride as usize ..].as_mut_ptr(); + if flags.contains(query::ResultFlags::BITS_64) { + *(data_ptr as *mut u64) = value; + if flags.contains(query::ResultFlags::WITH_AVAILABILITY) { + *(data_ptr as *mut u64).offset(1) = availability as u64; + } + } else { + *(data_ptr as *mut u32) = value as u32; + if flags.contains(query::ResultFlags::WITH_AVAILABILITY) { + *(data_ptr as *mut u32).offset(1) = availability; + } + } + } + } + + is_ready + } + }; + + Ok(is_ready) + } + + unsafe fn create_swapchain( + &self, + surface: &mut Surface, + config: window::SwapchainConfig, + old_swapchain: Option, + ) -> Result<(Swapchain, Vec), window::CreationError> { + Ok(self.build_swapchain(surface, config, old_swapchain)) + } + + unsafe fn destroy_swapchain(&self, _swapchain: Swapchain) {} + + fn wait_idle(&self) -> Result<(), OutOfMemory> { + command::QueueInner::wait_idle(&self.shared.queue); + Ok(()) + } + + unsafe fn set_image_name(&self, image: &mut n::Image, name: &str) { + match image { + n::Image { like: n::ImageLike::Buffer(ref mut buf), .. } => self.set_buffer_name(buf, name), + n::Image { like: n::ImageLike::Texture(ref tex), .. } => tex.set_label(name), + n::Image { like: n::ImageLike::Unbound { name: ref mut unbound_name, .. }, .. } => { + *unbound_name = name.to_string(); + } + }; + } + + unsafe fn set_buffer_name(&self, buffer: &mut n::Buffer, name: &str) { + match buffer { + n::Buffer::Unbound { name: ref mut unbound_name, .. } => { + *unbound_name = name.to_string(); + }, + n::Buffer::Bound { ref raw, ref range, .. 
} => { + raw.add_debug_marker( + name, + NSRange { location: range.start, length: range.end - range.start } + ); + } + } + } + + unsafe fn set_command_buffer_name( + &self, + command_buffer: &mut command::CommandBuffer, + name: &str, + ) { + command_buffer.name = name.to_string(); + } + + unsafe fn set_semaphore_name(&self, _semaphore: &mut n::Semaphore, _name: &str) { + } + + unsafe fn set_fence_name(&self, _fence: &mut n::Fence, _name: &str) { + } + + unsafe fn set_framebuffer_name(&self, _framebuffer: &mut n::Framebuffer, _name: &str) { + } + + unsafe fn set_render_pass_name(&self, render_pass: &mut n::RenderPass, name: &str) { + render_pass.name = name.to_string(); + } + + unsafe fn set_descriptor_set_name(&self, _descriptor_set: &mut n::DescriptorSet, _name: &str) { + + } + + unsafe fn set_descriptor_set_layout_name( + &self, + _descriptor_set_layout: &mut n::DescriptorSetLayout, + _name: &str, + ) { + + } +} + +#[test] +fn test_send_sync() { + fn foo() {} + foo::() +} diff --git a/third_party/rust/gfx-backend-metal/src/internal.rs b/third_party/rust/gfx-backend-metal/src/internal.rs new file mode 100644 index 000000000000..d9f3371622ea --- /dev/null +++ b/third_party/rust/gfx-backend-metal/src/internal.rs @@ -0,0 +1,569 @@ +use crate::{conversions as conv, PrivateCapabilities}; + +use auxil::FastHashMap; +use hal::{ + command::ClearColor, + format::{Aspects, ChannelType}, + image::Filter, + pso, +}; + +use metal; +use parking_lot::{Mutex, RawRwLock}; +use storage_map::{StorageMap, StorageMapGuard}; + +use std::mem; + + +pub type FastStorageMap = StorageMap>; +pub type FastStorageGuard<'a, V> = StorageMapGuard<'a, RawRwLock, V>; + +#[derive(Clone, Debug)] +pub struct ClearVertex { + pub pos: [f32; 4], +} + +#[derive(Clone, Debug)] +pub struct BlitVertex { + pub uv: [f32; 4], + pub pos: [f32; 4], +} + +#[derive(Debug, Clone, Copy, Hash, PartialEq, Eq)] +pub enum Channel { + Float, + Int, + Uint, +} + +impl From for Channel { + fn from(channel_type: ChannelType) -> Self { + match channel_type { + ChannelType::Unorm + | ChannelType::Snorm + | ChannelType::Ufloat + | ChannelType::Sfloat + | ChannelType::Uscaled + | ChannelType::Sscaled + | ChannelType::Srgb => Channel::Float, + ChannelType::Uint => Channel::Uint, + ChannelType::Sint => Channel::Int, + } + } +} + +impl Channel { + pub fn interpret(self, raw: ClearColor) -> metal::MTLClearColor { + unsafe { + match self { + Channel::Float => metal::MTLClearColor::new( + raw.float32[0] as _, + raw.float32[1] as _, + raw.float32[2] as _, + raw.float32[3] as _, + ), + Channel::Int => metal::MTLClearColor::new( + raw.sint32[0] as _, + raw.sint32[1] as _, + raw.sint32[2] as _, + raw.sint32[3] as _, + ), + Channel::Uint => metal::MTLClearColor::new( + raw.uint32[0] as _, + raw.uint32[1] as _, + raw.uint32[2] as _, + raw.uint32[3] as _, + ), + } + } + } +} + +#[derive(Debug)] +pub struct SamplerStates { + nearest: metal::SamplerState, + linear: metal::SamplerState, +} + +impl SamplerStates { + fn new(device: &metal::DeviceRef) -> Self { + let desc = metal::SamplerDescriptor::new(); + desc.set_min_filter(metal::MTLSamplerMinMagFilter::Nearest); + desc.set_mag_filter(metal::MTLSamplerMinMagFilter::Nearest); + desc.set_mip_filter(metal::MTLSamplerMipFilter::Nearest); + let nearest = device.new_sampler(&desc); + desc.set_min_filter(metal::MTLSamplerMinMagFilter::Linear); + desc.set_mag_filter(metal::MTLSamplerMinMagFilter::Linear); + let linear = device.new_sampler(&desc); + + SamplerStates { nearest, linear } + } + + pub fn get(&self, filter: Filter) 
-> &metal::SamplerStateRef { + match filter { + Filter::Nearest => &self.nearest, + Filter::Linear => &self.linear, + } + } +} + +#[derive(Debug)] +pub struct DepthStencilStates { + map: FastStorageMap, + write_none: pso::DepthStencilDesc, + write_depth: pso::DepthStencilDesc, + write_stencil: pso::DepthStencilDesc, + write_all: pso::DepthStencilDesc, +} + +impl DepthStencilStates { + fn new(device: &metal::DeviceRef) -> Self { + let write_none = pso::DepthStencilDesc { + depth: None, + depth_bounds: false, + stencil: None, + }; + let write_depth = pso::DepthStencilDesc { + depth: Some(pso::DepthTest { + fun: pso::Comparison::Always, + write: true, + }), + depth_bounds: false, + stencil: None, + }; + let face = pso::StencilFace { + fun: pso::Comparison::Always, + op_fail: pso::StencilOp::Replace, + op_depth_fail: pso::StencilOp::Replace, + op_pass: pso::StencilOp::Replace, + }; + let write_stencil = pso::DepthStencilDesc { + depth: None, + depth_bounds: false, + stencil: Some(pso::StencilTest { + faces: pso::Sided::new(face), + ..pso::StencilTest::default() + }), + }; + let write_all = pso::DepthStencilDesc { + depth: Some(pso::DepthTest { + fun: pso::Comparison::Always, + write: true, + }), + depth_bounds: false, + stencil: Some(pso::StencilTest { + faces: pso::Sided::new(face), + ..pso::StencilTest::default() + }), + }; + + let map = FastStorageMap::default(); + for desc in &[&write_none, &write_depth, &write_stencil, &write_all] { + map.get_or_create_with(*desc, || { + let raw_desc = Self::create_desc(desc).unwrap(); + device.new_depth_stencil_state(&raw_desc) + }); + } + + DepthStencilStates { + map, + write_none, + write_depth, + write_stencil, + write_all, + } + } + + pub fn get_write(&self, aspects: Aspects) -> FastStorageGuard { + let key = if aspects.contains(Aspects::DEPTH | Aspects::STENCIL) { + &self.write_all + } else if aspects.contains(Aspects::DEPTH) { + &self.write_depth + } else if aspects.contains(Aspects::STENCIL) { + &self.write_stencil + } else { + &self.write_none + }; + self.map.get_or_create_with(key, || unreachable!()) + } + + pub fn prepare(&self, desc: &pso::DepthStencilDesc, device: &metal::DeviceRef) { + self.map.prepare_maybe(desc, || { + Self::create_desc(desc).map(|raw_desc| device.new_depth_stencil_state(&raw_desc)) + }); + } + + + pub fn get( + &self, + desc: pso::DepthStencilDesc, + device: &Mutex, + ) -> FastStorageGuard { + self.map.get_or_create_with(&desc, || { + let raw_desc = Self::create_desc(&desc).expect("Incomplete descriptor provided"); + device.lock().new_depth_stencil_state(&raw_desc) + }) + } + + fn create_stencil( + face: &pso::StencilFace, + read_mask: pso::StencilValue, + write_mask: pso::StencilValue, + ) -> metal::StencilDescriptor { + let desc = metal::StencilDescriptor::new(); + desc.set_stencil_compare_function(conv::map_compare_function(face.fun)); + desc.set_read_mask(read_mask); + desc.set_write_mask(write_mask); + desc.set_stencil_failure_operation(conv::map_stencil_op(face.op_fail)); + desc.set_depth_failure_operation(conv::map_stencil_op(face.op_depth_fail)); + desc.set_depth_stencil_pass_operation(conv::map_stencil_op(face.op_pass)); + desc + } + + fn create_desc(desc: &pso::DepthStencilDesc) -> Option { + let raw = metal::DepthStencilDescriptor::new(); + + if let Some(ref stencil) = desc.stencil { + let read_masks = match stencil.read_masks { + pso::State::Static(value) => value, + pso::State::Dynamic => return None, + }; + let write_masks = match stencil.write_masks { + pso::State::Static(value) => value, + 
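+                // Dynamic masks cannot be baked into an immutable
+                // MTLDepthStencilState, so return None here and let the
+                // caller create the state once concrete values are known.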
pso::State::Dynamic => return None, + }; + let front_desc = + Self::create_stencil(&stencil.faces.front, read_masks.front, write_masks.front); + raw.set_front_face_stencil(Some(&front_desc)); + let back_desc = if stencil.faces.front == stencil.faces.back + && read_masks.front == read_masks.back + && write_masks.front == write_masks.back + { + front_desc + } else { + Self::create_stencil(&stencil.faces.back, read_masks.back, write_masks.back) + }; + raw.set_back_face_stencil(Some(&back_desc)); + } + + if let Some(ref depth) = desc.depth { + raw.set_depth_compare_function(conv::map_compare_function(depth.fun)); + raw.set_depth_write_enabled(depth.write); + } + + Some(raw) + } +} + +#[derive(Debug, Clone, Copy, Hash, PartialEq, Eq)] +pub struct ClearKey { + pub framebuffer_aspects: Aspects, + pub color_formats: [metal::MTLPixelFormat; 1], + pub depth_stencil_format: metal::MTLPixelFormat, + pub target_index: Option<(u8, Channel)>, +} + +#[derive(Debug)] +pub struct ImageClearPipes { + map: FastStorageMap, +} + +impl ImageClearPipes { + pub(crate) fn get( + &self, + key: ClearKey, + library: &Mutex, + device: &Mutex, + private_caps: &PrivateCapabilities, + ) -> FastStorageGuard { + self.map.get_or_create_with(&key, || { + Self::create(key, &*library.lock(), &*device.lock(), private_caps) + }) + } + + fn create( + key: ClearKey, + library: &metal::LibraryRef, + device: &metal::DeviceRef, + private_caps: &PrivateCapabilities, + ) -> metal::RenderPipelineState { + let pipeline = metal::RenderPipelineDescriptor::new(); + if private_caps.layered_rendering { + pipeline.set_input_primitive_topology(metal::MTLPrimitiveTopologyClass::Triangle); + } + + let vs_clear = library.get_function("vs_clear", None).unwrap(); + pipeline.set_vertex_function(Some(&vs_clear)); + + if key.framebuffer_aspects.contains(Aspects::COLOR) { + for (i, &format) in key.color_formats.iter().enumerate() { + pipeline + .color_attachments() + .object_at(i) + .unwrap() + .set_pixel_format(format); + } + } + if key.framebuffer_aspects.contains(Aspects::DEPTH) { + pipeline.set_depth_attachment_pixel_format(key.depth_stencil_format); + } + if key.framebuffer_aspects.contains(Aspects::STENCIL) { + pipeline.set_stencil_attachment_pixel_format(key.depth_stencil_format); + } + + if let Some((index, channel)) = key.target_index { + assert!(key.framebuffer_aspects.contains(Aspects::COLOR)); + let s_channel = match channel { + Channel::Float => "float", + Channel::Int => "int", + Channel::Uint => "uint", + }; + let ps_name = format!("ps_clear{}_{}", index, s_channel); + let ps_fun = library.get_function(&ps_name, None).unwrap(); + pipeline.set_fragment_function(Some(&ps_fun)); + } + + + let vertex_descriptor = metal::VertexDescriptor::new(); + let mtl_buffer_desc = vertex_descriptor.layouts().object_at(0).unwrap(); + mtl_buffer_desc.set_stride(mem::size_of::() as _); + for i in 0 .. 
1 { + let mtl_attribute_desc = vertex_descriptor + .attributes() + .object_at(i) + .expect("too many vertex attributes"); + mtl_attribute_desc.set_buffer_index(0); + mtl_attribute_desc.set_offset((i * mem::size_of::<[f32; 4]>()) as _); + mtl_attribute_desc.set_format(metal::MTLVertexFormat::Float4); + } + pipeline.set_vertex_descriptor(Some(&vertex_descriptor)); + + device.new_render_pipeline_state(&pipeline).unwrap() + } +} + +pub type BlitKey = ( + metal::MTLTextureType, + metal::MTLPixelFormat, + Aspects, + Channel, +); + +#[derive(Debug)] +pub struct ImageBlitPipes { + map: FastStorageMap, +} + +impl ImageBlitPipes { + pub(crate) fn get( + &self, + key: BlitKey, + library: &Mutex, + device: &Mutex, + private_caps: &PrivateCapabilities, + ) -> FastStorageGuard { + self.map.get_or_create_with(&key, || { + Self::create(key, &*library.lock(), &*device.lock(), private_caps) + }) + } + + fn create( + key: BlitKey, + library: &metal::LibraryRef, + device: &metal::DeviceRef, + private_caps: &PrivateCapabilities, + ) -> metal::RenderPipelineState { + use metal::MTLTextureType as Tt; + + let pipeline = metal::RenderPipelineDescriptor::new(); + if private_caps.layered_rendering { + pipeline.set_input_primitive_topology(metal::MTLPrimitiveTopologyClass::Triangle); + } + + let s_type = match key.0 { + Tt::D1 => "1d", + Tt::D1Array => "1d_array", + Tt::D2 => "2d", + Tt::D2Array => "2d_array", + Tt::D3 => "3d", + Tt::D2Multisample => panic!("Can't blit MSAA surfaces"), + Tt::Cube | Tt::CubeArray => unimplemented!(), + }; + let s_channel = if key.2.contains(Aspects::COLOR) { + match key.3 { + Channel::Float => "float", + Channel::Int => "int", + Channel::Uint => "uint", + } + } else { + "depth" + }; + let ps_name = format!("ps_blit_{}_{}", s_type, s_channel); + + let vs_blit = library.get_function("vs_blit", None).unwrap(); + let ps_blit = library.get_function(&ps_name, None).unwrap(); + pipeline.set_vertex_function(Some(&vs_blit)); + pipeline.set_fragment_function(Some(&ps_blit)); + + if key.2.contains(Aspects::COLOR) { + pipeline + .color_attachments() + .object_at(0) + .unwrap() + .set_pixel_format(key.1); + } + if key.2.contains(Aspects::DEPTH) { + pipeline.set_depth_attachment_pixel_format(key.1); + } + if key.2.contains(Aspects::STENCIL) { + pipeline.set_stencil_attachment_pixel_format(key.1); + } + + + let vertex_descriptor = metal::VertexDescriptor::new(); + let mtl_buffer_desc = vertex_descriptor.layouts().object_at(0).unwrap(); + mtl_buffer_desc.set_stride(mem::size_of::() as _); + for i in 0 .. 
2 { + let mtl_attribute_desc = vertex_descriptor + .attributes() + .object_at(i) + .expect("too many vertex attributes"); + mtl_attribute_desc.set_buffer_index(0); + mtl_attribute_desc.set_offset((i * mem::size_of::<[f32; 4]>()) as _); + mtl_attribute_desc.set_format(metal::MTLVertexFormat::Float4); + } + pipeline.set_vertex_descriptor(Some(&vertex_descriptor)); + + device.new_render_pipeline_state(&pipeline).unwrap() + } +} + +#[derive(Debug)] +pub struct ServicePipes { + pub library: Mutex, + pub sampler_states: SamplerStates, + pub depth_stencil_states: DepthStencilStates, + pub clears: ImageClearPipes, + pub blits: ImageBlitPipes, + pub copy_buffer: metal::ComputePipelineState, + pub fill_buffer: metal::ComputePipelineState, +} + +impl ServicePipes { + pub fn new(device: &metal::DeviceRef) -> Self { + let data = include_bytes!("./../shaders/gfx_shaders.metallib"); + let library = device.new_library_with_data(data).unwrap(); + + let copy_buffer = Self::create_copy_buffer(&library, device); + let fill_buffer = Self::create_fill_buffer(&library, device); + + ServicePipes { + library: Mutex::new(library), + sampler_states: SamplerStates::new(device), + depth_stencil_states: DepthStencilStates::new(device), + clears: ImageClearPipes { + map: FastStorageMap::default(), + }, + blits: ImageBlitPipes { + map: FastStorageMap::default(), + }, + copy_buffer, + fill_buffer, + } + } + + fn create_copy_buffer( + library: &metal::LibraryRef, + device: &metal::DeviceRef, + ) -> metal::ComputePipelineState { + let pipeline = metal::ComputePipelineDescriptor::new(); + + let cs_copy_buffer = library.get_function("cs_copy_buffer", None).unwrap(); + pipeline.set_compute_function(Some(&cs_copy_buffer)); + pipeline.set_thread_group_size_is_multiple_of_thread_execution_width(true); + + + + + + + + + unsafe { device.new_compute_pipeline_state(&pipeline) }.unwrap() + } + + fn create_fill_buffer( + library: &metal::LibraryRef, + device: &metal::DeviceRef, + ) -> metal::ComputePipelineState { + let pipeline = metal::ComputePipelineDescriptor::new(); + + let cs_fill_buffer = library.get_function("cs_fill_buffer", None).unwrap(); + pipeline.set_compute_function(Some(&cs_fill_buffer)); + pipeline.set_thread_group_size_is_multiple_of_thread_execution_width(true); + + + + + + + + unsafe { device.new_compute_pipeline_state(&pipeline) }.unwrap() + } + + pub(crate) fn simple_blit( + &self, + device: &Mutex, + cmd_buffer: &metal::CommandBufferRef, + src: &metal::TextureRef, + dst: &metal::TextureRef, + private_caps: &PrivateCapabilities, + ) { + let key = ( + metal::MTLTextureType::D2, + dst.pixel_format(), + Aspects::COLOR, + Channel::Float, + ); + let pso = self.blits.get(key, &self.library, device, private_caps); + let vertices = [ + BlitVertex { + uv: [0.0, 1.0, 0.0, 0.0], + pos: [0.0, 0.0, 0.0, 0.0], + }, + BlitVertex { + uv: [0.0, 0.0, 0.0, 0.0], + pos: [0.0, 1.0, 0.0, 0.0], + }, + BlitVertex { + uv: [1.0, 1.0, 0.0, 0.0], + pos: [1.0, 0.0, 0.0, 0.0], + }, + BlitVertex { + uv: [1.0, 0.0, 0.0, 0.0], + pos: [1.0, 1.0, 0.0, 0.0], + }, + ]; + + let descriptor = metal::RenderPassDescriptor::new(); + if private_caps.layered_rendering { + descriptor.set_render_target_array_length(1); + } + let attachment = descriptor.color_attachments().object_at(0).unwrap(); + attachment.set_texture(Some(dst)); + attachment.set_load_action(metal::MTLLoadAction::DontCare); + attachment.set_store_action(metal::MTLStoreAction::Store); + + let encoder = cmd_buffer.new_render_command_encoder(descriptor); + 
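+        // The blit itself: bind the cached pipeline, the linear sampler and
+        // the source texture, then push the four-vertex quad through
+        // set_vertex_bytes (small enough to skip a dedicated MTLBuffer) and
+        // draw it as a triangle strip.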
encoder.set_render_pipeline_state(pso.as_ref()); + encoder.set_fragment_sampler_state(0, Some(&self.sampler_states.linear)); + encoder.set_fragment_texture(0, Some(src)); + encoder.set_vertex_bytes( + 0, + (vertices.len() * mem::size_of::()) as u64, + vertices.as_ptr() as *const _, + ); + encoder.draw_primitives(metal::MTLPrimitiveType::TriangleStrip, 0, 4); + encoder.end_encoding(); + } +} diff --git a/third_party/rust/gfx-backend-metal/src/lib.rs b/third_party/rust/gfx-backend-metal/src/lib.rs new file mode 100644 index 000000000000..b591c391518a --- /dev/null +++ b/third_party/rust/gfx-backend-metal/src/lib.rs @@ -0,0 +1,1033 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +#[macro_use] +extern crate bitflags; +#[macro_use] +extern crate objc; +#[macro_use] +extern crate log; + +use hal::{ + adapter::{Adapter, AdapterInfo, DeviceType}, + queue::{QueueFamilyId, QueueType}, +}; +use range_alloc::RangeAllocator; + +use cocoa::foundation::NSInteger; +use core_graphics::base::CGFloat; +use core_graphics::geometry::CGRect; +#[cfg(feature = "dispatch")] +use dispatch; +use foreign_types::ForeignTypeRef; +use metal::MTLFeatureSet; +use metal::MTLLanguageVersion; +use objc::{ + declare::ClassDecl, + runtime::{Object, BOOL, YES, Sel, Class} +}; +use parking_lot::{Condvar, Mutex}; +use lazy_static::lazy_static; + +use std::mem; +use std::os::raw::c_void; +use std::ptr::NonNull; +use std::sync::Arc; + +mod command; +mod conversions; +mod device; +mod internal; +mod native; +mod soft; +mod window; + +pub use crate::command::CommandPool; +pub use crate::device::{Device, LanguageVersion, PhysicalDevice}; +pub use crate::window::{AcquireMode, CAMetalLayer, Surface, Swapchain}; + +pub type GraphicsCommandPool = CommandPool; + + + + +type ResourceIndex = u32; + + +#[derive(Clone, Debug, Hash, PartialEq)] +pub enum OnlineRecording { + + Immediate, + + Deferred, + #[cfg(feature = "dispatch")] + + Remote(dispatch::QueuePriority), +} + +impl Default for OnlineRecording { + fn default() -> Self { + OnlineRecording::Immediate + } +} + +const MAX_ACTIVE_COMMAND_BUFFERS: usize = 1 << 14; +const MAX_VISIBILITY_QUERIES: usize = 1 << 14; +const MAX_COLOR_ATTACHMENTS: usize = 4; +const MAX_BOUND_DESCRIPTOR_SETS: usize = 8; + +#[derive(Debug, Clone, Copy)] +pub struct QueueFamily {} + +impl hal::queue::QueueFamily for QueueFamily { + fn queue_type(&self) -> QueueType { + QueueType::General + } + fn max_queues(&self) -> usize { + 1 + } + fn id(&self) -> QueueFamilyId { + QueueFamilyId(0) + } +} + +#[derive(Debug)] +struct VisibilityShared { + + + buffer: metal::Buffer, + allocator: Mutex>, + availability_offset: hal::buffer::Offset, + condvar: Condvar, +} + +#[derive(Debug)] +struct Shared { + device: Mutex, + queue: Mutex, + queue_blocker: Mutex, + service_pipes: internal::ServicePipes, + disabilities: PrivateDisabilities, + private_caps: PrivateCapabilities, + visibility: VisibilityShared, +} + +unsafe impl Send for Shared {} +unsafe impl Sync for Shared {} + +impl Shared { + fn new(device: metal::Device, experiments: &Experiments) -> Self { + let private_caps = PrivateCapabilities::new(&device, experiments); + + let visibility = VisibilityShared { + buffer: device.new_buffer( + MAX_VISIBILITY_QUERIES as u64 + * (mem::size_of::() + mem::size_of::()) as u64, + metal::MTLResourceOptions::StorageModeShared, + ), + allocator: Mutex::new(RangeAllocator::new( + 0 .. 
MAX_VISIBILITY_QUERIES as hal::query::Id, + )), + availability_offset: (MAX_VISIBILITY_QUERIES * mem::size_of::()) + as hal::buffer::Offset, + condvar: Condvar::new(), + }; + Shared { + queue: Mutex::new(command::QueueInner::new( + &device, + Some(MAX_ACTIVE_COMMAND_BUFFERS), + )), + queue_blocker: Mutex::new(command::QueueBlocker::default()), + service_pipes: internal::ServicePipes::new(&device), + disabilities: PrivateDisabilities { + broken_viewport_near_depth: device.name().starts_with("Intel") + && !device.supports_feature_set(MTLFeatureSet::macOS_GPUFamily1_v4), + broken_layered_clear_image: device.name().starts_with("Intel"), + }, + private_caps, + device: Mutex::new(device), + visibility, + } + } +} + +#[derive(Clone, Debug, Default)] +pub struct Experiments { + pub argument_buffers: bool, +} + +#[derive(Debug)] +pub struct Instance { + pub experiments: Experiments, + gfx_managed_metal_layer_delegate: GfxManagedMetalLayerDelegate, +} + +impl hal::Instance for Instance { + fn create(_: &str, _: u32) -> Result { + Ok(Instance { + experiments: Experiments::default(), + gfx_managed_metal_layer_delegate: GfxManagedMetalLayerDelegate::new() + }) + } + + fn enumerate_adapters(&self) -> Vec> { + let devices = metal::Device::all(); + let mut adapters: Vec> = devices + .into_iter() + .map(|dev| { + let name = dev.name().into(); + let shared = Shared::new(dev, &self.experiments); + let physical_device = device::PhysicalDevice::new(Arc::new(shared)); + Adapter { + info: AdapterInfo { + name, + vendor: 0, + device: 0, + device_type: if physical_device.shared.private_caps.low_power { + DeviceType::IntegratedGpu + } else { + DeviceType::DiscreteGpu + }, + }, + physical_device, + queue_families: vec![QueueFamily {}], + } + }) + .collect(); + adapters.sort_by_key(|adapt| { + ( + adapt.physical_device.shared.private_caps.low_power, + adapt.physical_device.shared.private_caps.headless, + ) + }); + adapters + } + + unsafe fn create_surface( + &self, + has_handle: &impl raw_window_handle::HasRawWindowHandle, + ) -> Result { + match has_handle.raw_window_handle() { + #[cfg(target_os = "ios")] + raw_window_handle::RawWindowHandle::IOS(handle) => { + Ok(self.create_surface_from_uiview(handle.ui_view, false)) + } + #[cfg(target_os = "macos")] + raw_window_handle::RawWindowHandle::MacOS(handle) => { + Ok(self.create_surface_from_nsview(handle.ns_view, false)) + } + _ => Err(hal::window::InitError::UnsupportedWindowHandle), + } + } + + unsafe fn destroy_surface(&self, _surface: Surface) { + + } +} + +lazy_static! 
{ + static ref GFX_MANAGED_METAL_LAYER_DELEGATE_CLASS: &'static Class = unsafe { + let mut decl = ClassDecl::new("GfxManagedMetalLayerDelegate", class!(NSObject)).unwrap(); + decl.add_method( + sel!(layer:shouldInheritContentsScale:fromWindow:), + layer_should_inherit_contents_scale_from_window + as extern "C" fn(&Object, Sel, *mut Object, CGFloat, *mut Object) -> BOOL, + ); + decl.register() + }; +} + +extern "C" fn layer_should_inherit_contents_scale_from_window( + _: &Object, + _: Sel, + _layer: *mut Object, + _new_scale: CGFloat, + _from_window: *mut Object +) -> BOOL { + return YES; +} + +#[derive(Debug)] +struct GfxManagedMetalLayerDelegate(*mut Object); + +impl GfxManagedMetalLayerDelegate { + pub fn new() -> Self { + unsafe { + let mut delegate: *mut Object = msg_send![*GFX_MANAGED_METAL_LAYER_DELEGATE_CLASS, alloc]; + delegate = msg_send![delegate, init]; + Self(delegate) + } + } +} + +impl Drop for GfxManagedMetalLayerDelegate { + fn drop(&mut self) { + unsafe { + let () = msg_send![self.0, release]; + } + } +} + +unsafe impl Send for GfxManagedMetalLayerDelegate {} +unsafe impl Sync for GfxManagedMetalLayerDelegate {} + +impl Instance { + #[cfg(target_os = "ios")] + unsafe fn create_from_uiview(&self, uiview: *mut c_void) -> window::SurfaceInner { + let view: cocoa::base::id = mem::transmute(uiview); + if view.is_null() { + panic!("window does not have a valid contentView"); + } + + let main_layer: CAMetalLayer = msg_send![view, layer]; + let class = class!(CAMetalLayer); + let is_valid_layer: BOOL = msg_send![main_layer, isKindOfClass: class]; + let render_layer = if is_valid_layer == YES { + main_layer + } else { + + + let new_layer: CAMetalLayer = msg_send![class, new]; + + let bounds: CGRect = msg_send![main_layer, bounds]; + let () = msg_send![new_layer, setFrame: bounds]; + + let () = msg_send![main_layer, addSublayer: new_layer]; + new_layer + }; + + let window: cocoa::base::id = msg_send![view, window]; + if !window.is_null() { + let screen: cocoa::base::id = msg_send![window, screen]; + assert!(!screen.is_null(), "window is not attached to a screen"); + + let scale_factor: CGFloat = msg_send![screen, nativeScale]; + let () = msg_send![view, setContentScaleFactor: scale_factor]; + } + + let _: *mut c_void = msg_send![view, retain]; + window::SurfaceInner::new(NonNull::new(view), render_layer) + } + + #[cfg(target_os = "macos")] + unsafe fn create_from_nsview(&self, nsview: *mut c_void) -> window::SurfaceInner { + let view: cocoa::base::id = mem::transmute(nsview); + if view.is_null() { + panic!("window does not have a valid contentView"); + } + + let existing: CAMetalLayer = msg_send![view, layer]; + let class = class!(CAMetalLayer); + + let is_actually_layer: BOOL = msg_send![view, isKindOfClass: class]; + if is_actually_layer == YES { + return self.create_from_layer(view); + } + + let use_current = if existing.is_null() { + false + } else { + let result: BOOL = msg_send![existing, isKindOfClass: class]; + result == YES + }; + + let render_layer: CAMetalLayer = if use_current { + existing + } else { + let layer: CAMetalLayer = msg_send![class, new]; + let () = msg_send![view, setLayer: layer]; + let () = msg_send![view, setWantsLayer: YES]; + let bounds: CGRect = msg_send![view, bounds]; + let () = msg_send![layer, setBounds: bounds]; + + let window: cocoa::base::id = msg_send![view, window]; + if !window.is_null() { + let scale_factor: CGFloat = msg_send![window, backingScaleFactor]; + let () = msg_send![layer, setContentsScale: scale_factor]; + } + let () = 
msg_send![layer, setDelegate: self.gfx_managed_metal_layer_delegate.0]; + layer + }; + + let _: *mut c_void = msg_send![view, retain]; + window::SurfaceInner::new(NonNull::new(view), render_layer) + } + + unsafe fn create_from_layer(&self, layer: CAMetalLayer) -> window::SurfaceInner { + let class = class!(CAMetalLayer); + let proper_kind: BOOL = msg_send![layer, isKindOfClass: class]; + assert_eq!(proper_kind, YES); + let _: *mut c_void = msg_send![layer, retain]; + window::SurfaceInner::new(None, layer) + } + + pub fn create_surface_from_layer( + &self, + layer: CAMetalLayer, + enable_signposts: bool, + ) -> Surface { + unsafe { self.create_from_layer(layer) }.into_surface(enable_signposts) + } + + #[cfg(target_os = "macos")] + pub fn create_surface_from_nsview( + &self, + nsview: *mut c_void, + enable_signposts: bool, + ) -> Surface { + unsafe { self.create_from_nsview(nsview) }.into_surface(enable_signposts) + } + + #[cfg(target_os = "ios")] + pub fn create_surface_from_uiview( + &self, + uiview: *mut c_void, + enable_signposts: bool, + ) -> Surface { + unsafe { self.create_from_uiview(uiview) }.into_surface(enable_signposts) + } +} + +#[derive(Copy, Clone, Debug, Eq, Hash, PartialEq)] +pub enum Backend {} +impl hal::Backend for Backend { + type Instance = Instance; + type PhysicalDevice = device::PhysicalDevice; + type Device = device::Device; + + type Surface = window::Surface; + type Swapchain = window::Swapchain; + + type QueueFamily = QueueFamily; + type CommandQueue = command::CommandQueue; + type CommandBuffer = command::CommandBuffer; + + type Memory = native::Memory; + type CommandPool = command::CommandPool; + + type ShaderModule = native::ShaderModule; + type RenderPass = native::RenderPass; + type Framebuffer = native::Framebuffer; + + type Buffer = native::Buffer; + type BufferView = native::BufferView; + type Image = native::Image; + type ImageView = native::ImageView; + type Sampler = native::Sampler; + + type ComputePipeline = native::ComputePipeline; + type GraphicsPipeline = native::GraphicsPipeline; + type PipelineCache = native::PipelineCache; + type PipelineLayout = native::PipelineLayout; + type DescriptorSetLayout = native::DescriptorSetLayout; + type DescriptorPool = native::DescriptorPool; + type DescriptorSet = native::DescriptorSet; + + type Fence = native::Fence; + type Semaphore = native::Semaphore; + type Event = native::Event; + type QueryPool = native::QueryPool; +} + +const RESOURCE_HEAP_SUPPORT: &[MTLFeatureSet] = &[ + MTLFeatureSet::iOS_GPUFamily1_v3, + MTLFeatureSet::iOS_GPUFamily2_v3, + MTLFeatureSet::iOS_GPUFamily3_v2, + MTLFeatureSet::tvOS_GPUFamily1_v2, +]; + +const ARGUMENT_BUFFER_SUPPORT: &[MTLFeatureSet] = &[ + MTLFeatureSet::iOS_GPUFamily1_v4, + MTLFeatureSet::tvOS_GPUFamily1_v3, + MTLFeatureSet::macOS_GPUFamily1_v3, +]; + +const MUTABLE_COMPARISON_SAMPLER_SUPPORT: &[MTLFeatureSet] = &[ + MTLFeatureSet::macOS_GPUFamily1_v1, + MTLFeatureSet::iOS_GPUFamily3_v1, +]; + +const ASTC_PIXEL_FORMAT_FEATURES: &[MTLFeatureSet] = &[ + MTLFeatureSet::iOS_GPUFamily2_v1, + MTLFeatureSet::iOS_GPUFamily2_v2, + MTLFeatureSet::iOS_GPUFamily3_v1, + MTLFeatureSet::iOS_GPUFamily2_v3, + MTLFeatureSet::iOS_GPUFamily3_v2, + MTLFeatureSet::iOS_GPUFamily2_v4, + MTLFeatureSet::iOS_GPUFamily3_v3, + MTLFeatureSet::iOS_GPUFamily4_v1, + MTLFeatureSet::tvOS_GPUFamily1_v1, + MTLFeatureSet::tvOS_GPUFamily1_v2, + MTLFeatureSet::tvOS_GPUFamily1_v3, + MTLFeatureSet::tvOS_GPUFamily2_v1, +]; + +const R8UNORM_SRGB_ALL: &[MTLFeatureSet] = &[ + MTLFeatureSet::iOS_GPUFamily3_v1, + 
MTLFeatureSet::iOS_GPUFamily2_v3, + MTLFeatureSet::iOS_GPUFamily3_v2, + MTLFeatureSet::iOS_GPUFamily2_v4, + MTLFeatureSet::iOS_GPUFamily3_v3, + MTLFeatureSet::iOS_GPUFamily4_v1, + MTLFeatureSet::tvOS_GPUFamily1_v2, + MTLFeatureSet::tvOS_GPUFamily1_v3, + MTLFeatureSet::tvOS_GPUFamily2_v1, +]; + +const R8SNORM_NO_RESOLVE: &[MTLFeatureSet] = &[ + MTLFeatureSet::iOS_GPUFamily1_v1, + MTLFeatureSet::iOS_GPUFamily1_v2, + MTLFeatureSet::iOS_GPUFamily1_v3, + MTLFeatureSet::iOS_GPUFamily1_v4, +]; + +const RG8UNORM_SRGB_NO_WRITE: &[MTLFeatureSet] = &[ + MTLFeatureSet::iOS_GPUFamily1_v1, + MTLFeatureSet::iOS_GPUFamily2_v1, + MTLFeatureSet::iOS_GPUFamily1_v2, + MTLFeatureSet::iOS_GPUFamily2_v2, + MTLFeatureSet::iOS_GPUFamily1_v3, + MTLFeatureSet::iOS_GPUFamily1_v4, + MTLFeatureSet::tvOS_GPUFamily1_v1, +]; + +const RG8SNORM_NO_RESOLVE: &[MTLFeatureSet] = &[ + MTLFeatureSet::iOS_GPUFamily1_v1, + MTLFeatureSet::iOS_GPUFamily1_v2, + MTLFeatureSet::iOS_GPUFamily1_v3, + MTLFeatureSet::iOS_GPUFamily1_v4, +]; + +const RGBA8_SRGB: &[MTLFeatureSet] = &[ + MTLFeatureSet::iOS_GPUFamily3_v1, + MTLFeatureSet::iOS_GPUFamily2_v3, + MTLFeatureSet::iOS_GPUFamily3_v2, + MTLFeatureSet::iOS_GPUFamily2_v4, + MTLFeatureSet::iOS_GPUFamily3_v3, + MTLFeatureSet::iOS_GPUFamily4_v1, + MTLFeatureSet::tvOS_GPUFamily1_v2, + MTLFeatureSet::tvOS_GPUFamily1_v3, + MTLFeatureSet::tvOS_GPUFamily2_v1, +]; + +const RGB10A2UNORM_ALL: &[MTLFeatureSet] = &[ + MTLFeatureSet::iOS_GPUFamily3_v1, + MTLFeatureSet::iOS_GPUFamily3_v2, + MTLFeatureSet::iOS_GPUFamily3_v3, + MTLFeatureSet::iOS_GPUFamily4_v1, + MTLFeatureSet::tvOS_GPUFamily2_v1, + MTLFeatureSet::macOS_GPUFamily1_v1, + MTLFeatureSet::macOS_GPUFamily1_v2, + MTLFeatureSet::macOS_GPUFamily1_v3, +]; + +const RGB10A2UINT_COLOR_WRITE: &[MTLFeatureSet] = &[ + MTLFeatureSet::iOS_GPUFamily3_v1, + MTLFeatureSet::iOS_GPUFamily3_v2, + MTLFeatureSet::iOS_GPUFamily3_v3, + MTLFeatureSet::iOS_GPUFamily4_v1, + MTLFeatureSet::tvOS_GPUFamily2_v1, + MTLFeatureSet::macOS_GPUFamily1_v1, + MTLFeatureSet::macOS_GPUFamily1_v2, + MTLFeatureSet::macOS_GPUFamily1_v3, +]; + +const RG11B10FLOAT_ALL: &[MTLFeatureSet] = &[ + MTLFeatureSet::iOS_GPUFamily3_v1, + MTLFeatureSet::iOS_GPUFamily3_v2, + MTLFeatureSet::iOS_GPUFamily3_v3, + MTLFeatureSet::iOS_GPUFamily4_v1, + MTLFeatureSet::tvOS_GPUFamily2_v1, + MTLFeatureSet::macOS_GPUFamily1_v1, + MTLFeatureSet::macOS_GPUFamily1_v2, + MTLFeatureSet::macOS_GPUFamily1_v3, +]; + +const RGB9E5FLOAT_ALL: &[MTLFeatureSet] = &[ + MTLFeatureSet::iOS_GPUFamily3_v1, + MTLFeatureSet::iOS_GPUFamily3_v2, + MTLFeatureSet::iOS_GPUFamily3_v3, + MTLFeatureSet::iOS_GPUFamily4_v1, + MTLFeatureSet::tvOS_GPUFamily2_v1, +]; + +const BGR10A2_ALL: &[MTLFeatureSet] = &[ + MTLFeatureSet::iOS_GPUFamily1_v4, + MTLFeatureSet::iOS_GPUFamily2_v4, + MTLFeatureSet::iOS_GPUFamily3_v3, + MTLFeatureSet::iOS_GPUFamily4_v1, + MTLFeatureSet::tvOS_GPUFamily1_v3, + MTLFeatureSet::tvOS_GPUFamily2_v1, +]; + +const BASE_INSTANCE_SUPPORT: &[MTLFeatureSet] = &[ + MTLFeatureSet::iOS_GPUFamily1_v4, + MTLFeatureSet::iOS_GPUFamily3_v1, +]; + +const DUAL_SOURCE_BLEND_SUPPORT: &[MTLFeatureSet] = &[ + MTLFeatureSet::iOS_GPUFamily1_v4, + MTLFeatureSet::tvOS_GPUFamily1_v3, + MTLFeatureSet::macOS_GPUFamily1_v2, +]; + +const LAYERED_RENDERING_SUPPORT: &[MTLFeatureSet] = &[ + MTLFeatureSet::iOS_GPUFamily5_v1, + MTLFeatureSet::macOS_GPUFamily1_v1, +]; + +const FUNCTION_SPECIALIZATION_SUPPORT: &[MTLFeatureSet] = &[ + MTLFeatureSet::iOS_GPUFamily1_v3, + MTLFeatureSet::tvOS_GPUFamily1_v2, + MTLFeatureSet::macOS_GPUFamily1_v2, +]; + +const 
DEPTH_CLIP_MODE: &[MTLFeatureSet] = &[ + MTLFeatureSet::iOS_GPUFamily4_v1, + MTLFeatureSet::tvOS_GPUFamily1_v3, + MTLFeatureSet::macOS_GPUFamily1_v1, +]; + +#[derive(Clone, Debug)] +struct PrivateCapabilities { + pub os_is_mac: bool, + os_version: (u32, u32), + msl_version: metal::MTLLanguageVersion, + exposed_queues: usize, + + expose_line_mode: bool, + resource_heaps: bool, + argument_buffers: bool, + shared_textures: bool, + mutable_comparison_samplers: bool, + base_instance: bool, + dual_source_blending: bool, + low_power: bool, + headless: bool, + layered_rendering: bool, + function_specialization: bool, + depth_clip_mode: bool, + format_depth24_stencil8: bool, + format_depth32_stencil8_filter: bool, + format_depth32_stencil8_none: bool, + format_min_srgb_channels: u8, + format_b5: bool, + format_bc: bool, + format_eac_etc: bool, + format_astc: bool, + format_r8unorm_srgb_all: bool, + format_r8unorm_srgb_no_write: bool, + format_r8snorm_all: bool, + format_r16_norm_all: bool, + format_rg8unorm_srgb_all: bool, + format_rg8unorm_srgb_no_write: bool, + format_rg8snorm_all: bool, + format_r32_all: bool, + format_r32_no_write: bool, + format_r32float_no_write_no_filter: bool, + format_r32float_no_filter: bool, + format_r32float_all: bool, + format_rgba8_srgb_all: bool, + format_rgba8_srgb_no_write: bool, + format_rgb10a2_unorm_all: bool, + format_rgb10a2_unorm_no_write: bool, + format_rgb10a2_uint_color: bool, + format_rgb10a2_uint_color_write: bool, + format_rg11b10_all: bool, + format_rg11b10_no_write: bool, + format_rgb9e5_all: bool, + format_rgb9e5_no_write: bool, + format_rgb9e5_filter_only: bool, + format_rg32_color: bool, + format_rg32_color_write: bool, + format_rg32float_all: bool, + format_rg32float_color_blend: bool, + format_rg32float_no_filter: bool, + format_rgba32int_color: bool, + format_rgba32int_color_write: bool, + format_rgba32float_color: bool, + format_rgba32float_color_write: bool, + format_rgba32float_all: bool, + format_depth16unorm: bool, + format_depth32float_filter: bool, + format_depth32float_none: bool, + format_bgr10a2_all: bool, + format_bgr10a2_no_write: bool, + max_buffers_per_stage: ResourceIndex, + max_textures_per_stage: ResourceIndex, + max_samplers_per_stage: ResourceIndex, + buffer_alignment: u64, + max_buffer_size: u64, + max_texture_size: u64, + max_texture_3d_size: u64, + max_texture_layers: u64, + max_fragment_input_components: u64, + sample_count_mask: u8, +} + +impl PrivateCapabilities { + fn version_at_least(major: u32, minor: u32, needed_major: u32, needed_minor: u32) -> bool { + major > needed_major || (major == needed_major && minor >= needed_minor) + } + + fn supports_any(raw: &metal::DeviceRef, features_sets: &[MTLFeatureSet]) -> bool { + features_sets + .iter() + .cloned() + .any(|x| raw.supports_feature_set(x)) + } + + fn new(device: &metal::Device, experiments: &Experiments) -> Self { + #[repr(C)] + #[derive(Clone, Copy, Debug)] + struct NSOperatingSystemVersion { + major: NSInteger, + minor: NSInteger, + patch: NSInteger, + } + + let version: NSOperatingSystemVersion = unsafe { + let process_info: *mut Object = msg_send![class!(NSProcessInfo), processInfo]; + msg_send![process_info, operatingSystemVersion] + }; + + let major = version.major as u32; + let minor = version.minor as u32; + let os_is_mac = device.supports_feature_set(MTLFeatureSet::macOS_GPUFamily1_v1); + + let mut sample_count_mask: u8 = 1 | 4; + if device.supports_sample_count(2) { + sample_count_mask |= 2; + } + if device.supports_sample_count(8) { + sample_count_mask 
|= 8; + } + + PrivateCapabilities { + os_is_mac, + os_version: (major as u32, minor as u32), + msl_version: if os_is_mac { + if Self::version_at_least(major, minor, 10, 14) { + MTLLanguageVersion::V2_1 + } else if Self::version_at_least(major, minor, 10, 13) { + MTLLanguageVersion::V2_0 + } else if Self::version_at_least(major, minor, 10, 12) { + MTLLanguageVersion::V1_2 + } else if Self::version_at_least(major, minor, 10, 11) { + MTLLanguageVersion::V1_1 + } else { + MTLLanguageVersion::V1_0 + } + } else if Self::version_at_least(major, minor, 12, 0) { + MTLLanguageVersion::V2_1 + } else if Self::version_at_least(major, minor, 11, 0) { + MTLLanguageVersion::V2_0 + } else if Self::version_at_least(major, minor, 10, 0) { + MTLLanguageVersion::V1_2 + } else if Self::version_at_least(major, minor, 9, 0) { + MTLLanguageVersion::V1_1 + } else { + MTLLanguageVersion::V1_0 + }, + exposed_queues: 1, + expose_line_mode: true, + resource_heaps: Self::supports_any(&device, RESOURCE_HEAP_SUPPORT), + argument_buffers: experiments.argument_buffers + && Self::supports_any(&device, ARGUMENT_BUFFER_SUPPORT), + shared_textures: !os_is_mac, + mutable_comparison_samplers: Self::supports_any( + &device, + MUTABLE_COMPARISON_SAMPLER_SUPPORT, + ), + base_instance: Self::supports_any(&device, BASE_INSTANCE_SUPPORT), + dual_source_blending: Self::supports_any(&device, DUAL_SOURCE_BLEND_SUPPORT), + low_power: !os_is_mac || device.is_low_power(), + headless: os_is_mac && device.is_headless(), + layered_rendering: Self::supports_any(&device, LAYERED_RENDERING_SUPPORT), + function_specialization: Self::supports_any(&device, FUNCTION_SPECIALIZATION_SUPPORT), + depth_clip_mode: Self::supports_any(&device, DEPTH_CLIP_MODE), + format_depth24_stencil8: os_is_mac && device.d24_s8_supported(), + format_depth32_stencil8_filter: os_is_mac, + format_depth32_stencil8_none: !os_is_mac, + format_min_srgb_channels: if os_is_mac { 4 } else { 1 }, + format_b5: !os_is_mac, + format_bc: os_is_mac, + format_eac_etc: !os_is_mac, + format_astc: Self::supports_any(&device, ASTC_PIXEL_FORMAT_FEATURES), + format_r8unorm_srgb_all: Self::supports_any(&device, R8UNORM_SRGB_ALL), + format_r8unorm_srgb_no_write: !Self::supports_any(&device, R8UNORM_SRGB_ALL) + && !os_is_mac, + format_r8snorm_all: !Self::supports_any(&device, R8SNORM_NO_RESOLVE), + format_r16_norm_all: os_is_mac, + format_rg8unorm_srgb_all: Self::supports_any(&device, RG8UNORM_SRGB_NO_WRITE), + format_rg8unorm_srgb_no_write: !Self::supports_any(&device, RG8UNORM_SRGB_NO_WRITE) + && !os_is_mac, + format_rg8snorm_all: !Self::supports_any(&device, RG8SNORM_NO_RESOLVE), + format_r32_all: !Self::supports_any( + &device, + &[ + MTLFeatureSet::iOS_GPUFamily1_v1, + MTLFeatureSet::iOS_GPUFamily2_v1, + ], + ), + format_r32_no_write: Self::supports_any( + &device, + &[ + MTLFeatureSet::iOS_GPUFamily1_v1, + MTLFeatureSet::iOS_GPUFamily2_v1, + ], + ), + format_r32float_no_write_no_filter: Self::supports_any( + &device, + &[ + MTLFeatureSet::iOS_GPUFamily1_v1, + MTLFeatureSet::iOS_GPUFamily2_v1, + ], + ) && !os_is_mac, + format_r32float_no_filter: !Self::supports_any( + &device, + &[ + MTLFeatureSet::iOS_GPUFamily1_v1, + MTLFeatureSet::iOS_GPUFamily2_v1, + ], + ) && !os_is_mac, + format_r32float_all: os_is_mac, + format_rgba8_srgb_all: Self::supports_any(&device, RGBA8_SRGB), + format_rgba8_srgb_no_write: !Self::supports_any(&device, RGBA8_SRGB), + format_rgb10a2_unorm_all: Self::supports_any(&device, RGB10A2UNORM_ALL), + format_rgb10a2_unorm_no_write: !Self::supports_any(&device, 
RGB10A2UNORM_ALL), + format_rgb10a2_uint_color: !Self::supports_any(&device, RGB10A2UINT_COLOR_WRITE), + format_rgb10a2_uint_color_write: Self::supports_any(&device, RGB10A2UINT_COLOR_WRITE), + format_rg11b10_all: Self::supports_any(&device, RG11B10FLOAT_ALL), + format_rg11b10_no_write: !Self::supports_any(&device, RG11B10FLOAT_ALL), + format_rgb9e5_all: Self::supports_any(&device, RGB9E5FLOAT_ALL), + format_rgb9e5_no_write: !Self::supports_any(&device, RGB9E5FLOAT_ALL) && !os_is_mac, + format_rgb9e5_filter_only: os_is_mac, + format_rg32_color: Self::supports_any( + &device, + &[ + MTLFeatureSet::iOS_GPUFamily1_v1, + MTLFeatureSet::iOS_GPUFamily2_v1, + ], + ), + format_rg32_color_write: !Self::supports_any( + &device, + &[ + MTLFeatureSet::iOS_GPUFamily1_v1, + MTLFeatureSet::iOS_GPUFamily2_v1, + ], + ), + format_rg32float_all: os_is_mac, + format_rg32float_color_blend: Self::supports_any( + &device, + &[ + MTLFeatureSet::iOS_GPUFamily1_v1, + MTLFeatureSet::iOS_GPUFamily2_v1, + ], + ), + format_rg32float_no_filter: !os_is_mac + && !Self::supports_any( + &device, + &[ + MTLFeatureSet::iOS_GPUFamily1_v1, + MTLFeatureSet::iOS_GPUFamily2_v1, + ], + ), + format_rgba32int_color: Self::supports_any( + &device, + &[ + MTLFeatureSet::iOS_GPUFamily1_v1, + MTLFeatureSet::iOS_GPUFamily2_v1, + ], + ), + format_rgba32int_color_write: !Self::supports_any( + &device, + &[ + MTLFeatureSet::iOS_GPUFamily1_v1, + MTLFeatureSet::iOS_GPUFamily2_v1, + ], + ), + format_rgba32float_color: Self::supports_any( + &device, + &[ + MTLFeatureSet::iOS_GPUFamily1_v1, + MTLFeatureSet::iOS_GPUFamily2_v1, + ], + ), + format_rgba32float_color_write: !Self::supports_any( + &device, + &[ + MTLFeatureSet::iOS_GPUFamily1_v1, + MTLFeatureSet::iOS_GPUFamily2_v1, + ], + ) && !os_is_mac, + format_rgba32float_all: os_is_mac, + format_depth16unorm: device.supports_feature_set(MTLFeatureSet::macOS_GPUFamily1_v2), + format_depth32float_filter: device + .supports_feature_set(MTLFeatureSet::macOS_GPUFamily1_v1), + format_depth32float_none: !device + .supports_feature_set(MTLFeatureSet::macOS_GPUFamily1_v1), + format_bgr10a2_all: Self::supports_any(&device, BGR10A2_ALL), + format_bgr10a2_no_write: !device + .supports_feature_set(MTLFeatureSet::macOS_GPUFamily1_v3), + max_buffers_per_stage: 31, + max_textures_per_stage: if os_is_mac { 128 } else { 31 }, + max_samplers_per_stage: 16, + buffer_alignment: if os_is_mac { 256 } else { 64 }, + max_buffer_size: if device.supports_feature_set(MTLFeatureSet::macOS_GPUFamily1_v2) { + 1 << 30 + } else { + 1 << 28 + }, + max_texture_size: if Self::supports_any( + &device, + &[ + MTLFeatureSet::iOS_GPUFamily3_v1, + MTLFeatureSet::tvOS_GPUFamily2_v1, + MTLFeatureSet::macOS_GPUFamily1_v1, + ], + ) { + 16384 + } else if Self::supports_any( + &device, + &[ + MTLFeatureSet::iOS_GPUFamily1_v2, + MTLFeatureSet::iOS_GPUFamily2_v2, + MTLFeatureSet::tvOS_GPUFamily1_v1, + ], + ) { + 8192 + } else { + 4096 + }, + max_texture_3d_size: 2048, + max_texture_layers: 2048, + max_fragment_input_components: if os_is_mac { 128 } else { 60 }, + sample_count_mask, + } + } + + fn has_version_at_least(&self, needed_major: u32, needed_minor: u32) -> bool { + let (major, minor) = self.os_version; + Self::version_at_least(major, minor, needed_major, needed_minor) + } +} + +#[derive(Clone, Copy, Debug)] +struct PrivateDisabilities { + + broken_viewport_near_depth: bool, + + broken_layered_clear_image: bool, +} + +trait AsNative { + type Native; + fn from(native: &Self::Native) -> Self; + fn as_native(&self) -> &Self::Native; +} + 
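+// The aliases below erase the concrete Metal object types behind NonNull
+// pointers, so they are Copy, hashable, and cheap to store; AsNative converts
+// back to the borrowed metal::*Ref types without touching retain/release.
+// A usage sketch (assuming `buffer: metal::Buffer`):
+//
+//     let ptr: BufferPtr = AsNative::from(&*buffer);
+//     let raw: &metal::BufferRef = ptr.as_native();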
+pub type BufferPtr = NonNull; +pub type TexturePtr = NonNull; +pub type SamplerPtr = NonNull; +pub type ResourcePtr = NonNull; + + + +impl AsNative for BufferPtr { + type Native = metal::BufferRef; + #[inline] + fn from(native: &metal::BufferRef) -> Self { + unsafe { NonNull::new_unchecked(native.as_ptr()) } + } + #[inline] + fn as_native(&self) -> &metal::BufferRef { + unsafe { metal::BufferRef::from_ptr(self.as_ptr()) } + } +} + +impl AsNative for TexturePtr { + type Native = metal::TextureRef; + #[inline] + fn from(native: &metal::TextureRef) -> Self { + unsafe { NonNull::new_unchecked(native.as_ptr()) } + } + #[inline] + fn as_native(&self) -> &metal::TextureRef { + unsafe { metal::TextureRef::from_ptr(self.as_ptr()) } + } +} + +impl AsNative for SamplerPtr { + type Native = metal::SamplerStateRef; + #[inline] + fn from(native: &metal::SamplerStateRef) -> Self { + unsafe { NonNull::new_unchecked(native.as_ptr()) } + } + #[inline] + fn as_native(&self) -> &metal::SamplerStateRef { + unsafe { metal::SamplerStateRef::from_ptr(self.as_ptr()) } + } +} + +impl AsNative for ResourcePtr { + type Native = metal::ResourceRef; + #[inline] + fn from(native: &metal::ResourceRef) -> Self { + unsafe { NonNull::new_unchecked(native.as_ptr()) } + } + #[inline] + fn as_native(&self) -> &metal::ResourceRef { + unsafe { metal::ResourceRef::from_ptr(self.as_ptr()) } + } +} diff --git a/third_party/rust/gfx-backend-metal/src/native.rs b/third_party/rust/gfx-backend-metal/src/native.rs new file mode 100644 index 000000000000..92988bc6af3d --- /dev/null +++ b/third_party/rust/gfx-backend-metal/src/native.rs @@ -0,0 +1,1085 @@ +use crate::{ + internal::{Channel, FastStorageMap}, + window::SwapchainImage, + Backend, + BufferPtr, + ResourceIndex, + SamplerPtr, + TexturePtr, + MAX_COLOR_ATTACHMENTS, +}; + +use auxil::FastHashMap; +use hal::{ + buffer, + format::FormatDesc, + image, + pass::{Attachment, AttachmentId}, + pso, + range::RangeArg, + MemoryTypeId, +}; +use range_alloc::RangeAllocator; + +use arrayvec::ArrayVec; +use cocoa::foundation::NSRange; +use metal; +use parking_lot::{Mutex, RwLock}; +use spirv_cross::{msl, spirv}; + +use std::{ + cell::RefCell, + fmt, + ops::Range, + os::raw::{c_long, c_void}, + ptr, + sync::{atomic::AtomicBool, Arc}, +}; + + +pub type EntryPointMap = FastHashMap; + +pub type PoolResourceIndex = u32; + + + +pub enum ShaderModule { + Compiled(ModuleInfo), + Raw(Vec), +} + +impl fmt::Debug for ShaderModule { + fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { + match *self { + ShaderModule::Compiled(_) => write!(formatter, "ShaderModule::Compiled(..)"), + ShaderModule::Raw(ref vec) => { + write!(formatter, "ShaderModule::Raw(length = {})", vec.len()) + } + } + } +} + +unsafe impl Send for ShaderModule {} +unsafe impl Sync for ShaderModule {} + +bitflags! { + /// Subpass attachment operations. 
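+    /// LOAD keeps the existing attachment contents when the subpass begins;
+    /// STORE persists the attachment contents when it ends.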
+ pub struct SubpassOps: u8 { + const LOAD = 0x0; + const STORE = 0x1; + } +} + +#[derive(Clone, Debug, Default, Eq, PartialEq)] +pub struct SubpassFormats { + pub colors: ArrayVec<[(metal::MTLPixelFormat, Channel); MAX_COLOR_ATTACHMENTS]>, + pub depth_stencil: Option, +} + +impl SubpassFormats { + pub fn copy_from(&mut self, other: &Self) { + self.colors.clear(); + self.colors.extend(other.colors.iter().cloned()); + self.depth_stencil = other.depth_stencil; + } +} + +#[derive(Debug)] +pub struct Subpass { + pub colors: ArrayVec<[(AttachmentId, SubpassOps, Option); MAX_COLOR_ATTACHMENTS]>, + pub depth_stencil: Option<(AttachmentId, SubpassOps)>, + pub inputs: Vec, + pub target_formats: SubpassFormats, +} + +#[derive(Debug)] +pub struct RenderPass { + pub(crate) attachments: Vec, + pub(crate) subpasses: Vec, + pub(crate) name: String, +} + +#[derive(Debug)] +pub struct Framebuffer { + pub(crate) extent: image::Extent, + pub(crate) attachments: Vec, +} + +unsafe impl Send for Framebuffer {} +unsafe impl Sync for Framebuffer {} + + +#[derive(Clone, Debug)] +pub struct ResourceData { + pub buffers: T, + pub textures: T, + pub samplers: T, +} + +impl ResourceData { + pub fn map V>(&self, fun: F) -> ResourceData { + ResourceData { + buffers: fun(&self.buffers), + textures: fun(&self.textures), + samplers: fun(&self.samplers), + } + } +} + +impl ResourceData> { + pub fn expand(&mut self, point: ResourceData) { + + self.buffers.end = self.buffers.end.max(point.buffers); + self.textures.end = self.textures.end.max(point.textures); + self.samplers.end = self.samplers.end.max(point.samplers); + } +} + +impl ResourceData { + pub fn new() -> Self { + ResourceData { + buffers: 0, + textures: 0, + samplers: 0, + } + } +} + + + + + + + + + + + +impl ResourceData { + #[inline] + pub fn add_many(&mut self, content: DescriptorContent, count: PoolResourceIndex) { + if content.contains(DescriptorContent::BUFFER) { + self.buffers += count; + } + if content.contains(DescriptorContent::TEXTURE) { + self.textures += count; + } + if content.contains(DescriptorContent::SAMPLER) { + self.samplers += count; + } + } + #[inline] + pub fn add(&mut self, content: DescriptorContent) { + self.add_many(content, 1) + } +} + +#[derive(Debug)] +pub struct MultiStageData { + pub vs: T, + pub ps: T, + pub cs: T, +} + +pub type MultiStageResourceCounters = MultiStageData>; + +#[derive(Debug)] +pub struct DescriptorSetInfo { + pub offsets: MultiStageResourceCounters, + pub dynamic_buffers: Vec>, +} + +#[derive(Copy, Clone, Debug, Eq, PartialEq)] +pub struct PushConstantInfo { + pub count: u32, + pub buffer_index: ResourceIndex, +} + +#[derive(Debug)] +pub struct PipelineLayout { + pub(crate) shader_compiler_options: msl::CompilerOptions, + pub(crate) shader_compiler_options_point: msl::CompilerOptions, + pub(crate) infos: Vec, + pub(crate) total: MultiStageResourceCounters, + pub(crate) push_constants: MultiStageData>, + pub(crate) total_push_constants: u32, +} + +#[derive(Clone)] +pub struct ModuleInfo { + pub library: metal::Library, + pub entry_point_map: EntryPointMap, + pub rasterization_enabled: bool, +} + +pub struct PipelineCache { + pub(crate) modules: FastStorageMap, ModuleInfo>>, +} + +impl fmt::Debug for PipelineCache { + fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { + write!(formatter, "PipelineCache") + } +} + +#[derive(Clone, Debug, PartialEq)] +pub struct RasterizerState { + + pub front_winding: metal::MTLWinding, + pub fill_mode: metal::MTLTriangleFillMode, + pub cull_mode: metal::MTLCullMode, + 
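+    /// Requested depth clip mode; only applied when the device reports
+    /// PrivateCapabilities::depth_clip_mode support.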
pub depth_clip: Option, +} + +impl Default for RasterizerState { + fn default() -> Self { + RasterizerState { + front_winding: metal::MTLWinding::Clockwise, + fill_mode: metal::MTLTriangleFillMode::Fill, + cull_mode: metal::MTLCullMode::None, + depth_clip: None, + } + } +} + +#[derive(Debug)] +pub struct StencilState { + pub reference_values: pso::Sided, + pub read_masks: pso::Sided, + pub write_masks: pso::Sided, +} + +pub type VertexBufferVec = Vec<(pso::VertexBufferDesc, pso::ElemOffset)>; + +#[derive(Debug)] +pub struct GraphicsPipeline { + + + pub(crate) vs_lib: metal::Library, + pub(crate) fs_lib: Option, + pub(crate) raw: metal::RenderPipelineState, + pub(crate) primitive_type: metal::MTLPrimitiveType, + pub(crate) vs_pc_info: Option, + pub(crate) ps_pc_info: Option, + pub(crate) rasterizer_state: Option, + pub(crate) depth_bias: pso::State, + pub(crate) depth_stencil_desc: pso::DepthStencilDesc, + pub(crate) baked_states: pso::BakedStates, + + + + + pub(crate) vertex_buffers: VertexBufferVec, + + pub(crate) attachment_formats: SubpassFormats, +} + +unsafe impl Send for GraphicsPipeline {} +unsafe impl Sync for GraphicsPipeline {} + +#[derive(Debug)] +pub struct ComputePipeline { + pub(crate) cs_lib: metal::Library, + pub(crate) raw: metal::ComputePipelineState, + pub(crate) work_group_size: metal::MTLSize, + pub(crate) pc_info: Option, +} + +unsafe impl Send for ComputePipeline {} +unsafe impl Sync for ComputePipeline {} + +#[derive(Debug)] +pub enum ImageLike { + + Unbound { + descriptor: metal::TextureDescriptor, + mip_sizes: Vec, + host_visible: bool, + name: String, + }, + + Buffer(Buffer), + + Texture(metal::Texture), +} + +impl ImageLike { + pub fn as_texture(&self) -> &metal::TextureRef { + match *self { + ImageLike::Unbound { .. } | ImageLike::Buffer(..) 
=> panic!("Expected bound image!"), + ImageLike::Texture(ref tex) => tex, + } + } +} + +#[derive(Debug)] +pub struct Image { + pub(crate) like: ImageLike, + pub(crate) kind: image::Kind, + pub(crate) format_desc: FormatDesc, + pub(crate) shader_channel: Channel, + pub(crate) mtl_format: metal::MTLPixelFormat, + pub(crate) mtl_type: metal::MTLTextureType, +} + +impl Image { + pub(crate) fn pitches_impl( + extent: image::Extent, + format_desc: FormatDesc, + ) -> [buffer::Offset; 4] { + let bytes_per_texel = format_desc.bits as image::Size >> 3; + let row_pitch = extent.width * bytes_per_texel; + let depth_pitch = extent.height * row_pitch; + let array_pitch = extent.depth * depth_pitch; + [ + bytes_per_texel as _, + row_pitch as _, + depth_pitch as _, + array_pitch as _, + ] + } + pub(crate) fn pitches(&self, level: image::Level) -> [buffer::Offset; 4] { + let extent = self.kind.extent().at_level(level); + Self::pitches_impl(extent, self.format_desc) + } + pub(crate) fn byte_offset(&self, offset: image::Offset) -> buffer::Offset { + let pitches = Self::pitches_impl(self.kind.extent(), self.format_desc); + pitches[0] * offset.x as buffer::Offset + + pitches[1] * offset.y as buffer::Offset + + pitches[2] * offset.z as buffer::Offset + } + pub(crate) fn byte_extent(&self, extent: image::Extent) -> buffer::Offset { + let bytes_per_texel = self.format_desc.bits as image::Size >> 3; + (bytes_per_texel * extent.width * extent.height * extent.depth) as _ + } + + pub(crate) fn view_cube_as_2d(&self) -> Option { + match self.mtl_type { + metal::MTLTextureType::Cube | metal::MTLTextureType::CubeArray => { + let raw = self.like.as_texture(); + Some(raw.new_texture_view_from_slice( + self.mtl_format, + metal::MTLTextureType::D2Array, + NSRange { + location: 0, + length: raw.mipmap_level_count(), + }, + NSRange { + location: 0, + length: self.kind.num_layers() as _, + }, + )) + } + _ => None, + } + } +} + +unsafe impl Send for Image {} +unsafe impl Sync for Image {} + +#[derive(Debug)] +pub struct BufferView { + pub(crate) raw: metal::Texture, +} + +unsafe impl Send for BufferView {} +unsafe impl Sync for BufferView {} + +#[derive(Debug)] +pub struct ImageView { + pub(crate) texture: metal::Texture, + pub(crate) mtl_format: metal::MTLPixelFormat, +} + +unsafe impl Send for ImageView {} +unsafe impl Sync for ImageView {} + +#[derive(Debug)] +pub struct Sampler { + pub(crate) raw: Option, + pub(crate) data: msl::SamplerData, +} + +unsafe impl Send for Sampler {} +unsafe impl Sync for Sampler {} + +#[derive(Clone, Debug)] +pub struct Semaphore { + pub(crate) system: Option, + pub(crate) image_ready: Arc>>, +} + +#[derive(Debug)] +pub enum Buffer { + Unbound { + size: u64, + usage: buffer::Usage, + name: String, + }, + Bound { + raw: metal::Buffer, + range: Range, + options: metal::MTLResourceOptions, + }, +} + +unsafe impl Send for Buffer {} +unsafe impl Sync for Buffer {} + +impl Buffer { + + pub fn as_bound(&self) -> (&metal::BufferRef, &Range) { + match *self { + Buffer::Unbound { .. } => panic!("Expected bound buffer!"), + Buffer::Bound { + ref raw, ref range, .. 
+ } => (raw, range), + } + } +} + +#[derive(Debug)] +pub struct DescriptorEmulatedPoolInner { + pub(crate) samplers: Vec>, + pub(crate) textures: Vec>, + pub(crate) buffers: Vec>, +} + +#[derive(Debug)] +pub struct DescriptorArgumentPoolInner { + pub(crate) resources: Vec, +} + +#[derive(Debug)] +pub enum DescriptorPool { + Emulated { + inner: Arc>, + allocators: ResourceData>, + }, + ArgumentBuffer { + raw: metal::Buffer, + raw_allocator: RangeAllocator, + alignment: buffer::Offset, + inner: Arc>, + res_allocator: RangeAllocator, + }, +} + +unsafe impl Send for DescriptorPool {} +unsafe impl Sync for DescriptorPool {} + +impl DescriptorPool { + pub(crate) fn new_emulated(counters: ResourceData) -> Self { + let inner = DescriptorEmulatedPoolInner { + samplers: vec![None; counters.samplers as usize], + textures: vec![None; counters.textures as usize], + buffers: vec![None; counters.buffers as usize], + }; + DescriptorPool::Emulated { + inner: Arc::new(RwLock::new(inner)), + allocators: ResourceData { + samplers: RangeAllocator::new(0 .. counters.samplers), + textures: RangeAllocator::new(0 .. counters.textures), + buffers: RangeAllocator::new(0 .. counters.buffers), + }, + } + } + + pub(crate) fn new_argument( + raw: metal::Buffer, + total_bytes: buffer::Offset, + alignment: buffer::Offset, + total_resources: usize, + ) -> Self { + let default = UsedResource { + ptr: ptr::null_mut(), + usage: metal::MTLResourceUsage::empty(), + }; + DescriptorPool::ArgumentBuffer { + raw, + raw_allocator: RangeAllocator::new(0 .. total_bytes), + alignment, + inner: Arc::new(RwLock::new(DescriptorArgumentPoolInner { + resources: vec![default; total_resources], + })), + res_allocator: RangeAllocator::new(0 .. total_resources as PoolResourceIndex), + } + } + + fn report_available(&self) { + match *self { + DescriptorPool::Emulated { ref allocators, .. } => { + trace!( + "\tavailable {} samplers, {} textures, and {} buffers", + allocators.samplers.total_available(), + allocators.textures.total_available(), + allocators.buffers.total_available(), + ); + } + DescriptorPool::ArgumentBuffer { + ref raw_allocator, + ref res_allocator, + .. + } => { + trace!( + "\tavailable {} bytes for {} resources", + raw_allocator.total_available(), + res_allocator.total_available(), + ); + } + } + } +} + +impl pso::DescriptorPool for DescriptorPool { + unsafe fn allocate_set( + &mut self, + set_layout: &DescriptorSetLayout, + ) -> Result { + self.report_available(); + match *self { + DescriptorPool::Emulated { + ref inner, + ref mut allocators, + } => { + debug!("pool: allocate_set"); + let layouts = match *set_layout { + DescriptorSetLayout::Emulated(ref layouts, _) => layouts, + _ => return Err(pso::AllocationError::IncompatibleLayout), + }; + + + let mut total = ResourceData::new(); + for layout in layouts.iter() { + total.add(layout.content); + } + debug!("\ttotal {:?}", total); + + + let sampler_range = if total.samplers != 0 { + match allocators.samplers.allocate_range(total.samplers as _) { + Ok(range) => range, + Err(e) => { + return Err(if e.fragmented_free_length >= total.samplers { + pso::AllocationError::FragmentedPool + } else { + pso::AllocationError::OutOfPoolMemory + }); + } + } + } else { + 0 .. 
0 + }; + let texture_range = if total.textures != 0 { + match allocators.textures.allocate_range(total.textures as _) { + Ok(range) => range, + Err(e) => { + if sampler_range.end != 0 { + allocators.samplers.free_range(sampler_range); + } + return Err(if e.fragmented_free_length >= total.samplers { + pso::AllocationError::FragmentedPool + } else { + pso::AllocationError::OutOfPoolMemory + }); + } + } + } else { + 0 .. 0 + }; + let buffer_range = if total.buffers != 0 { + match allocators.buffers.allocate_range(total.buffers as _) { + Ok(range) => range, + Err(e) => { + if sampler_range.end != 0 { + allocators.samplers.free_range(sampler_range); + } + if texture_range.end != 0 { + allocators.textures.free_range(texture_range); + } + return Err(if e.fragmented_free_length >= total.samplers { + pso::AllocationError::FragmentedPool + } else { + pso::AllocationError::OutOfPoolMemory + }); + } + } + } else { + 0 .. 0 + }; + + let resources = ResourceData { + buffers: buffer_range, + textures: texture_range, + samplers: sampler_range, + }; + + Ok(DescriptorSet::Emulated { + pool: Arc::clone(inner), + layouts: Arc::clone(layouts), + resources, + }) + } + DescriptorPool::ArgumentBuffer { + ref raw, + ref mut raw_allocator, + alignment, + ref inner, + ref mut res_allocator, + } => { + let (encoder, stage_flags, bindings, total) = match *set_layout { + DescriptorSetLayout::ArgumentBuffer { + ref encoder, + stage_flags, + ref bindings, + total, + .. + } => (encoder, stage_flags, bindings, total), + _ => return Err(pso::AllocationError::IncompatibleLayout), + }; + let range = res_allocator + .allocate_range(total as PoolResourceIndex) + .map_err(|_| pso::AllocationError::OutOfPoolMemory)?; + + let raw_range = raw_allocator + .allocate_range(encoder.encoded_length() + alignment) + .expect("Argument encoding length is inconsistent!"); + let raw_offset = (raw_range.start + alignment - 1) & !(alignment - 1); + + let mut data = inner.write(); + for arg in bindings.values() { + if arg.res.buffer_id != !0 || arg.res.texture_id != !0 { + let pos = (range.start + arg.res_offset) as usize; + for ur in data.resources[pos .. pos + arg.count].iter_mut() { + ur.usage = arg.usage; + } + } + } + + Ok(DescriptorSet::ArgumentBuffer { + raw: raw.clone(), + raw_offset, + pool: Arc::clone(inner), + range, + encoder: encoder.clone(), + bindings: Arc::clone(bindings), + stage_flags, + }) + } + } + } + + unsafe fn free_sets(&mut self, descriptor_sets: I) + where + I: IntoIterator, + { + match self { + DescriptorPool::Emulated { + ref inner, + ref mut allocators, + } => { + debug!("pool: free_sets"); + let mut data = inner.write(); + for descriptor_set in descriptor_sets { + match descriptor_set { + DescriptorSet::Emulated { resources, .. } => { + debug!("\t{:?} resources", resources); + for sampler in &mut data.samplers[resources.samplers.start as usize + .. resources.samplers.end as usize] + { + *sampler = None; + } + if resources.samplers.start != resources.samplers.end { + allocators.samplers.free_range(resources.samplers); + } + for image in &mut data.textures[resources.textures.start as usize + .. resources.textures.end as usize] + { + *image = None; + } + if resources.textures.start != resources.textures.end { + allocators.textures.free_range(resources.textures); + } + for buffer in &mut data.buffers + [resources.buffers.start as usize .. 
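+                            // Clearing each slot to `None` is what actually
+                            // releases the retained Metal object; the index
+                            // range handed back to the allocator below carries
+                            // no ownership of its own.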
resources.buffers.end as usize] + { + *buffer = None; + } + if resources.buffers.start != resources.buffers.end { + allocators.buffers.free_range(resources.buffers); + } + } + DescriptorSet::ArgumentBuffer { .. } => panic!( + "Tried to free a DescriptorSet not given out by this DescriptorPool!" + ), + } + } + } + DescriptorPool::ArgumentBuffer { + ref mut raw_allocator, + ref mut res_allocator, + ref inner, + .. + } => { + let mut data = inner.write(); + for descriptor_set in descriptor_sets { + match descriptor_set { + DescriptorSet::Emulated { .. } => panic!( + "Tried to free a DescriptorSet not given out by this DescriptorPool!" + ), + DescriptorSet::ArgumentBuffer { + raw_offset, + range, + encoder, + .. + } => { + for ur in data.resources[range.start as usize .. range.end as usize] + .iter_mut() + { + ur.ptr = ptr::null_mut(); + ur.usage = metal::MTLResourceUsage::empty(); + } + + let handle_range = raw_offset .. raw_offset + encoder.encoded_length(); + raw_allocator.free_range(handle_range); + res_allocator.free_range(range); + } + } + } + } + } + self.report_available(); + } + + unsafe fn reset(&mut self) { + match *self { + DescriptorPool::Emulated { + ref inner, + ref mut allocators, + } => { + debug!("pool: reset"); + if allocators.samplers.is_empty() + && allocators.textures.is_empty() + && allocators.buffers.is_empty() + { + return; + } + let mut data = inner.write(); + + for range in allocators.samplers.allocated_ranges() { + for sampler in &mut data.samplers[range.start as usize .. range.end as usize] { + *sampler = None; + } + } + for range in allocators.textures.allocated_ranges() { + for texture in &mut data.textures[range.start as usize .. range.end as usize] { + *texture = None; + } + } + for range in allocators.buffers.allocated_ranges() { + for buffer in &mut data.buffers[range.start as usize .. range.end as usize] { + *buffer = None; + } + } + + allocators.samplers.reset(); + allocators.textures.reset(); + allocators.buffers.reset(); + } + DescriptorPool::ArgumentBuffer { + ref mut raw_allocator, + ref mut res_allocator, + .. + } => { + raw_allocator.reset(); + res_allocator.reset(); + } + } + } +} + +bitflags! { + /// Descriptor content flags. 
+    pub struct DescriptorContent: u8 {
+        const BUFFER = 1<<0;
+        const DYNAMIC_BUFFER = 1<<1;
+        const TEXTURE = 1<<2;
+        const SAMPLER = 1<<3;
+        const IMMUTABLE_SAMPLER = 1<<4;
+    }
+}
+
+impl From<pso::DescriptorType> for DescriptorContent {
+    fn from(ty: pso::DescriptorType) -> Self {
+        match ty {
+            pso::DescriptorType::Sampler => DescriptorContent::SAMPLER,
+            pso::DescriptorType::CombinedImageSampler => {
+                DescriptorContent::TEXTURE | DescriptorContent::SAMPLER
+            }
+            pso::DescriptorType::SampledImage
+            | pso::DescriptorType::StorageImage
+            | pso::DescriptorType::UniformTexelBuffer
+            | pso::DescriptorType::StorageTexelBuffer
+            | pso::DescriptorType::InputAttachment => DescriptorContent::TEXTURE,
+            pso::DescriptorType::UniformBuffer | pso::DescriptorType::StorageBuffer => {
+                DescriptorContent::BUFFER
+            }
+            pso::DescriptorType::UniformBufferDynamic
+            | pso::DescriptorType::StorageBufferDynamic => {
+                DescriptorContent::BUFFER | DescriptorContent::DYNAMIC_BUFFER
+            }
+        }
+    }
+}
+
+#[derive(Debug)]
+pub struct DescriptorLayout {
+    pub content: DescriptorContent,
+    pub stages: pso::ShaderStageFlags,
+    pub binding: pso::DescriptorBinding,
+    pub array_index: pso::DescriptorArrayIndex,
+}
+
+#[derive(Debug)]
+pub struct ArgumentLayout {
+    pub(crate) res: msl::ResourceBinding,
+    pub(crate) res_offset: PoolResourceIndex,
+    pub(crate) count: pso::DescriptorArrayIndex,
+    pub(crate) usage: metal::MTLResourceUsage,
+    pub(crate) content: DescriptorContent,
+}
+
+#[derive(Debug)]
+pub enum DescriptorSetLayout {
+    Emulated(
+        Arc<Vec<DescriptorLayout>>,
+        Vec<(pso::DescriptorBinding, msl::SamplerData)>,
+    ),
+    ArgumentBuffer {
+        encoder: metal::ArgumentEncoder,
+        stage_flags: pso::ShaderStageFlags,
+        bindings: Arc<FastHashMap<pso::DescriptorBinding, ArgumentLayout>>,
+        total: PoolResourceIndex,
+    },
+}
+unsafe impl Send for DescriptorSetLayout {}
+unsafe impl Sync for DescriptorSetLayout {}
+
+#[derive(Clone, Debug)]
+pub struct UsedResource {
+    pub(crate) ptr: *mut metal::MTLResource,
+    pub(crate) usage: metal::MTLResourceUsage,
+}
+
+#[derive(Debug)]
+pub enum DescriptorSet {
+    Emulated {
+        pool: Arc<RwLock<DescriptorEmulatedPoolInner>>,
+        layouts: Arc<Vec<DescriptorLayout>>,
+        resources: ResourceData<Range<PoolResourceIndex>>,
+    },
+    ArgumentBuffer {
+        raw: metal::Buffer,
+        raw_offset: buffer::Offset,
+        pool: Arc<RwLock<DescriptorArgumentPoolInner>>,
+        range: Range<PoolResourceIndex>,
+        encoder: metal::ArgumentEncoder,
+        bindings: Arc<FastHashMap<pso::DescriptorBinding, ArgumentLayout>>,
+        stage_flags: pso::ShaderStageFlags,
+    },
+}
+unsafe impl Send for DescriptorSet {}
+unsafe impl Sync for DescriptorSet {}
+
+#[derive(Debug)]
+pub struct Memory {
+    pub(crate) heap: MemoryHeap,
+    pub(crate) size: u64,
+}
+
+impl Memory {
+    pub(crate) fn new(heap: MemoryHeap, size: u64) -> Self {
+        Memory { heap, size }
+    }
+
+    pub(crate) fn resolve<R: RangeArg<u64>>(&self, range: &R) -> Range<u64> {
+        *range.start().unwrap_or(&0) ..
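+        // `resolve` turns any `RangeArg<u64>` (`..`, `a ..`, `.. b`, `a .. b`)
+        // into a concrete byte range, defaulting open ends to the whole
+        // allocation. A small sketch:
+        //
+        //     let mem = Memory::new(MemoryHeap::Private, 1024);
+        //     assert_eq!(mem.resolve(&(..)), 0 .. 1024);
+        //     assert_eq!(mem.resolve(&(256u64 ..)), 256 .. 1024);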
*range.end().unwrap_or(&self.size) + } +} + +unsafe impl Send for Memory {} +unsafe impl Sync for Memory {} + +#[derive(Debug)] +pub(crate) enum MemoryHeap { + Private, + Public(MemoryTypeId, metal::Buffer), + Native(metal::Heap), +} + +#[derive(Default)] +pub(crate) struct ArgumentArray { + arguments: Vec, + position: usize, +} + +impl ArgumentArray { + pub fn describe_usage(ty: pso::DescriptorType) -> metal::MTLResourceUsage { + use hal::pso::DescriptorType as Dt; + use metal::MTLResourceUsage; + + match ty { + Dt::Sampler => MTLResourceUsage::empty(), + Dt::CombinedImageSampler | Dt::SampledImage | Dt::InputAttachment => { + MTLResourceUsage::Sample + } + Dt::UniformTexelBuffer => MTLResourceUsage::Sample, + Dt::UniformBuffer | Dt::UniformBufferDynamic => MTLResourceUsage::Read, + Dt::StorageImage + | Dt::StorageBuffer + | Dt::StorageBufferDynamic + | Dt::StorageTexelBuffer => MTLResourceUsage::Write, + } + } + + pub fn push( + &mut self, + ty: metal::MTLDataType, + count: usize, + usage: metal::MTLResourceUsage, + ) -> usize { + use metal::{MTLArgumentAccess, MTLResourceUsage}; + + let pos = self.position; + self.position += count; + let access = if usage == MTLResourceUsage::Write { + MTLArgumentAccess::ReadWrite + } else { + MTLArgumentAccess::ReadOnly + }; + + let arg = metal::ArgumentDescriptor::new(); + arg.set_array_length(count as u64); + arg.set_index(pos as u64); + arg.set_access(access); + arg.set_data_type(ty); + self.arguments.push(arg.to_owned()); + + pos + } + + pub fn build<'a>(self) -> (&'a metal::ArrayRef, usize) { + ( + metal::Array::from_owned_slice(&self.arguments), + self.position, + ) + } +} + +#[derive(Debug)] +pub enum QueryPool { + Occlusion(Range), +} + +#[derive(Debug)] +pub enum FenceInner { + Idle { + signaled: bool, + }, + PendingSubmission(metal::CommandBuffer), + AcquireFrame { + swapchain_image: SwapchainImage, + iteration: usize, + }, +} + +#[derive(Debug)] +pub struct Fence(pub(crate) RefCell); + +unsafe impl Send for Fence {} +unsafe impl Sync for Fence {} + + +#[derive(Debug)] +pub struct Event(pub(crate) Arc); + +extern "C" { + fn dispatch_semaphore_wait(semaphore: *mut c_void, timeout: u64) -> c_long; + fn dispatch_semaphore_signal(semaphore: *mut c_void) -> c_long; + fn dispatch_semaphore_create(value: c_long) -> *mut c_void; + fn dispatch_release(object: *mut c_void); +} + +#[cfg(feature = "signpost")] +extern "C" { + fn kdebug_signpost(code: u32, arg1: usize, arg2: usize, arg3: usize, arg4: usize); + fn kdebug_signpost_start(code: u32, arg1: usize, arg2: usize, arg3: usize, arg4: usize); + fn kdebug_signpost_end(code: u32, arg1: usize, arg2: usize, arg3: usize, arg4: usize); +} + +#[derive(Clone, Debug)] +pub struct SystemSemaphore(*mut c_void); +unsafe impl Send for SystemSemaphore {} +unsafe impl Sync for SystemSemaphore {} + +impl Drop for SystemSemaphore { + fn drop(&mut self) { + unsafe { dispatch_release(self.0) } + } +} +impl SystemSemaphore { + pub(crate) fn new() -> Self { + SystemSemaphore(unsafe { dispatch_semaphore_create(1) }) + } + pub(crate) fn signal(&self) { + unsafe { + dispatch_semaphore_signal(self.0); + } + } + pub(crate) fn wait(&self, timeout: u64) { + unsafe { + dispatch_semaphore_wait(self.0, timeout); + } + } +} + +#[derive(Clone, Debug)] +pub struct Signpost { + code: u32, + args: [usize; 4], +} + +impl Drop for Signpost { + fn drop(&mut self) { + #[cfg(feature = "signpost")] + unsafe { + kdebug_signpost_end( + self.code, + self.args[0], + self.args[1], + self.args[2], + self.args[3], + ); + } + } +} + +impl Signpost 
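+// `Signpost` is an RAII marker over the kdebug signpost calls declared above:
+// `new` emits a start event, `Drop` emits the matching end event, and `place`
+// emits a single point event; without the "signpost" feature everything
+// compiles away. A usage sketch (the code value is arbitrary):
+//
+//     {
+//         let _sp = Signpost::new(0x100, [frame_index, 0, 0, 0]);
+//         // ... region of interest, visible in a system trace ...
+//     } // end event emitted on drop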
{ + pub(crate) fn new(code: u32, args: [usize; 4]) -> Self { + #[cfg(feature = "signpost")] + unsafe { + kdebug_signpost_start(code, args[0], args[1], args[2], args[3]); + } + Signpost { code, args } + } + pub(crate) fn place(code: u32, args: [usize; 4]) { + #[cfg(feature = "signpost")] + unsafe { + kdebug_signpost(code, args[0], args[1], args[2], args[3]); + } + #[cfg(not(feature = "signpost"))] + let _ = (code, args); + } +} diff --git a/third_party/rust/gfx-backend-metal/src/soft.rs b/third_party/rust/gfx-backend-metal/src/soft.rs new file mode 100644 index 000000000000..53c720bd9e98 --- /dev/null +++ b/third_party/rust/gfx-backend-metal/src/soft.rs @@ -0,0 +1,519 @@ +use crate::{ + command::IndexBuffer, + native::RasterizerState, + BufferPtr, + ResourceIndex, + ResourcePtr, + SamplerPtr, + TexturePtr, +}; + +use hal; +use metal; + +use std::{fmt::Debug, ops::Range}; + + +pub type CacheResourceIndex = u32; + +pub trait Resources: Debug { + type Data: Debug; + type BufferArray: Debug; + type TextureArray: Debug; + type SamplerArray: Debug; + type DepthStencil: Debug; + type RenderPipeline: Debug; + type ComputePipeline: Debug; +} + +#[derive(Clone, Debug, Default)] +pub struct Own { + pub buffers: Vec>, + pub buffer_offsets: Vec, + pub textures: Vec>, + pub samplers: Vec>, +} + +impl Resources for Own { + type Data = Vec; + type BufferArray = Range; + type TextureArray = Range; + type SamplerArray = Range; + type DepthStencil = metal::DepthStencilState; + type RenderPipeline = metal::RenderPipelineState; + type ComputePipeline = metal::ComputePipelineState; +} + +#[derive(Debug)] +pub struct Ref; +impl<'a> Resources for &'a Ref { + type Data = &'a [u32]; + type BufferArray = (&'a [Option], &'a [hal::buffer::Offset]); + type TextureArray = &'a [Option]; + type SamplerArray = &'a [Option]; + type DepthStencil = &'a metal::DepthStencilStateRef; + type RenderPipeline = &'a metal::RenderPipelineStateRef; + type ComputePipeline = &'a metal::ComputePipelineStateRef; +} + + + +#[derive(Clone, Debug)] +pub enum RenderCommand { + SetViewport(hal::pso::Rect, Range), + SetScissor(metal::MTLScissorRect), + SetBlendColor(hal::pso::ColorValue), + SetDepthBias(hal::pso::DepthBias), + SetDepthStencilState(R::DepthStencil), + SetStencilReferenceValues(hal::pso::Sided), + SetRasterizerState(RasterizerState), + SetVisibilityResult(metal::MTLVisibilityResultMode, hal::buffer::Offset), + BindBuffer { + stage: hal::pso::Stage, + index: ResourceIndex, + buffer: BufferPtr, + offset: hal::buffer::Offset, + }, + BindBuffers { + stage: hal::pso::Stage, + index: ResourceIndex, + buffers: R::BufferArray, + }, + BindBufferData { + stage: hal::pso::Stage, + index: ResourceIndex, + words: R::Data, + }, + BindTextures { + stage: hal::pso::Stage, + index: ResourceIndex, + textures: R::TextureArray, + }, + BindSamplers { + stage: hal::pso::Stage, + index: ResourceIndex, + samplers: R::SamplerArray, + }, + BindPipeline(R::RenderPipeline), + UseResource { + resource: ResourcePtr, + usage: metal::MTLResourceUsage, + }, + Draw { + primitive_type: metal::MTLPrimitiveType, + vertices: Range, + instances: Range, + }, + DrawIndexed { + primitive_type: metal::MTLPrimitiveType, + index: IndexBuffer, + indices: Range, + base_vertex: hal::VertexOffset, + instances: Range, + }, + DrawIndirect { + primitive_type: metal::MTLPrimitiveType, + buffer: BufferPtr, + offset: hal::buffer::Offset, + }, + DrawIndexedIndirect { + primitive_type: metal::MTLPrimitiveType, + index: IndexBuffer, + buffer: BufferPtr, + offset: hal::buffer::Offset, + }, 
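+    // The `R: Resources` parameter is what lets one command enum serve both
+    // recording modes: `RenderCommand<&Ref>` borrows its arrays as plain
+    // slices for immediate encoding, while `RenderCommand<Own>` stores them
+    // as `Range` indices into the side tables that `own_render` (below)
+    // fills in when a command list is retained for deferred replay.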
+} + +#[derive(Clone, Debug)] +pub enum BlitCommand { + FillBuffer { + dst: BufferPtr, + range: Range, + value: u8, + }, + CopyBuffer { + src: BufferPtr, + dst: BufferPtr, + region: hal::command::BufferCopy, + }, + CopyImage { + src: TexturePtr, + dst: TexturePtr, + region: hal::command::ImageCopy, + }, + CopyBufferToImage { + src: BufferPtr, + dst: TexturePtr, + dst_desc: hal::format::FormatDesc, + region: hal::command::BufferImageCopy, + }, + CopyImageToBuffer { + src: TexturePtr, + src_desc: hal::format::FormatDesc, + dst: BufferPtr, + region: hal::command::BufferImageCopy, + }, +} + +#[derive(Clone, Debug)] +pub enum ComputeCommand { + BindBuffer { + index: ResourceIndex, + buffer: BufferPtr, + offset: hal::buffer::Offset, + }, + BindBuffers { + index: ResourceIndex, + buffers: R::BufferArray, + }, + BindBufferData { + index: ResourceIndex, + words: R::Data, + }, + BindTextures { + index: ResourceIndex, + textures: R::TextureArray, + }, + BindSamplers { + index: ResourceIndex, + samplers: R::SamplerArray, + }, + BindPipeline(R::ComputePipeline), + UseResource { + resource: ResourcePtr, + usage: metal::MTLResourceUsage, + }, + Dispatch { + wg_size: metal::MTLSize, + wg_count: metal::MTLSize, + }, + DispatchIndirect { + wg_size: metal::MTLSize, + buffer: BufferPtr, + offset: hal::buffer::Offset, + }, +} + +#[derive(Clone, Debug)] +pub enum Pass { + Render(metal::RenderPassDescriptor), + Blit, + Compute, +} + +impl Own { + pub fn clear(&mut self) { + self.buffers.clear(); + self.buffer_offsets.clear(); + self.textures.clear(); + self.samplers.clear(); + } + + pub fn own_render(&mut self, com: RenderCommand<&Ref>) -> RenderCommand { + use self::RenderCommand::*; + match com { + SetViewport(rect, depth) => SetViewport(rect, depth), + SetScissor(rect) => SetScissor(rect), + SetBlendColor(color) => SetBlendColor(color), + SetDepthBias(bias) => SetDepthBias(bias), + SetDepthStencilState(state) => SetDepthStencilState(state.to_owned()), + SetStencilReferenceValues(sided) => SetStencilReferenceValues(sided), + SetRasterizerState(ref state) => SetRasterizerState(state.clone()), + SetVisibilityResult(mode, offset) => SetVisibilityResult(mode, offset), + BindBuffer { + stage, + index, + buffer, + offset, + } => BindBuffer { + stage, + index, + buffer, + offset, + }, + BindBuffers { + stage, + index, + buffers: (buffers, offsets), + } => BindBuffers { + stage, + index, + buffers: { + let start = self.buffers.len() as CacheResourceIndex; + self.buffers.extend_from_slice(buffers); + self.buffer_offsets.extend_from_slice(offsets); + start .. self.buffers.len() as CacheResourceIndex + }, + }, + BindBufferData { + stage, + index, + words, + } => BindBufferData { + stage, + index, + words: words.to_vec(), + }, + BindTextures { + stage, + index, + textures, + } => BindTextures { + stage, + index, + textures: { + let start = self.textures.len() as CacheResourceIndex; + self.textures.extend_from_slice(textures); + start .. self.textures.len() as CacheResourceIndex + }, + }, + BindSamplers { + stage, + index, + samplers, + } => BindSamplers { + stage, + index, + samplers: { + let start = self.samplers.len() as CacheResourceIndex; + self.samplers.extend_from_slice(samplers); + start .. 
self.samplers.len() as CacheResourceIndex + }, + }, + BindPipeline(pso) => BindPipeline(pso.to_owned()), + UseResource { resource, usage } => UseResource { resource, usage }, + Draw { + primitive_type, + vertices, + instances, + } => Draw { + primitive_type, + vertices, + instances, + }, + DrawIndexed { + primitive_type, + index, + indices, + base_vertex, + instances, + } => DrawIndexed { + primitive_type, + index, + indices, + base_vertex, + instances, + }, + DrawIndirect { + primitive_type, + buffer, + offset, + } => DrawIndirect { + primitive_type, + buffer, + offset, + }, + DrawIndexedIndirect { + primitive_type, + index, + buffer, + offset, + } => DrawIndexedIndirect { + primitive_type, + index, + buffer, + offset, + }, + } + } + + pub fn own_compute(&mut self, com: ComputeCommand<&Ref>) -> ComputeCommand { + use self::ComputeCommand::*; + match com { + BindBuffer { + index, + buffer, + offset, + } => BindBuffer { + index, + buffer, + offset, + }, + BindBuffers { + index, + buffers: (buffers, offsets), + } => BindBuffers { + index, + buffers: { + let start = self.buffers.len() as CacheResourceIndex; + self.buffers.extend_from_slice(buffers); + self.buffer_offsets.extend_from_slice(offsets); + start .. self.buffers.len() as CacheResourceIndex + }, + }, + BindBufferData { index, words } => BindBufferData { + index, + words: words.to_vec(), + }, + BindTextures { index, textures } => BindTextures { + index, + textures: { + let start = self.textures.len() as CacheResourceIndex; + self.textures.extend_from_slice(textures); + start .. self.textures.len() as CacheResourceIndex + }, + }, + BindSamplers { index, samplers } => BindSamplers { + index, + samplers: { + let start = self.samplers.len() as CacheResourceIndex; + self.samplers.extend_from_slice(samplers); + start .. self.samplers.len() as CacheResourceIndex + }, + }, + BindPipeline(pso) => BindPipeline(pso.to_owned()), + UseResource { resource, usage } => UseResource { resource, usage }, + Dispatch { wg_size, wg_count } => Dispatch { wg_size, wg_count }, + DispatchIndirect { + wg_size, + buffer, + offset, + } => DispatchIndirect { + wg_size, + buffer, + offset, + }, + } + } + + pub fn rebase_render(&self, com: &mut RenderCommand) { + use self::RenderCommand::*; + match *com { + SetViewport(..) + | SetScissor(..) + | SetBlendColor(..) + | SetDepthBias(..) + | SetDepthStencilState(..) + | SetStencilReferenceValues(..) + | SetRasterizerState(..) + | SetVisibilityResult(..) + | BindBuffer { .. } => {} + BindBuffers { + ref mut buffers, .. + } => { + buffers.start += self.buffers.len() as CacheResourceIndex; + buffers.end += self.buffers.len() as CacheResourceIndex; + } + BindBufferData { .. } => {} + BindTextures { + ref mut textures, .. + } => { + textures.start += self.textures.len() as CacheResourceIndex; + textures.end += self.textures.len() as CacheResourceIndex; + } + BindSamplers { + ref mut samplers, .. + } => { + samplers.start += self.samplers.len() as CacheResourceIndex; + samplers.end += self.samplers.len() as CacheResourceIndex; + } + BindPipeline(..) + | UseResource { .. } + | Draw { .. } + | DrawIndexed { .. } + | DrawIndirect { .. } + | DrawIndexedIndirect { .. } => {} + } + } + + pub fn rebase_compute(&self, com: &mut ComputeCommand) { + use self::ComputeCommand::*; + match *com { + BindBuffer { .. } => {} + BindBuffers { + ref mut buffers, .. + } => { + buffers.start += self.buffers.len() as CacheResourceIndex; + buffers.end += self.buffers.len() as CacheResourceIndex; + } + BindBufferData { .. 
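+            // `BindBufferData` keeps its words inline rather than in the side
+            // tables, so it needs no rebasing; only the `Range`-based bindings
+            // in the surrounding arms are shifted by the destination cache's
+            // current lengths before `extend` concatenates the tables.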
} => {} + BindTextures { + ref mut textures, .. + } => { + textures.start += self.textures.len() as CacheResourceIndex; + textures.end += self.textures.len() as CacheResourceIndex; + } + BindSamplers { + ref mut samplers, .. + } => { + samplers.start += self.samplers.len() as CacheResourceIndex; + samplers.end += self.samplers.len() as CacheResourceIndex; + } + BindPipeline(..) | UseResource { .. } | Dispatch { .. } | DispatchIndirect { .. } => {} + } + } + + pub fn extend(&mut self, other: &Self) { + self.buffers.extend_from_slice(&other.buffers); + self.buffer_offsets.extend_from_slice(&other.buffer_offsets); + self.textures.extend_from_slice(&other.textures); + self.samplers.extend_from_slice(&other.samplers); + } +} + + + +pub trait AsSlice { + fn as_slice<'a>(&'a self, resources: &'a R) -> &'a [T]; +} +impl<'b, T> AsSlice, &'b Ref> for &'b [Option] { + #[inline(always)] + fn as_slice<'a>(&'a self, _: &'a &'b Ref) -> &'a [Option] { + self + } +} +impl<'b> AsSlice, &'b Ref> + for (&'b [Option], &'b [hal::buffer::Offset]) +{ + #[inline(always)] + fn as_slice<'a>(&'a self, _: &'a &'b Ref) -> &'a [Option] { + self.0 + } +} +impl<'b> AsSlice + for (&'b [Option], &'b [hal::buffer::Offset]) +{ + #[inline(always)] + fn as_slice<'a>(&'a self, _: &'a &'b Ref) -> &'a [hal::buffer::Offset] { + self.1 + } +} +impl AsSlice, Own> for Range { + #[inline(always)] + fn as_slice<'a>(&'a self, resources: &'a Own) -> &'a [Option] { + &resources.buffers[self.start as usize .. self.end as usize] + } +} +impl AsSlice for Range { + #[inline(always)] + fn as_slice<'a>(&'a self, resources: &'a Own) -> &'a [hal::buffer::Offset] { + &resources.buffer_offsets[self.start as usize .. self.end as usize] + } +} +impl AsSlice, Own> for Range { + #[inline(always)] + fn as_slice<'a>(&'a self, resources: &'a Own) -> &'a [Option] { + &resources.textures[self.start as usize .. self.end as usize] + } +} +impl AsSlice, Own> for Range { + #[inline(always)] + fn as_slice<'a>(&'a self, resources: &'a Own) -> &'a [Option] { + &resources.samplers[self.start as usize .. 
self.end as usize] + } +} + +fn _test_render_command_size(com: RenderCommand) -> [usize; 6] { + use std::mem; + unsafe { mem::transmute(com) } +} diff --git a/third_party/rust/gfx-backend-metal/src/window.rs b/third_party/rust/gfx-backend-metal/src/window.rs new file mode 100644 index 000000000000..a0f67a12d34d --- /dev/null +++ b/third_party/rust/gfx-backend-metal/src/window.rs @@ -0,0 +1,732 @@ +use crate::{ + device::{Device, PhysicalDevice}, + internal::Channel, + native, + Backend, + QueueFamily, + Shared, +}; + +use hal::{format, image, window as w}; + +use core_graphics::base::CGFloat; +use core_graphics::geometry::{CGRect, CGSize}; +use foreign_types::{ForeignType, ForeignTypeRef}; +use metal; +use objc::rc::autoreleasepool; +use objc::runtime::Object; +use parking_lot::{Mutex, MutexGuard}; + +use std::borrow::Borrow; +use std::ptr::NonNull; +use std::sync::Arc; +use std::thread; + + + + + +pub type CAMetalLayer = *mut Object; + + +const SIGNPOST_ID: u32 = 0x100; + +#[derive(Debug)] +pub struct Surface { + inner: Arc, + swapchain_format: metal::MTLPixelFormat, + main_thread_id: thread::ThreadId, +} + +#[derive(Debug)] +pub(crate) struct SurfaceInner { + view: Option>, + render_layer: Mutex, + + enable_signposts: bool, +} + +unsafe impl Send for SurfaceInner {} +unsafe impl Sync for SurfaceInner {} + +impl Drop for SurfaceInner { + fn drop(&mut self) { + let object = match self.view { + Some(view) => view.as_ptr(), + None => *self.render_layer.lock(), + }; + unsafe { + let () = msg_send![object, release]; + } + } +} + +#[derive(Debug)] +struct FrameNotFound { + drawable: metal::Drawable, + texture: metal::Texture, +} + +impl SurfaceInner { + pub fn new(view: Option>, layer: CAMetalLayer) -> Self { + SurfaceInner { + view, + render_layer: Mutex::new(layer), + enable_signposts: false, + } + } + + pub fn into_surface(mut self, enable_signposts: bool) -> Surface { + self.enable_signposts = enable_signposts; + Surface { + inner: Arc::new(self), + swapchain_format: metal::MTLPixelFormat::Invalid, + main_thread_id: thread::current().id(), + } + } + + fn configure(&self, shared: &Shared, config: &w::SwapchainConfig) -> metal::MTLPixelFormat { + info!("build swapchain {:?}", config); + + let caps = &shared.private_caps; + let mtl_format = caps + .map_format(config.format) + .expect("unsupported backbuffer format"); + + let render_layer_borrow = self.render_layer.lock(); + let render_layer = *render_layer_borrow; + let framebuffer_only = config.image_usage == image::Usage::COLOR_ATTACHMENT; + let display_sync = config.present_mode != w::PresentMode::IMMEDIATE; + let is_mac = caps.os_is_mac; + let can_set_next_drawable_timeout = if is_mac { + caps.has_version_at_least(10, 13) + } else { + caps.has_version_at_least(11, 0) + }; + let can_set_display_sync = is_mac && caps.has_version_at_least(10, 13); + let drawable_size = CGSize::new(config.extent.width as f64, config.extent.height as f64); + + let device_raw = shared.device.lock().as_ptr(); + unsafe { + + + + + #[cfg(target_os = "ios")] + { + if let Some(view) = self.view { + let main_layer: *mut Object = msg_send![view.as_ptr(), layer]; + let bounds: CGRect = msg_send![main_layer, bounds]; + let () = msg_send![render_layer, setFrame: bounds]; + } + } + let () = msg_send![render_layer, setDevice: device_raw]; + let () = msg_send![render_layer, setPixelFormat: mtl_format]; + let () = msg_send![render_layer, setFramebufferOnly: framebuffer_only]; + + + let () = msg_send![render_layer, setMaximumDrawableCount: config.image_count as u64]; + + let 
() = msg_send![render_layer, setDrawableSize: drawable_size]; + if can_set_next_drawable_timeout { + let () = msg_send![render_layer, setAllowsNextDrawableTimeout:false]; + } + if can_set_display_sync { + let () = msg_send![render_layer, setDisplaySyncEnabled: display_sync]; + } + }; + + mtl_format + } + + fn next_frame<'a>( + &self, + frames: &'a [Frame], + ) -> Result<(usize, MutexGuard<'a, FrameInner>), FrameNotFound> { + let layer_ref = self.render_layer.lock(); + autoreleasepool(|| { + + let _signpost = if self.enable_signposts { + Some(native::Signpost::new(SIGNPOST_ID, [0, 0, 0, 0])) + } else { + None + }; + let (drawable, texture_temp): (&metal::DrawableRef, &metal::TextureRef) = unsafe { + let drawable = msg_send![*layer_ref, nextDrawable]; + (drawable, msg_send![drawable, texture]) + }; + + trace!("looking for {:?}", texture_temp); + match frames + .iter() + .position(|f| f.texture.as_ptr() == texture_temp.as_ptr()) + { + Some(index) => { + let mut frame = frames[index].inner.lock(); + assert!(frame.drawable.is_none()); + frame.iteration += 1; + frame.drawable = Some(drawable.to_owned()); + if self.enable_signposts && false { + + frame.signpost = Some(native::Signpost::new( + SIGNPOST_ID, + [1, index as usize, 0, 0], + )); + } + + debug!("Next is frame[{}]", index); + Ok((index, frame)) + } + None => Err(FrameNotFound { + drawable: drawable.to_owned(), + texture: texture_temp.to_owned(), + }), + } + }) + } + + fn dimensions(&self) -> w::Extent2D { + let (size, scale): (CGSize, CGFloat) = match self.view { + Some(view) if !cfg!(target_os = "macos") => unsafe { + let bounds: CGRect = msg_send![view.as_ptr(), bounds]; + let window: Option> = msg_send![view.as_ptr(), window]; + let screen = window.and_then(|window| -> Option> { + msg_send![window.as_ptr(), screen] + }); + match screen { + Some(screen) => { + let screen_space: *mut Object = msg_send![screen.as_ptr(), coordinateSpace]; + let rect: CGRect = msg_send![view.as_ptr(), convertRect:bounds toCoordinateSpace:screen_space]; + let scale_factor: CGFloat = msg_send![screen.as_ptr(), nativeScale]; + (rect.size, scale_factor) + } + None => (bounds.size, 1.0), + } + }, + _ => unsafe { + let render_layer = *self.render_layer.lock(); + let bounds: CGRect = msg_send![render_layer, bounds]; + let contents_scale: CGFloat = msg_send![render_layer, contentsScale]; + (bounds.size, contents_scale) + }, + }; + w::Extent2D { + width: (size.width * scale) as u32, + height: (size.height * scale) as u32, + } + } +} + +#[derive(Debug)] +struct FrameInner { + drawable: Option, + signpost: Option, + + + + available: bool, + + + linked: bool, + iteration: usize, + last_frame: usize, +} + +#[derive(Debug)] +struct Frame { + inner: Mutex, + texture: metal::Texture, +} + +unsafe impl Send for Frame {} +unsafe impl Sync for Frame {} + +impl Drop for Frame { + fn drop(&mut self) { + info!("dropping Frame"); + } +} + + +#[derive(Clone, Debug, PartialEq)] +pub enum AcquireMode { + Wait, + Oldest, +} + +impl Default for AcquireMode { + fn default() -> Self { + AcquireMode::Oldest + } +} + + +#[derive(Clone, Debug, PartialEq)] +pub enum ResizeFill { + Empty, + Clear(hal::pso::ColorValue), + Blit, +} + +impl Default for ResizeFill { + fn default() -> Self { + ResizeFill::Clear([0.0; 4]) + } +} + +#[derive(Debug)] +pub struct Swapchain { + frames: Arc>, + surface: Arc, + extent: w::Extent2D, + last_frame: usize, + pub acquire_mode: AcquireMode, + pub resize_fill: ResizeFill, +} + +impl Drop for Swapchain { + fn drop(&mut self) { + info!("dropping Swapchain"); + 
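+        // Nothing is torn down explicitly here: `frames` is an `Arc` shared
+        // with any outstanding `SwapchainImage`s, so the `Frame`s (and the
+        // `metal::Drawable`s they retain) are released only when the last
+        // reference goes away.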
} +} + +impl Swapchain { + fn clear_drawables(&self) { + for frame in self.frames.iter() { + let mut inner = frame.inner.lock(); + inner.drawable = None; + inner.signpost = None; + } + } + + + + pub(crate) fn take_drawable(&self, index: w::SwapImageIndex) -> Result { + let mut frame = self.frames[index as usize].inner.lock(); + assert!(!frame.available && frame.linked); + frame.signpost = None; + + match frame.drawable.take() { + Some(drawable) => { + debug!("Making frame {} available again", index); + frame.available = true; + Ok(drawable) + } + None => { + warn!("Failed to get the drawable of frame {}", index); + frame.linked = false; + Err(()) + } + } + } +} + +#[derive(Debug)] +pub struct SwapchainImage { + frames: Arc>, + surface: Arc, + index: w::SwapImageIndex, +} + +impl SwapchainImage { + + pub fn iteration(&self) -> usize { + self.frames[self.index as usize].inner.lock().iteration + } + + + + pub fn wait_until_ready(&self) -> usize { + + { + let frame = self.frames[self.index as usize].inner.lock(); + assert!(!frame.available); + if frame.drawable.is_some() { + return 0; + } + } + + let mut count = 1; + loop { + match self.surface.next_frame(&self.frames) { + Ok((index, _)) if index == self.index as usize => { + debug!( + "Swapchain image {} is ready after {} frames", + self.index, count + ); + break; + } + Ok(_) => { + count += 1; + } + Err(_e) => { + warn!( + "Swapchain drawables are changed, unable to wait for {}", + self.index + ); + break; + } + } + } + count + } +} + +#[derive(Debug)] +pub struct SurfaceImage { + view: native::ImageView, + drawable: metal::Drawable, +} + +unsafe impl Send for SurfaceImage {} +unsafe impl Sync for SurfaceImage {} + +impl SurfaceImage { + pub(crate) fn into_drawable(self) -> metal::Drawable { + self.drawable + } +} + +impl Borrow for SurfaceImage { + fn borrow(&self) -> &native::ImageView { + &self.view + } +} + +impl w::Surface for Surface { + fn supports_queue_family(&self, _queue_family: &QueueFamily) -> bool { + + true + } + + fn capabilities(&self, physical_device: &PhysicalDevice) -> w::SurfaceCapabilities { + let current_extent = if self.main_thread_id == thread::current().id() { + Some(self.inner.dimensions()) + } else { + warn!("Unable to get the current view dimensions on a non-main thread"); + None + }; + + let device_caps = &physical_device.shared.private_caps; + + let can_set_maximum_drawables_count = + device_caps.os_is_mac || device_caps.has_version_at_least(11, 2); + let can_set_display_sync = + device_caps.os_is_mac && device_caps.has_version_at_least(10, 13); + + w::SurfaceCapabilities { + present_modes: if can_set_display_sync { + w::PresentMode::FIFO | w::PresentMode::IMMEDIATE + } else { + w::PresentMode::FIFO + }, + composite_alpha_modes: w::CompositeAlphaMode::OPAQUE, + + image_count: if can_set_maximum_drawables_count { + 2 ..= 3 + } else { + + + 3 ..= 3 + }, + current_extent, + extents: w::Extent2D { + width: 4, + height: 4, + } ..= w::Extent2D { + width: 4096, + height: 4096, + }, + max_image_layers: 1, + usage: image::Usage::COLOR_ATTACHMENT + | image::Usage::SAMPLED + | image::Usage::TRANSFER_SRC + | image::Usage::TRANSFER_DST, + } + } + + fn supported_formats(&self, _physical_device: &PhysicalDevice) -> Option> { + Some(vec![ + format::Format::Bgra8Unorm, + format::Format::Bgra8Srgb, + format::Format::Rgba16Sfloat, + ]) + } +} + +impl w::PresentationSurface for Surface { + type SwapchainImage = SurfaceImage; + + unsafe fn configure_swapchain( + &mut self, + device: &Device, + config: w::SwapchainConfig, + ) -> 
Result<(), w::CreationError> { + assert!(image::Usage::COLOR_ATTACHMENT.contains(config.image_usage)); + self.swapchain_format = self.inner.configure(&device.shared, &config); + Ok(()) + } + + unsafe fn unconfigure_swapchain(&mut self, _device: &Device) { + self.swapchain_format = metal::MTLPixelFormat::Invalid; + } + + unsafe fn acquire_image( + &mut self, + _timeout_ns: u64, + ) -> Result<(Self::SwapchainImage, Option), w::AcquireError> { + let render_layer_borrow = self.inner.render_layer.lock(); + let (drawable, texture) = autoreleasepool(|| { + let drawable: &metal::DrawableRef = msg_send![*render_layer_borrow, nextDrawable]; + assert!(!drawable.as_ptr().is_null()); + let texture: &metal::TextureRef = msg_send![drawable, texture]; + (drawable.to_owned(), texture.to_owned()) + }); + + let image = SurfaceImage { + view: native::ImageView { + texture, + mtl_format: self.swapchain_format, + }, + drawable, + }; + Ok((image, None)) + } +} + +impl Device { + pub(crate) fn build_swapchain( + &self, + surface: &mut Surface, + config: w::SwapchainConfig, + old_swapchain: Option, + ) -> (Swapchain, Vec) { + if let Some(ref sc) = old_swapchain { + sc.clear_drawables(); + } + + let mtl_format = surface.inner.configure(&self.shared, &config); + + let cmd_queue = self.shared.queue.lock(); + let format_desc = config.format.surface_desc(); + let render_layer_borrow = surface.inner.render_layer.lock(); + + let frames = (0 .. config.image_count) + .map(|index| { + autoreleasepool(|| { + + let (drawable, texture) = unsafe { + let drawable: &metal::DrawableRef = + msg_send![*render_layer_borrow, nextDrawable]; + assert!(!drawable.as_ptr().is_null()); + let texture: &metal::TextureRef = msg_send![drawable, texture]; + (drawable, texture) + }; + trace!("\tframe[{}] = {:?}", index, texture); + + let drawable = if index == 0 { + + match old_swapchain { + Some(ref old) => { + let cmd_buffer = cmd_queue.spawn_temp(); + match old.resize_fill { + ResizeFill::Empty => {} + ResizeFill::Clear(value) => { + let descriptor = metal::RenderPassDescriptor::new().to_owned(); + let attachment = descriptor.color_attachments().object_at(0).unwrap(); + attachment.set_texture(Some(texture)); + attachment.set_store_action(metal::MTLStoreAction::Store); + attachment.set_load_action(metal::MTLLoadAction::Clear); + attachment.set_clear_color(metal::MTLClearColor::new( + value[0] as _, + value[1] as _, + value[2] as _, + value[3] as _, + )); + let encoder = cmd_buffer.new_render_command_encoder(&descriptor); + encoder.end_encoding(); + } + ResizeFill::Blit => { + self.shared.service_pipes.simple_blit( + &self.shared.device, + cmd_buffer, + &old.frames[0].texture, + texture, + &self.shared.private_caps, + ); + } + } + cmd_buffer.present_drawable(drawable); + cmd_buffer.set_label("build_swapchain"); + cmd_buffer.commit(); + cmd_buffer.wait_until_completed(); + } + None => { + + drawable.present(); + } + } + None + } else { + Some(drawable.to_owned()) + }; + Frame { + inner: Mutex::new(FrameInner { + drawable, + signpost: if index != 0 && surface.inner.enable_signposts { + Some(native::Signpost::new( + SIGNPOST_ID, + [1, index as usize, 0, 0], + )) + } else { + None + }, + available: true, + linked: true, + iteration: 0, + last_frame: 0, + }), + texture: texture.to_owned(), + } + }) + }) + .collect::>(); + + let images = frames + .iter() + .map(|frame| native::Image { + like: native::ImageLike::Texture(frame.texture.clone()), + kind: image::Kind::D2(config.extent.width, config.extent.height, 1, 1), + format_desc, + shader_channel: 
Channel::Float, + mtl_format, + mtl_type: metal::MTLTextureType::D2, + }) + .collect(); + let (acquire_mode, resize_fill) = old_swapchain + .map(|ref old| (old.acquire_mode.clone(), old.resize_fill.clone())) + .unwrap_or_default(); + + let swapchain = Swapchain { + frames: Arc::new(frames), + surface: surface.inner.clone(), + extent: config.extent, + last_frame: 0, + acquire_mode, + resize_fill, + }; + + (swapchain, images) + } +} + +impl w::Swapchain for Swapchain { + unsafe fn acquire_image( + &mut self, + _timeout_ns: u64, + semaphore: Option<&native::Semaphore>, + fence: Option<&native::Fence>, + ) -> Result<(w::SwapImageIndex, Option), w::AcquireError> { + self.last_frame += 1; + + + if false && self.surface.dimensions() != self.extent { + + native::Signpost::place(SIGNPOST_ID, [0, 0, 0, 0]); + unimplemented!() + } + + let mut oldest_index = self.frames.len(); + let mut oldest_frame = self.last_frame; + + for (index, frame_arc) in self.frames.iter().enumerate() { + let mut frame = frame_arc.inner.lock(); + if !frame.available { + continue; + } + if frame.drawable.is_some() { + debug!("Found drawable of frame {}, acquiring", index); + frame.available = false; + frame.last_frame = self.last_frame; + if self.surface.enable_signposts { + + frame.signpost = Some(native::Signpost::new(SIGNPOST_ID, [1, index, 0, 0])); + } + if let Some(semaphore) = semaphore { + if let Some(ref system) = semaphore.system { + system.signal(); + } + } + if let Some(fence) = fence { + fence.0.replace(native::FenceInner::Idle { signaled: true }); + } + return Ok((index as _, None)); + } + if frame.last_frame < oldest_frame { + oldest_frame = frame.last_frame; + oldest_index = index; + } + } + + let (index, mut frame) = match self.acquire_mode { + AcquireMode::Wait => { + let pair = self + .surface + .next_frame(&self.frames) + .map_err(|_| w::AcquireError::OutOfDate)?; + + if let Some(fence) = fence { + fence.0.replace(native::FenceInner::Idle { signaled: true }); + } + pair + } + AcquireMode::Oldest => { + let frame = match self.frames.get(oldest_index) { + Some(frame) => frame.inner.lock(), + None => { + warn!("No frame is available"); + return Err(w::AcquireError::OutOfDate); + } + }; + if !frame.linked { + return Err(w::AcquireError::OutOfDate); + } + + if let Some(semaphore) = semaphore { + let mut sw_image = semaphore.image_ready.lock(); + if let Some(ref swi) = *sw_image { + warn!("frame {} hasn't been waited upon", swi.index); + } + *sw_image = Some(SwapchainImage { + frames: Arc::clone(&self.frames), + surface: Arc::clone(&self.surface), + index: oldest_index as _, + }); + } + if let Some(fence) = fence { + fence.0.replace(native::FenceInner::AcquireFrame { + swapchain_image: SwapchainImage { + frames: Arc::clone(&self.frames), + surface: Arc::clone(&self.surface), + index: oldest_index as _, + }, + iteration: frame.iteration, + }); + } + + (oldest_index, frame) + } + }; + + debug!("Acquiring frame {}", index); + assert!(frame.available); + frame.last_frame = self.last_frame; + frame.available = false; + if self.surface.enable_signposts { + + frame.signpost = Some(native::Signpost::new(SIGNPOST_ID, [1, index, 0, 0])); + } + + Ok((index as _, None)) + } +} diff --git a/third_party/rust/gfx-backend-vulkan/.cargo-checksum.json b/third_party/rust/gfx-backend-vulkan/.cargo-checksum.json new file mode 100644 index 000000000000..5c300ca5d8b0 --- /dev/null +++ b/third_party/rust/gfx-backend-vulkan/.cargo-checksum.json @@ -0,0 +1 @@ 
+{"files":{"Cargo.toml":"9b7f10d02014d4221b88d1283d75ec575e3bff017cc92581179b03caf19b76b2","README.md":"8cc42e022567870c58a53ff1cb6f94e961482e789fe5e22f9960408a43cf8405","src/command.rs":"4fb6d6ffb4b3e312f0e84818e80adaeef6206fe87827c447cf1b25d3effb74a5","src/conv.rs":"075f844081adb4fb7a76ab3950b017a44648a5eb9bf3721647a4f5bce617105b","src/device.rs":"1331e5820806d3a7c1286e141c41a6bd2e8fba0b50783ba9702bef8310f9c5e2","src/info.rs":"4a21b54f85ff73c538ca2f57f4d371eb862b5a28f126cd0ecafd37fc6dfd1318","src/lib.rs":"6feddaec1156bc91aa02a474fd866f2767954d27dfe33dd40f860836db3ef9a5","src/native.rs":"fc8c7d40054f59eeb36db5c4c439e2173cd9e967c4d69797f223e1c58748f71d","src/pool.rs":"8bfd5f750baef41a7edc539433f7e417e367dc60debfcb002188e12b0f9bd933","src/window.rs":"297d8995fa35c1faa1e9cde52210dc4241a9f4bb0741dbf870d7301c4a2baa29"},"package":"62538fedd66a78968a162e8e1a29d085ffbc97f8782634684b2f7da7aea59207"} \ No newline at end of file diff --git a/third_party/rust/gfx-backend-vulkan/Cargo.toml b/third_party/rust/gfx-backend-vulkan/Cargo.toml new file mode 100644 index 000000000000..5386dee27824 --- /dev/null +++ b/third_party/rust/gfx-backend-vulkan/Cargo.toml @@ -0,0 +1,75 @@ +# THIS FILE IS AUTOMATICALLY GENERATED BY CARGO +# +# When uploading crates to the registry Cargo will automatically +# "normalize" Cargo.toml files for maximal compatibility +# with all versions of Cargo and also rewrite `path` dependencies +# to registry (e.g., crates.io) dependencies +# +# If you believe there's an error in this file please file an +# issue against the rust-lang/cargo repository. If you're +# editing this file be aware that the upstream Cargo.toml +# will likely look very different (and much more reasonable) + +[package] +edition = "2018" +name = "gfx-backend-vulkan" +version = "0.4.0" +authors = ["The Gfx-rs Developers"] +description = "Vulkan API backend for gfx-rs" +homepage = "https://github.com/gfx-rs/gfx" +documentation = "https://docs.rs/gfx-backend-vulkan" +readme = "README.md" +keywords = ["graphics", "gamedev"] +license = "MIT OR Apache-2.0" +repository = "https://github.com/gfx-rs/gfx" + +[lib] +name = "gfx_backend_vulkan" +[dependencies.arrayvec] +version = "0.5" + +[dependencies.ash] +version = "0.29.0" + +[dependencies.byteorder] +version = "1" + +[dependencies.hal] +version = "0.4" +package = "gfx-hal" + +[dependencies.lazy_static] +version = "1" + +[dependencies.log] +version = "0.4" + +[dependencies.raw-window-handle] +version = "0.3" + +[dependencies.shared_library] +version = "0.1.9" +optional = true + +[dependencies.smallvec] +version = "0.6" + +[features] +default = [] +use-rtld-next = ["shared_library"] +[target."cfg(all(unix, not(target_os = \"macos\"), not(target_os = \"ios\"), not(target_os = \"android\")))".dependencies.x11] +version = "2.15" +features = ["xlib"] +optional = true + +[target."cfg(all(unix, not(target_os = \"macos\"), not(target_os = \"ios\"), not(target_os = \"android\")))".dependencies.xcb] +version = "0.8" +optional = true +[target."cfg(target_os = \"macos\")".dependencies.core-graphics] +version = "0.17" + +[target."cfg(target_os = \"macos\")".dependencies.objc] +version = "0.2.5" +[target."cfg(windows)".dependencies.winapi] +version = "0.3" +features = ["libloaderapi", "windef", "winuser"] diff --git a/third_party/rust/gfx-backend-vulkan/README.md b/third_party/rust/gfx-backend-vulkan/README.md new file mode 100644 index 000000000000..0e8420ecb809 --- /dev/null +++ b/third_party/rust/gfx-backend-vulkan/README.md @@ -0,0 +1,13 @@ +# gfx-backend-vulkan + 
+[Vulkan](https://www.khronos.org/vulkan/) backend for gfx-rs. + +## Normalized Coordinates + +Render | Depth | Texture +-------|-------|-------- +![render_coordinates](../../../info/vk_render_coordinates.png) | ![depth_coordinates](../../../info/dx_depth_coordinates.png) | ![texture_coordinates](../../../info/dx_texture_coordinates.png) + +## Mirroring + +HAL is modelled after Vulkan, so everything should be 1:1. diff --git a/third_party/rust/gfx-backend-vulkan/src/command.rs b/third_party/rust/gfx-backend-vulkan/src/command.rs new file mode 100644 index 000000000000..4200ec28fb3f --- /dev/null +++ b/third_party/rust/gfx-backend-vulkan/src/command.rs @@ -0,0 +1,971 @@ +use ash::version::DeviceV1_0; +use ash::vk; +use smallvec::SmallVec; +use std::borrow::Borrow; +use std::ops::Range; +use std::sync::Arc; +use std::{mem, ptr, slice}; + +use crate::{conv, native as n, Backend, RawDevice}; +use hal::{ + buffer, + command as com, + format::Aspects, + image::{Filter, Layout, SubresourceRange}, + memory, + pso, + query, + range::RangeArg, + DrawCount, + IndexCount, + InstanceCount, + VertexCount, + VertexOffset, + WorkGroupCount, +}; + +#[derive(Debug)] +pub struct CommandBuffer { + pub raw: vk::CommandBuffer, + pub device: Arc, +} + +fn map_subpass_contents(contents: com::SubpassContents) -> vk::SubpassContents { + match contents { + com::SubpassContents::Inline => vk::SubpassContents::INLINE, + com::SubpassContents::SecondaryBuffers => vk::SubpassContents::SECONDARY_COMMAND_BUFFERS, + } +} + +fn map_buffer_image_regions(_image: &n::Image, regions: T) -> SmallVec<[vk::BufferImageCopy; 16]> +where + T: IntoIterator, + T::Item: Borrow, +{ + regions + .into_iter() + .map(|region| { + let r = region.borrow(); + let image_subresource = conv::map_subresource_layers(&r.image_layers); + vk::BufferImageCopy { + buffer_offset: r.buffer_offset, + buffer_row_length: r.buffer_width, + buffer_image_height: r.buffer_height, + image_subresource, + image_offset: conv::map_offset(r.image_offset), + image_extent: conv::map_extent(r.image_extent), + } + }) + .collect() +} + +struct BarrierSet { + global: SmallVec<[vk::MemoryBarrier; 4]>, + buffer: SmallVec<[vk::BufferMemoryBarrier; 4]>, + image: SmallVec<[vk::ImageMemoryBarrier; 4]>, +} + +fn destructure_barriers<'a, T>(barriers: T) -> BarrierSet +where + T: IntoIterator, + T::Item: Borrow>, +{ + let mut global: SmallVec<[vk::MemoryBarrier; 4]> = SmallVec::new(); + let mut buffer: SmallVec<[vk::BufferMemoryBarrier; 4]> = SmallVec::new(); + let mut image: SmallVec<[vk::ImageMemoryBarrier; 4]> = SmallVec::new(); + + for barrier in barriers { + match *barrier.borrow() { + memory::Barrier::AllBuffers(ref access) => { + global.push(vk::MemoryBarrier { + s_type: vk::StructureType::MEMORY_BARRIER, + p_next: ptr::null(), + src_access_mask: conv::map_buffer_access(access.start), + dst_access_mask: conv::map_buffer_access(access.end), + }); + } + memory::Barrier::AllImages(ref access) => { + global.push(vk::MemoryBarrier { + s_type: vk::StructureType::MEMORY_BARRIER, + p_next: ptr::null(), + src_access_mask: conv::map_image_access(access.start), + dst_access_mask: conv::map_image_access(access.end), + }); + } + memory::Barrier::Buffer { + ref states, + target, + ref range, + ref families, + } => { + let families = match families { + Some(f) => f.start.0 as u32 .. f.end.0 as u32, + None => vk::QUEUE_FAMILY_IGNORED .. 
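+                    // `families: None` means no queue-family ownership
+                    // transfer, which Vulkan spells as QUEUE_FAMILY_IGNORED
+                    // in both the src and dst slots.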
vk::QUEUE_FAMILY_IGNORED, + }; + buffer.push(vk::BufferMemoryBarrier { + s_type: vk::StructureType::BUFFER_MEMORY_BARRIER, + p_next: ptr::null(), + src_access_mask: conv::map_buffer_access(states.start), + dst_access_mask: conv::map_buffer_access(states.end), + src_queue_family_index: families.start, + dst_queue_family_index: families.end, + buffer: target.raw, + offset: range.start.unwrap_or(0), + size: range + .end + .map_or(vk::WHOLE_SIZE, |end| end - range.start.unwrap_or(0)), + }); + } + memory::Barrier::Image { + ref states, + target, + ref range, + ref families, + } => { + let subresource_range = conv::map_subresource_range(range); + let families = match families { + Some(f) => f.start.0 as u32 .. f.end.0 as u32, + None => vk::QUEUE_FAMILY_IGNORED .. vk::QUEUE_FAMILY_IGNORED, + }; + image.push(vk::ImageMemoryBarrier { + s_type: vk::StructureType::IMAGE_MEMORY_BARRIER, + p_next: ptr::null(), + src_access_mask: conv::map_image_access(states.start.0), + dst_access_mask: conv::map_image_access(states.end.0), + old_layout: conv::map_image_layout(states.start.1), + new_layout: conv::map_image_layout(states.end.1), + src_queue_family_index: families.start, + dst_queue_family_index: families.end, + image: target.raw, + subresource_range, + }); + } + } + } + + BarrierSet { + global, + buffer, + image, + } +} + +impl CommandBuffer { + fn bind_descriptor_sets( + &mut self, + bind_point: vk::PipelineBindPoint, + layout: &n::PipelineLayout, + first_set: usize, + sets: I, + offsets: J, + ) where + I: IntoIterator, + I::Item: Borrow, + J: IntoIterator, + J::Item: Borrow, + { + let sets: SmallVec<[_; 16]> = sets.into_iter().map(|set| set.borrow().raw).collect(); + let dynamic_offsets: SmallVec<[_; 16]> = + offsets.into_iter().map(|offset| *offset.borrow()).collect(); + + unsafe { + self.device.0.cmd_bind_descriptor_sets( + self.raw, + bind_point, + layout.raw, + first_set as u32, + &sets, + &dynamic_offsets, + ); + } + } +} + +impl com::CommandBuffer for CommandBuffer { + unsafe fn begin( + &mut self, + flags: com::CommandBufferFlags, + info: com::CommandBufferInheritanceInfo, + ) { + let inheritance_info = vk::CommandBufferInheritanceInfo { + s_type: vk::StructureType::COMMAND_BUFFER_INHERITANCE_INFO, + p_next: ptr::null(), + render_pass: info + .subpass + .map_or(vk::RenderPass::null(), |subpass| subpass.main_pass.raw), + subpass: info.subpass.map_or(0, |subpass| subpass.index as u32), + framebuffer: info + .framebuffer + .map_or(vk::Framebuffer::null(), |buffer| buffer.raw), + occlusion_query_enable: if info.occlusion_query_enable { + vk::TRUE + } else { + vk::FALSE + }, + query_flags: conv::map_query_control_flags(info.occlusion_query_flags), + pipeline_statistics: conv::map_pipeline_statistics(info.pipeline_statistics), + }; + + let info = vk::CommandBufferBeginInfo { + s_type: vk::StructureType::COMMAND_BUFFER_BEGIN_INFO, + p_next: ptr::null(), + flags: conv::map_command_buffer_flags(flags), + p_inheritance_info: &inheritance_info, + }; + + assert_eq!(Ok(()), self.device.0.begin_command_buffer(self.raw, &info)); + } + + unsafe fn finish(&mut self) { + assert_eq!(Ok(()), self.device.0.end_command_buffer(self.raw)); + } + + unsafe fn reset(&mut self, release_resources: bool) { + let flags = if release_resources { + vk::CommandBufferResetFlags::RELEASE_RESOURCES + } else { + vk::CommandBufferResetFlags::empty() + }; + + assert_eq!(Ok(()), self.device.0.reset_command_buffer(self.raw, flags)); + } + + unsafe fn begin_render_pass( + &mut self, + render_pass: &n::RenderPass, + frame_buffer: 
&n::Framebuffer, + render_area: pso::Rect, + clear_values: T, + first_subpass: com::SubpassContents, + ) where + T: IntoIterator, + T::Item: Borrow, + { + let render_area = conv::map_rect(&render_area); + + + + let clear_value_count = 64 - render_pass.clear_attachments_mask.leading_zeros() as u32; + let mut clear_value_iter = clear_values.into_iter(); + let raw_clear_values = (0 .. clear_value_count) + .map(|i| { + if render_pass.clear_attachments_mask & (1 << i) != 0 { + + let next = clear_value_iter.next().unwrap(); + mem::transmute(*next.borrow()) + } else { + mem::zeroed() + } + }) + .collect::>(); + + let info = vk::RenderPassBeginInfo { + s_type: vk::StructureType::RENDER_PASS_BEGIN_INFO, + p_next: ptr::null(), + render_pass: render_pass.raw, + framebuffer: frame_buffer.raw, + render_area, + clear_value_count, + p_clear_values: raw_clear_values.as_ptr(), + }; + + let contents = map_subpass_contents(first_subpass); + self.device + .0 + .cmd_begin_render_pass(self.raw, &info, contents); + } + + unsafe fn next_subpass(&mut self, contents: com::SubpassContents) { + let contents = map_subpass_contents(contents); + self.device.0.cmd_next_subpass(self.raw, contents); + } + + unsafe fn end_render_pass(&mut self) { + self.device.0.cmd_end_render_pass(self.raw); + } + + unsafe fn pipeline_barrier<'a, T>( + &mut self, + stages: Range, + dependencies: memory::Dependencies, + barriers: T, + ) where + T: IntoIterator, + T::Item: Borrow>, + { + let BarrierSet { + global, + buffer, + image, + } = destructure_barriers(barriers); + + self.device.0.cmd_pipeline_barrier( + self.raw, + conv::map_pipeline_stage(stages.start), + conv::map_pipeline_stage(stages.end), + mem::transmute(dependencies), + &global, + &buffer, + &image, + ); + } + + unsafe fn fill_buffer(&mut self, buffer: &n::Buffer, range: R, data: u32) + where + R: RangeArg, + { + let (offset, size) = conv::map_range_arg(&range); + self.device + .0 + .cmd_fill_buffer(self.raw, buffer.raw, offset, size, data); + } + + unsafe fn update_buffer(&mut self, buffer: &n::Buffer, offset: buffer::Offset, data: &[u8]) { + self.device + .0 + .cmd_update_buffer(self.raw, buffer.raw, offset, data); + } + + unsafe fn clear_image( + &mut self, + image: &n::Image, + layout: Layout, + value: com::ClearValue, + subresource_ranges: T, + ) where + T: IntoIterator, + T::Item: Borrow, + { + let mut color_ranges = Vec::new(); + let mut ds_ranges = Vec::new(); + + for subresource_range in subresource_ranges { + let sub = subresource_range.borrow(); + let aspect_ds = sub.aspects & (Aspects::DEPTH | Aspects::STENCIL); + let vk_range = conv::map_subresource_range(sub); + if sub.aspects.contains(Aspects::COLOR) { + color_ranges.push(vk::ImageSubresourceRange { + aspect_mask: conv::map_image_aspects(Aspects::COLOR), + ..vk_range + }); + } + if !aspect_ds.is_empty() { + ds_ranges.push(vk::ImageSubresourceRange { + aspect_mask: conv::map_image_aspects(aspect_ds), + ..vk_range + }); + } + } + + + let color_value = mem::transmute(value.color); + let depth_stencil_value = vk::ClearDepthStencilValue { + depth: value.depth_stencil.depth, + stencil: value.depth_stencil.stencil, + }; + + if !color_ranges.is_empty() { + self.device.0.cmd_clear_color_image( + self.raw, + image.raw, + conv::map_image_layout(layout), + &color_value, + &color_ranges, + ) + } + if !ds_ranges.is_empty() { + self.device.0.cmd_clear_depth_stencil_image( + self.raw, + image.raw, + conv::map_image_layout(layout), + &depth_stencil_value, + &ds_ranges, + ) + } + } + + unsafe fn clear_attachments(&mut self, 
clears: T, rects: U) + where + T: IntoIterator, + T::Item: Borrow, + U: IntoIterator, + U::Item: Borrow, + { + let clears: SmallVec<[vk::ClearAttachment; 16]> = clears + .into_iter() + .map(|clear| match *clear.borrow() { + com::AttachmentClear::Color { index, value } => vk::ClearAttachment { + aspect_mask: vk::ImageAspectFlags::COLOR, + color_attachment: index as _, + clear_value: vk::ClearValue { + color: mem::transmute(value), + }, + }, + com::AttachmentClear::DepthStencil { depth, stencil } => vk::ClearAttachment { + aspect_mask: if depth.is_some() { + vk::ImageAspectFlags::DEPTH + } else { + vk::ImageAspectFlags::empty() + } | if stencil.is_some() { + vk::ImageAspectFlags::STENCIL + } else { + vk::ImageAspectFlags::empty() + }, + color_attachment: 0, + clear_value: vk::ClearValue { + depth_stencil: vk::ClearDepthStencilValue { + depth: depth.unwrap_or_default(), + stencil: stencil.unwrap_or_default(), + }, + }, + }, + }) + .collect(); + + let rects: SmallVec<[vk::ClearRect; 16]> = rects + .into_iter() + .map(|rect| conv::map_clear_rect(rect.borrow())) + .collect(); + + self.device + .0 + .cmd_clear_attachments(self.raw, &clears, &rects) + } + + unsafe fn resolve_image( + &mut self, + src: &n::Image, + src_layout: Layout, + dst: &n::Image, + dst_layout: Layout, + regions: T, + ) where + T: IntoIterator, + T::Item: Borrow, + { + let regions = regions + .into_iter() + .map(|region| { + let r = region.borrow(); + vk::ImageResolve { + src_subresource: conv::map_subresource_layers(&r.src_subresource), + src_offset: conv::map_offset(r.src_offset), + dst_subresource: conv::map_subresource_layers(&r.dst_subresource), + dst_offset: conv::map_offset(r.dst_offset), + extent: conv::map_extent(r.extent), + } + }) + .collect::>(); + + self.device.0.cmd_resolve_image( + self.raw, + src.raw, + conv::map_image_layout(src_layout), + dst.raw, + conv::map_image_layout(dst_layout), + ®ions, + ); + } + + unsafe fn blit_image( + &mut self, + src: &n::Image, + src_layout: Layout, + dst: &n::Image, + dst_layout: Layout, + filter: Filter, + regions: T, + ) where + T: IntoIterator, + T::Item: Borrow, + { + let regions = regions + .into_iter() + .map(|region| { + let r = region.borrow(); + vk::ImageBlit { + src_subresource: conv::map_subresource_layers(&r.src_subresource), + src_offsets: [ + conv::map_offset(r.src_bounds.start), + conv::map_offset(r.src_bounds.end), + ], + dst_subresource: conv::map_subresource_layers(&r.dst_subresource), + dst_offsets: [ + conv::map_offset(r.dst_bounds.start), + conv::map_offset(r.dst_bounds.end), + ], + } + }) + .collect::>(); + + self.device.0.cmd_blit_image( + self.raw, + src.raw, + conv::map_image_layout(src_layout), + dst.raw, + conv::map_image_layout(dst_layout), + ®ions, + conv::map_filter(filter), + ); + } + + unsafe fn bind_index_buffer(&mut self, ibv: buffer::IndexBufferView) { + self.device.0.cmd_bind_index_buffer( + self.raw, + ibv.buffer.raw, + ibv.offset, + conv::map_index_type(ibv.index_type), + ); + } + + unsafe fn bind_vertex_buffers(&mut self, first_binding: pso::BufferIndex, buffers: I) + where + I: IntoIterator, + T: Borrow, + { + let (buffers, offsets): (SmallVec<[vk::Buffer; 16]>, SmallVec<[vk::DeviceSize; 16]>) = + buffers + .into_iter() + .map(|(buffer, offset)| (buffer.borrow().raw, offset)) + .unzip(); + + self.device + .0 + .cmd_bind_vertex_buffers(self.raw, first_binding, &buffers, &offsets); + } + + unsafe fn set_viewports(&mut self, first_viewport: u32, viewports: T) + where + T: IntoIterator, + T::Item: Borrow, + { + let viewports: 
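+        // As elsewhere in this file, a `SmallVec` with inline capacity keeps
+        // the common small-count case off the heap while still producing the
+        // contiguous slice `ash` expects.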
SmallVec<[vk::Viewport; 16]> = viewports + .into_iter() + .map(|viewport| conv::map_viewport(viewport.borrow())) + .collect(); + + self.device + .0 + .cmd_set_viewport(self.raw, first_viewport, &viewports); + } + + unsafe fn set_scissors(&mut self, first_scissor: u32, scissors: T) + where + T: IntoIterator, + T::Item: Borrow, + { + let scissors: SmallVec<[vk::Rect2D; 16]> = scissors + .into_iter() + .map(|scissor| conv::map_rect(scissor.borrow())) + .collect(); + + self.device + .0 + .cmd_set_scissor(self.raw, first_scissor, &scissors); + } + + unsafe fn set_stencil_reference(&mut self, faces: pso::Face, value: pso::StencilValue) { + + self.device + .0 + .cmd_set_stencil_reference(self.raw, mem::transmute(faces), value); + } + + unsafe fn set_stencil_read_mask(&mut self, faces: pso::Face, value: pso::StencilValue) { + + self.device + .0 + .cmd_set_stencil_compare_mask(self.raw, mem::transmute(faces), value); + } + + unsafe fn set_stencil_write_mask(&mut self, faces: pso::Face, value: pso::StencilValue) { + + self.device + .0 + .cmd_set_stencil_write_mask(self.raw, mem::transmute(faces), value); + } + + unsafe fn set_blend_constants(&mut self, color: pso::ColorValue) { + self.device.0.cmd_set_blend_constants(self.raw, &color); + } + + unsafe fn set_depth_bounds(&mut self, bounds: Range) { + self.device + .0 + .cmd_set_depth_bounds(self.raw, bounds.start, bounds.end); + } + + unsafe fn set_line_width(&mut self, width: f32) { + self.device.0.cmd_set_line_width(self.raw, width); + } + + unsafe fn set_depth_bias(&mut self, depth_bias: pso::DepthBias) { + self.device.0.cmd_set_depth_bias( + self.raw, + depth_bias.const_factor, + depth_bias.clamp, + depth_bias.slope_factor, + ); + } + + unsafe fn bind_graphics_pipeline(&mut self, pipeline: &n::GraphicsPipeline) { + self.device + .0 + .cmd_bind_pipeline(self.raw, vk::PipelineBindPoint::GRAPHICS, pipeline.0) + } + + unsafe fn bind_graphics_descriptor_sets( + &mut self, + layout: &n::PipelineLayout, + first_set: usize, + sets: I, + offsets: J, + ) where + I: IntoIterator, + I::Item: Borrow, + J: IntoIterator, + J::Item: Borrow, + { + self.bind_descriptor_sets( + vk::PipelineBindPoint::GRAPHICS, + layout, + first_set, + sets, + offsets, + ); + } + + unsafe fn bind_compute_pipeline(&mut self, pipeline: &n::ComputePipeline) { + self.device + .0 + .cmd_bind_pipeline(self.raw, vk::PipelineBindPoint::COMPUTE, pipeline.0) + } + + unsafe fn bind_compute_descriptor_sets( + &mut self, + layout: &n::PipelineLayout, + first_set: usize, + sets: I, + offsets: J, + ) where + I: IntoIterator, + I::Item: Borrow, + J: IntoIterator, + J::Item: Borrow, + { + self.bind_descriptor_sets( + vk::PipelineBindPoint::COMPUTE, + layout, + first_set, + sets, + offsets, + ); + } + + unsafe fn dispatch(&mut self, count: WorkGroupCount) { + self.device + .0 + .cmd_dispatch(self.raw, count[0], count[1], count[2]) + } + + unsafe fn dispatch_indirect(&mut self, buffer: &n::Buffer, offset: buffer::Offset) { + self.device + .0 + .cmd_dispatch_indirect(self.raw, buffer.raw, offset) + } + + unsafe fn copy_buffer(&mut self, src: &n::Buffer, dst: &n::Buffer, regions: T) + where + T: IntoIterator, + T::Item: Borrow, + { + let regions: SmallVec<[vk::BufferCopy; 16]> = regions + .into_iter() + .map(|region| { + let region = region.borrow(); + vk::BufferCopy { + src_offset: region.src, + dst_offset: region.dst, + size: region.size, + } + }) + .collect(); + + self.device + .0 + .cmd_copy_buffer(self.raw, src.raw, dst.raw, ®ions) + } + + unsafe fn copy_image( + &mut self, + src: &n::Image, + 
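+    // [Editorial sketch, not part of the vendored source] A recurring pattern
+    // in this file: per-command arrays (viewports, scissors, copy regions)
+    // are staged in `SmallVec<[_; 16]>` so the common case of a handful of
+    // elements stays on the stack and recording a command does not allocate.
+    // Roughly:
+    //
+    //     use smallvec::SmallVec;
+    //     let regions: SmallVec<[u32; 16]> = (0 .. 4).collect();
+    //     assert!(!regions.spilled()); // 4 <= 16, still on the stack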
src_layout: Layout,
+        dst: &n::Image,
+        dst_layout: Layout,
+        regions: T,
+    ) where
+        T: IntoIterator,
+        T::Item: Borrow<com::ImageCopy>,
+    {
+        let regions: SmallVec<[vk::ImageCopy; 16]> = regions
+            .into_iter()
+            .map(|region| {
+                let r = region.borrow();
+                vk::ImageCopy {
+                    src_subresource: conv::map_subresource_layers(&r.src_subresource),
+                    src_offset: conv::map_offset(r.src_offset),
+                    dst_subresource: conv::map_subresource_layers(&r.dst_subresource),
+                    dst_offset: conv::map_offset(r.dst_offset),
+                    extent: conv::map_extent(r.extent),
+                }
+            })
+            .collect();
+
+        self.device.0.cmd_copy_image(
+            self.raw,
+            src.raw,
+            conv::map_image_layout(src_layout),
+            dst.raw,
+            conv::map_image_layout(dst_layout),
+            &regions,
+        );
+    }
+
+    unsafe fn copy_buffer_to_image<T>(
+        &mut self,
+        src: &n::Buffer,
+        dst: &n::Image,
+        dst_layout: Layout,
+        regions: T,
+    ) where
+        T: IntoIterator,
+        T::Item: Borrow<com::BufferImageCopy>,
+    {
+        let regions = map_buffer_image_regions(dst, regions);
+
+        self.device.0.cmd_copy_buffer_to_image(
+            self.raw,
+            src.raw,
+            dst.raw,
+            conv::map_image_layout(dst_layout),
+            &regions,
+        );
+    }
+
+    unsafe fn copy_image_to_buffer<T>(
+        &mut self,
+        src: &n::Image,
+        src_layout: Layout,
+        dst: &n::Buffer,
+        regions: T,
+    ) where
+        T: IntoIterator,
+        T::Item: Borrow<com::BufferImageCopy>,
+    {
+        let regions = map_buffer_image_regions(src, regions);
+
+        self.device.0.cmd_copy_image_to_buffer(
+            self.raw,
+            src.raw,
+            conv::map_image_layout(src_layout),
+            dst.raw,
+            &regions,
+        );
+    }
+
+    unsafe fn draw(&mut self, vertices: Range<VertexCount>, instances: Range<InstanceCount>) {
+        self.device.0.cmd_draw(
+            self.raw,
+            vertices.end - vertices.start,
+            instances.end - instances.start,
+            vertices.start,
+            instances.start,
+        )
+    }
+
+    unsafe fn draw_indexed(
+        &mut self,
+        indices: Range<IndexCount>,
+        base_vertex: VertexOffset,
+        instances: Range<InstanceCount>,
+    ) {
+        self.device.0.cmd_draw_indexed(
+            self.raw,
+            indices.end - indices.start,
+            instances.end - instances.start,
+            indices.start,
+            base_vertex,
+            instances.start,
+        )
+    }
+
+    unsafe fn draw_indirect(
+        &mut self,
+        buffer: &n::Buffer,
+        offset: buffer::Offset,
+        draw_count: DrawCount,
+        stride: u32,
+    ) {
+        self.device
+            .0
+            .cmd_draw_indirect(self.raw, buffer.raw, offset, draw_count, stride)
+    }
+
+    unsafe fn draw_indexed_indirect(
+        &mut self,
+        buffer: &n::Buffer,
+        offset: buffer::Offset,
+        draw_count: DrawCount,
+        stride: u32,
+    ) {
+        self.device
+            .0
+            .cmd_draw_indexed_indirect(self.raw, buffer.raw, offset, draw_count, stride)
+    }
+
+    unsafe fn set_event(&mut self, event: &n::Event, stage_mask: pso::PipelineStage) {
+        self.device.0.cmd_set_event(
+            self.raw,
+            event.0,
+            vk::PipelineStageFlags::from_raw(stage_mask.bits()),
+        )
+    }
+
+    unsafe fn reset_event(&mut self, event: &n::Event, stage_mask: pso::PipelineStage) {
+        self.device.0.cmd_reset_event(
+            self.raw,
+            event.0,
+            vk::PipelineStageFlags::from_raw(stage_mask.bits()),
+        )
+    }
+
+    unsafe fn wait_events<'a, I, J>(
+        &mut self,
+        events: I,
+        stages: Range<pso::PipelineStage>,
+        barriers: J,
+    ) where
+        I: IntoIterator,
+        I::Item: Borrow<n::Event>,
+        J: IntoIterator,
+        J::Item: Borrow<memory::Barrier<'a, B>>,
+    {
+        let events = events.into_iter().map(|e| e.borrow().0).collect::<Vec<_>>();
+
+        let BarrierSet {
+            global,
+            buffer,
+            image,
+        } = destructure_barriers(barriers);
+
+        self.device.0.cmd_wait_events(
+            self.raw,
+            &events,
+            vk::PipelineStageFlags::from_raw(stages.start.bits()),
+            vk::PipelineStageFlags::from_raw(stages.end.bits()),
+            &global,
+            &buffer,
+            &image,
+        )
+    }
+
+    unsafe fn begin_query(&mut self, query: query::Query<B>, flags: query::ControlFlags) {
+        self.device.0.cmd_begin_query(
+            self.raw,
+            query.pool.0,
+            query.id,
+            conv::map_query_control_flags(flags),
+        )
+    }
+
+    unsafe fn end_query(&mut self, query: query::Query<B>) {
+        self.device
+            .0
+            .cmd_end_query(self.raw, query.pool.0, query.id)
+    }
+
+    unsafe fn reset_query_pool(&mut self, pool: &n::QueryPool, queries: Range<query::Id>) {
+        self.device.0.cmd_reset_query_pool(
+            self.raw,
+            pool.0,
+            queries.start,
+            queries.end - queries.start,
+        )
+    }
+
+    unsafe fn copy_query_pool_results(
+        &mut self,
+        pool: &n::QueryPool,
+        queries: Range<query::Id>,
+        buffer: &n::Buffer,
+        offset: buffer::Offset,
+        stride: buffer::Offset,
+        flags: query::ResultFlags,
+    ) {
+
+        self.device.0.fp_v1_0().cmd_copy_query_pool_results(
+            self.raw,
+            pool.0,
+            queries.start,
+            queries.end - queries.start,
+            buffer.raw,
+            offset,
+            stride,
+            conv::map_query_result_flags(flags),
+        );
+    }
+
+    unsafe fn write_timestamp(&mut self, stage: pso::PipelineStage, query: query::Query<B>) {
+        self.device.0.cmd_write_timestamp(
+            self.raw,
+            conv::map_pipeline_stage(stage),
+            query.pool.0,
+            query.id,
+        )
+    }
+
+    unsafe fn push_compute_constants(
+        &mut self,
+        layout: &n::PipelineLayout,
+        offset: u32,
+        constants: &[u32],
+    ) {
+        self.device.0.cmd_push_constants(
+            self.raw,
+            layout.raw,
+            vk::ShaderStageFlags::COMPUTE,
+            offset,
+            slice::from_raw_parts(constants.as_ptr() as _, constants.len() * 4),
+        );
+    }
+
+    unsafe fn push_graphics_constants(
+        &mut self,
+        layout: &n::PipelineLayout,
+        stages: pso::ShaderStageFlags,
+        offset: u32,
+        constants: &[u32],
+    ) {
+        self.device.0.cmd_push_constants(
+            self.raw,
+            layout.raw,
+            conv::map_stage_flags(stages),
+            offset,
+            slice::from_raw_parts(constants.as_ptr() as _, constants.len() * 4),
+        );
+    }
+
+    unsafe fn execute_commands<'a, T, I>(&mut self, buffers: I)
+    where
+        T: 'a + Borrow<CommandBuffer>,
+        I: IntoIterator<Item = &'a T>,
+    {
+        let command_buffers = buffers
+            .into_iter()
+            .map(|b| b.borrow().raw)
+            .collect::<Vec<_>>();
+        self.device
+            .0
+            .cmd_execute_commands(self.raw, &command_buffers);
+    }
+}
diff --git a/third_party/rust/gfx-backend-vulkan/src/conv.rs b/third_party/rust/gfx-backend-vulkan/src/conv.rs
new file mode 100644
index 000000000000..b74394543d23
--- /dev/null
+++ b/third_party/rust/gfx-backend-vulkan/src/conv.rs
@@ -0,0 +1,587 @@
+use ash::vk;
+
+use hal::{
+    buffer,
+    command,
+    format,
+    image,
+    pass,
+    pso,
+    query,
+    range::RangeArg,
+    window::{CompositeAlphaMode, PresentMode},
+    Features,
+    IndexType,
+};
+
+use crate::native as n;
+use std::borrow::Borrow;
+use std::mem;
+use std::ptr;
+
+pub fn map_format(format: format::Format) -> vk::Format {
+    vk::Format::from_raw(format as i32)
+}
+
+pub fn map_vk_format(vk_format: vk::Format) -> Option<format::Format> {
+    if (vk_format.as_raw() as usize) < format::NUM_FORMATS && vk_format != vk::Format::UNDEFINED {
+        Some(unsafe { mem::transmute(vk_format) })
+    } else {
+        None
+    }
+}
+
+pub fn map_tiling(tiling: image::Tiling) -> vk::ImageTiling {
+    vk::ImageTiling::from_raw(tiling as i32)
+}
+
+pub fn map_component(component: format::Component) -> vk::ComponentSwizzle {
+    use hal::format::Component::*;
+    match component {
+        Zero => vk::ComponentSwizzle::ZERO,
+        One => vk::ComponentSwizzle::ONE,
+        R => vk::ComponentSwizzle::R,
+        G => vk::ComponentSwizzle::G,
+        B => vk::ComponentSwizzle::B,
+        A => vk::ComponentSwizzle::A,
+    }
+}
+
+pub fn map_swizzle(swizzle: format::Swizzle) -> vk::ComponentMapping {
+    vk::ComponentMapping {
+        r: map_component(swizzle.0),
+        g: map_component(swizzle.1),
+        b: map_component(swizzle.2),
+        a: map_component(swizzle.3),
+    }
+}
+
+pub fn map_index_type(index_type: IndexType) -> vk::IndexType {
+    match index_type {
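+        // [Editorial note, not part of the vendored source] Most conversions
+        // in this module lean on HAL and Vulkan sharing numeric values and
+        // are plain `from_raw(x as i32)` casts (see `map_format` and
+        // `map_tiling` above). Explicit `match`es like the one below are used
+        // where the discriminants do not line up. Expected behavior:
+        //
+        //     assert_eq!(map_index_type(IndexType::U16), vk::IndexType::UINT16);
+        //     assert_eq!(map_index_type(IndexType::U32), vk::IndexType::UINT32);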
IndexType::U16 => vk::IndexType::UINT16, + IndexType::U32 => vk::IndexType::UINT32, + } +} + +pub fn map_image_layout(layout: image::Layout) -> vk::ImageLayout { + use hal::image::Layout as Il; + match layout { + Il::General => vk::ImageLayout::GENERAL, + Il::ColorAttachmentOptimal => vk::ImageLayout::COLOR_ATTACHMENT_OPTIMAL, + Il::DepthStencilAttachmentOptimal => vk::ImageLayout::DEPTH_STENCIL_ATTACHMENT_OPTIMAL, + Il::DepthStencilReadOnlyOptimal => vk::ImageLayout::DEPTH_STENCIL_READ_ONLY_OPTIMAL, + Il::ShaderReadOnlyOptimal => vk::ImageLayout::SHADER_READ_ONLY_OPTIMAL, + Il::TransferSrcOptimal => vk::ImageLayout::TRANSFER_SRC_OPTIMAL, + Il::TransferDstOptimal => vk::ImageLayout::TRANSFER_DST_OPTIMAL, + Il::Undefined => vk::ImageLayout::UNDEFINED, + Il::Preinitialized => vk::ImageLayout::PREINITIALIZED, + Il::Present => vk::ImageLayout::PRESENT_SRC_KHR, + } +} + +pub fn map_image_aspects(aspects: format::Aspects) -> vk::ImageAspectFlags { + vk::ImageAspectFlags::from_raw(aspects.bits() as u32) +} + +pub fn map_offset(offset: image::Offset) -> vk::Offset3D { + vk::Offset3D { + x: offset.x, + y: offset.y, + z: offset.z, + } +} + +pub fn map_extent(offset: image::Extent) -> vk::Extent3D { + vk::Extent3D { + width: offset.width, + height: offset.height, + depth: offset.depth, + } +} + +pub fn map_subresource(sub: &image::Subresource) -> vk::ImageSubresource { + vk::ImageSubresource { + aspect_mask: map_image_aspects(sub.aspects), + mip_level: sub.level as _, + array_layer: sub.layer as _, + } +} + +pub fn map_subresource_layers(sub: &image::SubresourceLayers) -> vk::ImageSubresourceLayers { + vk::ImageSubresourceLayers { + aspect_mask: map_image_aspects(sub.aspects), + mip_level: sub.level as _, + base_array_layer: sub.layers.start as _, + layer_count: (sub.layers.end - sub.layers.start) as _, + } +} + +pub fn map_subresource_range(range: &image::SubresourceRange) -> vk::ImageSubresourceRange { + vk::ImageSubresourceRange { + aspect_mask: map_image_aspects(range.aspects), + base_mip_level: range.levels.start as _, + level_count: (range.levels.end - range.levels.start) as _, + base_array_layer: range.layers.start as _, + layer_count: (range.layers.end - range.layers.start) as _, + } +} + +pub fn map_attachment_load_op(op: pass::AttachmentLoadOp) -> vk::AttachmentLoadOp { + use hal::pass::AttachmentLoadOp as Alo; + match op { + Alo::Load => vk::AttachmentLoadOp::LOAD, + Alo::Clear => vk::AttachmentLoadOp::CLEAR, + Alo::DontCare => vk::AttachmentLoadOp::DONT_CARE, + } +} + +pub fn map_attachment_store_op(op: pass::AttachmentStoreOp) -> vk::AttachmentStoreOp { + use hal::pass::AttachmentStoreOp as Aso; + match op { + Aso::Store => vk::AttachmentStoreOp::STORE, + Aso::DontCare => vk::AttachmentStoreOp::DONT_CARE, + } +} + +pub fn map_buffer_access(access: buffer::Access) -> vk::AccessFlags { + vk::AccessFlags::from_raw(access.bits()) +} + +pub fn map_image_access(access: image::Access) -> vk::AccessFlags { + vk::AccessFlags::from_raw(access.bits()) +} + +pub fn map_pipeline_stage(stage: pso::PipelineStage) -> vk::PipelineStageFlags { + vk::PipelineStageFlags::from_raw(stage.bits()) +} + +pub fn map_buffer_usage(usage: buffer::Usage) -> vk::BufferUsageFlags { + vk::BufferUsageFlags::from_raw(usage.bits()) +} + +pub fn map_image_usage(usage: image::Usage) -> vk::ImageUsageFlags { + vk::ImageUsageFlags::from_raw(usage.bits()) +} + +pub fn map_vk_image_usage(usage: vk::ImageUsageFlags) -> image::Usage { + image::Usage::from_bits_truncate(usage.as_raw()) +} + +pub fn map_descriptor_type(ty: 
pso::DescriptorType) -> vk::DescriptorType { + vk::DescriptorType::from_raw(ty as i32) +} + +pub fn map_stage_flags(stages: pso::ShaderStageFlags) -> vk::ShaderStageFlags { + vk::ShaderStageFlags::from_raw(stages.bits()) +} + +pub fn map_filter(filter: image::Filter) -> vk::Filter { + vk::Filter::from_raw(filter as i32) +} + +pub fn map_mip_filter(filter: image::Filter) -> vk::SamplerMipmapMode { + vk::SamplerMipmapMode::from_raw(filter as i32) +} + +pub fn map_wrap(wrap: image::WrapMode) -> vk::SamplerAddressMode { + use hal::image::WrapMode as Wm; + match wrap { + Wm::Tile => vk::SamplerAddressMode::REPEAT, + Wm::Mirror => vk::SamplerAddressMode::MIRRORED_REPEAT, + Wm::Clamp => vk::SamplerAddressMode::CLAMP_TO_EDGE, + Wm::Border => vk::SamplerAddressMode::CLAMP_TO_BORDER, + } +} + +pub fn map_border_color(col: image::PackedColor) -> Option { + match col.0 { + 0x00000000 => Some(vk::BorderColor::FLOAT_TRANSPARENT_BLACK), + 0xFF000000 => Some(vk::BorderColor::FLOAT_OPAQUE_BLACK), + 0xFFFFFFFF => Some(vk::BorderColor::FLOAT_OPAQUE_WHITE), + _ => None, + } +} + +pub fn map_topology(ia: &pso::InputAssemblerDesc) -> vk::PrimitiveTopology { + match (ia.primitive, ia.with_adjacency) { + (pso::Primitive::PointList, false) => vk::PrimitiveTopology::POINT_LIST, + (pso::Primitive::PointList, true) => panic!("Points can't have adjacency info"), + (pso::Primitive::LineList, false) => vk::PrimitiveTopology::LINE_LIST, + (pso::Primitive::LineList, true) => vk::PrimitiveTopology::LINE_LIST_WITH_ADJACENCY, + (pso::Primitive::LineStrip, false) => vk::PrimitiveTopology::LINE_STRIP, + (pso::Primitive::LineStrip, true) => vk::PrimitiveTopology::LINE_STRIP_WITH_ADJACENCY, + (pso::Primitive::TriangleList, false) => vk::PrimitiveTopology::TRIANGLE_LIST, + (pso::Primitive::TriangleList, true) => vk::PrimitiveTopology::TRIANGLE_LIST_WITH_ADJACENCY, + (pso::Primitive::TriangleStrip, false) => vk::PrimitiveTopology::TRIANGLE_STRIP, + (pso::Primitive::TriangleStrip, true) => vk::PrimitiveTopology::TRIANGLE_STRIP_WITH_ADJACENCY, + (pso::Primitive::PatchList(_), false) => vk::PrimitiveTopology::PATCH_LIST, + (pso::Primitive::PatchList(_), true) => panic!("Patches can't have adjacency info"), + } +} + +pub fn map_cull_face(cf: pso::Face) -> vk::CullModeFlags { + match cf { + pso::Face::NONE => vk::CullModeFlags::NONE, + pso::Face::FRONT => vk::CullModeFlags::FRONT, + pso::Face::BACK => vk::CullModeFlags::BACK, + _ => vk::CullModeFlags::FRONT_AND_BACK, + } +} + +pub fn map_front_face(ff: pso::FrontFace) -> vk::FrontFace { + match ff { + pso::FrontFace::Clockwise => vk::FrontFace::CLOCKWISE, + pso::FrontFace::CounterClockwise => vk::FrontFace::COUNTER_CLOCKWISE, + } +} + +pub fn map_comparison(fun: pso::Comparison) -> vk::CompareOp { + use hal::pso::Comparison::*; + match fun { + Never => vk::CompareOp::NEVER, + Less => vk::CompareOp::LESS, + LessEqual => vk::CompareOp::LESS_OR_EQUAL, + Equal => vk::CompareOp::EQUAL, + GreaterEqual => vk::CompareOp::GREATER_OR_EQUAL, + Greater => vk::CompareOp::GREATER, + NotEqual => vk::CompareOp::NOT_EQUAL, + Always => vk::CompareOp::ALWAYS, + } +} + +pub fn map_stencil_op(op: pso::StencilOp) -> vk::StencilOp { + use hal::pso::StencilOp::*; + match op { + Keep => vk::StencilOp::KEEP, + Zero => vk::StencilOp::ZERO, + Replace => vk::StencilOp::REPLACE, + IncrementClamp => vk::StencilOp::INCREMENT_AND_CLAMP, + IncrementWrap => vk::StencilOp::INCREMENT_AND_WRAP, + DecrementClamp => vk::StencilOp::DECREMENT_AND_CLAMP, + DecrementWrap => vk::StencilOp::DECREMENT_AND_WRAP, + Invert => 
vk::StencilOp::INVERT, + } +} + +pub fn map_stencil_side(side: &pso::StencilFace) -> vk::StencilOpState { + vk::StencilOpState { + fail_op: map_stencil_op(side.op_fail), + pass_op: map_stencil_op(side.op_pass), + depth_fail_op: map_stencil_op(side.op_depth_fail), + compare_op: map_comparison(side.fun), + compare_mask: !0, + write_mask: !0, + reference: 0, + } +} + +pub fn map_blend_factor(factor: pso::Factor) -> vk::BlendFactor { + use hal::pso::Factor::*; + match factor { + Zero => vk::BlendFactor::ZERO, + One => vk::BlendFactor::ONE, + SrcColor => vk::BlendFactor::SRC_COLOR, + OneMinusSrcColor => vk::BlendFactor::ONE_MINUS_SRC_COLOR, + DstColor => vk::BlendFactor::DST_COLOR, + OneMinusDstColor => vk::BlendFactor::ONE_MINUS_DST_COLOR, + SrcAlpha => vk::BlendFactor::SRC_ALPHA, + OneMinusSrcAlpha => vk::BlendFactor::ONE_MINUS_SRC_ALPHA, + DstAlpha => vk::BlendFactor::DST_ALPHA, + OneMinusDstAlpha => vk::BlendFactor::ONE_MINUS_DST_ALPHA, + ConstColor => vk::BlendFactor::CONSTANT_COLOR, + OneMinusConstColor => vk::BlendFactor::ONE_MINUS_CONSTANT_COLOR, + ConstAlpha => vk::BlendFactor::CONSTANT_ALPHA, + OneMinusConstAlpha => vk::BlendFactor::ONE_MINUS_CONSTANT_ALPHA, + SrcAlphaSaturate => vk::BlendFactor::SRC_ALPHA_SATURATE, + Src1Color => vk::BlendFactor::SRC1_COLOR, + OneMinusSrc1Color => vk::BlendFactor::ONE_MINUS_SRC1_COLOR, + Src1Alpha => vk::BlendFactor::SRC1_ALPHA, + OneMinusSrc1Alpha => vk::BlendFactor::ONE_MINUS_SRC1_ALPHA, + } +} + +pub fn map_blend_op(operation: pso::BlendOp) -> (vk::BlendOp, vk::BlendFactor, vk::BlendFactor) { + use hal::pso::BlendOp::*; + match operation { + Add { src, dst } => ( + vk::BlendOp::ADD, + map_blend_factor(src), + map_blend_factor(dst), + ), + Sub { src, dst } => ( + vk::BlendOp::SUBTRACT, + map_blend_factor(src), + map_blend_factor(dst), + ), + RevSub { src, dst } => ( + vk::BlendOp::REVERSE_SUBTRACT, + map_blend_factor(src), + map_blend_factor(dst), + ), + Min => ( + vk::BlendOp::MIN, + vk::BlendFactor::ZERO, + vk::BlendFactor::ZERO, + ), + Max => ( + vk::BlendOp::MAX, + vk::BlendFactor::ZERO, + vk::BlendFactor::ZERO, + ), + } +} + +pub fn map_pipeline_statistics( + statistics: query::PipelineStatistic, +) -> vk::QueryPipelineStatisticFlags { + vk::QueryPipelineStatisticFlags::from_raw(statistics.bits()) +} + +pub fn map_query_control_flags(flags: query::ControlFlags) -> vk::QueryControlFlags { + + vk::QueryControlFlags::from_raw(flags.bits() & vk::QueryControlFlags::all().as_raw()) +} + +pub fn map_query_result_flags(flags: query::ResultFlags) -> vk::QueryResultFlags { + vk::QueryResultFlags::from_raw(flags.bits() & vk::QueryResultFlags::all().as_raw()) +} + +pub fn map_image_features(features: vk::FormatFeatureFlags) -> format::ImageFeature { + format::ImageFeature::from_bits_truncate(features.as_raw()) +} + +pub fn map_buffer_features(features: vk::FormatFeatureFlags) -> format::BufferFeature { + format::BufferFeature::from_bits_truncate(features.as_raw()) +} + +pub fn map_device_features(features: Features) -> vk::PhysicalDeviceFeatures { + + + vk::PhysicalDeviceFeatures::builder() + .robust_buffer_access(features.contains(Features::ROBUST_BUFFER_ACCESS)) + .full_draw_index_uint32(features.contains(Features::FULL_DRAW_INDEX_U32)) + .image_cube_array(features.contains(Features::IMAGE_CUBE_ARRAY)) + .independent_blend(features.contains(Features::INDEPENDENT_BLENDING)) + .geometry_shader(features.contains(Features::GEOMETRY_SHADER)) + .tessellation_shader(features.contains(Features::TESSELLATION_SHADER)) + 
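+        // [Editorial note, not part of the vendored source] Each HAL feature
+        // bit toggles exactly one `VkPhysicalDeviceFeatures` field; the chain
+        // continues below. Callers are expected to intersect with the
+        // adapter's supported features first, e.g. (with a hypothetical
+        // `adapter_features` value):
+        //
+        //     let wanted = Features::SAMPLER_ANISOTROPY;
+        //     let requested = adapter_features & wanted; // drop unsupported bits
+        //     let raw = map_device_features(requested);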
.sample_rate_shading(features.contains(Features::SAMPLE_RATE_SHADING)) + .dual_src_blend(features.contains(Features::DUAL_SRC_BLENDING)) + .logic_op(features.contains(Features::LOGIC_OP)) + .multi_draw_indirect(features.contains(Features::MULTI_DRAW_INDIRECT)) + .draw_indirect_first_instance(features.contains(Features::DRAW_INDIRECT_FIRST_INSTANCE)) + .depth_clamp(features.contains(Features::DEPTH_CLAMP)) + .depth_bias_clamp(features.contains(Features::DEPTH_BIAS_CLAMP)) + .fill_mode_non_solid(features.contains(Features::NON_FILL_POLYGON_MODE)) + .depth_bounds(features.contains(Features::DEPTH_BOUNDS)) + .wide_lines(features.contains(Features::LINE_WIDTH)) + .large_points(features.contains(Features::POINT_SIZE)) + .alpha_to_one(features.contains(Features::ALPHA_TO_ONE)) + .multi_viewport(features.contains(Features::MULTI_VIEWPORTS)) + .sampler_anisotropy(features.contains(Features::SAMPLER_ANISOTROPY)) + .texture_compression_etc2(features.contains(Features::FORMAT_ETC2)) + .texture_compression_astc_ldr(features.contains(Features::FORMAT_ASTC_LDR)) + .texture_compression_bc(features.contains(Features::FORMAT_BC)) + .occlusion_query_precise(features.contains(Features::PRECISE_OCCLUSION_QUERY)) + .pipeline_statistics_query(features.contains(Features::PIPELINE_STATISTICS_QUERY)) + .vertex_pipeline_stores_and_atomics(features.contains(Features::VERTEX_STORES_AND_ATOMICS)) + .fragment_stores_and_atomics(features.contains(Features::FRAGMENT_STORES_AND_ATOMICS)) + .shader_tessellation_and_geometry_point_size( + features.contains(Features::SHADER_TESSELLATION_AND_GEOMETRY_POINT_SIZE), + ) + .shader_image_gather_extended(features.contains(Features::SHADER_IMAGE_GATHER_EXTENDED)) + .shader_storage_image_extended_formats( + features.contains(Features::SHADER_STORAGE_IMAGE_EXTENDED_FORMATS), + ) + .shader_storage_image_multisample( + features.contains(Features::SHADER_STORAGE_IMAGE_MULTISAMPLE), + ) + .shader_storage_image_read_without_format( + features.contains(Features::SHADER_STORAGE_IMAGE_READ_WITHOUT_FORMAT), + ) + .shader_storage_image_write_without_format( + features.contains(Features::SHADER_STORAGE_IMAGE_WRITE_WITHOUT_FORMAT), + ) + .shader_uniform_buffer_array_dynamic_indexing( + features.contains(Features::SHADER_UNIFORM_BUFFER_ARRAY_DYNAMIC_INDEXING), + ) + .shader_sampled_image_array_dynamic_indexing( + features.contains(Features::SHADER_SAMPLED_IMAGE_ARRAY_DYNAMIC_INDEXING), + ) + .shader_storage_buffer_array_dynamic_indexing( + features.contains(Features::SHADER_STORAGE_BUFFER_ARRAY_DYNAMIC_INDEXING), + ) + .shader_storage_image_array_dynamic_indexing( + features.contains(Features::SHADER_STORAGE_IMAGE_ARRAY_DYNAMIC_INDEXING), + ) + .shader_clip_distance(features.contains(Features::SHADER_CLIP_DISTANCE)) + .shader_cull_distance(features.contains(Features::SHADER_CULL_DISTANCE)) + .shader_float64(features.contains(Features::SHADER_FLOAT64)) + .shader_int64(features.contains(Features::SHADER_INT64)) + .shader_int16(features.contains(Features::SHADER_INT16)) + .shader_resource_residency(features.contains(Features::SHADER_RESOURCE_RESIDENCY)) + .shader_resource_min_lod(features.contains(Features::SHADER_RESOURCE_MIN_LOD)) + .sparse_binding(features.contains(Features::SPARSE_BINDING)) + .sparse_residency_buffer(features.contains(Features::SPARSE_RESIDENCY_BUFFER)) + .sparse_residency_image2_d(features.contains(Features::SPARSE_RESIDENCY_IMAGE_2D)) + .sparse_residency_image3_d(features.contains(Features::SPARSE_RESIDENCY_IMAGE_3D)) + 
.sparse_residency2_samples(features.contains(Features::SPARSE_RESIDENCY_2_SAMPLES))
+        .sparse_residency4_samples(features.contains(Features::SPARSE_RESIDENCY_4_SAMPLES))
+        .sparse_residency8_samples(features.contains(Features::SPARSE_RESIDENCY_8_SAMPLES))
+        .sparse_residency16_samples(features.contains(Features::SPARSE_RESIDENCY_16_SAMPLES))
+        .sparse_residency_aliased(features.contains(Features::SPARSE_RESIDENCY_ALIASED))
+        .variable_multisample_rate(features.contains(Features::VARIABLE_MULTISAMPLE_RATE))
+        .inherited_queries(features.contains(Features::INHERITED_QUERIES))
+        .build()
+}
+
+pub fn map_memory_ranges<'a, I, R>(ranges: I) -> Vec<vk::MappedMemoryRange>
+where
+    I: IntoIterator,
+    I::Item: Borrow<(&'a n::Memory, R)>,
+    R: RangeArg<u64>,
+{
+    ranges
+        .into_iter()
+        .map(|range| {
+            let &(ref memory, ref range) = range.borrow();
+            let (offset, size) = map_range_arg(range);
+            vk::MappedMemoryRange {
+                s_type: vk::StructureType::MAPPED_MEMORY_RANGE,
+                p_next: ptr::null(),
+                memory: memory.raw,
+                offset,
+                size,
+            }
+        })
+        .collect()
+}
+
+// Maps a HAL range argument to Vulkan's (offset, size) convention, using
+// vk::WHOLE_SIZE when the upper bound is left open.
+pub fn map_range_arg<R>(range: &R) -> (u64, u64)
+where
+    R: RangeArg<u64>,
+{
+    let offset = *range.start().unwrap_or(&0);
+    let size = match range.end() {
+        Some(end) => end - offset,
+        None => vk::WHOLE_SIZE,
+    };
+
+    (offset, size)
+}
+
+pub fn map_command_buffer_flags(flags: command::CommandBufferFlags) -> vk::CommandBufferUsageFlags {
+    vk::CommandBufferUsageFlags::from_raw(flags.bits())
+}
+
+pub fn map_command_buffer_level(level: command::Level) -> vk::CommandBufferLevel {
+    match level {
+        command::Level::Primary => vk::CommandBufferLevel::PRIMARY,
+        command::Level::Secondary => vk::CommandBufferLevel::SECONDARY,
+    }
+}
+
+pub fn map_view_kind(
+    kind: image::ViewKind,
+    ty: vk::ImageType,
+    is_cube: bool,
+) -> Option<vk::ImageViewType> {
+    use crate::image::ViewKind::*;
+    use crate::vk::ImageType;
+
+    Some(match (ty, kind) {
+        (ImageType::TYPE_1D, D1) => vk::ImageViewType::TYPE_1D,
+        (ImageType::TYPE_1D, D1Array) => vk::ImageViewType::TYPE_1D_ARRAY,
+        (ImageType::TYPE_2D, D2) => vk::ImageViewType::TYPE_2D,
+        (ImageType::TYPE_2D, D2Array) => vk::ImageViewType::TYPE_2D_ARRAY,
+        (ImageType::TYPE_3D, D3) => vk::ImageViewType::TYPE_3D,
+        (ImageType::TYPE_2D, Cube) if is_cube => vk::ImageViewType::CUBE,
+        (ImageType::TYPE_2D, CubeArray) if is_cube => vk::ImageViewType::CUBE_ARRAY,
+        (ImageType::TYPE_3D, Cube) if is_cube => vk::ImageViewType::CUBE,
+        (ImageType::TYPE_3D, CubeArray) if is_cube => vk::ImageViewType::CUBE_ARRAY,
+        _ => return None,
+    })
+}
+
+pub fn map_rect(rect: &pso::Rect) -> vk::Rect2D {
+    vk::Rect2D {
+        offset: vk::Offset2D {
+            x: rect.x as _,
+            y: rect.y as _,
+        },
+        extent: vk::Extent2D {
+            width: rect.w as _,
+            height: rect.h as _,
+        },
+    }
+}
+
+pub fn map_clear_rect(rect: &pso::ClearRect) -> vk::ClearRect {
+    vk::ClearRect {
+        base_array_layer: rect.layers.start as _,
+        layer_count: (rect.layers.end - rect.layers.start) as _,
+        rect: map_rect(&rect.rect),
+    }
+}
+
+pub fn map_viewport(vp: &pso::Viewport) -> vk::Viewport {
+    vk::Viewport {
+        x: vp.rect.x as _,
+        y: vp.rect.y as _,
+        width: vp.rect.w as _,
+        height: vp.rect.h as _,
+        min_depth: vp.depth.start,
+        max_depth: vp.depth.end,
+    }
+}
+
+pub fn map_view_capabilities(caps: image::ViewCapabilities) -> vk::ImageCreateFlags {
+    vk::ImageCreateFlags::from_raw(caps.bits())
+}
+
+pub fn map_present_mode(mode: PresentMode) -> vk::PresentModeKHR {
+    if mode == PresentMode::IMMEDIATE {
+        vk::PresentModeKHR::IMMEDIATE
+    } else if mode == PresentMode::MAILBOX {
+        vk::PresentModeKHR::MAILBOX
+    } else if mode == PresentMode::FIFO {
+        vk::PresentModeKHR::FIFO
+    } else if mode == PresentMode::RELAXED {
+        vk::PresentModeKHR::FIFO_RELAXED
+    } else {
+        panic!("Unexpected present mode {:?}", mode)
+    }
+}
+
+pub fn map_vk_present_mode(mode: vk::PresentModeKHR) -> PresentMode {
+    if mode == vk::PresentModeKHR::IMMEDIATE {
+        PresentMode::IMMEDIATE
+    } else if mode == vk::PresentModeKHR::MAILBOX {
+        PresentMode::MAILBOX
+    } else if mode == vk::PresentModeKHR::FIFO {
+        PresentMode::FIFO
+    } else if mode == vk::PresentModeKHR::FIFO_RELAXED {
+        PresentMode::RELAXED
+    } else {
+        warn!("Unrecognized present mode {:?}", mode);
+        PresentMode::IMMEDIATE
+    }
+}
+
+pub fn map_composite_alpha_mode(composite_alpha_mode: CompositeAlphaMode) -> vk::CompositeAlphaFlagsKHR {
+    vk::CompositeAlphaFlagsKHR::from_raw(composite_alpha_mode.bits())
+}
+
+pub fn map_vk_composite_alpha(composite_alpha: vk::CompositeAlphaFlagsKHR) -> CompositeAlphaMode {
+    CompositeAlphaMode::from_bits_truncate(composite_alpha.as_raw())
+}
+
+pub fn map_descriptor_pool_create_flags(
+    flags: pso::DescriptorPoolCreateFlags,
+) -> vk::DescriptorPoolCreateFlags {
+    vk::DescriptorPoolCreateFlags::from_raw(flags.bits())
+}
diff --git a/third_party/rust/gfx-backend-vulkan/src/device.rs b/third_party/rust/gfx-backend-vulkan/src/device.rs
new file mode 100644
index 000000000000..d21a3627ecf0
--- /dev/null
+++ b/third_party/rust/gfx-backend-vulkan/src/device.rs
@@ -0,0 +1,2307 @@
+use arrayvec::ArrayVec;
+use ash::extensions::khr;
+use ash::version::DeviceV1_0;
+use ash::vk;
+use ash::vk::Handle;
+use smallvec::SmallVec;
+
+use hal::{
+    memory::Requirements,
+    pool::CommandPoolCreateFlags,
+    pso::VertexInputRate,
+    range::RangeArg,
+    window::SwapchainConfig,
+    {buffer, device as d, format, image, pass, pso, query, queue},
+    {Features, MemoryTypeId},
+};
+
+use std::borrow::Borrow;
+use std::ffi::CString;
+use std::ops::Range;
+use std::pin::Pin;
+use std::sync::Arc;
+use std::{mem, ptr};
+
+use crate::pool::RawCommandPool;
+use crate::{conv, native as n, window as w, command as cmd};
+use crate::{Backend as B, DebugMessenger, Device};
+
+#[derive(Debug, Default)]
+struct GraphicsPipelineInfoBuf {
+    // At most one of each core dynamic state is pushed (Vulkan 1.0 has 9).
+    dynamic_states: ArrayVec<[vk::DynamicState; 10]>,
+
+    // The create-info structs below hold raw pointers into these buffers,
+    // so they must stay alive (and in place) until the create call is made.
+    c_strings: ArrayVec<[CString; 5]>,
+    stages: ArrayVec<[vk::PipelineShaderStageCreateInfo; 5]>,
+    specializations: ArrayVec<[vk::SpecializationInfo; 5]>,
+    specialization_entries: ArrayVec<[SmallVec<[vk::SpecializationMapEntry; 4]>; 5]>,
+
+    vertex_bindings: Vec<vk::VertexInputBindingDescription>,
+    vertex_attributes: Vec<vk::VertexInputAttributeDescription>,
+    blend_states: Vec<vk::PipelineColorBlendAttachmentState>,
+
+    sample_mask: [u32; 2],
+    vertex_input_state: vk::PipelineVertexInputStateCreateInfo,
+    input_assembly_state: vk::PipelineInputAssemblyStateCreateInfo,
+    tessellation_state: Option<vk::PipelineTessellationStateCreateInfo>,
+    viewport_state: vk::PipelineViewportStateCreateInfo,
+    rasterization_state: vk::PipelineRasterizationStateCreateInfo,
+    multisample_state: vk::PipelineMultisampleStateCreateInfo,
+    depth_stencil_state: vk::PipelineDepthStencilStateCreateInfo,
+    color_blend_state: vk::PipelineColorBlendStateCreateInfo,
+    pipeline_dynamic_state: vk::PipelineDynamicStateCreateInfo,
+    viewport: vk::Viewport,
+    scissor: vk::Rect2D,
+}
+impl GraphicsPipelineInfoBuf {
+    unsafe fn add_stage<'a>(&mut self, stage: vk::ShaderStageFlags, source: &pso::EntryPoint<'a, B>) {
+        let string = CString::new(source.entry).unwrap();
+        let p_name = string.as_ptr();
+        self.c_strings.push(string);
+
+        self.specialization_entries.push(
+            source
+                .specialization
+                .constants
+                .iter()
+                .map(|c| vk::SpecializationMapEntry {
+                    constant_id: c.id,
+                    offset:
c.range.start as _, + size: (c.range.end - c.range.start) as _, + }) + .collect(), + ); + let map_entries = self.specialization_entries.last().unwrap(); + + self.specializations.push(vk::SpecializationInfo { + map_entry_count: map_entries.len() as _, + p_map_entries: map_entries.as_ptr(), + data_size: source.specialization.data.len() as _, + p_data: source.specialization.data.as_ptr() as _, + }); + + self.stages.push(vk::PipelineShaderStageCreateInfo { + s_type: vk::StructureType::PIPELINE_SHADER_STAGE_CREATE_INFO, + p_next: ptr::null(), + flags: vk::PipelineShaderStageCreateFlags::empty(), + stage, + module: source.module.raw, + p_name, + p_specialization_info: self.specializations.last().unwrap(), + }) + } + + unsafe fn initialize<'a>( + this: &mut Pin<&mut Self>, + device: &Device, + desc: &pso::GraphicsPipelineDesc<'a, B>, + ) { + let mut this = Pin::get_mut(this.as_mut()); + + + + this.add_stage(vk::ShaderStageFlags::VERTEX, &desc.shaders.vertex); + + if let Some(ref entry) = desc.shaders.fragment { + this.add_stage(vk::ShaderStageFlags::FRAGMENT, entry); + } + + if let Some(ref entry) = desc.shaders.geometry { + this.add_stage(vk::ShaderStageFlags::GEOMETRY, entry); + } + + if let Some(ref entry) = desc.shaders.domain { + this.add_stage(vk::ShaderStageFlags::TESSELLATION_EVALUATION, entry); + } + + if let Some(ref entry) = desc.shaders.hull { + this.add_stage(vk::ShaderStageFlags::TESSELLATION_CONTROL, entry); + } + + this.vertex_bindings = desc.vertex_buffers.iter().map(|vbuf| { + vk::VertexInputBindingDescription { + binding: vbuf.binding, + stride: vbuf.stride as u32, + input_rate: match vbuf.rate { + VertexInputRate::Vertex => vk::VertexInputRate::VERTEX, + VertexInputRate::Instance(divisor) => { + debug_assert_eq!(divisor, 1, "Custom vertex rate divisors not supported in Vulkan backend without extension"); + vk::VertexInputRate::INSTANCE + }, + }, + } + }).collect(); + this.vertex_attributes = desc + .attributes + .iter() + .map(|attr| vk::VertexInputAttributeDescription { + location: attr.location as u32, + binding: attr.binding as u32, + format: conv::map_format(attr.element.format), + offset: attr.element.offset as u32, + }) + .collect(); + + this.vertex_input_state = vk::PipelineVertexInputStateCreateInfo { + s_type: vk::StructureType::PIPELINE_VERTEX_INPUT_STATE_CREATE_INFO, + p_next: ptr::null(), + flags: vk::PipelineVertexInputStateCreateFlags::empty(), + vertex_binding_description_count: this.vertex_bindings.len() as _, + p_vertex_binding_descriptions: this.vertex_bindings.as_ptr(), + vertex_attribute_description_count: this.vertex_attributes.len() as _, + p_vertex_attribute_descriptions: this.vertex_attributes.as_ptr(), + }; + + this.input_assembly_state = vk::PipelineInputAssemblyStateCreateInfo { + s_type: vk::StructureType::PIPELINE_INPUT_ASSEMBLY_STATE_CREATE_INFO, + p_next: ptr::null(), + flags: vk::PipelineInputAssemblyStateCreateFlags::empty(), + topology: conv::map_topology(&desc.input_assembler), + primitive_restart_enable: match desc.input_assembler.restart_index { + Some(_) => vk::TRUE, + None => vk::FALSE, + }, + }; + + let depth_bias = match desc.rasterizer.depth_bias { + Some(pso::State::Static(db)) => db, + Some(pso::State::Dynamic) => { + this.dynamic_states.push(vk::DynamicState::DEPTH_BIAS); + pso::DepthBias::default() + } + None => pso::DepthBias::default(), + }; + + let (polygon_mode, line_width) = match desc.rasterizer.polygon_mode { + pso::PolygonMode::Point => (vk::PolygonMode::POINT, 1.0), + pso::PolygonMode::Line(width) => ( + 
vk::PolygonMode::LINE, + match width { + pso::State::Static(w) => w, + pso::State::Dynamic => { + this.dynamic_states.push(vk::DynamicState::LINE_WIDTH); + 1.0 + } + }, + ), + pso::PolygonMode::Fill => (vk::PolygonMode::FILL, 1.0), + }; + + this.rasterization_state = vk::PipelineRasterizationStateCreateInfo { + s_type: vk::StructureType::PIPELINE_RASTERIZATION_STATE_CREATE_INFO, + p_next: ptr::null(), + flags: vk::PipelineRasterizationStateCreateFlags::empty(), + depth_clamp_enable: if desc.rasterizer.depth_clamping { + if device.raw.1.contains(Features::DEPTH_CLAMP) { + vk::TRUE + } else { + warn!("Depth clamping was requested on a device with disabled feature"); + vk::FALSE + } + } else { + vk::FALSE + }, + rasterizer_discard_enable: if desc.shaders.fragment.is_none() + && desc.depth_stencil.depth.is_none() + && desc.depth_stencil.stencil.is_none() + { + vk::TRUE + } else { + vk::FALSE + }, + polygon_mode, + cull_mode: conv::map_cull_face(desc.rasterizer.cull_face), + front_face: conv::map_front_face(desc.rasterizer.front_face), + depth_bias_enable: if desc.rasterizer.depth_bias.is_some() { + vk::TRUE + } else { + vk::FALSE + }, + depth_bias_constant_factor: depth_bias.const_factor, + depth_bias_clamp: depth_bias.clamp, + depth_bias_slope_factor: depth_bias.slope_factor, + line_width, + }; + + this.tessellation_state = { + if let pso::Primitive::PatchList(patch_control_points) = desc.input_assembler.primitive { + Some(vk::PipelineTessellationStateCreateInfo { + s_type: vk::StructureType::PIPELINE_TESSELLATION_STATE_CREATE_INFO, + p_next: ptr::null(), + flags: vk::PipelineTessellationStateCreateFlags::empty(), + patch_control_points: patch_control_points as _, + }) + } else { + None + } + }; + + this.viewport_state = vk::PipelineViewportStateCreateInfo { + s_type: vk::StructureType::PIPELINE_VIEWPORT_STATE_CREATE_INFO, + p_next: ptr::null(), + flags: vk::PipelineViewportStateCreateFlags::empty(), + scissor_count: 1, + p_scissors: match desc.baked_states.scissor { + Some(ref rect) => { + this.scissor = conv::map_rect(rect); + &this.scissor + } + None => { + this.dynamic_states.push(vk::DynamicState::SCISSOR); + ptr::null() + } + }, + viewport_count: 1, + p_viewports: match desc.baked_states.viewport { + Some(ref vp) => { + this.viewport = conv::map_viewport(vp); + &this.viewport + } + None => { + this.dynamic_states.push(vk::DynamicState::VIEWPORT); + ptr::null() + } + }, + }; + + this.multisample_state = match desc.multisampling { + Some(ref ms) => { + this.sample_mask = [ + (ms.sample_mask & 0xFFFFFFFF) as u32, + ((ms.sample_mask >> 32) & 0xFFFFFFFF) as u32, + ]; + vk::PipelineMultisampleStateCreateInfo { + s_type: vk::StructureType::PIPELINE_MULTISAMPLE_STATE_CREATE_INFO, + p_next: ptr::null(), + flags: vk::PipelineMultisampleStateCreateFlags::empty(), + rasterization_samples: vk::SampleCountFlags::from_raw( + (ms.rasterization_samples as u32) & vk::SampleCountFlags::all().as_raw(), + ), + sample_shading_enable: ms.sample_shading.is_some() as _, + min_sample_shading: ms.sample_shading.unwrap_or(0.0), + p_sample_mask: &this.sample_mask as _, + alpha_to_coverage_enable: ms.alpha_coverage as _, + alpha_to_one_enable: ms.alpha_to_one as _, + } + } + None => vk::PipelineMultisampleStateCreateInfo { + s_type: vk::StructureType::PIPELINE_MULTISAMPLE_STATE_CREATE_INFO, + p_next: ptr::null(), + flags: vk::PipelineMultisampleStateCreateFlags::empty(), + rasterization_samples: vk::SampleCountFlags::TYPE_1, + sample_shading_enable: vk::FALSE, + min_sample_shading: 0.0, + p_sample_mask: ptr::null(), 
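+                // [Editorial note, not part of the vendored source] In the
+                // `Some(ms)` arm above, HAL's 64-bit sample mask is split
+                // into the two 32-bit words Vulkan expects:
+                //
+                //     let sample_mask: u64 = 0x1_0000_0003;
+                //     let words = [
+                //         (sample_mask & 0xFFFF_FFFF) as u32,         // 0x3
+                //         ((sample_mask >> 32) & 0xFFFF_FFFF) as u32, // 0x1
+                //     ];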
+ alpha_to_coverage_enable: vk::FALSE, + alpha_to_one_enable: vk::FALSE, + }, + }; + + let depth_stencil = desc.depth_stencil; + let (depth_test_enable, depth_write_enable, depth_compare_op) = match depth_stencil.depth { + Some(ref depth) => (vk::TRUE, depth.write as _, conv::map_comparison(depth.fun)), + None => (vk::FALSE, vk::FALSE, vk::CompareOp::NEVER), + }; + let (stencil_test_enable, front, back) = match depth_stencil.stencil { + Some(ref stencil) => { + let mut front = conv::map_stencil_side(&stencil.faces.front); + let mut back = conv::map_stencil_side(&stencil.faces.back); + match stencil.read_masks { + pso::State::Static(ref sides) => { + front.compare_mask = sides.front; + back.compare_mask = sides.back; + } + pso::State::Dynamic => { + this.dynamic_states + .push(vk::DynamicState::STENCIL_COMPARE_MASK); + } + } + match stencil.write_masks { + pso::State::Static(ref sides) => { + front.write_mask = sides.front; + back.write_mask = sides.back; + } + pso::State::Dynamic => { + this.dynamic_states + .push(vk::DynamicState::STENCIL_WRITE_MASK); + } + } + match stencil.reference_values { + pso::State::Static(ref sides) => { + front.reference = sides.front; + back.reference = sides.back; + } + pso::State::Dynamic => { + this.dynamic_states + .push(vk::DynamicState::STENCIL_REFERENCE); + } + } + (vk::TRUE, front, back) + } + None => mem::zeroed(), + }; + let (min_depth_bounds, max_depth_bounds) = match desc.baked_states.depth_bounds { + Some(ref range) => (range.start, range.end), + None => { + this.dynamic_states.push(vk::DynamicState::DEPTH_BOUNDS); + (0.0, 1.0) + } + }; + + this.depth_stencil_state = vk::PipelineDepthStencilStateCreateInfo { + s_type: vk::StructureType::PIPELINE_DEPTH_STENCIL_STATE_CREATE_INFO, + p_next: ptr::null(), + flags: vk::PipelineDepthStencilStateCreateFlags::empty(), + depth_test_enable, + depth_write_enable, + depth_compare_op, + depth_bounds_test_enable: depth_stencil.depth_bounds as _, + stencil_test_enable, + front, + back, + min_depth_bounds, + max_depth_bounds, + }; + + this.blend_states = desc + .blender + .targets + .iter() + .map(|color_desc| { + let color_write_mask = + vk::ColorComponentFlags::from_raw(color_desc.mask.bits() as _); + match color_desc.blend { + Some(ref bs) => { + let (color_blend_op, src_color_blend_factor, dst_color_blend_factor) = + conv::map_blend_op(bs.color); + let (alpha_blend_op, src_alpha_blend_factor, dst_alpha_blend_factor) = + conv::map_blend_op(bs.alpha); + vk::PipelineColorBlendAttachmentState { + color_write_mask, + blend_enable: vk::TRUE, + src_color_blend_factor, + dst_color_blend_factor, + color_blend_op, + src_alpha_blend_factor, + dst_alpha_blend_factor, + alpha_blend_op, + } + } + None => vk::PipelineColorBlendAttachmentState { + color_write_mask, + ..mem::zeroed() + }, + } + }) + .collect(); + + this.color_blend_state = vk::PipelineColorBlendStateCreateInfo { + s_type: vk::StructureType::PIPELINE_COLOR_BLEND_STATE_CREATE_INFO, + p_next: ptr::null(), + flags: vk::PipelineColorBlendStateCreateFlags::empty(), + logic_op_enable: vk::FALSE, + logic_op: vk::LogicOp::CLEAR, + attachment_count: this.blend_states.len() as _, + p_attachments: this.blend_states.as_ptr(), + blend_constants: match desc.baked_states.blend_color { + Some(value) => value, + None => { + this.dynamic_states.push(vk::DynamicState::BLEND_CONSTANTS); + [0.0; 4] + } + }, + }; + + this.pipeline_dynamic_state = vk::PipelineDynamicStateCreateInfo { + s_type: vk::StructureType::PIPELINE_DYNAMIC_STATE_CREATE_INFO, + p_next: ptr::null(), + flags: 
vk::PipelineDynamicStateCreateFlags::empty(), + dynamic_state_count: this.dynamic_states.len() as _, + p_dynamic_states: this.dynamic_states.as_ptr(), + }; + } +} + +#[derive(Debug, Default)] +struct ComputePipelineInfoBuf { + c_string: CString, + specialization: vk::SpecializationInfo, + entries: SmallVec<[vk::SpecializationMapEntry; 4]>, +} +impl ComputePipelineInfoBuf { + unsafe fn initialize<'a>( + this: &mut Pin<&mut Self>, + desc: &pso::ComputePipelineDesc<'a, B>, + ) { + let mut this = Pin::get_mut(this.as_mut()); + + this.c_string = CString::new(desc.shader.entry).unwrap(); + this.entries = desc + .shader + .specialization + .constants + .iter() + .map(|c| vk::SpecializationMapEntry { + constant_id: c.id, + offset: c.range.start as _, + size: (c.range.end - c.range.start) as _, + }) + .collect(); + this.specialization = vk::SpecializationInfo { + map_entry_count: this.entries.len() as _, + p_map_entries: this.entries.as_ptr(), + data_size: desc.shader.specialization.data.len() as _, + p_data: desc.shader.specialization.data.as_ptr() as _, + }; + } +} + +impl d::Device for Device { + unsafe fn allocate_memory( + &self, + mem_type: MemoryTypeId, + size: u64, + ) -> Result { + let info = vk::MemoryAllocateInfo { + s_type: vk::StructureType::MEMORY_ALLOCATE_INFO, + p_next: ptr::null(), + allocation_size: size, + memory_type_index: mem_type.0 as _, + }; + + let result = self.raw.0.allocate_memory(&info, None); + + match result { + Ok(memory) => Ok(n::Memory { raw: memory }), + Err(vk::Result::ERROR_TOO_MANY_OBJECTS) => Err(d::AllocationError::TooManyObjects), + Err(vk::Result::ERROR_OUT_OF_HOST_MEMORY) => Err(d::OutOfMemory::Host.into()), + Err(vk::Result::ERROR_OUT_OF_DEVICE_MEMORY) => Err(d::OutOfMemory::Device.into()), + _ => unreachable!(), + } + } + + unsafe fn create_command_pool( + &self, + family: queue::QueueFamilyId, + create_flags: CommandPoolCreateFlags, + ) -> Result { + let mut flags = vk::CommandPoolCreateFlags::empty(); + if create_flags.contains(CommandPoolCreateFlags::TRANSIENT) { + flags |= vk::CommandPoolCreateFlags::TRANSIENT; + } + if create_flags.contains(CommandPoolCreateFlags::RESET_INDIVIDUAL) { + flags |= vk::CommandPoolCreateFlags::RESET_COMMAND_BUFFER; + } + + let info = vk::CommandPoolCreateInfo { + s_type: vk::StructureType::COMMAND_POOL_CREATE_INFO, + p_next: ptr::null(), + flags, + queue_family_index: family.0 as _, + }; + + let result = self.raw.0.create_command_pool(&info, None); + + match result { + Ok(pool) => Ok(RawCommandPool { + raw: pool, + device: self.raw.clone(), + }), + Err(vk::Result::ERROR_OUT_OF_HOST_MEMORY) => Err(d::OutOfMemory::Host), + Err(vk::Result::ERROR_OUT_OF_DEVICE_MEMORY) => Err(d::OutOfMemory::Device), + _ => unreachable!(), + } + } + + unsafe fn destroy_command_pool(&self, pool: RawCommandPool) { + self.raw.0.destroy_command_pool(pool.raw, None); + } + + unsafe fn create_render_pass<'a, IA, IS, ID>( + &self, + attachments: IA, + subpasses: IS, + dependencies: ID, + ) -> Result + where + IA: IntoIterator, + IA::Item: Borrow, + IS: IntoIterator, + IS::Item: Borrow>, + ID: IntoIterator, + ID::Item: Borrow, + { + let map_subpass_ref = |pass: pass::SubpassRef| match pass { + pass::SubpassRef::External => vk::SUBPASS_EXTERNAL, + pass::SubpassRef::Pass(id) => id as u32, + }; + + let attachments = attachments + .into_iter() + .map(|attachment| { + let attachment = attachment.borrow(); + vk::AttachmentDescription { + flags: vk::AttachmentDescriptionFlags::empty(), + format: attachment + .format + .map_or(vk::Format::UNDEFINED, 
conv::map_format), + samples: vk::SampleCountFlags::from_raw( + (attachment.samples as u32) & vk::SampleCountFlags::all().as_raw(), + ), + load_op: conv::map_attachment_load_op(attachment.ops.load), + store_op: conv::map_attachment_store_op(attachment.ops.store), + stencil_load_op: conv::map_attachment_load_op(attachment.stencil_ops.load), + stencil_store_op: conv::map_attachment_store_op(attachment.stencil_ops.store), + initial_layout: conv::map_image_layout(attachment.layouts.start), + final_layout: conv::map_image_layout(attachment.layouts.end), + } + }) + .collect::>(); + + let clear_attachments_mask = attachments + .iter() + .enumerate() + .filter_map(|(i, at)| { + if at.load_op == vk::AttachmentLoadOp::CLEAR + || at.stencil_load_op == vk::AttachmentLoadOp::CLEAR + { + Some(1 << i as u64) + } else { + None + } + }) + .sum(); + + let attachment_refs = subpasses + .into_iter() + .map(|subpass| { + let subpass = subpass.borrow(); + fn make_ref(&(id, layout): &pass::AttachmentRef) -> vk::AttachmentReference { + vk::AttachmentReference { + attachment: id as _, + layout: conv::map_image_layout(layout), + } + } + let colors = subpass.colors.iter().map(make_ref).collect::>(); + let depth_stencil = subpass.depth_stencil.map(make_ref); + let inputs = subpass.inputs.iter().map(make_ref).collect::>(); + let preserves = subpass + .preserves + .iter() + .map(|&id| id as u32) + .collect::>(); + let resolves = subpass.resolves.iter().map(make_ref).collect::>(); + + (colors, depth_stencil, inputs, preserves, resolves) + }) + .collect::>(); + + let subpasses = attachment_refs + .iter() + .map( + |(colors, depth_stencil, inputs, preserves, resolves)| vk::SubpassDescription { + flags: vk::SubpassDescriptionFlags::empty(), + pipeline_bind_point: vk::PipelineBindPoint::GRAPHICS, + input_attachment_count: inputs.len() as u32, + p_input_attachments: inputs.as_ptr(), + color_attachment_count: colors.len() as u32, + p_color_attachments: colors.as_ptr(), + p_resolve_attachments: if resolves.is_empty() { + ptr::null() + } else { + resolves.as_ptr() + }, + p_depth_stencil_attachment: match depth_stencil { + Some(ref aref) => aref as *const _, + None => ptr::null(), + }, + preserve_attachment_count: preserves.len() as u32, + p_preserve_attachments: preserves.as_ptr(), + }, + ) + .collect::>(); + + let dependencies = dependencies + .into_iter() + .map(|subpass_dep| { + let sdep = subpass_dep.borrow(); + + vk::SubpassDependency { + src_subpass: map_subpass_ref(sdep.passes.start), + dst_subpass: map_subpass_ref(sdep.passes.end), + src_stage_mask: conv::map_pipeline_stage(sdep.stages.start), + dst_stage_mask: conv::map_pipeline_stage(sdep.stages.end), + src_access_mask: conv::map_image_access(sdep.accesses.start), + dst_access_mask: conv::map_image_access(sdep.accesses.end), + dependency_flags: mem::transmute(sdep.flags), + } + }) + .collect::>(); + + let info = vk::RenderPassCreateInfo { + s_type: vk::StructureType::RENDER_PASS_CREATE_INFO, + p_next: ptr::null(), + flags: vk::RenderPassCreateFlags::empty(), + attachment_count: attachments.len() as u32, + p_attachments: attachments.as_ptr(), + subpass_count: subpasses.len() as u32, + p_subpasses: subpasses.as_ptr(), + dependency_count: dependencies.len() as u32, + p_dependencies: dependencies.as_ptr(), + }; + + let result = self.raw.0.create_render_pass(&info, None); + + match result { + Ok(renderpass) => Ok(n::RenderPass { + raw: renderpass, + clear_attachments_mask, + }), + Err(vk::Result::ERROR_OUT_OF_HOST_MEMORY) => Err(d::OutOfMemory::Host), + 
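+            // [Editorial note, not part of the vendored source] The
+            // `clear_attachments_mask` computed earlier in this function sets
+            // bit i when attachment i has a CLEAR load op (color or stencil),
+            // so `begin_render_pass` later knows which slots consume a
+            // caller-provided clear value. For attachments [CLEAR, LOAD, CLEAR]:
+            //
+            //     let mask: u64 = (1 << 0) + (1 << 2);
+            //     assert_eq!(mask, 0b101);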
Err(vk::Result::ERROR_OUT_OF_DEVICE_MEMORY) => Err(d::OutOfMemory::Device), + _ => unreachable!(), + } + } + + unsafe fn create_pipeline_layout( + &self, + sets: IS, + push_constant_ranges: IR, + ) -> Result + where + IS: IntoIterator, + IS::Item: Borrow, + IR: IntoIterator, + IR::Item: Borrow<(pso::ShaderStageFlags, Range)>, + { + let set_layouts = sets + .into_iter() + .map(|set| set.borrow().raw) + .collect::>(); + + debug!("create_pipeline_layout {:?}", set_layouts); + + let push_constant_ranges = push_constant_ranges + .into_iter() + .map(|range| { + let &(s, ref r) = range.borrow(); + vk::PushConstantRange { + stage_flags: conv::map_stage_flags(s), + offset: r.start, + size: r.end - r.start, + } + }) + .collect::>(); + + let info = vk::PipelineLayoutCreateInfo { + s_type: vk::StructureType::PIPELINE_LAYOUT_CREATE_INFO, + p_next: ptr::null(), + flags: vk::PipelineLayoutCreateFlags::empty(), + set_layout_count: set_layouts.len() as u32, + p_set_layouts: set_layouts.as_ptr(), + push_constant_range_count: push_constant_ranges.len() as u32, + p_push_constant_ranges: push_constant_ranges.as_ptr(), + }; + + let result = self.raw.0.create_pipeline_layout(&info, None); + + match result { + Ok(raw) => Ok(n::PipelineLayout { raw }), + Err(vk::Result::ERROR_OUT_OF_HOST_MEMORY) => Err(d::OutOfMemory::Host), + Err(vk::Result::ERROR_OUT_OF_DEVICE_MEMORY) => Err(d::OutOfMemory::Device), + _ => unreachable!(), + } + } + + unsafe fn create_pipeline_cache( + &self, + data: Option<&[u8]>, + ) -> Result { + let (data_len, data) = if let Some(d) = data { + (d.len(), d.as_ptr()) + } else { + (0_usize, ptr::null()) + }; + + let info = vk::PipelineCacheCreateInfo { + s_type: vk::StructureType::PIPELINE_CACHE_CREATE_INFO, + p_next: ptr::null(), + flags: vk::PipelineCacheCreateFlags::empty(), + initial_data_size: data_len, + p_initial_data: data as _, + }; + + let result = self.raw.0.create_pipeline_cache(&info, None); + + match result { + Ok(raw) => Ok(n::PipelineCache { raw }), + Err(vk::Result::ERROR_OUT_OF_HOST_MEMORY) => Err(d::OutOfMemory::Host), + Err(vk::Result::ERROR_OUT_OF_DEVICE_MEMORY) => Err(d::OutOfMemory::Device), + _ => unreachable!(), + } + } + + unsafe fn get_pipeline_cache_data( + &self, + cache: &n::PipelineCache, + ) -> Result, d::OutOfMemory> { + let result = self.raw.0.get_pipeline_cache_data(cache.raw); + + match result { + Ok(data) => Ok(data), + Err(vk::Result::ERROR_OUT_OF_HOST_MEMORY) => Err(d::OutOfMemory::Host), + Err(vk::Result::ERROR_OUT_OF_DEVICE_MEMORY) => Err(d::OutOfMemory::Device), + _ => unreachable!(), + } + } + + unsafe fn destroy_pipeline_cache(&self, cache: n::PipelineCache) { + self.raw.0.destroy_pipeline_cache(cache.raw, None); + } + + unsafe fn merge_pipeline_caches( + &self, + target: &n::PipelineCache, + sources: I, + ) -> Result<(), d::OutOfMemory> + where + I: IntoIterator, + I::Item: Borrow, + { + let caches = sources + .into_iter() + .map(|s| s.borrow().raw) + .collect::>(); + let result = self.raw.0.fp_v1_0().merge_pipeline_caches( + self.raw.0.handle(), + target.raw, + caches.len() as u32, + caches.as_ptr(), + ); + + match result { + vk::Result::SUCCESS => Ok(()), + vk::Result::ERROR_OUT_OF_HOST_MEMORY => Err(d::OutOfMemory::Host), + vk::Result::ERROR_OUT_OF_DEVICE_MEMORY => Err(d::OutOfMemory::Device), + _ => unreachable!(), + } + } + + unsafe fn create_graphics_pipeline<'a>( + &self, + desc: &pso::GraphicsPipelineDesc<'a, B>, + cache: Option<&n::PipelineCache>, + ) -> Result { + debug!("create_graphics_pipeline {:?}", desc); + + let mut buf = 
GraphicsPipelineInfoBuf::default(); + let mut buf = Pin::new(&mut buf); + GraphicsPipelineInfoBuf::initialize(&mut buf, self, desc); + + let info = { + let (base_handle, base_index) = match desc.parent { + pso::BasePipeline::Pipeline(pipeline) => (pipeline.0, -1), + pso::BasePipeline::Index(index) => (vk::Pipeline::null(), index as _), + pso::BasePipeline::None => (vk::Pipeline::null(), -1), + }; + + let mut flags = vk::PipelineCreateFlags::empty(); + match desc.parent { + pso::BasePipeline::None => (), + _ => { + flags |= vk::PipelineCreateFlags::DERIVATIVE; + } + } + if desc + .flags + .contains(pso::PipelineCreationFlags::DISABLE_OPTIMIZATION) + { + flags |= vk::PipelineCreateFlags::DISABLE_OPTIMIZATION; + } + if desc + .flags + .contains(pso::PipelineCreationFlags::ALLOW_DERIVATIVES) + { + flags |= vk::PipelineCreateFlags::ALLOW_DERIVATIVES; + } + + vk::GraphicsPipelineCreateInfo { + s_type: vk::StructureType::GRAPHICS_PIPELINE_CREATE_INFO, + p_next: ptr::null(), + flags, + stage_count: buf.stages.len() as _, + p_stages: buf.stages.as_ptr(), + p_vertex_input_state: &buf.vertex_input_state, + p_input_assembly_state: &buf.input_assembly_state, + p_rasterization_state: &buf.rasterization_state, + p_tessellation_state: match buf.tessellation_state.as_ref() { + Some(t) => t as _, + None => ptr::null(), + }, + p_viewport_state: &buf.viewport_state, + p_multisample_state: &buf.multisample_state, + p_depth_stencil_state: &buf.depth_stencil_state, + p_color_blend_state: &buf.color_blend_state, + p_dynamic_state: &buf.pipeline_dynamic_state, + layout: desc.layout.raw, + render_pass: desc.subpass.main_pass.raw, + subpass: desc.subpass.index as _, + base_pipeline_handle: base_handle, + base_pipeline_index: base_index, + } + }; + + let mut pipeline = vk::Pipeline::null(); + + match self.raw.0.fp_v1_0().create_graphics_pipelines( + self.raw.0.handle(), + cache.map_or(vk::PipelineCache::null(), |cache| cache.raw), + 1, + &info, + ptr::null(), + &mut pipeline, + ) { + vk::Result::SUCCESS => Ok(n::GraphicsPipeline(pipeline)), + vk::Result::ERROR_OUT_OF_HOST_MEMORY => Err(d::OutOfMemory::Host.into()), + vk::Result::ERROR_OUT_OF_DEVICE_MEMORY => Err(d::OutOfMemory::Device.into()), + _ => Err(pso::CreationError::Other), + } + } + + unsafe fn create_graphics_pipelines<'a, T>( + &self, + descs: T, + cache: Option<&n::PipelineCache>, + ) -> Vec> + where + T: IntoIterator, + T::Item: Borrow>, + { + debug!("create_graphics_pipelines:"); + + let mut bufs: Pin> = descs + .into_iter() + .enumerate() + .inspect(|(idx, desc)| debug!("# {} {:?}", idx, desc.borrow())) + .map(|(_, desc)| (desc, GraphicsPipelineInfoBuf::default())) + .collect::>() + .into(); + + for (desc, buf) in bufs.as_mut().get_unchecked_mut() { + let desc: &T::Item = desc; + GraphicsPipelineInfoBuf::initialize(&mut Pin::new_unchecked(buf), self, desc.borrow()); + } + + let infos: Vec<_> = bufs + .iter() + .map(|(desc, buf)| { + let desc = desc.borrow(); + + let (base_handle, base_index) = match desc.parent { + pso::BasePipeline::Pipeline(pipeline) => (pipeline.0, -1), + pso::BasePipeline::Index(index) => (vk::Pipeline::null(), index as _), + pso::BasePipeline::None => (vk::Pipeline::null(), -1), + }; + + let mut flags = vk::PipelineCreateFlags::empty(); + match desc.parent { + pso::BasePipeline::None => (), + _ => { + flags |= vk::PipelineCreateFlags::DERIVATIVE; + } + } + if desc + .flags + .contains(pso::PipelineCreationFlags::DISABLE_OPTIMIZATION) + { + flags |= vk::PipelineCreateFlags::DISABLE_OPTIMIZATION; + } + if desc + .flags + 
.contains(pso::PipelineCreationFlags::ALLOW_DERIVATIVES) + { + flags |= vk::PipelineCreateFlags::ALLOW_DERIVATIVES; + } + + vk::GraphicsPipelineCreateInfo { + s_type: vk::StructureType::GRAPHICS_PIPELINE_CREATE_INFO, + p_next: ptr::null(), + flags, + stage_count: buf.stages.len() as _, + p_stages: buf.stages.as_ptr(), + p_vertex_input_state: &buf.vertex_input_state, + p_input_assembly_state: &buf.input_assembly_state, + p_rasterization_state: &buf.rasterization_state, + p_tessellation_state: match buf.tessellation_state.as_ref() { + Some(t) => t as _, + None => ptr::null(), + }, + p_viewport_state: &buf.viewport_state, + p_multisample_state: &buf.multisample_state, + p_depth_stencil_state: &buf.depth_stencil_state, + p_color_blend_state: &buf.color_blend_state, + p_dynamic_state: &buf.pipeline_dynamic_state, + layout: desc.layout.raw, + render_pass: desc.subpass.main_pass.raw, + subpass: desc.subpass.index as _, + base_pipeline_handle: base_handle, + base_pipeline_index: base_index, + } + }) + .collect(); + + let (pipelines, error) = if infos.is_empty() { + (Vec::new(), None) + } else { + match self.raw.0.create_graphics_pipelines( + cache.map_or(vk::PipelineCache::null(), |cache| cache.raw), + &infos, + None, + ) { + Ok(pipelines) => (pipelines, None), + Err((pipelines, error)) => (pipelines, Some(error)), + } + }; + + pipelines + .into_iter() + .map(|pso| { + if pso == vk::Pipeline::null() { + match error { + Some(vk::Result::ERROR_OUT_OF_HOST_MEMORY) => { + Err(d::OutOfMemory::Host.into()) + } + Some(vk::Result::ERROR_OUT_OF_DEVICE_MEMORY) => { + Err(d::OutOfMemory::Device.into()) + } + _ => unreachable!(), + } + } else { + Ok(n::GraphicsPipeline(pso)) + } + }) + .collect() + } + + unsafe fn create_compute_pipeline<'a>( + &self, + desc: &pso::ComputePipelineDesc<'a, B>, + cache: Option<&n::PipelineCache>, + ) -> Result { + let mut buf = ComputePipelineInfoBuf::default(); + let mut buf = Pin::new(&mut buf); + ComputePipelineInfoBuf::initialize(&mut buf, desc); + + let info = { + let stage = vk::PipelineShaderStageCreateInfo { + s_type: vk::StructureType::PIPELINE_SHADER_STAGE_CREATE_INFO, + p_next: ptr::null(), + flags: vk::PipelineShaderStageCreateFlags::empty(), + stage: vk::ShaderStageFlags::COMPUTE, + module: desc.shader.module.raw, + p_name: buf.c_string.as_ptr(), + p_specialization_info: &buf.specialization, + }; + + let (base_handle, base_index) = match desc.parent { + pso::BasePipeline::Pipeline(pipeline) => (pipeline.0, -1), + pso::BasePipeline::Index(index) => (vk::Pipeline::null(), index as _), + pso::BasePipeline::None => (vk::Pipeline::null(), -1), + }; + + let mut flags = vk::PipelineCreateFlags::empty(); + match desc.parent { + pso::BasePipeline::None => (), + _ => { + flags |= vk::PipelineCreateFlags::DERIVATIVE; + } + } + if desc + .flags + .contains(pso::PipelineCreationFlags::DISABLE_OPTIMIZATION) + { + flags |= vk::PipelineCreateFlags::DISABLE_OPTIMIZATION; + } + if desc + .flags + .contains(pso::PipelineCreationFlags::ALLOW_DERIVATIVES) + { + flags |= vk::PipelineCreateFlags::ALLOW_DERIVATIVES; + } + + vk::ComputePipelineCreateInfo { + s_type: vk::StructureType::COMPUTE_PIPELINE_CREATE_INFO, + p_next: ptr::null(), + flags, + stage, + layout: desc.layout.raw, + base_pipeline_handle: base_handle, + base_pipeline_index: base_index, + } + }; + + let mut pipeline = vk::Pipeline::null(); + + match self.raw.0.fp_v1_0().create_compute_pipelines( + self.raw.0.handle(), + cache.map_or(vk::PipelineCache::null(), |cache| cache.raw), + 1, + &info, + ptr::null(), + &mut pipeline, 
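+            // [Editorial note, not part of the vendored source] The pinned
+            // buffers (`Pin<Box<[_]>>`) used by the batched `create_*_pipelines`
+            // paths exist because each `vk::*PipelineCreateInfo` stores raw
+            // pointers (`p_name`, `p_stages`, ...) into its info buffer, so
+            // the buffers must not move between being filled in and the FFI
+            // call. A minimal sketch of the idea:
+            //
+            //     let data = Pin::new(Box::new([1u8, 2, 3]));
+            //     let ptr = data.as_ptr(); // remains valid; the allocation won't move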
+ ) { + vk::Result::SUCCESS => Ok(n::ComputePipeline(pipeline)), + vk::Result::ERROR_OUT_OF_HOST_MEMORY => Err(d::OutOfMemory::Host.into()), + vk::Result::ERROR_OUT_OF_DEVICE_MEMORY => Err(d::OutOfMemory::Device.into()), + _ => Err(pso::CreationError::Other), + } + } + + unsafe fn create_compute_pipelines<'a, T>( + &self, + descs: T, + cache: Option<&n::PipelineCache>, + ) -> Vec> + where + T: IntoIterator, + T::Item: Borrow>, + { + let mut bufs: Pin> = descs + .into_iter() + .map(|desc| (desc, ComputePipelineInfoBuf::default())) + .collect::>() + .into(); + + for (desc, buf) in bufs.as_mut().get_unchecked_mut() { + let desc: &T::Item = desc; + ComputePipelineInfoBuf::initialize(&mut Pin::new_unchecked(buf), desc.borrow()); + } + + let infos: Vec<_> = bufs + .iter() + .map(|(desc, buf)| { + let desc = desc.borrow(); + + let stage = vk::PipelineShaderStageCreateInfo { + s_type: vk::StructureType::PIPELINE_SHADER_STAGE_CREATE_INFO, + p_next: ptr::null(), + flags: vk::PipelineShaderStageCreateFlags::empty(), + stage: vk::ShaderStageFlags::COMPUTE, + module: desc.shader.module.raw, + p_name: buf.c_string.as_ptr(), + p_specialization_info: &buf.specialization, + }; + + let (base_handle, base_index) = match desc.parent { + pso::BasePipeline::Pipeline(pipeline) => (pipeline.0, -1), + pso::BasePipeline::Index(index) => (vk::Pipeline::null(), index as _), + pso::BasePipeline::None => (vk::Pipeline::null(), -1), + }; + + let mut flags = vk::PipelineCreateFlags::empty(); + match desc.parent { + pso::BasePipeline::None => (), + _ => { + flags |= vk::PipelineCreateFlags::DERIVATIVE; + } + } + if desc + .flags + .contains(pso::PipelineCreationFlags::DISABLE_OPTIMIZATION) + { + flags |= vk::PipelineCreateFlags::DISABLE_OPTIMIZATION; + } + if desc + .flags + .contains(pso::PipelineCreationFlags::ALLOW_DERIVATIVES) + { + flags |= vk::PipelineCreateFlags::ALLOW_DERIVATIVES; + } + + vk::ComputePipelineCreateInfo { + s_type: vk::StructureType::COMPUTE_PIPELINE_CREATE_INFO, + p_next: ptr::null(), + flags, + stage, + layout: desc.layout.raw, + base_pipeline_handle: base_handle, + base_pipeline_index: base_index, + } + }) + .collect(); + + let (pipelines, error) = if infos.is_empty() { + (Vec::new(), None) + } else { + match self.raw.0.create_compute_pipelines( + cache.map_or(vk::PipelineCache::null(), |cache| cache.raw), + &infos, + None, + ) { + Ok(pipelines) => (pipelines, None), + Err((pipelines, error)) => (pipelines, Some(error)), + } + }; + + pipelines + .into_iter() + .map(|pso| { + if pso == vk::Pipeline::null() { + match error { + Some(vk::Result::ERROR_OUT_OF_HOST_MEMORY) => { + Err(d::OutOfMemory::Host.into()) + } + Some(vk::Result::ERROR_OUT_OF_DEVICE_MEMORY) => { + Err(d::OutOfMemory::Device.into()) + } + _ => unreachable!(), + } + } else { + Ok(n::ComputePipeline(pso)) + } + }) + .collect() + } + + unsafe fn create_framebuffer( + &self, + renderpass: &n::RenderPass, + attachments: T, + extent: image::Extent, + ) -> Result + where + T: IntoIterator, + T::Item: Borrow, + { + let mut framebuffers_ptr = None; + let mut raw_attachments = SmallVec::<[_; 4]>::new(); + for attachment in attachments { + let at = attachment.borrow(); + raw_attachments.push(at.view); + match at.owner { + n::ImageViewOwner::User => {} + n::ImageViewOwner::Surface(ref fbo_ptr) => { + framebuffers_ptr = Some(Arc::clone(&fbo_ptr.0)); + } + } + } + + let info = vk::FramebufferCreateInfo { + s_type: vk::StructureType::FRAMEBUFFER_CREATE_INFO, + p_next: ptr::null(), + flags: vk::FramebufferCreateFlags::empty(), + render_pass: 
renderpass.raw, + attachment_count: raw_attachments.len() as u32, + p_attachments: raw_attachments.as_ptr(), + width: extent.width, + height: extent.height, + layers: extent.depth, + }; + + let result = self.raw.0.create_framebuffer(&info, None); + + match result { + Ok(raw) => Ok(n::Framebuffer { + raw, + owned: match framebuffers_ptr { + Some(fbo_ptr) => { + fbo_ptr.lock().unwrap().framebuffers.push(raw); + false + } + None => true, + }, + }), + Err(vk::Result::ERROR_OUT_OF_HOST_MEMORY) => Err(d::OutOfMemory::Host), + Err(vk::Result::ERROR_OUT_OF_DEVICE_MEMORY) => Err(d::OutOfMemory::Device), + _ => unreachable!(), + } + } + + unsafe fn create_shader_module( + &self, + spirv_data: &[u32], + ) -> Result { + let info = vk::ShaderModuleCreateInfo { + s_type: vk::StructureType::SHADER_MODULE_CREATE_INFO, + p_next: ptr::null(), + flags: vk::ShaderModuleCreateFlags::empty(), + code_size: spirv_data.len() * 4, + p_code: spirv_data.as_ptr(), + }; + + let module = self.raw.0.create_shader_module(&info, None); + + match module { + Ok(raw) => Ok(n::ShaderModule { raw }), + Err(vk::Result::ERROR_OUT_OF_HOST_MEMORY) => Err(d::OutOfMemory::Host.into()), + Err(vk::Result::ERROR_OUT_OF_DEVICE_MEMORY) => Err(d::OutOfMemory::Device.into()), + Err(_) => { + Err(d::ShaderError::CompilationFailed(String::new())) + } + } + } + + unsafe fn create_sampler( + &self, + desc: &image::SamplerDesc, + ) -> Result { + use hal::pso::Comparison; + + let (anisotropy_enable, max_anisotropy) = match desc.anisotropic { + image::Anisotropic::Off => (vk::FALSE, 1.0), + image::Anisotropic::On(aniso) => { + if self.raw.1.contains(Features::SAMPLER_ANISOTROPY) { + (vk::TRUE, aniso as f32) + } else { + warn!( + "Anisotropy({}) was requested on a device with disabled feature", + aniso + ); + (vk::FALSE, 1.0) + } + } + }; + let info = vk::SamplerCreateInfo { + s_type: vk::StructureType::SAMPLER_CREATE_INFO, + p_next: ptr::null(), + flags: vk::SamplerCreateFlags::empty(), + mag_filter: conv::map_filter(desc.mag_filter), + min_filter: conv::map_filter(desc.min_filter), + mipmap_mode: conv::map_mip_filter(desc.mip_filter), + address_mode_u: conv::map_wrap(desc.wrap_mode.0), + address_mode_v: conv::map_wrap(desc.wrap_mode.1), + address_mode_w: conv::map_wrap(desc.wrap_mode.2), + mip_lod_bias: desc.lod_bias.0, + anisotropy_enable, + max_anisotropy, + compare_enable: if desc.comparison.is_some() { + vk::TRUE + } else { + vk::FALSE + }, + compare_op: conv::map_comparison(desc.comparison.unwrap_or(Comparison::Never)), + min_lod: desc.lod_range.start.0, + max_lod: desc.lod_range.end.0, + border_color: match conv::map_border_color(desc.border) { + Some(bc) => bc, + None => { + error!("Unsupported border color {:x}", desc.border.0); + vk::BorderColor::FLOAT_TRANSPARENT_BLACK + } + }, + unnormalized_coordinates: if desc.normalized { + vk::FALSE + } else { + vk::TRUE + }, + }; + + let result = self.raw.0.create_sampler(&info, None); + + match result { + Ok(sampler) => Ok(n::Sampler(sampler)), + Err(vk::Result::ERROR_TOO_MANY_OBJECTS) => Err(d::AllocationError::TooManyObjects), + Err(vk::Result::ERROR_OUT_OF_HOST_MEMORY) => Err(d::OutOfMemory::Host.into()), + Err(vk::Result::ERROR_OUT_OF_DEVICE_MEMORY) => Err(d::OutOfMemory::Device.into()), + _ => unreachable!(), + } + } + + + unsafe fn create_buffer( + &self, + size: u64, + usage: buffer::Usage, + ) -> Result { + let info = vk::BufferCreateInfo { + s_type: vk::StructureType::BUFFER_CREATE_INFO, + p_next: ptr::null(), + flags: vk::BufferCreateFlags::empty(), + size, + usage: 
conv::map_buffer_usage(usage), + sharing_mode: vk::SharingMode::EXCLUSIVE, + queue_family_index_count: 0, + p_queue_family_indices: ptr::null(), + }; + + let result = self.raw.0.create_buffer(&info, None); + + match result { + Ok(raw) => Ok(n::Buffer { raw }), + Err(vk::Result::ERROR_OUT_OF_HOST_MEMORY) => Err(d::OutOfMemory::Host.into()), + Err(vk::Result::ERROR_OUT_OF_DEVICE_MEMORY) => Err(d::OutOfMemory::Device.into()), + _ => unreachable!(), + } + } + + unsafe fn get_buffer_requirements(&self, buffer: &n::Buffer) -> Requirements { + let req = self.raw.0.get_buffer_memory_requirements(buffer.raw); + + Requirements { + size: req.size, + alignment: req.alignment, + type_mask: req.memory_type_bits as _, + } + } + + unsafe fn bind_buffer_memory( + &self, + memory: &n::Memory, + offset: u64, + buffer: &mut n::Buffer, + ) -> Result<(), d::BindError> { + let result = self + .raw + .0 + .bind_buffer_memory(buffer.raw, memory.raw, offset); + + match result { + Ok(()) => Ok(()), + Err(vk::Result::ERROR_OUT_OF_HOST_MEMORY) => Err(d::OutOfMemory::Host.into()), + Err(vk::Result::ERROR_OUT_OF_DEVICE_MEMORY) => Err(d::OutOfMemory::Device.into()), + _ => unreachable!(), + } + } + + unsafe fn create_buffer_view>( + &self, + buffer: &n::Buffer, + format: Option, + range: R, + ) -> Result { + let (offset, size) = conv::map_range_arg(&range); + let info = vk::BufferViewCreateInfo { + s_type: vk::StructureType::BUFFER_VIEW_CREATE_INFO, + p_next: ptr::null(), + flags: vk::BufferViewCreateFlags::empty(), + buffer: buffer.raw, + format: format.map_or(vk::Format::UNDEFINED, conv::map_format), + offset, + range: size, + }; + + let result = self.raw.0.create_buffer_view(&info, None); + + match result { + Ok(raw) => Ok(n::BufferView { raw }), + Err(vk::Result::ERROR_OUT_OF_HOST_MEMORY) => Err(d::OutOfMemory::Host.into()), + Err(vk::Result::ERROR_OUT_OF_DEVICE_MEMORY) => Err(d::OutOfMemory::Device.into()), + _ => unreachable!(), + } + } + + unsafe fn create_image( + &self, + kind: image::Kind, + mip_levels: image::Level, + format: format::Format, + tiling: image::Tiling, + usage: image::Usage, + view_caps: image::ViewCapabilities, + ) -> Result { + let flags = conv::map_view_capabilities(view_caps); + let extent = conv::map_extent(kind.extent()); + let array_layers = kind.num_layers(); + let samples = kind.num_samples() as u32; + let image_type = match kind { + image::Kind::D1(..) => vk::ImageType::TYPE_1D, + image::Kind::D2(..) => vk::ImageType::TYPE_2D, + image::Kind::D3(..) 
=> vk::ImageType::TYPE_3D, + }; + + let info = vk::ImageCreateInfo { + s_type: vk::StructureType::IMAGE_CREATE_INFO, + p_next: ptr::null(), + flags, + image_type, + format: conv::map_format(format), + extent: extent.clone(), + mip_levels: mip_levels as u32, + array_layers: array_layers as u32, + samples: vk::SampleCountFlags::from_raw(samples & vk::SampleCountFlags::all().as_raw()), + tiling: conv::map_tiling(tiling), + usage: conv::map_image_usage(usage), + sharing_mode: vk::SharingMode::EXCLUSIVE, + queue_family_index_count: 0, + p_queue_family_indices: ptr::null(), + initial_layout: vk::ImageLayout::UNDEFINED, + }; + + let result = self.raw.0.create_image(&info, None); + + match result { + Ok(raw) => Ok(n::Image { + raw, + ty: image_type, + flags, + extent, + }), + Err(vk::Result::ERROR_OUT_OF_HOST_MEMORY) => Err(d::OutOfMemory::Host.into()), + Err(vk::Result::ERROR_OUT_OF_DEVICE_MEMORY) => Err(d::OutOfMemory::Device.into()), + _ => unreachable!(), + } + } + + unsafe fn get_image_requirements(&self, image: &n::Image) -> Requirements { + let req = self.raw.0.get_image_memory_requirements(image.raw); + + Requirements { + size: req.size, + alignment: req.alignment, + type_mask: req.memory_type_bits as _, + } + } + + unsafe fn get_image_subresource_footprint( + &self, + image: &n::Image, + subresource: image::Subresource, + ) -> image::SubresourceFootprint { + let sub = conv::map_subresource(&subresource); + let layout = self.raw.0.get_image_subresource_layout(image.raw, sub); + + image::SubresourceFootprint { + slice: layout.offset .. layout.offset + layout.size, + row_pitch: layout.row_pitch, + array_pitch: layout.array_pitch, + depth_pitch: layout.depth_pitch, + } + } + + unsafe fn bind_image_memory( + &self, + memory: &n::Memory, + offset: u64, + image: &mut n::Image, + ) -> Result<(), d::BindError> { + + + let result = self.raw.0.bind_image_memory(image.raw, memory.raw, offset); + + match result { + Ok(()) => Ok(()), + Err(vk::Result::ERROR_OUT_OF_HOST_MEMORY) => Err(d::OutOfMemory::Host.into()), + Err(vk::Result::ERROR_OUT_OF_DEVICE_MEMORY) => Err(d::OutOfMemory::Device.into()), + _ => unreachable!(), + } + } + + unsafe fn create_image_view( + &self, + image: &n::Image, + kind: image::ViewKind, + format: format::Format, + swizzle: format::Swizzle, + range: image::SubresourceRange, + ) -> Result { + let is_cube = image + .flags + .intersects(vk::ImageCreateFlags::CUBE_COMPATIBLE); + let info = vk::ImageViewCreateInfo { + s_type: vk::StructureType::IMAGE_VIEW_CREATE_INFO, + p_next: ptr::null(), + flags: vk::ImageViewCreateFlags::empty(), + image: image.raw, + view_type: match conv::map_view_kind(kind, image.ty, is_cube) { + Some(ty) => ty, + None => return Err(image::ViewError::BadKind(kind)), + }, + format: conv::map_format(format), + components: conv::map_swizzle(swizzle), + subresource_range: conv::map_subresource_range(&range), + }; + + let result = self.raw.0.create_image_view(&info, None); + + match result { + Ok(view) => Ok(n::ImageView { + image: image.raw, + view, + range, + owner: n::ImageViewOwner::User, + }), + Err(vk::Result::ERROR_OUT_OF_HOST_MEMORY) => Err(d::OutOfMemory::Host.into()), + Err(vk::Result::ERROR_OUT_OF_DEVICE_MEMORY) => Err(d::OutOfMemory::Device.into()), + _ => unreachable!(), + } + } + + unsafe fn create_descriptor_pool( + &self, + max_sets: usize, + descriptor_pools: T, + flags: pso::DescriptorPoolCreateFlags, + ) -> Result + where + T: IntoIterator, + T::Item: Borrow, + { + let pools = descriptor_pools + .into_iter() + .map(|pool| { + let pool = 
pool.borrow(); + vk::DescriptorPoolSize { + ty: conv::map_descriptor_type(pool.ty), + descriptor_count: pool.count as u32, + } + }) + .collect::>(); + + let info = vk::DescriptorPoolCreateInfo { + s_type: vk::StructureType::DESCRIPTOR_POOL_CREATE_INFO, + p_next: ptr::null(), + flags: conv::map_descriptor_pool_create_flags(flags), + max_sets: max_sets as u32, + pool_size_count: pools.len() as u32, + p_pool_sizes: pools.as_ptr(), + }; + + let result = self.raw.0.create_descriptor_pool(&info, None); + + match result { + Ok(pool) => Ok(n::DescriptorPool { + raw: pool, + device: self.raw.clone(), + set_free_vec: Vec::new(), + }), + Err(vk::Result::ERROR_OUT_OF_HOST_MEMORY) => Err(d::OutOfMemory::Host.into()), + Err(vk::Result::ERROR_OUT_OF_DEVICE_MEMORY) => Err(d::OutOfMemory::Device.into()), + _ => unreachable!(), + } + } + + unsafe fn create_descriptor_set_layout( + &self, + binding_iter: I, + immutable_sampler_iter: J, + ) -> Result + where + I: IntoIterator, + I::Item: Borrow, + J: IntoIterator, + J::Item: Borrow, + { + let immutable_samplers = immutable_sampler_iter + .into_iter() + .map(|is| is.borrow().0) + .collect::>(); + let mut sampler_offset = 0; + + let bindings = Arc::new( + binding_iter + .into_iter() + .map(|b| b.borrow().clone()) + .collect::>(), + ); + + let raw_bindings = bindings + .iter() + .map(|b| vk::DescriptorSetLayoutBinding { + binding: b.binding, + descriptor_type: conv::map_descriptor_type(b.ty), + descriptor_count: b.count as _, + stage_flags: conv::map_stage_flags(b.stage_flags), + p_immutable_samplers: if b.immutable_samplers { + let slice = &immutable_samplers[sampler_offset ..]; + sampler_offset += b.count; + slice.as_ptr() + } else { + ptr::null() + }, + }) + .collect::>(); + + debug!("create_descriptor_set_layout {:?}", raw_bindings); + + let info = vk::DescriptorSetLayoutCreateInfo { + s_type: vk::StructureType::DESCRIPTOR_SET_LAYOUT_CREATE_INFO, + p_next: ptr::null(), + flags: vk::DescriptorSetLayoutCreateFlags::empty(), + binding_count: raw_bindings.len() as _, + p_bindings: raw_bindings.as_ptr(), + }; + + let result = self.raw.0.create_descriptor_set_layout(&info, None); + + match result { + Ok(layout) => Ok(n::DescriptorSetLayout { + raw: layout, + bindings, + }), + Err(vk::Result::ERROR_OUT_OF_HOST_MEMORY) => Err(d::OutOfMemory::Host.into()), + Err(vk::Result::ERROR_OUT_OF_DEVICE_MEMORY) => Err(d::OutOfMemory::Device.into()), + _ => unreachable!(), + } + } + + unsafe fn write_descriptor_sets<'a, I, J>(&self, write_iter: I) + where + I: IntoIterator>, + J: IntoIterator, + J::Item: Borrow>, + { + let mut raw_writes = Vec::new(); + let mut image_infos = Vec::new(); + let mut buffer_infos = Vec::new(); + let mut texel_buffer_views = Vec::new(); + + for sw in write_iter { + let layout = sw + .set + .bindings + .iter() + .find(|lb| lb.binding == sw.binding) + .expect("Descriptor set writes don't match the set layout!"); + let mut raw = vk::WriteDescriptorSet { + s_type: vk::StructureType::WRITE_DESCRIPTOR_SET, + p_next: ptr::null(), + dst_set: sw.set.raw, + dst_binding: sw.binding, + dst_array_element: sw.array_offset as _, + descriptor_count: 0, + descriptor_type: conv::map_descriptor_type(layout.ty), + p_image_info: ptr::null(), + p_buffer_info: ptr::null(), + p_texel_buffer_view: ptr::null(), + }; + + for descriptor in sw.descriptors { + raw.descriptor_count += 1; + match *descriptor.borrow() { + pso::Descriptor::Sampler(sampler) => { + image_infos.push(vk::DescriptorImageInfo { + sampler: sampler.0, + image_view: vk::ImageView::null(), + image_layout: 
vk::ImageLayout::GENERAL, + }); + } + pso::Descriptor::Image(view, layout) => { + image_infos.push(vk::DescriptorImageInfo { + sampler: vk::Sampler::null(), + image_view: view.view, + image_layout: conv::map_image_layout(layout), + }); + } + pso::Descriptor::CombinedImageSampler(view, layout, sampler) => { + image_infos.push(vk::DescriptorImageInfo { + sampler: sampler.0, + image_view: view.view, + image_layout: conv::map_image_layout(layout), + }); + } + pso::Descriptor::Buffer(buffer, ref range) => { + let offset = range.start.unwrap_or(0); + buffer_infos.push(vk::DescriptorBufferInfo { + buffer: buffer.raw, + offset, + range: match range.end { + Some(end) => end - offset, + None => vk::WHOLE_SIZE, + }, + }); + } + pso::Descriptor::UniformTexelBuffer(view) + | pso::Descriptor::StorageTexelBuffer(view) => { + texel_buffer_views.push(view.raw); + } + } + } + + raw.p_image_info = image_infos.len() as _; + raw.p_buffer_info = buffer_infos.len() as _; + raw.p_texel_buffer_view = texel_buffer_views.len() as _; + raw_writes.push(raw); + } + + + for raw in &mut raw_writes { + use crate::vk::DescriptorType as Dt; + match raw.descriptor_type { + Dt::SAMPLER + | Dt::SAMPLED_IMAGE + | Dt::STORAGE_IMAGE + | Dt::COMBINED_IMAGE_SAMPLER + | Dt::INPUT_ATTACHMENT => { + raw.p_buffer_info = ptr::null(); + raw.p_texel_buffer_view = ptr::null(); + let base = raw.p_image_info as usize - raw.descriptor_count as usize; + raw.p_image_info = image_infos[base ..].as_ptr(); + } + Dt::UNIFORM_TEXEL_BUFFER | Dt::STORAGE_TEXEL_BUFFER => { + raw.p_buffer_info = ptr::null(); + raw.p_image_info = ptr::null(); + let base = raw.p_texel_buffer_view as usize - raw.descriptor_count as usize; + raw.p_texel_buffer_view = texel_buffer_views[base ..].as_ptr(); + } + Dt::UNIFORM_BUFFER + | Dt::STORAGE_BUFFER + | Dt::STORAGE_BUFFER_DYNAMIC + | Dt::UNIFORM_BUFFER_DYNAMIC => { + raw.p_image_info = ptr::null(); + raw.p_texel_buffer_view = ptr::null(); + let base = raw.p_buffer_info as usize - raw.descriptor_count as usize; + raw.p_buffer_info = buffer_infos[base ..].as_ptr(); + } + _ => panic!("unknown descriptor type"), + } + } + + self.raw.0.update_descriptor_sets(&raw_writes, &[]); + } + + unsafe fn copy_descriptor_sets<'a, I>(&self, copies: I) + where + I: IntoIterator, + I::Item: Borrow>, + { + let copies = copies + .into_iter() + .map(|copy| { + let c = copy.borrow(); + vk::CopyDescriptorSet { + s_type: vk::StructureType::COPY_DESCRIPTOR_SET, + p_next: ptr::null(), + src_set: c.src_set.raw, + src_binding: c.src_binding as u32, + src_array_element: c.src_array_offset as u32, + dst_set: c.dst_set.raw, + dst_binding: c.dst_binding as u32, + dst_array_element: c.dst_array_offset as u32, + descriptor_count: c.count as u32, + } + }) + .collect::>(); + + self.raw.0.update_descriptor_sets(&[], &copies); + } + + unsafe fn map_memory(&self, memory: &n::Memory, range: R) -> Result<*mut u8, d::MapError> + where + R: RangeArg, + { + let (offset, size) = conv::map_range_arg(&range); + let result = self + .raw + .0 + .map_memory(memory.raw, offset, size, vk::MemoryMapFlags::empty()); + + match result { + Ok(ptr) => Ok(ptr as *mut _), + Err(vk::Result::ERROR_MEMORY_MAP_FAILED) => Err(d::MapError::MappingFailed), + Err(vk::Result::ERROR_OUT_OF_HOST_MEMORY) => Err(d::OutOfMemory::Host.into()), + Err(vk::Result::ERROR_OUT_OF_DEVICE_MEMORY) => Err(d::OutOfMemory::Device.into()), + _ => unreachable!(), + } + } + + unsafe fn unmap_memory(&self, memory: &n::Memory) { + self.raw.0.unmap_memory(memory.raw) + } + + unsafe fn 
flush_mapped_memory_ranges<'a, I, R>(&self, ranges: I) -> Result<(), d::OutOfMemory> + where + I: IntoIterator, + I::Item: Borrow<(&'a n::Memory, R)>, + R: RangeArg, + { + let ranges = conv::map_memory_ranges(ranges); + let result = self.raw.0.flush_mapped_memory_ranges(&ranges); + + match result { + Ok(()) => Ok(()), + Err(vk::Result::ERROR_OUT_OF_HOST_MEMORY) => Err(d::OutOfMemory::Host), + Err(vk::Result::ERROR_OUT_OF_DEVICE_MEMORY) => Err(d::OutOfMemory::Device), + _ => unreachable!(), + } + } + + unsafe fn invalidate_mapped_memory_ranges<'a, I, R>( + &self, + ranges: I, + ) -> Result<(), d::OutOfMemory> + where + I: IntoIterator, + I::Item: Borrow<(&'a n::Memory, R)>, + R: RangeArg, + { + let ranges = conv::map_memory_ranges(ranges); + let result = self.raw.0.invalidate_mapped_memory_ranges(&ranges); + + match result { + Ok(()) => Ok(()), + Err(vk::Result::ERROR_OUT_OF_HOST_MEMORY) => Err(d::OutOfMemory::Host), + Err(vk::Result::ERROR_OUT_OF_DEVICE_MEMORY) => Err(d::OutOfMemory::Device), + _ => unreachable!(), + } + } + + fn create_semaphore(&self) -> Result { + let info = vk::SemaphoreCreateInfo { + s_type: vk::StructureType::SEMAPHORE_CREATE_INFO, + p_next: ptr::null(), + flags: vk::SemaphoreCreateFlags::empty(), + }; + + let result = unsafe { self.raw.0.create_semaphore(&info, None) }; + + match result { + Ok(semaphore) => Ok(n::Semaphore(semaphore)), + Err(vk::Result::ERROR_OUT_OF_HOST_MEMORY) => Err(d::OutOfMemory::Host.into()), + Err(vk::Result::ERROR_OUT_OF_DEVICE_MEMORY) => Err(d::OutOfMemory::Device.into()), + _ => unreachable!(), + } + } + + fn create_fence(&self, signaled: bool) -> Result { + let info = vk::FenceCreateInfo { + s_type: vk::StructureType::FENCE_CREATE_INFO, + p_next: ptr::null(), + flags: if signaled { + vk::FenceCreateFlags::SIGNALED + } else { + vk::FenceCreateFlags::empty() + }, + }; + + let result = unsafe { self.raw.0.create_fence(&info, None) }; + + match result { + Ok(fence) => Ok(n::Fence(fence)), + Err(vk::Result::ERROR_OUT_OF_HOST_MEMORY) => Err(d::OutOfMemory::Host.into()), + Err(vk::Result::ERROR_OUT_OF_DEVICE_MEMORY) => Err(d::OutOfMemory::Device.into()), + _ => unreachable!(), + } + } + + unsafe fn reset_fences(&self, fences: I) -> Result<(), d::OutOfMemory> + where + I: IntoIterator, + I::Item: Borrow, + { + let fences = fences + .into_iter() + .map(|fence| fence.borrow().0) + .collect::>(); + let result = self.raw.0.reset_fences(&fences); + + match result { + Ok(()) => Ok(()), + Err(vk::Result::ERROR_OUT_OF_HOST_MEMORY) => Err(d::OutOfMemory::Host.into()), + Err(vk::Result::ERROR_OUT_OF_DEVICE_MEMORY) => Err(d::OutOfMemory::Device.into()), + _ => unreachable!(), + } + } + + unsafe fn wait_for_fences( + &self, + fences: I, + wait: d::WaitFor, + timeout_ns: u64, + ) -> Result + where + I: IntoIterator, + I::Item: Borrow, + { + let fences = fences + .into_iter() + .map(|fence| fence.borrow().0) + .collect::>(); + let all = match wait { + d::WaitFor::Any => false, + d::WaitFor::All => true, + }; + let result = self.raw.0.wait_for_fences(&fences, all, timeout_ns); + match result { + Ok(()) => Ok(true), + Err(vk::Result::TIMEOUT) => Ok(false), + Err(vk::Result::ERROR_DEVICE_LOST) => Err(d::DeviceLost.into()), + Err(vk::Result::ERROR_OUT_OF_HOST_MEMORY) => Err(d::OutOfMemory::Host.into()), + Err(vk::Result::ERROR_OUT_OF_DEVICE_MEMORY) => Err(d::OutOfMemory::Device.into()), + _ => unreachable!(), + } + } + + unsafe fn get_fence_status(&self, fence: &n::Fence) -> Result { + let result = self.raw.0.get_fence_status(fence.0); + match result { + Ok(()) => 
Ok(true), + Err(vk::Result::NOT_READY) => Ok(false), + Err(vk::Result::ERROR_DEVICE_LOST) => Err(d::DeviceLost), + _ => unreachable!(), + } + } + + fn create_event(&self) -> Result { + let info = vk::EventCreateInfo { + s_type: vk::StructureType::EVENT_CREATE_INFO, + p_next: ptr::null(), + flags: vk::EventCreateFlags::empty(), + }; + + let result = unsafe { self.raw.0.create_event(&info, None) }; + match result { + Ok(e) => Ok(n::Event(e)), + Err(vk::Result::ERROR_OUT_OF_HOST_MEMORY) => Err(d::OutOfMemory::Host.into()), + Err(vk::Result::ERROR_OUT_OF_DEVICE_MEMORY) => Err(d::OutOfMemory::Device.into()), + _ => unreachable!(), + } + } + + unsafe fn get_event_status(&self, event: &n::Event) -> Result { + let result = self.raw.0.get_event_status(event.0); + match result { + Ok(b) => Ok(b), + Err(vk::Result::ERROR_OUT_OF_HOST_MEMORY) => Err(d::OutOfMemory::Host.into()), + Err(vk::Result::ERROR_OUT_OF_DEVICE_MEMORY) => Err(d::OutOfMemory::Device.into()), + Err(vk::Result::ERROR_DEVICE_LOST) => Err(d::DeviceLost.into()), + _ => unreachable!(), + } + } + + unsafe fn set_event(&self, event: &n::Event) -> Result<(), d::OutOfMemory> { + let result = self.raw.0.set_event(event.0); + match result { + Ok(()) => Ok(()), + Err(vk::Result::ERROR_OUT_OF_HOST_MEMORY) => Err(d::OutOfMemory::Host.into()), + Err(vk::Result::ERROR_OUT_OF_DEVICE_MEMORY) => Err(d::OutOfMemory::Device.into()), + _ => unreachable!(), + } + } + + unsafe fn reset_event(&self, event: &n::Event) -> Result<(), d::OutOfMemory> { + let result = self.raw.0.reset_event(event.0); + match result { + Ok(()) => Ok(()), + Err(vk::Result::ERROR_OUT_OF_HOST_MEMORY) => Err(d::OutOfMemory::Host.into()), + Err(vk::Result::ERROR_OUT_OF_DEVICE_MEMORY) => Err(d::OutOfMemory::Device.into()), + _ => unreachable!(), + } + } + + unsafe fn free_memory(&self, memory: n::Memory) { + self.raw.0.free_memory(memory.raw, None); + } + + unsafe fn create_query_pool( + &self, + ty: query::Type, + query_count: query::Id, + ) -> Result { + let (query_type, pipeline_statistics) = match ty { + query::Type::Occlusion => ( + vk::QueryType::OCCLUSION, + vk::QueryPipelineStatisticFlags::empty(), + ), + query::Type::PipelineStatistics(statistics) => ( + vk::QueryType::PIPELINE_STATISTICS, + conv::map_pipeline_statistics(statistics), + ), + query::Type::Timestamp => ( + vk::QueryType::TIMESTAMP, + vk::QueryPipelineStatisticFlags::empty(), + ), + }; + + let info = vk::QueryPoolCreateInfo { + s_type: vk::StructureType::QUERY_POOL_CREATE_INFO, + p_next: ptr::null(), + flags: vk::QueryPoolCreateFlags::empty(), + query_type, + query_count, + pipeline_statistics, + }; + + let result = self.raw.0.create_query_pool(&info, None); + + match result { + Ok(pool) => Ok(n::QueryPool(pool)), + Err(vk::Result::ERROR_OUT_OF_HOST_MEMORY) => Err(d::OutOfMemory::Host.into()), + Err(vk::Result::ERROR_OUT_OF_DEVICE_MEMORY) => Err(d::OutOfMemory::Device.into()), + _ => unreachable!(), + } + } + + unsafe fn get_query_pool_results( + &self, + pool: &n::QueryPool, + queries: Range, + data: &mut [u8], + stride: buffer::Offset, + flags: query::ResultFlags, + ) -> Result { + let result = self.raw.0.fp_v1_0().get_query_pool_results( + self.raw.0.handle(), + pool.0, + queries.start, + queries.end - queries.start, + data.len(), + data.as_mut_ptr() as *mut _, + stride, + conv::map_query_result_flags(flags), + ); + + match result { + vk::Result::SUCCESS => Ok(true), + vk::Result::NOT_READY => Ok(false), + vk::Result::ERROR_DEVICE_LOST => Err(d::DeviceLost.into()), + vk::Result::ERROR_OUT_OF_HOST_MEMORY => 
Err(d::OutOfMemory::Host.into()), + vk::Result::ERROR_OUT_OF_DEVICE_MEMORY => Err(d::OutOfMemory::Device.into()), + _ => unreachable!(), + } + } + + unsafe fn create_swapchain( + &self, + surface: &mut w::Surface, + config: SwapchainConfig, + provided_old_swapchain: Option, + ) -> Result<(w::Swapchain, Vec), hal::window::CreationError> { + let functor = khr::Swapchain::new(&surface.raw.instance.0, &self.raw.0); + + let old_swapchain = match provided_old_swapchain { + Some(osc) => osc.raw, + None => vk::SwapchainKHR::null(), + }; + + let info = vk::SwapchainCreateInfoKHR { + s_type: vk::StructureType::SWAPCHAIN_CREATE_INFO_KHR, + p_next: ptr::null(), + flags: vk::SwapchainCreateFlagsKHR::empty(), + surface: surface.raw.handle, + min_image_count: config.image_count, + image_format: conv::map_format(config.format), + image_color_space: vk::ColorSpaceKHR::SRGB_NONLINEAR, + image_extent: vk::Extent2D { + width: config.extent.width, + height: config.extent.height, + }, + image_array_layers: 1, + image_usage: conv::map_image_usage(config.image_usage), + image_sharing_mode: vk::SharingMode::EXCLUSIVE, + queue_family_index_count: 0, + p_queue_family_indices: ptr::null(), + pre_transform: vk::SurfaceTransformFlagsKHR::IDENTITY, + composite_alpha: conv::map_composite_alpha_mode(config.composite_alpha_mode), + present_mode: conv::map_present_mode(config.present_mode), + clipped: 1, + old_swapchain, + }; + + let result = functor.create_swapchain(&info, None); + + if old_swapchain != vk::SwapchainKHR::null() { + functor.destroy_swapchain(old_swapchain, None) + } + + let swapchain_raw = match result { + Ok(swapchain_raw) => swapchain_raw, + Err(vk::Result::ERROR_OUT_OF_HOST_MEMORY) => { + return Err(d::OutOfMemory::Host.into()); + } + Err(vk::Result::ERROR_OUT_OF_DEVICE_MEMORY) => { + return Err(d::OutOfMemory::Device.into()); + } + Err(vk::Result::ERROR_DEVICE_LOST) => return Err(d::DeviceLost.into()), + Err(vk::Result::ERROR_SURFACE_LOST_KHR) => return Err(d::SurfaceLost.into()), + Err(vk::Result::ERROR_NATIVE_WINDOW_IN_USE_KHR) => return Err(d::WindowInUse.into()), + _ => unreachable!("Unexpected result - driver bug? 
{:?}", result), + }; + + let result = functor.get_swapchain_images(swapchain_raw); + + let backbuffer_images = match result { + Ok(backbuffer_images) => backbuffer_images, + Err(vk::Result::ERROR_OUT_OF_HOST_MEMORY) => { + return Err(d::OutOfMemory::Host.into()); + } + Err(vk::Result::ERROR_OUT_OF_DEVICE_MEMORY) => { + return Err(d::OutOfMemory::Device.into()); + } + _ => unreachable!(), + }; + + let swapchain = w::Swapchain { + raw: swapchain_raw, + functor, + }; + + let images = backbuffer_images + .into_iter() + .map(|image| n::Image { + raw: image, + ty: vk::ImageType::TYPE_2D, + flags: vk::ImageCreateFlags::empty(), + extent: vk::Extent3D { + width: config.extent.width, + height: config.extent.height, + depth: 1, + }, + }) + .collect(); + + Ok((swapchain, images)) + } + + unsafe fn destroy_swapchain(&self, swapchain: w::Swapchain) { + swapchain.functor.destroy_swapchain(swapchain.raw, None); + } + + unsafe fn destroy_query_pool(&self, pool: n::QueryPool) { + self.raw.0.destroy_query_pool(pool.0, None); + } + + unsafe fn destroy_shader_module(&self, module: n::ShaderModule) { + self.raw.0.destroy_shader_module(module.raw, None); + } + + unsafe fn destroy_render_pass(&self, rp: n::RenderPass) { + self.raw.0.destroy_render_pass(rp.raw, None); + } + + unsafe fn destroy_pipeline_layout(&self, pl: n::PipelineLayout) { + self.raw.0.destroy_pipeline_layout(pl.raw, None); + } + + unsafe fn destroy_graphics_pipeline(&self, pipeline: n::GraphicsPipeline) { + self.raw.0.destroy_pipeline(pipeline.0, None); + } + + unsafe fn destroy_compute_pipeline(&self, pipeline: n::ComputePipeline) { + self.raw.0.destroy_pipeline(pipeline.0, None); + } + + unsafe fn destroy_framebuffer(&self, fb: n::Framebuffer) { + if fb.owned { + self.raw.0.destroy_framebuffer(fb.raw, None); + } + } + + unsafe fn destroy_buffer(&self, buffer: n::Buffer) { + self.raw.0.destroy_buffer(buffer.raw, None); + } + + unsafe fn destroy_buffer_view(&self, view: n::BufferView) { + self.raw.0.destroy_buffer_view(view.raw, None); + } + + unsafe fn destroy_image(&self, image: n::Image) { + self.raw.0.destroy_image(image.raw, None); + } + + unsafe fn destroy_image_view(&self, view: n::ImageView) { + match view.owner { + n::ImageViewOwner::User => { + self.raw.0.destroy_image_view(view.view, None); + } + n::ImageViewOwner::Surface(_fbo_cache) => { + + } + } + } + + unsafe fn destroy_sampler(&self, sampler: n::Sampler) { + self.raw.0.destroy_sampler(sampler.0, None); + } + + unsafe fn destroy_descriptor_pool(&self, pool: n::DescriptorPool) { + self.raw.0.destroy_descriptor_pool(pool.raw, None); + } + + unsafe fn destroy_descriptor_set_layout(&self, layout: n::DescriptorSetLayout) { + self.raw.0.destroy_descriptor_set_layout(layout.raw, None); + } + + unsafe fn destroy_fence(&self, fence: n::Fence) { + self.raw.0.destroy_fence(fence.0, None); + } + + unsafe fn destroy_semaphore(&self, semaphore: n::Semaphore) { + self.raw.0.destroy_semaphore(semaphore.0, None); + } + + unsafe fn destroy_event(&self, event: n::Event) { + self.raw.0.destroy_event(event.0, None); + } + + fn wait_idle(&self) -> Result<(), d::OutOfMemory> { + match unsafe { self.raw.0.device_wait_idle() } { + Ok(()) => Ok(()), + Err(vk::Result::ERROR_OUT_OF_HOST_MEMORY) => Err(d::OutOfMemory::Host), + Err(vk::Result::ERROR_OUT_OF_DEVICE_MEMORY) => Err(d::OutOfMemory::Device), + _ => unreachable!(), + } + } + + unsafe fn set_image_name(&self, image: &mut n::Image, name: &str) { + self.set_object_name(vk::ObjectType::IMAGE, image.raw.as_raw(), name) + } + + unsafe fn 
set_buffer_name(&self, buffer: &mut n::Buffer, name: &str) { + self.set_object_name(vk::ObjectType::BUFFER, buffer.raw.as_raw(), name) + } + + unsafe fn set_command_buffer_name( + &self, + command_buffer: &mut cmd::CommandBuffer, + name: &str + ) { + self.set_object_name(vk::ObjectType::COMMAND_BUFFER, command_buffer.raw.as_raw(), name) + } + + unsafe fn set_semaphore_name(&self, semaphore: &mut n::Semaphore, name: &str) { + self.set_object_name(vk::ObjectType::SEMAPHORE, semaphore.0.as_raw(), name) + } + + unsafe fn set_fence_name(&self, fence: &mut n::Fence, name: &str) { + self.set_object_name(vk::ObjectType::FENCE, fence.0.as_raw(), name) + } + + unsafe fn set_framebuffer_name(&self, framebuffer: &mut n::Framebuffer, name: &str) { + self.set_object_name(vk::ObjectType::FRAMEBUFFER, framebuffer.raw.as_raw(), name) + } + + unsafe fn set_render_pass_name(&self, render_pass: &mut n::RenderPass, name: &str) { + self.set_object_name(vk::ObjectType::RENDER_PASS, render_pass.raw.as_raw(), name) + } + + unsafe fn set_descriptor_set_name(&self, descriptor_set: &mut n::DescriptorSet, name: &str) { + self.set_object_name(vk::ObjectType::DESCRIPTOR_SET, descriptor_set.raw.as_raw(), name) + } + + unsafe fn set_descriptor_set_layout_name(&self, descriptor_set_layout: &mut n::DescriptorSetLayout, name: &str) { + self.set_object_name(vk::ObjectType::DESCRIPTOR_SET_LAYOUT, descriptor_set_layout.raw.as_raw(), name) + } +} + +impl Device { + unsafe fn set_object_name(&self, object_type: vk::ObjectType, object_handle: u64, name: &str) { + let instance = &self.raw.2; + if let Some(DebugMessenger::Utils(ref debug_utils_ext, _)) = instance.1 { + + static mut NAME_BUF: [u8; 64] = [0u8; 64]; + std::ptr::copy_nonoverlapping( + name.as_ptr(), + &mut NAME_BUF[0], + name.len().min(NAME_BUF.len()) + ); + NAME_BUF[name.len()] = 0; + let _result = debug_utils_ext.debug_utils_set_object_name( + self.raw.0.handle(), + &vk::DebugUtilsObjectNameInfoEXT { + s_type: vk::StructureType::DEBUG_UTILS_OBJECT_NAME_INFO_EXT, + p_next: std::ptr::null_mut(), + object_type, + object_handle, + p_object_name: NAME_BUF.as_ptr() as *mut _, + } + ); + } + } +} + +#[test] +fn test_send_sync() { + fn foo() {} + foo::() +} diff --git a/third_party/rust/gfx-backend-vulkan/src/info.rs b/third_party/rust/gfx-backend-vulkan/src/info.rs new file mode 100644 index 000000000000..1e02a6f962ac --- /dev/null +++ b/third_party/rust/gfx-backend-vulkan/src/info.rs @@ -0,0 +1,5 @@ +pub mod intel { + pub const VENDOR: u32 = 0x8086; + pub const DEVICE_KABY_LAKE_MASK: u32 = 0x5900; + pub const DEVICE_SKY_LAKE_MASK: u32 = 0x1900; +} diff --git a/third_party/rust/gfx-backend-vulkan/src/lib.rs b/third_party/rust/gfx-backend-vulkan/src/lib.rs new file mode 100644 index 000000000000..49204678d367 --- /dev/null +++ b/third_party/rust/gfx-backend-vulkan/src/lib.rs @@ -0,0 +1,1434 @@ +#![allow(non_snake_case)] + +#[macro_use] +extern crate log; +#[macro_use] +extern crate ash; +#[macro_use] +extern crate lazy_static; + +#[cfg(target_os = "macos")] +#[macro_use] +extern crate objc; + +use ash::extensions::{ + self, + ext::{DebugReport, DebugUtils}, +}; +use ash::version::{DeviceV1_0, EntryV1_0, InstanceV1_0}; +use ash::vk; +#[cfg(not(feature = "use-rtld-next"))] +use ash::{Entry, LoadingError}; + +use hal::{ + adapter, + device::{CreationError as DeviceCreationError, DeviceLost, OutOfMemory, SurfaceLost}, + format, + image, + memory, + pso::{PatchSize, PipelineStage}, + queue, + window::{PresentError, Suboptimal, SwapImageIndex}, + Features, + Limits, +}; + +use 
std::borrow::{Borrow, Cow}; +use std::ffi::{CStr, CString}; +use std::sync::Arc; +use std::{fmt, mem, ptr, slice}; + +#[cfg(feature = "use-rtld-next")] +use ash::{EntryCustom, LoadingError}; +#[cfg(feature = "use-rtld-next")] +use shared_library::dynamic_library::{DynamicLibrary, SpecialHandles}; + +mod command; +mod conv; +mod device; +mod info; +mod native; +mod pool; +mod window; + + +lazy_static! { + static ref LAYERS: Vec<&'static CStr> = if cfg!(all(target_os = "android", debug_assertions)) { + vec![ + CStr::from_bytes_with_nul(b"VK_LAYER_LUNARG_core_validation\0").unwrap(), + CStr::from_bytes_with_nul(b"VK_LAYER_LUNARG_object_tracker\0").unwrap(), + CStr::from_bytes_with_nul(b"VK_LAYER_LUNARG_parameter_validation\0").unwrap(), + CStr::from_bytes_with_nul(b"VK_LAYER_GOOGLE_threading\0").unwrap(), + CStr::from_bytes_with_nul(b"VK_LAYER_GOOGLE_unique_objects\0").unwrap(), + ] + } else if cfg!(debug_assertions) { + vec![CStr::from_bytes_with_nul(b"VK_LAYER_LUNARG_standard_validation\0").unwrap()] + } else { + vec![] + }; + static ref EXTENSIONS: Vec<&'static CStr> = if cfg!(debug_assertions) { + vec![ + DebugUtils::name(), + DebugReport::name(), + ] + } else { + vec![] + }; + static ref DEVICE_EXTENSIONS: Vec<&'static CStr> = vec![extensions::khr::Swapchain::name()]; + static ref SURFACE_EXTENSIONS: Vec<&'static CStr> = vec![ + extensions::khr::Surface::name(), + // Platform-specific WSI extensions + #[cfg(all(unix, not(target_os = "android"), not(target_os = "macos")))] + extensions::khr::XlibSurface::name(), + #[cfg(all(unix, not(target_os = "android"), not(target_os = "macos")))] + extensions::khr::XcbSurface::name(), + #[cfg(all(unix, not(target_os = "android"), not(target_os = "macos")))] + extensions::khr::WaylandSurface::name(), + #[cfg(target_os = "android")] + extensions::khr::AndroidSurface::name(), + #[cfg(target_os = "windows")] + extensions::khr::Win32Surface::name(), + #[cfg(target_os = "macos")] + extensions::mvk::MacOSSurface::name(), + ]; +} + +#[cfg(not(feature = "use-rtld-next"))] +lazy_static! { + // Entry function pointers + pub static ref VK_ENTRY: Result = Entry::new(); +} + +#[cfg(feature = "use-rtld-next")] +lazy_static! 
{ + // Entry function pointers + pub static ref VK_ENTRY: Result, LoadingError> + = EntryCustom::new_custom( + || Ok(()), + |_, name| unsafe { + DynamicLibrary::symbol_special(SpecialHandles::Next, &*name.to_string_lossy()) + .unwrap_or(ptr::null_mut()) + } + ); +} + +pub struct RawInstance(pub ash::Instance, Option); + +pub enum DebugMessenger { + Utils(DebugUtils, vk::DebugUtilsMessengerEXT), + Report(DebugReport, vk::DebugReportCallbackEXT), +} + +impl Drop for RawInstance { + fn drop(&mut self) { + unsafe { + #[cfg(debug_assertions)] + { + match self.1 { + Some(DebugMessenger::Utils(ref ext, callback)) => { + ext.destroy_debug_utils_messenger(callback, None) + } + Some(DebugMessenger::Report(ref ext, callback)) => { + ext.destroy_debug_report_callback(callback, None) + } + None => {} + } + } + + self.0.destroy_instance(None); + } + } +} + +pub struct Instance { + pub raw: Arc, + + + pub extensions: Vec<&'static CStr>, +} + +impl fmt::Debug for Instance { + fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result { + fmt.write_str("Instance") + } +} + +fn map_queue_type(flags: vk::QueueFlags) -> queue::QueueType { + if flags.contains(vk::QueueFlags::GRAPHICS | vk::QueueFlags::COMPUTE) { + + queue::QueueType::General + } else if flags.contains(vk::QueueFlags::GRAPHICS) { + + queue::QueueType::Graphics + } else if flags.contains(vk::QueueFlags::COMPUTE) { + + queue::QueueType::Compute + } else if flags.contains(vk::QueueFlags::TRANSFER) { + queue::QueueType::Transfer + } else { + + unimplemented!() + } +} + +unsafe fn display_debug_utils_label_ext( + label_structs: *mut vk::DebugUtilsLabelEXT, + count: usize, +) -> Option { + if count == 0 { + return None; + } + + Some( + slice::from_raw_parts::(label_structs, count) + .iter() + .flat_map(|dul_obj| { + dul_obj + .p_label_name + .as_ref() + .map(|lbl| CStr::from_ptr(lbl).to_string_lossy().into_owned()) + }) + .collect::>() + .join(", "), + ) +} + +unsafe fn display_debug_utils_object_name_info_ext( + info_structs: *mut vk::DebugUtilsObjectNameInfoEXT, + count: usize, +) -> Option { + if count == 0 { + return None; + } + + + Some( + slice::from_raw_parts::(info_structs, count) + .iter() + .map(|obj_info| { + let object_name = obj_info + .p_object_name + .as_ref() + .map(|name| CStr::from_ptr(name).to_string_lossy().into_owned()); + + match object_name { + Some(name) => format!( + "(type: {:?}, hndl: {}, name: {})", + obj_info.object_type, + &obj_info.object_handle.to_string(), + name + ), + None => format!( + "(type: {:?}, hndl: {})", + obj_info.object_type, + &obj_info.object_handle.to_string() + ), + } + }) + .collect::>() + .join(", "), + ) +} + +unsafe extern "system" fn debug_utils_messenger_callback( + message_severity: vk::DebugUtilsMessageSeverityFlagsEXT, + message_type: vk::DebugUtilsMessageTypeFlagsEXT, + p_callback_data: *const vk::DebugUtilsMessengerCallbackDataEXT, + _user_data: *mut std::os::raw::c_void, +) -> vk::Bool32 { + let callback_data = *p_callback_data; + + let message_severity = match message_severity { + vk::DebugUtilsMessageSeverityFlagsEXT::ERROR => log::Level::Error, + vk::DebugUtilsMessageSeverityFlagsEXT::WARNING => log::Level::Warn, + vk::DebugUtilsMessageSeverityFlagsEXT::INFO => log::Level::Info, + vk::DebugUtilsMessageSeverityFlagsEXT::VERBOSE => log::Level::Trace, + _ => log::Level::Warn, + }; + let message_type = &format!("{:?}", message_type); + let message_id_number: i32 = callback_data.message_id_number as i32; + + let message_id_name = if callback_data.p_message_id_name.is_null() { + Cow::from("") + } 
else { + CStr::from_ptr(callback_data.p_message_id_name).to_string_lossy() + }; + + let message = if callback_data.p_message.is_null() { + Cow::from("") + } else { + CStr::from_ptr(callback_data.p_message).to_string_lossy() + }; + + let additional_info: [(&str, Option); 3] = [ + ( + "queue info", + display_debug_utils_label_ext( + callback_data.p_queue_labels as *mut _, + callback_data.queue_label_count as usize, + ), + ), + ( + "cmd buf info", + display_debug_utils_label_ext( + callback_data.p_cmd_buf_labels as *mut _, + callback_data.cmd_buf_label_count as usize, + ), + ), + ( + "object info", + display_debug_utils_object_name_info_ext( + callback_data.p_objects as *mut _, + callback_data.object_count as usize, + ), + ), + ]; + + log!(message_severity, "{}\n", { + let mut msg = format!( + "\n{} [{} ({})] : {}", + message_type, + message_id_name, + &message_id_number.to_string(), + message + ); + + for (info_label, info) in additional_info.into_iter() { + match info { + Some(data) => { + msg = format!("{}\n{}: {}", msg, info_label, data); + } + None => {} + } + } + + msg + }); + + vk::FALSE +} + +unsafe extern "system" fn debug_report_callback( + type_: vk::DebugReportFlagsEXT, + _: vk::DebugReportObjectTypeEXT, + _object: u64, + _location: usize, + _msg_code: i32, + layer_prefix: *const std::os::raw::c_char, + description: *const std::os::raw::c_char, + _user_data: *mut std::os::raw::c_void, +) -> vk::Bool32 { + let level = match type_ { + vk::DebugReportFlagsEXT::ERROR => log::Level::Error, + vk::DebugReportFlagsEXT::WARNING => log::Level::Warn, + vk::DebugReportFlagsEXT::INFORMATION => log::Level::Info, + vk::DebugReportFlagsEXT::DEBUG => log::Level::Debug, + _ => log::Level::Warn, + }; + + let layer_prefix = CStr::from_ptr(layer_prefix).to_str().unwrap(); + let description = CStr::from_ptr(description).to_str().unwrap(); + log!(level, "[{}] {}", layer_prefix, description); + vk::FALSE +} + +impl hal::Instance for Instance { + fn create(name: &str, version: u32) -> Result { + + let entry = VK_ENTRY.as_ref().map_err(|e| { + info!("Missing Vulkan entry points: {:?}", e); + hal::UnsupportedBackend + })?; + + let app_name = CString::new(name).unwrap(); + let app_info = vk::ApplicationInfo { + s_type: vk::StructureType::APPLICATION_INFO, + p_next: ptr::null(), + p_application_name: app_name.as_ptr(), + application_version: version, + p_engine_name: b"gfx-rs\0".as_ptr() as *const _, + engine_version: 1, + api_version: vk_make_version!(1, 0, 0), + }; + + let instance_extensions = entry + .enumerate_instance_extension_properties() + .expect("Unable to enumerate instance extensions"); + + let instance_layers = entry + .enumerate_instance_layer_properties() + .expect("Unable to enumerate instance layers"); + + + let extensions = SURFACE_EXTENSIONS + .iter() + .chain(EXTENSIONS.iter()) + .filter_map(|&ext| { + instance_extensions + .iter() + .find(|inst_ext| unsafe { + CStr::from_ptr(inst_ext.extension_name.as_ptr()).to_bytes() + == ext.to_bytes() + }) + .map(|_| ext) + .or_else(|| { + warn!("Unable to find extension: {}", ext.to_string_lossy()); + None + }) + }) + .collect::>(); + + + let layers = LAYERS + .iter() + .filter_map(|&layer| { + instance_layers + .iter() + .find(|inst_layer| unsafe { + CStr::from_ptr(inst_layer.layer_name.as_ptr()).to_bytes() + == layer.to_bytes() + }) + .map(|_| layer) + .or_else(|| { + warn!("Unable to find layer: {}", layer.to_string_lossy()); + None + }) + }) + .collect::>(); + + let instance = { + let cstrings = layers + .iter() + .chain(extensions.iter()) + 
.map(|&s| CString::from(s)) + .collect::>(); + + let str_pointers = cstrings.iter().map(|s| s.as_ptr()).collect::>(); + + let create_info = vk::InstanceCreateInfo { + s_type: vk::StructureType::INSTANCE_CREATE_INFO, + p_next: ptr::null(), + flags: vk::InstanceCreateFlags::empty(), + p_application_info: &app_info, + enabled_layer_count: layers.len() as _, + pp_enabled_layer_names: str_pointers.as_ptr(), + enabled_extension_count: extensions.len() as _, + pp_enabled_extension_names: str_pointers[layers.len() ..].as_ptr(), + }; + + unsafe { entry.create_instance(&create_info, None) }.map_err(|e| { + warn!("Unable to create Vulkan instance: {:?}", e); + hal::UnsupportedBackend + })? + }; + + #[cfg(debug_assertions)] + let debug_messenger = { + + if instance_extensions.iter().any(|props| unsafe { + CStr::from_ptr(props.extension_name.as_ptr()) == DebugUtils::name() + }) { + let ext = DebugUtils::new(entry, &instance); + let info = vk::DebugUtilsMessengerCreateInfoEXT { + s_type: vk::StructureType::DEBUG_UTILS_MESSENGER_CREATE_INFO_EXT, + p_next: ptr::null(), + flags: vk::DebugUtilsMessengerCreateFlagsEXT::empty(), + message_severity: vk::DebugUtilsMessageSeverityFlagsEXT::all(), + message_type: vk::DebugUtilsMessageTypeFlagsEXT::all(), + pfn_user_callback: Some(debug_utils_messenger_callback), + p_user_data: ptr::null_mut(), + }; + let handle = unsafe { ext.create_debug_utils_messenger(&info, None) }.unwrap(); + Some(DebugMessenger::Utils(ext, handle)) + } else if instance_extensions.iter().any(|props| unsafe { + CStr::from_ptr(props.extension_name.as_ptr()) == DebugReport::name() + }) { + let ext = DebugReport::new(entry, &instance); + let info = vk::DebugReportCallbackCreateInfoEXT { + s_type: vk::StructureType::DEBUG_REPORT_CALLBACK_CREATE_INFO_EXT, + p_next: ptr::null(), + flags: vk::DebugReportFlagsEXT::all(), + pfn_callback: Some(debug_report_callback), + p_user_data: ptr::null_mut(), + }; + let handle = unsafe { ext.create_debug_report_callback(&info, None) }.unwrap(); + Some(DebugMessenger::Report(ext, handle)) + } else { + None + } + }; + #[cfg(not(debug_assertions))] + let debug_messenger = None; + + Ok(Instance { + raw: Arc::new(RawInstance(instance, debug_messenger)), + extensions, + }) + } + + fn enumerate_adapters(&self) -> Vec> { + let devices = match unsafe { self.raw.0.enumerate_physical_devices() } { + Ok(devices) => devices, + Err(err) => { + error!("Could not enumerate physical devices! 
{}", err); + vec![] + } + }; + + devices + .into_iter() + .map(|device| { + let properties = unsafe { self.raw.0.get_physical_device_properties(device) }; + let info = adapter::AdapterInfo { + name: unsafe { + CStr::from_ptr(properties.device_name.as_ptr()) + .to_str() + .unwrap_or("Unknown") + .to_owned() + }, + vendor: properties.vendor_id as usize, + device: properties.device_id as usize, + device_type: match properties.device_type { + ash::vk::PhysicalDeviceType::OTHER => adapter::DeviceType::Other, + ash::vk::PhysicalDeviceType::INTEGRATED_GPU => { + adapter::DeviceType::IntegratedGpu + } + ash::vk::PhysicalDeviceType::DISCRETE_GPU => { + adapter::DeviceType::DiscreteGpu + } + ash::vk::PhysicalDeviceType::VIRTUAL_GPU => adapter::DeviceType::VirtualGpu, + ash::vk::PhysicalDeviceType::CPU => adapter::DeviceType::Cpu, + _ => adapter::DeviceType::Other, + }, + }; + let physical_device = PhysicalDevice { + instance: self.raw.clone(), + handle: device, + properties, + }; + let queue_families = unsafe { + self.raw + .0 + .get_physical_device_queue_family_properties(device) + .into_iter() + .enumerate() + .map(|(i, properties)| QueueFamily { + properties, + device, + index: i as u32, + }) + .collect() + }; + + adapter::Adapter { + info, + physical_device, + queue_families, + } + }) + .collect() + } + + unsafe fn create_surface( + &self, + has_handle: &impl raw_window_handle::HasRawWindowHandle, + ) -> Result { + use raw_window_handle::RawWindowHandle; + + match has_handle.raw_window_handle() { + #[cfg(all( + unix, + not(target_os = "android"), + not(target_os = "macos") + ))] + RawWindowHandle::Wayland(handle) + if self.extensions.contains(&extensions::khr::WaylandSurface::name()) => + { + Ok(self.create_surface_from_wayland(handle.display, handle.surface)) + } + #[cfg(all( + feature = "x11", + unix, + not(target_os = "android"), + not(target_os = "macos") + ))] + RawWindowHandle::Xlib(handle) + if self.extensions.contains(&extensions::khr::XlibSurface::name()) => + { + Ok(self.create_surface_from_xlib(handle.display as *mut _, handle.window)) + } + #[cfg(all( + feature = "xcb", + unix, + not(target_os = "android"), + not(target_os = "macos"), + not(target_os = "ios") + ))] + RawWindowHandle::Xcb(handle) if self.extensions.contains(&extensions::khr::XcbSurface::name()) => { + Ok(self.create_surface_from_xcb(handle.connection as *mut _, handle.window)) + } + + + + + + #[cfg(windows)] + RawWindowHandle::Windows(handle) => { + use winapi::um::libloaderapi::GetModuleHandleW; + + let hinstance = GetModuleHandleW(ptr::null()); + Ok(self.create_surface_from_hwnd(hinstance as *mut _, handle.hwnd)) + } + #[cfg(target_os = "macos")] + RawWindowHandle::MacOS(handle) => { + Ok(self.create_surface_from_ns_view(handle.ns_view)) + } + _ => Err(hal::window::InitError::UnsupportedWindowHandle), + } + } + + unsafe fn destroy_surface(&self, surface: window::Surface) { + surface.raw.functor.destroy_surface(surface.raw.handle, None); + } +} + +#[derive(Debug, Clone)] +pub struct QueueFamily { + properties: vk::QueueFamilyProperties, + device: vk::PhysicalDevice, + index: u32, +} + +impl queue::QueueFamily for QueueFamily { + fn queue_type(&self) -> queue::QueueType { + map_queue_type(self.properties.queue_flags) + } + fn max_queues(&self) -> usize { + self.properties.queue_count as _ + } + fn id(&self) -> queue::QueueFamilyId { + queue::QueueFamilyId(self.index as _) + } +} + +pub struct PhysicalDevice { + instance: Arc, + handle: vk::PhysicalDevice, + properties: vk::PhysicalDeviceProperties, +} + +impl 
fmt::Debug for PhysicalDevice { + fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result { + fmt.write_str("PhysicalDevice") + } +} + +impl adapter::PhysicalDevice for PhysicalDevice { + unsafe fn open( + &self, + families: &[(&QueueFamily, &[queue::QueuePriority])], + requested_features: Features, + ) -> Result, DeviceCreationError> { + let family_infos = families + .iter() + .map(|&(family, priorities)| vk::DeviceQueueCreateInfo { + s_type: vk::StructureType::DEVICE_QUEUE_CREATE_INFO, + p_next: ptr::null(), + flags: vk::DeviceQueueCreateFlags::empty(), + queue_family_index: family.index, + queue_count: priorities.len() as _, + p_queue_priorities: priorities.as_ptr(), + }) + .collect::>(); + + if !self.features().contains(requested_features) { + return Err(DeviceCreationError::MissingFeature); + } + + let enabled_features = conv::map_device_features(requested_features); + + + let device_raw = { + let cstrings = DEVICE_EXTENSIONS + .iter() + .map(|&s| CString::from(s)) + .collect::>(); + + let str_pointers = cstrings.iter().map(|s| s.as_ptr()).collect::>(); + + let info = vk::DeviceCreateInfo { + s_type: vk::StructureType::DEVICE_CREATE_INFO, + p_next: ptr::null(), + flags: vk::DeviceCreateFlags::empty(), + queue_create_info_count: family_infos.len() as u32, + p_queue_create_infos: family_infos.as_ptr(), + enabled_layer_count: 0, + pp_enabled_layer_names: ptr::null(), + enabled_extension_count: str_pointers.len() as u32, + pp_enabled_extension_names: str_pointers.as_ptr(), + p_enabled_features: &enabled_features, + }; + + match self.instance.0.create_device(self.handle, &info, None) { + Ok(device) => device, + Err(e) => return Err(match e { + vk::Result::ERROR_OUT_OF_HOST_MEMORY => DeviceCreationError::OutOfMemory(OutOfMemory::Host), + vk::Result::ERROR_OUT_OF_DEVICE_MEMORY => DeviceCreationError::OutOfMemory(OutOfMemory::Device), + vk::Result::ERROR_INITIALIZATION_FAILED => DeviceCreationError::InitializationFailed, + vk::Result::ERROR_DEVICE_LOST => DeviceCreationError::DeviceLost, + vk::Result::ERROR_TOO_MANY_OBJECTS => DeviceCreationError::TooManyObjects, + _ => unreachable!(), + }), + } + }; + + let swapchain_fn = vk::KhrSwapchainFn::load(|name| { + mem::transmute( + self.instance + .0 + .get_device_proc_addr(device_raw.handle(), name.as_ptr()), + ) + }); + + let device = Device { + raw: Arc::new(RawDevice(device_raw, requested_features, self.instance.clone())), + }; + + let device_arc = device.raw.clone(); + let queue_groups = families + .into_iter() + .map(|&(family, ref priorities)| { + let mut family_raw = + queue::QueueGroup::new(queue::QueueFamilyId(family.index as usize)); + for id in 0 .. 
priorities.len() { + let queue_raw = device_arc.0.get_device_queue(family.index, id as _); + family_raw.add_queue(CommandQueue { + raw: Arc::new(queue_raw), + device: device_arc.clone(), + swapchain_fn: swapchain_fn.clone(), + }); + } + family_raw + }) + .collect(); + + Ok(adapter::Gpu { + device, + queue_groups, + }) + } + + fn format_properties(&self, format: Option) -> format::Properties { + let properties = unsafe { + self.instance.0.get_physical_device_format_properties( + self.handle, + format.map_or(vk::Format::UNDEFINED, conv::map_format), + ) + }; + + format::Properties { + linear_tiling: conv::map_image_features(properties.linear_tiling_features), + optimal_tiling: conv::map_image_features(properties.optimal_tiling_features), + buffer_features: conv::map_buffer_features(properties.buffer_features), + } + } + + fn image_format_properties( + &self, + format: format::Format, + dimensions: u8, + tiling: image::Tiling, + usage: image::Usage, + view_caps: image::ViewCapabilities, + ) -> Option { + let format_properties = unsafe { + self.instance.0.get_physical_device_image_format_properties( + self.handle, + conv::map_format(format), + match dimensions { + 1 => vk::ImageType::TYPE_1D, + 2 => vk::ImageType::TYPE_2D, + 3 => vk::ImageType::TYPE_3D, + _ => panic!("Unexpected image dimensionality: {}", dimensions), + }, + conv::map_tiling(tiling), + conv::map_image_usage(usage), + conv::map_view_capabilities(view_caps), + ) + }; + + match format_properties { + Ok(props) => Some(image::FormatProperties { + max_extent: image::Extent { + width: props.max_extent.width, + height: props.max_extent.height, + depth: props.max_extent.depth, + }, + max_levels: props.max_mip_levels as _, + max_layers: props.max_array_layers as _, + sample_count_mask: props.sample_counts.as_raw() as _, + max_resource_size: props.max_resource_size as _, + }), + Err(vk::Result::ERROR_FORMAT_NOT_SUPPORTED) => None, + Err(other) => { + error!("Unexpected error in `image_format_properties`: {:?}", other); + None + } + } + } + + fn memory_properties(&self) -> adapter::MemoryProperties { + let mem_properties = unsafe { + self.instance + .0 + .get_physical_device_memory_properties(self.handle) + }; + let memory_heaps = mem_properties.memory_heaps + [.. mem_properties.memory_heap_count as usize] + .iter() + .map(|mem| mem.size) + .collect(); + let memory_types = mem_properties.memory_types + [.. 
mem_properties.memory_type_count as usize] + .iter() + .map(|mem| { + use crate::memory::Properties; + let mut type_flags = Properties::empty(); + + if mem + .property_flags + .intersects(vk::MemoryPropertyFlags::DEVICE_LOCAL) + { + type_flags |= Properties::DEVICE_LOCAL; + } + if mem + .property_flags + .intersects(vk::MemoryPropertyFlags::HOST_VISIBLE) + { + type_flags |= Properties::CPU_VISIBLE; + } + if mem + .property_flags + .intersects(vk::MemoryPropertyFlags::HOST_COHERENT) + { + type_flags |= Properties::COHERENT; + } + if mem + .property_flags + .intersects(vk::MemoryPropertyFlags::HOST_CACHED) + { + type_flags |= Properties::CPU_CACHED; + } + if mem + .property_flags + .intersects(vk::MemoryPropertyFlags::LAZILY_ALLOCATED) + { + type_flags |= Properties::LAZILY_ALLOCATED; + } + + adapter::MemoryType { + properties: type_flags, + heap_index: mem.heap_index as usize, + } + }) + .collect(); + + adapter::MemoryProperties { + memory_heaps, + memory_types, + } + } + + fn features(&self) -> Features { + + let is_windows_intel_dual_src_bug = cfg!(windows) + && self.properties.vendor_id == info::intel::VENDOR + && (self.properties.device_id & info::intel::DEVICE_KABY_LAKE_MASK + == info::intel::DEVICE_KABY_LAKE_MASK + || self.properties.device_id & info::intel::DEVICE_SKY_LAKE_MASK + == info::intel::DEVICE_SKY_LAKE_MASK); + + let features = unsafe { self.instance.0.get_physical_device_features(self.handle) }; + let mut bits = Features::TRIANGLE_FAN + | Features::SEPARATE_STENCIL_REF_VALUES + | Features::SAMPLER_MIP_LOD_BIAS; + + if features.robust_buffer_access != 0 { + bits |= Features::ROBUST_BUFFER_ACCESS; + } + if features.full_draw_index_uint32 != 0 { + bits |= Features::FULL_DRAW_INDEX_U32; + } + if features.image_cube_array != 0 { + bits |= Features::IMAGE_CUBE_ARRAY; + } + if features.independent_blend != 0 { + bits |= Features::INDEPENDENT_BLENDING; + } + if features.geometry_shader != 0 { + bits |= Features::GEOMETRY_SHADER; + } + if features.tessellation_shader != 0 { + bits |= Features::TESSELLATION_SHADER; + } + if features.sample_rate_shading != 0 { + bits |= Features::SAMPLE_RATE_SHADING; + } + if features.dual_src_blend != 0 && !is_windows_intel_dual_src_bug { + bits |= Features::DUAL_SRC_BLENDING; + } + if features.logic_op != 0 { + bits |= Features::LOGIC_OP; + } + if features.multi_draw_indirect != 0 { + bits |= Features::MULTI_DRAW_INDIRECT; + } + if features.draw_indirect_first_instance != 0 { + bits |= Features::DRAW_INDIRECT_FIRST_INSTANCE; + } + if features.depth_clamp != 0 { + bits |= Features::DEPTH_CLAMP; + } + if features.depth_bias_clamp != 0 { + bits |= Features::DEPTH_BIAS_CLAMP; + } + if features.fill_mode_non_solid != 0 { + bits |= Features::NON_FILL_POLYGON_MODE; + } + if features.depth_bounds != 0 { + bits |= Features::DEPTH_BOUNDS; + } + if features.wide_lines != 0 { + bits |= Features::LINE_WIDTH; + } + if features.large_points != 0 { + bits |= Features::POINT_SIZE; + } + if features.alpha_to_one != 0 { + bits |= Features::ALPHA_TO_ONE; + } + if features.multi_viewport != 0 { + bits |= Features::MULTI_VIEWPORTS; + } + if features.sampler_anisotropy != 0 { + bits |= Features::SAMPLER_ANISOTROPY; + } + if features.texture_compression_etc2 != 0 { + bits |= Features::FORMAT_ETC2; + } + if features.texture_compression_astc_ldr != 0 { + bits |= Features::FORMAT_ASTC_LDR; + } + if features.texture_compression_bc != 0 { + bits |= Features::FORMAT_BC; + } + if features.occlusion_query_precise != 0 { + bits |= Features::PRECISE_OCCLUSION_QUERY; + } + if 
features.pipeline_statistics_query != 0 { + bits |= Features::PIPELINE_STATISTICS_QUERY; + } + if features.vertex_pipeline_stores_and_atomics != 0 { + bits |= Features::VERTEX_STORES_AND_ATOMICS; + } + if features.fragment_stores_and_atomics != 0 { + bits |= Features::FRAGMENT_STORES_AND_ATOMICS; + } + if features.shader_tessellation_and_geometry_point_size != 0 { + bits |= Features::SHADER_TESSELLATION_AND_GEOMETRY_POINT_SIZE; + } + if features.shader_image_gather_extended != 0 { + bits |= Features::SHADER_IMAGE_GATHER_EXTENDED; + } + if features.shader_storage_image_extended_formats != 0 { + bits |= Features::SHADER_STORAGE_IMAGE_EXTENDED_FORMATS; + } + if features.shader_storage_image_multisample != 0 { + bits |= Features::SHADER_STORAGE_IMAGE_MULTISAMPLE; + } + if features.shader_storage_image_read_without_format != 0 { + bits |= Features::SHADER_STORAGE_IMAGE_READ_WITHOUT_FORMAT; + } + if features.shader_storage_image_write_without_format != 0 { + bits |= Features::SHADER_STORAGE_IMAGE_WRITE_WITHOUT_FORMAT; + } + if features.shader_uniform_buffer_array_dynamic_indexing != 0 { + bits |= Features::SHADER_UNIFORM_BUFFER_ARRAY_DYNAMIC_INDEXING; + } + if features.shader_sampled_image_array_dynamic_indexing != 0 { + bits |= Features::SHADER_SAMPLED_IMAGE_ARRAY_DYNAMIC_INDEXING; + } + if features.shader_storage_buffer_array_dynamic_indexing != 0 { + bits |= Features::SHADER_STORAGE_BUFFER_ARRAY_DYNAMIC_INDEXING; + } + if features.shader_storage_image_array_dynamic_indexing != 0 { + bits |= Features::SHADER_STORAGE_IMAGE_ARRAY_DYNAMIC_INDEXING; + } + if features.shader_clip_distance != 0 { + bits |= Features::SHADER_CLIP_DISTANCE; + } + if features.shader_cull_distance != 0 { + bits |= Features::SHADER_CULL_DISTANCE; + } + if features.shader_float64 != 0 { + bits |= Features::SHADER_FLOAT64; + } + if features.shader_int64 != 0 { + bits |= Features::SHADER_INT64; + } + if features.shader_int16 != 0 { + bits |= Features::SHADER_INT16; + } + if features.shader_resource_residency != 0 { + bits |= Features::SHADER_RESOURCE_RESIDENCY; + } + if features.shader_resource_min_lod != 0 { + bits |= Features::SHADER_RESOURCE_MIN_LOD; + } + if features.sparse_binding != 0 { + bits |= Features::SPARSE_BINDING; + } + if features.sparse_residency_buffer != 0 { + bits |= Features::SPARSE_RESIDENCY_BUFFER; + } + if features.sparse_residency_image2_d != 0 { + bits |= Features::SPARSE_RESIDENCY_IMAGE_2D; + } + if features.sparse_residency_image3_d != 0 { + bits |= Features::SPARSE_RESIDENCY_IMAGE_3D; + } + if features.sparse_residency2_samples != 0 { + bits |= Features::SPARSE_RESIDENCY_2_SAMPLES; + } + if features.sparse_residency4_samples != 0 { + bits |= Features::SPARSE_RESIDENCY_4_SAMPLES; + } + if features.sparse_residency8_samples != 0 { + bits |= Features::SPARSE_RESIDENCY_8_SAMPLES; + } + if features.sparse_residency16_samples != 0 { + bits |= Features::SPARSE_RESIDENCY_16_SAMPLES; + } + if features.sparse_residency_aliased != 0 { + bits |= Features::SPARSE_RESIDENCY_ALIASED; + } + if features.variable_multisample_rate != 0 { + bits |= Features::VARIABLE_MULTISAMPLE_RATE; + } + if features.inherited_queries != 0 { + bits |= Features::INHERITED_QUERIES; + } + + bits + } + + fn limits(&self) -> Limits { + let limits = &self.properties.limits; + let max_group_count = limits.max_compute_work_group_count; + let max_group_size = limits.max_compute_work_group_size; + + Limits { + max_image_1d_size: limits.max_image_dimension1_d, + max_image_2d_size: limits.max_image_dimension2_d, + max_image_3d_size: 
limits.max_image_dimension3_d, + max_image_cube_size: limits.max_image_dimension_cube, + max_image_array_layers: limits.max_image_array_layers as _, + max_texel_elements: limits.max_texel_buffer_elements as _, + max_patch_size: limits.max_tessellation_patch_size as PatchSize, + max_viewports: limits.max_viewports as _, + max_viewport_dimensions: limits.max_viewport_dimensions, + max_framebuffer_extent: image::Extent { + width: limits.max_framebuffer_width, + height: limits.max_framebuffer_height, + depth: limits.max_framebuffer_layers, + }, + max_compute_work_group_count: [ + max_group_count[0] as _, + max_group_count[1] as _, + max_group_count[2] as _, + ], + max_compute_work_group_size: [ + max_group_size[0] as _, + max_group_size[1] as _, + max_group_size[2] as _, + ], + max_vertex_input_attributes: limits.max_vertex_input_attributes as _, + max_vertex_input_bindings: limits.max_vertex_input_bindings as _, + max_vertex_input_attribute_offset: limits.max_vertex_input_attribute_offset as _, + max_vertex_input_binding_stride: limits.max_vertex_input_binding_stride as _, + max_vertex_output_components: limits.max_vertex_output_components as _, + optimal_buffer_copy_offset_alignment: limits.optimal_buffer_copy_offset_alignment as _, + optimal_buffer_copy_pitch_alignment: limits.optimal_buffer_copy_row_pitch_alignment + as _, + min_texel_buffer_offset_alignment: limits.min_texel_buffer_offset_alignment as _, + min_uniform_buffer_offset_alignment: limits.min_uniform_buffer_offset_alignment as _, + min_storage_buffer_offset_alignment: limits.min_storage_buffer_offset_alignment as _, + framebuffer_color_sample_counts: limits.framebuffer_color_sample_counts.as_raw() as _, + framebuffer_depth_sample_counts: limits.framebuffer_depth_sample_counts.as_raw() as _, + framebuffer_stencil_sample_counts: limits.framebuffer_stencil_sample_counts.as_raw() + as _, + max_color_attachments: limits.max_color_attachments as _, + buffer_image_granularity: limits.buffer_image_granularity, + non_coherent_atom_size: limits.non_coherent_atom_size as _, + max_sampler_anisotropy: limits.max_sampler_anisotropy, + min_vertex_input_binding_stride_alignment: 1, + max_bound_descriptor_sets: limits.max_bound_descriptor_sets as _, + max_compute_shared_memory_size: limits.max_compute_shared_memory_size as _, + max_compute_work_group_invocations: limits.max_compute_work_group_invocations as _, + max_descriptor_set_input_attachments: limits.max_descriptor_set_input_attachments as _, + max_descriptor_set_sampled_images: limits.max_descriptor_set_sampled_images as _, + max_descriptor_set_samplers: limits.max_descriptor_set_samplers as _, + max_descriptor_set_storage_buffers: limits.max_descriptor_set_storage_buffers as _, + max_descriptor_set_storage_buffers_dynamic: limits + .max_descriptor_set_storage_buffers_dynamic + as _, + max_descriptor_set_storage_images: limits.max_descriptor_set_storage_images as _, + max_descriptor_set_uniform_buffers: limits.max_descriptor_set_uniform_buffers as _, + max_descriptor_set_uniform_buffers_dynamic: limits + .max_descriptor_set_uniform_buffers_dynamic + as _, + max_draw_indexed_index_value: limits.max_draw_indexed_index_value, + max_draw_indirect_count: limits.max_draw_indirect_count, + max_fragment_combined_output_resources: limits.max_fragment_combined_output_resources + as _, + max_fragment_dual_source_attachments: limits.max_fragment_dual_src_attachments as _, + max_fragment_input_components: limits.max_fragment_input_components as _, + max_fragment_output_attachments: 
limits.max_fragment_output_attachments as _, + max_framebuffer_layers: limits.max_framebuffer_layers as _, + max_geometry_input_components: limits.max_geometry_input_components as _, + max_geometry_output_components: limits.max_geometry_output_components as _, + max_geometry_output_vertices: limits.max_geometry_output_vertices as _, + max_geometry_shader_invocations: limits.max_geometry_shader_invocations as _, + max_geometry_total_output_components: limits.max_geometry_total_output_components as _, + max_memory_allocation_count: limits.max_memory_allocation_count as _, + max_per_stage_descriptor_input_attachments: limits + .max_per_stage_descriptor_input_attachments + as _, + max_per_stage_descriptor_sampled_images: limits.max_per_stage_descriptor_sampled_images + as _, + max_per_stage_descriptor_samplers: limits.max_per_stage_descriptor_samplers as _, + max_per_stage_descriptor_storage_buffers: limits + .max_per_stage_descriptor_storage_buffers + as _, + max_per_stage_descriptor_storage_images: limits.max_per_stage_descriptor_storage_images + as _, + max_per_stage_descriptor_uniform_buffers: limits + .max_per_stage_descriptor_uniform_buffers + as _, + max_per_stage_resources: limits.max_per_stage_resources as _, + max_push_constants_size: limits.max_push_constants_size as _, + max_sampler_allocation_count: limits.max_sampler_allocation_count as _, + max_sampler_lod_bias: limits.max_sampler_lod_bias as _, + max_storage_buffer_range: limits.max_storage_buffer_range as _, + max_uniform_buffer_range: limits.max_uniform_buffer_range as _, + min_memory_map_alignment: limits.min_memory_map_alignment, + standard_sample_locations: limits.standard_sample_locations == ash::vk::TRUE, + } + } + + fn is_valid_cache(&self, cache: &[u8]) -> bool { + const HEADER_SIZE: usize = 16 + vk::UUID_SIZE; + + if cache.len() < HEADER_SIZE { + warn!("Bad cache data length {:?}", cache.len()); + return false; + } + + let header_len = u32::from_le_bytes([cache[0], cache[1], cache[2], cache[3]]); + let header_version = u32::from_le_bytes([cache[4], cache[5], cache[6], cache[7]]); + let vendor_id = u32::from_le_bytes([cache[8], cache[9], cache[10], cache[11]]); + let device_id = u32::from_le_bytes([cache[12], cache[13], cache[14], cache[15]]); + + + if (header_len as usize) < HEADER_SIZE { + warn!("Bad header length {:?}", header_len); + return false; + } + + + if header_version != vk::PipelineCacheHeaderVersion::ONE.as_raw() as u32 { + warn!("Unsupported cache header version: {:?}", header_version); + return false; + } + + + if vendor_id != self.properties.vendor_id { + warn!( + "Vendor ID mismatch. Device: {:?}, cache: {:?}.", + self.properties.vendor_id, vendor_id, + ); + return false; + } + + + if device_id != self.properties.device_id { + warn!( + "Device ID mismatch. Device: {:?}, cache: {:?}.", + self.properties.device_id, device_id, + ); + return false; + } + + if self.properties.pipeline_cache_uuid != cache[16 .. 16 + vk::UUID_SIZE] { + warn!( + "Pipeline cache UUID mismatch. Device: {:?}, cache: {:?}.", + self.properties.pipeline_cache_uuid, + &cache[16 .. 
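// Illustrative sketch (not from the vendored crates): the header that
// `is_valid_cache` above checks is 16 bytes of little-endian u32 fields
// followed by VK_UUID_SIZE (16) bytes of pipeline-cache UUID, so a blob that
// passes the checks can be fabricated for a unit test:
fn fake_pipeline_cache_header(vendor_id: u32, device_id: u32, uuid: &[u8; 16]) -> Vec<u8> {
    let mut blob = Vec::with_capacity(32);
    blob.extend_from_slice(&32u32.to_le_bytes()); // header length == HEADER_SIZE
    blob.extend_from_slice(&1u32.to_le_bytes()); // VK_PIPELINE_CACHE_HEADER_VERSION_ONE
    blob.extend_from_slice(&vendor_id.to_le_bytes());
    blob.extend_from_slice(&device_id.to_le_bytes());
    blob.extend_from_slice(uuid);
    blob
}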
16 + vk::UUID_SIZE], + ); + return false; + } + true + } +} + +#[doc(hidden)] +pub struct RawDevice( + pub ash::Device, + Features, + Arc, +); + +impl fmt::Debug for RawDevice { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + write!(f, "RawDevice") + } +} +impl Drop for RawDevice { + fn drop(&mut self) { + unsafe { + self.0.destroy_device(None); + } + } +} + + +pub type RawCommandQueue = Arc; + +pub struct CommandQueue { + raw: RawCommandQueue, + device: Arc, + swapchain_fn: vk::KhrSwapchainFn, +} + +impl fmt::Debug for CommandQueue { + fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result { + fmt.write_str("CommandQueue") + } +} + +impl queue::CommandQueue for CommandQueue { + unsafe fn submit<'a, T, Ic, S, Iw, Is>( + &mut self, + submission: queue::Submission, + fence: Option<&native::Fence>, + ) where + T: 'a + Borrow, + Ic: IntoIterator, + S: 'a + Borrow, + Iw: IntoIterator, + Is: IntoIterator, + { + + let mut waits = Vec::new(); + let mut stages = Vec::new(); + + let buffers = submission + .command_buffers + .into_iter() + .map(|cmd| cmd.borrow().raw) + .collect::>(); + for (semaphore, stage) in submission.wait_semaphores { + waits.push(semaphore.borrow().0); + stages.push(conv::map_pipeline_stage(stage)); + } + let signals = submission + .signal_semaphores + .into_iter() + .map(|semaphore| semaphore.borrow().0) + .collect::>(); + + let info = vk::SubmitInfo { + s_type: vk::StructureType::SUBMIT_INFO, + p_next: ptr::null(), + wait_semaphore_count: waits.len() as u32, + p_wait_semaphores: waits.as_ptr(), + + p_wait_dst_stage_mask: if stages.is_empty() { + ptr::null() + } else { + stages.as_ptr() + }, + command_buffer_count: buffers.len() as u32, + p_command_buffers: buffers.as_ptr(), + signal_semaphore_count: signals.len() as u32, + p_signal_semaphores: signals.as_ptr(), + }; + + let fence_raw = fence.map(|fence| fence.0).unwrap_or(vk::Fence::null()); + + let result = self.device.0.queue_submit(*self.raw, &[info], fence_raw); + assert_eq!(Ok(()), result); + } + + unsafe fn present<'a, W, Is, S, Iw>( + &mut self, + swapchains: Is, + wait_semaphores: Iw, + ) -> Result, PresentError> + where + W: 'a + Borrow, + Is: IntoIterator, + S: 'a + Borrow, + Iw: IntoIterator, + { + let semaphores = wait_semaphores + .into_iter() + .map(|sem| sem.borrow().0) + .collect::>(); + + let mut frames = Vec::new(); + let mut vk_swapchains = Vec::new(); + for (swapchain, index) in swapchains { + vk_swapchains.push(swapchain.borrow().raw); + frames.push(index); + } + + let info = vk::PresentInfoKHR { + s_type: vk::StructureType::PRESENT_INFO_KHR, + p_next: ptr::null(), + wait_semaphore_count: semaphores.len() as _, + p_wait_semaphores: semaphores.as_ptr(), + swapchain_count: vk_swapchains.len() as _, + p_swapchains: vk_swapchains.as_ptr(), + p_image_indices: frames.as_ptr(), + p_results: ptr::null_mut(), + }; + + match self.swapchain_fn.queue_present_khr(*self.raw, &info) { + vk::Result::SUCCESS => Ok(None), + vk::Result::SUBOPTIMAL_KHR => Ok(Some(Suboptimal)), + vk::Result::ERROR_OUT_OF_HOST_MEMORY => { + Err(PresentError::OutOfMemory(OutOfMemory::Host)) + } + vk::Result::ERROR_OUT_OF_DEVICE_MEMORY => { + Err(PresentError::OutOfMemory(OutOfMemory::Device)) + } + vk::Result::ERROR_DEVICE_LOST => Err(PresentError::DeviceLost(DeviceLost)), + vk::Result::ERROR_OUT_OF_DATE_KHR => Err(PresentError::OutOfDate), + vk::Result::ERROR_SURFACE_LOST_KHR => Err(PresentError::SurfaceLost(SurfaceLost)), + _ => panic!("Failed to present frame"), + } + } + + unsafe fn present_surface( + &mut self, + surface: &mut 
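// Illustrative sketch (not from the vendored crates): driving the `submit()`
// implemented above. All bindings (queue, command buffer, semaphores, fence)
// are assumed to exist; the iterators map onto the Ic/Iw/Is generics.
unsafe fn submit_frame<B: gfx_hal::Backend>(
    queue: &mut B::CommandQueue,
    cmd: &B::CommandBuffer,
    acquire: &B::Semaphore,
    rendered: &B::Semaphore,
    fence: &B::Fence,
) {
    use gfx_hal::pso::PipelineStage;
    use gfx_hal::queue::{CommandQueue as _, Submission};
    queue.submit(
        Submission {
            command_buffers: std::iter::once(cmd),
            // Wait for the swapchain image before color output is written.
            wait_semaphores: std::iter::once((acquire, PipelineStage::COLOR_ATTACHMENT_OUTPUT)),
            signal_semaphores: std::iter::once(rendered),
        },
        Some(fence),
    );
}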
window::Surface, + image: window::SurfaceImage, + wait_semaphore: Option<&native::Semaphore>, + ) -> Result, PresentError> { + let ssc = surface.swapchain.as_ref().unwrap(); + let p_wait_semaphores = if let Some(wait_semaphore) = wait_semaphore { + &wait_semaphore.0 + } else { + let submit_info = vk::SubmitInfo { + s_type: vk::StructureType::SUBMIT_INFO, + p_next: ptr::null(), + wait_semaphore_count: 0, + p_wait_semaphores: ptr::null(), + p_wait_dst_stage_mask: &vk::PipelineStageFlags::COLOR_ATTACHMENT_OUTPUT, + command_buffer_count: 0, + p_command_buffers: ptr::null(), + signal_semaphore_count: 1, + p_signal_semaphores: &ssc.semaphore.0, + }; + self.device + .0 + .queue_submit(*self.raw, &[submit_info], vk::Fence::null()) + .unwrap(); + &ssc.semaphore.0 + }; + let present_info = vk::PresentInfoKHR { + s_type: vk::StructureType::PRESENT_INFO_KHR, + p_next: ptr::null(), + wait_semaphore_count: 1, + p_wait_semaphores, + swapchain_count: 1, + p_swapchains: &ssc.swapchain.raw, + p_image_indices: &image.index, + p_results: ptr::null_mut(), + }; + + match self + .swapchain_fn + .queue_present_khr(*self.raw, &present_info) + { + vk::Result::SUCCESS => Ok(None), + vk::Result::SUBOPTIMAL_KHR => Ok(Some(Suboptimal)), + vk::Result::ERROR_OUT_OF_HOST_MEMORY => { + Err(PresentError::OutOfMemory(OutOfMemory::Host)) + } + vk::Result::ERROR_OUT_OF_DEVICE_MEMORY => { + Err(PresentError::OutOfMemory(OutOfMemory::Device)) + } + vk::Result::ERROR_DEVICE_LOST => Err(PresentError::DeviceLost(DeviceLost)), + vk::Result::ERROR_OUT_OF_DATE_KHR => Err(PresentError::OutOfDate), + vk::Result::ERROR_SURFACE_LOST_KHR => Err(PresentError::SurfaceLost(SurfaceLost)), + _ => panic!("Failed to present frame"), + } + } + + fn wait_idle(&self) -> Result<(), OutOfMemory> { + match unsafe { self.device.0.queue_wait_idle(*self.raw) } { + Ok(()) => Ok(()), + Err(vk::Result::ERROR_OUT_OF_HOST_MEMORY) => Err(OutOfMemory::Host), + Err(vk::Result::ERROR_OUT_OF_DEVICE_MEMORY) => Err(OutOfMemory::Device), + Err(_) => unreachable!(), + } + } +} + +#[derive(Debug)] +pub struct Device { + raw: Arc, +} + +#[derive(Copy, Clone, Debug, Eq, Hash, PartialEq)] +pub enum Backend {} +impl hal::Backend for Backend { + type Instance = Instance; + type PhysicalDevice = PhysicalDevice; + type Device = Device; + + type Surface = window::Surface; + type Swapchain = window::Swapchain; + + type QueueFamily = QueueFamily; + type CommandQueue = CommandQueue; + type CommandBuffer = command::CommandBuffer; + + type Memory = native::Memory; + type CommandPool = pool::RawCommandPool; + + type ShaderModule = native::ShaderModule; + type RenderPass = native::RenderPass; + type Framebuffer = native::Framebuffer; + + type Buffer = native::Buffer; + type BufferView = native::BufferView; + type Image = native::Image; + type ImageView = native::ImageView; + type Sampler = native::Sampler; + + type ComputePipeline = native::ComputePipeline; + type GraphicsPipeline = native::GraphicsPipeline; + type PipelineLayout = native::PipelineLayout; + type PipelineCache = native::PipelineCache; + type DescriptorSetLayout = native::DescriptorSetLayout; + type DescriptorPool = native::DescriptorPool; + type DescriptorSet = native::DescriptorSet; + + type Fence = native::Fence; + type Semaphore = native::Semaphore; + type Event = native::Event; + type QueryPool = native::QueryPool; +} diff --git a/third_party/rust/gfx-backend-vulkan/src/native.rs b/third_party/rust/gfx-backend-vulkan/src/native.rs new file mode 100644 index 000000000000..743183c5e666 --- /dev/null +++ 
b/third_party/rust/gfx-backend-vulkan/src/native.rs @@ -0,0 +1,180 @@ +use crate::{window::FramebufferCachePtr, Backend, RawDevice}; +use ash::{version::DeviceV1_0, vk}; +use hal::{image::SubresourceRange, pso}; +use smallvec::SmallVec; +use std::{borrow::Borrow, sync::Arc}; + +#[derive(Debug, Hash)] +pub struct Semaphore(pub vk::Semaphore); + +#[derive(Debug, Hash, PartialEq, Eq)] +pub struct Fence(pub vk::Fence); + +#[derive(Debug, Hash)] +pub struct Event(pub vk::Event); + +#[derive(Debug, Hash)] +pub struct GraphicsPipeline(pub vk::Pipeline); + +#[derive(Debug, Hash)] +pub struct ComputePipeline(pub vk::Pipeline); + +#[derive(Debug, Hash)] +pub struct Memory { + pub(crate) raw: vk::DeviceMemory, +} + +#[derive(Copy, Clone, Debug, PartialEq, Eq, Hash)] +pub struct Buffer { + pub(crate) raw: vk::Buffer, +} + +unsafe impl Sync for Buffer {} +unsafe impl Send for Buffer {} + +#[derive(Clone, Debug, Eq, Hash, PartialEq)] +pub struct BufferView { + pub(crate) raw: vk::BufferView, +} + +#[derive(Debug, Hash, PartialEq, Eq)] +pub struct Image { + pub(crate) raw: vk::Image, + pub(crate) ty: vk::ImageType, + pub(crate) flags: vk::ImageCreateFlags, + pub(crate) extent: vk::Extent3D, +} + +#[derive(Debug, Hash, PartialEq, Eq)] +pub enum ImageViewOwner { + User, + Surface(FramebufferCachePtr), +} + +#[derive(Debug, Hash, PartialEq, Eq)] +pub struct ImageView { + pub(crate) image: vk::Image, + pub(crate) view: vk::ImageView, + pub(crate) range: SubresourceRange, + pub(crate) owner: ImageViewOwner, +} + +#[derive(Debug, Hash)] +pub struct Sampler(pub vk::Sampler); + +#[derive(Debug, Hash)] +pub struct RenderPass { + pub raw: vk::RenderPass, + pub clear_attachments_mask: u64, +} + +#[derive(Debug, Hash)] +pub struct Framebuffer { + pub(crate) raw: vk::Framebuffer, + pub(crate) owned: bool, +} + +#[derive(Debug)] +pub struct DescriptorSetLayout { + pub(crate) raw: vk::DescriptorSetLayout, + pub(crate) bindings: Arc>, +} + +#[derive(Debug)] +pub struct DescriptorSet { + pub(crate) raw: vk::DescriptorSet, + pub(crate) bindings: Arc>, +} + +#[derive(Debug, Hash)] +pub struct PipelineLayout { + pub(crate) raw: vk::PipelineLayout, +} + +#[derive(Debug)] +pub struct PipelineCache { + pub(crate) raw: vk::PipelineCache, +} + +#[derive(Debug, Eq, Hash, PartialEq)] +pub struct ShaderModule { + pub(crate) raw: vk::ShaderModule, +} + +#[derive(Debug)] +pub struct DescriptorPool { + pub(crate) raw: vk::DescriptorPool, + pub(crate) device: Arc, + + pub(crate) set_free_vec: Vec, +} + +impl pso::DescriptorPool for DescriptorPool { + unsafe fn allocate_sets( + &mut self, + layout_iter: I, + output: &mut SmallVec<[DescriptorSet; 1]>, + ) -> Result<(), pso::AllocationError> + where + I: IntoIterator, + I::Item: Borrow, + { + use std::ptr; + + let mut raw_layouts = Vec::new(); + let mut layout_bindings = Vec::new(); + for layout in layout_iter { + raw_layouts.push(layout.borrow().raw); + layout_bindings.push(layout.borrow().bindings.clone()); + } + + let info = vk::DescriptorSetAllocateInfo { + s_type: vk::StructureType::DESCRIPTOR_SET_ALLOCATE_INFO, + p_next: ptr::null(), + descriptor_pool: self.raw, + descriptor_set_count: raw_layouts.len() as u32, + p_set_layouts: raw_layouts.as_ptr(), + }; + + self.device + .0 + .allocate_descriptor_sets(&info) + .map(|sets| { + output.extend( + sets.into_iter() + .zip(layout_bindings) + .map(|(raw, bindings)| DescriptorSet { raw, bindings }), + ) + }) + .map_err(|err| match err { + vk::Result::ERROR_OUT_OF_HOST_MEMORY => pso::AllocationError::Host, + 
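// Illustrative sketch (not from the vendored crates): allocating one set
// through the DescriptorPool trait implemented above; pool exhaustion comes
// back as pso::AllocationError rather than a panic.
unsafe fn allocate_one_set<B: gfx_hal::Backend>(
    pool: &mut B::DescriptorPool,
    layout: &B::DescriptorSetLayout,
) -> Result<B::DescriptorSet, gfx_hal::pso::AllocationError> {
    use gfx_hal::pso::DescriptorPool as _;
    let mut sets = smallvec::SmallVec::<[B::DescriptorSet; 1]>::new();
    pool.allocate_sets(std::iter::once(layout), &mut sets)?;
    Ok(sets.pop().expect("allocate_sets filled the output vector"))
}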
vk::Result::ERROR_OUT_OF_DEVICE_MEMORY => pso::AllocationError::Device, + vk::Result::ERROR_OUT_OF_POOL_MEMORY => pso::AllocationError::OutOfPoolMemory, + _ => pso::AllocationError::FragmentedPool, + }) + } + + unsafe fn free_sets(&mut self, descriptor_sets: I) + where + I: IntoIterator, + { + self.set_free_vec.clear(); + self.set_free_vec + .extend(descriptor_sets.into_iter().map(|d| d.raw)); + self.device + .0 + .free_descriptor_sets(self.raw, &self.set_free_vec); + } + + unsafe fn reset(&mut self) { + assert_eq!( + Ok(()), + self.device + .0 + .reset_descriptor_pool(self.raw, vk::DescriptorPoolResetFlags::empty()) + ); + } +} + +#[derive(Debug, Hash)] +pub struct QueryPool(pub vk::QueryPool); diff --git a/third_party/rust/gfx-backend-vulkan/src/pool.rs b/third_party/rust/gfx-backend-vulkan/src/pool.rs new file mode 100644 index 000000000000..707b749a0ae9 --- /dev/null +++ b/third_party/rust/gfx-backend-vulkan/src/pool.rs @@ -0,0 +1,60 @@ +use ash::version::DeviceV1_0; +use ash::vk; +use smallvec::SmallVec; +use std::ptr; +use std::sync::Arc; + +use crate::command::CommandBuffer; +use crate::conv; +use crate::{Backend, RawDevice}; +use hal::{command, pool}; + +#[derive(Debug)] +pub struct RawCommandPool { + pub(crate) raw: vk::CommandPool, + pub(crate) device: Arc, +} + +impl pool::CommandPool for RawCommandPool { + unsafe fn reset(&mut self, release_resources: bool) { + let flags = if release_resources { + vk::CommandPoolResetFlags::RELEASE_RESOURCES + } else { + vk::CommandPoolResetFlags::empty() + }; + + assert_eq!(Ok(()), self.device.0.reset_command_pool(self.raw, flags)); + } + + unsafe fn allocate_vec(&mut self, num: usize, level: command::Level) -> SmallVec<[CommandBuffer; 1]> { + let info = vk::CommandBufferAllocateInfo { + s_type: vk::StructureType::COMMAND_BUFFER_ALLOCATE_INFO, + p_next: ptr::null(), + command_pool: self.raw, + level: conv::map_command_buffer_level(level), + command_buffer_count: num as u32, + }; + + let device = &self.device; + let cbufs_raw = device.0 + .allocate_command_buffers(&info) + .expect("Error on command buffer allocation"); + + cbufs_raw + .into_iter() + .map(|buffer| CommandBuffer { + raw: buffer, + device: device.clone(), + }) + .collect() + } + + unsafe fn free(&mut self, cbufs: I) + where + I: IntoIterator, + { + let buffers: SmallVec<[vk::CommandBuffer; 16]> = + cbufs.into_iter().map(|buffer| buffer.raw).collect(); + self.device.0.free_command_buffers(self.raw, &buffers); + } +} diff --git a/third_party/rust/gfx-backend-vulkan/src/window.rs b/third_party/rust/gfx-backend-vulkan/src/window.rs new file mode 100644 index 000000000000..01cd7ac20804 --- /dev/null +++ b/third_party/rust/gfx-backend-vulkan/src/window.rs @@ -0,0 +1,597 @@ +use std::{ + borrow::Borrow, + fmt, + hash, + os::raw::c_void, + ptr, + sync::{Arc, Mutex}, + time::Instant, +}; + +use ash::{extensions::khr, version::DeviceV1_0 as _, vk}; +use hal::{format::Format, window as w}; +use smallvec::SmallVec; + +use crate::{conv, native}; +use crate::{ + Backend, + Device, + Instance, + PhysicalDevice, + QueueFamily, + RawDevice, + RawInstance, + VK_ENTRY, +}; + + +#[derive(Debug, Default)] +pub struct FramebufferCache { + + pub framebuffers: SmallVec<[vk::Framebuffer; 1]>, +} + +#[derive(Debug, Default)] +pub struct FramebufferCachePtr(pub Arc>); + +impl hash::Hash for FramebufferCachePtr { + fn hash(&self, hasher: &mut H) { + (self.0.as_ref() as *const Mutex).hash(hasher) + } +} +impl PartialEq for FramebufferCachePtr { + fn eq(&self, other: &Self) -> bool { + Arc::ptr_eq(&self.0, 
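// Illustrative sketch (not from the vendored crates): the command-pool
// methods implemented in pool.rs above allocate in bulk and free by value.
unsafe fn with_one_command_buffer<B: gfx_hal::Backend>(pool: &mut B::CommandPool) {
    use gfx_hal::{command, pool::CommandPool as _};
    let mut bufs = pool.allocate_vec(1, command::Level::Primary);
    let cb = bufs.pop().expect("requested one command buffer");
    // ... record and submit `cb` here ...
    pool.free(std::iter::once(cb));
}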
&other.0) + } +} +impl Eq for FramebufferCachePtr {} + +#[derive(Debug)] +struct SurfaceFrame { + image: vk::Image, + view: vk::ImageView, + framebuffers: FramebufferCachePtr, +} + +#[derive(Debug)] +pub struct SurfaceSwapchain { + pub(crate) swapchain: Swapchain, + device: Arc, + fence: native::Fence, + pub(crate) semaphore: native::Semaphore, + frames: Vec, +} + +impl SurfaceSwapchain { + unsafe fn release_resources(self, device: &ash::Device) -> Swapchain { + let _ = device.device_wait_idle(); + device.destroy_fence(self.fence.0, None); + device.destroy_semaphore(self.semaphore.0, None); + for frame in self.frames { + device.destroy_image_view(frame.view, None); + for framebuffer in frame.framebuffers.0.lock().unwrap().framebuffers.drain() { + device.destroy_framebuffer(framebuffer, None); + } + } + self.swapchain + } +} + +pub struct Surface { + + + pub(crate) raw: Arc, + pub(crate) swapchain: Option, +} + +impl fmt::Debug for Surface { + fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result { + fmt.write_str("Surface") + } +} + +pub struct RawSurface { + pub(crate) handle: vk::SurfaceKHR, + pub(crate) functor: khr::Surface, + pub(crate) instance: Arc, +} + +impl Instance { + #[cfg(all( + feature = "x11", + unix, + not(target_os = "android"), + not(target_os = "macos") + ))] + pub fn create_surface_from_xlib(&self, dpy: *mut vk::Display, window: vk::Window) -> Surface { + let entry = VK_ENTRY + .as_ref() + .expect("Unable to load Vulkan entry points"); + + if !self.extensions.contains(&khr::XlibSurface::name()) { + panic!("Vulkan driver does not support VK_KHR_XLIB_SURFACE"); + } + + let surface = { + let xlib_loader = khr::XlibSurface::new(entry, &self.raw.0); + let info = vk::XlibSurfaceCreateInfoKHR { + s_type: vk::StructureType::XLIB_SURFACE_CREATE_INFO_KHR, + p_next: ptr::null(), + flags: vk::XlibSurfaceCreateFlagsKHR::empty(), + window, + dpy, + }; + + unsafe { xlib_loader.create_xlib_surface(&info, None) } + .expect("XlibSurface::create_xlib_surface() failed") + }; + + self.create_surface_from_vk_surface_khr(surface) + } + + #[cfg(all( + feature = "xcb", + unix, + not(target_os = "android"), + not(target_os = "macos") + ))] + pub fn create_surface_from_xcb( + &self, + connection: *mut vk::xcb_connection_t, + window: vk::xcb_window_t, + ) -> Surface { + let entry = VK_ENTRY + .as_ref() + .expect("Unable to load Vulkan entry points"); + + if !self.extensions.contains(&khr::XcbSurface::name()) { + panic!("Vulkan driver does not support VK_KHR_XCB_SURFACE"); + } + + let surface = { + let xcb_loader = khr::XcbSurface::new(entry, &self.raw.0); + let info = vk::XcbSurfaceCreateInfoKHR { + s_type: vk::StructureType::XCB_SURFACE_CREATE_INFO_KHR, + p_next: ptr::null(), + flags: vk::XcbSurfaceCreateFlagsKHR::empty(), + window, + connection, + }; + + unsafe { xcb_loader.create_xcb_surface(&info, None) } + .expect("XcbSurface::create_xcb_surface() failed") + }; + + self.create_surface_from_vk_surface_khr(surface) + } + + #[cfg(all(unix, not(target_os = "android")))] + pub fn create_surface_from_wayland( + &self, + display: *mut c_void, + surface: *mut c_void, + ) -> Surface { + let entry = VK_ENTRY + .as_ref() + .expect("Unable to load Vulkan entry points"); + + if !self.extensions.contains(&khr::WaylandSurface::name()) { + panic!("Vulkan driver does not support VK_KHR_WAYLAND_SURFACE"); + } + + let surface = { + let w_loader = khr::WaylandSurface::new(entry, &self.raw.0); + let info = vk::WaylandSurfaceCreateInfoKHR { + s_type: vk::StructureType::WAYLAND_SURFACE_CREATE_INFO_KHR, + p_next: 
ptr::null(), + flags: vk::WaylandSurfaceCreateFlagsKHR::empty(), + display: display as *mut _, + surface: surface as *mut _, + }; + + unsafe { w_loader.create_wayland_surface(&info, None) }.expect("WaylandSurface failed") + }; + + self.create_surface_from_vk_surface_khr(surface) + } + + #[cfg(target_os = "android")] + pub fn create_surface_android(&self, window: *const c_void) -> Surface { + let entry = VK_ENTRY + .as_ref() + .expect("Unable to load Vulkan entry points"); + + let surface = { + let loader = khr::AndroidSurface::new(entry, &self.raw.0); + let info = vk::AndroidSurfaceCreateInfoKHR { + s_type: vk::StructureType::ANDROID_SURFACE_CREATE_INFO_KHR, + p_next: ptr::null(), + flags: vk::AndroidSurfaceCreateFlagsKHR::empty(), + window: window as *const _ as *mut _, + }; + + unsafe { loader.create_android_surface(&info, None) }.expect("AndroidSurface failed") + }; + + self.create_surface_from_vk_surface_khr(surface) + } + + #[cfg(windows)] + pub fn create_surface_from_hwnd(&self, hinstance: *mut c_void, hwnd: *mut c_void) -> Surface { + let entry = VK_ENTRY + .as_ref() + .expect("Unable to load Vulkan entry points"); + + if !self.extensions.contains(&khr::Win32Surface::name()) { + panic!("Vulkan driver does not support VK_KHR_WIN32_SURFACE"); + } + + let surface = { + let info = vk::Win32SurfaceCreateInfoKHR { + s_type: vk::StructureType::WIN32_SURFACE_CREATE_INFO_KHR, + p_next: ptr::null(), + flags: vk::Win32SurfaceCreateFlagsKHR::empty(), + hinstance: hinstance as *mut _, + hwnd: hwnd as *mut _, + }; + let win32_loader = khr::Win32Surface::new(entry, &self.raw.0); + unsafe { + win32_loader + .create_win32_surface(&info, None) + .expect("Unable to create Win32 surface") + } + }; + + self.create_surface_from_vk_surface_khr(surface) + } + + #[cfg(target_os = "macos")] + pub fn create_surface_from_ns_view(&self, view: *mut c_void) -> Surface { + use ash::extensions::mvk; + use core_graphics::{base::CGFloat, geometry::CGRect}; + use objc::runtime::{Object, BOOL, YES}; + + + unsafe { + let view = view as *mut Object; + let existing: *mut Object = msg_send![view, layer]; + let class = class!(CAMetalLayer); + + let use_current = if existing.is_null() { + false + } else { + let result: BOOL = msg_send![existing, isKindOfClass: class]; + result == YES + }; + + if !use_current { + let layer: *mut Object = msg_send![class, new]; + let () = msg_send![view, setLayer: layer]; + let bounds: CGRect = msg_send![view, bounds]; + let () = msg_send![layer, setBounds: bounds]; + + let window: *mut Object = msg_send![view, window]; + if !window.is_null() { + let scale_factor: CGFloat = msg_send![window, backingScaleFactor]; + let () = msg_send![layer, setContentsScale: scale_factor]; + } + } + } + + let entry = VK_ENTRY + .as_ref() + .expect("Unable to load Vulkan entry points"); + + if !self.extensions.contains(&mvk::MacOSSurface::name()) { + panic!("Vulkan driver does not support VK_MVK_MACOS_SURFACE"); + } + + let surface = { + let mac_os_loader = mvk::MacOSSurface::new(entry, &self.raw.0); + let info = vk::MacOSSurfaceCreateInfoMVK { + s_type: vk::StructureType::MACOS_SURFACE_CREATE_INFO_M, + p_next: ptr::null(), + flags: vk::MacOSSurfaceCreateFlagsMVK::empty(), + p_view: view, + }; + + unsafe { + mac_os_loader + .create_mac_os_surface_mvk(&info, None) + .expect("Unable to create macOS surface") + } + }; + + self.create_surface_from_vk_surface_khr(surface) + } + + pub fn create_surface_from_vk_surface_khr(&self, surface: vk::SurfaceKHR) -> Surface { + let entry = VK_ENTRY + .as_ref() + .expect("Unable 
to load Vulkan entry points"); + + let functor = khr::Surface::new(entry, &self.raw.0); + + let raw = Arc::new(RawSurface { + handle: surface, + functor, + instance: self.raw.clone(), + }); + + Surface { + raw, + swapchain: None, + } + } +} + +impl w::Surface for Surface { + fn supports_queue_family(&self, queue_family: &QueueFamily) -> bool { + unsafe { + self.raw.functor.get_physical_device_surface_support( + queue_family.device, + queue_family.index, + self.raw.handle, + ) + } + } + + fn capabilities(&self, physical_device: &PhysicalDevice) -> w::SurfaceCapabilities { + + let caps = unsafe { + self.raw + .functor + .get_physical_device_surface_capabilities(physical_device.handle, self.raw.handle) + } + .expect("Unable to query surface capabilities"); + + + let max_images = if caps.max_image_count == 0 { + !0 + } else { + caps.max_image_count + }; + + + let current_extent = if caps.current_extent.width != !0 && caps.current_extent.height != !0 + { + Some(w::Extent2D { + width: caps.current_extent.width, + height: caps.current_extent.height, + }) + } else { + None + }; + + let min_extent = w::Extent2D { + width: caps.min_image_extent.width, + height: caps.min_image_extent.height, + }; + + let max_extent = w::Extent2D { + width: caps.max_image_extent.width, + height: caps.max_image_extent.height, + }; + + let raw_present_modes = unsafe { + self.raw + .functor + .get_physical_device_surface_present_modes(physical_device.handle, self.raw.handle) + } + .expect("Unable to query present modes"); + + w::SurfaceCapabilities { + present_modes: raw_present_modes + .into_iter() + .fold(w::PresentMode::empty(), |u, m| { u | conv::map_vk_present_mode(m) }), + composite_alpha_modes: conv::map_vk_composite_alpha(caps.supported_composite_alpha), + image_count: caps.min_image_count ..= max_images, + current_extent, + extents: min_extent ..= max_extent, + max_image_layers: caps.max_image_array_layers as _, + usage: conv::map_vk_image_usage(caps.supported_usage_flags), + } + } + + fn supported_formats(&self, physical_device: &PhysicalDevice) -> Option> { + + let raw_formats = unsafe { + self.raw + .functor + .get_physical_device_surface_formats(physical_device.handle, self.raw.handle) + } + .expect("Unable to query surface formats"); + + match raw_formats[0].format { + + + + vk::Format::UNDEFINED => None, + _ => Some( + raw_formats + .into_iter() + .filter_map(|sf| conv::map_vk_format(sf.format)) + .collect(), + ), + } + } +} + +#[derive(Debug)] +pub struct SurfaceImage { + pub(crate) index: w::SwapImageIndex, + view: native::ImageView, +} + +impl Borrow for SurfaceImage { + fn borrow(&self) -> &native::ImageView { + &self.view + } +} + +impl w::PresentationSurface for Surface { + type SwapchainImage = SurfaceImage; + + unsafe fn configure_swapchain( + &mut self, + device: &Device, + config: w::SwapchainConfig, + ) -> Result<(), w::CreationError> { + use hal::device::Device as _; + + let format = config.format; + let old = self + .swapchain + .take() + .map(|ssc| ssc.release_resources(&device.raw.0)); + + let (swapchain, images) = device.create_swapchain(self, config, old)?; + + self.swapchain = Some(SurfaceSwapchain { + swapchain, + device: Arc::clone(&device.raw), + fence: device.create_fence(false).unwrap(), + semaphore: device.create_semaphore().unwrap(), + frames: images + .iter() + .map(|image| { + let view = device + .create_image_view( + image, + hal::image::ViewKind::D2, + format, + hal::format::Swizzle::NO, + hal::image::SubresourceRange { + aspects: hal::format::Aspects::COLOR, + layers: 0 .. 
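// Illustrative sketch (not from the vendored crates): turning the
// capabilities/supported_formats queries implemented above into a swapchain
// config. SwapchainConfig::from_caps (gfx-hal 0.4) clamps the extent and
// image count to the reported ranges; the format fallback here is arbitrary.
fn choose_config<B: gfx_hal::Backend>(
    surface: &B::Surface,
    physical: &B::PhysicalDevice,
    default_extent: gfx_hal::window::Extent2D,
) -> gfx_hal::window::SwapchainConfig {
    use gfx_hal::format::Format;
    use gfx_hal::window::{Surface as _, SwapchainConfig};
    let caps = surface.capabilities(physical);
    let format = surface
        .supported_formats(physical)
        .map_or(Format::Rgba8Srgb, |formats| formats[0]);
    SwapchainConfig::from_caps(&caps, format, default_extent)
}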
1, + levels: 0 .. 1, + }, + ) + .unwrap(); + SurfaceFrame { + image: view.image, + view: view.view, + framebuffers: Default::default(), + } + }) + .collect(), + }); + + Ok(()) + } + + unsafe fn unconfigure_swapchain(&mut self, device: &Device) { + if let Some(ssc) = self.swapchain.take() { + let swapchain = ssc.release_resources(&device.raw.0); + swapchain.functor.destroy_swapchain(swapchain.raw, None); + } + } + + unsafe fn acquire_image( + &mut self, + mut timeout_ns: u64, + ) -> Result<(Self::SwapchainImage, Option), w::AcquireError> { + use hal::window::Swapchain as _; + + let ssc = self.swapchain.as_mut().unwrap(); + let moment = Instant::now(); + let (index, suboptimal) = + ssc.swapchain + .acquire_image(timeout_ns, None, Some(&ssc.fence))?; + timeout_ns = timeout_ns.saturating_sub(moment.elapsed().as_nanos() as u64); + let fences = &[ssc.fence.0]; + + match ssc.device.0.wait_for_fences(fences, true, timeout_ns) { + Ok(()) => { + ssc.device.0.reset_fences(fences).unwrap(); + let frame = &ssc.frames[index as usize]; + + + for framebuffer in frame.framebuffers.0.lock().unwrap().framebuffers.drain() { + ssc.device.0.destroy_framebuffer(framebuffer, None); + } + let image = Self::SwapchainImage { + index, + view: native::ImageView { + image: frame.image, + view: frame.view, + range: hal::image::SubresourceRange { + aspects: hal::format::Aspects::COLOR, + layers: 0 .. 1, + levels: 0 .. 1, + }, + owner: native::ImageViewOwner::Surface(FramebufferCachePtr(Arc::clone( + &frame.framebuffers.0, + ))), + }, + }; + Ok((image, suboptimal)) + } + Err(vk::Result::NOT_READY) => Err(w::AcquireError::NotReady), + Err(vk::Result::TIMEOUT) => Err(w::AcquireError::Timeout), + Err(vk::Result::ERROR_OUT_OF_DATE_KHR) => Err(w::AcquireError::OutOfDate), + Err(vk::Result::ERROR_SURFACE_LOST_KHR) => { + Err(w::AcquireError::SurfaceLost(hal::device::SurfaceLost)) + } + Err(vk::Result::ERROR_OUT_OF_HOST_MEMORY) => Err(w::AcquireError::OutOfMemory( + hal::device::OutOfMemory::Host, + )), + Err(vk::Result::ERROR_OUT_OF_DEVICE_MEMORY) => Err(w::AcquireError::OutOfMemory( + hal::device::OutOfMemory::Device, + )), + Err(vk::Result::ERROR_DEVICE_LOST) => { + Err(w::AcquireError::DeviceLost(hal::device::DeviceLost)) + } + _ => unreachable!(), + } + } +} + +pub struct Swapchain { + pub(crate) raw: vk::SwapchainKHR, + pub(crate) functor: khr::Swapchain, +} + +impl fmt::Debug for Swapchain { + fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result { + fmt.write_str("Swapchain") + } +} + +impl w::Swapchain for Swapchain { + unsafe fn acquire_image( + &mut self, + timeout_ns: u64, + semaphore: Option<&native::Semaphore>, + fence: Option<&native::Fence>, + ) -> Result<(w::SwapImageIndex, Option), w::AcquireError> { + let semaphore = semaphore.map_or(vk::Semaphore::null(), |s| s.0); + let fence = fence.map_or(vk::Fence::null(), |f| f.0); + + + let index = self + .functor + .acquire_next_image(self.raw, timeout_ns, semaphore, fence); + + match index { + Ok((i, true)) => Ok((i, Some(w::Suboptimal))), + Ok((i, false)) => Ok((i, None)), + Err(vk::Result::NOT_READY) => Err(w::AcquireError::NotReady), + Err(vk::Result::TIMEOUT) => Err(w::AcquireError::Timeout), + Err(vk::Result::ERROR_OUT_OF_DATE_KHR) => Err(w::AcquireError::OutOfDate), + Err(vk::Result::ERROR_SURFACE_LOST_KHR) => { + Err(w::AcquireError::SurfaceLost(hal::device::SurfaceLost)) + } + Err(vk::Result::ERROR_OUT_OF_HOST_MEMORY) => Err(w::AcquireError::OutOfMemory( + hal::device::OutOfMemory::Host, + )), + Err(vk::Result::ERROR_OUT_OF_DEVICE_MEMORY) => 
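// Illustrative sketch (not from the vendored crates): the per-frame loop over
// the PresentationSurface implementation above. On OutOfDate the caller is
// expected to reconfigure the swapchain and retry.
unsafe fn render_one_frame<B: gfx_hal::Backend>(
    surface: &mut B::Surface,
    queue: &mut B::CommandQueue,
) -> Result<(), gfx_hal::window::AcquireError>
where
    B::Surface: gfx_hal::window::PresentationSurface<B>,
{
    use gfx_hal::queue::CommandQueue as _;
    use gfx_hal::window::PresentationSurface as _;
    // !0 waits without a deadline; the implementation above deducts the time
    // spent in acquire from the fence wait that follows it.
    let (image, _suboptimal) = surface.acquire_image(!0)?;
    // ... record and submit commands targeting `image` here ...
    let _ = queue.present_surface(surface, image, None);
    Ok(())
}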
Err(w::AcquireError::OutOfMemory( + hal::device::OutOfMemory::Device, + )), + Err(vk::Result::ERROR_DEVICE_LOST) => { + Err(w::AcquireError::DeviceLost(hal::device::DeviceLost)) + } + _ => panic!("Failed to acquire image."), + } + } +} diff --git a/third_party/rust/gfx-hal/.cargo-checksum.json b/third_party/rust/gfx-hal/.cargo-checksum.json new file mode 100644 index 000000000000..794b3bcdcefe --- /dev/null +++ b/third_party/rust/gfx-hal/.cargo-checksum.json @@ -0,0 +1 @@ +{"files":{"Cargo.toml":"20527e88deb658382bf29a8661b688313385ab3cc0cfcefa81bce47472960e59","src/adapter.rs":"7076bac0db2368278d295db347e06b6d088b5a6448fc77c68ef64b3a59c024a3","src/buffer.rs":"8068981cbe8a640520e63274d084dc8d83b1fb7b05265976a5b70366f107c86a","src/command/clear.rs":"f0f08993f966a1f82091fa2a8dc226da28946f4a04bc3eca4307079bc4b14b43","src/command/mod.rs":"2c8050344f996eae3fca10724afbc89ba8df5adad5c94341af426ec77eecaade","src/command/structs.rs":"00b5850540ae21227c6578866e86cc741d074601239f8b1bbd0342a5e5f74623","src/device.rs":"31bf744cd16d0ac7c3bef0016dd7adb99ee8e76a6c3552b47a0b8f9abad86842","src/format.rs":"6d864c354dc5af0a31dd11f379b64f37609949d38f7e7349a546ec7c6e808307","src/image.rs":"621895798256182eac069b0f5f5040fc1bd0b38d62faf19f8ee5c54964a4c6a0","src/lib.rs":"d6f5404c0d6014e7602d5dc9fca41483f7a213fa5c014405688b79e2bb961616","src/memory.rs":"a8e3b745f44e54e74cce48bb0fffd6439498a9d96163a05cec4d4e6faa3fb500","src/pass.rs":"5dc3657ed879c1da91e310cc43287b4ec8feeeee1edaad0db5242e5bd8c5cf6d","src/pool.rs":"85330ac11f6a154694353615865cfddd52feec9435e20b0ff3a8ec6c7f7fe353","src/pso/compute.rs":"fb9a5748c3b9174924db13c1c59388bcd75279ff6d40d1a068fb52e70e5ccb94","src/pso/descriptor.rs":"0e4edf70bdd2e85ed9481f836312c24077220d66f23b7caea19e7059b26b0aa0","src/pso/graphics.rs":"b572990d08112b2f76d044ee48359d82f50a3ea29bcdeecb62249fc15d7bbefb","src/pso/input_assembler.rs":"c6ac5a0e70b2efd0056a8f393b95a5159ace65f23eed0e5a32b1ffedd44c4e53","src/pso/mod.rs":"d3ab00d99ab12e10a8386aa16349f793680d7da01cf61fc46437d78d6c5902bc","src/pso/output_merger.rs":"174cceec8080a68d22f68a0a51044c16fa8a45028adc503b9a65919b68550827","src/pso/specialization.rs":"fb90dc6a34908b283514edb040293d382a3471e0c8fa0bd11ec5f98cceec5799","src/query.rs":"f8541c41370b4812c001dc3f3d4487f2832652e9ea4abac391ed8ca2d0d1d7e4","src/queue/family.rs":"80bc451a615b4643a1e0958ad8dd28c37c11801edad035fd9079fae489dfe315","src/queue/mod.rs":"19c10c5434ecbe29b35caf0bd74045e3576688c643b5020400e3a1337bc06206","src/range.rs":"94486dad94f5d7fafaaf019c7dd9715212b25447da76ea55e867f1a91a35e606","src/window.rs":"4fc9333199f5ef055498cc667a693f132092657ba0153fd5cf426a7f33562301"},"package":"977716fea7800ab5bc9a1e048dd2f72b23af166d8c2f48c6fb6d1ce37d77ca7e"} \ No newline at end of file diff --git a/third_party/rust/gfx-hal/Cargo.toml b/third_party/rust/gfx-hal/Cargo.toml new file mode 100644 index 000000000000..44a59f1e0e1e --- /dev/null +++ b/third_party/rust/gfx-hal/Cargo.toml @@ -0,0 +1,49 @@ +# THIS FILE IS AUTOMATICALLY GENERATED BY CARGO +# +# When uploading crates to the registry Cargo will automatically +# "normalize" Cargo.toml files for maximal compatibility +# with all versions of Cargo and also rewrite `path` dependencies +# to registry (e.g., crates.io) dependencies +# +# If you believe there's an error in this file please file an +# issue against the rust-lang/cargo repository. 
If you're
+# editing this file be aware that the upstream Cargo.toml
+# will likely look very different (and much more reasonable)
+
+[package]
+edition = "2018"
+name = "gfx-hal"
+version = "0.4.0"
+authors = ["The Gfx-rs Developers"]
+description = "gfx-rs hardware abstraction layer"
+homepage = "https://github.com/gfx-rs/gfx"
+documentation = "https://docs.rs/gfx-hal"
+keywords = ["graphics"]
+license = "MIT OR Apache-2.0"
+repository = "https://github.com/gfx-rs/gfx"
+
+[lib]
+name = "gfx_hal"
+path = "src/lib.rs"
+[dependencies.bitflags]
+version = "1.0"
+
+[dependencies.mint]
+version = "0.5"
+optional = true
+
+[dependencies.raw-window-handle]
+version = "0.3"
+
+[dependencies.serde]
+version = "1"
+features = ["serde_derive"]
+optional = true
+
+[dependencies.smallvec]
+version = "0.6"
+[dev-dependencies.gfx-backend-empty]
+version = "0.4"
+
+[features]
+unstable = []
diff --git a/third_party/rust/gfx-hal/src/adapter.rs b/third_party/rust/gfx-hal/src/adapter.rs
new file mode 100644
index 000000000000..525e563acee8
--- /dev/null
+++ b/third_party/rust/gfx-hal/src/adapter.rs
@@ -0,0 +1,153 @@
+
+use std::{any::Any, fmt};
+
+use crate::{
+    queue::{QueueGroup, QueuePriority},
+    device, format, image, memory, Backend, Features, Limits,
+};
+
+#[derive(Copy, Clone, Debug, PartialEq, Eq)]
+#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))]
+pub struct MemoryType {
+
+    pub properties: memory::Properties,
+
+    pub heap_index: usize,
+}
+
+#[derive(Clone, Debug, Eq, PartialEq)]
+#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))]
+pub struct MemoryProperties {
+
+    pub memory_types: Vec<MemoryType>,
+
+    pub memory_heaps: Vec<u64>,
+}
+
+#[derive(Debug)]
+pub struct Gpu<B: Backend> {
+
+    pub device: B::Device,
+
+    pub queue_groups: Vec<QueueGroup<B>>,
+}
+
+pub trait PhysicalDevice<B: Backend>: fmt::Debug + Any + Send + Sync {
+
+    unsafe fn open(
+        &self,
+        families: &[(&B::QueueFamily, &[QueuePriority])],
+        requested_features: Features,
+    ) -> Result<Gpu<B>, device::CreationError>;
+
+    fn format_properties(&self, format: Option<format::Format>) -> format::Properties;
+
+    fn image_format_properties(
+        &self,
+        format: format::Format,
+        dimensions: u8,
+        tiling: image::Tiling,
+        usage: image::Usage,
+        view_caps: image::ViewCapabilities,
+    ) -> Option<image::FormatProperties>;
+
+    fn memory_properties(&self) -> MemoryProperties;
+
+    fn features(&self) -> Features;
+
+    fn limits(&self) -> Limits;
+
+    fn is_valid_cache(&self, _cache: &[u8]) -> bool {
+        false
+    }
+}
+
+#[derive(Clone, PartialEq, Eq, Debug)]
+#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))]
+pub enum DeviceType {
+
+    Other = 0,
+
+    IntegratedGpu = 1,
+
+    DiscreteGpu = 2,
+
+    VirtualGpu = 3,
+
+    Cpu = 4,
+}
+
+#[derive(Clone, Debug, Eq, PartialEq)]
+#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))]
+pub struct AdapterInfo {
+
+    pub name: String,
+
+    pub vendor: usize,
+
+    pub device: usize,
+
+    pub device_type: DeviceType,
+}
+
+#[derive(Debug)]
+pub struct Adapter<B: Backend> {
+
+    pub info: AdapterInfo,
+
+    pub physical_device: B::PhysicalDevice,
+
+    pub queue_families: Vec<B::QueueFamily>,
+}
diff --git a/third_party/rust/gfx-hal/src/buffer.rs b/third_party/rust/gfx-hal/src/buffer.rs
new file mode 100644
index 000000000000..9ff2a4d7fbcc
--- /dev/null
+++ b/third_party/rust/gfx-hal/src/buffer.rs
@@ -0,0 +1,138 @@
+
+use crate::{device, format, Backend, IndexType};
+
+pub type Offset = u64;
+
+pub type State = Access;
+
+#[derive(Clone, Debug, PartialEq)]
+pub enum CreationError {
+    OutOfMemory(device::OutOfMemory),
+
+    UnsupportedUsage {
+
+        usage: Usage,
+    },
+}
+
+impl From<device::OutOfMemory> for CreationError {
+    fn from(error: device::OutOfMemory) -> Self {
+        CreationError::OutOfMemory(error)
+    }
+}
+
+#[derive(Clone, Debug, PartialEq)]
+pub enum ViewCreationError {
+
+    OutOfMemory(device::OutOfMemory),
+
+    UnsupportedFormat {
+
+        format: Option<format::Format>,
+    },
+}
+
+impl From<device::OutOfMemory> for ViewCreationError {
+    fn from(error: device::OutOfMemory) -> Self {
+        ViewCreationError::OutOfMemory(error)
+    }
+}
+
+bitflags!(
+    /// Buffer usage flags.
+    #[cfg_attr(feature = "serde", derive(Serialize, Deserialize))]
+    pub struct Usage: u32 {
+        ///
+        const TRANSFER_SRC = 0x1;
+        ///
+        const TRANSFER_DST = 0x2;
+        ///
+        const UNIFORM_TEXEL = 0x4;
+        ///
+        const STORAGE_TEXEL = 0x8;
+        ///
+        const UNIFORM = 0x10;
+        ///
+        const STORAGE = 0x20;
+        ///
+        const INDEX = 0x40;
+        ///
+        const VERTEX = 0x80;
+        ///
+        const INDIRECT = 0x100;
+    }
+);
+
+impl Usage {
+
+    pub fn can_transfer(&self) -> bool {
+        self.intersects(Usage::TRANSFER_SRC | Usage::TRANSFER_DST)
+    }
+}
+
+bitflags!(
+    /// Buffer access flags.
+    ///
+    /// Access of buffers by the pipeline or shaders.
+    #[cfg_attr(feature = "serde", derive(Serialize, Deserialize))]
+    pub struct Access: u32 {
+        /// Read commands instruction for indirect execution.
+        const INDIRECT_COMMAND_READ = 0x1;
+        /// Read index values for indexed draw commands.
+        ///
+        /// See [`draw_indexed`](../command/trait.RawCommandBuffer.html#tymethod.draw_indexed)
+        /// and [`draw_indexed_indirect`](../command/trait.RawCommandBuffer.html#tymethod.draw_indexed_indirect).
+        const INDEX_BUFFER_READ = 0x2;
+        /// Read vertices from vertex buffer for draw commands in the [`VERTEX_INPUT`](
+        /// ../pso/struct.PipelineStage.html#associatedconstant.VERTEX_INPUT) stage.
+ const VERTEX_BUFFER_READ = 0x4; + /// + const UNIFORM_READ = 0x8; + /// + const SHADER_READ = 0x20; + /// + const SHADER_WRITE = 0x40; + /// + const TRANSFER_READ = 0x800; + /// + const TRANSFER_WRITE = 0x1000; + /// + const HOST_READ = 0x2000; + /// + const HOST_WRITE = 0x4000; + /// + const MEMORY_READ = 0x8000; + /// + const MEMORY_WRITE = 0x10000; + } +); + + + + + +#[derive(Debug)] +pub struct IndexBufferView<'a, B: Backend> { + + pub buffer: &'a B::Buffer, + + pub offset: u64, + + pub index_type: IndexType, +} diff --git a/third_party/rust/gfx-hal/src/command/clear.rs b/third_party/rust/gfx-hal/src/command/clear.rs new file mode 100644 index 000000000000..7257a494dd9c --- /dev/null +++ b/third_party/rust/gfx-hal/src/command/clear.rs @@ -0,0 +1,70 @@ +use crate::pso; +use std::fmt; + + +#[repr(C)] +#[derive(Clone, Copy)] +pub union ClearColor { + + pub float32: [f32; 4], + + pub sint32: [i32; 4], + + pub uint32: [u32; 4], +} + +impl fmt::Debug for ClearColor { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + writeln![f, "ClearColor"] + } +} + + +#[repr(C)] +#[derive(Clone, Copy, Debug)] +#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))] +pub struct ClearDepthStencil { + + pub depth: f32, + + pub stencil: u32, +} + + +#[repr(C)] +#[derive(Clone, Copy)] +pub union ClearValue { + + pub color: ClearColor, + + pub depth_stencil: ClearDepthStencil, + _align: [u32; 4], +} + +impl fmt::Debug for ClearValue { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + f.debug_struct("ClearValue") + .field("color", unsafe { &self.color.uint32 }) + .field("depth_stencil", unsafe { &self.depth_stencil }) + .finish() + } +} + + +#[derive(Clone, Copy, Debug)] +pub enum AttachmentClear { + + Color { + + index: usize, + + value: ClearColor, + }, + + DepthStencil { + + depth: Option, + + stencil: Option, + }, +} diff --git a/third_party/rust/gfx-hal/src/command/mod.rs b/third_party/rust/gfx-hal/src/command/mod.rs new file mode 100644 index 000000000000..b4242d773805 --- /dev/null +++ b/third_party/rust/gfx-hal/src/command/mod.rs @@ -0,0 +1,564 @@ + + + + + + + + + + + + + + + +mod clear; +mod structs; + +use std::any::Any; +use std::borrow::Borrow; +use std::fmt; +use std::ops::Range; + +use crate::image::{Filter, Layout, SubresourceRange}; +use crate::memory::{Barrier, Dependencies}; +use crate::range::RangeArg; +use crate::{buffer, pass, pso, query}; +use crate::{ + Backend, + DrawCount, + IndexCount, + InstanceCount, + VertexCount, + VertexOffset, + WorkGroupCount, +}; + +pub use self::clear::*; +pub use self::structs::*; + + + +pub type DescriptorSetOffset = u32; + +bitflags! { + /// Option flags for various command buffer settings. + #[derive(Default)] + pub struct CommandBufferFlags: u32 { + // TODO: Remove once 'const fn' is stabilized: https://github.com/rust-lang/rust/issues/24111 + /// No flags. + const EMPTY = 0x0; + + /// Says that the command buffer will be recorded, submitted only once, and then reset and re-filled + /// for another submission. + const ONE_TIME_SUBMIT = 0x1; + + /// If set on a secondary command buffer, it says the command buffer takes place entirely inside + /// a render pass. Ignored on primary command buffer. + const RENDER_PASS_CONTINUE = 0x2; + + /// Says that a command buffer can be recorded into multiple primary command buffers, + /// and submitted to a queue while it is still pending. 
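// Illustrative sketch (not from the vendored crates): ClearValue, defined in
// clear.rs above, is a C-style union, so a caller writes exactly one member
// and must match it to the attachment (color vs. depth-stencil).
fn default_clears() -> [gfx_hal::command::ClearValue; 2] {
    use gfx_hal::command::{ClearColor, ClearDepthStencil, ClearValue};
    [
        ClearValue {
            color: ClearColor { float32: [0.0, 0.0, 0.0, 1.0] },
        },
        ClearValue {
            depth_stencil: ClearDepthStencil { depth: 1.0, stencil: 0 },
        },
    ]
}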
+ const SIMULTANEOUS_USE = 0x4; + } +} + + + + +#[allow(missing_docs)] +#[derive(Clone, Copy, Debug, PartialEq)] +pub enum Level { + Primary, + Secondary, +} + + +#[derive(Debug)] +pub enum SubpassContents { + + + Inline, + + + + SecondaryBuffers, +} + +#[allow(missing_docs)] +#[derive(Debug)] +pub struct CommandBufferInheritanceInfo<'a, B: Backend> { + pub subpass: Option>, + pub framebuffer: Option<&'a B::Framebuffer>, + pub occlusion_query_enable: bool, + pub occlusion_query_flags: query::ControlFlags, + pub pipeline_statistics: query::PipelineStatistic, +} + +impl<'a, B: Backend> Default for CommandBufferInheritanceInfo<'a, B> { + fn default() -> Self { + CommandBufferInheritanceInfo { + subpass: None, + framebuffer: None, + occlusion_query_enable: false, + occlusion_query_flags: query::ControlFlags::empty(), + pipeline_statistics: query::PipelineStatistic::empty(), + } + } +} + + + +pub trait CommandBuffer: fmt::Debug + Any + Send + Sync { + + unsafe fn begin( + &mut self, + flags: CommandBufferFlags, + inheritance_info: CommandBufferInheritanceInfo, + ); + + + + unsafe fn begin_primary(&mut self, flags: CommandBufferFlags) { + self.begin(flags, CommandBufferInheritanceInfo::default()); + } + + + unsafe fn finish(&mut self); + + + + unsafe fn reset(&mut self, release_resources: bool); + + + + + + unsafe fn pipeline_barrier<'a, T>( + &mut self, + stages: Range, + dependencies: Dependencies, + barriers: T, + ) where + T: IntoIterator, + T::Item: Borrow>; + + + unsafe fn fill_buffer(&mut self, buffer: &B::Buffer, range: R, data: u32) + where + R: RangeArg; + + + unsafe fn update_buffer(&mut self, buffer: &B::Buffer, offset: buffer::Offset, data: &[u8]); + + + unsafe fn clear_image( + &mut self, + image: &B::Image, + layout: Layout, + value: ClearValue, + subresource_ranges: T, + ) where + T: IntoIterator, + T::Item: Borrow; + + + + unsafe fn clear_attachments(&mut self, clears: T, rects: U) + where + T: IntoIterator, + T::Item: Borrow, + U: IntoIterator, + U::Item: Borrow; + + + + unsafe fn resolve_image( + &mut self, + src: &B::Image, + src_layout: Layout, + dst: &B::Image, + dst_layout: Layout, + regions: T, + ) where + T: IntoIterator, + T::Item: Borrow; + + + + unsafe fn blit_image( + &mut self, + src: &B::Image, + src_layout: Layout, + dst: &B::Image, + dst_layout: Layout, + filter: Filter, + regions: T, + ) where + T: IntoIterator, + T::Item: Borrow; + + + + unsafe fn bind_index_buffer(&mut self, view: buffer::IndexBufferView); + + + + + + + + + + + + + + + + + + unsafe fn bind_vertex_buffers(&mut self, first_binding: pso::BufferIndex, buffers: I) + where + I: IntoIterator, + T: Borrow; + + + + + + + + + + + + + + + + + + + unsafe fn set_viewports(&mut self, first_viewport: u32, viewports: T) + where + T: IntoIterator, + T::Item: Borrow; + + + + + + + + + + + + + + + + + + + unsafe fn set_scissors(&mut self, first_scissor: u32, rects: T) + where + T: IntoIterator, + T::Item: Borrow; + + + + + unsafe fn set_stencil_reference(&mut self, faces: pso::Face, value: pso::StencilValue); + + + unsafe fn set_stencil_read_mask(&mut self, faces: pso::Face, value: pso::StencilValue); + + + unsafe fn set_stencil_write_mask(&mut self, faces: pso::Face, value: pso::StencilValue); + + + unsafe fn set_blend_constants(&mut self, color: pso::ColorValue); + + + unsafe fn set_depth_bounds(&mut self, bounds: Range); + + + unsafe fn set_line_width(&mut self, width: f32); + + + unsafe fn set_depth_bias(&mut self, depth_bias: pso::DepthBias); + + + + + + + + + + unsafe fn begin_render_pass( + &mut self, + 
render_pass: &B::RenderPass, + framebuffer: &B::Framebuffer, + render_area: pso::Rect, + clear_values: T, + first_subpass: SubpassContents, + ) where + T: IntoIterator, + T::Item: Borrow; + + + unsafe fn next_subpass(&mut self, contents: SubpassContents); + + + unsafe fn end_render_pass(&mut self); + + + + + + + + + + + unsafe fn bind_graphics_pipeline(&mut self, pipeline: &B::GraphicsPipeline); + + + + unsafe fn bind_graphics_descriptor_sets( + &mut self, + layout: &B::PipelineLayout, + first_set: usize, + sets: I, + offsets: J, + ) where + I: IntoIterator, + I::Item: Borrow, + J: IntoIterator, + J::Item: Borrow; + + + + + + + + + + + unsafe fn bind_compute_pipeline(&mut self, pipeline: &B::ComputePipeline); + + + + unsafe fn bind_compute_descriptor_sets( + &mut self, + layout: &B::PipelineLayout, + first_set: usize, + sets: I, + offsets: J, + ) where + I: IntoIterator, + I::Item: Borrow, + J: IntoIterator, + J::Item: Borrow; + + + + + + + + + + + + + + + + + + unsafe fn dispatch(&mut self, count: WorkGroupCount); + + + + unsafe fn dispatch_indirect(&mut self, buffer: &B::Buffer, offset: buffer::Offset); + + + unsafe fn copy_buffer(&mut self, src: &B::Buffer, dst: &B::Buffer, regions: T) + where + T: IntoIterator, + T::Item: Borrow; + + + + + + unsafe fn copy_image( + &mut self, + src: &B::Image, + src_layout: Layout, + dst: &B::Image, + dst_layout: Layout, + regions: T, + ) where + T: IntoIterator, + T::Item: Borrow; + + + unsafe fn copy_buffer_to_image( + &mut self, + src: &B::Buffer, + dst: &B::Image, + dst_layout: Layout, + regions: T, + ) where + T: IntoIterator, + T::Item: Borrow; + + + unsafe fn copy_image_to_buffer( + &mut self, + src: &B::Image, + src_layout: Layout, + dst: &B::Buffer, + regions: T, + ) where + T: IntoIterator, + T::Item: Borrow; + + + + + + + unsafe fn draw(&mut self, vertices: Range, instances: Range); + + + + + + + + unsafe fn draw_indexed( + &mut self, + indices: Range, + base_vertex: VertexOffset, + instances: Range, + ); + + + + + + + + + + + unsafe fn draw_indirect( + &mut self, + buffer: &B::Buffer, + offset: buffer::Offset, + draw_count: DrawCount, + stride: u32, + ); + + + + + + + + + unsafe fn draw_indexed_indirect( + &mut self, + buffer: &B::Buffer, + offset: buffer::Offset, + draw_count: DrawCount, + stride: u32, + ); + + + unsafe fn set_event(&mut self, event: &B::Event, stages: pso::PipelineStage); + + + unsafe fn reset_event(&mut self, event: &B::Event, stages: pso::PipelineStage); + + + + + + + + unsafe fn wait_events<'a, I, J>( + &mut self, + events: I, + stages: Range, + barriers: J, + ) where + I: IntoIterator, + I::Item: Borrow, + J: IntoIterator, + J::Item: Borrow>; + + + + + unsafe fn begin_query(&mut self, query: query::Query, flags: query::ControlFlags); + + + unsafe fn end_query(&mut self, query: query::Query); + + + unsafe fn reset_query_pool(&mut self, pool: &B::QueryPool, queries: Range); + + + unsafe fn copy_query_pool_results( + &mut self, + pool: &B::QueryPool, + queries: Range, + buffer: &B::Buffer, + offset: buffer::Offset, + stride: buffer::Offset, + flags: query::ResultFlags, + ); + + + unsafe fn write_timestamp(&mut self, stage: pso::PipelineStage, query: query::Query); + + + + + + + unsafe fn push_graphics_constants( + &mut self, + layout: &B::PipelineLayout, + stages: pso::ShaderStageFlags, + offset: u32, + constants: &[u32], + ); + + + + + + + unsafe fn push_compute_constants( + &mut self, + layout: &B::PipelineLayout, + offset: u32, + constants: &[u32], + ); + + + unsafe fn execute_commands<'a, T, I>(&mut self, 
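// Illustrative sketch (not from the vendored crates): a minimal recording
// sequence over the CommandBuffer trait methods declared above; all handles
// and the render area are assumed to be created elsewhere.
unsafe fn record_triangle_pass<B: gfx_hal::Backend>(
    cmd: &mut B::CommandBuffer,
    render_pass: &B::RenderPass,
    framebuffer: &B::Framebuffer,
    pipeline: &B::GraphicsPipeline,
    area: gfx_hal::pso::Rect,
    clear: gfx_hal::command::ClearValue,
) {
    use gfx_hal::command::{CommandBuffer as _, CommandBufferFlags, SubpassContents};
    cmd.begin_primary(CommandBufferFlags::ONE_TIME_SUBMIT);
    cmd.begin_render_pass(render_pass, framebuffer, area, &[clear], SubpassContents::Inline);
    cmd.bind_graphics_pipeline(pipeline);
    // Three vertices, one instance; vertex data is generated in the shader.
    cmd.draw(0 .. 3, 0 .. 1);
    cmd.end_render_pass();
    cmd.finish();
}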
+    unsafe fn execute_commands<'a, T, I>(&mut self, cmd_buffers: I)
+    where
+        T: 'a + Borrow<B::CommandBuffer>,
+        I: IntoIterator<Item = &'a T>;
+}
diff --git a/third_party/rust/gfx-hal/src/command/structs.rs b/third_party/rust/gfx-hal/src/command/structs.rs
new file mode 100644
index 000000000000..b6ae124e17a2
--- /dev/null
+++ b/third_party/rust/gfx-hal/src/command/structs.rs
@@ -0,0 +1,86 @@
+use crate::{buffer, image};
+
+use std::ops::Range;
+
+#[derive(Clone, Copy, Debug)]
+#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))]
+pub struct BufferCopy {
+    pub src: buffer::Offset,
+    pub dst: buffer::Offset,
+    pub size: buffer::Offset,
+}
+
+#[derive(Clone, Debug)]
+#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))]
+pub struct ImageCopy {
+    pub src_subresource: image::SubresourceLayers,
+    pub src_offset: image::Offset,
+    pub dst_subresource: image::SubresourceLayers,
+    pub dst_offset: image::Offset,
+    pub extent: image::Extent,
+}
+
+#[derive(Clone, Debug)]
+#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))]
+pub struct BufferImageCopy {
+    pub buffer_offset: buffer::Offset,
+    pub buffer_width: u32,
+    pub buffer_height: u32,
+    pub image_layers: image::SubresourceLayers,
+    pub image_offset: image::Offset,
+    pub image_extent: image::Extent,
+}
+
+#[derive(Clone, Debug)]
+#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))]
+pub struct ImageResolve {
+    pub src_subresource: image::SubresourceLayers,
+    pub src_offset: image::Offset,
+    pub dst_subresource: image::SubresourceLayers,
+    pub dst_offset: image::Offset,
+    pub extent: image::Extent,
+}
+
+#[derive(Clone, Debug)]
+#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))]
+pub struct ImageBlit {
+    pub src_subresource: image::SubresourceLayers,
+    pub src_bounds: Range<image::Offset>,
+    pub dst_subresource: image::SubresourceLayers,
+    pub dst_bounds: Range<image::Offset>,
+}
diff --git a/third_party/rust/gfx-hal/src/device.rs b/third_party/rust/gfx-hal/src/device.rs
new file mode 100644
index 000000000000..588027231981
--- /dev/null
+++ b/third_party/rust/gfx-hal/src/device.rs
@@ -0,0 +1,793 @@
+use std::any::Any;
+use std::borrow::Borrow;
+use std::ops::Range;
+use std::{fmt, iter};
+
+use crate::{
+    buffer, format, image, pass, pso, query,
+    memory::Requirements,
+    pool::CommandPoolCreateFlags,
+    pso::DescriptorPoolCreateFlags,
+    queue::QueueFamilyId,
+    range::RangeArg,
+    window::{self, SwapchainConfig},
+    Backend, MemoryTypeId,
+};
+
+#[derive(Clone, Debug, PartialEq)]
+pub struct DeviceLost;
+
+#[derive(Clone, Debug, PartialEq)]
+pub struct SurfaceLost;
+
+#[derive(Clone, Debug, PartialEq)]
+pub struct WindowInUse;
+
+#[derive(Clone, Debug, PartialEq)]
+pub enum OutOfMemory {
+    Host,
+    Device,
+}
+
+#[derive(Clone, Debug, PartialEq)]
+pub enum OomOrDeviceLost {
+    OutOfMemory(OutOfMemory),
+    DeviceLost(DeviceLost),
+}
+
+impl From<OutOfMemory> for OomOrDeviceLost {
+    fn from(error: OutOfMemory) -> Self {
+        OomOrDeviceLost::OutOfMemory(error)
+    }
+}
+
+impl From<DeviceLost> for OomOrDeviceLost {
+    fn from(error: DeviceLost) -> Self {
+        OomOrDeviceLost::DeviceLost(error)
+    }
+}
+
+#[derive(Clone, Debug, PartialEq)]
+pub enum AllocationError {
+    OutOfMemory(OutOfMemory),
+    TooManyObjects,
+}
+
+impl From<OutOfMemory> for AllocationError {
+    fn from(error: OutOfMemory) -> Self {
+        AllocationError::OutOfMemory(error)
+    }
+}
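
These `From` conversions are what let backend code widen a narrow `OutOfMemory` into the composite error enums with the `?` operator. A minimal sketch (not part of the vendored file; the helper function is hypothetical):

```rust
use gfx_hal::device::{AllocationError, OutOfMemory};

// Hypothetical helper: `?` converts OutOfMemory into AllocationError
// through the `impl From<OutOfMemory> for AllocationError` above.
fn check_budget(left: u64, needed: u64) -> Result<(), AllocationError> {
    if left < needed {
        // Construct the narrow error and let `?` do the widening.
        Err(OutOfMemory::Device)?;
    }
    Ok(())
}
```
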
+#[derive(Clone, Debug, PartialEq)]
+pub enum CreationError {
+    OutOfMemory(OutOfMemory),
+    InitializationFailed,
+    MissingExtension,
+    MissingFeature,
+    TooManyObjects,
+    DeviceLost,
+}
+
+#[derive(Clone, Debug, PartialEq)]
+pub enum MapError {
+    OutOfMemory(OutOfMemory),
+    OutOfBounds,
+    MappingFailed,
+}
+
+impl From<OutOfMemory> for MapError {
+    fn from(error: OutOfMemory) -> Self {
+        MapError::OutOfMemory(error)
+    }
+}
+
+#[derive(Clone, Debug, PartialEq)]
+pub enum BindError {
+    OutOfMemory(OutOfMemory),
+    WrongMemory,
+    OutOfBounds,
+}
+
+impl From<OutOfMemory> for BindError {
+    fn from(error: OutOfMemory) -> Self {
+        BindError::OutOfMemory(error)
+    }
+}
+
+#[derive(Clone, Debug, PartialEq)]
+#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))]
+pub enum WaitFor {
+    Any,
+    All,
+}
+
+#[derive(Clone, Debug, PartialEq)]
+pub enum ShaderError {
+    CompilationFailed(String),
+    MissingEntryPoint(String),
+    InterfaceMismatch(String),
+    UnsupportedStage(pso::Stage),
+    OutOfMemory(OutOfMemory),
+}
+
+impl From<OutOfMemory> for ShaderError {
+    fn from(error: OutOfMemory) -> Self {
+        ShaderError::OutOfMemory(error)
+    }
+}
+
+pub trait Device<B: Backend>: fmt::Debug + Any + Send + Sync {
+    unsafe fn allocate_memory(
+        &self,
+        memory_type: MemoryTypeId,
+        size: u64,
+    ) -> Result<B::Memory, AllocationError>;
+
+    unsafe fn free_memory(&self, memory: B::Memory);
+
+    unsafe fn create_command_pool(
+        &self,
+        family: QueueFamilyId,
+        create_flags: CommandPoolCreateFlags,
+    ) -> Result<B::CommandPool, OutOfMemory>;
+
+    unsafe fn destroy_command_pool(&self, pool: B::CommandPool);
+
+    unsafe fn create_render_pass<'a, IA, IS, ID>(
+        &self,
+        attachments: IA,
+        subpasses: IS,
+        dependencies: ID,
+    ) -> Result<B::RenderPass, OutOfMemory>
+    where
+        IA: IntoIterator,
+        IA::Item: Borrow<pass::Attachment>,
+        IS: IntoIterator,
+        IS::Item: Borrow<pass::SubpassDesc<'a>>,
+        ID: IntoIterator,
+        ID::Item: Borrow<pass::SubpassDependency>;
+
+    unsafe fn destroy_render_pass(&self, rp: B::RenderPass);
+
+    unsafe fn create_pipeline_layout<IS, IR>(
+        &self,
+        set_layouts: IS,
+        push_constant: IR,
+    ) -> Result<B::PipelineLayout, OutOfMemory>
+    where
+        IS: IntoIterator,
+        IS::Item: Borrow<B::DescriptorSetLayout>,
+        IR: IntoIterator,
+        IR::Item: Borrow<(pso::ShaderStageFlags, Range<u32>)>;
+
+    unsafe fn destroy_pipeline_layout(&self, layout: B::PipelineLayout);
+
+    unsafe fn create_pipeline_cache(
+        &self,
+        data: Option<&[u8]>,
+    ) -> Result<B::PipelineCache, OutOfMemory>;
+
+    unsafe fn get_pipeline_cache_data(
+        &self,
+        cache: &B::PipelineCache,
+    ) -> Result<Vec<u8>, OutOfMemory>;
+
+    unsafe fn merge_pipeline_caches<I>(
+        &self,
+        target: &B::PipelineCache,
+        sources: I,
+    ) -> Result<(), OutOfMemory>
+    where
+        I: IntoIterator,
+        I::Item: Borrow<B::PipelineCache>;
+
+    unsafe fn destroy_pipeline_cache(&self, cache: B::PipelineCache);
+
+    unsafe fn create_graphics_pipeline<'a>(
+        &self,
+        desc: &pso::GraphicsPipelineDesc<'a, B>,
+        cache: Option<&B::PipelineCache>,
+    ) -> Result<B::GraphicsPipeline, pso::CreationError>;
+
+    unsafe fn create_graphics_pipelines<'a, I>(
+        &self,
+        descs: I,
+        cache: Option<&B::PipelineCache>,
+    ) -> Vec<Result<B::GraphicsPipeline, pso::CreationError>>
+    where
+        I: IntoIterator,
+        I::Item: Borrow<pso::GraphicsPipelineDesc<'a, B>>,
+    {
+        descs
+            .into_iter()
+            .map(|desc| self.create_graphics_pipeline(desc.borrow(), cache))
+            .collect()
+    }
+
+    unsafe fn destroy_graphics_pipeline(&self, pipeline: B::GraphicsPipeline);
+
+    unsafe fn create_compute_pipeline<'a>(
+        &self,
+        desc: &pso::ComputePipelineDesc<'a, B>,
+        cache: Option<&B::PipelineCache>,
+    ) -> Result<B::ComputePipeline, pso::CreationError>;
+
+    unsafe fn create_compute_pipelines<'a, I>(
+        &self,
+        descs: I,
+        cache: Option<&B::PipelineCache>,
+    ) -> Vec<Result<B::ComputePipeline, pso::CreationError>>
+    where
+        I: IntoIterator,
+        I::Item: Borrow<pso::ComputePipelineDesc<'a, B>>,
+    {
+        descs
+            .into_iter()
+            .map(|desc| self.create_compute_pipeline(desc.borrow(), cache))
+            .collect()
+    }
+
+    unsafe fn destroy_compute_pipeline(&self, pipeline: B::ComputePipeline);
+
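
For orientation, here is how the pipeline methods above compose with the shader-module methods that follow, in backend-agnostic user code. A minimal sketch (not part of the vendored file), assuming this crate's `pso::EntryPoint` and `pso::ComputePipelineDesc::new`; error handling is collapsed into `expect`:

```rust
use gfx_hal::{device::Device, pso, Backend};

// Hypothetical helper: build a compute pipeline from SPIR-V words and a
// pre-built pipeline layout, then free the intermediate shader module.
unsafe fn make_compute_pipeline<B: Backend>(
    device: &B::Device,
    spirv: &[u32],
    layout: &B::PipelineLayout,
) -> B::ComputePipeline {
    let module = device.create_shader_module(spirv).expect("bad SPIR-V");
    let entry = pso::EntryPoint {
        entry: "main",
        module: &module,
        specialization: pso::Specialization::default(),
    };
    let pipeline = device
        .create_compute_pipeline(&pso::ComputePipelineDesc::new(entry, layout), None)
        .expect("pipeline creation failed");
    // The module is only needed at creation time and can be destroyed here.
    device.destroy_shader_module(module);
    pipeline
}
```
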
+    unsafe fn create_framebuffer<I>(
+        &self,
+        pass: &B::RenderPass,
+        attachments: I,
+        extent: image::Extent,
+    ) -> Result<B::Framebuffer, OutOfMemory>
+    where
+        I: IntoIterator,
+        I::Item: Borrow<B::ImageView>;
+
+    unsafe fn destroy_framebuffer(&self, buf: B::Framebuffer);
+
+    unsafe fn create_shader_module(
+        &self,
+        spirv_data: &[u32],
+    ) -> Result<B::ShaderModule, ShaderError>;
+
+    unsafe fn destroy_shader_module(&self, shader: B::ShaderModule);
+
+    unsafe fn create_buffer(
+        &self,
+        size: u64,
+        usage: buffer::Usage,
+    ) -> Result<B::Buffer, buffer::CreationError>;
+
+    unsafe fn get_buffer_requirements(&self, buf: &B::Buffer) -> Requirements;
+
+    unsafe fn bind_buffer_memory(
+        &self,
+        memory: &B::Memory,
+        offset: u64,
+        buf: &mut B::Buffer,
+    ) -> Result<(), BindError>;
+
+    unsafe fn destroy_buffer(&self, buffer: B::Buffer);
+
+    unsafe fn create_buffer_view<R: RangeArg<u64>>(
+        &self,
+        buf: &B::Buffer,
+        fmt: Option<format::Format>,
+        range: R,
+    ) -> Result<B::BufferView, buffer::ViewCreationError>;
+
+    unsafe fn destroy_buffer_view(&self, view: B::BufferView);
+
+    unsafe fn create_image(
+        &self,
+        kind: image::Kind,
+        mip_levels: image::Level,
+        format: format::Format,
+        tiling: image::Tiling,
+        usage: image::Usage,
+        view_caps: image::ViewCapabilities,
+    ) -> Result<B::Image, image::CreationError>;
+
+    unsafe fn get_image_requirements(&self, image: &B::Image) -> Requirements;
+
+    unsafe fn get_image_subresource_footprint(
+        &self,
+        image: &B::Image,
+        subresource: image::Subresource,
+    ) -> image::SubresourceFootprint;
+
+    unsafe fn bind_image_memory(
+        &self,
+        memory: &B::Memory,
+        offset: u64,
+        image: &mut B::Image,
+    ) -> Result<(), BindError>;
+
+    unsafe fn destroy_image(&self, image: B::Image);
+
+    unsafe fn create_image_view(
+        &self,
+        image: &B::Image,
+        view_kind: image::ViewKind,
+        format: format::Format,
+        swizzle: format::Swizzle,
+        range: image::SubresourceRange,
+    ) -> Result<B::ImageView, image::ViewError>;
+
+    unsafe fn destroy_image_view(&self, view: B::ImageView);
+
+    unsafe fn create_sampler(
+        &self,
+        desc: &image::SamplerDesc,
+    ) -> Result<B::Sampler, AllocationError>;
+
+    unsafe fn destroy_sampler(&self, sampler: B::Sampler);
+
+    unsafe fn create_descriptor_pool<I>(
+        &self,
+        max_sets: usize,
+        descriptor_ranges: I,
+        flags: DescriptorPoolCreateFlags,
+    ) -> Result<B::DescriptorPool, OutOfMemory>
+    where
+        I: IntoIterator,
+        I::Item: Borrow<pso::DescriptorRangeDesc>;
+
+    unsafe fn destroy_descriptor_pool(&self, pool: B::DescriptorPool);
+
+    unsafe fn create_descriptor_set_layout<I, J>(
+        &self,
+        bindings: I,
+        immutable_samplers: J,
+    ) -> Result<B::DescriptorSetLayout, OutOfMemory>
+    where
+        I: IntoIterator,
+        I::Item: Borrow<pso::DescriptorSetLayoutBinding>,
+        J: IntoIterator,
+        J::Item: Borrow<B::Sampler>;
+
+    unsafe fn destroy_descriptor_set_layout(&self, layout: B::DescriptorSetLayout);
+
+    unsafe fn write_descriptor_sets<'a, I, J>(&self, write_iter: I)
+    where
+        I: IntoIterator<Item = pso::DescriptorSetWrite<'a, B, J>>,
+        J: IntoIterator,
+        J::Item: Borrow<pso::Descriptor<'a, B>>;
+
+    unsafe fn copy_descriptor_sets<'a, I>(&self, copy_iter: I)
+    where
+        I: IntoIterator,
+        I::Item: Borrow<pso::DescriptorSetCopy<'a, B>>;
+
+    unsafe fn map_memory<R>(&self, memory: &B::Memory, range: R) -> Result<*mut u8, MapError>
+    where
+        R: RangeArg<u64>;
+
+    unsafe fn flush_mapped_memory_ranges<'a, I, R>(&self, ranges: I) -> Result<(), OutOfMemory>
+    where
+        I: IntoIterator,
+        I::Item: Borrow<(&'a B::Memory, R)>,
+        R: RangeArg<u64>;
+
+    unsafe fn invalidate_mapped_memory_ranges<'a, I, R>(
+        &self,
+        ranges: I,
+    ) -> Result<(), OutOfMemory>
+    where
+        I: IntoIterator,
+        I::Item: Borrow<(&'a B::Memory, R)>,
+        R: RangeArg<u64>;
+
+    unsafe fn unmap_memory(&self, memory: &B::Memory);
+
+    fn create_semaphore(&self) -> Result<B::Semaphore, OutOfMemory>;
+
+    unsafe fn destroy_semaphore(&self, semaphore: B::Semaphore);
+
+    fn create_fence(&self, signaled: bool) -> Result<B::Fence, OutOfMemory>;
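
A minimal usage sketch (not part of the vendored file) of the fence lifecycle: `create_fence` above plus the wait/reset/destroy methods that follow. Note that `wait_for_fence` reports a timeout as `Ok(false)` rather than as an error:

```rust
use gfx_hal::{device::Device, Backend};

// Hypothetical helper: create a fence, wait on it with a one-second
// timeout, then recycle and destroy it. The queue submission that
// signals the fence is elided.
unsafe fn fence_roundtrip<B: Backend>(device: &B::Device) {
    let fence = device.create_fence(false).expect("out of memory");
    // ... submit work that signals `fence` here ...
    match device.wait_for_fence(&fence, 1_000_000_000) {
        Ok(true) => {}                                  // signaled in time
        Ok(false) => eprintln!("fence wait timed out"), // timeout, not an error
        Err(e) => eprintln!("device error: {:?}", e),   // OOM or device loss
    }
    device.reset_fence(&fence).expect("out of memory");
    device.destroy_fence(fence);
}
```
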
+    unsafe fn reset_fence(&self, fence: &B::Fence) -> Result<(), OutOfMemory> {
+        self.reset_fences(iter::once(fence))
+    }
+
+    unsafe fn reset_fences<I>(&self, fences: I) -> Result<(), OutOfMemory>
+    where
+        I: IntoIterator,
+        I::Item: Borrow<B::Fence>,
+    {
+        for fence in fences {
+            self.reset_fence(fence.borrow())?;
+        }
+        Ok(())
+    }
+
+    unsafe fn wait_for_fence(
+        &self,
+        fence: &B::Fence,
+        timeout_ns: u64,
+    ) -> Result<bool, OomOrDeviceLost> {
+        self.wait_for_fences(iter::once(fence), WaitFor::All, timeout_ns)
+    }
+
+    unsafe fn wait_for_fences<I>(
+        &self,
+        fences: I,
+        wait: WaitFor,
+        timeout_ns: u64,
+    ) -> Result<bool, OomOrDeviceLost>
+    where
+        I: IntoIterator,
+        I::Item: Borrow<B::Fence>,
+    {
+        use std::{thread, time};
+        fn to_ns(duration: time::Duration) -> u64 {
+            duration.as_secs() * 1_000_000_000 + duration.subsec_nanos() as u64
+        }
+
+        let start = time::Instant::now();
+        match wait {
+            WaitFor::All => {
+                for fence in fences {
+                    if !self.wait_for_fence(fence.borrow(), 0)? {
+                        let elapsed_ns = to_ns(start.elapsed());
+                        if elapsed_ns > timeout_ns {
+                            return Ok(false);
+                        }
+                        if !self.wait_for_fence(fence.borrow(), timeout_ns - elapsed_ns)? {
+                            return Ok(false);
+                        }
+                    }
+                }
+                Ok(true)
+            }
+            WaitFor::Any => {
+                let fences: Vec<_> = fences.into_iter().collect();
+                loop {
+                    for fence in &fences {
+                        if self.wait_for_fence(fence.borrow(), 0)? {
+                            return Ok(true);
+                        }
+                    }
+                    if to_ns(start.elapsed()) >= timeout_ns {
+                        return Ok(false);
+                    }
+                    thread::sleep(time::Duration::from_millis(1));
+                }
+            }
+        }
+    }
+
+    unsafe fn get_fence_status(&self, fence: &B::Fence) -> Result<bool, DeviceLost>;
+
+    unsafe fn destroy_fence(&self, fence: B::Fence);
+
+    fn create_event(&self) -> Result<B::Event, OutOfMemory>;
+
+    unsafe fn destroy_event(&self, event: B::Event);
+
+    unsafe fn get_event_status(&self, event: &B::Event) -> Result<bool, OomOrDeviceLost>;
+
+    unsafe fn set_event(&self, event: &B::Event) -> Result<(), OutOfMemory>;
+
+    unsafe fn reset_event(&self, event: &B::Event) -> Result<(), OutOfMemory>;
+
+    unsafe fn create_query_pool(
+        &self,
+        ty: query::Type,
+        count: query::Id,
+    ) -> Result<B::QueryPool, query::CreationError>;
+
+    unsafe fn destroy_query_pool(&self, pool: B::QueryPool);
+
+    unsafe fn get_query_pool_results(
+        &self,
+        pool: &B::QueryPool,
+        queries: Range<query::Id>,
+        data: &mut [u8],
+        stride: buffer::Offset,
+        flags: query::ResultFlags,
+    ) -> Result<bool, OomOrDeviceLost>;
+
+    unsafe fn create_swapchain(
+        &self,
+        surface: &mut B::Surface,
+        config: SwapchainConfig,
+        old_swapchain: Option<B::Swapchain>,
+    ) -> Result<(B::Swapchain, Vec<B::Image>), window::CreationError>;
+
+    unsafe fn destroy_swapchain(&self, swapchain: B::Swapchain);
+
+    fn wait_idle(&self) -> Result<(), OutOfMemory>;
+
+    unsafe fn set_image_name(&self, image: &mut B::Image, name: &str);
+
+    unsafe fn set_buffer_name(&self, buffer: &mut B::Buffer, name: &str);
+
+    unsafe fn set_command_buffer_name(&self, command_buffer: &mut B::CommandBuffer, name: &str);
+
+    unsafe fn set_semaphore_name(&self, semaphore: &mut B::Semaphore, name: &str);
+
+    unsafe fn set_fence_name(&self, fence: &mut B::Fence, name: &str);
+
+    unsafe fn set_framebuffer_name(&self, framebuffer: &mut B::Framebuffer, name: &str);
+
+    unsafe fn set_render_pass_name(&self, render_pass: &mut B::RenderPass, name: &str);
+
+    unsafe fn set_descriptor_set_name(&self, descriptor_set: &mut B::DescriptorSet, name: &str);
+
+    unsafe fn set_descriptor_set_layout_name(
+        &self,
+        descriptor_set_layout: &mut B::DescriptorSetLayout,
+        name: &str,
+    );
+}
diff --git a/third_party/rust/gfx-hal/src/format.rs b/third_party/rust/gfx-hal/src/format.rs
new
file mode 100644 index 000000000000..1972583a8c1b --- /dev/null +++ b/third_party/rust/gfx-hal/src/format.rs @@ -0,0 +1,623 @@ + + + + + + + + + + + +bitflags!( + /// Bitflags which describe what properties of an image + /// a format specifies or does not specify. For example, + /// the `Rgba8Unorm` format only specifies a `COLOR` aspect, + /// while `D32SfloatS8Uint` specifies both a depth and stencil + /// aspect but no color. + #[cfg_attr(feature = "serde", derive(Serialize, Deserialize))] + pub struct Aspects: u8 { + /// Color aspect. + const COLOR = 0x1; + /// Depth aspect. + const DEPTH = 0x2; + /// Stencil aspect. + const STENCIL = 0x4; + } +); + + +#[derive(Clone, Copy, Debug, Eq, Hash, Ord, PartialEq, PartialOrd)] +pub struct FormatDesc { + + + + + + + + + pub bits: u16, + + pub dim: (u8, u8), + + + + + pub packed: bool, + + pub aspects: Aspects, +} + +impl FormatDesc { + + pub fn is_compressed(&self) -> bool { + self.dim != (1, 1) + } +} + + +#[derive(Clone, Copy, Debug, Eq, Hash, Ord, PartialEq, PartialOrd)] +pub struct FormatBits { + + + + pub color: u8, + + + + pub alpha: u8, + + pub depth: u8, + + pub stencil: u8, +} + + +pub const BITS_ZERO: FormatBits = FormatBits { + color: 0, + alpha: 0, + depth: 0, + stencil: 0, +}; + + + +#[repr(u8)] +#[derive(Clone, Copy, Debug, Eq, Hash, Ord, PartialEq, PartialOrd)] +#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))] +pub enum Component { + + Zero, + + One, + + R, + + G, + + B, + + A, +} + + + + + + + + + +#[derive(Clone, Copy, Debug, Eq, Hash, Ord, PartialEq, PartialOrd)] +#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))] +pub struct Swizzle(pub Component, pub Component, pub Component, pub Component); + +impl Swizzle { + + pub const NO: Swizzle = Swizzle(Component::R, Component::G, Component::B, Component::A); +} + +impl Default for Swizzle { + fn default() -> Self { + Self::NO + } +} + + +#[derive(Debug, Default, Copy, Clone, PartialEq, Eq, Hash)] +#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))] +pub struct Properties { + + + + pub linear_tiling: ImageFeature, + + + pub optimal_tiling: ImageFeature, + + pub buffer_features: BufferFeature, +} + +bitflags!( + /// Image feature flags. + #[derive(Default)] + #[cfg_attr(feature = "serde", derive(Serialize, Deserialize))] + pub struct ImageFeature: u32 { + /// Image view can be sampled. + const SAMPLED = 0x1; + /// Image view can be used as storage image. + const STORAGE = 0x2; + /// Image view can be used as storage image (with atomics). + const STORAGE_ATOMIC = 0x4; + /// Image view can be used as color and input attachment. + const COLOR_ATTACHMENT = 0x80; + /// Image view can be used as color (with blending) and input attachment. + const COLOR_ATTACHMENT_BLEND = 0x100; + /// Image view can be used as depth-stencil and input attachment. + const DEPTH_STENCIL_ATTACHMENT = 0x200; + /// Image can be used as source for blit commands. + const BLIT_SRC = 0x400; + /// Image can be used as destination for blit commands. + const BLIT_DST = 0x800; + /// Image can be sampled with a (mipmap) linear sampler or as blit source + /// with linear sampling. + /// Requires `SAMPLED` or `BLIT_SRC` flag. + const SAMPLED_LINEAR = 0x1000; + } +); + +bitflags!( + /// Buffer feature flags. + #[derive(Default)] + #[cfg_attr(feature = "serde", derive(Serialize, Deserialize))] + pub struct BufferFeature: u32 { + /// Buffer view can be used as uniform texel buffer. + const UNIFORM_TEXEL = 0x8; + /// Buffer view can be used as storage texel buffer. 
+ const STORAGE_TEXEL = 0x10; + /// Buffer view can be used as storage texel buffer (with atomics). + const STORAGE_TEXEL_ATOMIC = 0x20; + /// Image view can be used as vertex buffer. + const VERTEX = 0x40; + } +); + + + +#[repr(u8)] +#[derive(Clone, Copy, Debug, Eq, Hash, Ord, PartialEq, PartialOrd)] +#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))] +pub enum ChannelType { + + Unorm, + + Snorm, + + Uint, + + Sint, + + Ufloat, + + Sfloat, + + Uscaled, + + Sscaled, + + Srgb, +} + +macro_rules! surface_types { + { $($name:ident { $total:expr, $($aspect:ident)|*, $dim:expr $( ,$component:ident : $bits:expr )*} ,)* } => { + /// Type of the allocated texture surface. It is supposed to only + /// carry information about the number of bits per each channel. + /// The actual types are up to the views to decide and interpret. + /// The actual components are up to the swizzle to define. + #[repr(u8)] + #[allow(missing_docs, non_camel_case_types)] + #[derive(Clone, Copy, Debug, Eq, Hash, Ord, PartialEq, PartialOrd)] + #[cfg_attr(feature = "serde", derive(Serialize, Deserialize))] + pub enum SurfaceType { + $( $name, )* + } + + impl SurfaceType { + /// Return the bits for this format. + pub fn describe_bits(&self) -> FormatBits { + match *self { + $( SurfaceType::$name => FormatBits { + $( $component: $bits, )* + .. BITS_ZERO + }, )* + } + } + + /// Return the format descriptor. + pub fn desc(&self) -> FormatDesc { + match *self { + $( SurfaceType::$name => FormatDesc { + bits: $total.min(!$total), + dim: $dim, + packed: $total > 0x1000, + aspects: $(Aspects::$aspect)|*, + }, )* + } + } + } + } +} + + + +surface_types! { + R4_G4 { !8, COLOR, (1, 1), color: 8 }, + R4_G4_B4_A4 { !16, COLOR, (1, 1), color: 12, alpha: 4 }, + B4_G4_R4_A4 { !16, COLOR, (1, 1), color: 12, alpha: 4 }, + R5_G6_B5 { !16, COLOR, (1, 1), color: 16 }, + B5_G6_R5 { !16, COLOR, (1, 1), color: 16 }, + R5_G5_B5_A1 { !16, COLOR, (1, 1), color: 15, alpha: 1 }, + B5_G5_R5_A1 { !16, COLOR, (1, 1), color: 15, alpha: 1 }, + A1_R5_G5_B5 { !16, COLOR, (1, 1), color: 15, alpha: 1 }, + R8 { 8, COLOR, (1, 1), color: 8 }, + R8_G8 { 16, COLOR, (1, 1), color: 16 }, + R8_G8_B8 { 24, COLOR, (1, 1), color: 24 }, + B8_G8_R8 { 24, COLOR, (1, 1), color: 24 }, + R8_G8_B8_A8 { 32, COLOR, (1, 1), color: 24, alpha: 8 }, + B8_G8_R8_A8 { 32, COLOR, (1, 1), color: 24, alpha: 8 }, + A8_B8_G8_R8 { !32, COLOR, (1, 1), color: 24, alpha: 8 }, + A2_R10_G10_B10 { !32, COLOR, (1, 1), color: 30, alpha: 2 }, + A2_B10_G10_R10 { !32, COLOR, (1, 1), color: 30, alpha: 2 }, + R16 { 16, COLOR, (1, 1), color: 16 }, + R16_G16 { 32, COLOR, (1, 1), color: 32 }, + R16_G16_B16 { 48, COLOR, (1, 1), color: 48 }, + R16_G16_B16_A16 { 64, COLOR, (1, 1), color: 48, alpha: 16 }, + R32 { 32, COLOR, (1, 1), color: 32 }, + R32_G32 { 64, COLOR, (1, 1), color: 64 }, + R32_G32_B32 { 96, COLOR, (1, 1), color: 96 }, + R32_G32_B32_A32 { 128, COLOR, (1, 1), color: 96, alpha: 32 }, + R64 { 64, COLOR, (1, 1), color: 64 }, + R64_G64 { 128, COLOR, (1, 1), color: 128 }, + R64_G64_B64 { 192, COLOR, (1, 1), color: 192 }, + R64_G64_B64_A64 { 256, COLOR, (1, 1), color: 192, alpha: 64 }, + B10_G11_R11 { !32, COLOR, (1, 1), color: 32 }, + E5_B9_G9_R9 { !32, COLOR, (1, 1), color: 27 }, + D16 { 16, DEPTH, (1, 1), depth: 16 }, + X8D24 { !32, DEPTH, (1, 1), depth: 24 }, + D32 { 32, DEPTH, (1, 1), depth: 32 }, + S8 { 8, STENCIL, (1, 1), stencil: 8 }, + D16_S8 { 24, DEPTH | STENCIL, (1, 1), depth: 16, stencil: 8 }, + D24_S8 { 32, DEPTH | STENCIL, (1, 1), depth: 24, stencil: 8 }, + D32_S8 { 40, DEPTH | 
STENCIL, (1, 1), depth: 32, stencil: 8 }, + BC1_RGB { 64, COLOR, (4, 4) }, + BC1_RGBA { 64, COLOR, (4, 4) }, + BC2 { 128, COLOR, (4, 4) }, + BC3 { 128, COLOR, (4, 4) }, + BC4 { 64, COLOR, (4, 4) }, + BC5 { 128, COLOR, (4, 4) }, + BC6 { 128, COLOR, (4, 4) }, + BC7 { 128, COLOR, (4, 4) }, + ETC2_R8_G8_B8 { 64, COLOR, (4, 4) }, + ETC2_R8_G8_B8_A1 { 64, COLOR, (4, 4) }, + ETC2_R8_G8_B8_A8 { 128, COLOR, (4, 4) }, + EAC_R11 { 64, COLOR, (4, 4) }, + EAC_R11_G11 { 128, COLOR, (4, 4) }, + ASTC_4x4 { 128, COLOR, (4, 4) }, + ASTC_5x4 { 128, COLOR, (5, 4) }, + ASTC_5x5 { 128, COLOR, (5, 5) }, + ASTC_6x5 { 128, COLOR, (6, 5) }, + ASTC_6x6 { 128, COLOR, (6, 6) }, + ASTC_8x5 { 128, COLOR, (8, 5) }, + ASTC_8x6 { 128, COLOR, (8, 6) }, + ASTC_8x8 { 128, COLOR, (8, 8) }, + ASTC_10x5 { 128, COLOR, (10, 5) }, + ASTC_10x6 { 128, COLOR, (10, 6) }, + ASTC_10x8 { 128, COLOR, (10, 8) }, + ASTC_10x10 { 128, COLOR, (10, 10) }, + ASTC_12x10 { 128, COLOR, (12, 10) }, + ASTC_12x12 { 128, COLOR, (12, 12) }, +} + + +#[derive(Clone, Copy, Debug, Eq, Hash, Ord, PartialEq, PartialOrd)] +#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))] +pub struct BaseFormat(pub SurfaceType, pub ChannelType); + + +pub trait AsFormat { + + const SELF: Format; +} + +macro_rules! formats { + { + $name:ident = ($surface:ident, $channel:ident), + $($name_tail:ident = ($surface_tail:ident, $channel_tail:ident),)* + } => { + /// A format descriptor that describes the channels present in a + /// texture or view, how they are laid out, what size they are, + /// and how the elements of the channels are interpreted (integer, + /// float, etc.) + #[allow(missing_docs)] + #[repr(u32)] + #[derive(Clone, Copy, Debug, Eq, Hash, Ord, PartialEq, PartialOrd)] + #[cfg_attr(feature = "serde", derive(Serialize, Deserialize))] + pub enum Format { + $name = 1, + $( $name_tail, )* + + // This serves as safety net for conversion from Vulkan -> HAL, + // in case Vulkan adds new formats: + // 1. We can check if a format is out of range + // 2. We 'ensure' that backend implementations do non-exhaustive matching + #[doc(hidden)] + __NumFormats, + } + + /// Number of formats. + pub const NUM_FORMATS: usize = Format::__NumFormats as _; + + /// Conversion table from `Format` to `BaseFormat`, excluding `Undefined`. + pub const BASE_FORMATS: [BaseFormat; NUM_FORMATS-1] = [ + BaseFormat(SurfaceType::$surface, ChannelType::$channel), + $(BaseFormat(SurfaceType::$surface_tail, ChannelType::$channel_tail), )* + ]; + + /// A struct equivalent to the matching `Format` enum member, which allows + /// an API to be strongly typed on particular formats. + #[derive(Clone, Copy, Debug, Eq, Hash, Ord, PartialEq, PartialOrd)] + #[cfg_attr(feature = "serde", derive(Serialize, Deserialize))] + pub struct $name; + + impl AsFormat for $name { + const SELF: Format = Format::$name; + } + + $( + + + #[allow(missing_docs)] + #[derive(Clone, Copy, Debug, Eq, Hash, Ord, PartialEq, PartialOrd)] + #[cfg_attr(feature = "serde", derive(Serialize, Deserialize))] + pub struct $name_tail; + + impl AsFormat for $name_tail { + const SELF: Format = Format::$name_tail; + } + + )* + } +} + + +formats! 
{ + Rg4Unorm = (R4_G4, Unorm), + Rgba4Unorm = (R4_G4_B4_A4, Unorm), + Bgra4Unorm = (B4_G4_R4_A4, Unorm), + R5g6b5Unorm = (R5_G6_B5, Unorm), + B5g6r5Unorm = (B5_G6_R5, Unorm), + R5g5b5a1Unorm = (R5_G5_B5_A1, Unorm), + B5g5r5a1Unorm = (B5_G5_R5_A1, Unorm), + A1r5g5b5Unorm = (A1_R5_G5_B5, Unorm), + R8Unorm = (R8, Unorm), + R8Snorm = (R8, Snorm), + R8Uscaled = (R8, Uscaled), + R8Sscaled = (R8, Sscaled), + R8Uint = (R8, Uint), + R8Sint = (R8, Sint), + R8Srgb = (R8, Srgb), + Rg8Unorm = (R8_G8, Unorm), + Rg8Snorm = (R8_G8, Snorm), + Rg8Uscaled = (R8_G8, Uscaled), + Rg8Sscaled = (R8_G8, Sscaled), + Rg8Uint = (R8_G8, Uint), + Rg8Sint = (R8_G8, Sint), + Rg8Srgb = (R8_G8, Srgb), + Rgb8Unorm = (R8_G8_B8, Unorm), + Rgb8Snorm = (R8_G8_B8, Snorm), + Rgb8Uscaled = (R8_G8_B8, Uscaled), + Rgb8Sscaled = (R8_G8_B8, Sscaled), + Rgb8Uint = (R8_G8_B8, Uint), + Rgb8Sint = (R8_G8_B8, Sint), + Rgb8Srgb = (R8_G8_B8, Srgb), + Bgr8Unorm = (B8_G8_R8, Unorm), + Bgr8Snorm = (B8_G8_R8, Snorm), + Bgr8Uscaled = (B8_G8_R8, Uscaled), + Bgr8Sscaled = (B8_G8_R8, Sscaled), + Bgr8Uint = (B8_G8_R8, Uint), + Bgr8Sint = (B8_G8_R8, Sint), + Bgr8Srgb = (B8_G8_R8, Srgb), + Rgba8Unorm = (R8_G8_B8_A8, Unorm), + Rgba8Snorm = (R8_G8_B8_A8, Snorm), + Rgba8Uscaled = (R8_G8_B8_A8, Uscaled), + Rgba8Sscaled = (R8_G8_B8_A8, Sscaled), + Rgba8Uint = (R8_G8_B8_A8, Uint), + Rgba8Sint = (R8_G8_B8_A8, Sint), + Rgba8Srgb = (R8_G8_B8_A8, Srgb), + Bgra8Unorm = (B8_G8_R8_A8, Unorm), + Bgra8Snorm = (B8_G8_R8_A8, Snorm), + Bgra8Uscaled = (B8_G8_R8_A8, Uscaled), + Bgra8Sscaled = (B8_G8_R8_A8, Sscaled), + Bgra8Uint = (B8_G8_R8_A8, Uint), + Bgra8Sint = (B8_G8_R8_A8, Sint), + Bgra8Srgb = (B8_G8_R8_A8, Srgb), + Abgr8Unorm = (A8_B8_G8_R8, Unorm), + Abgr8Snorm = (A8_B8_G8_R8, Snorm), + Abgr8Uscaled = (A8_B8_G8_R8, Uscaled), + Abgr8Sscaled = (A8_B8_G8_R8, Sscaled), + Abgr8Uint = (A8_B8_G8_R8, Uint), + Abgr8Sint = (A8_B8_G8_R8, Sint), + Abgr8Srgb = (A8_B8_G8_R8, Srgb), + A2r10g10b10Unorm = (A2_R10_G10_B10, Unorm), + A2r10g10b10Snorm = (A2_R10_G10_B10, Snorm), + A2r10g10b10Uscaled = (A2_R10_G10_B10, Uscaled), + A2r10g10b10Sscaled = (A2_R10_G10_B10, Sscaled), + A2r10g10b10Uint = (A2_R10_G10_B10, Uint), + A2r10g10b10Sint = (A2_R10_G10_B10, Sint), + A2b10g10r10Unorm = (A2_B10_G10_R10, Unorm), + A2b10g10r10Snorm = (A2_B10_G10_R10, Snorm), + A2b10g10r10Uscaled = (A2_B10_G10_R10, Uscaled), + A2b10g10r10Sscaled = (A2_B10_G10_R10, Sscaled), + A2b10g10r10Uint = (A2_B10_G10_R10, Uint), + A2b10g10r10Sint = (A2_B10_G10_R10, Sint), + R16Unorm = (R16, Unorm), + R16Snorm = (R16, Snorm), + R16Uscaled = (R16, Uscaled), + R16Sscaled = (R16, Sscaled), + R16Uint = (R16, Uint), + R16Sint = (R16, Sint), + R16Sfloat = (R16, Sfloat), + Rg16Unorm = (R16_G16, Unorm), + Rg16Snorm = (R16_G16, Snorm), + Rg16Uscaled = (R16_G16, Uscaled), + Rg16Sscaled = (R16_G16, Sscaled), + Rg16Uint = (R16_G16, Uint), + Rg16Sint = (R16_G16, Sint), + Rg16Sfloat = (R16_G16, Sfloat), + Rgb16Unorm = (R16_G16_B16, Unorm), + Rgb16Snorm = (R16_G16_B16, Snorm), + Rgb16Uscaled = (R16_G16_B16, Uscaled), + Rgb16Sscaled = (R16_G16_B16, Sscaled), + Rgb16Uint = (R16_G16_B16, Uint), + Rgb16Sint = (R16_G16_B16, Sint), + Rgb16Sfloat = (R16_G16_B16, Sfloat), + Rgba16Unorm = (R16_G16_B16_A16, Unorm), + Rgba16Snorm = (R16_G16_B16_A16, Snorm), + Rgba16Uscaled = (R16_G16_B16_A16, Uscaled), + Rgba16Sscaled = (R16_G16_B16_A16, Sscaled), + Rgba16Uint = (R16_G16_B16_A16, Uint), + Rgba16Sint = (R16_G16_B16_A16, Sint), + Rgba16Sfloat = (R16_G16_B16_A16, Sfloat), + R32Uint = (R32, Uint), + R32Sint = (R32, Sint), + R32Sfloat = (R32, Sfloat), 
+ Rg32Uint = (R32_G32, Uint), + Rg32Sint = (R32_G32, Sint), + Rg32Sfloat = (R32_G32, Sfloat), + Rgb32Uint = (R32_G32_B32, Uint), + Rgb32Sint = (R32_G32_B32, Sint), + Rgb32Sfloat = (R32_G32_B32, Sfloat), + Rgba32Uint = (R32_G32_B32_A32, Uint), + Rgba32Sint = (R32_G32_B32_A32, Sint), + Rgba32Sfloat = (R32_G32_B32_A32, Sfloat), + R64Uint = (R64, Uint), + R64Sint = (R64, Sint), + R64Sfloat = (R64, Sfloat), + Rg64Uint = (R64_G64, Uint), + Rg64Sint = (R64_G64, Sint), + Rg64Sfloat = (R64_G64, Sfloat), + Rgb64Uint = (R64_G64_B64, Uint), + Rgb64Sint = (R64_G64_B64, Sint), + Rgb64Sfloat = (R64_G64_B64, Sfloat), + Rgba64Uint = (R64_G64_B64_A64, Uint), + Rgba64Sint = (R64_G64_B64_A64, Sint), + Rgba64Sfloat = (R64_G64_B64_A64, Sfloat), + B10g11r11Ufloat = (B10_G11_R11, Ufloat), + E5b9g9r9Ufloat = (E5_B9_G9_R9, Ufloat), + D16Unorm = (D16, Unorm), + X8D24Unorm = (X8D24, Unorm), + D32Sfloat = (D32, Sfloat), + S8Uint = (S8, Uint), + D16UnormS8Uint = (D16_S8, Unorm), + D24UnormS8Uint = (D24_S8, Unorm), + D32SfloatS8Uint = (D32_S8, Sfloat), + Bc1RgbUnorm = (BC1_RGB, Unorm), + Bc1RgbSrgb = (BC1_RGB, Srgb), + Bc1RgbaUnorm = (BC1_RGBA, Unorm), + Bc1RgbaSrgb = (BC1_RGBA, Srgb), + Bc2Unorm = (BC2, Unorm), + Bc2Srgb = (BC2, Srgb), + Bc3Unorm = (BC3, Unorm), + Bc3Srgb = (BC3, Srgb), + Bc4Unorm = (BC4, Unorm), + Bc4Snorm = (BC4, Snorm), + Bc5Unorm = (BC5, Unorm), + Bc5Snorm = (BC5, Snorm), + Bc6hUfloat = (BC6, Ufloat), + Bc6hSfloat = (BC6, Sfloat), + Bc7Unorm = (BC7, Unorm), + Bc7Srgb = (BC7, Srgb), + Etc2R8g8b8Unorm = (ETC2_R8_G8_B8, Unorm), + Etc2R8g8b8Srgb = (ETC2_R8_G8_B8, Srgb), + Etc2R8g8b8a1Unorm = (ETC2_R8_G8_B8_A1, Unorm), + Etc2R8g8b8a1Srgb = (ETC2_R8_G8_B8_A1, Srgb), + Etc2R8g8b8a8Unorm = (ETC2_R8_G8_B8_A8, Unorm), + Etc2R8g8b8a8Srgb = (ETC2_R8_G8_B8_A8, Srgb), + EacR11Unorm = (EAC_R11, Unorm), + EacR11Snorm = (EAC_R11, Snorm), + EacR11g11Unorm = (EAC_R11_G11, Unorm), + EacR11g11Snorm = (EAC_R11_G11, Snorm), + Astc4x4Unorm = (ASTC_4x4, Unorm), + Astc4x4Srgb = (ASTC_4x4, Srgb), + Astc5x4Unorm = (ASTC_5x4, Unorm), + Astc5x4Srgb = (ASTC_5x4, Srgb), + Astc5x5Unorm = (ASTC_5x5, Unorm), + Astc5x5Srgb = (ASTC_5x5, Srgb), + Astc6x5Unorm = (ASTC_6x5, Unorm), + Astc6x5Srgb = (ASTC_6x5, Srgb), + Astc6x6Unorm = (ASTC_6x6, Unorm), + Astc6x6Srgb = (ASTC_6x6, Srgb), + Astc8x5Unorm = (ASTC_8x5, Unorm), + Astc8x5Srgb = (ASTC_8x5, Srgb), + Astc8x6Unorm = (ASTC_8x6, Unorm), + Astc8x6Srgb = (ASTC_8x6, Srgb), + Astc8x8Unorm = (ASTC_8x8, Unorm), + Astc8x8Srgb = (ASTC_8x8, Srgb), + Astc10x5Unorm = (ASTC_10x5, Unorm), + Astc10x5Srgb = (ASTC_10x5, Srgb), + Astc10x6Unorm = (ASTC_10x6, Unorm), + Astc10x6Srgb = (ASTC_10x6, Srgb), + Astc10x8Unorm = (ASTC_10x8, Unorm), + Astc10x8Srgb = (ASTC_10x8, Srgb), + Astc10x10Unorm = (ASTC_10x10, Unorm), + Astc10x10Srgb = (ASTC_10x10, Srgb), + Astc12x10Unorm = (ASTC_12x10, Unorm), + Astc12x10Srgb = (ASTC_12x10, Srgb), + Astc12x12Unorm = (ASTC_12x12, Unorm), + Astc12x12Srgb = (ASTC_12x12, Srgb), +} + +impl Format { + + + + pub fn base_format(self) -> BaseFormat { + assert!(self as usize != 0 && NUM_FORMATS > self as usize); + BASE_FORMATS[self as usize - 1] + } + + + pub fn surface_desc(&self) -> FormatDesc { + self.base_format().0.desc() + } + + + pub fn is_color(self) -> bool { + self.surface_desc().aspects.contains(Aspects::COLOR) + } + + + pub fn is_depth(self) -> bool { + self.surface_desc().aspects.contains(Aspects::DEPTH) + } + + + pub fn is_stencil(self) -> bool { + self.surface_desc().aspects.contains(Aspects::STENCIL) + } +} + + +impl AsFormat for f32 { + const SELF: Format = 
Format::R32Sfloat; +} +impl AsFormat for [f32; 2] { + const SELF: Format = Format::Rg32Sfloat; +} +impl AsFormat for [f32; 3] { + const SELF: Format = Format::Rgb32Sfloat; +} +impl AsFormat for [f32; 4] { + const SELF: Format = Format::Rgba32Sfloat; +} diff --git a/third_party/rust/gfx-hal/src/image.rs b/third_party/rust/gfx-hal/src/image.rs new file mode 100644 index 000000000000..c9573f1c41c0 --- /dev/null +++ b/third_party/rust/gfx-hal/src/image.rs @@ -0,0 +1,639 @@ + + + + +use crate::{ + buffer::Offset as RawOffset, + device, + format, + pso::{Comparison, Rect}, +}; +use std::{f32, hash, ops::Range}; + + +pub type Size = u32; + +pub type NumSamples = u8; + +pub type Layer = u16; + +pub type Level = u8; + +pub const MAX_LEVEL: Level = 15; + +pub type TexelCoordinate = i32; + + +#[derive(Clone, Copy, Debug, Default, Hash, PartialEq, Eq)] +#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))] +pub struct Extent { + + pub width: Size, + + pub height: Size, + + pub depth: Size, +} + +impl Extent { + + pub fn is_empty(&self) -> bool { + self.width == 0 || self.height == 0 || self.depth == 0 + } + + pub fn at_level(&self, level: Level) -> Self { + Extent { + width: 1.max(self.width >> level), + height: 1.max(self.height >> level), + depth: 1.max(self.depth >> level), + } + } + + pub fn rect(&self) -> Rect { + Rect { + x: 0, + y: 0, + w: self.width as i16, + h: self.height as i16, + } + } +} + + + + + + + +#[derive(Clone, Copy, Debug, Hash, PartialEq, Eq)] +#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))] +pub struct Offset { + + pub x: TexelCoordinate, + + pub y: TexelCoordinate, + + pub z: TexelCoordinate, +} + +impl Offset { + + pub const ZERO: Self = Offset { x: 0, y: 0, z: 0 }; + + + pub fn into_bounds(self, extent: &Extent) -> Range { + let end = Offset { + x: self.x + extent.width as i32, + y: self.y + extent.height as i32, + z: self.z + extent.depth as i32, + }; + self .. 
end + } +} + + +#[repr(u32)] +#[derive(Clone, Copy, Debug, Eq, Hash, Ord, PartialEq, PartialOrd)] +#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))] +pub enum Tiling { + + Optimal, + + + Linear, +} + + +#[derive(Clone, Debug, PartialEq)] +pub enum CreationError { + + OutOfMemory(device::OutOfMemory), + + Format(format::Format), + + Kind, + + Samples(NumSamples), + + Size(Size), + + Data(usize), + + Usage(Usage), +} + +impl From for CreationError { + fn from(error: device::OutOfMemory) -> Self { + CreationError::OutOfMemory(error) + } +} + + +#[derive(Clone, Debug, PartialEq)] +pub enum ViewError { + + Usage(Usage), + + Level(Level), + + Layer(LayerError), + + BadFormat(format::Format), + + BadKind(ViewKind), + + OutOfMemory(device::OutOfMemory), + + Unsupported, +} + +impl From for ViewError { + fn from(error: device::OutOfMemory) -> Self { + ViewError::OutOfMemory(error) + } +} + + +#[derive(Clone, Debug, PartialEq)] +pub enum LayerError { + + NotExpected(Kind), + + OutOfBounds(Range), +} + + + + +#[derive(Clone, Copy, Debug, Eq, Hash, Ord, PartialEq, PartialOrd)] +#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))] +pub enum Filter { + + + + Nearest, + + + + + Linear, +} + + +#[derive(Clone, Copy, Debug, Eq, Hash, Ord, PartialEq, PartialOrd)] +#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))] +pub enum Anisotropic { + + Off, + + On(u8), +} + + +#[allow(missing_docs)] +#[derive(Clone, Copy, Debug, Eq, Hash, Ord, PartialEq, PartialOrd)] +#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))] +#[repr(u8)] +pub enum CubeFace { + PosX, + NegX, + PosY, + NegY, + PosZ, + NegZ, +} + + +pub const CUBE_FACES: [CubeFace; 6] = [ + CubeFace::PosX, + CubeFace::NegX, + CubeFace::PosY, + CubeFace::NegY, + CubeFace::PosZ, + CubeFace::NegZ, +]; + + + + +#[derive(Clone, Copy, Debug, Eq, Hash, Ord, PartialEq, PartialOrd)] +#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))] +pub enum Kind { + + D1(Size, Layer), + + D2(Size, Size, Layer, NumSamples), + + D3(Size, Size, Size), +} + +impl Kind { + + pub fn extent(&self) -> Extent { + match *self { + Kind::D1(width, _) => Extent { + width, + height: 1, + depth: 1, + }, + Kind::D2(width, height, _, _) => Extent { + width, + height, + depth: 1, + }, + Kind::D3(width, height, depth) => Extent { + width, + height, + depth, + }, + } + } + + + pub fn level_extent(&self, level: Level) -> Extent { + use std::cmp::{max, min}; + + let map = |val| max(min(val, 1), val >> min(level, MAX_LEVEL)); + match *self { + Kind::D1(w, _) => Extent { + width: map(w), + height: 1, + depth: 1, + }, + Kind::D2(w, h, _, _) => Extent { + width: map(w), + height: map(h), + depth: 1, + }, + Kind::D3(w, h, d) => Extent { + width: map(w), + height: map(h), + depth: map(d), + }, + } + } + + + pub fn num_levels(&self) -> Level { + use std::cmp::max; + match *self { + Kind::D2(_, _, _, s) if s > 1 => { + + 1 + } + _ => { + let extent = self.extent(); + let dominant = max(max(extent.width, extent.height), extent.depth); + (1 ..).find(|level| dominant >> level == 0).unwrap() + } + } + } + + + + + pub fn num_layers(&self) -> Layer { + match *self { + Kind::D1(_, a) | Kind::D2(_, _, a, _) => a, + Kind::D3(..) => 1, + } + } + + + pub fn num_samples(&self) -> NumSamples { + match *self { + Kind::D1(..) => 1, + Kind::D2(_, _, _, s) => s, + Kind::D3(..) 
=> 1, + } + } +} + + +#[derive(Clone, Copy, Debug, Eq, Hash, Ord, PartialEq, PartialOrd)] +#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))] +pub enum ViewKind { + + D1, + + + + D1Array, + + D2, + + + D2Array, + + D3, + + Cube, + + CubeArray, +} + +bitflags!( + /// Capabilities to create views into an image. + #[cfg_attr(feature = "serde", derive(Serialize, Deserialize))] + pub struct ViewCapabilities: u32 { + /// Support creation of views with different formats. + const MUTABLE_FORMAT = 0x0000_0008; + /// Support creation of `Cube` and `CubeArray` kinds of views. + const KIND_CUBE = 0x0000_0010; + /// Support creation of `D2Array` kind of view. + const KIND_2D_ARRAY = 0x0000_0020; + } +); + +bitflags!( + /// TODO: Find out if TRANSIENT_ATTACHMENT + INPUT_ATTACHMENT + /// are applicable on backends other than Vulkan. --AP + /// Image usage flags + #[cfg_attr(feature = "serde", derive(Serialize, Deserialize))] + pub struct Usage: u32 { + /// The image is used as a transfer source. + const TRANSFER_SRC = 0x1; + /// The image is used as a transfer destination. + const TRANSFER_DST = 0x2; + /// The image is a [sampled image](https://www.khronos.org/registry/vulkan/specs/1.0/html/vkspec.html#descriptorsets-sampledimage) + const SAMPLED = 0x4; + /// The image is a [storage image](https://www.khronos.org/registry/vulkan/specs/1.0/html/vkspec.html#descriptorsets-storageimage) + const STORAGE = 0x8; + /// The image is used as a color attachment -- that is, color input to a rendering pass. + const COLOR_ATTACHMENT = 0x10; + /// The image is used as a depth attachment. + const DEPTH_STENCIL_ATTACHMENT = 0x20; + /// + const TRANSIENT_ATTACHMENT = 0x40; + /// + const INPUT_ATTACHMENT = 0x80; + + } +); + +impl Usage { + + pub fn can_transfer(&self) -> bool { + self.intersects(Usage::TRANSFER_SRC | Usage::TRANSFER_DST) + } + + + pub fn can_target(&self) -> bool { + self.intersects(Usage::COLOR_ATTACHMENT | Usage::DEPTH_STENCIL_ATTACHMENT) + } +} + + +#[derive(Clone, Copy, Debug, Eq, Hash, Ord, PartialEq, PartialOrd)] +#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))] +pub enum WrapMode { + + + + Tile, + + Mirror, + + Clamp, + + Border, +} + + + +#[derive(Clone, Copy, Debug, PartialEq, PartialOrd)] +#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))] +pub struct Lod(pub f32); + +impl Lod { + + pub const RANGE: Range = Lod(f32::MIN) .. 
Lod(f32::MAX); +} + +impl Eq for Lod {} +impl hash::Hash for Lod { + fn hash(&self, state: &mut H) { + self.0.to_bits().hash(state) + } +} + + +#[derive(Clone, Copy, Debug, Eq, Hash, PartialEq, PartialOrd)] +#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))] +pub struct PackedColor(pub u32); + +impl From<[f32; 4]> for PackedColor { + fn from(c: [f32; 4]) -> PackedColor { + PackedColor( + c.iter() + .rev() + .fold(0, |u, &c| (u << 8) + (c * 255.0) as u32), + ) + } +} + +impl Into<[f32; 4]> for PackedColor { + fn into(self) -> [f32; 4] { + let mut out = [0.0; 4]; + for (i, channel) in out.iter_mut().enumerate() { + let byte = (self.0 >> (i << 3)) & 0xFF; + *channel = byte as f32 / 255.0; + } + out + } +} + + + + + + +#[derive(Clone, Debug, Eq, Hash, PartialEq)] +#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))] +pub struct SamplerDesc { + + pub min_filter: Filter, + + pub mag_filter: Filter, + + pub mip_filter: Filter, + + + pub wrap_mode: (WrapMode, WrapMode, WrapMode), + + + + pub lod_bias: Lod, + + pub lod_range: Range, + + pub comparison: Option, + + pub border: PackedColor, + + pub normalized: bool, + + pub anisotropic: Anisotropic, +} + +impl SamplerDesc { + + + pub fn new(filter: Filter, wrap: WrapMode) -> Self { + SamplerDesc { + min_filter: filter, + mag_filter: filter, + mip_filter: filter, + wrap_mode: (wrap, wrap, wrap), + lod_bias: Lod(0.0), + lod_range: Lod::RANGE.clone(), + comparison: None, + border: PackedColor(0), + normalized: true, + anisotropic: Anisotropic::Off, + } + } +} + + + + + + + + + +#[derive(Copy, Clone, Debug, Hash, PartialEq, Eq)] +#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))] +pub enum Layout { + + General, + + ColorAttachmentOptimal, + + DepthStencilAttachmentOptimal, + + + DepthStencilReadOnlyOptimal, + + ShaderReadOnlyOptimal, + + TransferSrcOptimal, + + TransferDstOptimal, + + + + + Undefined, + + + Preinitialized, + + Present, +} + +bitflags!( + /// Bitflags to describe how memory in an image or buffer can be accessed. + #[cfg_attr(feature = "serde", derive(Serialize, Deserialize))] + pub struct Access: u32 { + /// Read access to an input attachment from within a fragment shader. + const INPUT_ATTACHMENT_READ = 0x10; + /// Read-only state for SRV access, or combine with `SHADER_WRITE` to have r/w access to UAV. + const SHADER_READ = 0x20; + /// Writeable state for UAV access. + /// Combine with `SHADER_READ` to have r/w access to UAV. + const SHADER_WRITE = 0x40; + /// Read state but can only be combined with `COLOR_ATTACHMENT_WRITE`. + const COLOR_ATTACHMENT_READ = 0x80; + /// Write-only state but can be combined with `COLOR_ATTACHMENT_READ`. + const COLOR_ATTACHMENT_WRITE = 0x100; + /// Read access to a depth/stencil attachment in a depth or stencil operation. + const DEPTH_STENCIL_ATTACHMENT_READ = 0x200; + /// Write access to a depth/stencil attachment in a depth or stencil operation. + const DEPTH_STENCIL_ATTACHMENT_WRITE = 0x400; + /// Read access to the buffer in a copy operation. + const TRANSFER_READ = 0x800; + /// Write access to the buffer in a copy operation. + const TRANSFER_WRITE = 0x1000; + /// Read access for raw memory to be accessed by the host system (ie, CPU). + const HOST_READ = 0x2000; + /// Write access for raw memory to be accessed by the host system. + const HOST_WRITE = 0x4000; + /// Read access for memory to be accessed by a non-specific entity. This may + /// be the host system, or it may be something undefined or specified by an + /// extension. 
+ const MEMORY_READ = 0x8000; + /// Write access for memory to be accessed by a non-specific entity. + const MEMORY_WRITE = 0x10000; + } +); + + +pub type State = (Access, Layout); + + +#[derive(Clone, Copy, Debug, Hash, PartialEq, Eq)] +#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))] +pub struct Subresource { + + pub aspects: format::Aspects, + + pub level: Level, + + pub layer: Layer, +} + + +#[derive(Clone, Debug, Hash, PartialEq, Eq)] +#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))] +pub struct SubresourceLayers { + + pub aspects: format::Aspects, + + pub level: Level, + + pub layers: Range, +} + + +#[derive(Clone, Debug, Hash, PartialEq, Eq)] +#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))] +pub struct SubresourceRange { + + pub aspects: format::Aspects, + + pub levels: Range, + + pub layers: Range, +} + + +#[derive(Debug, Copy, Clone, PartialEq, Eq, Hash)] +#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))] +pub struct FormatProperties { + + pub max_extent: Extent, + + pub max_levels: Level, + + pub max_layers: Layer, + + pub sample_count_mask: NumSamples, + + pub max_resource_size: usize, +} + + +#[derive(Debug, Clone, PartialEq, Eq, Hash)] +#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))] +pub struct SubresourceFootprint { + + pub slice: Range, + + pub row_pitch: RawOffset, + + pub array_pitch: RawOffset, + + pub depth_pitch: RawOffset, +} diff --git a/third_party/rust/gfx-hal/src/lib.rs b/third_party/rust/gfx-hal/src/lib.rs new file mode 100644 index 000000000000..09a5f03d1aea --- /dev/null +++ b/third_party/rust/gfx-hal/src/lib.rs @@ -0,0 +1,459 @@ +#![deny(missing_debug_implementations, missing_docs, unused)] + + + + +#[macro_use] +extern crate bitflags; + +#[cfg(feature = "serde")] +#[macro_use] +extern crate serde; + +use std::any::Any; +use std::fmt; +use std::hash::Hash; + +pub mod adapter; +pub mod buffer; +pub mod command; +pub mod device; +pub mod format; +pub mod image; +pub mod memory; +pub mod pass; +pub mod pool; +pub mod pso; +pub mod query; +pub mod queue; +pub mod range; +pub mod window; + + +pub mod prelude { + pub use crate::{ + adapter::PhysicalDevice as _, + command::CommandBuffer as _, + device::Device as _, + pool::CommandPool as _, + pso::DescriptorPool as _, + queue::{CommandQueue as _, QueueFamily as _}, + window::{PresentationSurface as _, Surface as _, Swapchain as _}, + Instance as _, + }; +} + + +pub type VertexCount = u32; + +pub type VertexOffset = i32; + +pub type IndexCount = u32; + +pub type InstanceCount = u32; + +pub type DrawCount = u32; + +pub type WorkGroupCount = [u32; 3]; + +bitflags! { + //TODO: add a feature for non-normalized samplers + //TODO: add a feature for mutable comparison samplers + /// Features that the device supports. + /// These only include features of the core interface and not API extensions. + #[cfg_attr(feature = "serde", derive(Serialize, Deserialize))] + pub struct Features: u64 { + /// Bit mask of Vulkan Core features. + const CORE_MASK = 0x0FFF_FFFF_FFFF_FFFF; + /// Bit mask of Vulkan Portability features. + const PORTABILITY_MASK = 0xF000_0000_0000_0000; + + /// Support for robust buffer access. + /// Buffer access by SPIR-V shaders is checked against the buffer/image boundaries. + const ROBUST_BUFFER_ACCESS = 0x000_0000_0000_0001; + /// Support the full 32-bit range of indexed for draw calls. + /// If not supported, the maximum index value is determined by `Limits::max_draw_index_value`. 
+ const FULL_DRAW_INDEX_U32 = 0x000_0000_0000_0002; + /// Support cube array image views. + const IMAGE_CUBE_ARRAY = 0x000_0000_0000_0004; + /// Support different color blending settings per attachments on graphics pipeline creation. + const INDEPENDENT_BLENDING = 0x000_0000_0000_0008; + /// Support geometry shader. + const GEOMETRY_SHADER = 0x000_0000_0000_0010; + /// Support tessellation shaders. + const TESSELLATION_SHADER = 0x000_0000_0000_0020; + /// Support per-sample shading and multisample interpolation. + const SAMPLE_RATE_SHADING = 0x000_0000_0000_0040; + /// Support dual source blending. + const DUAL_SRC_BLENDING = 0x000_0000_0000_0080; + /// Support logic operations. + const LOGIC_OP = 0x000_0000_0000_0100; + /// Support multiple draws per indirect call. + const MULTI_DRAW_INDIRECT = 0x000_0000_0000_0200; + /// Support indirect drawing with first instance value. + /// If not supported the first instance value **must** be 0. + const DRAW_INDIRECT_FIRST_INSTANCE = 0x00_0000_0000_0400; + /// Support depth clamping. + const DEPTH_CLAMP = 0x000_0000_0000_0800; + /// Support depth bias clamping. + const DEPTH_BIAS_CLAMP = 0x000_0000_0000_1000; + /// Support non-fill polygon modes. + const NON_FILL_POLYGON_MODE = 0x000_0000_0000_2000; + /// Support depth bounds test. + const DEPTH_BOUNDS = 0x000_0000_0000_4000; + /// Support lines with width other than 1.0. + const LINE_WIDTH = 0x000_0000_0000_8000; + /// Support points with size greater than 1.0. + const POINT_SIZE = 0x000_0000_0001_0000; + /// Support replacing alpha values with 1.0. + const ALPHA_TO_ONE = 0x000_0000_0002_0000; + /// Support multiple viewports and scissors. + const MULTI_VIEWPORTS = 0x000_0000_0004_0000; + /// Support anisotropic filtering. + const SAMPLER_ANISOTROPY = 0x000_0000_0008_0000; + /// Support ETC2 texture compression formats. + const FORMAT_ETC2 = 0x000_0000_0010_0000; + /// Support ASTC (LDR) texture compression formats. + const FORMAT_ASTC_LDR = 0x000_0000_0020_0000; + /// Support BC texture compression formats. + const FORMAT_BC = 0x000_0000_0040_0000; + /// Support precise occlusion queries, returning the actual number of samples. + /// If not supported, queries return a non-zero value when at least **one** sample passes. + const PRECISE_OCCLUSION_QUERY = 0x000_0000_0080_0000; + /// Support query of pipeline statistics. + const PIPELINE_STATISTICS_QUERY = 0x000_0000_0100_0000; + /// Support unordered access stores and atomic ops in the vertex, geometry + /// and tessellation shader stage. + /// If not supported, the shader resources **must** be annotated as read-only. + const VERTEX_STORES_AND_ATOMICS = 0x000_0000_0200_0000; + /// Support unordered access stores and atomic ops in the fragment shader stage + /// If not supported, the shader resources **must** be annotated as read-only. 
+ const FRAGMENT_STORES_AND_ATOMICS = 0x000_0000_0400_0000; + /// + const SHADER_TESSELLATION_AND_GEOMETRY_POINT_SIZE = 0x000_0000_0800_0000; + /// + const SHADER_IMAGE_GATHER_EXTENDED = 0x000_0000_1000_0000; + /// + const SHADER_STORAGE_IMAGE_EXTENDED_FORMATS = 0x000_0000_2000_0000; + /// + const SHADER_STORAGE_IMAGE_MULTISAMPLE = 0x000_0000_4000_0000; + /// + const SHADER_STORAGE_IMAGE_READ_WITHOUT_FORMAT = 0x000_0000_8000_0000; + /// + const SHADER_STORAGE_IMAGE_WRITE_WITHOUT_FORMAT = 0x000_0001_0000_0000; + /// + const SHADER_UNIFORM_BUFFER_ARRAY_DYNAMIC_INDEXING = 0x000_0002_0000_0000; + /// + const SHADER_SAMPLED_IMAGE_ARRAY_DYNAMIC_INDEXING = 0x000_0004_0000_0000; + /// + const SHADER_STORAGE_BUFFER_ARRAY_DYNAMIC_INDEXING = 0x000_0008_0000_0000; + /// + const SHADER_STORAGE_IMAGE_ARRAY_DYNAMIC_INDEXING = 0x000_0010_0000_0000; + /// + const SHADER_CLIP_DISTANCE = 0x000_0020_0000_0000; + /// + const SHADER_CULL_DISTANCE = 0x000_0040_0000_0000; + /// + const SHADER_FLOAT64 = 0x000_0080_0000_0000; + /// + const SHADER_INT64 = 0x000_0100_0000_0000; + /// + const SHADER_INT16 = 0x000_0200_0000_0000; + /// + const SHADER_RESOURCE_RESIDENCY = 0x000_0400_0000_0000; + /// + const SHADER_RESOURCE_MIN_LOD = 0x000_0800_0000_0000; + /// + const SPARSE_BINDING = 0x000_1000_0000_0000; + /// + const SPARSE_RESIDENCY_BUFFER = 0x000_2000_0000_0000; + /// + const SPARSE_RESIDENCY_IMAGE_2D = 0x000_4000_0000_0000; + /// + const SPARSE_RESIDENCY_IMAGE_3D = 0x000_8000_0000_0000; + /// + const SPARSE_RESIDENCY_2_SAMPLES = 0x001_0000_0000_0000; + /// + const SPARSE_RESIDENCY_4_SAMPLES = 0x002_0000_0000_0000; + /// + const SPARSE_RESIDENCY_8_SAMPLES = 0x004_0000_0000_0000; + /// + const SPARSE_RESIDENCY_16_SAMPLES = 0x008_0000_0000_0000; + /// + const SPARSE_RESIDENCY_ALIASED = 0x010_0000_0000_0000; + /// + const VARIABLE_MULTISAMPLE_RATE = 0x020_0000_0000_0000; + /// + const INHERITED_QUERIES = 0x040_0000_0000_0000; + + /// Support triangle fan primitive topology. + const TRIANGLE_FAN = 0x1000_0000_0000_0000; + /// Support separate stencil reference values for front and back sides. + const SEPARATE_STENCIL_REF_VALUES = 0x2000_0000_0000_0000; + /// Support manually specified vertex attribute rates (divisors). + const INSTANCE_RATE = 0x4000_0000_0000_0000; + /// Support non-zero mipmap bias on samplers. 
+ const SAMPLER_MIP_LOD_BIAS = 0x8000_0000_0000_0000; + } +} + + +#[derive(Clone, Copy, Debug, Default, PartialEq)] +#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))] +pub struct Limits { + + pub max_image_1d_size: image::Size, + + pub max_image_2d_size: image::Size, + + pub max_image_3d_size: image::Size, + + pub max_image_cube_size: image::Size, + + pub max_image_array_layers: image::Layer, + + pub max_texel_elements: usize, + + pub max_uniform_buffer_range: buffer::Offset, + + pub max_storage_buffer_range: buffer::Offset, + + pub max_push_constants_size: usize, + + pub max_memory_allocation_count: usize, + + pub max_sampler_allocation_count: usize, + + pub max_bound_descriptor_sets: pso::DescriptorSetIndex, + + pub max_framebuffer_layers: usize, + + pub max_per_stage_descriptor_samplers: usize, + + pub max_per_stage_descriptor_uniform_buffers: usize, + + pub max_per_stage_descriptor_storage_buffers: usize, + + pub max_per_stage_descriptor_sampled_images: usize, + + pub max_per_stage_descriptor_storage_images: usize, + + pub max_per_stage_descriptor_input_attachments: usize, + + pub max_per_stage_resources: usize, + + + pub max_descriptor_set_samplers: usize, + + pub max_descriptor_set_uniform_buffers: usize, + + pub max_descriptor_set_uniform_buffers_dynamic: usize, + + pub max_descriptor_set_storage_buffers: usize, + + pub max_descriptor_set_storage_buffers_dynamic: usize, + + pub max_descriptor_set_sampled_images: usize, + + pub max_descriptor_set_storage_images: usize, + + pub max_descriptor_set_input_attachments: usize, + + + pub max_vertex_input_attributes: usize, + + pub max_vertex_input_bindings: usize, + + pub max_vertex_input_attribute_offset: usize, + + pub max_vertex_input_binding_stride: usize, + + pub max_vertex_output_components: usize, + + + pub max_patch_size: pso::PatchSize, + + pub max_geometry_shader_invocations: usize, + + pub max_geometry_input_components: usize, + + pub max_geometry_output_components: usize, + + pub max_geometry_output_vertices: usize, + + pub max_geometry_total_output_components: usize, + + pub max_fragment_input_components: usize, + + pub max_fragment_output_attachments: usize, + + pub max_fragment_dual_source_attachments: usize, + + pub max_fragment_combined_output_resources: usize, + + + pub max_compute_shared_memory_size: usize, + + pub max_compute_work_group_count: WorkGroupCount, + + pub max_compute_work_group_invocations: usize, + + pub max_compute_work_group_size: [u32; 3], + + + pub max_draw_indexed_index_value: IndexCount, + + pub max_draw_indirect_count: InstanceCount, + + + pub max_sampler_lod_bias: f32, + + pub max_sampler_anisotropy: f32, + + + pub max_viewports: usize, + + pub max_viewport_dimensions: [image::Size; 2], + + pub max_framebuffer_extent: image::Extent, + + + pub min_memory_map_alignment: usize, + + pub buffer_image_granularity: buffer::Offset, + + pub min_texel_buffer_offset_alignment: buffer::Offset, + + pub min_uniform_buffer_offset_alignment: buffer::Offset, + + pub min_storage_buffer_offset_alignment: buffer::Offset, + + pub framebuffer_color_sample_counts: image::NumSamples, + + pub framebuffer_depth_sample_counts: image::NumSamples, + + pub framebuffer_stencil_sample_counts: image::NumSamples, + + pub max_color_attachments: usize, + + pub standard_sample_locations: bool, + + pub optimal_buffer_copy_offset_alignment: buffer::Offset, + + + pub optimal_buffer_copy_pitch_alignment: buffer::Offset, + + pub non_coherent_atom_size: usize, + + + pub min_vertex_input_binding_stride_alignment: buffer::Offset, 
+} + + +#[allow(missing_docs)] +#[derive(Clone, Copy, Debug, Eq, Hash, Ord, PartialEq, PartialOrd)] +#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))] +#[repr(u8)] +pub enum IndexType { + U16, + U32, +} + + + +#[derive(Clone, Debug, PartialEq)] +pub struct UnsupportedBackend; + + + + + + + + + + + + + + + + + + + + + + + + +pub trait Instance: Any + Send + Sync + Sized { + + fn create(name: &str, version: u32) -> Result; + + fn enumerate_adapters(&self) -> Vec>; + + unsafe fn create_surface( + &self, + _: &impl raw_window_handle::HasRawWindowHandle, + ) -> Result; + + + + + unsafe fn destroy_surface(&self, surface: B::Surface); +} + + +#[derive(Copy, Clone, Debug, PartialEq, Eq, PartialOrd, Ord, Hash)] +#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))] +pub struct MemoryTypeId(pub usize); + +impl From for MemoryTypeId { + fn from(id: usize) -> Self { + MemoryTypeId(id) + } +} + + + + +#[allow(missing_docs)] +pub trait Backend: 'static + Sized + Eq + Clone + Hash + fmt::Debug + Any + Send + Sync { + type Instance: Instance; + type PhysicalDevice: adapter::PhysicalDevice; + type Device: device::Device; + + type Surface: window::PresentationSurface; + type Swapchain: window::Swapchain; + + type QueueFamily: queue::QueueFamily; + type CommandQueue: queue::CommandQueue; + type CommandBuffer: command::CommandBuffer; + + type ShaderModule: fmt::Debug + Any + Send + Sync; + type RenderPass: fmt::Debug + Any + Send + Sync; + type Framebuffer: fmt::Debug + Any + Send + Sync; + + type Memory: fmt::Debug + Any + Send + Sync; + type CommandPool: pool::CommandPool; + + type Buffer: fmt::Debug + Any + Send + Sync; + type BufferView: fmt::Debug + Any + Send + Sync; + type Image: fmt::Debug + Any + Send + Sync; + type ImageView: fmt::Debug + Any + Send + Sync; + type Sampler: fmt::Debug + Any + Send + Sync; + + type ComputePipeline: fmt::Debug + Any + Send + Sync; + type GraphicsPipeline: fmt::Debug + Any + Send + Sync; + type PipelineCache: fmt::Debug + Any + Send + Sync; + type PipelineLayout: fmt::Debug + Any + Send + Sync; + type DescriptorPool: pso::DescriptorPool; + type DescriptorSet: fmt::Debug + Any + Send + Sync; + type DescriptorSetLayout: fmt::Debug + Any + Send + Sync; + + type Fence: fmt::Debug + Any + Send + Sync; + type Semaphore: fmt::Debug + Any + Send + Sync; + type Event: fmt::Debug + Any + Send + Sync; + type QueryPool: fmt::Debug + Any + Send + Sync; +} diff --git a/third_party/rust/gfx-hal/src/memory.rs b/third_party/rust/gfx-hal/src/memory.rs new file mode 100644 index 000000000000..c826b30ed3fd --- /dev/null +++ b/third_party/rust/gfx-hal/src/memory.rs @@ -0,0 +1,101 @@ + + +use crate::{buffer, image, queue, Backend}; +use std::ops::Range; + +bitflags!( + /// Memory property flags. + #[cfg_attr(feature = "serde", derive(Serialize, Deserialize))] + pub struct Properties: u16 { + /// Device local memory on the GPU. + const DEVICE_LOCAL = 0x1; + + /// Host visible memory can be accessed by the CPU. + /// + /// Backends must provide at least one cpu visible memory. + const CPU_VISIBLE = 0x2; + + /// CPU-GPU coherent. + /// + /// Non-coherent memory requires explicit flushing. + const COHERENT = 0x4; + + /// Cached memory by the CPU + const CPU_CACHED = 0x8; + + /// Memory that may be lazily allocated as needed on the GPU + /// and *must not* be visible to the CPU. + const LAZILY_ALLOCATED = 0x10; + } +); + +bitflags!( + /// Barrier dependency flags. 
+bitflags!(
+    /// Barrier dependency flags.
+    #[cfg_attr(feature = "serde", derive(Serialize, Deserialize))]
+    pub struct Dependencies: u32 {
+        /// Specifies the memory dependency to be framebuffer-local.
+        const BY_REGION = 0x1;
+        //const VIEW_LOCAL = 0x2;
+        //const DEVICE_GROUP = 0x4;
+    }
+);
+
+/// A memory barrier, describing a transition of resource access between states.
+#[derive(Clone, Debug)]
+pub enum Barrier<'a, B: Backend> {
+    /// Applies the given access flags to all buffers in the range.
+    AllBuffers(Range<buffer::Access>),
+    /// Applies the given access flags to all images in the range.
+    AllImages(Range<image::Access>),
+    /// A memory barrier that defines access to a buffer.
+    Buffer {
+        /// The access flags controlled by the barrier.
+        states: Range<buffer::State>,
+        /// The buffer the barrier controls.
+        target: &'a B::Buffer,
+        /// Queue family ownership transfer, if any.
+        families: Option<Range<queue::QueueFamilyId>>,
+        /// Sub-range of the buffer covered by the barrier.
+        range: Range<Option<buffer::Offset>>,
+    },
+    /// A memory barrier that defines access to (a subset of) an image.
+    Image {
+        /// The access and layout controlled by the barrier.
+        states: Range<image::State>,
+        /// The image the barrier controls.
+        target: &'a B::Image,
+        /// Queue family ownership transfer, if any.
+        families: Option<Range<queue::QueueFamilyId>>,
+        /// Subresource range covered by the barrier.
+        range: image::SubresourceRange,
+    },
+}
+
+impl<'a, B: Backend> Barrier<'a, B> {
+    /// Create a barrier for the whole buffer between the given states.
+    pub fn whole_buffer(target: &'a B::Buffer, states: Range<buffer::State>) -> Self {
+        Barrier::Buffer {
+            states,
+            target,
+            families: None,
+            range: None .. None,
+        }
+    }
+}
+
+/// Memory requirements for a certain resource (buffer/image).
+#[derive(Clone, Copy, Debug)]
+pub struct Requirements {
+    /// Size in the memory.
+    pub size: u64,
+    /// Memory alignment.
+    pub alignment: u64,
+    /// Supported memory types.
+    pub type_mask: u64,
+}
diff --git a/third_party/rust/gfx-hal/src/pass.rs b/third_party/rust/gfx-hal/src/pass.rs
new file mode 100644
index 000000000000..0588843e2f1d
--- /dev/null
+++ b/third_party/rust/gfx-hal/src/pass.rs
@@ -0,0 +1,185 @@
+
+use crate::{format::Format, image, memory::Dependencies, pso::PipelineStage, Backend};
+use std::ops::Range;
+
+/// Operation to perform on an attachment at the start of a subpass.
+#[derive(Copy, Clone, Debug, Hash, Eq, PartialEq)]
+#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))]
+pub enum AttachmentLoadOp {
+    /// Preserve the existing content of the attachment.
+    Load,
+    /// Clear the attachment.
+    Clear,
+    /// The attachment content is left undefined.
+    DontCare,
+}
+
+/// Operation to perform on an attachment at the end of a subpass.
+#[derive(Copy, Clone, Debug, Hash, Eq, PartialEq)]
+#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))]
+pub enum AttachmentStoreOp {
+    /// Content written to the attachment is preserved.
+    Store,
+    /// The attachment content is left undefined.
+    DontCare,
+}
+
+/// Image layout of an attachment.
+pub type AttachmentLayout = image::Layout;
+
+/// Attachment load and store operations.
+#[derive(Copy, Clone, Debug, Hash, Eq, PartialEq)]
+#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))]
+pub struct AttachmentOps {
+    /// How the attachment data is loaded at the beginning of the subpass.
+    pub load: AttachmentLoadOp,
+    /// Whether the attachment data is preserved after the subpass.
+    pub store: AttachmentStoreOp,
+}
+
+impl AttachmentOps {
+    /// `DontCare` for both load and store.
+    pub const DONT_CARE: Self = AttachmentOps {
+        load: AttachmentLoadOp::DontCare,
+        store: AttachmentStoreOp::DontCare,
+    };
+
+    /// `Load` for load and `Store` for store.
+    pub const PRESERVE: Self = AttachmentOps {
+        load: AttachmentLoadOp::Load,
+        store: AttachmentStoreOp::Store,
+    };
+
+    /// Convenience constructor.
+    pub fn new(load: AttachmentLoadOp, store: AttachmentStoreOp) -> Self {
+        AttachmentOps { load, store }
+    }
+
+    /// Default value used by the serde deserializer for `stencil_ops`.
+    #[cfg(feature = "serde")]
+    fn whatever() -> Self {
+        Self::DONT_CARE
+    }
+}
+
+/// Description of a resource provided to a render subpass.
+#[derive(Clone, Debug, Hash, PartialEq)]
+#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))]
+pub struct Attachment {
+    /// Attachment format. `None` designates an unused attachment.
+    pub format: Option<Format>,
+    /// Number of samples.
+    pub samples: image::NumSamples,
+    /// Load and store operations of the attachment.
+    pub ops: AttachmentOps,
+    /// Load and store operations of the stencil aspect, if any.
+    #[cfg_attr(feature = "serde", serde(default = "AttachmentOps::whatever"))]
+    pub stencil_ops: AttachmentOps,
+    /// Initial and final image layouts of the render pass.
+    pub layouts: Range<AttachmentLayout>,
+}
+
+impl Attachment {
+    /// Returns true if this attachment has some clear operations.
+    pub fn has_clears(&self) -> bool {
+        self.ops.load == AttachmentLoadOp::Clear || self.stencil_ops.load == AttachmentLoadOp::Clear
+    }
+}
+
+/// Index of an attachment within a render pass.
+pub type AttachmentId = usize;
+/// Reference to an attachment by index and expected image layout.
+pub type AttachmentRef = (AttachmentId, AttachmentLayout);
+/// An `AttachmentId` that can be used instead of providing an attachment.
+pub const ATTACHMENT_UNUSED: AttachmentId = !0;
+
+/// Which other subpasses a particular subpass depends on.
+#[derive(Copy, Clone, Debug, Hash, PartialEq)]
+#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))]
+pub enum SubpassRef {
+    /// The dependency is external to the render pass.
+    External,
+    /// The subpass with the given index.
+    Pass(usize),
+}
+
+/// Expresses a dependency between multiple subpasses.
+#[derive(Clone, Debug, Hash)]
+#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))]
+pub struct SubpassDependency {
+    /// Other subpasses this one depends on.
+    pub passes: Range<SubpassRef>,
+    /// Pipeline stages covered by the dependency.
+    pub stages: Range<PipelineStage>,
+    /// Memory accesses covered by the dependency.
+    pub accesses: Range<image::Access>,
+    /// Dependency flags.
+    pub flags: Dependencies,
+}
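A brief sketch of how these pieces fit together (not part of the vendored file; the format and layouts are illustrative assumptions): a typical single-sampled color attachment that is cleared on load, stored at the end, and handed off for presentation.

```rust
use gfx_hal::format::Format;
use gfx_hal::image::Layout;
use gfx_hal::pass::{Attachment, AttachmentLoadOp, AttachmentOps, AttachmentStoreOp};

// Sketch: clear-on-load, store-on-end color attachment for a present target.
fn color_attachment() -> Attachment {
    Attachment {
        format: Some(Format::Rgba8Srgb),
        samples: 1,
        ops: AttachmentOps::new(AttachmentLoadOp::Clear, AttachmentStoreOp::Store),
        stencil_ops: AttachmentOps::DONT_CARE,
        layouts: Layout::Undefined .. Layout::Present,
    }
}
```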
+/// Description of a subpass for render pass creation.
+#[derive(Clone, Debug)]
+pub struct SubpassDesc<'a> {
+    /// Which attachments are used as color attachments.
+    pub colors: &'a [AttachmentRef],
+    /// Which attachment, if any, is used for depth/stencil.
+    pub depth_stencil: Option<&'a AttachmentRef>,
+    /// Which attachments are used as input attachments.
+    pub inputs: &'a [AttachmentRef],
+    /// Which attachments are used as resolve targets.
+    pub resolves: &'a [AttachmentRef],
+    /// Attachments not used by the subpass that must be preserved.
+    pub preserves: &'a [AttachmentId],
+}
+
+/// Index of a subpass.
+pub type SubpassId = usize;
+
+/// A subpass borrow of a render pass.
+#[derive(Debug)]
+pub struct Subpass<'a, B: Backend> {
+    /// Index of the subpass.
+    pub index: SubpassId,
+    /// Main pass borrow.
+    pub main_pass: &'a B::RenderPass,
+}
+
+impl<'a, B: Backend> Clone for Subpass<'a, B> {
+    fn clone(&self) -> Self {
+        Subpass {
+            index: self.index,
+            main_pass: self.main_pass,
+        }
+    }
+}
+
+impl<'a, B: Backend> PartialEq for Subpass<'a, B> {
+    fn eq(&self, other: &Self) -> bool {
+        self.index == other.index && self.main_pass as *const _ == other.main_pass as *const _
+    }
+}
+
+impl<'a, B: Backend> Copy for Subpass<'a, B> {}
+impl<'a, B: Backend> Eq for Subpass<'a, B> {}
diff --git a/third_party/rust/gfx-hal/src/pool.rs b/third_party/rust/gfx-hal/src/pool.rs
new file mode 100644
index 000000000000..373f4d5f8f17
--- /dev/null
+++ b/third_party/rust/gfx-hal/src/pool.rs
@@ -0,0 +1,43 @@
+
+use crate::command::Level;
+use crate::Backend;
+
+use smallvec::SmallVec;
+use std::any::Any;
+use std::fmt;
+
+bitflags!(
+    /// Command pool creation flags.
+    #[cfg_attr(feature = "serde", derive(Serialize, Deserialize))]
+    pub struct CommandPoolCreateFlags: u8 {
+        /// Indicates short-lived command buffers.
+        /// Memory optimization hint for implementations.
+        const TRANSIENT = 0x1;
+        /// Allow command buffers to be reset individually.
+        const RESET_INDIVIDUAL = 0x2;
+    }
+);
+
+/// A pool from which command buffers are allocated.
+pub trait CommandPool<B: Backend>: fmt::Debug + Any + Send + Sync {
+    /// Reset the command pool and the corresponding command buffers.
+    unsafe fn reset(&mut self, release_resources: bool);
+
+    /// Allocate a single command buffer from the pool.
+    unsafe fn allocate_one(&mut self, level: Level) -> B::CommandBuffer {
+        self.allocate_vec(1, level).pop().unwrap()
+    }
+
+    /// Allocate new command buffers from the pool.
+    unsafe fn allocate_vec(&mut self, num: usize, level: Level) -> SmallVec<[B::CommandBuffer; 1]> {
+        (0 .. num).map(|_| self.allocate_one(level)).collect()
+    }
+
+    /// Free command buffers allocated from this pool.
+    unsafe fn free<I>(&mut self, buffers: I)
+    where
+        I: IntoIterator<Item = B::CommandBuffer>;
+}
diff --git a/third_party/rust/gfx-hal/src/pso/compute.rs b/third_party/rust/gfx-hal/src/pso/compute.rs
new file mode 100644
index 000000000000..2312d7f18da4
--- /dev/null
+++ b/third_party/rust/gfx-hal/src/pso/compute.rs
@@ -0,0 +1,31 @@
+
+use crate::{
+    pso::{BasePipeline, EntryPoint, PipelineCreationFlags},
+    Backend,
+};
+
+/// The data needed to construct a compute pipeline.
+#[derive(Debug)]
+pub struct ComputePipelineDesc<'a, B: Backend> {
+    /// The shader entry point that performs the computation.
+    pub shader: EntryPoint<'a, B>,
+    /// Pipeline layout.
+    pub layout: &'a B::PipelineLayout,
+    /// Pipeline creation flags.
+    pub flags: PipelineCreationFlags,
+    /// The parent pipeline to derive from, if any.
+    pub parent: BasePipeline<'a, B::ComputePipeline>,
+}
+
+impl<'a, B: Backend> ComputePipelineDesc<'a, B> {
+    /// Create a new descriptor using the provided shader and pipeline layout.
+    pub fn new(shader: EntryPoint<'a, B>, layout: &'a B::PipelineLayout) -> Self {
+        ComputePipelineDesc {
+            shader,
+            layout,
+            flags: PipelineCreationFlags::empty(),
+            parent: BasePipeline::None,
+        }
+    }
+}
diff --git a/third_party/rust/gfx-hal/src/pso/descriptor.rs b/third_party/rust/gfx-hal/src/pso/descriptor.rs
new file mode 100644
index 000000000000..58d8342e7387
--- /dev/null
+++ b/third_party/rust/gfx-hal/src/pso/descriptor.rs
@@ -0,0 +1,253 @@
+
+use smallvec::SmallVec;
+use std::{borrow::Borrow, fmt, iter, ops::Range};
+
+use crate::{
+    buffer::Offset,
+    image::Layout,
+    pso::ShaderStageFlags,
+    Backend,
+};
+
+/// An index into a descriptor set.
+pub type DescriptorSetIndex = u16;
+/// Binding index of a descriptor within a set.
+pub type DescriptorBinding = u32;
+/// Index of an array element within a binding.
+pub type DescriptorArrayIndex = usize;
+
+/// The type of a descriptor.
+#[repr(C)]
+#[derive(Copy, Clone, Debug, PartialEq, Eq, Hash)]
+#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))]
+pub enum DescriptorType {
+    Sampler = 0,
+    CombinedImageSampler = 1,
+    SampledImage = 2,
+    StorageImage = 3,
+    UniformTexelBuffer = 4,
+    StorageTexelBuffer = 5,
+    UniformBuffer = 6,
+    StorageBuffer = 7,
+    UniformBufferDynamic = 8,
+    StorageBufferDynamic = 9,
+    InputAttachment = 10,
+}
+
+/// A single binding within a descriptor set layout.
+#[derive(Clone, Debug)]
+#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))]
+pub struct DescriptorSetLayoutBinding {
+    /// Descriptor binding index.
+    pub binding: DescriptorBinding,
+    /// Type of the bound descriptors.
+    pub ty: DescriptorType,
+    /// Number of descriptors in the binding array.
+    pub count: DescriptorArrayIndex,
+    /// Which shader stages can access the binding.
+    pub stage_flags: ShaderStageFlags,
+    /// Whether the binding uses immutable samplers.
+    pub immutable_samplers: bool,
+}
+
+/// A descriptor range of a certain type within a pool.
+#[derive(Clone, Copy, Debug)]
+#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))]
+pub struct DescriptorRangeDesc {
+    /// Type of the stored descriptors.
+    pub ty: DescriptorType,
+    /// Number of descriptors of this type in the pool.
+    pub count: usize,
+}
+
+/// An error allocating descriptor sets from a pool.
+#[derive(Clone, Debug, PartialEq)]
+pub enum AllocationError {
+    /// Out of host memory.
+    Host,
+    /// Out of device memory.
+    Device,
+    /// The pool itself is out of memory.
+    OutOfPoolMemory,
+    /// The pool is too fragmented to satisfy the allocation.
+    FragmentedPool,
+    /// The given layout is incompatible with the pool.
+    IncompatibleLayout,
+}
+
+/// A pool of descriptor sets.
+pub trait DescriptorPool<B: Backend>: Send + Sync + fmt::Debug {
+    /// Allocate a descriptor set from the pool.
+    unsafe fn allocate_set(
+        &mut self,
+        layout: &B::DescriptorSetLayout,
+    ) -> Result<B::DescriptorSet, AllocationError> {
+        let mut sets = SmallVec::new();
+        self.allocate_sets(iter::once(layout), &mut sets)
+            .map(|_| sets.remove(0))
+    }
+
+    /// Allocate one descriptor set per layout, appending them to `sets`.
+    /// On error, any sets allocated by this call are freed again.
+    unsafe fn allocate_sets<I>(
+        &mut self,
+        layouts: I,
+        sets: &mut SmallVec<[B::DescriptorSet; 1]>,
+    ) -> Result<(), AllocationError>
+    where
+        I: IntoIterator,
+        I::Item: Borrow<B::DescriptorSetLayout>,
+    {
+        let base = sets.len();
+        for layout in layouts {
+            match self.allocate_set(layout.borrow()) {
+                Ok(set) => sets.push(set),
+                Err(e) => {
+                    while sets.len() != base {
+                        self.free_sets(sets.pop());
+                    }
+                    return Err(e);
+                }
+            }
+        }
+        Ok(())
+    }
+
+    /// Free the given descriptor sets.
+    unsafe fn free_sets<I>(&mut self, descriptor_sets: I)
+    where
+        I: IntoIterator<Item = B::DescriptorSet>;
+
+    /// Reset the pool, returning all allocated sets to it.
+    unsafe fn reset(&mut self);
+}
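A minimal usage sketch (not part of the vendored file), generic over any backend: `allocate_set` is the convenience default shown above, which delegates to `allocate_sets` with a one-element iterator.

```rust
use gfx_hal::pso::{self, DescriptorPool};
use gfx_hal::Backend;

// Sketch: allocate a single descriptor set for a given layout.
unsafe fn alloc_one<B: Backend>(
    pool: &mut B::DescriptorPool,
    layout: &B::DescriptorSetLayout,
) -> Result<B::DescriptorSet, pso::AllocationError> {
    pool.allocate_set(layout)
}
```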
+/// Writes the actual descriptors to be bound into a descriptor set.
+///
+/// If more descriptors are provided than fit into the array of the
+/// specified binding, the write continues into the following bindings.
+#[allow(missing_docs)]
+#[derive(Debug)]
+pub struct DescriptorSetWrite<'a, B: Backend, WI>
+where
+    WI: IntoIterator,
+    WI::Item: Borrow<Descriptor<'a, B>>,
+{
+    pub set: &'a B::DescriptorSet,
+    pub binding: DescriptorBinding,
+    pub array_offset: DescriptorArrayIndex,
+    pub descriptors: WI,
+}
+
+/// A handle to a specific shader resource that can be bound for use in a descriptor set.
+#[allow(missing_docs)]
+#[derive(Clone, Debug)]
+pub enum Descriptor<'a, B: Backend> {
+    Sampler(&'a B::Sampler),
+    Image(&'a B::ImageView, Layout),
+    CombinedImageSampler(&'a B::ImageView, Layout, &'a B::Sampler),
+    Buffer(&'a B::Buffer, Range<Option<Offset>>),
+    UniformTexelBuffer(&'a B::BufferView),
+    StorageTexelBuffer(&'a B::BufferView),
+}
+
+/// Copies a range of descriptors from one descriptor set to another.
+#[allow(missing_docs)]
+#[derive(Clone, Copy, Debug)]
+pub struct DescriptorSetCopy<'a, B: Backend> {
+    pub src_set: &'a B::DescriptorSet,
+    pub src_binding: DescriptorBinding,
+    pub src_array_offset: DescriptorArrayIndex,
+    pub dst_set: &'a B::DescriptorSet,
+    pub dst_binding: DescriptorBinding,
+    pub dst_array_offset: DescriptorArrayIndex,
+    pub count: usize,
+}
+
+bitflags! {
+    /// Descriptor pool creation flags.
+    pub struct DescriptorPoolCreateFlags: u32 {
+        /// Specifies that descriptor sets are allowed to be freed from the pool
+        /// individually.
+        const FREE_DESCRIPTOR_SET = 0x1;
+    }
+}
diff --git a/third_party/rust/gfx-hal/src/pso/graphics.rs b/third_party/rust/gfx-hal/src/pso/graphics.rs
new file mode 100644
index 000000000000..a909215c42f0
--- /dev/null
+++ b/third_party/rust/gfx-hal/src/pso/graphics.rs
@@ -0,0 +1,289 @@
+
+use crate::{
+    image,
+    pass,
+    pso::{
+        input_assembler::{AttributeDesc, InputAssemblerDesc, Primitive, VertexBufferDesc},
+        output_merger::{ColorBlendDesc, DepthStencilDesc, Face},
+        BasePipeline, EntryPoint, PipelineCreationFlags, State,
+    },
+    Backend,
+};
+
+use std::ops::Range;
+
+/// A 2D rectangle with integer coordinates.
+#[derive(Clone, Copy, Debug, Hash, PartialEq, PartialOrd)]
+#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))]
+pub struct Rect {
+    /// X position.
+    pub x: i16,
+    /// Y position.
+    pub y: i16,
+    /// Width.
+    pub w: i16,
+    /// Height.
+    pub h: i16,
+}
+
+/// A rectangular subregion of attachment layers to be cleared.
+#[derive(Clone, Debug, PartialEq)]
+#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))]
+pub struct ClearRect {
+    /// Rectangle to clear.
+    pub rect: Rect,
+    /// Layer range to clear.
+    pub layers: Range<image::Layer>,
+}
+
+/// A viewport: a rectangle plus a depth range.
+#[derive(Clone, Debug, PartialEq)]
+#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))]
+pub struct Viewport {
+    /// The viewport boundaries.
+    pub rect: Rect,
+    /// The viewport depth limits.
+    pub depth: Range<f32>,
+}
+
+/// A single RGBA float color.
+pub type ColorValue = [f32; 4];
+/// A single depth value from a depth buffer.
+pub type DepthValue = f32;
+/// A single value from a stencil buffer.
+pub type StencilValue = u32;
+
+/// A complete set of shaders to build a graphics pipeline.
+#[derive(Clone, Debug)]
+pub struct GraphicsShaderSet<'a, B: Backend> {
+    /// The vertex shader (required).
+    pub vertex: EntryPoint<'a, B>,
+    /// The hull (tessellation control) shader, if any.
+    pub hull: Option<EntryPoint<'a, B>>,
+    /// The domain (tessellation evaluation) shader, if any.
+    pub domain: Option<EntryPoint<'a, B>>,
+    /// The geometry shader, if any.
+    pub geometry: Option<EntryPoint<'a, B>>,
+    /// The fragment shader, if any.
+    pub fragment: Option<EntryPoint<'a, B>>,
+}
+
+/// Pipeline states that are baked into the pipeline rather than set dynamically.
+#[derive(Clone, Debug, Default, PartialEq)]
+#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))]
+pub struct BakedStates {
+    /// Static viewport, if set.
+    pub viewport: Option<Viewport>,
+    /// Static scissor, if set.
+    pub scissor: Option<Rect>,
+    /// Static blend constant color, if set.
+    pub blend_color: Option<ColorValue>,
+    /// Static depth bounds, if set.
+    pub depth_bounds: Option<Range<f32>>,
+}
+
+/// A description of all the settings that can be altered
+/// when creating a graphics pipeline.
+#[derive(Debug)]
+pub struct GraphicsPipelineDesc<'a, B: Backend> {
+    /// The set of shaders used by the pipeline.
+    pub shaders: GraphicsShaderSet<'a, B>,
+    /// Rasterizer setup.
+    pub rasterizer: Rasterizer,
+    /// Vertex buffer descriptions.
+    pub vertex_buffers: Vec<VertexBufferDesc>,
+    /// Vertex attribute descriptions.
+    pub attributes: Vec<AttributeDesc>,
+    /// Input assembler state.
+    pub input_assembler: InputAssemblerDesc,
+    /// Description of how blend operations should be performed.
+    pub blender: BlendDesc,
+    /// Depth/stencil state.
+    pub depth_stencil: DepthStencilDesc,
+    /// Multisampling state, if enabled.
+    pub multisampling: Option<Multisampling>,
+    /// Any pipeline states baked into the pipeline.
+    pub baked_states: BakedStates,
+    /// Pipeline layout.
+    pub layout: &'a B::PipelineLayout,
+    /// Subpass in which the pipeline can be executed.
+    pub subpass: pass::Subpass<'a, B>,
+    /// Pipeline creation flags.
+    pub flags: PipelineCreationFlags,
+    /// The parent pipeline to derive from, if any.
+    pub parent: BasePipeline<'a, B::GraphicsPipeline>,
+}
+impl<'a, B: Backend> GraphicsPipelineDesc<'a, B> {
+    /// Create a new empty PSO descriptor.
+    pub fn new(
+        shaders: GraphicsShaderSet<'a, B>,
+        primitive: Primitive,
+        rasterizer: Rasterizer,
+        layout: &'a B::PipelineLayout,
+        subpass: pass::Subpass<'a, B>,
+    ) -> Self {
+        GraphicsPipelineDesc {
+            shaders,
+            rasterizer,
+            vertex_buffers: Vec::new(),
+            attributes: Vec::new(),
+            input_assembler: InputAssemblerDesc::new(primitive),
+            blender: BlendDesc::default(),
+            depth_stencil: DepthStencilDesc::default(),
+            multisampling: None,
+            baked_states: BakedStates::default(),
+            layout,
+            subpass,
+            flags: PipelineCreationFlags::empty(),
+            parent: BasePipeline::None,
+        }
+    }
+}
+
+/// Methods for rasterizing polygons, i.e. how the vertices span the screen.
+#[derive(Clone, Copy, Debug, PartialEq, PartialOrd)]
+#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))]
+pub enum PolygonMode {
+    /// Rasterize as a point.
+    Point,
+    /// Rasterize as a line with the given width.
+    Line(State<f32>),
+    /// Rasterize as a filled face.
+    Fill,
+}
+
+/// The orientation of front faces.
+#[derive(Clone, Copy, Debug, Eq, Hash, Ord, PartialEq, PartialOrd)]
+#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))]
+pub enum FrontFace {
+    /// Clockwise winding order.
+    Clockwise,
+    /// Counter-clockwise winding order.
+    CounterClockwise,
+}
+
+/// A depth bias allows changing the produced depth values
+/// to allow drawing coplanar primitives without z-fighting.
+#[derive(Copy, Clone, Debug, Default, PartialEq)]
+#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))]
+pub struct DepthBias {
+    /// A constant depth value added to each fragment.
+    pub const_factor: f32,
+    /// The minimum or maximum depth bias of a fragment.
+    pub clamp: f32,
+    /// A constant bias applied to the fragment's slope.
+    pub slope_factor: f32,
+}
+
+/// Rasterization state.
+#[derive(Copy, Clone, Debug, PartialEq)]
+#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))]
+pub struct Rasterizer {
+    /// How to rasterize this primitive.
+    pub polygon_mode: PolygonMode,
+    /// Which face should be culled.
+    pub cull_face: Face,
+    /// Which vertex winding is considered to be the front face for culling.
+    pub front_face: FrontFace,
+    /// Whether to clamp fragments outside the z-plane bounds
+    /// to the min or max z value instead of discarding them.
+    pub depth_clamping: bool,
+    /// What depth bias, if any, to use for the drawn primitives.
+    pub depth_bias: Option<State<DepthBias>>,
+    /// Controls how triangles are rasterized depending on their overlap with pixels.
+    pub conservative: bool,
+}
+
+impl Rasterizer {
+    /// Simple polygon-filling rasterizer state.
+    pub const FILL: Self = Rasterizer {
+        polygon_mode: PolygonMode::Fill,
+        cull_face: Face::NONE,
+        front_face: FrontFace::CounterClockwise,
+        depth_clamping: false,
+        depth_bias: None,
+        conservative: false,
+    };
+}
+
+/// A description of how blend operations should be performed.
+#[derive(Clone, Debug, Default, PartialEq)]
+#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))]
+pub struct BlendDesc {
+    /// The logic operation to apply to the blending equation, if any.
+    pub logic_op: Option<LogicOp>,
+    /// Which color targets to apply the blending operation to.
+    pub targets: Vec<ColorBlendDesc>,
+}
+
+/// Logic operations used for specifying blend equations.
+#[derive(Clone, Debug, Eq, PartialEq)]
+#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))]
+#[allow(missing_docs)]
+pub enum LogicOp {
+    Clear = 0,
+    And = 1,
+    AndReverse = 2,
+    Copy = 3,
+    AndInverted = 4,
+    NoOp = 5,
+    Xor = 6,
+    Or = 7,
+    Nor = 8,
+    Equivalent = 9,
+    Invert = 10,
+    OrReverse = 11,
+    CopyInverted = 12,
+    OrInverted = 13,
+    Nand = 14,
+    Set = 15,
+}
+
+/// A mask used while rasterizing multisampled fragments, one bit per sample.
+pub type SampleMask = u64;
+
+/// Multisampling state.
+#[derive(Clone, Debug, PartialEq)]
+pub struct Multisampling {
+    /// Number of rasterization samples.
+    pub rasterization_samples: image::NumSamples,
+    /// Optional sample shading with the given minimum fraction.
+    pub sample_shading: Option<f32>,
+    /// Mask of which samples participate.
+    pub sample_mask: SampleMask,
+    /// Whether the fragment's alpha value is used to derive a coverage mask.
+    pub alpha_coverage: bool,
+    /// Whether the fragment's alpha value is replaced with one.
+    pub alpha_to_one: bool,
+}
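A small usage sketch (not part of the vendored file): the `FILL` preset covers the common case, and Rust's struct-update syntax lets callers override individual fields.

```rust
use gfx_hal::pso::{Face, Rasterizer};

// Sketch: start from the FILL preset and enable back-face culling.
fn culled_fill() -> Rasterizer {
    Rasterizer {
        cull_face: Face::BACK,
        ..Rasterizer::FILL
    }
}
```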
diff --git a/third_party/rust/gfx-hal/src/pso/input_assembler.rs b/third_party/rust/gfx-hal/src/pso/input_assembler.rs
new file mode 100644
index 000000000000..741094cf1cfd
--- /dev/null
+++ b/third_party/rust/gfx-hal/src/pso/input_assembler.rs
@@ -0,0 +1,146 @@
+
+use crate::{format, IndexType};
+
+/// Shader binding location.
+pub type Location = u32;
+/// Index of a vertex buffer.
+pub type BufferIndex = u32;
+/// Offset of an attribute from the start of the vertex, in bytes.
+pub type ElemOffset = u32;
+/// Offset between attribute values, in bytes.
+pub type ElemStride = u32;
+/// Number of instances between each advancement of the vertex buffer.
+pub type InstanceRate = u8;
+/// Number of vertices in a patch.
+pub type PatchSize = u8;
+
+/// The rate at which vertex attributes are consumed.
+#[derive(Clone, Copy, Debug, Eq, Hash, PartialEq, PartialOrd, Ord)]
+#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))]
+pub enum VertexInputRate {
+    /// Advance the buffer once per vertex.
+    Vertex,
+    /// Advance the buffer once per the given number of instances.
+    Instance(InstanceRate),
+}
+
+impl VertexInputRate {
+    /// Get the numeric representation of the rate.
+    pub fn as_uint(&self) -> u8 {
+        match *self {
+            VertexInputRate::Vertex => 0,
+            VertexInputRate::Instance(divisor) => divisor,
+        }
+    }
+}
+
+/// A struct element descriptor.
+#[derive(Clone, Copy, Debug, Eq, Hash, PartialEq, PartialOrd, Ord)]
+#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))]
+pub struct Element<F> {
+    /// Element format.
+    pub format: F,
+    /// Offset in the struct, in bytes.
+    pub offset: ElemOffset,
+}
+
+/// Vertex buffer description.
+#[derive(Clone, Copy, Debug, Eq, Hash, PartialEq, PartialOrd, Ord)]
+#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))]
+pub struct VertexBufferDesc {
+    /// Binding number of this vertex buffer; corresponds to
+    /// the `binding` field of `AttributeDesc`.
+    pub binding: BufferIndex,
+    /// Total container size, in bytes.
+    pub stride: ElemStride,
+    /// The rate at which the vertex data is consumed.
+    pub rate: VertexInputRate,
+}
+
+/// Vertex attribute description.
+#[derive(Clone, Copy, Debug, Eq, Hash, PartialEq, PartialOrd, Ord)]
+#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))]
+pub struct AttributeDesc {
+    /// Attribute binding location in the shader.
+    pub location: Location,
+    /// Binding number of the associated vertex buffer.
+    pub binding: BufferIndex,
+    /// Attribute element description.
+    pub element: Element<format::Format>,
+}
+
+/// Describes the type of geometric primitive.
+#[derive(Clone, Copy, Debug, Eq, Hash, PartialEq, PartialOrd, Ord)]
+#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))]
+#[repr(u8)]
+pub enum Primitive {
+    /// Each vertex represents a single point.
+    PointList,
+    /// Each pair of vertices represents a single line segment.
+    LineList,
+    /// Every two consecutive vertices represent a single line segment.
+    LineStrip,
+    /// Each triplet of vertices represents a single triangle.
+    TriangleList,
+    /// Every three consecutive vertices represent a single triangle.
+    TriangleStrip,
+    /// Patch list, used with shaders capable of producing primitives
+    /// on their own (tessellation).
+    PatchList(PatchSize),
+}
+
+/// All the information needed to create an input assembler.
+#[derive(Clone, Debug, Eq, PartialEq, PartialOrd, Ord)]
+#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))]
+pub struct InputAssemblerDesc {
+    /// Type of the primitive.
+    pub primitive: Primitive,
+    /// When adjacency information is enabled, every even-numbered vertex
+    /// is treated as an adjacent vertex for the current primitive.
+    pub with_adjacency: bool,
+    /// Whether primitive restart is enabled, and if so, which index value
+    /// resets the current primitive so that a new one can be started.
+    pub restart_index: Option<IndexType>,
+}
+
+impl InputAssemblerDesc {
+    /// Create an input assembler state with default values for the given primitive.
+    pub fn new(primitive: Primitive) -> Self {
+        InputAssemblerDesc {
+            primitive,
+            with_adjacency: false,
+            restart_index: None,
+        }
+    }
+}
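A sketch of a concrete vertex layout built from these types (not part of the vendored file; the format names are assumptions about gfx-hal's `format::Format` variants): one interleaved buffer of vec3 position plus vec2 uv, 20 bytes per vertex, consumed at vertex rate.

```rust
use gfx_hal::format::Format;
use gfx_hal::pso::{AttributeDesc, Element, VertexBufferDesc, VertexInputRate};

// Sketch: position at offset 0, uv after the 12-byte position.
fn vertex_layout() -> (VertexBufferDesc, [AttributeDesc; 2]) {
    let buffer = VertexBufferDesc {
        binding: 0,
        stride: 20,
        rate: VertexInputRate::Vertex,
    };
    let position = AttributeDesc {
        location: 0,
        binding: 0,
        element: Element { format: Format::Rgb32Sfloat, offset: 0 },
    };
    let uv = AttributeDesc {
        location: 1,
        binding: 0,
        element: Element { format: Format::Rg32Sfloat, offset: 12 },
    };
    (buffer, [position, uv])
}
```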
diff --git a/third_party/rust/gfx-hal/src/pso/mod.rs b/third_party/rust/gfx-hal/src/pso/mod.rs
new file mode 100644
index 000000000000..84eb209356d0
--- /dev/null
+++ b/third_party/rust/gfx-hal/src/pso/mod.rs
@@ -0,0 +1,290 @@
+
+use crate::{device, pass, Backend};
+use std::{fmt, io, slice};
+
+mod compute;
+mod descriptor;
+mod graphics;
+mod input_assembler;
+mod output_merger;
+mod specialization;
+
+pub use self::{
+    compute::*,
+    descriptor::*,
+    graphics::*,
+    input_assembler::*,
+    output_merger::*,
+    specialization::*,
+};
+
+/// Error types happening upon PSO creation on the device side.
+#[derive(Clone, Debug, PartialEq)]
+pub enum CreationError {
+    /// Unknown other error.
+    Other,
+    /// Invalid subpass (not part of renderpass).
+    InvalidSubpass(pass::SubpassId),
+    /// Shader compilation error.
+    Shader(device::ShaderError),
+    /// Out of either host or device memory.
+    OutOfMemory(device::OutOfMemory),
+}
+
+impl From<device::OutOfMemory> for CreationError {
+    fn from(err: device::OutOfMemory) -> Self {
+        CreationError::OutOfMemory(err)
+    }
+}
+
+bitflags!(
+    /// Stages of the logical pipeline.
+    ///
+    /// The pipeline is structured by the ordering of the flags.
+    /// Some stages are queue type dependent.
+    #[cfg_attr(feature = "serde", derive(Serialize, Deserialize))]
+    pub struct PipelineStage: u32 {
+        /// Beginning of the command queue.
+        const TOP_OF_PIPE = 0x1;
+        /// Indirect data consumption.
+        const DRAW_INDIRECT = 0x2;
+        /// Vertex data consumption.
+        const VERTEX_INPUT = 0x4;
+        /// Vertex shader execution.
+        const VERTEX_SHADER = 0x8;
+        /// Hull shader execution.
+        const HULL_SHADER = 0x10;
+        /// Domain shader execution.
+        const DOMAIN_SHADER = 0x20;
+        /// Geometry shader execution.
+        const GEOMETRY_SHADER = 0x40;
+        /// Fragment shader execution.
+        const FRAGMENT_SHADER = 0x80;
+        /// Stage of early depth and stencil test.
+        const EARLY_FRAGMENT_TESTS = 0x100;
+        /// Stage of late depth and stencil test.
+        const LATE_FRAGMENT_TESTS = 0x200;
+        /// Stage of final color value calculation.
+        const COLOR_ATTACHMENT_OUTPUT = 0x400;
+        /// Compute shader execution.
+        const COMPUTE_SHADER = 0x800;
+        /// Copy/Transfer command execution.
+        const TRANSFER = 0x1000;
+        /// End of the command queue.
+        const BOTTOM_OF_PIPE = 0x2000;
+        /// Read/Write access from host.
+        /// (Not a real pipeline stage)
+        const HOST = 0x4000;
+    }
+);
+
+bitflags!(
+    /// Combination of different shader pipeline stages.
+    #[cfg_attr(feature = "serde", derive(Serialize, Deserialize))]
+    pub struct ShaderStageFlags: u32 {
+        /// Vertex shader stage.
+        const VERTEX = 0x1;
+        /// Hull (tessellation) shader stage.
+        const HULL = 0x2;
+        /// Domain (tessellation) shader stage.
+        const DOMAIN = 0x4;
+        /// Geometry shader stage.
+        const GEOMETRY = 0x8;
+        /// Fragment shader stage.
+        const FRAGMENT = 0x10;
+        /// Compute shader stage.
+        const COMPUTE = 0x20;
+        /// All graphics pipeline shader stages.
+        const GRAPHICS = Self::VERTEX.bits | Self::HULL.bits |
+            Self::DOMAIN.bits | Self::GEOMETRY.bits | Self::FRAGMENT.bits;
+        /// All shader stages.
+        const ALL = Self::GRAPHICS.bits | Self::COMPUTE.bits;
+    }
+);
+
+// Note: this type is only needed for backends, not used anywhere within gfx_hal.
+/// Which program stage this shader represents.
+#[allow(missing_docs)]
+#[derive(Clone, Copy, Debug, Eq, Hash, PartialEq)]
+#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))]
+#[repr(u8)]
+pub enum Stage {
+    Vertex,
+    Hull,
+    Domain,
+    Geometry,
+    Fragment,
+    Compute,
+}
+
+impl From<Stage> for ShaderStageFlags {
+    fn from(stage: Stage) -> Self {
+        match stage {
+            Stage::Vertex => ShaderStageFlags::VERTEX,
+            Stage::Hull => ShaderStageFlags::HULL,
+            Stage::Domain => ShaderStageFlags::DOMAIN,
+            Stage::Geometry => ShaderStageFlags::GEOMETRY,
+            Stage::Fragment => ShaderStageFlags::FRAGMENT,
+            Stage::Compute => ShaderStageFlags::COMPUTE,
+        }
+    }
+}
+
+impl fmt::Display for Stage {
+    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
+        f.write_str(match self {
+            Stage::Vertex => "vertex",
+            Stage::Hull => "hull",
+            Stage::Domain => "domain",
+            Stage::Geometry => "geometry",
+            Stage::Fragment => "fragment",
+            Stage::Compute => "compute",
+        })
+    }
+}
+
+/// Shader entry point.
+#[derive(Debug)]
+pub struct EntryPoint<'a, B: Backend> {
+    /// Entry point name.
+    pub entry: &'a str,
+    /// Shader module reference.
+    pub module: &'a B::ShaderModule,
+    /// Specialization.
+    pub specialization: Specialization<'a>,
+}
+
+impl<'a, B: Backend> Clone for EntryPoint<'a, B> {
+    fn clone(&self) -> Self {
+        EntryPoint {
+            entry: self.entry,
+            module: self.module,
+            specialization: self.specialization.clone(),
+        }
+    }
+}
+
+bitflags!(
+    /// Pipeline creation flags.
+    #[cfg_attr(feature = "serde", derive(Serialize, Deserialize))]
+    pub struct PipelineCreationFlags: u32 {
+        /// Disable pipeline optimizations.
+        ///
+        /// May speedup pipeline creation.
+        const DISABLE_OPTIMIZATION = 0x1;
+        /// Allow derivatives (children) of the pipeline.
+        ///
+        /// Must be set when pipelines set the pipeline as base.
+        const ALLOW_DERIVATIVES = 0x2;
+    }
+);
+
+/// A reference to a parent pipeline. The assumption is that
+/// a parent and derivative/child pipeline have most settings
+/// in common, and one may be switched for another more quickly
+/// than entirely unrelated pipelines would be.
+#[derive(Debug)]
+pub enum BasePipeline<'a, P: 'a> {
+    /// Referencing an existing pipeline as parent.
+    Pipeline(&'a P),
+    /// A pipeline in the same create pipelines call.
+    ///
+    /// The index of the parent must be lower than the index of the child.
+    Index(usize),
+    /// No parent pipeline exists.
+    None,
+}
+
+/// Pipeline state which may be static or dynamic.
+#[derive(Clone, Copy, Debug, Eq, Hash, Ord, PartialEq, PartialOrd)]
+#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))]
+pub enum State<T> {
+    /// Static state that cannot be altered.
+    Static(T),
+    /// Dynamic state set through a command buffer.
+    Dynamic,
+}
+
+impl<T> State<T> {
+    /// Returns the static value or a default.
+    pub fn static_or(self, default: T) -> T {
+        match self {
+            State::Static(v) => v,
+            State::Dynamic => default,
+        }
+    }
+
+    /// Whether the state is static.
+    pub fn is_static(self) -> bool {
+        match self {
+            State::Static(_) => true,
+            State::Dynamic => false,
+        }
+    }
+
+    /// Whether the state is dynamic.
+    pub fn is_dynamic(self) -> bool {
+        !self.is_static()
+    }
+}
+
+/// Safely read SPIR-V
+///
+/// Converts to native endianness and returns correctly aligned storage without unnecessary
+/// copying. Returns an `InvalidData` error if the input is trivially not SPIR-V.
+///
+/// This function can also be used to convert an already in-memory `&[u8]` to a valid `Vec<u32>`,
+/// but prefer working with `&[u32]` from the start whenever possible.
+///
+/// # Examples
+/// ```no_run
+/// let mut file = std::fs::File::open("/path/to/shader.spv").unwrap();
+/// let words = gfx_hal::pso::read_spirv(&mut file).unwrap();
+/// ```
+/// ```
+/// const SPIRV: &[u8] = &[
+///     0x03, 0x02, 0x23, 0x07, // ...
+/// ];
+/// let words = gfx_hal::pso::read_spirv(std::io::Cursor::new(&SPIRV[..])).unwrap();
+/// ```
+pub fn read_spirv<R: io::Read + io::Seek>(mut x: R) -> io::Result<Vec<u32>> {
+    let size = x.seek(io::SeekFrom::End(0))?;
+    if size % 4 != 0 {
+        return Err(io::Error::new(
+            io::ErrorKind::InvalidData,
+            "input length not divisible by 4",
+        ));
+    }
+    if size > usize::max_value() as u64 {
+        return Err(io::Error::new(io::ErrorKind::InvalidData, "input too long"));
+    }
+    let words = (size / 4) as usize;
+    let mut result = Vec::<u32>::with_capacity(words);
+    x.seek(io::SeekFrom::Start(0))?;
+    unsafe {
+        // Writing all bytes through a pointer with less strict alignment when our type has no
+        // invalid bitpatterns is safe.
+        x.read_exact(slice::from_raw_parts_mut(
+            result.as_mut_ptr() as *mut u8,
+            words * 4,
+        ))?;
+        result.set_len(words);
+    }
+    const MAGIC_NUMBER: u32 = 0x07230203;
+    if result.len() > 0 && result[0] == MAGIC_NUMBER.swap_bytes() {
+        for word in &mut result {
+            *word = word.swap_bytes();
+        }
+    }
+    if result.len() == 0 || result[0] != MAGIC_NUMBER {
+        return Err(io::Error::new(
+            io::ErrorKind::InvalidData,
+            "input missing SPIR-V magic number",
+        ));
+    }
+    Ok(result)
+}
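A short sketch of `State<T>` in practice (not part of the vendored file): a statically baked value versus one that will be set later on the command buffer.

```rust
use gfx_hal::pso::State;

// Sketch: baked vs. dynamic line width.
fn line_widths() {
    let baked: State<f32> = State::Static(1.0);
    let dynamic: State<f32> = State::Dynamic;
    assert_eq!(baked.static_or(0.0), 1.0);
    assert!(dynamic.is_dynamic());
}
```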
diff --git a/third_party/rust/gfx-hal/src/pso/output_merger.rs b/third_party/rust/gfx-hal/src/pso/output_merger.rs
new file mode 100644
index 000000000000..5c1dcc3114fe
--- /dev/null
+++ b/third_party/rust/gfx-hal/src/pso/output_merger.rs
@@ -0,0 +1,362 @@
+
+use crate::pso::{
+    graphics::StencilValue,
+    State,
+};
+
+/// A pixel-wise comparison function.
+#[derive(Clone, Copy, Debug, Eq, Hash, Ord, PartialEq, PartialOrd)]
+#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))]
+pub enum Comparison {
+    /// Comparison never succeeds.
+    Never = 0,
+    /// Passes if new value < old value.
+    Less = 1,
+    /// Passes if new value == old value.
+    Equal = 2,
+    /// Passes if new value <= old value.
+    LessEqual = 3,
+    /// Passes if new value > old value.
+    Greater = 4,
+    /// Passes if new value != old value.
+    NotEqual = 5,
+    /// Passes if new value >= old value.
+    GreaterEqual = 6,
+    /// Comparison always succeeds.
+    Always = 7,
+}
+
+bitflags!(
+    /// Target output color mask.
+    #[cfg_attr(feature = "serde", derive(Serialize, Deserialize))]
+    pub struct ColorMask: u8 {
+        /// Red mask
+        const RED = 0x1;
+        /// Green mask
+        const GREEN = 0x2;
+        /// Blue mask
+        const BLUE = 0x4;
+        /// Alpha channel mask
+        const ALPHA = 0x8;
+        /// Mask for RGB channels
+        const COLOR = 0x7;
+        /// Mask all channels
+        const ALL = 0xF;
+        /// Mask no channels.
+        const NONE = 0x0;
+    }
+);
+
+impl Default for ColorMask {
+    fn default() -> Self {
+        Self::ALL
+    }
+}
+
+/// Factors by which the source or destination colors are multiplied during blending.
+#[allow(missing_docs)]
+#[derive(Clone, Copy, Debug, Eq, Hash, Ord, PartialEq, PartialOrd)]
+#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))]
+pub enum Factor {
+    Zero = 0,
+    One = 1,
+    SrcColor = 2,
+    OneMinusSrcColor = 3,
+    DstColor = 4,
+    OneMinusDstColor = 5,
+    SrcAlpha = 6,
+    OneMinusSrcAlpha = 7,
+    DstAlpha = 8,
+    OneMinusDstAlpha = 9,
+    ConstColor = 10,
+    OneMinusConstColor = 11,
+    ConstAlpha = 12,
+    OneMinusConstAlpha = 13,
+    SrcAlphaSaturate = 14,
+    Src1Color = 15,
+    OneMinusSrc1Color = 16,
+    Src1Alpha = 17,
+    OneMinusSrc1Alpha = 18,
+}
+
+/// Blending operation combining source and destination values.
+#[allow(missing_docs)]
+#[derive(Clone, Copy, Debug, Eq, Hash, Ord, PartialEq, PartialOrd)]
+#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))]
+pub enum BlendOp {
+    /// Adds source and destination,
+    /// each multiplied by its factor.
+    Add { src: Factor, dst: Factor },
+    /// Subtracts destination from source,
+    /// each multiplied by its factor.
+    Sub { src: Factor, dst: Factor },
+    /// Subtracts source from destination,
+    /// each multiplied by its factor.
+    RevSub { src: Factor, dst: Factor },
+    /// Component-wise minimum of source and destination.
+    Min,
+    /// Component-wise maximum of source and destination.
+    Max,
+}
+
+impl BlendOp {
+    /// Replace the destination with the source.
+    pub const REPLACE: Self = BlendOp::Add {
+        src: Factor::One,
+        dst: Factor::Zero,
+    };
+    /// Add the source to the destination.
+    pub const ADD: Self = BlendOp::Add {
+        src: Factor::One,
+        dst: Factor::One,
+    };
+    /// Alpha blending.
+    pub const ALPHA: Self = BlendOp::Add {
+        src: Factor::SrcAlpha,
+        dst: Factor::OneMinusSrcAlpha,
+    };
+    /// Alpha blending with a premultiplied source.
+    pub const PREMULTIPLIED_ALPHA: Self = BlendOp::Add {
+        src: Factor::One,
+        dst: Factor::OneMinusSrcAlpha,
+    };
+}
+
+/// Blend operations for the color and alpha channels of a target.
+#[derive(Clone, Copy, Debug, Eq, Hash, PartialEq)]
+#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))]
+pub struct BlendState {
+    /// The blend operation for the color channels.
+    pub color: BlendOp,
+    /// The blend operation for the alpha channel.
+    pub alpha: BlendOp,
+}
+
+impl BlendState {
+    /// Replace the destination with the source.
+    pub const REPLACE: Self = BlendState {
+        color: BlendOp::REPLACE,
+        alpha: BlendOp::REPLACE,
+    };
+    /// Additive blending.
+    pub const ADD: Self = BlendState {
+        color: BlendOp::ADD,
+        alpha: BlendOp::ADD,
+    };
+    /// Multiplicative blending.
+    pub const MULTIPLY: Self = BlendState {
+        color: BlendOp::Add {
+            src: Factor::Zero,
+            dst: Factor::SrcColor,
+        },
+        alpha: BlendOp::Add {
+            src: Factor::Zero,
+            dst: Factor::SrcAlpha,
+        },
+    };
+    /// Alpha blending.
+    pub const ALPHA: Self = BlendState {
+        color: BlendOp::ALPHA,
+        alpha: BlendOp::PREMULTIPLIED_ALPHA,
+    };
+    /// Premultiplied-alpha blending.
+    pub const PREMULTIPLIED_ALPHA: Self = BlendState {
+        color: BlendOp::PREMULTIPLIED_ALPHA,
+        alpha: BlendOp::PREMULTIPLIED_ALPHA,
+    };
+}
+
+/// Color mask and blend state of a single color target.
+#[derive(Clone, Copy, Debug, Default, Eq, Hash, PartialEq)]
+#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))]
+pub struct ColorBlendDesc {
+    /// Color write mask.
+    pub mask: ColorMask,
+    /// Blend state, if enabled.
+    pub blend: Option<BlendState>,
+}
+
+impl ColorBlendDesc {
+    /// Empty blend descriptor: writes all channels, no blending.
+    pub const EMPTY: Self = ColorBlendDesc {
+        mask: ColorMask::ALL,
+        blend: None,
+    };
+}
+
+/// Depth test state.
+#[derive(Clone, Copy, Debug, Eq, Hash, Ord, PartialEq, PartialOrd)]
+#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))]
+pub struct DepthTest {
+    /// Comparison function to use.
+    pub fun: Comparison,
+    /// Whether to write to the depth buffer.
+    pub write: bool,
+}
+
+impl DepthTest {
+    /// A depth test that always fails.
+    pub const FAIL: Self = DepthTest {
+        fun: Comparison::Never,
+        write: false,
+    };
+    /// A depth test that always passes and doesn't write depth.
+    pub const PASS_TEST: Self = DepthTest {
+        fun: Comparison::Always,
+        write: false,
+    };
+    /// A depth test that always passes and writes depth.
+    pub const PASS_WRITE: Self = DepthTest {
+        fun: Comparison::Always,
+        write: true,
+    };
+}
+/// The operation to use for stencil masking.
+#[repr(u8)]
+#[derive(Clone, Copy, Debug, Eq, Hash, Ord, PartialEq, PartialOrd)]
+#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))]
+pub enum StencilOp {
+    /// Keep the current value in the stencil buffer (no change).
+    Keep = 0,
+    /// Set the value in the stencil buffer to zero.
+    Zero = 1,
+    /// Set the stencil buffer value to the reference value.
+    Replace = 2,
+    /// Increment the stencil buffer value, clamping to its maximum.
+    IncrementClamp = 3,
+    /// Decrement the stencil buffer value, clamping to zero.
+    DecrementClamp = 4,
+    /// Bitwise invert the current value in the stencil buffer.
+    Invert = 5,
+    /// Increment the stencil buffer value, wrapping around to zero.
+    IncrementWrap = 6,
+    /// Decrement the stencil buffer value, wrapping around to the maximum.
+    DecrementWrap = 7,
+}
+
+/// Complete stencil state for one side of a face.
+#[derive(Clone, Copy, Debug, Eq, Hash, Ord, PartialEq, PartialOrd)]
+#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))]
+pub struct StencilFace {
+    /// Comparison function used to determine if the stencil test passes.
+    pub fun: Comparison,
+    /// Operation to perform if the stencil test fails.
+    pub op_fail: StencilOp,
+    /// Operation to perform if the stencil test passes but the depth test fails.
+    pub op_depth_fail: StencilOp,
+    /// Operation to perform if both the depth and stencil tests pass.
+    pub op_pass: StencilOp,
+}
+
+impl Default for StencilFace {
+    fn default() -> StencilFace {
+        StencilFace {
+            fun: Comparison::Never,
+            op_fail: StencilOp::Keep,
+            op_depth_fail: StencilOp::Keep,
+            op_pass: StencilOp::Keep,
+        }
+    }
+}
+
+/// A generic struct holding one value for the front face
+/// and one value for the back face.
+#[derive(Clone, Copy, Debug, Default, Eq, Hash, PartialEq)]
+#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))]
+pub struct Sided<T> {
+    /// Front face value.
+    pub front: T,
+    /// Back face value.
+    pub back: T,
+}
+
+impl<T: Copy> Sided<T> {
+    /// Create a new `Sided` with the same value for front and back.
+    pub fn new(value: T) -> Self {
+        Sided {
+            front: value,
+            back: value,
+        }
+    }
+}
+
+/// Pair of stencil values that can either be baked into the pipeline
+/// or provided dynamically through the command buffer.
+pub type StencilValues = State<Sided<StencilValue>>;
+
+/// Defines a stencil test: the new fragment is tested against the value
+/// held in the stencil buffer, and if the test fails the fragment is discarded.
+#[derive(Clone, Copy, Debug, Eq, Hash, PartialEq)]
+#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))]
+pub struct StencilTest {
+    /// Stencil state for the front and back faces.
+    pub faces: Sided<StencilFace>,
+    /// Masks ANDed with both the stencil buffer value and the
+    /// reference value when they are read before the stencil test.
+    pub read_masks: StencilValues,
+    /// Masks applied to values written to the stencil buffer.
+    pub write_masks: StencilValues,
+    /// The reference values used for stencil testing.
+    pub reference_values: StencilValues,
+}
+
+impl Default for StencilTest {
+    fn default() -> Self {
+        StencilTest {
+            faces: Sided::default(),
+            read_masks: State::Static(Sided::new(!0)),
+            write_masks: State::Static(Sided::new(!0)),
+            reference_values: State::Static(Sided::new(0)),
+        }
+    }
+}
+
+/// A complete description of the depth and stencil state.
+#[derive(Clone, Copy, Debug, Default, Eq, Hash, PartialEq)]
+#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))]
+pub struct DepthStencilDesc {
+    /// Optional depth testing/writing.
+    pub depth: Option<DepthTest>,
+    /// Enable depth bounds testing.
+    pub depth_bounds: bool,
+    /// Stencil test/write, if enabled.
+    pub stencil: Option<StencilTest>,
+}
+
+impl DepthStencilDesc {
+    /// Returns true if the descriptor assumes the depth attachment will be used.
+    pub fn uses_depth(&self) -> bool {
+        self.depth.is_some() || self.depth_bounds
+    }
+    /// Returns true if the descriptor assumes the stencil attachment will be used.
+    pub fn uses_stencil(&self) -> bool {
+        self.stencil.is_some()
+    }
+}
+
+bitflags!(
+    /// Face.
+    #[cfg_attr(feature = "serde", derive(Serialize, Deserialize))]
+    pub struct Face: u32 {
+        /// Empty face. TODO: remove when constexpr are stabilized to use empty()
+        const NONE = 0x0;
+        /// Front face.
+        const FRONT = 0x1;
+        /// Back face.
+        const BACK = 0x2;
+    }
+);
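A small sketch of a common configuration built from these types (not part of the vendored file): depth testing with writes and no stencil.

```rust
use gfx_hal::pso::{Comparison, DepthStencilDesc, DepthTest};

// Sketch: LessEqual depth test with writes, no stencil, no depth bounds.
fn depth_only() -> DepthStencilDesc {
    let desc = DepthStencilDesc {
        depth: Some(DepthTest { fun: Comparison::LessEqual, write: true }),
        depth_bounds: false,
        stencil: None,
    };
    debug_assert!(desc.uses_depth() && !desc.uses_stencil());
    desc
}
```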
diff --git a/third_party/rust/gfx-hal/src/pso/specialization.rs b/third_party/rust/gfx-hal/src/pso/specialization.rs
new file mode 100644
index 000000000000..79a8123fafe5
--- /dev/null
+++ b/third_party/rust/gfx-hal/src/pso/specialization.rs
@@ -0,0 +1,132 @@
+
+use std::{borrow::Cow, ops::Range, slice};
+
+/// Description of a specialization constant for the pipeline.
+#[derive(Debug, Clone, Hash, PartialEq)]
+pub struct SpecializationConstant {
+    /// Constant identifier in the shader source.
+    pub id: u32,
+    /// Value range in the data buffer, in bytes.
+    pub range: Range<u16>,
+}
+
+/// Specialization information for pipelines.
+#[derive(Debug, Clone)]
+pub struct Specialization<'a> {
+    /// Descriptors of the specialization constants to override.
+    pub constants: Cow<'a, [SpecializationConstant]>,
+    /// Raw data of the specialization constant values.
+    pub data: Cow<'a, [u8]>,
+}
+
+impl Specialization<'_> {
+    /// Empty specialization instance.
+    pub const EMPTY: Self = Specialization {
+        constants: Cow::Borrowed(&[]),
+        data: Cow::Borrowed(&[]),
+    };
+}
+
+impl Default for Specialization<'_> {
+    fn default() -> Self {
+        Specialization::EMPTY
+    }
+}
+
+#[doc(hidden)]
+#[derive(Debug, Default)]
+pub struct SpecializationStorage {
+    constants: Vec<SpecializationConstant>,
+    data: Vec<u8>,
+}
+
+#[doc(hidden)]
+pub trait SpecConstList: Sized {
+    fn fold(self, storage: &mut SpecializationStorage);
+}
+
+impl<T> From<T> for Specialization<'_>
+where
+    T: SpecConstList,
+{
+    fn from(list: T) -> Self {
+        let mut storage = SpecializationStorage::default();
+        list.fold(&mut storage);
+        Specialization {
+            data: Cow::Owned(storage.data),
+            constants: Cow::Owned(storage.constants),
+        }
+    }
+}
+
+#[doc(hidden)]
+#[derive(Debug)]
+pub struct SpecConstListNil;
+
+#[doc(hidden)]
+#[derive(Debug)]
+pub struct SpecConstListCons<H, T> {
+    pub head: (u32, H),
+    pub tail: T,
+}
+
+impl SpecConstList for SpecConstListNil {
+    fn fold(self, _storage: &mut SpecializationStorage) {}
+}
+
+impl<H, T> SpecConstList for SpecConstListCons<H, T>
+where
+    T: SpecConstList,
+{
+    fn fold(self, storage: &mut SpecializationStorage) {
+        let size = std::mem::size_of::<H>();
+        assert!(storage.data.len() + size <= u16::max_value() as usize);
+        let offset = storage.data.len() as u16;
+        storage.data.extend_from_slice(unsafe {
+            // Reading the raw bytes of a plain-data constant value.
+            slice::from_raw_parts(&self.head.1 as *const H as *const u8, size)
+        });
+        storage.constants.push(SpecializationConstant {
+            id: self.head.0,
+            range: offset .. offset + size as u16,
+        });
+        self.tail.fold(storage)
+    }
+}
+
+/// Macro for specifying a list of specialization constants for an `EntryPoint`.
+#[macro_export]
+macro_rules! spec_const_list {
+    (@ $(,)?) => {
+        $crate::pso::SpecConstListNil
+    };
+
+    (@ $head_id:expr => $head_constant:expr $(,$tail_id:expr => $tail_constant:expr)* $(,)?) => {
+        $crate::pso::SpecConstListCons {
+            head: ($head_id, $head_constant),
+            tail: $crate::spec_const_list!(@ $($tail_id => $tail_constant),*),
+        }
+    };
+
+    ($($id:expr => $constant:expr),* $(,)?) => {
+        $crate::spec_const_list!(@ $($id => $constant),*).into()
+    };
+
+    ($($constant:expr),* $(,)?) => {
+        {
+            let mut counter = 0;
+            $crate::spec_const_list!(@ $({ counter += 1; counter - 1 } => $constant),*).into()
+        }
+    };
+}
diff --git a/third_party/rust/gfx-hal/src/query.rs b/third_party/rust/gfx-hal/src/query.rs
new file mode 100644
index 000000000000..8c4163003c8a
--- /dev/null
+++ b/third_party/rust/gfx-hal/src/query.rs
@@ -0,0 +1,106 @@
+
+use crate::device::OutOfMemory;
+use crate::Backend;
+
+/// Query identifier within a pool.
+pub type Id = u32;
+
+/// Query creation error.
+#[derive(Clone, Debug, PartialEq)]
+pub enum CreationError {
+    /// Out of either host or device memory.
+    OutOfMemory(OutOfMemory),
+    /// Queries of the given type are not supported.
+    Unsupported(Type),
+}
+
+impl From<OutOfMemory> for CreationError {
+    fn from(error: OutOfMemory) -> Self {
+        CreationError::OutOfMemory(error)
+    }
+}
+
+/// A query: a pool plus an identifier within it.
+#[derive(Debug)]
+pub struct Query<'a, B: Backend> {
+    /// Pool the query belongs to.
+    pub pool: &'a B::QueryPool,
+    /// Query id within the pool.
+    pub id: Id,
+}
+
+bitflags!(
+    /// Query control flags.
+    #[cfg_attr(feature = "serde", derive(Serialize, Deserialize))]
+    pub struct ControlFlags: u32 {
+        /// Occlusion queries **must** return the exact sampler number.
+        ///
+        /// Requires `precise_occlusion_query` device feature.
+        const PRECISE = 0x1;
+    }
+);
+
+bitflags!(
+    /// Query result flags.
+    #[cfg_attr(feature = "serde", derive(Serialize, Deserialize))]
+    pub struct ResultFlags: u32 {
+        /// Results will be written as an array of 64-bit unsigned integer values.
+        const BITS_64 = 0x1;
+        /// Wait for each query's status to become available before retrieving its results.
+        const WAIT = 0x2;
+        /// Availability status accompanies the results.
+        const WITH_AVAILABILITY = 0x4;
+        /// Returning partial results is acceptable.
+        const PARTIAL = 0x8;
+    }
+);
+
+/// Type of the query.
+#[derive(Copy, Clone, Debug, Hash, Eq, PartialEq)]
+pub enum Type {
+    /// Occlusion query: counts samples that pass the per-fragment tests.
+    Occlusion,
+    /// Pipeline statistics query, with the statistics to gather.
+    PipelineStatistics(PipelineStatistic),
+    /// Timestamp query.
+    Timestamp,
+}
+
+bitflags!(
+    /// Pipeline statistic flags
+    #[cfg_attr(feature = "serde", derive(Serialize, Deserialize))]
+    pub struct PipelineStatistic: u32 {
+        ///
+        const INPUT_ASSEMBLY_VERTICES = 0x1;
+        ///
+        const INPUT_ASSEMBLY_PRIMITIVES = 0x2;
+        ///
+        const VERTEX_SHADER_INVOCATIONS = 0x4;
+        ///
+        const GEOMETRY_SHADER_INVOCATIONS = 0x8;
+        ///
+        const GEOMETRY_SHADER_PRIMITIVES = 0x10;
+        ///
+        const CLIPPING_INVOCATIONS = 0x20;
+        ///
+        const CLIPPING_PRIMITIVES = 0x40;
+        ///
+        const FRAGMENT_SHADER_INVOCATIONS = 0x80;
+        ///
+        const HULL_SHADER_PATCHES = 0x100;
+        ///
+        const DOMAIN_SHADER_INVOCATIONS = 0x200;
+        ///
+        const COMPUTE_SHADER_INVOCATIONS = 0x400;
+    }
+);
diff --git a/third_party/rust/gfx-hal/src/queue/family.rs b/third_party/rust/gfx-hal/src/queue/family.rs
new file mode 100644
index 000000000000..8941953253de
--- /dev/null
+++ b/third_party/rust/gfx-hal/src/queue/family.rs
@@ -0,0 +1,52 @@
+
+use crate::queue::QueueType;
+use crate::Backend;
+
+use std::any::Any;
+use std::fmt::Debug;
+
+/// General information about a queue family, available upon adapter discovery.
+pub trait QueueFamily: Debug + Any + Send + Sync {
+    /// Returns the type of queues in this family.
+    fn queue_type(&self) -> QueueType;
+    /// Returns the maximum number of queues that can be created from this family.
+    fn max_queues(&self) -> usize;
+    /// Returns the queue family ID.
+    fn id(&self) -> QueueFamilyId;
+}
+
+/// Identifier for a queue family of a physical device.
+#[derive(Debug, Copy, Clone, PartialEq, Eq, Hash)]
+#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))]
+pub struct QueueFamilyId(pub usize);
+
+/// A group of queues belonging to one family.
+#[derive(Debug)]
+pub struct QueueGroup<B: Backend> {
+    /// Family the queues belong to.
+    pub family: QueueFamilyId,
+    /// Command queues created in this family.
+    pub queues: Vec<B::CommandQueue>,
+}
+
+impl<B: Backend> QueueGroup<B> {
+    /// Create a new, empty queue group for a queue family.
+    pub fn new(family: QueueFamilyId) -> Self {
+        QueueGroup {
+            family,
+            queues: Vec::new(),
+        }
+    }
+
+    /// Add a command queue to the group.
+    ///
+    /// The queue needs to be created from this queue family.
+    pub fn add_queue(&mut self, queue: B::CommandQueue) {
+        self.queues.push(queue);
+    }
+}
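A usage sketch (not part of the vendored file): adapters expose a list of queue families, and pipelines like the one in this patch pick one by capability before opening a device.

```rust
use gfx_hal::queue::QueueFamily;

// Sketch: choose the first queue family that can do graphics work.
fn pick_graphics<F: QueueFamily>(families: &[F]) -> Option<&F> {
    families.iter().find(|f| f.queue_type().supports_graphics())
}
```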
diff --git a/third_party/rust/gfx-hal/src/queue/mod.rs b/third_party/rust/gfx-hal/src/queue/mod.rs
new file mode 100644
index 000000000000..262c1406e520
--- /dev/null
+++ b/third_party/rust/gfx-hal/src/queue/mod.rs
@@ -0,0 +1,149 @@
+
+pub mod family;
+
+use crate::{
+    device::OutOfMemory,
+    pso,
+    window::{PresentError, PresentationSurface, Suboptimal, SwapImageIndex},
+    Backend,
+};
+use std::{any::Any, borrow::Borrow, fmt, iter};
+
+pub use self::family::{QueueFamily, QueueFamilyId, QueueGroup};
+
+/// The type of a queue, describing the operations it supports.
+#[derive(Debug, Copy, Clone, PartialEq, Eq)]
+#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))]
+pub enum QueueType {
+    /// Supports all operations.
+    General,
+    /// Supports graphics and transfer operations.
+    Graphics,
+    /// Supports compute and transfer operations.
+    Compute,
+    /// Supports transfer operations only.
+    Transfer,
+}
+
+impl QueueType {
+    /// Returns true if the queue supports graphics operations.
+    pub fn supports_graphics(&self) -> bool {
+        match *self {
+            QueueType::General | QueueType::Graphics => true,
+            QueueType::Compute | QueueType::Transfer => false,
+        }
+    }
+    /// Returns true if the queue supports compute operations.
+    pub fn supports_compute(&self) -> bool {
+        match *self {
+            QueueType::General | QueueType::Graphics | QueueType::Compute => true,
+            QueueType::Transfer => false,
+        }
+    }
+    /// Returns true if the queue supports transfer operations.
+    pub fn supports_transfer(&self) -> bool {
+        true
+    }
+}
+
+/// Scheduling hint for devices about the priority of a queue.
+pub type QueuePriority = f32;
+
+/// Submission information for a command queue.
+#[derive(Debug)]
+pub struct Submission<Ic, Iw, Is> {
+    /// Command buffers to submit.
+    pub command_buffers: Ic,
+    /// Semaphores to wait on before execution begins.
+    pub wait_semaphores: Iw,
+    /// Semaphores to signal once execution has finished.
+    pub signal_semaphores: Is,
+}
+
+/// Abstraction of a device execution queue.
+pub trait CommandQueue<B: Backend>: fmt::Debug + Any + Send + Sync {
+    /// Submit command buffers to the queue for execution.
+    ///
+    /// `fence` must be in the unsignalled state, and will be signalled once
+    /// all submitted command buffers have finished execution.
+    unsafe fn submit<'a, T, Ic, S, Iw, Is>(
+        &mut self,
+        submission: Submission<Ic, Iw, Is>,
+        fence: Option<&B::Fence>,
+    ) where
+        T: 'a + Borrow<B::CommandBuffer>,
+        Ic: IntoIterator<Item = &'a T>,
+        S: 'a + Borrow<B::Semaphore>,
+        Iw: IntoIterator<Item = (&'a S, pso::PipelineStage)>,
+        Is: IntoIterator<Item = &'a S>;
+
+    /// Simplified version of `submit` that doesn't expect any semaphores.
+    unsafe fn submit_without_semaphores<'a, T, Ic>(
+        &mut self,
+        command_buffers: Ic,
+        fence: Option<&B::Fence>,
+    ) where
+        T: 'a + Borrow<B::CommandBuffer>,
+        Ic: IntoIterator<Item = &'a T>,
+    {
+        let submission = Submission {
+            command_buffers,
+            wait_semaphores: iter::empty(),
+            signal_semaphores: iter::empty(),
+        };
+        self.submit::<_, _, B::Semaphore, _, _>(submission, fence)
+    }
+
+    /// Present one or more swapchain images, waiting on the given semaphores.
+    unsafe fn present<'a, W, Is, S, Iw>(
+        &mut self,
+        swapchains: Is,
+        wait_semaphores: Iw,
+    ) -> Result<Option<Suboptimal>, PresentError>
+    where
+        Self: Sized,
+        W: 'a + Borrow<B::Swapchain>,
+        Is: IntoIterator<Item = (&'a W, SwapImageIndex)>,
+        S: 'a + Borrow<B::Semaphore>,
+        Iw: IntoIterator<Item = &'a S>;
+
+    /// Simplified version of `present` that doesn't expect any semaphores.
+    unsafe fn present_without_semaphores<'a, W, Is>(
+        &mut self,
+        swapchains: Is,
+    ) -> Result<Option<Suboptimal>, PresentError>
+    where
+        Self: Sized,
+        W: 'a + Borrow<B::Swapchain>,
+        Is: IntoIterator<Item = (&'a W, SwapImageIndex)>,
+    {
+        self.present::<_, _, B::Semaphore, _>(swapchains, iter::empty())
+    }
+
+    /// Present a swapchain image directly to a surface.
+    unsafe fn present_surface(
+        &mut self,
+        surface: &mut B::Surface,
+        image: <B::Surface as PresentationSurface<B>>::SwapchainImage,
+        wait_semaphore: Option<&B::Semaphore>,
+    ) -> Result<Option<Suboptimal>, PresentError>;
+
+    /// Wait for the queue to be idle.
+    fn wait_idle(&self) -> Result<(), OutOfMemory>;
+}
diff --git a/third_party/rust/gfx-hal/src/range.rs b/third_party/rust/gfx-hal/src/range.rs
new file mode 100644
index 000000000000..c3343deed1bb
--- /dev/null
+++ b/third_party/rust/gfx-hal/src/range.rs
@@ -0,0 +1,59 @@
+
+use std::ops::{Range, RangeFrom, RangeFull, RangeTo};
+
+/// A trait for items that can be expressed with ranges,
+/// where a missing bound means the start or end of the collection.
+pub trait RangeArg<T> {
+    /// Returns the lower bound of the range, if present.
+    fn start(&self) -> Option<&T>;
+    /// Returns the upper bound of the range, if present.
+    fn end(&self) -> Option<&T>;
+}
+
+impl<T> RangeArg<T> for Range<T> {
+    fn start(&self) -> Option<&T> {
+        Some(&self.start)
+    }
+    fn end(&self) -> Option<&T> {
+        Some(&self.end)
+    }
+}
+
+impl<T> RangeArg<T> for RangeTo<T> {
+    fn start(&self) -> Option<&T> {
+        None
+    }
+    fn end(&self) -> Option<&T> {
+        Some(&self.end)
+    }
+}
+
+impl<T> RangeArg<T> for RangeFrom<T> {
+    fn start(&self) -> Option<&T> {
+        Some(&self.start)
+    }
+    fn end(&self) -> Option<&T> {
+        None
+    }
+}
+
+impl<T> RangeArg<T> for RangeFull {
+    fn start(&self) -> Option<&T> {
+        None
+    }
+    fn end(&self) -> Option<&T> {
+        None
+    }
+}
+
+impl<T> RangeArg<T> for (Option<T>, Option<T>) {
+    fn start(&self) -> Option<&T> {
+        self.0.as_ref()
+    }
+    fn end(&self) -> Option<&T> {
+        self.1.as_ref()
+    }
+}
diff --git a/third_party/rust/gfx-hal/src/window.rs b/third_party/rust/gfx-hal/src/window.rs
new file mode 100644
index 000000000000..168384070b18
--- /dev/null
+++ b/third_party/rust/gfx-hal/src/window.rs
@@ -0,0 +1,526 @@
+
+use crate::device;
+use crate::format::Format;
+use crate::image;
+use crate::queue::CommandQueue;
+use crate::Backend;
+
+use std::any::Any;
+use std::borrow::Borrow;
+use std::cmp::{max, min};
+use std::fmt;
+use std::iter;
+use std::ops::RangeInclusive;
+
+/// Error occurred during swapchain creation or configuration.
+#[derive(Clone, Debug, PartialEq)]
+pub enum CreationError {
+    /// Out of either host or device memory.
+    OutOfMemory(device::OutOfMemory),
+    /// Device is lost.
+    DeviceLost(device::DeviceLost),
+    /// Surface is lost.
+    SurfaceLost(device::SurfaceLost),
+    /// The window is already in use.
+    WindowInUse(device::WindowInUse),
+}
+
+impl From<device::OutOfMemory> for CreationError {
+    fn from(error: device::OutOfMemory) -> Self {
+        CreationError::OutOfMemory(error)
+    }
+}
+
+impl From<device::DeviceLost> for CreationError {
+    fn from(error: device::DeviceLost) -> Self {
+        CreationError::DeviceLost(error)
+    }
+}
+
+impl From<device::SurfaceLost> for CreationError {
+    fn from(error: device::SurfaceLost) -> Self {
+        CreationError::SurfaceLost(error)
+    }
+}
+
+impl From<device::WindowInUse> for CreationError {
+    fn from(error: device::WindowInUse) -> Self {
+        CreationError::WindowInUse(error)
+    }
+}
+
+/// A 2D extent: the size of a rectangle such as a window or texture.
+#[derive(Clone, Copy, Debug, PartialEq, PartialOrd)]
+#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))]
+pub struct Extent2D {
+    /// Width.
+    pub width: image::Size,
+    /// Height.
+    pub height: image::Size,
+}
+
+impl From<image::Extent> for Extent2D {
+    fn from(ex: image::Extent) -> Self {
+        Extent2D {
+            width: ex.width,
+            height: ex.height,
+        }
+    }
+}
+
+impl Extent2D {
+    /// Convert into a regular image extent (depth of 1).
+    pub fn to_extent(&self) -> image::Extent {
+        image::Extent {
+            width: self.width,
+            height: self.height,
+            depth: 1,
+        }
+    }
+}
+
+/// Describes what a surface supports.
+#[derive(Debug, Clone)]
+#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))]
+pub struct SurfaceCapabilities {
+    /// Number of presentable images supported by the adapter for a swapchain
+    /// created from this surface.
+    pub image_count: RangeInclusive<SwapImageIndex>,
+    /// Current extent of the surface, if known.
+    pub current_extent: Option<Extent2D>,
+    /// Range of supported extents.
+    pub extents: RangeInclusive<Extent2D>,
+    /// Maximum number of layers supported for presentable images.
+    pub max_image_layers: image::Layer,
+    /// Supported image usage flags.
+    pub usage: image::Usage,
+    /// A bitmask of supported presentation modes.
+    pub present_modes: PresentMode,
+    /// A bitmask of supported alpha composition modes.
+    pub composite_alpha_modes: CompositeAlphaMode,
+}
+
+impl SurfaceCapabilities {
+    fn clamped_extent(&self, default_extent: Extent2D) -> Extent2D {
+        match self.current_extent {
+            Some(current) => current,
+            None => {
+                let (min_width, max_width) = (self.extents.start().width, self.extents.end().width);
+                let (min_height, max_height) =
+                    (self.extents.start().height, self.extents.end().height);
+
+                // Clamp the default extent into the allowed surface sizes.
+                let width = min(max_width, max(default_extent.width, min_width));
+                let height = min(max_height, max(default_extent.height, min_height));
+
+                Extent2D { width, height }
+            }
+        }
+    }
+}
+
+/// A `Surface` abstracts the surface of a native window.
+pub trait Surface<B: Backend>: fmt::Debug + Any + Send + Sync {
+    /// Check if the queue family supports presentation to this surface.
+    fn supports_queue_family(&self, family: &B::QueueFamily) -> bool;
+
+    /// Query surface capabilities for this physical device.
+    ///
+    /// Use this function for configuring swapchain creation.
+    fn capabilities(&self, physical_device: &B::PhysicalDevice) -> SurfaceCapabilities;
+
+    /// Query surface formats for this physical device.
+    ///
+    /// `None` means that the surface has no preferred format and any
+    /// format may be used.
+    fn supported_formats(&self, physical_device: &B::PhysicalDevice) -> Option<Vec<Format>>;
+}
+
+/// A surface that exposes the ability to present images on its
+/// associated swapchain.
+pub trait PresentationSurface<B: Backend>: Surface<B> {
+    /// An opaque type wrapping the swapchain image.
+    type SwapchainImage: Borrow<B::Image> + fmt::Debug + Send + Sync;
+
+    /// Set up the swapchain associated with the surface to have the given format.
+    unsafe fn configure_swapchain(
+        &mut self,
+        device: &B::Device,
+        config: SwapchainConfig,
+    ) -> Result<(), CreationError>;
+
+    /// Remove the associated swapchain from this surface.
+    ///
+    /// This has to be done before the surface is dropped.
+    unsafe fn unconfigure_swapchain(&mut self, device: &B::Device);
+
+    /// Acquire a new swapchain image for rendering.
+    ///
+    /// May fail according to one of the reasons indicated in the `AcquireError` enum.
+    unsafe fn acquire_image(
+        &mut self,
+        timeout_ns: u64,
+    ) -> Result<(Self::SwapchainImage, Option<Suboptimal>), AcquireError>;
+}
+
+/// Index of an image in the swapchain.
+///
+/// The swapchain is a series of one or more images, usually
+/// with one being drawn on while the other is displayed by
+/// the GPU (aka double-buffering).
+pub type SwapImageIndex = u32;
+
+bitflags!(
+    /// Specifies the mode regulating how a swapchain presents frames.
+    #[cfg_attr(feature = "serde", derive(Serialize, Deserialize))]
+    pub struct PresentMode: u32 {
+        /// Don't ever wait for v-sync.
+        const IMMEDIATE = 0x1;
+        /// Wait for v-sync, overwrite the last rendered frame.
+        const MAILBOX = 0x2;
+        /// Present frames in the same order they are rendered.
+        const FIFO = 0x4;
+        /// Don't wait for the next v-sync if we just missed it.
+        const RELAXED = 0x8;
+    }
+);
+
+bitflags!(
+    /// Specifies how the alpha channel of the images should be handled during
+    /// compositing.
+    #[cfg_attr(feature = "serde", derive(Serialize, Deserialize))]
+    pub struct CompositeAlphaMode: u32 {
+        /// The alpha channel, if it exists, of the images is ignored in the
+        /// compositing process. Instead, the image is treated as if it has a
+        /// constant alpha of 1.0.
+        const OPAQUE = 0x1;
+        /// The alpha channel, if it exists, of the images is respected in the
+        /// compositing process. The non-alpha channels of the image are
+        /// expected to already be multiplied by the alpha channel by the
+        /// application.
+        const PREMULTIPLIED = 0x2;
+        /// The alpha channel, if it exists, of the images is respected in the
+        /// compositing process. The non-alpha channels of the image are not
+        /// expected to already be multiplied by the alpha channel by the
+        /// application; instead, the compositor will multiply the non-alpha
+        /// channels of the image by the alpha channel during compositing.
+        const POSTMULTIPLIED = 0x4;
+        /// The way in which the presentation engine treats the alpha channel in
+        /// the images is unknown to gfx-hal. Instead, the application is
+        /// responsible for setting the composite alpha blending mode using
+        /// native window system commands. If the application does not set the
+        /// blending mode using native window system commands, then a
+        /// platform-specific default will be used.
+        const INHERIT = 0x8;
+    }
+);
+
+/// Contains all the data necessary to create a new `Swapchain`.
+#[derive(Debug, Clone)]
+pub struct SwapchainConfig {
+    /// Presentation mode.
+    pub present_mode: PresentMode,
+    /// Alpha composition mode.
+    pub composite_alpha_mode: CompositeAlphaMode,
+    /// Format of the backbuffer images.
+    pub format: Format,
+    /// Requested image extent. Must be in
+    /// `SurfaceCapabilities::extents` range.
+    pub extent: Extent2D,
+    /// Number of images in the swapchain. Must be in
+    /// `SurfaceCapabilities::image_count` range.
+    pub image_count: SwapImageIndex,
+    /// Number of image layers. Must not exceed
+    /// `SurfaceCapabilities::max_image_layers`.
+    pub image_layers: image::Layer,
+    /// Image usage of the backbuffer images.
+    pub image_usage: image::Usage,
+}
+
+impl SwapchainConfig {
+    /// Create a new default configuration (color images only).
+    pub fn new(width: u32, height: u32, format: Format, image_count: SwapImageIndex) -> Self {
+        SwapchainConfig {
+            present_mode: PresentMode::FIFO,
+            composite_alpha_mode: CompositeAlphaMode::OPAQUE,
+            format,
+            extent: Extent2D { width, height },
+            image_count,
+            image_layers: 1,
+            image_usage: image::Usage::COLOR_ATTACHMENT,
+        }
+    }
+
+    /// Create a swapchain configuration based on the capabilities
+    /// returned from a physical device query. If the surface does not
+    /// specify a current size, `default_extent` is clamped and used instead.
+    pub fn from_caps(caps: &SurfaceCapabilities, format: Format, default_extent: Extent2D) -> Self {
+        let composite_alpha_mode = if caps.composite_alpha_modes.contains(CompositeAlphaMode::INHERIT) {
+            CompositeAlphaMode::INHERIT
+        } else if caps.composite_alpha_modes.contains(CompositeAlphaMode::OPAQUE) {
+            CompositeAlphaMode::OPAQUE
+        } else {
+            panic!("neither INHERIT nor OPAQUE CompositeAlphaMode(s) are supported")
+        };
+        let present_mode = if caps.present_modes.contains(PresentMode::FIFO) {
+            PresentMode::FIFO
+        } else {
+            panic!("FIFO PresentMode is not supported")
+        };
+
+        SwapchainConfig {
+            present_mode,
+            composite_alpha_mode,
+            format,
+            extent: caps.clamped_extent(default_extent),
+            image_count: *caps.image_count.start(),
+            image_layers: 1,
+            image_usage: image::Usage::COLOR_ATTACHMENT,
+        }
+    }
+
+    /// Specify the presentation mode.
+    pub fn with_present_mode(mut self, mode: PresentMode) -> Self {
+        self.present_mode = mode;
+        self
+    }
+
+    /// Specify the usage of backbuffer images.
+    pub fn with_image_usage(mut self, usage: image::Usage) -> Self {
+        self.image_usage = usage;
+        self
+    }
+}
+
+/// Marker value returned if the swapchain no longer matches the surface properties exactly,
+/// but can still be used to present to the surface successfully.
+#[derive(Debug)]
+pub struct Suboptimal;
+
+/// Error on acquiring the next image from a swapchain.
+#[derive(Clone, Debug, PartialEq)]
+pub enum AcquireError {
+    /// Out of either host or device memory.
+    OutOfMemory(device::OutOfMemory),
+    /// No image was ready and no timeout was specified.
+    NotReady,
+    /// No image was ready before the timeout expired.
+    Timeout,
+    /// The swapchain is no longer in sync with the surface and needs to be re-created.
+    OutOfDate,
+    /// The surface was lost.
+    SurfaceLost(device::SurfaceLost),
+    /// Device is lost.
+    DeviceLost(device::DeviceLost),
+}
+
+/// Error on presenting a swapchain image.
+#[derive(Clone, Debug, PartialEq)]
+pub enum PresentError {
+    /// Out of either host or device memory.
+    OutOfMemory(device::OutOfMemory),
+    /// The swapchain is no longer in sync with the surface and needs to be re-created.
+    OutOfDate,
+    /// The surface was lost.
+    SurfaceLost(device::SurfaceLost),
+    /// Device is lost.
+    DeviceLost(device::DeviceLost),
+}
+
+/// The `Swapchain` is the backend representation of the surface.
+/// It consists of multiple buffers, which will be presented on the surface.
+pub trait Swapchain<B: Backend>: fmt::Debug + Any + Send + Sync {
+    /// Acquire a new swapchain image for rendering. This needs to be called before presenting.
+    ///
+    /// May fail according to one of the reasons indicated in the `AcquireError` enum.
+    ///
+    /// # Synchronization
+    ///
+    /// The acquired image will not be immediately available when the function returns.
+    /// Once available, the provided `Semaphore` and `Fence` will be signaled.
+    unsafe fn acquire_image(
+        &mut self,
+        timeout_ns: u64,
+        semaphore: Option<&B::Semaphore>,
+        fence: Option<&B::Fence>,
+    ) -> Result<(SwapImageIndex, Option<Suboptimal>), AcquireError>;
+
+    /// Present one acquired image.
+    ///
+    /// # Safety
+    ///
+    /// The passed queue _must_ support presentation on the surface
+    /// this swapchain was created from.
+    unsafe fn present<'a, S, Iw>(
+        &'a self,
+        present_queue: &mut B::CommandQueue,
+        image_index: SwapImageIndex,
+        wait_semaphores: Iw,
+    ) -> Result<Option<Suboptimal>, PresentError>
+    where
+        Self: 'a + Sized + Borrow<B::Swapchain>,
+        S: 'a + Borrow<B::Semaphore>,
+        Iw: IntoIterator<Item = &'a S>,
+    {
+        present_queue.present(iter::once((self, image_index)), wait_semaphores)
+    }
+
+    /// Present one acquired image without any semaphore synchronization.
+    unsafe fn present_without_semaphores<'a>(
+        &'a self,
+        present_queue: &mut B::CommandQueue,
+        image_index: SwapImageIndex,
+    ) -> Result<Option<Suboptimal>, PresentError>
+    where
+        Self: 'a + Sized + Borrow<B::Swapchain>,
+    {
+        self.present::<B::Semaphore, _>(present_queue, image_index, iter::empty())
+    }
+}
+
+/// Error occurred during surface creation.
+#[derive(Clone, Copy, Debug, PartialEq, Eq)]
+pub enum InitError {
+    /// Window handle is not supported by the backend.
+    UnsupportedWindowHandle,
+}
diff --git a/third_party/rust/hibitset/.cargo-checksum.json b/third_party/rust/hibitset/.cargo-checksum.json
new file mode 100644
index 000000000000..1800c0b6b9d4
--- /dev/null
+++ b/third_party/rust/hibitset/.cargo-checksum.json
@@ -0,0 +1 @@
+{"files":{"Cargo.toml":"a9b40514258547911202144e05e39e5bf5e36231bcfad00344335094a039511e","LICENSE-APACHE":"b40930bbcf80744c86c46a12bc9da056641d722716c378f5659b9e555ef833e1","LICENSE-MIT":"3643762aa37cb42db407394768d29bb439c2cc5778dd02f557b80e7caaac995b","README.md":"71071d4986f1aab6f16ddcd76873d2763d37c322c29ddf8257c687e97a2a217c","benches/benches.rs":"4190dc661208d00074cff8b6ec1bdbadedd50ed6b8cec79414354dbcde927b04","benches/iter.rs":"0fa1fca605f176bd9cc6478dc27c5e64762db6b030890212bf142609a05281bc","bors.toml":"1d8a7a56c5c76925a3daa8c50a40cc82cbfc638f521f864106bd60b1e8a219a2","src/atomic.rs":"88482afab2a0274bb66e820c064714277bdcff93c0c091403ef26bc6ee20481a","src/iter/drain.rs":"96ffe098f3493fa5426d741f54643a995ba6d68f9f166cfdf17a20c796cd915d","src/iter/mod.rs":"680e00e6976929d827b12fb585e04e2868e1bc200d5c75a57a185899b8c76924","src/iter/parallel.rs":"885d8aab10b90c3187a3d1080c95be8a14571f8fc74ff7a983c8479efe8fa003","src/lib.rs":"4c512456114fa409f1d2f2f75508e94b9b19bf9006c2be9b00f71385dfbae79c","src/ops.rs":"e98a1b447af18c14d27adb250c4dd65ed2eedde302f33a79fe2abe37b67e90f2","src/util.rs":"ac7f79043ff8ecf6952aa707710a967a5142a3cd57a8d73b44d2e96a9ce3fac2"},"package":"47e7292fd9f7fe89fa35c98048f2d0a69b79ed243604234d18f6f8a1aa6f408d"}
\ No newline at end of file
diff --git a/third_party/rust/hibitset/Cargo.toml b/third_party/rust/hibitset/Cargo.toml
new file mode 100644
index 000000000000..3ac71574a21a
--- /dev/null
+++ b/third_party/rust/hibitset/Cargo.toml
@@ -0,0 +1,34 @@
+# THIS FILE IS AUTOMATICALLY GENERATED BY CARGO
+#
+# When uploading crates to the registry Cargo will automatically
+# "normalize" Cargo.toml files for maximal compatibility
+# with all versions of Cargo and also rewrite `path` dependencies
+# to registry (e.g., crates.io) dependencies
+#
+# If you believe there's an error in this file please file an
+# issue against the rust-lang/cargo repository.
If you're +# editing this file be aware that the upstream Cargo.toml +# will likely look very different (and much more reasonable) + +[package] +name = "hibitset" +version = "0.6.2" +authors = ["csheratt"] +description = "Hierarchical bit set structure" +documentation = "https://docs.rs/hibitset" +keywords = ["bitset", "container", "data-structures", "hierarchical"] +categories = ["data-structures"] +license = "MIT/Apache-2.0" +repository = "https://github.com/slide-rs/hibitset" +[dependencies.atom] +version = "0.3" + +[dependencies.rayon] +version = "1.1" +optional = true +[dev-dependencies.rand] +version = "0.7" + +[features] +default = ["parallel"] +parallel = ["rayon"] diff --git a/third_party/rust/hibitset/LICENSE-APACHE b/third_party/rust/hibitset/LICENSE-APACHE new file mode 100644 index 000000000000..8dada3edaf50 --- /dev/null +++ b/third_party/rust/hibitset/LICENSE-APACHE @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. 
For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. 
The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. 
+ + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "{}" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright {yyyy} {name of copyright owner} + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/third_party/rust/hibitset/LICENSE-MIT b/third_party/rust/hibitset/LICENSE-MIT new file mode 100644 index 000000000000..a0135fa43911 --- /dev/null +++ b/third_party/rust/hibitset/LICENSE-MIT @@ -0,0 +1,25 @@ +Copyright (c) 2018 The slide-rs hackers + +Permission is hereby granted, free of charge, to any +person obtaining a copy of this software and associated +documentation files (the "Software"), to deal in the +Software without restriction, including without +limitation the rights to use, copy, modify, merge, +publish, distribute, sublicense, and/or sell copies of +the Software, and to permit persons to whom the Software +is furnished to do so, subject to the following +conditions: + +The above copyright notice and this permission notice +shall be included in all copies or substantial portions +of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF +ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED +TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A +PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY +CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION +OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR +IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER +DEALINGS IN THE SOFTWARE. diff --git a/third_party/rust/hibitset/README.md b/third_party/rust/hibitset/README.md new file mode 100644 index 000000000000..411a2e89708a --- /dev/null +++ b/third_party/rust/hibitset/README.md @@ -0,0 +1,22 @@ +# hibitset +[![Build Status](https://travis-ci.org/slide-rs/hibitset.svg)](https://travis-ci.org/slide-rs/hibitset) +[![Crates.io](https://img.shields.io/crates/v/hibitset.svg?maxAge=2592000)](https://crates.io/crates/hibitset) + +Provides hierarchical bit sets, which allow very fast iteration on +sparse data structures. + +## Usage + +Just add this to your `Cargo.toml`: + +```toml +[dependencies] +hibitset = "0.6" +``` + +## License + +This library is licensed under the Apache License 2.0, +see [the LICENSE file][li] for more information. 
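+As a quick orientation, here is a minimal usage sketch against the `BitSet`
+API vendored in `src/lib.rs` below (`add`, `contains`, `remove`, and
+iteration via `BitSetLike`); it is illustrative only, not part of the
+upstream README:
+
+```rust
+use hibitset::{BitSet, BitSetLike};
+
+fn main() {
+    let mut set = BitSet::new();
+    // `add` reports whether the bit was already set.
+    assert!(!set.add(7));
+    set.add(512);
+    assert!(set.contains(7));
+    set.remove(512);
+    // Iteration visits set bits in ascending order, skipping empty
+    // regions via the upper summary layers.
+    for index in (&set).iter() {
+        println!("{}", index);
+    }
+}
+```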
+ +[li]: LICENSE diff --git a/third_party/rust/hibitset/benches/benches.rs b/third_party/rust/hibitset/benches/benches.rs new file mode 100644 index 000000000000..5fa15ea52934 --- /dev/null +++ b/third_party/rust/hibitset/benches/benches.rs @@ -0,0 +1,93 @@ +#![feature(test)] + +extern crate hibitset; +extern crate test; + +mod bitset { + use hibitset::BitSet; + use test; + + #[bench] + fn add(b: &mut test::Bencher) { + let mut bitset = BitSet::with_capacity(1_000_000); + let mut range = (0..1_000_000).cycle(); + b.iter(|| range.next().map(|i| bitset.add(i))) + } + + #[bench] + fn remove_set(b: &mut test::Bencher) { + let mut bitset = BitSet::with_capacity(1_000_000); + let mut range = (0..1_000_000).cycle(); + for i in 0..1_000_000 { + bitset.add(i); + } + b.iter(|| range.next().map(|i| bitset.remove(i))) + } + + #[bench] + fn remove_clear(b: &mut test::Bencher) { + let mut bitset = BitSet::with_capacity(1_000_000); + let mut range = (0..1_000_000).cycle(); + b.iter(|| range.next().map(|i| bitset.remove(i))) + } + + #[bench] + fn contains(b: &mut test::Bencher) { + let mut bitset = BitSet::with_capacity(1_000_000); + let mut range = (0..1_000_000).cycle(); + for i in 0..500_000 { + + + bitset.add(i * 2); + } + b.iter(|| range.next().map(|i| bitset.contains(i))) + } +} + +mod atomic_bitset { + use hibitset::AtomicBitSet; + use test; + + #[bench] + fn add(b: &mut test::Bencher) { + let mut bitset = AtomicBitSet::new(); + let mut range = (0..1_000_000).cycle(); + b.iter(|| range.next().map(|i| bitset.add(i))) + } + + #[bench] + fn add_atomic(b: &mut test::Bencher) { + let bitset = AtomicBitSet::new(); + let mut range = (0..1_000_000).cycle(); + b.iter(|| range.next().map(|i| bitset.add_atomic(i))) + } + + #[bench] + fn remove_set(b: &mut test::Bencher) { + let mut bitset = AtomicBitSet::new(); + let mut range = (0..1_000_000).cycle(); + for i in 0..1_000_000 { + bitset.add(i); + } + b.iter(|| range.next().map(|i| bitset.remove(i))) + } + + #[bench] + fn remove_clear(b: &mut test::Bencher) { + let mut bitset = AtomicBitSet::new(); + let mut range = (0..1_000_000).cycle(); + b.iter(|| range.next().map(|i| bitset.remove(i))) + } + + #[bench] + fn contains(b: &mut test::Bencher) { + let mut bitset = AtomicBitSet::new(); + let mut range = (0..1_000_000).cycle(); + for i in 0..500_000 { + + + bitset.add(i * 2); + } + b.iter(|| range.next().map(|i| bitset.contains(i))) + } +} diff --git a/third_party/rust/hibitset/benches/iter.rs b/third_party/rust/hibitset/benches/iter.rs new file mode 100644 index 000000000000..024b2fa610ba --- /dev/null +++ b/third_party/rust/hibitset/benches/iter.rs @@ -0,0 +1,248 @@ +#![feature(test)] +extern crate hibitset; +extern crate rand; +#[cfg(feature = "parallel")] +extern crate rayon; +extern crate test; + +#[cfg(feature = "parallel")] +use rayon::iter::ParallelIterator; + +use hibitset::{BitSet, BitSetLike}; + +use test::{black_box, Bencher}; + +use rand::prelude::*; + +use self::Mode::*; + +enum Mode { + Seq, + #[cfg(feature = "parallel")] + Par(u8), +} + +fn bench(n: usize, mode: Mode, b: &mut Bencher) { + let mut rng = thread_rng(); + let mut bitset = BitSet::with_capacity(1048576); + for _ in 0..n { + let index = rng.gen_range(0, 1048576); + bitset.add(index); + } + match mode { + Seq => b.iter(|| black_box((&bitset).iter().map(black_box).count())), + #[cfg(feature = "parallel")] + Par(splits) => b.iter(|| { + black_box( + (&bitset) + .par_iter() + .layers_split(splits) + .map(black_box) + .count(), + ) + }), + } +} + +#[bench] +fn iter_100(b: &mut Bencher) { + 
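+    // All of the `iter_*`/`par_iter_*` benches funnel through `bench` above:
+    // it seeds a 1_048_576-capacity `BitSet` with `n` random indices, then
+    // times a full iteration pass, either sequentially (`Seq`) or, when the
+    // "parallel" feature is enabled, split across rayon workers
+    // (`Par(splits)`).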
bench(100, Seq, b); +} + +#[bench] +fn iter_1000(b: &mut Bencher) { + bench(1000, Seq, b); +} + +#[bench] +fn iter_10000(b: &mut Bencher) { + bench(10000, Seq, b); +} + +#[bench] +fn iter_100000(b: &mut Bencher) { + bench(100000, Seq, b); +} + +#[bench] +fn iter_1000000(b: &mut Bencher) { + bench(1000000, Seq, b); +} + +#[cfg(feature = "parallel")] +mod par { + use super::*; + + #[bench] + fn par_iter_3_100(b: &mut Bencher) { + bench(100, Par(3), b); + } + + #[bench] + fn par_iter_3_1000(b: &mut Bencher) { + bench(1000, Par(3), b); + } + + #[bench] + fn par_iter_3_10000(b: &mut Bencher) { + bench(10000, Par(3), b); + } + + #[bench] + fn par_iter_3_100000(b: &mut Bencher) { + bench(100000, Par(3), b); + } + + #[bench] + fn par_iter_3_1000000(b: &mut Bencher) { + bench(1000000, Par(3), b); + } + + #[bench] + fn par_iter_2_100(b: &mut Bencher) { + bench(100, Par(2), b); + } + + #[bench] + fn par_iter_2_1000(b: &mut Bencher) { + bench(1000, Par(2), b); + } + + #[bench] + fn par_iter_2_10000(b: &mut Bencher) { + bench(10000, Par(2), b); + } + + #[bench] + fn par_iter_2_100000(b: &mut Bencher) { + bench(100000, Par(2), b); + } + + #[bench] + fn par_iter_2_1000000(b: &mut Bencher) { + bench(1000000, Par(2), b); + } + + fn bench_payload(n: usize, splits: u8, payload: u32, b: &mut Bencher) { + let mut rng = thread_rng(); + let mut bitset = BitSet::with_capacity(1048576); + for _ in 0..n { + let index = rng.gen_range(0, 1048576); + bitset.add(index); + } + b.iter(|| { + black_box( + (&bitset) + .par_iter() + .layers_split(splits) + .map(|mut n| { + for i in 0..payload { + n += black_box(i); + } + black_box(n) + }) + .count(), + ) + }); + } + + #[bench] + fn par_3_payload_1000_iter_100(b: &mut Bencher) { + bench_payload(100, 3, 1000, b); + } + + #[bench] + fn par_3_payload_1000_iter_1000(b: &mut Bencher) { + bench_payload(1000, 3, 1000, b); + } + + #[bench] + fn par_3_payload_1000_iter_10000(b: &mut Bencher) { + bench_payload(10000, 3, 1000, b); + } + + #[bench] + fn par_3_payload_1000_iter_100000(b: &mut Bencher) { + bench_payload(100000, 3, 1000, b); + } + + #[bench] + fn par_3_payload_1000_iter_1000000(b: &mut Bencher) { + bench_payload(1000000, 3, 1000, b); + } + + #[bench] + fn par_2_payload_1000_iter_100(b: &mut Bencher) { + bench_payload(100, 2, 1000, b); + } + + #[bench] + fn par_2_payload_1000_iter_1000(b: &mut Bencher) { + bench_payload(1000, 2, 1000, b); + } + + #[bench] + fn par_2_payload_1000_iter_10000(b: &mut Bencher) { + bench_payload(10000, 2, 1000, b); + } + + #[bench] + fn par_2_payload_1000_iter_100000(b: &mut Bencher) { + bench_payload(100000, 2, 1000, b); + } + + #[bench] + fn par_2_payload_1000_iter_1000000(b: &mut Bencher) { + bench_payload(1000000, 2, 1000, b); + } + + #[bench] + fn par_3_payload_100_iter_100(b: &mut Bencher) { + bench_payload(100, 3, 100, b); + } + + #[bench] + fn par_3_payload_100_iter_1000(b: &mut Bencher) { + bench_payload(1000, 3, 100, b); + } + + #[bench] + fn par_3_payload_100_iter_10000(b: &mut Bencher) { + bench_payload(10000, 3, 100, b); + } + + #[bench] + fn par_3_payload_100_iter_100000(b: &mut Bencher) { + bench_payload(100000, 3, 100, b); + } + + #[bench] + fn par_3_payload_100_iter_1000000(b: &mut Bencher) { + bench_payload(1000000, 3, 100, b); + } + + #[bench] + fn par_2_payload_100_iter_100(b: &mut Bencher) { + bench_payload(100, 2, 100, b); + } + + #[bench] + fn par_2_payload_100_iter_1000(b: &mut Bencher) { + bench_payload(1000, 2, 100, b); + } + + #[bench] + fn par_2_payload_100_iter_10000(b: &mut Bencher) { + bench_payload(10000, 2, 100, 
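+        // (`bench_payload` mirrors `bench` but runs a synthetic loop of
+        // `payload` additions per yielded index, modelling per-item work so
+        // the 2- and 3-way layer splits have something to amortize.)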
b);
+    }
+
+    #[bench]
+    fn par_2_payload_100_iter_100000(b: &mut Bencher) {
+        bench_payload(100000, 2, 100, b);
+    }
+
+    #[bench]
+    fn par_2_payload_100_iter_1000000(b: &mut Bencher) {
+        bench_payload(1000000, 2, 100, b);
+    }
+}
diff --git a/third_party/rust/hibitset/bors.toml b/third_party/rust/hibitset/bors.toml
new file mode 100644
index 000000000000..359f8947bac9
--- /dev/null
+++ b/third_party/rust/hibitset/bors.toml
@@ -0,0 +1 @@
+status = ["continuous-integration/travis-ci/push"]
diff --git a/third_party/rust/hibitset/src/atomic.rs b/third_party/rust/hibitset/src/atomic.rs
new file mode 100644
index 000000000000..67070a4792a2
--- /dev/null
+++ b/third_party/rust/hibitset/src/atomic.rs
@@ -0,0 +1,419 @@
+use std::default::Default;
+use std::fmt::{Debug, Error as FormatError, Formatter};
+use std::iter::repeat;
+use std::sync::atomic::{AtomicUsize, Ordering};
+
+use atom::AtomSetOnce;
+
+use util::*;
+use {BitSetLike, DrainableBitSet};
+
+/// A bit set that allows bits to be added concurrently through a shared
+/// reference (`add_atomic`); removal and iteration still need `&mut self`.
+/// Layer-0 words live in lazily allocated `AtomicBlock`s.
+#[derive(Debug)]
+pub struct AtomicBitSet {
+    layer3: AtomicUsize,
+    layer2: Vec<AtomicUsize>,
+    layer1: Vec<AtomicBlock>,
+}
+
+impl AtomicBitSet {
+    /// Creates an empty `AtomicBitSet`.
+    pub fn new() -> AtomicBitSet {
+        Default::default()
+    }
+
+    /// Adds `id` to the set through a shared reference; returns `true` if
+    /// the bit was already set.
+    #[inline]
+    pub fn add_atomic(&self, id: Index) -> bool {
+        let (_, p1, p2) = offsets(id);
+
+        // Mark the containing word as occupied on every summary layer.
+        let set = self.layer1[p1].add(id);
+        self.layer2[p2].fetch_or(id.mask(SHIFT2), Ordering::Relaxed);
+        self.layer3.fetch_or(id.mask(SHIFT3), Ordering::Relaxed);
+        set
+    }
+
+    /// Adds `id` to the set; returns `true` if the bit was already set.
+    #[inline]
+    pub fn add(&mut self, id: Index) -> bool {
+        use std::sync::atomic::Ordering::Relaxed;
+
+        let (_, p1, p2) = offsets(id);
+        if self.layer1[p1].add(id) {
+            return true;
+        }
+
+        self.layer2[p2].store(self.layer2[p2].load(Relaxed) | id.mask(SHIFT2), Relaxed);
+        self.layer3
+            .store(self.layer3.load(Relaxed) | id.mask(SHIFT3), Relaxed);
+        false
+    }
+
+    /// Removes `id` from the set; returns `true` if it was present. Summary
+    /// bits on the higher layers are cleared once a word becomes empty.
+    #[inline]
+    pub fn remove(&mut self, id: Index) -> bool {
+        use std::sync::atomic::Ordering::Relaxed;
+        let (_, p1, p2) = offsets(id);
+
+        if !self.layer1[p1].remove(id) {
+            return false;
+        }
+        if self.layer1[p1].mask.load(Ordering::Relaxed) != 0 {
+            return true;
+        }
+
+        let v = self.layer2[p2].load(Relaxed) & !id.mask(SHIFT2);
+        self.layer2[p2].store(v, Relaxed);
+        if v != 0 {
+            return true;
+        }
+
+        let v = self.layer3.load(Relaxed) & !id.mask(SHIFT3);
+        self.layer3.store(v, Relaxed);
+        return true;
+    }
+
+    /// Returns `true` if `id` is in the set.
+    #[inline]
+    pub fn contains(&self, id: Index) -> bool {
+        let i = id.offset(SHIFT2);
+        self.layer1[i].contains(id)
+    }
+
+    /// Clears the whole set, visiting only occupied blocks.
+    pub fn clear(&mut self) {
+        // Layer 3 records which layer-2 words are occupied, which in turn
+        // record the occupied layer-1 blocks, so only live data is touched.
+        let (mut m3, mut m2) = (self.layer3.swap(0, Ordering::Relaxed), 0usize);
+        let mut offset = 0;
+
+        loop {
+            if m2 != 0 {
+                let bit = m2.trailing_zeros() as usize;
+                m2 &= !(1 << bit);
+
+                // Clear the layer-0 block behind this layer-1 bit.
+                self.layer1[offset + bit].clear();
+                continue;
+            }
+
+            if m3 != 0 {
+                let bit = m3.trailing_zeros() as usize;
+                m3 &= !(1 << bit);
+                offset = bit << BITS;
+                m2 = self.layer2[bit].swap(0, Ordering::Relaxed);
+                continue;
+            }
+            break;
+        }
+    }
+}
+
+impl BitSetLike for AtomicBitSet {
+    #[inline]
+    fn layer3(&self) -> usize {
+        self.layer3.load(Ordering::Relaxed)
+    }
+    #[inline]
+    fn layer2(&self, i: usize) -> usize {
+        self.layer2[i].load(Ordering::Relaxed)
+    }
+    #[inline]
+    fn layer1(&self, i: usize) -> usize {
+        self.layer1[i].mask.load(Ordering::Relaxed)
+    }
+    #[inline]
+    fn layer0(&self, i: usize) -> usize {
+        let (o1, o0) = (i >> BITS, i & ((1 << BITS) - 1));
+        self.layer1[o1]
+            .atom
+            .get()
+            .map(|l0| l0[o0].load(Ordering::Relaxed))
+            .unwrap_or(0)
+    }
+    #[inline]
+    fn contains(&self, i: Index) -> bool {
+        self.contains(i)
+    }
+}
+
+impl DrainableBitSet for AtomicBitSet {
+    #[inline]
+    fn remove(&mut self, i: Index) -> bool {
+        self.remove(i)
+    }
+}
+
+impl Default for AtomicBitSet {
+    fn default() -> Self {
+        AtomicBitSet {
+            layer3: Default::default(),
+            layer2: repeat(0)
+                .map(|_| AtomicUsize::new(0))
+                .take(1 << BITS)
+                .collect(),
+            layer1: repeat(0)
+                .map(|_| AtomicBlock::new())
+                .take(1 << (2 * BITS))
+                .collect(),
+        }
+    }
+}
+
+struct AtomicBlock {
+    mask: AtomicUsize,
+    atom: AtomSetOnce<Box<[AtomicUsize; 1 << BITS]>>,
+}
+
+impl AtomicBlock {
+    fn new() -> AtomicBlock {
+        AtomicBlock {
+            mask: AtomicUsize::new(0),
+            atom: AtomSetOnce::empty(),
+        }
+    }
+
+    fn add(&self, id: Index) -> bool {
+        // Lazily allocate the layer-0 word array on first insertion; a
+        // zeroed `AtomicUsize` array is a valid all-clear block.
+        if self.atom.is_none() {
+            let v = Box::new(unsafe { ::std::mem::zeroed() });
+            self.atom.set_if_none(v);
+        }
+
+        let (i, m) = (id.row(SHIFT1), id.mask(SHIFT0));
+        let old = self.atom.get().unwrap()[i].fetch_or(m, Ordering::Relaxed);
+        self.mask.fetch_or(id.mask(SHIFT1), Ordering::Relaxed);
+        old & m != 0
+    }
+
+    fn contains(&self, id: Index) -> bool {
+        self.atom
+            .get()
+            .map(|l0| l0[id.row(SHIFT1)].load(Ordering::Relaxed) & id.mask(SHIFT0) != 0)
+            .unwrap_or(false)
+    }
+
+    fn remove(&mut self, id: Index) -> bool {
+        if let Some(l0) = self.atom.get_mut() {
+            let (i, m) = (id.row(SHIFT1), !id.mask(SHIFT0));
+            let v = l0[i].load(Ordering::Relaxed);
+            l0[i].store(v & m, Ordering::Relaxed);
+            if v & m == 0 {
+                let v = self.mask.load(Ordering::Relaxed) & !id.mask(SHIFT1);
+                self.mask.store(v, Ordering::Relaxed);
+            }
+            v & id.mask(SHIFT0) == id.mask(SHIFT0)
+        } else {
+            false
+        }
+    }
+
+    fn clear(&mut self) {
+        self.mask.store(0, Ordering::Relaxed);
+        self.atom.get().map(|l0| {
+            for l in &l0[..] {
+                l.store(0, Ordering::Relaxed);
+            }
+        });
+    }
+}
+
+impl Debug for AtomicBlock {
+    fn fmt(&self, f: &mut Formatter) -> Result<(), FormatError> {
+        f.debug_struct("AtomicBlock")
+            .field("mask", &self.mask)
+            .field("atom", &self.atom.get().unwrap().iter())
+            .finish()
+    }
+}
+
+#[cfg(test)]
+mod atomic_set_test {
+    use {AtomicBitSet, BitSetAnd, BitSetLike};
+
+    #[test]
+    fn insert() {
+        let mut c = AtomicBitSet::new();
+        for i in 0..1_000 {
+            assert!(!c.add(i));
+            assert!(c.add(i));
+        }
+
+        for i in 0..1_000 {
+            assert!(c.contains(i));
+        }
+    }
+
+    #[test]
+    fn insert_100k() {
+        let mut c = AtomicBitSet::new();
+        for i in 0..100_000 {
+            assert!(!c.add(i));
+            assert!(c.add(i));
+        }
+
+        for i in 0..100_000 {
+            assert!(c.contains(i));
+        }
+    }
+
+    #[test]
+    fn add_atomic() {
+        let c = AtomicBitSet::new();
+        for i in 0..1_000 {
+            assert!(!c.add_atomic(i));
+            assert!(c.add_atomic(i));
+        }
+
+        for i in 0..1_000 {
+            assert!(c.contains(i));
+        }
+    }
+
+    #[test]
+    fn add_atomic_100k() {
+        let c = AtomicBitSet::new();
+        for i in 0..100_000 {
+            assert!(!c.add_atomic(i));
+            assert!(c.add_atomic(i));
+        }
+
+        for i in 0..100_000 {
+            assert!(c.contains(i));
+        }
+    }
+
+    #[test]
+    fn remove() {
+        let mut c = AtomicBitSet::new();
+        for i in 0..1_000 {
+            assert!(!c.add(i));
+        }
+
+        for i in 0..1_000 {
+            assert!(c.contains(i));
+            assert!(c.remove(i));
+            assert!(!c.contains(i));
+            assert!(!c.remove(i));
+        }
+    }
+
+    #[test]
+    fn iter() {
+        let mut c = AtomicBitSet::new();
+        for i in 0..100_000 {
+            c.add(i);
+        }
+
+        let mut count = 0;
+        for (idx, i) in c.iter().enumerate() {
+            count += 1;
+            assert_eq!(idx, i as usize);
+        }
+        assert_eq!(count, 100_000);
+    }
+
+    #[test]
+    fn iter_odd_even() {
+        let mut odd = AtomicBitSet::new();
+        let mut even = AtomicBitSet::new();
+        for i in 0..100_000 {
+            if i % 2 == 1 {
+                odd.add(i);
+            } else {
+                even.add(i);
+            }
+        }
+ + assert_eq!((&odd).iter().count(), 50_000); + assert_eq!((&even).iter().count(), 50_000); + assert_eq!(BitSetAnd(&odd, &even).iter().count(), 0); + } + + #[test] + fn clear() { + let mut set = AtomicBitSet::new(); + for i in 0..1_000 { + set.add(i); + } + + assert_eq!((&set).iter().sum::(), 500_500 - 1_000); + + assert_eq!((&set).iter().count(), 1_000); + set.clear(); + assert_eq!((&set).iter().count(), 0); + + for i in 0..1_000 { + set.add(i * 64); + } + + assert_eq!((&set).iter().count(), 1_000); + set.clear(); + assert_eq!((&set).iter().count(), 0); + + for i in 0..1_000 { + set.add(i * 1_000); + } + + assert_eq!((&set).iter().count(), 1_000); + set.clear(); + assert_eq!((&set).iter().count(), 0); + + for i in 0..100 { + set.add(i * 10_000); + } + + assert_eq!((&set).iter().count(), 100); + set.clear(); + assert_eq!((&set).iter().count(), 0); + + for i in 0..10 { + set.add(i * 10_000); + } + + assert_eq!((&set).iter().count(), 10); + set.clear(); + assert_eq!((&set).iter().count(), 0); + } + +} diff --git a/third_party/rust/hibitset/src/iter/drain.rs b/third_party/rust/hibitset/src/iter/drain.rs new file mode 100644 index 000000000000..716c33727627 --- /dev/null +++ b/third_party/rust/hibitset/src/iter/drain.rs @@ -0,0 +1,45 @@ +use iter::BitIter; +use util::*; +use DrainableBitSet; + + + + +pub struct DrainBitIter<'a, T: 'a> { + iter: BitIter<&'a mut T>, +} + +impl<'a, T: DrainableBitSet> DrainBitIter<'a, T> { + + + + + pub fn new(set: &'a mut T, masks: [usize; LAYERS], prefix: [u32; LAYERS - 1]) -> Self { + DrainBitIter { + iter: BitIter::new(set, masks, prefix), + } + } +} + +impl<'a, T> Iterator for DrainBitIter<'a, T> +where + T: DrainableBitSet, +{ + type Item = Index; + + fn next(&mut self) -> Option { + let next = self.iter.next(); + if let Some(next) = next { + self.iter.set.remove(next); + } + next + } +} + +#[test] +fn drain_all() { + use {BitSet, BitSetLike}; + let mut bit_set: BitSet = (0..10000).filter(|i| i % 2 == 0).collect(); + bit_set.drain().for_each(|_| {}); + assert_eq!(0, bit_set.iter().count()); +} diff --git a/third_party/rust/hibitset/src/iter/mod.rs b/third_party/rust/hibitset/src/iter/mod.rs new file mode 100644 index 000000000000..376136128038 --- /dev/null +++ b/third_party/rust/hibitset/src/iter/mod.rs @@ -0,0 +1,150 @@ +use util::*; +use {BitSet, BitSetLike}; + +pub use self::drain::DrainBitIter; + +#[cfg(feature = "parallel")] +pub use self::parallel::{BitParIter, BitProducer}; + +mod drain; +#[cfg(feature = "parallel")] +mod parallel; + + + + +#[derive(Debug, Clone)] +pub struct BitIter { + pub(crate) set: T, + pub(crate) masks: [usize; LAYERS], + pub(crate) prefix: [u32; LAYERS - 1], +} + +impl BitIter { + + + + + pub fn new(set: T, masks: [usize; LAYERS], prefix: [u32; LAYERS - 1]) -> Self { + BitIter { + set: set, + masks: masks, + prefix: prefix, + } + } +} + +impl BitIter { + + pub fn contains(&self, i: Index) -> bool { + self.set.contains(i) + } +} + +impl<'a> BitIter<&'a mut BitSet> { + + pub(crate) fn clear(&mut self) { + use self::State::Continue; + while let Some(level) = (1..LAYERS).find(|&level| self.handle_level(level) == Continue) { + let lower = level - 1; + let idx = (self.prefix[lower] >> BITS) as usize; + *self.set.layer_mut(lower, idx) = 0; + if level == LAYERS - 1 { + self.set.layer3 &= !((2 << idx) - 1); + } + } + } +} + +#[derive(PartialEq)] +pub(crate) enum State { + Empty, + Continue, + Value(Index), +} + +impl Iterator for BitIter +where + T: BitSetLike, +{ + type Item = Index; + + fn next(&mut self) -> Option { + use 
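+        // Descent logic: `handle_level` pops the lowest set bit of the mask
+        // at `level` and either yields an index (level 0) or loads the child
+        // word one layer down and reports `Continue`; `next` then re-scans
+        // from level 0, so whole empty subtrees are skipped.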
self::State::*; + 'find: loop { + for level in 0..LAYERS { + match self.handle_level(level) { + Value(v) => return Some(v), + Continue => continue 'find, + Empty => {} + } + } + + return None; + } + } +} + +impl BitIter { + pub(crate) fn handle_level(&mut self, level: usize) -> State { + use self::State::*; + if self.masks[level] == 0 { + Empty + } else { + + let first_bit = self.masks[level].trailing_zeros(); + + self.masks[level] &= !(1 << first_bit); + + let idx = self.prefix.get(level).cloned().unwrap_or(0) | first_bit; + if level == 0 { + + Value(idx) + } else { + + self.masks[level - 1] = self.set.get_from_layer(level - 1, idx as usize); + self.prefix[level - 1] = idx << BITS; + Continue + } + } + } +} + +#[cfg(test)] +mod tests { + use {BitSet, BitSetLike}; + + #[test] + fn iterator_clear_empties() { + use rand::prelude::*; + + let mut set = BitSet::new(); + let mut rng = thread_rng(); + let limit = 1_048_576; + for _ in 0..(limit / 10) { + set.add(rng.gen_range(0, limit)); + } + (&mut set).iter().clear(); + assert_eq!(0, set.layer3); + for &i in &set.layer2 { + assert_eq!(0, i); + } + for &i in &set.layer1 { + assert_eq!(0, i); + } + for &i in &set.layer0 { + assert_eq!(0, i); + } + } + + #[test] + fn iterator_clone() { + let mut set = BitSet::new(); + set.add(1); + set.add(3); + let iter = set.iter().skip(1); + for (a, b) in iter.clone().zip(iter) { + assert_eq!(a, b); + } + } +} diff --git a/third_party/rust/hibitset/src/iter/parallel.rs b/third_party/rust/hibitset/src/iter/parallel.rs new file mode 100644 index 000000000000..0a60ace39703 --- /dev/null +++ b/third_party/rust/hibitset/src/iter/parallel.rs @@ -0,0 +1,242 @@ +use rayon::iter::plumbing::{bridge_unindexed, Folder, UnindexedConsumer, UnindexedProducer}; +use rayon::iter::ParallelIterator; + +use iter::{BitIter, BitSetLike, Index, BITS, LAYERS}; +use util::average_ones; + + + + +#[derive(Debug)] +pub struct BitParIter(T, u8); + +impl BitParIter { + + + + + + + pub fn new(set: T) -> Self { + BitParIter(set, 3) + } + + + + + + + + + + + + + + + + + + + + + + + + + + + pub fn layers_split(mut self, layers: u8) -> Self { + assert!(layers >= 1); + assert!(layers <= 3); + self.1 = layers; + self + } +} + +impl ParallelIterator for BitParIter +where + T: BitSetLike + Send + Sync, +{ + type Item = Index; + + fn drive_unindexed(self, consumer: C) -> C::Result + where + C: UnindexedConsumer, + { + bridge_unindexed(BitProducer((&self.0).iter(), self.1), consumer) + } +} + + + + +#[derive(Debug)] +pub struct BitProducer<'a, T: 'a + Send + Sync>(pub BitIter<&'a T>, pub u8); + +impl<'a, T: 'a + Send + Sync> UnindexedProducer for BitProducer<'a, T> +where + T: BitSetLike, +{ + type Item = Index; + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + fn split(mut self) -> (Self, Option) { + let splits = self.1; + let other = { + let mut handle_level = |level: usize| { + if self.0.masks[level] == 0 { + + None + } else { + + let level_prefix = self.0.prefix.get(level).cloned().unwrap_or(0); + let first_bit = self.0.masks[level].trailing_zeros(); + average_ones(self.0.masks[level]) + .and_then(|average_bit| { + let mask = (1 << average_bit) - 1; + let mut other = BitProducer( + BitIter::new(self.0.set, [0; LAYERS], [0; LAYERS - 1]), + splits, + ); + + other.0.masks[level] = self.0.masks[level] & !mask; + other.0.prefix[level - 1] = (level_prefix | average_bit as u32) << BITS; + + + other.0.prefix[level..].copy_from_slice(&self.0.prefix[level..]); + + self.0.masks[level] &= mask; + self.0.prefix[level - 1] = (level_prefix | first_bit) 
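+                        // `average_ones` picked a bit that halves the ones
+                        // in this word: the upper half was moved to `other`,
+                        // `self` keeps the lower half, and both prefixes are
+                        // repointed at their first remaining child.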
<< BITS; + Some(other) + }) + .or_else(|| { + + let idx = level_prefix as usize | first_bit as usize; + self.0.prefix[level - 1] = (idx as u32) << BITS; + + + self.0.masks[level] = 0; + self.0.masks[level - 1] = self.0.set.get_from_layer(level - 1, idx); + None + }) + } + }; + let top_layer = LAYERS - 1; + let mut h = handle_level(top_layer); + for i in 1..splits { + h = h.or_else(|| handle_level(top_layer - i as usize)); + } + h + }; + (self, other) + } + + fn fold_with(self, folder: F) -> F + where + F: Folder, + { + folder.consume_iter(self.0) + } +} + +#[cfg(test)] +mod test_bit_producer { + use rayon::iter::plumbing::UnindexedProducer; + + use super::BitProducer; + use iter::BitSetLike; + use util::BITS; + + fn test_splitting(split_levels: u8) { + fn visit(mut us: BitProducer, d: usize, i: usize, mut trail: String, c: &mut usize) + where + T: Send + Sync + BitSetLike, + { + if d == 0 { + assert!(us.split().1.is_none(), trail); + *c += 1; + } else { + for j in 1..(i + 1) { + let (new_us, them) = us.split(); + us = new_us; + let them = them.expect(&trail); + let mut trail = trail.clone(); + trail.push_str(&i.to_string()); + visit(them, d, i - j, trail, c); + } + trail.push_str("u"); + visit(us, d - 1, BITS, trail, c); + } + } + + let usize_bits = ::std::mem::size_of::() * 8; + + let mut c = ::BitSet::new(); + for i in 0..(usize_bits.pow(3) * 2) { + assert!(!c.add(i as u32)); + } + + let us = BitProducer((&c).iter(), split_levels); + let (us, them) = us.split(); + + let mut count = 0; + visit( + us, + split_levels as usize - 1, + BITS, + "u".to_owned(), + &mut count, + ); + visit( + them.expect("Splitting top level"), + split_levels as usize - 1, + BITS, + "t".to_owned(), + &mut count, + ); + assert_eq!(usize_bits.pow(split_levels as u32 - 1) * 2, count); + } + + #[test] + fn max_3_splitting_of_two_top_bits() { + test_splitting(3); + } + + #[test] + fn max_2_splitting_of_two_top_bits() { + test_splitting(2); + } + + #[test] + fn max_1_splitting_of_two_top_bits() { + test_splitting(1); + } +} diff --git a/third_party/rust/hibitset/src/lib.rs b/third_party/rust/hibitset/src/lib.rs new file mode 100644 index 000000000000..a6fea2293263 --- /dev/null +++ b/third_party/rust/hibitset/src/lib.rs @@ -0,0 +1,723 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +#![deny(missing_docs)] + +extern crate atom; +#[cfg(test)] +extern crate rand; +#[cfg(feature = "parallel")] +extern crate rayon; + +mod atomic; +mod iter; +mod ops; +mod util; + +pub use atomic::AtomicBitSet; +pub use iter::{BitIter, DrainBitIter}; +#[cfg(feature = "parallel")] +pub use iter::{BitParIter, BitProducer}; +pub use ops::{BitSetAll, BitSetAnd, BitSetNot, BitSetOr, BitSetXor}; + +use util::*; + + + + + + +#[derive(Clone, Debug, Default)] +pub struct BitSet { + layer3: usize, + layer2: Vec, + layer1: Vec, + layer0: Vec, +} + +impl BitSet { + + pub fn new() -> BitSet { + Default::default() + } + + #[inline] + fn valid_range(max: Index) { + if (MAX_EID as u32) < max { + panic!("Expected index to be less then {}, found {}", MAX_EID, max); + } + } + + + pub fn with_capacity(max: Index) -> BitSet { + Self::valid_range(max); + let mut value = BitSet::new(); + value.extend(max); + value + } + + #[inline(never)] + fn extend(&mut self, id: Index) { + Self::valid_range(id); + let (p0, p1, p2) = offsets(id); + + Self::fill_up(&mut self.layer2, p2); + Self::fill_up(&mut self.layer1, p1); + Self::fill_up(&mut self.layer0, p0); + } + + fn fill_up(vec: &mut Vec, upper_index: usize) { + if vec.len() <= 
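+        // Layers grow lazily: `extend` sizes each layer vector just far
+        // enough (zero-filled) to cover the word that `id` falls in, so a
+        // sparse set allocates little.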
upper_index { + vec.resize(upper_index + 1, 0); + } + } + + + + #[inline(never)] + fn add_slow(&mut self, id: Index) { + let (_, p1, p2) = offsets(id); + self.layer1[p1] |= id.mask(SHIFT1); + self.layer2[p2] |= id.mask(SHIFT2); + self.layer3 |= id.mask(SHIFT3); + } + + + + #[inline] + pub fn add(&mut self, id: Index) -> bool { + let (p0, mask) = (id.offset(SHIFT1), id.mask(SHIFT0)); + + if p0 >= self.layer0.len() { + self.extend(id); + } + + if self.layer0[p0] & mask != 0 { + return true; + } + + + + let old = self.layer0[p0]; + self.layer0[p0] |= mask; + if old == 0 { + self.add_slow(id); + } + false + } + + fn layer_mut(&mut self, level: usize, idx: usize) -> &mut usize { + match level { + 0 => { + Self::fill_up(&mut self.layer0, idx); + &mut self.layer0[idx] + } + 1 => { + Self::fill_up(&mut self.layer1, idx); + &mut self.layer1[idx] + } + 2 => { + Self::fill_up(&mut self.layer2, idx); + &mut self.layer2[idx] + } + 3 => &mut self.layer3, + _ => panic!("Invalid layer: {}", level), + } + } + + + + + #[inline] + pub fn remove(&mut self, id: Index) -> bool { + let (p0, p1, p2) = offsets(id); + + if p0 >= self.layer0.len() { + return false; + } + + if self.layer0[p0] & id.mask(SHIFT0) == 0 { + return false; + } + + + + + + self.layer0[p0] &= !id.mask(SHIFT0); + if self.layer0[p0] != 0 { + return true; + } + + self.layer1[p1] &= !id.mask(SHIFT1); + if self.layer1[p1] != 0 { + return true; + } + + self.layer2[p2] &= !id.mask(SHIFT2); + if self.layer2[p2] != 0 { + return true; + } + + self.layer3 &= !id.mask(SHIFT3); + return true; + } + + + #[inline] + pub fn contains(&self, id: Index) -> bool { + let p0 = id.offset(SHIFT1); + p0 < self.layer0.len() && (self.layer0[p0] & id.mask(SHIFT0)) != 0 + } + + + #[inline] + pub fn contains_set(&self, other: &BitSet) -> bool { + for id in other.iter() { + if !self.contains(id) { + return false; + } + } + true + } + + + pub fn clear(&mut self) { + self.layer0.clear(); + self.layer1.clear(); + self.layer2.clear(); + self.layer3 = 0; + } +} + + + + + + + + + + + + + + +pub trait BitSetLike { + + + + fn get_from_layer(&self, layer: usize, idx: usize) -> usize { + match layer { + 0 => self.layer0(idx), + 1 => self.layer1(idx), + 2 => self.layer2(idx), + 3 => self.layer3(), + _ => panic!("Invalid layer: {}", layer), + } + } + + + fn is_empty(&self) -> bool { + self.layer3() == 0 + } + + + + fn layer3(&self) -> usize; + + + + fn layer2(&self, i: usize) -> usize; + + + + fn layer1(&self, i: usize) -> usize; + + + + fn layer0(&self, i: usize) -> usize; + + + fn contains(&self, i: Index) -> bool; + + + fn iter(self) -> BitIter + where + Self: Sized, + { + let layer3 = self.layer3(); + + BitIter::new(self, [0, 0, 0, layer3], [0; LAYERS - 1]) + } + + + #[cfg(feature = "parallel")] + fn par_iter(self) -> BitParIter + where + Self: Sized, + { + BitParIter::new(self) + } +} + + +pub trait DrainableBitSet: BitSetLike { + + + + fn remove(&mut self, i: Index) -> bool; + + + fn drain<'a>(&'a mut self) -> DrainBitIter<'a, Self> + where + Self: Sized, + { + let layer3 = self.layer3(); + + DrainBitIter::new(self, [0, 0, 0, layer3], [0; LAYERS - 1]) + } +} + +impl<'a, T> BitSetLike for &'a T +where + T: BitSetLike + ?Sized, +{ + #[inline] + fn layer3(&self) -> usize { + (*self).layer3() + } + + #[inline] + fn layer2(&self, i: usize) -> usize { + (*self).layer2(i) + } + + #[inline] + fn layer1(&self, i: usize) -> usize { + (*self).layer1(i) + } + + #[inline] + fn layer0(&self, i: usize) -> usize { + (*self).layer0(i) + } + + #[inline] + fn contains(&self, i: Index) -> bool { + 
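+        // These blanket impls forward every layer accessor through `&T` and
+        // `&mut T`, letting adapters such as `BitSetAnd` borrow sets rather
+        // than consume them.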
(*self).contains(i) + } +} + +impl<'a, T> BitSetLike for &'a mut T +where + T: BitSetLike + ?Sized, +{ + #[inline] + fn layer3(&self) -> usize { + (**self).layer3() + } + + #[inline] + fn layer2(&self, i: usize) -> usize { + (**self).layer2(i) + } + + #[inline] + fn layer1(&self, i: usize) -> usize { + (**self).layer1(i) + } + + #[inline] + fn layer0(&self, i: usize) -> usize { + (**self).layer0(i) + } + + #[inline] + fn contains(&self, i: Index) -> bool { + (**self).contains(i) + } +} + +impl<'a, T> DrainableBitSet for &'a mut T +where + T: DrainableBitSet, +{ + #[inline] + fn remove(&mut self, i: Index) -> bool { + (**self).remove(i) + } +} + +impl BitSetLike for BitSet { + #[inline] + fn layer3(&self) -> usize { + self.layer3 + } + + #[inline] + fn layer2(&self, i: usize) -> usize { + self.layer2.get(i).map(|&x| x).unwrap_or(0) + } + + #[inline] + fn layer1(&self, i: usize) -> usize { + self.layer1.get(i).map(|&x| x).unwrap_or(0) + } + + #[inline] + fn layer0(&self, i: usize) -> usize { + self.layer0.get(i).map(|&x| x).unwrap_or(0) + } + + #[inline] + fn contains(&self, i: Index) -> bool { + self.contains(i) + } +} + +impl DrainableBitSet for BitSet { + #[inline] + fn remove(&mut self, i: Index) -> bool { + self.remove(i) + } +} + +impl PartialEq for BitSet { + #[inline] + fn eq(&self, rhv: &BitSet) -> bool { + if self.layer3 != rhv.layer3 { + return false; + } + if self.layer2.len() != rhv.layer2.len() + || self.layer1.len() != rhv.layer1.len() + || self.layer0.len() != rhv.layer0.len() + { + return false; + } + + for i in 0..self.layer2.len() { + if self.layer2(i) != rhv.layer2(i) { + return false; + } + } + for i in 0..self.layer1.len() { + if self.layer1(i) != rhv.layer1(i) { + return false; + } + } + for i in 0..self.layer0.len() { + if self.layer0(i) != rhv.layer0(i) { + return false; + } + } + + true + } +} +impl Eq for BitSet {} + +#[cfg(test)] +mod tests { + use super::{BitSet, BitSetAnd, BitSetLike, BitSetNot}; + + #[test] + fn insert() { + let mut c = BitSet::new(); + for i in 0..1_000 { + assert!(!c.add(i)); + assert!(c.add(i)); + } + + for i in 0..1_000 { + assert!(c.contains(i)); + } + } + + #[test] + fn insert_100k() { + let mut c = BitSet::new(); + for i in 0..100_000 { + assert!(!c.add(i)); + assert!(c.add(i)); + } + + for i in 0..100_000 { + assert!(c.contains(i)); + } + } + #[test] + fn remove() { + let mut c = BitSet::new(); + for i in 0..1_000 { + assert!(!c.add(i)); + } + + for i in 0..1_000 { + assert!(c.contains(i)); + assert!(c.remove(i)); + assert!(!c.contains(i)); + assert!(!c.remove(i)); + } + } + + #[test] + fn iter() { + let mut c = BitSet::new(); + for i in 0..100_000 { + c.add(i); + } + + let mut count = 0; + for (idx, i) in c.iter().enumerate() { + count += 1; + assert_eq!(idx, i as usize); + } + assert_eq!(count, 100_000); + } + + #[test] + fn iter_odd_even() { + let mut odd = BitSet::new(); + let mut even = BitSet::new(); + for i in 0..100_000 { + if i % 2 == 1 { + odd.add(i); + } else { + even.add(i); + } + } + + assert_eq!((&odd).iter().count(), 50_000); + assert_eq!((&even).iter().count(), 50_000); + assert_eq!(BitSetAnd(&odd, &even).iter().count(), 0); + } + + #[test] + fn iter_random_add() { + use rand::prelude::*; + + let mut set = BitSet::new(); + let mut rng = thread_rng(); + let limit = 1_048_576; + let mut added = 0; + for _ in 0..(limit / 10) { + let index = rng.gen_range(0, limit); + if !set.add(index) { + added += 1; + } + } + assert_eq!(set.iter().count(), added as usize); + } + + #[test] + fn iter_clusters() { + let mut set = 
BitSet::new(); + for x in 0..8 { + let x = (x * 3) << (::BITS * 2); + for y in 0..8 { + let y = (y * 3) << (::BITS); + for z in 0..8 { + let z = z * 2; + set.add(x + y + z); + } + } + } + assert_eq!(set.iter().count(), 8usize.pow(3)); + } + + #[test] + fn not() { + let mut c = BitSet::new(); + for i in 0..10_000 { + if i % 2 == 1 { + c.add(i); + } + } + let d = BitSetNot(c); + for (idx, i) in d.iter().take(5_000).enumerate() { + assert_eq!(idx * 2, i as usize); + } + } +} + +#[cfg(all(test, feature = "parallel"))] +mod test_parallel { + use super::{BitSet, BitSetAnd, BitSetLike}; + use rayon::iter::ParallelIterator; + + #[test] + fn par_iter_one() { + let step = 5000; + let tests = 1_048_576 / step; + for n in 0..tests { + let n = n * step; + let mut set = BitSet::new(); + set.add(n); + assert_eq!(set.par_iter().count(), 1); + } + let mut set = BitSet::new(); + set.add(1_048_576 - 1); + assert_eq!(set.par_iter().count(), 1); + } + + #[test] + fn par_iter_random_add() { + use rand::prelude::*; + use std::collections::HashSet; + use std::sync::{Arc, Mutex}; + + let mut set = BitSet::new(); + let mut check_set = HashSet::new(); + let mut rng = thread_rng(); + let limit = 1_048_576; + for _ in 0..(limit / 10) { + let index = rng.gen_range(0, limit); + set.add(index); + check_set.insert(index); + } + let check_set = Arc::new(Mutex::new(check_set)); + let missing_set = Arc::new(Mutex::new(HashSet::new())); + set.par_iter().for_each(|n| { + let check_set = check_set.clone(); + let missing_set = missing_set.clone(); + let mut check = check_set.lock().unwrap(); + if !check.remove(&n) { + let mut missing = missing_set.lock().unwrap(); + missing.insert(n); + } + }); + let check_set = check_set.lock().unwrap(); + let missing_set = missing_set.lock().unwrap(); + if !check_set.is_empty() && !missing_set.is_empty() { + panic!( + "There were values that didn't get iterated: {:?} + There were values that got iterated, but that shouldn't be: {:?}", + *check_set, *missing_set + ); + } + if !check_set.is_empty() { + panic!( + "There were values that didn't get iterated: {:?}", + *check_set + ); + } + if !missing_set.is_empty() { + panic!( + "There were values that got iterated, but that shouldn't be: {:?}", + *missing_set + ); + } + } + + #[test] + fn par_iter_odd_even() { + let mut odd = BitSet::new(); + let mut even = BitSet::new(); + for i in 0..100_000 { + if i % 2 == 1 { + odd.add(i); + } else { + even.add(i); + } + } + + assert_eq!((&odd).par_iter().count(), 50_000); + assert_eq!((&even).par_iter().count(), 50_000); + assert_eq!(BitSetAnd(&odd, &even).par_iter().count(), 0); + } + + #[test] + fn par_iter_clusters() { + use std::collections::HashSet; + use std::sync::{Arc, Mutex}; + let mut set = BitSet::new(); + let mut check_set = HashSet::new(); + for x in 0..8 { + let x = (x * 3) << (::BITS * 2); + for y in 0..8 { + let y = (y * 3) << (::BITS); + for z in 0..8 { + let z = z * 2; + let index = x + y + z; + set.add(index); + check_set.insert(index); + } + } + } + let check_set = Arc::new(Mutex::new(check_set)); + let missing_set = Arc::new(Mutex::new(HashSet::new())); + set.par_iter().for_each(|n| { + let check_set = check_set.clone(); + let missing_set = missing_set.clone(); + let mut check = check_set.lock().unwrap(); + if !check.remove(&n) { + let mut missing = missing_set.lock().unwrap(); + missing.insert(n); + } + }); + let check_set = check_set.lock().unwrap(); + let missing_set = missing_set.lock().unwrap(); + if !check_set.is_empty() && !missing_set.is_empty() { + panic!( + "There were values 
that didn't get iterated: {:?} + There were values that got iterated, but that shouldn't be: {:?}", + *check_set, *missing_set + ); + } + if !check_set.is_empty() { + panic!( + "There were values that didn't get iterated: {:?}", + *check_set + ); + } + if !missing_set.is_empty() { + panic!( + "There were values that got iterated, but that shouldn't be: {:?}", + *missing_set + ); + } + } +} diff --git a/third_party/rust/hibitset/src/ops.rs b/third_party/rust/hibitset/src/ops.rs new file mode 100644 index 000000000000..b09f80e40d71 --- /dev/null +++ b/third_party/rust/hibitset/src/ops.rs @@ -0,0 +1,720 @@ +use std::iter::{FromIterator, IntoIterator}; +use std::ops::{BitAnd, BitAndAssign, BitOr, BitOrAssign, BitXor, BitXorAssign, Not}; +use std::usize; + +use util::*; + +use {AtomicBitSet, BitIter, BitSet, BitSetLike, DrainableBitSet}; + +impl<'a, B> BitOrAssign<&'a B> for BitSet +where + B: BitSetLike, +{ + fn bitor_assign(&mut self, lhs: &B) { + use iter::State::Continue; + let mut iter = lhs.iter(); + while let Some(level) = (1..LAYERS).find(|&level| iter.handle_level(level) == Continue) { + let lower = level - 1; + let idx = iter.prefix[lower] as usize >> BITS; + *self.layer_mut(lower, idx) |= lhs.get_from_layer(lower, idx); + } + self.layer3 |= lhs.layer3(); + } +} + +impl<'a, B> BitAndAssign<&'a B> for BitSet +where + B: BitSetLike, +{ + fn bitand_assign(&mut self, lhs: &B) { + use iter::State::*; + let mut iter = lhs.iter(); + iter.masks[LAYERS - 1] &= self.layer3(); + while let Some(level) = (1..LAYERS).find(|&level| iter.handle_level(level) == Continue) { + let lower = level - 1; + let idx = iter.prefix[lower] as usize >> BITS; + let our_layer = self.get_from_layer(lower, idx); + let their_layer = lhs.get_from_layer(lower, idx); + + iter.masks[lower] &= our_layer; + + let mut masks = [0; LAYERS]; + masks[lower] = our_layer & !their_layer; + BitIter::new(&mut *self, masks, iter.prefix).clear(); + + *self.layer_mut(lower, idx) &= their_layer; + } + let mut masks = [0; LAYERS]; + masks[LAYERS - 1] = self.layer3() & !lhs.layer3(); + BitIter::new(&mut *self, masks, [0; LAYERS - 1]).clear(); + + self.layer3 &= lhs.layer3(); + } +} + +impl<'a, B> BitXorAssign<&'a B> for BitSet +where + B: BitSetLike, +{ + fn bitxor_assign(&mut self, lhs: &B) { + use iter::State::*; + let mut iter = lhs.iter(); + while let Some(level) = (1..LAYERS).find(|&level| iter.handle_level(level) == Continue) { + let lower = level - 1; + let idx = iter.prefix[lower] as usize >> BITS; + + if lower == 0 { + *self.layer_mut(lower, idx) ^= lhs.get_from_layer(lower, idx); + + let mut change_bit = |level| { + let lower = level - 1; + let h = iter.prefix.get(level).cloned().unwrap_or(0) as usize; + let l = iter.prefix[lower] as usize >> BITS; + let mask = 1 << (l & !h); + + if self.get_from_layer(lower, l) == 0 { + *self.layer_mut(level, h >> BITS) &= !mask; + } else { + *self.layer_mut(level, h >> BITS) |= mask; + } + }; + + change_bit(level); + if iter.masks[level] == 0 { + (2..LAYERS).for_each(change_bit); + } + } + } + } +} + + + + + + +#[derive(Debug)] +pub struct BitSetAnd(pub A, pub B); + +impl BitSetLike for BitSetAnd { + #[inline] + fn layer3(&self) -> usize { + self.0.layer3() & self.1.layer3() + } + #[inline] + fn layer2(&self, i: usize) -> usize { + self.0.layer2(i) & self.1.layer2(i) + } + #[inline] + fn layer1(&self, i: usize) -> usize { + self.0.layer1(i) & self.1.layer1(i) + } + #[inline] + fn layer0(&self, i: usize) -> usize { + self.0.layer0(i) & self.1.layer0(i) + } + #[inline] + fn contains(&self, i: 
Index) -> bool { + self.0.contains(i) && self.1.contains(i) + } +} + +impl DrainableBitSet for BitSetAnd { + #[inline] + fn remove(&mut self, i: Index) -> bool { + if self.contains(i) { + self.0.remove(i); + self.1.remove(i); + true + } else { + false + } + } +} + + + + + + +#[derive(Debug)] +pub struct BitSetOr(pub A, pub B); + +impl BitSetLike for BitSetOr { + #[inline] + fn layer3(&self) -> usize { + self.0.layer3() | self.1.layer3() + } + #[inline] + fn layer2(&self, i: usize) -> usize { + self.0.layer2(i) | self.1.layer2(i) + } + #[inline] + fn layer1(&self, i: usize) -> usize { + self.0.layer1(i) | self.1.layer1(i) + } + #[inline] + fn layer0(&self, i: usize) -> usize { + self.0.layer0(i) | self.1.layer0(i) + } + #[inline] + fn contains(&self, i: Index) -> bool { + self.0.contains(i) || self.1.contains(i) + } +} + +impl DrainableBitSet for BitSetOr { + #[inline] + fn remove(&mut self, i: Index) -> bool { + if self.contains(i) { + self.0.remove(i); + self.1.remove(i); + true + } else { + false + } + } +} + + + + + +#[derive(Debug)] +pub struct BitSetNot(pub A); + +impl BitSetLike for BitSetNot { + #[inline] + fn layer3(&self) -> usize { + !0 + } + #[inline] + fn layer2(&self, _: usize) -> usize { + !0 + } + #[inline] + fn layer1(&self, _: usize) -> usize { + !0 + } + #[inline] + fn layer0(&self, i: usize) -> usize { + !self.0.layer0(i) + } + #[inline] + fn contains(&self, i: Index) -> bool { + !self.0.contains(i) + } +} + + + + + + +#[derive(Debug)] +pub struct BitSetXor(pub A, pub B); + +impl BitSetLike for BitSetXor { + #[inline] + fn layer3(&self) -> usize { + let xor = BitSetAnd( + BitSetOr(&self.0, &self.1), + BitSetNot(BitSetAnd(&self.0, &self.1)), + ); + xor.layer3() + } + #[inline] + fn layer2(&self, id: usize) -> usize { + let xor = BitSetAnd( + BitSetOr(&self.0, &self.1), + BitSetNot(BitSetAnd(&self.0, &self.1)), + ); + xor.layer2(id) + } + #[inline] + fn layer1(&self, id: usize) -> usize { + let xor = BitSetAnd( + BitSetOr(&self.0, &self.1), + BitSetNot(BitSetAnd(&self.0, &self.1)), + ); + xor.layer1(id) + } + #[inline] + fn layer0(&self, id: usize) -> usize { + let xor = BitSetAnd( + BitSetOr(&self.0, &self.1), + BitSetNot(BitSetAnd(&self.0, &self.1)), + ); + xor.layer0(id) + } + #[inline] + fn contains(&self, i: Index) -> bool { + BitSetAnd( + BitSetOr(&self.0, &self.1), + BitSetNot(BitSetAnd(&self.0, &self.1)), + ) + .contains(i) + } +} + + + +#[derive(Debug)] +pub struct BitSetAll; +impl BitSetLike for BitSetAll { + #[inline] + fn layer3(&self) -> usize { + usize::MAX + } + #[inline] + fn layer2(&self, _id: usize) -> usize { + usize::MAX + } + #[inline] + fn layer1(&self, _id: usize) -> usize { + usize::MAX + } + #[inline] + fn layer0(&self, _id: usize) -> usize { + usize::MAX + } + #[inline] + fn contains(&self, _i: Index) -> bool { + true + } +} + +macro_rules! 
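+// `operator!` stamps out `IntoIterator`, `Not`, `BitAnd`, `BitOr`, and
+// `BitXor` for each set type and for references to it; the bit operators
+// return the lazy `BitSetNot`/`BitSetAnd`/`BitSetOr`/`BitSetXor` adapters
+// instead of materializing a new `BitSet`.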
operator { + ( impl < ( $( $lifetime:tt )* ) ( $( $arg:ident ),* ) > for $bitset:ty ) => { + impl<$( $lifetime, )* $( $arg ),*> IntoIterator for $bitset + where $( $arg: BitSetLike ),* + { + type Item = as Iterator>::Item; + type IntoIter = BitIter; + fn into_iter(self) -> Self::IntoIter { + self.iter() + } + } + + impl<$( $lifetime, )* $( $arg ),*> Not for $bitset + where $( $arg: BitSetLike ),* + { + type Output = BitSetNot; + fn not(self) -> Self::Output { + BitSetNot(self) + } + } + + impl<$( $lifetime, )* $( $arg, )* T> BitAnd for $bitset + where T: BitSetLike, + $( $arg: BitSetLike ),* + { + type Output = BitSetAnd; + fn bitand(self, rhs: T) -> Self::Output { + BitSetAnd(self, rhs) + } + } + + impl<$( $lifetime, )* $( $arg, )* T> BitOr for $bitset + where T: BitSetLike, + $( $arg: BitSetLike ),* + { + type Output = BitSetOr; + fn bitor(self, rhs: T) -> Self::Output { + BitSetOr(self, rhs) + } + } + + impl<$( $lifetime, )* $( $arg, )* T> BitXor for $bitset + where T: BitSetLike, + $( $arg: BitSetLike ),* + { + type Output = BitSetXor; + fn bitxor(self, rhs: T) -> Self::Output { + BitSetXor(self, rhs) + } + } + + } +} + +operator!(impl<()()> for BitSet); +operator!(impl<('a)()> for &'a BitSet); +operator!(impl<()()> for AtomicBitSet); +operator!(impl<('a)()> for &'a AtomicBitSet); +operator!(impl<()(A)> for BitSetNot); +operator!(impl<('a)(A)> for &'a BitSetNot); +operator!(impl<()(A, B)> for BitSetAnd); +operator!(impl<('a)(A, B)> for &'a BitSetAnd); +operator!(impl<()(A, B)> for BitSetOr); +operator!(impl<('a)(A, B)> for &'a BitSetOr); +operator!(impl<()(A, B)> for BitSetXor); +operator!(impl<('a)(A, B)> for &'a BitSetXor); +operator!(impl<()()> for BitSetAll); +operator!(impl<('a)()> for &'a BitSetAll); + +macro_rules! iterator { + ( $bitset:ident ) => { + impl FromIterator for $bitset { + fn from_iter(iter: T) -> Self + where + T: IntoIterator, + { + let mut bitset = $bitset::new(); + for item in iter { + bitset.add(item); + } + bitset + } + } + + impl<'a> FromIterator<&'a Index> for $bitset { + fn from_iter(iter: T) -> Self + where + T: IntoIterator, + { + let mut bitset = $bitset::new(); + for item in iter { + bitset.add(*item); + } + bitset + } + } + + impl Extend for $bitset { + fn extend(&mut self, iter: T) + where + T: IntoIterator, + { + for item in iter { + self.add(item); + } + } + } + + impl<'a> Extend<&'a Index> for $bitset { + fn extend(&mut self, iter: T) + where + T: IntoIterator, + { + for item in iter { + self.add(*item); + } + } + } + }; +} + +iterator!(BitSet); +iterator!(AtomicBitSet); + +#[cfg(test)] +mod tests { + use {BitSet, BitSetLike, BitSetXor, Index}; + + #[test] + fn or_assign() { + use std::collections::HashSet; + use std::mem::size_of; + + let usize_bits = size_of::() as u32 * 8; + let n = 10_000; + let f1 = &|n| 7 * usize_bits * n; + let f2 = &|n| 13 * usize_bits * n; + + let mut c1: BitSet = (0..n).map(f1).collect(); + let c2: BitSet = (0..n).map(f2).collect(); + + c1 |= &c2; + + let h1: HashSet<_> = (0..n).map(f1).collect(); + let h2: HashSet<_> = (0..n).map(f2).collect(); + assert_eq!(c1.iter().collect::>(), &h1 | &h2); + } + + #[test] + fn or_assign_random() { + use rand::prelude::*; + + use std::collections::HashSet; + let limit = 1_048_576; + let mut rng = thread_rng(); + + let mut set1 = BitSet::new(); + let mut check_set1 = HashSet::new(); + for _ in 0..(limit / 100) { + let index = rng.gen_range(0, limit); + set1.add(index); + check_set1.insert(index); + } + + let mut set2 = BitSet::new(); + let mut check_set2 = HashSet::new(); + for _ in 
0..(limit / 100) { + let index = rng.gen_range(0, limit); + set2.add(index); + check_set2.insert(index); + } + + let hs1 = (&set1).iter().collect::>(); + let hs2 = (&set2).iter().collect::>(); + let mut hs = (&hs1 | &hs2).iter().cloned().collect::>(); + + set1 |= &set2; + + for _ in 0..(limit / 1000) { + let index = rng.gen_range(0, limit); + set1.add(index); + hs.insert(index); + } + + assert_eq!(hs, set1.iter().collect()); + } + + #[test] + fn and_assign() { + use std::collections::HashSet; + use std::mem::size_of; + + let usize_bits = size_of::() as u32 * 8; + let n = 10_000; + let f1 = &|n| 7 * usize_bits * n; + let f2 = &|n| 13 * usize_bits * n; + + let mut c1: BitSet = (0..n).map(f1).collect(); + let c2: BitSet = (0..n).map(f2).collect(); + + c1 &= &c2; + + let h1: HashSet<_> = (0..n).map(f1).collect(); + let h2: HashSet<_> = (0..n).map(f2).collect(); + assert_eq!(c1.iter().collect::>(), &h1 & &h2); + } + + #[test] + fn and_assign_specific() { + use util::BITS; + + let mut c1 = BitSet::new(); + c1.add(0); + let common = ((1 << BITS) << BITS) << BITS; + c1.add(common); + c1.add((((1 << BITS) << BITS) + 1) << BITS); + + let mut c2: BitSet = BitSet::new(); + c2.add(common); + c2.add((((1 << BITS) << BITS) + 2) << BITS); + + c1 &= &c2; + + assert_eq!(c1.iter().collect::>(), [common]); + } + + #[test] + fn and_assign_with_modification() { + use util::BITS; + + let mut c1 = BitSet::new(); + c1.add(0); + c1.add((1 << BITS) << BITS); + + let mut c2: BitSet = BitSet::new(); + c2.add(0); + + c1 &= &c2; + + let added = ((1 << BITS) + 1) << BITS; + c1.add(added); + + assert_eq!(c1.iter().collect::>(), [0, added]); + } + + #[test] + fn and_assign_random() { + use rand::prelude::*; + + use std::collections::HashSet; + let limit = 1_048_576; + let mut rng = thread_rng(); + + let mut set1 = BitSet::new(); + let mut check_set1 = HashSet::new(); + for _ in 0..(limit / 100) { + let index = rng.gen_range(0, limit); + set1.add(index); + check_set1.insert(index); + } + + let mut set2 = BitSet::new(); + let mut check_set2 = HashSet::new(); + for _ in 0..(limit / 100) { + let index = rng.gen_range(0, limit); + set2.add(index); + check_set2.insert(index); + } + + let hs1 = (&set1).iter().collect::>(); + let hs2 = (&set2).iter().collect::>(); + let mut hs = (&hs1 & &hs2).iter().cloned().collect::>(); + + set1 &= &set2; + + for _ in 0..(limit / 1000) { + let index = rng.gen_range(0, limit); + set1.add(index); + hs.insert(index); + } + + assert_eq!(hs, set1.iter().collect()); + } + + #[test] + fn xor_assign() { + use std::collections::HashSet; + use std::mem::size_of; + + let usize_bits = size_of::() as u32 * 8; + let n = 10_000; + let f1 = &|n| 7 * usize_bits * n; + let f2 = &|n| 13 * usize_bits * n; + + let mut c1: BitSet = (0..n).map(f1).collect(); + let c2: BitSet = (0..n).map(f2).collect(); + c1 ^= &c2; + + let h1: HashSet<_> = (0..n).map(f1).collect(); + let h2: HashSet<_> = (0..n).map(f2).collect(); + assert_eq!(c1.iter().collect::>(), &h1 ^ &h2); + } + + #[test] + fn xor_assign_specific() { + use util::BITS; + + let mut c1 = BitSet::new(); + c1.add(0); + let common = ((1 << BITS) << BITS) << BITS; + c1.add(common); + let a = (((1 << BITS) + 1) << BITS) << BITS; + c1.add(a); + + let mut c2: BitSet = BitSet::new(); + c2.add(common); + let b = (((1 << BITS) + 2) << BITS) << BITS; + c2.add(b); + + c1 ^= &c2; + + assert_eq!(c1.iter().collect::>(), [0, a, b]); + } + + #[test] + fn xor_assign_random() { + use rand::prelude::*; + use std::collections::HashSet; + let limit = 1_048_576; + let mut rng = 
thread_rng(); + + let mut set1 = BitSet::new(); + let mut check_set1 = HashSet::new(); + for _ in 0..(limit / 100) { + let index = rng.gen_range(0, limit); + set1.add(index); + check_set1.insert(index); + } + + let mut set2 = BitSet::new(); + let mut check_set2 = HashSet::new(); + for _ in 0..(limit / 100) { + let index = rng.gen_range(0, limit); + set2.add(index); + check_set2.insert(index); + } + + let hs1 = (&set1).iter().collect::>(); + let hs2 = (&set2).iter().collect::>(); + let mut hs = (&hs1 ^ &hs2).iter().cloned().collect::>(); + + set1 ^= &set2; + + for _ in 0..(limit / 1000) { + let index = rng.gen_range(0, limit); + set1.add(index); + hs.insert(index); + } + + assert_eq!(hs, set1.iter().collect()); + } + + #[test] + fn operators() { + let mut bitset = BitSet::new(); + bitset.add(1); + bitset.add(3); + bitset.add(5); + bitset.add(15); + bitset.add(200); + bitset.add(50001); + + let mut other = BitSet::new(); + other.add(1); + other.add(3); + other.add(50000); + other.add(50001); + + { + let not = &bitset & !&bitset; + assert_eq!(not.iter().count(), 0); + } + + { + let either = &bitset | &other; + let collected = either.iter().collect::>(); + assert_eq!(collected, vec![1, 3, 5, 15, 200, 50000, 50001]); + + let either_sanity = bitset.clone() | other.clone(); + assert_eq!(collected, either_sanity.iter().collect::>()); + } + + { + let same = &bitset & &other; + let collected = same.iter().collect::>(); + assert_eq!(collected, vec![1, 3, 50001]); + + let same_sanity = bitset.clone() & other.clone(); + assert_eq!(collected, same_sanity.iter().collect::>()); + } + + { + let exclusive = &bitset ^ &other; + let collected = exclusive.iter().collect::>(); + assert_eq!(collected, vec![5, 15, 200, 50000]); + + let exclusive_sanity = bitset.clone() ^ other.clone(); + assert_eq!(collected, exclusive_sanity.iter().collect::>()); + } + } + + #[test] + fn xor() { + + let mut bitset = BitSet::new(); + bitset.add(2); + bitset.add(3); + bitset.add(50000); + + + let mut other = BitSet::new(); + other.add(1); + other.add(3); + other.add(50000); + other.add(50001); + + { + + let xor = BitSetXor(&bitset, &other); + let collected = xor.iter().collect::>(); + assert_eq!(collected, vec![1, 2, 50001]); + } + } +} diff --git a/third_party/rust/hibitset/src/util.rs b/third_party/rust/hibitset/src/util.rs new file mode 100644 index 000000000000..e9b9fa2367ab --- /dev/null +++ b/third_party/rust/hibitset/src/util.rs @@ -0,0 +1,377 @@ + +pub type Index = u32; + + +#[cfg(target_pointer_width = "64")] +pub const BITS: usize = 6; +#[cfg(target_pointer_width = "32")] +pub const BITS: usize = 5; + +pub const LAYERS: usize = 4; +pub const MAX: usize = BITS * LAYERS; + +pub const MAX_EID: usize = 2 << MAX - 1; + + +pub const SHIFT0: usize = 0; + +pub const SHIFT1: usize = SHIFT0 + BITS; + +pub const SHIFT2: usize = SHIFT1 + BITS; + +pub const SHIFT3: usize = SHIFT2 + BITS; + +pub trait Row: Sized + Copy { + + fn row(self, shift: usize) -> usize; + + + fn offset(self, shift: usize) -> usize; + + + #[inline(always)] + fn mask(self, shift: usize) -> usize { + 1usize << self.row(shift) + } +} + +impl Row for Index { + #[inline(always)] + fn row(self, shift: usize) -> usize { + ((self >> shift) as usize) & ((1 << BITS) - 1) + } + + #[inline(always)] + fn offset(self, shift: usize) -> usize { + self as usize / (1 << shift) + } +} + + + + +#[inline] +pub fn offsets(bit: Index) -> (usize, usize, usize) { + (bit.offset(SHIFT1), bit.offset(SHIFT2), bit.offset(SHIFT3)) +} + + + + + + + + + + + + + + + + + +#[cfg(feature = 
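+// `average_ones` returns the bit position that splits this word's set bits
+// into two equal-popcount halves (`None` if fewer than two bits are set).
+// It reuses the SWAR popcount partial sums and then walks them back down,
+// binary-search style, toward the half-count target.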
"parallel")] +pub fn average_ones(n: usize) -> Option { + #[cfg(target_pointer_width = "64")] + let average = average_ones_u64(n as u64).map(|n| n as usize); + + #[cfg(target_pointer_width = "32")] + let average = average_ones_u32(n as u32).map(|n| n as usize); + + average +} + +#[cfg(all(any(test, target_pointer_width = "32"), feature = "parallel"))] +fn average_ones_u32(n: u32) -> Option { + + const PAR: [u32; 5] = [!0 / 0x3, !0 / 0x5, !0 / 0x11, !0 / 0x101, !0 / 0x10001]; + + + let a = n - ((n >> 1) & PAR[0]); + let b = (a & PAR[1]) + ((a >> 2) & PAR[1]); + let c = (b + (b >> 4)) & PAR[2]; + let d = (c + (c >> 8)) & PAR[3]; + let mut cur = d >> 16; + let count = (d + cur) & PAR[4]; + if count <= 1 { + return None; + } + + + let mut target = count / 2; + + + let mut result = 32; + { + let mut descend = |child, child_stride, child_mask| { + if cur < target { + result -= 2 * child_stride; + target -= cur; + } + + + cur = (child >> (result - child_stride)) & child_mask; + }; + + descend(c, 8, 16 - 1); + descend(b, 4, 8 - 1); + descend(a, 2, 4 - 1); + descend(n, 1, 2 - 1); + } + if cur < target { + result -= 1; + } + + Some(result - 1) +} + +#[cfg(all(any(test, target_pointer_width = "64"), feature = "parallel"))] +fn average_ones_u64(n: u64) -> Option { + + const PAR: [u64; 6] = [ + !0 / 0x3, + !0 / 0x5, + !0 / 0x11, + !0 / 0x101, + !0 / 0x10001, + !0 / 0x100000001, + ]; + + + let a = n - ((n >> 1) & PAR[0]); + let b = (a & PAR[1]) + ((a >> 2) & PAR[1]); + let c = (b + (b >> 4)) & PAR[2]; + let d = (c + (c >> 8)) & PAR[3]; + let e = (d + (d >> 16)) & PAR[4]; + let mut cur = e >> 32; + let count = (e + cur) & PAR[5]; + if count <= 1 { + return None; + } + + + let mut target = count / 2; + + + let mut result = 64; + { + let mut descend = |child, child_stride, child_mask| { + if cur < target { + result -= 2 * child_stride; + target -= cur; + } + + + cur = (child >> (result - child_stride)) & child_mask; + }; + + descend(d, 16, 256 - 1); + descend(c, 8, 16 - 1); + descend(b, 4, 8 - 1); + descend(a, 2, 4 - 1); + descend(n, 1, 2 - 1); + } + if cur < target { + result -= 1; + } + + Some(result - 1) +} + +#[cfg(all(test, feature = "parallel"))] +mod test_average_ones { + use super::*; + #[test] + fn parity_0_average_ones_u32() { + struct EvenParity(u32); + + impl Iterator for EvenParity { + type Item = u32; + fn next(&mut self) -> Option { + if self.0 == u32::max_value() { + return None; + } + self.0 += 1; + while self.0.count_ones() & 1 != 0 { + if self.0 == u32::max_value() { + return None; + } + self.0 += 1; + } + Some(self.0) + } + } + + let steps = 1000; + for i in 0..steps { + let pos = i * (u32::max_value() / steps); + for i in EvenParity(pos).take(steps as usize) { + let mask = (1 << average_ones_u32(i).unwrap_or(31)) - 1; + assert_eq!((i & mask).count_ones(), (i & !mask).count_ones(), "{:x}", i); + } + } + } + + #[test] + fn parity_1_average_ones_u32() { + struct OddParity(u32); + + impl Iterator for OddParity { + type Item = u32; + fn next(&mut self) -> Option { + if self.0 == u32::max_value() { + return None; + } + self.0 += 1; + while self.0.count_ones() & 1 == 0 { + if self.0 == u32::max_value() { + return None; + } + self.0 += 1; + } + Some(self.0) + } + } + + let steps = 1000; + for i in 0..steps { + let pos = i * (u32::max_value() / steps); + for i in OddParity(pos).take(steps as usize) { + let mask = (1 << average_ones_u32(i).unwrap_or(31)) - 1; + let a = (i & mask).count_ones(); + let b = (i & !mask).count_ones(); + if a < b { + assert_eq!(a + 1, b, "{:x}", i); + } else if b < a { 
+#[cfg(all(test, feature = "parallel"))]
+mod test_average_ones {
+    use super::*;
+
+    #[test]
+    fn parity_0_average_ones_u32() {
+        struct EvenParity(u32);
+
+        impl Iterator for EvenParity {
+            type Item = u32;
+            fn next(&mut self) -> Option<u32> {
+                if self.0 == u32::max_value() {
+                    return None;
+                }
+                self.0 += 1;
+                while self.0.count_ones() & 1 != 0 {
+                    if self.0 == u32::max_value() {
+                        return None;
+                    }
+                    self.0 += 1;
+                }
+                Some(self.0)
+            }
+        }
+
+        let steps = 1000;
+        for i in 0..steps {
+            let pos = i * (u32::max_value() / steps);
+            for i in EvenParity(pos).take(steps as usize) {
+                let mask = (1 << average_ones_u32(i).unwrap_or(31)) - 1;
+                assert_eq!((i & mask).count_ones(), (i & !mask).count_ones(), "{:x}", i);
+            }
+        }
+    }
+
+    #[test]
+    fn parity_1_average_ones_u32() {
+        struct OddParity(u32);
+
+        impl Iterator for OddParity {
+            type Item = u32;
+            fn next(&mut self) -> Option<u32> {
+                if self.0 == u32::max_value() {
+                    return None;
+                }
+                self.0 += 1;
+                while self.0.count_ones() & 1 == 0 {
+                    if self.0 == u32::max_value() {
+                        return None;
+                    }
+                    self.0 += 1;
+                }
+                Some(self.0)
+            }
+        }
+
+        let steps = 1000;
+        for i in 0..steps {
+            let pos = i * (u32::max_value() / steps);
+            for i in OddParity(pos).take(steps as usize) {
+                let mask = (1 << average_ones_u32(i).unwrap_or(31)) - 1;
+                let a = (i & mask).count_ones();
+                let b = (i & !mask).count_ones();
+                if a < b {
+                    assert_eq!(a + 1, b, "{:x}", i);
+                } else if b < a {
+                    assert_eq!(a, b + 1, "{:x}", i);
+                } else {
+                    panic!("Odd parity shouldn't split in exactly half");
+                }
+            }
+        }
+    }
+
+    #[test]
+    fn empty_average_ones_u32() {
+        assert_eq!(None, average_ones_u32(0));
+    }
+
+    #[test]
+    fn singleton_average_ones_u32() {
+        for i in 0..32 {
+            assert_eq!(None, average_ones_u32(1 << i), "{:x}", i);
+        }
+    }
+
+    #[test]
+    fn parity_0_average_ones_u64() {
+        struct EvenParity(u64);
+
+        impl Iterator for EvenParity {
+            type Item = u64;
+            fn next(&mut self) -> Option<u64> {
+                if self.0 == u64::max_value() {
+                    return None;
+                }
+                self.0 += 1;
+                while self.0.count_ones() & 1 != 0 {
+                    if self.0 == u64::max_value() {
+                        return None;
+                    }
+                    self.0 += 1;
+                }
+                Some(self.0)
+            }
+        }
+
+        let steps = 1000;
+        for i in 0..steps {
+            let pos = i * (u64::max_value() / steps);
+            for i in EvenParity(pos).take(steps as usize) {
+                let mask = (1 << average_ones_u64(i).unwrap_or(63)) - 1;
+                assert_eq!((i & mask).count_ones(), (i & !mask).count_ones(), "{:x}", i);
+            }
+        }
+    }
+
+    #[test]
+    fn parity_1_average_ones_u64() {
+        struct OddParity(u64);
+
+        impl Iterator for OddParity {
+            type Item = u64;
+            fn next(&mut self) -> Option<u64> {
+                if self.0 == u64::max_value() {
+                    return None;
+                }
+                self.0 += 1;
+                while self.0.count_ones() & 1 == 0 {
+                    if self.0 == u64::max_value() {
+                        return None;
+                    }
+                    self.0 += 1;
+                }
+                Some(self.0)
+            }
+        }
+
+        let steps = 1000;
+        for i in 0..steps {
+            let pos = i * (u64::max_value() / steps);
+            for i in OddParity(pos).take(steps as usize) {
+                let mask = (1 << average_ones_u64(i).unwrap_or(63)) - 1;
+                let a = (i & mask).count_ones();
+                let b = (i & !mask).count_ones();
+                if a < b {
+                    assert_eq!(a + 1, b, "{:x}", i);
+                } else if b < a {
+                    assert_eq!(a, b + 1, "{:x}", i);
+                } else {
+                    panic!("Odd parity shouldn't split in exactly half");
+                }
+            }
+        }
+    }
+
+    #[test]
+    fn empty_average_ones_u64() {
+        assert_eq!(None, average_ones_u64(0));
+    }
+
+    #[test]
+    fn singleton_average_ones_u64() {
+        for i in 0..64 {
+            assert_eq!(None, average_ones_u64(1 << i), "{:x}", i);
+        }
+    }
+
+    #[test]
+    fn average_ones_agree_u32_u64() {
+        let steps = 1000;
+        for i in 0..steps {
+            let pos = i * (u32::max_value() / steps);
+            for i in pos..steps {
+                assert_eq!(
+                    average_ones_u32(i),
+                    average_ones_u64(i as u64).map(|n| n as u32),
+                    "{:x}",
+                    i
+                );
+            }
+        }
+    }
+
+    #[test]
+    fn specific_values() {
+        assert_eq!(Some(4), average_ones_u32(0b10110));
+        assert_eq!(Some(5), average_ones_u32(0b100010));
+        assert_eq!(None, average_ones_u32(0));
+        assert_eq!(None, average_ones_u32(1));
+
+        assert_eq!(Some(4), average_ones_u64(0b10110));
+        assert_eq!(Some(5), average_ones_u64(0b100010));
+        assert_eq!(None, average_ones_u64(0));
+        assert_eq!(None, average_ones_u64(1));
+    }
+}
diff --git a/third_party/rust/malloc_buf/.cargo-checksum.json b/third_party/rust/malloc_buf/.cargo-checksum.json
new file mode 100644
index 000000000000..70cd125dfeae
--- /dev/null
+++ b/third_party/rust/malloc_buf/.cargo-checksum.json
@@ -0,0 +1 @@
+{"files":{"Cargo.toml":"d87624d9d117489bd49dbaeb70077a0721e00ac2a76eb371faec818a92da5661","src/lib.rs":"1130dacbe045a20c1ea1e65e00976f18eb7513da492b07d538e7b1b5c64b8842"},"package":"62bb907fe88d54d8d9ce32a3cceab4218ed2f6b7d35617cafe9adf84e43919cb"}
\ No newline at end of file
diff --git a/third_party/rust/malloc_buf/Cargo.toml b/third_party/rust/malloc_buf/Cargo.toml
new file mode 100644
index 000000000000..cfb0c082d874
--- /dev/null
+++ b/third_party/rust/malloc_buf/Cargo.toml
@@ -0,0 +1,14 @@
+[package]
+name = "malloc_buf"
+version = "0.0.6"
+authors = ["Steven Sheldon"]
+
+description = "Structs for handling malloc'd memory passed to Rust."
"Structs for handling malloc'd memory passed to Rust." +repository = "https://github.com/SSheldon/malloc_buf" +documentation = "http://ssheldon.github.io/malloc_buf/malloc_buf/" +license = "MIT" + +exclude = [".gitignore"] + +[dependencies] +libc = ">= 0.1, < 0.3" diff --git a/third_party/rust/malloc_buf/src/lib.rs b/third_party/rust/malloc_buf/src/lib.rs new file mode 100644 index 000000000000..b9a1367beab7 --- /dev/null +++ b/third_party/rust/malloc_buf/src/lib.rs @@ -0,0 +1,95 @@ +extern crate libc; + +use std::marker::PhantomData; +use std::ops::Deref; +use std::slice; +use libc::c_void; + +struct MallocPtr(*mut c_void); + +impl Drop for MallocPtr { + fn drop(&mut self) { + unsafe { + libc::free(self.0); + } + } +} + + +pub struct MallocBuffer { + ptr: MallocPtr, + len: usize, + items: PhantomData<[T]>, +} + +impl MallocBuffer { + + + + + + + + + pub unsafe fn new(ptr: *mut T, len: usize) -> Option> { + if len > 0 && ptr.is_null() { + None + } else { + Some(MallocBuffer { + ptr: MallocPtr(ptr as *mut c_void), + len: len, + items: PhantomData, + }) + } + } +} + +impl Deref for MallocBuffer { + type Target = [T]; + + fn deref(&self) -> &[T] { + let ptr = if self.len == 0 && self.ptr.0.is_null() { + + 0x1 as *const T + } else { + self.ptr.0 as *const T + }; + unsafe { + slice::from_raw_parts(ptr, self.len) + } + } +} + +#[cfg(test)] +mod tests { + use std::ptr; + use libc; + + use super::MallocBuffer; + + #[test] + fn test_null_buf() { + let buf = unsafe { + MallocBuffer::::new(ptr::null_mut(), 0).unwrap() + }; + assert!(&*buf == []); + assert!(Some(&*buf) == Some(&[])); + + let buf = unsafe { + MallocBuffer::::new(ptr::null_mut(), 7) + }; + assert!(buf.is_none()); + } + + #[test] + fn test_buf() { + let buf = unsafe { + let ptr = libc::malloc(12) as *mut u32; + *ptr = 1; + *ptr.offset(1) = 2; + *ptr.offset(2) = 3; + MallocBuffer::new(ptr, 3).unwrap() + }; + assert!(&*buf == [1, 2, 3]); + } +} diff --git a/third_party/rust/metal/.cargo-checksum.json b/third_party/rust/metal/.cargo-checksum.json new file mode 100644 index 000000000000..b120af9d4c9f --- /dev/null +++ b/third_party/rust/metal/.cargo-checksum.json @@ -0,0 +1 @@ 
+{"files":{"Cargo.lock":"70903aff8c36006ea9805d0b6673bdb9b719e029d0aac1140b07067e2389059a","Cargo.toml":"fdec249c160dae703c334f8416731f586b565861af795a19768b4b523a4cc234","LICENSE-APACHE":"a60eea817514531668d7e00765731449fe14d059d3249e0bc93b36de45f759f2","LICENSE-MIT":"0621878e61f0d0fda054bcbe02df75192c28bde1ecc8289cbd86aeba2dd72720","README.md":"8961f19dddc0024f98c72d40689c8db0d317b0dfac1cfd6bfc9150d0fc9d4f95","bors.toml":"06ccf7395c5f6b9a0549ccb499b7e3d1b0a984d43a54861e82ca3c22c9d7e1ca","examples/argument-buffer/main.rs":"6634a20703e60e0c37a89333f4f65795f9d012f799b481a110612fd234dd8649","examples/bind/main.rs":"85b15b6edf2da657072ae9df14347cd1333182d74b511e88a5e6a2c1e865a430","examples/caps/main.rs":"145f5efc728232061d12f09ab954f738b6870921ee3f07744ee1450111c98ed8","examples/compute/compute-argument-buffer.metal":"6530bbd6a0101d9db2893805436f5dc877959e81ea97a27014c0fc52fc9fa77b","examples/compute/compute-argument-buffer.rs":"7ba732fdacf709d7075164bafd801fe025389d28620abb9979b3b75550771d32","examples/compute/default.metallib":"0f8579c77cf5d5f35023f1922b672081a23d625f199b0be0a68f81a0bfa95384","examples/compute/embedded-lib.rs":"faeee13d9c959f88c3950a693fb3bc57d61bc005209923964a5f7c2c196fb9ff","examples/compute/main.rs":"ee695accf6659c3cc0043b4067214edd16c484f30a5aef45d6a897a5f90c5e2b","examples/compute/shaders.metal":"f2b15551bb5247b88a3029c3d8ef37c6fa04a4a6cca9f90f069894ed6822b4bf","examples/library/main.rs":"a1420ec28a471f28a789b75b3ecf5abb699ed352b337747169914812fb98045a","examples/reflection/main.rs":"927902392a060e40a98f0d222cfa994bb60eee169fdcd550f0ecfc9e44872164","examples/window/default.metallib":"f43d32e42cb6cbd50a5364dfb11ff17fda4b5cf2046e371d2e3ddc1d2008bb9e","examples/window/main.rs":"bd264d90404d4955a8a119268c982346dd5a493415f2629d57e9d82820475fbc","examples/window/shaders.metal":"4c7ea146229caeb0df1d4feea13b927d27c1a6fe8fd0245ce84bf1975772434a","src/argument.rs":"f64e62439d67c02e47568ae91421033692dcec5abd174605c7b3abbcc0d3d9e4","src/buffer.rs":"960a8dd9375e7ce0783660baa0b15cea609dfe5018df9adc9bc18cea8a0a1b35","src/capturemanager.rs":"cc168fff60662f481a5580de2c1e5ff5a15a3248209a929b6a3d1caf8ad88913","src/commandbuffer.rs":"0dd0b34699b27e4891a0d2690ea01b69e3d40d66dabed023ccd1e7014fde8d0a","src/commandqueue.rs":"5b87621ae4495ae04a5de5ce980d0cde31b4bb0d1e31e932d34c49252240c9d9","src/constants.rs":"7be4d901da863f126d158d1042f5482a122fbcf760f7c225b0b1693a55f6bb4b","src/depthstencil.rs":"7f8fc0c9e9887ed0681fdec846e5e4a4830b69ea023d6506d567020ee7234408","src/device.rs":"4e8f6777aacb0089ea32d68b08fb5533494023ed1efe94c7480fcc78d161ed7a","src/drawable.rs":"03a8adb20da93839314586f0ec12bce139ece546a07e2f9ed60c89303fa52a82","src/encoder.rs":"d7acd5854a3048a466c3aa20bd8bf36f63ad721dd3ed1d222d6fd961a98e0964","src/heap.rs":"73d4082d29f31af58e3950c401da32aa8939c5fbdd2e587ef3dd977d30b79166","src/lib.rs":"e8396f361242ab37d2c791e6ee01f9cc149738635e8d854782d9b07e86dbc4c0","src/library.rs":"c8dad2294141f1d67e06fb3d2f9c407e48a7dc148c51dd52d5b5f0a86028a85a","src/pipeline/compute.rs":"5d14cb893bbe7374716d04fe451321f42bd5e623a45184f4776864f6dcc964cf","src/pipeline/mod.rs":"4f9679202950b1ae95c8d97072c69139908f8104631a8933f19117365b3d16e6","src/pipeline/render.rs":"23130546e9b329ac711a85756f68ed3be2f7c867b5d8a021126e253b3981a712","src/renderpass.rs":"22f2b5a996c5c32896923f428b84350d51b83b1ee91cd889f96465137dfa2e04","src/resource.rs":"03ad88f2d67a7c4ceac4ae00781d56b269c048be55b5c40f0e136cd6986d907d","src/sampler.rs":"69b370d3b12dc12bbc4e9efba23c4a7ec8e401602cee9656c7030c6a68812411","src/texture.rs":"d83520
33d26e1a254914b01444fbd996a17220b69009a36a89caa19a9a4c37c8","src/types.rs":"9b3f094f5d22b619bd1572b723bc23b9b5a766dc65eaa752bb3106db28ca18ea","src/vertexdescriptor.rs":"554c9171df839273f9572814064a8dfd90074772710dff079ad8f7f9908e1e9a"},"package":"ddf8052f20601c7af6293d3f7bf7b9159aee5974804fe65d871d437f933ec1eb"} \ No newline at end of file diff --git a/third_party/rust/metal/Cargo.lock b/third_party/rust/metal/Cargo.lock new file mode 100644 index 000000000000..1f2c8d738f4e --- /dev/null +++ b/third_party/rust/metal/Cargo.lock @@ -0,0 +1,560 @@ +# This file is automatically @generated by Cargo. +# It is not intended for manual editing. +[[package]] +name = "android_glue" +version = "0.2.3" +source = "registry+https://github.com/rust-lang/crates.io-index" + +[[package]] +name = "bitflags" +version = "1.0.3" +source = "registry+https://github.com/rust-lang/crates.io-index" + +[[package]] +name = "block" +version = "0.1.6" +source = "registry+https://github.com/rust-lang/crates.io-index" + +[[package]] +name = "byteorder" +version = "1.2.4" +source = "registry+https://github.com/rust-lang/crates.io-index" + +[[package]] +name = "cc" +version = "1.0.18" +source = "registry+https://github.com/rust-lang/crates.io-index" + +[[package]] +name = "cfg-if" +version = "0.1.4" +source = "registry+https://github.com/rust-lang/crates.io-index" + +[[package]] +name = "cocoa" +version = "0.17.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +dependencies = [ + "bitflags 1.0.3 (registry+https://github.com/rust-lang/crates.io-index)", + "block 0.1.6 (registry+https://github.com/rust-lang/crates.io-index)", + "core-graphics 0.16.0 (registry+https://github.com/rust-lang/crates.io-index)", + "libc 0.2.42 (registry+https://github.com/rust-lang/crates.io-index)", + "objc 0.2.6 (registry+https://github.com/rust-lang/crates.io-index)", +] + +[[package]] +name = "cocoa" +version = "0.19.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +dependencies = [ + "bitflags 1.0.3 (registry+https://github.com/rust-lang/crates.io-index)", + "block 0.1.6 (registry+https://github.com/rust-lang/crates.io-index)", + "core-foundation 0.6.1 (registry+https://github.com/rust-lang/crates.io-index)", + "core-graphics 0.17.3 (registry+https://github.com/rust-lang/crates.io-index)", + "foreign-types 0.3.2 (registry+https://github.com/rust-lang/crates.io-index)", + "libc 0.2.42 (registry+https://github.com/rust-lang/crates.io-index)", + "objc 0.2.6 (registry+https://github.com/rust-lang/crates.io-index)", +] + +[[package]] +name = "core-foundation" +version = "0.6.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +dependencies = [ + "core-foundation-sys 0.6.1 (registry+https://github.com/rust-lang/crates.io-index)", + "libc 0.2.42 (registry+https://github.com/rust-lang/crates.io-index)", +] + +[[package]] +name = "core-foundation-sys" +version = "0.6.1" +source = "registry+https://github.com/rust-lang/crates.io-index" + +[[package]] +name = "core-graphics" +version = "0.16.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +dependencies = [ + "bitflags 1.0.3 (registry+https://github.com/rust-lang/crates.io-index)", + "core-foundation 0.6.1 (registry+https://github.com/rust-lang/crates.io-index)", + "foreign-types 0.3.2 (registry+https://github.com/rust-lang/crates.io-index)", + "libc 0.2.42 (registry+https://github.com/rust-lang/crates.io-index)", +] + +[[package]] +name = "core-graphics" +version = "0.17.3" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +dependencies = [ + "bitflags 1.0.3 (registry+https://github.com/rust-lang/crates.io-index)", + "core-foundation 0.6.1 (registry+https://github.com/rust-lang/crates.io-index)", + "foreign-types 0.3.2 (registry+https://github.com/rust-lang/crates.io-index)", + "libc 0.2.42 (registry+https://github.com/rust-lang/crates.io-index)", +] + +[[package]] +name = "dlib" +version = "0.4.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +dependencies = [ + "libloading 0.5.0 (registry+https://github.com/rust-lang/crates.io-index)", +] + +[[package]] +name = "downcast-rs" +version = "1.0.3" +source = "registry+https://github.com/rust-lang/crates.io-index" + +[[package]] +name = "foreign-types" +version = "0.3.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +dependencies = [ + "foreign-types-shared 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)", +] + +[[package]] +name = "foreign-types-shared" +version = "0.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" + +[[package]] +name = "fuchsia-zircon" +version = "0.3.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +dependencies = [ + "bitflags 1.0.3 (registry+https://github.com/rust-lang/crates.io-index)", + "fuchsia-zircon-sys 0.3.3 (registry+https://github.com/rust-lang/crates.io-index)", +] + +[[package]] +name = "fuchsia-zircon-sys" +version = "0.3.3" +source = "registry+https://github.com/rust-lang/crates.io-index" + +[[package]] +name = "gcc" +version = "0.3.54" +source = "registry+https://github.com/rust-lang/crates.io-index" + +[[package]] +name = "lazy_static" +version = "1.0.2" +source = "registry+https://github.com/rust-lang/crates.io-index" + +[[package]] +name = "libc" +version = "0.1.12" +source = "registry+https://github.com/rust-lang/crates.io-index" + +[[package]] +name = "libc" +version = "0.2.42" +source = "registry+https://github.com/rust-lang/crates.io-index" + +[[package]] +name = "libloading" +version = "0.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +dependencies = [ + "cc 1.0.18 (registry+https://github.com/rust-lang/crates.io-index)", + "winapi 0.3.5 (registry+https://github.com/rust-lang/crates.io-index)", +] + +[[package]] +name = "lock_api" +version = "0.1.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +dependencies = [ + "owning_ref 0.3.3 (registry+https://github.com/rust-lang/crates.io-index)", + "scopeguard 0.3.3 (registry+https://github.com/rust-lang/crates.io-index)", +] + +[[package]] +name = "log" +version = "0.4.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +dependencies = [ + "cfg-if 0.1.4 (registry+https://github.com/rust-lang/crates.io-index)", +] + +[[package]] +name = "malloc_buf" +version = "0.0.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +dependencies = [ + "libc 0.2.42 (registry+https://github.com/rust-lang/crates.io-index)", +] + +[[package]] +name = "memmap" +version = "0.6.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +dependencies = [ + "libc 0.2.42 (registry+https://github.com/rust-lang/crates.io-index)", + "winapi 0.3.5 (registry+https://github.com/rust-lang/crates.io-index)", +] + +[[package]] +name = "metal" +version = "0.17.0" +dependencies = [ + "bitflags 1.0.3 (registry+https://github.com/rust-lang/crates.io-index)", + "block 0.1.6 (registry+https://github.com/rust-lang/crates.io-index)", + "cocoa 0.19.0 
(registry+https://github.com/rust-lang/crates.io-index)", + "core-graphics 0.17.3 (registry+https://github.com/rust-lang/crates.io-index)", + "foreign-types 0.3.2 (registry+https://github.com/rust-lang/crates.io-index)", + "log 0.4.3 (registry+https://github.com/rust-lang/crates.io-index)", + "objc 0.2.6 (registry+https://github.com/rust-lang/crates.io-index)", + "sema 0.1.4 (registry+https://github.com/rust-lang/crates.io-index)", + "winit 0.17.0 (registry+https://github.com/rust-lang/crates.io-index)", +] + +[[package]] +name = "nix" +version = "0.11.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +dependencies = [ + "bitflags 1.0.3 (registry+https://github.com/rust-lang/crates.io-index)", + "cc 1.0.18 (registry+https://github.com/rust-lang/crates.io-index)", + "cfg-if 0.1.4 (registry+https://github.com/rust-lang/crates.io-index)", + "libc 0.2.42 (registry+https://github.com/rust-lang/crates.io-index)", + "void 1.0.2 (registry+https://github.com/rust-lang/crates.io-index)", +] + +[[package]] +name = "objc" +version = "0.2.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +dependencies = [ + "malloc_buf 0.0.6 (registry+https://github.com/rust-lang/crates.io-index)", + "objc_exception 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)", +] + +[[package]] +name = "objc_exception" +version = "0.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +dependencies = [ + "gcc 0.3.54 (registry+https://github.com/rust-lang/crates.io-index)", +] + +[[package]] +name = "owning_ref" +version = "0.3.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +dependencies = [ + "stable_deref_trait 1.1.1 (registry+https://github.com/rust-lang/crates.io-index)", +] + +[[package]] +name = "parking_lot" +version = "0.6.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +dependencies = [ + "lock_api 0.1.3 (registry+https://github.com/rust-lang/crates.io-index)", + "parking_lot_core 0.2.14 (registry+https://github.com/rust-lang/crates.io-index)", +] + +[[package]] +name = "parking_lot_core" +version = "0.2.14" +source = "registry+https://github.com/rust-lang/crates.io-index" +dependencies = [ + "libc 0.2.42 (registry+https://github.com/rust-lang/crates.io-index)", + "rand 0.4.2 (registry+https://github.com/rust-lang/crates.io-index)", + "smallvec 0.6.3 (registry+https://github.com/rust-lang/crates.io-index)", + "winapi 0.3.5 (registry+https://github.com/rust-lang/crates.io-index)", +] + +[[package]] +name = "percent-encoding" +version = "1.0.1" +source = "registry+https://github.com/rust-lang/crates.io-index" + +[[package]] +name = "pkg-config" +version = "0.3.11" +source = "registry+https://github.com/rust-lang/crates.io-index" + +[[package]] +name = "rand" +version = "0.3.22" +source = "registry+https://github.com/rust-lang/crates.io-index" +dependencies = [ + "fuchsia-zircon 0.3.3 (registry+https://github.com/rust-lang/crates.io-index)", + "libc 0.2.42 (registry+https://github.com/rust-lang/crates.io-index)", + "rand 0.4.2 (registry+https://github.com/rust-lang/crates.io-index)", +] + +[[package]] +name = "rand" +version = "0.4.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +dependencies = [ + "fuchsia-zircon 0.3.3 (registry+https://github.com/rust-lang/crates.io-index)", + "libc 0.2.42 (registry+https://github.com/rust-lang/crates.io-index)", + "winapi 0.3.5 (registry+https://github.com/rust-lang/crates.io-index)", +] + +[[package]] +name = "redox_syscall" +version = "0.1.40" +source = 
"registry+https://github.com/rust-lang/crates.io-index" + +[[package]] +name = "remove_dir_all" +version = "0.5.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +dependencies = [ + "winapi 0.3.5 (registry+https://github.com/rust-lang/crates.io-index)", +] + +[[package]] +name = "scopeguard" +version = "0.3.3" +source = "registry+https://github.com/rust-lang/crates.io-index" + +[[package]] +name = "sema" +version = "0.1.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +dependencies = [ + "libc 0.1.12 (registry+https://github.com/rust-lang/crates.io-index)", + "rand 0.3.22 (registry+https://github.com/rust-lang/crates.io-index)", + "time 0.1.40 (registry+https://github.com/rust-lang/crates.io-index)", +] + +[[package]] +name = "smallvec" +version = "0.6.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +dependencies = [ + "unreachable 1.0.0 (registry+https://github.com/rust-lang/crates.io-index)", +] + +[[package]] +name = "smithay-client-toolkit" +version = "0.2.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +dependencies = [ + "bitflags 1.0.3 (registry+https://github.com/rust-lang/crates.io-index)", + "byteorder 1.2.4 (registry+https://github.com/rust-lang/crates.io-index)", + "dlib 0.4.1 (registry+https://github.com/rust-lang/crates.io-index)", + "lazy_static 1.0.2 (registry+https://github.com/rust-lang/crates.io-index)", + "memmap 0.6.2 (registry+https://github.com/rust-lang/crates.io-index)", + "nix 0.11.0 (registry+https://github.com/rust-lang/crates.io-index)", + "tempfile 3.0.2 (registry+https://github.com/rust-lang/crates.io-index)", + "wayland-client 0.20.11 (registry+https://github.com/rust-lang/crates.io-index)", + "wayland-commons 0.20.11 (registry+https://github.com/rust-lang/crates.io-index)", + "wayland-protocols 0.20.11 (registry+https://github.com/rust-lang/crates.io-index)", +] + +[[package]] +name = "stable_deref_trait" +version = "1.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" + +[[package]] +name = "tempfile" +version = "3.0.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +dependencies = [ + "libc 0.2.42 (registry+https://github.com/rust-lang/crates.io-index)", + "rand 0.4.2 (registry+https://github.com/rust-lang/crates.io-index)", + "redox_syscall 0.1.40 (registry+https://github.com/rust-lang/crates.io-index)", + "remove_dir_all 0.5.1 (registry+https://github.com/rust-lang/crates.io-index)", + "winapi 0.3.5 (registry+https://github.com/rust-lang/crates.io-index)", +] + +[[package]] +name = "time" +version = "0.1.40" +source = "registry+https://github.com/rust-lang/crates.io-index" +dependencies = [ + "libc 0.2.42 (registry+https://github.com/rust-lang/crates.io-index)", + "redox_syscall 0.1.40 (registry+https://github.com/rust-lang/crates.io-index)", + "winapi 0.3.5 (registry+https://github.com/rust-lang/crates.io-index)", +] + +[[package]] +name = "unreachable" +version = "1.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +dependencies = [ + "void 1.0.2 (registry+https://github.com/rust-lang/crates.io-index)", +] + +[[package]] +name = "void" +version = "1.0.2" +source = "registry+https://github.com/rust-lang/crates.io-index" + +[[package]] +name = "wayland-client" +version = "0.20.11" +source = "registry+https://github.com/rust-lang/crates.io-index" +dependencies = [ + "bitflags 1.0.3 (registry+https://github.com/rust-lang/crates.io-index)", + "libc 0.2.42 (registry+https://github.com/rust-lang/crates.io-index)", + 
"wayland-commons 0.20.11 (registry+https://github.com/rust-lang/crates.io-index)", + "wayland-scanner 0.20.11 (registry+https://github.com/rust-lang/crates.io-index)", + "wayland-sys 0.20.11 (registry+https://github.com/rust-lang/crates.io-index)", +] + +[[package]] +name = "wayland-commons" +version = "0.20.11" +source = "registry+https://github.com/rust-lang/crates.io-index" +dependencies = [ + "downcast-rs 1.0.3 (registry+https://github.com/rust-lang/crates.io-index)", + "wayland-sys 0.20.11 (registry+https://github.com/rust-lang/crates.io-index)", +] + +[[package]] +name = "wayland-protocols" +version = "0.20.11" +source = "registry+https://github.com/rust-lang/crates.io-index" +dependencies = [ + "bitflags 1.0.3 (registry+https://github.com/rust-lang/crates.io-index)", + "wayland-client 0.20.11 (registry+https://github.com/rust-lang/crates.io-index)", + "wayland-commons 0.20.11 (registry+https://github.com/rust-lang/crates.io-index)", + "wayland-scanner 0.20.11 (registry+https://github.com/rust-lang/crates.io-index)", + "wayland-sys 0.20.11 (registry+https://github.com/rust-lang/crates.io-index)", +] + +[[package]] +name = "wayland-scanner" +version = "0.20.11" +source = "registry+https://github.com/rust-lang/crates.io-index" +dependencies = [ + "xml-rs 0.7.0 (registry+https://github.com/rust-lang/crates.io-index)", +] + +[[package]] +name = "wayland-sys" +version = "0.20.11" +source = "registry+https://github.com/rust-lang/crates.io-index" +dependencies = [ + "dlib 0.4.1 (registry+https://github.com/rust-lang/crates.io-index)", + "lazy_static 1.0.2 (registry+https://github.com/rust-lang/crates.io-index)", +] + +[[package]] +name = "winapi" +version = "0.3.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +dependencies = [ + "winapi-i686-pc-windows-gnu 0.4.0 (registry+https://github.com/rust-lang/crates.io-index)", + "winapi-x86_64-pc-windows-gnu 0.4.0 (registry+https://github.com/rust-lang/crates.io-index)", +] + +[[package]] +name = "winapi-i686-pc-windows-gnu" +version = "0.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" + +[[package]] +name = "winapi-x86_64-pc-windows-gnu" +version = "0.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" + +[[package]] +name = "winit" +version = "0.17.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +dependencies = [ + "android_glue 0.2.3 (registry+https://github.com/rust-lang/crates.io-index)", + "cocoa 0.17.0 (registry+https://github.com/rust-lang/crates.io-index)", + "core-foundation 0.6.1 (registry+https://github.com/rust-lang/crates.io-index)", + "core-graphics 0.16.0 (registry+https://github.com/rust-lang/crates.io-index)", + "lazy_static 1.0.2 (registry+https://github.com/rust-lang/crates.io-index)", + "libc 0.2.42 (registry+https://github.com/rust-lang/crates.io-index)", + "log 0.4.3 (registry+https://github.com/rust-lang/crates.io-index)", + "objc 0.2.6 (registry+https://github.com/rust-lang/crates.io-index)", + "parking_lot 0.6.3 (registry+https://github.com/rust-lang/crates.io-index)", + "percent-encoding 1.0.1 (registry+https://github.com/rust-lang/crates.io-index)", + "smithay-client-toolkit 0.2.6 (registry+https://github.com/rust-lang/crates.io-index)", + "wayland-client 0.20.11 (registry+https://github.com/rust-lang/crates.io-index)", + "winapi 0.3.5 (registry+https://github.com/rust-lang/crates.io-index)", + "x11-dl 2.18.2 (registry+https://github.com/rust-lang/crates.io-index)", +] + +[[package]] +name = "x11-dl" +version = "2.18.2" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +dependencies = [ + "lazy_static 1.0.2 (registry+https://github.com/rust-lang/crates.io-index)", + "libc 0.2.42 (registry+https://github.com/rust-lang/crates.io-index)", + "pkg-config 0.3.11 (registry+https://github.com/rust-lang/crates.io-index)", +] + +[[package]] +name = "xml-rs" +version = "0.7.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +dependencies = [ + "bitflags 1.0.3 (registry+https://github.com/rust-lang/crates.io-index)", +] + +[metadata] +"checksum android_glue 0.2.3 (registry+https://github.com/rust-lang/crates.io-index)" = "000444226fcff248f2bc4c7625be32c63caccfecc2723a2b9f78a7487a49c407" +"checksum bitflags 1.0.3 (registry+https://github.com/rust-lang/crates.io-index)" = "d0c54bb8f454c567f21197eefcdbf5679d0bd99f2ddbe52e84c77061952e6789" +"checksum block 0.1.6 (registry+https://github.com/rust-lang/crates.io-index)" = "0d8c1fef690941d3e7788d328517591fecc684c084084702d6ff1641e993699a" +"checksum byteorder 1.2.4 (registry+https://github.com/rust-lang/crates.io-index)" = "8389c509ec62b9fe8eca58c502a0acaf017737355615243496cde4994f8fa4f9" +"checksum cc 1.0.18 (registry+https://github.com/rust-lang/crates.io-index)" = "2119ea4867bd2b8ed3aecab467709720b2d55b1bcfe09f772fd68066eaf15275" +"checksum cfg-if 0.1.4 (registry+https://github.com/rust-lang/crates.io-index)" = "efe5c877e17a9c717a0bf3613b2709f723202c4e4675cc8f12926ded29bcb17e" +"checksum cocoa 0.17.0 (registry+https://github.com/rust-lang/crates.io-index)" = "f5cd1afb83b2de9c41e5dfedb2bcccb779d433b958404876009ae4b01746ff23" +"checksum cocoa 0.19.0 (registry+https://github.com/rust-lang/crates.io-index)" = "8cd20045e880893b4a8286d5639e9ade85fb1f6a14c291f882cf8cf2149d37d9" +"checksum core-foundation 0.6.1 (registry+https://github.com/rust-lang/crates.io-index)" = "cc3532ec724375c7cb7ff0a097b714fde180bb1f6ed2ab27cfcd99ffca873cd2" +"checksum core-foundation-sys 0.6.1 (registry+https://github.com/rust-lang/crates.io-index)" = "a3fb15cdbdd9cf8b82d97d0296bb5cd3631bba58d6e31650a002a8e7fb5721f9" +"checksum core-graphics 0.16.0 (registry+https://github.com/rust-lang/crates.io-index)" = "92801c908ea6301ae619ed842a72e01098085fc321b9c2f3f833dad555bba055" +"checksum core-graphics 0.17.3 (registry+https://github.com/rust-lang/crates.io-index)" = "56790968ab1c8a1202a102e6de05fc6e1ec87da99e4e93e9a7d13efbfc1e95a9" +"checksum dlib 0.4.1 (registry+https://github.com/rust-lang/crates.io-index)" = "77e51249a9d823a4cb79e3eca6dcd756153e8ed0157b6c04775d04bf1b13b76a" +"checksum downcast-rs 1.0.3 (registry+https://github.com/rust-lang/crates.io-index)" = "18df8ce4470c189d18aa926022da57544f31e154631eb4cfe796aea97051fe6c" +"checksum foreign-types 0.3.2 (registry+https://github.com/rust-lang/crates.io-index)" = "f6f339eb8adc052cd2ca78910fda869aefa38d22d5cb648e6485e4d3fc06f3b1" +"checksum foreign-types-shared 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)" = "00b0228411908ca8685dba7fc2cdd70ec9990a6e753e89b6ac91a84c40fbaf4b" +"checksum fuchsia-zircon 0.3.3 (registry+https://github.com/rust-lang/crates.io-index)" = "2e9763c69ebaae630ba35f74888db465e49e259ba1bc0eda7d06f4a067615d82" +"checksum fuchsia-zircon-sys 0.3.3 (registry+https://github.com/rust-lang/crates.io-index)" = "3dcaa9ae7725d12cdb85b3ad99a434db70b468c09ded17e012d86b5c1010f7a7" +"checksum gcc 0.3.54 (registry+https://github.com/rust-lang/crates.io-index)" = "5e33ec290da0d127825013597dbdfc28bee4964690c7ce1166cbc2a7bd08b1bb" +"checksum lazy_static 1.0.2 
(registry+https://github.com/rust-lang/crates.io-index)" = "fb497c35d362b6a331cfd94956a07fc2c78a4604cdbee844a81170386b996dd3" +"checksum libc 0.1.12 (registry+https://github.com/rust-lang/crates.io-index)" = "e32a70cf75e5846d53a673923498228bbec6a8624708a9ea5645f075d6276122" +"checksum libc 0.2.42 (registry+https://github.com/rust-lang/crates.io-index)" = "b685088df2b950fccadf07a7187c8ef846a959c142338a48f9dc0b94517eb5f1" +"checksum libloading 0.5.0 (registry+https://github.com/rust-lang/crates.io-index)" = "9c3ad660d7cb8c5822cd83d10897b0f1f1526792737a179e73896152f85b88c2" +"checksum lock_api 0.1.3 (registry+https://github.com/rust-lang/crates.io-index)" = "949826a5ccf18c1b3a7c3d57692778d21768b79e46eb9dd07bfc4c2160036c54" +"checksum log 0.4.3 (registry+https://github.com/rust-lang/crates.io-index)" = "61bd98ae7f7b754bc53dca7d44b604f733c6bba044ea6f41bc8d89272d8161d2" +"checksum malloc_buf 0.0.6 (registry+https://github.com/rust-lang/crates.io-index)" = "62bb907fe88d54d8d9ce32a3cceab4218ed2f6b7d35617cafe9adf84e43919cb" +"checksum memmap 0.6.2 (registry+https://github.com/rust-lang/crates.io-index)" = "e2ffa2c986de11a9df78620c01eeaaf27d94d3ff02bf81bfcca953102dd0c6ff" +"checksum nix 0.11.0 (registry+https://github.com/rust-lang/crates.io-index)" = "d37e713a259ff641624b6cb20e3b12b2952313ba36b6823c0f16e6cfd9e5de17" +"checksum objc 0.2.6 (registry+https://github.com/rust-lang/crates.io-index)" = "31d20fd2b37e07cf5125be68357b588672e8cefe9a96f8c17a9d46053b3e590d" +"checksum objc_exception 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)" = "098cd29a2fa3c230d3463ae069cecccc3fdfd64c0d2496ab5b96f82dab6a00dc" +"checksum owning_ref 0.3.3 (registry+https://github.com/rust-lang/crates.io-index)" = "cdf84f41639e037b484f93433aa3897863b561ed65c6e59c7073d7c561710f37" +"checksum parking_lot 0.6.3 (registry+https://github.com/rust-lang/crates.io-index)" = "69376b761943787ebd5cc85a5bc95958651a22609c5c1c2b65de21786baec72b" +"checksum parking_lot_core 0.2.14 (registry+https://github.com/rust-lang/crates.io-index)" = "4db1a8ccf734a7bce794cc19b3df06ed87ab2f3907036b693c68f56b4d4537fa" +"checksum percent-encoding 1.0.1 (registry+https://github.com/rust-lang/crates.io-index)" = "31010dd2e1ac33d5b46a5b413495239882813e0369f8ed8a5e266f173602f831" +"checksum pkg-config 0.3.11 (registry+https://github.com/rust-lang/crates.io-index)" = "110d5ee3593dbb73f56294327fe5668bcc997897097cbc76b51e7aed3f52452f" +"checksum rand 0.3.22 (registry+https://github.com/rust-lang/crates.io-index)" = "15a732abf9d20f0ad8eeb6f909bf6868722d9a06e1e50802b6a70351f40b4eb1" +"checksum rand 0.4.2 (registry+https://github.com/rust-lang/crates.io-index)" = "eba5f8cb59cc50ed56be8880a5c7b496bfd9bd26394e176bc67884094145c2c5" +"checksum redox_syscall 0.1.40 (registry+https://github.com/rust-lang/crates.io-index)" = "c214e91d3ecf43e9a4e41e578973adeb14b474f2bee858742d127af75a0112b1" +"checksum remove_dir_all 0.5.1 (registry+https://github.com/rust-lang/crates.io-index)" = "3488ba1b9a2084d38645c4c08276a1752dcbf2c7130d74f1569681ad5d2799c5" +"checksum scopeguard 0.3.3 (registry+https://github.com/rust-lang/crates.io-index)" = "94258f53601af11e6a49f722422f6e3425c52b06245a5cf9bc09908b174f5e27" +"checksum sema 0.1.4 (registry+https://github.com/rust-lang/crates.io-index)" = "e3af15ff9b4a7d4bd2b21222c05154ee58260780a4d492c22de810f4f4187832" +"checksum smallvec 0.6.3 (registry+https://github.com/rust-lang/crates.io-index)" = "26df3bb03ca5eac2e64192b723d51f56c1b1e0860e7c766281f4598f181acdc8" +"checksum smithay-client-toolkit 0.2.6 
(registry+https://github.com/rust-lang/crates.io-index)" = "2051bffc6cbf271176e8ba1527f801b6444567daee15951ff5152aaaf7777b2f" +"checksum stable_deref_trait 1.1.1 (registry+https://github.com/rust-lang/crates.io-index)" = "dba1a27d3efae4351c8051072d619e3ade2820635c3958d826bfea39d59b54c8" +"checksum tempfile 3.0.2 (registry+https://github.com/rust-lang/crates.io-index)" = "47776f63b85777d984a50ce49d6b9e58826b6a3766a449fc95bc66cd5663c15b" +"checksum time 0.1.40 (registry+https://github.com/rust-lang/crates.io-index)" = "d825be0eb33fda1a7e68012d51e9c7f451dc1a69391e7fdc197060bb8c56667b" +"checksum unreachable 1.0.0 (registry+https://github.com/rust-lang/crates.io-index)" = "382810877fe448991dfc7f0dd6e3ae5d58088fd0ea5e35189655f84e6814fa56" +"checksum void 1.0.2 (registry+https://github.com/rust-lang/crates.io-index)" = "6a02e4885ed3bc0f2de90ea6dd45ebcbb66dacffe03547fadbb0eeae2770887d" +"checksum wayland-client 0.20.11 (registry+https://github.com/rust-lang/crates.io-index)" = "b4ff113a1c1c5e5104c7abfc2a80ba5e2bf78e1ac725ebbf934772dbd2983847" +"checksum wayland-commons 0.20.11 (registry+https://github.com/rust-lang/crates.io-index)" = "9892d32d32dd45121fdaee5c3e7acf9923e3fc9d8f0ab09e4148b6a5e9aebb9c" +"checksum wayland-protocols 0.20.11 (registry+https://github.com/rust-lang/crates.io-index)" = "251d1dbf33e60c51878037e2d23ef2169ae2f8996bdc91d543a96cf8a641f323" +"checksum wayland-scanner 0.20.11 (registry+https://github.com/rust-lang/crates.io-index)" = "3ca8187bffd85d8c8d88b55113aa8ff4276723617f85ab4ff1c36a88e0bbcd80" +"checksum wayland-sys 0.20.11 (registry+https://github.com/rust-lang/crates.io-index)" = "de74f07898a3f5b3407e19de2c175b5fbd989324b2c782e2d0e20d918639e63f" +"checksum winapi 0.3.5 (registry+https://github.com/rust-lang/crates.io-index)" = "773ef9dcc5f24b7d850d0ff101e542ff24c3b090a9768e03ff889fdef41f00fd" +"checksum winapi-i686-pc-windows-gnu 0.4.0 (registry+https://github.com/rust-lang/crates.io-index)" = "ac3b87c63620426dd9b991e5ce0329eff545bccbbb34f3be09ff6fb6ab51b7b6" +"checksum winapi-x86_64-pc-windows-gnu 0.4.0 (registry+https://github.com/rust-lang/crates.io-index)" = "712e227841d057c1ee1cd2fb22fa7e5a5461ae8e48fa2ca79ec42cfc1931183f" +"checksum winit 0.17.0 (registry+https://github.com/rust-lang/crates.io-index)" = "51fe58cceab36bef11fcb57d0a86f4cdf0c8668ad51fdbc6d48efa6b2db0cddd" +"checksum x11-dl 2.18.2 (registry+https://github.com/rust-lang/crates.io-index)" = "58cbeb06af6023f10ab7894bc2ede5318ee95d0f5edfeaa68188d436d8f10b45" +"checksum xml-rs 0.7.0 (registry+https://github.com/rust-lang/crates.io-index)" = "3c1cb601d29fe2c2ac60a2b2e5e293994d87a1f6fa9687a31a15270f909be9c2" diff --git a/third_party/rust/metal/Cargo.toml b/third_party/rust/metal/Cargo.toml new file mode 100644 index 000000000000..ef1d0a035595 --- /dev/null +++ b/third_party/rust/metal/Cargo.toml @@ -0,0 +1,86 @@ +# THIS FILE IS AUTOMATICALLY GENERATED BY CARGO +# +# When uploading crates to the registry Cargo will automatically +# "normalize" Cargo.toml files for maximal compatibility +# with all versions of Cargo and also rewrite `path` dependencies +# to registry (e.g., crates.io) dependencies +# +# If you believe there's an error in this file please file an +# issue against the rust-lang/cargo repository. 
If you're +# editing this file be aware that the upstream Cargo.toml +# will likely look very different (and much more reasonable) + +[package] +edition = "2018" +name = "metal" +version = "0.17.0" +authors = ["GFX Developers"] +description = "Rust bindings for Metal" +homepage = "https://github.com/gfx-rs/metal-rs" +documentation = "https://docs.rs/crate/metal" +readme = "README.md" +keywords = ["metal", "graphics", "bindings"] +license = "MIT OR Apache-2.0" +repository = "https://github.com/gfx-rs/metal-rs" +[package.metadata.docs.rs] +default-target = "x86_64-apple-darwin" + +[[example]] +name = "window" + +[[example]] +name = "library" + +[[example]] +name = "reflection" + +[[example]] +name = "caps" + +[[example]] +name = "argument-buffer" + +[[example]] +name = "compute" +path = "examples/compute/main.rs" + +[[example]] +name = "embedded-lib" +path = "examples/compute/embedded-lib.rs" + +[[example]] +name = "compute-argument-buffer" +path = "examples/compute/compute-argument-buffer.rs" + +[[example]] +name = "bind" +[dependencies.bitflags] +version = "1" + +[dependencies.block] +version = "0.1.5" + +[dependencies.cocoa] +version = "0.19" + +[dependencies.core-graphics] +version = "0.17.3" + +[dependencies.foreign-types] +version = "0.3" + +[dependencies.log] +version = "0.4" + +[dependencies.objc] +version = "0.2.4" +features = ["objc_exception"] +[dev-dependencies.sema] +version = "0.1.4" + +[dev-dependencies.winit] +version = "0.17" + +[features] +default = [] +private = [] diff --git a/third_party/rust/metal/LICENSE-APACHE b/third_party/rust/metal/LICENSE-APACHE new file mode 100644 index 000000000000..16fe87b06e80 --- /dev/null +++ b/third_party/rust/metal/LICENSE-APACHE @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + +TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + +1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). 
+ + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + +2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + +3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + +4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + +5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + +6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + +7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + +8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + +9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + +END OF TERMS AND CONDITIONS + +APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + +Copyright [yyyy] [name of copyright owner] + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. diff --git a/third_party/rust/metal/LICENSE-MIT b/third_party/rust/metal/LICENSE-MIT new file mode 100644 index 000000000000..25597d5838fa --- /dev/null +++ b/third_party/rust/metal/LICENSE-MIT @@ -0,0 +1,25 @@ +Copyright (c) 2010 The Rust Project Developers + +Permission is hereby granted, free of charge, to any +person obtaining a copy of this software and associated +documentation files (the "Software"), to deal in the +Software without restriction, including without +limitation the rights to use, copy, modify, merge, +publish, distribute, sublicense, and/or sell copies of +the Software, and to permit persons to whom the Software +is furnished to do so, subject to the following +conditions: + +The above copyright notice and this permission notice +shall be included in all copies or substantial portions +of the Software. 
+ +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF +ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED +TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A +PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY +CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION +OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR +IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER +DEALINGS IN THE SOFTWARE. diff --git a/third_party/rust/metal/README.md b/third_party/rust/metal/README.md new file mode 100644 index 000000000000..2292314f281d --- /dev/null +++ b/third_party/rust/metal/README.md @@ -0,0 +1,20 @@ +# metal-rs +[![Build Status](https://travis-ci.org/gfx-rs/metal-rs.svg?branch=master)](https://travis-ci.org/gfx-rs/metal-rs) +[![Crates.io](https://img.shields.io/crates/v/metal.svg?label=metal)](https://crates.io/crates/metal) + +Unsafe Rust bindings for the Metal 3D Graphics API. + +## License + +Licensed under either of + + * Apache License, Version 2.0, ([LICENSE-APACHE](LICENSE-APACHE) or http://www.apache.org/licenses/LICENSE-2.0) + * MIT license ([LICENSE-MIT](LICENSE-MIT) or http://opensource.org/licenses/MIT) + +at your option. + +### Contribution + +Unless you explicitly state otherwise, any contribution intentionally submitted +for inclusion in the work by you, as defined in the Apache-2.0 license, shall be +dual licensed as above, without any additional terms or conditions. diff --git a/third_party/rust/metal/bors.toml b/third_party/rust/metal/bors.toml new file mode 100644 index 000000000000..55c1d963ce3e --- /dev/null +++ b/third_party/rust/metal/bors.toml @@ -0,0 +1,8 @@ +status = [ "continuous-integration/travis-ci/push" ] + +# Based on , +# we can expect a build to spend four hours in Travis CI's queue. +# +# This gives the build five hours to run, with four hours to sit +# and one hour to actually work. 
+timeout-sec = 18000 diff --git a/third_party/rust/metal/examples/argument-buffer/main.rs b/third_party/rust/metal/examples/argument-buffer/main.rs new file mode 100644 index 000000000000..89d69ed3e3fd --- /dev/null +++ b/third_party/rust/metal/examples/argument-buffer/main.rs @@ -0,0 +1,43 @@ + + + + + + + +#[macro_use] +extern crate objc; + +use metal::*; + +use cocoa::foundation::NSAutoreleasePool; + +fn main() { + let pool = unsafe { NSAutoreleasePool::new(cocoa::base::nil) }; + + let device = Device::system_default().expect("no device found"); + + let desc1 = ArgumentDescriptor::new(); + desc1.set_data_type(MTLDataType::Texture); + let desc2 = ArgumentDescriptor::new(); + desc2.set_data_type(MTLDataType::Sampler); + desc2.set_index(1); + + let encoder = device.new_argument_encoder(&Array::from_slice(&[desc1, desc2])); + println!("{:?}", encoder); + + let buffer = device.new_buffer(encoder.encoded_length(), MTLResourceOptions::empty()); + encoder.set_argument_buffer(&buffer, 0); + + let sampler = { + let descriptor = SamplerDescriptor::new(); + descriptor.set_support_argument_buffers(true); + device.new_sampler(&descriptor) + }; + encoder.set_sampler_state(&sampler, 1); + println!("{:?}", sampler); + + unsafe { + let () = msg_send![pool, release]; + } +} diff --git a/third_party/rust/metal/examples/bind/main.rs b/third_party/rust/metal/examples/bind/main.rs new file mode 100644 index 000000000000..77f9d3a1bbfb --- /dev/null +++ b/third_party/rust/metal/examples/bind/main.rs @@ -0,0 +1,42 @@ + + + + + + + +#[macro_use] +extern crate objc; + +use metal::*; + +use cocoa::foundation::NSAutoreleasePool; + +fn main() { + let pool = unsafe { NSAutoreleasePool::new(cocoa::base::nil) }; + + let device = Device::system_default().expect("no device found"); + + let buffer = device.new_buffer(4, MTLResourceOptions::empty()); + let sampler = { + let descriptor = SamplerDescriptor::new(); + device.new_sampler(&descriptor) + }; + + let queue = device.new_command_queue(); + let cmd_buf = queue.new_command_buffer(); + + let encoder = cmd_buf.new_compute_command_encoder(); + + encoder.set_buffers(2, &[Some(&buffer), None], &[4, 0]); + encoder.set_sampler_states(1, &[Some(&sampler), None]); + + encoder.end_encoding(); + cmd_buf.commit(); + + println!("Everything is bound"); + + unsafe { + let () = msg_send![pool, release]; + } +} diff --git a/third_party/rust/metal/examples/caps/main.rs b/third_party/rust/metal/examples/caps/main.rs new file mode 100644 index 000000000000..87345e1853ce --- /dev/null +++ b/third_party/rust/metal/examples/caps/main.rs @@ -0,0 +1,32 @@ + + + + + + + +use metal::*; + +fn main() { + let device = Device::system_default().expect("no device found"); + + #[cfg(feature = "private")] + { + println!("Vendor: {:?}", unsafe { device.vendor() }); + println!("Family: {:?}", unsafe { device.family_name() }); + } + println!( + "Max threads per threadgroup: {:?}", + device.max_threads_per_threadgroup() + ); + #[cfg(target_os = "macos")] + { + println!("Integrated GPU: {:?}", device.is_low_power()); + println!("Headless: {:?}", device.is_headless()); + println!("D24S8: {:?}", device.d24_s8_supported()); + } + println!( + "Indirect argument buffer: {:?}", + device.argument_buffers_support() + ); +} diff --git a/third_party/rust/metal/examples/compute/compute-argument-buffer.metal b/third_party/rust/metal/examples/compute/compute-argument-buffer.metal new file mode 100644 index 000000000000..1dcc79daf55e --- /dev/null +++ b/third_party/rust/metal/examples/compute/compute-argument-buffer.metal 
@@ -0,0 +1,14 @@ +#include <metal_stdlib> + +using namespace metal; + +struct SumInput { + device uint *data; + volatile device atomic_uint *sum; +}; + +kernel void sum(device SumInput& input [[ buffer(0) ]], + uint gid [[ thread_position_in_grid ]]) +{ + atomic_fetch_add_explicit(input.sum, input.data[gid], memory_order_relaxed); +} diff --git a/third_party/rust/metal/examples/compute/compute-argument-buffer.rs b/third_party/rust/metal/examples/compute/compute-argument-buffer.rs new file mode 100644 index 000000000000..6ffaa34dcc28 --- /dev/null +++ b/third_party/rust/metal/examples/compute/compute-argument-buffer.rs @@ -0,0 +1,101 @@ + + + + + + + +#[macro_use] +extern crate objc; + +use metal::*; + +use cocoa::foundation::NSAutoreleasePool; + +use std::mem; + +static LIBRARY_SRC: &str = include_str!("compute-argument-buffer.metal"); + +fn main() { + let pool = unsafe { NSAutoreleasePool::new(cocoa::base::nil) }; + let device = Device::system_default().expect("no device found"); + let command_queue = device.new_command_queue(); + + let data = [ + 1u32, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, + 25, 26, 27, 28, 29, 30, + ]; + + let buffer = device.new_buffer_with_data( + unsafe { mem::transmute(data.as_ptr()) }, + (data.len() * mem::size_of::<u32>()) as u64, + MTLResourceOptions::CPUCacheModeDefaultCache, + ); + + let sum = { + let data = [0u32]; + device.new_buffer_with_data( + unsafe { mem::transmute(data.as_ptr()) }, + (data.len() * mem::size_of::<u32>()) as u64, + MTLResourceOptions::CPUCacheModeDefaultCache, + ) + }; + + let command_buffer = command_queue.new_command_buffer(); + let encoder = command_buffer.new_compute_command_encoder(); + + let library = device + .new_library_with_source(LIBRARY_SRC, &CompileOptions::new()) + .unwrap(); + let kernel = library.get_function("sum", None).unwrap(); + + let argument_encoder = kernel.new_argument_encoder(0); + let arg_buffer = device.new_buffer( + argument_encoder.encoded_length(), + MTLResourceOptions::empty(), + ); + argument_encoder.set_argument_buffer(&arg_buffer, 0); + argument_encoder.set_buffer(&buffer, 0, 0); + argument_encoder.set_buffer(&sum, 0, 1); + + let pipeline_state_descriptor = ComputePipelineDescriptor::new(); + pipeline_state_descriptor.set_compute_function(Some(&kernel)); + + let pipeline_state = device + .new_compute_pipeline_state_with_function( + pipeline_state_descriptor.compute_function().unwrap(), + ) + .unwrap(); + + encoder.set_compute_pipeline_state(&pipeline_state); + encoder.set_buffer(0, Some(&arg_buffer), 0); + + encoder.use_resource(&buffer, MTLResourceUsage::Read); + encoder.use_resource(&sum, MTLResourceUsage::Write); + + let width = 16; + + let thread_group_count = MTLSize { + width, + height: 1, + depth: 1, + }; + + let thread_group_size = MTLSize { + width: (data.len() as u64 + width) / width, + height: 1, + depth: 1, + }; + + encoder.dispatch_thread_groups(thread_group_count, thread_group_size); + encoder.end_encoding(); + command_buffer.commit(); + command_buffer.wait_until_completed(); + + let ptr = sum.contents() as *mut u32; + + unsafe { + assert_eq!(465, *ptr); + let () = msg_send![pool, release]; + } +} diff --git a/third_party/rust/metal/examples/compute/default.metallib b/third_party/rust/metal/examples/compute/default.metallib new file mode 100644 index 0000000000000000000000000000000000000000..d93aa04396d7619fc7a1e33fdf28fa9715047548 GIT binary patch literal 3237 zcmds3eN0=|6~FL|pOcvToIqAn8}v+?jHoShU`uRlHro#fZ6P%!%17JDHeiBRe_=m}
zaWu(br*;w|xhYBAHI+=2Oj%S-Y+@3TGR?#!b2}P&QM$1#&7c9Aw1m(=qGg@5ooh%N zl1Tf1Pfz#b+bY5%F6@2oAwe)>YSy!i0_h0AaM zW9?zpQ?r__^;M0piqHeFmuqUaOP3TpUhSxUrBReIfEZD|t!lgAk@{NVsSK@rke`Cq zf8aB4iEBRs0R^%qvGSRKBqv4272tr+0cceZo59Qj8Qs0^EqzMxf>2VXK9Us@ug?(` z6bTMB1titkh9j#CAxT-jN+f-0WtkyT8_j3An#f61P-d8{#nIKpBSw8gcke`S>YzYT zBWwUez5z^GFN?kxzrvQDBW##JRc}(03Tpe3sx+mU&1pfMCwqmbq- zCp!_;#6UaIC>!L!(u!qDzcezp5JSjktaylaMwccO)Qm2(UB}!pGE+!0JBJ-g=B69Z z8u4UW_%C2Phv&fxKuhBZ9dqM54~$Zkp`_|VPL>F2&jMj^IC!3uMYzgXP)+y`Yfl7! z+(!jyqz5L<7e>4QP)34tmu04ujE?w!q`MGRRSqS!U|ru6S>07K;K2DgJgs9KW;`=T z5CZBD1y-n$EYgog%)!$Vvj0DkT%W^t2smCy;QMJI@V`5bCo|?uM)AEUX@iYlvkW2g z1iK(eMmbf4(g2w>r#eAt28UIk3-%pNLl^)VB!SqNS27L+e#6(JBy51#9C6WuZPopc zZKalcE+#%V<2w?33qTNaJBnv@%oNKwjCdxBZ>5DWVKyxUjT7djpn^b}2JJ+n@RkHG z1ePoPf7{>B$af;u9e#u7~=g z`D^MTy{kf@I*D=}@(Rq#h(u6sozFP_x{h(exr8HIU45t?bggzk*f(15O&rTt3U6Q`j8 z-DL>o<)h|DM}3Guc>p<6I?T*6a5Ll4Jp^uMmgzDxJS_Z{t(jS_n&~n>2F?<5+P^K+ z!g~_Bi>3W6-2sy`C2;TuHRo&Xl9JQUBNWZara@X(6cZP$u8a1r*s%42fIC<$ z%_`|O#0$wq`x}U_*P$*K1>_?W^49#U$_S?lC3_74y30pg&aTx=*hH(YqJ zU*4+B$xI8%ASFUd@1-h7eZ9P!-Y25FYQt_f?RQh1zOb7O7mThZhk8QEIF$@!syQPr zmy8r%Q^>~@h370qt;z$Mvq4Q7E&@t>HmD^jZZ5ymOz&%>x3od1(OUvxcQoua)14)B z=Rb1*c5fhTg7}k$J+W|CtoylHNEhSyZUQnD)s*U&Pxi}4Gv!r_{9}b2((T6<;a}hq zgqtgtDTk7(zQv4}`~fKacaP>WNq6Eu+QRN#VUIh=w~;{SrC~lEF1Xmp%x3Z)q*FLW zi&soJTT*y+xagd|=#oNywY%u*xO{S=@XM^CR%wF@Zn5KV;~1Y=GQ=F->!W?KhPs6) z10H|}4*30T@O$yrRJ377ehQlC4Ws?_f>3v{`p;`ui}g8|267S7yWMsxTUWQEp6#@H zyf&AURVm9#H?buSD{ry4TA$B!OUr+_v7QJlHcx4@%i*@!t)(qir_1B;np$jLOOxID z@KCG8%bOe)zRhHHJ~rmDIIK+_t*us%snyCiw>=5KY2j_1R#S_s1CSq{u(*)uvb&mL zlFMnbIa{ngBJo%)EvCI58=v7~@$512U2dz%Yx6G`SnRev&d0@0;MHMs@&t-3CjAx* zZy|jz@9Ajf7x8S)Ru`XfPep}=cR6g$CbA4(l4K^|mZ9Nxd2LJVOnW@GmOZwXOpX;K z1;c2AC~K2mbTC&xU>W%J8ugQhMuW zALJT4+1!Qrg#DV3#U4si?z(4X0Mh-uvl$FFn2cUD;=S zy06w8+weux&JA1Mxi`Ie{n(59PkwY}VE>u-Ki&WS(VzeA=trNN{Qa5w6+_zy<)sZZ zx!i=Yw=r_@n?(G@UB5W~r&r%S{#wD``^Ga*?_WVok;m$)m7WqfBeZ{)JI28?1ENu# zPWDX8FZAc$(32xPtth;qD7-dYh%78Z>tRivK>wTsYLXN9bqIZ}M2Ng;Gx3S53ZRLS z^9X?-T@I(s?c%LYo^5Wkcp7aE5~AF*&$8PMdOgg`3!NX6d^r53KKDu>ceB_wF0>C= ze8DYdxX8Y5pOGAdt7e~&+Fc_)q5x;NXLs9HiX-sAhqbQ>6+Uv`k*}Ra2#x@H1xt!s zdQQmMs?=nyS{I4-_vVB|8x!_nwxKu2j@H~yOe!PevO{_35F-x0d_U!Q0D=AsD+OW4 literal 0 HcmV?d00001 diff --git a/third_party/rust/metal/examples/compute/embedded-lib.rs b/third_party/rust/metal/examples/compute/embedded-lib.rs new file mode 100644 index 000000000000..6c3834dee103 --- /dev/null +++ b/third_party/rust/metal/examples/compute/embedded-lib.rs @@ -0,0 +1,31 @@ + + + + + + + +#[macro_use] +extern crate objc; + +use metal::*; + +use cocoa::foundation::NSAutoreleasePool; + +fn main() { + let library_data = include_bytes!("default.metallib"); + + let pool = unsafe { NSAutoreleasePool::new(cocoa::base::nil) }; + let device = Device::system_default().expect("no device found"); + + let library = device.new_library_with_data(&library_data[..]).unwrap(); + let kernel = library.get_function("sum", None).unwrap(); + + println!("Function name: {}", kernel.name()); + println!("Function type: {:?}", kernel.function_type()); + println!("OK"); + + unsafe { + let () = msg_send![pool, release]; + } +} diff --git a/third_party/rust/metal/examples/compute/main.rs b/third_party/rust/metal/examples/compute/main.rs new file mode 100644 index 000000000000..6d12dd636969 --- /dev/null +++ b/third_party/rust/metal/examples/compute/main.rs @@ -0,0 +1,88 @@ + + + + + + + +#[macro_use] +extern crate objc; + +use metal::*; + +use cocoa::foundation::NSAutoreleasePool; + +use std::mem; + +fn main() { + let pool = unsafe { 
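// [Editor's note, kept as a comment so the surrounding code still parses]
// This example loads the precompiled `default.metallib` below via
// `new_library_with_file`, embedded-lib.rs above bakes the same blob in with
// `include_bytes!` + `new_library_with_data`, and compute-argument-buffer.rs
// compiles MSL source at runtime with `new_library_with_source`. A hedged
// sketch of the three routes (bindings illustrative; the examples simply
// `.unwrap()` the returned Results):
//
//     let from_file = device.new_library_with_file("examples/compute/default.metallib");
//     let from_blob = device.new_library_with_data(&include_bytes!("default.metallib")[..]);
//     let from_src  = device.new_library_with_source(LIBRARY_SRC, &CompileOptions::new());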
NSAutoreleasePool::new(cocoa::base::nil) }; + let device = Device::system_default().expect("no device found"); + let command_queue = device.new_command_queue(); + + let data = [ + 1u32, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, + 25, 26, 27, 28, 29, 30, + ]; + + let buffer = device.new_buffer_with_data( + unsafe { mem::transmute(data.as_ptr()) }, + (data.len() * mem::size_of::<u32>()) as u64, + MTLResourceOptions::CPUCacheModeDefaultCache, + ); + + let sum = { + let data = [0u32]; + device.new_buffer_with_data( + unsafe { mem::transmute(data.as_ptr()) }, + (data.len() * mem::size_of::<u32>()) as u64, + MTLResourceOptions::CPUCacheModeDefaultCache, + ) + }; + + let command_buffer = command_queue.new_command_buffer(); + let encoder = command_buffer.new_compute_command_encoder(); + + let library = device + .new_library_with_file("examples/compute/default.metallib") + .unwrap(); + let kernel = library.get_function("sum", None).unwrap(); + + let pipeline_state_descriptor = ComputePipelineDescriptor::new(); + pipeline_state_descriptor.set_compute_function(Some(&kernel)); + + let pipeline_state = device + .new_compute_pipeline_state_with_function( + pipeline_state_descriptor.compute_function().unwrap(), + ) + .unwrap(); + + encoder.set_compute_pipeline_state(&pipeline_state); + encoder.set_buffer(0, Some(&buffer), 0); + encoder.set_buffer(1, Some(&sum), 0); + + let width = 16; + + let thread_group_count = MTLSize { + width, + height: 1, + depth: 1, + }; + + let thread_group_size = MTLSize { + width: (data.len() as u64 + width) / width, + height: 1, + depth: 1, + }; + + encoder.dispatch_thread_groups(thread_group_count, thread_group_size); + encoder.end_encoding(); + command_buffer.commit(); + command_buffer.wait_until_completed(); + + let ptr = sum.contents() as *mut u32; + + unsafe { + assert_eq!(465, *ptr); + let () = msg_send![pool, release]; + } +} diff --git a/third_party/rust/metal/examples/compute/shaders.metal b/third_party/rust/metal/examples/compute/shaders.metal new file mode 100644 index 000000000000..51363a1d36f6 --- /dev/null +++ b/third_party/rust/metal/examples/compute/shaders.metal @@ -0,0 +1,10 @@ +#include <metal_stdlib> + +using namespace metal; + +kernel void sum(device uint *data [[ buffer(0) ]], + volatile device atomic_uint *sum [[ buffer(1) ]], + uint gid [[ thread_position_in_grid ]]) +{ + atomic_fetch_add_explicit(sum, data[gid], memory_order_relaxed); +} diff --git a/third_party/rust/metal/examples/library/main.rs b/third_party/rust/metal/examples/library/main.rs new file mode 100644 index 000000000000..293a5dd9f06d --- /dev/null +++ b/third_party/rust/metal/examples/library/main.rs @@ -0,0 +1,17 @@ + + + + + + + +use metal::*; + +const PROGRAM: &'static str = ""; + +fn main() { + let device = Device::system_default().expect("no device found"); + + let options = CompileOptions::new(); + let _library = device.new_library_with_source(PROGRAM, &options); +} diff --git a/third_party/rust/metal/examples/reflection/main.rs b/third_party/rust/metal/examples/reflection/main.rs new file mode 100644 index 000000000000..98a2947920dc --- /dev/null +++ b/third_party/rust/metal/examples/reflection/main.rs @@ -0,0 +1,83 @@ + + + + + + + +#[macro_use] +extern crate objc; + +use metal::*; + +use cocoa::foundation::NSAutoreleasePool; + +const PROGRAM: &'static str = " + #include <metal_stdlib>\n\ + + using namespace metal;\n\ + + typedef struct {\n\ + float2 position;\n\ + float3 color;\n\ + } vertex_t;\n\ + + struct ColorInOut {\n\ + float4 position [[position]];\n\ + float4
color;\n\ + };\n\ + + vertex ColorInOut vs(device vertex_t* vertex_array [[ buffer(0) ]],\n\ + unsigned int vid [[ vertex_id ]])\n\ + {\n\ + ColorInOut out;\n\ + + out.position = float4(float2(vertex_array[vid].position), 0.0, 1.0);\n\ + out.color = float4(float3(vertex_array[vid].color), 1.0);\n\ + + return out;\n\ + }\n\ + + fragment float4 ps(ColorInOut in [[stage_in]])\n\ + {\n\ + return in.color;\n\ + };\n\ +"; + +fn main() { + let pool = unsafe { NSAutoreleasePool::new(cocoa::base::nil) }; + + let device = Device::system_default().expect("no device found"); + + let options = CompileOptions::new(); + let library = device.new_library_with_source(PROGRAM, &options).unwrap(); + let (vs, ps) = ( + library.get_function("vs", None).unwrap(), + library.get_function("ps", None).unwrap(), + ); + + let vertex_desc = VertexDescriptor::new(); + + let desc = RenderPipelineDescriptor::new(); + desc.set_vertex_function(Some(&vs)); + desc.set_fragment_function(Some(&ps)); + desc.set_vertex_descriptor(Some(vertex_desc)); + + println!("{:?}", desc); + + #[cfg(feature = "private")] + let _reflection = unsafe { + RenderPipelineReflection::new( + desc.serialize_vertex_data(), + desc.serialize_fragment_data(), + vertex_desc.serialize_descriptor(), + &device, + 0x8, + 0x0, + ) + }; + + unsafe { + let () = msg_send![pool, release]; + } +} diff --git a/third_party/rust/metal/examples/window/default.metallib b/third_party/rust/metal/examples/window/default.metallib new file mode 100644 index 0000000000000000000000000000000000000000..731a3ab30e2a1387eecc18249e5e5b453864e3ac GIT binary patch literal 5844 zcmeI0e^49OoxpclpCQ7xE|W;j8{&umDWFm
zVc&`PSqcB%86inw1DmnR-I(GMWn&b-Qp0WR_(ZIZ{ZN(`ex=+py{!x@oetyXLFU8; zWm1=}@IaxRN*>f=J!OuJT%@Mt!amog#jch+iAwfztP}zn9Rn1r8M3n&_MpX<59rYl&lAA^xE}%0X9ZUbYgj@8^lXpKjOk~jsC1$=QYti4IV@3LdH7ErJ8}fOe!2fDzL3dZl`BzLiAzEDpN#Q$a)KfvyO|Vvklj<0gDj32D)i z0mw{l;$hC-3mYs0R^>7F1_&|gX2JNRE}JM{fW(RGu|QM`m`Gtv9&$|THU%%tu32_Xrx~?>X%ab!PWtrfLACL&qna}ijCbB&p&`0;n=dJboo*xd0yn#Kvf-DU+&=agXox6+jpGcG+csGcfa9$}r=y z*RAn9H!;G_#Ld(m^W)X!X9XwWD35%Ey#keLdXNckW{!_zbp!IdjN}0WI-^lXJ^JoK zG=ZY!bkXQzGeN^tP!F;Hq6^QJ!pq$?vqD?VoG>fO&x&rMh^B^;5sRtPK@VN>oQ+d- zytC{$MrW~z)f6)-a>|QUf0@`>WL)5J^Ti#*)fTLpK8yLIr2njJc8Gw_7+Uxc@|Q(@ z+X~c&lJn(+9$t!9KLGq}e`fGw4i><#MWHSGr#2b<&=*AjKZIrE1kTV15D`Qv6YR^i WmgX(YM2o5Qs*0{CX+~jq{qbKdES<~% literal 0 HcmV?d00001 diff --git a/third_party/rust/metal/examples/window/main.rs b/third_party/rust/metal/examples/window/main.rs new file mode 100644 index 000000000000..16346152a08e --- /dev/null +++ b/third_party/rust/metal/examples/window/main.rs @@ -0,0 +1,185 @@ + + + + + + + +#[macro_use] +extern crate objc; + +use cocoa::appkit::{NSView, NSWindow}; +use cocoa::base::id as cocoa_id; +use cocoa::foundation::{NSAutoreleasePool, NSRange}; +use core_graphics::geometry::CGSize; + +use objc::runtime::YES; + +use metal::*; + +use winit::os::macos::WindowExt; + +use std::mem; + +fn prepare_pipeline_state<'a>(device: &DeviceRef, library: &LibraryRef) -> RenderPipelineState { + let vert = library.get_function("triangle_vertex", None).unwrap(); + let frag = library.get_function("triangle_fragment", None).unwrap(); + + let pipeline_state_descriptor = RenderPipelineDescriptor::new(); + pipeline_state_descriptor.set_vertex_function(Some(&vert)); + pipeline_state_descriptor.set_fragment_function(Some(&frag)); + pipeline_state_descriptor + .color_attachments() + .object_at(0) + .unwrap() + .set_pixel_format(MTLPixelFormat::BGRA8Unorm); + + device + .new_render_pipeline_state(&pipeline_state_descriptor) + .unwrap() +} + +fn prepare_render_pass_descriptor(descriptor: &RenderPassDescriptorRef, texture: &TextureRef) { + + + let color_attachment = descriptor.color_attachments().object_at(0).unwrap(); + + color_attachment.set_texture(Some(texture)); + color_attachment.set_load_action(MTLLoadAction::Clear); + color_attachment.set_clear_color(MTLClearColor::new(0.5, 0.2, 0.2, 1.0)); + color_attachment.set_store_action(MTLStoreAction::Store); +} + +fn main() { + let mut events_loop = winit::EventsLoop::new(); + let glutin_window = winit::WindowBuilder::new() + .with_dimensions((800, 600).into()) + .with_title("Metal".to_string()) + .build(&events_loop) + .unwrap(); + + let window: cocoa_id = unsafe { mem::transmute(glutin_window.get_nswindow()) }; + let device = Device::system_default().expect("no device found"); + + let layer = CoreAnimationLayer::new(); + layer.set_device(&device); + layer.set_pixel_format(MTLPixelFormat::BGRA8Unorm); + layer.set_presents_with_transaction(false); + + unsafe { + let view = window.contentView(); + view.setWantsBestResolutionOpenGLSurface_(YES); + view.setWantsLayer(YES); + view.setLayer(mem::transmute(layer.as_ref())); + } + + let draw_size = glutin_window.get_inner_size().unwrap(); + layer.set_drawable_size(CGSize::new(draw_size.width as f64, draw_size.height as f64)); + + let library = device + .new_library_with_file("examples/window/default.metallib") + .unwrap(); + let pipeline_state = prepare_pipeline_state(&device, &library); + let command_queue = device.new_command_queue(); + + + let vbuf = { + let vertex_data = [ + 0.0f32, 0.5, 1.0, 0.0, 0.0, -0.5, -0.5, 0.0, 1.0, 0.0, 0.5, 0.5, 0.0, 0.0, 
1.0, + ]; + + device.new_buffer_with_data( + unsafe { mem::transmute(vertex_data.as_ptr()) }, + (vertex_data.len() * mem::size_of::<f32>()) as u64, + MTLResourceOptions::CPUCacheModeDefaultCache, + ) + }; + + let mut pool = unsafe { NSAutoreleasePool::new(cocoa::base::nil) }; + let mut r = 0.0f32; + let mut running = true; + + while running { + events_loop.poll_events(|event| match event { + winit::Event::WindowEvent { + event: winit::WindowEvent::CloseRequested, + .. + } => running = false, + _ => (), + }); + + if let Some(drawable) = layer.next_drawable() { + let render_pass_descriptor = RenderPassDescriptor::new(); + let _a = prepare_render_pass_descriptor(&render_pass_descriptor, drawable.texture()); + + let command_buffer = command_queue.new_command_buffer(); + let parallel_encoder = + command_buffer.new_parallel_render_command_encoder(&render_pass_descriptor); + let encoder = parallel_encoder.render_command_encoder(); + encoder.set_render_pipeline_state(&pipeline_state); + encoder.set_vertex_buffer(0, Some(&vbuf), 0); + encoder.draw_primitives(MTLPrimitiveType::Triangle, 0, 3); + encoder.end_encoding(); + parallel_encoder.end_encoding(); + + render_pass_descriptor + .color_attachments() + .object_at(0) + .unwrap() + .set_load_action(MTLLoadAction::DontCare); + + let parallel_encoder = + command_buffer.new_parallel_render_command_encoder(&render_pass_descriptor); + let encoder = parallel_encoder.render_command_encoder(); + let p = vbuf.contents(); + let vertex_data: &[u8; 60] = unsafe { + mem::transmute(&[ + 0.0f32, + 0.5, + 1.0, + 0.0 - r, + 0.0, + -0.5, + -0.5, + 0.0, + 1.0 - r, + 0.0, + 0.5, + 0.5, + 0.0, + 0.0, + 1.0 + r, + ]) + }; + use std::ptr; + + unsafe { + ptr::copy( + vertex_data.as_ptr(), + p as *mut u8, + (vertex_data.len() * mem::size_of::<u8>()) as usize, + ); + } + vbuf.did_modify_range(NSRange::new( + 0 as u64, + (vertex_data.len() * mem::size_of::<u8>()) as u64, + )); + + encoder.set_render_pipeline_state(&pipeline_state); + encoder.set_vertex_buffer(0, Some(&vbuf), 0); + encoder.draw_primitives(MTLPrimitiveType::Triangle, 0, 3); + encoder.end_encoding(); + parallel_encoder.end_encoding(); + + command_buffer.present_drawable(&drawable); + command_buffer.commit(); + + r += 0.01f32; + + unsafe { + let () = msg_send![pool, release]; + pool = NSAutoreleasePool::new(cocoa::base::nil); + } + } + } +} diff --git a/third_party/rust/metal/examples/window/shaders.metal b/third_party/rust/metal/examples/window/shaders.metal new file mode 100644 index 000000000000..db1cc0241a76 --- /dev/null +++ b/third_party/rust/metal/examples/window/shaders.metal @@ -0,0 +1,31 @@ +#include <metal_stdlib> + +using namespace metal; + +typedef struct { + float2 position; + float3 color; +} vertex_t; + +struct ColorInOut { + float4 position [[position]]; + float4 color; +}; + +// vertex shader function +vertex ColorInOut triangle_vertex(device vertex_t* vertex_array [[ buffer(0) ]], + unsigned int vid [[ vertex_id ]]) +{ + ColorInOut out; + + out.position = float4(float2(vertex_array[vid].position), 0.0, 1.0); + out.color = float4(float3(vertex_array[vid].color), 1.0); + + return out; +} + +// fragment shader function +fragment float4 triangle_fragment(ColorInOut in [[stage_in]]) +{ + return in.color; +}; diff --git a/third_party/rust/metal/src/argument.rs b/third_party/rust/metal/src/argument.rs new file mode 100644 index 000000000000..053472a31690 --- /dev/null +++ b/third_party/rust/metal/src/argument.rs @@ -0,0 +1,322 @@ + + + + + + + +use crate::{Array, MTLTextureType}; + +use cocoa::foundation::NSUInteger; +use
objc::runtime::{NO, YES}; + +#[repr(u64)] +#[allow(non_camel_case_types)] +#[derive(Copy, Clone, Debug)] +pub enum MTLDataType { + None = 0, + + Struct = 1, + Array = 2, + + Float = 3, + Float2 = 4, + Float3 = 5, + Float4 = 6, + + Float2x2 = 7, + Float2x3 = 8, + Float2x4 = 9, + + Float3x2 = 10, + Float3x3 = 11, + Float3x4 = 12, + + Float4x2 = 13, + Float4x3 = 14, + Float4x4 = 15, + + Half = 16, + Half2 = 17, + Half3 = 18, + Half4 = 19, + + Half2x2 = 20, + Half2x3 = 21, + Half2x4 = 22, + + Half3x2 = 23, + Half3x3 = 24, + Half3x4 = 25, + + Half4x2 = 26, + Half4x3 = 27, + Half4x4 = 28, + + Int = 29, + Int2 = 30, + Int3 = 31, + Int4 = 32, + + UInt = 33, + UInt2 = 34, + UInt3 = 35, + UInt4 = 36, + + Short = 37, + Short2 = 38, + Short3 = 39, + Short4 = 40, + + UShort = 41, + UShort2 = 42, + UShort3 = 43, + UShort4 = 44, + + Char = 45, + Char2 = 46, + Char3 = 47, + Char4 = 48, + + UChar = 49, + UChar2 = 50, + UChar3 = 51, + UChar4 = 52, + + Bool = 53, + Bool2 = 54, + Bool3 = 55, + Bool4 = 56, + + Texture = 58, + Sampler = 59, + Pointer = 60, + R8Unorm = 62, + R8Snorm = 63, + R16Unorm = 64, + R16Snorm = 65, + RG8Unorm = 66, + RG8Snorm = 67, + RG16Unorm = 68, + RG16Snorm = 69, + RGBA8Unorm = 70, + RGBA8Unorm_sRGB = 71, + RGBA8Snorm = 72, + RGBA16Unorm = 73, + RGBA16Snorm = 74, + RGB10A2Unorm = 75, + RG11B10Float = 76, + RGB9E5Float = 77, +} + +#[repr(u32)] +#[allow(non_camel_case_types)] +pub enum MTLArgumentType { + Buffer = 0, + ThreadgroupMemory = 1, + Texture = 2, + Sampler = 3, + ImageblockData = 16, + Imageblock = 17, +} + +#[repr(u32)] +#[allow(non_camel_case_types)] +pub enum MTLArgumentAccess { + ReadOnly = 0, + ReadWrite = 1, + WriteOnly = 2, +} + +pub enum MTLStructMember {} + +foreign_obj_type! { + type CType = MTLStructMember; + pub struct StructMember; + pub struct StructMemberRef; +} + +impl StructMemberRef { + pub fn name(&self) -> &str { + unsafe { + let name = msg_send![self, name]; + crate::nsstring_as_str(name) + } + } + + pub fn offset(&self) -> NSUInteger { + unsafe { msg_send![self, offset] } + } + + pub fn data_type(&self) -> MTLDataType { + unsafe { msg_send![self, dataType] } + } + + pub fn struct_type(&self) -> MTLStructType { + unsafe { msg_send![self, structType] } + } + + pub fn array_type(&self) -> MTLArrayType { + unsafe { msg_send![self, arrayType] } + } +} + +pub enum MTLStructType {} + +foreign_obj_type! { + type CType = MTLStructType; + pub struct StructType; + pub struct StructTypeRef; +} + +impl StructTypeRef { + pub fn members(&self) -> &Array { + unsafe { msg_send![self, members] } + } + + pub fn member_from_name(&self, name: &str) -> Option<&StructMemberRef> { + let nsname = crate::nsstring_from_str(name); + + unsafe { msg_send![self, memberByName: nsname] } + } +} + +pub enum MTLArrayType {} + +foreign_obj_type! { + type CType = MTLArrayType; + pub struct ArrayType; + pub struct ArrayTypeRef; +} + +impl ArrayTypeRef { + pub fn array_length(&self) -> NSUInteger { + unsafe { msg_send![self, arrayLength] } + } + + pub fn stride(&self) -> NSUInteger { + unsafe { msg_send![self, stride] } + } + + pub fn element_type(&self) -> MTLDataType { + unsafe { msg_send![self, elementType] } + } + + pub fn element_struct_type(&self) -> MTLStructType { + unsafe { msg_send![self, elementStructType] } + } + + pub fn element_array_type(&self) -> MTLArrayType { + unsafe { msg_send![self, elementArrayType] } + } +} + +pub enum MTLArgument {} + +foreign_obj_type! 
{ + type CType = MTLArgument; + pub struct Argument; + pub struct ArgumentRef; +} + +impl ArgumentRef { + pub fn name(&self) -> &str { + unsafe { + let name = msg_send![self, name]; + crate::nsstring_as_str(name) + } + } + + pub fn type_(&self) -> MTLArgumentType { + unsafe { msg_send![self, type] } + } + + pub fn access(&self) -> MTLArgumentAccess { + unsafe { msg_send![self, access] } + } + + pub fn index(&self) -> NSUInteger { + unsafe { msg_send![self, index] } + } + + pub fn is_active(&self) -> bool { + unsafe { + match msg_send![self, isActive] { + YES => true, + NO => false, + _ => unreachable!(), + } + } + } + + pub fn buffer_alignment(&self) -> NSUInteger { + unsafe { msg_send![self, bufferAlignment] } + } + + pub fn buffer_data_size(&self) -> NSUInteger { + unsafe { msg_send![self, bufferDataSize] } + } + + pub fn buffer_data_type(&self) -> MTLDataType { + unsafe { msg_send![self, bufferDataType] } + } + + pub fn buffer_struct_type(&self) -> &StructTypeRef { + unsafe { msg_send![self, bufferStructType] } + } + + pub fn threadgroup_memory_alignment(&self) -> NSUInteger { + unsafe { msg_send![self, threadgroupMemoryAlignment] } + } + + pub fn threadgroup_memory_data_size(&self) -> NSUInteger { + unsafe { msg_send![self, threadgroupMemoryDataSize] } + } + + pub fn texture_type(&self) -> MTLTextureType { + unsafe { msg_send![self, textureType] } + } + + pub fn texture_data_type(&self) -> MTLDataType { + unsafe { msg_send![self, textureDataType] } + } +} + +pub enum MTLArgumentDescriptor {} + +foreign_obj_type! { + type CType = MTLArgumentDescriptor; + pub struct ArgumentDescriptor; + pub struct ArgumentDescriptorRef; +} + +impl ArgumentDescriptor { + pub fn new<'a>() -> &'a ArgumentDescriptorRef { + unsafe { + let class = class!(MTLArgumentDescriptor); + msg_send![class, argumentDescriptor] + } + } +} + +impl ArgumentDescriptorRef { + pub fn set_data_type(&self, ty: MTLDataType) { + unsafe { msg_send![self, setDataType: ty] } + } + + pub fn set_index(&self, index: NSUInteger) { + unsafe { msg_send![self, setIndex: index] } + } + + pub fn set_access(&self, access: MTLArgumentAccess) { + unsafe { msg_send![self, setAccess: access] } + } + + pub fn set_array_length(&self, length: NSUInteger) { + unsafe { msg_send![self, setArrayLength: length] } + } + + pub fn set_texture_type(&self, ty: MTLTextureType) { + unsafe { msg_send![self, setTextureType: ty] } + } +} diff --git a/third_party/rust/metal/src/buffer.rs b/third_party/rust/metal/src/buffer.rs new file mode 100644 index 000000000000..87a02eb304a9 --- /dev/null +++ b/third_party/rust/metal/src/buffer.rs @@ -0,0 +1,62 @@ + + + + + + + +use super::*; + +use cocoa::foundation::NSRange; + +pub enum MTLBuffer {} + +foreign_obj_type! 
{ + type CType = MTLBuffer; + pub struct Buffer; + pub struct BufferRef; + type ParentType = ResourceRef; +} + +impl BufferRef { + pub fn length(&self) -> u64 { + unsafe { msg_send![self, length] } + } + + pub fn contents(&self) -> *mut std::ffi::c_void { + unsafe { msg_send![self, contents] } + } + + pub fn did_modify_range(&self, range: NSRange) { + unsafe { msg_send![self, didModifyRange: range] } + } + + pub fn new_texture_from_contents( + &self, + descriptor: &TextureDescriptorRef, + offset: u64, + stride: u64, + ) -> Texture { + unsafe { + msg_send![self, + newTextureWithDescriptor:descriptor + offset:offset + bytesPerRow:stride + ] + } + } + + pub fn set_label(&self, name: &str) { + unsafe { + let name = crate::nsstring_from_str(name); + msg_send![self, setLabel:name] + } + } + + pub fn add_debug_marker(&self, name: &str, range: NSRange) { + unsafe { + let name = crate::nsstring_from_str(name); + msg_send![self, addDebugMarker:name range:range] + } + } +} diff --git a/third_party/rust/metal/src/capturemanager.rs b/third_party/rust/metal/src/capturemanager.rs new file mode 100644 index 000000000000..b2b2201b3b04 --- /dev/null +++ b/third_party/rust/metal/src/capturemanager.rs @@ -0,0 +1,103 @@ + + + + + + + +use super::*; + +pub enum MTLCaptureScope {} + +foreign_obj_type! { + type CType = MTLCaptureScope; + pub struct CaptureScope; + pub struct CaptureScopeRef; +} + +impl CaptureScopeRef { + pub fn begin_scope(&self) { + unsafe { + msg_send![self, beginScope] + } + } + + pub fn end_scope(&self) { + unsafe { + msg_send![self, endScope] + } + } + + pub fn label(&self) -> &str { + unsafe { + let label = msg_send![self, label]; + crate::nsstring_as_str(label) + } + } +} + +pub enum MTLCaptureManager {} + +foreign_obj_type! { + type CType = MTLCaptureManager; + pub struct CaptureManager; + pub struct CaptureManagerRef; +} + +impl CaptureManager { + pub fn shared<'a>() -> &'a CaptureManagerRef { + unsafe { + let class = class!(MTLCaptureManager); + msg_send![class, sharedCaptureManager] + } + } +} + +impl CaptureManagerRef { + pub fn new_capture_scope_with_device(&self, device: &DeviceRef) -> CaptureScope { + unsafe { msg_send![self, newCaptureScopeWithDevice: device] } + } + + pub fn new_capture_scope_with_command_queue( + &self, + command_queue: &CommandQueueRef, + ) -> CaptureScope { + unsafe { msg_send![self, newCaptureScopeWithCommandQueue: command_queue] } + } + + pub fn default_capture_scope(&self) -> Option<&CaptureScopeRef> { + unsafe { msg_send![self, defaultCaptureScope] } + } + + pub fn set_default_capture_scope(&self, scope: &CaptureScopeRef) { + unsafe { msg_send![self, setDefaultCaptureScope: scope] } + } + + pub fn start_capture_with_device(&self, device: &DeviceRef) { + unsafe { + msg_send![self, startCaptureWithDevice: device] + } + } + + pub fn start_capture_with_command_queue(&self, command_queue: &CommandQueueRef) { + unsafe { + msg_send![self, startCaptureWithCommandQueue: command_queue] + } + } + + pub fn start_capture_with_scope(&self, scope: &CaptureScopeRef) { + unsafe { + msg_send![self, startCaptureWithScope: scope] + } + } + + pub fn stop_capture(&self) { + unsafe { + msg_send![self, stopCapture] + } + } + + pub fn is_capturing(&self) -> bool { + unsafe { msg_send![self, isCapturing] } + } +} diff --git a/third_party/rust/metal/src/commandbuffer.rs b/third_party/rust/metal/src/commandbuffer.rs new file mode 100644 index 000000000000..b9b2999e3dd2 --- /dev/null +++ b/third_party/rust/metal/src/commandbuffer.rs @@ -0,0 +1,122 @@ + + + + + + + +use super::*; + 
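// [Editor's note] buffer.rs above exposes `contents()` as a raw pointer into
// the buffer's CPU-visible storage; with managed storage on macOS a CPU write
// must be followed by `did_modify_range`, which is exactly what the window
// example does by hand. A hedged sketch of that pattern as a helper (the
// `upload` function is hypothetical, not part of this crate):
//
//     fn upload<T: Copy>(buffer: &BufferRef, data: &[T]) {
//         let byte_len = (data.len() * std::mem::size_of::<T>()) as u64;
//         assert!(byte_len <= buffer.length()); // stay inside the allocation
//         unsafe {
//             std::ptr::copy_nonoverlapping(
//                 data.as_ptr() as *const u8,
//                 buffer.contents() as *mut u8,
//                 byte_len as usize,
//             );
//         }
//         // Tell Metal which CPU-written byte range the GPU must observe.
//         buffer.did_modify_range(cocoa::foundation::NSRange::new(0, byte_len));
//     }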
+use block::Block; + +#[repr(u32)] +#[allow(non_camel_case_types)] +pub enum MTLCommandBufferStatus { + NotEnqueued = 0, + Enqueued = 1, + Committed = 2, + Scheduled = 3, + Completed = 4, + Error = 5, +} + +#[repr(u32)] +#[allow(non_camel_case_types)] +pub enum MTLCommandBufferError { + None = 0, + Internal = 1, + Timeout = 2, + PageFault = 3, + Blacklisted = 4, + NotPermitted = 7, + OutOfMemory = 8, + InvalidResource = 9, + Memoryless = 10, + DeviceRemoved = 11, +} + +#[repr(u32)] +#[allow(non_camel_case_types)] +pub enum MTLDispatchType { + Serial = 0, + Concurrent = 1, +} + +type _MTLCommandBufferHandler = Block<(MTLCommandBuffer), ()>; + +pub enum MTLCommandBuffer {} + +foreign_obj_type! { + type CType = MTLCommandBuffer; + pub struct CommandBuffer; + pub struct CommandBufferRef; +} + +impl CommandBufferRef { + pub fn label(&self) -> &str { + unsafe { + let label = msg_send![self, label]; + crate::nsstring_as_str(label) + } + } + + pub fn set_label(&self, label: &str) { + unsafe { + let nslabel = crate::nsstring_from_str(label); + let () = msg_send![self, setLabel: nslabel]; + } + } + + pub fn enqueue(&self) { + unsafe { msg_send![self, enqueue] } + } + + pub fn commit(&self) { + unsafe { msg_send![self, commit] } + } + + pub fn status(&self) -> MTLCommandBufferStatus { + unsafe { msg_send![self, status] } + } + + pub fn present_drawable(&self, drawable: &DrawableRef) { + unsafe { msg_send![self, presentDrawable: drawable] } + } + + pub fn wait_until_completed(&self) { + unsafe { msg_send![self, waitUntilCompleted] } + } + + pub fn wait_until_scheduled(&self) { + unsafe { msg_send![self, waitUntilScheduled] } + } + + pub fn new_blit_command_encoder(&self) -> &BlitCommandEncoderRef { + unsafe { msg_send![self, blitCommandEncoder] } + } + + pub fn new_compute_command_encoder(&self) -> &ComputeCommandEncoderRef { + unsafe { msg_send![self, computeCommandEncoder] } + } + + pub fn new_render_command_encoder( + &self, + descriptor: &RenderPassDescriptorRef, + ) -> &RenderCommandEncoderRef { + unsafe { msg_send![self, renderCommandEncoderWithDescriptor: descriptor] } + } + + pub fn new_parallel_render_command_encoder( + &self, + descriptor: &RenderPassDescriptorRef, + ) -> &ParallelRenderCommandEncoderRef { + unsafe { msg_send![self, parallelRenderCommandEncoderWithDescriptor: descriptor] } + } + + pub fn compute_command_encoder_with_dispatch_type( + &self, + ty: MTLDispatchType, + ) -> &ComputeCommandEncoderRef { + unsafe { msg_send![self, computeCommandEncoderWithDispatchType: ty] } + } +} diff --git a/third_party/rust/metal/src/commandqueue.rs b/third_party/rust/metal/src/commandqueue.rs new file mode 100644 index 000000000000..11617dbb9ed0 --- /dev/null +++ b/third_party/rust/metal/src/commandqueue.rs @@ -0,0 +1,44 @@ + + + + + + + +use super::*; + +pub enum MTLCommandQueue {} + +foreign_obj_type! 
{ + type CType = MTLCommandQueue; + pub struct CommandQueue; + pub struct CommandQueueRef; +} + +impl CommandQueueRef { + pub fn label(&self) -> &str { + unsafe { + let label = msg_send![self, label]; + crate::nsstring_as_str(label) + } + } + + pub fn set_label(&self, label: &str) { + unsafe { + let nslabel = crate::nsstring_from_str(label); + let () = msg_send![self, setLabel: nslabel]; + } + } + + pub fn new_command_buffer(&self) -> &CommandBufferRef { + unsafe { msg_send![self, commandBuffer] } + } + + pub fn new_command_buffer_with_unretained_references(&self) -> &CommandBufferRef { + unsafe { msg_send![self, commandBufferWithUnretainedReferences] } + } + + pub fn device(&self) -> &DeviceRef { + unsafe { msg_send![self, device] } + } +} diff --git a/third_party/rust/metal/src/constants.rs b/third_party/rust/metal/src/constants.rs new file mode 100644 index 000000000000..96e7268d16dd --- /dev/null +++ b/third_party/rust/metal/src/constants.rs @@ -0,0 +1,137 @@ + + + + + + + +#[repr(u64)] +#[allow(non_camel_case_types)] +#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)] +pub enum MTLPixelFormat { + Invalid = 0, + A8Unorm = 1, + R8Unorm = 10, + R8Unorm_sRGB = 11, + R8Snorm = 12, + R8Uint = 13, + R8Sint = 14, + R16Unorm = 20, + R16Snorm = 22, + R16Uint = 23, + R16Sint = 24, + R16Float = 25, + RG8Unorm = 30, + RG8Unorm_sRGB = 31, + RG8Snorm = 32, + RG8Uint = 33, + RG8Sint = 34, + B5G6R5Unorm = 40, + A1BGR5Unorm = 41, + ABGR4Unorm = 42, + BGR5A1Unorm = 43, + R32Uint = 53, + R32Sint = 54, + R32Float = 55, + RG16Unorm = 60, + RG16Snorm = 62, + RG16Uint = 63, + RG16Sint = 64, + RG16Float = 65, + RGBA8Unorm = 70, + RGBA8Unorm_sRGB = 71, + RGBA8Snorm = 72, + RGBA8Uint = 73, + RGBA8Sint = 74, + BGRA8Unorm = 80, + BGRA8Unorm_sRGB = 81, + RGB10A2Unorm = 90, + RGB10A2Uint = 91, + RG11B10Float = 92, + RGB9E5Float = 93, + BGR10A2Unorm = 94, + RG32Uint = 103, + RG32Sint = 104, + RG32Float = 105, + RGBA16Unorm = 110, + RGBA16Snorm = 112, + RGBA16Uint = 113, + RGBA16Sint = 114, + RGBA16Float = 115, + RGBA32Uint = 123, + RGBA32Sint = 124, + RGBA32Float = 125, + BC1_RGBA = 130, + BC1_RGBA_sRGB = 131, + BC2_RGBA = 132, + BC2_RGBA_sRGB = 133, + BC3_RGBA = 134, + BC3_RGBA_sRGB = 135, + BC4_RUnorm = 140, + BC4_RSnorm = 141, + BC5_RGUnorm = 142, + BC5_RGSnorm = 143, + BC6H_RGBFloat = 150, + BC6H_RGBUfloat = 151, + BC7_RGBAUnorm = 152, + BC7_RGBAUnorm_sRGB = 153, + PVRTC_RGB_2BPP = 160, + PVRTC_RGB_2BPP_sRGB = 161, + PVRTC_RGB_4BPP = 162, + PVRTC_RGB_4BPP_sRGB = 163, + PVRTC_RGBA_2BPP = 164, + PVRTC_RGBA_2BPP_sRGB = 165, + PVRTC_RGBA_4BPP = 166, + PVRTC_RGBA_4BPP_sRGB = 167, + EAC_R11Unorm = 170, + EAC_R11Snorm = 172, + EAC_RG11Unorm = 174, + EAC_RG11Snorm = 176, + EAC_RGBA8 = 178, + EAC_RGBA8_sRGB = 179, + ETC2_RGB8 = 180, + ETC2_RGB8_sRGB = 181, + ETC2_RGB8A1 = 182, + ETC2_RGB8A1_sRGB = 183, + ASTC_4x4_sRGB = 186, + ASTC_5x4_sRGB = 187, + ASTC_5x5_sRGB = 188, + ASTC_6x5_sRGB = 189, + ASTC_6x6_sRGB = 190, + ASTC_8x5_sRGB = 192, + ASTC_8x6_sRGB = 193, + ASTC_8x8_sRGB = 194, + ASTC_10x5_sRGB = 195, + ASTC_10x6_sRGB = 196, + ASTC_10x8_sRGB = 197, + ASTC_10x10_sRGB = 198, + ASTC_12x10_sRGB = 199, + ASTC_12x12_sRGB = 200, + ASTC_4x4_LDR = 204, + ASTC_5x4_LDR = 205, + ASTC_5x5_LDR = 206, + ASTC_6x5_LDR = 207, + ASTC_6x6_LDR = 208, + ASTC_8x5_LDR = 210, + ASTC_8x6_LDR = 211, + ASTC_8x8_LDR = 212, + ASTC_10x5_LDR = 213, + ASTC_10x6_LDR = 214, + ASTC_10x8_LDR = 215, + ASTC_10x10_LDR = 216, + ASTC_12x10_LDR = 217, + ASTC_12x12_LDR = 218, + GBGR422 = 240, + BGRG422 = 241, + Depth16Unorm = 250, + Depth32Float = 252, + 
Stencil8 = 253, + Depth24Unorm_Stencil8 = 255, + Depth32Float_Stencil8 = 260, + X32_Stencil8 = 261, + X24_Stencil8 = 262, + BGRA10_XR = 552, + BGRA10_XR_SRGB = 553, + BGR10_XR = 554, + BGR10_XR_SRGB = 555, +} diff --git a/third_party/rust/metal/src/depthstencil.rs b/third_party/rust/metal/src/depthstencil.rs new file mode 100644 index 000000000000..b01eb75248b8 --- /dev/null +++ b/third_party/rust/metal/src/depthstencil.rs @@ -0,0 +1,164 @@ + + + + + + + +use objc::runtime::{NO, YES}; + +#[repr(u64)] +pub enum MTLCompareFunction { + Never = 0, + Less = 1, + Equal = 2, + LessEqual = 3, + Greater = 4, + NotEqual = 5, + GreaterEqual = 6, + Always = 7, +} + +#[repr(u64)] +pub enum MTLStencilOperation { + Keep = 0, + Zero = 1, + Replace = 2, + IncrementClamp = 3, + DecrementClamp = 4, + Invert = 5, + IncrementWrap = 6, + DecrementWrap = 7, +} + +pub enum MTLStencilDescriptor {} + +foreign_obj_type! { + type CType = MTLStencilDescriptor; + pub struct StencilDescriptor; + pub struct StencilDescriptorRef; +} + +impl StencilDescriptor { + pub fn new() -> Self { + unsafe { + let class = class!(MTLStencilDescriptor); + msg_send![class, new] + } + } +} + +impl StencilDescriptorRef { + pub fn stencil_compare_function(&self) -> MTLCompareFunction { + unsafe { msg_send![self, stencilCompareFunction] } + } + + pub fn set_stencil_compare_function(&self, func: MTLCompareFunction) { + unsafe { msg_send![self, setStencilCompareFunction: func] } + } + + pub fn stencil_failure_operation(&self) -> MTLStencilOperation { + unsafe { msg_send![self, stencilFailureOperation] } + } + + pub fn set_stencil_failure_operation(&self, operation: MTLStencilOperation) { + unsafe { msg_send![self, setStencilFailureOperation: operation] } + } + + pub fn depth_failure_operation(&self) -> MTLStencilOperation { + unsafe { msg_send![self, depthFailureOperation] } + } + + pub fn set_depth_failure_operation(&self, operation: MTLStencilOperation) { + unsafe { msg_send![self, setDepthFailureOperation: operation] } + } + + pub fn depth_stencil_pass_operation(&self) -> MTLStencilOperation { + unsafe { msg_send![self, depthStencilPassOperation] } + } + + pub fn set_depth_stencil_pass_operation(&self, operation: MTLStencilOperation) { + unsafe { msg_send![self, setDepthStencilPassOperation: operation] } + } + + pub fn read_mask(&self) -> u32 { + unsafe { msg_send![self, readMask] } + } + + pub fn set_read_mask(&self, mask: u32) { + unsafe { msg_send![self, setReadMask: mask] } + } + + pub fn write_mask(&self) -> u32 { + unsafe { msg_send![self, writeMask] } + } + + pub fn set_write_mask(&self, mask: u32) { + unsafe { msg_send![self, setWriteMask: mask] } + } +} + +pub enum MTLDepthStencilDescriptor {} + +foreign_obj_type! 
{ + type CType = MTLDepthStencilDescriptor; + pub struct DepthStencilDescriptor; + pub struct DepthStencilDescriptorRef; +} + +impl DepthStencilDescriptor { + pub fn new() -> Self { + unsafe { + let class = class!(MTLDepthStencilDescriptor); + msg_send![class, new] + } + } +} + +impl DepthStencilDescriptorRef { + pub fn depth_compare_function(&self) -> MTLCompareFunction { + unsafe { msg_send![self, depthCompareFunction] } + } + + pub fn set_depth_compare_function(&self, func: MTLCompareFunction) { + unsafe { msg_send![self, setDepthCompareFunction: func] } + } + + pub fn depth_write_enabled(&self) -> bool { + unsafe { + match msg_send![self, isDepthWriteEnabled] { + YES => true, + NO => false, + _ => unreachable!(), + } + } + } + + pub fn set_depth_write_enabled(&self, enabled: bool) { + unsafe { msg_send![self, setDepthWriteEnabled: enabled] } + } + + pub fn front_face_stencil(&self) -> Option<&StencilDescriptorRef> { + unsafe { msg_send![self, frontFaceStencil] } + } + + pub fn set_front_face_stencil(&self, descriptor: Option<&StencilDescriptorRef>) { + unsafe { msg_send![self, setFrontFaceStencil: descriptor] } + } + + pub fn back_face_stencil(&self) -> Option<&StencilDescriptorRef> { + unsafe { msg_send![self, backFaceStencil] } + } + + pub fn set_back_face_stencil(&self, descriptor: Option<&StencilDescriptorRef>) { + unsafe { msg_send![self, setBackFaceStencil: descriptor] } + } +} + +pub enum MTLDepthStencilState {} + +foreign_obj_type! { + type CType = MTLDepthStencilState; + pub struct DepthStencilState; + pub struct DepthStencilStateRef; +} diff --git a/third_party/rust/metal/src/device.rs b/third_party/rust/metal/src/device.rs new file mode 100644 index 000000000000..803fb25f593d --- /dev/null +++ b/third_party/rust/metal/src/device.rs @@ -0,0 +1,1741 @@ + + + + + + + +use block::{Block, ConcreteBlock}; +use cocoa::base::id; +use cocoa::foundation::NSUInteger; +use foreign_types::ForeignType; +use objc::runtime::{Object, BOOL, NO, YES}; + +use super::*; + +use std::ffi::CStr; +use std::path::Path; +use std::ptr; + +#[allow(non_camel_case_types)] +#[repr(u64)] +#[derive(Copy, Clone, Debug)] +pub enum MTLFeatureSet { + iOS_GPUFamily1_v1 = 0, + iOS_GPUFamily2_v1 = 1, + iOS_GPUFamily1_v2 = 2, + iOS_GPUFamily2_v2 = 3, + iOS_GPUFamily3_v1 = 4, + iOS_GPUFamily1_v3 = 5, + iOS_GPUFamily2_v3 = 6, + iOS_GPUFamily3_v2 = 7, + iOS_GPUFamily1_v4 = 8, + iOS_GPUFamily2_v4 = 9, + iOS_GPUFamily3_v3 = 10, + iOS_GPUFamily4_v1 = 11, + iOS_GPUFamily1_v5 = 12, + iOS_GPUFamily2_v5 = 13, + iOS_GPUFamily3_v4 = 14, + iOS_GPUFamily4_v2 = 15, + iOS_GPUFamily5_v1 = 16, + + tvOS_GPUFamily1_v1 = 30000, + tvOS_GPUFamily1_v2 = 30001, + tvOS_GPUFamily1_v3 = 30002, + tvOS_GPUFamily2_v1 = 30003, + tvOS_GPUFamily1_v4 = 30004, + tvOS_GPUFamily2_v2 = 30005, + + macOS_GPUFamily1_v1 = 10000, + macOS_GPUFamily1_v2 = 10001, + + macOS_GPUFamily1_v3 = 10003, + macOS_GPUFamily1_v4 = 10004, + macOS_GPUFamily2_v1 = 10005, +} + +bitflags! 
{ + pub struct PixelFormatCapabilities: u32 { + const Filter = 1 << 0; + const Write = 1 << 1; + const Color = 1 << 2; + const Blend = 1 << 3; + const Msaa = 1 << 4; + const Resolve = 1 << 5; + } +} + +#[allow(non_camel_case_types)] +#[derive(Copy, Clone, Debug, Eq, PartialEq)] +enum OS { + iOS, + tvOS, + macOS, +} + +const KB: u32 = 1024; +const MB: u32 = 1024 * KB; +const GB: u32 = 1024 * MB; + +impl MTLFeatureSet { + fn os(&self) -> OS { + let value = *self as u64; + if value < 10_000 { + OS::iOS + } else if value < 20_000 { + OS::macOS + } else if value >= 30_000 && value < 40_000 { + OS::tvOS + } else { + unreachable!() + } + } + + + fn os_version(&self) -> u32 { + use MTLFeatureSet::*; + match self { + iOS_GPUFamily1_v1 | iOS_GPUFamily2_v1 => 8, + iOS_GPUFamily1_v2 | iOS_GPUFamily2_v2 | iOS_GPUFamily3_v1 => 9, + iOS_GPUFamily1_v3 | iOS_GPUFamily2_v3 | iOS_GPUFamily3_v2 => 10, + iOS_GPUFamily1_v4 | iOS_GPUFamily2_v4 | iOS_GPUFamily3_v3 | iOS_GPUFamily4_v1 => 11, + iOS_GPUFamily1_v5 | iOS_GPUFamily2_v5 | iOS_GPUFamily3_v4 | iOS_GPUFamily4_v2 + | iOS_GPUFamily5_v1 => 12, + tvOS_GPUFamily1_v1 => 9, + tvOS_GPUFamily1_v2 => 10, + tvOS_GPUFamily1_v3 | tvOS_GPUFamily2_v1 => 11, + tvOS_GPUFamily1_v4 | tvOS_GPUFamily2_v2 => 12, + macOS_GPUFamily1_v1 => 11, + macOS_GPUFamily1_v2 => 12, + macOS_GPUFamily1_v3 => 13, + macOS_GPUFamily1_v4 | macOS_GPUFamily2_v1 => 14, + } + } + + fn gpu_family(&self) -> u32 { + use MTLFeatureSet::*; + match self { + iOS_GPUFamily1_v1 | iOS_GPUFamily1_v2 | iOS_GPUFamily1_v3 | iOS_GPUFamily1_v4 + | iOS_GPUFamily1_v5 | tvOS_GPUFamily1_v1 | tvOS_GPUFamily1_v2 | tvOS_GPUFamily1_v3 + | tvOS_GPUFamily1_v4 | macOS_GPUFamily1_v1 | macOS_GPUFamily1_v2 + | macOS_GPUFamily1_v3 | macOS_GPUFamily1_v4 => 1, + iOS_GPUFamily2_v1 | iOS_GPUFamily2_v2 | iOS_GPUFamily2_v3 | iOS_GPUFamily2_v4 + | iOS_GPUFamily2_v5 | tvOS_GPUFamily2_v1 | tvOS_GPUFamily2_v2 | macOS_GPUFamily2_v1 => { + 2 + } + iOS_GPUFamily3_v1 | iOS_GPUFamily3_v2 | iOS_GPUFamily3_v3 | iOS_GPUFamily3_v4 => 3, + iOS_GPUFamily4_v1 | iOS_GPUFamily4_v2 => 4, + iOS_GPUFamily5_v1 => 5, + } + } + + fn version(&self) -> u32 { + use MTLFeatureSet::*; + match self { + iOS_GPUFamily1_v1 | iOS_GPUFamily2_v1 | iOS_GPUFamily3_v1 | iOS_GPUFamily4_v1 + | iOS_GPUFamily5_v1 | macOS_GPUFamily1_v1 | macOS_GPUFamily2_v1 + | tvOS_GPUFamily1_v1 | tvOS_GPUFamily2_v1 => 1, + iOS_GPUFamily1_v2 | iOS_GPUFamily2_v2 | iOS_GPUFamily3_v2 | iOS_GPUFamily4_v2 + | macOS_GPUFamily1_v2 | tvOS_GPUFamily1_v2 | tvOS_GPUFamily2_v2 => 2, + iOS_GPUFamily1_v3 | iOS_GPUFamily2_v3 | iOS_GPUFamily3_v3 | macOS_GPUFamily1_v3 + | tvOS_GPUFamily1_v3 => 3, + iOS_GPUFamily1_v4 | iOS_GPUFamily2_v4 | iOS_GPUFamily3_v4 | tvOS_GPUFamily1_v4 + | macOS_GPUFamily1_v4 => 4, + iOS_GPUFamily1_v5 | iOS_GPUFamily2_v5 => 5, + } + } + + pub fn supports_metal_kit(&self) -> bool { + true + } + + pub fn supports_metal_performance_shaders(&self) -> bool { + match self.os() { + OS::iOS => self.gpu_family() >= 2, + OS::tvOS => true, + OS::macOS => self.os_version() >= 13, + } + } + + pub fn supports_programmable_blending(&self) -> bool { + self.os() != OS::macOS + } + + pub fn supports_pvrtc_pixel_formats(&self) -> bool { + self.os() != OS::macOS + } + + pub fn supports_eac_etc_pixel_formats(&self) -> bool { + self.os() != OS::macOS + } + + pub fn supports_astc_pixel_formats(&self) -> bool { + match self.os() { + OS::iOS => self.gpu_family() >= 2, + OS::tvOS => true, + OS::macOS => false, + } + } + + pub fn supports_linear_textures(&self) -> bool { + self.os() != OS::macOS ||
self.os_version() >= 13 + } + + pub fn supports_bc_pixel_formats(&self) -> bool { + self.os() == OS::macOS + } + + pub fn supports_msaa_depth_resolve(&self) -> bool { + match self.os() { + OS::iOS => self.gpu_family() >= 3, + OS::tvOS => self.gpu_family() >= 2, + OS::macOS => false, + } + } + + pub fn supports_counting_occlusion_query(&self) -> bool { + match self.os() { + OS::iOS => self.gpu_family() >= 3, + OS::tvOS => self.gpu_family() >= 2, + OS::macOS => true, + } + } + + pub fn supports_base_vertex_instance_drawing(&self) -> bool { + match self.os() { + OS::iOS => self.gpu_family() >= 3, + OS::tvOS => self.gpu_family() >= 2, + OS::macOS => true, + } + } + + pub fn supports_indirect_buffers(&self) -> bool { + match self.os() { + OS::iOS => self.gpu_family() >= 3, + OS::tvOS => self.gpu_family() >= 2, + OS::macOS => true, + } + } + + pub fn supports_cube_map_texture_arrays(&self) -> bool { + match self.os() { + OS::iOS => self.gpu_family() >= 4, + OS::tvOS => false, + OS::macOS => true, + } + } + + pub fn supports_texture_barriers(&self) -> bool { + self.os() == OS::macOS + } + + pub fn supports_layered_rendering(&self) -> bool { + match self.os() { + OS::iOS => self.gpu_family() >= 5, + OS::tvOS => false, + OS::macOS => true, + } + } + + pub fn supports_tessellation(&self) -> bool { + match self.os() { + OS::iOS => self.gpu_family() >= 3 && self.os_version() >= 10, + OS::tvOS => self.gpu_family() >= 2, + OS::macOS => self.os_version() >= 12, + } + } + + pub fn supports_resource_heaps(&self) -> bool { + match self.os() { + OS::iOS => self.os_version() >= 10, + OS::tvOS => self.os_version() >= 10, + OS::macOS => self.os_version() >= 13, + } + } + + pub fn supports_memoryless_render_targets(&self) -> bool { + match self.os() { + OS::iOS => self.os_version() >= 10, + OS::tvOS => self.os_version() >= 10, + OS::macOS => false, + } + } + + pub fn supports_function_specialization(&self) -> bool { + match self.os() { + OS::iOS => self.os_version() >= 10, + OS::tvOS => self.os_version() >= 10, + OS::macOS => self.os_version() >= 12, + } + } + + pub fn supports_function_buffer_read_writes(&self) -> bool { + match self.os() { + OS::iOS => self.gpu_family() >= 3 && self.os_version() >= 10, + OS::tvOS => self.gpu_family() >= 2, + OS::macOS => self.os_version() >= 12, + } + } + + pub fn supports_function_texture_read_writes(&self) -> bool { + match self.os() { + OS::iOS => self.gpu_family() >= 4, + OS::tvOS => false, + OS::macOS => self.os_version() >= 12, + } + } + + pub fn supports_array_of_textures(&self) -> bool { + match self.os() { + OS::iOS => self.gpu_family() >= 3 && self.os_version() >= 10, + OS::tvOS => self.gpu_family() >= 2, + OS::macOS => self.os_version() >= 13, + } + } + + pub fn supports_array_of_samplers(&self) -> bool { + match self.os() { + OS::iOS => self.gpu_family() >= 3 && self.os_version() >= 11, + OS::tvOS => self.gpu_family() >= 2, + OS::macOS => self.os_version() >= 12, + } + } + + pub fn supports_stencil_texture_views(&self) -> bool { + match self.os() { + OS::iOS => self.os_version() >= 10, + OS::tvOS => self.os_version() >= 10, + OS::macOS => self.os_version() >= 12, + } + } + + pub fn supports_depth_16_pixel_format(&self) -> bool { + self.os() == OS::macOS && self.os_version() >= 12 + } + + pub fn supports_extended_range_pixel_formats(&self) -> bool { + match self.os() { + OS::iOS => self.gpu_family() >= 3 && self.os_version() >= 10, + OS::tvOS => self.gpu_family() >= 2, + OS::macOS => false, + } + } + + pub fn supports_wide_color_pixel_format(&self) -> bool { + match 
self.os() { + OS::iOS => self.os_version() >= 11, + OS::tvOS => self.os_version() >= 11, + OS::macOS => self.os_version() >= 13, + } + } + + pub fn supports_combined_msaa_store_and_resolve_action(&self) -> bool { + match self.os() { + OS::iOS => self.gpu_family() >= 3 && self.os_version() >= 10, + OS::tvOS => self.gpu_family() >= 2, + OS::macOS => self.os_version() >= 12, + } + } + + pub fn supports_deferred_store_action(&self) -> bool { + match self.os() { + OS::iOS => self.os_version() >= 10, + OS::tvOS => self.os_version() >= 10, + OS::macOS => self.os_version() >= 12, + } + } + + pub fn supports_msaa_blits(&self) -> bool { + match self.os() { + OS::iOS => self.os_version() >= 10, + OS::tvOS => self.os_version() >= 10, + OS::macOS => true, + } + } + + pub fn supports_srgb_writes(&self) -> bool { + match self.os() { + OS::iOS => self.gpu_family() >= 3 || (self.gpu_family() >= 2 && self.version() >= 3), + OS::tvOS => self.os_version() >= 10, + OS::macOS => self.gpu_family() >= 2, + } + } + + pub fn supports_16_bit_unsigned_integer_coordinates(&self) -> bool { + match self.os() { + OS::iOS => self.os_version() >= 10, + OS::tvOS => self.os_version() >= 10, + OS::macOS => self.os_version() >= 12, + } + } + + pub fn supports_extract_insert_and_reverse_bits(&self) -> bool { + match self.os() { + OS::iOS => self.os_version() >= 10, + OS::tvOS => self.os_version() >= 10, + OS::macOS => self.os_version() >= 12, + } + } + + pub fn supports_simd_barrier(&self) -> bool { + match self.os() { + OS::iOS => self.os_version() >= 10, + OS::tvOS => self.os_version() >= 10, + OS::macOS => self.os_version() >= 13, + } + } + + pub fn supports_sampler_max_anisotropy(&self) -> bool { + match self.os() { + OS::iOS => self.os_version() >= 10, + OS::tvOS => self.os_version() >= 10, + OS::macOS => self.os_version() >= 13, + } + } + + pub fn supports_sampler_lod_clamp(&self) -> bool { + match self.os() { + OS::iOS => self.os_version() >= 10, + OS::tvOS => self.os_version() >= 10, + OS::macOS => self.os_version() >= 13, + } + } + + pub fn supports_border_color(&self) -> bool { + self.os() == OS::macOS && self.os_version() >= 12 + } + + pub fn supports_dual_source_blending(&self) -> bool { + match self.os() { + OS::iOS => self.os_version() >= 11, + OS::tvOS => self.os_version() >= 11, + OS::macOS => self.os_version() >= 12, + } + } + + pub fn supports_argument_buffers(&self) -> bool { + match self.os() { + OS::iOS => self.os_version() >= 11, + OS::tvOS => self.os_version() >= 11, + OS::macOS => self.os_version() >= 13, + } + } + + pub fn supports_programmable_sample_positions(&self) -> bool { + match self.os() { + OS::iOS => self.os_version() >= 11, + OS::tvOS => self.os_version() >= 11, + OS::macOS => self.os_version() >= 13, + } + } + + pub fn supports_uniform_type(&self) -> bool { + match self.os() { + OS::iOS => self.os_version() >= 11, + OS::tvOS => self.os_version() >= 11, + OS::macOS => self.os_version() >= 13, + } + } + + pub fn supports_imageblocks(&self) -> bool { + self.os() == OS::iOS && self.gpu_family() >= 4 + } + + pub fn supports_tile_shaders(&self) -> bool { + self.os() == OS::iOS && self.gpu_family() >= 4 + } + + pub fn supports_imageblock_sample_coverage_control(&self) -> bool { + self.os() == OS::iOS && self.gpu_family() >= 4 + } + + pub fn supports_threadgroup_sharing(&self) -> bool { + self.os() == OS::iOS && self.gpu_family() >= 4 + } + + pub fn supports_post_depth_coverage(&self) -> bool { + self.os() == OS::iOS && self.gpu_family() >= 4 + } + + pub fn 
supports_quad_scoped_permute_operations(&self) -> bool { + match self.os() { + OS::iOS => self.gpu_family() >= 4, + OS::tvOS => false, + OS::macOS => self.os_version() >= 13, + } + } + + pub fn supports_raster_order_groups(&self) -> bool { + match self.os() { + OS::iOS => self.gpu_family() >= 4, + OS::tvOS => false, + OS::macOS => self.os_version() >= 13, + } + } + + pub fn supports_non_uniform_threadgroup_size(&self) -> bool { + match self.os() { + OS::iOS => self.gpu_family() >= 4, + OS::tvOS => false, + OS::macOS => self.os_version() >= 13, + } + } + + pub fn supports_multiple_viewports(&self) -> bool { + match self.os() { + OS::iOS => self.gpu_family() >= 5, + OS::tvOS => false, + OS::macOS => self.os_version() >= 13, + } + } + + pub fn supports_device_notifications(&self) -> bool { + self.os() == OS::macOS && self.os_version() >= 13 + } + + pub fn supports_stencil_feedback(&self) -> bool { + match self.os() { + OS::iOS => self.gpu_family() >= 5, + OS::tvOS => false, + OS::macOS => self.gpu_family() >= 2, + } + } + + pub fn supports_stencil_resolve(&self) -> bool { + match self.os() { + OS::iOS => self.gpu_family() >= 5, + OS::tvOS => false, + OS::macOS => self.gpu_family() >= 2, + } + } + + pub fn max_vertex_attributes(&self) -> u32 { + 31 + } + + pub fn max_buffer_argument_entries(&self) -> u32 { + 31 + } + + pub fn max_texture_argument_entries(&self) -> u32 { + if self.os() == OS::macOS { + 128 + } else { + 31 + } + } + + pub fn max_sampler_state_argument_entries(&self) -> u32 { + 16 + } + + pub fn max_threadgroup_memory_argument_entries(&self) -> u32 { + 31 + } + + pub fn max_inlined_constant_data_buffers(&self) -> u32 { + if self.os() == OS::macOS { + 14 + } else { + 31 + } + } + + pub fn max_inline_constant_buffer_length(&self) -> u32 { + 4 * KB + } + + pub fn max_threads_per_threadgroup(&self) -> u32 { + if self.os() == OS::macOS || self.gpu_family() >= 4 { + 1024 + } else { + 512 + } + } + + pub fn max_total_threadgroup_memory_allocation(&self) -> u32 { + match (self.os(), self.gpu_family()) { + (OS::iOS, 5) => 64 * KB, + (OS::iOS, 4) => { + if self.os_version() >= 12 { + 64 * KB + } else { + 32 * KB + } + } + (OS::iOS, 3) => 16 * KB, + (OS::iOS, _) => 16 * KB - 32, + (OS::tvOS, 1) => 16 * KB - 32, + (OS::tvOS, _) => 16 * KB, + (OS::macOS, _) => 32 * KB, + } + } + + pub fn max_total_tile_memory_allocation(&self) -> u32 { + if self.os() == OS::iOS && self.gpu_family() == 4 { + 32 * KB + } else { + 0 + } + } + + pub fn threadgroup_memory_length_alignment(&self) -> u32 { + 16 + } + + pub fn max_constant_buffer_function_memory_allocation(&self) -> Option { + if self.os() == OS::macOS { + Some(64 * KB) + } else { + None + } + } + + pub fn max_fragment_inputs(&self) -> u32 { + if self.os() == OS::macOS { + 32 + } else { + 60 + } + } + + pub fn max_fragment_input_components(&self) -> u32 { + if self.os() == OS::macOS { + 128 + } else { + 60 + } + } + + pub fn max_function_constants(&self) -> u32 { + match self.os() { + OS::iOS if self.os_version() >= 11 => 65536, + OS::tvOS if self.os_version() >= 10 => 65536, + OS::macOS if self.os_version() >= 12 => 65536, + _ => 0, + } + } + + pub fn max_tessellation_factor(&self) -> u32 { + if self.supports_tessellation() { + match self.os() { + OS::iOS if self.gpu_family() >= 5 => 64, + OS::iOS => 16, + OS::tvOS => 16, + OS::macOS => 64, + } + } else { + 0 + } + } + + pub fn max_viewports_and_scissor_rectangles(&self) -> u32 { + if self.supports_multiple_viewports() { + 16 + } else { + 1 + } + } + + pub fn max_raster_order_groups(&self) -> u32 { 
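// [Editor's note] The limit getters in this table deliberately degrade to a
// 0 / None sentinel when the matching supports_* predicate is false (compare
// max_tessellation_factor above), so a caller can branch on the value
// directly. Illustrative sketch, with an arbitrary feature set picked for
// the example:
//
//     let fs = MTLFeatureSet::macOS_GPUFamily1_v3;
//     if fs.max_raster_order_groups() > 0 {
//         // raster order groups are available on this feature set
//     }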
+ if self.supports_raster_order_groups() { + 8 + } else { + 0 + } + } + + pub fn max_buffer_length(&self) -> u32 { + if self.os() == OS::macOS && self.os_version() >= 12 { + 1 * GB + } else { + 256 * MB + } + } + + pub fn min_buffer_offset_alignment(&self) -> u32 { + if self.os() == OS::macOS { + 256 + } else { + 4 + } + } + + pub fn max_1d_texture_size(&self) -> u32 { + match (self.os(), self.gpu_family()) { + (OS::iOS, 1) | (OS::iOS, 2) => { + if self.version() <= 2 { + 4096 + } else { + 8192 + } + } + (OS::tvOS, 1) => 8192, + _ => 16384, + } + } + + pub fn max_2d_texture_size(&self) -> u32 { + match (self.os(), self.gpu_family()) { + (OS::iOS, 1) | (OS::iOS, 2) => { + if self.version() <= 2 { + 4096 + } else { + 8192 + } + } + (OS::tvOS, 1) => 8192, + _ => 16384, + } + } + + pub fn max_cube_map_texture_size(&self) -> u32 { + match (self.os(), self.gpu_family()) { + (OS::iOS, 1) | (OS::iOS, 2) => { + if self.version() <= 2 { + 4096 + } else { + 8192 + } + } + (OS::tvOS, 1) => 8192, + _ => 16384, + } + } + + pub fn max_3d_texture_size(&self) -> u32 { + 2048 + } + + pub fn max_array_layers(&self) -> u32 { + 2048 + } + + pub fn copy_texture_buffer_alignment(&self) -> u32 { + match (self.os(), self.gpu_family()) { + (OS::iOS, 1) | (OS::iOS, 2) | (OS::tvOS, 1) => 64, + (OS::iOS, _) | (OS::tvOS, _) => 16, + (OS::macOS, _) => 256, + } + } + + + + pub fn new_texture_buffer_alignment(&self) -> Option { + match self.os() { + OS::iOS => { + if self.os_version() >= 11 { + None + } else if self.gpu_family() == 3 { + Some(16) + } else { + Some(64) + } + } + OS::tvOS => { + if self.os_version() >= 11 { + None + } else { + Some(64) + } + } + OS::macOS => None, + } + } + + pub fn max_color_render_targets(&self) -> u32 { + if self.os() == OS::iOS && self.gpu_family() == 1 { + 4 + } else { + 8 + } + } + + pub fn max_point_primitive_size(&self) -> u32 { + 511 + } + + pub fn max_total_color_render_target_size(&self) -> Option { + match (self.os(), self.gpu_family()) { + (OS::iOS, 1) => Some(128), + (OS::iOS, 2) | (OS::iOS, 3) => Some(256), + (OS::iOS, _) => Some(512), + (OS::tvOS, _) => Some(256), + (OS::macOS, _) => None, + } + } + + pub fn max_visibility_query_offset(&self) -> u32 { + 64 * KB - 8 + } + + pub fn a8_unorm_capabilities(&self) -> PixelFormatCapabilities { + PixelFormatCapabilities::Filter + } + + pub fn r8_unorm_capabilities(&self) -> PixelFormatCapabilities { + PixelFormatCapabilities::all() + } + + pub fn r8_unorm_srgb_capabilities(&self) -> PixelFormatCapabilities { + if self.os() == OS::macOS { + PixelFormatCapabilities::empty() + } else if self.supports_srgb_writes() { + PixelFormatCapabilities::all() + } else { + !PixelFormatCapabilities::Write + } + } + + pub fn r8_snorm_capabilities(&self) -> PixelFormatCapabilities { + if self.os() == OS::iOS && self.gpu_family() == 1 { + !PixelFormatCapabilities::Resolve + } else { + PixelFormatCapabilities::all() + } + } + + pub fn r8_uint_capabilities(&self) -> PixelFormatCapabilities { + PixelFormatCapabilities::Write + | PixelFormatCapabilities::Color + | PixelFormatCapabilities::Msaa + } + + pub fn r8_sint_capabilities(&self) -> PixelFormatCapabilities { + PixelFormatCapabilities::Write + | PixelFormatCapabilities::Color + | PixelFormatCapabilities::Msaa + } + + pub fn r16_unorm_capabilities(&self) -> PixelFormatCapabilities { + if self.os() != OS::macOS { + !PixelFormatCapabilities::Resolve + } else { + PixelFormatCapabilities::all() + } + } + + pub fn r16_snorm_capabilities(&self) -> PixelFormatCapabilities { + if self.os() != OS::macOS { + 
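Each `*_capabilities` getter in this region returns a `PixelFormatCapabilities` bitflags value, so per-format support checks compose with the usual bitflags operations; `!PixelFormatCapabilities::Write` reads as "everything except Write". A hedged sketch (`Caps` again stands in for the implementing type):

    fn can_msaa_render_r16_float(caps: &Caps) -> bool {
        // An MSAA color attachment needs both the Color and Msaa bits.
        let needed = PixelFormatCapabilities::Color | PixelFormatCapabilities::Msaa;
        caps.r16_float_capabilities().contains(needed)
    }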
!PixelFormatCapabilities::Resolve + } else { + PixelFormatCapabilities::all() + } + } + + pub fn r16_uint_capabilities(&self) -> PixelFormatCapabilities { + PixelFormatCapabilities::Write + | PixelFormatCapabilities::Color + | PixelFormatCapabilities::Msaa + } + + pub fn r16_sint_capabilities(&self) -> PixelFormatCapabilities { + PixelFormatCapabilities::Write + | PixelFormatCapabilities::Color + | PixelFormatCapabilities::Msaa + } + + pub fn r16_float_capabilities(&self) -> PixelFormatCapabilities { + PixelFormatCapabilities::all() + } + + pub fn rg8_unorm_capabilities(&self) -> PixelFormatCapabilities { + PixelFormatCapabilities::all() + } + + pub fn rg8_unorm_srgb_capabilities(&self) -> PixelFormatCapabilities { + if self.os() == OS::macOS { + PixelFormatCapabilities::empty() + } else if self.supports_srgb_writes() { + PixelFormatCapabilities::all() + } else { + !PixelFormatCapabilities::Write + } + } + + pub fn rg8_snorm_capabilities(&self) -> PixelFormatCapabilities { + if self.os() == OS::iOS && self.gpu_family() == 1 { + !PixelFormatCapabilities::Resolve + } else { + PixelFormatCapabilities::all() + } + } + + pub fn rg8_uint_capabilities(&self) -> PixelFormatCapabilities { + PixelFormatCapabilities::Write + | PixelFormatCapabilities::Color + | PixelFormatCapabilities::Msaa + } + + pub fn rg8_sint_capabilities(&self) -> PixelFormatCapabilities { + PixelFormatCapabilities::Write + | PixelFormatCapabilities::Color + | PixelFormatCapabilities::Msaa + } + + pub fn b5_g6_r5_unorm_capabilities(&self) -> PixelFormatCapabilities { + if self.os() == OS::macOS { + PixelFormatCapabilities::empty() + } else { + !PixelFormatCapabilities::Write + } + } + + pub fn a1_bgr5_unorm_capabilities(&self) -> PixelFormatCapabilities { + if self.os() == OS::macOS { + PixelFormatCapabilities::empty() + } else { + !PixelFormatCapabilities::Write + } + } + + pub fn abgr4_unorm_capabilities(&self) -> PixelFormatCapabilities { + if self.os() == OS::macOS { + PixelFormatCapabilities::empty() + } else { + !PixelFormatCapabilities::Write + } + } + + pub fn bgr5_a1_unorm_capabilities(&self) -> PixelFormatCapabilities { + if self.os() == OS::macOS { + PixelFormatCapabilities::empty() + } else { + !PixelFormatCapabilities::Write + } + } + + pub fn r32_uint_capabilities(&self) -> PixelFormatCapabilities { + if self.os() == OS::iOS && self.os_version() == 8 { + PixelFormatCapabilities::Color + } else if self.os() == OS::macOS { + PixelFormatCapabilities::Color + | PixelFormatCapabilities::Write + | PixelFormatCapabilities::Msaa + } else { + PixelFormatCapabilities::Color | PixelFormatCapabilities::Write + } + } + + pub fn r32_sint_capabilities(&self) -> PixelFormatCapabilities { + if self.os() == OS::iOS && self.os_version() == 8 { + PixelFormatCapabilities::Color + } else if self.os() == OS::macOS { + PixelFormatCapabilities::Color + | PixelFormatCapabilities::Write + | PixelFormatCapabilities::Msaa + } else { + PixelFormatCapabilities::Color | PixelFormatCapabilities::Write + } + } + + pub fn r32_float_capabilities(&self) -> PixelFormatCapabilities { + if self.os() == OS::iOS && self.os_version() == 8 { + PixelFormatCapabilities::Color + | PixelFormatCapabilities::Blend + | PixelFormatCapabilities::Msaa + } else if self.os() == OS::macOS { + PixelFormatCapabilities::all() + } else { + PixelFormatCapabilities::Write + | PixelFormatCapabilities::Color + | PixelFormatCapabilities::Blend + | PixelFormatCapabilities::Msaa + } + } + + pub fn rg16_unorm_capabilities(&self) -> PixelFormatCapabilities { + if self.os() == 
OS::macOS { + PixelFormatCapabilities::all() + } else { + !PixelFormatCapabilities::Resolve + } + } + + pub fn rg16_snorm_capabilities(&self) -> PixelFormatCapabilities { + if self.os() == OS::macOS { + PixelFormatCapabilities::all() + } else { + !PixelFormatCapabilities::Resolve + } + } + + pub fn rg16_uint_capabilities(&self) -> PixelFormatCapabilities { + PixelFormatCapabilities::Write + | PixelFormatCapabilities::Color + | PixelFormatCapabilities::Msaa + } + + pub fn rg16_sint_capabilities(&self) -> PixelFormatCapabilities { + PixelFormatCapabilities::Write + | PixelFormatCapabilities::Color + | PixelFormatCapabilities::Msaa + } + + pub fn rg16_float_capabilities(&self) -> PixelFormatCapabilities { + PixelFormatCapabilities::all() + } + + pub fn rgba8_unorm_capabilities(&self) -> PixelFormatCapabilities { + PixelFormatCapabilities::all() + } + + pub fn rgba8_unorm_srgb_capabilities(&self) -> PixelFormatCapabilities { + if self.supports_srgb_writes() { + PixelFormatCapabilities::all() + } else { + !PixelFormatCapabilities::Write + } + } + + pub fn rgba8_snorm_capabilities(&self) -> PixelFormatCapabilities { + if self.os() == OS::iOS && self.gpu_family() == 1 { + !PixelFormatCapabilities::Resolve + } else { + PixelFormatCapabilities::all() + } + } + + pub fn rgba8_uint_capabilities(&self) -> PixelFormatCapabilities { + PixelFormatCapabilities::Write + | PixelFormatCapabilities::Color + | PixelFormatCapabilities::Msaa + } + + pub fn rgba8_sint_capabilities(&self) -> PixelFormatCapabilities { + PixelFormatCapabilities::Write + | PixelFormatCapabilities::Color + | PixelFormatCapabilities::Msaa + } + + pub fn bgra8_unorm_capabilities(&self) -> PixelFormatCapabilities { + PixelFormatCapabilities::all() + } + + pub fn bgra8_unorm_srgb_capabilities(&self) -> PixelFormatCapabilities { + if self.supports_srgb_writes() { + PixelFormatCapabilities::all() + } else { + !PixelFormatCapabilities::Write + } + } + + pub fn rgb10_a2_unorm_capabilities(&self) -> PixelFormatCapabilities { + let supports_writes = match self.os() { + OS::iOS => self.gpu_family() >= 3, + OS::tvOS => self.gpu_family() >= 2, + OS::macOS => true, + }; + if supports_writes { + PixelFormatCapabilities::all() + } else { + !PixelFormatCapabilities::Write + } + } + + pub fn rgb10_a2_uint_capabilities(&self) -> PixelFormatCapabilities { + let supports_writes = match self.os() { + OS::iOS => self.gpu_family() >= 3, + OS::tvOS => self.gpu_family() >= 2, + OS::macOS => true, + }; + if supports_writes { + PixelFormatCapabilities::Write + | PixelFormatCapabilities::Color + | PixelFormatCapabilities::Msaa + } else { + PixelFormatCapabilities::Color | PixelFormatCapabilities::Msaa + } + } + + pub fn rg11_b10_float_capabilities(&self) -> PixelFormatCapabilities { + let supports_writes = match self.os() { + OS::iOS => self.gpu_family() >= 3, + OS::tvOS => self.gpu_family() >= 2, + OS::macOS => true, + }; + if supports_writes { + PixelFormatCapabilities::all() + } else { + !PixelFormatCapabilities::Write + } + } + + pub fn rgb9_e5_float_capabilities(&self) -> PixelFormatCapabilities { + if self.os() == OS::macOS { + PixelFormatCapabilities::Filter + } else { + let supports_writes = match self.os() { + OS::iOS => self.gpu_family() >= 3, + OS::tvOS => self.gpu_family() >= 2, + OS::macOS => false, + }; + if supports_writes { + PixelFormatCapabilities::all() + } else { + !PixelFormatCapabilities::Write + } + } + } + + pub fn rg32_uint_capabilities(&self) -> PixelFormatCapabilities { + if self.os() == OS::iOS && self.os_version() == 8 { + 
PixelFormatCapabilities::Color + } else if self.os() == OS::macOS { + PixelFormatCapabilities::Color + | PixelFormatCapabilities::Write + | PixelFormatCapabilities::Msaa + } else { + PixelFormatCapabilities::Color | PixelFormatCapabilities::Write + } + } + + pub fn rg32_sint_capabilities(&self) -> PixelFormatCapabilities { + if self.os() == OS::iOS && self.os_version() == 8 { + PixelFormatCapabilities::Color + } else if self.os() == OS::macOS { + PixelFormatCapabilities::Color + | PixelFormatCapabilities::Write + | PixelFormatCapabilities::Msaa + } else { + PixelFormatCapabilities::Color | PixelFormatCapabilities::Write + } + } + + pub fn rg32_float_capabilities(&self) -> PixelFormatCapabilities { + if self.os() == OS::macOS { + PixelFormatCapabilities::all() + } else if self.os() == OS::iOS && self.os_version() == 8 { + PixelFormatCapabilities::Color | PixelFormatCapabilities::Blend + } else { + PixelFormatCapabilities::Write + | PixelFormatCapabilities::Color + | PixelFormatCapabilities::Blend + } + } + + pub fn rgba16_unorm_capabilities(&self) -> PixelFormatCapabilities { + if self.os() == OS::macOS { + PixelFormatCapabilities::all() + } else { + !PixelFormatCapabilities::Write + } + } + + pub fn rgba16_snorm_capabilities(&self) -> PixelFormatCapabilities { + if self.os() == OS::macOS { + PixelFormatCapabilities::all() + } else { + !PixelFormatCapabilities::Write + } + } + + pub fn rgba16_uint_capabilities(&self) -> PixelFormatCapabilities { + PixelFormatCapabilities::Write + | PixelFormatCapabilities::Color + | PixelFormatCapabilities::Msaa + } + + pub fn rgba16_sint_capabilities(&self) -> PixelFormatCapabilities { + PixelFormatCapabilities::Write + | PixelFormatCapabilities::Color + | PixelFormatCapabilities::Msaa + } + + pub fn rgba16_float_capabilities(&self) -> PixelFormatCapabilities { + PixelFormatCapabilities::all() + } + + pub fn rgba32_uint_capabilities(&self) -> PixelFormatCapabilities { + if self.os() == OS::iOS && self.os_version() == 8 { + PixelFormatCapabilities::Color + } else if self.os() == OS::macOS { + PixelFormatCapabilities::Color + | PixelFormatCapabilities::Write + | PixelFormatCapabilities::Msaa + } else { + PixelFormatCapabilities::Color | PixelFormatCapabilities::Write + } + } + + pub fn rgba32_sint_capabilities(&self) -> PixelFormatCapabilities { + if self.os() == OS::iOS && self.os_version() == 8 { + PixelFormatCapabilities::Color + } else if self.os() == OS::macOS { + PixelFormatCapabilities::Color + | PixelFormatCapabilities::Write + | PixelFormatCapabilities::Msaa + } else { + PixelFormatCapabilities::Color | PixelFormatCapabilities::Write + } + } + + pub fn rgba32_float_capabilities(&self) -> PixelFormatCapabilities { + if self.os() == OS::macOS { + PixelFormatCapabilities::all() + } else if self.os() == OS::iOS && self.version() == 8 { + PixelFormatCapabilities::Color + } else { + PixelFormatCapabilities::Write | PixelFormatCapabilities::Color + } + } + + pub fn pvrtc_pixel_formats_capabilities(&self) -> PixelFormatCapabilities { + if self.supports_pvrtc_pixel_formats() { + PixelFormatCapabilities::Filter + } else { + PixelFormatCapabilities::empty() + } + } + + pub fn eac_etc_pixel_formats_capabilities(&self) -> PixelFormatCapabilities { + if self.supports_eac_etc_pixel_formats() { + PixelFormatCapabilities::Filter + } else { + PixelFormatCapabilities::empty() + } + } + + pub fn astc_pixel_formats_capabilities(&self) -> PixelFormatCapabilities { + if self.supports_astc_pixel_formats() { + PixelFormatCapabilities::Filter + } else { + 
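The compressed-format families (PVRTC, EAC/ETC, ASTC here, BC just below) report Filter when supported and empty() otherwise, which makes a fallback chain natural. An illustrative sketch:

    fn pick_compressed_family(caps: &Caps) -> &'static str {
        // Prefer ASTC, then EAC/ETC, then BC; fall back to uncompressed.
        if !caps.astc_pixel_formats_capabilities().is_empty() {
            "ASTC"
        } else if !caps.eac_etc_pixel_formats_capabilities().is_empty() {
            "EAC/ETC"
        } else if !caps.bc_pixel_formats_capabilities().is_empty() {
            "BC"
        } else {
            "uncompressed"
        }
    }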
PixelFormatCapabilities::empty() + } + } + + pub fn bc_pixel_formats_capabilities(&self) -> PixelFormatCapabilities { + if self.supports_bc_pixel_formats() { + PixelFormatCapabilities::Filter + } else { + PixelFormatCapabilities::empty() + } + } + + pub fn gbgr422_capabilities(&self) -> PixelFormatCapabilities { + PixelFormatCapabilities::Filter + } + + pub fn bgrg422_capabilities(&self) -> PixelFormatCapabilities { + PixelFormatCapabilities::Filter + } + + pub fn depth16_unorm_capabilities(&self) -> PixelFormatCapabilities { + if self.supports_depth_16_pixel_format() { + PixelFormatCapabilities::Filter + | PixelFormatCapabilities::Msaa + | PixelFormatCapabilities::Resolve + } else { + PixelFormatCapabilities::empty() + } + } + + pub fn depth32_float_capabilities(&self) -> PixelFormatCapabilities { + if self.os() == OS::macOS { + PixelFormatCapabilities::Filter + | PixelFormatCapabilities::Msaa + | PixelFormatCapabilities::Resolve + } else if self.supports_msaa_depth_resolve() { + PixelFormatCapabilities::Msaa | PixelFormatCapabilities::Resolve + } else { + PixelFormatCapabilities::Msaa + } + } + + pub fn stencil8_capabilities(&self) -> PixelFormatCapabilities { + PixelFormatCapabilities::Msaa + } + + pub fn depth24_unorm_stencil8_capabilities(&self) -> PixelFormatCapabilities { + if self.os() == OS::macOS { + PixelFormatCapabilities::Filter + | PixelFormatCapabilities::Msaa + | PixelFormatCapabilities::Resolve + } else { + PixelFormatCapabilities::empty() + } + } + + pub fn depth32_float_stencil8_capabilities(&self) -> PixelFormatCapabilities { + if self.os() == OS::macOS { + PixelFormatCapabilities::Filter + | PixelFormatCapabilities::Msaa + | PixelFormatCapabilities::Resolve + } else if self.supports_msaa_depth_resolve() { + PixelFormatCapabilities::Msaa | PixelFormatCapabilities::Resolve + } else { + PixelFormatCapabilities::Msaa + } + } + + pub fn x24_stencil8_capabilities(&self) -> PixelFormatCapabilities { + if self.os() == OS::macOS { + PixelFormatCapabilities::Msaa + } else { + PixelFormatCapabilities::empty() + } + } + + pub fn x32_stencil8_capabilities(&self) -> PixelFormatCapabilities { + PixelFormatCapabilities::Msaa + } + + pub fn bgra10_xr_capabilities(&self) -> PixelFormatCapabilities { + if self.supports_extended_range_pixel_formats() { + PixelFormatCapabilities::all() + } else { + PixelFormatCapabilities::empty() + } + } + + pub fn bgra10_xr_srgb_capabilities(&self) -> PixelFormatCapabilities { + if self.supports_extended_range_pixel_formats() { + PixelFormatCapabilities::all() + } else { + PixelFormatCapabilities::empty() + } + } + + pub fn bgr10_xr_capabilities(&self) -> PixelFormatCapabilities { + if self.supports_extended_range_pixel_formats() { + PixelFormatCapabilities::all() + } else { + PixelFormatCapabilities::empty() + } + } + + pub fn bgr10_xr_srgb_capabilities(&self) -> PixelFormatCapabilities { + if self.supports_extended_range_pixel_formats() { + PixelFormatCapabilities::all() + } else { + PixelFormatCapabilities::empty() + } + } + + pub fn bgr10_a2_unorm_capabilities(&self) -> PixelFormatCapabilities { + if self.supports_wide_color_pixel_format() { + if self.os() == OS::macOS { + !PixelFormatCapabilities::Write + } else { + PixelFormatCapabilities::all() + } + } else { + PixelFormatCapabilities::empty() + } + } +} + +#[allow(non_camel_case_types)] +#[repr(u64)] +#[derive(Copy, Clone, Debug)] +pub enum MTLArgumentBuffersTier { + tier1 = 0, + tier2 = 1, +} + +bitflags! 
{ + struct MTLPipelineOption: NSUInteger { + const ArgumentInfo = 1 << 0; + const BufferTypeInfo = 1 << 1; + } +} + +#[link(name = "Metal", kind = "framework")] +extern "C" { + fn MTLCreateSystemDefaultDevice() -> *mut MTLDevice; + #[cfg(not(target_os = "ios"))] + fn MTLCopyAllDevices() -> *mut Object; +} + +#[allow(non_camel_case_types)] +type dispatch_data_t = id; +#[allow(non_camel_case_types)] +type dispatch_queue_t = id; +#[allow(non_camel_case_types)] +type dispatch_block_t = *const Block<(), ()>; + +#[cfg_attr( + any(target_os = "macos", target_os = "ios"), + link(name = "System", kind = "dylib") +)] +#[cfg_attr( + not(any(target_os = "macos", target_os = "ios")), + link(name = "dispatch", kind = "dylib") +)] +#[allow(improper_ctypes)] +extern "C" { + static _dispatch_main_q: dispatch_queue_t; + + fn dispatch_data_create( + buffer: *const std::ffi::c_void, + size: crate::c_size_t, + queue: dispatch_queue_t, + destructor: dispatch_block_t, + ) -> dispatch_data_t; + fn dispatch_release(object: dispatch_data_t); +} + + + + + + + +pub enum MTLDevice {} + +foreign_obj_type! { + type CType = MTLDevice; + pub struct Device; + pub struct DeviceRef; +} + +impl Device { + pub fn system_default() -> Option { + + unsafe { MTLCreateSystemDefaultDevice().as_mut().map(|x| Self(x)) } + } + + pub fn all() -> Vec { + #[cfg(target_os = "ios")] + { + Self::system_default().into_iter().collect() + } + #[cfg(not(target_os = "ios"))] + unsafe { + let array = MTLCopyAllDevices(); + let count: NSUInteger = msg_send![array, count]; + let ret = (0..count) + .map(|i| msg_send![array, objectAtIndex: i]) + + + + .map(|device: *mut Object| msg_send![device, retain]) + .collect(); + let () = msg_send![array, release]; + ret + } + } +} + +impl DeviceRef { + pub fn name(&self) -> &str { + unsafe { + let name = msg_send![self, name]; + crate::nsstring_as_str(name) + } + } + + #[cfg(feature = "private")] + pub unsafe fn vendor(&self) -> &str { + let name = msg_send![self, vendorName]; + crate::nsstring_as_str(name) + } + + #[cfg(feature = "private")] + pub unsafe fn family_name(&self) -> &str { + let name = msg_send![self, familyName]; + crate::nsstring_as_str(name) + } + + pub fn registry_id(&self) -> u64 { + unsafe { msg_send![self, registryID] } + } + + pub fn max_threads_per_threadgroup(&self) -> MTLSize { + unsafe { msg_send![self, maxThreadsPerThreadgroup] } + } + + pub fn is_low_power(&self) -> bool { + unsafe { + match msg_send![self, isLowPower] { + YES => true, + NO => false, + _ => unreachable!(), + } + } + } + + pub fn is_headless(&self) -> bool { + unsafe { + match msg_send![self, isHeadless] { + YES => true, + NO => false, + _ => unreachable!(), + } + } + } + + pub fn is_removable(&self) -> bool { + unsafe { + match msg_send![self, isRemovable] { + YES => true, + NO => false, + _ => unreachable!(), + } + } + } + + pub fn supports_feature_set(&self, feature: MTLFeatureSet) -> bool { + unsafe { + match msg_send![self, supportsFeatureSet: feature] { + YES => true, + NO => false, + _ => unreachable!(), + } + } + } + + pub fn supports_sample_count(&self, count: NSUInteger) -> bool { + unsafe { + match msg_send![self, supportsTextureSampleCount: count] { + YES => true, + NO => false, + _ => unreachable!(), + } + } + } + + pub fn d24_s8_supported(&self) -> bool { + unsafe { + match msg_send![self, isDepth24Stencil8PixelFormatSupported] { + YES => true, + NO => false, + _ => unreachable!(), + } + } + } + + pub fn new_command_queue(&self) -> CommandQueue { + unsafe { msg_send![self, newCommandQueue] } + } + + pub 
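A minimal bring-up sketch for the device entry points above, assuming the vendored crate is in scope as `metal`; the preference for a low-power (integrated) GPU is illustrative, not part of the patch:

    use metal::*;

    fn pick_device() -> Option<Device> {
        // Prefer the system default; otherwise scan all devices for an integrated GPU.
        Device::system_default()
            .or_else(|| Device::all().into_iter().find(|d| d.is_low_power()))
    }

    fn main() {
        if let Some(device) = pick_device() {
            println!("using {} (registry id {})", device.name(), device.registry_id());
            let _queue = device.new_command_queue();
        }
    }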
fn new_command_queue_with_max_command_buffer_count(
+        &self,
+        count: NSUInteger,
+    ) -> CommandQueue {
+        unsafe { msg_send![self, newCommandQueueWithMaxCommandBufferCount: count] }
+    }
+
+    pub fn new_default_library(&self) -> Library {
+        unsafe { msg_send![self, newDefaultLibrary] }
+    }
+
+    pub fn new_library_with_source(
+        &self,
+        src: &str,
+        options: &CompileOptionsRef,
+    ) -> Result<Library, String> {
+        use cocoa::base::nil as cocoa_nil;
+        use cocoa::foundation::NSString as cocoa_NSString;
+
+        unsafe {
+            let source = cocoa_NSString::alloc(cocoa_nil).init_str(src);
+            let mut err: *mut Object = ptr::null_mut();
+            let library: *mut MTLLibrary = msg_send![self, newLibraryWithSource:source
+                                                     options:options
+                                                     error:&mut err];
+            let () = msg_send![source, release];
+            if !err.is_null() {
+                let desc: *mut Object = msg_send![err, localizedDescription];
+                let compile_error: *const std::os::raw::c_char = msg_send![desc, UTF8String];
+                let message = CStr::from_ptr(compile_error).to_string_lossy().into_owned();
+                if library.is_null() {
+                    let () = msg_send![err, release];
+                    return Err(message);
+                } else {
+                    warn!("Shader warnings: {}", message);
+                }
+            }
+
+            assert!(!library.is_null());
+            Ok(Library::from_ptr(library))
+        }
+    }
+
+    pub fn new_library_with_file<P: AsRef<Path>>(&self, file: P) -> Result<Library, String> {
+        use cocoa::base::nil as cocoa_nil;
+        use cocoa::foundation::NSString as cocoa_NSString;
+
+        unsafe {
+            let filename =
+                cocoa_NSString::alloc(cocoa_nil).init_str(file.as_ref().to_string_lossy().as_ref());
+
+            let library: *mut MTLLibrary = try_objc! { err =>
+                msg_send![self, newLibraryWithFile:filename.as_ref()
+                                             error:&mut err]
+            };
+
+            Ok(Library::from_ptr(library))
+        }
+    }
+
+    pub fn new_library_with_data(&self, library_data: &[u8]) -> Result<Library, String> {
+        unsafe {
+            let destructor_block = ConcreteBlock::new(|| {}).copy();
+            let data = dispatch_data_create(
+                library_data.as_ptr() as *const std::ffi::c_void,
+                library_data.len() as crate::c_size_t,
+                &_dispatch_main_q as *const _ as dispatch_queue_t,
+                &*destructor_block.deref(),
+            );
+
+            let library: *mut MTLLibrary = try_objc! { err =>
+                msg_send![self, newLibraryWithData:data
+                                             error:&mut err]
+            };
+            dispatch_release(data);
+            Ok(Library::from_ptr(library))
+        }
+    }
+
+    pub fn new_render_pipeline_state_with_reflection(
+        &self,
+        descriptor: &RenderPipelineDescriptorRef,
+        reflection: &RenderPipelineReflectionRef,
+    ) -> Result<RenderPipelineState, String> {
+        unsafe {
+            let reflection_options =
+                MTLPipelineOption::ArgumentInfo | MTLPipelineOption::BufferTypeInfo;
+
+            let pipeline_state: *mut MTLRenderPipelineState = try_objc! { err =>
+                msg_send![self, newRenderPipelineStateWithDescriptor:descriptor
+                                                             options:reflection_options
+                                                          reflection:reflection
+                                                               error:&mut err]
+            };
+
+            Ok(RenderPipelineState::from_ptr(pipeline_state))
+        }
+    }
+
+    pub fn new_render_pipeline_state(
+        &self,
+        descriptor: &RenderPipelineDescriptorRef,
+    ) -> Result<RenderPipelineState, String> {
+        unsafe {
+            let pipeline_state: *mut MTLRenderPipelineState = try_objc! { err =>
+                msg_send![self, newRenderPipelineStateWithDescriptor:descriptor
+                                                               error:&mut err]
+            };
+
+            Ok(RenderPipelineState::from_ptr(pipeline_state))
+        }
+    }
+
+    pub fn new_compute_pipeline_state_with_function(
+        &self,
+        function: &FunctionRef,
+    ) -> Result<ComputePipelineState, String> {
+        unsafe {
+            let pipeline_state: *mut MTLComputePipelineState = try_objc!
{ err => + msg_send![self, newComputePipelineStateWithFunction:function + error:&mut err] + }; + + Ok(ComputePipelineState::from_ptr(pipeline_state)) + } + } + + pub unsafe fn new_compute_pipeline_state( + &self, + descriptor: &ComputePipelineDescriptorRef, + ) -> Result { + let pipeline_state: *mut MTLComputePipelineState = try_objc! { err => + msg_send![self, newComputePipelineStateWithDescriptor:descriptor + error:&mut err] + }; + + Ok(ComputePipelineState::from_ptr(pipeline_state)) + } + + pub fn new_buffer(&self, length: u64, options: MTLResourceOptions) -> Buffer { + unsafe { + msg_send![self, newBufferWithLength:length + options:options] + } + } + + pub fn new_buffer_with_data( + &self, + bytes: *const std::ffi::c_void, + length: NSUInteger, + options: MTLResourceOptions, + ) -> Buffer { + unsafe { + msg_send![self, newBufferWithBytes:bytes + length:length + options:options] + } + } + + pub fn new_texture(&self, descriptor: &TextureDescriptorRef) -> Texture { + unsafe { msg_send![self, newTextureWithDescriptor: descriptor] } + } + + pub fn new_sampler(&self, descriptor: &SamplerDescriptorRef) -> SamplerState { + unsafe { msg_send![self, newSamplerStateWithDescriptor: descriptor] } + } + + pub fn new_depth_stencil_state( + &self, + descriptor: &DepthStencilDescriptorRef, + ) -> DepthStencilState { + unsafe { msg_send![self, newDepthStencilStateWithDescriptor: descriptor] } + } + + pub fn argument_buffers_support(&self) -> Option { + unsafe { + let has_arg_buffers: BOOL = + msg_send![self, respondsToSelector: sel!(argumentBuffersSupport)]; + if has_arg_buffers == YES { + Some(msg_send![self, argumentBuffersSupport]) + } else { + None + } + } + } + + pub fn new_argument_encoder( + &self, + arguments: &ArrayRef, + ) -> ArgumentEncoder { + unsafe { msg_send![self, newArgumentEncoderWithArguments: arguments] } + } + + pub fn new_heap(&self, descriptor: &HeapDescriptorRef) -> Heap { + unsafe { msg_send![self, newHeapWithDescriptor: descriptor] } + } + + pub fn heap_buffer_size_and_align( + &self, + length: NSUInteger, + options: MTLResourceOptions, + ) -> MTLSizeAndAlign { + unsafe { msg_send![self, heapBufferSizeAndAlignWithLength: length options: options] } + } + + pub fn heap_texture_size_and_align( + &self, + descriptor: &TextureDescriptorRef, + ) -> MTLSizeAndAlign { + unsafe { msg_send![self, heapTextureSizeAndAlignWithDescriptor: descriptor] } + } + + pub fn minimum_linear_texture_alignment_for_pixel_format( + &self, + format: MTLPixelFormat, + ) -> NSUInteger { + unsafe { msg_send![self, minimumLinearTextureAlignmentForPixelFormat: format] } + } + + pub fn minimum_texture_buffer_alignment_for_pixel_format( + &self, + format: MTLPixelFormat, + ) -> NSUInteger { + unsafe { msg_send![self, minimumTextureBufferAlignmentForPixelFormat: format] } + } + + pub fn max_argument_buffer_sampler_count(&self) -> NSUInteger { + unsafe { msg_send![self, maxArgumentBufferSamplerCount] } + } +} diff --git a/third_party/rust/metal/src/drawable.rs b/third_party/rust/metal/src/drawable.rs new file mode 100644 index 000000000000..d5fb83c7eb44 --- /dev/null +++ b/third_party/rust/metal/src/drawable.rs @@ -0,0 +1,20 @@ + + + + + + + +pub enum MTLDrawable {} + +foreign_obj_type! 
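Tying the pieces above together: compile MSL source, then build a compute pipeline from the resulting function. The shader is illustrative, and `Library::get_function` is assumed from the library.rs half of this patch (its exact signature is not visible in this hunk):

    use metal::*;

    fn build_fill_kernel(device: &DeviceRef) -> Result<ComputePipelineState, String> {
        let src = "
            kernel void fill(device uint *buf [[buffer(0)]],
                             uint idx [[thread_position_in_grid]]) {
                buf[idx] = idx;
            }";
        let library = device.new_library_with_source(src, &CompileOptions::new())?;
        // Assumed: get_function(name, constants) -> Result<Function, String>.
        let fill = library.get_function("fill", None)?;
        device.new_compute_pipeline_state_with_function(&fill)
    }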
{ + type CType = MTLDrawable; + pub struct Drawable; + pub struct DrawableRef; +} + +impl DrawableRef { + pub fn present(&self) { + unsafe { msg_send![self, present] } + } +} diff --git a/third_party/rust/metal/src/encoder.rs b/third_party/rust/metal/src/encoder.rs new file mode 100644 index 000000000000..0c535d4c9a1e --- /dev/null +++ b/third_party/rust/metal/src/encoder.rs @@ -0,0 +1,1069 @@ + + + + + + + +use super::*; + +use cocoa::foundation::{NSInteger, NSRange, NSUInteger}; + +use std::ops::Range; + +#[repr(u64)] +#[derive(Copy, Clone, Debug, Eq, PartialEq, Hash)] +pub enum MTLPrimitiveType { + Point = 0, + Line = 1, + LineStrip = 2, + Triangle = 3, + TriangleStrip = 4, +} + +#[repr(u64)] +#[derive(Copy, Clone, Debug, Eq, PartialEq, Hash)] +pub enum MTLIndexType { + UInt16 = 0, + UInt32 = 1, +} + +#[repr(u64)] +#[derive(Copy, Clone, Debug, Eq, PartialEq, Hash)] +pub enum MTLVisibilityResultMode { + Disabled = 0, + Boolean = 1, + Counting = 2, +} + +#[repr(u64)] +#[derive(Copy, Clone, Debug, Eq, PartialEq, Hash)] +pub enum MTLCullMode { + None = 0, + Front = 1, + Back = 2, +} + +#[repr(u64)] +#[derive(Copy, Clone, Debug, Eq, PartialEq, Hash)] +pub enum MTLWinding { + Clockwise = 0, + CounterClockwise = 1, +} + +#[repr(u64)] +#[derive(Copy, Clone, Debug, Eq, PartialEq, Hash)] +pub enum MTLDepthClipMode { + Clip = 0, + Clamp = 1, +} + +#[repr(u64)] +#[derive(Copy, Clone, Debug, Eq, PartialEq, Hash)] +pub enum MTLTriangleFillMode { + Fill = 0, + Lines = 1, +} + +bitflags! { + #[allow(non_upper_case_globals)] + pub struct MTLBlitOption: NSUInteger { + const DepthFromDepthStencil = 1 << 0; + const StencilFromDepthStencil = 1 << 1; + const RowLinearPVRTC = 1 << 2; + } +} + +#[repr(C)] +#[derive(Copy, Clone, Debug)] +pub struct MTLScissorRect { + pub x: NSUInteger, + pub y: NSUInteger, + pub width: NSUInteger, + pub height: NSUInteger, +} + +#[repr(C)] +#[derive(Copy, Clone, Debug)] +pub struct MTLViewport { + pub originX: f64, + pub originY: f64, + pub width: f64, + pub height: f64, + pub znear: f64, + pub zfar: f64, +} + +#[repr(C)] +#[derive(Copy, Clone, Debug)] +pub struct MTLDrawPrimitivesIndirectArguments { + pub vertexCount: u32, + pub instanceCount: u32, + pub vertexStart: u32, + pub baseInstance: u32, +} + +#[repr(C)] +#[derive(Copy, Clone, Debug)] +pub struct MTLDrawIndexedPrimitivesIndirectArguments { + pub indexCount: u32, + pub instanceCount: u32, + pub indexStart: u32, + pub baseVertex: i32, + pub baseInstance: u32, +} + +pub enum MTLCommandEncoder {} + +foreign_obj_type! { + type CType = MTLCommandEncoder; + pub struct CommandEncoder; + pub struct CommandEncoderRef; +} + +impl CommandEncoderRef { + pub fn label(&self) -> &str { + unsafe { + let label = msg_send![self, label]; + crate::nsstring_as_str(label) + } + } + + pub fn set_label(&self, label: &str) { + unsafe { + let nslabel = crate::nsstring_from_str(label); + let () = msg_send![self, setLabel: nslabel]; + } + } + + pub fn end_encoding(&self) { + unsafe { + msg_send![self, endEncoding] + } + } +} + +pub enum MTLParallelRenderCommandEncoder {} + +foreign_obj_type! { + type CType = MTLParallelRenderCommandEncoder; + pub struct ParallelRenderCommandEncoder; + pub struct ParallelRenderCommandEncoderRef; + type ParentType = CommandEncoderRef; +} + +impl ParallelRenderCommandEncoderRef { + pub fn render_command_encoder(&self) -> &RenderCommandEncoderRef { + unsafe { msg_send![self, renderCommandEncoder] } + } +} + +pub enum MTLRenderCommandEncoder {} + +foreign_obj_type! 
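The `foreign_obj_type!` wrappers used throughout come in owned/borrowed pairs (retain/release on clone/drop versus a plain reference view), and `ParentType` adds a Deref chain so a child encoder also exposes the `CommandEncoderRef` methods above. A small sketch of what that buys callers:

    use metal::*;

    fn label_and_finish(encoder: &CommandEncoderRef) {
        // Works for any encoder type whose Deref chain ends at CommandEncoderRef.
        encoder.set_label("main pass");
        encoder.end_encoding();
    }

    fn keep_alive(device: Device) -> Device {
        let borrowed: &DeviceRef = &device; // owned -> borrowed, like String -> &str
        borrowed.to_owned()                 // to_owned() retains an independent handle
    }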
{ + type CType = MTLRenderCommandEncoder; + pub struct RenderCommandEncoder; + pub struct RenderCommandEncoderRef; + type ParentType = CommandEncoderRef; +} + +impl RenderCommandEncoderRef { + pub fn set_render_pipeline_state(&self, pipeline_state: &RenderPipelineStateRef) { + unsafe { msg_send![self, setRenderPipelineState: pipeline_state] } + } + + pub fn set_viewport(&self, viewport: MTLViewport) { + unsafe { msg_send![self, setViewport: viewport] } + } + + pub fn set_front_facing_winding(&self, winding: MTLWinding) { + unsafe { msg_send![self, setFrontFacingWinding: winding] } + } + + pub fn set_cull_mode(&self, mode: MTLCullMode) { + unsafe { msg_send![self, setCullMode: mode] } + } + + pub fn set_depth_clip_mode(&self, mode: MTLDepthClipMode) { + unsafe { msg_send![self, setDepthClipMode: mode] } + } + + pub fn set_depth_bias(&self, bias: f32, scale: f32, clamp: f32) { + unsafe { + msg_send![self, setDepthBias:bias + slopeScale:scale + clamp:clamp] + } + } + + pub fn set_scissor_rect(&self, rect: MTLScissorRect) { + unsafe { msg_send![self, setScissorRect: rect] } + } + + pub fn set_triangle_fill_mode(&self, mode: MTLTriangleFillMode) { + unsafe { msg_send![self, setTriangleFillMode: mode] } + } + + pub fn set_blend_color(&self, red: f32, green: f32, blue: f32, alpha: f32) { + unsafe { + msg_send![self, setBlendColorRed:red + green:green + blue:blue + alpha:alpha] + } + } + + pub fn set_depth_stencil_state(&self, depth_stencil_state: &DepthStencilStateRef) { + unsafe { msg_send![self, setDepthStencilState: depth_stencil_state] } + } + + pub fn set_stencil_reference_value(&self, value: u32) { + unsafe { msg_send![self, setStencilReferenceValue: value] } + } + + pub fn set_stencil_front_back_reference_value(&self, front: u32, back: u32) { + unsafe { + msg_send![self, setStencilFrontReferenceValue:front + backReferenceValue:back] + } + } + + pub fn set_visibility_result_mode(&self, mode: MTLVisibilityResultMode, offset: NSUInteger) { + unsafe { + msg_send![self, setVisibilityResultMode:mode + offset:offset] + } + } + + + + pub fn set_vertex_bytes( + &self, + index: NSUInteger, + length: NSUInteger, + bytes: *const std::ffi::c_void, + ) { + unsafe { + msg_send![self, + setVertexBytes:bytes + length:length + atIndex:index + ] + } + } + + pub fn set_vertex_buffer( + &self, + index: NSUInteger, + buffer: Option<&BufferRef>, + offset: NSUInteger, + ) { + unsafe { + msg_send![self, + setVertexBuffer:buffer + offset:offset + atIndex:index + ] + } + } + + pub fn set_vertex_buffers( + &self, + start_index: NSUInteger, + data: &[Option<&BufferRef>], + offsets: &[NSUInteger], + ) { + debug_assert_eq!(offsets.len(), data.len()); + unsafe { + msg_send![self, + setVertexBuffers: data.as_ptr() + offsets: offsets.as_ptr() + withRange: NSRange { + location: start_index, + length: data.len() as _, + } + ] + } + } + + pub fn set_vertex_texture(&self, index: u64, texture: Option<&TextureRef>) { + unsafe { + msg_send![self, + setVertexTexture:texture + atIndex:index + ] + } + } + + pub fn set_vertex_textures(&self, start_index: NSUInteger, data: &[Option<&TextureRef>]) { + unsafe { + msg_send![self, + setVertexTextures: data.as_ptr() + withRange: NSRange { + location: start_index, + length: data.len() as _, + } + ] + } + } + + pub fn set_vertex_sampler_state(&self, index: u64, sampler: Option<&SamplerStateRef>) { + unsafe { + msg_send![self, + setVertexSamplerState:sampler + atIndex:index + ] + } + } + + pub fn set_vertex_sampler_states( + &self, + start_index: NSUInteger, + data: 
&[Option<&SamplerStateRef>], + ) { + unsafe { + msg_send![self, + setVertexSamplerStates: data.as_ptr() + withRange: NSRange { + location: start_index, + length: data.len() as _, + } + ] + } + } + + pub fn set_vertex_sampler_state_with_lod( + &self, + index: NSUInteger, + sampler: Option<&SamplerStateRef>, + lod_clamp: Range, + ) { + unsafe { + msg_send![self, + setVertexSamplerState:sampler + lodMinClamp:lod_clamp.start + lodMaxClamp:lod_clamp.end + atIndex:index + ] + } + } + + + + pub fn set_fragment_bytes( + &self, + index: NSUInteger, + length: NSUInteger, + bytes: *const std::ffi::c_void, + ) { + unsafe { + msg_send![self, + setFragmentBytes:bytes + length:length + atIndex:index + ] + } + } + + pub fn set_fragment_buffer( + &self, + index: NSUInteger, + buffer: Option<&BufferRef>, + offset: NSUInteger, + ) { + unsafe { + msg_send![self, + setFragmentBuffer:buffer + offset:offset + atIndex:index + ] + } + } + + pub fn set_fragment_buffers( + &self, + start_index: NSUInteger, + data: &[Option<&BufferRef>], + offsets: &[NSUInteger], + ) { + debug_assert_eq!(offsets.len(), data.len()); + unsafe { + msg_send![self, + setFragmentBuffers: data.as_ptr() + offsets: offsets.as_ptr() + withRange: NSRange { + location: start_index, + length: data.len() as _, + } + ] + } + } + + pub fn set_fragment_texture(&self, index: NSUInteger, texture: Option<&TextureRef>) { + unsafe { + msg_send![self, + setFragmentTexture:texture + atIndex:index + ] + } + } + + pub fn set_fragment_textures(&self, start_index: NSUInteger, data: &[Option<&TextureRef>]) { + unsafe { + msg_send![self, + setFragmentTextures: data.as_ptr() + withRange: NSRange { + location: start_index, + length: data.len() as _, + } + ] + } + } + + pub fn set_fragment_sampler_state(&self, index: NSUInteger, sampler: Option<&SamplerStateRef>) { + unsafe { + msg_send![self, setFragmentSamplerState:sampler + atIndex:index] + } + } + + pub fn set_fragment_sampler_states( + &self, + start_index: NSUInteger, + data: &[Option<&SamplerStateRef>], + ) { + unsafe { + msg_send![self, + setFragmentSamplerStates: data.as_ptr() + withRange: NSRange { + location: start_index, + length: data.len() as _, + } + ] + } + } + + pub fn set_fragment_sampler_state_with_lod( + &self, + index: NSUInteger, + sampler: Option<&SamplerStateRef>, + lod_clamp: Range, + ) { + unsafe { + msg_send![self, + setFragmentSamplerState:sampler + lodMinClamp:lod_clamp.start + lodMaxClamp:lod_clamp.end + atIndex:index + ] + } + } + + + + pub fn draw_primitives( + &self, + primitive_type: MTLPrimitiveType, + vertex_start: NSUInteger, + vertex_count: NSUInteger, + ) { + unsafe { + msg_send![self, + drawPrimitives: primitive_type + vertexStart: vertex_start + vertexCount: vertex_count + ] + } + } + + pub fn draw_primitives_instanced( + &self, + primitive_type: MTLPrimitiveType, + vertex_start: NSUInteger, + vertex_count: NSUInteger, + instance_count: NSUInteger, + ) { + unsafe { + msg_send![self, + drawPrimitives: primitive_type + vertexStart: vertex_start + vertexCount: vertex_count + instanceCount: instance_count + ] + } + } + + pub fn draw_primitives_instanced_base_instance( + &self, + primitive_type: MTLPrimitiveType, + vertex_start: NSUInteger, + vertex_count: NSUInteger, + instance_count: NSUInteger, + base_instance: NSUInteger, + ) { + unsafe { + msg_send![self, + drawPrimitives: primitive_type + vertexStart: vertex_start + vertexCount: vertex_count + instanceCount: instance_count + baseInstance: base_instance + ] + } + } + + pub fn draw_primitives_indirect( + &self, + 
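A typical encoding sequence with the render-encoder setters above; `encoder`, `pipeline`, and `vbuf` are assumed inputs (command-buffer and render-pass plumbing live outside this hunk):

    use metal::*;

    fn encode_triangle(
        encoder: &RenderCommandEncoderRef,
        pipeline: &RenderPipelineStateRef,
        vbuf: &BufferRef,
    ) {
        encoder.set_render_pipeline_state(pipeline);
        encoder.set_viewport(MTLViewport {
            originX: 0.0, originY: 0.0,
            width: 800.0, height: 600.0,
            znear: 0.0, zfar: 1.0,
        });
        encoder.set_cull_mode(MTLCullMode::Back);
        // Note: the buffer setters here take the index first, then (buffer, offset).
        encoder.set_vertex_buffer(0, Some(vbuf), 0);
        encoder.draw_primitives(MTLPrimitiveType::Triangle, 0, 3);
        encoder.end_encoding();
    }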
primitive_type: MTLPrimitiveType, + indirect_buffer: &BufferRef, + indirect_buffer_offset: NSUInteger, + ) { + unsafe { + msg_send![self, + drawPrimitives: primitive_type + indirectBuffer: indirect_buffer + indirectBufferOffset: indirect_buffer_offset + ] + } + } + + pub fn draw_indexed_primitives( + &self, + primitive_type: MTLPrimitiveType, + index_count: NSUInteger, + index_type: MTLIndexType, + index_buffer: &BufferRef, + index_buffer_offset: NSUInteger, + ) { + unsafe { + msg_send![self, + drawIndexedPrimitives: primitive_type + indexCount: index_count + indexType: index_type + indexBuffer: index_buffer + indexBufferOffset: index_buffer_offset + ] + } + } + + pub fn draw_indexed_primitives_instanced( + &self, + primitive_type: MTLPrimitiveType, + index_count: NSUInteger, + index_type: MTLIndexType, + index_buffer: &BufferRef, + index_buffer_offset: NSUInteger, + instance_count: NSUInteger, + ) { + unsafe { + msg_send![self, + drawIndexedPrimitives: primitive_type + indexCount: index_count + indexType: index_type + indexBuffer: index_buffer + indexBufferOffset: index_buffer_offset + instanceCount: instance_count + ] + } + } + + pub fn draw_indexed_primitives_instanced_base_instance( + &self, + primitive_type: MTLPrimitiveType, + index_count: NSUInteger, + index_type: MTLIndexType, + index_buffer: &BufferRef, + index_buffer_offset: NSUInteger, + instance_count: NSUInteger, + base_vertex: NSInteger, + base_instance: NSUInteger, + ) { + unsafe { + msg_send![self, + drawIndexedPrimitives: primitive_type + indexCount: index_count + indexType: index_type + indexBuffer: index_buffer + indexBufferOffset: index_buffer_offset + instanceCount: instance_count + baseVertex: base_vertex + baseInstance: base_instance + ] + } + } + + pub fn draw_indexed_primitives_indirect( + &self, + primitive_type: MTLPrimitiveType, + index_type: MTLIndexType, + index_buffer: &BufferRef, + index_buffer_offset: NSUInteger, + indirect_buffer: &BufferRef, + indirect_buffer_offset: NSUInteger, + ) { + unsafe { + msg_send![self, + drawIndexedPrimitives: primitive_type + indexType: index_type + indexBuffer: index_buffer + indexBufferOffset: index_buffer_offset + indirectBuffer: indirect_buffer + indirectBufferOffset: indirect_buffer_offset + ] + } + } + + + + + + + pub fn use_resource(&self, resource: &ResourceRef, usage: MTLResourceUsage) { + unsafe { + msg_send![self, useResource:resource + usage:usage] + } + } + + pub fn use_heap(&self, heap: &HeapRef) { + unsafe { msg_send![self, useHeap: heap] } + } +} + +pub enum MTLBlitCommandEncoder {} + +foreign_obj_type! 
{ + type CType = MTLBlitCommandEncoder; + pub struct BlitCommandEncoder; + pub struct BlitCommandEncoderRef; + type ParentType = CommandEncoderRef; +} + +impl BlitCommandEncoderRef { + pub fn synchronize_resource(&self, resource: &ResourceRef) { + unsafe { msg_send![self, synchronizeResource: resource] } + } + + pub fn fill_buffer(&self, destination_buffer: &BufferRef, range: NSRange, value: u8) { + unsafe { + msg_send![self, + fillBuffer: destination_buffer + range: range + value: value + ] + } + } + + pub fn copy_from_buffer( + &self, + source_buffer: &BufferRef, + source_offset: NSUInteger, + destination_buffer: &BufferRef, + destination_offset: NSUInteger, + size: NSUInteger, + ) { + unsafe { + msg_send![self, + copyFromBuffer: source_buffer + sourceOffset: source_offset + toBuffer: destination_buffer + destinationOffset: destination_offset + size: size + ] + } + } + + pub fn copy_from_texture( + &self, + source_texture: &TextureRef, + source_slice: NSUInteger, + source_level: NSUInteger, + source_origin: MTLOrigin, + source_size: MTLSize, + destination_texture: &TextureRef, + destination_slice: NSUInteger, + destination_level: NSUInteger, + destination_origin: MTLOrigin, + ) { + unsafe { + msg_send![self, + copyFromTexture: source_texture + sourceSlice: source_slice + sourceLevel: source_level + sourceOrigin: source_origin + sourceSize: source_size + toTexture: destination_texture + destinationSlice: destination_slice + destinationLevel: destination_level + destinationOrigin: destination_origin + ] + } + } + + pub fn copy_from_buffer_to_texture( + &self, + source_buffer: &BufferRef, + source_offset: NSUInteger, + source_bytes_per_row: NSUInteger, + source_bytes_per_image: NSUInteger, + source_size: MTLSize, + destination_texture: &TextureRef, + destination_slice: NSUInteger, + destination_level: NSUInteger, + destination_origin: MTLOrigin, + options: MTLBlitOption, + ) { + unsafe { + msg_send![self, + copyFromBuffer: source_buffer + sourceOffset: source_offset + sourceBytesPerRow: source_bytes_per_row + sourceBytesPerImage: source_bytes_per_image + sourceSize: source_size + toTexture: destination_texture + destinationSlice: destination_slice + destinationLevel: destination_level + destinationOrigin: destination_origin + options: options + ] + } + } + + pub fn copy_from_texture_to_buffer( + &self, + source_texture: &TextureRef, + source_slice: NSUInteger, + source_level: NSUInteger, + source_origin: MTLOrigin, + source_size: MTLSize, + destination_buffer: &BufferRef, + destination_offset: NSUInteger, + destination_bytes_per_row: NSUInteger, + destination_bytes_per_image: NSUInteger, + options: MTLBlitOption, + ) { + unsafe { + msg_send![self, + copyFromTexture: source_texture + sourceSlice: source_slice + sourceLevel: source_level + sourceOrigin: source_origin + sourceSize: source_size + toBuffer: destination_buffer + destinationOffset: destination_offset + destinationBytesPerRow: destination_bytes_per_row + destinationBytesPerImage: destination_bytes_per_image + options: options + ] + } + } + + pub fn optimize_contents_for_gpu_access(&self, texture: &TextureRef) { + unsafe { msg_send![self, optimizeContentsForGPUAccess: texture] } + } + + pub fn optimize_contents_for_gpu_access_slice_level( + &self, + texture: &TextureRef, + slice: NSUInteger, + level: NSUInteger, + ) { + unsafe { + msg_send![self, + optimizeContentsForGPUAccess: texture + slice: slice + level: level + ] + } + } + + pub fn optimize_contents_for_cpu_access(&self, texture: &TextureRef) { + unsafe { msg_send![self, 
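A staging-upload sketch for the blit methods above; `blit` comes from a command buffer (not shown here), and the sizes assume a tightly packed 256x256 RGBA8 image:

    use metal::*;

    fn upload_rgba8(blit: &BlitCommandEncoderRef, staging: &BufferRef, dst: &TextureRef) {
        blit.copy_from_buffer_to_texture(
            staging,
            0,       // source offset
            256 * 4, // bytes per row
            0,       // bytes per image (0 is valid for 2D textures)
            MTLSize { width: 256, height: 256, depth: 1 },
            dst,
            0, // destination slice
            0, // destination mip level
            MTLOrigin { x: 0, y: 0, z: 0 },
            MTLBlitOption::empty(),
        );
        blit.end_encoding();
    }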
optimizeContentsForCPUAccess: texture] } + } + + pub fn optimize_contents_for_cpu_access_slice_level( + &self, + texture: &TextureRef, + slice: NSUInteger, + level: NSUInteger, + ) { + unsafe { + msg_send![self, + optimizeContentsForCPUAccess: texture + slice: slice + level: level + ] + } + } +} + +pub enum MTLComputeCommandEncoder {} + +foreign_obj_type! { + type CType = MTLComputeCommandEncoder; + pub struct ComputeCommandEncoder; + pub struct ComputeCommandEncoderRef; + type ParentType = CommandEncoderRef; +} + +impl ComputeCommandEncoderRef { + pub fn set_compute_pipeline_state(&self, state: &ComputePipelineStateRef) { + unsafe { msg_send![self, setComputePipelineState: state] } + } + + pub fn set_buffer(&self, index: NSUInteger, buffer: Option<&BufferRef>, offset: NSUInteger) { + unsafe { msg_send![self, setBuffer:buffer offset:offset atIndex:index] } + } + + pub fn set_buffers( + &self, + start_index: NSUInteger, + data: &[Option<&BufferRef>], + offsets: &[NSUInteger], + ) { + debug_assert_eq!(offsets.len(), data.len()); + unsafe { + msg_send![self, + setBuffers: data.as_ptr() + offsets: offsets.as_ptr() + withRange: NSRange { + location: start_index, + length: data.len() as _, + } + ] + } + } + + pub fn set_texture(&self, index: NSUInteger, texture: Option<&TextureRef>) { + unsafe { + msg_send![self, + setTexture:texture + atIndex:index + ] + } + } + + pub fn set_textures(&self, start_index: NSUInteger, data: &[Option<&TextureRef>]) { + unsafe { + msg_send![self, + setTextures: data.as_ptr() + withRange: NSRange { + location: start_index, + length: data.len() as _, + } + ] + } + } + + pub fn set_sampler_state(&self, index: NSUInteger, sampler: Option<&SamplerStateRef>) { + unsafe { + msg_send![self, + setSamplerState:sampler + atIndex:index + ] + } + } + + pub fn set_sampler_states(&self, start_index: NSUInteger, data: &[Option<&SamplerStateRef>]) { + unsafe { + msg_send![self, + setSamplerStates: data.as_ptr() + withRange: NSRange { + location: start_index, + length: data.len() as _, + } + ] + } + } + + pub fn set_sampler_state_with_lod( + &self, + index: NSUInteger, + sampler: Option<&SamplerStateRef>, + lod_clamp: Range, + ) { + unsafe { + msg_send![self, + setSamplerState:sampler + lodMinClamp:lod_clamp.start + lodMaxClamp:lod_clamp.end + atIndex:index + ] + } + } + + pub fn set_bytes(&self, index: NSUInteger, length: NSUInteger, bytes: *const std::ffi::c_void) { + unsafe { + msg_send![self, + setBytes: bytes + length: length + atIndex: index + ] + } + } + + pub fn dispatch_thread_groups( + &self, + thread_groups_count: MTLSize, + threads_per_thread_group: MTLSize, + ) { + unsafe { + msg_send![self, + dispatchThreadgroups:thread_groups_count + threadsPerThreadgroup:threads_per_thread_group + ] + } + } + + pub fn dispatch_thread_groups_indirect( + &self, + buffer: &BufferRef, + offset: NSUInteger, + threads_per_thread_group: MTLSize, + ) { + unsafe { + msg_send![self, + dispatchThreadgroupsWithIndirectBuffer:buffer + indirectBufferOffset:offset + threadsPerThreadgroup:threads_per_thread_group + ] + } + } + + pub fn use_resource(&self, resource: &ResourceRef, usage: MTLResourceUsage) { + unsafe { + msg_send![self, + useResource:resource + usage:usage + ] + } + } + + pub fn use_heap(&self, heap: &HeapRef) { + unsafe { msg_send![self, useHeap: heap] } + } +} + +pub enum MTLArgumentEncoder {} + +foreign_obj_type! 
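And the compute-side counterpart, dispatching a kernel like the one built earlier; `encoder` again comes from a command buffer outside this hunk:

    use metal::*;

    fn dispatch_fill(
        encoder: &ComputeCommandEncoderRef,
        pso: &ComputePipelineStateRef,
        buf: &BufferRef,
    ) {
        encoder.set_compute_pipeline_state(pso);
        encoder.set_buffer(0, Some(buf), 0);
        // 1024 threads total: 16 one-dimensional groups of 64.
        let groups = MTLSize { width: 16, height: 1, depth: 1 };
        let per_group = MTLSize { width: 64, height: 1, depth: 1 };
        encoder.dispatch_thread_groups(groups, per_group);
        encoder.end_encoding();
    }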
{ + type CType = MTLArgumentEncoder; + pub struct ArgumentEncoder; + pub struct ArgumentEncoderRef; +} + +impl ArgumentEncoderRef { + pub fn encoded_length(&self) -> NSUInteger { + unsafe { msg_send![self, encodedLength] } + } + + pub fn alignment(&self) -> NSUInteger { + unsafe { msg_send![self, alignment] } + } + + pub fn set_argument_buffer(&self, buffer: &BufferRef, offset: NSUInteger) { + unsafe { + msg_send![self, + setArgumentBuffer: buffer + offset: offset + ] + } + } + + pub fn set_argument_buffer_to_element( + &self, + buffer: &BufferRef, + offset: NSUInteger, + array_element: NSUInteger, + ) { + unsafe { + msg_send![self, + setArgumentBuffer: buffer + startOffset: offset + arrayElement: array_element + ] + } + } + + pub fn set_buffer(&self, buffer: &BufferRef, offset: NSUInteger, at_index: NSUInteger) { + unsafe { + msg_send![self, + setBuffer: buffer + offset: offset + atIndex: at_index + ] + } + } + + pub fn set_buffers( + &self, + data: &[&BufferRef], + offsets: &[NSUInteger], + start_index: NSUInteger, + ) { + assert_eq!(offsets.len(), data.len()); + unsafe { + msg_send![self, + setBuffers: data.as_ptr() + offsets: offsets.as_ptr() + withRange: NSRange { + location: start_index, + length: data.len() as _, + } + ] + } + } + + pub fn set_texture(&self, texture: &TextureRef, at_index: NSUInteger) { + unsafe { + msg_send![self, + setTexture: texture + atIndex: at_index + ] + } + } + + pub fn set_textures(&self, data: &[&TextureRef], start_index: NSUInteger) { + unsafe { + msg_send![self, + setTextures: data.as_ptr() + withRange: NSRange { + location: start_index, + length: data.len() as _, + } + ] + } + } + + pub fn set_sampler_state(&self, sampler_state: &SamplerStateRef, at_index: NSUInteger) { + unsafe { + msg_send![self, + setSamplerState: sampler_state + atIndex: at_index + ] + } + } + + pub fn set_sampler_states(&self, data: &[&SamplerStateRef], start_index: NSUInteger) { + unsafe { + msg_send![self, + setSamplerStates: data.as_ptr() + withRange: NSRange { + location: start_index, + length: data.len() as _, + } + ] + } + } + + pub fn constant_data(&self, at_index: NSUInteger) -> *mut std::ffi::c_void { + unsafe { msg_send![self, constantDataAtIndex: at_index] } + } + + pub fn new_argument_encoder_for_buffer(&self, index: NSUInteger) -> ArgumentEncoder { + unsafe { + let ptr = msg_send![self, newArgumentEncoderForBufferAtIndex: index]; + ArgumentEncoder::from_ptr(ptr) + } + } +} diff --git a/third_party/rust/metal/src/heap.rs b/third_party/rust/metal/src/heap.rs new file mode 100644 index 000000000000..a6ca94dd8f63 --- /dev/null +++ b/third_party/rust/metal/src/heap.rs @@ -0,0 +1,110 @@ + + + + + + + +use super::*; + +use cocoa::foundation::NSUInteger; + +pub enum MTLHeap {} + +foreign_obj_type! 
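Two allocation paths meet here: the argument-encoder methods above size and fill an argument buffer, while the heap API just below sub-allocates resources (returning Option, since a heap can be exhausted). A hedged sketch of both; the storage-mode flag name is assumed from the resource.rs half of the patch:

    use metal::*;

    fn encode_args(device: &DeviceRef, enc: &ArgumentEncoderRef, res: &BufferRef) -> Buffer {
        // Size the backing store from encoded_length(), attach it, then encode.
        let arg_buf = device.new_buffer(enc.encoded_length(), MTLResourceOptions::empty());
        enc.set_argument_buffer(&arg_buf, 0);
        enc.set_buffer(res, 0, 0); // note: (buffer, offset, atIndex) order here
        arg_buf
    }

    fn heap_alloc(device: &DeviceRef) -> Option<Buffer> {
        let desc = HeapDescriptor::new();
        desc.set_storage_mode(MTLStorageMode::Private);
        desc.set_size(1 << 20); // 1 MiB heap
        let heap = device.new_heap(&desc);
        heap.new_buffer(64 * 1024, MTLResourceOptions::StorageModePrivate)
    }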
{ + type CType = MTLHeap; + pub struct Heap; + pub struct HeapRef; +} + +impl HeapRef { + pub fn cpu_cache_mode(&self) -> MTLCPUCacheMode { + unsafe { msg_send![self, cpuCacheMode] } + } + + pub fn storage_mode(&self) -> MTLStorageMode { + unsafe { msg_send![self, storageMode] } + } + + pub fn set_purgeable_state(&self, state: MTLPurgeableState) -> MTLPurgeableState { + unsafe { msg_send![self, setPurgeableState: state] } + } + + pub fn size(&self) -> NSUInteger { + unsafe { msg_send![self, size] } + } + + pub fn used_size(&self) -> NSUInteger { + unsafe { msg_send![self, usedSize] } + } + + pub fn max_available_size(&self, alignment: NSUInteger) -> NSUInteger { + unsafe { msg_send![self, maxAvailableSizeWithAlignment: alignment] } + } + + pub fn new_buffer(&self, length: u64, options: MTLResourceOptions) -> Option { + unsafe { + let ptr: *mut MTLBuffer = msg_send![self, newBufferWithLength:length + options:options]; + if !ptr.is_null() { + Some(Buffer::from_ptr(ptr)) + } else { + None + } + } + } + + pub fn new_texture(&self, descriptor: &TextureDescriptorRef) -> Option { + unsafe { + let ptr: *mut MTLTexture = msg_send![self, newTextureWithDescriptor: descriptor]; + if !ptr.is_null() { + Some(Texture::from_ptr(ptr)) + } else { + None + } + } + } +} + +pub enum MTLHeapDescriptor {} + +foreign_obj_type! { + type CType = MTLHeapDescriptor; + pub struct HeapDescriptor; + pub struct HeapDescriptorRef; +} + +impl HeapDescriptor { + pub fn new() -> Self { + unsafe { + let class = class!(MTLHeapDescriptor); + msg_send![class, new] + } + } +} + +impl HeapDescriptorRef { + pub fn cpu_cache_mode(&self) -> MTLCPUCacheMode { + unsafe { msg_send![self, cpuCacheMode] } + } + + pub fn set_cpu_cache_mode(&self, mode: MTLCPUCacheMode) { + unsafe { msg_send![self, setCpuCacheMode: mode] } + } + + pub fn storage_mode(&self) -> MTLStorageMode { + unsafe { msg_send![self, storageMode] } + } + + pub fn set_storage_mode(&self, mode: MTLStorageMode) { + unsafe { msg_send![self, setStorageMode: mode] } + } + + pub fn size(&self) -> NSUInteger { + unsafe { msg_send![self, size] } + } + + pub fn set_size(&self, size: NSUInteger) { + unsafe { msg_send![self, setSize: size] } + } +} diff --git a/third_party/rust/metal/src/lib.rs b/third_party/rust/metal/src/lib.rs new file mode 100644 index 000000000000..b74bb5714364 --- /dev/null +++ b/third_party/rust/metal/src/lib.rs @@ -0,0 +1,401 @@ + + + + + + + +#![allow(non_snake_case)] +#![allow(non_upper_case_globals)] + +#[macro_use] +extern crate bitflags; +#[macro_use] +extern crate log; +#[macro_use] +extern crate objc; +#[macro_use] +extern crate foreign_types; + +use std::borrow::{Borrow, ToOwned}; +use std::marker::PhantomData; +use std::mem; +use std::ops::Deref; +use std::os::raw::c_void; + +use core_graphics::base::CGFloat; +use core_graphics::geometry::CGSize; +use foreign_types::ForeignType; +use objc::runtime::{Object, NO, YES}; +use cocoa::foundation::NSUInteger; + +fn nsstring_as_str(nsstr: &objc::runtime::Object) -> &str { + let bytes = unsafe { + let bytes: *const std::os::raw::c_char = msg_send![nsstr, UTF8String]; + bytes as *const u8 + }; + let len: NSUInteger = unsafe { msg_send![nsstr, length] }; + unsafe { + let bytes = std::slice::from_raw_parts(bytes, len as usize); + std::str::from_utf8(bytes).unwrap() + } +} + +fn nsstring_from_str(string: &str) -> *mut objc::runtime::Object { + const UTF8_ENCODING: usize = 4; + + let cls = class!(NSString); + let bytes = string.as_ptr() as *const c_void; + unsafe { + let obj: *mut objc::runtime::Object = 
msg_send![cls, alloc];
+        let obj: *mut objc::runtime::Object = msg_send![
+            obj,
+            initWithBytes:bytes
+            length:string.len()
+            encoding:UTF8_ENCODING
+        ];
+        let _: *mut c_void = msg_send![obj, autorelease];
+        obj
+    }
+}
+
+macro_rules! foreign_obj_type {
+    {type CType = $raw_ident:ident;
+    pub struct $owned_ident:ident;
+    pub struct $ref_ident:ident;
+    type ParentType = $parent_ref:ident;
+    } => {
+        foreign_obj_type! {
+            type CType = $raw_ident;
+            pub struct $owned_ident;
+            pub struct $ref_ident;
+        }
+
+        impl ::std::ops::Deref for $ref_ident {
+            type Target = $parent_ref;
+
+            fn deref(&self) -> &$parent_ref {
+                unsafe { &*(self as *const $ref_ident as *const $parent_ref) }
+            }
+        }
+    };
+    {type CType = $raw_ident:ident;
+    pub struct $owned_ident:ident;
+    pub struct $ref_ident:ident;
+    } => {
+        foreign_type! {
+            type CType = $raw_ident;
+            fn drop = crate::obj_drop;
+            fn clone = crate::obj_clone;
+            pub struct $owned_ident;
+            pub struct $ref_ident;
+        }
+
+        unsafe impl ::objc::Message for $raw_ident {
+        }
+        unsafe impl ::objc::Message for $ref_ident {
+        }
+
+        impl ::std::fmt::Debug for $ref_ident {
+            fn fmt(&self, f: &mut ::std::fmt::Formatter) -> ::std::fmt::Result {
+                unsafe {
+                    let string: *mut ::objc::runtime::Object = msg_send![self, debugDescription];
+                    write!(f, "{}", crate::nsstring_as_str(&*string))
+                }
+            }
+        }
+
+        impl ::std::fmt::Debug for $owned_ident {
+            fn fmt(&self, f: &mut ::std::fmt::Formatter) -> ::std::fmt::Result {
+                ::std::ops::Deref::deref(self).fmt(f)
+            }
+        }
+    };
+}
+
+macro_rules! try_objc {
+    {
+        $err_name: ident => $body:expr
+    } => {
+        {
+            let mut $err_name: *mut ::objc::runtime::Object = ::std::ptr::null_mut();
+            let value = $body;
+            if !$err_name.is_null() {
+                let desc: *mut Object = msg_send![$err_name, localizedDescription];
+                let compile_error: *const std::os::raw::c_char = msg_send![desc, UTF8String];
+                let message = CStr::from_ptr(compile_error).to_string_lossy().into_owned();
+                let () = msg_send![$err_name, release];
+                return Err(message);
+            }
+            value
+        }
+    };
+}
+
+pub struct NSArray<T> {
+    _phantom: PhantomData<T>,
+}
+
+pub struct Array<T>(*mut NSArray<T>)
+where
+    T: ForeignType + 'static,
+    T::Ref: objc::Message + 'static;
+pub struct ArrayRef<T>(foreign_types::Opaque, PhantomData<T>)
+where
+    T: ForeignType + 'static,
+    T::Ref: objc::Message + 'static;
+
+impl<T> Drop for Array<T>
+where
+    T: ForeignType + 'static,
+    T::Ref: objc::Message + 'static,
+{
+    fn drop(&mut self) {
+        unsafe {
+            let () = msg_send![self.0, release];
+        }
+    }
+}
+
+impl<T> Clone for Array<T>
+where
+    T: ForeignType + 'static,
+    T::Ref: objc::Message + 'static,
+{
+    fn clone(&self) -> Self {
+        unsafe { Array(msg_send![self.0, retain]) }
+    }
+}
+
+unsafe impl<T> objc::Message for NSArray<T>
+where
+    T: ForeignType + 'static,
+    T::Ref: objc::Message + 'static,
+{
+}
+unsafe impl<T> objc::Message for ArrayRef<T>
+where
+    T: ForeignType + 'static,
+    T::Ref: objc::Message + 'static,
+{
+}
+
+impl<T> Array<T>
+where
+    T: ForeignType + 'static,
+    T::Ref: objc::Message + 'static,
+{
+    pub fn from_slice<'a>(s: &[&T::Ref]) -> &'a ArrayRef<T> {
+        unsafe {
+            let class = class!(NSArray);
+            msg_send![class, arrayWithObjects: s.as_ptr() count: s.len()]
+        }
+    }
+
+    pub fn from_owned_slice<'a>(s: &[T]) -> &'a ArrayRef<T> {
+        unsafe {
+            let class = class!(NSArray);
+            msg_send![class, arrayWithObjects: s.as_ptr() count: s.len()]
+        }
+    }
+}
+
+impl<T> foreign_types::ForeignType for Array<T>
+where
+    T: ForeignType + 'static,
+    T::Ref: objc::Message + 'static,
+{
+    type CType = NSArray<T>;
+    type Ref = ArrayRef<T>;
+
+    unsafe fn from_ptr(p: *mut NSArray<T>) -> Self {
impl<T> foreign_types::ForeignType for Array<T> +where + T: ForeignType + 'static, + T::Ref: objc::Message + 'static, +{ + type CType = NSArray<T>; + type Ref = ArrayRef<T>; + + unsafe fn from_ptr(p: *mut NSArray<T>) -> Self { + Array(p) + } + + fn as_ptr(&self) -> *mut NSArray<T> { + self.0 + } +} + +impl<T> foreign_types::ForeignTypeRef for ArrayRef<T> +where + T: ForeignType + 'static, + T::Ref: objc::Message + 'static, +{ + type CType = NSArray<T>; +} + +impl<T> Deref for Array<T> +where + T: ForeignType + 'static, + T::Ref: objc::Message + 'static, +{ + type Target = ArrayRef<T>; + + fn deref(&self) -> &ArrayRef<T> { + unsafe { mem::transmute(self.as_ptr()) } + } +} + +impl<T> Borrow<ArrayRef<T>> for Array<T> +where + T: ForeignType + 'static, + T::Ref: objc::Message + 'static, +{ + fn borrow(&self) -> &ArrayRef<T> { + unsafe { mem::transmute(self.as_ptr()) } + } +} + +impl<T> ToOwned for ArrayRef<T> +where + T: ForeignType + 'static, + T::Ref: objc::Message + 'static, +{ + type Owned = Array<T>; + + fn to_owned(&self) -> Array<T> { + unsafe { Array::from_ptr(msg_send![self, retain]) } + } +} + +pub enum CAMetalDrawable {} + +foreign_obj_type! { + type CType = CAMetalDrawable; + pub struct CoreAnimationDrawable; + pub struct CoreAnimationDrawableRef; + type ParentType = DrawableRef; +} + +impl CoreAnimationDrawableRef { + pub fn texture(&self) -> &TextureRef { + unsafe { msg_send![self, texture] } + } +} + +pub enum CAMetalLayer {} + +foreign_obj_type! { + type CType = CAMetalLayer; + pub struct CoreAnimationLayer; + pub struct CoreAnimationLayerRef; +} + +impl CoreAnimationLayer { + pub fn new() -> Self { + unsafe { + let class = class!(CAMetalLayer); + msg_send![class, new] + } + } +} + +impl CoreAnimationLayerRef { + pub fn set_device(&self, device: &DeviceRef) { + unsafe { msg_send![self, setDevice: device] } + } + + pub fn pixel_format(&self) -> MTLPixelFormat { + unsafe { msg_send![self, pixelFormat] } + } + + pub fn set_pixel_format(&self, pixel_format: MTLPixelFormat) { + unsafe { msg_send![self, setPixelFormat: pixel_format] } + } + + pub fn drawable_size(&self) -> CGSize { + unsafe { msg_send![self, drawableSize] } + } + + pub fn set_drawable_size(&self, size: CGSize) { + unsafe { msg_send![self, setDrawableSize: size] } + } + + pub fn presents_with_transaction(&self) -> bool { + unsafe { + match msg_send![self, presentsWithTransaction] { + YES => true, + NO => false, + _ => unreachable!(), + } + } + } + + pub fn set_presents_with_transaction(&self, transaction: bool) { + unsafe { msg_send![self, setPresentsWithTransaction: transaction] } + } + + pub fn set_edge_antialiasing_mask(&self, mask: u64) { + unsafe { msg_send![self, setEdgeAntialiasingMask: mask] } + } + + pub fn set_masks_to_bounds(&self, masks: bool) { + unsafe { msg_send![self, setMasksToBounds: masks] } + } + + pub fn remove_all_animations(&self) { + unsafe { msg_send![self, removeAllAnimations] } + } + + pub fn next_drawable(&self) -> Option<&CoreAnimationDrawableRef> { + unsafe { msg_send![self, nextDrawable] } + } + + pub fn set_contents_scale(&self, scale: CGFloat) { + unsafe { msg_send![self, setContentsScale: scale] } + } +} + +mod argument; +mod buffer; +mod capturemanager; +mod commandbuffer; +mod commandqueue; +mod constants; +mod depthstencil; +mod device; +mod drawable; +mod encoder; +mod heap; +mod library; +mod pipeline; +mod renderpass; +mod resource; +mod sampler; +mod texture; +mod types; +mod vertexdescriptor;
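+// The submodules above each wrap one family of Metal protocols (MTLBuffer, MTLLibrary, +// MTLTexture, and so on); the flat re-exports below keep the public API at the crate +// root, so callers write metal::Texture rather than metal::texture::Texture.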
pub use argument::*; +pub use buffer::*; +pub use capturemanager::*; +pub use commandbuffer::*; +pub use commandqueue::*; +pub use constants::*; +pub use depthstencil::*; +pub use device::*; +pub use drawable::*; +pub use encoder::*; +pub use heap::*; +pub use library::*; +pub use pipeline::*; +pub use renderpass::*; +pub use resource::*; +pub use sampler::*; +pub use texture::*; +pub use types::*; +pub use vertexdescriptor::*; + +#[inline] +unsafe fn obj_drop<T>(p: *mut T) { + msg_send![(p as *mut Object), release] +} + +#[inline] +unsafe fn obj_clone<T>(p: *mut T) -> *mut T { + msg_send![(p as *mut Object), retain] +} + +#[allow(non_camel_case_types)] +type c_size_t = usize; diff --git a/third_party/rust/metal/src/library.rs b/third_party/rust/metal/src/library.rs new file mode 100644 index 000000000000..d76840a841bd --- /dev/null +++ b/third_party/rust/metal/src/library.rs @@ -0,0 +1,254 @@ + + + + + + + +use super::*; + +use cocoa::foundation::NSUInteger; +use foreign_types::ForeignType; +use objc::runtime::{Object, NO, YES}; +use std::ffi::CStr; + +pub enum MTLVertexAttribute {} + +foreign_obj_type! { + type CType = MTLVertexAttribute; + pub struct VertexAttribute; + pub struct VertexAttributeRef; +} + +impl VertexAttributeRef { + pub fn name(&self) -> &str { + unsafe { + let name = msg_send![self, name]; + crate::nsstring_as_str(name) + } + } + + pub fn attribute_index(&self) -> u64 { + unsafe { msg_send![self, attributeIndex] } + } + + pub fn attribute_type(&self) -> MTLDataType { + unsafe { msg_send![self, attributeType] } + } + + pub fn is_active(&self) -> bool { + unsafe { + match msg_send![self, isActive] { + YES => true, + NO => false, + _ => unreachable!(), + } + } + } +} + +#[repr(u64)] +#[derive(Debug)] +pub enum MTLFunctionType { + Vertex = 1, + Fragment = 2, + Kernel = 3, +} + +pub enum MTLFunction {} + +foreign_obj_type! { + type CType = MTLFunction; + pub struct Function; + pub struct FunctionRef; +} + +impl FunctionRef { + pub fn name(&self) -> &str { + unsafe { + let name = msg_send![self, name]; + crate::nsstring_as_str(name) + } + } + + pub fn function_type(&self) -> MTLFunctionType { + unsafe { msg_send![self, functionType] } + } + + pub fn vertex_attributes(&self) -> &Array<VertexAttribute> { + unsafe { msg_send![self, vertexAttributes] } + } + + pub fn new_argument_encoder(&self, buffer_index: NSUInteger) -> ArgumentEncoder { + unsafe { + let ptr = msg_send![self, newArgumentEncoderWithBufferIndex: buffer_index]; + ArgumentEncoder::from_ptr(ptr) + } + } + + pub fn function_constants_dictionary(&self) -> *mut Object { + unsafe { msg_send![self, functionConstantsDictionary] } + } +} + +#[repr(u64)] +#[allow(non_camel_case_types)] +#[derive(Clone, Copy, Debug, Hash, Eq, PartialEq, Ord, PartialOrd)] +pub enum MTLLanguageVersion { + V1_0 = 0x10000, + V1_1 = 0x10001, + V1_2 = 0x10002, + V2_0 = 0x20000, + V2_1 = 0x20001, + V2_2 = 0x20002, +} + +pub enum MTLFunctionConstantValues {} + +foreign_obj_type! { + type CType = MTLFunctionConstantValues; + pub struct FunctionConstantValues; + pub struct FunctionConstantValuesRef; +} + +impl FunctionConstantValues { + pub fn new() -> Self { + unsafe { + let class = class!(MTLFunctionConstantValues); + msg_send![class, new] + } + } +} + +impl FunctionConstantValuesRef { + pub unsafe fn set_constant_value_at_index( + &self, + index: NSUInteger, + ty: MTLDataType, + value: *const std::os::raw::c_void, + ) { + msg_send![self, setConstantValue:value type:ty atIndex:index] + } +}
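+// Editor's note: MTLCompileOptions below models the options object accepted by Metal's +// -[MTLDevice newLibraryWithSource:options:error:]. A hypothetical sketch using only +// methods defined in this file: +// let options = CompileOptions::new(); +// options.set_language_version(MTLLanguageVersion::V2_0); +// options.set_fast_math_enabled(true);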
pub enum MTLCompileOptions {} + +foreign_obj_type! { + type CType = MTLCompileOptions; + pub struct CompileOptions; + pub struct CompileOptionsRef; +} + +impl CompileOptions { + pub fn new() -> Self { + unsafe { + let class = class!(MTLCompileOptions); + msg_send![class, new] + } + } +} + +impl CompileOptionsRef { + pub unsafe fn preprocessor_defines(&self) -> *mut Object { + msg_send![self, preprocessorMacros] + } + + pub unsafe fn set_preprocessor_defines(&self, defines: *mut Object) { + msg_send![self, setPreprocessorMacros: defines] + } + + pub fn is_fast_math_enabled(&self) -> bool { + unsafe { + match msg_send![self, fastMathEnabled] { + YES => true, + NO => false, + _ => unreachable!(), + } + } + } + + pub fn set_fast_math_enabled(&self, enabled: bool) { + unsafe { msg_send![self, setFastMathEnabled: enabled] } + } + + pub fn language_version(&self) -> MTLLanguageVersion { + unsafe { msg_send![self, languageVersion] } + } + + pub fn set_language_version(&self, version: MTLLanguageVersion) { + unsafe { msg_send![self, setLanguageVersion: version] } + } +} + +#[repr(u64)] +#[allow(non_camel_case_types)] +pub enum MTLLibraryError { + Unsupported = 1, + Internal = 2, + CompileFailure = 3, + CompileWarning = 4, +} + +pub enum MTLLibrary {} + +foreign_obj_type! { + type CType = MTLLibrary; + pub struct Library; + pub struct LibraryRef; +} + +impl LibraryRef { + pub fn label(&self) -> &str { + unsafe { + let label = msg_send![self, label]; + crate::nsstring_as_str(label) + } + } + + pub fn set_label(&self, label: &str) { + unsafe { + let nslabel = crate::nsstring_from_str(label); + let () = msg_send![self, setLabel: nslabel]; + } + }
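+ // Editor's note: the constant-specialized branch below goes through the try_objc! + // macro (defined in lib.rs), which threads a mutable NSError* out-parameter into the + // message send and converts a non-null error into Err(localizedDescription); the + // unspecialized branch instead returns null for a missing function, which the null + // check afterwards maps to Err.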
pub fn get_function( + &self, + name: &str, + constants: Option<FunctionConstantValues>, + ) -> Result<Function, String> { + unsafe { + let nsname = crate::nsstring_from_str(name); + + let function: *mut MTLFunction = match constants { + Some(c) => try_objc! { err => msg_send![self, + newFunctionWithName: nsname.as_ref() + constantValues: c.as_ref() + error: &mut err + ]}, + None => msg_send![self, newFunctionWithName: nsname.as_ref()], + }; + + if !function.is_null() { + Ok(Function::from_ptr(function)) + } else { + Err(format!("Function '{}' does not exist", name)) + } + } + } + + pub fn function_names(&self) -> Vec<String> { + unsafe { + let names: *mut Object = msg_send![self, functionNames]; + let count: NSUInteger = msg_send![names, count]; + let ret = (0..count) + .map(|i| { + let name = msg_send![names, objectAtIndex: i]; + nsstring_as_str(name).to_string() + }) + .collect(); + let () = msg_send![names, release]; + ret + } + } +} diff --git a/third_party/rust/metal/src/pipeline/compute.rs b/third_party/rust/metal/src/pipeline/compute.rs new file mode 100644 index 000000000000..1e3094add0e7 --- /dev/null +++ b/third_party/rust/metal/src/pipeline/compute.rs @@ -0,0 +1,410 @@ + + + + + + + +use super::*; + +use cocoa::foundation::NSUInteger; +use objc::runtime::{NO, YES}; + +#[repr(u64)] +#[allow(non_camel_case_types)] +#[derive(Clone, Copy, PartialEq, Eq, Hash, Debug)] +pub enum MTLMutability { + Default = 0, + Mutable = 1, + Immutable = 2, +} + +impl Default for MTLMutability { + #[inline] + fn default() -> Self { + MTLMutability::Default + } +} + +#[repr(u64)] +#[allow(non_camel_case_types)] +#[derive(Clone, Copy, PartialEq, Eq, Hash, Debug)] +pub enum MTLIndexType { + UInt16 = 0, + UInt32 = 1, +} + +#[repr(u64)] +#[allow(non_camel_case_types)] +#[derive(Clone, Copy, PartialEq, Eq, Hash, Debug)] +pub enum MTLAttributeFormat { + Invalid = 0, + UChar2 = 1, + UChar3 = 2, + UChar4 = 3, + Char2 = 4, + Char3 = 5, + Char4 = 6, + UChar2Normalized = 7, + UChar3Normalized = 8, + UChar4Normalized = 9, + Char2Normalized = 10, + Char3Normalized = 11, + Char4Normalized = 12, + UShort2 = 13, + UShort3 = 14, + UShort4 = 15, + Short2 = 16, + Short3 = 17, + Short4 = 18, + UShort2Normalized = 19, + UShort3Normalized = 20, + UShort4Normalized = 21, + Short2Normalized = 22, + Short3Normalized = 23, + Short4Normalized = 24, + Half2 = 25, + Half3 = 26, + Half4 = 27, + Float = 28, + Float2 = 29, + Float3 = 30, + Float4 = 31, + Int = 32, + Int2 = 33, + Int3 = 34, + Int4 = 35, + UInt = 36, + UInt2 = 37, + UInt3 = 38, + UInt4 = 39, + Int1010102Normalized = 40, + UInt1010102Normalized = 41, + UChar4Normalized_BGRA = 42, + UChar = 45, + Char = 46, + UCharNormalized = 47, + CharNormalized = 48, + UShort = 49, + Short = 50, + UShortNormalized = 51, + ShortNormalized = 52, + Half = 53, +} + +#[repr(u64)] +#[allow(non_camel_case_types)] +#[derive(Clone, Copy, PartialEq, Eq, Hash, Debug)] +pub enum MTLStepFunction { + Constant = 0, + PerInstance = 1, + PerPatch = 2, + PerPatchControlPoint = 3, + PerVertex = 4, + ThreadPositionInGridX = 5, + ThreadPositionInGridXIndexed = 6, + ThreadPositionInGridY = 7, + ThreadPositionInGridYIndexed = 8, +} + +pub enum MTLComputePipelineDescriptor {} + +foreign_obj_type! 
{ + type CType = MTLComputePipelineDescriptor; + pub struct ComputePipelineDescriptor; + pub struct ComputePipelineDescriptorRef; +} + +impl ComputePipelineDescriptor { + pub fn new() -> Self { + unsafe { + let class = class!(MTLComputePipelineDescriptor); + msg_send![class, new] + } + } +} + +impl ComputePipelineDescriptorRef { + pub fn label(&self) -> &str { + unsafe { + let label = msg_send![self, label]; + crate::nsstring_as_str(label) + } + } + + pub fn set_label(&self, label: &str) { + unsafe { + let nslabel = crate::nsstring_from_str(label); + let () = msg_send![self, setLabel: nslabel]; + } + } + + pub fn compute_function(&self) -> Option<&FunctionRef> { + unsafe { msg_send![self, computeFunction] } + } + + pub fn set_compute_function(&self, function: Option<&FunctionRef>) { + unsafe { msg_send![self, setComputeFunction: function] } + } + + pub fn thread_group_size_is_multiple_of_thread_execution_width(&self) -> bool { + unsafe { + match msg_send![self, threadGroupSizeIsMultipleOfThreadExecutionWidth] { + YES => true, + NO => false, + _ => unreachable!(), + } + } + } + + pub fn set_thread_group_size_is_multiple_of_thread_execution_width( + &self, + size_is_multiple_of_width: bool, + ) { + unsafe { + msg_send![ + self, + setThreadGroupSizeIsMultipleOfThreadExecutionWidth: size_is_multiple_of_width + ] + } + } + + pub fn stage_input_descriptor(&self) -> Option<&StageInputOutputDescriptorRef> { + unsafe { msg_send![self, stageInputDescriptor] } + } + + pub fn set_stage_input_descriptor(&self, descriptor: Option<&StageInputOutputDescriptorRef>) { + unsafe { msg_send![self, setStageInputDescriptor: descriptor] } + } + + pub fn buffers(&self) -> Option<&PipelineBufferDescriptorArrayRef> { + unsafe { msg_send![self, buffers] } + } + + pub fn reset(&self) { + unsafe { msg_send![self, reset] } + } +} + +pub enum MTLComputePipelineState {} + +foreign_obj_type! { + type CType = MTLComputePipelineState; + pub struct ComputePipelineState; + pub struct ComputePipelineStateRef; +} + +impl ComputePipelineStateRef { + pub fn label(&self) -> &str { + unsafe { + let label = msg_send![self, label]; + crate::nsstring_as_str(label) + } + } + + pub fn set_label(&self, label: &str) { + unsafe { + let nslabel = crate::nsstring_from_str(label); + let () = msg_send![self, setLabel: nslabel]; + } + } + + pub fn max_total_threads_per_group(&self) -> NSUInteger { + unsafe { msg_send![self, maxTotalThreadsPerThreadgroup] } + } + + pub fn thread_execution_width(&self) -> NSUInteger { + unsafe { msg_send![self, threadExecutionWidth] } + } + + pub fn static_threadgroup_memory_length(&self) -> NSUInteger { + unsafe { msg_send![self, staticThreadgroupMemoryLength] } + } +} + +pub enum MTLPipelineBufferDescriptorArray {} + +foreign_obj_type! { + type CType = MTLPipelineBufferDescriptorArray; + pub struct PipelineBufferDescriptorArray; + pub struct PipelineBufferDescriptorArrayRef; +} + +impl PipelineBufferDescriptorArrayRef { + pub fn object_at(&self, index: usize) -> Option<&PipelineBufferDescriptorRef> { + unsafe { msg_send![self, objectAtIndexedSubscript: index] } + } + + pub fn set_object_at(&self, index: usize, buffer_desc: Option<&PipelineBufferDescriptorRef>) { + unsafe { msg_send![self, setObject:buffer_desc atIndexedSubscript:index] } + } +} + +pub enum MTLPipelineBufferDescriptor {} + +foreign_obj_type! 
{ + type CType = MTLPipelineBufferDescriptor; + pub struct PipelineBufferDescriptor; + pub struct PipelineBufferDescriptorRef; +} + +impl PipelineBufferDescriptorRef { + pub fn mutability(&self) -> MTLMutability { + unsafe { msg_send![self, mutability] } + } + + pub fn set_mutability(&self, new_mutability: MTLMutability) { + unsafe { msg_send![self, setMutability: new_mutability] } + } +} + +pub enum MTLStageInputOutputDescriptor {} + +foreign_obj_type! { + type CType = MTLStageInputOutputDescriptor; + pub struct StageInputOutputDescriptor; + pub struct StageInputOutputDescriptorRef; +} + +impl StageInputOutputDescriptor { + pub fn new<'a>() -> &'a StageInputOutputDescriptorRef { + unsafe { + let class = class!(MTLStageInputOutputDescriptor); + msg_send![class, stageInputOutputDescriptor] + } + } +} + +impl StageInputOutputDescriptorRef { + pub fn attributes(&self) -> Option<&AttributeDescriptorArrayRef> { + unsafe { msg_send![self, attributes] } + } + + pub fn index_buffer_index(&self) -> NSUInteger { + unsafe { msg_send![self, indexBufferIndex] } + } + + pub fn set_index_buffer_index(&self, idx_buffer_idx: NSUInteger) { + unsafe { msg_send![self, setIndexBufferIndex: idx_buffer_idx] } + } + + pub fn index_type(&self) -> MTLIndexType { + unsafe { msg_send![self, indexType] } + } + + pub fn set_index_type(&self, index_ty: MTLIndexType) { + unsafe { msg_send![self, setIndexType: index_ty] } + } + + pub fn layouts(&self) -> Option<&BufferLayoutDescriptorArrayRef> { + unsafe { msg_send![self, layouts] } + } + + pub fn reset(&self) { + unsafe { msg_send![self, reset] } + } +} + +pub enum MTLAttributeDescriptorArray {} + +foreign_obj_type! { + type CType = MTLAttributeDescriptorArray; + pub struct AttributeDescriptorArray; + pub struct AttributeDescriptorArrayRef; +} + +impl AttributeDescriptorArrayRef { + pub fn object_at(&self, index: usize) -> Option<&AttributeDescriptorRef> { + unsafe { msg_send![self, objectAtIndexedSubscript: index] } + } + + pub fn set_object_at(&self, index: usize, buffer_desc: Option<&AttributeDescriptorRef>) { + unsafe { msg_send![self, setObject:buffer_desc atIndexedSubscript:index] } + } +} + +pub enum MTLAttributeDescriptor {} + +foreign_obj_type! { + type CType = MTLAttributeDescriptor; + pub struct AttributeDescriptor; + pub struct AttributeDescriptorRef; +} + +impl AttributeDescriptorRef { + pub fn buffer_index(&self) -> NSUInteger { + unsafe { msg_send![self, bufferIndex] } + } + + pub fn set_buffer_index(&self, buffer_index: NSUInteger) { + unsafe { msg_send![self, setBufferIndex: buffer_index] } + } + + pub fn format(&self) -> MTLAttributeFormat { + unsafe { msg_send![self, format] } + } + + pub fn set_format(&self, format: MTLAttributeFormat) { + unsafe { msg_send![self, setFormat: format] } + } + + pub fn offset(&self) -> NSUInteger { + unsafe { msg_send![self, offset] } + } + + pub fn set_offset(&self, offset: NSUInteger) { + unsafe { msg_send![self, setOffset: offset] } + } +} + +pub enum MTLBufferLayoutDescriptorArray {} + +foreign_obj_type! 
{ + type CType = MTLBufferLayoutDescriptorArray; + pub struct BufferLayoutDescriptorArray; + pub struct BufferLayoutDescriptorArrayRef; +} + +impl BufferLayoutDescriptorArrayRef { + pub fn object_at(&self, index: usize) -> Option<&BufferLayoutDescriptorRef> { + unsafe { msg_send![self, objectAtIndexedSubscript: index] } + } + + pub fn set_object_at(&self, index: usize, buffer_desc: Option<&BufferLayoutDescriptorRef>) { + unsafe { msg_send![self, setObject:buffer_desc atIndexedSubscript:index] } + } +} + +pub enum MTLBufferLayoutDescriptor {} + +foreign_obj_type! { + type CType = MTLBufferLayoutDescriptor; + pub struct BufferLayoutDescriptor; + pub struct BufferLayoutDescriptorRef; +} + +impl BufferLayoutDescriptorRef { + pub fn step_function(&self) -> MTLStepFunction { + unsafe { msg_send![self, stepFunction] } + } + + pub fn set_step_function(&self, step_function: MTLStepFunction) { + unsafe { msg_send![self, setStepFunction: step_function] } + } + + pub fn step_rate(&self) -> NSUInteger { + unsafe { msg_send![self, stepRate] } + } + + pub fn set_step_rate(&self, step_rate: NSUInteger) { + unsafe { msg_send![self, setStepRate: step_rate] } + } + + pub fn stride(&self) -> NSUInteger { + unsafe { msg_send![self, stride] } + } + + pub fn set_stride(&self, stride: NSUInteger) { + unsafe { msg_send![self, setStride: stride] } + } +} diff --git a/third_party/rust/metal/src/pipeline/mod.rs b/third_party/rust/metal/src/pipeline/mod.rs new file mode 100644 index 000000000000..6b889960ead8 --- /dev/null +++ b/third_party/rust/metal/src/pipeline/mod.rs @@ -0,0 +1,14 @@ + + + + + + + +use super::*; + +mod compute; +mod render; + +pub use self::compute::*; +pub use self::render::*; diff --git a/third_party/rust/metal/src/pipeline/render.rs b/third_party/rust/metal/src/pipeline/render.rs new file mode 100644 index 000000000000..825a179e1c0e --- /dev/null +++ b/third_party/rust/metal/src/pipeline/render.rs @@ -0,0 +1,409 @@ + + + + + + + +use super::*; + +use cocoa::foundation::NSUInteger; +use objc::runtime::{NO, YES}; + +#[repr(u64)] +#[allow(non_camel_case_types)] +#[derive(Clone, Copy, PartialEq, Eq, Hash, Debug)] +pub enum MTLBlendFactor { + Zero = 0, + One = 1, + SourceColor = 2, + OneMinusSourceColor = 3, + SourceAlpha = 4, + OneMinusSourceAlpha = 5, + DestinationColor = 6, + OneMinusDestinationColor = 7, + DestinationAlpha = 8, + OneMinusDestinationAlpha = 9, + SourceAlphaSaturated = 10, + BlendColor = 11, + OneMinusBlendColor = 12, + BlendAlpha = 13, + OneMinusBlendAlpha = 14, + Source1Color = 15, + OneMinusSource1Color = 16, + Source1Alpha = 17, + OneMinusSource1Alpha = 18, +} + +#[repr(u64)] +#[allow(non_camel_case_types)] +#[derive(Clone, Copy, PartialEq, Eq, Hash, Debug)] +pub enum MTLBlendOperation { + Add = 0, + Subtract = 1, + ReverseSubtract = 2, + Min = 3, + Max = 4, +} + +bitflags! { + pub struct MTLColorWriteMask: NSUInteger { + const Red = 0x1 << 3; + const Green = 0x1 << 2; + const Blue = 0x1 << 1; + const Alpha = 0x1 << 0; + } +} + +#[repr(u64)] +#[allow(non_camel_case_types)] +#[derive(Clone, Copy, PartialEq, Eq, Hash, Debug)] +pub enum MTLPrimitiveTopologyClass { + Unspecified = 0, + Point = 1, + Line = 2, + Triangle = 3, +} + +pub enum MTLRenderPipelineColorAttachmentDescriptor {} + +foreign_obj_type! 
{ + type CType = MTLRenderPipelineColorAttachmentDescriptor; + pub struct RenderPipelineColorAttachmentDescriptor; + pub struct RenderPipelineColorAttachmentDescriptorRef; +} + +impl RenderPipelineColorAttachmentDescriptorRef { + pub fn pixel_format(&self) -> MTLPixelFormat { + unsafe { msg_send![self, pixelFormat] } + } + + pub fn set_pixel_format(&self, pixel_format: MTLPixelFormat) { + unsafe { msg_send![self, setPixelFormat: pixel_format] } + } + + pub fn is_blending_enabled(&self) -> bool { + unsafe { + match msg_send![self, isBlendingEnabled] { + YES => true, + NO => false, + _ => unreachable!(), + } + } + } + + pub fn set_blending_enabled(&self, enabled: bool) { + unsafe { msg_send![self, setBlendingEnabled: enabled] } + } + + pub fn source_rgb_blend_factor(&self) -> MTLBlendFactor { + unsafe { msg_send![self, sourceRGBBlendFactor] } + } + + pub fn set_source_rgb_blend_factor(&self, blend_factor: MTLBlendFactor) { + unsafe { msg_send![self, setSourceRGBBlendFactor: blend_factor] } + } + + pub fn destination_rgb_blend_factor(&self) -> MTLBlendFactor { + unsafe { msg_send![self, destinationRGBBlendFactor] } + } + + pub fn set_destination_rgb_blend_factor(&self, blend_factor: MTLBlendFactor) { + unsafe { msg_send![self, setDestinationRGBBlendFactor: blend_factor] } + } + + pub fn rgb_blend_operation(&self) -> MTLBlendOperation { + unsafe { msg_send![self, rgbBlendOperation] } + } + + pub fn set_rgb_blend_operation(&self, blend_operation: MTLBlendOperation) { + unsafe { msg_send![self, setRgbBlendOperation: blend_operation] } + } + + pub fn source_alpha_blend_factor(&self) -> MTLBlendFactor { + unsafe { msg_send![self, sourceAlphaBlendFactor] } + } + + pub fn set_source_alpha_blend_factor(&self, blend_factor: MTLBlendFactor) { + unsafe { msg_send![self, setSourceAlphaBlendFactor: blend_factor] } + } + + pub fn destination_alpha_blend_factor(&self) -> MTLBlendFactor { + unsafe { msg_send![self, destinationAlphaBlendFactor] } + } + + pub fn set_destination_alpha_blend_factor(&self, blend_factor: MTLBlendFactor) { + unsafe { msg_send![self, setDestinationAlphaBlendFactor: blend_factor] } + } + + pub fn alpha_blend_operation(&self) -> MTLBlendOperation { + unsafe { msg_send![self, alphaBlendOperation] } + } + + pub fn set_alpha_blend_operation(&self, blend_operation: MTLBlendOperation) { + unsafe { msg_send![self, setAlphaBlendOperation: blend_operation] } + } + + pub fn write_mask(&self) -> MTLColorWriteMask { + unsafe { msg_send![self, writeMask] } + } + + pub fn set_write_mask(&self, mask: MTLColorWriteMask) { + unsafe { msg_send![self, setWriteMask: mask] } + } +} + +pub enum MTLRenderPipelineReflection {} + +foreign_obj_type! 
{ + type CType = MTLRenderPipelineReflection; + pub struct RenderPipelineReflection; + pub struct RenderPipelineReflectionRef; +} + +impl RenderPipelineReflection { + #[cfg(feature = "private")] + pub unsafe fn new( + vertex_data: *mut std::ffi::c_void, + fragment_data: *mut std::ffi::c_void, + vertex_desc: *mut std::ffi::c_void, + device: &DeviceRef, + options: u64, + flags: u64, + ) -> Self { + let class = class!(MTLRenderPipelineReflection); + let this: RenderPipelineReflection = msg_send![class, alloc]; + let this_alias: *mut Object = msg_send![this.as_ref(), initWithVertexData:vertex_data + fragmentData:fragment_data + serializedVertexDescriptor:vertex_desc + device:device + options:options + flags:flags]; + if this_alias.is_null() { + panic!("[MTLRenderPipelineReflection init] failed"); + } + this + } +} + +impl RenderPipelineReflectionRef { + pub fn fragment_arguments(&self) -> &Array<Argument> { + unsafe { msg_send![self, fragmentArguments] } + } + + pub fn vertex_arguments(&self) -> &Array<Argument> { + unsafe { msg_send![self, vertexArguments] } + } +} + +pub enum MTLRenderPipelineDescriptor {} + +foreign_obj_type! { + type CType = MTLRenderPipelineDescriptor; + pub struct RenderPipelineDescriptor; + pub struct RenderPipelineDescriptorRef; +} + +impl RenderPipelineDescriptor { + pub fn new() -> Self { + unsafe { + let class = class!(MTLRenderPipelineDescriptor); + msg_send![class, new] + } + } +} + +impl RenderPipelineDescriptorRef { + pub fn label(&self) -> &str { + unsafe { + let label = msg_send![self, label]; + crate::nsstring_as_str(label) + } + } + + pub fn set_label(&self, label: &str) { + unsafe { + let nslabel = crate::nsstring_from_str(label); + let () = msg_send![self, setLabel: nslabel]; + } + } + + pub fn vertex_function(&self) -> Option<&FunctionRef> { + unsafe { msg_send![self, vertexFunction] } + } + + pub fn set_vertex_function(&self, function: Option<&FunctionRef>) { + unsafe { msg_send![self, setVertexFunction: function] } + } + + pub fn fragment_function(&self) -> Option<&FunctionRef> { + unsafe { msg_send![self, fragmentFunction] } + } + + pub fn set_fragment_function(&self, function: Option<&FunctionRef>) { + unsafe { msg_send![self, setFragmentFunction: function] } + } + + pub fn vertex_descriptor(&self) -> Option<&VertexDescriptorRef> { + unsafe { msg_send![self, vertexDescriptor] } + } + + pub fn set_vertex_descriptor(&self, descriptor: Option<&VertexDescriptorRef>) { + unsafe { msg_send![self, setVertexDescriptor: descriptor] } + } + + pub fn sample_count(&self) -> NSUInteger { + unsafe { msg_send![self, sampleCount] } + } + + pub fn set_sample_count(&self, count: NSUInteger) { + unsafe { msg_send![self, setSampleCount: count] } + } + + pub fn is_alpha_to_coverage_enabled(&self) -> bool { + unsafe { + match msg_send![self, isAlphaToCoverageEnabled] { + YES => true, + NO => false, + _ => unreachable!(), + } + } + } + + pub fn set_alpha_to_coverage_enabled(&self, enabled: bool) { + unsafe { msg_send![self, setAlphaToCoverageEnabled: enabled] } + } + + pub fn is_alpha_to_one_enabled(&self) -> bool { + unsafe { + match msg_send![self, isAlphaToOneEnabled] { + YES => true, + NO => false, + _ => unreachable!(), + } + } + } + + pub fn set_alpha_to_one_enabled(&self, enabled: bool) { + unsafe { msg_send![self, setAlphaToOneEnabled: enabled] } + } + + pub fn is_rasterization_enabled(&self) -> bool { + unsafe { + match msg_send![self, isRasterizationEnabled] { + YES => true, + NO => false, + _ => unreachable!(), + } + } + } + + pub fn set_rasterization_enabled(&self, enabled: bool) { 
+ unsafe { msg_send![self, setRasterizationEnabled: enabled] } + } + + pub fn color_attachments(&self) -> &RenderPipelineColorAttachmentDescriptorArrayRef { + unsafe { msg_send![self, colorAttachments] } + } + + pub fn depth_attachment_pixel_format(&self) -> MTLPixelFormat { + unsafe { msg_send![self, depthAttachmentPixelFormat] } + } + + pub fn set_depth_attachment_pixel_format(&self, pixel_format: MTLPixelFormat) { + unsafe { msg_send![self, setDepthAttachmentPixelFormat: pixel_format] } + } + + pub fn stencil_attachment_pixel_format(&self) -> MTLPixelFormat { + unsafe { msg_send![self, stencilAttachmentPixelFormat] } + } + + pub fn set_stencil_attachment_pixel_format(&self, pixel_format: MTLPixelFormat) { + unsafe { msg_send![self, setStencilAttachmentPixelFormat: pixel_format] } + } + + pub fn input_primitive_topology(&self) -> MTLPrimitiveTopologyClass { + unsafe { msg_send![self, inputPrimitiveTopology] } + } + + pub fn set_input_primitive_topology(&self, topology: MTLPrimitiveTopologyClass) { + unsafe { msg_send![self, setInputPrimitiveTopology: topology] } + } + + #[cfg(feature = "private")] + pub unsafe fn serialize_vertex_data(&self) -> *mut std::ffi::c_void { + use std::ptr; + let flags = 0; + let err: *mut Object = ptr::null_mut(); + msg_send![self, newSerializedVertexDataWithFlags:flags + error:err] + } + + #[cfg(feature = "private")] + pub unsafe fn serialize_fragment_data(&self) -> *mut std::ffi::c_void { + msg_send![self, serializeFragmentData] + } + + pub fn support_indirect_command_buffers(&self) -> bool { + unsafe { + match msg_send![self, supportIndirectCommandBuffers] { + YES => true, + NO => false, + _ => unreachable!(), + } + } + } + + pub fn set_support_indirect_command_buffers(&self, support: bool) { + unsafe { msg_send![self, setSupportIndirectCommandBuffers: support] } + } +} + +pub enum MTLRenderPipelineState {} + +foreign_obj_type! { + type CType = MTLRenderPipelineState; + pub struct RenderPipelineState; + pub struct RenderPipelineStateRef; +} + +impl RenderPipelineStateRef { + pub fn label(&self) -> &str { + unsafe { + let label = msg_send![self, label]; + crate::nsstring_as_str(label) + } + } + + pub fn set_label(&self, label: &str) { + unsafe { + let nslabel = crate::nsstring_from_str(label); + let () = msg_send![self, setLabel: nslabel]; + } + } +} + +pub enum MTLRenderPipelineColorAttachmentDescriptorArray {} + +foreign_obj_type! 
{ + type CType = MTLRenderPipelineColorAttachmentDescriptorArray; + pub struct RenderPipelineColorAttachmentDescriptorArray; + pub struct RenderPipelineColorAttachmentDescriptorArrayRef; +} + +impl RenderPipelineColorAttachmentDescriptorArrayRef { + pub fn object_at(&self, index: usize) -> Option<&RenderPipelineColorAttachmentDescriptorRef> { + unsafe { msg_send![self, objectAtIndexedSubscript: index] } + } + + pub fn set_object_at( + &self, + index: usize, + attachment: Option<&RenderPipelineColorAttachmentDescriptorRef>, + ) { + unsafe { + msg_send![self, setObject:attachment + atIndexedSubscript:index] + } + } +} diff --git a/third_party/rust/metal/src/renderpass.rs b/third_party/rust/metal/src/renderpass.rs new file mode 100644 index 000000000000..c2fce9dd3d7a --- /dev/null +++ b/third_party/rust/metal/src/renderpass.rs @@ -0,0 +1,310 @@ + + + + + + + +use super::*; + +use cocoa::foundation::NSUInteger; + +#[repr(u64)] +#[derive(Clone, Debug)] +pub enum MTLLoadAction { + DontCare = 0, + Load = 1, + Clear = 2, +} + +#[repr(u64)] +#[derive(Clone, Debug)] +pub enum MTLStoreAction { + DontCare = 0, + Store = 1, + MultisampleResolve = 2, + StoreAndMultisampleResolve = 3, + Unknown = 4, + CustomSampleDepthStore = 5, +} + +#[repr(C)] +#[derive(Clone, Debug)] +pub struct MTLClearColor { + red: f64, + green: f64, + blue: f64, + alpha: f64, +} + +impl MTLClearColor { + #[inline] + pub fn new(red: f64, green: f64, blue: f64, alpha: f64) -> Self { + MTLClearColor { + red, + green, + blue, + alpha, + } + } +} + +#[repr(u32)] +#[allow(non_camel_case_types)] +pub enum MTLMultisampleStencilResolveFilter { + Sample0 = 0, + DepthResolvedSample = 1, +} + +pub enum MTLRenderPassAttachmentDescriptor {} + +foreign_obj_type! { + type CType = MTLRenderPassAttachmentDescriptor; + pub struct RenderPassAttachmentDescriptor; + pub struct RenderPassAttachmentDescriptorRef; +} + +impl RenderPassAttachmentDescriptorRef { + pub fn texture(&self) -> Option<&TextureRef> { + unsafe { msg_send![self, texture] } + } + + pub fn set_texture(&self, texture: Option<&TextureRef>) { + unsafe { msg_send![self, setTexture: texture] } + } + + pub fn level(&self) -> NSUInteger { + unsafe { msg_send![self, level] } + } + + pub fn set_level(&self, level: NSUInteger) { + unsafe { msg_send![self, setLevel: level] } + } + + pub fn slice(&self) -> NSUInteger { + unsafe { msg_send![self, slice] } + } + + pub fn set_slice(&self, slice: NSUInteger) { + unsafe { msg_send![self, setSlice: slice] } + } + + pub fn depth_plane(&self) -> NSUInteger { + unsafe { msg_send![self, depthPlane] } + } + + pub fn set_depth_plane(&self, depth_plane: NSUInteger) { + unsafe { msg_send![self, setDepthPlane: depth_plane] } + } + + pub fn resolve_texture(&self) -> Option<&TextureRef> { + unsafe { msg_send![self, resolveTexture] } + } + + pub fn set_resolve_texture(&self, resolve_texture: Option<&TextureRef>) { + unsafe { msg_send![self, setResolveTexture: resolve_texture] } + } + + pub fn resolve_level(&self) -> NSUInteger { + unsafe { msg_send![self, resolveLevel] } + } + + pub fn set_resolve_level(&self, resolve_level: NSUInteger) { + unsafe { msg_send![self, setResolveLevel: resolve_level] } + } + + pub fn resolve_slice(&self) -> NSUInteger { + unsafe { msg_send![self, resolveSlice] } + } + + pub fn set_resolve_slice(&self, resolve_slice: NSUInteger) { + unsafe { msg_send![self, setResolveSlice: resolve_slice] } + } + + pub fn resolve_depth_plane(&self) -> NSUInteger { + unsafe { msg_send![self, resolveDepthPlane] } + } + + pub fn 
set_resolve_depth_plane(&self, resolve_depth_plane: NSUInteger) { + unsafe { msg_send![self, setResolveDepthPlane: resolve_depth_plane] } + } + + pub fn load_action(&self) -> MTLLoadAction { + unsafe { msg_send![self, loadAction] } + } + + pub fn set_load_action(&self, load_action: MTLLoadAction) { + unsafe { msg_send![self, setLoadAction: load_action] } + } + + pub fn store_action(&self) -> MTLStoreAction { + unsafe { msg_send![self, storeAction] } + } + + pub fn set_store_action(&self, store_action: MTLStoreAction) { + unsafe { msg_send![self, setStoreAction: store_action] } + } +} + +pub enum MTLRenderPassColorAttachmentDescriptor {} + +foreign_obj_type! { + type CType = MTLRenderPassColorAttachmentDescriptor; + pub struct RenderPassColorAttachmentDescriptor; + pub struct RenderPassColorAttachmentDescriptorRef; + type ParentType = RenderPassAttachmentDescriptorRef; +} + +impl RenderPassColorAttachmentDescriptor { + pub fn new() -> Self { + unsafe { + let class = class!(MTLRenderPassColorAttachmentDescriptor); + msg_send![class, new] + } + } +} + +impl RenderPassColorAttachmentDescriptorRef { + pub fn clear_color(&self) -> MTLClearColor { + unsafe { msg_send![self, clearColor] } + } + + pub fn set_clear_color(&self, clear_color: MTLClearColor) { + unsafe { msg_send![self, setClearColor: clear_color] } + } +} + +pub enum MTLRenderPassDepthAttachmentDescriptor {} + +foreign_obj_type! { + type CType = MTLRenderPassDepthAttachmentDescriptor; + pub struct RenderPassDepthAttachmentDescriptor; + pub struct RenderPassDepthAttachmentDescriptorRef; + type ParentType = RenderPassAttachmentDescriptorRef; +} + +impl RenderPassDepthAttachmentDescriptorRef { + pub fn clear_depth(&self) -> f64 { + unsafe { msg_send![self, clearDepth] } + } + + pub fn set_clear_depth(&self, clear_depth: f64) { + unsafe { msg_send![self, setClearDepth: clear_depth] } + } +} + +pub enum MTLRenderPassStencilAttachmentDescriptor {} + +foreign_obj_type! { + type CType = MTLRenderPassStencilAttachmentDescriptor; + pub struct RenderPassStencilAttachmentDescriptor; + pub struct RenderPassStencilAttachmentDescriptorRef; + type ParentType = RenderPassAttachmentDescriptorRef; +} + +impl RenderPassStencilAttachmentDescriptorRef { + pub fn clear_stencil(&self) -> u32 { + unsafe { msg_send![self, clearStencil] } + } + + pub fn set_clear_stencil(&self, clear_stencil: u32) { + unsafe { msg_send![self, setClearStencil: clear_stencil] } + } + + pub fn stencil_resolve_filter(&self) -> MTLMultisampleStencilResolveFilter { + unsafe { msg_send![self, stencilResolveFilter] } + } + + pub fn set_stencil_resolve_filter( + &self, + stencil_resolve_filter: MTLMultisampleStencilResolveFilter, + ) { + unsafe { msg_send![self, setStencilResolveFilter: stencil_resolve_filter] } + } +} + +pub enum MTLRenderPassColorAttachmentDescriptorArray {} + +foreign_obj_type! { + type CType = MTLRenderPassColorAttachmentDescriptorArray; + pub struct RenderPassColorAttachmentDescriptorArray; + pub struct RenderPassColorAttachmentDescriptorArrayRef; +} + +impl RenderPassColorAttachmentDescriptorArrayRef { + pub fn object_at(&self, index: usize) -> Option<&RenderPassColorAttachmentDescriptorRef> { + unsafe { msg_send![self, objectAtIndexedSubscript: index] } + } + + pub fn set_object_at( + &self, + index: usize, + attachment: Option<&RenderPassColorAttachmentDescriptorRef>, + ) { + unsafe { + msg_send![self, setObject:attachment + atIndexedSubscript:index] + } + } +} + +pub enum MTLRenderPassDescriptor {} + +foreign_obj_type! 
{ + type CType = MTLRenderPassDescriptor; + pub struct RenderPassDescriptor; + pub struct RenderPassDescriptorRef; +} + +impl RenderPassDescriptor { + pub fn new<'a>() -> &'a RenderPassDescriptorRef { + unsafe { + let class = class!(MTLRenderPassDescriptorInternal); + msg_send![class, renderPassDescriptor] + } + } +} + +impl RenderPassDescriptorRef { + pub fn color_attachments(&self) -> &RenderPassColorAttachmentDescriptorArrayRef { + unsafe { msg_send![self, colorAttachments] } + } + + pub fn depth_attachment(&self) -> Option<&RenderPassDepthAttachmentDescriptorRef> { + unsafe { msg_send![self, depthAttachment] } + } + + pub fn set_depth_attachment( + &self, + depth_attachment: Option<&RenderPassDepthAttachmentDescriptorRef>, + ) { + unsafe { msg_send![self, setDepthAttachment: depth_attachment] } + } + + pub fn stencil_attachment(&self) -> Option<&RenderPassStencilAttachmentDescriptorRef> { + unsafe { msg_send![self, stencilAttachment] } + } + + pub fn set_stencil_attachment( + &self, + stencil_attachment: Option<&RenderPassStencilAttachmentDescriptorRef>, + ) { + unsafe { msg_send![self, setStencilAttachment: stencil_attachment] } + } + + pub fn visibility_result_buffer(&self) -> Option<&BufferRef> { + unsafe { msg_send![self, visibilityResultBuffer] } + } + + pub fn set_visibility_result_buffer(&self, buffer: Option<&BufferRef>) { + unsafe { msg_send![self, setVisibilityResultBuffer: buffer] } + } + + pub fn render_target_array_length(&self) -> NSUInteger { + unsafe { msg_send![self, renderTargetArrayLength] } + } + + pub fn set_render_target_array_length(&self, length: NSUInteger) { + unsafe { msg_send![self, setRenderTargetArrayLength: length] } + } +} diff --git a/third_party/rust/metal/src/resource.rs b/third_party/rust/metal/src/resource.rs new file mode 100644 index 000000000000..57b0e7fb1c50 --- /dev/null +++ b/third_party/rust/metal/src/resource.rs @@ -0,0 +1,105 @@ + + + + + + + +use cocoa::foundation::NSUInteger; + +#[repr(u64)] +#[allow(non_camel_case_types)] +#[derive(Clone, Copy, PartialEq, Eq, Hash, Debug)] +pub enum MTLPurgeableState { + KeepCurrent = 1, + NonVolatile = 2, + Volatile = 3, + Empty = 4, +} + +#[repr(u64)] +#[allow(non_camel_case_types)] +#[derive(Clone, Copy, PartialEq, Eq, Hash, Debug)] +pub enum MTLCPUCacheMode { + DefaultCache = 0, + WriteCombined = 1, +} + +#[repr(u64)] +#[allow(non_camel_case_types)] +#[derive(Clone, Copy, PartialEq, Eq, Hash, Debug)] +pub enum MTLStorageMode { + Shared = 0, + Managed = 1, + Private = 2, + Memoryless = 3, +} + +pub const MTLResourceCPUCacheModeShift: NSUInteger = 0; +pub const MTLResourceCPUCacheModeMask: NSUInteger = (0xf << MTLResourceCPUCacheModeShift); +pub const MTLResourceStorageModeShift: NSUInteger = 4; +pub const MTLResourceStorageModeMask: NSUInteger = (0xf << MTLResourceStorageModeShift); + +bitflags! 
{ + #[allow(non_upper_case_globals)] + pub struct MTLResourceOptions: NSUInteger { + const CPUCacheModeDefaultCache = (MTLCPUCacheMode::DefaultCache as NSUInteger) << MTLResourceCPUCacheModeShift; + const CPUCacheModeWriteCombined = (MTLCPUCacheMode::WriteCombined as NSUInteger) << MTLResourceCPUCacheModeShift; + + const StorageModeShared = (MTLStorageMode::Shared as NSUInteger) << MTLResourceStorageModeShift; + const StorageModeManaged = (MTLStorageMode::Managed as NSUInteger) << MTLResourceStorageModeShift; + const StorageModePrivate = (MTLStorageMode::Private as NSUInteger) << MTLResourceStorageModeShift; + const StorageModeMemoryless = (MTLStorageMode::Memoryless as NSUInteger) << MTLResourceStorageModeShift; + } +} + +bitflags! { + pub struct MTLResourceUsage: NSUInteger { + const Read = 1 << 0; + const Write = 1 << 1; + const Sample = 1 << 2; + } +} + +#[derive(Clone, Copy, PartialEq, Eq, Hash, Debug)] +#[repr(C)] +pub struct MTLSizeAndAlign { + pub size: NSUInteger, + pub align: NSUInteger, +} + +pub enum MTLResource {} + +foreign_obj_type! { + type CType = MTLResource; + pub struct Resource; + pub struct ResourceRef; +} + +impl ResourceRef { + pub fn label(&self) -> &str { + unsafe { + let label = msg_send![self, label]; + crate::nsstring_as_str(label) + } + } + + pub fn set_label(&self, label: &str) { + unsafe { + let nslabel = crate::nsstring_from_str(label); + let () = msg_send![self, setLabel: nslabel]; + } + } + + pub fn cpu_cache_mode(&self) -> MTLCPUCacheMode { + unsafe { msg_send![self, cpuCacheMode] } + } + + pub fn storage_mode(&self) -> MTLStorageMode { + unsafe { msg_send![self, storageMode] } + } + + pub fn set_purgeable_state(&self, state: MTLPurgeableState) -> MTLPurgeableState { + unsafe { msg_send![self, setPurgeableState: state] } + } +} diff --git a/third_party/rust/metal/src/sampler.rs b/third_party/rust/metal/src/sampler.rs new file mode 100644 index 000000000000..b573a61f423d --- /dev/null +++ b/third_party/rust/metal/src/sampler.rs @@ -0,0 +1,132 @@ + + + + + + + +use cocoa::foundation::NSUInteger; + +use crate::depthstencil::MTLCompareFunction; + +#[repr(u64)] +#[derive(Copy, Clone)] +pub enum MTLSamplerMinMagFilter { + Nearest = 0, + Linear = 1, +} + +#[repr(u64)] +#[derive(Copy, Clone)] +pub enum MTLSamplerMipFilter { + NotMipmapped = 0, + Nearest = 1, + Linear = 2, +} + +#[repr(u64)] +#[derive(Copy, Clone)] +pub enum MTLSamplerAddressMode { + ClampToEdge = 0, + MirrorClampToEdge = 1, + Repeat = 2, + MirrorRepeat = 3, + ClampToZero = 4, + ClampToBorderColor = 5, +} + +#[repr(u64)] +#[derive(Copy, Clone)] +pub enum MTLSamplerBorderColor { + TransparentBlack = 0, + OpaqueBlack = 1, + OpaqueWhite = 2, +} + +pub enum MTLSamplerDescriptor {} + +foreign_obj_type! 
{ + type CType = MTLSamplerDescriptor; + pub struct SamplerDescriptor; + pub struct SamplerDescriptorRef; +} + +impl SamplerDescriptor { + pub fn new() -> Self { + unsafe { + let class = class!(MTLSamplerDescriptor); + msg_send![class, new] + } + } +} + +impl SamplerDescriptorRef { + pub fn set_min_filter(&self, filter: MTLSamplerMinMagFilter) { + unsafe { msg_send![self, setMinFilter: filter] } + } + + pub fn set_mag_filter(&self, filter: MTLSamplerMinMagFilter) { + unsafe { msg_send![self, setMagFilter: filter] } + } + + pub fn set_mip_filter(&self, filter: MTLSamplerMipFilter) { + unsafe { msg_send![self, setMipFilter: filter] } + } + + pub fn set_address_mode_s(&self, mode: MTLSamplerAddressMode) { + unsafe { msg_send![self, setSAddressMode: mode] } + } + + pub fn set_address_mode_t(&self, mode: MTLSamplerAddressMode) { + unsafe { msg_send![self, setTAddressMode: mode] } + } + + pub fn set_address_mode_r(&self, mode: MTLSamplerAddressMode) { + unsafe { msg_send![self, setRAddressMode: mode] } + } + + pub fn set_max_anisotropy(&self, anisotropy: NSUInteger) { + unsafe { msg_send![self, setMaxAnisotropy: anisotropy] } + } + + pub fn set_compare_function(&self, func: MTLCompareFunction) { + unsafe { msg_send![self, setCompareFunction: func] } + } + + #[cfg(feature = "private")] + pub unsafe fn set_lod_bias(&self, bias: f32) { + msg_send![self, setLodBias: bias] + } + + pub fn set_lod_min_clamp(&self, clamp: f32) { + unsafe { msg_send![self, setLodMinClamp: clamp] } + } + + pub fn set_lod_max_clamp(&self, clamp: f32) { + unsafe { msg_send![self, setLodMaxClamp: clamp] } + } + + pub fn set_lod_average(&self, enable: bool) { + unsafe { msg_send![self, setLodAverage: enable] } + } + + pub fn set_normalized_coordinates(&self, enable: bool) { + unsafe { msg_send![self, setNormalizedCoordinates: enable] } + } + + pub fn set_support_argument_buffers(&self, enable: bool) { + unsafe { msg_send![self, setSupportArgumentBuffers: enable] } + } + + pub fn set_border_color(&self, color: MTLSamplerBorderColor) { + unsafe { msg_send![self, setBorderColor: color] } + } +} + +pub enum MTLSamplerState {} + +foreign_obj_type! { + type CType = MTLSamplerState; + pub struct SamplerState; + pub struct SamplerStateRef; +} diff --git a/third_party/rust/metal/src/texture.rs b/third_party/rust/metal/src/texture.rs new file mode 100644 index 000000000000..8ff4e6fa5d7c --- /dev/null +++ b/third_party/rust/metal/src/texture.rs @@ -0,0 +1,323 @@ + + + + + + + +use super::*; + +use cocoa::foundation::{NSRange, NSUInteger}; +use objc::runtime::{NO, YES}; + +#[repr(u64)] +#[allow(non_camel_case_types)] +#[derive(Copy, Clone, Debug, Hash, PartialEq, Eq)] +pub enum MTLTextureType { + D1 = 0, + D1Array = 1, + D2 = 2, + D2Array = 3, + D2Multisample = 4, + Cube = 5, + CubeArray = 6, + D3 = 7, +} + +bitflags! { + pub struct MTLTextureUsage: NSUInteger { + const Unknown = 0x0000; + const ShaderRead = 0x0001; + const ShaderWrite = 0x0002; + const RenderTarget = 0x0004; + const PixelFormatView = 0x0010; + } +} + +pub enum MTLTextureDescriptor {} + +foreign_obj_type! 
{ + type CType = MTLTextureDescriptor; + pub struct TextureDescriptor; + pub struct TextureDescriptorRef; +} + +impl TextureDescriptor { + pub fn new() -> Self { + unsafe { + let class = class!(MTLTextureDescriptor); + msg_send![class, new] + } + } +} + +impl TextureDescriptorRef { + pub fn texture_type(&self) -> MTLTextureType { + unsafe { msg_send![self, textureType] } + } + + pub fn set_texture_type(&self, texture_type: MTLTextureType) { + unsafe { msg_send![self, setTextureType: texture_type] } + } + + pub fn pixel_format(&self) -> MTLPixelFormat { + unsafe { msg_send![self, pixelFormat] } + } + + pub fn set_pixel_format(&self, pixel_format: MTLPixelFormat) { + unsafe { msg_send![self, setPixelFormat: pixel_format] } + } + + pub fn width(&self) -> NSUInteger { + unsafe { msg_send![self, width] } + } + + pub fn set_width(&self, width: NSUInteger) { + unsafe { msg_send![self, setWidth: width] } + } + + pub fn height(&self) -> NSUInteger { + unsafe { msg_send![self, height] } + } + + pub fn set_height(&self, height: NSUInteger) { + unsafe { msg_send![self, setHeight: height] } + } + + pub fn depth(&self) -> NSUInteger { + unsafe { msg_send![self, depth] } + } + + pub fn set_depth(&self, depth: NSUInteger) { + unsafe { msg_send![self, setDepth: depth] } + } + + pub fn mipmap_level_count(&self) -> NSUInteger { + unsafe { msg_send![self, mipmapLevelCount] } + } + + pub fn set_mipmap_level_count(&self, count: NSUInteger) { + unsafe { msg_send![self, setMipmapLevelCount: count] } + } + + pub fn sample_count(&self) -> NSUInteger { + unsafe { msg_send![self, sampleCount] } + } + + pub fn set_sample_count(&self, count: NSUInteger) { + unsafe { msg_send![self, setSampleCount: count] } + } + + pub fn array_length(&self) -> NSUInteger { + unsafe { msg_send![self, arrayLength] } + } + + pub fn set_array_length(&self, length: NSUInteger) { + unsafe { msg_send![self, setArrayLength: length] } + } + + pub fn resource_options(&self) -> MTLResourceOptions { + unsafe { msg_send![self, resourceOptions] } + } + + pub fn set_resource_options(&self, options: MTLResourceOptions) { + unsafe { msg_send![self, setResourceOptions: options] } + } + + pub fn cpu_cache_mode(&self) -> MTLCPUCacheMode { + unsafe { msg_send![self, cpuCacheMode] } + } + + pub fn set_cpu_cache_mode(&self, mode: MTLCPUCacheMode) { + unsafe { msg_send![self, setCpuCacheMode: mode] } + } + + pub fn storage_mode(&self) -> MTLStorageMode { + unsafe { msg_send![self, storageMode] } + } + + pub fn set_storage_mode(&self, mode: MTLStorageMode) { + unsafe { msg_send![self, setStorageMode: mode] } + } + + pub fn usage(&self) -> MTLTextureUsage { + unsafe { msg_send![self, usage] } + } + + pub fn set_usage(&self, usage: MTLTextureUsage) { + unsafe { msg_send![self, setUsage: usage] } + } +} + +pub enum MTLTexture {} + +foreign_obj_type! 
{ + type CType = MTLTexture; + pub struct Texture; + pub struct TextureRef; + type ParentType = ResourceRef; +} + +impl TextureRef { + #[deprecated(since = "0.13.0")] + pub fn root_resource(&self) -> Option<&ResourceRef> { + unsafe { msg_send![self, rootResource] } + } + + pub fn parent_texture(&self) -> Option<&TextureRef> { + unsafe { msg_send![self, parentTexture] } + } + + pub fn parent_relative_level(&self) -> NSUInteger { + unsafe { msg_send![self, parentRelativeLevel] } + } + + pub fn parent_relative_slice(&self) -> NSUInteger { + unsafe { msg_send![self, parentRelativeSlice] } + } + + pub fn buffer(&self) -> Option<&BufferRef> { + unsafe { msg_send![self, buffer] } + } + + pub fn buffer_offset(&self) -> NSUInteger { + unsafe { msg_send![self, bufferOffset] } + } + + pub fn buffer_stride(&self) -> NSUInteger { + unsafe { msg_send![self, bufferBytesPerRow] } + } + + pub fn texture_type(&self) -> MTLTextureType { + unsafe { msg_send![self, textureType] } + } + + pub fn pixel_format(&self) -> MTLPixelFormat { + unsafe { msg_send![self, pixelFormat] } + } + + pub fn width(&self) -> NSUInteger { + unsafe { msg_send![self, width] } + } + + pub fn height(&self) -> NSUInteger { + unsafe { msg_send![self, height] } + } + + pub fn depth(&self) -> NSUInteger { + unsafe { msg_send![self, depth] } + } + + pub fn mipmap_level_count(&self) -> NSUInteger { + unsafe { msg_send![self, mipmapLevelCount] } + } + + pub fn sample_count(&self) -> NSUInteger { + unsafe { msg_send![self, sampleCount] } + } + + pub fn array_length(&self) -> NSUInteger { + unsafe { msg_send![self, arrayLength] } + } + + pub fn usage(&self) -> MTLTextureUsage { + unsafe { msg_send![self, usage] } + } + + pub fn framebuffer_only(&self) -> bool { + unsafe { + match msg_send![self, framebufferOnly] { + YES => true, + NO => false, + _ => unreachable!(), + } + } + } + + pub fn get_bytes( + &self, + bytes: *mut std::ffi::c_void, + region: MTLRegion, + mipmap_level: NSUInteger, + stride: NSUInteger, + ) { + unsafe { + msg_send![self, getBytes:bytes + bytesPerRow:stride + fromRegion:region + mipmapLevel:mipmap_level] + } + } + + pub fn get_bytes_in_slice( + &self, + bytes: *mut std::ffi::c_void, + region: MTLRegion, + mipmap_level: NSUInteger, + stride: NSUInteger, + image_stride: NSUInteger, + slice: NSUInteger, + ) { + unsafe { + msg_send![self, getBytes:bytes + bytesPerRow:stride + bytesPerImage:image_stride + fromRegion:region + mipmapLevel:mipmap_level + slice:slice] + } + } + + pub fn replace_region( + &self, + region: MTLRegion, + mipmap_level: NSUInteger, + stride: NSUInteger, + bytes: *const std::ffi::c_void, + ) { + unsafe { + msg_send![self, replaceRegion:region + mipmapLevel:mipmap_level + withBytes:bytes + bytesPerRow:stride] + } + } + + pub fn replace_region_in_slice( + &self, + region: MTLRegion, + mipmap_level: NSUInteger, + image_stride: NSUInteger, + stride: NSUInteger, + slice: NSUInteger, + bytes: *const std::ffi::c_void, + ) { + unsafe { + msg_send![self, replaceRegion:region + mipmapLevel:mipmap_level + slice:slice + withBytes:bytes + bytesPerRow:stride + bytesPerImage:image_stride] + } + } + + pub fn new_texture_view(&self, pixel_format: MTLPixelFormat) -> Texture { + unsafe { msg_send![self, newTextureViewWithPixelFormat: pixel_format] } + } + + pub fn new_texture_view_from_slice( + &self, + pixel_format: MTLPixelFormat, + texture_type: MTLTextureType, + mipmap_levels: NSRange, + slices: NSRange, + ) -> Texture { + unsafe { + msg_send![self, newTextureViewWithPixelFormat:pixel_format + textureType:texture_type 
+ levels:mipmap_levels + slices:slices] + } + } +} diff --git a/third_party/rust/metal/src/types.rs b/third_party/rust/metal/src/types.rs new file mode 100644 index 000000000000..553762b23fc8 --- /dev/null +++ b/third_party/rust/metal/src/types.rs @@ -0,0 +1,38 @@ + + + + + + + +use cocoa::foundation::NSUInteger; + +#[repr(C)] +#[derive(Copy, Clone, Debug, Eq, PartialEq, Hash)] +pub struct MTLOrigin { + pub x: NSUInteger, + pub y: NSUInteger, + pub z: NSUInteger, +} + +#[repr(C)] +#[derive(Copy, Clone, Debug, Eq, PartialEq, Hash)] +pub struct MTLSize { + pub width: NSUInteger, + pub height: NSUInteger, + pub depth: NSUInteger, +} + +#[repr(C)] +#[derive(Copy, Clone, Debug, Eq, PartialEq, Hash)] +pub struct MTLRegion { + pub origin: MTLOrigin, + pub size: MTLSize, +} + +#[repr(C)] +#[derive(Copy, Clone, Debug)] +pub struct MTLSamplePosition { + pub x: f32, + pub y: f32, +} diff --git a/third_party/rust/metal/src/vertexdescriptor.rs b/third_party/rust/metal/src/vertexdescriptor.rs new file mode 100644 index 000000000000..06321f143185 --- /dev/null +++ b/third_party/rust/metal/src/vertexdescriptor.rs @@ -0,0 +1,239 @@ + + + + + + + +use cocoa::foundation::NSUInteger; + +#[repr(u64)] +#[allow(non_camel_case_types)] +#[derive(Copy, Clone, Debug, Eq, PartialEq, Hash)] +pub enum MTLVertexFormat { + Invalid = 0, + UChar2 = 1, + UChar3 = 2, + UChar4 = 3, + Char2 = 4, + Char3 = 5, + Char4 = 6, + UChar2Normalized = 7, + UChar3Normalized = 8, + UChar4Normalized = 9, + Char2Normalized = 10, + Char3Normalized = 11, + Char4Normalized = 12, + UShort2 = 13, + UShort3 = 14, + UShort4 = 15, + Short2 = 16, + Short3 = 17, + Short4 = 18, + UShort2Normalized = 19, + UShort3Normalized = 20, + UShort4Normalized = 21, + Short2Normalized = 22, + Short3Normalized = 23, + Short4Normalized = 24, + Half2 = 25, + Half3 = 26, + Half4 = 27, + Float = 28, + Float2 = 29, + Float3 = 30, + Float4 = 31, + Int = 32, + Int2 = 33, + Int3 = 34, + Int4 = 35, + UInt = 36, + UInt2 = 37, + UInt3 = 38, + UInt4 = 39, + Int1010102Normalized = 40, + UInt1010102Normalized = 41, + UChar4Normalized_BGRA = 42, + UChar = 45, + Char = 46, + UCharNormalized = 47, + CharNormalized = 48, + UShort = 49, + Short = 50, + UShortNormalized = 51, + ShortNormalized = 52, + Half = 53, +} + +#[repr(u64)] +pub enum MTLVertexStepFunction { + Constant = 0, + PerVertex = 1, + PerInstance = 2, + PerPatch = 3, + PerPatchControlPoint = 4, +} + +pub enum MTLVertexBufferLayoutDescriptor {} + +foreign_obj_type! { + type CType = MTLVertexBufferLayoutDescriptor; + pub struct VertexBufferLayoutDescriptor; + pub struct VertexBufferLayoutDescriptorRef; +} + +impl VertexBufferLayoutDescriptor { + pub fn new() -> Self { + unsafe { + let class = class!(MTLVertexBufferLayoutDescriptor); + msg_send![class, new] + } + } +} + +impl VertexBufferLayoutDescriptorRef { + pub fn stride(&self) -> NSUInteger { + unsafe { msg_send![self, stride] } + } + + pub fn set_stride(&self, stride: NSUInteger) { + unsafe { msg_send![self, setStride: stride] } + } + + pub fn step_function(&self) -> MTLVertexStepFunction { + unsafe { msg_send![self, stepFunction] } + } + + pub fn set_step_function(&self, func: MTLVertexStepFunction) { + unsafe { msg_send![self, setStepFunction: func] } + } + + pub fn step_rate(&self) -> NSUInteger { + unsafe { msg_send![self, stepRate] } + } + + pub fn set_step_rate(&self, step_rate: NSUInteger) { + unsafe { msg_send![self, setStepRate: step_rate] } + } +} + +pub enum MTLVertexBufferLayoutDescriptorArray {} + +foreign_obj_type! 
{ + type CType = MTLVertexBufferLayoutDescriptorArray; + pub struct VertexBufferLayoutDescriptorArray; + pub struct VertexBufferLayoutDescriptorArrayRef; +} + +impl VertexBufferLayoutDescriptorArrayRef { + pub fn object_at(&self, index: usize) -> Option<&VertexBufferLayoutDescriptorRef> { + unsafe { msg_send![self, objectAtIndexedSubscript: index] } + } + + pub fn set_object_at(&self, index: usize, layout: Option<&VertexBufferLayoutDescriptorRef>) { + unsafe { + msg_send![self, setObject:layout + atIndexedSubscript:index] + } + } +} + +pub enum MTLVertexAttributeDescriptor {} + +foreign_obj_type! { + type CType = MTLVertexAttributeDescriptor; + pub struct VertexAttributeDescriptor; + pub struct VertexAttributeDescriptorRef; +} + +impl VertexAttributeDescriptor { + pub fn new() -> Self { + unsafe { + let class = class!(MTLVertexAttributeDescriptor); + msg_send![class, new] + } + } +} + +impl VertexAttributeDescriptorRef { + pub fn format(&self) -> MTLVertexFormat { + unsafe { msg_send![self, format] } + } + + pub fn set_format(&self, format: MTLVertexFormat) { + unsafe { msg_send![self, setFormat: format] } + } + + pub fn offset(&self) -> NSUInteger { + unsafe { msg_send![self, offset] } + } + + pub fn set_offset(&self, offset: NSUInteger) { + unsafe { msg_send![self, setOffset: offset] } + } + + pub fn buffer_index(&self) -> NSUInteger { + unsafe { msg_send![self, bufferIndex] } + } + + pub fn set_buffer_index(&self, index: NSUInteger) { + unsafe { msg_send![self, setBufferIndex: index] } + } +} + +pub enum MTLVertexAttributeDescriptorArray {} + +foreign_obj_type! { + type CType = MTLVertexAttributeDescriptorArray; + pub struct VertexAttributeDescriptorArray; + pub struct VertexAttributeDescriptorArrayRef; +} + +impl VertexAttributeDescriptorArrayRef { + pub fn object_at(&self, index: usize) -> Option<&VertexAttributeDescriptorRef> { + unsafe { msg_send![self, objectAtIndexedSubscript: index] } + } + + pub fn set_object_at(&self, index: usize, attribute: Option<&VertexAttributeDescriptorRef>) { + unsafe { + msg_send![self, setObject:attribute + atIndexedSubscript:index] + } + } +} + +pub enum MTLVertexDescriptor {} + +foreign_obj_type! 
diff --git a/third_party/rust/neqo-common/.cargo-checksum.json b/third_party/rust/neqo-common/.cargo-checksum.json
index ba51f96c36e3..c9cdf2998f62 100644
--- a/third_party/rust/neqo-common/.cargo-checksum.json
+++ b/third_party/rust/neqo-common/.cargo-checksum.json
@@ -1 +1 @@
-{"files":{"Cargo.toml":"6791b2f6143d31a8b2511e995a1ca902e0827f8cefdd966a477ad11068602e80","src/codec.rs":"6c619c42d1293297fd1e1763213033a82723bfa97938f5dd9fde6745f965940e","src/datagram.rs":"47d69797b66108cec997375818cb43ba9575b89f8730277047c6889de09b12aa","src/incrdecoder.rs":"4d55c9d992a4c409e4b4e37a8c04d3741d2ba260f46d5385cb3eff5121db03de","src/lib.rs":"7204f3eb32908563ffd50e615534326509be781cb5afd111f3bccdd6cf04249c","src/log.rs":"68a0a30344edcfad6c222eed62f5810fb9aa896fea7ec782b6ca2b2fc9a0bd4b","src/once.rs":"ad0d1ac0233dda75e294b5ccab65caceaec66d277659e22b1236aceea0c53ede","src/timer.rs":"13fb2ad4ef435d57895c61c291aca82a261c93c0f2cae2634929fb6ca5fdac85","tests/log.rs":"79e01eeef039d1abb17aadb2212256ad064c53e6d72bbebe254230119a623510"},"package":null}
\ No newline at end of file
+{"files":{"Cargo.toml":"6791b2f6143d31a8b2511e995a1ca902e0827f8cefdd966a477ad11068602e80","src/codec.rs":"6c619c42d1293297fd1e1763213033a82723bfa97938f5dd9fde6745f965940e","src/datagram.rs":"47d69797b66108cec997375818cb43ba9575b89f8730277047c6889de09b12aa","src/incrdecoder.rs":"4d55c9d992a4c409e4b4e37a8c04d3741d2ba260f46d5385cb3eff5121db03de","src/lib.rs":"7204f3eb32908563ffd50e615534326509be781cb5afd111f3bccdd6cf04249c","src/log.rs":"943e4e332400d94805d60f965d1d0ae7aad180f6d5b50936d0bd9e085bbc1502","src/once.rs":"ad0d1ac0233dda75e294b5ccab65caceaec66d277659e22b1236aceea0c53ede","src/timer.rs":"13fb2ad4ef435d57895c61c291aca82a261c93c0f2cae2634929fb6ca5fdac85","tests/log.rs":"1c7ae6cb43047877fff27f3d01abc8ac456ea0907e88473d0179215342391d5d"},"package":null}
\ No newline at end of file
diff --git a/third_party/rust/neqo-common/src/log.rs b/third_party/rust/neqo-common/src/log.rs
index c913c718255d..f7bbd2216365 100644
--- a/third_party/rust/neqo-common/src/log.rs
+++ b/third_party/rust/neqo-common/src/log.rs
@@ -47,30 +47,25 @@ macro_rules! qlog {
 #[macro_export]
 macro_rules! qerror {
     ([$ctx:expr], $($arg:tt)*) => (::neqo_common::qlog!(::log::Level::Error, $ctx, $($arg)*););
-    ([$ctx:expr] $($arg:tt)*) => (::neqo_common::qlog!(::log::Level::Error, $ctx, $($arg)*););
     ($($arg:tt)*) => ( { ::neqo_common::log::init(); ::log::log!(::log::Level::Error, $($arg)*); } );
 }
 
 #[macro_export]
 macro_rules! qwarn {
     ([$ctx:expr], $($arg:tt)*) => (::neqo_common::qlog!(::log::Level::Warn, $ctx, $($arg)*););
-    ([$ctx:expr] $($arg:tt)*) => (::neqo_common::qlog!(::log::Level::Warn, $ctx, $($arg)*););
     ($($arg:tt)*) => ( { ::neqo_common::log::init(); ::log::log!(::log::Level::Warn, $($arg)*); } );
 }
 
 #[macro_export]
 macro_rules! qinfo {
     ([$ctx:expr], $($arg:tt)*) => (::neqo_common::qlog!(::log::Level::Info, $ctx, $($arg)*););
-    ([$ctx:expr] $($arg:tt)*) => (::neqo_common::qlog!(::log::Level::Info, $ctx, $($arg)*););
     ($($arg:tt)*) => ( { ::neqo_common::log::init(); ::log::log!(::log::Level::Info, $($arg)*); } );
 }
 
 #[macro_export]
 macro_rules! qdebug {
     ([$ctx:expr], $($arg:tt)*) => (::neqo_common::qlog!(::log::Level::Debug, $ctx, $($arg)*););
-    ([$ctx:expr] $($arg:tt)*) => (::neqo_common::qlog!(::log::Level::Debug, $ctx, $($arg)*););
     ($($arg:tt)*) => ( { ::neqo_common::log::init(); ::log::log!(::log::Level::Debug, $($arg)*); } );
 }
 
 #[macro_export]
 macro_rules! qtrace {
     ([$ctx:expr], $($arg:tt)*) => (::neqo_common::qlog!(::log::Level::Trace, $ctx, $($arg)*););
-    ([$ctx:expr] $($arg:tt)*) => (::neqo_common::qlog!(::log::Level::Trace, $ctx, $($arg)*););
     ($($arg:tt)*) => ( { ::neqo_common::log::init(); ::log::log!(::log::Level::Trace, $($arg)*); } );
 }
diff --git a/third_party/rust/neqo-common/tests/log.rs b/third_party/rust/neqo-common/tests/log.rs
index fba42d1ff9d3..669c24faa324 100644
--- a/third_party/rust/neqo-common/tests/log.rs
+++ b/third_party/rust/neqo-common/tests/log.rs
@@ -31,11 +31,11 @@ fn args() {
 #[test]
 fn context() {
     let context = "context";
-    qerror!([context] "error");
-    qwarn!([context] "warn");
-    qinfo!([context] "info");
-    qdebug!([context] "debug");
-    qtrace!([context] "trace");
+    qerror!([context], "error");
+    qwarn!([context], "warn");
+    qinfo!([context], "info");
+    qdebug!([context], "debug");
+    qtrace!([context], "trace");
 }
 
 #[test]
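The macro change is the whole point of these hunks: each logging macro previously kept a redundant arm accepting `qinfo!([ctx] "...")` with no separator, and only the comma form survives, which is why every call site in the rest of this patch gains a comma. A sketch of the two calling conventions that remain, assuming any `Display` value as the context (the `Conn` type here is illustrative):

```rust
use neqo_common::{qinfo, qwarn};
use std::fmt;

struct Conn(u64);

impl fmt::Display for Conn {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        write!(f, "Conn {}", self.0)
    }
}

fn demo(c: &Conn) {
    qinfo!("no context: {}", 1);         // plain form, unchanged
    qinfo!([c], "with context: {}", 2);  // comma now required before the args
    qwarn!([format!("{:p}", c)], "any Display value works as the context");
}
```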
-{"files":{"Cargo.toml":"d7afc57115b1915c379e45550e58e5b09245db87affd7a55b8b733a852a542a0","TODO":"ac0f1c2ebcca03f5b3c0cc56c5aedbb030a4b511e438bc07a57361c789f91e9f","bindings/bindings.toml":"0f305bda9513e7fb4b521df79912ad5ba21784377b84f4b531895619e561f356","bindings/mozpkix.hpp":"77072c8bb0f6eb6bfe8cbadc111dcd92e0c79936d13f2e501aae1e5d289a6675","bindings/nspr_err.h":"2d5205d017b536c2d838bcf9bc4ec79f96dd50e7bb9b73892328781f1ee6629d","bindings/nspr_error.h":"e41c03c77b8c22046f8618832c9569fbcc7b26d8b9bbc35eea7168f35e346889","bindings/nspr_io.h":"085b289849ef0e77f88512a27b4d9bdc28252bd4d39c6a17303204e46ef45f72","bindings/nspr_time.h":"2e637fd338a5cf0fd3fb0070a47f474a34c2a7f4447f31b6875f5a9928d0a261","bindings/nss_ciphers.h":"95ec6344a607558b3c5ba8510f463b6295f3a2fb3f538a01410531045a5f62d1","bindings/nss_init.h":"ef49045063782fb612aff459172cc6a89340f15005808608ade5320ca9974310","bindings/nss_p11.h":"0b81e64fe6db49b2ecff94edd850be111ef99ec11220e88ceb1c67be90143a78","bindings/nss_secerr.h":"713e8368bdae5159af7893cfa517dabfe5103cede051dee9c9557c850a2defc6","bindings/nss_ssl.h":"af222fb957b989e392e762fa2125c82608a0053aff4fb97e556691646c88c335","bindings/nss_sslerr.h":"24b97f092183d8486f774cdaef5030d0249221c78343570d83a4ee5b594210ae","bindings/nss_sslopt.h":"b7807eb7abdad14db6ad7bc51048a46b065a0ea65a4508c95a12ce90e59d1eea","build.rs":"ebb685e47de005413d8e24370ba22576b45b0273aaf0a5116af96dcdc48d32c0","src/aead.rs":"bfbd4b72354fb7103ff31a2a600dd457e10fdc14ad670a5de1f5bb0f9b6e1504","src/agent.rs":"b8f59b2f1d3432db0b810383c2ae6d17a79b89819b90da50eecc3370cae77398","src/agentio.rs":"615a805e0f27970755daa5bfe864b2e9b3b09309dfa4178b85f38a13ae6f7131","src/auth.rs":"846a4954186f0bcabc4084214ae216819215f3bccc33cc08d93abb9c5fefb7f2","src/cert.rs":"fd3fd2bbb38754bdcee3898549feae412943c9f719032531c1ad6e61783b5394","src/constants.rs":"75dec8e3c74326f492a115a0e7a487daba32eba30bcbd64d2223333b3caa4008","src/err.rs":"81d5c9b75457d4367607d0a456c276fa037c0f66a1c8df40af58ff42426c9890","src/exp.rs":"61586662407359c1ecb8ed4987bc3c702f26ba2e203a091a51b6d6363cbd510f","src/ext.rs":"f3cc5bfdd96d46affff43653258d91eb447251f83da19b37dd415129052413e3","src/hkdf.rs":"6d44f63493f0c558a23339f88fe766f8afdb0bda3dc11a79e8a99d3c8d0b6acb","src/hp.rs":"854ce7b9d44892fbb01ac4078b84266771a9254cebfea5b94e7f4b4a7fb1b946","src/lib.rs":"6c3540b4e54510f6a298f04093f44f6d957f30f475214fd3ec9e39fa4d98e386","src/p11.rs":"89df482ae683646add0f46333991f31fe996fdce67859a1ff37c090a4166ce6e","src/prio.rs":"0e213056f6bf0c797c2cfe13c6d14dbb64a64b1218fff21cbf36fb3309b852f9","src/replay.rs":"01eae2accfefbc26719fcccd4bcb8c1ea6400ab96fbb696ecdb8f32164f931a2","src/result.rs":"d76c7bc5e99c80a5a124909ab586cdb91d894856e52f5146430da43584a6d6c1","src/secrets.rs":"e929b69927d93b4bde3bd490c0ed9a4e1e4c5a047492259ab1dae7fbad885c22","src/selfencrypt.rs":"9bffad6af2f186f606bd7305a8528d76e66471a71f7103c7498b90507fb031e1","src/ssl.rs":"4c7c850777a1b4b7b66ad765e24a25780e64f24da08175b5cc722a840d35f693","src/time.rs":"4dffa6f4ac9cfc8db240c370fb04a7d7241c80793ecf6acda2d41d0bc94b0236","tests/aead.rs":"bedf985ba0b95a9c6db6a84f870d15062d821f3b24cb3cb9dfa89445be795d50","tests/agent.rs":"8b9ca3c182cf065b7668fd9c7e5885b1cde8bb1d0ea3afbb5fb7a3186d7a9d2e","tests/ext.rs":"b1d2f9d68d18e24df5f246e4ad6f80a0a0d98f824094a9fa447a580b02486d90","tests/handshake.rs":"2752bd6c575e7d28db2bce8484aa08ba08846f30aa0bb9aa07153d1763dab830","tests/hkdf.rs":"83300020a18d5485a1adcd3b806aed64fd008072589875112145f401340f3602","tests/hp.rs":"83f453a792ef17eb51a20b2764407c28a60620f5d3b917c8e237a121b32988df","tests
/init.rs":"abb08d3d5d9610d62dc94535230ed4f52743d90b7059224b152caa8cf7cf43d7","tests/selfencrypt.rs":"365bc96be63d2e970bab7cf0134a59526513e1c1c3b854c34fa44fc8ed8c10d3"},"package":null} \ No newline at end of file +{"files":{"Cargo.toml":"d7afc57115b1915c379e45550e58e5b09245db87affd7a55b8b733a852a542a0","TODO":"ac0f1c2ebcca03f5b3c0cc56c5aedbb030a4b511e438bc07a57361c789f91e9f","bindings/bindings.toml":"0f305bda9513e7fb4b521df79912ad5ba21784377b84f4b531895619e561f356","bindings/mozpkix.hpp":"77072c8bb0f6eb6bfe8cbadc111dcd92e0c79936d13f2e501aae1e5d289a6675","bindings/nspr_err.h":"2d5205d017b536c2d838bcf9bc4ec79f96dd50e7bb9b73892328781f1ee6629d","bindings/nspr_error.h":"e41c03c77b8c22046f8618832c9569fbcc7b26d8b9bbc35eea7168f35e346889","bindings/nspr_io.h":"085b289849ef0e77f88512a27b4d9bdc28252bd4d39c6a17303204e46ef45f72","bindings/nspr_time.h":"2e637fd338a5cf0fd3fb0070a47f474a34c2a7f4447f31b6875f5a9928d0a261","bindings/nss_ciphers.h":"95ec6344a607558b3c5ba8510f463b6295f3a2fb3f538a01410531045a5f62d1","bindings/nss_init.h":"ef49045063782fb612aff459172cc6a89340f15005808608ade5320ca9974310","bindings/nss_p11.h":"0b81e64fe6db49b2ecff94edd850be111ef99ec11220e88ceb1c67be90143a78","bindings/nss_secerr.h":"713e8368bdae5159af7893cfa517dabfe5103cede051dee9c9557c850a2defc6","bindings/nss_ssl.h":"af222fb957b989e392e762fa2125c82608a0053aff4fb97e556691646c88c335","bindings/nss_sslerr.h":"24b97f092183d8486f774cdaef5030d0249221c78343570d83a4ee5b594210ae","bindings/nss_sslopt.h":"b7807eb7abdad14db6ad7bc51048a46b065a0ea65a4508c95a12ce90e59d1eea","build.rs":"0a4eaffefed2e190286a0de3eb69cd860ed4aea215d58a7135331b1850a5f82d","src/aead.rs":"bfbd4b72354fb7103ff31a2a600dd457e10fdc14ad670a5de1f5bb0f9b6e1504","src/agent.rs":"f6b412b87c17b560fd1fb92f534f2aaa3732eec1c12cdc41a7bda67b2f0904a9","src/agentio.rs":"25b87af28366d32671770900bed3bf093b141cc92c5193be71e4f748eea2a313","src/auth.rs":"846a4954186f0bcabc4084214ae216819215f3bccc33cc08d93abb9c5fefb7f2","src/cert.rs":"fd3fd2bbb38754bdcee3898549feae412943c9f719032531c1ad6e61783b5394","src/constants.rs":"75dec8e3c74326f492a115a0e7a487daba32eba30bcbd64d2223333b3caa4008","src/err.rs":"81d5c9b75457d4367607d0a456c276fa037c0f66a1c8df40af58ff42426c9890","src/exp.rs":"61586662407359c1ecb8ed4987bc3c702f26ba2e203a091a51b6d6363cbd510f","src/ext.rs":"f3cc5bfdd96d46affff43653258d91eb447251f83da19b37dd415129052413e3","src/hkdf.rs":"6d44f63493f0c558a23339f88fe766f8afdb0bda3dc11a79e8a99d3c8d0b6acb","src/hp.rs":"854ce7b9d44892fbb01ac4078b84266771a9254cebfea5b94e7f4b4a7fb1b946","src/lib.rs":"6c3540b4e54510f6a298f04093f44f6d957f30f475214fd3ec9e39fa4d98e386","src/p11.rs":"89df482ae683646add0f46333991f31fe996fdce67859a1ff37c090a4166ce6e","src/prio.rs":"0e213056f6bf0c797c2cfe13c6d14dbb64a64b1218fff21cbf36fb3309b852f9","src/replay.rs":"01eae2accfefbc26719fcccd4bcb8c1ea6400ab96fbb696ecdb8f32164f931a2","src/result.rs":"d76c7bc5e99c80a5a124909ab586cdb91d894856e52f5146430da43584a6d6c1","src/secrets.rs":"e929b69927d93b4bde3bd490c0ed9a4e1e4c5a047492259ab1dae7fbad885c22","src/selfencrypt.rs":"9bffad6af2f186f606bd7305a8528d76e66471a71f7103c7498b90507fb031e1","src/ssl.rs":"4c7c850777a1b4b7b66ad765e24a25780e64f24da08175b5cc722a840d35f693","src/time.rs":"4dffa6f4ac9cfc8db240c370fb04a7d7241c80793ecf6acda2d41d0bc94b0236","tests/aead.rs":"bedf985ba0b95a9c6db6a84f870d15062d821f3b24cb3cb9dfa89445be795d50","tests/agent.rs":"8b9ca3c182cf065b7668fd9c7e5885b1cde8bb1d0ea3afbb5fb7a3186d7a9d2e","tests/ext.rs":"b1d2f9d68d18e24df5f246e4ad6f80a0a0d98f824094a9fa447a580b02486d90","tests/handshake.rs":"2752bd6c575e7d28db2bce84
84aa08ba08846f30aa0bb9aa07153d1763dab830","tests/hkdf.rs":"83300020a18d5485a1adcd3b806aed64fd008072589875112145f401340f3602","tests/hp.rs":"83f453a792ef17eb51a20b2764407c28a60620f5d3b917c8e237a121b32988df","tests/init.rs":"abb08d3d5d9610d62dc94535230ed4f52743d90b7059224b152caa8cf7cf43d7","tests/selfencrypt.rs":"365bc96be63d2e970bab7cf0134a59526513e1c1c3b854c34fa44fc8ed8c10d3"},"package":null}
\ No newline at end of file
diff --git a/third_party/rust/neqo-crypto/build.rs b/third_party/rust/neqo-crypto/build.rs
index 48f7a2e275e6..4a338b5bd67a 100644
--- a/third_party/rust/neqo-crypto/build.rs
+++ b/third_party/rust/neqo-crypto/build.rs
@@ -231,7 +231,9 @@ fn build_bindings(base: &str, bindings: &Bindings, flags: &[String], gecko: bool
     println!("cargo:rerun-if-changed={}", header);
 
-    let mut builder = Builder::default().header(header).generate_comments(false);
+    let mut builder = Builder::default().header(header);
+    builder = builder.generate_comments(false);
+    builder = builder.derive_debug(false);
 
     builder = builder.clang_arg("-v");
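The build.rs change splits the fluent chain so each `bindgen` option is applied as a separate rebinding, matching the conditional configuration that follows in the same function, and newly disables `Debug` derivation for the generated NSS bindings. Roughly, under the `bindgen::Builder` API as used here:

```rust
// Sketch of the builder sequence after this patch (error handling elided).
use bindgen::Builder;

fn start_builder(header: &str) -> Builder {
    let mut builder = Builder::default().header(header);
    builder = builder.generate_comments(false);
    builder = builder.derive_debug(false); // newly disabled by this patch
    builder.clang_arg("-v")
}
```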
diff --git a/third_party/rust/neqo-crypto/src/agent.rs b/third_party/rust/neqo-crypto/src/agent.rs
index 2d62a6f8c247..09c76def4df0 100644
--- a/third_party/rust/neqo-crypto/src/agent.rs
+++ b/third_party/rust/neqo-crypto/src/agent.rs
@@ -75,7 +75,7 @@ fn get_alpn(fd: *mut ssl::PRFileDesc, pre: bool) -> Res> {
         }
         _ => None,
     };
-    qinfo!([format!("{:p}", fd)] "got ALPN {:?}", alpn);
+    qinfo!([format!("{:p}", fd)], "got ALPN {:?}", alpn);
     Ok(alpn)
 }
 
@@ -290,7 +290,11 @@ impl SecretAgent {
             if st.is_none() {
                 *st = Some(alert.description);
             } else {
-                qwarn!([format!("{:p}", fd)] "duplicate alert {}", alert.description);
+                qwarn!(
+                    [format!("{:p}", fd)],
+                    "duplicate alert {}",
+                    alert.description
+                );
             }
         }
     }
@@ -510,7 +514,7 @@ impl SecretAgent {
     fn capture_error(&mut self, res: Res) -> Res {
         if let Err(e) = &res {
-            qwarn!([self] "error: {:?}", e);
+            qwarn!([self], "error: {:?}", e);
             self.state = HandshakeState::Failed(e.clone());
         }
         res
@@ -528,7 +532,7 @@ impl SecretAgent {
             let info = self.capture_error(SecretAgentInfo::new(self.fd))?;
             HandshakeState::Complete(info)
         };
-        qinfo!([self] "state -> {:?}", self.state);
+        qinfo!([self], "state -> {:?}", self.state);
         Ok(())
     }
 
@@ -602,7 +606,7 @@ impl SecretAgent {
         if let HandshakeState::Authenticated(ref err) = self.state {
             let result =
                 secstatus_to_res(unsafe { ssl::SSL_AuthCertificateComplete(self.fd, *err) });
-            qdebug!([self] "SSL_AuthCertificateComplete: {:?}", result);
+            qdebug!([self], "SSL_AuthCertificateComplete: {:?}", result);
             // This should return SECSuccess, so don't use update_state().
             self.capture_error(result)?;
         }
@@ -679,7 +683,7 @@ impl Client {
         let resumption = resumption_ptr.as_mut().unwrap();
         let mut v = Vec::with_capacity(len as usize);
         v.extend_from_slice(std::slice::from_raw_parts(token, len as usize));
-        qdebug!([format!("{:p}", fd)] "Got resumption token");
+        qdebug!([format!("{:p}", fd)], "Got resumption token");
         *resumption = Some(v);
         ssl::SECSuccess
     }
diff --git a/third_party/rust/neqo-crypto/src/agentio.rs b/third_party/rust/neqo-crypto/src/agentio.rs
index 1221788de76d..88f77f72a348 100644
--- a/third_party/rust/neqo-crypto/src/agentio.rs
+++ b/third_party/rust/neqo-crypto/src/agentio.rs
@@ -177,7 +177,7 @@ impl AgentIoInput {
         }
 
         let src = unsafe { std::slice::from_raw_parts(self.input, amount) };
-        qtrace!([self] "read {}", hex(src));
+        qtrace!([self], "read {}", hex(src));
         let dst = unsafe { std::slice::from_raw_parts_mut(buf, amount) };
         dst.copy_from_slice(&src);
         self.input = self.input.wrapping_add(amount);
@@ -186,7 +186,7 @@ impl AgentIoInput {
     }
 
     fn reset(&mut self) {
-        qtrace!([self] "reset");
+        qtrace!([self], "reset");
         self.input = null();
         self.available = 0;
     }
@@ -232,12 +232,12 @@ impl AgentIo {
 
     fn save_output(&mut self, buf: *const u8, count: usize) {
         let slice = unsafe { std::slice::from_raw_parts(buf, count) };
-        qtrace!([self] "save output {}", hex(slice));
+        qtrace!([self], "save output {}", hex(slice));
         self.output.extend_from_slice(slice);
    }
 
     pub fn take_output(&mut self) -> Vec {
-        qtrace!([self] "take output");
+        qtrace!([self], "take output");
         mem::replace(&mut self.output, Vec::new())
     }
 }
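`AgentIo::take_output` above uses the take-and-replace idiom: `mem::replace` moves the accumulated bytes out while leaving a fresh empty `Vec` behind, so the struct stays valid without cloning the buffer. The same pattern in isolation:

```rust
use std::mem;

struct Output {
    output: Vec<u8>,
}

impl Output {
    // Same shape as AgentIo::take_output above.
    fn take_output(&mut self) -> Vec<u8> {
        mem::replace(&mut self.output, Vec::new())
    }
}

fn main() {
    let mut io = Output { output: vec![0xde, 0xad] };
    assert_eq!(io.take_output(), vec![0xde, 0xad]);
    assert!(io.output.is_empty()); // ready to accumulate again
}
```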
+{"files":{"Cargo.toml":"bde1a1af255191ae9785b8f438fe72addc291496e4fa7d140437de5828aa6d8c","src/client_events.rs":"c7022955c4c7c1257204948a0a2a88ccd895dfdb64b62ff76e56bc989d066277","src/connection.rs":"9b68a6287e046cb73b0d41ec981012a0bba8d1c82959a566163bc8825acd9cb9","src/connection_client.rs":"fdd7ab7f3b398648387da54e1459959f82eb80c6334f9830da4fcbd9b904ffac","src/connection_server.rs":"def86b38c836a2dec5683d30c46f325ac8a704a28b2f9b2f83d6ac5b71e3f913","src/control_stream_local.rs":"319f8277fc4765b31a4a094bfd663e681d71831532925b0043bae5da96202e64","src/control_stream_remote.rs":"1b96316d6eecc582436382517fcffdb2bb4f9d73194301bc3e2e253d3322e95e","src/hframe.rs":"6e65670cfe5944e88a0631e39d077c8e9d225fde650c978405816f1b2028c9a1","src/lib.rs":"8b5607f7dee014e11771ea05b4ed52fcd1eafe146ee8ebee75da742921867b57","src/server_events.rs":"374b40c645443eae19a0a039f74e3f5353b1a6373c7a3089e9f270db9609596e","src/stream_type_reader.rs":"be1ea1f553292b5447f0d6d89bdfa73732189db236ce34b4067cda0276229540","src/transaction_client.rs":"2e4a2ca83bdc6583697ab5b6e8c2767b4fd46fd4215f4fe9cf79891ad5523bd6","src/transaction_server.rs":"bc663380058a2ede7a99e5618ba28ae99bc6cdbe9034bff10b6a542602b6230a","tests/httpconn.rs":"7a78d258bb940c88b3b935e4e3c76c103fb4d400ddfad14107b17a15759eb3e9"},"package":null} \ No newline at end of file diff --git a/third_party/rust/neqo-http3/src/connection.rs b/third_party/rust/neqo-http3/src/connection.rs index b645be59087b..e352451cbee8 100644 --- a/third_party/rust/neqo-http3/src/connection.rs +++ b/third_party/rust/neqo-http3/src/connection.rs @@ -193,7 +193,7 @@ impl> } fn initialize_http3_connection(&mut self) -> Res<()> { - qinfo!([self] "Initialize the http3 connection."); + qinfo!([self], "Initialize the http3 connection."); self.control_stream_local.create(&mut self.conn)?; self.send_settings(); self.create_qpack_streams()?; @@ -201,7 +201,7 @@ impl> } fn send_settings(&mut self) { - qdebug!([self] "Send settings."); + qdebug!([self], "Send settings."); self.control_stream_local.queue_frame(HFrame::Settings { settings: vec![ ( @@ -217,7 +217,7 @@ impl> } fn create_qpack_streams(&mut self) -> Res<()> { - qdebug!([self] "create_qpack_streams."); + qdebug!([self], "create_qpack_streams."); self.qpack_encoder .add_send_stream(self.conn.stream_create(StreamType::UniDi)?); self.qpack_decoder @@ -230,7 +230,7 @@ impl> fn check_result(&mut self, now: Instant, res: Res) -> bool { match &res { Err(e) => { - qinfo!([self] "Connection error: {}.", e); + qinfo!([self], "Connection error: {}.", e); self.close(now, e.code(), &format!("{}", e)); true } @@ -243,7 +243,7 @@ impl> } pub fn process(&mut self, dgram: Option, now: Instant) -> Output { - qtrace!([self] "Process."); + qtrace!([self], "Process."); if let Some(d) = dgram { self.process_input(d, now); } @@ -252,12 +252,12 @@ impl> } pub fn process_input(&mut self, dgram: Datagram, now: Instant) { - qtrace!([self] "Process input."); + qtrace!([self], "Process input."); self.conn.process_input(dgram, now); } pub fn process_timer(&mut self, now: Instant) { - qtrace!([self] "Process timer."); + qtrace!([self], "Process timer."); self.conn.process_timer(now); } @@ -266,7 +266,7 @@ impl> } pub fn process_http3(&mut self, now: Instant) { - qtrace!([self] "Process http3 internal."); + qtrace!([self], "Process http3 internal."); match self.state { Http3State::Connected | Http3State::GoingAway => { let res = self.check_connection_events(); @@ -285,7 +285,7 @@ impl> } pub fn process_output(&mut self, now: Instant) -> Output { - qtrace!([self] "Process 
output."); + qtrace!([self], "Process output."); self.conn.process_output(now) } @@ -318,9 +318,9 @@ impl> fn check_connection_events(&mut self) -> Res<()> { - qtrace!([self] "Check connection events."); + qtrace!([self], "Check connection events."); while let Some(e) = self.conn.next_event() { - qdebug!([self] "check_connection_events - event {:?}.", e); + qdebug!([self], "check_connection_events - event {:?}.", e); match e { ConnectionEvent::NewStream { stream_id, @@ -381,7 +381,7 @@ impl> } fn handle_new_stream(&mut self, stream_id: u64, stream_type: StreamType) -> Res<()> { - qinfo!([self] "A new stream: {:?} {}.", stream_type, stream_id); + qinfo!([self], "A new stream: {:?} {}.", stream_type, stream_id); assert!(self.state_active()); match stream_type { StreamType::BiDi => self.handler.handle_new_bidi_stream( @@ -416,7 +416,7 @@ impl> } fn handle_stream_readable(&mut self, stream_id: u64) -> Res<()> { - qtrace!([self] "Readable stream {}.", stream_id); + qtrace!([self], "Readable stream {}.", stream_id); assert!(self.state_active()); @@ -428,13 +428,13 @@ impl> let mut unblocked_streams: Vec = Vec::new(); if self.handle_read_stream(stream_id)? { - qdebug!([label] "Request/response stream {} read.", stream_id); + qdebug!([label], "Request/response stream {} read.", stream_id); } else if self .control_stream_remote .receive_if_this_stream(&mut self.conn, stream_id)? { qdebug!( - [self] + [self], "The remote control stream ({}) is readable.", stream_id ); @@ -450,13 +450,13 @@ impl> .recv_if_encoder_stream(&mut self.conn, stream_id)? { qdebug!( - [self] + [self], "The qpack encoder stream ({}) is readable.", stream_id ); } else if self.qpack_decoder.is_recv_stream(stream_id) { qdebug!( - [self] + [self], "The qpack decoder stream ({}) is readable.", stream_id ); @@ -482,14 +482,19 @@ impl> } for stream_id in unblocked_streams { - qinfo!([self] "Stream {} is unblocked", stream_id); + qinfo!([self], "Stream {} is unblocked", stream_id); self.handle_read_stream(stream_id)?; } Ok(()) } fn handle_stream_reset(&mut self, stream_id: u64, app_err: AppError) -> Res<()> { - qinfo!([self] "Handle a stream reset stream_id={} app_err={}", stream_id, app_err); + qinfo!( + [self], + "Handle a stream reset stream_id={} app_err={}", + stream_id, + app_err + ); assert!(self.state_active()); @@ -544,10 +549,14 @@ impl> assert!(self.state_active()); if let Some(transaction) = &mut self.transactions.get_mut(&stream_id) { - qinfo!([label] "Request/response stream {} is readable.", stream_id); + qinfo!( + [label], + "Request/response stream {} is readable.", + stream_id + ); match transaction.receive(&mut self.conn, &mut self.qpack_decoder) { Err(e) => { - qerror!([label] "Error {} ocurred", e); + qerror!([label], "Error {} ocurred", e); return Err(e); } Ok(()) => { @@ -570,17 +579,17 @@ impl> } HTTP3_UNI_STREAM_TYPE_PUSH => { - qinfo!([self] "A new push stream {}.", stream_id); + qinfo!([self], "A new push stream {}.", stream_id); self.handler.handle_new_push_stream() } QPACK_UNI_STREAM_TYPE_ENCODER => { - qinfo!([self] "A new remote qpack encoder stream {}", stream_id); + qinfo!([self], "A new remote qpack encoder stream {}", stream_id); self.qpack_decoder .add_recv_stream(stream_id) .map_err(|_| Error::HttpStreamCreationError) } QPACK_UNI_STREAM_TYPE_DECODER => { - qinfo!([self] "A new remote qpack decoder stream {}", stream_id); + qinfo!([self], "A new remote qpack decoder stream {}", stream_id); self.qpack_encoder .add_recv_stream(stream_id) .map_err(|_| Error::HttpStreamCreationError) @@ -595,7 +604,7 
@@ -595,7 +604,7 @@ impl>
     }
 
     pub fn close(&mut self, now: Instant, error: AppError, msg: &str) {
-        qinfo!([self] "Close connection error {:?} msg={}.", error, msg);
+        qinfo!([self], "Close connection error {:?} msg={}.", error, msg);
         assert!(self.state_active());
         self.state = Http3State::Closing(CloseError::Application(error));
         if !self.transactions.is_empty() && (error == 0) {
@@ -606,7 +615,7 @@ impl>
     }
 
     pub fn stream_reset(&mut self, stream_id: u64, error: AppError) -> Res<()> {
-        qinfo!([self] "Reset stream {} error={}.", stream_id, error);
+        qinfo!([self], "Reset stream {} error={}.", stream_id, error);
         assert!(self.state_active());
         let mut transaction = self
             .transactions
@@ -623,7 +632,7 @@ impl>
     }
 
     pub fn stream_close_send(&mut self, stream_id: u64) -> Res<()> {
-        qinfo!([self] "Close sending side for stream {}.", stream_id);
+        qinfo!([self], "Close sending side for stream {}.", stream_id);
         assert!(self.state_active());
         let transaction = self
             .transactions
@@ -642,15 +651,15 @@ impl>
         }
         if self.control_stream_remote.frame_reader_done() {
             let f = self.control_stream_remote.get_frame()?;
-            qinfo!([self] "Handle a control frame {:?}", f);
+            qinfo!([self], "Handle a control frame {:?}", f);
             if let HFrame::Settings { .. } = f {
                 if self.settings_received {
-                    qerror!([self] "SETTINGS frame already received");
+                    qerror!([self], "SETTINGS frame already received");
                     return Err(Error::HttpFrameUnexpected);
                 }
                 self.settings_received = true;
             } else if !self.settings_received {
-                qerror!([self] "SETTINGS frame not received");
+                qerror!([self], "SETTINGS frame not received");
                 return Err(Error::HttpMissingSettings);
             }
             return match f {
@@ -673,9 +682,9 @@ impl>
     }
 
     fn handle_settings(&mut self, s: &[(HSettingType, u64)]) -> Res<()> {
-        qinfo!([self] "Handle SETTINGS frame.");
+        qinfo!([self], "Handle SETTINGS frame.");
         for (t, v) in s {
-            qinfo!([self] " {:?} = {:?}", t, v);
+            qinfo!([self], " {:?} = {:?}", t, v);
             match t {
                 HSettingType::MaxHeaderListSize => {
                     self.max_header_list_size = *v;
@@ -690,10 +699,7 @@ impl>
     }
 
     fn state_active(&self) -> bool {
-        match self.state {
-            Http3State::Connected | Http3State::GoingAway => true,
-            _ => false,
-        }
+        matches!(self.state, Http3State::Connected | Http3State::GoingAway)
     }
 
     fn state_closing(&self) -> bool {
@@ -731,7 +737,7 @@ impl Http3Handler for Http3ClientHandler {
     fn handle_new_push_stream(&mut self) -> Res<()> {
         // TODO implement PUSH
-        qerror!([self] "PUSH is not implemented!");
+        qerror!([self], "PUSH is not implemented!");
         Err(Error::HttpIdError)
     }
 
@@ -751,7 +757,7 @@ impl Http3Handler for Http3ClientHandler {
         events: &mut Http3ClientEvents,
         stream_id: u64,
     ) -> Res<()> {
-        qtrace!([self] "Writable stream {}.", stream_id);
+        qtrace!([self], "Writable stream {}.", stream_id);
 
         if let Some(t) = transactions.get_mut(&stream_id) {
             if t.is_state_sending_data() {
@@ -769,7 +775,12 @@ impl Http3Handler for Http3ClientHandler {
         stop_stream_id: u64,
         app_err: AppError,
     ) -> Res<()> {
-        qinfo!([self] "Handle stream_stop_sending stream_id={} app_err={}", stop_stream_id, app_err);
+        qinfo!(
+            [self],
+            "Handle stream_stop_sending stream_id={} app_err={}",
+            stop_stream_id,
+            app_err
+        );
 
         if let Some(t) = transactions.get_mut(&stop_stream_id) {
             // close sending side.
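The `state_active` rewrite above is a mechanical one: a `match` that only maps patterns to `true`/`false` collapses into the `matches!` macro. A standalone equivalent (the enum here is a trimmed stand-in for neqo's `Http3State`):

```rust
enum Http3State {
    Initializing,
    Connected,
    GoingAway,
    Closed,
}

fn state_active(state: &Http3State) -> bool {
    // Equivalent to the match-over-bool form the patch removes.
    matches!(state, Http3State::Connected | Http3State::GoingAway)
}

fn main() {
    assert!(state_active(&Http3State::Connected));
    assert!(state_active(&Http3State::GoingAway));
    assert!(!state_active(&Http3State::Initializing));
    assert!(!state_active(&Http3State::Closed));
}
```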
@@ -802,7 +813,7 @@ impl Http3Handler for Http3ClientHandler {
         state: &mut Http3State,
         goaway_stream_id: u64,
     ) -> Res<()> {
-        qinfo!([self] "handle_goaway");
+        qinfo!([self], "handle_goaway");
         // Issue reset events for streams >= goaway stream id
         for id in transactions
             .iter()
@@ -823,7 +834,7 @@ impl Http3Handler for Http3ClientHandler {
     }
 
     fn handle_max_push_id(&mut self, stream_id: u64) -> Res<()> {
-        qerror!([self] "handle_max_push_id={}.", stream_id);
+        qerror!([self], "handle_max_push_id={}.", stream_id);
         Err(Error::HttpFrameUnexpected)
     }
 
@@ -856,7 +867,7 @@ impl Http3Handler for Http3ServerHandler {
     }
 
     fn handle_new_push_stream(&mut self) -> Res<()> {
-        qerror!([self] "Error: server receives a push stream!");
+        qerror!([self], "Error: server receives a push stream!");
         Err(Error::HttpStreamCreationError)
     }
 
@@ -886,7 +897,7 @@ impl Http3Handler for Http3ServerHandler {
         _state: &mut Http3State,
         _goaway_stream_id: u64,
     ) -> Res<()> {
-        qerror!([self] "handle_goaway");
+        qerror!([self], "handle_goaway");
         Err(Error::HttpFrameUnexpected)
     }
 
@@ -902,7 +913,7 @@ impl Http3Handler for Http3ServerHandler {
     }
 
     fn handle_max_push_id(&mut self, stream_id: u64) -> Res<()> {
-        qinfo!([self] "handle_max_push_id={}.", stream_id);
+        qinfo!([self], "handle_max_push_id={}.", stream_id);
         // TODO
         Ok(())
     }
diff --git a/third_party/rust/neqo-http3/src/connection_client.rs b/third_party/rust/neqo-http3/src/connection_client.rs
index 7813e5340698..82be9f5b0f81 100644
--- a/third_party/rust/neqo-http3/src/connection_client.rs
+++ b/third_party/rust/neqo-http3/src/connection_client.rs
@@ -88,7 +88,7 @@ impl Http3Client {
     }
 
     pub fn close(&mut self, now: Instant, error: AppError, msg: &str) {
-        qinfo!([self] "Close the connection error={} msg={}.", error, msg);
+        qinfo!([self], "Close the connection error={} msg={}.", error, msg);
         self.base_handler.close(now, error, msg);
     }
 
@@ -101,7 +101,7 @@ impl Http3Client {
         headers: &[Header],
     ) -> Res {
         qinfo!(
-            [self]
+            [self],
             "Fetch method={}, scheme={}, host={}, path={}",
             method,
             scheme,
@@ -125,17 +125,22 @@ impl Http3Client {
     }
 
     pub fn stream_reset(&mut self, stream_id: u64, error: AppError) -> Res<()> {
-        qinfo!([self] "reset_stream {} error={}.", stream_id, error);
+        qinfo!([self], "reset_stream {} error={}.", stream_id, error);
         self.base_handler.stream_reset(stream_id, error)
     }
 
     pub fn stream_close_send(&mut self, stream_id: u64) -> Res<()> {
-        qinfo!([self] "Close senidng side stream={}.", stream_id);
+        qinfo!([self], "Close sending side stream={}.", stream_id);
         self.base_handler.stream_close_send(stream_id)
     }
 
     pub fn send_request_body(&mut self, stream_id: u64, buf: &[u8]) -> Res {
-        qinfo!([self] "send_request_body from stream {} sending {} bytes.", stream_id, buf.len());
+        qinfo!(
+            [self],
+            "send_request_body from stream {} sending {} bytes.",
+            stream_id,
+            buf.len()
+        );
         self.base_handler
             .transactions
             .get_mut(&stream_id)
@@ -144,7 +149,7 @@ impl Http3Client {
     }
 
     pub fn read_response_headers(&mut self, stream_id: u64) -> Res<(Vec